author     Dimitry Andric <dim@FreeBSD.org>  2022-01-27 22:17:16 +0000
committer  Dimitry Andric <dim@FreeBSD.org>  2022-05-14 11:44:34 +0000
commit     04eeddc0aa8e0a417a16eaf9d7d095207f4a8623 (patch)
tree       2a5d3b2fe5c852e91531d128d9177754572d5338 /contrib
parent     0eae32dcef82f6f06de6419a0d623d7def0cc8f6 (diff)
parent     6f8fc217eaa12bf657be1c6468ed9938d10168b3 (diff)
Merge llvm-project main llvmorg-14-init-17616-g024a1fab5c35
This updates llvm, clang, compiler-rt, libc++, libunwind, lld, lldb and openmp to llvmorg-14-init-17616-g024a1fab5c35.

PR:             261742
MFC after:      2 weeks
Diffstat (limited to 'contrib')
-rw-r--r--contrib/llvm-project/clang/include/clang/APINotes/Types.h32
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTConcept.h14
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTContext.h24
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTImporterLookupTable.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/AbstractBasicReader.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/AbstractBasicWriter.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/AbstractTypeReader.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/AbstractTypeWriter.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/Attr.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/AttrIterator.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/CXXRecordDeclDefinitionBits.def3
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/Comment.h16
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/CommentLexer.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ComputeDependence.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/CurrentSourceLocExprScope.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/Decl.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DeclBase.h20
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DeclCXX.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DeclContextInternals.h10
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DeclObjC.h17
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DeclObjCCommon.h6
-rwxr-xr-xcontrib/llvm-project/clang/include/clang/AST/DeclTemplate.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DeclarationName.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/Expr.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ExprConcepts.h10
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ExprObjC.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/FormatString.h11
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/LexicallyOrderedRecursiveASTVisitor.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/LocInfoType.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/MangleNumberingContext.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/NonTrivialTypeVisitor.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/OSLog.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/OpenMPClause.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/PrettyDeclStackTrace.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/PrettyPrinter.h10
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/QualTypeNames.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/TemplateBase.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/TemplateName.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/TypeLoc.h57
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/UnresolvedSet.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchers.h39
-rw-r--r--contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Diagnostics.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Parser.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Registry.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/VariantValue.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/CalledOnceCheck.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/Consumed.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyUtil.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/AnyCall.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/BodyFarm.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/CFG.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/CloneDetection.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/ControlFlowContext.h57
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysis.h31
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h145
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h202
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowWorklist.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/MapLattice.h140
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/StorageLocation.h89
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Transfer.h33
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h34
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Value.h144
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/IssueHash.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/PathDiagnostic.h8
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/ProgramPoint.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/RetainSummaryManager.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/SelectorExtras.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/AlignedAllocation.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Attr.td9
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td80
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/AttrSubjectMatchRules.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Builtins.def8
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsNVPTX.def17
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsRISCV.def24
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86.def72
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.def10
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DarwinSDKInfo.h20
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Diagnostic.td10
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticAST.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticASTKinds.td5
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticAnalysis.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticComment.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticCrossTU.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriver.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriverKinds.td7
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticError.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticFrontend.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticGroups.td2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticIDs.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticLex.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticParse.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td8
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticRefactoring.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticSema.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td43
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticSerialization.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DirectoryEntry.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/IdentifierTable.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/LangOptions.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/OpenCLOptions.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/OperatorPrecedence.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/PartialDiagnostic.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/PragmaKinds.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/ProfileList.h8
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/RISCVVTypes.def4
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TargetID.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TargetInfo.h7
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/arm_neon.td22
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/riscv_vector.td38
-rw-r--r--contrib/llvm-project/clang/include/clang/CodeGen/CodeGenABITypes.h8
-rw-r--r--contrib/llvm-project/clang/include/clang/CodeGen/ObjectFilePCHContainerOperations.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/CodeGen/SwiftCallingConv.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Options.td33
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/SanitizerArgs.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/ToolChain.h13
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Types.def1
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Util.h1
-rwxr-xr-xcontrib/llvm-project/clang/include/clang/Format/Format.h165
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/ASTConsumers.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/CompilerInstance.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/FrontendActions.h10
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/FrontendOptions.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/PCHContainerOperations.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/PrecompiledPreamble.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/SerializedDiagnosticPrinter.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/SerializedDiagnostics.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/Utils.h15
-rw-r--r--contrib/llvm-project/clang/include/clang/IndexSerialization/SerializablePathCollection.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Interpreter/Interpreter.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/DependencyDirectivesSourceMinimizer.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/HeaderSearch.h50
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h25
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/PreprocessorExcludedConditionalDirectiveSkipMapping.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Parse/Parser.h19
-rw-r--r--contrib/llvm-project/clang/include/clang/Parse/RAIIObjectsForParser.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/AnalysisBasedWarnings.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/CleanupInfo.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/CodeCompleteConsumer.h76
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/ExternalSemaSource.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Initialization.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Overload.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/ParsedAttr.h8
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/ParsedTemplate.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/ScopeInfo.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Sema.h54
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/SemaConcept.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/TemplateInstCallback.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/ASTBitCodes.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/ASTReader.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/ASTWriter.h17
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/GlobalModuleIndex.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/ModuleFileExtension.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/ModuleManager.h13
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/PCHContainerOperations.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h8
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/MPIFunctionClassifier.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.def5
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugType.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeInfo.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/LoopUnrolling.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/RangedConstraintManager.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h23
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/AnalysisConsumer.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/FrontendActions.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/ModelConsumer.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiff.h28
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiffInternal.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/CommonOptionsParser.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/CompilationDatabase.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h353
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningService.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningWorker.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/FixIt.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/ASTSelection.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/AtomicChange.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Extract/Extract.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Extract/SourceExtraction.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Lookup.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringAction.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRule.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRuleRequirements.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRules.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRulesInternal.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOption.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptionVisitor.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptions.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringResultConsumer.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringRuleContext.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/RenamingAction.h7
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/SymbolName.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/SymbolOccurrences.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRFinder.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRFindingAction.h7
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRLocFinder.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/ReplacementsYaml.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Syntax/BuildTree.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Syntax/Tree.h17
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Tooling.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Transformer/MatchConsumer.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Transformer/Parsing.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Transformer/RangeSelector.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Transformer/RewriteRule.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Transformer/SourceCode.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Transformer/SourceCodeBuilders.h41
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/Internals.h10
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/TransAutoreleasePool.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/Transforms.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTContext.cpp398
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTImporter.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/AST/AttrImpl.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/AST/CXXABI.h1
-rw-r--r--contrib/llvm-project/clang/lib/AST/CommentLexer.cpp65
-rw-r--r--contrib/llvm-project/clang/lib/AST/Decl.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/AST/DeclBase.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/AST/DeclCXX.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/AST/DeclObjC.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp27
-rw-r--r--contrib/llvm-project/clang/lib/AST/Expr.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/AST/ExprConcepts.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/AST/ExprConstant.cpp77
-rw-r--r--contrib/llvm-project/clang/lib/AST/FormatString.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h2
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.h4
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Context.h1
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.h2
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Pointer.h3
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/PrimType.h50
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Program.h3
-rw-r--r--contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp22
-rw-r--r--contrib/llvm-project/clang/lib/AST/Mangle.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/AST/MicrosoftCXXABI.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/AST/OSLog.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp17
-rw-r--r--contrib/llvm-project/clang/lib/AST/Stmt.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/AST/TemplateName.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/AST/Type.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/AST/TypeLoc.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/AST/TypePrinter.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h7
-rw-r--r--contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/CFG.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp69
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp318
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Transfer.cpp462
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp164
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Basic/DarwinSDKInfo.cpp19
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp17
-rw-r--r--contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Basic/OpenCLOptions.cpp35
-rw-r--r--contrib/llvm-project/clang/lib/Basic/TargetID.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp57
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h7
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/ARM.h1
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/AVR.cpp559
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/AVR.h1
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/M68k.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp43
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/PPC.h2
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp31
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Sparc.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/X86.h11
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Address.h81
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp29
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp335
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h69
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp273
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp45
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h13
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp51
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCall.h11
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h1
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp24
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h3
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp20
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp37
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp122
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h2
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp29
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGRecordLayout.h4
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp132
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp74
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp46
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h31
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp36
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h17
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.h1
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h5
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp60
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.h1
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp94
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp46
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h1
-rw-r--r--contrib/llvm-project/clang/lib/Driver/Driver.cpp128
-rw-r--r--contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChain.cpp54
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp46
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp27
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.h1
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp29
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h1
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp44
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h4
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp35
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.h1
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp21
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h4
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MSVCSetupApi.h9
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/PPCLinux.cpp57
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/PPCLinux.h7
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.cpp21
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.h12
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.h1
-rw-r--r--contrib/llvm-project/clang/lib/Driver/Types.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/Edit/RewriteObjCFoundationAPI.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Format/AffectedRangeManager.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp38
-rw-r--r--contrib/llvm-project/clang/lib/Format/ContinuationIndenter.h27
-rw-r--r--contrib/llvm-project/clang/lib/Format/DefinitionBlockSeparator.cpp236
-rw-r--r--contrib/llvm-project/clang/lib/Format/DefinitionBlockSeparator.h41
-rw-r--r--contrib/llvm-project/clang/lib/Format/Format.cpp179
-rw-r--r--contrib/llvm-project/clang/lib/Format/FormatToken.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/Format/FormatToken.h11
-rw-r--r--contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp22
-rw-r--r--contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp37
-rw-r--r--contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.h24
-rw-r--r--contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp49
-rw-r--r--contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp158
-rw-r--r--contrib/llvm-project/clang/lib/Format/TokenAnnotator.h9
-rw-r--r--contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp109
-rw-r--r--contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp338
-rw-r--r--contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h30
-rw-r--r--contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp22
-rw-r--r--contrib/llvm-project/clang/lib/Format/WhitespaceManager.h5
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/ExtractAPIConsumer.cpp32
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp98
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp74
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/MultiplexConsumer.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/PrintPreprocessedOutput.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp42
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/TextDiagnostic.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_cuda_math.h2
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_hip_runtime_wrapper.h21
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx2intrin.h30
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512bwintrin.h20
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512fintrin.h72
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512vlintrin.h20
-rw-r--r--contrib/llvm-project/clang/lib/Headers/cetintrin.h10
-rw-r--r--contrib/llvm-project/clang/lib/Headers/cpuid.h2
-rw-r--r--contrib/llvm-project/clang/lib/Headers/emmintrin.h8
-rw-r--r--contrib/llvm-project/clang/lib/Headers/limits.h18
-rw-r--r--contrib/llvm-project/clang/lib/Headers/opencl-c-base.h5
-rw-r--r--contrib/llvm-project/clang/lib/Headers/opencl-c.h100
-rw-r--r--contrib/llvm-project/clang/lib/Headers/smmintrin.h16
-rw-r--r--contrib/llvm-project/clang/lib/Headers/stdatomic.h9
-rw-r--r--contrib/llvm-project/clang/lib/Headers/stdint.h168
-rw-r--r--contrib/llvm-project/clang/lib/Headers/tmmintrin.h6
-rw-r--r--contrib/llvm-project/clang/lib/Headers/vaesintrin.h2
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.h3
-rw-r--r--contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp156
-rw-r--r--contrib/llvm-project/clang/lib/Lex/InitHeaderSearch.cpp (renamed from contrib/llvm-project/clang/lib/Frontend/InitHeaderSearch.cpp)5
-rw-r--r--contrib/llvm-project/clang/lib/Lex/Lexer.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Lex/Pragma.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp22
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp29
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp24
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp38
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseInit.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp122
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp56
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseStmtAsm.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp50
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Parse/Parser.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/Rewrite/HTMLRewrite.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Rewrite/Rewriter.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp85
-rw-r--r--contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td164
-rw-r--r--contrib/llvm-project/clang/lib/Sema/Scope.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Sema/Sema.cpp41
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp125
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp453
-rwxr-xr-xcontrib/llvm-project/clang/lib/Sema/SemaConcept.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp97
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp140
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp107
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaDeclObjC.cpp54
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp70
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaExprMember.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp43
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaModule.cpp17
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaObjCProperty.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp45
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp19
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaSYCL.cpp98
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp61
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaType.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/Sema/TreeTransform.h10
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp34
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp400
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTReaderInternals.h1
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp47
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ModuleManager.cpp22
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp1412
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.h4
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h1
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Yaml.h2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp94
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/ModelInjector.h3
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp255
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/InterpolatingCompilationDatabase.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Syntax/Tree.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Transformer/Parsing.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Transformer/SourceCodeBuilders.cpp84
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp72
-rw-r--r--contrib/llvm-project/clang/tools/driver/cc1_main.cpp4
-rw-r--r--contrib/llvm-project/clang/tools/driver/cc1as_main.cpp1
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp5
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/MveEmitter.cpp3
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/NeonEmitter.cpp6
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/RISCVVEmitter.cpp104
-rw-r--r--contrib/llvm-project/compiler-rt/include/profile/InstrProfData.inc3
-rw-r--r--contrib/llvm-project/compiler-rt/include/sanitizer/dfsan_interface.h17
-rw-r--r--contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.cpp10
-rw-r--r--contrib/llvm-project/compiler-rt/lib/asan/asan_mapping.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/asan/asan_rtl_static.cpp15
-rw-r--r--contrib/llvm-project/compiler-rt/lib/asan/asan_win_dll_thunk.cpp11
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/assembly.h6
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/cpu_model.c8
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/emutls.c2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/fp_mode.h4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/udivmoddi4.c2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/cfi/cfi.cpp4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.cpp76
-rw-r--r--contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.h4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_custom.cpp25
-rw-r--r--contrib/llvm-project/compiler-rt/lib/dfsan/done_abilist.txt4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/interception/interception_win.cpp1
-rw-r--r--contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.h4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_mac.cpp6
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.cpp4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/msan/msan_interceptors.cpp3
-rw-r--r--contrib/llvm-project/compiler-rt/lib/orc/adt.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/orc/debug.cpp83
-rw-r--r--contrib/llvm-project/compiler-rt/lib/orc/debug.h56
-rw-r--r--contrib/llvm-project/compiler-rt/lib/orc/macho_ehframe_registration.cpp30
-rw-r--r--contrib/llvm-project/compiler-rt/lib/orc/macho_platform.cpp24
-rw-r--r--contrib/llvm-project/compiler-rt/lib/orc/wrapper_function_utils.h89
-rw-r--r--contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.c6
-rw-r--r--contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.h21
-rw-r--r--contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingBuffer.c56
-rw-r--r--contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingFile.c28
-rw-r--r--contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingInternal.h11
-rw-r--r--contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingMerge.c53
-rw-r--r--contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformDarwin.c9
-rw-r--r--contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c10
-rw-r--r--contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformLinux.c59
-rw-r--r--contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformOther.c22
-rw-r--r--contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformWindows.c8
-rw-r--r--contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingWriter.c27
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h6
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.h10
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h49
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp25
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h8
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_mac.cpp12
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp8
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h6
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp8
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_win.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscalls_netbsd.inc4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h8
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h45
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_unwind_win.cpp27
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp11
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt1
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/scudo_allocator.cpp9
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd.h6
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_shared.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_shared.inc2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl-old/tsan_mman.cpp4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl-old/tsan_rtl.cpp7
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mman.cpp8
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp12
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.h16
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers_cxx.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/adjacent_find.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/binary_search.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/clamp.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/copy.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/copy_backward.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/copy_if.h4
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/copy_n.h4
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/equal.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/equal_range.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/fill.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/find_end.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/find_first_of.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/in_in_result.h45
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/in_out_result.h52
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/includes.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/inplace_merge.h4
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/is_heap.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/is_heap_until.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/is_sorted_until.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/lexicographical_compare.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/lower_bound.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/make_heap.h4
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/max.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/max_element.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/merge.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/min.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/min_element.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/minmax.h3
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/minmax_element.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/mismatch.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/move.h4
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/move_backward.h4
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/next_permutation.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/nth_element.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/partial_sort.h10
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/partial_sort_copy.h5
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/partition.h1
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/partition_point.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/pop_heap.h4
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/prev_permutation.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/push_heap.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/remove.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/remove_if.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/reverse.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/rotate_copy.h4
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/search_n.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/set_difference.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/set_intersection.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/set_symmetric_difference.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/set_union.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/shift_left.h4
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/shift_right.h5
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/sift_down.h5
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/sort.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/sort_heap.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/stable_partition.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/stable_sort.h4
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/unique.h4
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/unique_copy.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/unwrap_iter.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/upper_bound.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__bit_reference2
-rw-r--r--contrib/llvm-project/libcxx/include/__chrono/calendar.h1276
-rw-r--r--contrib/llvm-project/libcxx/include/__chrono/convert_to_timespec.h55
-rw-r--r--contrib/llvm-project/libcxx/include/__chrono/duration.h615
-rw-r--r--contrib/llvm-project/libcxx/include/__chrono/file_clock.h85
-rw-r--r--contrib/llvm-project/libcxx/include/__chrono/high_resolution_clock.h36
-rw-r--r--contrib/llvm-project/libcxx/include/__chrono/steady_clock.h44
-rw-r--r--contrib/llvm-project/libcxx/include/__chrono/system_clock.h54
-rw-r--r--contrib/llvm-project/libcxx/include/__chrono/time_point.h249
-rw-r--r--contrib/llvm-project/libcxx/include/__compare/compare_three_way.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__compare/synth_three_way.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__concepts/class_or_enum.h4
-rw-r--r--contrib/llvm-project/libcxx/include/__concepts/convertible_to.h5
-rw-r--r--contrib/llvm-project/libcxx/include/__config31
-rw-r--r--contrib/llvm-project/libcxx/include/__coroutine/noop_coroutine_handle.h30
-rw-r--r--contrib/llvm-project/libcxx/include/__debug25
-rw-r--r--contrib/llvm-project/libcxx/include/__filesystem/directory_entry.h15
-rw-r--r--contrib/llvm-project/libcxx/include/__filesystem/directory_iterator.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__filesystem/filesystem_error.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__filesystem/operations.h666
-rw-r--r--contrib/llvm-project/libcxx/include/__filesystem/path.h6
-rw-r--r--contrib/llvm-project/libcxx/include/__filesystem/path_iterator.h10
-rw-r--r--contrib/llvm-project/libcxx/include/__format/format_arg.h44
-rw-r--r--contrib/llvm-project/libcxx/include/__format/formatter.h58
-rw-r--r--contrib/llvm-project/libcxx/include/__format/formatter_bool.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__format/formatter_floating_point.h717
-rw-r--r--contrib/llvm-project/libcxx/include/__format/formatter_integer.h24
-rw-r--r--contrib/llvm-project/libcxx/include/__format/formatter_integral.h10
-rw-r--r--contrib/llvm-project/libcxx/include/__format/formatter_pointer.h91
-rw-r--r--contrib/llvm-project/libcxx/include/__format/formatter_string.h10
-rw-r--r--contrib/llvm-project/libcxx/include/__format/parser_std_format_spec.h274
-rw-r--r--contrib/llvm-project/libcxx/include/__function_like.h51
-rw-r--r--contrib/llvm-project/libcxx/include/__functional/bind.h26
-rw-r--r--contrib/llvm-project/libcxx/include/__functional/bind_front.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__functional/function.h5
-rw-r--r--contrib/llvm-project/libcxx/include/__functional/hash.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__functional/mem_fn.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__functional/mem_fun_ref.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__functional/not_fn.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__functional/reference_wrapper.h19
-rw-r--r--contrib/llvm-project/libcxx/include/__hash_table86
-rw-r--r--contrib/llvm-project/libcxx/include/__iterator/advance.h31
-rw-r--r--contrib/llvm-project/libcxx/include/__iterator/common_iterator.h81
-rw-r--r--contrib/llvm-project/libcxx/include/__iterator/counted_iterator.h4
-rw-r--r--contrib/llvm-project/libcxx/include/__iterator/indirectly_comparable.h30
-rw-r--r--contrib/llvm-project/libcxx/include/__iterator/move_iterator.h147
-rw-r--r--contrib/llvm-project/libcxx/include/__iterator/next.h15
-rw-r--r--contrib/llvm-project/libcxx/include/__iterator/prev.h15
-rw-r--r--contrib/llvm-project/libcxx/include/__iterator/readable_traits.h10
-rw-r--r--contrib/llvm-project/libcxx/include/__iterator/reverse_iterator.h17
-rw-r--r--contrib/llvm-project/libcxx/include/__iterator/wrap_iter.h73
-rw-r--r--contrib/llvm-project/libcxx/include/__locale42
-rw-r--r--contrib/llvm-project/libcxx/include/__memory/construct_at.h12
-rw-r--r--contrib/llvm-project/libcxx/include/__memory/ranges_construct_at.h124
-rw-r--r--contrib/llvm-project/libcxx/include/__memory/ranges_uninitialized_algorithms.h190
-rw-r--r--contrib/llvm-project/libcxx/include/__memory/shared_ptr.h6
-rw-r--r--contrib/llvm-project/libcxx/include/__memory/uninitialized_algorithms.h177
-rw-r--r--contrib/llvm-project/libcxx/include/__memory/unique_ptr.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__mutex_base1
-rw-r--r--contrib/llvm-project/libcxx/include/__random/chi_squared_distribution.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__random/gamma_distribution.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__random/lognormal_distribution.h136
-rw-r--r--contrib/llvm-project/libcxx/include/__random/random_device.h19
-rw-r--r--contrib/llvm-project/libcxx/include/__random/seed_seq.h31
-rw-r--r--contrib/llvm-project/libcxx/include/__ranges/access.h51
-rw-r--r--contrib/llvm-project/libcxx/include/__ranges/all.h8
-rw-r--r--contrib/llvm-project/libcxx/include/__ranges/concepts.h27
-rw-r--r--contrib/llvm-project/libcxx/include/__ranges/data.h51
-rw-r--r--contrib/llvm-project/libcxx/include/__ranges/empty.h9
-rw-r--r--contrib/llvm-project/libcxx/include/__ranges/empty_view.h4
-rw-r--r--contrib/llvm-project/libcxx/include/__ranges/enable_view.h12
-rw-r--r--contrib/llvm-project/libcxx/include/__ranges/owning_view.h81
-rw-r--r--contrib/llvm-project/libcxx/include/__ranges/ref_view.h3
-rw-r--r--contrib/llvm-project/libcxx/include/__ranges/single_view.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__ranges/size.h10
-rw-r--r--contrib/llvm-project/libcxx/include/__ranges/subrange.h10
-rw-r--r--contrib/llvm-project/libcxx/include/__ranges/transform_view.h4
-rw-r--r--contrib/llvm-project/libcxx/include/__ranges/view_interface.h3
-rw-r--r--contrib/llvm-project/libcxx/include/__string8
-rw-r--r--contrib/llvm-project/libcxx/include/__thread/poll_with_backoff.h1
-rw-r--r--contrib/llvm-project/libcxx/include/__thread/timed_backoff_policy.h45
-rw-r--r--contrib/llvm-project/libcxx/include/__threading_support53
-rw-r--r--contrib/llvm-project/libcxx/include/__utility/swap.h2
-rw-r--r--contrib/llvm-project/libcxx/include/algorithm23
-rw-r--r--contrib/llvm-project/libcxx/include/atomic1
-rw-r--r--contrib/llvm-project/libcxx/include/barrier1
-rw-r--r--contrib/llvm-project/libcxx/include/bitset3
-rw-r--r--contrib/llvm-project/libcxx/include/chrono2141
-rw-r--r--contrib/llvm-project/libcxx/include/cmath19
-rw-r--r--contrib/llvm-project/libcxx/include/codecvt1
-rw-r--r--contrib/llvm-project/libcxx/include/compare1
-rw-r--r--contrib/llvm-project/libcxx/include/condition_variable1
-rw-r--r--contrib/llvm-project/libcxx/include/coroutine2
-rw-r--r--contrib/llvm-project/libcxx/include/execution1
-rw-r--r--contrib/llvm-project/libcxx/include/experimental/__memory2
-rw-r--r--contrib/llvm-project/libcxx/include/experimental/algorithm5
-rw-r--r--contrib/llvm-project/libcxx/include/experimental/coroutine8
-rw-r--r--contrib/llvm-project/libcxx/include/experimental/deque2
-rw-r--r--contrib/llvm-project/libcxx/include/experimental/forward_list2
-rw-r--r--contrib/llvm-project/libcxx/include/experimental/functional10
-rw-r--r--contrib/llvm-project/libcxx/include/experimental/iterator2
-rw-r--r--contrib/llvm-project/libcxx/include/experimental/list2
-rw-r--r--contrib/llvm-project/libcxx/include/experimental/map2
-rw-r--r--contrib/llvm-project/libcxx/include/experimental/memory_resource8
-rw-r--r--contrib/llvm-project/libcxx/include/experimental/propagate_const9
-rw-r--r--contrib/llvm-project/libcxx/include/experimental/regex4
-rw-r--r--contrib/llvm-project/libcxx/include/experimental/set2
-rw-r--r--contrib/llvm-project/libcxx/include/experimental/simd10
-rw-r--r--contrib/llvm-project/libcxx/include/experimental/string2
-rw-r--r--contrib/llvm-project/libcxx/include/experimental/unordered_map2
-rw-r--r--contrib/llvm-project/libcxx/include/experimental/unordered_set2
-rw-r--r--contrib/llvm-project/libcxx/include/experimental/vector2
-rw-r--r--contrib/llvm-project/libcxx/include/ext/__hash2
-rw-r--r--contrib/llvm-project/libcxx/include/ext/hash_map2
-rw-r--r--contrib/llvm-project/libcxx/include/ext/hash_set2
-rw-r--r--contrib/llvm-project/libcxx/include/filesystem2
-rw-r--r--contrib/llvm-project/libcxx/include/format162
-rw-r--r--contrib/llvm-project/libcxx/include/fstream1
-rw-r--r--contrib/llvm-project/libcxx/include/functional2
-rw-r--r--contrib/llvm-project/libcxx/include/future1
-rw-r--r--contrib/llvm-project/libcxx/include/ios1
-rw-r--r--contrib/llvm-project/libcxx/include/iosfwd1
-rw-r--r--contrib/llvm-project/libcxx/include/iostream1
-rw-r--r--contrib/llvm-project/libcxx/include/iterator14
-rw-r--r--contrib/llvm-project/libcxx/include/latch1
-rw-r--r--contrib/llvm-project/libcxx/include/list214
-rw-r--r--contrib/llvm-project/libcxx/include/locale6
-rw-r--r--contrib/llvm-project/libcxx/include/math.h2
-rw-r--r--contrib/llvm-project/libcxx/include/memory126
-rw-r--r--contrib/llvm-project/libcxx/include/module.modulemap78
-rw-r--r--contrib/llvm-project/libcxx/include/numbers60
-rw-r--r--contrib/llvm-project/libcxx/include/optional4
-rw-r--r--contrib/llvm-project/libcxx/include/queue45
-rw-r--r--contrib/llvm-project/libcxx/include/random1
-rw-r--r--contrib/llvm-project/libcxx/include/ranges9
-rw-r--r--contrib/llvm-project/libcxx/include/ratio1
-rw-r--r--contrib/llvm-project/libcxx/include/regex38
-rw-r--r--contrib/llvm-project/libcxx/include/semaphore2
-rw-r--r--contrib/llvm-project/libcxx/include/sstream1
-rw-r--r--contrib/llvm-project/libcxx/include/stack44
-rw-r--r--contrib/llvm-project/libcxx/include/stdexcept2
-rw-r--r--contrib/llvm-project/libcxx/include/streambuf2
-rw-r--r--contrib/llvm-project/libcxx/include/string244
-rw-r--r--contrib/llvm-project/libcxx/include/strstream1
-rw-r--r--contrib/llvm-project/libcxx/include/system_error1
-rw-r--r--contrib/llvm-project/libcxx/include/thread2
-rw-r--r--contrib/llvm-project/libcxx/include/tuple29
-rw-r--r--contrib/llvm-project/libcxx/include/type_traits2
-rw-r--r--contrib/llvm-project/libcxx/include/typeindex1
-rw-r--r--contrib/llvm-project/libcxx/include/unordered_map181
-rw-r--r--contrib/llvm-project/libcxx/include/unordered_set131
-rw-r--r--contrib/llvm-project/libcxx/include/valarray1
-rw-r--r--contrib/llvm-project/libcxx/include/vector130
-rw-r--r--contrib/llvm-project/libcxx/include/version27
-rw-r--r--contrib/llvm-project/libcxx/src/atomic.cpp3
-rw-r--r--contrib/llvm-project/libcxx/src/chrono.cpp23
-rw-r--r--contrib/llvm-project/libcxx/src/chrono_system_time_init.h2
-rw-r--r--contrib/llvm-project/libcxx/src/experimental/memory_resource_init_helper.h2
-rw-r--r--contrib/llvm-project/libcxx/src/filesystem/directory_iterator.cpp89
-rw-r--r--contrib/llvm-project/libcxx/src/filesystem/filesystem_common.h83
-rw-r--r--contrib/llvm-project/libcxx/src/iostream_init.h2
-rw-r--r--contrib/llvm-project/libcxx/src/locale.cpp43
-rw-r--r--contrib/llvm-project/libcxx/src/random.cpp29
-rw-r--r--contrib/llvm-project/libcxx/src/regex.cpp120
-rw-r--r--contrib/llvm-project/libcxx/src/string.cpp2
-rw-r--r--contrib/llvm-project/libunwind/src/AddressSpace.hpp24
-rw-r--r--contrib/llvm-project/libunwind/src/DwarfParser.hpp6
-rw-r--r--contrib/llvm-project/lld/COFF/COFFLinkerContext.h3
-rw-r--r--contrib/llvm-project/lld/COFF/Chunks.cpp5
-rw-r--r--contrib/llvm-project/lld/COFF/Config.h4
-rw-r--r--contrib/llvm-project/lld/COFF/DLL.cpp4
-rw-r--r--contrib/llvm-project/lld/COFF/Driver.cpp89
-rw-r--r--contrib/llvm-project/lld/COFF/Driver.h3
-rw-r--r--contrib/llvm-project/lld/COFF/DriverUtils.cpp24
-rw-r--r--contrib/llvm-project/lld/COFF/InputFiles.cpp66
-rw-r--r--contrib/llvm-project/lld/COFF/InputFiles.h40
-rw-r--r--contrib/llvm-project/lld/COFF/LTO.cpp6
-rw-r--r--contrib/llvm-project/lld/COFF/MinGW.cpp7
-rw-r--r--contrib/llvm-project/lld/COFF/PDB.cpp13
-rw-r--r--contrib/llvm-project/lld/COFF/SymbolTable.cpp38
-rw-r--r--contrib/llvm-project/lld/COFF/SymbolTable.h2
-rw-r--r--contrib/llvm-project/lld/COFF/Symbols.cpp4
-rw-r--r--contrib/llvm-project/lld/COFF/Symbols.h5
-rw-r--r--contrib/llvm-project/lld/COFF/Writer.cpp4
-rw-r--r--contrib/llvm-project/lld/Common/CommonLinkerContext.cpp45
-rw-r--r--contrib/llvm-project/lld/Common/ErrorHandler.cpp69
-rw-r--r--contrib/llvm-project/lld/Common/Memory.cpp19
-rw-r--r--contrib/llvm-project/lld/Common/Strings.cpp13
-rw-r--r--contrib/llvm-project/lld/Common/TargetOptionsCommandFlags.cpp3
-rw-r--r--contrib/llvm-project/lld/Common/Timer.cpp1
-rw-r--r--contrib/llvm-project/lld/ELF/AArch64ErrataFix.cpp10
-rw-r--r--contrib/llvm-project/lld/ELF/ARMErrataFix.cpp8
-rw-r--r--contrib/llvm-project/lld/ELF/Arch/AArch64.cpp92
-rw-r--r--contrib/llvm-project/lld/ELF/Arch/PPC.cpp2
-rw-r--r--contrib/llvm-project/lld/ELF/Arch/PPC64.cpp7
-rw-r--r--contrib/llvm-project/lld/ELF/Arch/X86.cpp8
-rw-r--r--contrib/llvm-project/lld/ELF/Arch/X86_64.cpp15
-rw-r--r--contrib/llvm-project/lld/ELF/Config.h4
-rw-r--r--contrib/llvm-project/lld/ELF/Driver.cpp171
-rw-r--r--contrib/llvm-project/lld/ELF/Driver.h2
-rw-r--r--contrib/llvm-project/lld/ELF/DriverUtils.cpp7
-rw-r--r--contrib/llvm-project/lld/ELF/EhFrame.cpp23
-rw-r--r--contrib/llvm-project/lld/ELF/EhFrame.h1
-rw-r--r--contrib/llvm-project/lld/ELF/ICF.cpp5
-rw-r--r--contrib/llvm-project/lld/ELF/InputFiles.cpp213
-rw-r--r--contrib/llvm-project/lld/ELF/InputFiles.h6
-rw-r--r--contrib/llvm-project/lld/ELF/InputSection.cpp241
-rw-r--r--contrib/llvm-project/lld/ELF/InputSection.h22
-rw-r--r--contrib/llvm-project/lld/ELF/LTO.cpp2
-rw-r--r--contrib/llvm-project/lld/ELF/LinkerScript.cpp88
-rw-r--r--contrib/llvm-project/lld/ELF/LinkerScript.h37
-rw-r--r--contrib/llvm-project/lld/ELF/MapFile.cpp22
-rw-r--r--contrib/llvm-project/lld/ELF/MarkLive.cpp19
-rw-r--r--contrib/llvm-project/lld/ELF/Options.td12
-rw-r--r--contrib/llvm-project/lld/ELF/OutputSections.cpp140
-rw-r--r--contrib/llvm-project/lld/ELF/OutputSections.h16
-rw-r--r--contrib/llvm-project/lld/ELF/Relocations.cpp285
-rw-r--r--contrib/llvm-project/lld/ELF/Relocations.h3
-rw-r--r--contrib/llvm-project/lld/ELF/ScriptParser.cpp62
-rw-r--r--contrib/llvm-project/lld/ELF/SymbolTable.cpp18
-rw-r--r--contrib/llvm-project/lld/ELF/SymbolTable.h13
-rw-r--r--contrib/llvm-project/lld/ELF/Symbols.cpp42
-rw-r--r--contrib/llvm-project/lld/ELF/Symbols.h97
-rw-r--r--contrib/llvm-project/lld/ELF/SyntheticSections.cpp293
-rw-r--r--contrib/llvm-project/lld/ELF/SyntheticSections.h88
-rw-r--r--contrib/llvm-project/lld/ELF/Target.cpp2
-rw-r--r--contrib/llvm-project/lld/ELF/Target.h10
-rw-r--r--contrib/llvm-project/lld/ELF/Thunks.cpp57
-rw-r--r--contrib/llvm-project/lld/ELF/Writer.cpp338
-rw-r--r--contrib/llvm-project/lld/MachO/Arch/ARM.cpp2
-rw-r--r--contrib/llvm-project/lld/MachO/ConcatOutputSection.cpp9
-rw-r--r--contrib/llvm-project/lld/MachO/Config.h12
-rw-r--r--contrib/llvm-project/lld/MachO/Driver.cpp246
-rw-r--r--contrib/llvm-project/lld/MachO/Driver.h12
-rw-r--r--contrib/llvm-project/lld/MachO/DriverUtils.cpp11
-rw-r--r--contrib/llvm-project/lld/MachO/InputFiles.cpp148
-rw-r--r--contrib/llvm-project/lld/MachO/InputFiles.h38
-rw-r--r--contrib/llvm-project/lld/MachO/InputSection.h8
-rw-r--r--contrib/llvm-project/lld/MachO/LTO.cpp4
-rw-r--r--contrib/llvm-project/lld/MachO/MapFile.cpp37
-rw-r--r--contrib/llvm-project/lld/MachO/Options.td20
-rw-r--r--contrib/llvm-project/lld/MachO/SectionPriorities.cpp379
-rw-r--r--contrib/llvm-project/lld/MachO/SectionPriorities.h55
-rw-r--r--contrib/llvm-project/lld/MachO/SymbolTable.cpp34
-rw-r--r--contrib/llvm-project/lld/MachO/SymbolTable.h7
-rw-r--r--contrib/llvm-project/lld/MachO/Symbols.cpp14
-rw-r--r--contrib/llvm-project/lld/MachO/Symbols.h29
-rw-r--r--contrib/llvm-project/lld/MachO/SyntheticSections.cpp39
-rw-r--r--contrib/llvm-project/lld/MachO/UnwindInfoSection.cpp2
-rw-r--r--contrib/llvm-project/lld/MachO/Writer.cpp115
-rw-r--r--contrib/llvm-project/lld/MachO/ld64-vs-lld.rst16
-rw-r--r--contrib/llvm-project/lld/docs/_templates/indexsidebar.html9
-rw-r--r--contrib/llvm-project/lld/docs/ld.lld.12
-rw-r--r--contrib/llvm-project/lld/include/lld/Common/CommonLinkerContext.h65
-rw-r--r--contrib/llvm-project/lld/include/lld/Common/Driver.h21
-rw-r--r--contrib/llvm-project/lld/include/lld/Common/ErrorHandler.h32
-rw-r--r--contrib/llvm-project/lld/include/lld/Common/Memory.h43
-rw-r--r--contrib/llvm-project/lld/include/lld/Common/Strings.h11
-rw-r--r--contrib/llvm-project/lld/include/lld/Core/LinkingContext.h3
-rw-r--r--contrib/llvm-project/lld/tools/lld/lld.cpp58
-rw-r--r--contrib/llvm-project/lldb/bindings/interface/SBModule.i7
-rw-r--r--contrib/llvm-project/lldb/bindings/interface/SBPlatform.i3
-rw-r--r--contrib/llvm-project/lldb/bindings/interface/SBThread.i6
-rw-r--r--contrib/llvm-project/lldb/bindings/python/python-swigsafecast.swig43
-rw-r--r--contrib/llvm-project/lldb/bindings/python/python-wrapper.swig155
-rw-r--r--contrib/llvm-project/lldb/bindings/python/python.swig9
-rw-r--r--contrib/llvm-project/lldb/include/lldb/API/SBDebugger.h4
-rw-r--r--contrib/llvm-project/lldb/include/lldb/API/SBDefines.h12
-rw-r--r--contrib/llvm-project/lldb/include/lldb/API/SBModule.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/API/SBPlatform.h3
-rw-r--r--contrib/llvm-project/lldb/include/lldb/API/SBTarget.h1
-rw-r--r--contrib/llvm-project/lldb/include/lldb/API/SBThread.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/API/SBType.h1
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Breakpoint/Breakpoint.h4
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Breakpoint/BreakpointOptions.h9
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Breakpoint/WatchpointOptions.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Core/Address.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Core/Debugger.h4
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Core/Declaration.h6
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Core/Disassembler.h4
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Core/DumpDataExtractor.h12
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Core/FileSpecList.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Core/FormatEntity.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Core/LoadedModuleInfoList.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Core/Mangled.h30
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Core/Module.h6
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Core/ModuleSpec.h18
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Core/StructuredDataImpl.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Core/ThreadSafeValue.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Core/UniqueCStringMap.h31
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Core/Value.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Core/ValueObject.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/DataFormatters/FormatClasses.h8
-rw-r--r--contrib/llvm-project/lldb/include/lldb/DataFormatters/TypeSynthetic.h6
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Expression/IRExecutionUnit.h7
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Host/Debug.h8
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Host/FileSystem.h9
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Host/HostNativeThreadBase.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Host/XML.h4
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Host/common/NativeProcessProtocol.h5
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Host/common/NativeThreadProtocol.h9
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Interpreter/CommandInterpreter.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Interpreter/CommandObject.h5
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Interpreter/CommandObjectMultiword.h10
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Interpreter/OptionValueArray.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Interpreter/OptionValueProperties.h3
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Interpreter/Options.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Interpreter/ScriptedInterface.h9
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Interpreter/ScriptedProcessInterface.h12
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Symbol/CompactUnwindInfo.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Symbol/CompileUnit.h4
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Symbol/DWARFCallFrameInfo.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Symbol/Function.h6
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Symbol/LineTable.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Symbol/ObjectContainer.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Symbol/SymbolFile.h36
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Symbol/Symtab.h27
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Symbol/Type.h15
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Symbol/UnwindPlan.h8
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Target/InstrumentationRuntime.h3
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Target/Language.h7
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Target/MemoryTagManager.h23
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Target/MemoryTagMap.h98
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Target/Platform.h4
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Target/Process.h15
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Target/RegisterCheckpoint.h3
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Target/SectionLoadHistory.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Target/SectionLoadList.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Target/StackFrame.h14
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Target/Statistics.h44
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Target/Target.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Target/Thread.h8
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Target/Unwind.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Target/UnwindLLDB.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Utility/ConstString.h20
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Utility/Environment.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Utility/Event.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Utility/FileSpec.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Utility/GDBRemote.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Utility/Instrumentation.h105
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Utility/Log.h57
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Utility/Logging.h116
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Utility/Predicate.h5
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Utility/ProcessInfo.h7
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Utility/ReproducerInstrumentation.h1105
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Utility/ReproducerProvider.h3
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Utility/SharedCluster.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Utility/StreamTee.h12
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Utility/StringExtractorGDBRemote.h2
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Utility/StringList.h3
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Utility/StructuredData.h5
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Utility/UserIDResolver.h1
-rw-r--r--contrib/llvm-project/lldb/include/lldb/lldb-defines.h12
-rw-r--r--contrib/llvm-project/lldb/include/lldb/module.modulemap2
-rw-r--r--contrib/llvm-project/lldb/source/API/SBAddress.cpp117
-rw-r--r--contrib/llvm-project/lldb/source/API/SBAttachInfo.cpp133
-rw-r--r--contrib/llvm-project/lldb/source/API/SBBlock.cpp114
-rw-r--r--contrib/llvm-project/lldb/source/API/SBBreakpoint.cpp334
-rw-r--r--contrib/llvm-project/lldb/source/API/SBBreakpointLocation.cpp175
-rw-r--r--contrib/llvm-project/lldb/source/API/SBBreakpointName.cpp196
-rw-r--r--contrib/llvm-project/lldb/source/API/SBBreakpointOptionCommon.cpp24
-rw-r--r--contrib/llvm-project/lldb/source/API/SBBroadcaster.cpp92
-rw-r--r--contrib/llvm-project/lldb/source/API/SBCommandInterpreter.cpp348
-rw-r--r--contrib/llvm-project/lldb/source/API/SBCommandInterpreterRunOptions.cpp165
-rw-r--r--contrib/llvm-project/lldb/source/API/SBCommandReturnObject.cpp174
-rw-r--r--contrib/llvm-project/lldb/source/API/SBCommunication.cpp83
-rw-r--r--contrib/llvm-project/lldb/source/API/SBCompileUnit.cpp111
-rw-r--r--contrib/llvm-project/lldb/source/API/SBData.cpp233
-rw-r--r--contrib/llvm-project/lldb/source/API/SBDebugger.cpp766
-rw-r--r--contrib/llvm-project/lldb/source/API/SBDeclaration.cpp81
-rw-r--r--contrib/llvm-project/lldb/source/API/SBEnvironment.cpp63
-rw-r--r--contrib/llvm-project/lldb/source/API/SBError.cpp69
-rw-r--r--contrib/llvm-project/lldb/source/API/SBEvent.cpp87
-rw-r--r--contrib/llvm-project/lldb/source/API/SBExecutionContext.cpp75
-rw-r--r--contrib/llvm-project/lldb/source/API/SBExpressionOptions.cpp174
-rw-r--r--contrib/llvm-project/lldb/source/API/SBFile.cpp71
-rw-r--r--contrib/llvm-project/lldb/source/API/SBFileSpec.cpp85
-rw-r--r--contrib/llvm-project/lldb/source/API/SBFileSpecList.cpp64
-rw-r--r--contrib/llvm-project/lldb/source/API/SBFrame.cpp277
-rw-r--r--contrib/llvm-project/lldb/source/API/SBFunction.cpp100
-rw-r--r--contrib/llvm-project/lldb/source/API/SBHostOS.cpp63
-rw-r--r--contrib/llvm-project/lldb/source/API/SBInstruction.cpp103
-rw-r--r--contrib/llvm-project/lldb/source/API/SBInstructionList.cpp83
-rw-r--r--contrib/llvm-project/lldb/source/API/SBLanguageRuntime.cpp23
-rw-r--r--contrib/llvm-project/lldb/source/API/SBLaunchInfo.cpp207
-rw-r--r--contrib/llvm-project/lldb/source/API/SBLineEntry.cpp85
-rw-r--r--contrib/llvm-project/lldb/source/API/SBListener.cpp125
-rw-r--r--contrib/llvm-project/lldb/source/API/SBMemoryRegionInfo.cpp96
-rw-r--r--contrib/llvm-project/lldb/source/API/SBMemoryRegionInfoList.cpp59
-rw-r--r--contrib/llvm-project/lldb/source/API/SBModule.cpp287
-rw-r--r--contrib/llvm-project/lldb/source/API/SBModuleSpec.cpp155
-rw-r--r--contrib/llvm-project/lldb/source/API/SBPlatform.cpp379
-rw-r--r--contrib/llvm-project/lldb/source/API/SBProcess.cpp458
-rw-r--r--contrib/llvm-project/lldb/source/API/SBProcessInfo.cpp80
-rw-r--r--contrib/llvm-project/lldb/source/API/SBQueue.cpp83
-rw-r--r--contrib/llvm-project/lldb/source/API/SBQueueItem.cpp56
-rw-r--r--contrib/llvm-project/lldb/source/API/SBReproducer.cpp120
-rw-r--r--contrib/llvm-project/lldb/source/API/SBReproducerPrivate.h78
-rw-r--r--contrib/llvm-project/lldb/source/API/SBSection.cpp115
-rw-r--r--contrib/llvm-project/lldb/source/API/SBSourceManager.cpp57
-rw-r--r--contrib/llvm-project/lldb/source/API/SBStream.cpp51
-rw-r--r--contrib/llvm-project/lldb/source/API/SBStringList.cpp66
-rw-r--r--contrib/llvm-project/lldb/source/API/SBStructuredData.cpp116
-rw-r--r--contrib/llvm-project/lldb/source/API/SBSymbol.cpp90
-rw-r--r--contrib/llvm-project/lldb/source/API/SBSymbolContext.cpp120
-rw-r--r--contrib/llvm-project/lldb/source/API/SBSymbolContextList.cpp66
-rw-r--r--contrib/llvm-project/lldb/source/API/SBTarget.cpp999
-rw-r--r--contrib/llvm-project/lldb/source/API/SBThread.cpp396
-rw-r--r--contrib/llvm-project/lldb/source/API/SBThreadCollection.cpp47
-rw-r--r--contrib/llvm-project/lldb/source/API/SBThreadPlan.cpp213
-rw-r--r--contrib/llvm-project/lldb/source/API/SBTrace.cpp54
-rw-r--r--contrib/llvm-project/lldb/source/API/SBType.cpp432
-rw-r--r--contrib/llvm-project/lldb/source/API/SBTypeCategory.cpp270
-rw-r--r--contrib/llvm-project/lldb/source/API/SBTypeEnumMember.cpp102
-rw-r--r--contrib/llvm-project/lldb/source/API/SBTypeFilter.cpp83
-rw-r--r--contrib/llvm-project/lldb/source/API/SBTypeFormat.cpp77
-rw-r--r--contrib/llvm-project/lldb/source/API/SBTypeNameSpecifier.cpp82
-rw-r--r--contrib/llvm-project/lldb/source/API/SBTypeSummary.cpp177
-rw-r--r--contrib/llvm-project/lldb/source/API/SBTypeSynthetic.cpp102
-rw-r--r--contrib/llvm-project/lldb/source/API/SBUnixSignals.cpp82
-rw-r--r--contrib/llvm-project/lldb/source/API/SBValue.cpp452
-rw-r--r--contrib/llvm-project/lldb/source/API/SBValueList.cpp76
-rw-r--r--contrib/llvm-project/lldb/source/API/SBVariablesOptions.cpp98
-rw-r--r--contrib/llvm-project/lldb/source/API/SBWatchpoint.cpp124
-rw-r--r--contrib/llvm-project/lldb/source/API/SystemInitializerFull.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Breakpoint/Breakpoint.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Breakpoint/BreakpointResolverFileRegex.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandCompletions.cpp4
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectBreakpoint.cpp56
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectBreakpointCommand.cpp9
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectCommands.cpp34
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectDisassemble.cpp3
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectExpression.cpp6
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectFrame.cpp74
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectHelp.cpp3
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectHelp.h2
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectLog.cpp5
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectMemory.cpp47
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectMemoryTag.cpp28
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectMultiword.cpp35
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectPlatform.cpp36
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectProcess.cpp45
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectRegexCommand.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectRegister.cpp6
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectReproducer.cpp6
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectScript.h2
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectSession.cpp6
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectSettings.cpp15
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectSource.cpp12
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectStats.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectTarget.cpp55
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectThread.cpp43
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectTrace.cpp15
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectType.cpp44
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectWatchpoint.cpp26
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectWatchpointCommand.cpp5
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandOptionsProcessLaunch.h2
-rw-r--r--contrib/llvm-project/lldb/source/Commands/Options.td5
-rw-r--r--contrib/llvm-project/lldb/source/Core/CoreProperties.td4
-rw-r--r--contrib/llvm-project/lldb/source/Core/Debugger.cpp22
-rw-r--r--contrib/llvm-project/lldb/source/Core/DumpDataExtractor.cpp129
-rw-r--r--contrib/llvm-project/lldb/source/Core/IOHandlerCursesGUI.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Core/Mangled.cpp14
-rw-r--r--contrib/llvm-project/lldb/source/DataFormatters/FormatManager.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Expression/DWARFExpression.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Expression/Materializer.cpp4
-rw-r--r--contrib/llvm-project/lldb/source/Host/common/Host.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Host/common/HostNativeThreadBase.cpp6
-rw-r--r--contrib/llvm-project/lldb/source/Host/common/Socket.cpp5
-rw-r--r--contrib/llvm-project/lldb/source/Host/common/Terminal.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Host/common/XML.cpp29
-rw-r--r--contrib/llvm-project/lldb/source/Host/posix/ProcessLauncherPosixFork.cpp159
-rw-r--r--contrib/llvm-project/lldb/source/Interpreter/CommandInterpreter.cpp43
-rw-r--r--contrib/llvm-project/lldb/source/Interpreter/OptionValueProperties.cpp11
-rw-r--r--contrib/llvm-project/lldb/source/Interpreter/Options.cpp15
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Disassembler/LLVMC/DisassemblerLLVMC.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/DynamicLoader/POSIX-DYLD/DynamicLoaderPOSIXDYLD.cpp4
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp3
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/ExpressionParser/Clang/CxxModuleHandler.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Instruction/MIPS/EmulateInstructionMIPS.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Instruction/MIPS64/EmulateInstructionMIPS64.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/InstrumentationRuntime/MainThreadChecker/InstrumentationRuntimeMainThreadChecker.cpp6
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/InstrumentationRuntime/TSan/InstrumentationRuntimeTSan.cpp6
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.cpp45
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/GenericBitset.cpp1
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/LibCxx.cpp132
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/LibCxx.h16
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Language/ObjC/NSString.cpp37
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.cpp28
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.h1
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCTrampolineHandler.cpp4
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCTypeEncodingParser.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/ObjectFile/Minidump/MinidumpFileBuilder.cpp7
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/ObjectFile/PDB/ObjectFilePDB.h2
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Platform/FreeBSD/PlatformFreeBSD.cpp94
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Platform/FreeBSD/PlatformFreeBSD.h6
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Platform/NetBSD/PlatformNetBSD.cpp141
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Platform/NetBSD/PlatformNetBSD.h6
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Platform/QemuUser/PlatformQemuUser.cpp13
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Platform/QemuUser/PlatformQemuUser.h8
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Platform/QemuUser/PlatformQemuUserProperties.td2
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Platform/gdb-server/PlatformRemoteGDBServer.cpp173
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Platform/gdb-server/PlatformRemoteGDBServer.h5
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeProcessFreeBSD.cpp45
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeRegisterContextFreeBSD_arm64.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeThreadFreeBSD.cpp30
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeThreadFreeBSD.h3
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/FreeBSDKernel/ProcessFreeBSDKernel.cpp121
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/FreeBSDKernel/ProcessFreeBSDKernel.h2
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/FreeBSDKernel/ThreadFreeBSDKernel.cpp11
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/FreeBSDKernel/ThreadFreeBSDKernel.h16
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/NetBSD/NativeProcessNetBSD.cpp30
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/NetBSD/NativeThreadNetBSD.cpp6
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/POSIX/ProcessPOSIXLog.cpp20
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/POSIX/ProcessPOSIXLog.h32
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/Utility/MemoryTagManagerAArch64MTE.cpp111
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/Utility/MemoryTagManagerAArch64MTE.h6
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/Utility/RegisterInfoPOSIX_arm64.cpp4
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationClient.cpp11
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationClient.h3
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationReplayServer.cpp314
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationReplayServer.h88
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerLLGS.cpp16
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterFallback.cpp86
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterFallback.h26
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp63
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.h5
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemoteLog.cpp28
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemoteLog.h55
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ThreadGDBRemote.cpp28
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ThreadGDBRemote.h3
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/scripted/ScriptedProcess.cpp70
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/scripted/ScriptedThread.cpp91
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/scripted/ScriptedThread.h12
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.cpp30
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.h54
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/SWIGPythonBridge.h53
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp206
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPythonImpl.h2
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptedProcessPythonInterface.cpp28
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptedProcessPythonInterface.h7
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptedThreadPythonInterface.cpp19
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptedThreadPythonInterface.h5
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/lldb-python.h7
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/DIERef.cpp37
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/DIERef.h31
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp11
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/DWARFIndex.h4
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/DWARFUnit.h4
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/LogChannelDWARF.cpp18
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/LogChannelDWARF.h33
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/ManualDWARFIndex.cpp232
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/ManualDWARFIndex.h98
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/NameToDIE.cpp52
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/NameToDIE.h38
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp7
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.h6
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARFDebugMap.cpp8
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARFDebugMap.h4
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/CompileUnitIndex.cpp19
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/CompileUnitIndex.h13
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/PdbAstBuilder.cpp4
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/PdbIndex.h1
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/SymbolFileNativePDB.cpp525
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/SymbolFileNativePDB.h33
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp71
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h11
-rw-r--r--contrib/llvm-project/lldb/source/Symbol/Function.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Symbol/Symbol.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Symbol/Symtab.cpp8
-rw-r--r--contrib/llvm-project/lldb/source/Symbol/TypeSystem.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Target/MemoryTagMap.cpp64
-rw-r--r--contrib/llvm-project/lldb/source/Target/Platform.cpp7
-rw-r--r--contrib/llvm-project/lldb/source/Target/RegisterContextUnwind.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Target/StackFrame.cpp6
-rw-r--r--contrib/llvm-project/lldb/source/Target/Statistics.cpp55
-rw-r--r--contrib/llvm-project/lldb/source/Target/Target.cpp7
-rw-r--r--contrib/llvm-project/lldb/source/Target/Thread.cpp4
-rw-r--r--contrib/llvm-project/lldb/source/Target/TraceInstructionDumper.cpp6
-rw-r--r--contrib/llvm-project/lldb/source/Utility/ConstString.cpp18
-rw-r--r--contrib/llvm-project/lldb/source/Utility/Instrumentation.cpp43
-rw-r--r--contrib/llvm-project/lldb/source/Utility/Log.cpp16
-rw-r--r--contrib/llvm-project/lldb/source/Utility/Logging.cpp97
-rw-r--r--contrib/llvm-project/lldb/source/Utility/Reproducer.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Utility/ReproducerInstrumentation.cpp262
-rw-r--r--contrib/llvm-project/lldb/source/Utility/StringList.cpp12
-rw-r--r--contrib/llvm-project/lldb/source/Utility/Timer.cpp4
-rw-r--r--contrib/llvm-project/lldb/tools/driver/Driver.cpp1
-rw-r--r--contrib/llvm-project/lldb/tools/lldb-instr/Instrument.cpp236
-rw-r--r--contrib/llvm-project/lldb/tools/lldb-server/lldb-gdbserver.cpp1
-rw-r--r--contrib/llvm-project/llvm/include/llvm-c/Core.h9
-rw-r--r--contrib/llvm-project/llvm/include/llvm-c/DebugInfo.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/APInt.h21
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/AllocatorList.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/Any.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/ArrayRef.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/BitVector.h12
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/CoalescingBitVector.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/CombinationGenerator.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/DenseSet.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/DepthFirstIterator.h5
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/GenericCycleInfo.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/ImmutableMap.h41
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/ImmutableSet.h20
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/MapVector.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/Optional.h26
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/PriorityWorklist.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/STLExtras.h71
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/STLFunctionalExtras.h76
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/ScopedHashTable.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/SetVector.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/SmallBitVector.h12
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/SmallSet.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/SmallString.h12
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/SmallVector.h14
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/SparseMultiSet.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/SparseSet.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/StringExtras.h36
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/StringMap.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/StringMapEntry.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/StringRef.h30
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/StringSwitch.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/Triple.h6
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/identity.h34
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/ilist.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/AliasAnalysis.h11
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/AliasAnalysisEvaluator.h16
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/BasicAliasAnalysis.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/CFLAliasAnalysisUtils.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/ConstantFolding.h11
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/ConstraintSystem.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/DDG.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/DOTGraphTraitsPass.h19
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/DependenceAnalysis.h17
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/DivergenceAnalysis.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/DomPrinter.h14
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/IRSimilarityIdentifier.h83
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/IVDescriptors.h13
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/IVUsers.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/InlineAdvisor.h52
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/InlineCost.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/InlineOrder.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/InstSimplifyFolder.h255
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/InstructionSimplify.h6
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/LazyBlockFrequencyInfo.h12
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/LazyBranchProbabilityInfo.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/LazyCallGraph.h21
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/Loads.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/LoopAccessAnalysis.h34
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/LoopAnalysisManager.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/LoopInfo.h15
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/LoopNestAnalysis.h23
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/MLInlineAdvisor.h27
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/MLModelRunner.h13
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/MemoryBuiltins.h135
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/MemoryLocation.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/MemorySSA.h5
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/MemorySSAUpdater.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h20
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/NoInferenceModelRunner.h6
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/ObjCARCAliasAnalysis.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/ObjCARCInstKind.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/ObjCARCUtil.h6
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/PHITransAddr.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h13
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/ReplayInlineAdvisor.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolution.h33
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolutionDivision.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h1550
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/SyncDependenceAnalysis.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/TargetFolder.h118
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/TargetTransformInfo.h29
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h25
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/Utils/TFUtils.h6
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/ValueTracking.h29
-rw-r--r--contrib/llvm-project/llvm/include/llvm/BinaryFormat/AMDGPUMetadataVerifier.h14
-rw-r--r--contrib/llvm-project/llvm/include/llvm/BinaryFormat/Dwarf.def3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/BinaryFormat/Dwarf.h6
-rw-r--r--contrib/llvm-project/llvm/include/llvm/BinaryFormat/ELF.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/BinaryFormat/MachO.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/BinaryFormat/MsgPackReader.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/BinaryFormat/MsgPackWriter.h7
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Bitcode/LLVMBitCodes.h8
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/AsmPrinter.h7
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/BasicTTIImpl.h8
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/CalcSpillWeights.h12
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/CodeGenPassBuilder.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/DIE.h54
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/DbgEntityHistoryCalculator.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/FaultMaps.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h16
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h9
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/CombinerInfo.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/GISelWorkList.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h12
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/Legalizer.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LoadStoreOpt.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h31
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h25
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/RegBankSelect.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/Utils.h13
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/ISDOpcodes.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/IndirectThunks.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/LiveInterval.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/LiveRangeEdit.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/MIRFormatter.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/MachineBasicBlock.h17
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/MachineLoopUtils.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/MachineModuleInfo.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/MachinePassManager.h6
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/MachineScheduler.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/Passes.h5
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/SDNodeProperties.td2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/SelectionDAG.h31
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/SelectionDAGNodes.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/TailDuplicator.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/TargetFrameLowering.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/TargetInstrInfo.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/TargetLowering.h32
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/TargetRegisterInfo.h7
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DWARFLinker/DWARFLinker.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DWARFLinker/DWARFLinkerCompileUnit.h6
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/CodeView/CodeView.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/CodeView/TypeRecord.h17
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFContext.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFDie.h8
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/GSYM/LookupResult.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/GSYM/ObjectFileTransformer.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/GSYM/StringTable.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/MSF/MSFBuilder.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/MSF/MappedBlockStream.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/DIA/DIASupport.h7
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolExe.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFunc.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugEnd.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugStart.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolLabel.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolPublicSymbol.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolThunk.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeArray.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeBaseClass.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeBuiltin.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeCustom.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeDimension.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeEnum.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFriend.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionArg.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionSig.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeManaged.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypePointer.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeTypedef.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTable.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTableShape.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolUnknown.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolUsingNamespace.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/Symbolize/DIPrinter.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Debuginfod/Debuginfod.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Debuginfod/HTTPClient.h8
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Demangle/ItaniumDemangle.h127
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Demangle/MicrosoftDemangle.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Demangle/MicrosoftDemangleNodes.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Demangle/StringView.h20
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Demangle/Utility.h15
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITEventListener.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/EHFrameSupport.h17
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/JITLink.h148
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h87
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/MemoryFlags.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/riscv.h86
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/x86_64.h22
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h5
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/CompileUtils.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Core.h34
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/DebugUtils.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/DebuggerSupportPlugin.h6
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/ELFNixPlatform.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/EPCEHFrameRegistrar.h6
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.h7
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/MachOPlatform.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h14
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/AllocationActions.h101
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h69
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/SimplePackedSerialization.h45
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/TargetProcessControlTypes.h96
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/WrapperFunctionUtils.h120
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.h16
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/RuntimeDyldChecker.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/FileCheck/FileCheck.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMP.td5
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPAssume.h55
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPConstants.h43
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h36
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def6
-rw-r--r--contrib/llvm-project/llvm/include/llvm/FuzzMutate/OpDescriptor.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/Argument.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/Attributes.h170
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/Comdat.h8
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/Constant.h8
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/ConstantFolder.h127
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/Constants.h7
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/DIBuilder.h39
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/DebugInfoMetadata.h48
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/DerivedTypes.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/Function.h10
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/GetElementPtrTypeIterator.h22
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/GlobalObject.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/GlobalVariable.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/IRBuilder.h103
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/IRBuilderFolder.h52
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/InlineAsm.h5
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/InstrTypes.h51
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/IntrinsicInst.h102
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/Intrinsics.td7
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsAArch64.td7
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsAMDGPU.td29
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsARM.td3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsNVVM.td30
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsRISCV.td496
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/LLVMContext.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/LegacyPassManagers.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/MatrixBuilder.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/Module.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/NoFolder.h93
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/PatternMatch.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/PseudoProbe.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/SSAContext.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/Statepoint.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/Type.h9
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/TypeFinder.h5
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/VPIntrinsics.def10
-rw-r--r--contrib/llvm-project/llvm/include/llvm/InitializePasses.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/LTO/LTO.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/LTO/legacy/LTOModule.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Linker/Linker.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/MC/MCContext.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/MC/MCFixedLenDisassembler.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/MC/MCParser/MCAsmParser.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/MC/MCPseudoProbe.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/MC/MCStreamer.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/MC/MCTargetOptions.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/MC/SubtargetFeature.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/MCA/CustomBehaviour.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/MCA/HardwareUnits/LSUnit.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/MCA/HardwareUnits/ResourceManager.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/MCA/Instruction.h13
-rw-r--r--contrib/llvm-project/llvm/include/llvm/MCA/Stages/EntryStage.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/MCA/Stages/ExecuteStage.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/MCA/Stages/InOrderIssueStage.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/MCA/Stages/InstructionTables.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/MCA/Stages/RetireStage.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Object/Archive.h219
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Object/ELFObjectFile.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Object/Error.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Object/IRObjectFile.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Object/MachOUniversal.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Object/ObjectFile.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Object/XCOFFObjectFile.h51
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ObjectYAML/DWARFEmitter.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ObjectYAML/XCOFFYAML.h124
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Passes/StandardInstrumentations.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ProfileData/Coverage/CoverageMappingReader.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProf.h7
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfCorrelator.h32
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfData.inc3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfReader.h25
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ProfileData/MemProfData.inc4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProf.h10
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Remarks/RemarkSerializer.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/AArch64TargetParser.def96
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/AArch64TargetParser.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/ARMAttributeParser.h8
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/ARMTargetParser.def11
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/ARMTargetParser.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/Allocator.h12
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/AllocatorBase.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/BinaryByteStream.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/BinaryStreamArray.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/BinaryStreamReader.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/BinaryStreamRef.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/BinaryStreamWriter.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/BlockFrequency.h5
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/BranchProbability.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/Caching.h9
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/Chrono.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/CodeGenCoverage.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/CommandLine.h19
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/Compiler.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/ConvertUTF.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/CrashRecoveryContext.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/DivisionByConstantInfo.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/Duration.h28
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/ELFAttributeParser.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/Error.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/ExtensibleRTTI.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/FileCollector.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/FileSystem.h21
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/FileUtilities.h6
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/FormatVariadic.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/FormatVariadicDetails.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/GraphWriter.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/ItaniumManglingCanonicalizer.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/JSON.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/KnownBits.h15
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/LowLevelTypeImpl.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/MD5.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/MachineValueType.h6
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/MemoryBuffer.h7
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/Parallel.h80
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/RISCVISAInfo.h13
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/ScopedPrinter.h7
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/SymbolRemappingReader.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/TargetOpcodes.def3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/TargetParser.h13
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/ThreadPool.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/TimeProfiler.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/Timer.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/TrigramIndex.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/TypeSize.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/VirtualFileSystem.h28
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/X86TargetParser.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/YAMLTraits.h11
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/raw_ostream.h22
-rw-r--r--contrib/llvm-project/llvm/include/llvm/TableGen/Record.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Target/CGPassBuilderOption.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Target/GenericOpcodes.td7
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Target/GlobalISel/Combine.td13
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Target/Target.td2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Target/TargetLoweringObjectFile.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Target/TargetOptions.h9
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Testing/Support/Annotations.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/TextAPI/InterfaceFile.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/TextAPI/Platform.h27
-rw-r--r--contrib/llvm-project/llvm/include/llvm/TextAPI/Target.h10
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Coroutines/CoroSplit.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/IPO/Attributor.h19
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/IPO/IROutliner.h30
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/IPO/ModuleInliner.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/IPO/OpenMPOpt.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h6
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Instrumentation/InstrProfiling.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Instrumentation/MemorySanitizer.h8
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/GVN.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/InstSimplifyPass.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/LoopReroll.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/LoopUnrollAndJamPass.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/SCCP.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/SROA.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/WarnMissedTransforms.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Utils/AssumeBundleBuilder.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Utils/Cloning.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Utils/CodeExtractor.h8
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Utils/CodeLayout.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Utils/CtorUtils.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Utils/Evaluator.h54
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Utils/GlobalStatus.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Utils/Local.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Utils/LoopPeel.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Utils/LoopUtils.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Utils/MemoryOpRemark.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Utils/ModuleUtils.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h21
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/XRay/BlockIndexer.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/XRay/BlockPrinter.h3
-rw-r--r--contrib/llvm-project/llvm/include/llvm/XRay/FDRRecordConsumer.h5
-rw-r--r--contrib/llvm-project/llvm/include/llvm/XRay/FDRRecords.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/XRay/FDRTraceExpander.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/XRay/RecordPrinter.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/module.modulemap1
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/AliasAnalysis.cpp27
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp6
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/BasicAliasAnalysis.cpp14
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/BranchProbabilityInfo.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/CFLSteensAliasAnalysis.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/CallGraphSCCPass.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/CaptureTracking.cpp12
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/ConstantFolding.cpp164
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/ConstraintSystem.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/CostModel.cpp6
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/DDG.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp41
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/DivergenceAnalysis.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/DomPrinter.cpp13
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/DominanceFrontier.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/GlobalsModRef.cpp12
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/IRSimilarityIdentifier.cpp92
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/IVDescriptors.cpp76
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/IVUsers.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/InlineAdvisor.cpp29
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/InlineCost.cpp16
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/InstructionSimplify.cpp181
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/LazyCallGraph.cpp19
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/LazyValueInfo.cpp65
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/LoopAccessAnalysis.cpp11
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/LoopCacheAnalysis.cpp5
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/LoopInfo.cpp5
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/LoopPass.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/MLInlineAdvisor.cpp103
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/MemoryBuiltins.cpp450
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/MemorySSA.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp22
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/NoInferenceModelRunner.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/ObjCARCInstKind.cpp28
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/PHITransAddr.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/RegionPass.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/ReplayInlineAdvisor.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/ScalarEvolution.cpp477
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/TFUtils.cpp48
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/TargetTransformInfo.cpp10
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/ValueTracking.cpp161
-rw-r--r--contrib/llvm-project/llvm/lib/AsmParser/LLParser.cpp99
-rw-r--r--contrib/llvm-project/llvm/lib/BinaryFormat/AMDGPUMetadataVerifier.cpp8
-rw-r--r--contrib/llvm-project/llvm/lib/BinaryFormat/ELF.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/BinaryFormat/Magic.cpp7
-rw-r--r--contrib/llvm-project/llvm/lib/Bitcode/Reader/BitcodeAnalyzer.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Bitcode/Reader/BitcodeReader.cpp166
-rw-r--r--contrib/llvm-project/llvm/lib/Bitcode/Reader/MetadataLoader.cpp16
-rw-r--r--contrib/llvm-project/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/Analysis.cpp6
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/AIXException.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp20
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/ByteStreamer.h28
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp53
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DIE.cpp128
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DIEHash.cpp16
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DIEHash.h2
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp6
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp48
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h3
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp20
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h3
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfException.h2
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp22
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfFile.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp42
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h2
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/PseudoProbePrinter.h2
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/WinCFGuard.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/BranchFolding.h1
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/CFIInstrInserter.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/CalcSpillWeights.cpp13
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/CodeGenPrepare.cpp16
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/CommandFlags.cpp20
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp13
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp38
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/Combiner.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp261
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp17
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp24
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp8
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp5
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/Localizer.cpp10
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp12
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp5
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/Utils.cpp25
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp10
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp59
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.h10
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/LiveDebugValues/LiveDebugValues.cpp9
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/LiveDebugValues/LiveDebugValues.h4
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/LiveDebugVariables.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/LiveIntervals.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/MIRParser/MIParser.cpp6
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/MIRParser/MIRParser.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp862
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/MachineBlockPlacement.cpp42
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/MachineDominanceFrontier.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/MachineFunction.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/MachineInstrBundle.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/MachineOptimizationRemarkEmitter.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/MachineSink.cpp11
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/ModuloSchedule.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/NonRelocatableStringpool.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/RegAllocEvictionAdvisor.cpp33
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/RegAllocEvictionAdvisor.h119
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/RegAllocGreedy.cpp443
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/RegAllocGreedy.h507
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/RegisterScavenging.cpp22
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp354
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp5
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp13
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp221
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h61
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp17
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp89
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp826
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp9
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp197
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp75
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp131
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/ShrinkWrap.cpp12
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/TargetLoweringBase.cpp6
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp21
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/TargetPassConfig.cpp7
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/TargetRegisterInfo.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/TypePromotion.cpp10
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/VLIWMachineScheduler.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/DWARFLinker/DWARFLinker.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/DWARFLinker/DWARFLinkerCompileUnit.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/DWARFLinker/DWARFLinkerDeclContext.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/DebugInfo/CodeView/EnumTables.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFCompileUnit.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp29
-rw-r--r--contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFDebugAbbrev.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFDie.cpp48
-rw-r--r--contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFVerifier.cpp21
-rw-r--r--contrib/llvm-project/llvm/lib/DebugInfo/PDB/Native/NativeEnumTypes.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/DebugInfo/PDB/PDBExtras.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/DebugInfo/PDB/PDBSymbolCompiland.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/DebugInfo/Symbolize/DIPrinter.cpp6
-rw-r--r--contrib/llvm-project/llvm/lib/Debuginfod/Debuginfod.cpp5
-rw-r--r--contrib/llvm-project/llvm/lib/Demangle/DLangDemangle.cpp238
-rw-r--r--contrib/llvm-project/llvm/lib/Demangle/ItaniumDemangle.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Demangle/MicrosoftDemangleNodes.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/GDBRegistrationListener.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/DefineExternalSectionStartAndEndSymbols.h4
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/EHFrameSupport.cpp57
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/EHFrameSupportImpl.h11
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.h75
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp24
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_riscv.cpp230
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp20
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLink.cpp48
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp9
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.h5
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp93
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp84
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.h14
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp53
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp22
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/PerGraphGOTAndPLTStubsBuilder.h8
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/riscv.cpp28
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/x86_64.cpp10
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Core.cpp35
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/DebugObjectManagerPlugin.cpp6
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/DebuggerSupportPlugin.cpp28
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ELFNixPlatform.cpp25
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCEHFrameRegistrar.cpp15
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.cpp16
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.cpp18
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCIndirectionUtils.cpp23
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/IndirectionUtils.cpp8
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/LLJIT.cpp92
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/MachOPlatform.cpp55
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ObjectLinkingLayer.cpp21
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/OrcV2CBindings.cpp12
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/AllocationActions.cpp44
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/OrcRTBridge.cpp8
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.cpp20
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.cpp9
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.h2
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.cpp32
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.cpp14
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp6
-rw-r--r--contrib/llvm-project/llvm/lib/FileCheck/FileCheck.cpp13
-rw-r--r--contrib/llvm-project/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp306
-rw-r--r--contrib/llvm-project/llvm/lib/FuzzMutate/Operations.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/FuzzMutate/RandomIRBuilder.cpp10
-rw-r--r--contrib/llvm-project/llvm/lib/IR/AsmWriter.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/IR/Attributes.cpp346
-rw-r--r--contrib/llvm-project/llvm/lib/IR/AutoUpgrade.cpp55
-rw-r--r--contrib/llvm-project/llvm/lib/IR/Comdat.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/IR/ConstantFold.cpp335
-rw-r--r--contrib/llvm-project/llvm/lib/IR/ConstantFold.h3
-rw-r--r--contrib/llvm-project/llvm/lib/IR/Constants.cpp61
-rw-r--r--contrib/llvm-project/llvm/lib/IR/Core.cpp44
-rw-r--r--contrib/llvm-project/llvm/lib/IR/DIBuilder.cpp63
-rw-r--r--contrib/llvm-project/llvm/lib/IR/DebugInfo.cpp8
-rw-r--r--contrib/llvm-project/llvm/lib/IR/DebugInfoMetadata.cpp9
-rw-r--r--contrib/llvm-project/llvm/lib/IR/Function.cpp24
-rw-r--r--contrib/llvm-project/llvm/lib/IR/Globals.cpp19
-rw-r--r--contrib/llvm-project/llvm/lib/IR/IRBuilder.cpp19
-rw-r--r--contrib/llvm-project/llvm/lib/IR/Instruction.cpp5
-rw-r--r--contrib/llvm-project/llvm/lib/IR/IntrinsicInst.cpp13
-rw-r--r--contrib/llvm-project/llvm/lib/IR/LLVMContextImpl.h10
-rw-r--r--contrib/llvm-project/llvm/lib/IR/LegacyPassManager.cpp14
-rw-r--r--contrib/llvm-project/llvm/lib/IR/TypeFinder.cpp33
-rw-r--r--contrib/llvm-project/llvm/lib/IR/Verifier.cpp109
-rw-r--r--contrib/llvm-project/llvm/lib/InterfaceStub/ELFObjHandler.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/InterfaceStub/IFSHandler.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/InterfaceStub/IFSStub.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/LTO/LTOBackend.cpp5
-rw-r--r--contrib/llvm-project/llvm/lib/MC/MCAsmStreamer.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/MC/MCContext.cpp10
-rw-r--r--contrib/llvm-project/llvm/lib/MC/MCDwarf.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/MC/MCMachOStreamer.cpp42
-rw-r--r--contrib/llvm-project/llvm/lib/MC/MCObjectStreamer.cpp32
-rw-r--r--contrib/llvm-project/llvm/lib/MC/MCParser/AsmParser.cpp29
-rw-r--r--contrib/llvm-project/llvm/lib/MC/MCParser/DarwinAsmParser.cpp10
-rw-r--r--contrib/llvm-project/llvm/lib/MC/MCParser/ELFAsmParser.cpp32
-rw-r--r--contrib/llvm-project/llvm/lib/MC/MCParser/MasmParser.cpp30
-rw-r--r--contrib/llvm-project/llvm/lib/MC/MCSectionXCOFF.cpp6
-rw-r--r--contrib/llvm-project/llvm/lib/MC/MCStreamer.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/MC/MachObjectWriter.cpp17
-rw-r--r--contrib/llvm-project/llvm/lib/MCA/HardwareUnits/LSUnit.cpp13
-rw-r--r--contrib/llvm-project/llvm/lib/MCA/Stages/DispatchStage.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/MCA/Stages/InOrderIssueStage.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Object/Archive.cpp467
-rw-r--r--contrib/llvm-project/llvm/lib/Object/ArchiveWriter.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Object/IRSymtab.cpp37
-rw-r--r--contrib/llvm-project/llvm/lib/Object/Object.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/Object/TapiFile.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/Object/XCOFFObjectFile.cpp6
-rw-r--r--contrib/llvm-project/llvm/lib/ObjectYAML/ELFYAML.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/ObjectYAML/MachOEmitter.cpp6
-rw-r--r--contrib/llvm-project/llvm/lib/ObjectYAML/XCOFFEmitter.cpp187
-rw-r--r--contrib/llvm-project/llvm/lib/ObjectYAML/XCOFFYAML.cpp157
-rw-r--r--contrib/llvm-project/llvm/lib/Passes/PassBuilder.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Passes/PassBuilderPipelines.cpp14
-rw-r--r--contrib/llvm-project/llvm/lib/Passes/PassRegistry.def7
-rw-r--r--contrib/llvm-project/llvm/lib/Passes/StandardInstrumentations.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/ProfileData/GCOV.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/ProfileData/InstrProf.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/ProfileData/InstrProfCorrelator.cpp52
-rw-r--r--contrib/llvm-project/llvm/lib/ProfileData/InstrProfReader.cpp102
-rw-r--r--contrib/llvm-project/llvm/lib/ProfileData/SampleProfReader.cpp9
-rw-r--r--contrib/llvm-project/llvm/lib/ProfileData/SampleProfWriter.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Remarks/BitstreamRemarkSerializer.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Remarks/Remark.cpp8
-rw-r--r--contrib/llvm-project/llvm/lib/Remarks/RemarkStreamer.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Remarks/RemarkStringTable.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Remarks/YAMLRemarkParser.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Support/AArch64TargetParser.cpp10
-rw-r--r--contrib/llvm-project/llvm/lib/Support/APInt.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Support/ARMAttributeParser.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Support/ARMTargetParser.cpp6
-rw-r--r--contrib/llvm-project/llvm/lib/Support/ARMWinEH.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/Support/BinaryStreamError.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Support/BlockFrequency.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/Support/Caching.cpp8
-rw-r--r--contrib/llvm-project/llvm/lib/Support/CodeGenCoverage.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Support/CommandLine.cpp85
-rw-r--r--contrib/llvm-project/llvm/lib/Support/CrashRecoveryContext.cpp5
-rw-r--r--contrib/llvm-project/llvm/lib/Support/DAGDeltaAlgorithm.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/Support/DataExtractor.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/Support/DivisionByConstantInfo.cpp6
-rw-r--r--contrib/llvm-project/llvm/lib/Support/ELFAttributeParser.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Support/FileOutputBuffer.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Support/FileUtilities.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Support/GraphWriter.cpp7
-rw-r--r--contrib/llvm-project/llvm/lib/Support/Host.cpp9
-rw-r--r--contrib/llvm-project/llvm/lib/Support/InitLLVM.cpp7
-rw-r--r--contrib/llvm-project/llvm/lib/Support/JSON.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/Support/LowLevelType.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Support/MD5.cpp11
-rw-r--r--contrib/llvm-project/llvm/lib/Support/MSP430AttributeParser.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/Support/MemAlloc.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/Support/MemoryBuffer.cpp27
-rw-r--r--contrib/llvm-project/llvm/lib/Support/NativeFormatting.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/Support/Parallel.cpp32
-rw-r--r--contrib/llvm-project/llvm/lib/Support/Path.cpp23
-rw-r--r--contrib/llvm-project/llvm/lib/Support/PrettyStackTrace.cpp5
-rw-r--r--contrib/llvm-project/llvm/lib/Support/RISCVISAInfo.cpp208
-rw-r--r--contrib/llvm-project/llvm/lib/Support/ScopedPrinter.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/Support/Signals.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/Support/Signposts.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/Support/SmallPtrSet.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/Support/SmallVector.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/Support/SpecialCaseList.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/Support/StringMap.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/Support/StringRef.cpp8
-rw-r--r--contrib/llvm-project/llvm/lib/Support/SymbolRemappingReader.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/Support/TargetParser.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Support/ThreadPool.cpp8
-rw-r--r--contrib/llvm-project/llvm/lib/Support/TimeProfiler.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Support/ToolOutputFile.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/Support/Triple.cpp6
-rw-r--r--contrib/llvm-project/llvm/lib/Support/TypeSize.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/Support/Unix/Path.inc25
-rw-r--r--contrib/llvm-project/llvm/lib/Support/VirtualFileSystem.cpp112
-rw-r--r--contrib/llvm-project/llvm/lib/Support/Windows/Path.inc38
-rw-r--r--contrib/llvm-project/llvm/lib/Support/Windows/Process.inc8
-rw-r--r--contrib/llvm-project/llvm/lib/Support/Windows/Program.inc8
-rw-r--r--contrib/llvm-project/llvm/lib/Support/X86TargetParser.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/Support/YAMLParser.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/Support/YAMLTraits.cpp14
-rw-r--r--contrib/llvm-project/llvm/lib/Support/raw_ostream.cpp23
-rw-r--r--contrib/llvm-project/llvm/lib/TableGen/TGParser.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64.h1
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64.td27
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64CallingConvention.td2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64FastISel.cpp214
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp19
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64FrameLowering.h5
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp190
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.h14
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrAtomics.td60
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrFormats.td256
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp9
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrInfo.h1
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrInfo.td108
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64MCInstLower.h3
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp325
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td46
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedA55.td12
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedA57.td2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedA64FX.td10
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedExynosM3.td2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedExynosM4.td2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedExynosM5.td2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedFalkorDetails.td2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedThunderX2T99.td2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedThunderX3T110.td2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64StackTaggingPreRA.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64Subtarget.cpp15
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64Subtarget.h19
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SystemOperands.td4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64TargetMachine.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64TargetObjectFile.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64TargetObjectFile.h1
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp147
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h10
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp183
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp55
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp17
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.h1
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h1
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/SVEInstrFormats.td30
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h15
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPU.td8
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUAliasAnalysis.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp62
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp29
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp9
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp35
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp205
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h10
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp171
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h3
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td24
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp204
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h1
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp61
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPULibFunc.h1
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp5
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUPTNote.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUPrintfRuntimeBinding.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp11
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp12
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp30
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp24
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h14
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp11
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp71
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/DSInstructions.td1
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/FLATInstructions.td2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp10
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.h1
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNSubtarget.h7
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp11
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/MIMGInstructions.td106
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/R600ISelLowering.h1
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/R600InstrInfo.h1
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/R600Subtarget.h6
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp6
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/SIDefines.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp7
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp13
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/SIFrameLowering.h5
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/SIISelLowering.cpp418
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/SIISelLowering.h5
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp7
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp149
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/SIInstrInfo.h11
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/SIInstrInfo.td8
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/SIInstructions.td130
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp137
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp9
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp55
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp53
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h3
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/SIModeRegister.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/SIOptimizeVGPRLiveRange.cpp46
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/SIRegisterInfo.td8
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp9
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/SOPInstructions.td15
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/Utils/AMDGPUAsmUtils.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp23
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h17
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/VOP2Instructions.td36
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/VOP3PInstructions.td41
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/ARM.h3
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/ARM.td51
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp30
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/ARMCallLowering.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/ARMCallLowering.h1
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp14
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/ARMFastISel.cpp100
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/ARMFrameLowering.cpp18
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/ARMHazardRecognizer.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp9
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/ARMISelLowering.cpp56
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/ARMInstrInfo.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/ARMInstrNEON.td96
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/ARMInstructionSelector.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/ARMRegisterInfo.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/ARMRegisterInfo.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/ARMSubtarget.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/ARMSubtarget.h6
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetMachine.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetObjectFile.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetObjectFile.h5
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp12
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetTransformInfo.h12
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp5
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp7
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp18
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/Thumb2InstrInfo.h1
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp5
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/ThumbRegisterInfo.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AVR/AVR.h48
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AVR/AVRCallingConv.td2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AVR/AVRExpandPseudoInsts.cpp181
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AVR/AVRFrameLowering.cpp96
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AVR/AVRISelDAGToDAG.cpp88
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AVR/AVRISelLowering.cpp43
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AVR/AVRISelLowering.h5
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AVR/AVRInstrInfo.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AVR/AVRInstrInfo.td55
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AVR/AVRRegisterInfo.cpp5
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AVR/AVRRegisterInfo.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AVR/AVRRegisterInfo.td20
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AVR/AVRSubtarget.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AVR/AVRSubtarget.h3
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AVR/AVRTargetMachine.cpp6
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AVR/AVRTargetObjectFile.cpp53
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AVR/AVRTargetObjectFile.h5
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AVR/AsmParser/AVRAsmParser.cpp10
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRAsmBackend.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/BPF/AsmParser/BPFAsmParser.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/BPF/BPFISelLowering.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/BPF/BPFMIChecking.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/BPF/BPFMIPeephole.cpp16
-rw-r--r--contrib/llvm-project/llvm/lib/Target/BPF/BPFMISimplifyPatchable.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/BPF/BPFPreserveDIType.cpp6
-rw-r--r--contrib/llvm-project/llvm/lib/Target/BPF/BPFSubtarget.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/BPF/BTFDebug.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/BPF/MCTargetDesc/BPFInstPrinter.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/CSKY/AsmParser/CSKYAsmParser.cpp8
-rw-r--r--contrib/llvm-project/llvm/lib/Target/CSKY/CSKY.h3
-rw-r--r--contrib/llvm-project/llvm/lib/Target/CSKY/CSKY.td34
-rw-r--r--contrib/llvm-project/llvm/lib/Target/CSKY/CSKYAsmPrinter.cpp152
-rw-r--r--contrib/llvm-project/llvm/lib/Target/CSKY/CSKYAsmPrinter.h16
-rw-r--r--contrib/llvm-project/llvm/lib/Target/CSKY/CSKYConstantIslandPass.cpp1376
-rw-r--r--contrib/llvm-project/llvm/lib/Target/CSKY/CSKYConstantPoolValue.cpp216
-rw-r--r--contrib/llvm-project/llvm/lib/Target/CSKY/CSKYConstantPoolValue.h221
-rw-r--r--contrib/llvm-project/llvm/lib/Target/CSKY/CSKYFrameLowering.cpp548
-rw-r--r--contrib/llvm-project/llvm/lib/Target/CSKY/CSKYFrameLowering.h36
-rw-r--r--contrib/llvm-project/llvm/lib/Target/CSKY/CSKYISelDAGToDAG.cpp18
-rw-r--r--contrib/llvm-project/llvm/lib/Target/CSKY/CSKYISelLowering.cpp784
-rw-r--r--contrib/llvm-project/llvm/lib/Target/CSKY/CSKYISelLowering.h100
-rw-r--r--contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrFormatsF1.td274
-rw-r--r--contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrFormatsF2.td208
-rw-r--r--contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrInfo.cpp309
-rw-r--r--contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrInfo.h27
-rw-r--r--contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrInfo.td216
-rw-r--r--contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrInfoF1.td420
-rw-r--r--contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrInfoF2.td462
-rw-r--r--contrib/llvm-project/llvm/lib/Target/CSKY/CSKYRegisterInfo.td2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/CSKY/CSKYTargetMachine.cpp8
-rw-r--r--contrib/llvm-project/llvm/lib/Target/CSKY/MCTargetDesc/CSKYInstPrinter.cpp15
-rw-r--r--contrib/llvm-project/llvm/lib/Target/CSKY/MCTargetDesc/CSKYInstPrinter.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/CSKY/MCTargetDesc/CSKYMCAsmInfo.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/BitTracker.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp7
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonBitTracker.cpp5
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonCFGOptimizer.cpp19
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonCallingConv.td12
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonCommonGEP.cpp127
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp8
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp20
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp11
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp12
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp93
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp20
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonHazardRecognizer.cpp27
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonHazardRecognizer.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp5
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp19
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp45
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelLowering.h4
-rwxr-xr-x[-rw-r--r--]contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp363
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp122
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonInstrInfo.h7
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonOptAddrMode.cpp102
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonPatterns.td25
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonPatternsHVX.td255
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonPatternsV65.td45
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td12
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp11
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp78
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonSubtarget.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp67
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.h5
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp11
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.cpp207
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.h10
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp10
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCompound.cpp23
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp7
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp48
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.h29
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp25
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.h23
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp26
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp96
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.h24
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Lanai/LanaiISelLowering.cpp8
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Lanai/LanaiMemAluCombiner.cpp5
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Lanai/LanaiRegisterInfo.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Lanai/LanaiSubtarget.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Lanai/MCTargetDesc/LanaiInstPrinter.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/AsmParser/M68kAsmParser.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/Disassembler/M68kDisassembler.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kCallLowering.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kCallLowering.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kInstructionSelector.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kLegalizerInfo.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kLegalizerInfo.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kRegisterBankInfo.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kRegisterBankInfo.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kRegisterBanks.td2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68k.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68k.td2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kAsmPrinter.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kAsmPrinter.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kCallingConv.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kCollapseMOVEMPass.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kExpandPseudo.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kFrameLowering.cpp18
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kFrameLowering.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kISelDAGToDAG.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kISelLowering.cpp78
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kISelLowering.h10
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrArithmetic.td49
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrBits.td12
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrBuilder.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrCompiler.td2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrControl.td2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrData.td2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrFormats.td2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrInfo.cpp14
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrInfo.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrInfo.td8
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrShiftRotate.td2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kMCInstLower.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kMCInstLower.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kMachineFunction.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kMachineFunction.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kRegisterInfo.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kRegisterInfo.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kRegisterInfo.td2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kSchedule.td2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kSubtarget.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kSubtarget.h8
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kTargetMachine.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kTargetMachine.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kTargetObjectFile.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/M68kTargetObjectFile.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kAsmBackend.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kBaseInfo.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kELFObjectWriter.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kFixupKinds.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kInstPrinter.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kInstPrinter.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCAsmInfo.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCAsmInfo.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCCodeEmitter.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCCodeEmitter.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCTargetDesc.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCTargetDesc.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/M68k/TargetInfo/M68kTargetInfo.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/MSP430/AsmParser/MSP430AsmParser.cpp11
-rw-r--r--contrib/llvm-project/llvm/lib/Target/MSP430/MCTargetDesc/MSP430ELFObjectWriter.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp8
-rw-r--r--contrib/llvm-project/llvm/lib/Target/MSP430/MSP430Subtarget.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Mips/Mips.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Mips/Mips16FrameLowering.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Mips/Mips16ISelDAGToDAG.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Mips/Mips16ISelLowering.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Mips/Mips16InstrInfo.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Mips/Mips16RegisterInfo.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Mips/MipsAnalyzeImmediate.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Mips/MipsAsmPrinter.cpp20
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Mips/MipsCallLowering.h1
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp61
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp8
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Mips/MipsFastISel.cpp124
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Mips/MipsISelLowering.cpp12
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Mips/MipsInstructionSelector.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Mips/MipsMachineFunction.cpp12
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Mips/MipsMulMulBugPass.cpp136
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Mips/MipsRegisterInfo.cpp8
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Mips/MipsSEISelLowering.cpp23
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Mips/MipsTargetMachine.cpp10
-rw-r--r--contrib/llvm-project/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp7
-rw-r--r--contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTX.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp10
-rw-r--r--contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp20
-rw-r--r--contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp32
-rw-r--r--contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXImageOptimizer.cpp13
-rw-r--r--contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td56
-rw-r--r--contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td32
-rw-r--r--contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp9
-rw-r--r--contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXLowerArgs.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXSubtarget.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXTargetObjectFile.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXUtilities.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp15
-rw-r--r--contrib/llvm-project/llvm/lib/Target/PowerPC/GISel/PPCInstructionSelector.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.cpp5
-rw-r--r--contrib/llvm-project/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp10
-rw-r--r--contrib/llvm-project/llvm/lib/Target/PowerPC/P10InstrResources.td10
-rw-r--r--contrib/llvm-project/llvm/lib/Target/PowerPC/P9InstrResources.td2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp43
-rw-r--r--contrib/llvm-project/llvm/lib/Target/PowerPC/PPCFastISel.cpp101
-rw-r--r--contrib/llvm-project/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp18
-rw-r--r--contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp5
-rw-r--r--contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelLowering.cpp47
-rw-r--r--contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelLowering.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/PowerPC/PPCInstr64Bit.td25
-rw-r--r--contrib/llvm-project/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp20
-rw-r--r--contrib/llvm-project/llvm/lib/Target/PowerPC/PPCInstrInfo.td24
-rw-r--r--contrib/llvm-project/llvm/lib/Target/PowerPC/PPCInstrPrefix.td2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp43
-rw-r--r--contrib/llvm-project/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp14
-rw-r--r--contrib/llvm-project/llvm/lib/Target/PowerPC/PPCRegisterInfo.h6
-rw-r--r--contrib/llvm-project/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp21
-rw-r--r--contrib/llvm-project/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp12
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp56
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h3
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp30
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.h5
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCV.h3
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCV.td283
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp24
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp42
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp534
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.cpp891
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.h60
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp86
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrFormats.td7
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp132
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfo.td41
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoD.td80
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoF.td142
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoV.td180
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td611
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td110
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td196
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td254
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td82
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoZk.td203
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstructionSelector.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp5
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.td35
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSExtWRemoval.cpp278
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSchedRocket.td8
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td8
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVScheduleB.td50
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSubtarget.cpp68
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSubtarget.h99
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSystemOperands.td391
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp9
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp43
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h31
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp9
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Sparc/SparcISelLowering.cpp6
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Sparc/SparcTargetObjectFile.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZ.h10
-rw-r--r--contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp44
-rw-r--r--contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZAsmPrinter.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZCopyPhysRegs.cpp10
-rw-r--r--contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp12
-rw-r--r--contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp33
-rw-r--r--contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZFrameLowering.h1
-rw-r--r--contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp14
-rw-r--r--contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZISelLowering.h1
-rw-r--r--contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZLDCleanup.cpp12
-rw-r--r--contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp12
-rw-r--r--contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZMCInstLower.h1
-rw-r--r--contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZMachineFunctionInfo.h9
-rw-r--r--contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZPostRewrite.cpp18
-rw-r--r--contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZShortenInst.cpp17
-rw-r--r--contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZSubtarget.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZSubtarget.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZTDC.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp8
-rw-r--r--contrib/llvm-project/llvm/lib/Target/VE/AsmParser/VEAsmParser.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/VE/LVLGen.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/VE/MCTargetDesc/VEMCTargetDesc.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/VE/VE.h1
-rw-r--r--contrib/llvm-project/llvm/lib/Target/VE/VECustomDAG.cpp81
-rw-r--r--contrib/llvm-project/llvm/lib/Target/VE/VECustomDAG.h79
-rw-r--r--contrib/llvm-project/llvm/lib/Target/VE/VEISelLowering.cpp86
-rw-r--r--contrib/llvm-project/llvm/lib/Target/VE/VEISelLowering.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/VE/VEInstrInfo.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/VE/VEInstrInfo.td16
-rw-r--r--contrib/llvm-project/llvm/lib/Target/VE/VEInstrPatternsVec.td16
-rw-r--r--contrib/llvm-project/llvm/lib/Target/VE/VEMCInstLower.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/Target/VE/VEMachineFunctionInfo.h5
-rw-r--r--contrib/llvm-project/llvm/lib/Target/VE/VESubtarget.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/VE/VVPInstrInfo.td11
-rw-r--r--contrib/llvm-project/llvm/lib/Target/VE/VVPInstrPatternsVec.td85
-rw-r--r--contrib/llvm-project/llvm/lib/Target/VE/VVPNodes.def5
-rw-r--r--contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h1
-rw-r--r--contrib/llvm-project/llvm/lib/Target/WebAssembly/Utils/WebAssemblyUtilities.h1
-rw-r--r--contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp64
-rw-r--r--contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp5
-rw-r--r--contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp222
-rw-r--r--contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyLowerGlobalDtors.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp8
-rw-r--r--contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp6
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/MCA/X86CustomBehaviour.cpp64
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/MCA/X86CustomBehaviour.h47
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp13
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86AsmPrinter.h1
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86CallLowering.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86FastTileConfig.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86FrameLowering.cpp103
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86FrameLowering.h5
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp6
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.cpp759
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86IndirectBranchTracking.cpp48
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86InstrAVX512.td7
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86InstrInfo.cpp92
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86InstrInfo.h3
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86InstructionSelector.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86OptimizeLEAs.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86PadShortFunction.cpp7
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86PartialReduction.cpp65
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86SchedBroadwell.td38
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86SchedHaswell.td38
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86SchedIceLake.td46
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86SchedSandyBridge.td33
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86SchedSkylakeClient.td38
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86SchedSkylakeServer.td46
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86Schedule.td10
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleAtom.td2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleBdVer2.td2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleBtVer2.td2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleSLM.td20
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleZnver1.td81
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleZnver2.td81
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleZnver3.td2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp8
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86TargetMachine.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86TargetTransformInfo.cpp114
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86TargetTransformInfo.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/XCore/XCoreFrameLowering.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/XCore/XCoreSubtarget.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/TextAPI/Architecture.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/TextAPI/PackedVersion.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/TextAPI/Platform.cpp116
-rw-r--r--contrib/llvm-project/llvm/lib/TextAPI/Target.cpp35
-rw-r--r--contrib/llvm-project/llvm/lib/TextAPI/TextStub.cpp27
-rw-r--r--contrib/llvm-project/llvm/lib/TextAPI/TextStubCommon.cpp53
-rw-r--r--contrib/llvm-project/llvm/lib/TextAPI/TextStubCommon.h9
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Coroutines/CoroEarly.cpp11
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Coroutines/CoroFrame.cpp12
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Coroutines/CoroInstr.h12
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Coroutines/CoroInternal.h14
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Coroutines/CoroSplit.cpp167
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Coroutines/Coroutines.cpp22
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/IPO/AlwaysInliner.cpp18
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp7
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/IPO/Attributor.cpp30
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/IPO/AttributorAttributes.cpp305
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp9
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/IPO/FunctionAttrs.cpp18
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/IPO/FunctionSpecialization.cpp29
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/IPO/GlobalOpt.cpp727
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/IPO/IROutliner.cpp758
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/IPO/Inliner.cpp56
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/IPO/ModuleInliner.cpp9
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/IPO/OpenMPOpt.cpp141
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/IPO/PartialInlining.cpp10
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp63
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp82
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp177
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp13
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp100
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineInternal.h14
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp12
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp30
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp11
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp49
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp21
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp651
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp16
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp68
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp17
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp52
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp28
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp102
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp5
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp7
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/ObjCARC/ARCRuntimeEntryPoints.h10
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.h3
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp30
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysis.h1
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/ADCE.cpp9
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp12
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp87
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp264
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/EarlyCSE.cpp9
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/GVN.cpp25
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp11
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp6
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/JumpThreading.cpp11
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/LICM.cpp33
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopDeletion.cpp30
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopFlatten.cpp510
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopInterchange.cpp263
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp25
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp11
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp149
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/NewGVN.cpp89
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp1
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp16
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/SCCP.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/SROA.cpp128
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp7
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/Scalarizer.cpp5
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp144
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Utils/AMDGPUEmitPrintf.cpp17
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Utils/BuildLibCalls.cpp44
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Utils/CallGraphUpdater.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Utils/CodeExtractor.cpp244
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Utils/Evaluator.cpp290
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Utils/GlobalStatus.cpp8
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Utils/InlineFunction.cpp12
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Utils/LCSSA.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Utils/Local.cpp13
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Utils/LoopPeel.cpp8
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Utils/LoopUnroll.cpp18
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Utils/LoopUtils.cpp5
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Utils/LoopVersioning.cpp21
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Utils/ModuleUtils.cpp74
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Utils/SampleProfileInference.cpp273
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp222
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Utils/SimplifyCFG.cpp198
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp7
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Utils/ValueMapper.cpp8
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp78
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp831
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp324
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlan.cpp244
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlan.h247
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanPredicator.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanPredicator.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp27
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanTransforms.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanValue.h13
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp20
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Vectorize/VectorCombine.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/WindowsManifest/WindowsManifestMerger.cpp2
-rw-r--r--contrib/llvm-project/llvm/tools/bugpoint/BugDriver.h3
-rw-r--r--contrib/llvm-project/llvm/tools/bugpoint/CrashDebugger.cpp2
-rw-r--r--contrib/llvm-project/llvm/tools/bugpoint/FindBugs.cpp2
-rw-r--r--contrib/llvm-project/llvm/tools/bugpoint/Miscompilation.cpp2
-rw-r--r--contrib/llvm-project/llvm/tools/bugpoint/OptimizerDriver.cpp12
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-ar/llvm-ar.cpp21
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-cov/CodeCoverage.cpp2
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-cov/CoverageReport.cpp2
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-cov/CoverageSummaryInfo.h7
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-cov/SourceCoverageView.cpp4
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-cov/SourceCoverageViewHTML.cpp2
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-diff/llvm-diff.cpp1
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-dis/llvm-dis.cpp25
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-lto/llvm-lto.cpp3
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-mca/CodeRegion.h2
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-mca/PipelinePrinter.h2
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-mca/Views/InstructionInfoView.cpp32
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-mca/Views/InstructionInfoView.h10
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-mca/llvm-mca.cpp13
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-modextract/llvm-modextract.cpp1
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-nm/llvm-nm.cpp94
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-objcopy/COFF/COFFConfig.h8
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-objcopy/COFF/COFFObjcopy.cpp25
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-objcopy/ConfigManager.cpp42
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-objcopy/ELF/ELFObjcopy.cpp50
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-objcopy/ELF/Object.h6
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-objcopy/MachO/MachOObjcopy.cpp56
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-objcopy/MachO/MachOWriter.cpp2
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-objcopy/ObjcopyOpts.td5
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-objdump/COFFDump.h1
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-objdump/MachODump.cpp8
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-objdump/SourcePrinter.h2
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-objdump/WasmDump.cpp2
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-objdump/llvm-objdump.cpp2
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-objdump/llvm-objdump.h4
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-pdbutil/DumpOutputStyle.h6
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-pdbutil/InputFile.h1
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-pdbutil/LinePrinter.h1
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-pdbutil/MinimalSymbolDumper.cpp12
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-pdbutil/OutputStyle.h1
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-pdbutil/PdbYaml.h4
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-pdbutil/YAMLOutputStyle.h1
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-profdata/llvm-profdata.cpp56
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-readobj/COFFDumper.cpp3
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-readobj/ELFDumper.cpp9
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-readobj/MachODumper.cpp67
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-readobj/XCOFFDumper.cpp262
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-readobj/llvm-readobj.cpp6
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-stress/llvm-stress.cpp7
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-strings/llvm-strings.cpp3
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-tapi-diff/DiffEngine.cpp3
-rw-r--r--contrib/llvm-project/llvm/tools/opt/NewPMDriver.cpp13
-rw-r--r--contrib/llvm-project/llvm/utils/TableGen/CodeGenInstruction.cpp2
-rw-r--r--contrib/llvm-project/llvm/utils/TableGen/CodeGenInstruction.h3
-rw-r--r--contrib/llvm-project/llvm/utils/TableGen/CodeGenIntrinsics.h1
-rw-r--r--contrib/llvm-project/llvm/utils/TableGen/CodeGenSchedule.h1
-rw-r--r--contrib/llvm-project/llvm/utils/TableGen/DAGISelMatcherOpt.cpp2
-rw-r--r--contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchDag.h4
-rw-r--r--contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchDagInstr.h2
-rw-r--r--contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchDagPredicateDependencyEdge.h1
-rw-r--r--contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchTree.cpp1
-rw-r--r--contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchTree.h2
-rw-r--r--contrib/llvm-project/llvm/utils/TableGen/GlobalISelEmitter.cpp4
-rw-r--r--contrib/llvm-project/llvm/utils/TableGen/InstrInfoEmitter.cpp4
-rw-r--r--contrib/llvm-project/llvm/utils/TableGen/PredicateExpander.h2
-rw-r--r--contrib/llvm-project/llvm/utils/TableGen/RegisterBankEmitter.cpp2
-rw-r--r--contrib/llvm-project/llvm/utils/TableGen/SDNodeProperties.cpp42
-rw-r--r--contrib/llvm-project/llvm/utils/TableGen/SearchableTableEmitter.cpp6
-rw-r--r--contrib/llvm-project/llvm/utils/TableGen/X86DisassemblerTables.cpp2
-rw-r--r--contrib/llvm-project/llvm/utils/TableGen/X86ModRMFilters.h22
-rw-r--r--contrib/llvm-project/openmp/runtime/src/kmp.h192
-rw-r--r--contrib/llvm-project/openmp/runtime/src/kmp_atomic.cpp15
-rw-r--r--contrib/llvm-project/openmp/runtime/src/kmp_barrier.cpp6
-rw-r--r--contrib/llvm-project/openmp/runtime/src/kmp_csupport.cpp3
-rw-r--r--contrib/llvm-project/openmp/runtime/src/kmp_dispatch.cpp8
-rw-r--r--contrib/llvm-project/openmp/runtime/src/kmp_dispatch.h4
-rw-r--r--contrib/llvm-project/openmp/runtime/src/kmp_global.cpp8
-rw-r--r--contrib/llvm-project/openmp/runtime/src/kmp_lock.cpp27
-rw-r--r--contrib/llvm-project/openmp/runtime/src/kmp_lock.h13
-rw-r--r--contrib/llvm-project/openmp/runtime/src/kmp_runtime.cpp7
-rw-r--r--contrib/llvm-project/openmp/runtime/src/kmp_settings.cpp25
-rw-r--r--contrib/llvm-project/openmp/runtime/src/kmp_taskdeps.cpp6
-rw-r--r--contrib/llvm-project/openmp/runtime/src/kmp_tasking.cpp22
-rw-r--r--contrib/llvm-project/openmp/runtime/src/kmp_wait_release.h4
-rw-r--r--contrib/llvm-project/openmp/runtime/src/z_Windows_NT_util.cpp4
2678 files changed, 64238 insertions, 40007 deletions
diff --git a/contrib/llvm-project/clang/include/clang/APINotes/Types.h b/contrib/llvm-project/clang/include/clang/APINotes/Types.h
index 0d97e9ad8623..f741d9b91d76 100644
--- a/contrib/llvm-project/clang/include/clang/APINotes/Types.h
+++ b/contrib/llvm-project/clang/include/clang/APINotes/Types.h
@@ -133,7 +133,7 @@ class CommonTypeInfo : public CommonEntityInfo {
llvm::Optional<std::string> NSErrorDomain;
public:
- CommonTypeInfo() : CommonEntityInfo() {}
+ CommonTypeInfo() {}
const llvm::Optional<std::string> &getSwiftBridge() const {
return SwiftBridge;
@@ -208,10 +208,9 @@ class ObjCContextInfo : public CommonTypeInfo {
public:
ObjCContextInfo()
- : CommonTypeInfo(), HasDefaultNullability(0), DefaultNullability(0),
- HasDesignatedInits(0), SwiftImportAsNonGenericSpecified(false),
- SwiftImportAsNonGeneric(false), SwiftObjCMembersSpecified(false),
- SwiftObjCMembers(false) {}
+ : HasDefaultNullability(0), DefaultNullability(0), HasDesignatedInits(0),
+ SwiftImportAsNonGenericSpecified(false), SwiftImportAsNonGeneric(false),
+ SwiftObjCMembersSpecified(false), SwiftObjCMembers(false) {}
/// Determine the default nullability for properties and methods of this
/// class.
@@ -309,7 +308,7 @@ class VariableInfo : public CommonEntityInfo {
std::string Type;
public:
- VariableInfo() : CommonEntityInfo(), NullabilityAudited(false), Nullable(0) {}
+ VariableInfo() : NullabilityAudited(false), Nullable(0) {}
llvm::Optional<NullabilityKind> getNullability() const {
return NullabilityAudited ? llvm::Optional<NullabilityKind>(
@@ -358,8 +357,7 @@ class ObjCPropertyInfo : public VariableInfo {
public:
ObjCPropertyInfo()
- : VariableInfo(), SwiftImportAsAccessorsSpecified(false),
- SwiftImportAsAccessors(false) {}
+ : SwiftImportAsAccessorsSpecified(false), SwiftImportAsAccessors(false) {}
llvm::Optional<bool> getSwiftImportAsAccessors() const {
return SwiftImportAsAccessorsSpecified
@@ -423,8 +421,7 @@ class ParamInfo : public VariableInfo {
public:
ParamInfo()
- : VariableInfo(), NoEscapeSpecified(false), NoEscape(false),
- RawRetainCountConvention() {}
+ : NoEscapeSpecified(false), NoEscape(false), RawRetainCountConvention() {}
llvm::Optional<bool> isNoEscape() const {
if (!NoEscapeSpecified)
@@ -514,7 +511,7 @@ public:
std::vector<ParamInfo> Params;
FunctionInfo()
- : CommonEntityInfo(), NullabilityAudited(false), NumAdjustedNullable(0),
+ : NullabilityAudited(false), NumAdjustedNullable(0),
RawRetainCountConvention() {}
static unsigned getMaxNullabilityIndex() {
@@ -607,8 +604,7 @@ public:
/// Whether this is a required initializer.
unsigned RequiredInit : 1;
- ObjCMethodInfo()
- : FunctionInfo(), DesignatedInit(false), RequiredInit(false) {}
+ ObjCMethodInfo() : DesignatedInit(false), RequiredInit(false) {}
friend bool operator==(const ObjCMethodInfo &, const ObjCMethodInfo &);
@@ -639,19 +635,19 @@ inline bool operator!=(const ObjCMethodInfo &LHS, const ObjCMethodInfo &RHS) {
/// Describes API notes data for a global variable.
class GlobalVariableInfo : public VariableInfo {
public:
- GlobalVariableInfo() : VariableInfo() {}
+ GlobalVariableInfo() {}
};
/// Describes API notes data for a global function.
class GlobalFunctionInfo : public FunctionInfo {
public:
- GlobalFunctionInfo() : FunctionInfo() {}
+ GlobalFunctionInfo() {}
};
/// Describes API notes data for an enumerator.
class EnumConstantInfo : public CommonEntityInfo {
public:
- EnumConstantInfo() : CommonEntityInfo() {}
+ EnumConstantInfo() {}
};
/// Describes API notes data for a tag.
@@ -662,7 +658,7 @@ class TagInfo : public CommonTypeInfo {
public:
llvm::Optional<EnumExtensibilityKind> EnumExtensibility;
- TagInfo() : CommonTypeInfo(), HasFlagEnum(0), IsFlagEnum(0) {}
+ TagInfo() : HasFlagEnum(0), IsFlagEnum(0) {}
llvm::Optional<bool> isFlagEnum() const {
if (HasFlagEnum)
@@ -706,7 +702,7 @@ class TypedefInfo : public CommonTypeInfo {
public:
llvm::Optional<SwiftNewTypeKind> SwiftWrapper;
- TypedefInfo() : CommonTypeInfo() {}
+ TypedefInfo() {}
TypedefInfo &operator|=(const TypedefInfo &RHS) {
static_cast<CommonTypeInfo &>(*this) |= RHS;
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTConcept.h b/contrib/llvm-project/clang/include/clang/AST/ASTConcept.h
index aba18b060b02..25e00860060f 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTConcept.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTConcept.h
@@ -22,7 +22,6 @@
namespace clang {
class ConceptDecl;
-class ConceptSpecializationExpr;
/// The result of a constraint satisfaction check, containing the necessary
/// information to diagnose an unsatisfied constraint.
@@ -123,17 +122,16 @@ protected:
const ASTTemplateArgumentListInfo *ArgsAsWritten;
public:
-
ConceptReference(NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
DeclarationNameInfo ConceptNameInfo, NamedDecl *FoundDecl,
ConceptDecl *NamedConcept,
- const ASTTemplateArgumentListInfo *ArgsAsWritten) :
- NestedNameSpec(NNS), TemplateKWLoc(TemplateKWLoc),
- ConceptName(ConceptNameInfo), FoundDecl(FoundDecl),
- NamedConcept(NamedConcept), ArgsAsWritten(ArgsAsWritten) {}
+ const ASTTemplateArgumentListInfo *ArgsAsWritten)
+ : NestedNameSpec(NNS), TemplateKWLoc(TemplateKWLoc),
+ ConceptName(ConceptNameInfo), FoundDecl(FoundDecl),
+ NamedConcept(NamedConcept), ArgsAsWritten(ArgsAsWritten) {}
- ConceptReference() : NestedNameSpec(), TemplateKWLoc(), ConceptName(),
- FoundDecl(nullptr), NamedConcept(nullptr), ArgsAsWritten(nullptr) {}
+ ConceptReference()
+ : FoundDecl(nullptr), NamedConcept(nullptr), ArgsAsWritten(nullptr) {}
const NestedNameSpecifierLoc &getNestedNameSpecifierLoc() const {
return NestedNameSpec;
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTContext.h b/contrib/llvm-project/clang/include/clang/AST/ASTContext.h
index 63f2c948c79b..f39ce14bc82c 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTContext.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTContext.h
@@ -99,15 +99,12 @@ class CXXMethodDecl;
class CXXRecordDecl;
class DiagnosticsEngine;
class ParentMapContext;
-class DynTypedNode;
class DynTypedNodeList;
class Expr;
enum class FloatModeKind;
class GlobalDecl;
-class ItaniumMangleContext;
class MangleContext;
class MangleNumberingContext;
-class MaterializeTemporaryExpr;
class MemberSpecializationInfo;
class Module;
struct MSGuidDeclParts;
@@ -126,7 +123,6 @@ class ObjCTypeParamDecl;
class OMPTraitInfo;
struct ParsedTargetAttr;
class Preprocessor;
-class Stmt;
class StoredDeclsMap;
class TargetAttr;
class TargetInfo;
@@ -2626,6 +2622,18 @@ public:
/// template.
bool hasSameTemplateName(TemplateName X, TemplateName Y);
+ /// Determine whether the two declarations refer to the same entity.
+ bool isSameEntity(NamedDecl *X, NamedDecl *Y);
+
+ /// Determine whether two template parameter lists are similar enough
+ /// that they may be used in declarations of the same template.
+ bool isSameTemplateParameterList(TemplateParameterList *X,
+ TemplateParameterList *Y);
+
+ /// Determine whether two template parameters are similar enough
+ /// that they may be used in declarations of the same template.
+ bool isSameTemplateParameter(NamedDecl *X, NamedDecl *Y);
+
/// Retrieve the "canonical" template argument.
///
/// The canonical template argument is the simplest template argument
@@ -2730,13 +2738,9 @@ public:
QualType getFloatingTypeOfSizeWithinDomain(QualType typeSize,
QualType typeDomain) const;
- unsigned getTargetAddressSpace(QualType T) const {
- return getTargetAddressSpace(T.getQualifiers());
- }
+ unsigned getTargetAddressSpace(QualType T) const;
- unsigned getTargetAddressSpace(Qualifiers Q) const {
- return getTargetAddressSpace(Q.getAddressSpace());
- }
+ unsigned getTargetAddressSpace(Qualifiers Q) const;
unsigned getTargetAddressSpace(LangAS AS) const;
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTImporterLookupTable.h b/contrib/llvm-project/clang/include/clang/AST/ASTImporterLookupTable.h
index 918c2b9e350c..2dbc44c5dcd4 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTImporterLookupTable.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTImporterLookupTable.h
@@ -21,7 +21,6 @@
namespace clang {
-class ASTContext;
class NamedDecl;
class DeclContext;
diff --git a/contrib/llvm-project/clang/include/clang/AST/AbstractBasicReader.h b/contrib/llvm-project/clang/include/clang/AST/AbstractBasicReader.h
index 442039044cfe..b2fc2d2c7e4b 100644
--- a/contrib/llvm-project/clang/include/clang/AST/AbstractBasicReader.h
+++ b/contrib/llvm-project/clang/include/clang/AST/AbstractBasicReader.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_AST_ABSTRACTBASICREADER_H
-#define CLANG_AST_ABSTRACTBASICREADER_H
+#ifndef LLVM_CLANG_AST_ABSTRACTBASICREADER_H
+#define LLVM_CLANG_AST_ABSTRACTBASICREADER_H
#include "clang/AST/DeclTemplate.h"
diff --git a/contrib/llvm-project/clang/include/clang/AST/AbstractBasicWriter.h b/contrib/llvm-project/clang/include/clang/AST/AbstractBasicWriter.h
index 75aef734ba9b..41772ba0f63c 100644
--- a/contrib/llvm-project/clang/include/clang/AST/AbstractBasicWriter.h
+++ b/contrib/llvm-project/clang/include/clang/AST/AbstractBasicWriter.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_AST_ABSTRACTBASICWRITER_H
-#define CLANG_AST_ABSTRACTBASICWRITER_H
+#ifndef LLVM_CLANG_AST_ABSTRACTBASICWRITER_H
+#define LLVM_CLANG_AST_ABSTRACTBASICWRITER_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclTemplate.h"
diff --git a/contrib/llvm-project/clang/include/clang/AST/AbstractTypeReader.h b/contrib/llvm-project/clang/include/clang/AST/AbstractTypeReader.h
index 9fea7b26f678..c9162b1779bc 100644
--- a/contrib/llvm-project/clang/include/clang/AST/AbstractTypeReader.h
+++ b/contrib/llvm-project/clang/include/clang/AST/AbstractTypeReader.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_AST_ABSTRACTTYPEREADER_H
-#define CLANG_AST_ABSTRACTTYPEREADER_H
+#ifndef LLVM_CLANG_AST_ABSTRACTTYPEREADER_H
+#define LLVM_CLANG_AST_ABSTRACTTYPEREADER_H
#include "clang/AST/Type.h"
#include "clang/AST/AbstractBasicReader.h"
diff --git a/contrib/llvm-project/clang/include/clang/AST/AbstractTypeWriter.h b/contrib/llvm-project/clang/include/clang/AST/AbstractTypeWriter.h
index a63cb0be099d..62006ef0f26e 100644
--- a/contrib/llvm-project/clang/include/clang/AST/AbstractTypeWriter.h
+++ b/contrib/llvm-project/clang/include/clang/AST/AbstractTypeWriter.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_AST_ABSTRACTTYPEWRITER_H
-#define CLANG_AST_ABSTRACTTYPEWRITER_H
+#ifndef LLVM_CLANG_AST_ABSTRACTTYPEWRITER_H
+#define LLVM_CLANG_AST_ABSTRACTTYPEWRITER_H
#include "clang/AST/Type.h"
#include "clang/AST/AbstractBasicWriter.h"
diff --git a/contrib/llvm-project/clang/include/clang/AST/Attr.h b/contrib/llvm-project/clang/include/clang/AST/Attr.h
index 6366d6e8837e..070e160d6517 100644
--- a/contrib/llvm-project/clang/include/clang/AST/Attr.h
+++ b/contrib/llvm-project/clang/include/clang/AST/Attr.h
@@ -34,12 +34,7 @@
namespace clang {
class ASTContext;
class AttributeCommonInfo;
-class IdentifierInfo;
-class ObjCInterfaceDecl;
-class Expr;
-class QualType;
class FunctionDecl;
-class TypeSourceInfo;
class OMPTraitInfo;
/// Attr - This represents one attribute.
diff --git a/contrib/llvm-project/clang/include/clang/AST/AttrIterator.h b/contrib/llvm-project/clang/include/clang/AST/AttrIterator.h
index 78ce9314a2bb..66571e1cf0b8 100644
--- a/contrib/llvm-project/clang/include/clang/AST/AttrIterator.h
+++ b/contrib/llvm-project/clang/include/clang/AST/AttrIterator.h
@@ -22,7 +22,6 @@
namespace clang {
-class ASTContext;
class Attr;
/// AttrVec - A vector of Attr, which is how they are stored on the AST.
diff --git a/contrib/llvm-project/clang/include/clang/AST/CXXRecordDeclDefinitionBits.def b/contrib/llvm-project/clang/include/clang/AST/CXXRecordDeclDefinitionBits.def
index 9b270682f8cf..cdf0804680ad 100644
--- a/contrib/llvm-project/clang/include/clang/AST/CXXRecordDeclDefinitionBits.def
+++ b/contrib/llvm-project/clang/include/clang/AST/CXXRecordDeclDefinitionBits.def
@@ -112,6 +112,9 @@ FIELD(HasVariantMembers, 1, NO_MERGE)
/// True if there no non-field members declared by the user.
FIELD(HasOnlyCMembers, 1, NO_MERGE)
+/// True if there is an '__init' method defined by the user.
+FIELD(HasInitMethod, 1, NO_MERGE)
+
/// True if any field has an in-class initializer, including those
/// within anonymous unions or structs.
FIELD(HasInClassInitializer, 1, NO_MERGE)
diff --git a/contrib/llvm-project/clang/include/clang/AST/Comment.h b/contrib/llvm-project/clang/include/clang/AST/Comment.h
index 4184e103206d..5ecc35791b7b 100644
--- a/contrib/llvm-project/clang/include/clang/AST/Comment.h
+++ b/contrib/llvm-project/clang/include/clang/AST/Comment.h
@@ -424,19 +424,13 @@ public:
Attribute() { }
- Attribute(SourceLocation NameLocBegin, StringRef Name) :
- NameLocBegin(NameLocBegin), Name(Name),
- EqualsLoc(SourceLocation()),
- ValueRange(SourceRange()), Value(StringRef())
- { }
+ Attribute(SourceLocation NameLocBegin, StringRef Name)
+ : NameLocBegin(NameLocBegin), Name(Name), EqualsLoc(SourceLocation()) {}
Attribute(SourceLocation NameLocBegin, StringRef Name,
- SourceLocation EqualsLoc,
- SourceRange ValueRange, StringRef Value) :
- NameLocBegin(NameLocBegin), Name(Name),
- EqualsLoc(EqualsLoc),
- ValueRange(ValueRange), Value(Value)
- { }
+ SourceLocation EqualsLoc, SourceRange ValueRange, StringRef Value)
+ : NameLocBegin(NameLocBegin), Name(Name), EqualsLoc(EqualsLoc),
+ ValueRange(ValueRange), Value(Value) {}
SourceLocation getNameLocEnd() const {
return NameLocBegin.getLocWithOffset(Name.size());
diff --git a/contrib/llvm-project/clang/include/clang/AST/CommentLexer.h b/contrib/llvm-project/clang/include/clang/AST/CommentLexer.h
index 94f778501e75..9aa1681cb2c5 100644
--- a/contrib/llvm-project/clang/include/clang/AST/CommentLexer.h
+++ b/contrib/llvm-project/clang/include/clang/AST/CommentLexer.h
@@ -320,6 +320,9 @@ private:
/// Eat string matching regexp \code \s*\* \endcode.
void skipLineStartingDecorations();
+ /// Skip over pure text.
+ const char *skipTextToken();
+
/// Lex comment text, including commands if ParseCommands is set to true.
void lexCommentText(Token &T);
diff --git a/contrib/llvm-project/clang/include/clang/AST/ComputeDependence.h b/contrib/llvm-project/clang/include/clang/AST/ComputeDependence.h
index 8db09e6b57d0..cb545aff51f8 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ComputeDependence.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ComputeDependence.h
@@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_AST_COMPUTE_DEPENDENCE_H
-#define LLVM_CLANG_AST_COMPUTE_DEPENDENCE_H
+#ifndef LLVM_CLANG_AST_COMPUTEDEPENDENCE_H
+#define LLVM_CLANG_AST_COMPUTEDEPENDENCE_H
#include "clang/AST/DependenceFlags.h"
#include "clang/Basic/ExceptionSpecificationType.h"
diff --git a/contrib/llvm-project/clang/include/clang/AST/CurrentSourceLocExprScope.h b/contrib/llvm-project/clang/include/clang/AST/CurrentSourceLocExprScope.h
index 34df8ce1309e..4f8343efad16 100644
--- a/contrib/llvm-project/clang/include/clang/AST/CurrentSourceLocExprScope.h
+++ b/contrib/llvm-project/clang/include/clang/AST/CurrentSourceLocExprScope.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_AST_CURRENT_SOURCE_LOC_EXPR_SCOPE_H
-#define LLVM_CLANG_AST_CURRENT_SOURCE_LOC_EXPR_SCOPE_H
+#ifndef LLVM_CLANG_AST_CURRENTSOURCELOCEXPRSCOPE_H
+#define LLVM_CLANG_AST_CURRENTSOURCELOCEXPRSCOPE_H
#include <cassert>
@@ -71,4 +71,4 @@ private:
} // end namespace clang
-#endif // LLVM_CLANG_AST_CURRENT_SOURCE_LOC_EXPR_SCOPE_H
+#endif // LLVM_CLANG_AST_CURRENTSOURCELOCEXPRSCOPE_H
diff --git a/contrib/llvm-project/clang/include/clang/AST/Decl.h b/contrib/llvm-project/clang/include/clang/AST/Decl.h
index 2c92571325c8..5ebd7e5d0d36 100644
--- a/contrib/llvm-project/clang/include/clang/AST/Decl.h
+++ b/contrib/llvm-project/clang/include/clang/AST/Decl.h
@@ -53,7 +53,6 @@ namespace clang {
class ASTContext;
struct ASTTemplateArgumentListInfo;
-class Attr;
class CompoundStmt;
class DependentFunctionTemplateSpecializationInfo;
class EnumDecl;
@@ -74,7 +73,6 @@ class TemplateArgumentList;
class TemplateArgumentListInfo;
class TemplateParameterList;
class TypeAliasTemplateDecl;
-class TypeLoc;
class UnresolvedSetImpl;
class VarTemplateDecl;
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclBase.h b/contrib/llvm-project/clang/include/clang/AST/DeclBase.h
index 2a0a19597391..06d2f17d1430 100644
--- a/contrib/llvm-project/clang/include/clang/AST/DeclBase.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclBase.h
@@ -52,14 +52,8 @@ enum Linkage : unsigned char;
class LinkageSpecDecl;
class Module;
class NamedDecl;
-class ObjCCategoryDecl;
-class ObjCCategoryImplDecl;
class ObjCContainerDecl;
-class ObjCImplDecl;
-class ObjCImplementationDecl;
-class ObjCInterfaceDecl;
class ObjCMethodDecl;
-class ObjCProtocolDecl;
struct PrintingPolicy;
class RecordDecl;
class SourceManager;
@@ -613,6 +607,20 @@ public:
return getModuleOwnershipKind() == ModuleOwnershipKind::ModulePrivate;
}
+ /// Whether this declaration was exported in a lexical context.
+ /// e.g.:
+ ///
+ /// export namespace A {
+ /// void f1(); // isInExportDeclContext() == true
+ /// }
+ /// void A::f1(); // isInExportDeclContext() == false
+ ///
+ /// namespace B {
+ /// void f2(); // isInExportDeclContext() == false
+ /// }
+ /// export void B::f2(); // isInExportDeclContext() == true
+ bool isInExportDeclContext() const;
+
/// Return true if this declaration has an attribute which acts as
/// definition of the entity, such as 'alias' or 'ifunc'.
bool hasDefiningAttr() const;
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclCXX.h b/contrib/llvm-project/clang/include/clang/AST/DeclCXX.h
index cc7bfc86a521..2833df0505da 100644
--- a/contrib/llvm-project/clang/include/clang/AST/DeclCXX.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclCXX.h
@@ -64,7 +64,6 @@ class CXXFinalOverriderMap;
class CXXIndirectPrimaryBaseSet;
class CXXMethodDecl;
class DecompositionDecl;
-class DiagnosticBuilder;
class FriendDecl;
class FunctionTemplateDecl;
class IdentifierInfo;
@@ -1140,6 +1139,9 @@ public:
/// \note This does NOT include a check for union-ness.
bool isEmpty() const { return data().Empty; }
+ void setInitMethod(bool Val) { data().HasInitMethod = Val; }
+ bool hasInitMethod() const { return data().HasInitMethod; }
+
bool hasPrivateFields() const {
return data().HasPrivateFields;
}
@@ -3291,7 +3293,7 @@ class BaseUsingDecl : public NamedDecl {
protected:
BaseUsingDecl(Kind DK, DeclContext *DC, SourceLocation L, DeclarationName N)
- : NamedDecl(DK, DC, L, N), FirstUsingShadow(nullptr, 0) {}
+ : NamedDecl(DK, DC, L, N), FirstUsingShadow(nullptr, false) {}
private:
void anchor() override;
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclContextInternals.h b/contrib/llvm-project/clang/include/clang/AST/DeclContextInternals.h
index 9899fc29b82d..903cdb7bfcc8 100644
--- a/contrib/llvm-project/clang/include/clang/AST/DeclContextInternals.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclContextInternals.h
@@ -90,7 +90,7 @@ public:
StoredDeclsList(StoredDeclsList &&RHS) : Data(RHS.Data) {
RHS.Data.setPointer(nullptr);
- RHS.Data.setInt(0);
+ RHS.Data.setInt(false);
}
void MaybeDeallocList() {
@@ -114,7 +114,7 @@ public:
Data = RHS.Data;
RHS.Data.setPointer(nullptr);
- RHS.Data.setInt(0);
+ RHS.Data.setInt(false);
return *this;
}
@@ -142,7 +142,7 @@ public:
}
void setHasExternalDecls() {
- Data.setInt(1);
+ Data.setInt(true);
}
void remove(NamedDecl *D) {
@@ -155,7 +155,7 @@ public:
erase_if([](NamedDecl *ND) { return ND->isFromASTFile(); });
// Don't have any pending external decls any more.
- Data.setInt(0);
+ Data.setInt(false);
}
void replaceExternalDecls(ArrayRef<NamedDecl*> Decls) {
@@ -171,7 +171,7 @@ public:
});
// Don't have any pending external decls any more.
- Data.setInt(0);
+ Data.setInt(false);
if (Decls.empty())
return;
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclObjC.h b/contrib/llvm-project/clang/include/clang/AST/DeclObjC.h
index 79ec1d6e5c3c..110b7dc0c6f2 100644
--- a/contrib/llvm-project/clang/include/clang/AST/DeclObjC.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclObjC.h
@@ -779,17 +779,13 @@ private:
LParenLoc(LParenLocation), DeclType(T), DeclTypeSourceInfo(TSI),
PropertyAttributes(ObjCPropertyAttribute::kind_noattr),
PropertyAttributesAsWritten(ObjCPropertyAttribute::kind_noattr),
- PropertyImplementation(propControl), GetterName(Selector()),
- SetterName(Selector()) {}
+ PropertyImplementation(propControl) {}
public:
- static ObjCPropertyDecl *Create(ASTContext &C, DeclContext *DC,
- SourceLocation L,
- IdentifierInfo *Id, SourceLocation AtLocation,
- SourceLocation LParenLocation,
- QualType T,
- TypeSourceInfo *TSI,
- PropertyControl propControl = None);
+ static ObjCPropertyDecl *
+ Create(ASTContext &C, DeclContext *DC, SourceLocation L, IdentifierInfo *Id,
+ SourceLocation AtLocation, SourceLocation LParenLocation, QualType T,
+ TypeSourceInfo *TSI, PropertyControl propControl = None);
static ObjCPropertyDecl *CreateDeserialized(ASTContext &C, unsigned ID);
@@ -1075,6 +1071,9 @@ public:
bool HasUserDeclaredSetterMethod(const ObjCPropertyDecl *P) const;
ObjCIvarDecl *getIvarDecl(IdentifierInfo *Id) const;
+ ObjCPropertyDecl *getProperty(const IdentifierInfo *Id,
+ bool IsInstance) const;
+
ObjCPropertyDecl *
FindPropertyDeclaration(const IdentifierInfo *PropertyId,
ObjCPropertyQueryKind QueryKind) const;
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclObjCCommon.h b/contrib/llvm-project/clang/include/clang/AST/DeclObjCCommon.h
index 5f03bce6e9a8..42c97204a613 100644
--- a/contrib/llvm-project/clang/include/clang/AST/DeclObjCCommon.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclObjCCommon.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_AST_DECLOBJC_COMMON_H
-#define LLVM_CLANG_AST_DECLOBJC_COMMON_H
+#ifndef LLVM_CLANG_AST_DECLOBJCCOMMON_H
+#define LLVM_CLANG_AST_DECLOBJCCOMMON_H
namespace clang {
@@ -52,4 +52,4 @@ enum {
} // namespace clang
-#endif // LLVM_CLANG_AST_DECLOBJC_COMMON_H
+#endif // LLVM_CLANG_AST_DECLOBJCCOMMON_H
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclTemplate.h b/contrib/llvm-project/clang/include/clang/AST/DeclTemplate.h
index f7a2e3146d06..d216b359816e 100755
--- a/contrib/llvm-project/clang/include/clang/AST/DeclTemplate.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclTemplate.h
@@ -498,7 +498,7 @@ private:
TemplateSpecializationKind TSK, const TemplateArgumentList *TemplateArgs,
const ASTTemplateArgumentListInfo *TemplateArgsAsWritten,
SourceLocation POI, MemberSpecializationInfo *MSInfo)
- : Function(FD, MSInfo ? 1 : 0), Template(Template, TSK - 1),
+ : Function(FD, MSInfo ? true : false), Template(Template, TSK - 1),
TemplateArguments(TemplateArgs),
TemplateArgumentsAsWritten(TemplateArgsAsWritten),
PointOfInstantiation(POI) {
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclarationName.h b/contrib/llvm-project/clang/include/clang/AST/DeclarationName.h
index 38da6fc727fb..0762e0a478ea 100644
--- a/contrib/llvm-project/clang/include/clang/AST/DeclarationName.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclarationName.h
@@ -34,11 +34,9 @@ class ASTContext;
template <typename> class CanQual;
class DeclarationName;
class DeclarationNameTable;
-class MultiKeywordSelector;
struct PrintingPolicy;
class TemplateDecl;
class TypeSourceInfo;
-class UsingDirectiveDecl;
using CanQualType = CanQual<Type>;
diff --git a/contrib/llvm-project/clang/include/clang/AST/Expr.h b/contrib/llvm-project/clang/include/clang/AST/Expr.h
index 54ad1934e477..ce5280d77c36 100644
--- a/contrib/llvm-project/clang/include/clang/AST/Expr.h
+++ b/contrib/llvm-project/clang/include/clang/AST/Expr.h
@@ -2387,7 +2387,7 @@ public:
/// Create an offsetof node that refers into a C++ base class.
explicit OffsetOfNode(const CXXBaseSpecifier *Base)
- : Range(), Data(reinterpret_cast<uintptr_t>(Base) | OffsetOfNode::Base) {}
+ : Data(reinterpret_cast<uintptr_t>(Base) | OffsetOfNode::Base) {}
/// Determine what kind of offsetof node this is.
Kind getKind() const { return static_cast<Kind>(Data & Mask); }
diff --git a/contrib/llvm-project/clang/include/clang/AST/ExprConcepts.h b/contrib/llvm-project/clang/include/clang/AST/ExprConcepts.h
index 1544c498ef66..6849b65b71c0 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ExprConcepts.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ExprConcepts.h
@@ -275,12 +275,12 @@ public:
friend ASTStmtWriter;
/// \brief No return type requirement was specified.
- ReturnTypeRequirement() : TypeConstraintInfo(nullptr, 0) {}
+ ReturnTypeRequirement() : TypeConstraintInfo(nullptr, false) {}
/// \brief A return type requirement was specified but it was a
/// substitution failure.
ReturnTypeRequirement(SubstitutionDiagnostic *SubstDiag) :
- TypeConstraintInfo(SubstDiag, 0) {}
+ TypeConstraintInfo(SubstDiag, false) {}
/// \brief A 'type constraint' style return type requirement.
/// \param TPL an invented template parameter list containing a single
@@ -413,12 +413,12 @@ public:
friend ASTStmtWriter;
NestedRequirement(SubstitutionDiagnostic *SubstDiag) :
- Requirement(RK_Nested, /*Dependent=*/false,
+ Requirement(RK_Nested, /*IsDependent=*/false,
/*ContainsUnexpandedParameterPack*/false,
- /*Satisfied=*/false), Value(SubstDiag) {}
+ /*IsSatisfied=*/false), Value(SubstDiag) {}
NestedRequirement(Expr *Constraint) :
- Requirement(RK_Nested, /*Dependent=*/true,
+ Requirement(RK_Nested, /*IsDependent=*/true,
Constraint->containsUnexpandedParameterPack()),
Value(Constraint) {
assert(Constraint->isInstantiationDependent() &&
diff --git a/contrib/llvm-project/clang/include/clang/AST/ExprObjC.h b/contrib/llvm-project/clang/include/clang/AST/ExprObjC.h
index b0f057dbaa02..3b7ad8662ad9 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ExprObjC.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ExprObjC.h
@@ -1706,7 +1706,7 @@ public:
/// This may be '*', in which case this should fold to true.
bool hasVersion() const { return !VersionToCheck.empty(); }
- VersionTuple getVersion() { return VersionToCheck; }
+ VersionTuple getVersion() const { return VersionToCheck; }
child_range children() {
return child_range(child_iterator(), child_iterator());
diff --git a/contrib/llvm-project/clang/include/clang/AST/FormatString.h b/contrib/llvm-project/clang/include/clang/AST/FormatString.h
index 8c944451f796..d7933382f13d 100644
--- a/contrib/llvm-project/clang/include/clang/AST/FormatString.h
+++ b/contrib/llvm-project/clang/include/clang/AST/FormatString.h
@@ -15,8 +15,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_ANALYSES_FORMATSTRING_H
-#define LLVM_CLANG_ANALYSIS_ANALYSES_FORMATSTRING_H
+#ifndef LLVM_CLANG_AST_FORMATSTRING_H
+#define LLVM_CLANG_AST_FORMATSTRING_H
#include "clang/AST/CanonicalType.h"
@@ -332,11 +332,11 @@ public:
unsigned amountLength,
bool usesPositionalArg)
: start(amountStart), length(amountLength), hs(howSpecified), amt(amount),
- UsesPositionalArg(usesPositionalArg), UsesDotPrefix(0) {}
+ UsesPositionalArg(usesPositionalArg), UsesDotPrefix(false) {}
OptionalAmount(bool valid = true)
: start(nullptr),length(0), hs(valid ? NotSpecified : Invalid), amt(0),
- UsesPositionalArg(0), UsesDotPrefix(0) {}
+ UsesPositionalArg(false), UsesDotPrefix(false) {}
explicit OptionalAmount(unsigned Amount)
: start(nullptr), length(0), hs(Constant), amt(Amount),
@@ -726,7 +726,8 @@ public:
virtual bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
const char *startSpecifier,
- unsigned specifierLen) {
+ unsigned specifierLen,
+ const TargetInfo &Target) {
return true;
}
diff --git a/contrib/llvm-project/clang/include/clang/AST/LexicallyOrderedRecursiveASTVisitor.h b/contrib/llvm-project/clang/include/clang/AST/LexicallyOrderedRecursiveASTVisitor.h
index e42f0449f6db..054220b8a32c 100644
--- a/contrib/llvm-project/clang/include/clang/AST/LexicallyOrderedRecursiveASTVisitor.h
+++ b/contrib/llvm-project/clang/include/clang/AST/LexicallyOrderedRecursiveASTVisitor.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_AST_LEXICALLY_ORDERED_RECURSIVEASTVISITOR_H
-#define LLVM_CLANG_AST_LEXICALLY_ORDERED_RECURSIVEASTVISITOR_H
+#ifndef LLVM_CLANG_AST_LEXICALLYORDEREDRECURSIVEASTVISITOR_H
+#define LLVM_CLANG_AST_LEXICALLYORDEREDRECURSIVEASTVISITOR_H
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Basic/LLVM.h"
@@ -160,4 +160,4 @@ private:
} // end namespace clang
-#endif // LLVM_CLANG_AST_LEXICALLY_ORDERED_RECURSIVEASTVISITOR_H
+#endif // LLVM_CLANG_AST_LEXICALLYORDEREDRECURSIVEASTVISITOR_H
diff --git a/contrib/llvm-project/clang/include/clang/AST/LocInfoType.h b/contrib/llvm-project/clang/include/clang/AST/LocInfoType.h
index 7e845ad03587..876c7deeceb9 100644
--- a/contrib/llvm-project/clang/include/clang/AST/LocInfoType.h
+++ b/contrib/llvm-project/clang/include/clang/AST/LocInfoType.h
@@ -10,8 +10,8 @@
// source-location information.
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_SEMA_LOCINFOTYPE_H
-#define LLVM_CLANG_SEMA_LOCINFOTYPE_H
+#ifndef LLVM_CLANG_AST_LOCINFOTYPE_H
+#define LLVM_CLANG_AST_LOCINFOTYPE_H
#include "clang/AST/Type.h"
@@ -54,4 +54,4 @@ public:
} // end namespace clang
-#endif // LLVM_CLANG_SEMA_LOCINFOTYPE_H
+#endif // LLVM_CLANG_AST_LOCINFOTYPE_H
diff --git a/contrib/llvm-project/clang/include/clang/AST/MangleNumberingContext.h b/contrib/llvm-project/clang/include/clang/AST/MangleNumberingContext.h
index eb33759682d6..a4a6ce4c2708 100644
--- a/contrib/llvm-project/clang/include/clang/AST/MangleNumberingContext.h
+++ b/contrib/llvm-project/clang/include/clang/AST/MangleNumberingContext.h
@@ -21,9 +21,7 @@ namespace clang {
class BlockDecl;
class CXXMethodDecl;
-class IdentifierInfo;
class TagDecl;
-class Type;
class VarDecl;
/// Keeps track of the mangled names of lambda expressions and block
diff --git a/contrib/llvm-project/clang/include/clang/AST/NonTrivialTypeVisitor.h b/contrib/llvm-project/clang/include/clang/AST/NonTrivialTypeVisitor.h
index c95516538ad1..cf320c8a478a 100644
--- a/contrib/llvm-project/clang/include/clang/AST/NonTrivialTypeVisitor.h
+++ b/contrib/llvm-project/clang/include/clang/AST/NonTrivialTypeVisitor.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_NON_TRIVIAL_TYPE_VISITOR_H
-#define LLVM_CLANG_NON_TRIVIAL_TYPE_VISITOR_H
+#ifndef LLVM_CLANG_AST_NONTRIVIALTYPEVISITOR_H
+#define LLVM_CLANG_AST_NONTRIVIALTYPEVISITOR_H
#include "clang/AST/Type.h"
diff --git a/contrib/llvm-project/clang/include/clang/AST/OSLog.h b/contrib/llvm-project/clang/include/clang/AST/OSLog.h
index c24e79ce6da0..3772597e2616 100644
--- a/contrib/llvm-project/clang/include/clang/AST/OSLog.h
+++ b/contrib/llvm-project/clang/include/clang/AST/OSLog.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_ANALYSES_OSLOG_H
-#define LLVM_CLANG_ANALYSIS_ANALYSES_OSLOG_H
+#ifndef LLVM_CLANG_AST_OSLOG_H
+#define LLVM_CLANG_AST_OSLOG_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
diff --git a/contrib/llvm-project/clang/include/clang/AST/OpenMPClause.h b/contrib/llvm-project/clang/include/clang/AST/OpenMPClause.h
index 3fd1b6d30080..3ecc1d40fafc 100644
--- a/contrib/llvm-project/clang/include/clang/AST/OpenMPClause.h
+++ b/contrib/llvm-project/clang/include/clang/AST/OpenMPClause.h
@@ -32,6 +32,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
+#include "llvm/Frontend/OpenMP/OMPAssume.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPContext.h"
#include "llvm/Support/Casting.h"
diff --git a/contrib/llvm-project/clang/include/clang/AST/PrettyDeclStackTrace.h b/contrib/llvm-project/clang/include/clang/AST/PrettyDeclStackTrace.h
index 899bbcb3be45..82df031d4126 100644
--- a/contrib/llvm-project/clang/include/clang/AST/PrettyDeclStackTrace.h
+++ b/contrib/llvm-project/clang/include/clang/AST/PrettyDeclStackTrace.h
@@ -22,7 +22,6 @@ namespace clang {
class ASTContext;
class Decl;
-class SourceManager;
/// PrettyDeclStackTraceEntry - If a crash occurs in the parser while
/// parsing something related to a declaration, include that
diff --git a/contrib/llvm-project/clang/include/clang/AST/PrettyPrinter.h b/contrib/llvm-project/clang/include/clang/AST/PrettyPrinter.h
index f6816e938f2a..fd40328d8dcf 100644
--- a/contrib/llvm-project/clang/include/clang/AST/PrettyPrinter.h
+++ b/contrib/llvm-project/clang/include/clang/AST/PrettyPrinter.h
@@ -20,9 +20,7 @@ namespace clang {
class DeclContext;
class LangOptions;
-class SourceManager;
class Stmt;
-class TagDecl;
class PrinterHelper {
public:
@@ -75,7 +73,8 @@ struct PrintingPolicy {
MSVCFormatting(false), ConstantsAsWritten(false),
SuppressImplicitBase(false), FullyQualifiedName(false),
PrintCanonicalTypes(false), PrintInjectedClassNameWithArguments(true),
- UsePreferredNames(true), AlwaysIncludeTypeForTemplateArgument(false) {}
+ UsePreferredNames(true), AlwaysIncludeTypeForTemplateArgument(false),
+ CleanUglifiedParameters(false) {}
/// Adjust this printing policy for cases where it's known that we're
/// printing C++ code (for instance, if AST dumping reaches a C++-only
@@ -282,6 +281,11 @@ struct PrintingPolicy {
/// parameters.
unsigned AlwaysIncludeTypeForTemplateArgument : 1;
+ /// Whether to strip underscores when printing reserved parameter names.
+ /// e.g. std::vector<class _Tp> becomes std::vector<class Tp>.
+ /// This only affects parameter names, and so describes a compatible API.
+ unsigned CleanUglifiedParameters : 1;
+
/// Callbacks to use to allow the behavior of printing to be customized.
const PrintingCallbacks *Callbacks = nullptr;
};
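The CleanUglifiedParameters bit added above is consumed like any other PrintingPolicy flag. A minimal usage sketch, assuming an ASTContext and a NamedDecl are already available (printCleaned is an illustrative helper, not part of this change):

#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/PrettyPrinter.h"
#include "llvm/Support/raw_ostream.h"

// Print a declaration with reserved parameter names de-uglified,
// e.g. a template parameter "_Tp" is rendered as "Tp".
void printCleaned(const clang::NamedDecl *D, clang::ASTContext &Ctx) {
  clang::PrintingPolicy Policy(Ctx.getLangOpts());
  Policy.CleanUglifiedParameters = true; // the flag introduced above
  D->print(llvm::outs(), Policy);
}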
diff --git a/contrib/llvm-project/clang/include/clang/AST/QualTypeNames.h b/contrib/llvm-project/clang/include/clang/AST/QualTypeNames.h
index 8313e0441be5..daa86cda2d99 100644
--- a/contrib/llvm-project/clang/include/clang/AST/QualTypeNames.h
+++ b/contrib/llvm-project/clang/include/clang/AST/QualTypeNames.h
@@ -89,4 +89,4 @@ QualType getFullyQualifiedType(QualType QT, const ASTContext &Ctx,
bool WithGlobalNsPrefix = false);
} // end namespace TypeName
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_CORE_QUALTYPENAMES_H
+#endif // LLVM_CLANG_AST_QUALTYPENAMES_H
diff --git a/contrib/llvm-project/clang/include/clang/AST/TemplateBase.h b/contrib/llvm-project/clang/include/clang/AST/TemplateBase.h
index fa27a12cfbb9..e8064121d279 100644
--- a/contrib/llvm-project/clang/include/clang/AST/TemplateBase.h
+++ b/contrib/llvm-project/clang/include/clang/AST/TemplateBase.h
@@ -52,7 +52,6 @@ template <> struct PointerLikeTypeTraits<clang::Expr *> {
namespace clang {
class ASTContext;
-class DiagnosticBuilder;
class Expr;
struct PrintingPolicy;
class TypeSourceInfo;
diff --git a/contrib/llvm-project/clang/include/clang/AST/TemplateName.h b/contrib/llvm-project/clang/include/clang/AST/TemplateName.h
index 2befb5c1b45e..44080a7f56d4 100644
--- a/contrib/llvm-project/clang/include/clang/AST/TemplateName.h
+++ b/contrib/llvm-project/clang/include/clang/AST/TemplateName.h
@@ -26,14 +26,12 @@ namespace clang {
class ASTContext;
class DependentTemplateName;
-class DiagnosticBuilder;
class IdentifierInfo;
class NamedDecl;
class NestedNameSpecifier;
enum OverloadedOperatorKind : int;
class OverloadedTemplateStorage;
class AssumedTemplateStorage;
-class PartialDiagnostic;
struct PrintingPolicy;
class QualifiedTemplateName;
class SubstTemplateTemplateParmPackStorage;
diff --git a/contrib/llvm-project/clang/include/clang/AST/TypeLoc.h b/contrib/llvm-project/clang/include/clang/AST/TypeLoc.h
index 7a036836e8c4..8cfa579a22da 100644
--- a/contrib/llvm-project/clang/include/clang/AST/TypeLoc.h
+++ b/contrib/llvm-project/clang/include/clang/AST/TypeLoc.h
@@ -1994,12 +1994,35 @@ public:
void initializeLocal(ASTContext &Context, SourceLocation Loc);
};
-// FIXME: location of the 'decltype' and parens.
-class DecltypeTypeLoc : public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
- DecltypeTypeLoc,
- DecltypeType> {
+// decltype(expression) abc;
+// ~~~~~~~~ DecltypeLoc
+// ~ RParenLoc
+// FIXME: add LParenLoc; it is tricky to support due to the limitations of the
+// annotated-decltype token.
+struct DecltypeTypeLocInfo {
+ SourceLocation DecltypeLoc;
+ SourceLocation RParenLoc;
+};
+class DecltypeTypeLoc
+ : public ConcreteTypeLoc<UnqualTypeLoc, DecltypeTypeLoc, DecltypeType,
+ DecltypeTypeLocInfo> {
public:
Expr *getUnderlyingExpr() const { return getTypePtr()->getUnderlyingExpr(); }
+
+ SourceLocation getDecltypeLoc() const { return getLocalData()->DecltypeLoc; }
+ void setDecltypeLoc(SourceLocation Loc) { getLocalData()->DecltypeLoc = Loc; }
+
+ SourceLocation getRParenLoc() const { return getLocalData()->RParenLoc; }
+ void setRParenLoc(SourceLocation Loc) { getLocalData()->RParenLoc = Loc; }
+
+ SourceRange getLocalSourceRange() const {
+ return SourceRange(getDecltypeLoc(), getRParenLoc());
+ }
+
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
+ setDecltypeLoc(Loc);
+ setRParenLoc(Loc);
+ }
};
struct UnaryTransformTypeLocInfo {
@@ -2058,6 +2081,11 @@ struct AutoTypeLocInfo : TypeSpecLocInfo {
NamedDecl *FoundDecl;
SourceLocation LAngleLoc;
SourceLocation RAngleLoc;
+
+ // For decltype(auto).
+ SourceLocation RParenLoc;
+
+ // Followed by a TemplateArgumentLocInfo[]
};
class AutoTypeLoc
@@ -2070,6 +2098,10 @@ public:
return getTypePtr()->getKeyword();
}
+ bool isDecltypeAuto() const { return getTypePtr()->isDecltypeAuto(); }
+ SourceLocation getRParenLoc() const { return getLocalData()->RParenLoc; }
+ void setRParenLoc(SourceLocation Loc) { getLocalData()->RParenLoc = Loc; }
+
bool isConstrained() const {
return getTypePtr()->isConstrained();
}
@@ -2150,16 +2182,13 @@ public:
}
SourceRange getLocalSourceRange() const {
- return{
- isConstrained()
- ? (getNestedNameSpecifierLoc()
- ? getNestedNameSpecifierLoc().getBeginLoc()
- : (getTemplateKWLoc().isValid()
- ? getTemplateKWLoc()
- : getConceptNameLoc()))
- : getNameLoc(),
- getNameLoc()
- };
+ return {isConstrained()
+ ? (getNestedNameSpecifierLoc()
+ ? getNestedNameSpecifierLoc().getBeginLoc()
+ : (getTemplateKWLoc().isValid() ? getTemplateKWLoc()
+ : getConceptNameLoc()))
+ : getNameLoc(),
+ isDecltypeAuto() ? getRParenLoc() : getNameLoc()};
}
void copy(AutoTypeLoc Loc) {
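With the new DecltypeLoc/RParenLoc information, client code can report the exact range of a decltype specifier. A small sketch of a RecursiveASTVisitor hook, assuming the usual visitor setup elsewhere (DecltypeLocReporter is an illustrative name):

#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/TypeLoc.h"

struct DecltypeLocReporter
    : clang::RecursiveASTVisitor<DecltypeLocReporter> {
  bool VisitDecltypeTypeLoc(clang::DecltypeTypeLoc TL) {
    // Spans "decltype" through the closing ')'; the LParenLoc is not yet
    // tracked, per the FIXME above.
    clang::SourceRange R(TL.getDecltypeLoc(), TL.getRParenLoc());
    (void)R; // e.g. attach to a diagnostic or a refactoring edit
    return true;
  }
};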
diff --git a/contrib/llvm-project/clang/include/clang/AST/UnresolvedSet.h b/contrib/llvm-project/clang/include/clang/AST/UnresolvedSet.h
index c75aa0785a63..17b47f6ab96b 100644
--- a/contrib/llvm-project/clang/include/clang/AST/UnresolvedSet.h
+++ b/contrib/llvm-project/clang/include/clang/AST/UnresolvedSet.h
@@ -121,7 +121,7 @@ public:
void setAccess(iterator I, AccessSpecifier AS) { I.I->setAccess(AS); }
void clear() { decls().clear(); }
- void set_size(unsigned N) { decls().set_size(N); }
+ void truncate(unsigned N) { decls().truncate(N); }
bool empty() const { return decls().empty(); }
unsigned size() const { return decls().size(); }
diff --git a/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchers.h b/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchers.h
index 55cce324b436..86bd44091b59 100644
--- a/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchers.h
+++ b/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchers.h
@@ -3725,10 +3725,9 @@ AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>,
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) {
Selector Sel = Node.getSelector();
- return BaseName.compare(Sel.getAsString()) == 0;
+ return BaseName == Sel.getAsString();
}
-
/// Matches when at least one of the supplied strings equals the
/// Selector.getAsString()
///
@@ -5171,6 +5170,25 @@ AST_POLYMORPHIC_MATCHER(isNoThrow,
return FnTy->isNothrow();
}
+/// Matches consteval function declarations and if consteval/if ! consteval
+/// statements.
+///
+/// Given:
+/// \code
+/// consteval int a();
+/// void b() { if consteval {} }
+/// void c() { if ! consteval {} }
+/// void d() { if ! consteval {} else {} }
+/// \endcode
+/// functionDecl(isConsteval())
+/// matches the declaration of "int a()".
+/// ifStmt(isConsteval())
+/// matches the if statement in "void b()", "void c()", "void d()".
+AST_POLYMORPHIC_MATCHER(isConsteval,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, IfStmt)) {
+ return Node.isConsteval();
+}
+
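A usage sketch for the new matcher, assuming an existing MatchFinder and callback (neither is part of this change); it composes like any other narrowing matcher:

#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/ASTMatchers/ASTMatchers.h"

using namespace clang::ast_matchers;

void addConstevalMatchers(MatchFinder &Finder,
                          MatchFinder::MatchCallback &CB) {
  // Consteval function declarations, and "if consteval" / "if ! consteval".
  Finder.addMatcher(functionDecl(isConsteval()).bind("fn"), &CB);
  Finder.addMatcher(ifStmt(isConsteval()).bind("if"), &CB);
}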
/// Matches constexpr variable and function declarations,
/// and if constexpr.
///
@@ -5193,6 +5211,23 @@ AST_POLYMORPHIC_MATCHER(isConstexpr,
return Node.isConstexpr();
}
+/// Matches constinit variable declarations.
+///
+/// Given:
+/// \code
+/// constinit int foo = 42;
+/// constinit const char* bar = "bar";
+/// int baz = 42;
+/// [[clang::require_constant_initialization]] int xyz = 42;
+/// \endcode
+/// varDecl(isConstinit())
+/// matches the declaration of `foo` and `bar`, but not `baz` and `xyz`.
+AST_MATCHER(VarDecl, isConstinit) {
+ if (const auto *CIA = Node.getAttr<ConstInitAttr>())
+ return CIA->isConstinit();
+ return false;
+}
+
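Similarly, a sketch for the new constinit matcher, under the same assumed MatchFinder setup (addConstinitMatcher is an illustrative helper):

#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/ASTMatchers/ASTMatchers.h"

using namespace clang::ast_matchers;

void addConstinitMatcher(MatchFinder &Finder,
                         MatchFinder::MatchCallback &CB) {
  // Matches "constinit int foo = 42;" but not plain or attribute-only forms.
  Finder.addMatcher(varDecl(isConstinit()).bind("var"), &CB);
}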
/// Matches selection statements with initializer.
///
/// Given:
diff --git a/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Diagnostics.h b/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Diagnostics.h
index 10625311c1a5..3f6f364d6505 100644
--- a/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Diagnostics.h
+++ b/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Diagnostics.h
@@ -40,7 +40,7 @@ struct SourceRange {
/// A VariantValue instance annotated with its parser context.
struct ParserValue {
- ParserValue() : Text(), Range(), Value() {}
+ ParserValue() {}
StringRef Text;
SourceRange Range;
VariantValue Value;
@@ -186,4 +186,4 @@ private:
} // namespace ast_matchers
} // namespace clang
-#endif // LLVM_CLANG_AST_MATCHERS_DYNAMIC_DIAGNOSTICS_H
+#endif // LLVM_CLANG_ASTMATCHERS_DYNAMIC_DIAGNOSTICS_H
diff --git a/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Parser.h b/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Parser.h
index af370d83782a..26e321c98ff6 100644
--- a/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Parser.h
+++ b/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Parser.h
@@ -280,4 +280,4 @@ private:
} // namespace ast_matchers
} // namespace clang
-#endif // LLVM_CLANG_AST_MATCHERS_DYNAMIC_PARSER_H
+#endif // LLVM_CLANG_ASTMATCHERS_DYNAMIC_PARSER_H
diff --git a/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Registry.h b/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Registry.h
index f91f5fe01c4e..ee47469c6e18 100644
--- a/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Registry.h
+++ b/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Registry.h
@@ -157,4 +157,4 @@ public:
} // namespace ast_matchers
} // namespace clang
-#endif // LLVM_CLANG_AST_MATCHERS_DYNAMIC_REGISTRY_H
+#endif // LLVM_CLANG_ASTMATCHERS_DYNAMIC_REGISTRY_H
diff --git a/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/VariantValue.h b/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/VariantValue.h
index 5b3f8a7ca5eb..e1c19eb835ba 100644
--- a/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/VariantValue.h
+++ b/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/VariantValue.h
@@ -356,4 +356,4 @@ private:
} // end namespace ast_matchers
} // end namespace clang
-#endif // LLVM_CLANG_AST_MATCHERS_DYNAMIC_VARIANT_VALUE_H
+#endif // LLVM_CLANG_ASTMATCHERS_DYNAMIC_VARIANTVALUE_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/CalledOnceCheck.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/CalledOnceCheck.h
index a0c767bf92d2..6d78fa4a897a 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/CalledOnceCheck.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/CalledOnceCheck.h
@@ -20,7 +20,6 @@ class AnalysisDeclContext;
class BlockDecl;
class CFG;
class Decl;
-class DeclContext;
class Expr;
class ParmVarDecl;
class Stmt;
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/Consumed.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/Consumed.h
index dec1ae3b2b4b..24702567ab6c 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/Consumed.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/Consumed.h
@@ -153,8 +153,7 @@ namespace consumed {
public:
ConsumedStateMap() = default;
ConsumedStateMap(const ConsumedStateMap &Other)
- : Reachable(Other.Reachable), From(Other.From), VarMap(Other.VarMap),
- TmpMap() {}
+ : Reachable(Other.Reachable), From(Other.From), VarMap(Other.VarMap) {}
/// Warn if any of the parameters being tracked are not in the state
/// they were declared to be in upon return from a function.
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h
index a0ae44131b45..2f6a78126a1d 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h
@@ -517,4 +517,4 @@ void printSCFG(CFGWalker &Walker);
} // namespace threadSafety
} // namespace clang
-#endif // LLVM_CLANG_THREAD_SAFETY_COMMON_H
+#endif // LLVM_CLANG_ANALYSIS_ANALYSES_THREADSAFETYCOMMON_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyUtil.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyUtil.h
index e3b6e61d3026..088474b9b298 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyUtil.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyUtil.h
@@ -354,4 +354,4 @@ inline std::ostream& operator<<(std::ostream& ss, const StringRef str) {
} // namespace threadSafety
} // namespace clang
-#endif // LLVM_CLANG_THREAD_SAFETY_UTIL_H
+#endif // LLVM_CLANG_ANALYSIS_ANALYSES_THREADSAFETYUTIL_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/AnyCall.h b/contrib/llvm-project/clang/include/clang/Analysis/AnyCall.h
index 846ff7719ce1..6e5e019ce263 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/AnyCall.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/AnyCall.h
@@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//
//
-#ifndef LLVM_CLANG_ANALYSIS_ANY_CALL_H
-#define LLVM_CLANG_ANALYSIS_ANY_CALL_H
+#ifndef LLVM_CLANG_ANALYSIS_ANYCALL_H
+#define LLVM_CLANG_ANALYSIS_ANYCALL_H
#include "clang/AST/Decl.h"
#include "clang/AST/ExprCXX.h"
@@ -215,4 +215,4 @@ public:
}
-#endif // LLVM_CLANG_ANALYSIS_ANY_CALL_H
+#endif // LLVM_CLANG_ANALYSIS_ANYCALL_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/BodyFarm.h b/contrib/llvm-project/clang/include/clang/Analysis/BodyFarm.h
index 72607f8839f5..d947ac070209 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/BodyFarm.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/BodyFarm.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_LIB_ANALYSIS_BODYFARM_H
-#define LLVM_CLANG_LIB_ANALYSIS_BODYFARM_H
+#ifndef LLVM_CLANG_ANALYSIS_BODYFARM_H
+#define LLVM_CLANG_ANALYSIS_BODYFARM_H
#include "clang/AST/DeclBase.h"
#include "clang/Basic/LLVM.h"
@@ -24,7 +24,6 @@ namespace clang {
class ASTContext;
class FunctionDecl;
class ObjCMethodDecl;
-class ObjCPropertyDecl;
class Stmt;
class CodeInjector;
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/CFG.h b/contrib/llvm-project/clang/include/clang/Analysis/CFG.h
index b8e453fcc235..c5512a7e1499 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/CFG.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/CFG.h
@@ -707,7 +707,7 @@ class CFGBlock {
template <bool IsOtherConst>
ElementRefIterator(ElementRefIterator<true, IsOtherConst> E)
- : ElementRefIterator(E.Parent, llvm::make_reverse_iterator(E.Pos)) {}
+ : ElementRefIterator(E.Parent, std::make_reverse_iterator(E.Pos)) {}
bool operator<(ElementRefIterator Other) const {
assert(Parent == Other.Parent);
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/CloneDetection.h b/contrib/llvm-project/clang/include/clang/Analysis/CloneDetection.h
index 0b86c7fd86dd..b2911a5b44eb 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/CloneDetection.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/CloneDetection.h
@@ -11,8 +11,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_AST_CLONEDETECTION_H
-#define LLVM_CLANG_AST_CLONEDETECTION_H
+#ifndef LLVM_CLANG_ANALYSIS_CLONEDETECTION_H
+#define LLVM_CLANG_ANALYSIS_CLONEDETECTION_H
#include "clang/AST/StmtVisitor.h"
#include "llvm/Support/Regex.h"
@@ -441,4 +441,4 @@ struct MatchingVariablePatternConstraint {
} // end namespace clang
-#endif // LLVM_CLANG_AST_CLONEDETECTION_H
+#endif // LLVM_CLANG_ANALYSIS_CLONEDETECTION_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/ControlFlowContext.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/ControlFlowContext.h
new file mode 100644
index 000000000000..e6ceb3a89131
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/ControlFlowContext.h
@@ -0,0 +1,57 @@
+//===-- ControlFlowContext.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a ControlFlowContext class that is used by dataflow
+// analyses that run over Control-Flow Graphs (CFGs).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_CONTROLFLOWCONTEXT_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_CONTROLFLOWCONTEXT_H
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Stmt.h"
+#include "clang/Analysis/CFG.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/Error.h"
+#include <memory>
+#include <utility>
+
+namespace clang {
+namespace dataflow {
+
+/// Holds CFG and other derived context that is needed to perform dataflow
+/// analysis.
+class ControlFlowContext {
+public:
+ /// Builds a ControlFlowContext from an AST node.
+ static llvm::Expected<ControlFlowContext> build(const Decl *D, Stmt *S,
+ ASTContext *C);
+
+ /// Returns the CFG that is stored in this context.
+ const CFG &getCFG() const { return *Cfg; }
+
+ /// Returns a mapping from statements to basic blocks that contain them.
+ const llvm::DenseMap<const Stmt *, const CFGBlock *> &getStmtToBlock() const {
+ return StmtToBlock;
+ }
+
+private:
+ ControlFlowContext(std::unique_ptr<CFG> Cfg,
+ llvm::DenseMap<const Stmt *, const CFGBlock *> StmtToBlock)
+ : Cfg(std::move(Cfg)), StmtToBlock(std::move(StmtToBlock)) {}
+
+ std::unique_ptr<CFG> Cfg;
+ llvm::DenseMap<const Stmt *, const CFGBlock *> StmtToBlock;
+};
+
+} // namespace dataflow
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_CONTROLFLOWCONTEXT_H
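A minimal sketch of how this new header is meant to be used: build a ControlFlowContext for a function body and hand it to the analysis entry points further below. FD is assumed to be a FunctionDecl with a body; buildCFCtx is an illustrative helper.

#include "clang/AST/Decl.h"
#include "clang/Analysis/FlowSensitive/ControlFlowContext.h"
#include "llvm/Support/Error.h"

llvm::Expected<clang::dataflow::ControlFlowContext>
buildCFCtx(const clang::FunctionDecl &FD) {
  // build() constructs the CFG and the statement-to-block map in one step
  // and reports failures through llvm::Expected.
  return clang::dataflow::ControlFlowContext::build(
      &FD, FD.getBody(), &FD.getASTContext());
}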
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysis.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysis.h
index a5d4a5d6ba40..f327abe63751 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysis.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysis.h
@@ -21,6 +21,7 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/Stmt.h"
#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/FlowSensitive/ControlFlowContext.h"
#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
#include "clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h"
#include "llvm/ADT/Any.h"
@@ -38,9 +39,13 @@ namespace dataflow {
/// must provide the following public members:
/// * `LatticeT initialElement()` - returns a lattice element that models the
/// initial state of a basic block;
-/// * `LatticeT transfer(const Stmt *, const LatticeT &, Environment &)` -
-/// applies the analysis transfer function for a given statement and lattice
-/// element.
+/// * `void transfer(const Stmt *, LatticeT &, Environment &)` - applies the
+/// analysis transfer function for a given statement and lattice element.
+///
+/// `Derived` can optionally override the following members:
+/// * `bool merge(QualType, const Value &, const Value &, Value &,
+/// Environment &)` - joins distinct values. This could be a strict
+/// lattice join or a more general widening operation.
///
/// `LatticeT` is a bounded join-semilattice that is used by `Derived` and must
/// provide the following public members:
@@ -57,6 +62,8 @@ public:
using Lattice = LatticeT;
explicit DataflowAnalysis(ASTContext &Context) : Context(Context) {}
+ explicit DataflowAnalysis(ASTContext &Context, bool ApplyBuiltinTransfer)
+ : TypeErasedDataflowAnalysis(ApplyBuiltinTransfer), Context(Context) {}
ASTContext &getASTContext() final { return Context; }
@@ -78,11 +85,10 @@ public:
return L1 == L2;
}
- TypeErasedLattice transferTypeErased(const Stmt *Stmt,
- const TypeErasedLattice &E,
- Environment &Env) final {
- const Lattice &L = llvm::any_cast<const Lattice &>(E.Value);
- return {static_cast<Derived *>(this)->transfer(Stmt, L, Env)};
+ void transferTypeErased(const Stmt *Stmt, TypeErasedLattice &E,
+ Environment &Env) final {
+ Lattice &L = llvm::any_cast<Lattice &>(E.Value);
+ static_cast<Derived *>(this)->transfer(Stmt, L, Env);
}
private:
@@ -101,17 +107,12 @@ template <typename LatticeT> struct DataflowAnalysisState {
/// Performs dataflow analysis and returns a mapping from basic block IDs to
/// dataflow analysis states that model the respective basic blocks. Indices
/// of the returned vector correspond to basic block IDs.
-///
-/// Requirements:
-///
-/// `Cfg` must have been built with `CFG::BuildOptions::setAllAlwaysAdd()` to
-/// ensure that all sub-expressions in a basic block are evaluated.
template <typename AnalysisT>
std::vector<llvm::Optional<DataflowAnalysisState<typename AnalysisT::Lattice>>>
-runDataflowAnalysis(const CFG &Cfg, AnalysisT &Analysis,
+runDataflowAnalysis(const ControlFlowContext &CFCtx, AnalysisT &Analysis,
const Environment &InitEnv) {
auto TypeErasedBlockStates =
- runTypeErasedDataflowAnalysis(Cfg, Analysis, InitEnv);
+ runTypeErasedDataflowAnalysis(CFCtx, Analysis, InitEnv);
std::vector<
llvm::Optional<DataflowAnalysisState<typename AnalysisT::Lattice>>>
BlockStates;
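The doc comment above spells out the contract a Derived analysis must meet under the new in-place transfer signature. A compressed, illustrative sketch (SeenLattice and SeenAnalysis are invented names, not part of the change):

#include "clang/AST/ASTContext.h"
#include "clang/AST/Stmt.h"
#include "clang/Analysis/FlowSensitive/DataflowAnalysis.h"
#include "clang/Analysis/FlowSensitive/DataflowLattice.h"

using namespace clang;
using namespace clang::dataflow;

// A one-bit lattice: "has any statement been seen on this path?"
struct SeenLattice {
  bool Seen = false;
  friend bool operator==(SeenLattice A, SeenLattice B) { return A.Seen == B.Seen; }
  LatticeJoinEffect join(const SeenLattice &Other) {
    if (Seen || !Other.Seen)
      return LatticeJoinEffect::Unchanged;
    Seen = true;
    return LatticeJoinEffect::Changed;
  }
};

class SeenAnalysis : public DataflowAnalysis<SeenAnalysis, SeenLattice> {
public:
  explicit SeenAnalysis(ASTContext &Ctx)
      : DataflowAnalysis<SeenAnalysis, SeenLattice>(Ctx) {}

  SeenLattice initialElement() { return {}; }

  // Note the new signature: the lattice element is updated in place rather
  // than returned by value.
  void transfer(const Stmt *, SeenLattice &L, Environment &) { L.Seen = true; }
};

Such an analysis would then be driven with runDataflowAnalysis(CFCtx, Analysis, InitEnv), where CFCtx is a ControlFlowContext built as sketched earlier.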
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h
new file mode 100644
index 000000000000..5c1b41d53892
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h
@@ -0,0 +1,145 @@
+//===-- DataflowAnalysisContext.h -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a DataflowAnalysisContext class that owns objects that
+// encompass the state of a program and stores context that is used during
+// dataflow analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWANALYSISCONTEXT_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWANALYSISCONTEXT_H
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/Expr.h"
+#include "clang/Analysis/FlowSensitive/StorageLocation.h"
+#include "clang/Analysis/FlowSensitive/Value.h"
+#include "llvm/ADT/DenseMap.h"
+#include <cassert>
+#include <memory>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+namespace clang {
+namespace dataflow {
+
+/// Owns objects that encompass the state of a program and stores context that
+/// is used during dataflow analysis.
+class DataflowAnalysisContext {
+public:
+ DataflowAnalysisContext()
+ : TrueVal(&takeOwnership(std::make_unique<BoolValue>())),
+ FalseVal(&takeOwnership(std::make_unique<BoolValue>())) {}
+
+ /// Takes ownership of `Loc` and returns a reference to it.
+ ///
+ /// Requirements:
+ ///
+ /// `Loc` must not be null.
+ template <typename T>
+ typename std::enable_if<std::is_base_of<StorageLocation, T>::value, T &>::type
+ takeOwnership(std::unique_ptr<T> Loc) {
+ assert(Loc != nullptr);
+ Locs.push_back(std::move(Loc));
+ return *cast<T>(Locs.back().get());
+ }
+
+ /// Takes ownership of `Val` and returns a reference to it.
+ ///
+ /// Requirements:
+ ///
+ /// `Val` must not be null.
+ template <typename T>
+ typename std::enable_if<std::is_base_of<Value, T>::value, T &>::type
+ takeOwnership(std::unique_ptr<T> Val) {
+ assert(Val != nullptr);
+ Vals.push_back(std::move(Val));
+ return *cast<T>(Vals.back().get());
+ }
+
+ /// Assigns `Loc` as the storage location of `D`.
+ ///
+ /// Requirements:
+ ///
+ /// `D` must not be assigned a storage location.
+ void setStorageLocation(const ValueDecl &D, StorageLocation &Loc) {
+ assert(DeclToLoc.find(&D) == DeclToLoc.end());
+ DeclToLoc[&D] = &Loc;
+ }
+
+ /// Returns the storage location assigned to `D` or null if `D` has no
+ /// assigned storage location.
+ StorageLocation *getStorageLocation(const ValueDecl &D) const {
+ auto It = DeclToLoc.find(&D);
+ return It == DeclToLoc.end() ? nullptr : It->second;
+ }
+
+ /// Assigns `Loc` as the storage location of `E`.
+ ///
+ /// Requirements:
+ ///
+ /// `E` must not be assigned a storage location.
+ void setStorageLocation(const Expr &E, StorageLocation &Loc) {
+ assert(ExprToLoc.find(&E) == ExprToLoc.end());
+ ExprToLoc[&E] = &Loc;
+ }
+
+ /// Returns the storage location assigned to `E` or null if `E` has no
+ /// assigned storage location.
+ StorageLocation *getStorageLocation(const Expr &E) const {
+ auto It = ExprToLoc.find(&E);
+ return It == ExprToLoc.end() ? nullptr : It->second;
+ }
+
+ /// Assigns `Loc` as the storage location of the `this` pointee.
+ ///
+ /// Requirements:
+ ///
+ /// The `this` pointee must not be assigned a storage location.
+ void setThisPointeeStorageLocation(StorageLocation &Loc) {
+ assert(ThisPointeeLoc == nullptr);
+ ThisPointeeLoc = &Loc;
+ }
+
+ /// Returns the storage location assigned to the `this` pointee or null if the
+ /// `this` pointee has no assigned storage location.
+ StorageLocation *getThisPointeeStorageLocation() const {
+ return ThisPointeeLoc;
+ }
+
+ /// Returns a symbolic boolean value that models a boolean literal equal to
+ /// `Value`.
+ BoolValue &getBoolLiteralValue(bool Value) const {
+ return Value ? *TrueVal : *FalseVal;
+ }
+
+private:
+ // Storage for the state of a program.
+ std::vector<std::unique_ptr<StorageLocation>> Locs;
+ std::vector<std::unique_ptr<Value>> Vals;
+
+ // Maps from program declarations and statements to storage locations that are
+ // assigned to them. These assignments are global (aggregated across all basic
+ // blocks) and are used to produce stable storage locations when the same
+ // basic blocks are evaluated multiple times. The storage locations that are
+ // in scope for a particular basic block are stored in `Environment`.
+ llvm::DenseMap<const ValueDecl *, StorageLocation *> DeclToLoc;
+ llvm::DenseMap<const Expr *, StorageLocation *> ExprToLoc;
+
+ StorageLocation *ThisPointeeLoc = nullptr;
+
+ // FIXME: Add support for boolean expressions.
+ BoolValue *TrueVal;
+ BoolValue *FalseVal;
+};
+
+} // namespace dataflow
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWANALYSISCONTEXT_H
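A small illustration of the ownership and mapping API above (trackDecl is an invented helper; in the framework itself the Environment defined in the next header forwards to these methods):

#include <memory>

#include "clang/AST/Decl.h"
#include "clang/Analysis/FlowSensitive/DataflowAnalysisContext.h"
#include "clang/Analysis/FlowSensitive/StorageLocation.h"

using namespace clang::dataflow;

void trackDecl(DataflowAnalysisContext &DACtx, const clang::VarDecl &D) {
  // The context owns the location; callers only keep references.
  auto &Loc = DACtx.takeOwnership(
      std::make_unique<ScalarStorageLocation>(D.getType()));
  DACtx.setStorageLocation(D, Loc);      // asserts D was not mapped before
  (void)DACtx.getBoolLiteralValue(true); // shared, pre-allocated BoolValue
}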
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h
index 4a3c0239f8e1..e560305cf5ca 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h
@@ -15,19 +15,215 @@
#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWENVIRONMENT_H
#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWENVIRONMENT_H
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeOrdering.h"
+#include "clang/Analysis/FlowSensitive/DataflowAnalysisContext.h"
#include "clang/Analysis/FlowSensitive/DataflowLattice.h"
+#include "clang/Analysis/FlowSensitive/StorageLocation.h"
+#include "clang/Analysis/FlowSensitive/Value.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include <memory>
+#include <type_traits>
+#include <utility>
namespace clang {
namespace dataflow {
+/// Indicates what kind of indirections should be skipped past when retrieving
+/// storage locations or values.
+///
+/// FIXME: Consider renaming this or replacing it with a more appropriate model.
+/// See the discussion in https://reviews.llvm.org/D116596 for context.
+enum class SkipPast {
+ /// No indirections should be skipped past.
+ None,
+ /// An optional reference should be skipped past.
+ Reference,
+ /// An optional reference should be skipped past, then an optional pointer
+ /// should be skipped past.
+ ReferenceThenPointer,
+};
+
/// Holds the state of the program (store and heap) at a given program point.
class Environment {
public:
- bool operator==(const Environment &) const { return true; }
+ /// Supplements `Environment` with non-standard join operations.
+ class Merger {
+ public:
+ virtual ~Merger() = default;
+
+ /// Given distinct `Val1` and `Val2`, modifies `MergedVal` to approximate
+ /// both `Val1` and `Val2`. This could be a strict lattice join or a more
+ /// general widening operation. If this function returns true, `MergedVal`
+ /// will be assigned to a storage location of type `Type` in `Env`.
+ ///
+ /// Requirements:
+ ///
+ /// `Val1` and `Val2` must be distinct.
+ virtual bool merge(QualType Type, const Value &Val1, const Value &Val2,
+ Value &MergedVal, Environment &Env) {
+ return false;
+ }
+ };
+
+ /// Creates an environment that uses `DACtx` to store objects that encompass
+ /// the state of a program.
+ explicit Environment(DataflowAnalysisContext &DACtx) : DACtx(&DACtx) {}
+
+ /// Creates an environment that uses `DACtx` to store objects that encompass
+ /// the state of a program.
+ ///
+ /// If `DeclCtx` is a function, initializes the environment with symbolic
+ /// representations of the function parameters.
+ ///
+ /// If `DeclCtx` is a non-static member function, initializes the environment
+ /// with a symbolic representation of the `this` pointee.
+ Environment(DataflowAnalysisContext &DACtx, const DeclContext &DeclCtx);
+
+ bool operator==(const Environment &) const;
+
+ LatticeJoinEffect join(const Environment &, Environment::Merger &);
+
+ // FIXME: Rename `createOrGetStorageLocation` to `getOrCreateStorageLocation`,
+ // `getStableStorageLocation`, or something more appropriate.
+
+ /// Creates a storage location appropriate for `Type`. Does not assign a value
+ /// to the returned storage location in the environment.
+ ///
+ /// Requirements:
+ ///
+ /// `Type` must not be null.
+ StorageLocation &createStorageLocation(QualType Type);
+
+ /// Creates a storage location for `D`. Does not assign the returned storage
+ /// location to `D` in the environment. Does not assign a value to the
+ /// returned storage location in the environment.
+ StorageLocation &createStorageLocation(const VarDecl &D);
+
+ /// Creates a storage location for `E`. Does not assign the returned storage
+ /// location to `E` in the environment. Does not assign a value to the
+ /// returned storage location in the environment.
+ StorageLocation &createStorageLocation(const Expr &E);
+
+ /// Assigns `Loc` as the storage location of `D` in the environment.
+ ///
+ /// Requirements:
+ ///
+ /// `D` must not be assigned a storage location in the environment.
+ void setStorageLocation(const ValueDecl &D, StorageLocation &Loc);
+
+ /// Returns the storage location assigned to `D` in the environment, applying
+ /// the `SP` policy for skipping past indirections, or null if `D` isn't
+ /// assigned a storage location in the environment.
+ StorageLocation *getStorageLocation(const ValueDecl &D, SkipPast SP) const;
+
+ /// Assigns `Loc` as the storage location of `E` in the environment.
+ ///
+ /// Requirements:
+ ///
+ /// `E` must not be assigned a storage location in the environment.
+ void setStorageLocation(const Expr &E, StorageLocation &Loc);
+
+ /// Returns the storage location assigned to `E` in the environment, applying
+ /// the `SP` policy for skipping past indirections, or null if `E` isn't
+ /// assigned a storage location in the environment.
+ StorageLocation *getStorageLocation(const Expr &E, SkipPast SP) const;
+
+ /// Returns the storage location assigned to the `this` pointee in the
+ /// environment or null if the `this` pointee has no assigned storage location
+ /// in the environment.
+ StorageLocation *getThisPointeeStorageLocation() const;
- LatticeJoinEffect join(const Environment &) {
- return LatticeJoinEffect::Unchanged;
+ /// Creates a value appropriate for `Type`, if `Type` is supported; otherwise
+ /// returns null. If `Type` is a pointer or reference type, creates all the
+ /// necessary storage locations and values for indirections until it finds a
+ /// non-pointer/non-reference type.
+ ///
+ /// Requirements:
+ ///
+ /// `Type` must not be null.
+ Value *createValue(QualType Type);
+
+ /// Assigns `Val` as the value of `Loc` in the environment.
+ void setValue(const StorageLocation &Loc, Value &Val);
+
+ /// Returns the value assigned to `Loc` in the environment or null if `Loc`
+ /// isn't assigned a value in the environment.
+ Value *getValue(const StorageLocation &Loc) const;
+
+ /// Equivalent to `getValue(getStorageLocation(D, SP), SkipPast::None)` if `D`
+ /// is assigned a storage location in the environment, otherwise returns null.
+ Value *getValue(const ValueDecl &D, SkipPast SP) const;
+
+ /// Equivalent to `getValue(getStorageLocation(E, SP), SkipPast::None)` if `E`
+ /// is assigned a storage location in the environment, otherwise returns null.
+ Value *getValue(const Expr &E, SkipPast SP) const;
+
+ /// Transfers ownership of `Loc` to the analysis context and returns a
+ /// reference to it.
+ ///
+ /// Requirements:
+ ///
+ /// `Loc` must not be null.
+ template <typename T>
+ typename std::enable_if<std::is_base_of<StorageLocation, T>::value, T &>::type
+ takeOwnership(std::unique_ptr<T> Loc) {
+ return DACtx->takeOwnership(std::move(Loc));
+ }
+
+ /// Transfers ownership of `Val` to the analysis context and returns a
+ /// reference to it.
+ ///
+ /// Requirements:
+ ///
+ /// `Val` must not be null.
+ template <typename T>
+ typename std::enable_if<std::is_base_of<Value, T>::value, T &>::type
+ takeOwnership(std::unique_ptr<T> Val) {
+ return DACtx->takeOwnership(std::move(Val));
}
+
+ /// Returns a symbolic boolean value that models a boolean literal equal to
+ /// `Value`.
+ BoolValue &getBoolLiteralValue(bool Value) const {
+ return DACtx->getBoolLiteralValue(Value);
+ }
+
+private:
+ /// Creates a value appropriate for `Type`, if `Type` is supported; otherwise
+ /// returns null.
+ ///
+ /// Recursively initializes storage locations and values until it sees a
+ /// self-referential pointer or reference type. `Visited` is used to track
+ /// which types appeared in the reference/pointer chain in order to avoid
+ /// creating a cyclic dependency with self-referential pointers/references.
+ ///
+ /// Requirements:
+ ///
+ /// `Type` must not be null.
+ Value *createValueUnlessSelfReferential(QualType Type,
+ llvm::DenseSet<QualType> &Visited);
+
+ StorageLocation &skip(StorageLocation &Loc, SkipPast SP) const;
+ const StorageLocation &skip(const StorageLocation &Loc, SkipPast SP) const;
+
+ // `DACtx` is not null and not owned by this object.
+ DataflowAnalysisContext *DACtx;
+
+ // Maps from program declarations and statements to storage locations that are
+ // assigned to them. Unlike the maps in `DataflowAnalysisContext`, these
+ // include only storage locations that are in scope for a particular basic
+ // block.
+ llvm::DenseMap<const ValueDecl *, StorageLocation *> DeclToLoc;
+ llvm::DenseMap<const Expr *, StorageLocation *> ExprToLoc;
+
+ llvm::DenseMap<const StorageLocation *, Value *> LocToVal;
+
+ // FIXME: Add flow condition constraints.
};
} // namespace dataflow
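An illustrative sketch of the core Environment operations a transfer function would use (bindFreshValue and modeledValue are invented names):

#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"

using namespace clang::dataflow;

// Give a local variable a stable location and, if the type is supported,
// an initial symbolic value.
void bindFreshValue(Environment &Env, const clang::VarDecl &D) {
  StorageLocation &Loc = Env.createStorageLocation(D);
  Env.setStorageLocation(D, Loc);
  if (Value *V = Env.createValue(D.getType())) // null for unsupported types
    Env.setValue(Loc, *V);
}

// Look up the value modeled for an expression, skipping a possible
// reference indirection first.
Value *modeledValue(const Environment &Env, const clang::Expr &E) {
  return Env.getValue(E, SkipPast::Reference);
}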
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowWorklist.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowWorklist.h
index 52d84eb13c56..e926adf6f0b2 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowWorklist.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowWorklist.h
@@ -92,4 +92,4 @@ struct BackwardDataflowWorklist
} // namespace clang
-#endif // LLVM_CLANG_ANALYSIS_ANALYSES_CONSUMED_H
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWWORKLIST_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/MapLattice.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/MapLattice.h
new file mode 100644
index 000000000000..ff403f68b7c5
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/MapLattice.h
@@ -0,0 +1,140 @@
+//===------------------------ MapLattice.h ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a parameterized lattice that maps keys to individual
+// lattice elements (of the parameter lattice type). A typical usage is lifting
+// a particular lattice to all variables in a lexical scope.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE__MAPLATTICE_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE__MAPLATTICE_H
+
+#include <ostream>
+#include <string>
+#include <utility>
+
+#include "DataflowAnalysis.h"
+#include "clang/AST/Decl.h"
+#include "clang/Analysis/FlowSensitive/DataflowLattice.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace clang {
+namespace dataflow {
+
+/// A lattice that maps keys to individual lattice elements. When instantiated
+/// with an `ElementLattice` that is a bounded semi-lattice, `MapLattice` is
+/// itself a bounded semi-lattice, so long as the user limits themselves to a
+/// finite number of keys. In that case, `top` is (implicitly), the map
+/// containing all valid keys mapped to `top` of `ElementLattice`.
+///
+/// Requirements on `ElementLattice`:
+/// * Provides standard declarations of a bounded semi-lattice.
+template <typename Key, typename ElementLattice> class MapLattice {
+ using Container = llvm::DenseMap<Key, ElementLattice>;
+ Container C;
+
+public:
+ using key_type = Key;
+ using mapped_type = ElementLattice;
+ using value_type = typename Container::value_type;
+ using iterator = typename Container::iterator;
+ using const_iterator = typename Container::const_iterator;
+
+ MapLattice() = default;
+
+ explicit MapLattice(Container C) : C(std::move(C)) {}
+
+ // The `bottom` element is the empty map.
+ static MapLattice bottom() { return MapLattice(); }
+
+ void insert(const std::pair<const key_type, mapped_type> &P) { C.insert(P); }
+
+ void insert(std::pair<const key_type, mapped_type> &&P) {
+ C.insert(std::move(P));
+ }
+
+ unsigned size() const { return C.size(); }
+ bool empty() const { return C.empty(); }
+
+ iterator begin() { return C.begin(); }
+ iterator end() { return C.end(); }
+ const_iterator begin() const { return C.begin(); }
+ const_iterator end() const { return C.end(); }
+
+ // Equality is direct equality of underlying map entries. One implication of
+ // this definition is that a map with (only) keys that map to bottom is not
+ // equal to the empty map.
+ friend bool operator==(const MapLattice &LHS, const MapLattice &RHS) {
+ return LHS.C == RHS.C;
+ }
+
+ friend bool operator!=(const MapLattice &LHS, const MapLattice &RHS) {
+ return !(LHS == RHS);
+ }
+
+ bool contains(const key_type &K) const { return C.find(K) != C.end(); }
+
+ iterator find(const key_type &K) { return C.find(K); }
+ const_iterator find(const key_type &K) const { return C.find(K); }
+
+ mapped_type &operator[](const key_type &K) { return C[K]; }
+
+ /// If an entry exists in one map but not the other, the missing entry is
+ /// treated as implicitly mapping to `bottom`. So, the joined map contains the
+ /// entry as it was in the source map.
+ LatticeJoinEffect join(const MapLattice &Other) {
+ LatticeJoinEffect Effect = LatticeJoinEffect::Unchanged;
+ for (const auto &O : Other.C) {
+ auto It = C.find(O.first);
+ if (It == C.end()) {
+ C.insert(O);
+ Effect = LatticeJoinEffect::Changed;
+ } else if (It->second.join(O.second) == LatticeJoinEffect::Changed)
+ Effect = LatticeJoinEffect::Changed;
+ }
+ return Effect;
+ }
+};
+
+/// Convenience alias that captures the common use of map lattices to model
+/// in-scope variables.
+template <typename ElementLattice>
+using VarMapLattice = MapLattice<const clang::VarDecl *, ElementLattice>;
+
+template <typename Key, typename ElementLattice>
+std::ostream &
+operator<<(std::ostream &Os,
+ const clang::dataflow::MapLattice<Key, ElementLattice> &M) {
+ std::string Separator = "";
+ Os << "{";
+ for (const auto &E : M) {
+ Os << std::exchange(Separator, ", ") << E.first << " => " << E.second;
+ }
+ Os << "}";
+ return Os;
+}
+
+template <typename ElementLattice>
+std::ostream &
+operator<<(std::ostream &Os,
+ const clang::dataflow::VarMapLattice<ElementLattice> &M) {
+ std::string Separator = "";
+ Os << "{";
+ for (const auto &E : M) {
+ Os << std::exchange(Separator, ", ") << E.first->getName().str() << " => "
+ << E.second;
+ }
+ Os << "}";
+ return Os;
+}
+} // namespace dataflow
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE__MAPLATTICE_H
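A sketch of lifting a tiny element lattice over in-scope variables with VarMapLattice (FlagLattice is an invented element lattice meeting the requirements stated above):

#include "clang/Analysis/FlowSensitive/DataflowLattice.h"
#include "clang/Analysis/FlowSensitive/MapLattice.h"

using namespace clang::dataflow;

// Element lattice with bottom = {Set = false} and top = {Set = true}.
struct FlagLattice {
  bool Set = false;
  friend bool operator==(FlagLattice A, FlagLattice B) { return A.Set == B.Set; }
  LatticeJoinEffect join(const FlagLattice &Other) {
    if (Set || !Other.Set)
      return LatticeJoinEffect::Unchanged;
    Set = true;
    return LatticeJoinEffect::Changed;
  }
};

// One FlagLattice per VarDecl; missing keys are implicitly bottom on join.
using FlagMap = VarMapLattice<FlagLattice>;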
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/StorageLocation.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/StorageLocation.h
new file mode 100644
index 000000000000..5532813d6d29
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/StorageLocation.h
@@ -0,0 +1,89 @@
+//===-- StorageLocation.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines classes that represent elements of the local variable store
+// and of the heap during dataflow analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_STORAGELOCATION_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_STORAGELOCATION_H
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/Type.h"
+#include "llvm/ADT/DenseMap.h"
+
+namespace clang {
+namespace dataflow {
+
+/// Base class for elements of the local variable store and of the heap.
+///
+/// Each storage location holds a value. The mapping from storage locations to
+/// values is stored in the environment.
+class StorageLocation {
+public:
+ enum class Kind { Scalar, Aggregate };
+
+ StorageLocation(Kind LocKind, QualType Type) : LocKind(LocKind), Type(Type) {}
+
+ virtual ~StorageLocation() = default;
+
+ Kind getKind() const { return LocKind; }
+
+ QualType getType() const { return Type; }
+
+private:
+ Kind LocKind;
+ QualType Type;
+};
+
+/// A storage location that is not subdivided further for the purposes of
+/// abstract interpretation. For example: `int`, `int*`, `int&`.
+class ScalarStorageLocation final : public StorageLocation {
+public:
+ explicit ScalarStorageLocation(QualType Type)
+ : StorageLocation(Kind::Scalar, Type) {}
+
+ static bool classof(const StorageLocation *Loc) {
+ return Loc->getKind() == Kind::Scalar;
+ }
+};
+
+/// A storage location which is subdivided into smaller storage locations that
+/// can be traced independently by abstract interpretation. For example: a
+/// struct with public members.
+class AggregateStorageLocation final : public StorageLocation {
+public:
+ explicit AggregateStorageLocation(QualType Type)
+ : AggregateStorageLocation(
+ Type, llvm::DenseMap<const ValueDecl *, StorageLocation *>()) {}
+
+ AggregateStorageLocation(
+ QualType Type,
+ llvm::DenseMap<const ValueDecl *, StorageLocation *> Children)
+ : StorageLocation(Kind::Aggregate, Type), Children(std::move(Children)) {}
+
+ static bool classof(const StorageLocation *Loc) {
+ return Loc->getKind() == Kind::Aggregate;
+ }
+
+ /// Returns the child storage location for `D`.
+ StorageLocation &getChild(const ValueDecl &D) const {
+ auto It = Children.find(&D);
+ assert(It != Children.end());
+ return *It->second;
+ }
+
+private:
+ llvm::DenseMap<const ValueDecl *, StorageLocation *> Children;
+};
+
+} // namespace dataflow
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_STORAGELOCATION_H
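A short sketch of navigating these locations with the LLVM RTTI hooks they provide (fieldLoc is an invented helper):

#include "clang/AST/Decl.h"
#include "clang/Analysis/FlowSensitive/StorageLocation.h"
#include "llvm/Support/Casting.h"

using namespace clang::dataflow;

// Field lookup only applies to aggregates; getChild() asserts that the
// field is actually present in the children map.
StorageLocation *fieldLoc(StorageLocation &Loc, const clang::ValueDecl &Field) {
  if (auto *Agg = llvm::dyn_cast<AggregateStorageLocation>(&Loc))
    return &Agg->getChild(Field);
  return nullptr;
}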
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Transfer.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Transfer.h
new file mode 100644
index 000000000000..a12674a173be
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Transfer.h
@@ -0,0 +1,33 @@
+//===-- Transfer.h ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a transfer function that evaluates a program statement and
+// updates an environment accordingly.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_TRANSFER_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_TRANSFER_H
+
+#include "clang/AST/Stmt.h"
+#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
+
+namespace clang {
+namespace dataflow {
+
+/// Evaluates `S` and updates `Env` accordingly.
+///
+/// Requirements:
+///
+/// The type of `S` must not be `ParenExpr`.
+void transfer(const Stmt &S, Environment &Env);
+
+} // namespace dataflow
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_TRANSFER_H
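A sketch of honoring the documented precondition when driving this entry point by hand (step is an invented helper used only for illustration):

#include "clang/AST/Expr.h"
#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
#include "clang/Analysis/FlowSensitive/Transfer.h"
#include "llvm/Support/Casting.h"

using namespace clang::dataflow;

void step(const clang::Stmt &S, Environment &Env) {
  // transfer() must not be handed a ParenExpr, so strip parentheses from
  // expressions before evaluating them.
  const clang::Stmt *Effective = &S;
  if (const auto *E = llvm::dyn_cast<clang::Expr>(&S))
    Effective = E->IgnoreParens();
  transfer(*Effective, Env);
}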
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h
index 6193b9860d33..9f44475b14ba 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h
@@ -14,11 +14,13 @@
#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_TYPEERASEDDATAFLOWANALYSIS_H
#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_TYPEERASEDDATAFLOWANALYSIS_H
+#include <utility>
#include <vector>
#include "clang/AST/ASTContext.h"
#include "clang/AST/Stmt.h"
#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/FlowSensitive/ControlFlowContext.h"
#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
#include "clang/Analysis/FlowSensitive/DataflowLattice.h"
#include "llvm/ADT/Any.h"
@@ -38,8 +40,18 @@ struct TypeErasedLattice {
};
/// Type-erased base class for dataflow analyses built on a single lattice type.
-class TypeErasedDataflowAnalysis {
+class TypeErasedDataflowAnalysis : public Environment::Merger {
+ /// Determines whether to apply the built-in transfer functions.
+ // FIXME: Remove this option once the framework supports composing analyses
+ // (at which point the built-in transfer functions can be simply a standalone
+ // analysis).
+ bool ApplyBuiltinTransfer;
+
public:
+ TypeErasedDataflowAnalysis() : ApplyBuiltinTransfer(true) {}
+ TypeErasedDataflowAnalysis(bool ApplyBuiltinTransfer)
+ : ApplyBuiltinTransfer(ApplyBuiltinTransfer) {}
+
virtual ~TypeErasedDataflowAnalysis() {}
/// Returns the `ASTContext` that is used by the analysis.
@@ -62,9 +74,12 @@ public:
/// Applies the analysis transfer function for a given statement and
/// type-erased lattice element.
- virtual TypeErasedLattice transferTypeErased(const Stmt *,
- const TypeErasedLattice &,
- Environment &) = 0;
+ virtual void transferTypeErased(const Stmt *, TypeErasedLattice &,
+ Environment &) = 0;
+
+ /// Determines whether to apply the built-in transfer functions, which model
+ /// the heap and stack in the `Environment`.
+ bool applyBuiltinTransfer() const { return ApplyBuiltinTransfer; }
};
/// Type-erased model of the program at a given program point.
@@ -74,6 +89,9 @@ struct TypeErasedDataflowAnalysisState {
/// Model of the state of the program (store and heap).
Environment Env;
+
+ TypeErasedDataflowAnalysisState(TypeErasedLattice Lattice, Environment Env)
+ : Lattice(std::move(Lattice)), Env(std::move(Env)) {}
};
/// Transfers the state of a basic block by evaluating each of its statements in
@@ -87,6 +105,7 @@ struct TypeErasedDataflowAnalysisState {
/// already been transferred. States in `BlockStates` that are set to
/// `llvm::None` represent basic blocks that are not evaluated yet.
TypeErasedDataflowAnalysisState transferBlock(
+ const ControlFlowContext &CFCtx,
std::vector<llvm::Optional<TypeErasedDataflowAnalysisState>> &BlockStates,
const CFGBlock &Block, const Environment &InitEnv,
TypeErasedDataflowAnalysis &Analysis,
@@ -97,13 +116,8 @@ TypeErasedDataflowAnalysisState transferBlock(
/// Performs dataflow analysis and returns a mapping from basic block IDs to
/// dataflow analysis states that model the respective basic blocks. Indices
/// of the returned vector correspond to basic block IDs.
-///
-/// Requirements:
-///
-/// `Cfg` must have been built with `CFG::BuildOptions::setAllAlwaysAdd()` to
-/// ensure that all sub-expressions in a basic block are evaluated.
std::vector<llvm::Optional<TypeErasedDataflowAnalysisState>>
-runTypeErasedDataflowAnalysis(const CFG &Cfg,
+runTypeErasedDataflowAnalysis(const ControlFlowContext &CFCtx,
TypeErasedDataflowAnalysis &Analysis,
const Environment &InitEnv);
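For readers new to the framework, the type erasure above is what lets one generic solver drive analyses over arbitrary lattice types. The following standalone sketch (invented names, std::any instead of the llvm::Any used by the real classes) shows the underlying pattern:

.. code-block:: c++

  // Standalone illustration of type erasure over a lattice; not the real
  // clang::dataflow classes.
  #include <any>

  struct ConstantLattice {                  // a concrete lattice element
    enum State { Bottom, Constant, Top } S = Bottom;
    int Val = 0;
    void join(const ConstantLattice &Other) {
      if (Other.S == Bottom) return;
      if (S == Bottom) { *this = Other; return; }
      if (!(S == Constant && Other.S == Constant && Val == Other.Val))
        S = Top;
    }
  };

  struct ErasedLattice { std::any Value; }; // what the generic solver sees

  void joinErased(ErasedLattice &L, const ErasedLattice &R) {
    // An adapter recovers the concrete type on each operation, which is what
    // the typed-to-type-erased bridge in the framework does.
    std::any_cast<ConstantLattice &>(L.Value)
        .join(std::any_cast<const ConstantLattice &>(R.Value));
  }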
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Value.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Value.h
new file mode 100644
index 000000000000..da04f926c597
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Value.h
@@ -0,0 +1,144 @@
+//===-- Value.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines classes for values computed by abstract interpretation
+// during dataflow analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_VALUE_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_VALUE_H
+
+#include "clang/AST/Decl.h"
+#include "clang/Analysis/FlowSensitive/StorageLocation.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include <cassert>
+#include <utility>
+
+namespace clang {
+namespace dataflow {
+
+/// Base class for all values computed by abstract interpretation.
+class Value {
+public:
+ enum class Kind { Bool, Integer, Reference, Pointer, Struct };
+
+ explicit Value(Kind ValKind) : ValKind(ValKind) {}
+
+ virtual ~Value() = default;
+
+ Kind getKind() const { return ValKind; }
+
+private:
+ Kind ValKind;
+};
+
+/// Models a boolean.
+class BoolValue : public Value {
+public:
+ explicit BoolValue() : Value(Kind::Bool) {}
+
+ static bool classof(const Value *Val) { return Val->getKind() == Kind::Bool; }
+};
+
+/// Models an integer.
+class IntegerValue : public Value {
+public:
+ explicit IntegerValue() : Value(Kind::Integer) {}
+
+ static bool classof(const Value *Val) {
+ return Val->getKind() == Kind::Integer;
+ }
+};
+
+/// Base class for values that refer to storage locations.
+class IndirectionValue : public Value {
+public:
+ /// Constructs a value that refers to `PointeeLoc`.
+ explicit IndirectionValue(Kind ValueKind, StorageLocation &PointeeLoc)
+ : Value(ValueKind), PointeeLoc(PointeeLoc) {}
+
+ static bool classof(const Value *Val) {
+ return Val->getKind() == Kind::Reference || Val->getKind() == Kind::Pointer;
+ }
+
+ StorageLocation &getPointeeLoc() const { return PointeeLoc; }
+
+private:
+ StorageLocation &PointeeLoc;
+};
+
+/// Models a dereferenced pointer. For example, a reference in C++ or an lvalue
+/// in C.
+class ReferenceValue final : public IndirectionValue {
+public:
+ explicit ReferenceValue(StorageLocation &PointeeLoc)
+ : IndirectionValue(Kind::Reference, PointeeLoc) {}
+
+ static bool classof(const Value *Val) {
+ return Val->getKind() == Kind::Reference;
+ }
+};
+
+/// Models a symbolic pointer. Specifically, any value of type `T*`.
+class PointerValue final : public IndirectionValue {
+public:
+ explicit PointerValue(StorageLocation &PointeeLoc)
+ : IndirectionValue(Kind::Pointer, PointeeLoc) {}
+
+ static bool classof(const Value *Val) {
+ return Val->getKind() == Kind::Pointer;
+ }
+};
+
+/// Models a value of `struct` or `class` type.
+class StructValue final : public Value {
+public:
+ StructValue() : StructValue(llvm::DenseMap<const ValueDecl *, Value *>()) {}
+
+ explicit StructValue(llvm::DenseMap<const ValueDecl *, Value *> Children)
+ : Value(Kind::Struct), Children(std::move(Children)) {}
+
+ static bool classof(const Value *Val) {
+ return Val->getKind() == Kind::Struct;
+ }
+
+ /// Returns the child value that is assigned for `D`.
+ Value &getChild(const ValueDecl &D) const {
+ auto It = Children.find(&D);
+ assert(It != Children.end());
+ return *It->second;
+ }
+
+ /// Assigns `Val` as the child value for `D`.
+ void setChild(const ValueDecl &D, Value &Val) { Children[&D] = &Val; }
+
+ /// Returns the value of the synthetic property with the given `Name` or null
+ /// if the property isn't assigned a value.
+ Value *getProperty(llvm::StringRef Name) const {
+ auto It = Properties.find(Name);
+ return It == Properties.end() ? nullptr : It->second;
+ }
+
+ /// Assigns `Val` as the value of the synthetic property with the given
+ /// `Name`.
+ void setProperty(llvm::StringRef Name, Value &Val) {
+ Properties.insert_or_assign(Name, &Val);
+ }
+
+private:
+ llvm::DenseMap<const ValueDecl *, Value *> Children;
+ llvm::StringMap<Value *> Properties;
+};
+
+} // namespace dataflow
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_VALUE_H
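A brief usage sketch of the classes declared above; the surrounding function and the `ValueDecl` argument are hypothetical scaffolding, but the member calls match the header as added:

.. code-block:: c++

  // Usage sketch only. `D` stands in for a field's ValueDecl obtained from
  // the AST elsewhere.
  #include "clang/Analysis/FlowSensitive/Value.h"
  #include "llvm/Support/Casting.h"
  #include <cassert>

  using namespace clang::dataflow;

  void modelStruct(const clang::ValueDecl &D) {
    BoolValue Flag;
    IntegerValue Count;

    StructValue Obj;
    Obj.setChild(D, Flag);                  // model one field of the struct
    assert(llvm::isa<BoolValue>(Obj.getChild(D))); // dispatch via classof()

    Obj.setProperty("is_valid", Count);     // analysis-defined synthetic state
    assert(Obj.getProperty("missing") == nullptr);
  }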
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/IssueHash.h b/contrib/llvm-project/clang/include/clang/Analysis/IssueHash.h
index 9c02b79f58f9..78bebbdb6ec7 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/IssueHash.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/IssueHash.h
@@ -5,8 +5,8 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_STATICANALYZER_CORE_ISSUE_HASH_H
-#define LLVM_CLANG_STATICANALYZER_CORE_ISSUE_HASH_H
+#ifndef LLVM_CLANG_ANALYSIS_ISSUEHASH_H
+#define LLVM_CLANG_ANALYSIS_ISSUEHASH_H
#include "llvm/ADT/SmallString.h"
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/PathDiagnostic.h b/contrib/llvm-project/clang/include/clang/Analysis/PathDiagnostic.h
index 235d26083191..446f67b23e75 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/PathDiagnostic.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/PathDiagnostic.h
@@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_STATICANALYZER_CORE_BUGREPORTER_PATHDIAGNOSTIC_H
-#define LLVM_CLANG_STATICANALYZER_CORE_BUGREPORTER_PATHDIAGNOSTIC_H
+#ifndef LLVM_CLANG_ANALYSIS_PATHDIAGNOSTIC_H
+#define LLVM_CLANG_ANALYSIS_PATHDIAGNOSTIC_H
#include "clang/AST/Stmt.h"
#include "clang/Analysis/AnalysisDeclContext.h"
@@ -41,10 +41,8 @@ class AnalysisDeclContext;
class BinaryOperator;
class CallEnter;
class CallExitEnd;
-class CallExpr;
class ConditionalOperator;
class Decl;
-class Expr;
class LocationContext;
class MemberExpr;
class ProgramPoint;
@@ -905,4 +903,4 @@ public:
} // namespace ento
} // namespace clang
-#endif // LLVM_CLANG_STATICANALYZER_CORE_BUGREPORTER_PATHDIAGNOSTIC_H
+#endif // LLVM_CLANG_ANALYSIS_PATHDIAGNOSTIC_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/ProgramPoint.h b/contrib/llvm-project/clang/include/clang/Analysis/ProgramPoint.h
index 546224bfd58d..680713a52f2f 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/ProgramPoint.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/ProgramPoint.h
@@ -30,7 +30,6 @@
namespace clang {
class AnalysisDeclContext;
-class FunctionDecl;
class LocationContext;
/// ProgramPoints can be "tagged" as representing points specific to a given
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/RetainSummaryManager.h b/contrib/llvm-project/clang/include/clang/Analysis/RetainSummaryManager.h
index b7ccb0317830..d9a0416e1ce5 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/RetainSummaryManager.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/RetainSummaryManager.h
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_RETAINSUMMARY_MANAGER_H
-#define LLVM_CLANG_ANALYSIS_RETAINSUMMARY_MANAGER_H
+#ifndef LLVM_CLANG_ANALYSIS_RETAINSUMMARYMANAGER_H
+#define LLVM_CLANG_ANALYSIS_RETAINSUMMARYMANAGER_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/SelectorExtras.h b/contrib/llvm-project/clang/include/clang/Analysis/SelectorExtras.h
index d26e9159a937..278f20e87cc6 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/SelectorExtras.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/SelectorExtras.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_LIB_ANALYSIS_SELECTOREXTRAS_H
-#define LLVM_CLANG_LIB_ANALYSIS_SELECTOREXTRAS_H
+#ifndef LLVM_CLANG_ANALYSIS_SELECTOREXTRAS_H
+#define LLVM_CLANG_ANALYSIS_SELECTOREXTRAS_H
#include "clang/AST/ASTContext.h"
diff --git a/contrib/llvm-project/clang/include/clang/Basic/AlignedAllocation.h b/contrib/llvm-project/clang/include/clang/Basic/AlignedAllocation.h
index ab9f19da5d59..c1187b81420b 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/AlignedAllocation.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/AlignedAllocation.h
@@ -12,8 +12,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_BASIC_ALIGNED_ALLOCATION_H
-#define LLVM_CLANG_BASIC_ALIGNED_ALLOCATION_H
+#ifndef LLVM_CLANG_BASIC_ALIGNEDALLOCATION_H
+#define LLVM_CLANG_BASIC_ALIGNEDALLOCATION_H
#include "llvm/ADT/Triple.h"
#include "llvm/Support/ErrorHandling.h"
@@ -42,4 +42,4 @@ inline llvm::VersionTuple alignedAllocMinVersion(llvm::Triple::OSType OS) {
} // end namespace clang
-#endif // LLVM_CLANG_BASIC_ALIGNED_ALLOCATION_H
+#endif // LLVM_CLANG_BASIC_ALIGNEDALLOCATION_H
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Attr.td b/contrib/llvm-project/clang/include/clang/Basic/Attr.td
index 10c5c7f1b879..be56373cc3ca 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Attr.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/Attr.td
@@ -1191,6 +1191,13 @@ def SYCLKernel : InheritableAttr {
let Documentation = [SYCLKernelDocs];
}
+def SYCLSpecialClass: InheritableAttr {
+ let Spellings = [Clang<"sycl_special_class">];
+ let Subjects = SubjectList<[CXXRecord]>;
+ let LangOpts = [SYCL];
+ let Documentation = [SYCLSpecialClassDocs];
+}
+
def C11NoReturn : InheritableAttr {
let Spellings = [Keyword<"_Noreturn">];
let Subjects = SubjectList<[Function], ErrorDiag>;
@@ -3686,6 +3693,8 @@ def OMPDeclareTargetDecl : InheritableAttr {
EnumArgument<"DevType", "DevTypeTy",
[ "host", "nohost", "any" ],
[ "DT_Host", "DT_NoHost", "DT_Any" ]>,
+ ExprArgument<"IndirectExpr">,
+ BoolArgument<"Indirect">,
UnsignedArgument<"Level">
];
let AdditionalMembers = [{
diff --git a/contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td b/contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td
index 8a7424a88c9f..18fac924b114 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td
@@ -409,6 +409,71 @@ The SYCL kernel in the previous code sample meets these expectations.
}];
}
+def SYCLSpecialClassDocs : Documentation {
+ let Category = DocCatStmt;
+ let Content = [{
+SYCL defines some special classes (accessor, sampler, and stream) which require
+specific handling during the generation of the SPIR entry point.
+The ``__attribute__((sycl_special_class))`` attribute is used in SYCL
+headers to indicate that a class or a struct needs specific handling when
+it is passed from host to device.
+Special classes will have a mandatory ``__init`` method and an optional
+``__finalize`` method (the ``__finalize`` method is used only with the
+``stream`` type). Kernel parameter types are extracted from the parameters of
+the ``__init`` method: those parameters are copied into the kernel function
+argument list, and the ``__init`` and ``__finalize`` methods are called at the
+beginning and the end of the kernel, respectively.
+The ``__init`` and ``__finalize`` methods must be defined inside the
+special class.
+Note that this attribute is an internal implementation detail and is not
+intended to be used by external users.
+
+The syntax of the attribute is as follows:
+
+.. code-block:: c++
+
+ class __attribute__((sycl_special_class)) accessor {};
+ class [[clang::sycl_special_class]] accessor {};
+
+This is a code example that illustrates the use of the attribute:
+
+.. code-block:: c++
+
+ class __attribute__((sycl_special_class)) SpecialType {
+ int F1;
+ int F2;
+ void __init(int f1) {
+ F1 = f1;
+ F2 = f1;
+ }
+ void __finalize() {}
+ public:
+ SpecialType() = default;
+ int getF2() const { return F2; }
+ };
+
+ int main () {
+ SpecialType T;
+ cgh.single_task([=] {
+ T.getF2();
+ });
+  }
+
+This would trigger the following kernel entry point in the AST:
+
+.. code-block:: c++
+
+ void __sycl_kernel(int f1) {
+ SpecialType T;
+ T.__init(f1);
+ ...
+    T.__finalize();
+ }
+ }];
+}
+
def C11NoReturnDocs : Documentation {
let Category = DocCatFunction;
let Content = [{
@@ -5987,7 +6052,7 @@ attribute requires a string literal argument to identify the handle being releas
def DiagnoseAsBuiltinDocs : Documentation {
let Category = DocCatFunction;
let Content = [{
-The ``diagnose_as_builtin` attribute indicates that Fortify diagnostics are to
+The ``diagnose_as_builtin`` attribute indicates that Fortify diagnostics are to
be applied to the declared function as if it were the function specified by the
attribute. The builtin function whose diagnostics are to be mimicked should be
given. In addition, the order in which arguments should be applied must also
@@ -5995,12 +6060,12 @@ be given.
For example, the attribute can be used as follows.
- .. code-block:: c
+.. code-block:: c
- __attribute__((diagnose_as_builtin(__builtin_memset, 3, 2, 1)))
- void *mymemset(int n, int c, void *s) {
- // ...
- }
+ __attribute__((diagnose_as_builtin(__builtin_memset, 3, 2, 1)))
+ void *mymemset(int n, int c, void *s) {
+ // ...
+ }
This indicates that calls to ``mymemset`` should be diagnosed as if they were
calls to ``__builtin_memset``. The arguments ``3, 2, 1`` indicate by index the
@@ -6015,7 +6080,8 @@ they would to the builtin function, after all normal arguments. For instance,
to diagnose a new function as if it were `sscanf`, we can use the attribute as
follows.
- .. code-block:: c
+.. code-block:: c
+
__attribute__((diagnose_as_builtin(sscanf, 1, 2)))
int mysscanf(const char *str, const char *format, ...) {
// ...
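To make the index mapping concrete, here is an illustrative (not normative) example built on the documentation's `mymemset` declaration; with fortify-style checking enabled, the call is diagnosed as if it were the builtin:

.. code-block:: c++

  // Illustrative only. The indices (3, 2, 1) say that __builtin_memset's
  // 1st/2nd/3rd arguments correspond to mymemset's 3rd/2nd/1st arguments.
  __attribute__((diagnose_as_builtin(__builtin_memset, 3, 2, 1)))
  void *mymemset(int n, int c, void *s);

  void caller() {
    char buf[8];
    // Treated as __builtin_memset(buf, 0, 32): the write exceeds the size of
    // `buf`, so the usual fortify diagnostic applies here as well.
    mymemset(32, 0, buf);
  }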
diff --git a/contrib/llvm-project/clang/include/clang/Basic/AttrSubjectMatchRules.h b/contrib/llvm-project/clang/include/clang/Basic/AttrSubjectMatchRules.h
index 010cefcaf340..4a4c1a883cf4 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/AttrSubjectMatchRules.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/AttrSubjectMatchRules.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_BASIC_ATTR_SUBJECT_MATCH_RULES_H
-#define LLVM_CLANG_BASIC_ATTR_SUBJECT_MATCH_RULES_H
+#ifndef LLVM_CLANG_BASIC_ATTRSUBJECTMATCHRULES_H
+#define LLVM_CLANG_BASIC_ATTRSUBJECTMATCHRULES_H
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Builtins.def b/contrib/llvm-project/clang/include/clang/Basic/Builtins.def
index 45d445163749..d2cb14d2fd8c 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Builtins.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/Builtins.def
@@ -26,6 +26,7 @@
// i -> int
// h -> half (__fp16, OpenCL)
// x -> half (_Float16)
+// y -> half (__bf16)
// f -> float
// d -> double
// z -> size_t
@@ -640,16 +641,23 @@ BUILTIN(__builtin_unreachable, "v", "nr")
BUILTIN(__builtin_shufflevector, "v." , "nct")
BUILTIN(__builtin_convertvector, "v." , "nct")
BUILTIN(__builtin_alloca, "v*z" , "Fn")
+BUILTIN(__builtin_alloca_uninitialized, "v*z", "Fn")
BUILTIN(__builtin_alloca_with_align, "v*zIz", "Fn")
+BUILTIN(__builtin_alloca_with_align_uninitialized, "v*zIz", "Fn")
BUILTIN(__builtin_call_with_static_chain, "v.", "nt")
BUILTIN(__builtin_elementwise_abs, "v.", "nct")
BUILTIN(__builtin_elementwise_max, "v.", "nct")
BUILTIN(__builtin_elementwise_min, "v.", "nct")
BUILTIN(__builtin_elementwise_ceil, "v.", "nct")
+BUILTIN(__builtin_elementwise_floor, "v.", "nct")
+BUILTIN(__builtin_elementwise_roundeven, "v.", "nct")
+BUILTIN(__builtin_elementwise_trunc, "v.", "nct")
BUILTIN(__builtin_reduce_max, "v.", "nct")
BUILTIN(__builtin_reduce_min, "v.", "nct")
BUILTIN(__builtin_reduce_xor, "v.", "nct")
+BUILTIN(__builtin_reduce_or, "v.", "nct")
+BUILTIN(__builtin_reduce_and, "v.", "nct")
BUILTIN(__builtin_matrix_transpose, "v.", "nFt")
BUILTIN(__builtin_matrix_column_major_load, "v.", "nFt")
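The new elementwise and reduction builtins above follow the conventions of the existing members of these families: elementwise builtins apply the operation per lane, and reductions fold across lanes, returning a scalar of the element type. A small sketch using Clang's vector extension:

.. code-block:: c++

  // Sketch of the added builtins; assumes a compiler that ships this change.
  typedef float float4 __attribute__((ext_vector_type(4)));
  typedef int int4 __attribute__((ext_vector_type(4)));

  float4 floorLanes(float4 V) {
    return __builtin_elementwise_floor(V);  // floor applied to each lane
  }

  int andAllLanes(int4 V) {
    return __builtin_reduce_and(V);         // bitwise AND across all lanes
  }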
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsNVPTX.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsNVPTX.def
index 025fef05c8e0..6b94dd857300 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsNVPTX.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsNVPTX.def
@@ -402,6 +402,23 @@ BUILTIN(__nvvm_ull2d_rp, "dULLi", "")
BUILTIN(__nvvm_f2h_rn_ftz, "Usf", "")
BUILTIN(__nvvm_f2h_rn, "Usf", "")
+TARGET_BUILTIN(__nvvm_ff2bf16x2_rn, "ZUiff", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_ff2bf16x2_rn_relu, "ZUiff", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_ff2bf16x2_rz, "ZUiff", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_ff2bf16x2_rz_relu, "ZUiff", "", AND(SM_80,PTX70))
+
+TARGET_BUILTIN(__nvvm_ff2f16x2_rn, "V2hff", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_ff2f16x2_rn_relu, "V2hff", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_ff2f16x2_rz, "V2hff", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_ff2f16x2_rz_relu, "V2hff", "", AND(SM_80,PTX70))
+
+TARGET_BUILTIN(__nvvm_f2bf16_rn, "ZUsf", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_f2bf16_rn_relu, "ZUsf", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_f2bf16_rz, "ZUsf", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_f2bf16_rz_relu, "ZUsf", "", AND(SM_80,PTX70))
+
+TARGET_BUILTIN(__nvvm_f2tf32_rna, "ZUif", "", AND(SM_80,PTX70))
+
// Bitcast
BUILTIN(__nvvm_bitcast_f2i, "if", "")
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsRISCV.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsRISCV.def
index 06560415e686..495a036e576f 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsRISCV.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsRISCV.def
@@ -16,13 +16,13 @@
#endif
// Zbb extension
-TARGET_BUILTIN(__builtin_riscv_orc_b_32, "ZiZi", "nc", "experimental-zbb")
-TARGET_BUILTIN(__builtin_riscv_orc_b_64, "WiWi", "nc", "experimental-zbb,64bit")
+TARGET_BUILTIN(__builtin_riscv_orc_b_32, "ZiZi", "nc", "zbb")
+TARGET_BUILTIN(__builtin_riscv_orc_b_64, "WiWi", "nc", "zbb,64bit")
// Zbc extension
-TARGET_BUILTIN(__builtin_riscv_clmul, "LiLiLi", "nc", "experimental-zbc")
-TARGET_BUILTIN(__builtin_riscv_clmulh, "LiLiLi", "nc", "experimental-zbc")
-TARGET_BUILTIN(__builtin_riscv_clmulr, "LiLiLi", "nc", "experimental-zbc")
+TARGET_BUILTIN(__builtin_riscv_clmul, "LiLiLi", "nc", "zbc")
+TARGET_BUILTIN(__builtin_riscv_clmulh, "LiLiLi", "nc", "zbc")
+TARGET_BUILTIN(__builtin_riscv_clmulr, "LiLiLi", "nc", "zbc")
// Zbe extension
TARGET_BUILTIN(__builtin_riscv_bcompress_32, "ZiZiZi", "nc", "experimental-zbe")
@@ -33,6 +33,10 @@ TARGET_BUILTIN(__builtin_riscv_bdecompress_32, "ZiZiZi", "nc",
TARGET_BUILTIN(__builtin_riscv_bdecompress_64, "WiWiWi", "nc",
"experimental-zbe,64bit")
+// Zbf extension
+TARGET_BUILTIN(__builtin_riscv_bfp_32, "ZiZiZi", "nc", "experimental-zbf")
+TARGET_BUILTIN(__builtin_riscv_bfp_64, "WiWiWi", "nc", "experimental-zbf,64bit")
+
// Zbp extension
TARGET_BUILTIN(__builtin_riscv_grev_32, "ZiZiZi", "nc", "experimental-zbp")
TARGET_BUILTIN(__builtin_riscv_grev_64, "WiWiWi", "nc", "experimental-zbp,64bit")
@@ -54,8 +58,14 @@ TARGET_BUILTIN(__builtin_riscv_crc32_w, "LiLi", "nc", "experimental-zbr")
TARGET_BUILTIN(__builtin_riscv_crc32c_b, "LiLi", "nc", "experimental-zbr")
TARGET_BUILTIN(__builtin_riscv_crc32c_h, "LiLi", "nc", "experimental-zbr")
TARGET_BUILTIN(__builtin_riscv_crc32c_w, "LiLi", "nc", "experimental-zbr")
-TARGET_BUILTIN(__builtin_riscv_crc32_d, "LiLi", "nc", "experimental-zbr")
-TARGET_BUILTIN(__builtin_riscv_crc32c_d, "LiLi", "nc", "experimental-zbr")
+TARGET_BUILTIN(__builtin_riscv_crc32_d, "LiLi", "nc", "experimental-zbr,64bit")
+TARGET_BUILTIN(__builtin_riscv_crc32c_d, "LiLi", "nc", "experimental-zbr,64bit")
+
+// Zbt extension
+TARGET_BUILTIN(__builtin_riscv_fsl_32, "LiLiLiLi", "nc", "experimental-zbt")
+TARGET_BUILTIN(__builtin_riscv_fsr_32, "LiLiLiLi", "nc", "experimental-zbt")
+TARGET_BUILTIN(__builtin_riscv_fsl_64, "WiWiWiWi", "nc", "experimental-zbt,64bit")
+TARGET_BUILTIN(__builtin_riscv_fsr_64, "WiWiWiWi", "nc", "experimental-zbt,64bit")
#undef BUILTIN
#undef TARGET_BUILTIN
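A usage note on the Zbb/Zbc change above: dropping the `experimental-` prefix means these builtins are gated on the ratified extensions named in `-march` rather than on `-menable-experimental-extensions`. An illustrative sketch:

.. code-block:: c++

  // Illustrative only; build for a Zbb-enabled target, e.g. -march=rv32i_zbb.
  #include <cstdint>

  std::int32_t orcBytes(std::int32_t X) {
    // orc.b: each byte of the result is all-ones if the corresponding byte of
    // the input is nonzero, and all-zeros otherwise.
    return __builtin_riscv_orc_b_32(X);
  }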
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86.def
index bc6208be4560..0669a96b942b 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86.def
@@ -265,10 +265,6 @@ TARGET_BUILTIN(__builtin_ia32_psubusw128, "V8sV8sV8s", "ncV:128:", "sse2")
TARGET_BUILTIN(__builtin_ia32_pmulhw128, "V8sV8sV8s", "ncV:128:", "sse2")
TARGET_BUILTIN(__builtin_ia32_pavgb128, "V16cV16cV16c", "ncV:128:", "sse2")
TARGET_BUILTIN(__builtin_ia32_pavgw128, "V8sV8sV8s", "ncV:128:", "sse2")
-TARGET_BUILTIN(__builtin_ia32_pmaxub128, "V16cV16cV16c", "ncV:128:", "sse2")
-TARGET_BUILTIN(__builtin_ia32_pmaxsw128, "V8sV8sV8s", "ncV:128:", "sse2")
-TARGET_BUILTIN(__builtin_ia32_pminub128, "V16cV16cV16c", "ncV:128:", "sse2")
-TARGET_BUILTIN(__builtin_ia32_pminsw128, "V8sV8sV8s", "ncV:128:", "sse2")
TARGET_BUILTIN(__builtin_ia32_packsswb128, "V16cV8sV8s", "ncV:128:", "sse2")
TARGET_BUILTIN(__builtin_ia32_packssdw128, "V8sV4iV4i", "ncV:128:", "sse2")
TARGET_BUILTIN(__builtin_ia32_packuswb128, "V16cV8sV8s", "ncV:128:", "sse2")
@@ -296,9 +292,6 @@ TARGET_BUILTIN(__builtin_ia32_pshufb128, "V16cV16cV16c", "ncV:128:", "ssse3")
TARGET_BUILTIN(__builtin_ia32_psignb128, "V16cV16cV16c", "ncV:128:", "ssse3")
TARGET_BUILTIN(__builtin_ia32_psignw128, "V8sV8sV8s", "ncV:128:", "ssse3")
TARGET_BUILTIN(__builtin_ia32_psignd128, "V4iV4iV4i", "ncV:128:", "ssse3")
-TARGET_BUILTIN(__builtin_ia32_pabsb128, "V16cV16c", "ncV:128:", "ssse3")
-TARGET_BUILTIN(__builtin_ia32_pabsw128, "V8sV8s", "ncV:128:", "ssse3")
-TARGET_BUILTIN(__builtin_ia32_pabsd128, "V4iV4i", "ncV:128:", "ssse3")
TARGET_BUILTIN(__builtin_ia32_ldmxcsr, "vUi", "n", "sse")
TARGET_HEADER_BUILTIN(_mm_setcsr, "vUi", "nh","xmmintrin.h", ALL_LANGUAGES, "sse")
@@ -380,14 +373,6 @@ TARGET_BUILTIN(__builtin_ia32_blendvpd, "V2dV2dV2dV2d", "ncV:128:", "sse4.1")
TARGET_BUILTIN(__builtin_ia32_blendvps, "V4fV4fV4fV4f", "ncV:128:", "sse4.1")
TARGET_BUILTIN(__builtin_ia32_packusdw128, "V8sV4iV4i", "ncV:128:", "sse4.1")
-TARGET_BUILTIN(__builtin_ia32_pmaxsb128, "V16cV16cV16c", "ncV:128:", "sse4.1")
-TARGET_BUILTIN(__builtin_ia32_pmaxsd128, "V4iV4iV4i", "ncV:128:", "sse4.1")
-TARGET_BUILTIN(__builtin_ia32_pmaxud128, "V4iV4iV4i", "ncV:128:", "sse4.1")
-TARGET_BUILTIN(__builtin_ia32_pmaxuw128, "V8sV8sV8s", "ncV:128:", "sse4.1")
-TARGET_BUILTIN(__builtin_ia32_pminsb128, "V16cV16cV16c", "ncV:128:", "sse4.1")
-TARGET_BUILTIN(__builtin_ia32_pminsd128, "V4iV4iV4i", "ncV:128:", "sse4.1")
-TARGET_BUILTIN(__builtin_ia32_pminud128, "V4iV4iV4i", "ncV:128:", "sse4.1")
-TARGET_BUILTIN(__builtin_ia32_pminuw128, "V8sV8sV8s", "ncV:128:", "sse4.1")
TARGET_BUILTIN(__builtin_ia32_pmuldq128, "V2OiV4iV4i", "ncV:128:", "sse4.1")
TARGET_BUILTIN(__builtin_ia32_roundps, "V4fV4fIi", "ncV:128:", "sse4.1")
TARGET_BUILTIN(__builtin_ia32_roundss, "V4fV4fV4fIi", "ncV:128:", "sse4.1")
@@ -558,9 +543,6 @@ TARGET_BUILTIN(__builtin_ia32_vec_set_v8si, "V8iV8iiIi", "ncV:256:", "avx")
// AVX2
TARGET_BUILTIN(__builtin_ia32_mpsadbw256, "V32cV32cV32cIc", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pabsb256, "V32cV32c", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pabsw256, "V16sV16s", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pabsd256, "V8iV8i", "ncV:256:", "avx2")
TARGET_BUILTIN(__builtin_ia32_packsswb256, "V32cV16sV16s", "ncV:256:", "avx2")
TARGET_BUILTIN(__builtin_ia32_packssdw256, "V16sV8iV8i", "ncV:256:", "avx2")
TARGET_BUILTIN(__builtin_ia32_packuswb256, "V32cV16sV16s", "ncV:256:", "avx2")
@@ -586,18 +568,6 @@ TARGET_BUILTIN(__builtin_ia32_phsubd256, "V8iV8iV8i", "ncV:256:", "avx2")
TARGET_BUILTIN(__builtin_ia32_phsubsw256, "V16sV16sV16s", "ncV:256:", "avx2")
TARGET_BUILTIN(__builtin_ia32_pmaddubsw256, "V16sV32cV32c", "ncV:256:", "avx2")
TARGET_BUILTIN(__builtin_ia32_pmaddwd256, "V8iV16sV16s", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pmaxub256, "V32cV32cV32c", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pmaxuw256, "V16sV16sV16s", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pmaxud256, "V8iV8iV8i", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pmaxsb256, "V32cV32cV32c", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pmaxsw256, "V16sV16sV16s", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pmaxsd256, "V8iV8iV8i", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pminub256, "V32cV32cV32c", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pminuw256, "V16sV16sV16s", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pminud256, "V8iV8iV8i", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pminsb256, "V32cV32cV32c", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pminsw256, "V16sV16sV16s", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pminsd256, "V8iV8iV8i", "ncV:256:", "avx2")
TARGET_BUILTIN(__builtin_ia32_pmovmskb256, "iV32c", "ncV:256:", "avx2")
TARGET_BUILTIN(__builtin_ia32_pmuldq256, "V4OiV8iV8i", "ncV:256:", "avx2")
TARGET_BUILTIN(__builtin_ia32_pmulhrsw256, "V16sV16sV16s", "ncV:256:", "avx2")
@@ -927,16 +897,6 @@ TARGET_BUILTIN(__builtin_ia32_cvtudq2ps512_mask, "V16fV16iV16fUsIi", "ncV:512:",
TARGET_BUILTIN(__builtin_ia32_cvtpd2ps512_mask, "V8fV8dV8fUcIi", "ncV:512:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_vcvtps2ph512_mask, "V16sV16fIiV16sUs", "ncV:512:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_vcvtph2ps512_mask, "V16fV16sV16fUsIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pabsd512, "V16iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pabsq512, "V8OiV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmaxsd512, "V16iV16iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmaxsq512, "V8OiV8OiV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmaxud512, "V16iV16iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmaxuq512, "V8OiV8OiV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pminsd512, "V16iV16iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pminsq512, "V8OiV8OiV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pminud512, "V16iV16iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pminuq512, "V8OiV8OiV8Oi", "ncV:512:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_pmuldq512, "V8OiV16iV16i", "ncV:512:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_pmuludq512, "V8OiV16iV16i", "ncV:512:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_loaddqusi512_mask, "V16iiC*V16iUs", "nV:512:", "avx512f")
@@ -1045,8 +1005,6 @@ TARGET_BUILTIN(__builtin_ia32_ucmpd512_mask, "UsV16iV16iIiUs", "ncV:512:", "avx5
TARGET_BUILTIN(__builtin_ia32_ucmpq512_mask, "UcV8OiV8OiIiUc", "ncV:512:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_ucmpw512_mask, "UiV32sV32sIiUi", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pabsb512, "V64cV64c", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pabsw512, "V32sV32s", "ncV:512:", "avx512bw")
TARGET_BUILTIN(__builtin_ia32_packssdw512, "V32sV16iV16i", "ncV:512:", "avx512bw")
TARGET_BUILTIN(__builtin_ia32_packsswb512, "V64cV32sV32s", "ncV:512:", "avx512bw")
TARGET_BUILTIN(__builtin_ia32_packusdw512, "V32sV16iV16i", "ncV:512:", "avx512bw")
@@ -1057,14 +1015,6 @@ TARGET_BUILTIN(__builtin_ia32_paddusb512, "V64cV64cV64c", "ncV:512:", "avx512bw"
TARGET_BUILTIN(__builtin_ia32_paddusw512, "V32sV32sV32s", "ncV:512:", "avx512bw")
TARGET_BUILTIN(__builtin_ia32_pavgb512, "V64cV64cV64c", "ncV:512:", "avx512bw")
TARGET_BUILTIN(__builtin_ia32_pavgw512, "V32sV32sV32s", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pmaxsb512, "V64cV64cV64c", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pmaxsw512, "V32sV32sV32s", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pmaxub512, "V64cV64cV64c", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pmaxuw512, "V32sV32sV32s", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pminsb512, "V64cV64cV64c", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pminsw512, "V32sV32sV32s", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pminub512, "V64cV64cV64c", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pminuw512, "V32sV32sV32s", "ncV:512:", "avx512bw")
TARGET_BUILTIN(__builtin_ia32_pshufb512, "V64cV64cV64c", "ncV:512:", "avx512bw")
TARGET_BUILTIN(__builtin_ia32_psubsb512, "V64cV64cV64c", "ncV:512:", "avx512bw")
TARGET_BUILTIN(__builtin_ia32_psubsw512, "V32sV32sV32s", "ncV:512:", "avx512bw")
@@ -1198,16 +1148,6 @@ TARGET_BUILTIN(__builtin_ia32_getexppd128_mask, "V2dV2dV2dUc", "ncV:128:", "avx5
TARGET_BUILTIN(__builtin_ia32_getexppd256_mask, "V4dV4dV4dUc", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_getexpps128_mask, "V4fV4fV4fUc", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_getexpps256_mask, "V8fV8fV8fUc", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_pabsq128, "V2OiV2Oi", "ncV:128:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_pabsq256, "V4OiV4Oi", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_pmaxsq128, "V2OiV2OiV2Oi", "ncV:128:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_pmaxsq256, "V4OiV4OiV4Oi", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_pmaxuq128, "V2OiV2OiV2Oi", "ncV:128:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_pmaxuq256, "V4OiV4OiV4Oi", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_pminsq128, "V2OiV2OiV2Oi", "ncV:128:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_pminsq256, "V4OiV4OiV4Oi", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_pminuq128, "V2OiV2OiV2Oi", "ncV:128:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_pminuq256, "V4OiV4OiV4Oi", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_rndscalepd_128_mask, "V2dV2dIiV2dUc", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_rndscalepd_256_mask, "V4dV4dIiV4dUc", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_rndscaleps_128_mask, "V4fV4fIiV4fUc", "ncV:128:", "avx512vl")
@@ -2075,8 +2015,6 @@ TARGET_BUILTIN(__builtin_ia32_selectsd_128, "V2dUcV2dV2d", "ncV:128:", "avx512f"
// generic reduction intrinsics
TARGET_BUILTIN(__builtin_ia32_reduce_add_d512, "iV16i", "ncV:512:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_reduce_add_q512, "OiV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_and_d512, "iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_and_q512, "OiV8Oi", "ncV:512:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_reduce_fadd_pd512, "ddV8d", "ncV:512:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_reduce_fadd_ps512, "ffV16f", "ncV:512:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_reduce_fadd_ph512, "xxV32x", "ncV:512:", "avx512fp16")
@@ -2099,16 +2037,6 @@ TARGET_BUILTIN(__builtin_ia32_reduce_fmul_ph256, "xxV16x", "ncV:256:", "avx512fp
TARGET_BUILTIN(__builtin_ia32_reduce_fmul_ph128, "xxV8x", "ncV:128:", "avx512fp16,avx512vl")
TARGET_BUILTIN(__builtin_ia32_reduce_mul_d512, "iV16i", "ncV:512:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_reduce_mul_q512, "OiV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_or_d512, "iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_or_q512, "OiV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_smax_d512, "iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_smax_q512, "OiV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_smin_d512, "iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_smin_q512, "OiV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_umax_d512, "iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_umax_q512, "OiV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_umin_d512, "iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_umin_q512, "OiV8Oi", "ncV:512:", "avx512f")
// MONITORX/MWAITX
TARGET_BUILTIN(__builtin_ia32_monitorx, "vvC*UiUi", "n", "mwaitx")
diff --git a/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.def b/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.def
index 723302f108e2..0da875525c0c 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.def
@@ -64,7 +64,7 @@ CODEGENOPT(DisableLifetimeMarkers, 1, 0) ///< Don't emit any lifetime markers
CODEGENOPT(DisableO0ImplyOptNone , 1, 0) ///< Don't annonate function with optnone at O0
CODEGENOPT(ExperimentalStrictFloatingPoint, 1, 0) ///< Enables the new, experimental
///< strict floating point.
-CODEGENOPT(EnableNoundefAttrs, 1, 0) ///< Enable emitting `noundef` attributes on IR call arguments and return values
+CODEGENOPT(DisableNoundefAttrs, 1, 0) ///< Disable emitting `noundef` attributes on IR call arguments and return values
CODEGENOPT(LegacyPassManager, 1, 0) ///< Use the legacy pass manager.
CODEGENOPT(DebugPassManager, 1, 0) ///< Prints debug information for the new
///< pass manager.
@@ -107,6 +107,8 @@ CODEGENOPT(CFProtectionReturn , 1, 0) ///< if -fcf-protection is
///< set to full or return.
CODEGENOPT(CFProtectionBranch , 1, 0) ///< if -fcf-protection is
///< set to full or branch.
+CODEGENOPT(IBTSeal, 1, 0) ///< set to optimize CFProtectionBranch.
+
CODEGENOPT(XRayInstrumentFunctions , 1, 0) ///< Set when -fxray-instrument is
///< enabled.
CODEGENOPT(StackSizeSection , 1, 0) ///< Set when -fstack-size-section is enabled.
@@ -139,6 +141,9 @@ VALUE_CODEGENOPT(XRaySelectedFunctionGroup, 32, 0)
VALUE_CODEGENOPT(PatchableFunctionEntryCount , 32, 0) ///< Number of NOPs at function entry
VALUE_CODEGENOPT(PatchableFunctionEntryOffset , 32, 0)
+CODEGENOPT(HotPatch, 1, 0) ///< Supports the Microsoft /HOTPATCH flag and
+ ///< generates a 'patchable-function' attribute.
+
CODEGENOPT(InstrumentForProfiling , 1, 0) ///< Set when -pg is enabled.
CODEGENOPT(CallFEntry , 1, 0) ///< Set when -mfentry is enabled.
CODEGENOPT(MNopMCount , 1, 0) ///< Set when -mnop-mcount is enabled.
@@ -231,6 +236,9 @@ CODEGENOPT(SanitizeMemoryTrackOrigins, 2, 0) ///< Enable tracking origins in
ENUM_CODEGENOPT(SanitizeAddressDtor, llvm::AsanDtorKind, 2,
llvm::AsanDtorKind::Global) ///< Set how ASan global
///< destructors are emitted.
+CODEGENOPT(SanitizeMemoryParamRetval, 1, 0) ///< Enable detection of uninitialized
+ ///< parameters and return values
+ ///< in MemorySanitizer
CODEGENOPT(SanitizeMemoryUseAfterDtor, 1, 0) ///< Enable use-after-delete detection
///< in MemorySanitizer
CODEGENOPT(SanitizeCfiCrossDso, 1, 0) ///< Enable cross-dso support in CFI.
diff --git a/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.h b/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.h
index 33ec03a17136..5a5c2689c689 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.h
@@ -307,7 +307,7 @@ public:
std::shared_ptr<llvm::Regex> Regex;
/// By default, optimization remark is missing.
- OptRemark() : Kind(RK_Missing), Pattern(""), Regex(nullptr) {}
+ OptRemark() : Kind(RK_Missing), Regex(nullptr) {}
/// Returns true iff the optimization remark holds a valid regular
/// expression.
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DarwinSDKInfo.h b/contrib/llvm-project/clang/include/clang/Basic/DarwinSDKInfo.h
index 918dc7c8becc..df16827debfc 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DarwinSDKInfo.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DarwinSDKInfo.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_BASIC_DARWIN_SDK_INFO_H
-#define LLVM_CLANG_BASIC_DARWIN_SDK_INFO_H
+#ifndef LLVM_CLANG_BASIC_DARWINSDKINFO_H
+#define LLVM_CLANG_BASIC_DARWINSDKINFO_H
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/DenseMap.h"
@@ -57,6 +57,20 @@ public:
llvm::Triple::MacOSX, llvm::Triple::UnknownEnvironment);
}
+ /// Returns the os-environment mapping pair that's used to represent the
+ /// iOS -> watchOS version mapping.
+ static inline constexpr OSEnvPair iOStoWatchOSPair() {
+ return OSEnvPair(llvm::Triple::IOS, llvm::Triple::UnknownEnvironment,
+ llvm::Triple::WatchOS, llvm::Triple::UnknownEnvironment);
+ }
+
+ /// Returns the os-environment mapping pair that's used to represent the
+ /// iOS -> tvOS version mapping.
+ static inline constexpr OSEnvPair iOStoTvOSPair() {
+ return OSEnvPair(llvm::Triple::IOS, llvm::Triple::UnknownEnvironment,
+ llvm::Triple::TvOS, llvm::Triple::UnknownEnvironment);
+ }
+
private:
StorageType Value;
@@ -154,4 +168,4 @@ Expected<Optional<DarwinSDKInfo>> parseDarwinSDKInfo(llvm::vfs::FileSystem &VFS,
} // end namespace clang
-#endif // LLVM_CLANG_BASIC_DARWIN_SDK_INFO_H
+#endif // LLVM_CLANG_BASIC_DARWINSDKINFO_H
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Diagnostic.td b/contrib/llvm-project/clang/include/clang/Basic/Diagnostic.td
index ab2c738a2ace..c932c9057278 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Diagnostic.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/Diagnostic.td
@@ -84,6 +84,7 @@ class Diagnostic<string text, DiagClass DC, Severity defaultmapping> {
bit AccessControl = 0;
bit WarningNoWerror = 0;
bit ShowInSystemHeader = 0;
+ bit ShowInSystemMacro = 1;
bit Deferrable = 0;
Severity DefaultSeverity = defaultmapping;
DiagGroup Group;
@@ -108,6 +109,14 @@ class SuppressInSystemHeader {
bit ShowInSystemHeader = 0;
}
+class ShowInSystemMacro {
+ bit ShowInSystemMacro = 1;
+}
+
+class SuppressInSystemMacro {
+ bit ShowInSystemMacro = 0;
+}
+
class Deferrable {
bit Deferrable = 1;
}
@@ -159,4 +168,3 @@ include "DiagnosticParseKinds.td"
include "DiagnosticRefactoringKinds.td"
include "DiagnosticSemaKinds.td"
include "DiagnosticSerializationKinds.td"
-
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticAST.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticAST.h
index 76c31ad9508e..24ef2689eac0 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticAST.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticAST.h
@@ -15,7 +15,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM, FLAGS, DEFAULT_MAPPING, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
ENUM,
#define ASTSTART
#include "clang/Basic/DiagnosticASTKinds.inc"
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticASTKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticASTKinds.td
index d788c8517914..a89bdff1a10c 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticASTKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticASTKinds.td
@@ -590,4 +590,9 @@ def warn_padded_struct_size : Warning<
InGroup<Padded>, DefaultIgnore;
def warn_unnecessary_packed : Warning<
"packed attribute is unnecessary for %0">, InGroup<Packed>, DefaultIgnore;
+
+// -Wunaligned-access
+def warn_unaligned_access : Warning<
+  "field %1 within %0 is less aligned than %2, which is usually due to %0 "
+  "being packed and can lead to unaligned accesses">, InGroup<UnalignedAccess>, DefaultIgnore;
}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticAnalysis.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticAnalysis.h
index f9037cc8d75a..676b58f7d6ef 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticAnalysis.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticAnalysis.h
@@ -15,7 +15,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM, FLAGS, DEFAULT_MAPPING, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
ENUM,
#define ANALYSISSTART
#include "clang/Basic/DiagnosticAnalysisKinds.inc"
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticComment.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticComment.h
index 6e011bfcebab..17c0053e9a33 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticComment.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticComment.h
@@ -15,7 +15,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM, FLAGS, DEFAULT_MAPPING, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
ENUM,
#define COMMENTSTART
#include "clang/Basic/DiagnosticCommentKinds.inc"
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCrossTU.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCrossTU.h
index ded85ec3f840..4341bf327b69 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCrossTU.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCrossTU.h
@@ -15,7 +15,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM, FLAGS, DEFAULT_MAPPING, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
ENUM,
#define CROSSTUSTART
#include "clang/Basic/DiagnosticCrossTUKinds.inc"
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriver.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriver.h
index cecd8fd6b4d5..6931bd46542e 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriver.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriver.h
@@ -15,7 +15,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM, FLAGS, DEFAULT_MAPPING, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
ENUM,
#define DRIVERSTART
#include "clang/Basic/DiagnosticDriverKinds.inc"
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriverKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriverKinds.td
index a7fd2f26478c..e635be6b6d1b 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriverKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriverKinds.td
@@ -116,10 +116,6 @@ def warn_drv_unsupported_option_for_target : Warning<
"ignoring '%0' option as it is not currently supported for target '%1'">,
InGroup<OptionIgnored>;
-def warn_drv_spirv_linking_multiple_inputs_unsupported: Warning<
- "Linking multiple input files is not supported for SPIR-V yet">,
- InGroup<OptionIgnored>;
-
def err_drv_invalid_thread_model_for_target : Error<
"invalid thread model '%0' in '%1' for this target">;
def err_drv_invalid_linker_name : Error<
@@ -384,6 +380,9 @@ def warn_drv_deprecated_arg : Warning<
"argument '%0' is deprecated, use '%1' instead">, InGroup<Deprecated>;
def warn_drv_assuming_mfloat_abi_is : Warning<
"unknown platform, assuming -mfloat-abi=%0">;
+def warn_drv_unsupported_float_abi_by_lib : Warning<
+ "float ABI '%0' is not supported by current library">,
+ InGroup<DiagGroup<"unsupported-abi">>;
def warn_ignoring_ftabstop_value : Warning<
"ignoring invalid -ftabstop value '%0', using default value %1">;
def warn_drv_overriding_flag_option : Warning<
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticError.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticError.h
index 430da6f724ed..76d893a5ccf8 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticError.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticError.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_BASIC_DIAGNOSTIC_ERROR_H
-#define LLVM_CLANG_BASIC_DIAGNOSTIC_ERROR_H
+#ifndef LLVM_CLANG_BASIC_DIAGNOSTICERROR_H
+#define LLVM_CLANG_BASIC_DIAGNOSTICERROR_H
#include "clang/Basic/PartialDiagnostic.h"
#include "llvm/Support/Error.h"
@@ -57,4 +57,4 @@ private:
} // end namespace clang
-#endif // LLVM_CLANG_BASIC_DIAGNOSTIC_ERROR_H
+#endif // LLVM_CLANG_BASIC_DIAGNOSTICERROR_H
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticFrontend.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticFrontend.h
index f57c587fb469..ab4e855f2de0 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticFrontend.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticFrontend.h
@@ -15,7 +15,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM, FLAGS, DEFAULT_MAPPING, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
ENUM,
#define FRONTENDSTART
#include "clang/Basic/DiagnosticFrontendKinds.inc"
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticGroups.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticGroups.td
index c0642efaee4e..608e16147b1c 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticGroups.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticGroups.td
@@ -241,6 +241,7 @@ def Documentation : DiagGroup<"documentation",
def EmptyBody : DiagGroup<"empty-body">;
def Exceptions : DiagGroup<"exceptions">;
+def DeclarationAfterStatement : DiagGroup<"declaration-after-statement">;
def GNUEmptyInitializer : DiagGroup<"gnu-empty-initializer">;
def GNUEmptyStruct : DiagGroup<"gnu-empty-struct">;
@@ -542,6 +543,7 @@ def ExplicitInitializeCall : DiagGroup<"explicit-initialize-call">;
def OrderedCompareFunctionPointers : DiagGroup<"ordered-compare-function-pointers">;
def Packed : DiagGroup<"packed">;
def Padded : DiagGroup<"padded">;
+def UnalignedAccess : DiagGroup<"unaligned-access">;
def PessimizingMove : DiagGroup<"pessimizing-move">;
def ReturnStdMove : DiagGroup<"return-std-move">;
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticIDs.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticIDs.h
index 375930c14848..ba5f5acc8ce6 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticIDs.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticIDs.h
@@ -67,7 +67,7 @@ namespace clang {
// Get typedefs for common diagnostics.
enum {
#define DIAG(ENUM, FLAGS, DEFAULT_MAPPING, DESC, GROUP, SFINAE, CATEGORY, \
- NOWERROR, SHOWINSYSHEADER, DEFFERABLE) \
+ NOWERROR, SHOWINSYSHEADER, SHOWINSYSMACRO, DEFFERABLE) \
ENUM,
#define COMMONSTART
#include "clang/Basic/DiagnosticCommonKinds.inc"
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticLex.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticLex.h
index 7a3128de3b82..5f237085ae03 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticLex.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticLex.h
@@ -15,7 +15,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM, FLAGS, DEFAULT_MAPPING, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
ENUM,
#define LEXSTART
#include "clang/Basic/DiagnosticLexKinds.inc"
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParse.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParse.h
index d066d3f71a25..81a8185d25fb 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParse.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParse.h
@@ -15,7 +15,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM, FLAGS, DEFAULT_MAPPING, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
ENUM,
#define PARSESTART
#include "clang/Basic/DiagnosticParseKinds.inc"
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td
index 9dc036c03faa..770ddb3ab16f 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td
@@ -1344,15 +1344,17 @@ def warn_omp_unknown_assumption_clause_without_args
def note_omp_assumption_clause_continue_here
: Note<"the ignored tokens spans until here">;
def err_omp_declare_target_unexpected_clause: Error<
- "unexpected '%0' clause, only %select{'device_type'|'to' or 'link'|'to', 'link' or 'device_type'}1 clauses expected">;
+ "unexpected '%0' clause, only %select{'device_type'|'to' or 'link'|'to', 'link' or 'device_type'|'device_type', 'indirect'|'to', 'link', 'device_type' or 'indirect'}1 clauses expected">;
def err_omp_begin_declare_target_unexpected_implicit_to_clause: Error<
"unexpected '(', only 'to', 'link' or 'device_type' clauses expected for 'begin declare target' directive">;
def err_omp_declare_target_unexpected_clause_after_implicit_to: Error<
"unexpected clause after an implicit 'to' clause">;
def err_omp_declare_target_missing_to_or_link_clause: Error<
- "expected at least one 'to' or 'link' clause">;
+ "expected at least one %select{'to' or 'link'|'to', 'link' or 'indirect'}0 clause">;
def err_omp_declare_target_multiple : Error<
"%0 appears multiple times in clauses on the same declare target directive">;
+def err_omp_declare_target_indirect_device_type: Error<
+ "only 'device_type(any)' clause is allowed with indirect clause">;
def err_omp_expected_clause: Error<
"expected at least one clause on '#pragma omp %0' directive">;
def err_omp_mapper_illegal_identifier : Error<
@@ -1374,7 +1376,7 @@ def warn_omp_declare_variant_string_literal_or_identifier
"%select{set|selector|property}0; "
"%select{set|selector|property}0 skipped">,
InGroup<OpenMPClauses>;
-def warn_unknown_begin_declare_variant_isa_trait
+def warn_unknown_declare_variant_isa_trait
: Warning<"isa trait '%0' is not known to the current target; verify the "
"spelling or consider restricting the context selector with the "
"'arch' selector further">,
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticRefactoring.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticRefactoring.h
index fc7564047a24..9b628dbeb7c2 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticRefactoring.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticRefactoring.h
@@ -15,7 +15,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM, FLAGS, DEFAULT_MAPPING, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
ENUM,
#define REFACTORINGSTART
#include "clang/Basic/DiagnosticRefactoringKinds.inc"
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSema.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSema.h
index 7323167aeee8..45014fe21271 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSema.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSema.h
@@ -15,7 +15,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM, FLAGS, DEFAULT_MAPPING, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
ENUM,
#define SEMASTART
#include "clang/Basic/DiagnosticSemaKinds.inc"
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td
index f2089bfda04d..a8cf00c1263f 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -197,7 +197,8 @@ def ext_flexible_array_init : Extension<
// C++20 designated initializers
def ext_cxx_designated_init : Extension<
- "designated initializers are a C++20 extension">, InGroup<CXX20Designator>;
+ "designated initializers are a C++20 extension">, InGroup<CXX20Designator>,
+ SuppressInSystemMacro;
def warn_cxx17_compat_designated_init : Warning<
"designated initializers are incompatible with C++ standards before C++20">,
InGroup<CXXPre20CompatPedantic>, DefaultIgnore;
@@ -449,7 +450,7 @@ def warn_decl_shadow :
"typedef in %2|"
"type alias in %2|"
"structured binding}1">,
- InGroup<Shadow>, DefaultIgnore;
+ InGroup<Shadow>, DefaultIgnore, SuppressInSystemMacro;
def warn_decl_shadow_uncaptured_local :
Warning<warn_decl_shadow.Text>,
InGroup<ShadowUncapturedLocal>, DefaultIgnore;
@@ -1695,9 +1696,6 @@ def err_missing_exception_specification : Error<
def ext_missing_exception_specification : ExtWarn<
err_missing_exception_specification.Text>,
InGroup<DiagGroup<"missing-exception-spec">>;
-def ext_ms_missing_exception_specification : ExtWarn<
- err_missing_exception_specification.Text>,
- InGroup<MicrosoftExceptionSpec>;
def err_noexcept_needs_constant_expression : Error<
"argument to noexcept specifier must be a constant expression">;
def err_exception_spec_not_parsed : Error<
@@ -3944,7 +3942,8 @@ def warn_cast_align : Warning<
"cast from %0 to %1 increases required alignment from %2 to %3">,
InGroup<CastAlign>, DefaultIgnore;
def warn_old_style_cast : Warning<
- "use of old-style cast">, InGroup<OldStyleCast>, DefaultIgnore;
+ "use of old-style cast">, InGroup<OldStyleCast>, DefaultIgnore,
+ SuppressInSystemMacro;
// Separate between casts to void* and non-void* pointers.
// Some APIs use (abuse) void* for something like a user context,
@@ -5785,7 +5784,7 @@ def err_typecheck_invalid_restrict_invalid_pointee : Error<
def ext_typecheck_zero_array_size : Extension<
"zero size arrays are an extension">, InGroup<ZeroLengthArray>;
def err_typecheck_zero_array_size : Error<
- "zero-length arrays are not permitted in C++">;
+ "zero-length arrays are not permitted in %select{C++|SYCL device code}0">;
def err_array_size_non_int : Error<"size of array has non-integer type %0">;
def err_init_element_not_constant : Error<
"initializer element is not a compile-time constant">;
@@ -7804,6 +7803,11 @@ def err_expected_class_or_namespace : Error<"%0 is not a class"
"%select{ or namespace|, namespace, or enumeration}1">;
def err_invalid_declarator_scope : Error<"cannot define or redeclare %0 here "
"because namespace %1 does not enclose namespace %2">;
+def err_export_non_namespace_scope_name : Error<
+ "cannot export %0 as it is not at namespace scope">;
+def err_redeclaration_non_exported : Error <
+ "cannot export redeclaration %0 here since the previous declaration is not "
+ "exported">;
def err_invalid_declarator_global_scope : Error<
"definition or redeclaration of %0 cannot name the global scope">;
def err_invalid_declarator_in_function : Error<
@@ -9370,6 +9374,9 @@ def warn_printf_ObjCflags_without_ObjCConversion: Warning<
def warn_printf_invalid_objc_flag: Warning<
"'%0' is not a valid object format flag">,
InGroup<Format>;
+def warn_printf_narg_not_supported : Warning<
+ "'%%n' specifier not supported on this platform">,
+ InGroup<Format>;
def warn_scanf_scanlist_incomplete : Warning<
"no closing ']' for '%%[' in scanf format string">,
InGroup<Format>;
@@ -9549,7 +9556,8 @@ def err_generic_sel_multi_match : Error<
// Blocks
def err_blocks_disable : Error<"blocks support disabled - compile with -fblocks"
- " or %select{pick a deployment target that supports them|for OpenCL 2.0}0">;
+ " or %select{pick a deployment target that supports them|for OpenCL C 2.0"
+ " or OpenCL C 3.0 with __opencl_c_device_enqueue feature}0">;
def err_block_returning_array_function : Error<
"block cannot return %select{array|function}0 type %1">;
@@ -9862,8 +9870,11 @@ def err_constant_integer_arg_type : Error<
"argument to %0 must be a constant integer">;
def ext_mixed_decls_code : Extension<
- "ISO C90 forbids mixing declarations and code">,
- InGroup<DiagGroup<"declaration-after-statement">>;
+ "mixing declarations and code is a C99 extension">,
+ InGroup<DeclarationAfterStatement>;
+def warn_mixed_decls_code : Warning<
+ "mixing declarations and code is incompatible with standards before C99">,
+ InGroup<DeclarationAfterStatement>, DefaultIgnore;
def err_non_local_variable_decl_in_for : Error<
"declaration of non-local variable in 'for' loop">;
@@ -10813,11 +10824,6 @@ def err_omp_non_lvalue_in_map_or_motion_clauses: Error<
"expected addressable lvalue in '%0' clause">;
def err_omp_var_expected : Error<
"expected variable of the '%0' type%select{|, not %2}1">;
-def warn_unknown_declare_variant_isa_trait
- : Warning<"isa trait '%0' is not known to the current target; verify the "
- "spelling or consider restricting the context selector with the "
- "'arch' selector further">,
- InGroup<SourceUsesOpenMP>;
def err_omp_non_pointer_type_array_shaping_base : Error<
"expected expression with a pointer to a complete type as a base of an array "
"shaping operation">;
@@ -11047,7 +11053,7 @@ def warn_deprecated_coroutine_namespace : Warning<
"use std::%0 instead">,
InGroup<DeprecatedExperimentalCoroutine>;
def err_mixed_use_std_and_experimental_namespace_for_coroutine : Error<
- "mixed use of std and std::experimental namespaces for "
+ "conflicting mixed use of std and std::experimental namespaces for "
"coroutine components">;
def err_implicit_coroutine_std_nothrow_type_not_found : Error<
"std::nothrow was not found; include <new> before defining a coroutine which "
@@ -11443,6 +11449,9 @@ def warn_sycl_kernel_num_of_function_params : Warning<
def warn_sycl_kernel_return_type : Warning<
"function template with 'sycl_kernel' attribute must have a 'void' return type">,
InGroup<IgnoredAttributes>;
+def err_sycl_special_type_num_init_method : Error<
+ "types with 'sycl_special_class' attribute must have one and only one '__init' "
+ "method defined">;
def err_bit_int_bad_size : Error<"%select{signed|unsigned}0 _BitInt must "
"have a bit size of at least %select{2|1}0">;
@@ -11466,7 +11475,7 @@ def warn_tcb_enforcement_violation : Warning<
// RISC-V builtin required extension warning
def err_riscv_builtin_requires_extension : Error<
- "builtin requires '%0' extension support to be enabled">;
+  "builtin requires at least one of the following extensions to be enabled: %0">;
def err_riscv_builtin_invalid_lmul : Error<
"LMUL argument must be in the range [0,3] or [5,7]">;
} // end of sema component.
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSerialization.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSerialization.h
index b3d99fb3feaa..0c622a565773 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSerialization.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSerialization.h
@@ -15,7 +15,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM, FLAGS, DEFAULT_MAPPING, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
ENUM,
#define SERIALIZATIONSTART
#include "clang/Basic/DiagnosticSerializationKinds.inc"
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DirectoryEntry.h b/contrib/llvm-project/clang/include/clang/Basic/DirectoryEntry.h
index edb8031a20b8..ac8e790230fc 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DirectoryEntry.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DirectoryEntry.h
@@ -19,6 +19,7 @@
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorOr.h"
namespace clang {
diff --git a/contrib/llvm-project/clang/include/clang/Basic/IdentifierTable.h b/contrib/llvm-project/clang/include/clang/Basic/IdentifierTable.h
index 19c967efcc42..aaf1297c1a64 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/IdentifierTable.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/IdentifierTable.h
@@ -458,6 +458,10 @@ public:
/// 7.1.3, C++ [lib.global.names]).
ReservedIdentifierStatus isReserved(const LangOptions &LangOpts) const;
+ /// If the identifier is an "uglified" reserved name, return a cleaned form.
+  /// e.g. _Foo => Foo. Otherwise, return the name unchanged.
+ StringRef deuglifiedName() const;
+
/// Provide less than operator for lexicographical sorting.
bool operator<(const IdentifierInfo &RHS) const {
return getName() < RHS.getName();
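(A minimal usage sketch of the new accessor, not part of the patch; assumes the example links
against clangBasic and the LLVM support libraries.)

    #include "clang/Basic/IdentifierTable.h"
    #include "clang/Basic/LangOptions.h"
    #include "llvm/Support/raw_ostream.h"

    int main() {
      clang::LangOptions Opts;
      clang::IdentifierTable Idents(Opts);
      clang::IdentifierInfo &II = Idents.get("_Foo");
      // Per the comment above, the reserved spelling is cleaned up: prints "Foo".
      llvm::outs() << II.deuglifiedName() << "\n";
      return 0;
    }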
diff --git a/contrib/llvm-project/clang/include/clang/Basic/LangOptions.h b/contrib/llvm-project/clang/include/clang/Basic/LangOptions.h
index 35b33c2e0971..09afa641acf9 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/LangOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/LangOptions.h
@@ -124,6 +124,7 @@ public:
MSVC2017_5 = 1912,
MSVC2017_7 = 1914,
MSVC2019 = 1920,
+ MSVC2019_5 = 1925,
MSVC2019_8 = 1928,
};
diff --git a/contrib/llvm-project/clang/include/clang/Basic/OpenCLOptions.h b/contrib/llvm-project/clang/include/clang/Basic/OpenCLOptions.h
index d6cb1a210519..512bcb1e6ef1 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/OpenCLOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/OpenCLOptions.h
@@ -212,6 +212,15 @@ private:
bool isEnabled(llvm::StringRef Ext) const;
OpenCLOptionInfoMap OptMap;
+
+ // First feature in a pair requires the second one to be supported.
+ using FeatureDepEntry = std::pair<llvm::StringRef, llvm::StringRef>;
+ using FeatureDepList = llvm::SmallVector<FeatureDepEntry, 8>;
+
+ static const FeatureDepList DependentFeaturesList;
+
+ // Extensions and equivalent feature pairs.
+ static const llvm::StringMap<llvm::StringRef> FeatureExtensionMap;
};
} // end namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Basic/OperatorPrecedence.h b/contrib/llvm-project/clang/include/clang/Basic/OperatorPrecedence.h
index 61ac7ad62f6b..9bda3eb28fdf 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/OperatorPrecedence.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/OperatorPrecedence.h
@@ -49,4 +49,4 @@ prec::Level getBinOpPrecedence(tok::TokenKind Kind, bool GreaterThanIsOperator,
} // end namespace clang
-#endif // LLVM_CLANG_OPERATOR_PRECEDENCE_H
+#endif // LLVM_CLANG_BASIC_OPERATORPRECEDENCE_H
diff --git a/contrib/llvm-project/clang/include/clang/Basic/PartialDiagnostic.h b/contrib/llvm-project/clang/include/clang/Basic/PartialDiagnostic.h
index 9fb70bff7fee..ddee6821e2e1 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/PartialDiagnostic.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/PartialDiagnostic.h
@@ -28,9 +28,6 @@
namespace clang {
-class DeclContext;
-class IdentifierInfo;
-
class PartialDiagnostic : public StreamingDiagnostic {
private:
// NOTE: Sema assumes that PartialDiagnostic is location-invariant
diff --git a/contrib/llvm-project/clang/include/clang/Basic/PragmaKinds.h b/contrib/llvm-project/clang/include/clang/Basic/PragmaKinds.h
index 82c0d5f0a551..176bbc9ac7ca 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/PragmaKinds.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/PragmaKinds.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_BASIC_PRAGMA_KINDS_H
-#define LLVM_CLANG_BASIC_PRAGMA_KINDS_H
+#ifndef LLVM_CLANG_BASIC_PRAGMAKINDS_H
+#define LLVM_CLANG_BASIC_PRAGMAKINDS_H
namespace clang {
diff --git a/contrib/llvm-project/clang/include/clang/Basic/ProfileList.h b/contrib/llvm-project/clang/include/clang/Basic/ProfileList.h
index 989c36549a3d..aa472f126818 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/ProfileList.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/ProfileList.h
@@ -10,8 +10,8 @@
// functions.
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_BASIC_INSTRPROFLIST_H
-#define LLVM_CLANG_BASIC_INSTRPROFLIST_H
+#ifndef LLVM_CLANG_BASIC_PROFILELIST_H
+#define LLVM_CLANG_BASIC_PROFILELIST_H
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/LLVM.h"
@@ -21,10 +21,6 @@
#include "llvm/ADT/StringRef.h"
#include <memory>
-namespace llvm {
-class SpecialCaseList;
-}
-
namespace clang {
class ProfileSpecialCaseList;
diff --git a/contrib/llvm-project/clang/include/clang/Basic/RISCVVTypes.def b/contrib/llvm-project/clang/include/clang/Basic/RISCVVTypes.def
index f6ef62a64636..1d4024dfb20d 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/RISCVVTypes.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/RISCVVTypes.def
@@ -30,8 +30,8 @@
//
// - ElBits is the size of one element in bits (SEW).
//
-// - NF is the number of fields (NFIELDS) used in the Zvlsseg instructions
-// (TODO).
+// - NF is the number of fields (NFIELDS) used in the Load/Store Segment
+// instructions (TODO).
//
// - IsSigned is true for vectors of signed integer elements and
// for vectors of floating-point elements.
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TargetID.h b/contrib/llvm-project/clang/include/clang/Basic/TargetID.h
index 1a9785574d06..2579276fc034 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/TargetID.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/TargetID.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_BASIC_TARGET_ID_H
-#define LLVM_CLANG_BASIC_TARGET_ID_H
+#ifndef LLVM_CLANG_BASIC_TARGETID_H
+#define LLVM_CLANG_BASIC_TARGETID_H
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
@@ -21,7 +21,7 @@ namespace clang {
/// postfixed by a plus or minus sign delimited by colons, e.g.
/// gfx908:xnack+:sramecc-. Each processor has a limited
/// number of predefined features when showing up in a target ID.
-const llvm::SmallVector<llvm::StringRef, 4>
+llvm::SmallVector<llvm::StringRef, 4>
getAllPossibleTargetIDFeatures(const llvm::Triple &T,
llvm::StringRef Processor);
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TargetInfo.h b/contrib/llvm-project/clang/include/clang/Basic/TargetInfo.h
index 437feba85e23..686a365b8c12 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/TargetInfo.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/TargetInfo.h
@@ -47,9 +47,6 @@ class DiagnosticsEngine;
class LangOptions;
class CodeGenOptions;
class MacroBuilder;
-class QualType;
-class SourceLocation;
-class SourceManager;
namespace Builtin { struct Info; }
@@ -215,6 +212,7 @@ protected:
unsigned char RegParmMax, SSERegParmMax;
TargetCXXABI TheCXXABI;
const LangASMap *AddrSpaceMap;
+ unsigned ProgramAddrSpace;
mutable StringRef PlatformName;
mutable VersionTuple PlatformMinVersion;
@@ -770,6 +768,9 @@ public:
return getTypeWidth(IntMaxType);
}
+ /// Return the address space for functions for the given target.
+ unsigned getProgramAddressSpace() const { return ProgramAddrSpace; }
+
// Return the size of unwind_word for this target.
virtual unsigned getUnwindWordWidth() const { return getPointerWidth(0); }
diff --git a/contrib/llvm-project/clang/include/clang/Basic/arm_neon.td b/contrib/llvm-project/clang/include/clang/Basic/arm_neon.td
index 173003d171ee..2e9798129fdf 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/arm_neon.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/arm_neon.td
@@ -80,10 +80,8 @@ def OP_QDMULH_N : Op<(call "vqdmulh", $p0, (dup $p1))>;
def OP_QDMULH_LN : Op<(call "vqdmulh", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_QRDMULH_LN : Op<(call "vqrdmulh", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_QRDMULH_N : Op<(call "vqrdmulh", $p0, (dup $p1))>;
-def OP_QRDMLAH : Op<(call "vqadd", $p0, (call "vqrdmulh", $p1, $p2))>;
-def OP_QRDMLSH : Op<(call "vqsub", $p0, (call "vqrdmulh", $p1, $p2))>;
-def OP_QRDMLAH_LN : Op<(call "vqadd", $p0, (call "vqrdmulh", $p1, (call_mangled "splat_lane", $p2, $p3)))>;
-def OP_QRDMLSH_LN : Op<(call "vqsub", $p0, (call "vqrdmulh", $p1, (call_mangled "splat_lane", $p2, $p3)))>;
+def OP_QRDMLAH_LN : Op<(call "vqrdmlah", $p0, $p1, (call_mangled "splat_lane", $p2, $p3))>;
+def OP_QRDMLSH_LN : Op<(call "vqrdmlsh", $p0, $p1, (call_mangled "splat_lane", $p2, $p3))>;
def OP_FMS_LN : Op<(call "vfma_lane", $p0, (op "-", $p1), $p2, $p3)>;
def OP_FMS_LNQ : Op<(call "vfma_laneq", $p0, (op "-", $p1), $p2, $p3)>;
def OP_TRN1 : Op<(shuffle $p0, $p1, (interleave (decimate mask0, 2),
@@ -185,10 +183,10 @@ def OP_SCALAR_QDMULL_LN : ScalarMulOp<"vqdmull">;
def OP_SCALAR_QDMULH_LN : ScalarMulOp<"vqdmulh">;
def OP_SCALAR_QRDMULH_LN : ScalarMulOp<"vqrdmulh">;
-def OP_SCALAR_QRDMLAH_LN : Op<(call "vqadd", $p0, (call "vqrdmulh", $p1,
- (call "vget_lane", $p2, $p3)))>;
-def OP_SCALAR_QRDMLSH_LN : Op<(call "vqsub", $p0, (call "vqrdmulh", $p1,
- (call "vget_lane", $p2, $p3)))>;
+def OP_SCALAR_QRDMLAH_LN : Op<(call "vqrdmlah", $p0, $p1,
+ (call "vget_lane", $p2, $p3))>;
+def OP_SCALAR_QRDMLSH_LN : Op<(call "vqrdmlsh", $p0, $p1,
+ (call "vget_lane", $p2, $p3))>;
def OP_SCALAR_HALF_GET_LN : Op<(bitcast "float16_t",
(call "vget_lane",
@@ -326,8 +324,8 @@ def VQDMULH : SInst<"vqdmulh", "...", "siQsQi">;
def VQRDMULH : SInst<"vqrdmulh", "...", "siQsQi">;
let ArchGuard = "defined(__ARM_FEATURE_QRDMX)" in {
-def VQRDMLAH : SOpInst<"vqrdmlah", "....", "siQsQi", OP_QRDMLAH>;
-def VQRDMLSH : SOpInst<"vqrdmlsh", "....", "siQsQi", OP_QRDMLSH>;
+def VQRDMLAH : SInst<"vqrdmlah", "....", "siQsQi">;
+def VQRDMLSH : SInst<"vqrdmlsh", "....", "siQsQi">;
}
def VQDMLAL : SInst<"vqdmlal", "(>Q)(>Q)..", "si">;
@@ -1400,11 +1398,11 @@ def SCALAR_SQRDMULH : SInst<"vqrdmulh", "111", "SsSi">;
let ArchGuard = "defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)" in {
////////////////////////////////////////////////////////////////////////////////
// Signed Saturating Rounding Doubling Multiply Accumulate Returning High Half
-def SCALAR_SQRDMLAH : SOpInst<"vqrdmlah", "1111", "SsSi", OP_QRDMLAH>;
+def SCALAR_SQRDMLAH : SInst<"vqrdmlah", "1111", "SsSi">;
////////////////////////////////////////////////////////////////////////////////
// Signed Saturating Rounding Doubling Multiply Subtract Returning High Half
-def SCALAR_SQRDMLSH : SOpInst<"vqrdmlsh", "1111", "SsSi", OP_QRDMLSH>;
+def SCALAR_SQRDMLSH : SInst<"vqrdmlsh", "1111", "SsSi">;
}
////////////////////////////////////////////////////////////////////////////////
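(User-level view of the change above, illustrative and not from the patch: vqrdmlah/vqrdmlsh are
now first-class builtins instead of being composed from vqadd/vqsub and vqrdmulh, so a call such
as the one below lowers to the dedicated intrinsic. Requires a target with the ARMv8.1-A RDM
feature, i.e. __ARM_FEATURE_QRDMX.)

    #include <arm_neon.h>

    int16x8_t fused(int16x8_t acc, int16x8_t a, int16x8_t b) {
      // Saturating rounding doubling multiply-accumulate, returning the high half.
      return vqrdmlahq_s16(acc, a, b);
    }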
diff --git a/contrib/llvm-project/clang/include/clang/Basic/riscv_vector.td b/contrib/llvm-project/clang/include/clang/Basic/riscv_vector.td
index cc242da7f1ca..6451e77e77f6 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/riscv_vector.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/riscv_vector.td
@@ -215,10 +215,10 @@ class RVVBuiltin<string suffix, string prototype, string type_range,
// an automatic definition in header is emitted.
string HeaderCode = "";
- // Sub extension of vector spec. Currently only support Zvlsseg.
- string RequiredExtension = "";
+ // Features required to enable for this builtin.
+ list<string> RequiredFeatures = [];
- // Number of fields for Zvlsseg.
+ // Number of fields for Load/Store Segment instructions.
int NF = 1;
}
@@ -595,6 +595,7 @@ let HasNoMaskedOverloaded = false,
ManualCodegen = [{
IntrinsicTypes = {ResultType, Ops[1]->getType()};
Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
+ Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
}],
ManualCodegenMask= [{
// Move mask to right before vl.
@@ -628,6 +629,7 @@ multiclass RVVVLEFFBuiltin<list<string> types> {
Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
Value *NewVL = Ops[1];
Ops.erase(Ops.begin() + 1);
+ Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
llvm::Value *LoadValue = Builder.CreateCall(F, Ops, "");
llvm::Value *V = Builder.CreateExtractValue(LoadValue, {0});
@@ -677,6 +679,7 @@ multiclass RVVVLSEBuiltin<list<string> types> {
ManualCodegen = [{
IntrinsicTypes = {ResultType, Ops[2]->getType()};
Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
+ Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
}],
ManualCodegenMask= [{
// Move mask to right before vl.
@@ -698,6 +701,7 @@ multiclass RVVIndexedLoad<string op> {
let ManualCodegen = [{
IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[2]->getType()};
Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
+ Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
}],
ManualCodegenMask = [{
// Move mask to right before vl.
@@ -707,7 +711,7 @@ multiclass RVVIndexedLoad<string op> {
Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
}] in {
foreach type = TypeList in {
- foreach eew_list = EEWList in {
+ foreach eew_list = EEWList[0-2] in {
defvar eew = eew_list[0];
defvar eew_type = eew_list[1];
let Name = op # eew # "_v", IRName = op, IRNameMask = op # "_mask" in {
@@ -717,6 +721,15 @@ multiclass RVVIndexedLoad<string op> {
}
}
}
+ defvar eew64 = "64";
+ defvar eew64_type = "(Log2EEW:6)";
+ let Name = op # eew64 # "_v", IRName = op, IRNameMask = op # "_mask",
+ RequiredFeatures = ["RV64"] in {
+ def: RVVBuiltin<"v", "vPCe" # eew64_type # "Uv", type>;
+ if !not(IsFloat<type>.val) then {
+ def: RVVBuiltin<"Uv", "UvPCUe" # eew64_type # "Uv", type>;
+ }
+ }
}
}
}
@@ -797,7 +810,7 @@ multiclass RVVIndexedStore<string op> {
IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType(), Ops[4]->getType()};
}] in {
foreach type = TypeList in {
- foreach eew_list = EEWList in {
+ foreach eew_list = EEWList[0-2] in {
defvar eew = eew_list[0];
defvar eew_type = eew_list[1];
let Name = op # eew # "_v", IRName = op, IRNameMask = op # "_mask" in {
@@ -807,6 +820,15 @@ multiclass RVVIndexedStore<string op> {
}
}
}
+ defvar eew64 = "64";
+ defvar eew64_type = "(Log2EEW:6)";
+ let Name = op # eew64 # "_v", IRName = op, IRNameMask = op # "_mask",
+ RequiredFeatures = ["RV64"] in {
+ def : RVVBuiltin<"v", "0Pe" # eew64_type # "Uvv", type>;
+ if !not(IsFloat<type>.val) then {
+ def : RVVBuiltin<"Uv", "0PUe" # eew64_type # "UvUv", type>;
+ }
+ }
}
}
}
@@ -1549,7 +1571,6 @@ defm vle32ff: RVVVLEFFBuiltin<["i", "f"]>;
defm vle64ff: RVVVLEFFBuiltin<["l", "d"]>;
// 7.8 Vector Load/Store Segment Instructions
-let RequiredExtension = "Zvlsseg" in {
defm : RVVUnitStridedSegLoad<"vlseg">;
defm : RVVUnitStridedSegLoadFF<"vlseg">;
defm : RVVStridedSegLoad<"vlsseg">;
@@ -1559,7 +1580,6 @@ defm : RVVUnitStridedSegStore<"vsseg">;
defm : RVVStridedSegStore<"vssseg">;
defm : RVVIndexedSegStore<"vsuxseg">;
defm : RVVIndexedSegStore<"vsoxseg">;
-}
// 12. Vector Integer Arithmetic Instructions
// 12.1. Vector Single-Width Integer Add and Subtract
@@ -1652,11 +1672,13 @@ defm vmax : RVVSignedBinBuiltinSet;
// 12.10. Vector Single-Width Integer Multiply Instructions
defm vmul : RVVIntBinBuiltinSet;
+let RequiredFeatures = ["FullMultiply"] in {
defm vmulh : RVVSignedBinBuiltinSet;
defm vmulhu : RVVUnsignedBinBuiltinSet;
defm vmulhsu : RVVOutOp1BuiltinSet<"vmulhsu", "csil",
[["vv", "v", "vvUv"],
["vx", "v", "vvUe"]]>;
+}
// 12.11. Vector Integer Divide Instructions
defm vdivu : RVVUnsignedBinBuiltinSet;
@@ -1743,7 +1765,9 @@ defm vasubu : RVVUnsignedBinBuiltinSet;
defm vasub : RVVSignedBinBuiltinSet;
// 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
+let RequiredFeatures = ["FullMultiply"] in {
defm vsmul : RVVSignedBinBuiltinSet;
+}
// 13.4. Vector Single-Width Scaling Shift Instructions
defm vssrl : RVVUnsignedShiftBuiltinSet;
diff --git a/contrib/llvm-project/clang/include/clang/CodeGen/CodeGenABITypes.h b/contrib/llvm-project/clang/include/clang/CodeGen/CodeGenABITypes.h
index 3c745fadbe78..fda0855dc868 100644
--- a/contrib/llvm-project/clang/include/clang/CodeGen/CodeGenABITypes.h
+++ b/contrib/llvm-project/clang/include/clang/CodeGen/CodeGenABITypes.h
@@ -32,26 +32,18 @@
namespace llvm {
class AttrBuilder;
class Constant;
-class DataLayout;
-class Module;
class Function;
class FunctionType;
class Type;
}
namespace clang {
-class ASTContext;
class CXXConstructorDecl;
class CXXDestructorDecl;
class CXXRecordDecl;
class CXXMethodDecl;
-class CodeGenOptions;
-class CoverageSourceInfo;
-class DiagnosticsEngine;
-class HeaderSearchOptions;
class ObjCMethodDecl;
class ObjCProtocolDecl;
-class PreprocessorOptions;
namespace CodeGen {
class CGFunctionInfo;
diff --git a/contrib/llvm-project/clang/include/clang/CodeGen/ObjectFilePCHContainerOperations.h b/contrib/llvm-project/clang/include/clang/CodeGen/ObjectFilePCHContainerOperations.h
index 8821cd70362e..c13e052149d9 100644
--- a/contrib/llvm-project/clang/include/clang/CodeGen/ObjectFilePCHContainerOperations.h
+++ b/contrib/llvm-project/clang/include/clang/CodeGen/ObjectFilePCHContainerOperations.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_CODEGEN_OBJECT_FILE_PCH_CONTAINER_OPERATIONS_H
-#define LLVM_CLANG_CODEGEN_OBJECT_FILE_PCH_CONTAINER_OPERATIONS_H
+#ifndef LLVM_CLANG_CODEGEN_OBJECTFILEPCHCONTAINEROPERATIONS_H
+#define LLVM_CLANG_CODEGEN_OBJECTFILEPCHCONTAINEROPERATIONS_H
#include "clang/Frontend/PCHContainerOperations.h"
diff --git a/contrib/llvm-project/clang/include/clang/CodeGen/SwiftCallingConv.h b/contrib/llvm-project/clang/include/clang/CodeGen/SwiftCallingConv.h
index b1a638a58a09..d7a0c84699ab 100644
--- a/contrib/llvm-project/clang/include/clang/CodeGen/SwiftCallingConv.h
+++ b/contrib/llvm-project/clang/include/clang/CodeGen/SwiftCallingConv.h
@@ -28,7 +28,6 @@ namespace llvm {
}
namespace clang {
-class Decl;
class FieldDecl;
class ASTRecordLayout;
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Options.td b/contrib/llvm-project/clang/include/clang/Driver/Options.td
index dc8bd831f2a2..b3de12e8c7b5 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Options.td
+++ b/contrib/llvm-project/clang/include/clang/Driver/Options.td
@@ -1076,8 +1076,12 @@ def emit_interface_stubs : Flag<["-"], "emit-interface-stubs">, Flags<[CC1Option
def emit_merged_ifs : Flag<["-"], "emit-merged-ifs">,
Flags<[CC1Option]>, Group<Action_Group>,
HelpText<"Generate Interface Stub Files, emit merged text not binary.">;
+def end_no_unused_arguments : Flag<["--"], "end-no-unused-arguments">, Flags<[CoreOption]>,
+ HelpText<"Start emitting warnings for unused driver arguments">;
def interface_stub_version_EQ : JoinedOrSeparate<["-"], "interface-stub-version=">, Flags<[CC1Option]>;
def exported__symbols__list : Separate<["-"], "exported_symbols_list">;
+def extract_api : Flag<["-"], "extract-api">, Flags<[CC1Option]>, Group<Action_Group>,
+ HelpText<"Extract API information">;
def e : JoinedOrSeparate<["-"], "e">, Flags<[LinkerInput]>, Group<Link_Group>;
def fmax_tokens_EQ : Joined<["-"], "fmax-tokens=">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Max total number of preprocessed tokens for -Wmax-tokens.">,
@@ -1667,7 +1671,13 @@ def sanitize_address_destructor_EQ
NormalizedValuesScope<"llvm::AsanDtorKind">,
NormalizedValues<["None", "Global"]>,
MarshallingInfoEnum<CodeGenOpts<"SanitizeAddressDtor">, "Global">;
-// Note: This flag was introduced when it was necessary to distinguish between
+defm sanitize_memory_param_retval
+ : BoolFOption<"sanitize-memory-param-retval",
+ CodeGenOpts<"SanitizeMemoryParamRetval">,
+ DefaultFalse,
+ PosFlag<SetTrue, [CC1Option], "Enable">, NegFlag<SetFalse, [], "Disable">,
+ BothFlags<[], " detection of uninitialized parameters and return values">>;
+// Note: This flag was introduced when it was necessary to distinguish between
// ABI for correct codegen. This is no longer needed, but the flag is
// not removed since targeting either ABI will behave the same.
// This way we cause no disturbance to existing scripts & code, and if we
@@ -1919,6 +1929,8 @@ def fcf_protection_EQ : Joined<["-"], "fcf-protection=">, Flags<[CoreOption, CC1
def fcf_protection : Flag<["-"], "fcf-protection">, Group<f_Group>, Flags<[CoreOption, CC1Option]>,
Alias<fcf_protection_EQ>, AliasArgs<["full"]>,
HelpText<"Enable cf-protection in 'full' mode">;
+def mibt_seal : Flag<["-"], "mibt-seal">, Group<m_Group>, Flags<[CoreOption, CC1Option]>,
+ HelpText<"Optimize fcf-protection=branch/full (requires LTO).">;
defm xray_instrument : BoolFOption<"xray-instrument",
LangOpts<"XRayInstrument">, DefaultFalse,
@@ -2490,6 +2502,9 @@ defm pascal_strings : BoolFOption<"pascal-strings",
def fpatchable_function_entry_EQ : Joined<["-"], "fpatchable-function-entry=">, Group<f_Group>, Flags<[CC1Option]>,
MetaVarName<"<N,M>">, HelpText<"Generate M NOPs before function entry and N-M NOPs after function entry">,
MarshallingInfoInt<CodeGenOpts<"PatchableFunctionEntryCount">>;
+def fms_hotpatch : Flag<["-"], "fms-hotpatch">, Group<f_Group>, Flags<[CC1Option, CoreOption]>,
+ HelpText<"Ensure that all functions can be hotpatched at runtime">,
+ MarshallingInfoFlag<CodeGenOpts<"HotPatch">>;
def fpcc_struct_return : Flag<["-"], "fpcc-struct-return">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Override the default ABI to return all structs on the stack">;
def fpch_preprocess : Flag<["-"], "fpch-preprocess">, Group<f_Group>;
@@ -3617,6 +3632,7 @@ def mcheck_zero_division : Flag<["-"], "mcheck-zero-division">,
Group<m_mips_Features_Group>;
def mno_check_zero_division : Flag<["-"], "mno-check-zero-division">,
Group<m_mips_Features_Group>;
+def mfix4300 : Flag<["-"], "mfix4300">, Group<m_mips_Features_Group>;
def mcompact_branches_EQ : Joined<["-"], "mcompact-branches=">,
Group<m_mips_Features_Group>;
def mbranch_likely : Flag<["-"], "mbranch-likely">, Group<m_Group>,
@@ -3912,6 +3928,8 @@ def shared : Flag<["-", "--"], "shared">, Group<Link_Group>;
def single__module : Flag<["-"], "single_module">;
def specs_EQ : Joined<["-", "--"], "specs=">, Group<Link_Group>;
def specs : Separate<["-", "--"], "specs">, Flags<[Unsupported]>;
+def start_no_unused_arguments : Flag<["--"], "start-no-unused-arguments">, Flags<[CoreOption]>,
+ HelpText<"Don't emit warnings about unused arguments for the following arguments">;
def static_libgcc : Flag<["-"], "static-libgcc">;
def static_libstdcxx : Flag<["-"], "static-libstdc++">;
def static : Flag<["-", "--"], "static">, Group<Link_Group>, Flags<[NoArgumentUnused]>;
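(Hedged usage illustration, not from the patch: driver arguments placed between the two
bracketing flags added above do not produce unused-argument warnings, e.g. a linker input passed
during a compile-only step.)

    clang --start-no-unused-arguments -lm --end-no-unused-arguments -c foo.c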
@@ -4956,8 +4974,8 @@ def record_command_line : Separate<["-"], "record-command-line">,
HelpText<"The string to embed in the .LLVM.command.line section.">,
MarshallingInfoString<CodeGenOpts<"RecordCommandLine">>;
def compress_debug_sections_EQ : Joined<["-", "--"], "compress-debug-sections=">,
- HelpText<"DWARF debug sections compression type">, Values<"none,zlib,zlib-gnu">,
- NormalizedValuesScope<"llvm::DebugCompressionType">, NormalizedValues<["None", "Z", "GNU"]>,
+ HelpText<"DWARF debug sections compression type">, Values<"none,zlib">,
+ NormalizedValuesScope<"llvm::DebugCompressionType">, NormalizedValues<["None", "Z"]>,
MarshallingInfoEnum<CodeGenOpts<"CompressDebugSections">, "None">;
def compress_debug_sections : Flag<["-", "--"], "compress-debug-sections">,
Alias<compress_debug_sections_EQ>, AliasArgs<["zlib"]>;
@@ -5385,9 +5403,9 @@ defm clear_ast_before_backend : BoolOption<"",
PosFlag<SetTrue, [], "Clear">,
NegFlag<SetFalse, [], "Don't clear">,
BothFlags<[], " the Clang AST before running backend code generation">>;
-def enable_noundef_analysis : Flag<["-"], "enable-noundef-analysis">, Group<f_Group>,
- HelpText<"Enable analyzing function argument and return types for mandatory definedness">,
- MarshallingInfoFlag<CodeGenOpts<"EnableNoundefAttrs">>;
+def disable_noundef_analysis : Flag<["-"], "disable-noundef-analysis">, Group<f_Group>,
+ HelpText<"Disable analyzing function argument and return types for mandatory definedness">,
+ MarshallingInfoFlag<CodeGenOpts<"DisableNoundefAttrs">>;
def discard_value_names : Flag<["-"], "discard-value-names">,
HelpText<"Discard value names in LLVM IR">,
MarshallingInfoFlag<CodeGenOpts<"DiscardValueNames">>;
@@ -6113,6 +6131,8 @@ def _SLASH_Gw_ : CLFlag<"Gw-">,
def _SLASH_help : CLFlag<"help">, Alias<help>,
HelpText<"Display available options">;
def _SLASH_HELP : CLFlag<"HELP">, Alias<help>;
+def _SLASH_hotpatch : CLFlag<"hotpatch">, Alias<fms_hotpatch>,
+ HelpText<"Create hotpatchable image">;
def _SLASH_I : CLJoinedOrSeparate<"I">,
HelpText<"Add directory to include search path">, MetaVarName<"<dir>">,
Alias<I>;
@@ -6469,7 +6489,6 @@ def _SLASH_headerUnit : CLJoinedOrSeparate<"headerUnit">;
def _SLASH_headerUnitAngle : CLJoinedOrSeparate<"headerUnit:angle">;
def _SLASH_headerUnitQuote : CLJoinedOrSeparate<"headerUnit:quote">;
def _SLASH_homeparams : CLFlag<"homeparams">;
-def _SLASH_hotpatch : CLFlag<"hotpatch">;
def _SLASH_kernel : CLFlag<"kernel">;
def _SLASH_LN : CLFlag<"LN">;
def _SLASH_MP : CLJoined<"MP">;
diff --git a/contrib/llvm-project/clang/include/clang/Driver/SanitizerArgs.h b/contrib/llvm-project/clang/include/clang/Driver/SanitizerArgs.h
index 84bb324775d1..d288b0151c9f 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/SanitizerArgs.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/SanitizerArgs.h
@@ -33,6 +33,7 @@ class SanitizerArgs {
int CoverageFeatures = 0;
int MsanTrackOrigins = 0;
bool MsanUseAfterDtor = true;
+ bool MsanParamRetval = false;
bool CfiCrossDso = false;
bool CfiICallGeneralizePointers = false;
bool CfiCanonicalJumpTables = false;
diff --git a/contrib/llvm-project/clang/include/clang/Driver/ToolChain.h b/contrib/llvm-project/clang/include/clang/Driver/ToolChain.h
index 4afc9bf36b5f..329833bb13be 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/ToolChain.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/ToolChain.h
@@ -409,6 +409,9 @@ public:
/// Check whether to enable x86 relax relocations by default.
virtual bool useRelaxRelocations() const;
+ /// Check whether use IEEE binary128 as long double format by default.
+ bool defaultToIEEELongDouble() const;
+
/// GetDefaultStackProtectorLevel - Get the default stack protector level for
/// this tool chain.
virtual LangOptions::StackProtectorMode
@@ -452,11 +455,11 @@ public:
StringRef Component,
FileType Type = ToolChain::FT_Static) const;
- // Returns target specific runtime path if it exists.
- virtual std::string getRuntimePath() const;
+ // Returns target specific runtime paths.
+ path_list getRuntimePaths() const;
- // Returns target specific standard library path if it exists.
- virtual std::string getStdlibPath() const;
+ // Returns target specific standard library paths.
+ path_list getStdlibPaths() const;
// Returns <ResourceDir>/lib/<OSName>/<arch>. This is used by runtimes (such
// as OpenMP) to find arch-specific libraries.
@@ -510,7 +513,7 @@ public:
// Return the DWARF version to emit, in the absence of arguments
// to the contrary.
- virtual unsigned GetDefaultDwarfVersion() const { return 4; }
+ virtual unsigned GetDefaultDwarfVersion() const { return 5; }
// Some toolchains may have different restrictions on the DWARF version and
// may need to adjust it. E.g. NVPTX may need to enforce DWARF2 even when host
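(Illustrative consequence of the default bump above, not stated elsewhere in the patch: toolchains
that do not override GetDefaultDwarfVersion() now emit DWARF v5 for a plain -g, and an explicit
-gdwarf-4 restores the previous format.)

    clang -g main.c            # DWARF v5 by default where the base default applies
    clang -g -gdwarf-4 main.c  # opt back into DWARF v4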
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Types.def b/contrib/llvm-project/clang/include/clang/Driver/Types.def
index 997eea445c22..7adf59ca5c99 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Types.def
+++ b/contrib/llvm-project/clang/include/clang/Driver/Types.def
@@ -100,4 +100,5 @@ TYPE("dSYM", dSYM, INVALID, "dSYM", phases
TYPE("dependencies", Dependencies, INVALID, "d", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("cuda-fatbin", CUDA_FATBIN, INVALID, "fatbin", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("hip-fatbin", HIP_FATBIN, INVALID, "hipfb", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
+TYPE("api-information", API_INFO, INVALID, "json", phases::Compile)
TYPE("none", Nothing, INVALID, nullptr, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Util.h b/contrib/llvm-project/clang/include/clang/Driver/Util.h
index 6788420912a1..92d3d40433a3 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Util.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/Util.h
@@ -13,7 +13,6 @@
#include "llvm/ADT/DenseMap.h"
namespace clang {
-class DiagnosticsEngine;
namespace driver {
class Action;
diff --git a/contrib/llvm-project/clang/include/clang/Format/Format.h b/contrib/llvm-project/clang/include/clang/Format/Format.h
index 0f97e80d425e..326e85305c8e 100755
--- a/contrib/llvm-project/clang/include/clang/Format/Format.h
+++ b/contrib/llvm-project/clang/include/clang/Format/Format.h
@@ -29,11 +29,6 @@ class FileSystem;
} // namespace llvm
namespace clang {
-
-class Lexer;
-class SourceManager;
-class DiagnosticConsumer;
-
namespace format {
enum class ParseError {
@@ -87,6 +82,19 @@ struct FormatStyle {
/// argument1, argument2);
/// \endcode
BAS_AlwaysBreak,
+ /// Always break after an open bracket, if the parameters don't fit
+ /// on a single line. Closing brackets will be placed on a new line.
+ /// E.g.:
+ /// \code
+ /// someLongFunction(
+ /// argument1, argument2
+ /// )
+ /// \endcode
+ ///
+ /// \warning
+ /// Note: This currently only applies to parentheses.
+ /// \endwarning
+ BAS_BlockIndent,
};
/// If ``true``, horizontally aligns arguments after an open bracket.
@@ -2704,7 +2712,7 @@ struct FormatStyle {
/// readability to have the signature indented two levels and to use
/// ``OuterScope``. The KJ style guide requires ``OuterScope``.
/// `KJ style guide
- /// <https://github.com/capnproto/capnproto/blob/master/kjdoc/style-guide.md>`_
+ /// <https://github.com/capnproto/capnproto/blob/master/style-guide.md>`_
/// \version 13
LambdaBodyIndentationKind LambdaBodyIndentation;
@@ -2887,6 +2895,10 @@ struct FormatStyle {
/// \version 3.7
unsigned PenaltyBreakFirstLessLess;
+ /// The penalty for breaking after ``(``.
+ /// \version 14
+ unsigned PenaltyBreakOpenParenthesis;
+
/// The penalty for each line break introduced inside a string literal.
/// \version 3.7
unsigned PenaltyBreakString;
@@ -3050,6 +3062,118 @@ struct FormatStyle {
bool ReflowComments;
// clang-format on
+ /// Remove optional braces of control statements (``if``, ``else``, ``for``,
+ /// and ``while``) in C++ according to the LLVM coding style.
+ /// \warning
+ /// This option will be renamed and expanded to support other styles.
+ /// \endwarning
+ /// \warning
+ /// Setting this option to `true` could lead to incorrect code formatting due
+ /// to clang-format's lack of complete semantic information. As such, extra
+ /// care should be taken to review code changes made by this option.
+ /// \endwarning
+ /// \code
+ /// false: true:
+ ///
+ /// if (isa<FunctionDecl>(D)) { vs. if (isa<FunctionDecl>(D))
+ /// handleFunctionDecl(D); handleFunctionDecl(D);
+ /// } else if (isa<VarDecl>(D)) { else if (isa<VarDecl>(D))
+ /// handleVarDecl(D); handleVarDecl(D);
+ /// }
+ ///
+ /// if (isa<VarDecl>(D)) { vs. if (isa<VarDecl>(D)) {
+ /// for (auto *A : D.attrs()) { for (auto *A : D.attrs())
+ /// if (shouldProcessAttr(A)) { if (shouldProcessAttr(A))
+ /// handleAttr(A); handleAttr(A);
+ /// } }
+ /// }
+ /// }
+ ///
+ /// if (isa<FunctionDecl>(D)) { vs. if (isa<FunctionDecl>(D))
+ /// for (auto *A : D.attrs()) { for (auto *A : D.attrs())
+ /// handleAttr(A); handleAttr(A);
+ /// }
+ /// }
+ ///
+ /// if (auto *D = (T)(D)) { vs. if (auto *D = (T)(D)) {
+ /// if (shouldProcess(D)) { if (shouldProcess(D))
+ /// handleVarDecl(D); handleVarDecl(D);
+ /// } else { else
+ /// markAsIgnored(D); markAsIgnored(D);
+ /// } }
+ /// }
+ ///
+ /// if (a) { vs. if (a)
+ /// b(); b();
+ /// } else { else if (c)
+ /// if (c) { d();
+ /// d(); else
+ /// } else { e();
+ /// e();
+ /// }
+ /// }
+ /// \endcode
+ /// \version 14
+ bool RemoveBracesLLVM;
+
+  /// \brief The style to use for separating definition blocks.
+ enum SeparateDefinitionStyle {
+ /// Leave definition blocks as they are.
+ SDS_Leave,
+ /// Insert an empty line between definition blocks.
+ SDS_Always,
+ /// Remove any empty line between definition blocks.
+ SDS_Never
+ };
+
+ /// Specifies the use of empty lines to separate definition blocks, including
+ /// classes, structs, enums, and functions.
+ /// \code
+ /// Never v.s. Always
+ /// #include <cstring> #include <cstring>
+ /// struct Foo {
+ /// int a, b, c; struct Foo {
+ /// }; int a, b, c;
+ /// namespace Ns { };
+ /// class Bar {
+ /// public: namespace Ns {
+ /// struct Foobar { class Bar {
+ /// int a; public:
+ /// int b; struct Foobar {
+ /// }; int a;
+ /// private: int b;
+ /// int t; };
+ /// int method1() {
+ /// // ... private:
+ /// } int t;
+ /// enum List {
+ /// ITEM1, int method1() {
+ /// ITEM2 // ...
+ /// }; }
+ /// template<typename T>
+ /// int method2(T x) { enum List {
+ /// // ... ITEM1,
+ /// } ITEM2
+ /// int i, j, k; };
+ /// int method3(int par) {
+ /// // ... template<typename T>
+ /// } int method2(T x) {
+ /// }; // ...
+ /// class C {}; }
+ /// }
+ /// int i, j, k;
+ ///
+ /// int method3(int par) {
+ /// // ...
+ /// }
+ /// };
+ ///
+ /// class C {};
+ /// }
+ /// \endcode
+ /// \version 14
+ SeparateDefinitionStyle SeparateDefinitionBlocks;
+
/// The maximal number of unwrapped lines that a short namespace spans.
/// Defaults to 1.
///
@@ -3368,6 +3492,14 @@ struct FormatStyle {
/// <conditional-body> <conditional-body>
/// \endcode
bool AfterIfMacros;
+ /// If ``true``, put a space between operator overloading and opening
+ /// parentheses.
+ /// \code
+ /// true: false:
+ /// void operator++ (int a); vs. void operator++(int a);
+ /// object.operator++ (10); object.operator++(10);
+ /// \endcode
+ bool AfterOverloadedOperator;
/// If ``true``, put a space before opening parentheses only if the
/// parentheses are not empty.
/// \code
@@ -3381,7 +3513,7 @@ struct FormatStyle {
: AfterControlStatements(false), AfterForeachMacros(false),
AfterFunctionDeclarationName(false),
AfterFunctionDefinitionName(false), AfterIfMacros(false),
- BeforeNonEmptyParentheses(false) {}
+ AfterOverloadedOperator(false), BeforeNonEmptyParentheses(false) {}
bool operator==(const SpaceBeforeParensCustom &Other) const {
return AfterControlStatements == Other.AfterControlStatements &&
@@ -3390,6 +3522,7 @@ struct FormatStyle {
Other.AfterFunctionDeclarationName &&
AfterFunctionDefinitionName == Other.AfterFunctionDefinitionName &&
AfterIfMacros == Other.AfterIfMacros &&
+ AfterOverloadedOperator == Other.AfterOverloadedOperator &&
BeforeNonEmptyParentheses == Other.BeforeNonEmptyParentheses;
}
};
@@ -3781,6 +3914,7 @@ struct FormatStyle {
R.PenaltyBreakBeforeFirstCallParameter &&
PenaltyBreakComment == R.PenaltyBreakComment &&
PenaltyBreakFirstLessLess == R.PenaltyBreakFirstLessLess &&
+ PenaltyBreakOpenParenthesis == R.PenaltyBreakOpenParenthesis &&
PenaltyBreakString == R.PenaltyBreakString &&
PenaltyExcessCharacter == R.PenaltyExcessCharacter &&
PenaltyReturnTypeOnItsOwnLine == R.PenaltyReturnTypeOnItsOwnLine &&
@@ -3791,6 +3925,8 @@ struct FormatStyle {
QualifierOrder == R.QualifierOrder &&
RawStringFormats == R.RawStringFormats &&
ReferenceAlignment == R.ReferenceAlignment &&
+ RemoveBracesLLVM == R.RemoveBracesLLVM &&
+ SeparateDefinitionBlocks == R.SeparateDefinitionBlocks &&
ShortNamespaceLines == R.ShortNamespaceLines &&
SortIncludes == R.SortIncludes &&
SortJavaStaticImport == R.SortJavaStaticImport &&
@@ -3888,7 +4024,7 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language);
FormatStyle getChromiumStyle(FormatStyle::LanguageKind Language);
/// Returns a format style complying with Mozilla's style guide:
-/// https://developer.mozilla.org/en-US/docs/Developer_Guide/Coding_Style.
+/// https://firefox-source-docs.mozilla.org/code-quality/coding-style/index.html.
FormatStyle getMozillaStyle();
/// Returns a format style complying with Webkit's style guide:
@@ -4028,6 +4164,17 @@ tooling::Replacements fixNamespaceEndComments(const FormatStyle &Style,
ArrayRef<tooling::Range> Ranges,
StringRef FileName = "<stdin>");
+/// Inserts or removes empty lines separating definition blocks including
+/// classes, structs, functions, namespaces, and enums in the given \p Ranges in
+/// \p Code.
+///
+/// Returns the ``Replacements`` that inserts or removes empty lines separating
+/// definition blocks in all \p Ranges in \p Code.
+tooling::Replacements separateDefinitionBlocks(const FormatStyle &Style,
+ StringRef Code,
+ ArrayRef<tooling::Range> Ranges,
+ StringRef FileName = "<stdin>");
+
/// Sort consecutive using declarations in the given \p Ranges in
/// \p Code.
///
@@ -4066,6 +4213,8 @@ extern const char *DefaultFallbackStyle;
/// * "file" - Load style configuration from a file called ``.clang-format``
/// located in one of the parent directories of ``FileName`` or the current
/// directory if ``FileName`` is empty.
+/// * "file:<format_file_path>" to explicitly specify the configuration file to
+/// use.
///
/// \param[in] StyleName Style name to interpret according to the description
/// above.
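(A minimal sketch, not part of the patch, of driving the new separateDefinitionBlocks() entry
point from libFormat; assumes linking against clangFormat and clangToolingCore.)

    #include "clang/Format/Format.h"
    #include "clang/Tooling/Core/Replacement.h"
    #include "llvm/Support/Error.h"
    #include "llvm/Support/raw_ostream.h"

    int main() {
      llvm::StringRef Code = "int a() { return 1; }\nint b() { return 2; }\n";
      clang::format::FormatStyle Style = clang::format::getLLVMStyle();
      Style.SeparateDefinitionBlocks = clang::format::FormatStyle::SDS_Always;
      clang::tooling::Replacements Repl = clang::format::separateDefinitionBlocks(
          Style, Code, {clang::tooling::Range(0, static_cast<unsigned>(Code.size()))});
      // Apply the computed edits; with SDS_Always an empty line should be
      // inserted between the definitions of a() and b().
      auto Changed = clang::tooling::applyAllReplacements(Code, Repl);
      if (!Changed) {
        llvm::errs() << llvm::toString(Changed.takeError()) << "\n";
        return 1;
      }
      llvm::outs() << *Changed;
      return 0;
    }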
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/ASTConsumers.h b/contrib/llvm-project/clang/include/clang/Frontend/ASTConsumers.h
index 98cfc7cadc0d..0e068bf5cccb 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/ASTConsumers.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/ASTConsumers.h
@@ -20,12 +20,6 @@
namespace clang {
class ASTConsumer;
-class CodeGenOptions;
-class DiagnosticsEngine;
-class FileManager;
-class LangOptions;
-class Preprocessor;
-class TargetOptions;
// AST pretty-printer: prints out the AST in a format that is close to the
// original C code. The output is intended to be in a format such that
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/CompilerInstance.h b/contrib/llvm-project/clang/include/clang/Frontend/CompilerInstance.h
index 74e152ea5952..577daad86b20 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/CompilerInstance.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/CompilerInstance.h
@@ -41,8 +41,6 @@ class ASTReader;
class CodeCompleteConsumer;
class DiagnosticsEngine;
class DiagnosticConsumer;
-class ExternalASTSource;
-class FileEntry;
class FileManager;
class FrontendAction;
class InMemoryModuleCache;
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/FrontendActions.h b/contrib/llvm-project/clang/include/clang/Frontend/FrontendActions.h
index 545a7e842c4f..8eceb81723d2 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/FrontendActions.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/FrontendActions.h
@@ -10,14 +10,12 @@
#define LLVM_CLANG_FRONTEND_FRONTENDACTIONS_H
#include "clang/Frontend/FrontendAction.h"
+#include <memory>
#include <string>
#include <vector>
namespace clang {
-class Module;
-class FileEntry;
-
//===----------------------------------------------------------------------===//
// Custom Consumer Actions
//===----------------------------------------------------------------------===//
@@ -273,6 +271,12 @@ protected:
bool usesPreprocessorOnly() const override { return true; }
};
+class ExtractAPIAction : public ASTFrontendAction {
+protected:
+ std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) override;
+};
+
//===----------------------------------------------------------------------===//
// Preprocessor Actions
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/FrontendOptions.h b/contrib/llvm-project/clang/include/clang/Frontend/FrontendOptions.h
index 1d9d89a28c6c..7ce8076a3ee4 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/FrontendOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/FrontendOptions.h
@@ -75,6 +75,9 @@ enum ActionKind {
/// Emit a .o file.
EmitObj,
+ // Extract API information
+ ExtractAPI,
+
/// Parse and apply any fixits to the source.
FixIt,
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/PCHContainerOperations.h b/contrib/llvm-project/clang/include/clang/Frontend/PCHContainerOperations.h
index fa977a63f32e..098d32ec3869 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/PCHContainerOperations.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/PCHContainerOperations.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_PCH_CONTAINER_OPERATIONS_H
-#define LLVM_CLANG_PCH_CONTAINER_OPERATIONS_H
+#ifndef LLVM_CLANG_FRONTEND_PCHCONTAINEROPERATIONS_H
+#define LLVM_CLANG_FRONTEND_PCHCONTAINEROPERATIONS_H
#include "clang/Serialization/PCHContainerOperations.h"
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/PrecompiledPreamble.h b/contrib/llvm-project/clang/include/clang/Frontend/PrecompiledPreamble.h
index dacbffef0b12..628736f34091 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/PrecompiledPreamble.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/PrecompiledPreamble.h
@@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_FRONTEND_PRECOMPILED_PREAMBLE_H
-#define LLVM_CLANG_FRONTEND_PRECOMPILED_PREAMBLE_H
+#ifndef LLVM_CLANG_FRONTEND_PRECOMPILEDPREAMBLE_H
+#define LLVM_CLANG_FRONTEND_PRECOMPILEDPREAMBLE_H
#include "clang/Lex/Lexer.h"
#include "clang/Lex/Preprocessor.h"
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/SerializedDiagnosticPrinter.h b/contrib/llvm-project/clang/include/clang/Frontend/SerializedDiagnosticPrinter.h
index 58954dc6bafa..5586ef65e393 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/SerializedDiagnosticPrinter.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/SerializedDiagnosticPrinter.h
@@ -19,7 +19,6 @@ class raw_ostream;
namespace clang {
class DiagnosticConsumer;
-class DiagnosticsEngine;
class DiagnosticOptions;
namespace serialized_diags {
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/SerializedDiagnostics.h b/contrib/llvm-project/clang/include/clang/Frontend/SerializedDiagnostics.h
index 4e67fd13ac5b..6464693c1482 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/SerializedDiagnostics.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/SerializedDiagnostics.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_FRONTEND_SERIALIZE_DIAGNOSTICS_H_
-#define LLVM_CLANG_FRONTEND_SERIALIZE_DIAGNOSTICS_H_
+#ifndef LLVM_CLANG_FRONTEND_SERIALIZEDDIAGNOSTICS_H
+#define LLVM_CLANG_FRONTEND_SERIALIZEDDIAGNOSTICS_H
#include "llvm/Bitstream/BitCodes.h"
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/Utils.h b/contrib/llvm-project/clang/include/clang/Frontend/Utils.h
index da2d79af2eba..a05584bfe551 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/Utils.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/Utils.h
@@ -32,12 +32,6 @@
#include <utility>
#include <vector>
-namespace llvm {
-
-class Triple;
-
-} // namespace llvm
-
namespace clang {
class ASTReader;
@@ -46,20 +40,11 @@ class CompilerInvocation;
class DiagnosticsEngine;
class ExternalSemaSource;
class FrontendOptions;
-class HeaderSearch;
-class HeaderSearchOptions;
-class LangOptions;
class PCHContainerReader;
class Preprocessor;
class PreprocessorOptions;
class PreprocessorOutputOptions;
-/// Apply the header search options to get given HeaderSearch object.
-void ApplyHeaderSearchOptions(HeaderSearch &HS,
- const HeaderSearchOptions &HSOpts,
- const LangOptions &Lang,
- const llvm::Triple &triple);
-
/// InitializePreprocessor - Initialize the preprocessor getting it and the
/// environment ready to process a single file.
void InitializePreprocessor(Preprocessor &PP, const PreprocessorOptions &PPOpts,
diff --git a/contrib/llvm-project/clang/include/clang/IndexSerialization/SerializablePathCollection.h b/contrib/llvm-project/clang/include/clang/IndexSerialization/SerializablePathCollection.h
index 20cf8fbdad96..eb66e725000c 100644
--- a/contrib/llvm-project/clang/include/clang/IndexSerialization/SerializablePathCollection.h
+++ b/contrib/llvm-project/clang/include/clang/IndexSerialization/SerializablePathCollection.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_INDEX_SerializablePathCollection_H
-#define LLVM_CLANG_INDEX_SerializablePathCollection_H
+#ifndef LLVM_CLANG_INDEXSERIALIZATION_SERIALIZABLEPATHCOLLECTION_H
+#define LLVM_CLANG_INDEXSERIALIZATION_SERIALIZABLEPATHCOLLECTION_H
#include "clang/Basic/FileManager.h"
#include "llvm/ADT/APInt.h"
@@ -126,4 +126,4 @@ private:
} // namespace index
} // namespace clang
-#endif // LLVM_CLANG_INDEX_SerializablePathCollection_H
+#endif // LLVM_CLANG_INDEXSERIALIZATION_SERIALIZABLEPATHCOLLECTION_H
diff --git a/contrib/llvm-project/clang/include/clang/Interpreter/Interpreter.h b/contrib/llvm-project/clang/include/clang/Interpreter/Interpreter.h
index 2dc0fd5963a2..721a649deb43 100644
--- a/contrib/llvm-project/clang/include/clang/Interpreter/Interpreter.h
+++ b/contrib/llvm-project/clang/include/clang/Interpreter/Interpreter.h
@@ -28,13 +28,11 @@ namespace llvm {
namespace orc {
class ThreadSafeContext;
}
-class Module;
} // namespace llvm
namespace clang {
class CompilerInstance;
-class DeclGroupRef;
class IncrementalExecutor;
class IncrementalParser;
diff --git a/contrib/llvm-project/clang/include/clang/Lex/DependencyDirectivesSourceMinimizer.h b/contrib/llvm-project/clang/include/clang/Lex/DependencyDirectivesSourceMinimizer.h
index 121ca893e314..56025c8a3ed5 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/DependencyDirectivesSourceMinimizer.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/DependencyDirectivesSourceMinimizer.h
@@ -14,8 +14,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_LEX_DEPENDENCY_DIRECTIVES_SOURCE_MINIMIZER_H
-#define LLVM_CLANG_LEX_DEPENDENCY_DIRECTIVES_SOURCE_MINIMIZER_H
+#ifndef LLVM_CLANG_LEX_DEPENDENCYDIRECTIVESSOURCEMINIMIZER_H
+#define LLVM_CLANG_LEX_DEPENDENCYDIRECTIVESSOURCEMINIMIZER_H
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
@@ -112,4 +112,4 @@ bool minimizeSourceToDependencyDirectives(
} // end namespace clang
-#endif // LLVM_CLANG_LEX_DEPENDENCY_DIRECTIVES_SOURCE_MINIMIZER_H
+#endif // LLVM_CLANG_LEX_DEPENDENCYDIRECTIVESSOURCEMINIMIZER_H
diff --git a/contrib/llvm-project/clang/include/clang/Lex/HeaderSearch.h b/contrib/llvm-project/clang/include/clang/Lex/HeaderSearch.h
index b3445703f782..74768717470b 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/HeaderSearch.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/HeaderSearch.h
@@ -34,6 +34,12 @@
#include <utility>
#include <vector>
+namespace llvm {
+
+class Triple;
+
+} // namespace llvm
+
namespace clang {
class DiagnosticsEngine;
@@ -51,6 +57,8 @@ class TargetInfo;
/// The preprocessor keeps track of this information for each
/// file that is \#included.
struct HeaderFileInfo {
+ // TODO: Whether the file was imported is not a property of the file itself.
+  // It's preprocessor state; move it there.
/// True if this is a \#import'd file.
unsigned isImport : 1;
@@ -89,9 +97,6 @@ struct HeaderFileInfo {
/// Whether this file has been looked up as a header.
unsigned IsValid : 1;
- /// The number of times the file has been included already.
- unsigned short NumIncludes = 0;
-
/// The ID number of the controlling macro.
///
/// This ID number will be non-zero when there is a controlling
@@ -281,26 +286,15 @@ public:
/// Interface for setting the file search paths.
void SetSearchPaths(std::vector<DirectoryLookup> dirs, unsigned angledDirIdx,
unsigned systemDirIdx, bool noCurDirSearch,
- llvm::DenseMap<unsigned, unsigned> searchDirToHSEntry) {
- assert(angledDirIdx <= systemDirIdx && systemDirIdx <= dirs.size() &&
- "Directory indices are unordered");
- SearchDirs = std::move(dirs);
- SearchDirsUsage.assign(SearchDirs.size(), false);
- AngledDirIdx = angledDirIdx;
- SystemDirIdx = systemDirIdx;
- NoCurDirSearch = noCurDirSearch;
- SearchDirToHSEntry = std::move(searchDirToHSEntry);
- //LookupFileCache.clear();
- }
+ llvm::DenseMap<unsigned, unsigned> searchDirToHSEntry);
/// Add an additional search path.
- void AddSearchPath(const DirectoryLookup &dir, bool isAngled) {
- unsigned idx = isAngled ? SystemDirIdx : AngledDirIdx;
- SearchDirs.insert(SearchDirs.begin() + idx, dir);
- SearchDirsUsage.insert(SearchDirsUsage.begin() + idx, false);
- if (!isAngled)
- AngledDirIdx++;
- SystemDirIdx++;
+ void AddSearchPath(const DirectoryLookup &dir, bool isAngled);
+
+ /// Add an additional system search path.
+ void AddSystemSearchPath(const DirectoryLookup &dir) {
+ SearchDirs.push_back(dir);
+ SearchDirsUsage.push_back(false);
}
/// Set the list of system header prefixes.
@@ -413,7 +407,7 @@ public:
/// found.
Optional<FileEntryRef> LookupFile(
StringRef Filename, SourceLocation IncludeLoc, bool isAngled,
- const DirectoryLookup *FromDir, const DirectoryLookup *&CurDir,
+ const DirectoryLookup *FromDir, const DirectoryLookup **CurDir,
ArrayRef<std::pair<const FileEntry *, const DirectoryEntry *>> Includers,
SmallVectorImpl<char> *SearchPath, SmallVectorImpl<char> *RelativePath,
Module *RequestingModule, ModuleMap::KnownHeader *SuggestedModule,
@@ -474,12 +468,6 @@ public:
ModuleMap::ModuleHeaderRole Role,
bool isCompilingModuleHeader);
- /// Increment the count for the number of times the specified
- /// FileEntry has been entered.
- void IncrementIncludeCount(const FileEntry *File) {
- ++getFileInfo(File).NumIncludes;
- }
-
/// Mark the specified file as having a controlling macro.
///
/// This is used by the multiple-include optimization to eliminate
@@ -856,6 +844,12 @@ private:
bool IsSystem, bool IsFramework);
};
+/// Apply the header search options to the given HeaderSearch object.
+void ApplyHeaderSearchOptions(HeaderSearch &HS,
+ const HeaderSearchOptions &HSOpts,
+ const LangOptions &Lang,
+ const llvm::Triple &triple);
+
} // namespace clang
#endif // LLVM_CLANG_LEX_HEADERSEARCH_H
diff --git a/contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h b/contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h
index ea96bb12bec6..e567f6391531 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h
@@ -450,6 +450,8 @@ public:
ElseLoc(ElseLoc) {}
};
+ using IncludedFilesSet = llvm::DenseSet<const FileEntry *>;
+
private:
friend class ASTReader;
friend class MacroArgs;
@@ -765,6 +767,9 @@ private:
/// in a submodule.
SubmoduleState *CurSubmoduleState;
+ /// The files that have been included.
+ IncludedFilesSet IncludedFiles;
+
/// The set of known macros exported from modules.
llvm::FoldingSet<ModuleMacro> ModuleMacros;
@@ -1224,6 +1229,22 @@ public:
/// \}
+ /// Mark the file as included.
+ /// Returns true if this is the first time the file was included.
+ bool markIncluded(const FileEntry *File) {
+ HeaderInfo.getFileInfo(File);
+ return IncludedFiles.insert(File).second;
+ }
+
+ /// Return true if this header has already been included.
+ bool alreadyIncluded(const FileEntry *File) const {
+ return IncludedFiles.count(File);
+ }
+
+ /// Get the set of included files.
+ IncludedFilesSet &getIncludedFiles() { return IncludedFiles; }
+ const IncludedFilesSet &getIncludedFiles() const { return IncludedFiles; }
+
/// Return the name of the macro defined before \p Loc that has
/// spelling \p Tokens. If there are multiple macros with same spelling,
/// return the last one defined.
@@ -2051,7 +2072,7 @@ public:
Optional<FileEntryRef>
LookupFile(SourceLocation FilenameLoc, StringRef Filename, bool isAngled,
const DirectoryLookup *FromDir, const FileEntry *FromFile,
- const DirectoryLookup *&CurDir, SmallVectorImpl<char> *SearchPath,
+ const DirectoryLookup **CurDir, SmallVectorImpl<char> *SearchPath,
SmallVectorImpl<char> *RelativePath,
ModuleMap::KnownHeader *SuggestedModule, bool *IsMapped,
bool *IsFrameworkFound, bool SkipCache = false);
@@ -2300,7 +2321,7 @@ private:
};
Optional<FileEntryRef> LookupHeaderIncludeOrImport(
- const DirectoryLookup *&CurDir, StringRef &Filename,
+ const DirectoryLookup **CurDir, StringRef &Filename,
SourceLocation FilenameLoc, CharSourceRange FilenameRange,
const Token &FilenameTok, bool &IsFrameworkFound, bool IsImportDecl,
bool &IsMapped, const DirectoryLookup *LookupFrom,
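With the per-FileEntry include counter removed from HeaderSearch, the preprocessor itself now owns the set of included files. A small hypothetical sketch of client code using the new API; the helper function is illustrative and assumes PP and File come from an existing compiler instance:

    #include "clang/Basic/FileEntry.h"
    #include "clang/Lex/Preprocessor.h"
    #include <cassert>

    // Record an #include and report whether this is the first inclusion.
    bool noteInclude(clang::Preprocessor &PP, const clang::FileEntry *File) {
      if (PP.alreadyIncluded(File))
        return false;                        // seen before
      bool First = PP.markIncluded(File);    // also creates the HeaderFileInfo
      assert(First && "alreadyIncluded() said the file was new");
      return First;
    }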
diff --git a/contrib/llvm-project/clang/include/clang/Lex/PreprocessorExcludedConditionalDirectiveSkipMapping.h b/contrib/llvm-project/clang/include/clang/Lex/PreprocessorExcludedConditionalDirectiveSkipMapping.h
index 1a0d5ed57b28..49687cb5cc85 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/PreprocessorExcludedConditionalDirectiveSkipMapping.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/PreprocessorExcludedConditionalDirectiveSkipMapping.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_LEX_PREPROCESSOR_EXCLUDED_COND_DIRECTIVE_SKIP_MAPPING_H
-#define LLVM_CLANG_LEX_PREPROCESSOR_EXCLUDED_COND_DIRECTIVE_SKIP_MAPPING_H
+#ifndef LLVM_CLANG_LEX_PREPROCESSOREXCLUDEDCONDITIONALDIRECTIVESKIPMAPPING_H
+#define LLVM_CLANG_LEX_PREPROCESSOREXCLUDEDCONDITIONALDIRECTIVESKIPMAPPING_H
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/DenseMap.h"
@@ -27,4 +27,4 @@ using ExcludedPreprocessorDirectiveSkipMapping =
} // end namespace clang
-#endif // LLVM_CLANG_LEX_PREPROCESSOR_EXCLUDED_COND_DIRECTIVE_SKIP_MAPPING_H
+#endif // LLVM_CLANG_LEX_PREPROCESSOREXCLUDEDCONDITIONALDIRECTIVESKIPMAPPING_H
diff --git a/contrib/llvm-project/clang/include/clang/Parse/Parser.h b/contrib/llvm-project/clang/include/clang/Parse/Parser.h
index 741a484390b2..74010ca66f99 100644
--- a/contrib/llvm-project/clang/include/clang/Parse/Parser.h
+++ b/contrib/llvm-project/clang/include/clang/Parse/Parser.h
@@ -1475,8 +1475,7 @@ private:
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
- ParsedTemplateInfo()
- : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }
+ ParsedTemplateInfo() : Kind(NonTemplate), TemplateParams(nullptr) {}
ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
bool isSpecialization,
@@ -1976,6 +1975,7 @@ private:
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK,
+ bool MissingOK,
ForRangeInfo *FRI = nullptr,
bool EnterForConditionScope = false);
DeclGroupPtrTy
@@ -2080,8 +2080,8 @@ private:
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc, Sema::ConditionKind CK,
- SourceLocation *LParenLoc = nullptr,
- SourceLocation *RParenLoc = nullptr);
+ bool MissingOK, SourceLocation *LParenLoc,
+ SourceLocation *RParenLoc);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
@@ -3315,6 +3315,11 @@ private:
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
+ /// Parses the 'indirect' clause.
+ /// \param ParseOnly true to skip the clause's semantic actions and return
+ /// false.
+ bool ParseOpenMPIndirectClause(Sema::DeclareTargetContextInfo &DTCI,
+ bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
@@ -3454,7 +3459,8 @@ private:
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
- SourceLocation &RAngleLoc);
+ SourceLocation &RAngleLoc,
+ TemplateTy NameHint = nullptr);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
@@ -3464,7 +3470,8 @@ private:
bool TypeConstraint = false);
void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS,
bool IsClassName = false);
- bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
+ bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs,
+ TemplateTy Template, SourceLocation OpenLoc);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
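The new ParseOpenMPIndirectClause hook corresponds to the OpenMP 5.1 'indirect' clause on declare target directives. A sketch of the source-level syntax it is meant to accept; the file name, flags, and program are illustrative, not a test from the tree:

    // Built with something like: clang++ -fopenmp -fopenmp-version=51 -c demo.cpp
    #pragma omp begin declare target indirect
    void callee(void) {}
    #pragma omp end declare target

    void caller(void (*fp)(void)) {
      #pragma omp target
      fp(); // indirect call through a device function pointer
    }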
diff --git a/contrib/llvm-project/clang/include/clang/Parse/RAIIObjectsForParser.h b/contrib/llvm-project/clang/include/clang/Parse/RAIIObjectsForParser.h
index bc1754614ad9..8e6e03685c50 100644
--- a/contrib/llvm-project/clang/include/clang/Parse/RAIIObjectsForParser.h
+++ b/contrib/llvm-project/clang/include/clang/Parse/RAIIObjectsForParser.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_LIB_PARSE_RAIIOBJECTSFORPARSER_H
-#define LLVM_CLANG_LIB_PARSE_RAIIOBJECTSFORPARSER_H
+#ifndef LLVM_CLANG_PARSE_RAIIOBJECTSFORPARSER_H
+#define LLVM_CLANG_PARSE_RAIIOBJECTSFORPARSER_H
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Parse/Parser.h"
diff --git a/contrib/llvm-project/clang/include/clang/Sema/AnalysisBasedWarnings.h b/contrib/llvm-project/clang/include/clang/Sema/AnalysisBasedWarnings.h
index 49b69c585ff7..13a88bb9f896 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/AnalysisBasedWarnings.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/AnalysisBasedWarnings.h
@@ -18,10 +18,8 @@
namespace clang {
-class BlockExpr;
class Decl;
class FunctionDecl;
-class ObjCMethodDecl;
class QualType;
class Sema;
namespace sema {
diff --git a/contrib/llvm-project/clang/include/clang/Sema/CleanupInfo.h b/contrib/llvm-project/clang/include/clang/Sema/CleanupInfo.h
index ea9df49f77e1..45d16fea93e0 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/CleanupInfo.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/CleanupInfo.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_SEMA_CLEANUP_INFO_H
-#define LLVM_CLANG_SEMA_CLEANUP_INFO_H
+#ifndef LLVM_CLANG_SEMA_CLEANUPINFO_H
+#define LLVM_CLANG_SEMA_CLEANUPINFO_H
namespace clang {
diff --git a/contrib/llvm-project/clang/include/clang/Sema/CodeCompleteConsumer.h b/contrib/llvm-project/clang/include/clang/Sema/CodeCompleteConsumer.h
index 6b37e3c50dba..41c495882b27 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/CodeCompleteConsumer.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/CodeCompleteConsumer.h
@@ -1009,12 +1009,18 @@ public:
/// The candidate is a function declaration.
CK_Function,
- /// The candidate is a function template.
+ /// The candidate is a function template, arguments are being completed.
CK_FunctionTemplate,
/// The "candidate" is actually a variable, expression, or block
/// for which we only have a function prototype.
- CK_FunctionType
+ CK_FunctionType,
+
+ /// The candidate is a template, template arguments are being completed.
+ CK_Template,
+
+ /// The candidate is aggregate initialization of a record type.
+ CK_Aggregate,
};
private:
@@ -1033,17 +1039,39 @@ public:
/// The function type that describes the entity being called,
/// when Kind == CK_FunctionType.
const FunctionType *Type;
+
+ /// The template overload candidate, available when
+ /// Kind == CK_Template.
+ const TemplateDecl *Template;
+
+ /// The class being aggregate-initialized,
+ /// when Kind == CK_Aggregate
+ const RecordDecl *AggregateType;
};
public:
OverloadCandidate(FunctionDecl *Function)
- : Kind(CK_Function), Function(Function) {}
+ : Kind(CK_Function), Function(Function) {
+ assert(Function != nullptr);
+ }
OverloadCandidate(FunctionTemplateDecl *FunctionTemplateDecl)
- : Kind(CK_FunctionTemplate), FunctionTemplate(FunctionTemplateDecl) {}
+ : Kind(CK_FunctionTemplate), FunctionTemplate(FunctionTemplateDecl) {
+ assert(FunctionTemplateDecl != nullptr);
+ }
OverloadCandidate(const FunctionType *Type)
- : Kind(CK_FunctionType), Type(Type) {}
+ : Kind(CK_FunctionType), Type(Type) {
+ assert(Type != nullptr);
+ }
+
+ OverloadCandidate(const RecordDecl *Aggregate)
+ : Kind(CK_Aggregate), AggregateType(Aggregate) {
+ assert(Aggregate != nullptr);
+ }
+
+ OverloadCandidate(const TemplateDecl *Template)
+ : Kind(CK_Template), Template(Template) {}
/// Determine the kind of overload candidate.
CandidateKind getKind() const { return Kind; }
@@ -1062,13 +1090,35 @@ public:
/// function is stored.
const FunctionType *getFunctionType() const;
+ const TemplateDecl *getTemplate() const {
+ assert(getKind() == CK_Template && "Not a template");
+ return Template;
+ }
+
+ /// Retrieve the aggregate type being initialized.
+ const RecordDecl *getAggregate() const {
+ assert(getKind() == CK_Aggregate);
+ return AggregateType;
+ }
+
+ /// Get the number of parameters in this signature.
+ unsigned getNumParams() const;
+
+ /// Get the type of the Nth parameter.
+ /// Returns null if the type is unknown or N is out of range.
+ QualType getParamType(unsigned N) const;
+
+ /// Get the declaration of the Nth parameter.
+ /// Returns null if the decl is unknown or N is out of range.
+ const NamedDecl *getParamDecl(unsigned N) const;
+
/// Create a new code-completion string that describes the function
/// signature of this overload candidate.
- CodeCompletionString *CreateSignatureString(unsigned CurrentArg,
- Sema &S,
- CodeCompletionAllocator &Allocator,
- CodeCompletionTUInfo &CCTUInfo,
- bool IncludeBriefComments) const;
+ CodeCompletionString *
+ CreateSignatureString(unsigned CurrentArg, Sema &S,
+ CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo,
+ bool IncludeBriefComments, bool Braced) const;
};
CodeCompleteConsumer(const CodeCompleteOptions &CodeCompleteOpts)
@@ -1142,7 +1192,8 @@ public:
virtual void ProcessOverloadCandidates(Sema &S, unsigned CurrentArg,
OverloadCandidate *Candidates,
unsigned NumCandidates,
- SourceLocation OpenParLoc) {}
+ SourceLocation OpenParLoc,
+ bool Braced) {}
//@}
/// Retrieve the allocator that will be used to allocate
@@ -1193,7 +1244,8 @@ public:
void ProcessOverloadCandidates(Sema &S, unsigned CurrentArg,
OverloadCandidate *Candidates,
unsigned NumCandidates,
- SourceLocation OpenParLoc) override;
+ SourceLocation OpenParLoc,
+ bool Braced) override;
bool isResultFilteredOut(StringRef Filter, CodeCompletionResult Results) override;
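Signature help now also covers template argument lists and braced aggregate initialization, so consumers receive two new candidate kinds plus kind-agnostic parameter accessors. A hypothetical consumer-side sketch; the printing logic is illustrative:

    #include "clang/AST/Decl.h"
    #include "clang/Sema/CodeCompleteConsumer.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace clang;

    // Dump one overload candidate using the accessors added above.
    void dumpCandidate(const CodeCompleteConsumer::OverloadCandidate &C,
                       unsigned CurrentArg) {
      switch (C.getKind()) {
      case CodeCompleteConsumer::OverloadCandidate::CK_Template:
        llvm::errs() << "template candidate\n";
        break;
      case CodeCompleteConsumer::OverloadCandidate::CK_Aggregate:
        llvm::errs() << "aggregate init of "
                     << C.getAggregate()->getNameAsString() << "\n";
        break;
      default:
        llvm::errs() << "function-style candidate\n";
        break;
      }
      // The parameter accessors work for every kind; the type/decl variants
      // return null when the information is unknown or N is out of range.
      if (CurrentArg < C.getNumParams())
        if (const NamedDecl *Param = C.getParamDecl(CurrentArg))
          llvm::errs() << "  current parameter: " << Param->getNameAsString()
                       << "\n";
    }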
diff --git a/contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h b/contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h
index 2704a9c1fc78..2437be497de4 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h
@@ -434,8 +434,7 @@ public:
FS_noreturn_specified(false), Friend_specified(false),
ConstexprSpecifier(
static_cast<unsigned>(ConstexprSpecKind::Unspecified)),
- FS_explicit_specifier(), Attrs(attrFactory), writtenBS(),
- ObjCQualifiers(nullptr) {}
+ Attrs(attrFactory), writtenBS(), ObjCQualifiers(nullptr) {}
// storage-class-specifier
SCS getStorageClassSpec() const { return (SCS)StorageClassSpec; }
diff --git a/contrib/llvm-project/clang/include/clang/Sema/ExternalSemaSource.h b/contrib/llvm-project/clang/include/clang/Sema/ExternalSemaSource.h
index 9c18aa1398d3..17a7ffd3bb68 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/ExternalSemaSource.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/ExternalSemaSource.h
@@ -26,11 +26,9 @@ template <class T, unsigned n> class SmallSetVector;
namespace clang {
class CXXConstructorDecl;
-class CXXDeleteExpr;
class CXXRecordDecl;
class DeclaratorDecl;
class LookupResult;
-struct ObjCMethodList;
class Scope;
class Sema;
class TypedefNameDecl;
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Initialization.h b/contrib/llvm-project/clang/include/clang/Sema/Initialization.h
index 679e12ee22d4..21adc9fa2ac0 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Initialization.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Initialization.h
@@ -38,7 +38,6 @@
namespace clang {
-class APValue;
class CXXBaseSpecifier;
class CXXConstructorDecl;
class ObjCMethodDecl;
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Overload.h b/contrib/llvm-project/clang/include/clang/Sema/Overload.h
index 88405a63b735..48997e186ef6 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Overload.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Overload.h
@@ -577,8 +577,7 @@ class Sema;
ImplicitConversionSequence()
: ConversionKind(Uninitialized),
- InitializerListOfIncompleteArray(false),
- InitializerListContainerType() {
+ InitializerListOfIncompleteArray(false) {
Standard.setAsIdentityConversion();
}
diff --git a/contrib/llvm-project/clang/include/clang/Sema/ParsedAttr.h b/contrib/llvm-project/clang/include/clang/Sema/ParsedAttr.h
index 6403179cb327..4fa6f09d3321 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/ParsedAttr.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/ParsedAttr.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_SEMA_ATTRIBUTELIST_H
-#define LLVM_CLANG_SEMA_ATTRIBUTELIST_H
+#ifndef LLVM_CLANG_SEMA_PARSEDATTR_H
+#define LLVM_CLANG_SEMA_PARSEDATTR_H
#include "clang/Basic/AttrSubjectMatchRules.h"
#include "clang/Basic/AttributeCommonInfo.h"
@@ -1080,7 +1080,7 @@ struct ParsedAttributesWithRange : ParsedAttributes {
SourceRange Range;
};
struct ParsedAttributesViewWithRange : ParsedAttributesView {
- ParsedAttributesViewWithRange() : ParsedAttributesView() {}
+ ParsedAttributesViewWithRange() {}
void clearListOnly() {
ParsedAttributesView::clearListOnly();
Range = SourceRange();
@@ -1159,4 +1159,4 @@ inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB,
} // namespace clang
-#endif // LLVM_CLANG_SEMA_ATTRIBUTELIST_H
+#endif // LLVM_CLANG_SEMA_PARSEDATTR_H
diff --git a/contrib/llvm-project/clang/include/clang/Sema/ParsedTemplate.h b/contrib/llvm-project/clang/include/clang/Sema/ParsedTemplate.h
index f0245b93c7eb..926cec2c547c 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/ParsedTemplate.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/ParsedTemplate.h
@@ -63,8 +63,7 @@ namespace clang {
ParsedTemplateTy Template,
SourceLocation TemplateLoc)
: Kind(ParsedTemplateArgument::Template),
- Arg(Template.getAsOpaquePtr()),
- SS(SS), Loc(TemplateLoc), EllipsisLoc() { }
+ Arg(Template.getAsOpaquePtr()), SS(SS), Loc(TemplateLoc) {}
/// Determine whether the given template argument is invalid.
bool isInvalid() const { return Arg == nullptr; }
diff --git a/contrib/llvm-project/clang/include/clang/Sema/ScopeInfo.h b/contrib/llvm-project/clang/include/clang/Sema/ScopeInfo.h
index ccd15ea6a818..08bf53d22f8a 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/ScopeInfo.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/ScopeInfo.h
@@ -58,7 +58,6 @@ class Scope;
class Stmt;
class SwitchStmt;
class TemplateParameterList;
-class TemplateTypeParmDecl;
class VarDecl;
namespace sema {
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Sema.h b/contrib/llvm-project/clang/include/clang/Sema/Sema.h
index 79834554a50d..4b609f4b1477 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Sema.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Sema.h
@@ -1365,10 +1365,10 @@ public:
};
private:
- llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
+ llvm::PointerIntPair<CXXMethodDecl *, 2> Pair;
public:
- SpecialMemberOverloadResult() : Pair() {}
+ SpecialMemberOverloadResult() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
@@ -1565,8 +1565,11 @@ public:
/// assignment.
llvm::DenseMap<const VarDecl *, int> RefsMinusAssignments;
+private:
Optional<std::unique_ptr<DarwinSDKInfo>> CachedDarwinSDKInfo;
+ bool WarnedDarwinSDKInfoMissing = false;
+
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
@@ -1595,8 +1598,10 @@ public:
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
+
DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
StringRef Platform);
+ DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking();
///Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
@@ -4315,6 +4320,8 @@ public:
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
+ bool CheckRedeclarationExported(NamedDecl *New, NamedDecl *Old);
+ bool CheckRedeclarationInModule(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
@@ -4861,7 +4868,8 @@ public:
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
- StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
+ StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
+ bool AllowRecovery = false);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
NamedReturnInfo &NRInfo,
bool SupressSimplerImplicitMoves);
@@ -5052,6 +5060,7 @@ public:
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
+ TypeSourceInfo *TransformToPotentiallyEvaluated(TypeSourceInfo *TInfo);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
@@ -7520,7 +7529,7 @@ public:
RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation())
: TemplateKW(TemplateKWLoc) {}
/// Template name is unconditionally required.
- RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {}
+ RequiredTemplateKind(TemplateNameIsRequiredTag) {}
SourceLocation getTemplateKeywordLoc() const {
return TemplateKW.getValueOr(SourceLocation());
@@ -10332,6 +10341,9 @@ private:
/// The directive kind, `begin declare target` or `declare target`.
OpenMPDirectiveKind Kind;
+ /// The directive with indirect clause.
+ Optional<Expr *> Indirect;
+
/// The directive location.
SourceLocation Loc;
@@ -10638,7 +10650,7 @@ public:
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
- OMPDeclareTargetDeclAttr::DevTypeTy DT);
+ DeclareTargetContextInfo &DTCI);
/// Check declaration inside target region.
void
@@ -12079,9 +12091,12 @@ public:
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
+ QualType PreferredConditionType(ConditionKind K) const {
+ return K == ConditionKind::Switch ? Context.IntTy : Context.BoolTy;
+ }
- ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
- Expr *SubExpr, ConditionKind CK);
+ ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr,
+ ConditionKind CK, bool MissingOK = false);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
@@ -12537,18 +12552,18 @@ public:
/// signatures that were considered.
///
/// FIXME: rename to GuessCallArgumentType to reduce confusion.
- QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
+ QualType ProduceCallSignatureHelp(Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
- QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
- SourceLocation Loc,
+ QualType ProduceConstructorSignatureHelp(QualType Type, SourceLocation Loc,
ArrayRef<Expr *> Args,
- SourceLocation OpenParLoc);
- QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
- CXXScopeSpec SS,
- ParsedType TemplateTypeTy,
- ArrayRef<Expr *> ArgExprs,
- IdentifierInfo *II,
- SourceLocation OpenParLoc);
+ SourceLocation OpenParLoc,
+ bool Braced);
+ QualType ProduceCtorInitMemberSignatureHelp(
+ Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy,
+ ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc,
+ bool Braced);
+ QualType ProduceTemplateArgumentSignatureHelp(
+ TemplateTy, ArrayRef<ParsedTemplateArgument>, SourceLocation LAngleLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
/// Trigger code completion for a record of \p BaseType. \p InitExprs are
/// expressions in the initializer list seen so far and \p D is the current
@@ -13061,7 +13076,7 @@ private:
ValueDecl *MD;
CharUnits Alignment;
- MisalignedMember() : E(), RD(), MD(), Alignment() {}
+ MisalignedMember() : E(), RD(), MD() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
@@ -13142,6 +13157,9 @@ public:
/// Adds Callee to DeviceCallGraph if we don't know if its caller will be
/// codegen'ed yet.
bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
+ void deepTypeCheckForSYCLDevice(SourceLocation UsedAt,
+ llvm::DenseSet<QualType> Visited,
+ ValueDecl *DeclToCheck);
};
/// RAII object that enters a new expression evaluation context.
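Condition handling in Sema gains a MissingOK flag and a helper that reports which type code completion should prefer for a condition. A hedged sketch of a caller using both; the surrounding function is hypothetical:

    #include "clang/Sema/Sema.h"

    using namespace clang;

    // Build an 'if' condition, tolerating a missing expression so error
    // recovery and code completion can still proceed.
    Sema::ConditionResult buildIfCondition(Sema &S, Scope *Sc,
                                           SourceLocation Loc, Expr *CondExpr) {
      // For an 'if' the preferred condition type is bool; for 'switch', int.
      QualType Preferred =
          S.PreferredConditionType(Sema::ConditionKind::Boolean);
      (void)Preferred; // e.g. handed to code completion as the expected type

      return S.ActOnCondition(Sc, Loc, CondExpr, Sema::ConditionKind::Boolean,
                              /*MissingOK=*/true);
    }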
diff --git a/contrib/llvm-project/clang/include/clang/Sema/SemaConcept.h b/contrib/llvm-project/clang/include/clang/Sema/SemaConcept.h
index dc5f0ec97e85..b73a152533d1 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/SemaConcept.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/SemaConcept.h
@@ -152,4 +152,4 @@ private:
} // clang
-#endif //LLVM_CLANG_SEMA_SEMACONCEPT_H
+#endif // LLVM_CLANG_SEMA_SEMACONCEPT_H
diff --git a/contrib/llvm-project/clang/include/clang/Sema/TemplateInstCallback.h b/contrib/llvm-project/clang/include/clang/Sema/TemplateInstCallback.h
index 3ab0e8c6be9f..9258a7f41ac1 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/TemplateInstCallback.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/TemplateInstCallback.h
@@ -11,8 +11,8 @@
//
//===---------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TEMPLATE_INST_CALLBACK_H
-#define LLVM_CLANG_TEMPLATE_INST_CALLBACK_H
+#ifndef LLVM_CLANG_SEMA_TEMPLATEINSTCALLBACK_H
+#define LLVM_CLANG_SEMA_TEMPLATEINSTCALLBACK_H
#include "clang/Sema/Sema.h"
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/ASTBitCodes.h b/contrib/llvm-project/clang/include/clang/Serialization/ASTBitCodes.h
index 341da5bd1d62..f98e173b158c 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/ASTBitCodes.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/ASTBitCodes.h
@@ -695,6 +695,9 @@ enum ASTRecordTypes {
/// Record code for \#pragma float_control options.
FLOAT_CONTROL_PRAGMA_OPTIONS = 65,
+
+ /// Record code for included files.
+ PP_INCLUDED_FILES = 66,
};
/// Record types used within a source manager block.
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/ASTReader.h b/contrib/llvm-project/clang/include/clang/Serialization/ASTReader.h
index f24ccf579aa8..d46a6c4500f4 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/ASTReader.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/ASTReader.h
@@ -84,7 +84,6 @@ class GlobalModuleIndex;
struct HeaderFileInfo;
class HeaderSearchOptions;
class LangOptions;
-class LazyASTUnresolvedSet;
class MacroInfo;
class InMemoryModuleCache;
class NamedDecl;
@@ -94,7 +93,6 @@ class ObjCInterfaceDecl;
class PCHContainerReader;
class Preprocessor;
class PreprocessorOptions;
-struct QualifierInfo;
class Sema;
class SourceManager;
class Stmt;
@@ -1331,6 +1329,7 @@ private:
llvm::Error ReadSourceManagerBlock(ModuleFile &F);
llvm::BitstreamCursor &SLocCursorForID(int ID);
SourceLocation getImportLocation(ModuleFile *F);
+ void readIncludedFiles(ModuleFile &F, StringRef Blob, Preprocessor &PP);
ASTReadResult ReadModuleMapFileBlock(RecordData &Record, ModuleFile &F,
const ModuleFile *ImportedBy,
unsigned ClientLoadCapabilities);
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/ASTWriter.h b/contrib/llvm-project/clang/include/clang/Serialization/ASTWriter.h
index 978f6d86ea5c..e455e4d4d96a 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/ASTWriter.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/ASTWriter.h
@@ -43,26 +43,13 @@
#include <utility>
#include <vector>
-namespace llvm {
-
-class APFloat;
-class APInt;
-class APSInt;
-
-} // namespace llvm
-
namespace clang {
class ASTContext;
class ASTReader;
-class ASTUnresolvedSet;
class Attr;
-class CXXBaseSpecifier;
-class CXXCtorInitializer;
class CXXRecordDecl;
-class CXXTemporary;
class FileEntry;
-class FPOptions;
class FPOptionsOverride;
class FunctionDecl;
class HeaderSearch;
@@ -79,16 +66,13 @@ class NamedDecl;
class ObjCInterfaceDecl;
class PreprocessingRecord;
class Preprocessor;
-struct QualifierInfo;
class RecordDecl;
class Sema;
class SourceManager;
class Stmt;
class StoredDeclsList;
class SwitchCase;
-class TemplateParameterList;
class Token;
-class TypeSourceInfo;
/// Writes an AST file containing the contents of a translation unit.
///
@@ -481,6 +465,7 @@ private:
std::set<const FileEntry *> &AffectingModuleMaps);
void WriteSourceManagerBlock(SourceManager &SourceMgr,
const Preprocessor &PP);
+ void writeIncludedFiles(raw_ostream &Out, const Preprocessor &PP);
void WritePreprocessor(const Preprocessor &PP, bool IsModule);
void WriteHeaderSearch(const HeaderSearch &HS);
void WritePreprocessorDetail(PreprocessingRecord &PPRec,
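The new PP_INCLUDED_FILES record and the readIncludedFiles/writeIncludedFiles hooks persist the preprocessor's included-file set across PCH and module boundaries. A minimal sketch of the data that could feed such a record, using only the API shown above; the helper and the UID-based encoding are illustrative, not the actual bitstream format:

    #include "clang/Basic/FileEntry.h"
    #include "clang/Lex/Preprocessor.h"
    #include "llvm/ADT/STLExtras.h"
    #include <vector>

    // Collect a deterministic list of UIDs for every file the preprocessor
    // has marked as included; a writer could emit this as the record payload.
    std::vector<unsigned> collectIncludedFileUIDs(const clang::Preprocessor &PP) {
      std::vector<unsigned> UIDs;
      for (const clang::FileEntry *FE : PP.getIncludedFiles())
        UIDs.push_back(FE->getUID());
      llvm::sort(UIDs); // DenseSet iteration order is unstable
      return UIDs;
    }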
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/GlobalModuleIndex.h b/contrib/llvm-project/clang/include/clang/Serialization/GlobalModuleIndex.h
index 5f4812626224..9d6b52a97f52 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/GlobalModuleIndex.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/GlobalModuleIndex.h
@@ -31,8 +31,6 @@ class MemoryBuffer;
namespace clang {
-class DirectoryEntry;
-class FileEntry;
class FileManager;
class IdentifierIterator;
class PCHContainerOperations;
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/ModuleFileExtension.h b/contrib/llvm-project/clang/include/clang/Serialization/ModuleFileExtension.h
index 3e84a65c4b80..2168ce2ce607 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/ModuleFileExtension.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/ModuleFileExtension.h
@@ -154,4 +154,4 @@ public:
} // end namespace clang
-#endif // LLVM_CLANG_FRONTEND_MODULEFILEEXTENSION_H
+#endif // LLVM_CLANG_SERIALIZATION_MODULEFILEEXTENSION_H
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/ModuleManager.h b/contrib/llvm-project/clang/include/clang/Serialization/ModuleManager.h
index 7081eedad4b4..4305bae5ee95 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/ModuleManager.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/ModuleManager.h
@@ -105,10 +105,6 @@ class ModuleManager {
Stack.reserve(N);
}
- ~VisitState() {
- delete NextState;
- }
-
/// The stack used when marking the imports of a particular module
/// as not-to-be-visited.
SmallVector<ModuleFile *, 4> Stack;
@@ -121,14 +117,14 @@ class ModuleManager {
unsigned NextVisitNumber = 1;
/// The next visit state.
- VisitState *NextState = nullptr;
+ std::unique_ptr<VisitState> NextState;
};
/// The first visit() state in the chain.
- VisitState *FirstVisitState = nullptr;
+ std::unique_ptr<VisitState> FirstVisitState;
- VisitState *allocateVisitState();
- void returnVisitState(VisitState *State);
+ std::unique_ptr<VisitState> allocateVisitState();
+ void returnVisitState(std::unique_ptr<VisitState> State);
public:
using ModuleIterator = llvm::pointee_iterator<
@@ -142,7 +138,6 @@ public:
explicit ModuleManager(FileManager &FileMgr, InMemoryModuleCache &ModuleCache,
const PCHContainerReader &PCHContainerRdr,
const HeaderSearch &HeaderSearchInfo);
- ~ModuleManager();
/// Forward iterator to traverse all loaded modules.
ModuleIterator begin() { return Chain.begin(); }
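VisitState objects now own their successor through std::unique_ptr, which removes the recursive manual delete and the explicit ~ModuleManager. A standalone sketch of the same unique_ptr free-list pattern; this is generic code, not the clang class:

    #include <memory>
    #include <utility>
    #include <vector>

    struct VisitState {
      std::vector<int> Stack;           // per-visit scratch data
      std::unique_ptr<VisitState> Next; // owns the next free state
    };

    class StatePool {
      std::unique_ptr<VisitState> FirstFree;

    public:
      std::unique_ptr<VisitState> allocate() {
        if (FirstFree) {
          auto State = std::move(FirstFree);  // pop the head of the free list
          FirstFree = std::move(State->Next);
          return State;
        }
        return std::make_unique<VisitState>();
      }

      void release(std::unique_ptr<VisitState> State) {
        State->Stack.clear();
        State->Next = std::move(FirstFree);   // push back onto the free list
        FirstFree = std::move(State);
      }
      // No destructor needed: the chain of unique_ptrs cleans itself up.
    };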
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/PCHContainerOperations.h b/contrib/llvm-project/clang/include/clang/Serialization/PCHContainerOperations.h
index 33fc4a0a24e0..9f9700a418a9 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/PCHContainerOperations.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/PCHContainerOperations.h
@@ -22,8 +22,6 @@ class raw_pwrite_stream;
namespace clang {
class ASTConsumer;
-class CodeGenOptions;
-class DiagnosticsEngine;
class CompilerInstance;
struct PCHBuffer {
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h
index e2be957821b9..bdfe3901c5b8 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h
@@ -11,19 +11,15 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_CLANGSACHECKERS_H
-#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_CLANGSACHECKERS_H
+#ifndef LLVM_CLANG_STATICANALYZER_CHECKERS_BUILTINCHECKERREGISTRATION_H
+#define LLVM_CLANG_STATICANALYZER_CHECKERS_BUILTINCHECKERREGISTRATION_H
#include "clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h"
namespace clang {
-
-class LangOptions;
-
namespace ento {
class CheckerManager;
-class CheckerRegistry;
#define GET_CHECKERS
#define CHECKER(FULLNAME, CLASS, HELPTEXT, DOC_URI, IS_HIDDEN) \
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/MPIFunctionClassifier.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/MPIFunctionClassifier.h
index bbc5111ccacc..6243bbd5d53b 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/MPIFunctionClassifier.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/MPIFunctionClassifier.h
@@ -11,8 +11,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MPICHECKER_MPIFUNCTIONCLASSIFIER_H
-#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MPICHECKER_MPIFUNCTIONCLASSIFIER_H
+#ifndef LLVM_CLANG_STATICANALYZER_CHECKERS_MPIFUNCTIONCLASSIFIER_H
+#define LLVM_CLANG_STATICANALYZER_CHECKERS_MPIFUNCTIONCLASSIFIER_H
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.def b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.def
index c0cade46d614..7b976ddeaba2 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.def
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.def
@@ -320,6 +320,11 @@ ANALYZER_OPTION(bool, ShouldDisplayCheckerNameForText, "display-checker-name",
"Display the checker name for textual outputs",
true)
+ANALYZER_OPTION(bool, ShouldSupportSymbolicIntegerCasts,
+ "support-symbolic-integer-casts",
+ "Produce cast symbols for integral types.",
+ false)
+
ANALYZER_OPTION(
bool, ShouldConsiderSingleElementArraysAsFlexibleArrayMembers,
"consider-single-element-arrays-as-flexible-array-members",
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
index 3c93ebeccde8..9ec5bcc87554 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
@@ -48,7 +48,6 @@ namespace clang {
class AnalyzerOptions;
class ASTContext;
class Decl;
-class DiagnosticsEngine;
class LocationContext;
class SourceManager;
class Stmt;
@@ -61,7 +60,6 @@ class ExplodedGraph;
class ExplodedNode;
class ExprEngine;
class MemRegion;
-class SValBuilder;
//===----------------------------------------------------------------------===//
// Interface for individual bug reports.
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugType.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugType.h
index 49ab25eca2dd..558881176327 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugType.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugType.h
@@ -23,8 +23,6 @@ namespace clang {
namespace ento {
class BugReporter;
-class ExplodedNode;
-class ExprEngine;
class BugType {
private:
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h
index d2f71baa56a4..7e45d24e5695 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h
@@ -28,7 +28,6 @@ namespace clang {
class AnalyzerOptions;
class CallExpr;
-class CXXNewExpr;
class Decl;
class LocationContext;
class Stmt;
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h
index 71a590d9e9a2..2694aac478cd 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h
@@ -20,7 +20,6 @@
namespace clang {
-class AnalyzerOptions;
class MacroExpansionContext;
class Preprocessor;
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h
index bb598af68166..8a778389bcbe 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h
@@ -34,7 +34,6 @@
namespace clang {
class CXXBaseSpecifier;
-class DeclaratorDecl;
namespace ento {
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
index d135e70dd75d..bfaeb06951d7 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
@@ -76,7 +76,6 @@ enum CallEventKind {
};
class CallEvent;
-class CallDescription;
template<typename T = CallEvent>
class CallEventRef : public IntrusiveRefCntPtr<const T> {
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
index a383012dc351..94fa0d9ecdc3 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
@@ -84,6 +84,8 @@ public:
return Eng.getContext();
}
+ const ASTContext &getASTContext() const { return Eng.getContext(); }
+
const LangOptions &getLangOpts() const {
return Eng.getContext().getLangOpts();
}
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h
index a81d67ab3063..e89a993ca8d2 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h
@@ -24,7 +24,6 @@ namespace clang {
class Expr;
class VarDecl;
class QualType;
-class AttributedType;
class Preprocessor;
namespace ento {
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeInfo.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeInfo.h
index 6d2b495dc0f5..3ff453a8de4f 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeInfo.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeInfo.h
@@ -18,7 +18,7 @@ namespace ento {
/// of a region in a given state along the analysis path.
class DynamicTypeInfo {
public:
- DynamicTypeInfo() : DynTy(QualType()) {}
+ DynamicTypeInfo() {}
DynamicTypeInfo(QualType Ty, bool CanBeSub = true)
: DynTy(Ty), CanBeASubClass(CanBeSub) {}
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
index cef7dda172f3..feb4a72fe8d0 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
@@ -77,13 +77,9 @@ namespace ento {
class AnalysisManager;
class BasicValueFactory;
-class BlockCounter;
-class BranchNodeBuilder;
class CallEvent;
class CheckerManager;
class ConstraintManager;
-class CXXTempObjectRegion;
-class EndOfFunctionNodeBuilder;
class ExplodedNodeSet;
class ExplodedNode;
class IndirectGotoNodeBuilder;
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/LoopUnrolling.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/LoopUnrolling.h
index 53b221cb53c9..eb2b0b343428 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/LoopUnrolling.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/LoopUnrolling.h
@@ -28,7 +28,6 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
namespace clang {
namespace ento {
-class AnalysisManager;
/// Returns if the given State indicates that is inside a completely unrolled
/// loop.
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
index 9a34639e2707..3204ac460ed0 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
@@ -47,8 +47,6 @@ typedef std::unique_ptr<StoreManager>(*StoreManagerCreator)(
// ProgramStateTrait - Traits used by the Generic Data Map of a ProgramState.
//===----------------------------------------------------------------------===//
-template <typename T> struct ProgramStatePartialTrait;
-
template <typename T> struct ProgramStateTrait {
typedef typename T::data_type data_type;
static inline void *MakeVoidPtr(data_type D) { return (void*) D; }
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/RangedConstraintManager.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/RangedConstraintManager.h
index 3a0bec9d04e5..6c487697bc55 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/RangedConstraintManager.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/RangedConstraintManager.h
@@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_LIB_STATICANALYZER_CORE_RANGEDCONSTRAINTMANAGER_H
-#define LLVM_CLANG_LIB_STATICANALYZER_CORE_RANGEDCONSTRAINTMANAGER_H
+#ifndef LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_RANGEDCONSTRAINTMANAGER_H
+#define LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_RANGEDCONSTRAINTMANAGER_H
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
index 61dfdbb0688b..1df47dae25bf 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
@@ -96,6 +96,17 @@ protected:
QualType OriginalTy);
SVal evalCastSubKind(nonloc::PointerToMember V, QualType CastTy,
QualType OriginalTy);
+ /// Reduce cast expression by removing redundant intermediate casts.
+ /// E.g.
+ /// - (char)(short)(int x) -> (char)(int x)
+ /// - (int)(int x) -> int x
+ ///
+ /// \param V -- SymbolVal, which presumably contains a SymbolCast or any
+ /// symbol that is applicable for a cast operation.
+ /// \param CastTy -- QualType, which `V` shall be cast to.
+ /// \return SVal with the simplified cast expression.
+ /// \note Currently only integral casts are supported.
+ SVal simplifySymbolCast(nonloc::SymbolVal V, QualType CastTy);
public:
SValBuilder(llvm::BumpPtrAllocator &alloc, ASTContext &context,
@@ -103,18 +114,6 @@ public:
virtual ~SValBuilder() = default;
- bool haveSameType(const SymExpr *Sym1, const SymExpr *Sym2) {
- return haveSameType(Sym1->getType(), Sym2->getType());
- }
-
- bool haveSameType(QualType Ty1, QualType Ty2) {
- // FIXME: Remove the second disjunct when we support symbolic
- // truncation/extension.
- return (Context.getCanonicalType(Ty1) == Context.getCanonicalType(Ty2) ||
- (Ty1->isIntegralOrEnumerationType() &&
- Ty2->isIntegralOrEnumerationType()));
- }
-
SVal evalCast(SVal V, QualType CastTy, QualType OriginalTy);
// Handles casts of type CK_IntegralCast.
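simplifySymbolCast collapses redundant intermediate integral casts on symbolic values (an ability gated elsewhere in this merge by the new support-symbolic-integer-casts analyzer option). The kind of source pattern it targets, shown as plain C++ for illustration only:

    // With symbolic integer casts enabled, the analyzer models 'x' symbolically
    // and the nested truncations below reduce to a single cast:
    //   (char)(short)(int x)  ->  (char)(int x)
    char truncate_twice(int x) {
      short s = (short)x; // intermediate cast adds no information
      char c = (char)s;   // the final truncation is all that matters
      return c;
    }

    int identity_cast(int x) {
      return (int)x;      // (int)(int x) -> int x : the cast disappears
    }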
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h
index 6199c8d8d179..f4b229070d67 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h
@@ -35,7 +35,6 @@
namespace clang {
class CXXBaseSpecifier;
-class DeclaratorDecl;
class FunctionDecl;
class LabelDecl;
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/AnalysisConsumer.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/AnalysisConsumer.h
index bcc29a60ad70..f3b1c1f20645 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/AnalysisConsumer.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/AnalysisConsumer.h
@@ -21,14 +21,10 @@
namespace clang {
-class Preprocessor;
-class DiagnosticsEngine;
-class CodeInjector;
class CompilerInstance;
namespace ento {
class PathDiagnosticConsumer;
-class CheckerManager;
class CheckerRegistry;
class AnalysisASTConsumer : public ASTConsumer {
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/FrontendActions.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/FrontendActions.h
index 2b12330e4f2d..31b1c245200e 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/FrontendActions.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/FrontendActions.h
@@ -16,12 +16,9 @@
namespace clang {
class Stmt;
-class AnalyzerOptions;
namespace ento {
-class CheckerManager;
-
//===----------------------------------------------------------------------===//
// AST Consumer Actions
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/ModelConsumer.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/ModelConsumer.h
index 5f9ae78dac63..7b7087622bc2 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/ModelConsumer.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/ModelConsumer.h
@@ -12,8 +12,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_GR_MODELCONSUMER_H
-#define LLVM_CLANG_GR_MODELCONSUMER_H
+#ifndef LLVM_CLANG_STATICANALYZER_FRONTEND_MODELCONSUMER_H
+#define LLVM_CLANG_STATICANALYZER_FRONTEND_MODELCONSUMER_H
#include "clang/AST/ASTConsumer.h"
#include "llvm/ADT/StringMap.h"
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiff.h b/contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiff.h
index c772ad84c139..5fe6db6cb133 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiff.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiff.h
@@ -48,20 +48,6 @@ struct Node {
llvm::Optional<std::string> getQualifiedIdentifier() const;
};
-class ASTDiff {
-public:
- ASTDiff(SyntaxTree &Src, SyntaxTree &Dst, const ComparisonOptions &Options);
- ~ASTDiff();
-
- // Returns the ID of the node that is mapped to the given node in SourceTree.
- NodeId getMapped(const SyntaxTree &SourceTree, NodeId Id) const;
-
- class Impl;
-
-private:
- std::unique_ptr<Impl> DiffImpl;
-};
-
/// SyntaxTree objects represent subtrees of the AST.
/// They can be constructed from any Decl or Stmt.
class SyntaxTree {
@@ -120,6 +106,20 @@ struct ComparisonOptions {
}
};
+class ASTDiff {
+public:
+ ASTDiff(SyntaxTree &Src, SyntaxTree &Dst, const ComparisonOptions &Options);
+ ~ASTDiff();
+
+ // Returns the ID of the node that is mapped to the given node in SourceTree.
+ NodeId getMapped(const SyntaxTree &SourceTree, NodeId Id) const;
+
+ class Impl;
+
+private:
+ std::unique_ptr<Impl> DiffImpl;
+};
+
} // end namespace diff
} // end namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiffInternal.h b/contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiffInternal.h
index 1e784ef43ac1..b74af5e8f24f 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiffInternal.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiffInternal.h
@@ -17,10 +17,6 @@ namespace diff {
using DynTypedNode = DynTypedNode;
-class SyntaxTree;
-class SyntaxTreeImpl;
-struct ComparisonOptions;
-
/// Within a tree, this identifies a node by its preorder offset.
struct NodeId {
private:
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/CommonOptionsParser.h b/contrib/llvm-project/clang/include/clang/Tooling/CommonOptionsParser.h
index 0f072c2886ab..3c0480af3779 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/CommonOptionsParser.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/CommonOptionsParser.h
@@ -141,4 +141,4 @@ private:
} // namespace tooling
} // namespace clang
-#endif // LLVM_TOOLS_CLANG_INCLUDE_CLANG_TOOLING_COMMONOPTIONSPARSER_H
+#endif // LLVM_CLANG_TOOLING_COMMONOPTIONSPARSER_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/CompilationDatabase.h b/contrib/llvm-project/clang/include/clang/Tooling/CompilationDatabase.h
index 90af15536961..fee584acb486 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/CompilationDatabase.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/CompilationDatabase.h
@@ -216,6 +216,8 @@ private:
/// Transforms a compile command so that it applies the same configuration to
/// a different file. Most args are left intact, but tweaks may be needed
/// to certain flags (-x, -std etc).
+///
+/// The output command will always end in {"--", Filename}.
tooling::CompileCommand transferCompileCommand(tooling::CompileCommand,
StringRef Filename);
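The added guarantee that a transferred command ends in {"--", Filename} matters to callers that append further arguments. A hedged usage sketch; the wrapper and the header file name are made up:

    #include "clang/Tooling/CompilationDatabase.h"

    using namespace clang::tooling;

    CompileCommand commandForHeader(const CompileCommand &SourceCmd) {
      // Reuse the source file's flags for an associated header.
      CompileCommand HeaderCmd =
          transferCompileCommand(SourceCmd, "include/widget.h");
      // Per the comment above, the command line now ends with:
      //   ... "--" "include/widget.h"
      // so anything appended before that pair is still treated as a flag.
      return HeaderCmd;
    }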
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h
index 7d0b8f2138f9..7c830d3f2733 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h
@@ -6,13 +6,13 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_FILESYSTEM_H
-#define LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_FILESYSTEM_H
+#ifndef LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_DEPENDENCYSCANNINGFILESYSTEM_H
+#define LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_DEPENDENCYSCANNINGFILESYSTEM_H
#include "clang/Basic/LLVM.h"
#include "clang/Lex/PreprocessorExcludedConditionalDirectiveSkipMapping.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/ADT/StringSet.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/VirtualFileSystem.h"
@@ -22,6 +22,26 @@ namespace clang {
namespace tooling {
namespace dependencies {
+/// Original and minimized contents of a cached file entry. Single instance can
+/// be shared between multiple entries.
+struct CachedFileContents {
+ CachedFileContents(std::unique_ptr<llvm::MemoryBuffer> Original)
+ : Original(std::move(Original)), MinimizedAccess(nullptr) {}
+
+ /// Owning storage for the original contents.
+ std::unique_ptr<llvm::MemoryBuffer> Original;
+
+ /// The mutex that must be locked before mutating minimized contents.
+ std::mutex ValueLock;
+ /// Owning storage for the minimized contents.
+ std::unique_ptr<llvm::MemoryBuffer> MinimizedStorage;
+ /// Accessor to the minimized contents that's atomic to avoid data races.
+ std::atomic<llvm::MemoryBuffer *> MinimizedAccess;
+ /// Skipped range mapping of the minimized contents.
+ /// This is initialized iff `MinimizedAccess != nullptr`.
+ PreprocessorSkippedRangeMapping PPSkippedRangeMapping;
+};
+
/// An in-memory representation of a file system entity that is of interest to
/// the dependency scanning filesystem.
///
@@ -29,100 +49,99 @@ namespace dependencies {
/// - opened file with original contents and a stat value,
/// - opened file with original contents, minimized contents and a stat value,
/// - directory entry with its stat value,
-/// - filesystem error,
-/// - uninitialized entry with unknown status.
+/// - filesystem error.
+///
+/// Single instance of this class can be shared across different filenames (e.g.
+/// a regular file and a symlink). For this reason the status filename is empty
+/// and is only materialized by \c EntryRef that knows the requested filename.
class CachedFileSystemEntry {
public:
- /// Creates an uninitialized entry.
- CachedFileSystemEntry()
- : MaybeStat(llvm::vfs::Status()), MinimizedContentsAccess(nullptr) {}
-
- /// Initialize the cached file system entry.
- void init(llvm::ErrorOr<llvm::vfs::Status> &&MaybeStatus, StringRef Filename,
- llvm::vfs::FileSystem &FS);
+ /// Creates an entry without contents: either a filesystem error or
+ /// a directory with stat value.
+ CachedFileSystemEntry(llvm::ErrorOr<llvm::vfs::Status> Stat)
+ : MaybeStat(std::move(Stat)), Contents(nullptr) {
+ clearStatName();
+ }
- /// Initialize the entry as file with minimized or original contents.
- ///
- /// The filesystem opens the file even for `stat` calls open to avoid the
- /// issues with stat + open of minimized files that might lead to a
- /// mismatching size of the file.
- llvm::ErrorOr<llvm::vfs::Status> initFile(StringRef Filename,
- llvm::vfs::FileSystem &FS);
-
- /// Minimize contents of the file.
- void minimizeFile();
-
- /// \returns True if the entry is initialized.
- bool isInitialized() const {
- return !MaybeStat || MaybeStat->isStatusKnown();
+ /// Creates an entry representing a file with contents.
+ CachedFileSystemEntry(llvm::ErrorOr<llvm::vfs::Status> Stat,
+ CachedFileContents *Contents)
+ : MaybeStat(std::move(Stat)), Contents(std::move(Contents)) {
+ clearStatName();
}
- /// \returns True if the current entry points to a directory.
- bool isDirectory() const { return MaybeStat && MaybeStat->isDirectory(); }
+ /// \returns True if the entry is a filesystem error.
+ bool isError() const { return !MaybeStat; }
+
+ /// \returns True if the current entry represents a directory.
+ bool isDirectory() const { return !isError() && MaybeStat->isDirectory(); }
- /// \returns The error or the file's original contents.
- llvm::ErrorOr<StringRef> getOriginalContents() const {
- if (!MaybeStat)
- return MaybeStat.getError();
+ /// \returns Original contents of the file.
+ StringRef getOriginalContents() const {
+ assert(!isError() && "error");
assert(!MaybeStat->isDirectory() && "not a file");
- assert(isInitialized() && "not initialized");
- assert(OriginalContents && "not read");
- return OriginalContents->getBuffer();
+ assert(Contents && "contents not initialized");
+ return Contents->Original->getBuffer();
}
- /// \returns The error or the file's minimized contents.
- llvm::ErrorOr<StringRef> getMinimizedContents() const {
- if (!MaybeStat)
- return MaybeStat.getError();
+ /// \returns Minimized contents of the file.
+ StringRef getMinimizedContents() const {
+ assert(!isError() && "error");
assert(!MaybeStat->isDirectory() && "not a file");
- assert(isInitialized() && "not initialized");
- llvm::MemoryBuffer *Buffer = MinimizedContentsAccess.load();
+ assert(Contents && "contents not initialized");
+ llvm::MemoryBuffer *Buffer = Contents->MinimizedAccess.load();
assert(Buffer && "not minimized");
return Buffer->getBuffer();
}
- /// \returns True if this entry represents a file that can be read.
- bool isReadable() const { return MaybeStat && !MaybeStat->isDirectory(); }
+ /// \returns The error.
+ std::error_code getError() const { return MaybeStat.getError(); }
- /// \returns True if this cached entry needs to be updated.
- bool needsUpdate(bool ShouldBeMinimized) const {
- return isReadable() && needsMinimization(ShouldBeMinimized);
+ /// \returns The entry status with empty filename.
+ llvm::vfs::Status getStatus() const {
+ assert(!isError() && "error");
+ assert(MaybeStat->getName().empty() && "stat name must be empty");
+ return *MaybeStat;
}
- /// \returns True if the contents of this entry need to be minimized.
- bool needsMinimization(bool ShouldBeMinimized) const {
- return ShouldBeMinimized && !MinimizedContentsAccess.load();
+ /// \returns The unique ID of the entry.
+ llvm::sys::fs::UniqueID getUniqueID() const {
+ assert(!isError() && "error");
+ return MaybeStat->getUniqueID();
}
- /// \returns The error or the status of the entry.
- llvm::ErrorOr<llvm::vfs::Status> getStatus() const {
- assert(isInitialized() && "not initialized");
- return MaybeStat;
+ /// \returns The mapping between location -> distance that is used to speed up
+ /// the block skipping in the preprocessor.
+ const PreprocessorSkippedRangeMapping &getPPSkippedRangeMapping() const {
+ assert(!isError() && "error");
+ assert(!isDirectory() && "not a file");
+ assert(Contents && "contents not initialized");
+ return Contents->PPSkippedRangeMapping;
}
- /// \returns the name of the file.
- StringRef getName() const {
- assert(isInitialized() && "not initialized");
- return MaybeStat->getName();
+ /// \returns The data structure holding both original and minimized contents.
+ CachedFileContents *getContents() const {
+ assert(!isError() && "error");
+ assert(!isDirectory() && "not a file");
+ return Contents;
}
- /// Return the mapping between location -> distance that is used to speed up
- /// the block skipping in the preprocessor.
- const PreprocessorSkippedRangeMapping &getPPSkippedRangeMapping() const {
- return PPSkippedRangeMapping;
+private:
+ void clearStatName() {
+ if (MaybeStat)
+ MaybeStat = llvm::vfs::Status::copyWithNewName(*MaybeStat, "");
}
-private:
+ /// Either the filesystem error or status of the entry.
+ /// The filename is empty and only materialized by \c EntryRef.
llvm::ErrorOr<llvm::vfs::Status> MaybeStat;
- std::unique_ptr<llvm::MemoryBuffer> OriginalContents;
-
- /// Owning storage for the minimized file contents.
- std::unique_ptr<llvm::MemoryBuffer> MinimizedContentsStorage;
- /// Atomic view of the minimized file contents.
- /// This prevents data races when multiple threads call `needsMinimization`.
- std::atomic<llvm::MemoryBuffer *> MinimizedContentsAccess;
- PreprocessorSkippedRangeMapping PPSkippedRangeMapping;
+ /// Non-owning pointer to the file contents.
+ ///
+ /// We're using pointer here to keep the size of this class small. Instances
+ /// representing directories and filesystem errors don't hold any contents
+ /// anyway.
+ CachedFileContents *Contents;
};
/// This class is a shared cache, that caches the 'stat' and 'open' calls to the
@@ -133,24 +152,59 @@ private:
/// the worker threads.
class DependencyScanningFilesystemSharedCache {
public:
- struct SharedFileSystemEntry {
- std::mutex ValueLock;
- CachedFileSystemEntry Value;
+ struct CacheShard {
+ /// The mutex that needs to be locked before mutation of any member.
+ mutable std::mutex CacheLock;
+
+ /// Map from filenames to cached entries.
+ llvm::StringMap<const CachedFileSystemEntry *, llvm::BumpPtrAllocator>
+ EntriesByFilename;
+
+ /// Map from unique IDs to cached entries.
+ llvm::DenseMap<llvm::sys::fs::UniqueID, const CachedFileSystemEntry *>
+ EntriesByUID;
+
+ /// The backing storage for cached entries.
+ llvm::SpecificBumpPtrAllocator<CachedFileSystemEntry> EntryStorage;
+
+ /// The backing storage for cached contents.
+ llvm::SpecificBumpPtrAllocator<CachedFileContents> ContentsStorage;
+
+ /// Returns entry associated with the filename or nullptr if none is found.
+ const CachedFileSystemEntry *findEntryByFilename(StringRef Filename) const;
+
+ /// Returns entry associated with the unique ID or nullptr if none is found.
+ const CachedFileSystemEntry *
+ findEntryByUID(llvm::sys::fs::UniqueID UID) const;
+
+    /// Returns the entry associated with the filename if there is one.
+    /// Otherwise, constructs a new entry with the given status, associates it
+    /// with the filename, and returns the result.
+ const CachedFileSystemEntry &
+ getOrEmplaceEntryForFilename(StringRef Filename,
+ llvm::ErrorOr<llvm::vfs::Status> Stat);
+
+    /// Returns the entry associated with the unique ID if there is one.
+    /// Otherwise, constructs a new entry with the given status and contents,
+    /// associates it with the unique ID, and returns the result.
+ const CachedFileSystemEntry &
+ getOrEmplaceEntryForUID(llvm::sys::fs::UniqueID UID, llvm::vfs::Status Stat,
+ std::unique_ptr<llvm::MemoryBuffer> Contents);
+
+    /// Returns the entry associated with the filename if there is one.
+    /// Otherwise, associates the given entry with the filename and returns it.
+ const CachedFileSystemEntry &
+ getOrInsertEntryForFilename(StringRef Filename,
+ const CachedFileSystemEntry &Entry);
};
DependencyScanningFilesystemSharedCache();
- /// Returns a cache entry for the corresponding key.
- ///
- /// A new cache entry is created if the key is not in the cache. This is a
- /// thread safe call.
- SharedFileSystemEntry &get(StringRef Key);
+ /// Returns shard for the given key.
+ CacheShard &getShardForFilename(StringRef Filename) const;
+ CacheShard &getShardForUID(llvm::sys::fs::UniqueID UID) const;
private:
- struct CacheShard {
- std::mutex CacheLock;
- llvm::StringMap<SharedFileSystemEntry, llvm::BumpPtrAllocator> Cache;
- };
std::unique_ptr<CacheShard[]> CacheShards;
unsigned NumShards;
};
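The CacheShard layout above (one mutex plus filename and unique-ID maps per shard, with the shard picked by hashing the key) is a common way to cut lock contention between scanning workers. The following is a minimal standalone sketch of that idea only; ShardedCache, getOrCompute, and the fixed shard count are invented for illustration and are not part of the header.

#include <functional>
#include <mutex>
#include <string>
#include <unordered_map>

// Hypothetical sharded cache: values are created once and never mutated or
// erased, so callers may keep the returned reference after the lock is gone.
class ShardedCache {
  struct Shard {
    std::mutex Lock;                              // guards Entries
    std::unordered_map<std::string, int> Entries; // key -> cached value
  };
  static constexpr unsigned NumShards = 8;
  Shard Shards[NumShards];

  Shard &getShardForKey(const std::string &Key) {
    return Shards[std::hash<std::string>{}(Key) % NumShards];
  }

public:
  // Returns the cached value for Key, computing and inserting it on a miss.
  // Only the owning shard is locked, so lookups that hash to other shards do
  // not contend. (Compute runs under the lock here for brevity; the scanner
  // does the filesystem work before locking.)
  const int &getOrCompute(const std::string &Key,
                          const std::function<int()> &Compute) {
    Shard &S = getShardForKey(Key);
    std::lock_guard<std::mutex> Guard(S.Lock);
    auto It = S.Entries.find(Key);
    if (It == S.Entries.end())
      It = S.Entries.emplace(Key, Compute()).first;
    return It->second;
  }
};

int main() {
  ShardedCache Cache;
  // The second lookup is a hit; Compute is not called again.
  const int &A = Cache.getOrCompute("foo.h", [] { return 1; });
  const int &B = Cache.getOrCompute("foo.h", [] { return 2; });
  return (&A == &B && A == 1) ? 0 : 1;
}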
@@ -162,8 +216,20 @@ class DependencyScanningFilesystemLocalCache {
llvm::StringMap<const CachedFileSystemEntry *, llvm::BumpPtrAllocator> Cache;
public:
- const CachedFileSystemEntry *getCachedEntry(StringRef Filename) {
- return Cache[Filename];
+ /// Returns entry associated with the filename or nullptr if none is found.
+ const CachedFileSystemEntry *findEntryByFilename(StringRef Filename) const {
+ auto It = Cache.find(Filename);
+ return It == Cache.end() ? nullptr : It->getValue();
+ }
+
+ /// Associates the given entry with the filename and returns the given entry
+ /// pointer (for convenience).
+ const CachedFileSystemEntry &
+ insertEntryForFilename(StringRef Filename,
+ const CachedFileSystemEntry &Entry) {
+ const auto *InsertedEntry = Cache.insert({Filename, &Entry}).first->second;
+ assert(InsertedEntry == &Entry && "entry already present");
+ return *InsertedEntry;
}
};
@@ -176,26 +242,34 @@ class EntryRef {
/// are minimized.
bool Minimized;
+ /// The filename used to access this entry.
+ std::string Filename;
+
/// The underlying cached entry.
const CachedFileSystemEntry &Entry;
public:
- EntryRef(bool Minimized, const CachedFileSystemEntry &Entry)
- : Minimized(Minimized), Entry(Entry) {}
-
- llvm::ErrorOr<llvm::vfs::Status> getStatus() const {
- auto MaybeStat = Entry.getStatus();
- if (!MaybeStat || MaybeStat->isDirectory())
- return MaybeStat;
- return llvm::vfs::Status::copyWithNewSize(*MaybeStat,
- getContents()->size());
+ EntryRef(bool Minimized, StringRef Name, const CachedFileSystemEntry &Entry)
+ : Minimized(Minimized), Filename(Name), Entry(Entry) {}
+
+ llvm::vfs::Status getStatus() const {
+ llvm::vfs::Status Stat = Entry.getStatus();
+ if (!Stat.isDirectory())
+ Stat = llvm::vfs::Status::copyWithNewSize(Stat, getContents().size());
+ return llvm::vfs::Status::copyWithNewName(Stat, Filename);
}
+ bool isError() const { return Entry.isError(); }
bool isDirectory() const { return Entry.isDirectory(); }
- StringRef getName() const { return Entry.getName(); }
+ /// If the cached entry represents an error, promotes it into `ErrorOr`.
+ llvm::ErrorOr<EntryRef> unwrapError() const {
+ if (isError())
+ return Entry.getError();
+ return *this;
+ }
- llvm::ErrorOr<StringRef> getContents() const {
+ StringRef getContents() const {
return Minimized ? Entry.getMinimizedContents()
: Entry.getOriginalContents();
}
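EntryRef above keeps any stat error inside the shared, immutable cache entry and only promotes it to llvm::ErrorOr at the API boundary via unwrapError. Below is a rough standalone sketch of that pattern, assuming the LLVM headers are on the include path; CachedLookup and LookupRef are invented names, not types from this header.

#include "llvm/Support/ErrorOr.h"

#include <cassert>
#include <string>
#include <system_error>

// Hypothetical immutable cache entry: either an error code or a value,
// decided once when the entry is created and never changed afterwards.
struct CachedLookup {
  std::error_code EC; // non-zero if the lookup failed
  std::string Value;  // meaningful only when EC is zero

  bool isError() const { return static_cast<bool>(EC); }
};

// Hypothetical accessor handed out to callers. The error is promoted to
// llvm::ErrorOr only here, at the boundary, mirroring EntryRef::unwrapError.
struct LookupRef {
  const CachedLookup &Entry;

  llvm::ErrorOr<LookupRef> unwrapError() const {
    if (Entry.isError())
      return Entry.EC;
    return *this;
  }

  const std::string &getValue() const {
    assert(!Entry.isError() && "error entry has no value");
    return Entry.Value;
  }
};

int main() {
  CachedLookup Good{std::error_code(), "contents"};
  CachedLookup Bad{std::make_error_code(std::errc::no_such_file_or_directory), ""};

  llvm::ErrorOr<LookupRef> R1 = LookupRef{Good}.unwrapError();
  llvm::ErrorOr<LookupRef> R2 = LookupRef{Bad}.unwrapError();
  return (R1 && !R2 && R1->getValue() == "contents") ? 0 : 1;
}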
@@ -234,9 +308,90 @@ public:
private:
/// Check whether the file should be minimized.
- bool shouldMinimize(StringRef Filename);
+ bool shouldMinimize(StringRef Filename, llvm::sys::fs::UniqueID UID);
+
+ /// Returns entry for the given filename.
+ ///
+ /// Attempts to use the local and shared caches first, then falls back to
+ /// using the underlying filesystem.
+ llvm::ErrorOr<EntryRef>
+ getOrCreateFileSystemEntry(StringRef Filename,
+ bool DisableMinimization = false);
+
+  /// For a filename that's not yet associated with any entry in the caches,
+  /// uses the underlying filesystem to either look up the entry in the shared
+  /// cache indexed by unique ID, or to create a new entry from scratch.
+ llvm::ErrorOr<const CachedFileSystemEntry &>
+ computeAndStoreResult(StringRef Filename);
+
+ /// Minimizes the given entry if necessary and returns a wrapper object with
+ /// reference semantics.
+ EntryRef minimizeIfNecessary(const CachedFileSystemEntry &Entry,
+ StringRef Filename, bool Disable);
+
+ /// Represents a filesystem entry that has been stat-ed (and potentially read)
+ /// and that's about to be inserted into the cache as `CachedFileSystemEntry`.
+ struct TentativeEntry {
+ llvm::vfs::Status Status;
+ std::unique_ptr<llvm::MemoryBuffer> Contents;
+
+ TentativeEntry(llvm::vfs::Status Status,
+ std::unique_ptr<llvm::MemoryBuffer> Contents = nullptr)
+ : Status(std::move(Status)), Contents(std::move(Contents)) {}
+ };
+
+ /// Reads file at the given path. Enforces consistency between the file size
+ /// in status and size of read contents.
+ llvm::ErrorOr<TentativeEntry> readFile(StringRef Filename);
- llvm::ErrorOr<EntryRef> getOrCreateFileSystemEntry(StringRef Filename);
+  /// Returns the entry associated with the unique ID of the given tentative
+  /// entry if there is one in the shared cache. Otherwise, constructs a new
+  /// entry, associates it with the unique ID, and returns the result.
+ const CachedFileSystemEntry &
+ getOrEmplaceSharedEntryForUID(TentativeEntry TEntry);
+
+ /// Returns entry associated with the filename or nullptr if none is found.
+ ///
+  /// Returns the entry from the local cache if there is one. Otherwise, if the
+  /// entry is found in the shared cache, writes it through the local cache and
+  /// returns it. If neither cache has the entry, returns nullptr.
+ const CachedFileSystemEntry *
+ findEntryByFilenameWithWriteThrough(StringRef Filename);
+
+ /// Returns entry associated with the unique ID in the shared cache or nullptr
+ /// if none is found.
+ const CachedFileSystemEntry *
+ findSharedEntryByUID(llvm::vfs::Status Stat) const {
+ return SharedCache.getShardForUID(Stat.getUniqueID())
+ .findEntryByUID(Stat.getUniqueID());
+ }
+
+ /// Associates the given entry with the filename in the local cache and
+ /// returns it.
+ const CachedFileSystemEntry &
+ insertLocalEntryForFilename(StringRef Filename,
+ const CachedFileSystemEntry &Entry) {
+ return LocalCache.insertEntryForFilename(Filename, Entry);
+ }
+
+  /// Returns the entry associated with the filename in the shared cache if
+  /// there is one. Otherwise, constructs a new entry with the given error code,
+  /// associates it with the filename, and returns the result.
+ const CachedFileSystemEntry &
+ getOrEmplaceSharedEntryForFilename(StringRef Filename, std::error_code EC) {
+ return SharedCache.getShardForFilename(Filename)
+ .getOrEmplaceEntryForFilename(Filename, EC);
+ }
+
+  /// Returns the entry associated with the filename in the shared cache if
+  /// there is one. Otherwise, associates the given entry with the filename and
+  /// returns it.
+ const CachedFileSystemEntry &
+ getOrInsertSharedEntryForFilename(StringRef Filename,
+ const CachedFileSystemEntry &Entry) {
+ return SharedCache.getShardForFilename(Filename)
+ .getOrInsertEntryForFilename(Filename, Entry);
+ }
/// The global cache shared between worker threads.
DependencyScanningFilesystemSharedCache &SharedCache;
@@ -248,11 +403,11 @@ private:
/// currently active preprocessor.
ExcludedPreprocessorDirectiveSkipMapping *PPSkipMappings;
/// The set of files that should not be minimized.
- llvm::StringSet<> NotToBeMinimized;
+ llvm::DenseSet<llvm::sys::fs::UniqueID> NotToBeMinimized;
};
} // end namespace dependencies
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_FILESYSTEM_H
+#endif // LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_DEPENDENCYSCANNINGFILESYSTEM_H
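Read together, these helpers describe a two-level lookup: a worker first consults its own lock-free local cache, then the shared (sharded) cache, and only then the underlying filesystem, writing the result back through both levels. Below is a simplified standalone sketch of that write-through flow under invented names (SharedCache, WorkerView); the real implementation shards the shared map, stores richer entries, and does the filesystem work before taking any lock.

#include <functional>
#include <mutex>
#include <string>
#include <unordered_map>

// Hypothetical shared cache; a single mutex stands in for the sharded design.
struct SharedCache {
  std::mutex Lock;
  std::unordered_map<std::string, int> Entries;

  const int &getOrEmplace(const std::string &Key,
                          const std::function<int()> &Compute) {
    std::lock_guard<std::mutex> Guard(Lock);
    auto It = Entries.find(Key);
    if (It == Entries.end())
      It = Entries.emplace(Key, Compute()).first;
    return It->second;
  }
};

// Hypothetical per-worker view. The local map needs no locking because it is
// only touched by its owning thread; it memoizes pointers into the shared
// cache, whose entries are never erased.
struct WorkerView {
  SharedCache &Shared;
  std::unordered_map<std::string, const int *> Local;

  int lookup(const std::string &Key, const std::function<int()> &ReadFromFS) {
    auto It = Local.find(Key);
    if (It != Local.end())
      return *It->second;                       // local hit, no lock taken
    const int &Entry = Shared.getOrEmplace(Key, ReadFromFS);
    Local.emplace(Key, &Entry);                 // write through to local cache
    return Entry;
  }
};

int main() {
  SharedCache SC;
  WorkerView W{SC, {}};
  int First = W.lookup("a.h", [] { return 42; });  // miss: fills both caches
  int Second = W.lookup("a.h", [] { return -1; }); // hit: served locally
  return (First == 42 && Second == 42) ? 0 : 1;
}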
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningService.h b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningService.h
index d58e736ab6a6..5c6dce611a95 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningService.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningService.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_SERVICE_H
-#define LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_SERVICE_H
+#ifndef LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_DEPENDENCYSCANNINGSERVICE_H
+#define LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_DEPENDENCYSCANNINGSERVICE_H
#include "clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h"
@@ -83,4 +83,4 @@ private:
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_SERVICE_H
+#endif // LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_DEPENDENCYSCANNINGSERVICE_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h
index 9e2ff82f5614..2eb7a35b27b9 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_TOOL_H
-#define LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_TOOL_H
+#ifndef LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_DEPENDENCYSCANNINGTOOL_H
+#define LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_DEPENDENCYSCANNINGTOOL_H
#include "clang/Tooling/DependencyScanning/DependencyScanningService.h"
#include "clang/Tooling/DependencyScanning/DependencyScanningWorker.h"
@@ -111,4 +111,4 @@ private:
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_TOOL_H
+#endif // LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_DEPENDENCYSCANNINGTOOL_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningWorker.h b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningWorker.h
index 0f3a5369a021..b7631c09f275 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningWorker.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningWorker.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_WORKER_H
-#define LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_WORKER_H
+#ifndef LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_DEPENDENCYSCANNINGWORKER_H
+#define LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_DEPENDENCYSCANNINGWORKER_H
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/FileManager.h"
@@ -91,4 +91,4 @@ private:
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_WORKER_H
+#endif // LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_DEPENDENCYSCANNINGWORKER_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h
index e61147d6f2b0..d1a7aab8c24b 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_MODULE_DEP_COLLECTOR_H
-#define LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_MODULE_DEP_COLLECTOR_H
+#ifndef LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_MODULEDEPCOLLECTOR_H
+#define LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_MODULEDEPCOLLECTOR_H
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
@@ -234,4 +234,4 @@ private:
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_MODULE_DEP_COLLECTOR_H
+#endif // LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_MODULEDEPCOLLECTOR_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/FixIt.h b/contrib/llvm-project/clang/include/clang/Tooling/FixIt.h
index 5fce71f2d8f7..1624c2d6be36 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/FixIt.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/FixIt.h
@@ -76,4 +76,4 @@ FixItHint createReplacement(const D &Destination, StringRef Source) {
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_FIXINT_H
+#endif // LLVM_CLANG_TOOLING_FIXIT_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/ASTSelection.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/ASTSelection.h
index 239be36012c3..33dd386d2340 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/ASTSelection.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/ASTSelection.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_AST_SELECTION_H
-#define LLVM_CLANG_TOOLING_REFACTOR_AST_SELECTION_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_ASTSELECTION_H
+#define LLVM_CLANG_TOOLING_REFACTORING_ASTSELECTION_H
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Stmt.h"
@@ -152,4 +152,4 @@ private:
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_AST_SELECTION_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_ASTSELECTION_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/AtomicChange.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/AtomicChange.h
index f1034a3d0579..3945a7c9fefb 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/AtomicChange.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/AtomicChange.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_ATOMICCHANGE_H
-#define LLVM_CLANG_TOOLING_REFACTOR_ATOMICCHANGE_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_ATOMICCHANGE_H
+#define LLVM_CLANG_TOOLING_REFACTORING_ATOMICCHANGE_H
#include "clang/Basic/SourceManager.h"
#include "clang/Format/Format.h"
@@ -187,4 +187,4 @@ applyAtomicChanges(llvm::StringRef FilePath, llvm::StringRef Code,
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_ATOMICCHANGE_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_ATOMICCHANGE_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Extract/Extract.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Extract/Extract.h
index 930991328ca0..2f7c5bc9acff 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Extract/Extract.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Extract/Extract.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_EXTRACT_EXTRACT_H
-#define LLVM_CLANG_TOOLING_REFACTOR_EXTRACT_EXTRACT_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_EXTRACT_EXTRACT_H
+#define LLVM_CLANG_TOOLING_REFACTORING_EXTRACT_EXTRACT_H
#include "clang/Tooling/Refactoring/ASTSelection.h"
#include "clang/Tooling/Refactoring/RefactoringActionRules.h"
@@ -49,4 +49,4 @@ private:
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_EXTRACT_EXTRACT_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_EXTRACT_EXTRACT_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Extract/SourceExtraction.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Extract/SourceExtraction.h
index 034a0aaaf6db..be44518d4bce 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Extract/SourceExtraction.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Extract/SourceExtraction.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTORING_EXTRACT_SOURCE_EXTRACTION_H
-#define LLVM_CLANG_TOOLING_REFACTORING_EXTRACT_SOURCE_EXTRACTION_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_EXTRACT_SOURCEEXTRACTION_H
+#define LLVM_CLANG_TOOLING_REFACTORING_EXTRACT_SOURCEEXTRACTION_H
#include "clang/Basic/LLVM.h"
@@ -48,4 +48,4 @@ private:
} // end namespace tooling
} // end namespace clang
-#endif //LLVM_CLANG_TOOLING_REFACTORING_EXTRACT_SOURCE_EXTRACTION_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_EXTRACT_SOURCEEXTRACTION_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Lookup.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Lookup.h
index 448bc422c4e7..dcb40b7eee66 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Lookup.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Lookup.h
@@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_LOOKUP_H
-#define LLVM_CLANG_TOOLING_REFACTOR_LOOKUP_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_LOOKUP_H
+#define LLVM_CLANG_TOOLING_REFACTORING_LOOKUP_H
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
@@ -47,4 +47,4 @@ std::string replaceNestedName(const NestedNameSpecifier *Use,
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_LOOKUP_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_LOOKUP_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h
index 63d46abc2034..6fb2decf8614 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h
@@ -12,8 +12,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_RECURSIVE_SYMBOL_VISITOR_H
-#define LLVM_CLANG_TOOLING_REFACTOR_RECURSIVE_SYMBOL_VISITOR_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_RECURSIVESYMBOLVISITOR_H
+#define LLVM_CLANG_TOOLING_REFACTORING_RECURSIVESYMBOLVISITOR_H
#include "clang/AST/AST.h"
#include "clang/AST/RecursiveASTVisitor.h"
@@ -150,4 +150,4 @@ private:
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_RECURSIVE_SYMBOL_VISITOR_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_RECURSIVESYMBOLVISITOR_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringAction.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringAction.h
index d4294ddb2f66..b362f655965e 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringAction.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringAction.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_H
-#define LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTION_H
+#define LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTION_H
#include "clang/Basic/LLVM.h"
#include "clang/Tooling/Refactoring/RefactoringActionRules.h"
@@ -60,4 +60,4 @@ std::vector<std::unique_ptr<RefactoringAction>> createRefactoringActions();
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTION_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRule.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRule.h
index 57dffa945acc..388535a69b8b 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRule.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRule.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_RULE_H
-#define LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_RULE_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTIONRULE_H
+#define LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTIONRULE_H
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/Optional.h"
@@ -69,4 +69,4 @@ public:
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_RULE_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTIONRULE_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRuleRequirements.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRuleRequirements.h
index 6a6dd83731e9..49e4a0c149f1 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRuleRequirements.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRuleRequirements.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_RULE_REQUIREMENTS_H
-#define LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_RULE_REQUIREMENTS_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTIONRULEREQUIREMENTS_H
+#define LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTIONRULEREQUIREMENTS_H
#include "clang/Basic/LLVM.h"
#include "clang/Tooling/Refactoring/ASTSelection.h"
@@ -119,4 +119,4 @@ private:
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_RULE_REQUIREMENTS_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTIONRULEREQUIREMENTS_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRules.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRules.h
index e9606fd6018e..86fcc6ad0a79 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRules.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRules.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_RULES_H
-#define LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_RULES_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTIONRULES_H
+#define LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTIONRULES_H
#include "clang/Tooling/Refactoring/RefactoringActionRule.h"
#include "clang/Tooling/Refactoring/RefactoringActionRulesInternal.h"
@@ -90,4 +90,4 @@ private:
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_RULES_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTIONRULES_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRulesInternal.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRulesInternal.h
index fb373fcf5029..e6ebaea5248a 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRulesInternal.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRulesInternal.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_RULES_INTERNAL_H
-#define LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_RULES_INTERNAL_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTIONRULESINTERNAL_H
+#define LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTIONRULESINTERNAL_H
#include "clang/Basic/LLVM.h"
#include "clang/Tooling/Refactoring/RefactoringActionRule.h"
@@ -154,4 +154,4 @@ createRefactoringActionRule(const RequirementTypes &... Requirements) {
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_RULES_INTERNAL_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTIONRULESINTERNAL_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOption.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOption.h
index 659e02b48e5c..b022c5d61b03 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOption.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOption.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_OPTION_H
-#define LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_OPTION_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGOPTION_H
+#define LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGOPTION_H
#include "clang/Basic/LLVM.h"
#include <memory>
@@ -60,4 +60,4 @@ std::shared_ptr<OptionType> createRefactoringOption() {
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_OPTION_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGOPTION_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptionVisitor.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptionVisitor.h
index d58b11355a26..f9f85f6eeb82 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptionVisitor.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptionVisitor.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_OPTION_VISITOR_H
-#define LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_OPTION_VISITOR_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGOPTIONVISITOR_H
+#define LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGOPTIONVISITOR_H
#include "clang/Basic/LLVM.h"
#include <type_traits>
@@ -58,4 +58,4 @@ struct IsValidOptionType : internal::HasHandle<T>::Type {};
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_OPTION_VISITOR_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGOPTIONVISITOR_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptions.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptions.h
index 84122b111ee1..1575a136b11c 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptions.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_OPTIONS_H
-#define LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_OPTIONS_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGOPTIONS_H
+#define LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGOPTIONS_H
#include "clang/Basic/LLVM.h"
#include "clang/Tooling/Refactoring/RefactoringActionRuleRequirements.h"
@@ -54,4 +54,4 @@ public:
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_OPTIONS_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGOPTIONS_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringResultConsumer.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringResultConsumer.h
index 2035c02bc17a..016eff80ca7b 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringResultConsumer.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringResultConsumer.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_RESULT_CONSUMER_H
-#define LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_RESULT_CONSUMER_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGRESULTCONSUMER_H
+#define LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGRESULTCONSUMER_H
#include "clang/Basic/LLVM.h"
#include "clang/Tooling/Refactoring/AtomicChange.h"
@@ -48,4 +48,4 @@ private:
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_RESULT_CONSUMER_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGRESULTCONSUMER_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringRuleContext.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringRuleContext.h
index e0da9469deb5..7d97f811f024 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringRuleContext.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringRuleContext.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_RULE_CONTEXT_H
-#define LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_RULE_CONTEXT_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGRULECONTEXT_H
+#define LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGRULECONTEXT_H
#include "clang/Basic/DiagnosticError.h"
#include "clang/Basic/SourceManager.h"
@@ -86,4 +86,4 @@ private:
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_RULE_CONTEXT_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGRULECONTEXT_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/RenamingAction.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/RenamingAction.h
index b04bc3e2d202..43a8d56e4e71 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/RenamingAction.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/RenamingAction.h
@@ -11,8 +11,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_RENAME_RENAMING_ACTION_H
-#define LLVM_CLANG_TOOLING_REFACTOR_RENAME_RENAMING_ACTION_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_RENAME_RENAMINGACTION_H
+#define LLVM_CLANG_TOOLING_REFACTORING_RENAME_RENAMINGACTION_H
#include "clang/Tooling/Refactoring.h"
#include "clang/Tooling/Refactoring/AtomicChange.h"
@@ -23,7 +23,6 @@
namespace clang {
class ASTConsumer;
-class CompilerInstance;
namespace tooling {
@@ -120,4 +119,4 @@ private:
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_RENAME_RENAMING_ACTION_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_RENAME_RENAMINGACTION_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/SymbolName.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/SymbolName.h
index 9131a4565da7..6c28d40f3679 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/SymbolName.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/SymbolName.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_RENAME_SYMBOL_NAME_H
-#define LLVM_CLANG_TOOLING_REFACTOR_RENAME_SYMBOL_NAME_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_RENAME_SYMBOLNAME_H
+#define LLVM_CLANG_TOOLING_REFACTORING_RENAME_SYMBOLNAME_H
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/ArrayRef.h"
@@ -45,4 +45,4 @@ private:
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_RENAME_SYMBOL_NAME_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_RENAME_SYMBOLNAME_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/SymbolOccurrences.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/SymbolOccurrences.h
index c4bfaa9cc377..0ae023b8d4e4 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/SymbolOccurrences.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/SymbolOccurrences.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_RENAME_SYMBOL_OCCURRENCES_H
-#define LLVM_CLANG_TOOLING_REFACTOR_RENAME_SYMBOL_OCCURRENCES_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_RENAME_SYMBOLOCCURRENCES_H
+#define LLVM_CLANG_TOOLING_REFACTORING_RENAME_SYMBOLOCCURRENCES_H
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
@@ -88,4 +88,4 @@ using SymbolOccurrences = std::vector<SymbolOccurrence>;
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_RENAME_SYMBOL_OCCURRENCES_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_RENAME_SYMBOLOCCURRENCES_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRFinder.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRFinder.h
index 30f7f0a0008c..a7ffa8556888 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRFinder.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRFinder.h
@@ -12,8 +12,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_RENAME_USR_FINDER_H
-#define LLVM_CLANG_TOOLING_REFACTOR_RENAME_USR_FINDER_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_RENAME_USRFINDER_H
+#define LLVM_CLANG_TOOLING_REFACTORING_RENAME_USRFINDER_H
#include "clang/AST/AST.h"
#include "clang/AST/ASTContext.h"
@@ -46,4 +46,4 @@ std::string getUSRForDecl(const Decl *Decl);
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_RENAME_USR_FINDER_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_RENAME_USRFINDER_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRFindingAction.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRFindingAction.h
index 726987d9d46a..e81b5c2345c9 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRFindingAction.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRFindingAction.h
@@ -11,8 +11,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_RENAME_USR_FINDING_ACTION_H
-#define LLVM_CLANG_TOOLING_REFACTOR_RENAME_USR_FINDING_ACTION_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_RENAME_USRFINDINGACTION_H
+#define LLVM_CLANG_TOOLING_REFACTORING_RENAME_USRFINDINGACTION_H
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/ArrayRef.h"
@@ -23,7 +23,6 @@
namespace clang {
class ASTConsumer;
class ASTContext;
-class CompilerInstance;
class NamedDecl;
namespace tooling {
@@ -64,4 +63,4 @@ private:
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_RENAME_USR_FINDING_ACTION_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_RENAME_USRFINDINGACTION_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRLocFinder.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRLocFinder.h
index 7a7dd76c4238..c3ffb4421e00 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRLocFinder.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRLocFinder.h
@@ -12,8 +12,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_RENAME_USR_LOC_FINDER_H
-#define LLVM_CLANG_TOOLING_REFACTOR_RENAME_USR_LOC_FINDER_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_RENAME_USRLOCFINDER_H
+#define LLVM_CLANG_TOOLING_REFACTORING_RENAME_USRLOCFINDER_H
#include "clang/AST/AST.h"
#include "clang/Tooling/Core/Replacement.h"
@@ -49,4 +49,4 @@ SymbolOccurrences getOccurrencesOfUSRs(ArrayRef<std::string> USRs,
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_RENAME_USR_LOC_FINDER_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_RENAME_USRLOCFINDER_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/ReplacementsYaml.h b/contrib/llvm-project/clang/include/clang/Tooling/ReplacementsYaml.h
index 83e35d623255..838f87fd1978 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/ReplacementsYaml.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/ReplacementsYaml.h
@@ -30,8 +30,7 @@ template <> struct MappingTraits<clang::tooling::Replacement> {
/// Helper to (de)serialize a Replacement since we don't have direct
/// access to its data members.
struct NormalizedReplacement {
- NormalizedReplacement(const IO &)
- : FilePath(""), Offset(0), Length(0), ReplacementText("") {}
+ NormalizedReplacement(const IO &) : Offset(0), Length(0) {}
NormalizedReplacement(const IO &, const clang::tooling::Replacement &R)
: FilePath(R.getFilePath()), Offset(R.getOffset()),
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Syntax/BuildTree.h b/contrib/llvm-project/clang/include/clang/Tooling/Syntax/BuildTree.h
index 3c8dd8ceed09..d6235797fd7a 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Syntax/BuildTree.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Syntax/BuildTree.h
@@ -7,8 +7,8 @@
//===----------------------------------------------------------------------===//
// Functions to construct a syntax tree from an AST.
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_SYNTAX_TREE_H
-#define LLVM_CLANG_TOOLING_SYNTAX_TREE_H
+#ifndef LLVM_CLANG_TOOLING_SYNTAX_BUILDTREE_H
+#define LLVM_CLANG_TOOLING_SYNTAX_BUILDTREE_H
#include "clang/AST/Decl.h"
#include "clang/Basic/TokenKinds.h"
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Tree.h b/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Tree.h
index b92e92305417..2063c6b7d82a 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Tree.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Tree.h
@@ -18,8 +18,8 @@
// This is still work in progress and highly experimental, we leave room for
// ourselves to completely change the design and/or implementation.
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_SYNTAX_TREE_CASCADE_H
-#define LLVM_CLANG_TOOLING_SYNTAX_TREE_CASCADE_H
+#ifndef LLVM_CLANG_TOOLING_SYNTAX_TREE_H
+#define LLVM_CLANG_TOOLING_SYNTAX_TREE_H
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceLocation.h"
@@ -181,7 +181,10 @@ class Tree : public Node {
ChildIteratorBase() = default;
explicit ChildIteratorBase(NodeT *N) : N(N) {}
- bool operator==(const DerivedT &O) const { return O.N == N; }
+ friend bool operator==(const DerivedT &LHS, const DerivedT &RHS) {
+ return LHS.N == RHS.N;
+ }
+
NodeT &operator*() const { return *N; }
DerivedT &operator++() {
N = N->getNextSibling();
@@ -269,14 +272,6 @@ private:
Node *LastChild = nullptr;
};
-// Provide missing non_const == const overload.
-// iterator_facade_base requires == to be a member, but implicit conversions
-// don't work on the LHS of a member operator.
-inline bool operator==(const Tree::ConstChildIterator &A,
- const Tree::ConstChildIterator &B) {
- return A.operator==(B);
-}
-
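The switch from a member operator== to a hidden friend (and the removal of the extra non-member overload) rests on a general C++ rule: implicit conversions are considered for both operands of a non-member operator, but never for the implicit object argument of a member operator. A small self-contained illustration with made-up types, unrelated to the syntax-tree classes:

#include <cassert>

struct Meters {
  double V;
  Meters(double D) : V(D) {} // implicit conversion from double

  // Hidden friend: a non-member, so either operand may be converted. Both
  // `Meters == double` and `double == Meters` compile.
  friend bool operator==(const Meters &L, const Meters &R) { return L.V == R.V; }
};

struct MetersMember {
  double V;
  MetersMember(double D) : V(D) {}

  // Member operator: the left operand must already be a MetersMember, so
  // `double == MetersMember` does not compile before C++20's rewritten
  // comparison candidates.
  bool operator==(const MetersMember &R) const { return V == R.V; }
};

int main() {
  assert(Meters(2.0) == 2.0);
  assert(2.0 == Meters(2.0));       // conversion applies to the left side too
  assert(MetersMember(2.0) == 2.0);
  // assert(2.0 == MetersMember(2.0)); // error before C++20
  return 0;
}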
/// A list of Elements separated or terminated by a fixed token.
///
/// This type models the following grammar construct:
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Tooling.h b/contrib/llvm-project/clang/include/clang/Tooling/Tooling.h
index c9c6a2ffb7b3..b36f58ad3046 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Tooling.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Tooling.h
@@ -54,7 +54,6 @@ class CompilerInstance;
class CompilerInvocation;
class DiagnosticConsumer;
class DiagnosticsEngine;
-class SourceManager;
namespace driver {
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/MatchConsumer.h b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/MatchConsumer.h
index cb0a5f684b7d..fb57dabb0a6f 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/MatchConsumer.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/MatchConsumer.h
@@ -12,8 +12,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_TRANSFORMER_MATCH_CONSUMER_H_
-#define LLVM_CLANG_TOOLING_TRANSFORMER_MATCH_CONSUMER_H_
+#ifndef LLVM_CLANG_TOOLING_TRANSFORMER_MATCHCONSUMER_H
+#define LLVM_CLANG_TOOLING_TRANSFORMER_MATCHCONSUMER_H
#include "clang/AST/ASTTypeTraits.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
@@ -100,4 +100,4 @@ llvm::Expected<T> MatchComputation<T>::eval(
}
} // namespace transformer
} // namespace clang
-#endif // LLVM_CLANG_TOOLING_TRANSFORMER_MATCH_CONSUMER_H_
+#endif // LLVM_CLANG_TOOLING_TRANSFORMER_MATCHCONSUMER_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/Parsing.h b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/Parsing.h
index b143f63d8ca8..177eca6a044d 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/Parsing.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/Parsing.h
@@ -13,8 +13,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_PARSING_H_
-#define LLVM_CLANG_TOOLING_REFACTOR_PARSING_H_
+#ifndef LLVM_CLANG_TOOLING_TRANSFORMER_PARSING_H
+#define LLVM_CLANG_TOOLING_TRANSFORMER_PARSING_H
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/Basic/SourceLocation.h"
@@ -37,4 +37,4 @@ llvm::Expected<RangeSelector> parseRangeSelector(llvm::StringRef Input);
} // namespace transformer
} // namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_PARSING_H_
+#endif // LLVM_CLANG_TOOLING_TRANSFORMER_PARSING_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/RangeSelector.h b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/RangeSelector.h
index 38ec24efec65..1e288043f0a8 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/RangeSelector.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/RangeSelector.h
@@ -12,8 +12,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_RANGE_SELECTOR_H_
-#define LLVM_CLANG_TOOLING_REFACTOR_RANGE_SELECTOR_H_
+#ifndef LLVM_CLANG_TOOLING_TRANSFORMER_RANGESELECTOR_H
+#define LLVM_CLANG_TOOLING_TRANSFORMER_RANGESELECTOR_H
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/Basic/SourceLocation.h"
@@ -105,4 +105,4 @@ RangeSelector expansion(RangeSelector S);
} // namespace transformer
} // namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_RANGE_SELECTOR_H_
+#endif // LLVM_CLANG_TOOLING_TRANSFORMER_RANGESELECTOR_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/RewriteRule.h b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/RewriteRule.h
index ac93db8446df..6b14861e92d7 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/RewriteRule.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/RewriteRule.h
@@ -12,8 +12,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_TRANSFORMER_REWRITE_RULE_H_
-#define LLVM_CLANG_TOOLING_TRANSFORMER_REWRITE_RULE_H_
+#ifndef LLVM_CLANG_TOOLING_TRANSFORMER_REWRITERULE_H
+#define LLVM_CLANG_TOOLING_TRANSFORMER_REWRITERULE_H
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/ASTMatchers/ASTMatchers.h"
@@ -450,4 +450,4 @@ findSelectedCase(const ast_matchers::MatchFinder::MatchResult &Result,
} // namespace transformer
} // namespace clang
-#endif // LLVM_CLANG_TOOLING_TRANSFORMER_REWRITE_RULE_H_
+#endif // LLVM_CLANG_TOOLING_TRANSFORMER_REWRITERULE_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/SourceCode.h b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/SourceCode.h
index 2c7eb65371cf..16411b9c398d 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/SourceCode.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/SourceCode.h
@@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_TRANSFORMER_SOURCE_CODE_H
-#define LLVM_CLANG_TOOLING_TRANSFORMER_SOURCE_CODE_H
+#ifndef LLVM_CLANG_TOOLING_TRANSFORMER_SOURCECODE_H
+#define LLVM_CLANG_TOOLING_TRANSFORMER_SOURCECODE_H
#include "clang/AST/ASTContext.h"
#include "clang/Basic/SourceLocation.h"
@@ -100,4 +100,4 @@ getRangeForEdit(const CharSourceRange &EditRange, const ASTContext &Context) {
}
} // namespace tooling
} // namespace clang
-#endif // LLVM_CLANG_TOOLING_TRANSFORMER_SOURCE_CODE_H
+#endif // LLVM_CLANG_TOOLING_TRANSFORMER_SOURCECODE_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/SourceCodeBuilders.h b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/SourceCodeBuilders.h
index 6c79a7588f28..ab0eb71ef44e 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/SourceCodeBuilders.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/SourceCodeBuilders.h
@@ -11,8 +11,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_TRANSFORMER_SOURCE_CODE_BUILDERS_H_
-#define LLVM_CLANG_TOOLING_TRANSFORMER_SOURCE_CODE_BUILDERS_H_
+#ifndef LLVM_CLANG_TOOLING_TRANSFORMER_SOURCECODEBUILDERS_H
+#define LLVM_CLANG_TOOLING_TRANSFORMER_SOURCECODEBUILDERS_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
@@ -43,6 +43,15 @@ inline bool needParensBeforeDotOrArrow(const Expr &E) {
/// Determines whether printing this expression to the right of a unary operator
/// requires a parentheses to preserve its meaning.
bool needParensAfterUnaryOperator(const Expr &E);
+
+// Recognizes known types (and sugared versions thereof) that overload the `*`
+// and `->` operator. Below is the list of currently included types, but it is
+// subject to change:
+//
+// * std::unique_ptr, std::shared_ptr, std::weak_ptr,
+// * std::optional, absl::optional, llvm::Optional,
+// * absl::StatusOr, llvm::Expected.
+bool isKnownPointerLikeType(QualType Ty, ASTContext &Context);
/// @}
/// \name Basic code-string generation utilities.
@@ -69,6 +78,8 @@ llvm::Optional<std::string> buildAddressOf(const Expr &E,
/// `x` becomes `x.`
/// `*a` becomes `a->`
/// `a+b` becomes `(a+b).`
+///
+/// DEPRECATED. Use `buildAccess`.
llvm::Optional<std::string> buildDot(const Expr &E, const ASTContext &Context);
/// Adds an arrow to the end of the given expression, but adds parentheses
@@ -77,10 +88,34 @@ llvm::Optional<std::string> buildDot(const Expr &E, const ASTContext &Context);
/// `x` becomes `x->`
/// `&a` becomes `a.`
/// `a+b` becomes `(a+b)->`
+///
+/// DEPRECATED. Use `buildAccess`.
llvm::Optional<std::string> buildArrow(const Expr &E,
const ASTContext &Context);
+
+/// Specifies how to classify pointer-like types -- like values or like pointers
+/// -- with regard to generating member-access syntax.
+enum class PLTClass : bool {
+ Value,
+ Pointer,
+};
+
+/// Adds an appropriate access operator (`.`, `->` or nothing, in the case of
+/// implicit `this`) to the end of the given expression. Adds parentheses when
+/// needed by the syntax and simplifies when possible. If `PLTypeClass` is
+/// `Pointer`, for known pointer-like types (see `isKnownPointerLikeType`),
+/// treats `operator->` and `operator*` like the built-in `->` and `*`
+/// operators.
+///
+/// `x` becomes `x->` or `x.`, depending on `E`'s type
+/// `a+b` becomes `(a+b)->` or `(a+b).`, depending on `E`'s type
+/// `&a` becomes `a.`
+/// `*a` becomes `a->`
+llvm::Optional<std::string>
+buildAccess(const Expr &E, ASTContext &Context,
+ PLTClass Classification = PLTClass::Pointer);
/// @}
} // namespace tooling
} // namespace clang
-#endif // LLVM_CLANG_TOOLING_TRANSFORMER_SOURCE_CODE_BUILDERS_H_
+#endif // LLVM_CLANG_TOOLING_TRANSFORMER_SOURCECODEBUILDERS_H
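Since buildAccess is new API, a short usage sketch may help. It assumes a clang::Expr and ASTContext are already in scope (for example inside an AST matcher callback); buildMemberText is a hypothetical helper, not something declared in this header.

#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
#include "clang/Tooling/Transformer/SourceCodeBuilders.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"

#include <string>

// Hypothetical helper: produce the source text for accessing `Member` on `E`,
// letting buildAccess pick `.`, `->`, or nothing (implicit `this`) and add
// parentheses where the syntax needs them. With the default PLTClass::Pointer
// classification, known pointer-like types such as std::unique_ptr are treated
// like built-in pointers.
llvm::Optional<std::string> buildMemberText(const clang::Expr &E,
                                            clang::ASTContext &Context,
                                            llvm::StringRef Member) {
  llvm::Optional<std::string> Access = clang::tooling::buildAccess(E, Context);
  if (!Access)
    return llvm::None;
  return *Access + Member.str();
}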
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/Internals.h b/contrib/llvm-project/clang/lib/ARCMigrate/Internals.h
index ed0136e4867a..8b482738cc89 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/Internals.h
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/Internals.h
@@ -154,13 +154,11 @@ public:
std::vector<SourceLocation> &ARCMTMacroLocs;
Optional<bool> EnableCFBridgeFns;
- MigrationPass(ASTContext &Ctx, LangOptions::GCMode OrigGCMode,
- Sema &sema, TransformActions &TA,
- const CapturedDiagList &capturedDiags,
+ MigrationPass(ASTContext &Ctx, LangOptions::GCMode OrigGCMode, Sema &sema,
+ TransformActions &TA, const CapturedDiagList &capturedDiags,
std::vector<SourceLocation> &ARCMTMacroLocs)
- : Ctx(Ctx), OrigGCMode(OrigGCMode), MigOptions(),
- SemaRef(sema), TA(TA), CapturedDiags(capturedDiags),
- ARCMTMacroLocs(ARCMTMacroLocs) { }
+ : Ctx(Ctx), OrigGCMode(OrigGCMode), SemaRef(sema), TA(TA),
+ CapturedDiags(capturedDiags), ARCMTMacroLocs(ARCMTMacroLocs) {}
const CapturedDiagList &getDiags() const { return CapturedDiags; }
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/TransAutoreleasePool.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/TransAutoreleasePool.cpp
index 393adcd85a3f..47587d81850a 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/TransAutoreleasePool.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/TransAutoreleasePool.cpp
@@ -229,8 +229,9 @@ private:
bool IsFollowedBySimpleReturnStmt;
SmallVector<ObjCMessageExpr *, 4> Releases;
- PoolScope() : PoolVar(nullptr), CompoundParent(nullptr), Begin(), End(),
- IsFollowedBySimpleReturnStmt(false) { }
+ PoolScope()
+ : PoolVar(nullptr), CompoundParent(nullptr),
+ IsFollowedBySimpleReturnStmt(false) {}
SourceRange getIndentedRange() const {
Stmt::child_iterator rangeS = Begin;
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/Transforms.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/Transforms.cpp
index ca48160d9c85..a08b0d084bb6 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/Transforms.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/Transforms.cpp
@@ -417,7 +417,7 @@ bool MigrationContext::rewritePropertyAttribute(StringRef fromAttr,
if (tok.is(tok::r_paren))
return false;
- while (1) {
+ while (true) {
if (tok.isNot(tok::raw_identifier)) return false;
if (tok.getRawIdentifier() == fromAttr) {
if (!toAttr.empty()) {
diff --git a/contrib/llvm-project/clang/lib/AST/ASTContext.cpp b/contrib/llvm-project/clang/lib/AST/ASTContext.cpp
index 701260881a93..a1d5673950e1 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTContext.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTContext.cpp
@@ -6147,6 +6147,376 @@ bool ASTContext::hasSameTemplateName(TemplateName X, TemplateName Y) {
return X.getAsVoidPointer() == Y.getAsVoidPointer();
}
+bool ASTContext::isSameTemplateParameter(NamedDecl *X, NamedDecl *Y) {
+ if (X->getKind() != Y->getKind())
+ return false;
+
+ if (auto *TX = dyn_cast<TemplateTypeParmDecl>(X)) {
+ auto *TY = cast<TemplateTypeParmDecl>(Y);
+ if (TX->isParameterPack() != TY->isParameterPack())
+ return false;
+ if (TX->hasTypeConstraint() != TY->hasTypeConstraint())
+ return false;
+ const TypeConstraint *TXTC = TX->getTypeConstraint();
+ const TypeConstraint *TYTC = TY->getTypeConstraint();
+ if (!TXTC != !TYTC)
+ return false;
+ if (TXTC && TYTC) {
+ auto *NCX = TXTC->getNamedConcept();
+ auto *NCY = TYTC->getNamedConcept();
+ if (!NCX || !NCY || !isSameEntity(NCX, NCY))
+ return false;
+ if (TXTC->hasExplicitTemplateArgs() != TYTC->hasExplicitTemplateArgs())
+ return false;
+ if (TXTC->hasExplicitTemplateArgs()) {
+ auto *TXTCArgs = TXTC->getTemplateArgsAsWritten();
+ auto *TYTCArgs = TYTC->getTemplateArgsAsWritten();
+ if (TXTCArgs->NumTemplateArgs != TYTCArgs->NumTemplateArgs)
+ return false;
+ llvm::FoldingSetNodeID XID, YID;
+ for (auto &ArgLoc : TXTCArgs->arguments())
+ ArgLoc.getArgument().Profile(XID, X->getASTContext());
+ for (auto &ArgLoc : TYTCArgs->arguments())
+ ArgLoc.getArgument().Profile(YID, Y->getASTContext());
+ if (XID != YID)
+ return false;
+ }
+ }
+ return true;
+ }
+
+ if (auto *TX = dyn_cast<NonTypeTemplateParmDecl>(X)) {
+ auto *TY = cast<NonTypeTemplateParmDecl>(Y);
+ return TX->isParameterPack() == TY->isParameterPack() &&
+ TX->getASTContext().hasSameType(TX->getType(), TY->getType());
+ }
+
+ auto *TX = cast<TemplateTemplateParmDecl>(X);
+ auto *TY = cast<TemplateTemplateParmDecl>(Y);
+ return TX->isParameterPack() == TY->isParameterPack() &&
+ isSameTemplateParameterList(TX->getTemplateParameters(),
+ TY->getTemplateParameters());
+}
+
+bool ASTContext::isSameTemplateParameterList(TemplateParameterList *X,
+ TemplateParameterList *Y) {
+ if (X->size() != Y->size())
+ return false;
+
+ for (unsigned I = 0, N = X->size(); I != N; ++I)
+ if (!isSameTemplateParameter(X->getParam(I), Y->getParam(I)))
+ return false;
+
+ const Expr *XRC = X->getRequiresClause();
+ const Expr *YRC = Y->getRequiresClause();
+ if (!XRC != !YRC)
+ return false;
+ if (XRC) {
+ llvm::FoldingSetNodeID XRCID, YRCID;
+ XRC->Profile(XRCID, *this, /*Canonical=*/true);
+ YRC->Profile(YRCID, *this, /*Canonical=*/true);
+ if (XRCID != YRCID)
+ return false;
+ }
+
+ return true;
+}
+
+static NamespaceDecl *getNamespace(const NestedNameSpecifier *X) {
+ if (auto *NS = X->getAsNamespace())
+ return NS;
+ if (auto *NAS = X->getAsNamespaceAlias())
+ return NAS->getNamespace();
+ return nullptr;
+}
+
+static bool isSameQualifier(const NestedNameSpecifier *X,
+ const NestedNameSpecifier *Y) {
+ if (auto *NSX = getNamespace(X)) {
+ auto *NSY = getNamespace(Y);
+ if (!NSY || NSX->getCanonicalDecl() != NSY->getCanonicalDecl())
+ return false;
+ } else if (X->getKind() != Y->getKind())
+ return false;
+
+ // FIXME: For namespaces and types, we're permitted to check that the entity
+ // is named via the same tokens. We should probably do so.
+ switch (X->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ if (X->getAsIdentifier() != Y->getAsIdentifier())
+ return false;
+ break;
+ case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::NamespaceAlias:
+ // We've already checked that we named the same namespace.
+ break;
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ if (X->getAsType()->getCanonicalTypeInternal() !=
+ Y->getAsType()->getCanonicalTypeInternal())
+ return false;
+ break;
+ case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Super:
+ return true;
+ }
+
+ // Recurse into earlier portion of NNS, if any.
+ auto *PX = X->getPrefix();
+ auto *PY = Y->getPrefix();
+ if (PX && PY)
+ return isSameQualifier(PX, PY);
+ return !PX && !PY;
+}
+
+/// Determine whether the attributes we can overload on are identical for A and
+/// B. Will ignore any overloadable attrs represented in the type of A and B.
+static bool hasSameOverloadableAttrs(const FunctionDecl *A,
+ const FunctionDecl *B) {
+ // Note that pass_object_size attributes are represented in the function's
+ // ExtParameterInfo, so we don't need to check them here.
+
+ llvm::FoldingSetNodeID Cand1ID, Cand2ID;
+ auto AEnableIfAttrs = A->specific_attrs<EnableIfAttr>();
+ auto BEnableIfAttrs = B->specific_attrs<EnableIfAttr>();
+
+ for (auto Pair : zip_longest(AEnableIfAttrs, BEnableIfAttrs)) {
+ Optional<EnableIfAttr *> Cand1A = std::get<0>(Pair);
+ Optional<EnableIfAttr *> Cand2A = std::get<1>(Pair);
+
+ // Return false if the number of enable_if attributes is different.
+ if (!Cand1A || !Cand2A)
+ return false;
+
+ Cand1ID.clear();
+ Cand2ID.clear();
+
+ (*Cand1A)->getCond()->Profile(Cand1ID, A->getASTContext(), true);
+ (*Cand2A)->getCond()->Profile(Cand2ID, B->getASTContext(), true);
+
+ // Return false if any of the enable_if expressions of A and B are
+ // different.
+ if (Cand1ID != Cand2ID)
+ return false;
+ }
+ return true;
+}
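
A Clang-only sketch of the case hasSameOverloadableAttrs checks (enable_if is a Clang extension; the function name is hypothetical): redeclarations must carry token-for-token identical enable_if conditions to denote the same function.

// Compiles with Clang; illustration only.
int describe(int n) __attribute__((enable_if(n > 0, "n must be positive")));
int describe(int n) __attribute__((enable_if(n > 0, "n must be positive")));
// ^ redeclaration with an identical condition, so both declare the same entity.

int main() { return 0; }
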
+
+bool ASTContext::isSameEntity(NamedDecl *X, NamedDecl *Y) {
+ if (X == Y)
+ return true;
+
+ if (X->getDeclName() != Y->getDeclName())
+ return false;
+
+ // Must be in the same context.
+ //
+ // Note that we can't use DeclContext::Equals here, because the DeclContexts
+ // could be two different declarations of the same function. (We will fix the
+ // semantic DC to refer to the primary definition after merging.)
+ if (!declaresSameEntity(cast<Decl>(X->getDeclContext()->getRedeclContext()),
+ cast<Decl>(Y->getDeclContext()->getRedeclContext())))
+ return false;
+
+ // Two typedefs refer to the same entity if they have the same underlying
+ // type.
+ if (const auto *TypedefX = dyn_cast<TypedefNameDecl>(X))
+ if (const auto *TypedefY = dyn_cast<TypedefNameDecl>(Y))
+ return hasSameType(TypedefX->getUnderlyingType(),
+ TypedefY->getUnderlyingType());
+
+ // Must have the same kind.
+ if (X->getKind() != Y->getKind())
+ return false;
+
+ // Objective-C classes and protocols with the same name always match.
+ if (isa<ObjCInterfaceDecl>(X) || isa<ObjCProtocolDecl>(X))
+ return true;
+
+ if (isa<ClassTemplateSpecializationDecl>(X)) {
+ // No need to handle these here: we merge them when adding them to the
+ // template.
+ return false;
+ }
+
+ // Compatible tags match.
+ if (const auto *TagX = dyn_cast<TagDecl>(X)) {
+ const auto *TagY = cast<TagDecl>(Y);
+ return (TagX->getTagKind() == TagY->getTagKind()) ||
+ ((TagX->getTagKind() == TTK_Struct ||
+ TagX->getTagKind() == TTK_Class ||
+ TagX->getTagKind() == TTK_Interface) &&
+ (TagY->getTagKind() == TTK_Struct ||
+ TagY->getTagKind() == TTK_Class ||
+ TagY->getTagKind() == TTK_Interface));
+ }
+
+ // Functions with the same type and linkage match.
+ // FIXME: This needs to cope with merging of prototyped/non-prototyped
+ // functions, etc.
+ if (const auto *FuncX = dyn_cast<FunctionDecl>(X)) {
+ const auto *FuncY = cast<FunctionDecl>(Y);
+ if (const auto *CtorX = dyn_cast<CXXConstructorDecl>(X)) {
+ const auto *CtorY = cast<CXXConstructorDecl>(Y);
+ if (CtorX->getInheritedConstructor() &&
+ !isSameEntity(CtorX->getInheritedConstructor().getConstructor(),
+ CtorY->getInheritedConstructor().getConstructor()))
+ return false;
+ }
+
+ if (FuncX->isMultiVersion() != FuncY->isMultiVersion())
+ return false;
+
+ // Multiversioned functions with different feature strings are represented
+ // as separate declarations.
+ if (FuncX->isMultiVersion()) {
+ const auto *TAX = FuncX->getAttr<TargetAttr>();
+ const auto *TAY = FuncY->getAttr<TargetAttr>();
+ assert(TAX && TAY && "Multiversion Function without target attribute");
+
+ if (TAX->getFeaturesStr() != TAY->getFeaturesStr())
+ return false;
+ }
+
+ const Expr *XRC = FuncX->getTrailingRequiresClause();
+ const Expr *YRC = FuncY->getTrailingRequiresClause();
+ if (!XRC != !YRC)
+ return false;
+ if (XRC) {
+ llvm::FoldingSetNodeID XRCID, YRCID;
+ XRC->Profile(XRCID, *this, /*Canonical=*/true);
+ YRC->Profile(YRCID, *this, /*Canonical=*/true);
+ if (XRCID != YRCID)
+ return false;
+ }
+
+ auto GetTypeAsWritten = [](const FunctionDecl *FD) {
+ // Map to the first declaration that we've already merged into this one.
+ // The TSI of redeclarations might not match (due to calling conventions
+ // being inherited onto the type but not the TSI), but the TSI type of
+ // the first declaration of the function should match across modules.
+ FD = FD->getCanonicalDecl();
+ return FD->getTypeSourceInfo() ? FD->getTypeSourceInfo()->getType()
+ : FD->getType();
+ };
+ QualType XT = GetTypeAsWritten(FuncX), YT = GetTypeAsWritten(FuncY);
+ if (!hasSameType(XT, YT)) {
+ // We can get functions with different types on the redecl chain in C++17
+ // if they have differing exception specifications and at least one of
+      // the exception specs is unresolved.
+ auto *XFPT = XT->getAs<FunctionProtoType>();
+ auto *YFPT = YT->getAs<FunctionProtoType>();
+ if (getLangOpts().CPlusPlus17 && XFPT && YFPT &&
+ (isUnresolvedExceptionSpec(XFPT->getExceptionSpecType()) ||
+ isUnresolvedExceptionSpec(YFPT->getExceptionSpecType())) &&
+ hasSameFunctionTypeIgnoringExceptionSpec(XT, YT))
+ return true;
+ return false;
+ }
+
+ return FuncX->getLinkageInternal() == FuncY->getLinkageInternal() &&
+ hasSameOverloadableAttrs(FuncX, FuncY);
+ }
+
+ // Variables with the same type and linkage match.
+ if (const auto *VarX = dyn_cast<VarDecl>(X)) {
+ const auto *VarY = cast<VarDecl>(Y);
+ if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) {
+ if (hasSameType(VarX->getType(), VarY->getType()))
+ return true;
+
+ // We can get decls with different types on the redecl chain. Eg.
+ // template <typename T> struct S { static T Var[]; }; // #1
+ // template <typename T> T S<T>::Var[sizeof(T)]; // #2
+      // This only happens when completing an incomplete array type. In this
+      // case, when comparing #1 and #2, we should compare their element types.
+ const ArrayType *VarXTy = getAsArrayType(VarX->getType());
+ const ArrayType *VarYTy = getAsArrayType(VarY->getType());
+ if (!VarXTy || !VarYTy)
+ return false;
+ if (VarXTy->isIncompleteArrayType() || VarYTy->isIncompleteArrayType())
+ return hasSameType(VarXTy->getElementType(), VarYTy->getElementType());
+ }
+ return false;
+ }
+
+ // Namespaces with the same name and inlinedness match.
+ if (const auto *NamespaceX = dyn_cast<NamespaceDecl>(X)) {
+ const auto *NamespaceY = cast<NamespaceDecl>(Y);
+ return NamespaceX->isInline() == NamespaceY->isInline();
+ }
+
+ // Identical template names and kinds match if their template parameter lists
+ // and patterns match.
+ if (const auto *TemplateX = dyn_cast<TemplateDecl>(X)) {
+ const auto *TemplateY = cast<TemplateDecl>(Y);
+ return isSameEntity(TemplateX->getTemplatedDecl(),
+ TemplateY->getTemplatedDecl()) &&
+ isSameTemplateParameterList(TemplateX->getTemplateParameters(),
+ TemplateY->getTemplateParameters());
+ }
+
+ // Fields with the same name and the same type match.
+ if (const auto *FDX = dyn_cast<FieldDecl>(X)) {
+ const auto *FDY = cast<FieldDecl>(Y);
+ // FIXME: Also check the bitwidth is odr-equivalent, if any.
+ return hasSameType(FDX->getType(), FDY->getType());
+ }
+
+ // Indirect fields with the same target field match.
+ if (const auto *IFDX = dyn_cast<IndirectFieldDecl>(X)) {
+ const auto *IFDY = cast<IndirectFieldDecl>(Y);
+ return IFDX->getAnonField()->getCanonicalDecl() ==
+ IFDY->getAnonField()->getCanonicalDecl();
+ }
+
+ // Enumerators with the same name match.
+ if (isa<EnumConstantDecl>(X))
+ // FIXME: Also check the value is odr-equivalent.
+ return true;
+
+ // Using shadow declarations with the same target match.
+ if (const auto *USX = dyn_cast<UsingShadowDecl>(X)) {
+ const auto *USY = cast<UsingShadowDecl>(Y);
+ return USX->getTargetDecl() == USY->getTargetDecl();
+ }
+
+ // Using declarations with the same qualifier match. (We already know that
+ // the name matches.)
+ if (const auto *UX = dyn_cast<UsingDecl>(X)) {
+ const auto *UY = cast<UsingDecl>(Y);
+ return isSameQualifier(UX->getQualifier(), UY->getQualifier()) &&
+ UX->hasTypename() == UY->hasTypename() &&
+ UX->isAccessDeclaration() == UY->isAccessDeclaration();
+ }
+ if (const auto *UX = dyn_cast<UnresolvedUsingValueDecl>(X)) {
+ const auto *UY = cast<UnresolvedUsingValueDecl>(Y);
+ return isSameQualifier(UX->getQualifier(), UY->getQualifier()) &&
+ UX->isAccessDeclaration() == UY->isAccessDeclaration();
+ }
+ if (const auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(X)) {
+ return isSameQualifier(
+ UX->getQualifier(),
+ cast<UnresolvedUsingTypenameDecl>(Y)->getQualifier());
+ }
+
+ // Using-pack declarations are only created by instantiation, and match if
+ // they're instantiated from matching UnresolvedUsing...Decls.
+ if (const auto *UX = dyn_cast<UsingPackDecl>(X)) {
+ return declaresSameEntity(
+ UX->getInstantiatedFromUsingDecl(),
+ cast<UsingPackDecl>(Y)->getInstantiatedFromUsingDecl());
+ }
+
+ // Namespace alias definitions with the same target match.
+ if (const auto *NAX = dyn_cast<NamespaceAliasDecl>(X)) {
+ const auto *NAY = cast<NamespaceAliasDecl>(Y);
+ return NAX->getNamespace()->Equals(NAY->getNamespace());
+ }
+
+ return false;
+}
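
To make the C++17 exception-specification case above concrete, a loose standalone illustration (hypothetical names): a dependent noexcept stays unresolved on a template's redeclarations until instantiation, which is the situation in which the redecl chain may briefly carry differing function types.

template <typename T>
void reset(T &v) noexcept(noexcept(v = T{}));     // declaration

template <typename T>
void reset(T &v) noexcept(noexcept(v = T{})) {    // definition; the spec is
  v = T{};                                        // resolved only on instantiation
}

int main() {
  int x = 5;
  reset(x);          // instantiation resolves the noexcept to true
  return x;          // exits 0
}
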
+
TemplateArgument
ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
switch (Arg.getKind()) {
@@ -8474,8 +8844,8 @@ static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) {
FieldDecl *Field = FieldDecl::Create(
const_cast<ASTContext &>(*Context), VaListTagDecl, SourceLocation(),
SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i],
- /*TInfo=*/0,
- /*BitWidth=*/0,
+ /*TInfo=*/nullptr,
+ /*BitWidth=*/nullptr,
/*Mutable=*/false, ICIS_NoInit);
Field->setAccess(AS_public);
VaListTagDecl->addDecl(Field);
@@ -9817,12 +10187,13 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
// designates the object or function denoted by the reference, and the
// expression is an lvalue unless the reference is an rvalue reference and
// the expression is a function call (possibly inside parentheses).
- if (LangOpts.OpenMP && LHS->getAs<ReferenceType>() &&
- RHS->getAs<ReferenceType>() && LHS->getTypeClass() == RHS->getTypeClass())
- return mergeTypes(LHS->getAs<ReferenceType>()->getPointeeType(),
- RHS->getAs<ReferenceType>()->getPointeeType(),
+ auto *LHSRefTy = LHS->getAs<ReferenceType>();
+ auto *RHSRefTy = RHS->getAs<ReferenceType>();
+ if (LangOpts.OpenMP && LHSRefTy && RHSRefTy &&
+ LHS->getTypeClass() == RHS->getTypeClass())
+ return mergeTypes(LHSRefTy->getPointeeType(), RHSRefTy->getPointeeType(),
OfBlockPointer, Unqualified, BlockReturnType);
- if (LHS->getAs<ReferenceType>() || RHS->getAs<ReferenceType>())
+ if (LHSRefTy || RHSRefTy)
return {};
if (Unqualified) {
@@ -10309,7 +10680,7 @@ QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
// For _BitInt, return an unsigned _BitInt with same width.
if (const auto *EITy = T->getAs<BitIntType>())
- return getBitIntType(/*IsUnsigned=*/true, EITy->getNumBits());
+ return getBitIntType(/*Unsigned=*/true, EITy->getNumBits());
// For enums, get the underlying integer type of the enum, and let the general
// integer type signchanging code handle it.
@@ -10377,7 +10748,7 @@ QualType ASTContext::getCorrespondingSignedType(QualType T) const {
// For _BitInt, return a signed _BitInt with same width.
if (const auto *EITy = T->getAs<BitIntType>())
- return getBitIntType(/*IsUnsigned=*/false, EITy->getNumBits());
+ return getBitIntType(/*Unsigned=*/false, EITy->getNumBits());
// For enums, get the underlying integer type of the enum, and let the general
// integer type signchanging code handle it.
@@ -11572,6 +11943,15 @@ uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const {
return getTargetInfo().getNullPointerValue(AS);
}
+unsigned ASTContext::getTargetAddressSpace(QualType T) const {
+ return T->isFunctionType() ? getTargetInfo().getProgramAddressSpace()
+ : getTargetAddressSpace(T.getQualifiers());
+}
+
+unsigned ASTContext::getTargetAddressSpace(Qualifiers Q) const {
+ return getTargetAddressSpace(Q.getAddressSpace());
+}
+
unsigned ASTContext::getTargetAddressSpace(LangAS AS) const {
if (isTargetAddressSpace(AS))
return toTargetAddressSpace(AS);
diff --git a/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp b/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp
index 7f78da10e0b3..457465e87d93 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp
@@ -8409,8 +8409,8 @@ Expected<TypeSourceInfo *> ASTImporter::Import(TypeSourceInfo *FromTSI) {
// and destructed after it is created. The construction already performs the
// import of the data.
template <typename T> struct AttrArgImporter {
- AttrArgImporter<T>(const AttrArgImporter<T> &) = delete;
- AttrArgImporter<T>(AttrArgImporter<T> &&) = default;
+ AttrArgImporter(const AttrArgImporter<T> &) = delete;
+ AttrArgImporter(AttrArgImporter<T> &&) = default;
AttrArgImporter<T> &operator=(const AttrArgImporter<T> &) = delete;
AttrArgImporter<T> &operator=(AttrArgImporter<T> &&) = default;
@@ -8429,8 +8429,8 @@ private:
// is used by the attribute classes. This object should be created once for the
// array data to be imported (the array size is not imported, just copied).
template <typename T> struct AttrArgArrayImporter {
- AttrArgArrayImporter<T>(const AttrArgArrayImporter<T> &) = delete;
- AttrArgArrayImporter<T>(AttrArgArrayImporter<T> &&) = default;
+ AttrArgArrayImporter(const AttrArgArrayImporter<T> &) = delete;
+ AttrArgArrayImporter(AttrArgArrayImporter<T> &&) = default;
AttrArgArrayImporter<T> &operator=(const AttrArgArrayImporter<T> &) = delete;
AttrArgArrayImporter<T> &operator=(AttrArgArrayImporter<T> &&) = default;
diff --git a/contrib/llvm-project/clang/lib/AST/AttrImpl.cpp b/contrib/llvm-project/clang/lib/AST/AttrImpl.cpp
index c2f13cf63830..7b8acfcd92be 100644
--- a/contrib/llvm-project/clang/lib/AST/AttrImpl.cpp
+++ b/contrib/llvm-project/clang/lib/AST/AttrImpl.cpp
@@ -139,6 +139,13 @@ void OMPDeclareTargetDeclAttr::printPrettyPragma(
OS << " device_type(" << ConvertDevTypeTyToStr(getDevType()) << ")";
if (getMapType() != MT_To)
OS << ' ' << ConvertMapTypeTyToStr(getMapType());
+ if (Expr *E = getIndirectExpr()) {
+ OS << " indirect(";
+ E->printPretty(OS, nullptr, Policy);
+ OS << ")";
+ } else if (getIndirect()) {
+ OS << " indirect";
+ }
}
llvm::Optional<OMPDeclareTargetDeclAttr *>
diff --git a/contrib/llvm-project/clang/lib/AST/CXXABI.h b/contrib/llvm-project/clang/lib/AST/CXXABI.h
index ca9424bcb7a4..9258a53fefeb 100644
--- a/contrib/llvm-project/clang/lib/AST/CXXABI.h
+++ b/contrib/llvm-project/clang/lib/AST/CXXABI.h
@@ -21,7 +21,6 @@ namespace clang {
class ASTContext;
class CXXConstructorDecl;
class DeclaratorDecl;
-class Expr;
class MangleContext;
class MangleNumberingContext;
class MemberPointerType;
diff --git a/contrib/llvm-project/clang/lib/AST/CommentLexer.cpp b/contrib/llvm-project/clang/lib/AST/CommentLexer.cpp
index 93531c06192d..61ce8979f13f 100644
--- a/contrib/llvm-project/clang/lib/AST/CommentLexer.cpp
+++ b/contrib/llvm-project/clang/lib/AST/CommentLexer.cpp
@@ -94,31 +94,12 @@ void Lexer::skipLineStartingDecorations() {
if (BufferPtr == CommentEnd)
return;
- switch (*BufferPtr) {
- case ' ':
- case '\t':
- case '\f':
- case '\v': {
- const char *NewBufferPtr = BufferPtr;
- NewBufferPtr++;
- if (NewBufferPtr == CommentEnd)
+ const char *NewBufferPtr = BufferPtr;
+ while (isHorizontalWhitespace(*NewBufferPtr))
+ if (++NewBufferPtr == CommentEnd)
return;
-
- char C = *NewBufferPtr;
- while (isHorizontalWhitespace(C)) {
- NewBufferPtr++;
- if (NewBufferPtr == CommentEnd)
- return;
- C = *NewBufferPtr;
- }
- if (C == '*')
- BufferPtr = NewBufferPtr + 1;
- break;
- }
- case '*':
- BufferPtr++;
- break;
- }
+ if (*NewBufferPtr == '*')
+ BufferPtr = NewBufferPtr + 1;
}
namespace {
@@ -289,6 +270,29 @@ void Lexer::formTokenWithChars(Token &Result, const char *TokEnd,
BufferPtr = TokEnd;
}
+const char *Lexer::skipTextToken() {
+ const char *TokenPtr = BufferPtr;
+ assert(TokenPtr < CommentEnd);
+ StringRef TokStartSymbols = ParseCommands ? "\n\r\\@\"&<" : "\n\r";
+
+again:
+ size_t End =
+ StringRef(TokenPtr, CommentEnd - TokenPtr).find_first_of(TokStartSymbols);
+ if (End == StringRef::npos)
+ return CommentEnd;
+
+ // Doxygen doesn't recognize any commands in a one-line double quotation.
+ // If we don't find an ending quotation mark, we pretend it never began.
+ if (*(TokenPtr + End) == '\"') {
+ TokenPtr += End + 1;
+ End = StringRef(TokenPtr, CommentEnd - TokenPtr).find_first_of("\n\r\"");
+ if (End != StringRef::npos && *(TokenPtr + End) == '\"')
+ TokenPtr += End + 1;
+ goto again;
+ }
+ return TokenPtr + End;
+}
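
A doc-comment sketch (hypothetical API) of the quotation rule skipTextToken implements: command characters inside a one-line double-quoted span are lexed as plain text, while commands outside the quotes are still recognized.

/// Writes the literal pattern "%d @ %s" to the log; the '@' sits inside a
/// one-line quotation, so it starts no command, whereas \p fmt below is still
/// lexed as an inline command.
/// \p fmt must stay valid for the duration of the call.
void logf(const char *fmt, ...);

int main() { return 0; }
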
+
void Lexer::lexCommentText(Token &T) {
assert(CommentState == LCS_InsideBCPLComment ||
CommentState == LCS_InsideCComment);
@@ -309,17 +313,8 @@ void Lexer::lexCommentText(Token &T) {
skipLineStartingDecorations();
return;
- default: {
- StringRef TokStartSymbols = ParseCommands ? "\n\r\\@&<" : "\n\r";
- size_t End = StringRef(TokenPtr, CommentEnd - TokenPtr)
- .find_first_of(TokStartSymbols);
- if (End != StringRef::npos)
- TokenPtr += End;
- else
- TokenPtr = CommentEnd;
- formTextToken(T, TokenPtr);
- return;
- }
+ default:
+ return formTextToken(T, skipTextToken());
}
};
diff --git a/contrib/llvm-project/clang/lib/AST/Decl.cpp b/contrib/llvm-project/clang/lib/AST/Decl.cpp
index 94a644cbe8e5..e6ce5a80dacd 100644
--- a/contrib/llvm-project/clang/lib/AST/Decl.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Decl.cpp
@@ -786,6 +786,7 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
//
// Note that we don't want to make the variable non-external
// because of this, but unique-external linkage suits us.
+
if (Context.getLangOpts().CPlusPlus && !isFirstInExternCContext(Var) &&
!IgnoreVarTypeLinkage) {
LinkageInfo TypeLV = getLVForType(*Var->getType(), computation);
@@ -912,10 +913,6 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
if (!isExternallyVisible(LV.getLinkage()))
return LinkageInfo(LV.getLinkage(), DefaultVisibility, false);
- // Mark the symbols as hidden when compiling for the device.
- if (Context.getLangOpts().OpenMP && Context.getLangOpts().OpenMPIsDevice)
- LV.mergeVisibility(HiddenVisibility, /*newExplicit=*/false);
-
return LV;
}
@@ -1069,6 +1066,7 @@ LinkageComputer::getLVForClassMember(const NamedDecl *D,
// Finally, merge in information from the class.
LV.mergeMaybeWithVisibility(classLV, considerClassVisibility);
+
return LV;
}
@@ -3236,7 +3234,6 @@ bool FunctionDecl::isGlobal() const {
if (const auto *Namespace = cast<NamespaceDecl>(DC)) {
if (!Namespace->getDeclName())
return false;
- break;
}
}
diff --git a/contrib/llvm-project/clang/lib/AST/DeclBase.cpp b/contrib/llvm-project/clang/lib/AST/DeclBase.cpp
index 064012ba865c..9ee1cc083086 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclBase.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclBase.cpp
@@ -995,6 +995,15 @@ bool Decl::AccessDeclContextCheck() const {
return true;
}
+bool Decl::isInExportDeclContext() const {
+ const DeclContext *DC = getLexicalDeclContext();
+
+ while (DC && !isa<ExportDecl>(DC))
+ DC = DC->getLexicalParent();
+
+ return DC && isa<ExportDecl>(DC);
+}
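
For context, a C++20 modules sketch (hypothetical module name; it builds only as a module interface unit, so treat it as an illustration) of declarations for which the new Decl::isInExportDeclContext() returns true.

export module demo;                        // module interface unit

export int answer() { return 42; }         // lexically inside an ExportDecl

export namespace demo_ns {
  int nested() { return answer(); }        // the enclosing namespace is exported
}
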
+
static Decl::Kind getKind(const Decl *D) { return D->getKind(); }
static Decl::Kind getKind(const DeclContext *DC) { return DC->getDeclKind(); }
@@ -1212,7 +1221,8 @@ bool DeclContext::Encloses(const DeclContext *DC) const {
return getPrimaryContext()->Encloses(DC);
for (; DC; DC = DC->getParent())
- if (!isa<LinkageSpecDecl>(DC) && DC->getPrimaryContext() == this)
+ if (!isa<LinkageSpecDecl>(DC) && !isa<ExportDecl>(DC) &&
+ DC->getPrimaryContext() == this)
return true;
return false;
}
@@ -1643,9 +1653,9 @@ void DeclContext::buildLookupImpl(DeclContext *DCtx, bool Internal) {
DeclContext::lookup_result
DeclContext::lookup(DeclarationName Name) const {
- assert(getDeclKind() != Decl::LinkageSpec &&
- getDeclKind() != Decl::Export &&
- "should not perform lookups into transparent contexts");
+  // For a transparent DeclContext, look up in its enclosing context instead.
+ if (getDeclKind() == Decl::LinkageSpec || getDeclKind() == Decl::Export)
+ return getParent()->lookup(Name);
const DeclContext *PrimaryContext = getPrimaryContext();
if (PrimaryContext != this)
diff --git a/contrib/llvm-project/clang/lib/AST/DeclCXX.cpp b/contrib/llvm-project/clang/lib/AST/DeclCXX.cpp
index 1780358cc348..0cf6e60b2a6c 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclCXX.cpp
@@ -79,10 +79,9 @@ CXXRecordDecl::DefinitionData::DefinitionData(CXXRecordDecl *D)
HasBasesWithFields(false), HasBasesWithNonStaticDataMembers(false),
HasPrivateFields(false), HasProtectedFields(false),
HasPublicFields(false), HasMutableFields(false), HasVariantMembers(false),
- HasOnlyCMembers(true), HasInClassInitializer(false),
+ HasOnlyCMembers(true), HasInitMethod(false), HasInClassInitializer(false),
HasUninitializedReferenceMember(false), HasUninitializedFields(false),
- HasInheritedConstructor(false),
- HasInheritedDefaultConstructor(false),
+ HasInheritedConstructor(false), HasInheritedDefaultConstructor(false),
HasInheritedAssignment(false),
NeedOverloadResolutionForCopyConstructor(false),
NeedOverloadResolutionForMoveConstructor(false),
@@ -3272,7 +3271,7 @@ void MSGuidDecl::anchor() {}
MSGuidDecl::MSGuidDecl(DeclContext *DC, QualType T, Parts P)
: ValueDecl(Decl::MSGuid, DC, SourceLocation(), DeclarationName(), T),
- PartVal(P), APVal() {}
+ PartVal(P) {}
MSGuidDecl *MSGuidDecl::Create(const ASTContext &C, QualType T, Parts P) {
DeclContext *DC = C.getTranslationUnitDecl();
diff --git a/contrib/llvm-project/clang/lib/AST/DeclObjC.cpp b/contrib/llvm-project/clang/lib/AST/DeclObjC.cpp
index ba827a79c022..f15dd78929e2 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclObjC.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclObjC.cpp
@@ -232,6 +232,18 @@ ObjCPropertyDecl::getDefaultSynthIvarName(ASTContext &Ctx) const {
return &Ctx.Idents.get(ivarName.str());
}
+ObjCPropertyDecl *ObjCContainerDecl::getProperty(const IdentifierInfo *Id,
+ bool IsInstance) const {
+ for (auto *LookupResult : lookup(Id)) {
+ if (auto *Prop = dyn_cast<ObjCPropertyDecl>(LookupResult)) {
+ if (Prop->isInstanceProperty() == IsInstance) {
+ return Prop;
+ }
+ }
+ }
+ return nullptr;
+}
+
/// FindPropertyDeclaration - Finds declaration of the property given its name
/// in 'PropertyId' and returns it. It returns 0, if not found.
ObjCPropertyDecl *ObjCContainerDecl::FindPropertyDeclaration(
diff --git a/contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp b/contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp
index 044eb8f8f8e5..c3f1d1544f79 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp
@@ -588,7 +588,7 @@ static void printExplicitSpecifier(ExplicitSpecifier ES, llvm::raw_ostream &Out,
}
EOut << " ";
EOut.flush();
- Out << EOut.str();
+ Out << Proto;
}
void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
@@ -731,7 +731,6 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
FT->getNoexceptExpr()->printPretty(EOut, nullptr, SubPolicy,
Indentation, "\n", &Context);
EOut.flush();
- Proto += EOut.str();
Proto += ")";
}
}
@@ -885,7 +884,10 @@ void DeclPrinter::VisitVarDecl(VarDecl *D) {
}
}
- printDeclType(T, D->getName());
+ printDeclType(T, (isa<ParmVarDecl>(D) && Policy.CleanUglifiedParameters &&
+ D->getIdentifier())
+ ? D->getIdentifier()->deuglifiedName()
+ : D->getName());
Expr *Init = D->getInit();
if (!Policy.SuppressInitializers && Init) {
bool ImplicitInit = false;
@@ -1131,8 +1133,12 @@ void DeclPrinter::VisitTemplateDecl(const TemplateDecl *D) {
else if (TTP->getDeclName())
Out << ' ';
- if (TTP->getDeclName())
- Out << TTP->getDeclName();
+ if (TTP->getDeclName()) {
+ if (Policy.CleanUglifiedParameters && TTP->getIdentifier())
+ Out << TTP->getIdentifier()->deuglifiedName();
+ else
+ Out << TTP->getDeclName();
+ }
} else if (auto *TD = D->getTemplatedDecl())
Visit(TD);
else if (const auto *Concept = dyn_cast<ConceptDecl>(D)) {
@@ -1742,8 +1748,12 @@ void DeclPrinter::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *TTP) {
else if (TTP->getDeclName())
Out << ' ';
- if (TTP->getDeclName())
- Out << TTP->getDeclName();
+ if (TTP->getDeclName()) {
+ if (Policy.CleanUglifiedParameters && TTP->getIdentifier())
+ Out << TTP->getIdentifier()->deuglifiedName();
+ else
+ Out << TTP->getDeclName();
+ }
if (TTP->hasDefaultArgument()) {
Out << " = ";
@@ -1755,7 +1765,8 @@ void DeclPrinter::VisitNonTypeTemplateParmDecl(
const NonTypeTemplateParmDecl *NTTP) {
StringRef Name;
if (IdentifierInfo *II = NTTP->getIdentifier())
- Name = II->getName();
+ Name =
+ Policy.CleanUglifiedParameters ? II->deuglifiedName() : II->getName();
printDeclType(NTTP->getType(), Name, NTTP->isParameterPack());
if (NTTP->hasDefaultArgument()) {
diff --git a/contrib/llvm-project/clang/lib/AST/Expr.cpp b/contrib/llvm-project/clang/lib/AST/Expr.cpp
index 2530beb89d17..45e94847caee 100644
--- a/contrib/llvm-project/clang/lib/AST/Expr.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Expr.cpp
@@ -1281,7 +1281,7 @@ StringLiteral::getLocationOfByte(unsigned ByteNo, const SourceManager &SM,
StringOffset = *StartTokenByteOffset;
ByteNo -= StringOffset;
}
- while (1) {
+ while (true) {
assert(TokNo < getNumConcatenated() && "Invalid byte number!");
SourceLocation StrTokLoc = getStrTokenLoc(TokNo);
diff --git a/contrib/llvm-project/clang/lib/AST/ExprConcepts.cpp b/contrib/llvm-project/clang/lib/AST/ExprConcepts.cpp
index 8cb8625e2a1a..c17453fb45fb 100644
--- a/contrib/llvm-project/clang/lib/AST/ExprConcepts.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExprConcepts.cpp
@@ -57,9 +57,9 @@ ConceptSpecializationExpr::ConceptSpecializationExpr(
}
ConceptSpecializationExpr::ConceptSpecializationExpr(EmptyShell Empty,
- unsigned NumTemplateArgs)
- : Expr(ConceptSpecializationExprClass, Empty), ConceptReference(),
- NumTemplateArgs(NumTemplateArgs) { }
+ unsigned NumTemplateArgs)
+ : Expr(ConceptSpecializationExprClass, Empty),
+ NumTemplateArgs(NumTemplateArgs) {}
void ConceptSpecializationExpr::setTemplateArguments(
ArrayRef<TemplateArgument> Converted) {
diff --git a/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp b/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp
index 6f979d3264bb..40d0bd44b5d9 100644
--- a/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp
@@ -1706,8 +1706,8 @@ namespace {
struct MemberPtr {
MemberPtr() {}
- explicit MemberPtr(const ValueDecl *Decl) :
- DeclAndIsDerivedMember(Decl, false), Path() {}
+ explicit MemberPtr(const ValueDecl *Decl)
+ : DeclAndIsDerivedMember(Decl, false) {}
/// The member or (direct or indirect) field referred to by this member
/// pointer, or 0 if this is a null member pointer.
@@ -4936,8 +4936,13 @@ static EvalStmtResult EvaluateSwitch(StmtResult &Result, EvalInfo &Info,
if (SS->getConditionVariable() &&
!EvaluateDecl(Info, SS->getConditionVariable()))
return ESR_Failed;
- if (!EvaluateInteger(SS->getCond(), Value, Info))
- return ESR_Failed;
+ if (SS->getCond()->isValueDependent()) {
+ if (!EvaluateDependentExpr(SS->getCond(), Info))
+ return ESR_Failed;
+ } else {
+ if (!EvaluateInteger(SS->getCond(), Value, Info))
+ return ESR_Failed;
+ }
if (!CondScope.destroy())
return ESR_Failed;
}
@@ -6040,7 +6045,7 @@ static bool EvaluateArgs(ArrayRef<const Expr *> Args, CallRef Call,
unsigned ASTIdx = Idx.getASTIndex();
if (ASTIdx >= Args.size())
continue;
- ForbiddenNullArgs[ASTIdx] = 1;
+ ForbiddenNullArgs[ASTIdx] = true;
}
}
}
@@ -10437,6 +10442,15 @@ bool VectorExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
if (!Evaluate(SubExprValue, Info, SubExpr))
return false;
+ // FIXME: This vector evaluator someday needs to be changed to be LValue
+ // aware/keep LValue information around, rather than dealing with just vector
+ // types directly. Until then, we cannot handle cases where the operand to
+  // these unary operators is an LValue. The only case I've seen that causes
+  // this is operator++ assigning to a member expression (only valid in
+ // altivec compilations) in C mode, so this shouldn't limit us too much.
+ if (SubExprValue.isLValue())
+ return false;
+
assert(SubExprValue.getVectorLength() == VD->getNumElements() &&
"Vector length doesn't match type?");
@@ -10683,28 +10697,55 @@ bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E,
bool HadZeroInit = Value->hasValue();
if (const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(Type)) {
- unsigned N = CAT->getSize().getZExtValue();
+ unsigned FinalSize = CAT->getSize().getZExtValue();
// Preserve the array filler if we had prior zero-initialization.
APValue Filler =
HadZeroInit && Value->hasArrayFiller() ? Value->getArrayFiller()
: APValue();
- *Value = APValue(APValue::UninitArray(), N, N);
-
- if (HadZeroInit)
- for (unsigned I = 0; I != N; ++I)
- Value->getArrayInitializedElt(I) = Filler;
+ *Value = APValue(APValue::UninitArray(), 0, FinalSize);
+ if (FinalSize == 0)
+ return true;
- // Initialize the elements.
LValue ArrayElt = Subobject;
ArrayElt.addArray(Info, E, CAT);
- for (unsigned I = 0; I != N; ++I)
- if (!VisitCXXConstructExpr(E, ArrayElt, &Value->getArrayInitializedElt(I),
- CAT->getElementType()) ||
- !HandleLValueArrayAdjustment(Info, E, ArrayElt, CAT->getElementType(),
- 1))
- return false;
+  // We do the whole initialization in two passes: first for just one element,
+  // then for the whole array. We may find out in the first pass that constant
+  // initialization is impossible, in which case we avoid allocating a
+  // potentially large array. We don't do more passes because expanding the
+  // array requires copying the data, which is wasteful.
+ for (const unsigned N : {1u, FinalSize}) {
+ unsigned OldElts = Value->getArrayInitializedElts();
+ if (OldElts == N)
+ break;
+
+ // Expand the array to appropriate size.
+ APValue NewValue(APValue::UninitArray(), N, FinalSize);
+ for (unsigned I = 0; I < OldElts; ++I)
+ NewValue.getArrayInitializedElt(I).swap(
+ Value->getArrayInitializedElt(I));
+ Value->swap(NewValue);
+
+ if (HadZeroInit)
+ for (unsigned I = OldElts; I < N; ++I)
+ Value->getArrayInitializedElt(I) = Filler;
+
+ // Initialize the elements.
+ for (unsigned I = OldElts; I < N; ++I) {
+ if (!VisitCXXConstructExpr(E, ArrayElt,
+ &Value->getArrayInitializedElt(I),
+ CAT->getElementType()) ||
+ !HandleLValueArrayAdjustment(Info, E, ArrayElt,
+ CAT->getElementType(), 1))
+ return false;
+      // When checking for const initialization, any diagnostic is considered
+      // an error.
+ if (Info.EvalStatus.Diag && !Info.EvalStatus.Diag->empty() &&
+ !Info.keepEvaluatingAfterFailure())
+ return false;
+ }
+ }
return true;
}
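
The two-pass strategy in the hunk above can be illustrated with a small standalone sketch (hypothetical helper, not the evaluator itself): construct a single element first and grow to the full size only once that succeeds, so a failure on the very first element never forces a large allocation.

#include <optional>
#include <vector>

template <typename T, typename Init>
std::optional<std::vector<T>> buildArray(unsigned FinalSize, Init InitElt) {
  std::vector<T> Elts;
  if (FinalSize == 0)
    return Elts;                          // mirrors the early return above
  for (unsigned N : {1u, FinalSize}) {
    if (Elts.size() == N)
      break;                              // FinalSize == 1: second pass is a no-op
    Elts.reserve(N);
    for (unsigned I = Elts.size(); I < N; ++I) {
      std::optional<T> V = InitElt(I);    // "evaluate" one element
      if (!V)
        return std::nullopt;              // bail out before building the rest
      Elts.push_back(*V);
    }
  }
  return Elts;
}

int main() {
  auto Ok = buildArray<int>(1000, [](unsigned I) { return std::optional<int>(int(I)); });
  auto Bad = buildArray<int>(1000, [](unsigned) { return std::optional<int>(); });
  return (Ok && Ok->size() == 1000 && !Bad) ? 0 : 1;   // exits 0
}
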
diff --git a/contrib/llvm-project/clang/lib/AST/FormatString.cpp b/contrib/llvm-project/clang/lib/AST/FormatString.cpp
index 83b952116a5e..102bcca96a38 100644
--- a/contrib/llvm-project/clang/lib/AST/FormatString.cpp
+++ b/contrib/llvm-project/clang/lib/AST/FormatString.cpp
@@ -21,7 +21,6 @@ using clang::analyze_format_string::FormatStringHandler;
using clang::analyze_format_string::FormatSpecifier;
using clang::analyze_format_string::LengthModifier;
using clang::analyze_format_string::OptionalAmount;
-using clang::analyze_format_string::PositionContext;
using clang::analyze_format_string::ConversionSpecifier;
using namespace clang;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.cpp b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.cpp
index 5c8cb4274260..da538aa332ff 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.cpp
@@ -207,13 +207,13 @@ bool ByteCodeExprGen<Emitter>::VisitBinaryOperator(const BinaryOperator *BO) {
template <class Emitter>
bool ByteCodeExprGen<Emitter>::discard(const Expr *E) {
- OptionScope<Emitter> Scope(this, /*discardResult=*/true);
+ OptionScope<Emitter> Scope(this, /*NewDiscardResult=*/true);
return this->Visit(E);
}
template <class Emitter>
bool ByteCodeExprGen<Emitter>::visit(const Expr *E) {
- OptionScope<Emitter> Scope(this, /*discardResult=*/false);
+ OptionScope<Emitter> Scope(this, /*NewDiscardResult=*/false);
return this->Visit(E);
}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h
index 716f28551e58..124a6ff03f18 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h
@@ -28,8 +28,6 @@ namespace clang {
class QualType;
namespace interp {
-class Function;
-class State;
template <class Emitter> class LocalScope;
template <class Emitter> class RecordScope;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.h b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.h
index d9c0b64ed4b8..3bc665b84b4d 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.h
@@ -25,11 +25,7 @@
#include "llvm/ADT/Optional.h"
namespace clang {
-class QualType;
-
namespace interp {
-class Function;
-class State;
template <class Emitter> class LoopScope;
template <class Emitter> class SwitchScope;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Context.h b/contrib/llvm-project/clang/lib/AST/Interp/Context.h
index 4f25ff977b81..0627d9fb14f5 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Context.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Context.h
@@ -23,7 +23,6 @@
namespace clang {
class ASTContext;
class LangOptions;
-class Stmt;
class FunctionDecl;
class VarDecl;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.h b/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.h
index 0ccdef221c83..2d5386e60b8c 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.h
@@ -25,10 +25,8 @@ namespace clang {
namespace interp {
class Block;
class DeadBlock;
-class Context;
class InterpState;
class Pointer;
-class Function;
enum PrimType : unsigned;
/// A memory block, either on the stack or in the heap.
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Pointer.h b/contrib/llvm-project/clang/lib/AST/Interp/Pointer.h
index f2f6e0e76018..587531aec82a 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Pointer.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Pointer.h
@@ -26,10 +26,7 @@ namespace clang {
namespace interp {
class Block;
class DeadBlock;
-class Context;
-class InterpState;
class Pointer;
-class Function;
enum PrimType : unsigned;
/// A pointer to a memory block, live or dead.
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/PrimType.h b/contrib/llvm-project/clang/lib/AST/Interp/PrimType.h
index f5f4f8e5c32d..de4bf9bf802e 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/PrimType.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/PrimType.h
@@ -81,35 +81,27 @@ inline bool isPrimitiveIntegral(PrimType Type) {
/// Helper macro to simplify type switches.
/// The macro implicitly exposes a type T in the scope of the inner block.
#define TYPE_SWITCH_CASE(Name, B) \
- case Name: { using T = PrimConv<Name>::T; do {B;} while(0); break; }
+ case Name: { using T = PrimConv<Name>::T; B; break; }
#define TYPE_SWITCH(Expr, B) \
- switch (Expr) { \
- TYPE_SWITCH_CASE(PT_Sint8, B) \
- TYPE_SWITCH_CASE(PT_Uint8, B) \
- TYPE_SWITCH_CASE(PT_Sint16, B) \
- TYPE_SWITCH_CASE(PT_Uint16, B) \
- TYPE_SWITCH_CASE(PT_Sint32, B) \
- TYPE_SWITCH_CASE(PT_Uint32, B) \
- TYPE_SWITCH_CASE(PT_Sint64, B) \
- TYPE_SWITCH_CASE(PT_Uint64, B) \
- TYPE_SWITCH_CASE(PT_Bool, B) \
- TYPE_SWITCH_CASE(PT_Ptr, B) \
- }
+ do { \
+ switch (Expr) { \
+ TYPE_SWITCH_CASE(PT_Sint8, B) \
+ TYPE_SWITCH_CASE(PT_Uint8, B) \
+ TYPE_SWITCH_CASE(PT_Sint16, B) \
+ TYPE_SWITCH_CASE(PT_Uint16, B) \
+ TYPE_SWITCH_CASE(PT_Sint32, B) \
+ TYPE_SWITCH_CASE(PT_Uint32, B) \
+ TYPE_SWITCH_CASE(PT_Sint64, B) \
+ TYPE_SWITCH_CASE(PT_Uint64, B) \
+ TYPE_SWITCH_CASE(PT_Bool, B) \
+ TYPE_SWITCH_CASE(PT_Ptr, B) \
+ } \
+ } while (0)
#define COMPOSITE_TYPE_SWITCH(Expr, B, D) \
- switch (Expr) { \
- TYPE_SWITCH_CASE(PT_Ptr, B) \
- default: do { D; } while(0); break; \
- }
-#define INT_TYPE_SWITCH(Expr, B) \
- switch (Expr) { \
- TYPE_SWITCH_CASE(PT_Sint8, B) \
- TYPE_SWITCH_CASE(PT_Uint8, B) \
- TYPE_SWITCH_CASE(PT_Sint16, B) \
- TYPE_SWITCH_CASE(PT_Uint16, B) \
- TYPE_SWITCH_CASE(PT_Sint32, B) \
- TYPE_SWITCH_CASE(PT_Uint32, B) \
- TYPE_SWITCH_CASE(PT_Sint64, B) \
- TYPE_SWITCH_CASE(PT_Uint64, B) \
- default: llvm_unreachable("not an integer"); \
- }
+ do { \
+ switch (Expr) { \
+ TYPE_SWITCH_CASE(PT_Ptr, B) \
+ default: { D; break; } \
+ } \
+ } while (0)
#endif
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Program.h b/contrib/llvm-project/clang/lib/AST/Interp/Program.h
index c81ec777a5fe..ca985af8ad30 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Program.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Program.h
@@ -29,15 +29,12 @@ namespace clang {
class RecordDecl;
class Expr;
class FunctionDecl;
-class Stmt;
class StringLiteral;
class VarDecl;
namespace interp {
class Context;
-class State;
class Record;
-class Scope;
/// The program contains and links the bytecode for all functions.
class Program {
diff --git a/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp b/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp
index 7afc1250a36f..2e734e2b28cd 100644
--- a/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp
@@ -659,8 +659,7 @@ bool ItaniumMangleContextImpl::isUniqueInternalLinkageDecl(
}
bool ItaniumMangleContextImpl::shouldMangleCXXName(const NamedDecl *D) {
- const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
- if (FD) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
LanguageLinkage L = FD->getLanguageLinkage();
// Overloadable functions need mangling.
if (FD->hasAttr<OverloadableAttr>())
@@ -696,21 +695,24 @@ bool ItaniumMangleContextImpl::shouldMangleCXXName(const NamedDecl *D) {
if (!getASTContext().getLangOpts().CPlusPlus)
return false;
- const VarDecl *VD = dyn_cast<VarDecl>(D);
- if (VD && !isa<DecompositionDecl>(D)) {
+ if (const auto *VD = dyn_cast<VarDecl>(D)) {
+ // Decompositions are mangled.
+ if (isa<DecompositionDecl>(VD))
+ return true;
+
// C variables are not mangled.
if (VD->isExternC())
return false;
- // Variables at global scope with non-internal linkage are not mangled
+ // Variables at global scope with non-internal linkage are not mangled.
const DeclContext *DC = getEffectiveDeclContext(D);
// Check for extern variable declared locally.
if (DC->isFunctionOrMethod() && D->hasLinkage())
- while (!DC->isNamespace() && !DC->isTranslationUnit())
+ while (!DC->isFileContext())
DC = getEffectiveParentContext(DC);
if (DC->isTranslationUnit() && D->getFormalLinkage() != InternalLinkage &&
!CXXNameMangler::shouldHaveAbiTags(*this, VD) &&
- !isa<VarTemplateSpecializationDecl>(D))
+ !isa<VarTemplateSpecializationDecl>(VD))
return false;
}
@@ -5889,9 +5891,11 @@ void CXXNameMangler::mangleTemplateParameter(unsigned Depth, unsigned Index) {
}
void CXXNameMangler::mangleSeqID(unsigned SeqID) {
- if (SeqID == 1)
+ if (SeqID == 0) {
+ // Nothing.
+ } else if (SeqID == 1) {
Out << '0';
- else if (SeqID > 1) {
+ } else {
SeqID--;
// <seq-id> is encoded in base-36, using digits and upper case letters.
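
Assuming the Itanium ABI <seq-id> scheme referred to above (the first substitution is S_, later ones are S0_, S1_, ..., S9_, SA_, ... in base-36), here is a standalone sketch of the encoding; seqIdRef is a hypothetical helper, not Clang API, and the 'S'/'_' framing is added here for readability.

#include <cassert>
#include <string>

std::string seqIdRef(unsigned SeqID) {
  std::string Out = "S";
  if (SeqID != 0) {
    --SeqID;                              // S0_ refers to the second substitution
    std::string Digits;
    do {
      unsigned D = SeqID % 36;            // digits first, then upper-case letters
      Digits.insert(Digits.begin(), D < 10 ? char('0' + D) : char('A' + (D - 10)));
      SeqID /= 36;
    } while (SeqID != 0);
    Out += Digits;
  }
  return Out + "_";
}

int main() {
  assert(seqIdRef(0) == "S_");
  assert(seqIdRef(1) == "S0_");
  assert(seqIdRef(37) == "S10_");
  return 0;
}
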
diff --git a/contrib/llvm-project/clang/lib/AST/Mangle.cpp b/contrib/llvm-project/clang/lib/AST/Mangle.cpp
index 54dbf484f377..984da9909ce2 100644
--- a/contrib/llvm-project/clang/lib/AST/Mangle.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Mangle.cpp
@@ -225,11 +225,17 @@ void MangleContext::mangleName(GlobalDecl GD, raw_ostream &Out) {
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
if (!MD->isStatic())
++ArgWords;
- for (const auto &AT : Proto->param_types())
+ for (const auto &AT : Proto->param_types()) {
+      // If an argument type is incomplete, there is no way to get its size to
+      // encode correctly into the mangling scheme.
+      // Follow GCC's behaviour by simply breaking out of the loop.
+ if (AT->isIncompleteType())
+ break;
// Size should be aligned to pointer size.
ArgWords +=
llvm::alignTo(ASTContext.getTypeSize(AT), TI.getPointerWidth(0)) /
TI.getPointerWidth(0);
+ }
Out << ((TI.getPointerWidth(0) / 8) * ArgWords);
}
diff --git a/contrib/llvm-project/clang/lib/AST/MicrosoftCXXABI.cpp b/contrib/llvm-project/clang/lib/AST/MicrosoftCXXABI.cpp
index 53d7e0b042ff..b7dc0e62e66a 100644
--- a/contrib/llvm-project/clang/lib/AST/MicrosoftCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/AST/MicrosoftCXXABI.cpp
@@ -36,8 +36,8 @@ class MicrosoftNumberingContext : public MangleNumberingContext {
public:
MicrosoftNumberingContext()
- : MangleNumberingContext(), LambdaManglingNumber(0),
- StaticLocalNumber(0), StaticThreadlocalNumber(0) {}
+ : LambdaManglingNumber(0), StaticLocalNumber(0),
+ StaticThreadlocalNumber(0) {}
unsigned getManglingNumber(const CXXMethodDecl *CallOperator) override {
return ++LambdaManglingNumber;
diff --git a/contrib/llvm-project/clang/lib/AST/OSLog.cpp b/contrib/llvm-project/clang/lib/AST/OSLog.cpp
index 094c0102854b..4cc5def0651f 100644
--- a/contrib/llvm-project/clang/lib/AST/OSLog.cpp
+++ b/contrib/llvm-project/clang/lib/AST/OSLog.cpp
@@ -56,8 +56,8 @@ public:
}
bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
- const char *StartSpecifier,
- unsigned SpecifierLen) override {
+ const char *StartSpecifier, unsigned SpecifierLen,
+ const TargetInfo &) override {
if (!FS.consumesDataArgument() &&
FS.getConversionSpecifier().getKind() !=
clang::analyze_format_string::ConversionSpecifier::PrintErrno)
diff --git a/contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp b/contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp
index fc658d5ba5c3..e5f356187988 100644
--- a/contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp
+++ b/contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp
@@ -428,7 +428,7 @@ bool clang::analyze_format_string::ParsePrintfString(FormatStringHandler &H,
continue;
// We have a format specifier. Pass it to the callback.
if (!H.HandlePrintfSpecifier(FSR.getValue(), FSR.getStart(),
- I - FSR.getStart()))
+ I - FSR.getStart(), Target))
return true;
}
assert(I == E && "Format string not exhausted");
@@ -711,8 +711,8 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
CS.setKind(ConversionSpecifier::sArg);
// Disable irrelevant flags
- HasAlternativeForm = 0;
- HasLeadingZeroes = 0;
+ HasAlternativeForm = false;
+ HasLeadingZeroes = false;
// Set the long length modifier for wide characters
if (QT->getPointeeType()->isWideCharType())
@@ -878,9 +878,9 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
CS.setKind(ConversionSpecifier::cArg);
LM.setKind(LengthModifier::None);
Precision.setHowSpecified(OptionalAmount::NotSpecified);
- HasAlternativeForm = 0;
- HasLeadingZeroes = 0;
- HasPlusPrefix = 0;
+ HasAlternativeForm = false;
+ HasLeadingZeroes = false;
+ HasPlusPrefix = false;
}
// Test for Floating type first as LongDouble can pass isUnsignedIntegerType
else if (QT->isRealFloatingType()) {
@@ -888,12 +888,12 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
}
else if (QT->isSignedIntegerType()) {
CS.setKind(ConversionSpecifier::dArg);
- HasAlternativeForm = 0;
+ HasAlternativeForm = false;
}
else if (QT->isUnsignedIntegerType()) {
CS.setKind(ConversionSpecifier::uArg);
- HasAlternativeForm = 0;
- HasPlusPrefix = 0;
+ HasAlternativeForm = false;
+ HasPlusPrefix = false;
} else {
llvm_unreachable("Unexpected type");
}
diff --git a/contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp b/contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp
index 3e39ec1c718d..61a30ead165e 100644
--- a/contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp
@@ -2021,6 +2021,7 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
CharUnits UnpackedFieldAlign =
!DefaultsToAIXPowerAlignment ? FieldAlign : PreferredAlign;
CharUnits UnpackedFieldOffset = FieldOffset;
+ CharUnits OriginalFieldAlign = UnpackedFieldAlign;
if (FieldPacked) {
FieldAlign = CharUnits::One();
@@ -2105,6 +2106,22 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
// Remember max struct/class ABI-specified alignment.
UnadjustedAlignment = std::max(UnadjustedAlignment, FieldAlign);
UpdateAlignment(FieldAlign, UnpackedFieldAlign, PreferredAlign);
+
+  // Check the alignment of inner fields against
+  // the alignment of their parent record.
+ if (const RecordDecl *RD = D->getParent()) {
+ // Check if packed attribute or pragma pack is present.
+ if (RD->hasAttr<PackedAttr>() || !MaxFieldAlignment.isZero())
+ if (FieldAlign < OriginalFieldAlign)
+ if (D->getType()->isRecordType()) {
+          // If the offset is not a multiple of the natural alignment of
+          // the type, raise the warning.
+          // TODO: Takes no account of the alignment of the outer struct.
+ if (FieldOffset % OriginalFieldAlign != 0)
+ Diag(D->getLocation(), diag::warn_unaligned_access)
+ << Context.getTypeDeclType(RD) << D->getName() << D->getType();
+ }
+ }
}
void ItaniumRecordLayoutBuilder::FinishLayout(const NamedDecl *D) {
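
A minimal example (assuming a typical 64-bit ABI where long long is 8-byte aligned) of the layout the new check above is meant to diagnose: a packed outer record placing a naturally aligned inner record at a misaligned offset.

struct Inner {
  long long v;                        // naturally 8-byte aligned
};

struct __attribute__((packed)) Outer {
  char tag;                           // pushes the next field to offset 1
  Inner in;                           // 1 % 8 != 0, so the new warning fires
};

int main() { return sizeof(Outer) == sizeof(char) + sizeof(Inner) ? 0 : 1; }
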
diff --git a/contrib/llvm-project/clang/lib/AST/Stmt.cpp b/contrib/llvm-project/clang/lib/AST/Stmt.cpp
index 4f76f6ec12ed..be19d3b2cce2 100644
--- a/contrib/llvm-project/clang/lib/AST/Stmt.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Stmt.cpp
@@ -568,21 +568,20 @@ void GCCAsmStmt::setOutputsAndInputsAndClobbers(const ASTContext &C,
/// translate this into a numeric value needed to reference the same operand.
/// This returns -1 if the operand name is invalid.
int GCCAsmStmt::getNamedOperand(StringRef SymbolicName) const {
- unsigned NumPlusOperands = 0;
-
// Check if this is an output operand.
- for (unsigned i = 0, e = getNumOutputs(); i != e; ++i) {
+ unsigned NumOutputs = getNumOutputs();
+ for (unsigned i = 0; i != NumOutputs; ++i)
if (getOutputName(i) == SymbolicName)
return i;
- }
- for (unsigned i = 0, e = getNumInputs(); i != e; ++i)
+ unsigned NumInputs = getNumInputs();
+ for (unsigned i = 0; i != NumInputs; ++i)
if (getInputName(i) == SymbolicName)
- return getNumOutputs() + NumPlusOperands + i;
+ return NumOutputs + i;
for (unsigned i = 0, e = getNumLabels(); i != e; ++i)
if (getLabelName(i) == SymbolicName)
- return i + getNumOutputs() + getNumInputs();
+ return NumOutputs + NumInputs + getNumPlusOperands() + i;
// Not found.
return -1;
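
For readers unfamiliar with symbolic asm operands, a small sketch (x86-64 AT&T syntax, GCC/Clang extended asm; illustration only) of the numbering getNamedOperand computes: output operands come first, inputs follow.

long addOne(long x) {
  long out;
  asm("lea 1(%[in]), %[res]"          // %[res] is operand 0 (the only output)
      : [res] "=r"(out)
      : [in] "r"(x));                 // %[in] is operand 1 (the first input)
  return out;
}

int main() { return addOne(41) == 42 ? 0 : 1; }   // exits 0
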
diff --git a/contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp b/contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp
index b336a0637d5e..8a9f73d3dbf0 100644
--- a/contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp
@@ -518,7 +518,7 @@ OMPSectionDirective *OMPSectionDirective::Create(const ASTContext &C,
bool HasCancel) {
auto *Dir =
createDirective<OMPSectionDirective>(C, llvm::None, AssociatedStmt,
- /*NumChildre=*/0, StartLoc, EndLoc);
+ /*NumChildren=*/0, StartLoc, EndLoc);
Dir->setHasCancel(HasCancel);
return Dir;
}
diff --git a/contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp b/contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp
index b65a38d1e566..adc0720fe000 100644
--- a/contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp
@@ -1030,7 +1030,12 @@ void StmtPrinter::VisitDeclRefExpr(DeclRefExpr *Node) {
Qualifier->print(OS, Policy);
if (Node->hasTemplateKeyword())
OS << "template ";
- OS << Node->getNameInfo();
+ if (Policy.CleanUglifiedParameters &&
+ isa<ParmVarDecl, NonTypeTemplateParmDecl>(Node->getDecl()) &&
+ Node->getDecl()->getIdentifier())
+ OS << Node->getDecl()->getIdentifier()->deuglifiedName();
+ else
+ Node->getNameInfo().printName(OS, Policy);
if (Node->hasExplicitTemplateArgs()) {
const TemplateParameterList *TPL = nullptr;
if (!Node->hadMultipleCandidates())
@@ -2069,7 +2074,10 @@ void StmtPrinter::VisitLambdaExpr(LambdaExpr *Node) {
} else {
NeedComma = true;
}
- std::string ParamStr = P->getNameAsString();
+ std::string ParamStr =
+ (Policy.CleanUglifiedParameters && P->getIdentifier())
+ ? P->getIdentifier()->deuglifiedName().str()
+ : P->getNameAsString();
P->getOriginalType().print(OS, Policy, ParamStr);
}
if (Method->isVariadic()) {
diff --git a/contrib/llvm-project/clang/lib/AST/TemplateName.cpp b/contrib/llvm-project/clang/lib/AST/TemplateName.cpp
index c8bd74f0b5bb..05d7d58b71c4 100644
--- a/contrib/llvm-project/clang/lib/AST/TemplateName.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TemplateName.cpp
@@ -223,8 +223,12 @@ bool TemplateName::containsUnexpandedParameterPack() const {
void TemplateName::print(raw_ostream &OS, const PrintingPolicy &Policy,
Qualified Qual) const {
if (TemplateDecl *Template = Storage.dyn_cast<TemplateDecl *>())
- if (Qual == Qualified::Fully &&
- getDependence() != TemplateNameDependenceScope::DependentInstantiation)
+ if (Policy.CleanUglifiedParameters &&
+ isa<TemplateTemplateParmDecl>(Template) && Template->getIdentifier())
+ OS << Template->getIdentifier()->deuglifiedName();
+ else if (Qual == Qualified::Fully &&
+ getDependence() !=
+ TemplateNameDependenceScope::DependentInstantiation)
Template->printQualifiedName(OS, Policy);
else
OS << *Template;
diff --git a/contrib/llvm-project/clang/lib/AST/Type.cpp b/contrib/llvm-project/clang/lib/AST/Type.cpp
index c771fe264b0c..774b3e94159d 100644
--- a/contrib/llvm-project/clang/lib/AST/Type.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Type.cpp
@@ -194,7 +194,7 @@ void ConstantArrayType::Profile(llvm::FoldingSetNodeID &ID,
ID.AddInteger(ArraySize.getZExtValue());
ID.AddInteger(SizeMod);
ID.AddInteger(TypeQuals);
- ID.AddBoolean(SizeExpr != 0);
+ ID.AddBoolean(SizeExpr != nullptr);
if (SizeExpr)
SizeExpr->Profile(ID, Context, true);
}
diff --git a/contrib/llvm-project/clang/lib/AST/TypeLoc.cpp b/contrib/llvm-project/clang/lib/AST/TypeLoc.cpp
index c3ed08d5a8b3..13aa54c48f66 100644
--- a/contrib/llvm-project/clang/lib/AST/TypeLoc.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TypeLoc.cpp
@@ -622,6 +622,7 @@ void AutoTypeLoc::initializeLocal(ASTContext &Context, SourceLocation Loc) {
setFoundDecl(nullptr);
setRAngleLoc(Loc);
setLAngleLoc(Loc);
+ setRParenLoc(Loc);
TemplateSpecializationTypeLoc::initializeArgLocs(Context, getNumArgs(),
getTypePtr()->getArgs(),
getArgInfos(), Loc);
diff --git a/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp b/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp
index 2a33a69f288d..bba323f651aa 100644
--- a/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp
@@ -280,7 +280,7 @@ bool TypePrinter::canPrefixQualifiers(const Type *T,
case Type::Attributed: {
// We still want to print the address_space before the type if it is an
// address_space attribute.
- const auto *AttrTy = cast<AttributedType>(T);
+ const auto *AttrTy = cast<AttributedType>(UnderlyingType);
CanPrefixQualifiers = AttrTy->getAttrKind() == attr::AddressSpace;
}
}
@@ -1418,7 +1418,8 @@ void TypePrinter::printTemplateTypeParmBefore(const TemplateTypeParmType *T,
}
OS << "auto";
} else if (IdentifierInfo *Id = T->getIdentifier())
- OS << Id->getName();
+ OS << (Policy.CleanUglifiedParameters ? Id->deuglifiedName()
+ : Id->getName());
else
OS << "type-parameter-" << T->getDepth() << '-' << T->getIndex();
diff --git a/contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp b/contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp
index f938565c3cb4..24586d6b70d4 100644
--- a/contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp
@@ -2328,7 +2328,7 @@ ItaniumVTableContext::computeVTableRelatedInformation(const CXXRecordDecl *RD) {
return;
ItaniumVTableBuilder Builder(*this, RD, CharUnits::Zero(),
- /*MostDerivedClassIsVirtual=*/0, RD);
+ /*MostDerivedClassIsVirtual=*/false, RD);
Entry = CreateVTableLayout(Builder);
MethodVTableIndices.insert(Builder.vtable_indices_begin(),
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h
index fa9d42247e24..3e9c4f31b84d 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h
@@ -524,8 +524,9 @@ variadicMatcherDescriptor(StringRef MatcherName, SourceRange NameRange,
}
return {};
}
- InnerArgs.set_size(i + 1);
- InnerArgsPtr[i] = new (&InnerArgs[i]) ArgT(ArgTraits::get(Value));
+ assert(InnerArgs.size() < InnerArgs.capacity());
+ InnerArgs.emplace_back(ArgTraits::get(Value));
+ InnerArgsPtr[i] = &InnerArgs[i];
}
return outvalueToVariantMatcher(Func(InnerArgsPtr));
}
@@ -1166,4 +1167,4 @@ std::unique_ptr<MatcherDescriptor> makeMatcherAutoMarshall(
} // namespace ast_matchers
} // namespace clang
-#endif // LLVM_CLANG_AST_MATCHERS_DYNAMIC_MARSHALLERS_H
+#endif // LLVM_CLANG_LIB_ASTMATCHERS_DYNAMIC_MARSHALLERS_H
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp
index 4f3efdb0a663..47db6b51966a 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp
@@ -404,7 +404,9 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isComparisonOperator);
REGISTER_MATCHER(isConst);
REGISTER_MATCHER(isConstQualified);
+ REGISTER_MATCHER(isConsteval);
REGISTER_MATCHER(isConstexpr);
+ REGISTER_MATCHER(isConstinit);
REGISTER_MATCHER(isCopyAssignmentOperator);
REGISTER_MATCHER(isCopyConstructor);
REGISTER_MATCHER(isDefaultConstructor);
diff --git a/contrib/llvm-project/clang/lib/Analysis/CFG.cpp b/contrib/llvm-project/clang/lib/Analysis/CFG.cpp
index 9ef3b5b6277a..8246854dc1b5 100644
--- a/contrib/llvm-project/clang/lib/Analysis/CFG.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/CFG.cpp
@@ -531,9 +531,7 @@ class CFGBuilder {
public:
explicit CFGBuilder(ASTContext *astContext,
const CFG::BuildOptions &buildOpts)
- : Context(astContext), cfg(new CFG()), // crew a new CFG
- ConstructionContextMap(), BuildOpts(buildOpts) {}
-
+ : Context(astContext), cfg(new CFG()), BuildOpts(buildOpts) {}
// buildCFG - Used by external clients to construct the CFG.
std::unique_ptr<CFG> buildCFG(const Decl *D, Stmt *Statement);
@@ -3354,7 +3352,7 @@ CFGBlock *CFGBuilder::VisitGCCAsmStmt(GCCAsmStmt *G, AddStmtChoice asc) {
// Save "Succ" in BackpatchBlocks. In the backpatch processing, "Succ" is
// used to avoid adding "Succ" again.
BackpatchBlocks.push_back(JumpSource(Succ, ScopePos));
- return Block;
+ return VisitChildren(G);
}
CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp
new file mode 100644
index 000000000000..3ad2ed640cc1
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp
@@ -0,0 +1,69 @@
+//===- ControlFlowContext.cpp ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a ControlFlowContext class that is used by dataflow
+// analyses that run over Control-Flow Graphs (CFGs).
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/ControlFlowContext.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Stmt.h"
+#include "clang/Analysis/CFG.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/Error.h"
+#include <utility>
+
+namespace clang {
+namespace dataflow {
+
+/// Returns a map from statements to basic blocks that contain them.
+static llvm::DenseMap<const Stmt *, const CFGBlock *>
+buildStmtToBasicBlockMap(const CFG &Cfg) {
+ llvm::DenseMap<const Stmt *, const CFGBlock *> StmtToBlock;
+ for (const CFGBlock *Block : Cfg) {
+ if (Block == nullptr)
+ continue;
+
+ for (const CFGElement &Element : *Block) {
+ auto Stmt = Element.getAs<CFGStmt>();
+ if (!Stmt.hasValue())
+ continue;
+
+ StmtToBlock[Stmt.getValue().getStmt()] = Block;
+ }
+ }
+ return StmtToBlock;
+}
+
+llvm::Expected<ControlFlowContext>
+ControlFlowContext::build(const Decl *D, Stmt *S, ASTContext *C) {
+ CFG::BuildOptions Options;
+ Options.PruneTriviallyFalseEdges = false;
+ Options.AddImplicitDtors = true;
+ Options.AddTemporaryDtors = true;
+ Options.AddInitializers = true;
+ Options.AddCXXDefaultInitExprInCtors = true;
+
+ // Ensure that all sub-expressions in basic blocks are evaluated.
+ Options.setAllAlwaysAdd();
+
+ auto Cfg = CFG::buildCFG(D, S, C, Options);
+ if (Cfg == nullptr)
+ return llvm::createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "CFG::buildCFG failed");
+
+ llvm::DenseMap<const Stmt *, const CFGBlock *> StmtToBlock =
+ buildStmtToBasicBlockMap(*Cfg);
+ return ControlFlowContext(std::move(Cfg), std::move(StmtToBlock));
+}
+
+} // namespace dataflow
+} // namespace clang
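
A usage sketch (not part of the diff) built only on the signature defined above; it assumes a Clang tooling build environment and a function definition to analyze, and buildCFCFor is a hypothetical helper name.

#include "clang/AST/Decl.h"
#include "clang/Analysis/FlowSensitive/ControlFlowContext.h"
#include "llvm/Support/Error.h"

// Hand the function's body and ASTContext to the new factory function.
llvm::Expected<clang::dataflow::ControlFlowContext>
buildCFCFor(const clang::FunctionDecl &FD) {
  return clang::dataflow::ControlFlowContext::build(&FD, FD.getBody(),
                                                    &FD.getASTContext());
}
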
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
new file mode 100644
index 000000000000..938f7338b640
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
@@ -0,0 +1,318 @@
+//===-- DataflowEnvironment.cpp ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an Environment class that is used by dataflow analyses
+// that run over Control-Flow Graphs (CFGs) to keep track of the state of the
+// program at given program points.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Type.h"
+#include "clang/Analysis/FlowSensitive/DataflowLattice.h"
+#include "clang/Analysis/FlowSensitive/StorageLocation.h"
+#include "clang/Analysis/FlowSensitive/Value.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <memory>
+#include <utility>
+
+namespace clang {
+namespace dataflow {
+
+/// Returns a map consisting of key-value entries that are present in both maps.
+template <typename K, typename V>
+llvm::DenseMap<K, V> intersectDenseMaps(const llvm::DenseMap<K, V> &Map1,
+ const llvm::DenseMap<K, V> &Map2) {
+ llvm::DenseMap<K, V> Result;
+ for (auto &Entry : Map1) {
+ auto It = Map2.find(Entry.first);
+ if (It != Map2.end() && Entry.second == It->second)
+ Result.insert({Entry.first, Entry.second});
+ }
+ return Result;
+}
+
+Environment::Environment(DataflowAnalysisContext &DACtx,
+ const DeclContext &DeclCtx)
+ : Environment(DACtx) {
+ if (const auto *FuncDecl = dyn_cast<FunctionDecl>(&DeclCtx)) {
+ for (const auto *ParamDecl : FuncDecl->parameters()) {
+ assert(ParamDecl != nullptr);
+ auto &ParamLoc = createStorageLocation(*ParamDecl);
+ setStorageLocation(*ParamDecl, ParamLoc);
+ if (Value *ParamVal = createValue(ParamDecl->getType()))
+ setValue(ParamLoc, *ParamVal);
+ }
+ }
+
+ if (const auto *MethodDecl = dyn_cast<CXXMethodDecl>(&DeclCtx)) {
+ if (!MethodDecl->isStatic()) {
+ QualType ThisPointeeType = MethodDecl->getThisObjectType();
+ // FIXME: Add support for union types.
+ if (!ThisPointeeType->isUnionType()) {
+ auto &ThisPointeeLoc = createStorageLocation(ThisPointeeType);
+ DACtx.setThisPointeeStorageLocation(ThisPointeeLoc);
+ if (Value *ThisPointeeVal = createValue(ThisPointeeType))
+ setValue(ThisPointeeLoc, *ThisPointeeVal);
+ }
+ }
+ }
+}
+
+bool Environment::operator==(const Environment &Other) const {
+ assert(DACtx == Other.DACtx);
+ return DeclToLoc == Other.DeclToLoc && LocToVal == Other.LocToVal;
+}
+
+LatticeJoinEffect Environment::join(const Environment &Other,
+ Environment::Merger &Merger) {
+ assert(DACtx == Other.DACtx);
+
+ auto Effect = LatticeJoinEffect::Unchanged;
+
+ const unsigned DeclToLocSizeBefore = DeclToLoc.size();
+ DeclToLoc = intersectDenseMaps(DeclToLoc, Other.DeclToLoc);
+ if (DeclToLocSizeBefore != DeclToLoc.size())
+ Effect = LatticeJoinEffect::Changed;
+
+ const unsigned ExprToLocSizeBefore = ExprToLoc.size();
+ ExprToLoc = intersectDenseMaps(ExprToLoc, Other.ExprToLoc);
+ if (ExprToLocSizeBefore != ExprToLoc.size())
+ Effect = LatticeJoinEffect::Changed;
+
+ llvm::DenseMap<const StorageLocation *, Value *> MergedLocToVal;
+ for (auto &Entry : LocToVal) {
+ const StorageLocation *Loc = Entry.first;
+ assert(Loc != nullptr);
+
+ Value *Val = Entry.second;
+ assert(Val != nullptr);
+
+ auto It = Other.LocToVal.find(Loc);
+ if (It == Other.LocToVal.end())
+ continue;
+ assert(It->second != nullptr);
+
+ if (It->second == Val) {
+ MergedLocToVal.insert({Loc, Val});
+ continue;
+ }
+
+ // FIXME: Consider destroying `MergedValue` immediately if `Merger::merge`
+ // returns false to avoid storing unneeded values in `DACtx`.
+ if (Value *MergedVal = createValue(Loc->getType()))
+ if (Merger.merge(Loc->getType(), *Val, *It->second, *MergedVal, *this))
+ MergedLocToVal.insert({Loc, MergedVal});
+ }
+ const unsigned LocToValSizeBefore = LocToVal.size();
+ LocToVal = std::move(MergedLocToVal);
+ if (LocToValSizeBefore != LocToVal.size())
+ Effect = LatticeJoinEffect::Changed;
+
+ return Effect;
+}
+
+StorageLocation &Environment::createStorageLocation(QualType Type) {
+ assert(!Type.isNull());
+ if (Type->isStructureOrClassType() || Type->isUnionType()) {
+ // FIXME: Explore options to avoid eager initialization of fields as some of
+ // them might not be needed for a particular analysis.
+ llvm::DenseMap<const ValueDecl *, StorageLocation *> FieldLocs;
+ for (const FieldDecl *Field : Type->getAsRecordDecl()->fields()) {
+ FieldLocs.insert({Field, &createStorageLocation(Field->getType())});
+ }
+ return takeOwnership(
+ std::make_unique<AggregateStorageLocation>(Type, std::move(FieldLocs)));
+ }
+ return takeOwnership(std::make_unique<ScalarStorageLocation>(Type));
+}
+
+StorageLocation &Environment::createStorageLocation(const VarDecl &D) {
+ // Evaluated declarations are always assigned the same storage locations to
+ // ensure that the environment stabilizes across loop iterations. Storage
+ // locations for evaluated declarations are stored in the analysis context.
+ if (auto *Loc = DACtx->getStorageLocation(D))
+ return *Loc;
+ auto &Loc = createStorageLocation(D.getType());
+ DACtx->setStorageLocation(D, Loc);
+ return Loc;
+}
+
+StorageLocation &Environment::createStorageLocation(const Expr &E) {
+ // Evaluated expressions are always assigned the same storage locations to
+ // ensure that the environment stabilizes across loop iterations. Storage
+ // locations for evaluated expressions are stored in the analysis context.
+ if (auto *Loc = DACtx->getStorageLocation(E))
+ return *Loc;
+ auto &Loc = createStorageLocation(E.getType());
+ DACtx->setStorageLocation(E, Loc);
+ return Loc;
+}
+
+void Environment::setStorageLocation(const ValueDecl &D, StorageLocation &Loc) {
+ assert(DeclToLoc.find(&D) == DeclToLoc.end());
+ DeclToLoc[&D] = &Loc;
+}
+
+StorageLocation *Environment::getStorageLocation(const ValueDecl &D,
+ SkipPast SP) const {
+ auto It = DeclToLoc.find(&D);
+ return It == DeclToLoc.end() ? nullptr : &skip(*It->second, SP);
+}
+
+void Environment::setStorageLocation(const Expr &E, StorageLocation &Loc) {
+ assert(ExprToLoc.find(&E) == ExprToLoc.end());
+ ExprToLoc[&E] = &Loc;
+}
+
+StorageLocation *Environment::getStorageLocation(const Expr &E,
+ SkipPast SP) const {
+ auto It = ExprToLoc.find(&E);
+ return It == ExprToLoc.end() ? nullptr : &skip(*It->second, SP);
+}
+
+StorageLocation *Environment::getThisPointeeStorageLocation() const {
+ return DACtx->getThisPointeeStorageLocation();
+}
+
+void Environment::setValue(const StorageLocation &Loc, Value &Val) {
+ LocToVal[&Loc] = &Val;
+
+ if (auto *StructVal = dyn_cast<StructValue>(&Val)) {
+ auto &AggregateLoc = *cast<AggregateStorageLocation>(&Loc);
+
+ const QualType Type = AggregateLoc.getType();
+ assert(Type->isStructureOrClassType());
+
+ for (const FieldDecl *Field : Type->getAsRecordDecl()->fields()) {
+ assert(Field != nullptr);
+ setValue(AggregateLoc.getChild(*Field), StructVal->getChild(*Field));
+ }
+ }
+}
+
+Value *Environment::getValue(const StorageLocation &Loc) const {
+ auto It = LocToVal.find(&Loc);
+ return It == LocToVal.end() ? nullptr : It->second;
+}
+
+Value *Environment::getValue(const ValueDecl &D, SkipPast SP) const {
+ auto *Loc = getStorageLocation(D, SP);
+ if (Loc == nullptr)
+ return nullptr;
+ return getValue(*Loc);
+}
+
+Value *Environment::getValue(const Expr &E, SkipPast SP) const {
+ auto *Loc = getStorageLocation(E, SP);
+ if (Loc == nullptr)
+ return nullptr;
+ return getValue(*Loc);
+}
+
+Value *Environment::createValue(QualType Type) {
+ llvm::DenseSet<QualType> Visited;
+ return createValueUnlessSelfReferential(Type, Visited);
+}
+
+Value *Environment::createValueUnlessSelfReferential(
+ QualType Type, llvm::DenseSet<QualType> &Visited) {
+ assert(!Type.isNull());
+
+ if (Type->isIntegerType()) {
+ return &takeOwnership(std::make_unique<IntegerValue>());
+ }
+
+ if (Type->isReferenceType()) {
+ QualType PointeeType = Type->getAs<ReferenceType>()->getPointeeType();
+ auto &PointeeLoc = createStorageLocation(PointeeType);
+
+ if (!Visited.contains(PointeeType.getCanonicalType())) {
+ Visited.insert(PointeeType.getCanonicalType());
+ Value *PointeeVal =
+ createValueUnlessSelfReferential(PointeeType, Visited);
+ Visited.erase(PointeeType.getCanonicalType());
+
+ if (PointeeVal != nullptr)
+ setValue(PointeeLoc, *PointeeVal);
+ }
+
+ return &takeOwnership(std::make_unique<ReferenceValue>(PointeeLoc));
+ }
+
+ if (Type->isPointerType()) {
+ QualType PointeeType = Type->getAs<PointerType>()->getPointeeType();
+ auto &PointeeLoc = createStorageLocation(PointeeType);
+
+ if (!Visited.contains(PointeeType.getCanonicalType())) {
+ Visited.insert(PointeeType.getCanonicalType());
+ Value *PointeeVal =
+ createValueUnlessSelfReferential(PointeeType, Visited);
+ Visited.erase(PointeeType.getCanonicalType());
+
+ if (PointeeVal != nullptr)
+ setValue(PointeeLoc, *PointeeVal);
+ }
+
+ return &takeOwnership(std::make_unique<PointerValue>(PointeeLoc));
+ }
+
+ if (Type->isStructureOrClassType()) {
+ // FIXME: Initialize only fields that are accessed in the context that is
+ // being analyzed.
+ llvm::DenseMap<const ValueDecl *, Value *> FieldValues;
+ for (const FieldDecl *Field : Type->getAsRecordDecl()->fields()) {
+ assert(Field != nullptr);
+
+ QualType FieldType = Field->getType();
+ if (Visited.contains(FieldType.getCanonicalType()))
+ continue;
+
+ Visited.insert(FieldType.getCanonicalType());
+ FieldValues.insert(
+ {Field, createValueUnlessSelfReferential(FieldType, Visited)});
+ Visited.erase(FieldType.getCanonicalType());
+ }
+
+ return &takeOwnership(
+ std::make_unique<StructValue>(std::move(FieldValues)));
+ }
+
+ return nullptr;
+}
+
+StorageLocation &Environment::skip(StorageLocation &Loc, SkipPast SP) const {
+ switch (SP) {
+ case SkipPast::None:
+ return Loc;
+ case SkipPast::Reference:
+ // References cannot be chained so we only need to skip past one level of
+ // indirection.
+ if (auto *Val = dyn_cast_or_null<ReferenceValue>(getValue(Loc)))
+ return Val->getPointeeLoc();
+ return Loc;
+ case SkipPast::ReferenceThenPointer:
+ StorageLocation &LocPastRef = skip(Loc, SkipPast::Reference);
+ if (auto *Val = dyn_cast_or_null<PointerValue>(getValue(LocPastRef)))
+ return Val->getPointeeLoc();
+ return LocPastRef;
+ }
+ llvm_unreachable("bad SkipPast kind");
+}
+
+const StorageLocation &Environment::skip(const StorageLocation &Loc,
+ SkipPast SP) const {
+ return skip(*const_cast<StorageLocation *>(&Loc), SP);
+}
+
+} // namespace dataflow
+} // namespace clang
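The join above intersects the declaration-to-location, expression-to-location, and location-to-value maps so that only entries on which both environments agree survive. A standalone restatement of that intersection rule with standard containers (a sketch for illustration, not the static helper itself):

    #include <iostream>
    #include <map>
    #include <string>

    // Keeps only the key/value pairs that are present and equal in both maps,
    // mirroring the rule used by intersectDenseMaps() in Environment::join().
    template <typename K, typename V>
    std::map<K, V> intersectMaps(const std::map<K, V> &M1,
                                 const std::map<K, V> &M2) {
      std::map<K, V> Result;
      for (const auto &Entry : M1) {
        auto It = M2.find(Entry.first);
        if (It != M2.end() && It->second == Entry.second)
          Result.insert(Entry);
      }
      return Result;
    }

    int main() {
      std::map<std::string, int> A = {{"x", 1}, {"y", 2}};
      std::map<std::string, int> B = {{"x", 1}, {"y", 3}};
      for (const auto &E : intersectMaps(A, B))
        std::cout << E.first << " = " << E.second << "\n"; // prints only: x = 1
    }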
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Transfer.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Transfer.cpp
new file mode 100644
index 000000000000..51a86b727e33
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Transfer.cpp
@@ -0,0 +1,462 @@
+//===-- Transfer.cpp --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines transfer functions that evaluate program statements and
+// update an environment accordingly.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/Transfer.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/OperationKinds.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
+#include "clang/Basic/OperatorKinds.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Casting.h"
+#include <cassert>
+#include <memory>
+#include <tuple>
+
+namespace clang {
+namespace dataflow {
+
+static const Expr *skipExprWithCleanups(const Expr *E) {
+ if (auto *C = dyn_cast_or_null<ExprWithCleanups>(E))
+ return C->getSubExpr();
+ return E;
+}
+
+class TransferVisitor : public ConstStmtVisitor<TransferVisitor> {
+public:
+ TransferVisitor(Environment &Env) : Env(Env) {}
+
+ void VisitBinaryOperator(const BinaryOperator *S) {
+ if (S->getOpcode() == BO_Assign) {
+ // The CFG does not contain `ParenExpr` as top-level statements in basic
+ // blocks, however sub-expressions can still be of that type.
+ assert(S->getLHS() != nullptr);
+ const Expr *LHS = S->getLHS()->IgnoreParens();
+
+ assert(LHS != nullptr);
+ auto *LHSLoc = Env.getStorageLocation(*LHS, SkipPast::Reference);
+ if (LHSLoc == nullptr)
+ return;
+
+ // The CFG does not contain `ParenExpr` as top-level statements in basic
+ // blocks, however sub-expressions can still be of that type.
+ assert(S->getRHS() != nullptr);
+ const Expr *RHS = S->getRHS()->IgnoreParens();
+
+ assert(RHS != nullptr);
+ Value *RHSVal = Env.getValue(*RHS, SkipPast::Reference);
+ if (RHSVal == nullptr)
+ return;
+
+ // Assign a value to the storage location of the left-hand side.
+ Env.setValue(*LHSLoc, *RHSVal);
+
+ // Assign a storage location for the whole expression.
+ Env.setStorageLocation(*S, *LHSLoc);
+ }
+ // FIXME: Add support for BO_EQ, BO_NE.
+ }
+
+ void VisitDeclRefExpr(const DeclRefExpr *S) {
+ assert(S->getDecl() != nullptr);
+ auto *DeclLoc = Env.getStorageLocation(*S->getDecl(), SkipPast::None);
+ if (DeclLoc == nullptr)
+ return;
+
+ if (S->getDecl()->getType()->isReferenceType()) {
+ Env.setStorageLocation(*S, *DeclLoc);
+ } else {
+ auto &Loc = Env.createStorageLocation(*S);
+ auto &Val = Env.takeOwnership(std::make_unique<ReferenceValue>(*DeclLoc));
+ Env.setStorageLocation(*S, Loc);
+ Env.setValue(Loc, Val);
+ }
+ }
+
+ void VisitDeclStmt(const DeclStmt *S) {
+ // Group decls are converted into single decls in the CFG so the cast below
+ // is safe.
+ const auto &D = *cast<VarDecl>(S->getSingleDecl());
+ auto &Loc = Env.createStorageLocation(D);
+ Env.setStorageLocation(D, Loc);
+
+ const Expr *InitExpr = D.getInit();
+ if (InitExpr == nullptr) {
+ // No initializer expression - associate `Loc` with a new value.
+ if (Value *Val = Env.createValue(D.getType()))
+ Env.setValue(Loc, *Val);
+ return;
+ }
+
+ // The CFG does not contain `ParenExpr` as top-level statements in basic
+ // blocks, however sub-expressions can still be of that type.
+ InitExpr = skipExprWithCleanups(D.getInit()->IgnoreParens());
+ assert(InitExpr != nullptr);
+
+ if (D.getType()->isReferenceType()) {
+ // Initializing a reference variable - do not create a reference to
+ // reference.
+ if (auto *InitExprLoc =
+ Env.getStorageLocation(*InitExpr, SkipPast::Reference)) {
+ auto &Val =
+ Env.takeOwnership(std::make_unique<ReferenceValue>(*InitExprLoc));
+ Env.setValue(Loc, Val);
+ } else {
+ // FIXME: The initializer expression must always be assigned a value.
+ // Replace this with an assert when we have sufficient coverage of
+ // language features.
+ if (Value *Val = Env.createValue(D.getType()))
+ Env.setValue(Loc, *Val);
+ }
+ return;
+ }
+
+ if (auto *InitExprVal = Env.getValue(*InitExpr, SkipPast::None)) {
+ Env.setValue(Loc, *InitExprVal);
+ } else if (!D.getType()->isStructureOrClassType()) {
+ // FIXME: The initializer expression must always be assigned a value.
+ // Replace this with an assert when we have sufficient coverage of
+ // language features.
+ if (Value *Val = Env.createValue(D.getType()))
+ Env.setValue(Loc, *Val);
+ } else {
+ llvm_unreachable("structs and classes must always be assigned values");
+ }
+ }
+
+ void VisitImplicitCastExpr(const ImplicitCastExpr *S) {
+ // The CFG does not contain `ParenExpr` as top-level statements in basic
+ // blocks, however sub-expressions can still be of that type.
+ assert(S->getSubExpr() != nullptr);
+ const Expr *SubExpr = S->getSubExpr()->IgnoreParens();
+ assert(SubExpr != nullptr);
+
+ switch (S->getCastKind()) {
+ case CK_LValueToRValue: {
+ auto *SubExprVal = Env.getValue(*SubExpr, SkipPast::Reference);
+ if (SubExprVal == nullptr)
+ break;
+
+ auto &ExprLoc = Env.createStorageLocation(*S);
+ Env.setStorageLocation(*S, ExprLoc);
+ Env.setValue(ExprLoc, *SubExprVal);
+ break;
+ }
+ case CK_NoOp: {
+ // FIXME: Consider making `Environment::getStorageLocation` skip noop
+ // expressions (this and other similar expressions in the file) instead of
+ // assigning them storage locations.
+ auto *SubExprLoc = Env.getStorageLocation(*SubExpr, SkipPast::None);
+ if (SubExprLoc == nullptr)
+ break;
+
+ Env.setStorageLocation(*S, *SubExprLoc);
+ break;
+ }
+ default:
+ // FIXME: Add support for CK_UserDefinedConversion,
+ // CK_ConstructorConversion, CK_UncheckedDerivedToBase.
+ break;
+ }
+ }
+
+ void VisitUnaryOperator(const UnaryOperator *S) {
+ // The CFG does not contain `ParenExpr` as top-level statements in basic
+ // blocks, however sub-expressions can still be of that type.
+ assert(S->getSubExpr() != nullptr);
+ const Expr *SubExpr = S->getSubExpr()->IgnoreParens();
+ assert(SubExpr != nullptr);
+
+ switch (S->getOpcode()) {
+ case UO_Deref: {
+ // Skip past a reference to handle dereference of a dependent pointer.
+ const auto *SubExprVal = cast_or_null<PointerValue>(
+ Env.getValue(*SubExpr, SkipPast::Reference));
+ if (SubExprVal == nullptr)
+ break;
+
+ auto &Loc = Env.createStorageLocation(*S);
+ Env.setStorageLocation(*S, Loc);
+ Env.setValue(Loc, Env.takeOwnership(std::make_unique<ReferenceValue>(
+ SubExprVal->getPointeeLoc())));
+ break;
+ }
+ case UO_AddrOf: {
+ // Do not form a pointer to a reference. If `SubExpr` is assigned a
+ // `ReferenceValue` then form a value that points to the location of its
+ // pointee.
+ StorageLocation *PointeeLoc =
+ Env.getStorageLocation(*SubExpr, SkipPast::Reference);
+ if (PointeeLoc == nullptr)
+ break;
+
+ auto &PointerLoc = Env.createStorageLocation(*S);
+ auto &PointerVal =
+ Env.takeOwnership(std::make_unique<PointerValue>(*PointeeLoc));
+ Env.setStorageLocation(*S, PointerLoc);
+ Env.setValue(PointerLoc, PointerVal);
+ break;
+ }
+ default:
+ // FIXME: Add support for UO_LNot.
+ break;
+ }
+ }
+
+ void VisitCXXThisExpr(const CXXThisExpr *S) {
+ auto *ThisPointeeLoc = Env.getThisPointeeStorageLocation();
+ assert(ThisPointeeLoc != nullptr);
+
+ auto &Loc = Env.createStorageLocation(*S);
+ Env.setStorageLocation(*S, Loc);
+ Env.setValue(Loc, Env.takeOwnership(
+ std::make_unique<PointerValue>(*ThisPointeeLoc)));
+ }
+
+ void VisitMemberExpr(const MemberExpr *S) {
+ ValueDecl *Member = S->getMemberDecl();
+ assert(Member != nullptr);
+
+ // FIXME: Consider assigning pointer values to function member expressions.
+ if (Member->isFunctionOrFunctionTemplate())
+ return;
+
+ // The receiver can be either a value or a pointer to a value. Skip past the
+ // indirection to handle both cases.
+ auto *BaseLoc = cast_or_null<AggregateStorageLocation>(
+ Env.getStorageLocation(*S->getBase(), SkipPast::ReferenceThenPointer));
+ if (BaseLoc == nullptr)
+ return;
+
+ // FIXME: Add support for union types.
+ if (BaseLoc->getType()->isUnionType())
+ return;
+
+ auto &MemberLoc = BaseLoc->getChild(*Member);
+ if (MemberLoc.getType()->isReferenceType()) {
+ Env.setStorageLocation(*S, MemberLoc);
+ } else {
+ auto &Loc = Env.createStorageLocation(*S);
+ Env.setStorageLocation(*S, Loc);
+ Env.setValue(
+ Loc, Env.takeOwnership(std::make_unique<ReferenceValue>(MemberLoc)));
+ }
+ }
+
+ void VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *S) {
+ const Expr *InitExpr = S->getExpr();
+ assert(InitExpr != nullptr);
+
+ Value *InitExprVal = Env.getValue(*InitExpr, SkipPast::None);
+ if (InitExprVal == nullptr)
+ return;
+
+ const FieldDecl *Field = S->getField();
+ assert(Field != nullptr);
+
+ auto &ThisLoc =
+ *cast<AggregateStorageLocation>(Env.getThisPointeeStorageLocation());
+ auto &FieldLoc = ThisLoc.getChild(*Field);
+ Env.setValue(FieldLoc, *InitExprVal);
+ }
+
+ void VisitCXXConstructExpr(const CXXConstructExpr *S) {
+ const CXXConstructorDecl *ConstructorDecl = S->getConstructor();
+ assert(ConstructorDecl != nullptr);
+
+ if (ConstructorDecl->isCopyOrMoveConstructor()) {
+ assert(S->getNumArgs() == 1);
+
+ const Expr *Arg = S->getArg(0);
+ assert(Arg != nullptr);
+
+ if (S->isElidable()) {
+ auto *ArgLoc = Env.getStorageLocation(*Arg, SkipPast::Reference);
+ if (ArgLoc == nullptr)
+ return;
+
+ Env.setStorageLocation(*S, *ArgLoc);
+ } else if (auto *ArgVal = Env.getValue(*Arg, SkipPast::Reference)) {
+ auto &Loc = Env.createStorageLocation(*S);
+ Env.setStorageLocation(*S, Loc);
+ Env.setValue(Loc, *ArgVal);
+ }
+ return;
+ }
+
+ auto &Loc = Env.createStorageLocation(*S);
+ Env.setStorageLocation(*S, Loc);
+ if (Value *Val = Env.createValue(S->getType()))
+ Env.setValue(Loc, *Val);
+ }
+
+ void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *S) {
+ if (S->getOperator() == OO_Equal) {
+ assert(S->getNumArgs() == 2);
+
+ const Expr *Arg0 = S->getArg(0);
+ assert(Arg0 != nullptr);
+
+ const Expr *Arg1 = S->getArg(1);
+ assert(Arg1 != nullptr);
+
+ // Evaluate only copy and move assignment operators.
+ auto *Arg0Type = Arg0->getType()->getUnqualifiedDesugaredType();
+ auto *Arg1Type = Arg1->getType()->getUnqualifiedDesugaredType();
+ if (Arg0Type != Arg1Type)
+ return;
+
+ auto *ObjectLoc = Env.getStorageLocation(*Arg0, SkipPast::Reference);
+ if (ObjectLoc == nullptr)
+ return;
+
+ auto *Val = Env.getValue(*Arg1, SkipPast::Reference);
+ if (Val == nullptr)
+ return;
+
+ Env.setValue(*ObjectLoc, *Val);
+ }
+ }
+
+ void VisitCXXFunctionalCastExpr(const CXXFunctionalCastExpr *S) {
+ if (S->getCastKind() == CK_ConstructorConversion) {
+ // The CFG does not contain `ParenExpr` as top-level statements in basic
+ // blocks, however sub-expressions can still be of that type.
+ assert(S->getSubExpr() != nullptr);
+ const Expr *SubExpr = S->getSubExpr();
+ assert(SubExpr != nullptr);
+
+ auto *SubExprLoc = Env.getStorageLocation(*SubExpr, SkipPast::None);
+ if (SubExprLoc == nullptr)
+ return;
+
+ Env.setStorageLocation(*S, *SubExprLoc);
+ }
+ }
+
+ void VisitCXXTemporaryObjectExpr(const CXXTemporaryObjectExpr *S) {
+ auto &Loc = Env.createStorageLocation(*S);
+ Env.setStorageLocation(*S, Loc);
+ if (Value *Val = Env.createValue(S->getType()))
+ Env.setValue(Loc, *Val);
+ }
+
+ void VisitCallExpr(const CallExpr *S) {
+ if (S->isCallToStdMove()) {
+ assert(S->getNumArgs() == 1);
+
+ const Expr *Arg = S->getArg(0);
+ assert(Arg != nullptr);
+
+ auto *ArgLoc = Env.getStorageLocation(*Arg, SkipPast::None);
+ if (ArgLoc == nullptr)
+ return;
+
+ Env.setStorageLocation(*S, *ArgLoc);
+ }
+ }
+
+ void VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *S) {
+ const Expr *SubExpr = S->getSubExpr();
+ assert(SubExpr != nullptr);
+
+ auto *SubExprLoc = Env.getStorageLocation(*SubExpr, SkipPast::None);
+ if (SubExprLoc == nullptr)
+ return;
+
+ Env.setStorageLocation(*S, *SubExprLoc);
+ }
+
+ void VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *S) {
+ const Expr *SubExpr = S->getSubExpr();
+ assert(SubExpr != nullptr);
+
+ auto *SubExprLoc = Env.getStorageLocation(*SubExpr, SkipPast::None);
+ if (SubExprLoc == nullptr)
+ return;
+
+ Env.setStorageLocation(*S, *SubExprLoc);
+ }
+
+ void VisitCXXStaticCastExpr(const CXXStaticCastExpr *S) {
+ if (S->getCastKind() == CK_NoOp) {
+ const Expr *SubExpr = S->getSubExpr();
+ assert(SubExpr != nullptr);
+
+ auto *SubExprLoc = Env.getStorageLocation(*SubExpr, SkipPast::None);
+ if (SubExprLoc == nullptr)
+ return;
+
+ Env.setStorageLocation(*S, *SubExprLoc);
+ }
+ }
+
+ void VisitConditionalOperator(const ConditionalOperator *S) {
+ // FIXME: Revisit this once flow conditions are added to the framework. For
+ // `a = b ? c : d` we can add `b => a == c && !b => a == d` to the flow
+ // condition.
+ auto &Loc = Env.createStorageLocation(*S);
+ Env.setStorageLocation(*S, Loc);
+ if (Value *Val = Env.createValue(S->getType()))
+ Env.setValue(Loc, *Val);
+ }
+
+ void VisitInitListExpr(const InitListExpr *S) {
+ QualType Type = S->getType();
+
+ auto &Loc = Env.createStorageLocation(*S);
+ Env.setStorageLocation(*S, Loc);
+
+ auto *Val = Env.createValue(Type);
+ if (Val == nullptr)
+ return;
+
+ Env.setValue(Loc, *Val);
+
+ if (Type->isStructureOrClassType()) {
+ for (auto IT : llvm::zip(Type->getAsRecordDecl()->fields(), S->inits())) {
+ const FieldDecl *Field = std::get<0>(IT);
+ assert(Field != nullptr);
+
+ const Expr *Init = std::get<1>(IT);
+ assert(Init != nullptr);
+
+ if (Value *InitVal = Env.getValue(*Init, SkipPast::None))
+ cast<StructValue>(Val)->setChild(*Field, *InitVal);
+ }
+ }
+ // FIXME: Implement array initialization.
+ }
+
+ void VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *S) {
+ auto &Loc = Env.createStorageLocation(*S);
+ Env.setStorageLocation(*S, Loc);
+ Env.setValue(Loc, Env.getBoolLiteralValue(S->getValue()));
+ }
+
+private:
+ Environment &Env;
+};
+
+void transfer(const Stmt &S, Environment &Env) {
+ assert(!isa<ParenExpr>(&S));
+ TransferVisitor(Env).Visit(&S);
+}
+
+} // namespace dataflow
+} // namespace clang
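A sketch of how the free function transfer() defined above could be driven over a single basic block; the helper name transferBlockStatements is hypothetical, and Env is assumed to already hold the block's input state (the real driver lives in TypeErasedDataflowAnalysis.cpp, shown next).

    #include "clang/Analysis/CFG.h"
    #include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
    #include "clang/Analysis/FlowSensitive/Transfer.h"

    // Applies the built-in transfer function to every statement element of
    // `Block`, updating `Env` in place.
    static void transferBlockStatements(const clang::CFGBlock &Block,
                                        clang::dataflow::Environment &Env) {
      for (const clang::CFGElement &Element : Block) {
        auto Stmt = Element.getAs<clang::CFGStmt>();
        if (!Stmt.hasValue())
          continue;
        // The CFG never emits ParenExpr as a top-level element, so the assert
        // inside transfer() holds here.
        clang::dataflow::transfer(*Stmt.getValue().getStmt(), Env);
      }
    }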
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp
index 413e8d14bf0a..aaf6a834f5b3 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp
@@ -11,14 +11,19 @@
//
//===----------------------------------------------------------------------===//
+#include <memory>
#include <utility>
#include <vector>
+#include "clang/AST/DeclCXX.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
#include "clang/Analysis/FlowSensitive/DataflowWorklist.h"
+#include "clang/Analysis/FlowSensitive/Transfer.h"
#include "clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h"
+#include "clang/Analysis/FlowSensitive/Value.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/Support/raw_ostream.h"
@@ -35,15 +40,44 @@ namespace dataflow {
/// already been transferred. States in `BlockStates` that are set to
/// `llvm::None` represent basic blocks that are not evaluated yet.
static TypeErasedDataflowAnalysisState computeBlockInputState(
+ const ControlFlowContext &CFCtx,
std::vector<llvm::Optional<TypeErasedDataflowAnalysisState>> &BlockStates,
const CFGBlock &Block, const Environment &InitEnv,
TypeErasedDataflowAnalysis &Analysis) {
- // FIXME: Consider passing `Block` to `Analysis.typeErasedInitialElement()`
- // to enable building analyses like computation of dominators that initialize
- // the state of each basic block differently.
- TypeErasedDataflowAnalysisState State = {Analysis.typeErasedInitialElement(),
- InitEnv};
- for (const CFGBlock *Pred : Block.preds()) {
+ llvm::DenseSet<const CFGBlock *> Preds;
+ Preds.insert(Block.pred_begin(), Block.pred_end());
+ if (Block.getTerminator().isTemporaryDtorsBranch()) {
+ // This handles a special case where the code that produced the CFG includes
+ // a conditional operator with a branch that constructs a temporary and
+ // calls a destructor annotated as noreturn. The CFG models this as follows:
+ //
+ // B1 (contains the condition of the conditional operator) - succs: B2, B3
+ // B2 (contains code that does not call a noreturn destructor) - succs: B4
+ // B3 (contains code that calls a noreturn destructor) - succs: B4
+ // B4 (has temporary destructor terminator) - succs: B5, B6
+ // B5 (noreturn block that is associated with the noreturn destructor call)
+ // B6 (contains code that follows the conditional operator statement)
+ //
+ // The first successor (B5 above) of a basic block with a temporary
+ // destructor terminator (B4 above) is the block that evaluates the
+ // destructor. If that block has a noreturn element then the predecessor
+ // block that constructed the temporary object (B3 above) is effectively a
+ // noreturn block and its state should not be used as input for the state
+ // of the block that has a temporary destructor terminator (B4 above). This
+ // holds regardless of which branch of the ternary operator calls the
+ // noreturn destructor. However, it doesn't cases where a nested ternary
+ // operator includes a branch that contains a noreturn destructor call.
+ //
+ // See `NoreturnDestructorTest` for concrete examples.
+ if (Block.succ_begin()->getReachableBlock()->hasNoReturnElement()) {
+ auto StmtBlock = CFCtx.getStmtToBlock().find(Block.getTerminatorStmt());
+ assert(StmtBlock != CFCtx.getStmtToBlock().end());
+ Preds.erase(StmtBlock->getSecond());
+ }
+ }
+
+ llvm::Optional<TypeErasedDataflowAnalysisState> MaybeState;
+ for (const CFGBlock *Pred : Preds) {
// Skip if the `Block` is unreachable or control flow cannot get past it.
if (!Pred || Pred->hasNoReturnElement())
continue;
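Source of roughly the following shape produces the B1-B6 structure described in the comment above (an illustrative sketch only; the concrete cases are in NoreturnDestructorTest):

    // A type whose destructor never returns, e.g. because it logs and aborts.
    struct Fatal {
      ~Fatal() __attribute__((noreturn));
      int value();
    };

    bool cond();

    int target() {
      // The false branch constructs a Fatal temporary; because its destructor is
      // noreturn, the path that built the temporary can never continue past its
      // destruction, which is what the predecessor pruning above accounts for.
      int v = cond() ? 1 : Fatal().value();
      return v;
    }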
@@ -57,13 +91,79 @@ static TypeErasedDataflowAnalysisState computeBlockInputState(
const TypeErasedDataflowAnalysisState &PredState =
MaybePredState.getValue();
- Analysis.joinTypeErased(State.Lattice, PredState.Lattice);
- State.Env.join(PredState.Env);
+ if (MaybeState.hasValue()) {
+ Analysis.joinTypeErased(MaybeState->Lattice, PredState.Lattice);
+ MaybeState->Env.join(PredState.Env, Analysis);
+ } else {
+ MaybeState = PredState;
+ }
+ }
+ if (!MaybeState.hasValue()) {
+ // FIXME: Consider passing `Block` to `Analysis.typeErasedInitialElement()`
+ // to enable building analyses like computation of dominators that
+ // initialize the state of each basic block differently.
+ MaybeState.emplace(Analysis.typeErasedInitialElement(), InitEnv);
+ }
+ return *MaybeState;
+}
+
+/// Transfers `State` by evaluating `CfgStmt` in the context of `Analysis`.
+/// `HandleTransferredStmt` (if provided) will be applied to `CfgStmt`, after it
+/// is evaluated.
+static void
+transferCFGStmt(const CFGStmt &CfgStmt, TypeErasedDataflowAnalysis &Analysis,
+ TypeErasedDataflowAnalysisState &State,
+ std::function<void(const CFGStmt &,
+ const TypeErasedDataflowAnalysisState &)>
+ HandleTransferredStmt) {
+ const Stmt *S = CfgStmt.getStmt();
+ assert(S != nullptr);
+
+ if (Analysis.applyBuiltinTransfer())
+ transfer(*S, State.Env);
+ Analysis.transferTypeErased(S, State.Lattice, State.Env);
+
+ if (HandleTransferredStmt != nullptr)
+ HandleTransferredStmt(CfgStmt, State);
+}
+
+/// Transfers `State` by evaluating `CfgInit`.
+static void transferCFGInitializer(const CFGInitializer &CfgInit,
+ TypeErasedDataflowAnalysisState &State) {
+ const auto &ThisLoc = *cast<AggregateStorageLocation>(
+ State.Env.getThisPointeeStorageLocation());
+
+ const CXXCtorInitializer *Initializer = CfgInit.getInitializer();
+ assert(Initializer != nullptr);
+
+ auto *InitStmt = Initializer->getInit();
+ assert(InitStmt != nullptr);
+
+ auto *InitStmtLoc =
+ State.Env.getStorageLocation(*InitStmt, SkipPast::Reference);
+ if (InitStmtLoc == nullptr)
+ return;
+
+ auto *InitStmtVal = State.Env.getValue(*InitStmtLoc);
+ if (InitStmtVal == nullptr)
+ return;
+
+ const FieldDecl *Member = Initializer->getMember();
+ assert(Member != nullptr);
+
+ if (Member->getType()->isReferenceType()) {
+ auto &MemberLoc = ThisLoc.getChild(*Member);
+ State.Env.setValue(MemberLoc,
+ State.Env.takeOwnership(
+ std::make_unique<ReferenceValue>(*InitStmtLoc)));
+ } else {
+ auto &MemberLoc = ThisLoc.getChild(*Member);
+ State.Env.setValue(MemberLoc, *InitStmtVal);
}
- return State;
}
TypeErasedDataflowAnalysisState transferBlock(
+ const ControlFlowContext &CFCtx,
std::vector<llvm::Optional<TypeErasedDataflowAnalysisState>> &BlockStates,
const CFGBlock &Block, const Environment &InitEnv,
TypeErasedDataflowAnalysis &Analysis,
@@ -71,39 +171,37 @@ TypeErasedDataflowAnalysisState transferBlock(
const TypeErasedDataflowAnalysisState &)>
HandleTransferredStmt) {
TypeErasedDataflowAnalysisState State =
- computeBlockInputState(BlockStates, Block, InitEnv, Analysis);
+ computeBlockInputState(CFCtx, BlockStates, Block, InitEnv, Analysis);
for (const CFGElement &Element : Block) {
- // FIXME: Evaluate other kinds of `CFGElement`.
- const llvm::Optional<CFGStmt> Stmt = Element.getAs<CFGStmt>();
- if (!Stmt.hasValue())
- continue;
-
- // FIXME: Evaluate the statement contained in `Stmt`.
-
- State.Lattice = Analysis.transferTypeErased(Stmt.getValue().getStmt(),
- State.Lattice, State.Env);
- if (HandleTransferredStmt != nullptr)
- HandleTransferredStmt(Stmt.getValue(), State);
+ switch (Element.getKind()) {
+ case CFGElement::Statement:
+ transferCFGStmt(*Element.getAs<CFGStmt>(), Analysis, State,
+ HandleTransferredStmt);
+ break;
+ case CFGElement::Initializer:
+ if (Analysis.applyBuiltinTransfer())
+ transferCFGInitializer(*Element.getAs<CFGInitializer>(), State);
+ break;
+ default:
+ // FIXME: Evaluate other kinds of `CFGElement`.
+ break;
+ }
}
return State;
}
std::vector<llvm::Optional<TypeErasedDataflowAnalysisState>>
-runTypeErasedDataflowAnalysis(const CFG &Cfg,
+runTypeErasedDataflowAnalysis(const ControlFlowContext &CFCtx,
TypeErasedDataflowAnalysis &Analysis,
const Environment &InitEnv) {
- // FIXME: Consider enforcing that `Cfg` meets the requirements that
- // are specified in the header. This could be done by remembering
- // what options were used to build `Cfg` and asserting on them here.
-
- PostOrderCFGView POV(&Cfg);
- ForwardDataflowWorklist Worklist(Cfg, &POV);
+ PostOrderCFGView POV(&CFCtx.getCFG());
+ ForwardDataflowWorklist Worklist(CFCtx.getCFG(), &POV);
std::vector<llvm::Optional<TypeErasedDataflowAnalysisState>> BlockStates;
- BlockStates.resize(Cfg.size(), llvm::None);
+ BlockStates.resize(CFCtx.getCFG().size(), llvm::None);
// The entry basic block doesn't contain statements so it can be skipped.
- const CFGBlock &Entry = Cfg.getEntry();
+ const CFGBlock &Entry = CFCtx.getCFG().getEntry();
BlockStates[Entry.getBlockID()] = {Analysis.typeErasedInitialElement(),
InitEnv};
Worklist.enqueueSuccessors(&Entry);
@@ -114,8 +212,8 @@ runTypeErasedDataflowAnalysis(const CFG &Cfg,
// FIXME: Consider making the maximum number of iterations configurable.
// FIXME: Set up statistics (see llvm/ADT/Statistic.h) to count average number
// of iterations, number of functions that time out, etc.
- unsigned Iterations = 0;
- static constexpr unsigned MaxIterations = 1 << 16;
+ uint32_t Iterations = 0;
+ static constexpr uint32_t MaxIterations = 1 << 16;
while (const CFGBlock *Block = Worklist.dequeue()) {
if (++Iterations > MaxIterations) {
llvm::errs() << "Maximum number of iterations reached, giving up.\n";
@@ -125,7 +223,7 @@ runTypeErasedDataflowAnalysis(const CFG &Cfg,
const llvm::Optional<TypeErasedDataflowAnalysisState> &OldBlockState =
BlockStates[Block->getBlockID()];
TypeErasedDataflowAnalysisState NewBlockState =
- transferBlock(BlockStates, *Block, InitEnv, Analysis);
+ transferBlock(CFCtx, BlockStates, *Block, InitEnv, Analysis);
if (OldBlockState.hasValue() &&
Analysis.isEqualTypeErased(OldBlockState.getValue().Lattice,
diff --git a/contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp b/contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp
index a38ae34f4b81..811146e50b45 100644
--- a/contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp
@@ -819,12 +819,11 @@ void TransferFunctions::VisitGCCAsmStmt(GCCAsmStmt *as) {
while (const auto *UO = dyn_cast<UnaryOperator>(Ex))
Ex = stripCasts(C, UO->getSubExpr());
+ // Mark the variable as potentially uninitialized for those cases where
+ // it's used on an indirect path, where it's not guaranteed to be
+ // defined.
if (const VarDecl *VD = findVar(Ex).getDecl())
- if (vals[VD] != Initialized)
- // If the variable isn't initialized by the time we get here, then we
- // mark it as potentially uninitialized for those cases where it's used
- // on an indirect path, where it's not guaranteed to be defined.
- vals[VD] = MayUninitialized;
+ vals[VD] = MayUninitialized;
}
}
diff --git a/contrib/llvm-project/clang/lib/Basic/DarwinSDKInfo.cpp b/contrib/llvm-project/clang/lib/Basic/DarwinSDKInfo.cpp
index fe35f77782c9..64bcb45a4cd8 100644
--- a/contrib/llvm-project/clang/lib/Basic/DarwinSDKInfo.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/DarwinSDKInfo.cpp
@@ -84,6 +84,25 @@ DarwinSDKInfo::parseDarwinSDKSettingsJSON(const llvm::json::Object *Obj) {
llvm::DenseMap<OSEnvPair::StorageType, Optional<RelatedTargetVersionMapping>>
VersionMappings;
if (const auto *VM = Obj->getObject("VersionMap")) {
+ // FIXME: Generalize this out beyond iOS-deriving targets.
+ // Look for ios_<targetos> version mapping for targets that derive from ios.
+ for (const auto &KV : *VM) {
+ auto Pair = StringRef(KV.getFirst()).split("_");
+ if (Pair.first.compare_insensitive("ios") == 0) {
+ llvm::Triple TT(llvm::Twine("--") + Pair.second.lower());
+ if (TT.getOS() != llvm::Triple::UnknownOS) {
+ auto Mapping = RelatedTargetVersionMapping::parseJSON(
+ *KV.getSecond().getAsObject(), *MaximumDeploymentVersion);
+ if (Mapping)
+ VersionMappings[OSEnvPair(llvm::Triple::IOS,
+ llvm::Triple::UnknownEnvironment,
+ TT.getOS(),
+ llvm::Triple::UnknownEnvironment)
+ .Value] = std::move(Mapping);
+ }
+ }
+ }
+
if (const auto *Mapping = VM->getObject("macOS_iOSMac")) {
auto VersionMap = RelatedTargetVersionMapping::parseJSON(
*Mapping, *MaximumDeploymentVersion);
diff --git a/contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp b/contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp
index 9b7ad96b949f..ac4b9d2cd5a2 100644
--- a/contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp
@@ -374,6 +374,12 @@ void DiagnosticsEngine::setSeverity(diag::kind Diag, diag::Severity Map,
DiagnosticMapping Mapping = makeUserMapping(Map, L);
Mapping.setUpgradedFromWarning(WasUpgradedFromWarning);
+ // Make sure we propagate the NoWarningAsError flag from an existing
+ // mapping (which may be the default mapping).
+ DiagnosticMapping &Info = GetCurDiagState()->getOrAddMapping(Diag);
+ Mapping.setNoWarningAsError(Info.hasNoWarningAsError() ||
+ Mapping.hasNoWarningAsError());
+
// Common case; setting all the diagnostics of a group in one place.
if ((L.isInvalid() || L == DiagStatesByLoc.getCurDiagStateLoc()) &&
DiagStatesByLoc.getCurDiagState()) {
diff --git a/contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp b/contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp
index a9f2d09924cd..87db131992e4 100644
--- a/contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp
@@ -33,7 +33,7 @@ struct StaticDiagInfoRec;
// platforms. See "How To Write Shared Libraries" by Ulrich Drepper.
struct StaticDiagInfoDescriptionStringTable {
#define DIAG(ENUM, CLASS, DEFAULT_SEVERITY, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
char ENUM##_desc[sizeof(DESC)];
// clang-format off
#include "clang/Basic/DiagnosticCommonKinds.inc"
@@ -54,7 +54,7 @@ struct StaticDiagInfoDescriptionStringTable {
const StaticDiagInfoDescriptionStringTable StaticDiagInfoDescriptions = {
#define DIAG(ENUM, CLASS, DEFAULT_SEVERITY, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
DESC,
// clang-format off
#include "clang/Basic/DiagnosticCommonKinds.inc"
@@ -79,7 +79,7 @@ extern const StaticDiagInfoRec StaticDiagInfo[];
// StaticDiagInfoRec would have extra padding on 64-bit platforms.
const uint32_t StaticDiagInfoDescriptionOffsets[] = {
#define DIAG(ENUM, CLASS, DEFAULT_SEVERITY, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
offsetof(StaticDiagInfoDescriptionStringTable, ENUM##_desc),
// clang-format off
#include "clang/Basic/DiagnosticCommonKinds.inc"
@@ -115,6 +115,7 @@ struct StaticDiagInfoRec {
uint8_t Category : 6;
uint8_t WarnNoWerror : 1;
uint8_t WarnShowInSystemHeader : 1;
+ uint8_t WarnShowInSystemMacro : 1;
uint16_t OptionGroupIndex : 15;
uint16_t Deferrable : 1;
@@ -170,7 +171,7 @@ VALIDATE_DIAG_SIZE(REFACTORING)
const StaticDiagInfoRec StaticDiagInfo[] = {
// clang-format off
#define DIAG(ENUM, CLASS, DEFAULT_SEVERITY, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
{ \
diag::ENUM, \
DEFAULT_SEVERITY, \
@@ -179,6 +180,7 @@ const StaticDiagInfoRec StaticDiagInfo[] = {
CATEGORY, \
NOWERROR, \
SHOWINSYSHEADER, \
+ SHOWINSYSMACRO, \
GROUP, \
DEFERRABLE, \
STR_SIZE(DESC, uint16_t)},
@@ -586,6 +588,13 @@ DiagnosticIDs::getDiagnosticSeverity(unsigned DiagID, SourceLocation Loc,
Diag.getSourceManager().getExpansionLoc(Loc)))
return diag::Severity::Ignored;
+ // We also ignore warnings due to system macros
+ bool ShowInSystemMacro =
+ !GetDiagInfo(DiagID) || GetDiagInfo(DiagID)->WarnShowInSystemMacro;
+ if (State->SuppressSystemWarnings && !ShowInSystemMacro && Loc.isValid() &&
+ Diag.getSourceManager().isInSystemMacro(Loc))
+ return diag::Severity::Ignored;
+
return Result;
}
diff --git a/contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp b/contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp
index d811aeec84a0..b86cb7af69bd 100644
--- a/contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp
@@ -309,6 +309,14 @@ IdentifierInfo::isReserved(const LangOptions &LangOpts) const {
return ReservedIdentifierStatus::NotReserved;
}
+StringRef IdentifierInfo::deuglifiedName() const {
+ StringRef Name = getName();
+ if (Name.size() >= 2 && Name.front() == '_' &&
+ (Name[1] == '_' || (Name[1] >= 'A' && Name[1] <= 'Z')))
+ return Name.ltrim('_');
+ return Name;
+}
+
tok::PPKeywordKind IdentifierInfo::getPPKeywordID() const {
// We use a perfect hash function here involving the length of the keyword,
// the first and third character. For preprocessor ID's there are no
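The new deuglifiedName() lets diagnostics print the name a user would plausibly have written for a reserved identifier. The same rule restated standalone (a sketch, not the clang method):

    #include <cassert>
    #include <string>

    // Strips leading underscores only for reserved spellings, i.e. names that
    // start with "__" or with "_" followed by an uppercase letter.
    static std::string deuglify(const std::string &Name) {
      if (Name.size() >= 2 && Name[0] == '_' &&
          (Name[1] == '_' || (Name[1] >= 'A' && Name[1] <= 'Z'))) {
        auto Pos = Name.find_first_not_of('_');
        return Pos == std::string::npos ? std::string() : Name.substr(Pos);
      }
      return Name;
    }

    int main() {
      assert(deuglify("__value") == "value");
      assert(deuglify("_Value") == "Value");
      assert(deuglify("_value") == "_value"); // lowercase after "_": left as-is
    }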
diff --git a/contrib/llvm-project/clang/lib/Basic/OpenCLOptions.cpp b/contrib/llvm-project/clang/lib/Basic/OpenCLOptions.cpp
index b7408f39bdab..7e89b3f1b804 100644
--- a/contrib/llvm-project/clang/lib/Basic/OpenCLOptions.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/OpenCLOptions.cpp
@@ -12,6 +12,17 @@
namespace clang {
+const OpenCLOptions::FeatureDepList OpenCLOptions::DependentFeaturesList = {
+ {"__opencl_c_read_write_images", "__opencl_c_images"},
+ {"__opencl_c_3d_image_writes", "__opencl_c_images"},
+ {"__opencl_c_pipes", "__opencl_c_generic_address_space"},
+ {"__opencl_c_device_enqueue", "__opencl_c_generic_address_space"},
+ {"__opencl_c_device_enqueue", "__opencl_c_program_scope_global_variables"}};
+
+const llvm::StringMap<llvm::StringRef> OpenCLOptions::FeatureExtensionMap = {
+ {"cl_khr_fp64", "__opencl_c_fp64"},
+ {"cl_khr_3d_image_writes", "__opencl_c_3d_image_writes"}};
+
bool OpenCLOptions::isKnown(llvm::StringRef Ext) const {
return OptMap.find(Ext) != OptMap.end();
}
@@ -108,33 +119,23 @@ void OpenCLOptions::disableAll() {
bool OpenCLOptions::diagnoseUnsupportedFeatureDependencies(
const TargetInfo &TI, DiagnosticsEngine &Diags) {
- // Feature pairs. First feature in a pair requires the second one to be
- // supported.
- static const llvm::StringMap<llvm::StringRef> DependentFeaturesMap = {
- {"__opencl_c_read_write_images", "__opencl_c_images"},
- {"__opencl_c_3d_image_writes", "__opencl_c_images"},
- {"__opencl_c_pipes", "__opencl_c_generic_address_space"}};
-
auto OpenCLFeaturesMap = TI.getSupportedOpenCLOpts();
bool IsValid = true;
- for (auto &FeaturePair : DependentFeaturesMap)
- if (TI.hasFeatureEnabled(OpenCLFeaturesMap, FeaturePair.getKey()) &&
- !TI.hasFeatureEnabled(OpenCLFeaturesMap, FeaturePair.getValue())) {
+ for (auto &FeaturePair : DependentFeaturesList) {
+ auto Feature = FeaturePair.first;
+ auto Dep = FeaturePair.second;
+ if (TI.hasFeatureEnabled(OpenCLFeaturesMap, Feature) &&
+ !TI.hasFeatureEnabled(OpenCLFeaturesMap, Dep)) {
IsValid = false;
- Diags.Report(diag::err_opencl_feature_requires)
- << FeaturePair.getKey() << FeaturePair.getValue();
+ Diags.Report(diag::err_opencl_feature_requires) << Feature << Dep;
}
+ }
return IsValid;
}
bool OpenCLOptions::diagnoseFeatureExtensionDifferences(
const TargetInfo &TI, DiagnosticsEngine &Diags) {
- // Extensions and equivalent feature pairs.
- static const llvm::StringMap<llvm::StringRef> FeatureExtensionMap = {
- {"cl_khr_fp64", "__opencl_c_fp64"},
- {"cl_khr_3d_image_writes", "__opencl_c_3d_image_writes"}};
-
auto OpenCLFeaturesMap = TI.getSupportedOpenCLOpts();
bool IsValid = true;
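The dependency table becomes a list of (feature, dependency) pairs rather than a string map, presumably because __opencl_c_device_enqueue now carries two requirements and a map keyed by feature name can hold only one entry per key. A standalone illustration of that difference (standard containers, not the OpenCLOptions types):

    #include <iostream>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    int main() {
      // A map keyed by feature name silently ignores the second dependency...
      std::map<std::string, std::string> DepMap;
      DepMap.insert({"__opencl_c_device_enqueue", "__opencl_c_generic_address_space"});
      DepMap.insert({"__opencl_c_device_enqueue", "__opencl_c_program_scope_global_variables"});
      std::cout << "map entries: " << DepMap.size() << "\n"; // 1

      // ...while a list of pairs keeps both requirements.
      std::vector<std::pair<std::string, std::string>> DepList = {
          {"__opencl_c_device_enqueue", "__opencl_c_generic_address_space"},
          {"__opencl_c_device_enqueue", "__opencl_c_program_scope_global_variables"}};
      std::cout << "list entries: " << DepList.size() << "\n"; // 2
    }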
diff --git a/contrib/llvm-project/clang/lib/Basic/TargetID.cpp b/contrib/llvm-project/clang/lib/Basic/TargetID.cpp
index 59d416f0e015..3b8f4c13b9bf 100644
--- a/contrib/llvm-project/clang/lib/Basic/TargetID.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/TargetID.cpp
@@ -15,7 +15,7 @@
namespace clang {
-static const llvm::SmallVector<llvm::StringRef, 4>
+static llvm::SmallVector<llvm::StringRef, 4>
getAllPossibleAMDGPUTargetIDFeatures(const llvm::Triple &T,
llvm::StringRef Proc) {
// Entries in returned vector should be in alphabetical order.
@@ -33,7 +33,7 @@ getAllPossibleAMDGPUTargetIDFeatures(const llvm::Triple &T,
return Ret;
}
-const llvm::SmallVector<llvm::StringRef, 4>
+llvm::SmallVector<llvm::StringRef, 4>
getAllPossibleTargetIDFeatures(const llvm::Triple &T,
llvm::StringRef Processor) {
llvm::SmallVector<llvm::StringRef, 4> Ret;
diff --git a/contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp b/contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp
index 646bbe8b7387..e3a2f30febe7 100644
--- a/contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp
@@ -25,7 +25,7 @@ using namespace clang;
static const LangASMap DefaultAddrSpaceMap = {0};
// TargetInfo Constructor.
-TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
+TargetInfo::TargetInfo(const llvm::Triple &T) : Triple(T) {
// Set defaults. Defaults are set for a 32-bit RISC platform, like PPC or
// SPARC. These should be overridden by concrete targets as needed.
BigEndian = !T.isLittleEndian();
@@ -150,6 +150,7 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
PlatformMinVersion = VersionTuple();
MaxOpenCLWorkGroupSize = 1024;
+ ProgramAddrSpace = 0;
}
// Out of line virtual dtor for TargetInfo.
@@ -421,6 +422,8 @@ void TargetInfo::adjust(DiagnosticsEngine &Diags, LangOptions &Opts) {
OpenCLFeaturesMap, "__opencl_c_generic_address_space");
Opts.OpenCLPipes =
hasFeatureEnabled(OpenCLFeaturesMap, "__opencl_c_pipes");
+ Opts.Blocks =
+ hasFeatureEnabled(OpenCLFeaturesMap, "__opencl_c_device_enqueue");
}
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
index 4089a393b762..8e23cc4c421a 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
@@ -45,6 +45,7 @@ static StringRef getArchVersionString(llvm::AArch64::ArchKind Kind) {
case llvm::AArch64::ArchKind::ARMV9A:
case llvm::AArch64::ArchKind::ARMV9_1A:
case llvm::AArch64::ArchKind::ARMV9_2A:
+ case llvm::AArch64::ArchKind::ARMV9_3A:
return "9";
default:
return "8";
@@ -223,6 +224,12 @@ void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
getTargetDefinesARMV86A(Opts, Builder);
}
+void AArch64TargetInfo::getTargetDefinesARMV88A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Also include the Armv8.7 defines
+ getTargetDefinesARMV87A(Opts, Builder);
+}
+
void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
MacroBuilder &Builder) const {
// Armv9-A maps to Armv8.5-A
@@ -241,6 +248,12 @@ void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions &Opts,
getTargetDefinesARMV87A(Opts, Builder);
}
+void AArch64TargetInfo::getTargetDefinesARMV93A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Armv9.3-A maps to Armv8.8-A
+ getTargetDefinesARMV88A(Opts, Builder);
+}
+
void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
// Target identification.
@@ -446,6 +459,9 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
case llvm::AArch64::ArchKind::ARMV8_7A:
getTargetDefinesARMV87A(Opts, Builder);
break;
+ case llvm::AArch64::ArchKind::ARMV8_8A:
+ getTargetDefinesARMV88A(Opts, Builder);
+ break;
case llvm::AArch64::ArchKind::ARMV9A:
getTargetDefinesARMV9A(Opts, Builder);
break;
@@ -455,6 +471,9 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
case llvm::AArch64::ArchKind::ARMV9_2A:
getTargetDefinesARMV92A(Opts, Builder);
break;
+ case llvm::AArch64::ArchKind::ARMV9_3A:
+ getTargetDefinesARMV93A(Opts, Builder);
+ break;
}
// All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
@@ -524,6 +543,8 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasMatmulFP64 = false;
HasMatmulFP32 = false;
HasLSE = false;
+ HasHBC = false;
+ HasMOPS = false;
ArchKind = llvm::AArch64::ArchKind::INVALID;
@@ -532,36 +553,36 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
FPU |= NeonMode;
if (Feature == "+sve") {
FPU |= SveMode;
- HasFullFP16 = 1;
+ HasFullFP16 = true;
}
if (Feature == "+sve2") {
FPU |= SveMode;
- HasFullFP16 = 1;
- HasSVE2 = 1;
+ HasFullFP16 = true;
+ HasSVE2 = true;
}
if (Feature == "+sve2-aes") {
FPU |= SveMode;
- HasFullFP16 = 1;
- HasSVE2 = 1;
- HasSVE2AES = 1;
+ HasFullFP16 = true;
+ HasSVE2 = true;
+ HasSVE2AES = true;
}
if (Feature == "+sve2-sha3") {
FPU |= SveMode;
- HasFullFP16 = 1;
- HasSVE2 = 1;
- HasSVE2SHA3 = 1;
+ HasFullFP16 = true;
+ HasSVE2 = true;
+ HasSVE2SHA3 = true;
}
if (Feature == "+sve2-sm4") {
FPU |= SveMode;
- HasFullFP16 = 1;
- HasSVE2 = 1;
- HasSVE2SM4 = 1;
+ HasFullFP16 = true;
+ HasSVE2 = true;
+ HasSVE2SM4 = true;
}
if (Feature == "+sve2-bitperm") {
FPU |= SveMode;
- HasFullFP16 = 1;
- HasSVE2 = 1;
- HasSVE2BitPerm = 1;
+ HasFullFP16 = true;
+ HasSVE2 = true;
+ HasSVE2BitPerm = true;
}
if (Feature == "+f32mm") {
FPU |= SveMode;
@@ -603,12 +624,16 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
ArchKind = llvm::AArch64::ArchKind::ARMV8_6A;
if (Feature == "+v8.7a")
ArchKind = llvm::AArch64::ArchKind::ARMV8_7A;
+ if (Feature == "+v8.8a")
+ ArchKind = llvm::AArch64::ArchKind::ARMV8_8A;
if (Feature == "+v9a")
ArchKind = llvm::AArch64::ArchKind::ARMV9A;
if (Feature == "+v9.1a")
ArchKind = llvm::AArch64::ArchKind::ARMV9_1A;
if (Feature == "+v9.2a")
ArchKind = llvm::AArch64::ArchKind::ARMV9_2A;
+ if (Feature == "+v9.3a")
+ ArchKind = llvm::AArch64::ArchKind::ARMV9_3A;
if (Feature == "+v8r")
ArchKind = llvm::AArch64::ArchKind::ARMV8R;
if (Feature == "+fullfp16")
@@ -635,6 +660,8 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasRandGen = true;
if (Feature == "+flagm")
HasFlagM = true;
+ if (Feature == "+hbc")
+ HasHBC = true;
}
setDataLayout();
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h
index 74745df3be8d..ebddce0c1c73 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h
@@ -15,6 +15,7 @@
#include "OSTargets.h"
#include "clang/Basic/TargetBuiltins.h"
+#include "llvm/Support/AArch64TargetParser.h"
#include "llvm/Support/TargetParser.h"
namespace clang {
@@ -53,6 +54,8 @@ class LLVM_LIBRARY_VISIBILITY AArch64TargetInfo : public TargetInfo {
bool HasMatmulFP32;
bool HasLSE;
bool HasFlagM;
+ bool HasHBC;
+ bool HasMOPS;
llvm::AArch64::ArchKind ArchKind;
@@ -92,12 +95,16 @@ public:
MacroBuilder &Builder) const;
void getTargetDefinesARMV87A(const LangOptions &Opts,
MacroBuilder &Builder) const;
+ void getTargetDefinesARMV88A(const LangOptions &Opts,
+ MacroBuilder &Builder) const;
void getTargetDefinesARMV9A(const LangOptions &Opts,
MacroBuilder &Builder) const;
void getTargetDefinesARMV91A(const LangOptions &Opts,
MacroBuilder &Builder) const;
void getTargetDefinesARMV92A(const LangOptions &Opts,
MacroBuilder &Builder) const;
+ void getTargetDefinesARMV93A(const LangOptions &Opts,
+ MacroBuilder &Builder) const;
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp
index c619d6cde41d..478a0233398d 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp
@@ -212,12 +212,16 @@ StringRef ARMTargetInfo::getCPUAttr() const {
return "8_6A";
case llvm::ARM::ArchKind::ARMV8_7A:
return "8_7A";
+ case llvm::ARM::ArchKind::ARMV8_8A:
+ return "8_8A";
case llvm::ARM::ArchKind::ARMV9A:
return "9A";
case llvm::ARM::ArchKind::ARMV9_1A:
return "9_1A";
case llvm::ARM::ArchKind::ARMV9_2A:
return "9_2A";
+ case llvm::ARM::ArchKind::ARMV9_3A:
+ return "9_3A";
case llvm::ARM::ArchKind::ARMV8MBaseline:
return "8M_BASE";
case llvm::ARM::ArchKind::ARMV8MMainline:
@@ -930,9 +934,11 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
case llvm::ARM::ArchKind::ARMV8_4A:
case llvm::ARM::ArchKind::ARMV8_5A:
case llvm::ARM::ArchKind::ARMV8_6A:
+ case llvm::ARM::ArchKind::ARMV8_8A:
case llvm::ARM::ArchKind::ARMV9A:
case llvm::ARM::ArchKind::ARMV9_1A:
case llvm::ARM::ArchKind::ARMV9_2A:
+ case llvm::ARM::ArchKind::ARMV9_3A:
getTargetDefinesARMV83A(Opts, Builder);
break;
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/ARM.h b/contrib/llvm-project/clang/lib/Basic/Targets/ARM.h
index 40c658f3f40e..f074dac57f9b 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/ARM.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/ARM.h
@@ -18,6 +18,7 @@
#include "clang/Basic/TargetOptions.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ARMTargetParser.h"
#include "llvm/Support/TargetParser.h"
namespace clang {
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AVR.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/AVR.cpp
index 50b0fc07b311..6266ed72cd5c 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AVR.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AVR.cpp
@@ -24,281 +24,282 @@ namespace targets {
struct LLVM_LIBRARY_VISIBILITY MCUInfo {
const char *Name;
const char *DefineName;
+ const int NumFlashBanks; // -1 means the device does not support LPM/ELPM.
};
// This list should be kept up-to-date with AVRDevices.td in LLVM.
static MCUInfo AVRMcus[] = {
- {"at90s1200", "__AVR_AT90S1200__"},
- {"attiny11", "__AVR_ATtiny11__"},
- {"attiny12", "__AVR_ATtiny12__"},
- {"attiny15", "__AVR_ATtiny15__"},
- {"attiny28", "__AVR_ATtiny28__"},
- {"at90s2313", "__AVR_AT90S2313__"},
- {"at90s2323", "__AVR_AT90S2323__"},
- {"at90s2333", "__AVR_AT90S2333__"},
- {"at90s2343", "__AVR_AT90S2343__"},
- {"attiny22", "__AVR_ATtiny22__"},
- {"attiny26", "__AVR_ATtiny26__"},
- {"at86rf401", "__AVR_AT86RF401__"},
- {"at90s4414", "__AVR_AT90S4414__"},
- {"at90s4433", "__AVR_AT90S4433__"},
- {"at90s4434", "__AVR_AT90S4434__"},
- {"at90s8515", "__AVR_AT90S8515__"},
- {"at90c8534", "__AVR_AT90c8534__"},
- {"at90s8535", "__AVR_AT90S8535__"},
- {"ata5272", "__AVR_ATA5272__"},
- {"attiny13", "__AVR_ATtiny13__"},
- {"attiny13a", "__AVR_ATtiny13A__"},
- {"attiny2313", "__AVR_ATtiny2313__"},
- {"attiny2313a", "__AVR_ATtiny2313A__"},
- {"attiny24", "__AVR_ATtiny24__"},
- {"attiny24a", "__AVR_ATtiny24A__"},
- {"attiny4313", "__AVR_ATtiny4313__"},
- {"attiny44", "__AVR_ATtiny44__"},
- {"attiny44a", "__AVR_ATtiny44A__"},
- {"attiny84", "__AVR_ATtiny84__"},
- {"attiny84a", "__AVR_ATtiny84A__"},
- {"attiny25", "__AVR_ATtiny25__"},
- {"attiny45", "__AVR_ATtiny45__"},
- {"attiny85", "__AVR_ATtiny85__"},
- {"attiny261", "__AVR_ATtiny261__"},
- {"attiny261a", "__AVR_ATtiny261A__"},
- {"attiny441", "__AVR_ATtiny441__"},
- {"attiny461", "__AVR_ATtiny461__"},
- {"attiny461a", "__AVR_ATtiny461A__"},
- {"attiny841", "__AVR_ATtiny841__"},
- {"attiny861", "__AVR_ATtiny861__"},
- {"attiny861a", "__AVR_ATtiny861A__"},
- {"attiny87", "__AVR_ATtiny87__"},
- {"attiny43u", "__AVR_ATtiny43U__"},
- {"attiny48", "__AVR_ATtiny48__"},
- {"attiny88", "__AVR_ATtiny88__"},
- {"attiny828", "__AVR_ATtiny828__"},
- {"at43usb355", "__AVR_AT43USB355__"},
- {"at76c711", "__AVR_AT76C711__"},
- {"atmega103", "__AVR_ATmega103__"},
- {"at43usb320", "__AVR_AT43USB320__"},
- {"attiny167", "__AVR_ATtiny167__"},
- {"at90usb82", "__AVR_AT90USB82__"},
- {"at90usb162", "__AVR_AT90USB162__"},
- {"ata5505", "__AVR_ATA5505__"},
- {"atmega8u2", "__AVR_ATmega8U2__"},
- {"atmega16u2", "__AVR_ATmega16U2__"},
- {"atmega32u2", "__AVR_ATmega32U2__"},
- {"attiny1634", "__AVR_ATtiny1634__"},
- {"atmega8", "__AVR_ATmega8__"},
- {"ata6289", "__AVR_ATA6289__"},
- {"atmega8a", "__AVR_ATmega8A__"},
- {"ata6285", "__AVR_ATA6285__"},
- {"ata6286", "__AVR_ATA6286__"},
- {"atmega48", "__AVR_ATmega48__"},
- {"atmega48a", "__AVR_ATmega48A__"},
- {"atmega48pa", "__AVR_ATmega48PA__"},
- {"atmega48pb", "__AVR_ATmega48PB__"},
- {"atmega48p", "__AVR_ATmega48P__"},
- {"atmega88", "__AVR_ATmega88__"},
- {"atmega88a", "__AVR_ATmega88A__"},
- {"atmega88p", "__AVR_ATmega88P__"},
- {"atmega88pa", "__AVR_ATmega88PA__"},
- {"atmega88pb", "__AVR_ATmega88PB__"},
- {"atmega8515", "__AVR_ATmega8515__"},
- {"atmega8535", "__AVR_ATmega8535__"},
- {"atmega8hva", "__AVR_ATmega8HVA__"},
- {"at90pwm1", "__AVR_AT90PWM1__"},
- {"at90pwm2", "__AVR_AT90PWM2__"},
- {"at90pwm2b", "__AVR_AT90PWM2B__"},
- {"at90pwm3", "__AVR_AT90PWM3__"},
- {"at90pwm3b", "__AVR_AT90PWM3B__"},
- {"at90pwm81", "__AVR_AT90PWM81__"},
- {"ata5790", "__AVR_ATA5790__"},
- {"ata5795", "__AVR_ATA5795__"},
- {"atmega16", "__AVR_ATmega16__"},
- {"atmega16a", "__AVR_ATmega16A__"},
- {"atmega161", "__AVR_ATmega161__"},
- {"atmega162", "__AVR_ATmega162__"},
- {"atmega163", "__AVR_ATmega163__"},
- {"atmega164a", "__AVR_ATmega164A__"},
- {"atmega164p", "__AVR_ATmega164P__"},
- {"atmega164pa", "__AVR_ATmega164PA__"},
- {"atmega165", "__AVR_ATmega165__"},
- {"atmega165a", "__AVR_ATmega165A__"},
- {"atmega165p", "__AVR_ATmega165P__"},
- {"atmega165pa", "__AVR_ATmega165PA__"},
- {"atmega168", "__AVR_ATmega168__"},
- {"atmega168a", "__AVR_ATmega168A__"},
- {"atmega168p", "__AVR_ATmega168P__"},
- {"atmega168pa", "__AVR_ATmega168PA__"},
- {"atmega168pb", "__AVR_ATmega168PB__"},
- {"atmega169", "__AVR_ATmega169__"},
- {"atmega169a", "__AVR_ATmega169A__"},
- {"atmega169p", "__AVR_ATmega169P__"},
- {"atmega169pa", "__AVR_ATmega169PA__"},
- {"atmega32", "__AVR_ATmega32__"},
- {"atmega32a", "__AVR_ATmega32A__"},
- {"atmega323", "__AVR_ATmega323__"},
- {"atmega324a", "__AVR_ATmega324A__"},
- {"atmega324p", "__AVR_ATmega324P__"},
- {"atmega324pa", "__AVR_ATmega324PA__"},
- {"atmega324pb", "__AVR_ATmega324PB__"},
- {"atmega325", "__AVR_ATmega325__"},
- {"atmega325a", "__AVR_ATmega325A__"},
- {"atmega325p", "__AVR_ATmega325P__"},
- {"atmega325pa", "__AVR_ATmega325PA__"},
- {"atmega3250", "__AVR_ATmega3250__"},
- {"atmega3250a", "__AVR_ATmega3250A__"},
- {"atmega3250p", "__AVR_ATmega3250P__"},
- {"atmega3250pa", "__AVR_ATmega3250PA__"},
- {"atmega328", "__AVR_ATmega328__"},
- {"atmega328p", "__AVR_ATmega328P__"},
- {"atmega328pb", "__AVR_ATmega328PB__"},
- {"atmega329", "__AVR_ATmega329__"},
- {"atmega329a", "__AVR_ATmega329A__"},
- {"atmega329p", "__AVR_ATmega329P__"},
- {"atmega329pa", "__AVR_ATmega329PA__"},
- {"atmega3290", "__AVR_ATmega3290__"},
- {"atmega3290a", "__AVR_ATmega3290A__"},
- {"atmega3290p", "__AVR_ATmega3290P__"},
- {"atmega3290pa", "__AVR_ATmega3290PA__"},
- {"atmega406", "__AVR_ATmega406__"},
- {"atmega64", "__AVR_ATmega64__"},
- {"atmega64a", "__AVR_ATmega64A__"},
- {"atmega640", "__AVR_ATmega640__"},
- {"atmega644", "__AVR_ATmega644__"},
- {"atmega644a", "__AVR_ATmega644A__"},
- {"atmega644p", "__AVR_ATmega644P__"},
- {"atmega644pa", "__AVR_ATmega644PA__"},
- {"atmega645", "__AVR_ATmega645__"},
- {"atmega645a", "__AVR_ATmega645A__"},
- {"atmega645p", "__AVR_ATmega645P__"},
- {"atmega649", "__AVR_ATmega649__"},
- {"atmega649a", "__AVR_ATmega649A__"},
- {"atmega649p", "__AVR_ATmega649P__"},
- {"atmega6450", "__AVR_ATmega6450__"},
- {"atmega6450a", "__AVR_ATmega6450A__"},
- {"atmega6450p", "__AVR_ATmega6450P__"},
- {"atmega6490", "__AVR_ATmega6490__"},
- {"atmega6490a", "__AVR_ATmega6490A__"},
- {"atmega6490p", "__AVR_ATmega6490P__"},
- {"atmega64rfr2", "__AVR_ATmega64RFR2__"},
- {"atmega644rfr2", "__AVR_ATmega644RFR2__"},
- {"atmega16hva", "__AVR_ATmega16HVA__"},
- {"atmega16hva2", "__AVR_ATmega16HVA2__"},
- {"atmega16hvb", "__AVR_ATmega16HVB__"},
- {"atmega16hvbrevb", "__AVR_ATmega16HVBREVB__"},
- {"atmega32hvb", "__AVR_ATmega32HVB__"},
- {"atmega32hvbrevb", "__AVR_ATmega32HVBREVB__"},
- {"atmega64hve", "__AVR_ATmega64HVE__"},
- {"at90can32", "__AVR_AT90CAN32__"},
- {"at90can64", "__AVR_AT90CAN64__"},
- {"at90pwm161", "__AVR_AT90PWM161__"},
- {"at90pwm216", "__AVR_AT90PWM216__"},
- {"at90pwm316", "__AVR_AT90PWM316__"},
- {"atmega32c1", "__AVR_ATmega32C1__"},
- {"atmega64c1", "__AVR_ATmega64C1__"},
- {"atmega16m1", "__AVR_ATmega16M1__"},
- {"atmega32m1", "__AVR_ATmega32M1__"},
- {"atmega64m1", "__AVR_ATmega64M1__"},
- {"atmega16u4", "__AVR_ATmega16U4__"},
- {"atmega32u4", "__AVR_ATmega32U4__"},
- {"atmega32u6", "__AVR_ATmega32U6__"},
- {"at90usb646", "__AVR_AT90USB646__"},
- {"at90usb647", "__AVR_AT90USB647__"},
- {"at90scr100", "__AVR_AT90SCR100__"},
- {"at94k", "__AVR_AT94K__"},
- {"m3000", "__AVR_AT000__"},
- {"atmega128", "__AVR_ATmega128__"},
- {"atmega128a", "__AVR_ATmega128A__"},
- {"atmega1280", "__AVR_ATmega1280__"},
- {"atmega1281", "__AVR_ATmega1281__"},
- {"atmega1284", "__AVR_ATmega1284__"},
- {"atmega1284p", "__AVR_ATmega1284P__"},
- {"atmega128rfa1", "__AVR_ATmega128RFA1__"},
- {"atmega128rfr2", "__AVR_ATmega128RFR2__"},
- {"atmega1284rfr2", "__AVR_ATmega1284RFR2__"},
- {"at90can128", "__AVR_AT90CAN128__"},
- {"at90usb1286", "__AVR_AT90USB1286__"},
- {"at90usb1287", "__AVR_AT90USB1287__"},
- {"atmega2560", "__AVR_ATmega2560__"},
- {"atmega2561", "__AVR_ATmega2561__"},
- {"atmega256rfr2", "__AVR_ATmega256RFR2__"},
- {"atmega2564rfr2", "__AVR_ATmega2564RFR2__"},
- {"atxmega16a4", "__AVR_ATxmega16A4__"},
- {"atxmega16a4u", "__AVR_ATxmega16A4U__"},
- {"atxmega16c4", "__AVR_ATxmega16C4__"},
- {"atxmega16d4", "__AVR_ATxmega16D4__"},
- {"atxmega32a4", "__AVR_ATxmega32A4__"},
- {"atxmega32a4u", "__AVR_ATxmega32A4U__"},
- {"atxmega32c4", "__AVR_ATxmega32C4__"},
- {"atxmega32d4", "__AVR_ATxmega32D4__"},
- {"atxmega32e5", "__AVR_ATxmega32E5__"},
- {"atxmega16e5", "__AVR_ATxmega16E5__"},
- {"atxmega8e5", "__AVR_ATxmega8E5__"},
- {"atxmega32x1", "__AVR_ATxmega32X1__"},
- {"atxmega64a3", "__AVR_ATxmega64A3__"},
- {"atxmega64a3u", "__AVR_ATxmega64A3U__"},
- {"atxmega64a4u", "__AVR_ATxmega64A4U__"},
- {"atxmega64b1", "__AVR_ATxmega64B1__"},
- {"atxmega64b3", "__AVR_ATxmega64B3__"},
- {"atxmega64c3", "__AVR_ATxmega64C3__"},
- {"atxmega64d3", "__AVR_ATxmega64D3__"},
- {"atxmega64d4", "__AVR_ATxmega64D4__"},
- {"atxmega64a1", "__AVR_ATxmega64A1__"},
- {"atxmega64a1u", "__AVR_ATxmega64A1U__"},
- {"atxmega128a3", "__AVR_ATxmega128A3__"},
- {"atxmega128a3u", "__AVR_ATxmega128A3U__"},
- {"atxmega128b1", "__AVR_ATxmega128B1__"},
- {"atxmega128b3", "__AVR_ATxmega128B3__"},
- {"atxmega128c3", "__AVR_ATxmega128C3__"},
- {"atxmega128d3", "__AVR_ATxmega128D3__"},
- {"atxmega128d4", "__AVR_ATxmega128D4__"},
- {"atxmega192a3", "__AVR_ATxmega192A3__"},
- {"atxmega192a3u", "__AVR_ATxmega192A3U__"},
- {"atxmega192c3", "__AVR_ATxmega192C3__"},
- {"atxmega192d3", "__AVR_ATxmega192D3__"},
- {"atxmega256a3", "__AVR_ATxmega256A3__"},
- {"atxmega256a3u", "__AVR_ATxmega256A3U__"},
- {"atxmega256a3b", "__AVR_ATxmega256A3B__"},
- {"atxmega256a3bu", "__AVR_ATxmega256A3BU__"},
- {"atxmega256c3", "__AVR_ATxmega256C3__"},
- {"atxmega256d3", "__AVR_ATxmega256D3__"},
- {"atxmega384c3", "__AVR_ATxmega384C3__"},
- {"atxmega384d3", "__AVR_ATxmega384D3__"},
- {"atxmega128a1", "__AVR_ATxmega128A1__"},
- {"atxmega128a1u", "__AVR_ATxmega128A1U__"},
- {"atxmega128a4u", "__AVR_ATxmega128A4U__"},
- {"attiny4", "__AVR_ATtiny4__"},
- {"attiny5", "__AVR_ATtiny5__"},
- {"attiny9", "__AVR_ATtiny9__"},
- {"attiny10", "__AVR_ATtiny10__"},
- {"attiny20", "__AVR_ATtiny20__"},
- {"attiny40", "__AVR_ATtiny40__"},
- {"attiny102", "__AVR_ATtiny102__"},
- {"attiny104", "__AVR_ATtiny104__"},
- {"attiny202", "__AVR_ATtiny202__"},
- {"attiny402", "__AVR_ATtiny402__"},
- {"attiny204", "__AVR_ATtiny204__"},
- {"attiny404", "__AVR_ATtiny404__"},
- {"attiny804", "__AVR_ATtiny804__"},
- {"attiny1604", "__AVR_ATtiny1604__"},
- {"attiny406", "__AVR_ATtiny406__"},
- {"attiny806", "__AVR_ATtiny806__"},
- {"attiny1606", "__AVR_ATtiny1606__"},
- {"attiny807", "__AVR_ATtiny807__"},
- {"attiny1607", "__AVR_ATtiny1607__"},
- {"attiny212", "__AVR_ATtiny212__"},
- {"attiny412", "__AVR_ATtiny412__"},
- {"attiny214", "__AVR_ATtiny214__"},
- {"attiny414", "__AVR_ATtiny414__"},
- {"attiny814", "__AVR_ATtiny814__"},
- {"attiny1614", "__AVR_ATtiny1614__"},
- {"attiny416", "__AVR_ATtiny416__"},
- {"attiny816", "__AVR_ATtiny816__"},
- {"attiny1616", "__AVR_ATtiny1616__"},
- {"attiny3216", "__AVR_ATtiny3216__"},
- {"attiny417", "__AVR_ATtiny417__"},
- {"attiny817", "__AVR_ATtiny817__"},
- {"attiny1617", "__AVR_ATtiny1617__"},
- {"attiny3217", "__AVR_ATtiny3217__"},
+ {"at90s1200", "__AVR_AT90S1200__", 0},
+ {"attiny11", "__AVR_ATtiny11__", 0},
+ {"attiny12", "__AVR_ATtiny12__", 0},
+ {"attiny15", "__AVR_ATtiny15__", 0},
+ {"attiny28", "__AVR_ATtiny28__", 0},
+ {"at90s2313", "__AVR_AT90S2313__", 1},
+ {"at90s2323", "__AVR_AT90S2323__", 1},
+ {"at90s2333", "__AVR_AT90S2333__", 1},
+ {"at90s2343", "__AVR_AT90S2343__", 1},
+ {"attiny22", "__AVR_ATtiny22__", 1},
+ {"attiny26", "__AVR_ATtiny26__", 1},
+ {"at86rf401", "__AVR_AT86RF401__", 1},
+ {"at90s4414", "__AVR_AT90S4414__", 1},
+ {"at90s4433", "__AVR_AT90S4433__", 1},
+ {"at90s4434", "__AVR_AT90S4434__", 1},
+ {"at90s8515", "__AVR_AT90S8515__", 1},
+ {"at90c8534", "__AVR_AT90c8534__", 1},
+ {"at90s8535", "__AVR_AT90S8535__", 1},
+ {"ata5272", "__AVR_ATA5272__", 1},
+ {"attiny13", "__AVR_ATtiny13__", 1},
+ {"attiny13a", "__AVR_ATtiny13A__", 1},
+ {"attiny2313", "__AVR_ATtiny2313__", 1},
+ {"attiny2313a", "__AVR_ATtiny2313A__", 1},
+ {"attiny24", "__AVR_ATtiny24__", 1},
+ {"attiny24a", "__AVR_ATtiny24A__", 1},
+ {"attiny4313", "__AVR_ATtiny4313__", 1},
+ {"attiny44", "__AVR_ATtiny44__", 1},
+ {"attiny44a", "__AVR_ATtiny44A__", 1},
+ {"attiny84", "__AVR_ATtiny84__", 1},
+ {"attiny84a", "__AVR_ATtiny84A__", 1},
+ {"attiny25", "__AVR_ATtiny25__", 1},
+ {"attiny45", "__AVR_ATtiny45__", 1},
+ {"attiny85", "__AVR_ATtiny85__", 1},
+ {"attiny261", "__AVR_ATtiny261__", 1},
+ {"attiny261a", "__AVR_ATtiny261A__", 1},
+ {"attiny441", "__AVR_ATtiny441__", 1},
+ {"attiny461", "__AVR_ATtiny461__", 1},
+ {"attiny461a", "__AVR_ATtiny461A__", 1},
+ {"attiny841", "__AVR_ATtiny841__", 1},
+ {"attiny861", "__AVR_ATtiny861__", 1},
+ {"attiny861a", "__AVR_ATtiny861A__", 1},
+ {"attiny87", "__AVR_ATtiny87__", 1},
+ {"attiny43u", "__AVR_ATtiny43U__", 1},
+ {"attiny48", "__AVR_ATtiny48__", 1},
+ {"attiny88", "__AVR_ATtiny88__", 1},
+ {"attiny828", "__AVR_ATtiny828__", 1},
+ {"at43usb355", "__AVR_AT43USB355__", 1},
+ {"at76c711", "__AVR_AT76C711__", 1},
+ {"atmega103", "__AVR_ATmega103__", 1},
+ {"at43usb320", "__AVR_AT43USB320__", 1},
+ {"attiny167", "__AVR_ATtiny167__", 1},
+ {"at90usb82", "__AVR_AT90USB82__", 1},
+ {"at90usb162", "__AVR_AT90USB162__", 1},
+ {"ata5505", "__AVR_ATA5505__", 1},
+ {"atmega8u2", "__AVR_ATmega8U2__", 1},
+ {"atmega16u2", "__AVR_ATmega16U2__", 1},
+ {"atmega32u2", "__AVR_ATmega32U2__", 1},
+ {"attiny1634", "__AVR_ATtiny1634__", 1},
+ {"atmega8", "__AVR_ATmega8__", 1},
+ {"ata6289", "__AVR_ATA6289__", 1},
+ {"atmega8a", "__AVR_ATmega8A__", 1},
+ {"ata6285", "__AVR_ATA6285__", 1},
+ {"ata6286", "__AVR_ATA6286__", 1},
+ {"atmega48", "__AVR_ATmega48__", 1},
+ {"atmega48a", "__AVR_ATmega48A__", 1},
+ {"atmega48pa", "__AVR_ATmega48PA__", 1},
+ {"atmega48pb", "__AVR_ATmega48PB__", 1},
+ {"atmega48p", "__AVR_ATmega48P__", 1},
+ {"atmega88", "__AVR_ATmega88__", 1},
+ {"atmega88a", "__AVR_ATmega88A__", 1},
+ {"atmega88p", "__AVR_ATmega88P__", 1},
+ {"atmega88pa", "__AVR_ATmega88PA__", 1},
+ {"atmega88pb", "__AVR_ATmega88PB__", 1},
+ {"atmega8515", "__AVR_ATmega8515__", 1},
+ {"atmega8535", "__AVR_ATmega8535__", 1},
+ {"atmega8hva", "__AVR_ATmega8HVA__", 1},
+ {"at90pwm1", "__AVR_AT90PWM1__", 1},
+ {"at90pwm2", "__AVR_AT90PWM2__", 1},
+ {"at90pwm2b", "__AVR_AT90PWM2B__", 1},
+ {"at90pwm3", "__AVR_AT90PWM3__", 1},
+ {"at90pwm3b", "__AVR_AT90PWM3B__", 1},
+ {"at90pwm81", "__AVR_AT90PWM81__", 1},
+ {"ata5790", "__AVR_ATA5790__", 1},
+ {"ata5795", "__AVR_ATA5795__", 1},
+ {"atmega16", "__AVR_ATmega16__", 1},
+ {"atmega16a", "__AVR_ATmega16A__", 1},
+ {"atmega161", "__AVR_ATmega161__", 1},
+ {"atmega162", "__AVR_ATmega162__", 1},
+ {"atmega163", "__AVR_ATmega163__", 1},
+ {"atmega164a", "__AVR_ATmega164A__", 1},
+ {"atmega164p", "__AVR_ATmega164P__", 1},
+ {"atmega164pa", "__AVR_ATmega164PA__", 1},
+ {"atmega165", "__AVR_ATmega165__", 1},
+ {"atmega165a", "__AVR_ATmega165A__", 1},
+ {"atmega165p", "__AVR_ATmega165P__", 1},
+ {"atmega165pa", "__AVR_ATmega165PA__", 1},
+ {"atmega168", "__AVR_ATmega168__", 1},
+ {"atmega168a", "__AVR_ATmega168A__", 1},
+ {"atmega168p", "__AVR_ATmega168P__", 1},
+ {"atmega168pa", "__AVR_ATmega168PA__", 1},
+ {"atmega168pb", "__AVR_ATmega168PB__", 1},
+ {"atmega169", "__AVR_ATmega169__", 1},
+ {"atmega169a", "__AVR_ATmega169A__", 1},
+ {"atmega169p", "__AVR_ATmega169P__", 1},
+ {"atmega169pa", "__AVR_ATmega169PA__", 1},
+ {"atmega32", "__AVR_ATmega32__", 1},
+ {"atmega32a", "__AVR_ATmega32A__", 1},
+ {"atmega323", "__AVR_ATmega323__", 1},
+ {"atmega324a", "__AVR_ATmega324A__", 1},
+ {"atmega324p", "__AVR_ATmega324P__", 1},
+ {"atmega324pa", "__AVR_ATmega324PA__", 1},
+ {"atmega324pb", "__AVR_ATmega324PB__", 1},
+ {"atmega325", "__AVR_ATmega325__", 1},
+ {"atmega325a", "__AVR_ATmega325A__", 1},
+ {"atmega325p", "__AVR_ATmega325P__", 1},
+ {"atmega325pa", "__AVR_ATmega325PA__", 1},
+ {"atmega3250", "__AVR_ATmega3250__", 1},
+ {"atmega3250a", "__AVR_ATmega3250A__", 1},
+ {"atmega3250p", "__AVR_ATmega3250P__", 1},
+ {"atmega3250pa", "__AVR_ATmega3250PA__", 1},
+ {"atmega328", "__AVR_ATmega328__", 1},
+ {"atmega328p", "__AVR_ATmega328P__", 1},
+ {"atmega328pb", "__AVR_ATmega328PB__", 1},
+ {"atmega329", "__AVR_ATmega329__", 1},
+ {"atmega329a", "__AVR_ATmega329A__", 1},
+ {"atmega329p", "__AVR_ATmega329P__", 1},
+ {"atmega329pa", "__AVR_ATmega329PA__", 1},
+ {"atmega3290", "__AVR_ATmega3290__", 1},
+ {"atmega3290a", "__AVR_ATmega3290A__", 1},
+ {"atmega3290p", "__AVR_ATmega3290P__", 1},
+ {"atmega3290pa", "__AVR_ATmega3290PA__", 1},
+ {"atmega406", "__AVR_ATmega406__", 1},
+ {"atmega64", "__AVR_ATmega64__", 1},
+ {"atmega64a", "__AVR_ATmega64A__", 1},
+ {"atmega640", "__AVR_ATmega640__", 1},
+ {"atmega644", "__AVR_ATmega644__", 1},
+ {"atmega644a", "__AVR_ATmega644A__", 1},
+ {"atmega644p", "__AVR_ATmega644P__", 1},
+ {"atmega644pa", "__AVR_ATmega644PA__", 1},
+ {"atmega645", "__AVR_ATmega645__", 1},
+ {"atmega645a", "__AVR_ATmega645A__", 1},
+ {"atmega645p", "__AVR_ATmega645P__", 1},
+ {"atmega649", "__AVR_ATmega649__", 1},
+ {"atmega649a", "__AVR_ATmega649A__", 1},
+ {"atmega649p", "__AVR_ATmega649P__", 1},
+ {"atmega6450", "__AVR_ATmega6450__", 1},
+ {"atmega6450a", "__AVR_ATmega6450A__", 1},
+ {"atmega6450p", "__AVR_ATmega6450P__", 1},
+ {"atmega6490", "__AVR_ATmega6490__", 1},
+ {"atmega6490a", "__AVR_ATmega6490A__", 1},
+ {"atmega6490p", "__AVR_ATmega6490P__", 1},
+ {"atmega64rfr2", "__AVR_ATmega64RFR2__", 1},
+ {"atmega644rfr2", "__AVR_ATmega644RFR2__", 1},
+ {"atmega16hva", "__AVR_ATmega16HVA__", 1},
+ {"atmega16hva2", "__AVR_ATmega16HVA2__", 1},
+ {"atmega16hvb", "__AVR_ATmega16HVB__", 1},
+ {"atmega16hvbrevb", "__AVR_ATmega16HVBREVB__", 1},
+ {"atmega32hvb", "__AVR_ATmega32HVB__", 1},
+ {"atmega32hvbrevb", "__AVR_ATmega32HVBREVB__", 1},
+ {"atmega64hve", "__AVR_ATmega64HVE__", 1},
+ {"at90can32", "__AVR_AT90CAN32__", 1},
+ {"at90can64", "__AVR_AT90CAN64__", 1},
+ {"at90pwm161", "__AVR_AT90PWM161__", 1},
+ {"at90pwm216", "__AVR_AT90PWM216__", 1},
+ {"at90pwm316", "__AVR_AT90PWM316__", 1},
+ {"atmega32c1", "__AVR_ATmega32C1__", 1},
+ {"atmega64c1", "__AVR_ATmega64C1__", 1},
+ {"atmega16m1", "__AVR_ATmega16M1__", 1},
+ {"atmega32m1", "__AVR_ATmega32M1__", 1},
+ {"atmega64m1", "__AVR_ATmega64M1__", 1},
+ {"atmega16u4", "__AVR_ATmega16U4__", 1},
+ {"atmega32u4", "__AVR_ATmega32U4__", 1},
+ {"atmega32u6", "__AVR_ATmega32U6__", 1},
+ {"at90usb646", "__AVR_AT90USB646__", 1},
+ {"at90usb647", "__AVR_AT90USB647__", 1},
+ {"at90scr100", "__AVR_AT90SCR100__", 1},
+ {"at94k", "__AVR_AT94K__", 1},
+ {"m3000", "__AVR_AT000__", 1},
+ {"atmega128", "__AVR_ATmega128__", 2},
+ {"atmega128a", "__AVR_ATmega128A__", 2},
+ {"atmega1280", "__AVR_ATmega1280__", 2},
+ {"atmega1281", "__AVR_ATmega1281__", 2},
+ {"atmega1284", "__AVR_ATmega1284__", 2},
+ {"atmega1284p", "__AVR_ATmega1284P__", 2},
+ {"atmega128rfa1", "__AVR_ATmega128RFA1__", 2},
+ {"atmega128rfr2", "__AVR_ATmega128RFR2__", 2},
+ {"atmega1284rfr2", "__AVR_ATmega1284RFR2__", 2},
+ {"at90can128", "__AVR_AT90CAN128__", 2},
+ {"at90usb1286", "__AVR_AT90USB1286__", 2},
+ {"at90usb1287", "__AVR_AT90USB1287__", 2},
+ {"atmega2560", "__AVR_ATmega2560__", 4},
+ {"atmega2561", "__AVR_ATmega2561__", 4},
+ {"atmega256rfr2", "__AVR_ATmega256RFR2__", 4},
+ {"atmega2564rfr2", "__AVR_ATmega2564RFR2__", 4},
+ {"atxmega16a4", "__AVR_ATxmega16A4__", 1},
+ {"atxmega16a4u", "__AVR_ATxmega16A4U__", 1},
+ {"atxmega16c4", "__AVR_ATxmega16C4__", 1},
+ {"atxmega16d4", "__AVR_ATxmega16D4__", 1},
+ {"atxmega32a4", "__AVR_ATxmega32A4__", 1},
+ {"atxmega32a4u", "__AVR_ATxmega32A4U__", 1},
+ {"atxmega32c4", "__AVR_ATxmega32C4__", 1},
+ {"atxmega32d4", "__AVR_ATxmega32D4__", 1},
+ {"atxmega32e5", "__AVR_ATxmega32E5__", 1},
+ {"atxmega16e5", "__AVR_ATxmega16E5__", 1},
+ {"atxmega8e5", "__AVR_ATxmega8E5__", 1},
+ {"atxmega32x1", "__AVR_ATxmega32X1__", 1},
+ {"atxmega64a3", "__AVR_ATxmega64A3__", 1},
+ {"atxmega64a3u", "__AVR_ATxmega64A3U__", 1},
+ {"atxmega64a4u", "__AVR_ATxmega64A4U__", 1},
+ {"atxmega64b1", "__AVR_ATxmega64B1__", 1},
+ {"atxmega64b3", "__AVR_ATxmega64B3__", 1},
+ {"atxmega64c3", "__AVR_ATxmega64C3__", 1},
+ {"atxmega64d3", "__AVR_ATxmega64D3__", 1},
+ {"atxmega64d4", "__AVR_ATxmega64D4__", 1},
+ {"atxmega64a1", "__AVR_ATxmega64A1__", 1},
+ {"atxmega64a1u", "__AVR_ATxmega64A1U__", 1},
+ {"atxmega128a3", "__AVR_ATxmega128A3__", 2},
+ {"atxmega128a3u", "__AVR_ATxmega128A3U__", 2},
+ {"atxmega128b1", "__AVR_ATxmega128B1__", 2},
+ {"atxmega128b3", "__AVR_ATxmega128B3__", 2},
+ {"atxmega128c3", "__AVR_ATxmega128C3__", 2},
+ {"atxmega128d3", "__AVR_ATxmega128D3__", 2},
+ {"atxmega128d4", "__AVR_ATxmega128D4__", 2},
+ {"atxmega192a3", "__AVR_ATxmega192A3__", 3},
+ {"atxmega192a3u", "__AVR_ATxmega192A3U__", 3},
+ {"atxmega192c3", "__AVR_ATxmega192C3__", 3},
+ {"atxmega192d3", "__AVR_ATxmega192D3__", 3},
+ {"atxmega256a3", "__AVR_ATxmega256A3__", 4},
+ {"atxmega256a3u", "__AVR_ATxmega256A3U__", 4},
+ {"atxmega256a3b", "__AVR_ATxmega256A3B__", 4},
+ {"atxmega256a3bu", "__AVR_ATxmega256A3BU__", 4},
+ {"atxmega256c3", "__AVR_ATxmega256C3__", 4},
+ {"atxmega256d3", "__AVR_ATxmega256D3__", 4},
+ {"atxmega384c3", "__AVR_ATxmega384C3__", 6},
+ {"atxmega384d3", "__AVR_ATxmega384D3__", 6},
+ {"atxmega128a1", "__AVR_ATxmega128A1__", 2},
+ {"atxmega128a1u", "__AVR_ATxmega128A1U__", 2},
+ {"atxmega128a4u", "__AVR_ATxmega128A4U__", 2},
+ {"attiny4", "__AVR_ATtiny4__", 0},
+ {"attiny5", "__AVR_ATtiny5__", 0},
+ {"attiny9", "__AVR_ATtiny9__", 0},
+ {"attiny10", "__AVR_ATtiny10__", 0},
+ {"attiny20", "__AVR_ATtiny20__", 0},
+ {"attiny40", "__AVR_ATtiny40__", 0},
+ {"attiny102", "__AVR_ATtiny102__", 0},
+ {"attiny104", "__AVR_ATtiny104__", 0},
+ {"attiny202", "__AVR_ATtiny202__", 1},
+ {"attiny402", "__AVR_ATtiny402__", 1},
+ {"attiny204", "__AVR_ATtiny204__", 1},
+ {"attiny404", "__AVR_ATtiny404__", 1},
+ {"attiny804", "__AVR_ATtiny804__", 1},
+ {"attiny1604", "__AVR_ATtiny1604__", 1},
+ {"attiny406", "__AVR_ATtiny406__", 1},
+ {"attiny806", "__AVR_ATtiny806__", 1},
+ {"attiny1606", "__AVR_ATtiny1606__", 1},
+ {"attiny807", "__AVR_ATtiny807__", 1},
+ {"attiny1607", "__AVR_ATtiny1607__", 1},
+ {"attiny212", "__AVR_ATtiny212__", 1},
+ {"attiny412", "__AVR_ATtiny412__", 1},
+ {"attiny214", "__AVR_ATtiny214__", 1},
+ {"attiny414", "__AVR_ATtiny414__", 1},
+ {"attiny814", "__AVR_ATtiny814__", 1},
+ {"attiny1614", "__AVR_ATtiny1614__", 1},
+ {"attiny416", "__AVR_ATtiny416__", 1},
+ {"attiny816", "__AVR_ATtiny816__", 1},
+ {"attiny1616", "__AVR_ATtiny1616__", 1},
+ {"attiny3216", "__AVR_ATtiny3216__", 1},
+ {"attiny417", "__AVR_ATtiny417__", 1},
+ {"attiny817", "__AVR_ATtiny817__", 1},
+ {"attiny1617", "__AVR_ATtiny1617__", 1},
+ {"attiny3217", "__AVR_ATtiny3217__", 1},
};
} // namespace targets
@@ -330,13 +331,25 @@ void AVRTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__AVR");
Builder.defineMacro("__AVR__");
Builder.defineMacro("__ELF__");
- Builder.defineMacro("__flash", "__attribute__((address_space(1)))");
if (!this->CPU.empty()) {
auto It = llvm::find_if(
AVRMcus, [&](const MCUInfo &Info) { return Info.Name == this->CPU; });
- if (It != std::end(AVRMcus))
+ if (It != std::end(AVRMcus)) {
Builder.defineMacro(It->DefineName);
+ if (It->NumFlashBanks >= 1)
+ Builder.defineMacro("__flash", "__attribute__((address_space(1)))");
+ if (It->NumFlashBanks >= 2)
+ Builder.defineMacro("__flash1", "__attribute__((address_space(2)))");
+ if (It->NumFlashBanks >= 3)
+ Builder.defineMacro("__flash2", "__attribute__((address_space(3)))");
+ if (It->NumFlashBanks >= 4)
+ Builder.defineMacro("__flash3", "__attribute__((address_space(4)))");
+ if (It->NumFlashBanks >= 5)
+ Builder.defineMacro("__flash4", "__attribute__((address_space(5)))");
+ if (It->NumFlashBanks >= 6)
+ Builder.defineMacro("__flash5", "__attribute__((address_space(6)))");
+ }
}
}
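
The new NumFlashBanks field drives how many flash address-space macros a device gets: bank 0 is plain __flash (address space 1) and bank N maps to __flashN (address space N+1), capped at six banks as in the chain of checks above. A standalone sketch of that mapping (an illustration, not part of the patch):

    #include <iostream>
    #include <string>

    // Print the macros a device with `banks` flash banks would receive,
    // mirroring the `if (It->NumFlashBanks >= k)` chain in the hunk above.
    static void printFlashMacros(int banks) {
      for (int bank = 0; bank < banks && bank < 6; ++bank) {
        std::string name =
            bank == 0 ? "__flash" : "__flash" + std::to_string(bank);
        std::cout << name << " -> __attribute__((address_space(" << (bank + 1)
                  << ")))\n";
      }
    }

    int main() {
      printFlashMacros(2); // e.g. atmega128: __flash and __flash1
      printFlashMacros(0); // e.g. attiny11: no LPM, so no __flash macro at all
    }
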
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AVR.h b/contrib/llvm-project/clang/lib/Basic/Targets/AVR.h
index 89a80ca6a39a..a281e2c2cd74 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AVR.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AVR.h
@@ -55,6 +55,7 @@ public:
Int16Type = SignedInt;
Char32Type = UnsignedLong;
SigAtomicType = SignedChar;
+ ProgramAddrSpace = 1;
resetDataLayout("e-P1-p:16:8-i8:8-i16:8-i32:8-i64:8-f32:8-f64:8-n8-a:8");
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/M68k.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/M68k.cpp
index c0cd8fa90ed6..ada5b97ed66d 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/M68k.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/M68k.cpp
@@ -29,7 +29,7 @@ M68kTargetInfo::M68kTargetInfo(const llvm::Triple &Triple,
const TargetOptions &)
: TargetInfo(Triple) {
- std::string Layout = "";
+ std::string Layout;
// M68k is Big Endian
Layout += "E";
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp
index 7f7b44b658eb..1eb0317af60b 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp
@@ -561,9 +561,9 @@ bool PPCTargetInfo::initFeatureMap(
if (!ppcUserFeaturesCheck(Diags, FeaturesVec))
return false;
- if (!(ArchDefs & ArchDefinePwr9) && (ArchDefs & ArchDefinePpcgr) &&
+ if (!(ArchDefs & ArchDefinePwr7) && (ArchDefs & ArchDefinePpcgr) &&
llvm::is_contained(FeaturesVec, "+float128")) {
- // We have __float128 on PPC but not power 9 and above.
+ // We have __float128 on PPC but not pre-VSX targets.
Diags.Report(diag::err_opt_not_valid_with_opt) << "-mfloat128" << CPU;
return false;
}
@@ -734,23 +734,28 @@ ArrayRef<const char *> PPCTargetInfo::getGCCRegNames() const {
const TargetInfo::GCCRegAlias PPCTargetInfo::GCCRegAliases[] = {
// While some of these aliases do map to different registers
// they still share the same register name.
- {{"0"}, "r0"}, {{"1"}, "r1"}, {{"2"}, "r2"}, {{"3"}, "r3"},
- {{"4"}, "r4"}, {{"5"}, "r5"}, {{"6"}, "r6"}, {{"7"}, "r7"},
- {{"8"}, "r8"}, {{"9"}, "r9"}, {{"10"}, "r10"}, {{"11"}, "r11"},
- {{"12"}, "r12"}, {{"13"}, "r13"}, {{"14"}, "r14"}, {{"15"}, "r15"},
- {{"16"}, "r16"}, {{"17"}, "r17"}, {{"18"}, "r18"}, {{"19"}, "r19"},
- {{"20"}, "r20"}, {{"21"}, "r21"}, {{"22"}, "r22"}, {{"23"}, "r23"},
- {{"24"}, "r24"}, {{"25"}, "r25"}, {{"26"}, "r26"}, {{"27"}, "r27"},
- {{"28"}, "r28"}, {{"29"}, "r29"}, {{"30"}, "r30"}, {{"31"}, "r31"},
- {{"fr0"}, "f0"}, {{"fr1"}, "f1"}, {{"fr2"}, "f2"}, {{"fr3"}, "f3"},
- {{"fr4"}, "f4"}, {{"fr5"}, "f5"}, {{"fr6"}, "f6"}, {{"fr7"}, "f7"},
- {{"fr8"}, "f8"}, {{"fr9"}, "f9"}, {{"fr10"}, "f10"}, {{"fr11"}, "f11"},
- {{"fr12"}, "f12"}, {{"fr13"}, "f13"}, {{"fr14"}, "f14"}, {{"fr15"}, "f15"},
- {{"fr16"}, "f16"}, {{"fr17"}, "f17"}, {{"fr18"}, "f18"}, {{"fr19"}, "f19"},
- {{"fr20"}, "f20"}, {{"fr21"}, "f21"}, {{"fr22"}, "f22"}, {{"fr23"}, "f23"},
- {{"fr24"}, "f24"}, {{"fr25"}, "f25"}, {{"fr26"}, "f26"}, {{"fr27"}, "f27"},
- {{"fr28"}, "f28"}, {{"fr29"}, "f29"}, {{"fr30"}, "f30"}, {{"fr31"}, "f31"},
- {{"cc"}, "cr0"},
+ {{"0"}, "r0"}, {{"1", "sp"}, "r1"}, {{"2"}, "r2"},
+ {{"3"}, "r3"}, {{"4"}, "r4"}, {{"5"}, "r5"},
+ {{"6"}, "r6"}, {{"7"}, "r7"}, {{"8"}, "r8"},
+ {{"9"}, "r9"}, {{"10"}, "r10"}, {{"11"}, "r11"},
+ {{"12"}, "r12"}, {{"13"}, "r13"}, {{"14"}, "r14"},
+ {{"15"}, "r15"}, {{"16"}, "r16"}, {{"17"}, "r17"},
+ {{"18"}, "r18"}, {{"19"}, "r19"}, {{"20"}, "r20"},
+ {{"21"}, "r21"}, {{"22"}, "r22"}, {{"23"}, "r23"},
+ {{"24"}, "r24"}, {{"25"}, "r25"}, {{"26"}, "r26"},
+ {{"27"}, "r27"}, {{"28"}, "r28"}, {{"29"}, "r29"},
+ {{"30"}, "r30"}, {{"31"}, "r31"}, {{"fr0"}, "f0"},
+ {{"fr1"}, "f1"}, {{"fr2"}, "f2"}, {{"fr3"}, "f3"},
+ {{"fr4"}, "f4"}, {{"fr5"}, "f5"}, {{"fr6"}, "f6"},
+ {{"fr7"}, "f7"}, {{"fr8"}, "f8"}, {{"fr9"}, "f9"},
+ {{"fr10"}, "f10"}, {{"fr11"}, "f11"}, {{"fr12"}, "f12"},
+ {{"fr13"}, "f13"}, {{"fr14"}, "f14"}, {{"fr15"}, "f15"},
+ {{"fr16"}, "f16"}, {{"fr17"}, "f17"}, {{"fr18"}, "f18"},
+ {{"fr19"}, "f19"}, {{"fr20"}, "f20"}, {{"fr21"}, "f21"},
+ {{"fr22"}, "f22"}, {{"fr23"}, "f23"}, {{"fr24"}, "f24"},
+ {{"fr25"}, "f25"}, {{"fr26"}, "f26"}, {{"fr27"}, "f27"},
+ {{"fr28"}, "f28"}, {{"fr29"}, "f29"}, {{"fr30"}, "f30"},
+ {{"fr31"}, "f31"}, {{"cc"}, "cr0"},
};
ArrayRef<TargetInfo::GCCRegAlias> PPCTargetInfo::getGCCRegAliases() const {
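
The reformatted alias table also teaches the "sp" spelling for r1. Structurally it is just a list of (alias spellings, canonical register) pairs consulted when resolving inline-asm register names; a standalone sketch with a hypothetical RegAlias type in place of TargetInfo::GCCRegAlias:

    #include <iostream>
    #include <string>
    #include <vector>

    // Hypothetical stand-in for TargetInfo::GCCRegAlias: several spellings
    // resolve to one canonical register name.
    struct RegAlias {
      std::vector<std::string> Aliases;
      std::string Register;
    };

    static const RegAlias PPCAliases[] = {
        {{"0"}, "r0"}, {{"1", "sp"}, "r1"}, {{"2"}, "r2"}, {{"cc"}, "cr0"},
    };

    // Resolve an alias to its canonical register, or return it unchanged.
    static std::string canonicalize(const std::string &Name) {
      for (const RegAlias &A : PPCAliases)
        for (const std::string &Alias : A.Aliases)
          if (Alias == Name)
            return A.Register;
      return Name;
    }

    int main() {
      std::cout << canonicalize("sp") << "\n"; // r1, newly accepted here
      std::cout << canonicalize("cc") << "\n"; // cr0
    }
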
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h
index 60701072ac4b..ac52eb219f54 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h
@@ -414,7 +414,7 @@ public:
LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
IntMaxType = SignedLong;
Int64Type = SignedLong;
- std::string DataLayout = "";
+ std::string DataLayout;
if (Triple.isOSAIX()) {
// TODO: Set appropriate ABI for AIX platform.
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp
index 770d37a1c1be..0680cad5b07c 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp
@@ -125,6 +125,9 @@ void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__riscv_xlen", Is64Bit ? "64" : "32");
StringRef CodeModel = getTargetOpts().CodeModel;
unsigned FLen = ISAInfo->getFLen();
+ unsigned MinVLen = ISAInfo->getMinVLen();
+ unsigned MaxELen = ISAInfo->getMaxELen();
+ unsigned MaxELenFp = ISAInfo->getMaxELenFp();
if (CodeModel == "default")
CodeModel = "small";
@@ -176,10 +179,16 @@ void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__riscv_fsqrt");
}
+ if (MinVLen) {
+ Builder.defineMacro("__riscv_v_min_vlen", Twine(MinVLen));
+ Builder.defineMacro("__riscv_v_elen", Twine(MaxELen));
+ Builder.defineMacro("__riscv_v_elen_fp", Twine(MaxELenFp));
+ }
+
if (ISAInfo->hasExtension("c"))
Builder.defineMacro("__riscv_compressed");
- if (ISAInfo->hasExtension("v"))
+ if (ISAInfo->hasExtension("zve32x") || ISAInfo->hasExtension("v"))
Builder.defineMacro("__riscv_vector");
}
@@ -205,10 +214,26 @@ bool RISCVTargetInfo::initFeatureMap(
llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
const std::vector<std::string> &FeaturesVec) const {
- if (getTriple().getArch() == llvm::Triple::riscv64)
+ unsigned XLen = 32;
+
+ if (getTriple().getArch() == llvm::Triple::riscv64) {
Features["64bit"] = true;
+ XLen = 64;
+ }
+
+ auto ParseResult = llvm::RISCVISAInfo::parseFeatures(XLen, FeaturesVec);
+ if (!ParseResult) {
+ std::string Buffer;
+ llvm::raw_string_ostream OutputErrMsg(Buffer);
+ handleAllErrors(ParseResult.takeError(), [&](llvm::StringError &ErrMsg) {
+ OutputErrMsg << ErrMsg.getMessage();
+ });
+ Diags.Report(diag::err_invalid_feature_combination) << OutputErrMsg.str();
+ return false;
+ }
- return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
+ return TargetInfo::initFeatureMap(Features, Diags, CPU,
+ (*ParseResult)->toFeatureVector());
}
/// Return true if has this feature, need to sync with handleTargetFeatures.
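
The initFeatureMap change validates the requested features through llvm::RISCVISAInfo and, when parsing fails, flattens the returned llvm::Error into one string for a single diagnostic. A small standalone sketch of that Expected/handleAllErrors pattern, assuming an LLVM development setup for the Support headers; parseXLen itself is a hypothetical stand-in for RISCVISAInfo::parseFeatures:

    #include "llvm/Support/Error.h"
    #include "llvm/Support/raw_ostream.h"
    #include <string>

    // Hypothetical parser that can fail the way RISCVISAInfo::parseFeatures can.
    static llvm::Expected<int> parseXLen(bool Valid) {
      if (!Valid)
        return llvm::createStringError(llvm::inconvertibleErrorCode(),
                                       "invalid arch combination");
      return 64;
    }

    int main() {
      llvm::Expected<int> Result = parseXLen(false);
      if (!Result) {
        // Same shape as the hunk: render every contained error into one string
        // that can be passed to a single diagnostic.
        std::string Buffer;
        llvm::raw_string_ostream OutputErrMsg(Buffer);
        llvm::handleAllErrors(Result.takeError(),
                              [&](const llvm::StringError &E) {
                                OutputErrMsg << E.getMessage();
                              });
        llvm::errs() << "error: " << OutputErrMsg.str() << "\n";
        return 1;
      }
      llvm::outs() << "XLen = " << *Result << "\n";
    }
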
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Sparc.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/Sparc.cpp
index 5eeb77406c34..932102434801 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Sparc.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Sparc.cpp
@@ -156,8 +156,6 @@ void SparcV8TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__sparcv8__");
break;
case CG_V9:
- Builder.defineMacro("__sparcv9");
- Builder.defineMacro("__sparcv9__");
Builder.defineMacro("__sparc_v9__");
break;
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/X86.h b/contrib/llvm-project/clang/lib/Basic/Targets/X86.h
index c952b8c9a336..d1b66432e38b 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/X86.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/X86.h
@@ -533,11 +533,12 @@ public:
DoubleAlign = LongLongAlign = 64;
bool IsWinCOFF =
getTriple().isOSWindows() && getTriple().isOSBinFormatCOFF();
- resetDataLayout(IsWinCOFF ? "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:"
- "64-i64:64-f80:32-n8:16:32-a:0:32-S32"
- : "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:"
- "64-i64:64-f80:32-n8:16:32-a:0:32-S32",
- IsWinCOFF ? "_" : "");
+ bool IsMSVC = getTriple().isWindowsMSVCEnvironment();
+ std::string Layout = IsWinCOFF ? "e-m:x" : "e-m:e";
+ Layout += "-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-";
+ Layout += IsMSVC ? "f80:128" : "f80:32";
+ Layout += "-n8:16:32-a:0:32-S32";
+ resetDataLayout(Layout, IsWinCOFF ? "_" : "");
}
};
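
The rewritten constructor assembles the 32-bit x86 data layout from pieces so that only the f80 alignment differs between MSVC (f80:128) and everything else (f80:32). A standalone sketch of the assembly, with plain bools standing in for the llvm::Triple queries:

    #include <iostream>
    #include <string>

    // Mirror of the hunk's string assembly; IsWinCOFF/IsMSVC are plain flags
    // here rather than Triple queries.
    static std::string buildX86_32Layout(bool IsWinCOFF, bool IsMSVC) {
      std::string Layout = IsWinCOFF ? "e-m:x" : "e-m:e";
      Layout += "-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-";
      Layout += IsMSVC ? "f80:128" : "f80:32";
      Layout += "-n8:16:32-a:0:32-S32";
      return Layout;
    }

    int main() {
      std::cout << buildX86_32Layout(true, true) << "\n";   // MSVC: 16-byte f80
      std::cout << buildX86_32Layout(false, false) << "\n"; // ELF: 4-byte f80
    }
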
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Address.h b/contrib/llvm-project/clang/lib/CodeGen/Address.h
index 37c20291c0e8..3ac0f4f0d7e5 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/Address.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/Address.h
@@ -14,30 +14,77 @@
#ifndef LLVM_CLANG_LIB_CODEGEN_ADDRESS_H
#define LLVM_CLANG_LIB_CODEGEN_ADDRESS_H
-#include "llvm/IR/Constants.h"
#include "clang/AST/CharUnits.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/Support/MathExtras.h"
namespace clang {
namespace CodeGen {
-/// An aligned address.
-class Address {
+// We try to save some space by using 6 bits over two PointerIntPairs to store
+// the alignment. However, some arches don't support 3 bits in a PointerIntPair
+// so we fall back to storing the alignment separately.
+template <typename T, bool = alignof(llvm::Value *) >= 8> class AddressImpl {};
+
+template <typename T> class AddressImpl<T, false> {
llvm::Value *Pointer;
llvm::Type *ElementType;
CharUnits Alignment;
+public:
+ AddressImpl(llvm::Value *Pointer, llvm::Type *ElementType,
+ CharUnits Alignment)
+ : Pointer(Pointer), ElementType(ElementType), Alignment(Alignment) {}
+ llvm::Value *getPointer() const { return Pointer; }
+ llvm::Type *getElementType() const { return ElementType; }
+ CharUnits getAlignment() const { return Alignment; }
+};
+
+template <typename T> class AddressImpl<T, true> {
+ // Int portion stores upper 3 bits of the log of the alignment.
+ llvm::PointerIntPair<llvm::Value *, 3, unsigned> Pointer;
+ // Int portion stores lower 3 bits of the log of the alignment.
+ llvm::PointerIntPair<llvm::Type *, 3, unsigned> ElementType;
+
+public:
+ AddressImpl(llvm::Value *Pointer, llvm::Type *ElementType,
+ CharUnits Alignment)
+ : Pointer(Pointer), ElementType(ElementType) {
+ if (Alignment.isZero())
+ return;
+ // Currently the max supported alignment is much less than 1 << 63 and is
+ // guaranteed to be a power of 2, so we can store the log of the alignment
+ // into 6 bits.
+    assert(Alignment.isPowerOfTwo() && "Alignment must be a power of 2");
+ auto AlignLog = llvm::Log2_64(Alignment.getQuantity());
+ assert(AlignLog < (1 << 6) && "cannot fit alignment into 6 bits");
+ this->Pointer.setInt(AlignLog >> 3);
+ this->ElementType.setInt(AlignLog & 7);
+ }
+ llvm::Value *getPointer() const { return Pointer.getPointer(); }
+ llvm::Type *getElementType() const { return ElementType.getPointer(); }
+ CharUnits getAlignment() const {
+ unsigned AlignLog = (Pointer.getInt() << 3) | ElementType.getInt();
+ return CharUnits::fromQuantity(CharUnits::QuantityType(1) << AlignLog);
+ }
+};
+
+/// An aligned address.
+class Address {
+ AddressImpl<void> A;
+
protected:
- Address(std::nullptr_t) : Pointer(nullptr), ElementType(nullptr) {}
+ Address(std::nullptr_t) : A(nullptr, nullptr, CharUnits::Zero()) {}
public:
- Address(llvm::Value *pointer, llvm::Type *elementType, CharUnits alignment)
- : Pointer(pointer), ElementType(elementType), Alignment(alignment) {
- assert(pointer != nullptr && "Pointer cannot be null");
- assert(elementType != nullptr && "Element type cannot be null");
- assert(llvm::cast<llvm::PointerType>(pointer->getType())
- ->isOpaqueOrPointeeTypeMatches(elementType) &&
+ Address(llvm::Value *Pointer, llvm::Type *ElementType, CharUnits Alignment)
+ : A(Pointer, ElementType, Alignment) {
+ assert(Pointer != nullptr && "Pointer cannot be null");
+ assert(ElementType != nullptr && "Element type cannot be null");
+ assert(llvm::cast<llvm::PointerType>(Pointer->getType())
+ ->isOpaqueOrPointeeTypeMatches(ElementType) &&
"Incorrect pointer element type");
- assert(!alignment.isZero() && "Alignment cannot be zero");
}
// Deprecated: Use constructor with explicit element type instead.
@@ -46,11 +93,11 @@ public:
Alignment) {}
static Address invalid() { return Address(nullptr); }
- bool isValid() const { return Pointer != nullptr; }
+ bool isValid() const { return A.getPointer() != nullptr; }
llvm::Value *getPointer() const {
assert(isValid());
- return Pointer;
+ return A.getPointer();
}
/// Return the type of the pointer value.
@@ -61,7 +108,7 @@ public:
/// Return the type of the values stored in this address.
llvm::Type *getElementType() const {
assert(isValid());
- return ElementType;
+ return A.getElementType();
}
/// Return the address space that this address resides in.
@@ -77,19 +124,19 @@ public:
/// Return the alignment of this pointer.
CharUnits getAlignment() const {
assert(isValid());
- return Alignment;
+ return A.getAlignment();
}
/// Return address with different pointer, but same element type and
/// alignment.
Address withPointer(llvm::Value *NewPointer) const {
- return Address(NewPointer, ElementType, Alignment);
+ return Address(NewPointer, getElementType(), getAlignment());
}
/// Return address with different alignment, but same pointer and element
/// type.
Address withAlignment(CharUnits NewAlignment) const {
- return Address(Pointer, ElementType, NewAlignment);
+ return Address(getPointer(), getElementType(), NewAlignment);
}
};
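
The new AddressImpl specialization spreads the 6-bit log2 of the alignment across the spare low bits of two pointers: the upper three bits ride in the Value* PointerIntPair and the lower three in the Type* one. A standalone sketch of just the bit arithmetic (no LLVM types), checking that the round trip is lossless for every power-of-two alignment representable in 64 bits:

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    // Pack a 6-bit value into two 3-bit fields, as the two PointerIntPairs do.
    struct PackedAlign {
      unsigned Hi3 = 0; // lives next to the Value* in the real class
      unsigned Lo3 = 0; // lives next to the Type* in the real class
    };

    static PackedAlign pack(uint64_t Alignment) {
      assert(Alignment != 0 && (Alignment & (Alignment - 1)) == 0 &&
             "alignment must be a power of two");
      unsigned AlignLog = 0;
      while ((uint64_t(1) << AlignLog) != Alignment)
        ++AlignLog; // AlignLog = log2(Alignment), always < 64, so < (1 << 6)
      return {AlignLog >> 3, AlignLog & 7u};
    }

    static uint64_t unpack(PackedAlign P) {
      unsigned AlignLog = (P.Hi3 << 3) | P.Lo3;
      return uint64_t(1) << AlignLog;
    }

    int main() {
      for (uint64_t A = 1; A != 0; A <<= 1)
        assert(unpack(pack(A)) == A); // lossless for every power of two
      std::cout << "round trip ok\n";
    }
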
diff --git a/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp b/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp
index bacac0a20d4d..9ae5c870afc8 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp
@@ -197,8 +197,7 @@ public:
PassManagerBuilderWrapper(const Triple &TargetTriple,
const CodeGenOptions &CGOpts,
const LangOptions &LangOpts)
- : PassManagerBuilder(), TargetTriple(TargetTriple), CGOpts(CGOpts),
- LangOpts(LangOpts) {}
+ : TargetTriple(TargetTriple), CGOpts(CGOpts), LangOpts(LangOpts) {}
const Triple &getTargetTriple() const { return TargetTriple; }
const CodeGenOptions &getCGOpts() const { return CGOpts; }
const LangOptions &getLangOpts() const { return LangOpts; }
@@ -359,7 +358,8 @@ static void addGeneralOptsForMemorySanitizer(const PassManagerBuilder &Builder,
int TrackOrigins = CGOpts.SanitizeMemoryTrackOrigins;
bool Recover = CGOpts.SanitizeRecover.has(SanitizerKind::Memory);
PM.add(createMemorySanitizerLegacyPassPass(
- MemorySanitizerOptions{TrackOrigins, Recover, CompileKernel}));
+ MemorySanitizerOptions{TrackOrigins, Recover, CompileKernel,
+ CGOpts.SanitizeMemoryParamRetval != 0}));
// MemorySanitizer inserts complex instrumentation that mostly follows
// the logic of the original code, but operates on "shadow" values.
@@ -645,6 +645,7 @@ static bool initTargetOptions(DiagnosticsEngine &Diags,
Options.MCOptions.CommandLineArgs = CodeGenOpts.CommandLineArgs;
Options.DebugStrictDwarf = CodeGenOpts.DebugStrictDwarf;
Options.ObjectFilenameForDebug = CodeGenOpts.ObjectFilenameForDebug;
+ Options.Hotpatch = CodeGenOpts.HotPatch;
return true;
}
@@ -1164,11 +1165,11 @@ static void addSanitizers(const Triple &TargetTriple,
int TrackOrigins = CodeGenOpts.SanitizeMemoryTrackOrigins;
bool Recover = CodeGenOpts.SanitizeRecover.has(Mask);
- MPM.addPass(
- ModuleMemorySanitizerPass({TrackOrigins, Recover, CompileKernel}));
+ MemorySanitizerOptions options(TrackOrigins, Recover, CompileKernel,
+ CodeGenOpts.SanitizeMemoryParamRetval);
+ MPM.addPass(ModuleMemorySanitizerPass(options));
FunctionPassManager FPM;
- FPM.addPass(
- MemorySanitizerPass({TrackOrigins, Recover, CompileKernel}));
+ FPM.addPass(MemorySanitizerPass(options));
if (Level != OptimizationLevel::O0) {
// MemorySanitizer inserts complex instrumentation that mostly
// follows the logic of the original code, but operates on
@@ -1491,8 +1492,11 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
}
// Now that we have all of the passes ready, run them.
- PrettyStackTraceString CrashInfo("Optimizer");
- MPM.run(*TheModule, MAM);
+ {
+ PrettyStackTraceString CrashInfo("Optimizer");
+ llvm::TimeTraceScope TimeScope("Optimizer");
+ MPM.run(*TheModule, MAM);
+ }
}
void EmitAssemblyHelper::RunCodegenPipeline(
@@ -1524,8 +1528,11 @@ void EmitAssemblyHelper::RunCodegenPipeline(
return;
}
- PrettyStackTraceString CrashInfo("Code generation");
- CodeGenPasses.run(*TheModule);
+ {
+ PrettyStackTraceString CrashInfo("Code generation");
+ llvm::TimeTraceScope TimeScope("CodeGenPasses");
+ CodeGenPasses.run(*TheModule);
+ }
}
/// A clean version of `EmitAssembly` that uses the new pass manager.
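
The braces added around the optimizer and code-generation runs exist so that the PrettyStackTraceString and the new TimeTraceScope are destroyed as soon as the pass manager returns, which is what closes the corresponding -ftime-trace event. A standalone sketch of the same RAII idea, with a hypothetical ScopeTimer in place of llvm::TimeTraceScope:

    #include <chrono>
    #include <iostream>
    #include <string>

    // Hypothetical stand-in for llvm::TimeTraceScope: the event spans
    // construction to destruction, so block scope bounds what gets measured.
    class ScopeTimer {
      std::string Name;
      std::chrono::steady_clock::time_point Start =
          std::chrono::steady_clock::now();
    public:
      explicit ScopeTimer(std::string N) : Name(std::move(N)) {}
      ~ScopeTimer() {
        auto End = std::chrono::steady_clock::now();
        std::cout << Name << ": "
                  << std::chrono::duration_cast<std::chrono::microseconds>(
                         End - Start).count()
                  << "us\n";
      }
    };

    static void runPasses() { /* stand-in for MPM.run(*TheModule, MAM) */ }

    int main() {
      {
        ScopeTimer TimeScope("Optimizer"); // ends with the block, not with main
        runPasses();
      }
      std::cout << "timing already reported at this point\n";
    }
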
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp
index e81c5ba5055c..10569ae2c3f9 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp
@@ -307,7 +307,7 @@ static RValue emitAtomicLibcall(CodeGenFunction &CGF,
const CGFunctionInfo &fnInfo =
CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
- llvm::AttrBuilder fnAttrB;
+ llvm::AttrBuilder fnAttrB(CGF.getLLVMContext());
fnAttrB.addAttribute(llvm::Attribute::NoUnwind);
fnAttrB.addAttribute(llvm::Attribute::WillReturn);
llvm::AttributeList fnAttrs = llvm::AttributeList::get(
@@ -351,12 +351,12 @@ bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
bool AtomicInfo::emitMemSetZeroIfNecessary() const {
assert(LVal.isSimple());
- llvm::Value *addr = LVal.getPointer(CGF);
- if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
+ Address addr = LVal.getAddress(CGF);
+ if (!requiresMemSetZero(addr.getElementType()))
return false;
CGF.Builder.CreateMemSet(
- addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
+ addr.getPointer(), llvm::ConstantInt::get(CGF.Int8Ty, 0),
CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
LVal.getAlignment().getAsAlign());
return true;
@@ -1522,7 +1522,7 @@ RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
!AsValue)) {
auto *ValTy = AsValue
? CGF.ConvertTypeForMem(ValueTy)
- : getAtomicAddress().getType()->getPointerElementType();
+ : getAtomicAddress().getElementType();
if (ValTy->isIntegerTy()) {
assert(IntVal->getType() == ValTy && "Different integer types.");
return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp
index 7bb6dbb8a8ac..1f1de3df857c 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp
@@ -33,10 +33,10 @@ using namespace clang;
using namespace CodeGen;
CGBlockInfo::CGBlockInfo(const BlockDecl *block, StringRef name)
- : Name(name), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false),
- HasCXXObject(false), UsesStret(false), HasCapturedVariableLayout(false),
- CapturesNonExternalType(false), LocalAddress(Address::invalid()),
- StructureType(nullptr), Block(block) {
+ : Name(name), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false),
+ NoEscape(false), HasCXXObject(false), UsesStret(false),
+ HasCapturedVariableLayout(false), CapturesNonExternalType(false),
+ LocalAddress(Address::invalid()), StructureType(nullptr), Block(block) {
// Skip asm prefix, if any. 'name' is usually taken directly from
// the mangled name of the enclosing function.
@@ -66,17 +66,6 @@ static llvm::Constant *buildDisposeHelper(CodeGenModule &CGM,
namespace {
-/// Represents a type of copy/destroy operation that should be performed for an
-/// entity that's captured by a block.
-enum class BlockCaptureEntityKind {
- CXXRecord, // Copy or destroy
- ARCWeak,
- ARCStrong,
- NonTrivialCStruct,
- BlockObject, // Assign or release
- None
-};
-
/// Represents a captured entity that requires extra operations in order for
/// this entity to be copied or destroyed correctly.
struct BlockCaptureManagedEntity {
@@ -110,11 +99,7 @@ enum class CaptureStrKind {
} // end anonymous namespace
-static void findBlockCapturedManagedEntities(
- const CGBlockInfo &BlockInfo, const LangOptions &LangOpts,
- SmallVectorImpl<BlockCaptureManagedEntity> &ManagedCaptures);
-
-static std::string getBlockCaptureStr(const BlockCaptureManagedEntity &E,
+static std::string getBlockCaptureStr(const CGBlockInfo::Capture &Cap,
CaptureStrKind StrKind,
CharUnits BlockAlignment,
CodeGenModule &CGM);
@@ -124,34 +109,33 @@ static std::string getBlockDescriptorName(const CGBlockInfo &BlockInfo,
std::string Name = "__block_descriptor_";
Name += llvm::to_string(BlockInfo.BlockSize.getQuantity()) + "_";
- if (BlockInfo.needsCopyDisposeHelpers()) {
+ if (BlockInfo.NeedsCopyDispose) {
if (CGM.getLangOpts().Exceptions)
Name += "e";
if (CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
Name += "a";
Name += llvm::to_string(BlockInfo.BlockAlign.getQuantity()) + "_";
- SmallVector<BlockCaptureManagedEntity, 4> ManagedCaptures;
- findBlockCapturedManagedEntities(BlockInfo, CGM.getContext().getLangOpts(),
- ManagedCaptures);
+ for (auto &Cap : BlockInfo.SortedCaptures) {
+ if (Cap.isConstantOrTrivial())
+ continue;
- for (const BlockCaptureManagedEntity &E : ManagedCaptures) {
- Name += llvm::to_string(E.Capture->getOffset().getQuantity());
+ Name += llvm::to_string(Cap.getOffset().getQuantity());
- if (E.CopyKind == E.DisposeKind) {
+ if (Cap.CopyKind == Cap.DisposeKind) {
// If CopyKind and DisposeKind are the same, merge the capture
// information.
- assert(E.CopyKind != BlockCaptureEntityKind::None &&
+ assert(Cap.CopyKind != BlockCaptureEntityKind::None &&
"shouldn't see BlockCaptureManagedEntity that is None");
- Name += getBlockCaptureStr(E, CaptureStrKind::Merged,
+ Name += getBlockCaptureStr(Cap, CaptureStrKind::Merged,
BlockInfo.BlockAlign, CGM);
} else {
// If CopyKind and DisposeKind are not the same, which can happen when
// either Kind is None or the captured object is a __strong block,
// concatenate the copy and dispose strings.
- Name += getBlockCaptureStr(E, CaptureStrKind::CopyHelper,
+ Name += getBlockCaptureStr(Cap, CaptureStrKind::CopyHelper,
BlockInfo.BlockAlign, CGM);
- Name += getBlockCaptureStr(E, CaptureStrKind::DisposeHelper,
+ Name += getBlockCaptureStr(Cap, CaptureStrKind::DisposeHelper,
BlockInfo.BlockAlign, CGM);
}
}
@@ -223,7 +207,7 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
// Optional copy/dispose helpers.
bool hasInternalHelper = false;
- if (blockInfo.needsCopyDisposeHelpers()) {
+ if (blockInfo.NeedsCopyDispose) {
// copy_func_helper_decl
llvm::Constant *copyHelper = buildCopyHelper(CGM, blockInfo);
elements.add(copyHelper);
@@ -340,17 +324,21 @@ namespace {
struct BlockLayoutChunk {
CharUnits Alignment;
CharUnits Size;
- Qualifiers::ObjCLifetime Lifetime;
const BlockDecl::Capture *Capture; // null for 'this'
llvm::Type *Type;
QualType FieldType;
+ BlockCaptureEntityKind CopyKind, DisposeKind;
+ BlockFieldFlags CopyFlags, DisposeFlags;
BlockLayoutChunk(CharUnits align, CharUnits size,
- Qualifiers::ObjCLifetime lifetime,
- const BlockDecl::Capture *capture,
- llvm::Type *type, QualType fieldType)
- : Alignment(align), Size(size), Lifetime(lifetime),
- Capture(capture), Type(type), FieldType(fieldType) {}
+ const BlockDecl::Capture *capture, llvm::Type *type,
+ QualType fieldType, BlockCaptureEntityKind CopyKind,
+ BlockFieldFlags CopyFlags,
+ BlockCaptureEntityKind DisposeKind,
+ BlockFieldFlags DisposeFlags)
+ : Alignment(align), Size(size), Capture(capture), Type(type),
+ FieldType(fieldType), CopyKind(CopyKind), DisposeKind(DisposeKind),
+ CopyFlags(CopyFlags), DisposeFlags(DisposeFlags) {}
/// Tell the block info that this chunk has the given field index.
void setIndex(CGBlockInfo &info, unsigned index, CharUnits offset) {
@@ -358,32 +346,93 @@ namespace {
info.CXXThisIndex = index;
info.CXXThisOffset = offset;
} else {
- auto C = CGBlockInfo::Capture::makeIndex(index, offset, FieldType);
- info.Captures.insert({Capture->getVariable(), C});
+ info.SortedCaptures.push_back(CGBlockInfo::Capture::makeIndex(
+ index, offset, FieldType, CopyKind, CopyFlags, DisposeKind,
+ DisposeFlags, Capture));
}
}
+
+ bool isTrivial() const {
+ return CopyKind == BlockCaptureEntityKind::None &&
+ DisposeKind == BlockCaptureEntityKind::None;
+ }
};
- /// Order by 1) all __strong together 2) next, all byfref together 3) next,
- /// all __weak together. Preserve descending alignment in all situations.
+ /// Order by 1) all __strong together 2) next, all block together 3) next,
+ /// all byref together 4) next, all __weak together. Preserve descending
+ /// alignment in all situations.
bool operator<(const BlockLayoutChunk &left, const BlockLayoutChunk &right) {
if (left.Alignment != right.Alignment)
return left.Alignment > right.Alignment;
auto getPrefOrder = [](const BlockLayoutChunk &chunk) {
- if (chunk.Capture && chunk.Capture->isByRef())
- return 1;
- if (chunk.Lifetime == Qualifiers::OCL_Strong)
+ switch (chunk.CopyKind) {
+ case BlockCaptureEntityKind::ARCStrong:
return 0;
- if (chunk.Lifetime == Qualifiers::OCL_Weak)
- return 2;
- return 3;
+ case BlockCaptureEntityKind::BlockObject:
+ switch (chunk.CopyFlags.getBitMask()) {
+ case BLOCK_FIELD_IS_OBJECT:
+ return 0;
+ case BLOCK_FIELD_IS_BLOCK:
+ return 1;
+ case BLOCK_FIELD_IS_BYREF:
+ return 2;
+ default:
+ break;
+ }
+ break;
+ case BlockCaptureEntityKind::ARCWeak:
+ return 3;
+ default:
+ break;
+ }
+ return 4;
};
return getPrefOrder(left) < getPrefOrder(right);
}
} // end anonymous namespace
+static std::pair<BlockCaptureEntityKind, BlockFieldFlags>
+computeCopyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
+ const LangOptions &LangOpts);
+
+static std::pair<BlockCaptureEntityKind, BlockFieldFlags>
+computeDestroyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
+ const LangOptions &LangOpts);
+
+static void addBlockLayout(CharUnits align, CharUnits size,
+ const BlockDecl::Capture *capture, llvm::Type *type,
+ QualType fieldType,
+ SmallVectorImpl<BlockLayoutChunk> &Layout,
+ CGBlockInfo &Info, CodeGenModule &CGM) {
+ if (!capture) {
+ // 'this' capture.
+ Layout.push_back(BlockLayoutChunk(
+ align, size, capture, type, fieldType, BlockCaptureEntityKind::None,
+ BlockFieldFlags(), BlockCaptureEntityKind::None, BlockFieldFlags()));
+ return;
+ }
+
+ const LangOptions &LangOpts = CGM.getLangOpts();
+ BlockCaptureEntityKind CopyKind, DisposeKind;
+ BlockFieldFlags CopyFlags, DisposeFlags;
+
+ std::tie(CopyKind, CopyFlags) =
+ computeCopyInfoForBlockCapture(*capture, fieldType, LangOpts);
+ std::tie(DisposeKind, DisposeFlags) =
+ computeDestroyInfoForBlockCapture(*capture, fieldType, LangOpts);
+ Layout.push_back(BlockLayoutChunk(align, size, capture, type, fieldType,
+ CopyKind, CopyFlags, DisposeKind,
+ DisposeFlags));
+
+ if (Info.NoEscape)
+ return;
+
+ if (!Layout.back().isTrivial())
+ Info.NeedsCopyDispose = true;
+}
+
/// Determines if the given type is safe for constant capture in C++.
static bool isSafeForCXXConstantCapture(QualType type) {
const RecordType *recordType =
@@ -541,6 +590,9 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
CGM.getLangOpts().getGC() == LangOptions::NonGC)
info.HasCapturedVariableLayout = true;
+ if (block->doesNotEscape())
+ info.NoEscape = true;
+
// Collect the layout chunks.
SmallVector<BlockLayoutChunk, 16> layout;
layout.reserve(block->capturesCXXThis() +
@@ -560,9 +612,8 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
auto TInfo = CGM.getContext().getTypeInfoInChars(thisType);
maxFieldAlign = std::max(maxFieldAlign, TInfo.Align);
- layout.push_back(BlockLayoutChunk(TInfo.Align, TInfo.Width,
- Qualifiers::OCL_None,
- nullptr, llvmType, thisType));
+ addBlockLayout(TInfo.Align, TInfo.Width, nullptr, llvmType, thisType,
+ layout, info, CGM);
}
// Next, all the block captures.
@@ -570,9 +621,6 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
const VarDecl *variable = CI.getVariable();
if (CI.isEscapingByref()) {
- // We have to copy/dispose of the __block reference.
- info.NeedsCopyDispose = true;
-
// Just use void* instead of a pointer to the byref type.
CharUnits align = CGM.getPointerAlign();
maxFieldAlign = std::max(maxFieldAlign, align);
@@ -581,72 +629,28 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
// the capture field type should always match.
assert(CGF && getCaptureFieldType(*CGF, CI) == variable->getType() &&
"capture type differs from the variable type");
- layout.push_back(BlockLayoutChunk(align, CGM.getPointerSize(),
- Qualifiers::OCL_None, &CI,
- CGM.VoidPtrTy, variable->getType()));
+ addBlockLayout(align, CGM.getPointerSize(), &CI, CGM.VoidPtrTy,
+ variable->getType(), layout, info, CGM);
continue;
}
// Otherwise, build a layout chunk with the size and alignment of
// the declaration.
if (llvm::Constant *constant = tryCaptureAsConstant(CGM, CGF, variable)) {
- info.Captures[variable] = CGBlockInfo::Capture::makeConstant(constant);
+ info.SortedCaptures.push_back(
+ CGBlockInfo::Capture::makeConstant(constant, &CI));
continue;
}
QualType VT = getCaptureFieldType(*CGF, CI);
- // If we have a lifetime qualifier, honor it for capture purposes.
- // That includes *not* copying it if it's __unsafe_unretained.
- Qualifiers::ObjCLifetime lifetime = VT.getObjCLifetime();
- if (lifetime) {
- switch (lifetime) {
- case Qualifiers::OCL_None: llvm_unreachable("impossible");
- case Qualifiers::OCL_ExplicitNone:
- case Qualifiers::OCL_Autoreleasing:
- break;
-
- case Qualifiers::OCL_Strong:
- case Qualifiers::OCL_Weak:
- info.NeedsCopyDispose = true;
- }
-
- // Block pointers require copy/dispose. So do Objective-C pointers.
- } else if (VT->isObjCRetainableType()) {
- // But honor the inert __unsafe_unretained qualifier, which doesn't
- // actually make it into the type system.
- if (VT->isObjCInertUnsafeUnretainedType()) {
- lifetime = Qualifiers::OCL_ExplicitNone;
- } else {
- info.NeedsCopyDispose = true;
- // used for mrr below.
- lifetime = Qualifiers::OCL_Strong;
- }
-
- // So do types that require non-trivial copy construction.
- } else if (CI.hasCopyExpr()) {
- info.NeedsCopyDispose = true;
- info.HasCXXObject = true;
- if (!VT->getAsCXXRecordDecl()->isExternallyVisible())
- info.CapturesNonExternalType = true;
-
- // So do C structs that require non-trivial copy construction or
- // destruction.
- } else if (VT.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct ||
- VT.isDestructedType() == QualType::DK_nontrivial_c_struct) {
- info.NeedsCopyDispose = true;
-
- // And so do types with destructors.
- } else if (CGM.getLangOpts().CPlusPlus) {
- if (const CXXRecordDecl *record = VT->getAsCXXRecordDecl()) {
- if (!record->hasTrivialDestructor()) {
+ if (CGM.getLangOpts().CPlusPlus)
+ if (const CXXRecordDecl *record = VT->getAsCXXRecordDecl())
+ if (CI.hasCopyExpr() || !record->hasTrivialDestructor()) {
info.HasCXXObject = true;
- info.NeedsCopyDispose = true;
if (!record->isExternallyVisible())
info.CapturesNonExternalType = true;
}
- }
- }
CharUnits size = C.getTypeSizeInChars(VT);
CharUnits align = C.getDeclAlign(variable);
@@ -656,8 +660,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
llvm::Type *llvmType =
CGM.getTypes().ConvertTypeForMem(VT);
- layout.push_back(
- BlockLayoutChunk(align, size, lifetime, &CI, llvmType, VT));
+ addBlockLayout(align, size, &CI, llvmType, VT, layout, info, CGM);
}
// If that was everything, we're done here.
@@ -665,6 +668,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
info.StructureType =
llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
info.CanBeGlobal = true;
+ info.buildCaptureMap();
return;
}
@@ -718,6 +722,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
// ...until we get to the alignment of the maximum field.
if (endAlign >= maxFieldAlign) {
+ ++li;
break;
}
}
@@ -770,6 +775,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
endAlign = getLowBit(blockSize);
}
+ info.buildCaptureMap();
info.StructureType =
llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
}
@@ -826,7 +832,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// If the block is non-escaping, set field 'isa 'to NSConcreteGlobalBlock
// and set the BLOCK_IS_GLOBAL bit of field 'flags'. Copying a non-escaping
// block just returns the original block and releasing it is a no-op.
- llvm::Constant *blockISA = blockInfo.getBlockDecl()->doesNotEscape()
+ llvm::Constant *blockISA = blockInfo.NoEscape
? CGM.getNSConcreteGlobalBlock()
: CGM.getNSConcreteStackBlock();
isa = llvm::ConstantExpr::getBitCast(blockISA, VoidPtrTy);
@@ -838,13 +844,13 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
flags = BLOCK_HAS_SIGNATURE;
if (blockInfo.HasCapturedVariableLayout)
flags |= BLOCK_HAS_EXTENDED_LAYOUT;
- if (blockInfo.needsCopyDisposeHelpers())
+ if (blockInfo.NeedsCopyDispose)
flags |= BLOCK_HAS_COPY_DISPOSE;
if (blockInfo.HasCXXObject)
flags |= BLOCK_HAS_CXX_OBJ;
if (blockInfo.UsesStret)
flags |= BLOCK_USE_STRET;
- if (blockInfo.getBlockDecl()->doesNotEscape())
+ if (blockInfo.NoEscape)
flags |= BLOCK_IS_NOESCAPE | BLOCK_IS_GLOBAL;
}
@@ -1033,7 +1039,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
}
// Push a cleanup for the capture if necessary.
- if (!blockInfo.NeedsCopyDispose)
+ if (!blockInfo.NoEscape && !blockInfo.NeedsCopyDispose)
continue;
// Ignore __block captures; there's nothing special in the on-stack block
@@ -1654,6 +1660,11 @@ computeCopyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
// For all other types, the memcpy is fine.
return std::make_pair(BlockCaptureEntityKind::None, BlockFieldFlags());
+ // Honor the inert __unsafe_unretained qualifier, which doesn't actually
+ // make it into the type system.
+ if (T->isObjCInertUnsafeUnretainedType())
+ return std::make_pair(BlockCaptureEntityKind::None, BlockFieldFlags());
+
// Special rules for ARC captures:
Qualifiers QS = T.getQualifiers();
@@ -1669,34 +1680,6 @@ computeCopyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
llvm_unreachable("after exhaustive PrimitiveCopyKind switch");
}
-static std::pair<BlockCaptureEntityKind, BlockFieldFlags>
-computeDestroyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
- const LangOptions &LangOpts);
-
-/// Find the set of block captures that need to be explicitly copied or destroy.
-static void findBlockCapturedManagedEntities(
- const CGBlockInfo &BlockInfo, const LangOptions &LangOpts,
- SmallVectorImpl<BlockCaptureManagedEntity> &ManagedCaptures) {
- for (const auto &CI : BlockInfo.getBlockDecl()->captures()) {
- const VarDecl *Variable = CI.getVariable();
- const CGBlockInfo::Capture &Capture = BlockInfo.getCapture(Variable);
- if (Capture.isConstant())
- continue;
-
- QualType VT = Capture.fieldType();
- auto CopyInfo = computeCopyInfoForBlockCapture(CI, VT, LangOpts);
- auto DisposeInfo = computeDestroyInfoForBlockCapture(CI, VT, LangOpts);
- if (CopyInfo.first != BlockCaptureEntityKind::None ||
- DisposeInfo.first != BlockCaptureEntityKind::None)
- ManagedCaptures.emplace_back(CopyInfo.first, DisposeInfo.first,
- CopyInfo.second, DisposeInfo.second, CI,
- Capture);
- }
-
- // Sort the captures by offset.
- llvm::sort(ManagedCaptures);
-}
-
namespace {
/// Release a __block variable.
struct CallBlockRelease final : EHScopeStack::Cleanup {
@@ -1732,13 +1715,13 @@ bool CodeGenFunction::cxxDestructorCanThrow(QualType T) {
}
// Return a string that has the information about a capture.
-static std::string getBlockCaptureStr(const BlockCaptureManagedEntity &E,
+static std::string getBlockCaptureStr(const CGBlockInfo::Capture &Cap,
CaptureStrKind StrKind,
CharUnits BlockAlignment,
CodeGenModule &CGM) {
std::string Str;
ASTContext &Ctx = CGM.getContext();
- const BlockDecl::Capture &CI = *E.CI;
+ const BlockDecl::Capture &CI = *Cap.Cap;
QualType CaptureTy = CI.getVariable()->getType();
BlockCaptureEntityKind Kind;
@@ -1747,15 +1730,16 @@ static std::string getBlockCaptureStr(const BlockCaptureManagedEntity &E,
// CaptureStrKind::Merged should be passed only when the operations and the
// flags are the same for copy and dispose.
assert((StrKind != CaptureStrKind::Merged ||
- (E.CopyKind == E.DisposeKind && E.CopyFlags == E.DisposeFlags)) &&
+ (Cap.CopyKind == Cap.DisposeKind &&
+ Cap.CopyFlags == Cap.DisposeFlags)) &&
"different operations and flags");
if (StrKind == CaptureStrKind::DisposeHelper) {
- Kind = E.DisposeKind;
- Flags = E.DisposeFlags;
+ Kind = Cap.DisposeKind;
+ Flags = Cap.DisposeFlags;
} else {
- Kind = E.CopyKind;
- Flags = E.CopyFlags;
+ Kind = Cap.CopyKind;
+ Flags = Cap.CopyFlags;
}
switch (Kind) {
@@ -1803,8 +1787,7 @@ static std::string getBlockCaptureStr(const BlockCaptureManagedEntity &E,
}
case BlockCaptureEntityKind::NonTrivialCStruct: {
bool IsVolatile = CaptureTy.isVolatileQualified();
- CharUnits Alignment =
- BlockAlignment.alignmentAtOffset(E.Capture->getOffset());
+ CharUnits Alignment = BlockAlignment.alignmentAtOffset(Cap.getOffset());
Str += "n";
std::string FuncStr;
@@ -1829,7 +1812,7 @@ static std::string getBlockCaptureStr(const BlockCaptureManagedEntity &E,
}
static std::string getCopyDestroyHelperFuncName(
- const SmallVectorImpl<BlockCaptureManagedEntity> &Captures,
+ const SmallVectorImpl<CGBlockInfo::Capture> &Captures,
CharUnits BlockAlignment, CaptureStrKind StrKind, CodeGenModule &CGM) {
assert((StrKind == CaptureStrKind::CopyHelper ||
StrKind == CaptureStrKind::DisposeHelper) &&
@@ -1843,9 +1826,11 @@ static std::string getCopyDestroyHelperFuncName(
Name += "a";
Name += llvm::to_string(BlockAlignment.getQuantity()) + "_";
- for (const BlockCaptureManagedEntity &E : Captures) {
- Name += llvm::to_string(E.Capture->getOffset().getQuantity());
- Name += getBlockCaptureStr(E, StrKind, BlockAlignment, CGM);
+ for (auto &Cap : Captures) {
+ if (Cap.isConstantOrTrivial())
+ continue;
+ Name += llvm::to_string(Cap.getOffset().getQuantity());
+ Name += getBlockCaptureStr(Cap, StrKind, BlockAlignment, CGM);
}
return Name;
@@ -1916,11 +1901,9 @@ static void setBlockHelperAttributesVisibility(bool CapturesNonExternalType,
/// the contents of an individual __block variable to the heap.
llvm::Constant *
CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
- SmallVector<BlockCaptureManagedEntity, 4> CopiedCaptures;
- findBlockCapturedManagedEntities(blockInfo, getLangOpts(), CopiedCaptures);
- std::string FuncName =
- getCopyDestroyHelperFuncName(CopiedCaptures, blockInfo.BlockAlign,
- CaptureStrKind::CopyHelper, CGM);
+ std::string FuncName = getCopyDestroyHelperFuncName(
+ blockInfo.SortedCaptures, blockInfo.BlockAlign,
+ CaptureStrKind::CopyHelper, CGM);
if (llvm::GlobalValue *Func = CGM.getModule().getNamedValue(FuncName))
return llvm::ConstantExpr::getBitCast(Func, VoidPtrTy);
@@ -1967,17 +1950,19 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
dst = Address(Builder.CreateLoad(dst), blockInfo.BlockAlign);
dst = Builder.CreateBitCast(dst, structPtrTy, "block.dest");
- for (const auto &CopiedCapture : CopiedCaptures) {
- const BlockDecl::Capture &CI = *CopiedCapture.CI;
- const CGBlockInfo::Capture &capture = *CopiedCapture.Capture;
+ for (auto &capture : blockInfo.SortedCaptures) {
+ if (capture.isConstantOrTrivial())
+ continue;
+
+ const BlockDecl::Capture &CI = *capture.Cap;
QualType captureType = CI.getVariable()->getType();
- BlockFieldFlags flags = CopiedCapture.CopyFlags;
+ BlockFieldFlags flags = capture.CopyFlags;
unsigned index = capture.getIndex();
Address srcField = Builder.CreateStructGEP(src, index);
Address dstField = Builder.CreateStructGEP(dst, index);
- switch (CopiedCapture.CopyKind) {
+ switch (capture.CopyKind) {
case BlockCaptureEntityKind::CXXRecord:
// If there's an explicit copy expression, we do that.
assert(CI.getCopyExpr() && "copy expression for variable is missing");
@@ -2040,7 +2025,7 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
// Ensure that we destroy the copied object if an exception is thrown later
// in the helper function.
- pushCaptureCleanup(CopiedCapture.CopyKind, dstField, captureType, flags,
+ pushCaptureCleanup(capture.CopyKind, dstField, captureType, flags,
/*ForCopyHelper*/ true, CI.getVariable(), *this);
}
@@ -2085,8 +2070,10 @@ computeDestroyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
BlockFieldFlags());
case QualType::DK_none: {
// Non-ARC captures are strong, and we need to use _Block_object_dispose.
+ // But honor the inert __unsafe_unretained qualifier, which doesn't actually
+ // make it into the type system.
if (T->isObjCRetainableType() && !T.getQualifiers().hasObjCLifetime() &&
- !LangOpts.ObjCAutoRefCount)
+ !LangOpts.ObjCAutoRefCount && !T->isObjCInertUnsafeUnretainedType())
return std::make_pair(BlockCaptureEntityKind::BlockObject,
getBlockFieldFlagsForObjCObjectPointer(CI, T));
// Otherwise, we have nothing to do.
@@ -2105,11 +2092,9 @@ computeDestroyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
/// variable.
llvm::Constant *
CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
- SmallVector<BlockCaptureManagedEntity, 4> DestroyedCaptures;
- findBlockCapturedManagedEntities(blockInfo, getLangOpts(), DestroyedCaptures);
- std::string FuncName =
- getCopyDestroyHelperFuncName(DestroyedCaptures, blockInfo.BlockAlign,
- CaptureStrKind::DisposeHelper, CGM);
+ std::string FuncName = getCopyDestroyHelperFuncName(
+ blockInfo.SortedCaptures, blockInfo.BlockAlign,
+ CaptureStrKind::DisposeHelper, CGM);
if (llvm::GlobalValue *Func = CGM.getModule().getNamedValue(FuncName))
return llvm::ConstantExpr::getBitCast(Func, VoidPtrTy);
@@ -2153,14 +2138,16 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
CodeGenFunction::RunCleanupsScope cleanups(*this);
- for (const auto &DestroyedCapture : DestroyedCaptures) {
- const BlockDecl::Capture &CI = *DestroyedCapture.CI;
- const CGBlockInfo::Capture &capture = *DestroyedCapture.Capture;
- BlockFieldFlags flags = DestroyedCapture.DisposeFlags;
+ for (auto &capture : blockInfo.SortedCaptures) {
+ if (capture.isConstantOrTrivial())
+ continue;
+
+ const BlockDecl::Capture &CI = *capture.Cap;
+ BlockFieldFlags flags = capture.DisposeFlags;
Address srcField = Builder.CreateStructGEP(src, capture.getIndex());
- pushCaptureCleanup(DestroyedCapture.DisposeKind, srcField,
+ pushCaptureCleanup(capture.DisposeKind, srcField,
CI.getVariable()->getType(), flags,
/*ForCopyHelper*/ false, CI.getVariable(), *this);
}
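
The CGBlocks.cpp changes above fold the old findBlockCapturedManagedEntities() pass into computeBlockInfo(): each CGBlockInfo::Capture now records its own CopyKind/DisposeKind and flags, the captures live pre-sorted in SortedCaptures, and the block's escape status is cached in the new NoEscape bit instead of being re-queried from the BlockDecl. A minimal caller-side sketch of the case the NoEscape path covers, assuming clang with -fblocks and a hypothetical apply() callee:

    #include <string>

    void apply(__attribute__((noescape)) void (^fn)(void));

    void demo() {
      std::string s = "captured";
      // The std::string capture would normally require copy/dispose helpers,
      // but the block is passed to a noescape parameter, so CGBlockInfo::NoEscape
      // is set, the literal gets the NSConcreteGlobalBlock isa, and its flags
      // include BLOCK_IS_NOESCAPE | BLOCK_IS_GLOBAL; copying it just returns the
      // original block and releasing it is a no-op.
      apply(^{ (void)s.size(); });
    }
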
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h b/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h
index 698ecd3d926a..e8857d98894f 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h
@@ -26,14 +26,7 @@
#include "clang/Basic/TargetInfo.h"
namespace llvm {
-class Constant;
-class Function;
-class GlobalValue;
-class DataLayout;
-class FunctionType;
-class PointerType;
class Value;
-class LLVMContext;
}
namespace clang {
@@ -148,6 +141,17 @@ public:
CharUnits FieldOffset;
};
+/// Represents a type of copy/destroy operation that should be performed for an
+/// entity that's captured by a block.
+enum class BlockCaptureEntityKind {
+ None,
+ CXXRecord, // Copy or destroy
+ ARCWeak,
+ ARCStrong,
+ NonTrivialCStruct,
+ BlockObject, // Assign or release
+};
+
/// CGBlockInfo - Information to generate a block literal.
class CGBlockInfo {
public:
@@ -197,20 +201,40 @@ public:
return FieldType;
}
- static Capture makeIndex(unsigned index, CharUnits offset,
- QualType FieldType) {
+ static Capture
+ makeIndex(unsigned index, CharUnits offset, QualType FieldType,
+ BlockCaptureEntityKind CopyKind, BlockFieldFlags CopyFlags,
+ BlockCaptureEntityKind DisposeKind, BlockFieldFlags DisposeFlags,
+ const BlockDecl::Capture *Cap) {
Capture v;
v.Data = (index << 1) | 1;
v.Offset = offset.getQuantity();
v.FieldType = FieldType;
+ v.CopyKind = CopyKind;
+ v.CopyFlags = CopyFlags;
+ v.DisposeKind = DisposeKind;
+ v.DisposeFlags = DisposeFlags;
+ v.Cap = Cap;
return v;
}
- static Capture makeConstant(llvm::Value *value) {
+ static Capture makeConstant(llvm::Value *value,
+ const BlockDecl::Capture *Cap) {
Capture v;
v.Data = reinterpret_cast<uintptr_t>(value);
+ v.Cap = Cap;
return v;
}
+
+ bool isConstantOrTrivial() const {
+ return CopyKind == BlockCaptureEntityKind::None &&
+ DisposeKind == BlockCaptureEntityKind::None;
+ }
+
+ BlockCaptureEntityKind CopyKind = BlockCaptureEntityKind::None,
+ DisposeKind = BlockCaptureEntityKind::None;
+ BlockFieldFlags CopyFlags, DisposeFlags;
+ const BlockDecl::Capture *Cap;
};
/// CanBeGlobal - True if the block can be global, i.e. it has
@@ -221,6 +245,9 @@ public:
/// dispose helper functions if the block were escaping.
bool NeedsCopyDispose : 1;
+ /// Indicates whether the block is non-escaping.
+ bool NoEscape : 1;
+
/// HasCXXObject - True if the block's custom copy/dispose functions
/// need to be run even in GC mode.
bool HasCXXObject : 1;
@@ -238,8 +265,11 @@ public:
/// functions.
bool CapturesNonExternalType : 1;
- /// The mapping of allocated indexes within the block.
- llvm::DenseMap<const VarDecl*, Capture> Captures;
+ /// Mapping from variables to pointers to captures in SortedCaptures.
+ llvm::DenseMap<const VarDecl *, Capture *> Captures;
+
+ /// The block's captures. Non-constant captures are sorted by their offsets.
+ llvm::SmallVector<Capture, 4> SortedCaptures;
Address LocalAddress;
llvm::StructType *StructureType;
@@ -263,14 +293,18 @@ public:
/// has been encountered.
CGBlockInfo *NextBlockInfo;
+ void buildCaptureMap() {
+ for (auto &C : SortedCaptures)
+ Captures[C.Cap->getVariable()] = &C;
+ }
+
const Capture &getCapture(const VarDecl *var) const {
return const_cast<CGBlockInfo*>(this)->getCapture(var);
}
Capture &getCapture(const VarDecl *var) {
- llvm::DenseMap<const VarDecl*, Capture>::iterator
- it = Captures.find(var);
+ auto it = Captures.find(var);
assert(it != Captures.end() && "no entry for variable!");
- return it->second;
+ return *it->second;
}
const BlockDecl *getBlockDecl() const { return Block; }
@@ -281,11 +315,6 @@ public:
}
CGBlockInfo(const BlockDecl *blockDecl, StringRef Name);
-
- // Indicates whether the block needs a custom copy or dispose function.
- bool needsCopyDisposeHelpers() const {
- return NeedsCopyDispose && !Block->doesNotEscape();
- }
};
} // end namespace CodeGen
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp
index 1982b40ff667..2b7862e618bd 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp
@@ -159,6 +159,7 @@ static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
static Value *MakeBinaryAtomicValue(
CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
+
QualType T = E->getType();
assert(E->getArg(0)->getType()->isPointerType());
assert(CGF.getContext().hasSameUnqualifiedType(T,
@@ -532,13 +533,13 @@ static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type.
-static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
- const CallExpr *E,
- unsigned IntrinsicID) {
+static Value *emitUnaryBuiltin(CodeGenFunction &CGF, const CallExpr *E,
+ unsigned IntrinsicID,
+ llvm::StringRef Name = "") {
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
- return CGF.Builder.CreateCall(F, Src0);
+ return CGF.Builder.CreateCall(F, Src0, Name);
}
// Emit an intrinsic that has 2 operands of the same type as its result.
@@ -1060,7 +1061,10 @@ static llvm::Value *emitPPCLoadReserveIntrinsic(CodeGenFunction &CGF,
llvm::InlineAsm *IA =
llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
- return CGF.Builder.CreateCall(IA, {Addr});
+ llvm::CallInst *CI = CGF.Builder.CreateCall(IA, {Addr});
+ CI->addParamAttr(
+ 0, Attribute::get(CGF.getLLVMContext(), Attribute::ElementType, RetType));
+ return CI;
}
namespace {
@@ -3122,24 +3126,34 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
case Builtin::BI__builtin_elementwise_abs: {
- Value *Op0 = EmitScalarExpr(E->getArg(0));
Value *Result;
- if (Op0->getType()->isIntOrIntVectorTy())
+ QualType QT = E->getArg(0)->getType();
+
+ if (auto *VecTy = QT->getAs<VectorType>())
+ QT = VecTy->getElementType();
+ if (QT->isIntegerType())
Result = Builder.CreateBinaryIntrinsic(
- llvm::Intrinsic::abs, Op0, Builder.getFalse(), nullptr, "elt.abs");
+ llvm::Intrinsic::abs, EmitScalarExpr(E->getArg(0)),
+ Builder.getFalse(), nullptr, "elt.abs");
else
- Result = Builder.CreateUnaryIntrinsic(llvm::Intrinsic::fabs, Op0, nullptr,
- "elt.abs");
- return RValue::get(Result);
- }
+ Result = emitUnaryBuiltin(*this, E, llvm::Intrinsic::fabs, "elt.abs");
- case Builtin::BI__builtin_elementwise_ceil: {
- Value *Op0 = EmitScalarExpr(E->getArg(0));
- Value *Result = Builder.CreateUnaryIntrinsic(llvm::Intrinsic::ceil, Op0,
- nullptr, "elt.ceil");
return RValue::get(Result);
}
+ case Builtin::BI__builtin_elementwise_ceil:
+ return RValue::get(
+ emitUnaryBuiltin(*this, E, llvm::Intrinsic::ceil, "elt.ceil"));
+ case Builtin::BI__builtin_elementwise_floor:
+ return RValue::get(
+ emitUnaryBuiltin(*this, E, llvm::Intrinsic::floor, "elt.floor"));
+ case Builtin::BI__builtin_elementwise_roundeven:
+ return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::roundeven,
+ "elt.roundeven"));
+ case Builtin::BI__builtin_elementwise_trunc:
+ return RValue::get(
+ emitUnaryBuiltin(*this, E, llvm::Intrinsic::trunc, "elt.trunc"));
+
case Builtin::BI__builtin_elementwise_max: {
Value *Op0 = EmitScalarExpr(E->getArg(0));
Value *Op1 = EmitScalarExpr(E->getArg(1));
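
The hunk above reworks __builtin_elementwise_abs to pick between llvm.abs and llvm.fabs from the AST element type and adds __builtin_elementwise_floor, __builtin_elementwise_roundeven and __builtin_elementwise_trunc, all funneled through emitUnaryBuiltin(), which now also takes a value name. A hedged usage sketch of these clang extensions; the float4 typedef is illustrative only:

    typedef float float4 __attribute__((ext_vector_type(4)));

    float4 round_down(float4 v) { return __builtin_elementwise_floor(v); }     // llvm.floor
    float4 to_even(float4 v)    { return __builtin_elementwise_roundeven(v); } // llvm.roundeven
    float4 drop_frac(float4 v)  { return __builtin_elementwise_trunc(v); }     // llvm.trunc
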
@@ -3174,52 +3188,48 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
case Builtin::BI__builtin_reduce_max: {
- auto GetIntrinsicID = [](QualType QT, llvm::Type *IrTy) {
- if (IrTy->isIntOrIntVectorTy()) {
- if (auto *VecTy = QT->getAs<VectorType>())
- QT = VecTy->getElementType();
- if (QT->isSignedIntegerType())
- return llvm::Intrinsic::vector_reduce_smax;
- else
- return llvm::Intrinsic::vector_reduce_umax;
- }
+ auto GetIntrinsicID = [](QualType QT) {
+ if (auto *VecTy = QT->getAs<VectorType>())
+ QT = VecTy->getElementType();
+ if (QT->isSignedIntegerType())
+ return llvm::Intrinsic::vector_reduce_smax;
+ if (QT->isUnsignedIntegerType())
+ return llvm::Intrinsic::vector_reduce_umax;
+ assert(QT->isFloatingType() && "must have a float here");
return llvm::Intrinsic::vector_reduce_fmax;
};
- Value *Op0 = EmitScalarExpr(E->getArg(0));
- Value *Result = Builder.CreateUnaryIntrinsic(
- GetIntrinsicID(E->getArg(0)->getType(), Op0->getType()), Op0, nullptr,
- "rdx.min");
- return RValue::get(Result);
+ return RValue::get(emitUnaryBuiltin(
+ *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
}
case Builtin::BI__builtin_reduce_min: {
- auto GetIntrinsicID = [](QualType QT, llvm::Type *IrTy) {
- if (IrTy->isIntOrIntVectorTy()) {
- if (auto *VecTy = QT->getAs<VectorType>())
- QT = VecTy->getElementType();
- if (QT->isSignedIntegerType())
- return llvm::Intrinsic::vector_reduce_smin;
- else
- return llvm::Intrinsic::vector_reduce_umin;
- }
+ auto GetIntrinsicID = [](QualType QT) {
+ if (auto *VecTy = QT->getAs<VectorType>())
+ QT = VecTy->getElementType();
+ if (QT->isSignedIntegerType())
+ return llvm::Intrinsic::vector_reduce_smin;
+ if (QT->isUnsignedIntegerType())
+ return llvm::Intrinsic::vector_reduce_umin;
+ assert(QT->isFloatingType() && "must have a float here");
return llvm::Intrinsic::vector_reduce_fmin;
};
- Value *Op0 = EmitScalarExpr(E->getArg(0));
- Value *Result = Builder.CreateUnaryIntrinsic(
- GetIntrinsicID(E->getArg(0)->getType(), Op0->getType()), Op0, nullptr,
- "rdx.min");
- return RValue::get(Result);
- }
- case Builtin::BI__builtin_reduce_xor: {
- Value *Op0 = EmitScalarExpr(E->getArg(0));
- Value *Result = Builder.CreateUnaryIntrinsic(
- llvm::Intrinsic::vector_reduce_xor, Op0, nullptr, "rdx.xor");
- return RValue::get(Result);
+ return RValue::get(emitUnaryBuiltin(
+ *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
}
+ case Builtin::BI__builtin_reduce_xor:
+ return RValue::get(emitUnaryBuiltin(
+ *this, E, llvm::Intrinsic::vector_reduce_xor, "rdx.xor"));
+ case Builtin::BI__builtin_reduce_or:
+ return RValue::get(emitUnaryBuiltin(
+ *this, E, llvm::Intrinsic::vector_reduce_or, "rdx.or"));
+ case Builtin::BI__builtin_reduce_and:
+ return RValue::get(emitUnaryBuiltin(
+ *this, E, llvm::Intrinsic::vector_reduce_and, "rdx.and"));
+
case Builtin::BI__builtin_matrix_transpose: {
- const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
+ auto *MatrixTy = E->getArg(0)->getType()->castAs<ConstantMatrixType>();
Value *MatValue = EmitScalarExpr(E->getArg(0));
MatrixBuilder<CGBuilderTy> MB(Builder);
Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(),
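
Earlier in this hunk, __builtin_reduce_max and __builtin_reduce_min now choose the smax/umax/smin/umin or fmax/fmin reduction from the AST element type (asserting that it is integer or floating point), and __builtin_reduce_or and __builtin_reduce_and join __builtin_reduce_xor as thin wrappers over the llvm.vector.reduce intrinsics. A hedged usage sketch; the vector typedefs are illustrative only:

    typedef int int4 __attribute__((ext_vector_type(4)));
    typedef unsigned uint4 __attribute__((ext_vector_type(4)));

    int any_bit(int4 v)     { return __builtin_reduce_or(v); }   // llvm.vector.reduce.or
    int all_bits(int4 v)    { return __builtin_reduce_and(v); }  // llvm.vector.reduce.and
    unsigned max_u(uint4 v) { return __builtin_reduce_max(v); }  // llvm.vector.reduce.umax
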
@@ -3423,6 +3433,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BIalloca:
case Builtin::BI_alloca:
+ case Builtin::BI__builtin_alloca_uninitialized:
case Builtin::BI__builtin_alloca: {
Value *Size = EmitScalarExpr(E->getArg(0));
const TargetInfo &TI = getContext().getTargetInfo();
@@ -3433,10 +3444,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
.getAsAlign();
AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
AI->setAlignment(SuitableAlignmentInBytes);
- initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
+ if (BuiltinID != Builtin::BI__builtin_alloca_uninitialized)
+ initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
return RValue::get(AI);
}
+ case Builtin::BI__builtin_alloca_with_align_uninitialized:
case Builtin::BI__builtin_alloca_with_align: {
Value *Size = EmitScalarExpr(E->getArg(0));
Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
@@ -3446,7 +3459,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign();
AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
AI->setAlignment(AlignmentInBytes);
- initializeAlloca(*this, AI, Size, AlignmentInBytes);
+ if (BuiltinID != Builtin::BI__builtin_alloca_with_align_uninitialized)
+ initializeAlloca(*this, AI, Size, AlignmentInBytes);
return RValue::get(AI);
}
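
The two hunks above wire up __builtin_alloca_uninitialized and __builtin_alloca_with_align_uninitialized: they share the existing alloca lowering but skip initializeAlloca(), so the memory is left uninitialized even under -ftrivial-auto-var-init. A short sketch; fill() is a hypothetical consumer that overwrites the whole buffer:

    #include <cstddef>

    void fill(void *dst, std::size_t n);

    void demo(std::size_t n) {
      // Unlike __builtin_alloca, this skips the -ftrivial-auto-var-init fill.
      void *buf = __builtin_alloca_uninitialized(n);
      fill(buf, n);
    }
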
@@ -4921,7 +4935,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::Value *Block =
Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
- AttrBuilder B;
+ AttrBuilder B(Builder.getContext());
B.addByValAttr(NDRangeL.getAddress(*this).getElementType());
llvm::AttributeList ByValAttrSet =
llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B);
@@ -5860,6 +5874,10 @@ static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType),
NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType),
NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType),
+ NEONMAP1(vqrdmlah_v, arm_neon_vqrdmlah, Add1ArgType),
+ NEONMAP1(vqrdmlahq_v, arm_neon_vqrdmlah, Add1ArgType),
+ NEONMAP1(vqrdmlsh_v, arm_neon_vqrdmlsh, Add1ArgType),
+ NEONMAP1(vqrdmlshq_v, arm_neon_vqrdmlsh, Add1ArgType),
NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType),
NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType),
NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
@@ -6085,6 +6103,10 @@ static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
+ NEONMAP1(vqrdmlah_v, aarch64_neon_sqrdmlah, Add1ArgType),
+ NEONMAP1(vqrdmlahq_v, aarch64_neon_sqrdmlah, Add1ArgType),
+ NEONMAP1(vqrdmlsh_v, aarch64_neon_sqrdmlsh, Add1ArgType),
+ NEONMAP1(vqrdmlshq_v, aarch64_neon_sqrdmlsh, Add1ArgType),
NEONMAP1(vqrdmulh_lane_v, aarch64_neon_sqrdmulh_lane, 0),
NEONMAP1(vqrdmulh_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
@@ -6287,6 +6309,10 @@ static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
+ NEONMAP1(vqrdmlahh_s16, aarch64_neon_sqrdmlah, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqrdmlahs_s32, aarch64_neon_sqrdmlah, Add1ArgType),
+ NEONMAP1(vqrdmlshh_s16, aarch64_neon_sqrdmlsh, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqrdmlshs_s32, aarch64_neon_sqrdmlsh, Add1ArgType),
NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
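
The NEONMAP1 additions here and in the two preceding tables route the vqrdmlah/vqrdmlsh intrinsics (vector and scalar forms) to the arm_neon/aarch64_neon sqrdmlah and sqrdmlsh intrinsics. A hedged sketch of one corresponding ACLE intrinsic, assuming a target with the ARMv8.1-A rounding doubling multiply (RDM) extension enabled:

    #include <arm_neon.h>

    // acc + saturating rounding doubling multiply-high of a and b, lane-wise.
    int16x8_t rdm_step(int16x8_t acc, int16x8_t a, int16x8_t b) {
      return vqrdmlahq_s16(acc, a, b);
    }
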
@@ -14271,73 +14297,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, Ops[0]);
}
}
- case X86::BI__builtin_ia32_pabsb128:
- case X86::BI__builtin_ia32_pabsw128:
- case X86::BI__builtin_ia32_pabsd128:
- case X86::BI__builtin_ia32_pabsb256:
- case X86::BI__builtin_ia32_pabsw256:
- case X86::BI__builtin_ia32_pabsd256:
- case X86::BI__builtin_ia32_pabsq128:
- case X86::BI__builtin_ia32_pabsq256:
- case X86::BI__builtin_ia32_pabsb512:
- case X86::BI__builtin_ia32_pabsw512:
- case X86::BI__builtin_ia32_pabsd512:
- case X86::BI__builtin_ia32_pabsq512: {
- Function *F = CGM.getIntrinsic(Intrinsic::abs, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
- }
- case X86::BI__builtin_ia32_pmaxsb128:
- case X86::BI__builtin_ia32_pmaxsw128:
- case X86::BI__builtin_ia32_pmaxsd128:
- case X86::BI__builtin_ia32_pmaxsq128:
- case X86::BI__builtin_ia32_pmaxsb256:
- case X86::BI__builtin_ia32_pmaxsw256:
- case X86::BI__builtin_ia32_pmaxsd256:
- case X86::BI__builtin_ia32_pmaxsq256:
- case X86::BI__builtin_ia32_pmaxsb512:
- case X86::BI__builtin_ia32_pmaxsw512:
- case X86::BI__builtin_ia32_pmaxsd512:
- case X86::BI__builtin_ia32_pmaxsq512:
- return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::smax);
- case X86::BI__builtin_ia32_pmaxub128:
- case X86::BI__builtin_ia32_pmaxuw128:
- case X86::BI__builtin_ia32_pmaxud128:
- case X86::BI__builtin_ia32_pmaxuq128:
- case X86::BI__builtin_ia32_pmaxub256:
- case X86::BI__builtin_ia32_pmaxuw256:
- case X86::BI__builtin_ia32_pmaxud256:
- case X86::BI__builtin_ia32_pmaxuq256:
- case X86::BI__builtin_ia32_pmaxub512:
- case X86::BI__builtin_ia32_pmaxuw512:
- case X86::BI__builtin_ia32_pmaxud512:
- case X86::BI__builtin_ia32_pmaxuq512:
- return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::umax);
- case X86::BI__builtin_ia32_pminsb128:
- case X86::BI__builtin_ia32_pminsw128:
- case X86::BI__builtin_ia32_pminsd128:
- case X86::BI__builtin_ia32_pminsq128:
- case X86::BI__builtin_ia32_pminsb256:
- case X86::BI__builtin_ia32_pminsw256:
- case X86::BI__builtin_ia32_pminsd256:
- case X86::BI__builtin_ia32_pminsq256:
- case X86::BI__builtin_ia32_pminsb512:
- case X86::BI__builtin_ia32_pminsw512:
- case X86::BI__builtin_ia32_pminsd512:
- case X86::BI__builtin_ia32_pminsq512:
- return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::smin);
- case X86::BI__builtin_ia32_pminub128:
- case X86::BI__builtin_ia32_pminuw128:
- case X86::BI__builtin_ia32_pminud128:
- case X86::BI__builtin_ia32_pminuq128:
- case X86::BI__builtin_ia32_pminub256:
- case X86::BI__builtin_ia32_pminuw256:
- case X86::BI__builtin_ia32_pminud256:
- case X86::BI__builtin_ia32_pminuq256:
- case X86::BI__builtin_ia32_pminub512:
- case X86::BI__builtin_ia32_pminuw512:
- case X86::BI__builtin_ia32_pminud512:
- case X86::BI__builtin_ia32_pminuq512:
- return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::umin);
case X86::BI__builtin_ia32_pmuludq128:
case X86::BI__builtin_ia32_pmuludq256:
@@ -14418,12 +14377,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(Intrinsic::vector_reduce_add, Ops[0]->getType());
return Builder.CreateCall(F, {Ops[0]});
}
- case X86::BI__builtin_ia32_reduce_and_d512:
- case X86::BI__builtin_ia32_reduce_and_q512: {
- Function *F =
- CGM.getIntrinsic(Intrinsic::vector_reduce_and, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0]});
- }
case X86::BI__builtin_ia32_reduce_fadd_pd512:
case X86::BI__builtin_ia32_reduce_fadd_ps512:
case X86::BI__builtin_ia32_reduce_fadd_ph512:
@@ -14470,36 +14423,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(Intrinsic::vector_reduce_mul, Ops[0]->getType());
return Builder.CreateCall(F, {Ops[0]});
}
- case X86::BI__builtin_ia32_reduce_or_d512:
- case X86::BI__builtin_ia32_reduce_or_q512: {
- Function *F =
- CGM.getIntrinsic(Intrinsic::vector_reduce_or, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0]});
- }
- case X86::BI__builtin_ia32_reduce_smax_d512:
- case X86::BI__builtin_ia32_reduce_smax_q512: {
- Function *F =
- CGM.getIntrinsic(Intrinsic::vector_reduce_smax, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0]});
- }
- case X86::BI__builtin_ia32_reduce_smin_d512:
- case X86::BI__builtin_ia32_reduce_smin_q512: {
- Function *F =
- CGM.getIntrinsic(Intrinsic::vector_reduce_smin, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0]});
- }
- case X86::BI__builtin_ia32_reduce_umax_d512:
- case X86::BI__builtin_ia32_reduce_umax_q512: {
- Function *F =
- CGM.getIntrinsic(Intrinsic::vector_reduce_umax, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0]});
- }
- case X86::BI__builtin_ia32_reduce_umin_d512:
- case X86::BI__builtin_ia32_reduce_umin_q512: {
- Function *F =
- CGM.getIntrinsic(Intrinsic::vector_reduce_umin, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0]});
- }
// 3DNow!
case X86::BI__builtin_ia32_pswapdsf:
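
The deletions in the hunks above remove the dedicated lowering for the x86 pabs/pmaxs/pmaxu/pmins/pminu builtins and for the 512-bit reduce_and/or/smax/smin/umax/umin builtins; presumably the x86 intrinsic headers now reach the same LLVM intrinsics through the generic __builtin_elementwise_* and __builtin_reduce_* paths handled earlier in this file (that routing is an assumption, not stated in the diff). The user-facing intrinsics are unchanged:

    #include <immintrin.h>

    // Still lowers to a plain signed max on <8 x i32>; only clang's internal route changed.
    __attribute__((target("avx2")))
    __m256i max_i32(__m256i a, __m256i b) { return _mm256_max_epi32(a, b); }
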
@@ -17279,7 +17202,7 @@ static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) {
case NVPTX::BI__mma_tf32_m16n16k8_ld_a:
return MMA_LDST(4, m16n16k8_load_a_tf32);
case NVPTX::BI__mma_tf32_m16n16k8_ld_b:
- return MMA_LDST(2, m16n16k8_load_b_tf32);
+ return MMA_LDST(4, m16n16k8_load_b_tf32);
case NVPTX::BI__mma_tf32_m16n16k8_ld_c:
return MMA_LDST(8, m16n16k8_load_c_f32);
@@ -18448,15 +18371,11 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
llvm_unreachable("unexpected builtin ID");
}
llvm::Type *SrcT = Vec->getType();
- llvm::Type *TruncT =
- SrcT->getWithNewType(llvm::IntegerType::get(getLLVMContext(), 32));
+ llvm::Type *TruncT = SrcT->getWithNewType(Builder.getInt32Ty());
Function *Callee = CGM.getIntrinsic(IntNo, {TruncT, SrcT});
Value *Trunc = Builder.CreateCall(Callee, Vec);
- Value *Splat = Builder.CreateVectorSplat(2, Builder.getInt32(0));
- Value *ConcatMask =
- llvm::ConstantVector::get({Builder.getInt32(0), Builder.getInt32(1),
- Builder.getInt32(2), Builder.getInt32(3)});
- return Builder.CreateShuffleVector(Trunc, Splat, ConcatMask);
+ Value *Splat = Constant::getNullValue(TruncT);
+ return Builder.CreateShuffleVector(Trunc, Splat, ArrayRef<int>{0, 1, 2, 3});
}
case WebAssembly::BI__builtin_wasm_shuffle_i8x16: {
Value *Ops[18];
@@ -18822,6 +18741,8 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
case RISCV::BI__builtin_riscv_bcompress_64:
case RISCV::BI__builtin_riscv_bdecompress_32:
case RISCV::BI__builtin_riscv_bdecompress_64:
+ case RISCV::BI__builtin_riscv_bfp_32:
+ case RISCV::BI__builtin_riscv_bfp_64:
case RISCV::BI__builtin_riscv_grev_32:
case RISCV::BI__builtin_riscv_grev_64:
case RISCV::BI__builtin_riscv_gorc_32:
@@ -18841,7 +18762,11 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
case RISCV::BI__builtin_riscv_crc32c_b:
case RISCV::BI__builtin_riscv_crc32c_h:
case RISCV::BI__builtin_riscv_crc32c_w:
- case RISCV::BI__builtin_riscv_crc32c_d: {
+ case RISCV::BI__builtin_riscv_crc32c_d:
+ case RISCV::BI__builtin_riscv_fsl_32:
+ case RISCV::BI__builtin_riscv_fsr_32:
+ case RISCV::BI__builtin_riscv_fsl_64:
+ case RISCV::BI__builtin_riscv_fsr_64: {
switch (BuiltinID) {
default: llvm_unreachable("unexpected builtin ID");
// Zbb
@@ -18871,6 +18796,12 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
ID = Intrinsic::riscv_bdecompress;
break;
+ // Zbf
+ case RISCV::BI__builtin_riscv_bfp_32:
+ case RISCV::BI__builtin_riscv_bfp_64:
+ ID = Intrinsic::riscv_bfp;
+ break;
+
// Zbp
case RISCV::BI__builtin_riscv_grev_32:
case RISCV::BI__builtin_riscv_grev_64:
@@ -18926,6 +18857,16 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
case RISCV::BI__builtin_riscv_crc32c_d:
ID = Intrinsic::riscv_crc32c_d;
break;
+
+ // Zbt
+ case RISCV::BI__builtin_riscv_fsl_32:
+ case RISCV::BI__builtin_riscv_fsl_64:
+ ID = Intrinsic::riscv_fsl;
+ break;
+ case RISCV::BI__builtin_riscv_fsr_32:
+ case RISCV::BI__builtin_riscv_fsr_64:
+ ID = Intrinsic::riscv_fsr;
+ break;
}
IntrinsicTypes = {ResultType};
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp
index 9714730e3c4b..0b441e382f11 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp
@@ -154,6 +154,51 @@ void CGCXXABI::setCXXABIThisValue(CodeGenFunction &CGF, llvm::Value *ThisPtr) {
CGF.CXXABIThisValue = ThisPtr;
}
+bool CGCXXABI::mayNeedDestruction(const VarDecl *VD) const {
+ if (VD->needsDestruction(getContext()))
+ return true;
+
+ // If the variable has an incomplete class type (or array thereof), it
+ // might need destruction.
+ const Type *T = VD->getType()->getBaseElementTypeUnsafe();
+ if (T->getAs<RecordType>() && T->isIncompleteType())
+ return true;
+
+ return false;
+}
+
+bool CGCXXABI::isEmittedWithConstantInitializer(
+ const VarDecl *VD, bool InspectInitForWeakDef) const {
+ VD = VD->getMostRecentDecl();
+ if (VD->hasAttr<ConstInitAttr>())
+ return true;
+
+ // All later checks examine the initializer specified on the variable. If
+ // the variable is weak, such examination would not be correct.
+ if (!InspectInitForWeakDef && (VD->isWeak() || VD->hasAttr<SelectAnyAttr>()))
+ return false;
+
+ const VarDecl *InitDecl = VD->getInitializingDeclaration();
+ if (!InitDecl)
+ return false;
+
+ // If there's no initializer to run, this is constant initialization.
+ if (!InitDecl->hasInit())
+ return true;
+
+ // If we have the only definition, we don't need a thread wrapper if we
+ // will emit the value as a constant.
+ if (isUniqueGVALinkage(getContext().GetGVALinkageForVariable(VD)))
+ return !mayNeedDestruction(VD) && InitDecl->evaluateValue();
+
+ // Otherwise, we need a thread wrapper unless we know that every
+ // translation unit will emit the value as a constant. We rely on the
+ // variable being constant-initialized in every translation unit if it's
+ // constant-initialized in any translation unit, which isn't actually
+ // guaranteed by the standard but is necessary for sanity.
+ return InitDecl->hasConstantInitialization();
+}
+
void CGCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
RValue RV, QualType ResultType) {
assert(!CGF.hasAggregateEvaluationKind(ResultType) &&
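
mayNeedDestruction() and isEmittedWithConstantInitializer() now live in the common CGCXXABI base (they appear to be hoisted out of ABI-specific code so both ABIs can share them; that motivation is an assumption). Together they decide whether a variable, typically a thread_local, can skip its thread wrapper or guarded dynamic initialization. A hedged C++20 illustration; compute() is hypothetical:

    int compute();

    // Constant-initialized in every translation unit: no wrapper or guard needed.
    thread_local constinit int fast = 42;

    // Runtime-initialized: still needs the thread wrapper and dynamic initialization.
    thread_local int slow = compute();
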
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h
index ea839db7528e..b96222b3ce28 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h
@@ -31,7 +31,6 @@ class CXXConstructorDecl;
class CXXDestructorDecl;
class CXXMethodDecl;
class CXXRecordDecl;
-class FieldDecl;
class MangleContext;
namespace CodeGen {
@@ -80,6 +79,18 @@ protected:
ASTContext &getContext() const { return CGM.getContext(); }
+ bool mayNeedDestruction(const VarDecl *VD) const;
+
+ /// Determine whether we will definitely emit this variable with a constant
+ /// initializer, either because the language semantics demand it or because
+ /// we know that the initializer is a constant.
+  // For weak definitions, any initializer available in the current translation
+  // unit is not necessarily reflective of the initializer used; such
+  // initializers are ignored unless InspectInitForWeakDef is true.
+ bool
+ isEmittedWithConstantInitializer(const VarDecl *VD,
+ bool InspectInitForWeakDef = false) const;
+
virtual bool requiresArrayCookie(const CXXDeleteExpr *E, QualType eltType);
virtual bool requiresArrayCookie(const CXXNewExpr *E);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
index d70f78fea6b4..a37ff8844e88 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
@@ -1892,7 +1892,7 @@ void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
}
void CodeGenModule::addDefaultFunctionDefinitionAttributes(llvm::Function &F) {
- llvm::AttrBuilder FuncAttrs;
+ llvm::AttrBuilder FuncAttrs(F.getContext());
getDefaultFunctionAttributes(F.getName(), F.hasOptNone(),
/* AttrOnCallSite = */ false, FuncAttrs);
// TODO: call GetCPUAndFeaturesAttributes?
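
This hunk and many of the ones below adapt to the LLVM 14 change that ties llvm::AttrBuilder to an LLVMContext at construction instead of allowing a default-constructed builder. The pattern in isolation, sketched under that assumption:

    #include "llvm/IR/Attributes.h"
    #include "llvm/IR/LLVMContext.h"

    llvm::AttributeSet makeNoUnwindSet(llvm::LLVMContext &Ctx) {
      llvm::AttrBuilder B(Ctx);                  // was: llvm::AttrBuilder B;
      B.addAttribute(llvm::Attribute::NoUnwind);
      return llvm::AttributeSet::get(Ctx, B);    // same shape as the hunks below
    }
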
@@ -2014,8 +2014,8 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
llvm::AttributeList &AttrList,
unsigned &CallingConv,
bool AttrOnCallSite, bool IsThunk) {
- llvm::AttrBuilder FuncAttrs;
- llvm::AttrBuilder RetAttrs;
+ llvm::AttrBuilder FuncAttrs(getLLVMContext());
+ llvm::AttrBuilder RetAttrs(getLLVMContext());
// Collect function IR attributes from the CC lowering.
// We'll collect the parameter and result attributes later.
@@ -2243,7 +2243,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
getLangOpts().Sanitize.has(SanitizerKind::Return);
// Determine if the return type could be partially undef
- if (CodeGenOpts.EnableNoundefAttrs && HasStrictReturn) {
+ if (!CodeGenOpts.DisableNoundefAttrs && HasStrictReturn) {
if (!RetTy->isVoidType() && RetAI.getKind() != ABIArgInfo::Indirect &&
DetermineNoUndef(RetTy, getTypes(), DL, RetAI))
RetAttrs.addAttribute(llvm::Attribute::NoUndef);
@@ -2302,7 +2302,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
// Attach attributes to sret.
if (IRFunctionArgs.hasSRetArg()) {
- llvm::AttrBuilder SRETAttrs;
+ llvm::AttrBuilder SRETAttrs(getLLVMContext());
SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
hasUsedSRet = true;
if (RetAI.getInReg())
@@ -2314,7 +2314,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
// Attach attributes to inalloca argument.
if (IRFunctionArgs.hasInallocaArg()) {
- llvm::AttrBuilder Attrs;
+ llvm::AttrBuilder Attrs(getLLVMContext());
Attrs.addInAllocaAttr(FI.getArgStruct());
ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
llvm::AttributeSet::get(getLLVMContext(), Attrs);
@@ -2329,7 +2329,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
assert(IRArgs.second == 1 && "Expected only a single `this` pointer.");
- llvm::AttrBuilder Attrs;
+ llvm::AttrBuilder Attrs(getLLVMContext());
QualType ThisTy =
FI.arg_begin()->type.castAs<PointerType>()->getPointeeType();
@@ -2364,7 +2364,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
I != E; ++I, ++ArgNo) {
QualType ParamType = I->type;
const ABIArgInfo &AI = I->info;
- llvm::AttrBuilder Attrs;
+ llvm::AttrBuilder Attrs(getLLVMContext());
// Add attribute for padding argument, if necessary.
if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
@@ -2372,14 +2372,15 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
llvm::AttributeSet::get(
getLLVMContext(),
- llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
+ llvm::AttrBuilder(getLLVMContext()).addAttribute(llvm::Attribute::InReg));
}
}
// Decide whether the argument we're handling could be partially undef
- bool ArgNoUndef = DetermineNoUndef(ParamType, getTypes(), DL, AI);
- if (CodeGenOpts.EnableNoundefAttrs && ArgNoUndef)
+ if (!CodeGenOpts.DisableNoundefAttrs &&
+ DetermineNoUndef(ParamType, getTypes(), DL, AI)) {
Attrs.addAttribute(llvm::Attribute::NoUndef);
+ }
// 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
// have the corresponding parameter variable. It doesn't make
@@ -2519,8 +2520,8 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
unsigned FirstIRArg, NumIRArgs;
std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
for (unsigned i = 0; i < NumIRArgs; i++)
- ArgAttrs[FirstIRArg + i] =
- llvm::AttributeSet::get(getLLVMContext(), Attrs);
+ ArgAttrs[FirstIRArg + i] = ArgAttrs[FirstIRArg + i].addAttributes(
+ getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), Attrs));
}
}
assert(ArgNo == FI.arg_size());
@@ -2747,11 +2748,11 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
QualType ETy = ArrTy->getElementType();
llvm::Align Alignment =
CGM.getNaturalTypeAlignment(ETy).getAsAlign();
- AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
+ AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
uint64_t ArrSize = ArrTy->getSize().getZExtValue();
if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
ArrSize) {
- llvm::AttrBuilder Attrs;
+ llvm::AttrBuilder Attrs(getLLVMContext());
Attrs.addDereferenceableAttr(
getContext().getTypeSizeInChars(ETy).getQuantity() *
ArrSize);
@@ -2771,7 +2772,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
QualType ETy = ArrTy->getElementType();
llvm::Align Alignment =
CGM.getNaturalTypeAlignment(ETy).getAsAlign();
- AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
+ AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
if (!getContext().getTargetAddressSpace(ETy) &&
!CGM.getCodeGenOpts().NullPointerIsValid)
AI->addAttr(llvm::Attribute::NonNull);
@@ -2793,7 +2794,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
- AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(
+ AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(
llvm::Align(AlignmentInt)));
}
}
@@ -3879,9 +3880,8 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
}
// Create the temporary.
- Address temp = CGF.CreateTempAlloca(destType->getElementType(),
- CGF.getPointerAlign(),
- "icr.temp");
+ Address temp = CGF.CreateTempAlloca(destType->getPointerElementType(),
+ CGF.getPointerAlign(), "icr.temp");
// Loading an l-value can introduce a cleanup if the l-value is __weak,
// and that cleanup will be conditional if we can't prove that the l-value
// isn't null, so we need to register a dominating point so that the cleanups
@@ -3891,9 +3891,8 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
// Zero-initialize it if we're not doing a copy-initialization.
bool shouldCopy = CRE->shouldCopy();
if (!shouldCopy) {
- llvm::Value *null =
- llvm::ConstantPointerNull::get(
- cast<llvm::PointerType>(destType->getElementType()));
+ llvm::Value *null = llvm::ConstantPointerNull::get(
+ cast<llvm::PointerType>(destType->getPointerElementType()));
CGF.Builder.CreateStore(null, temp);
}
@@ -3935,7 +3934,7 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
assert(srcRV.isScalar());
llvm::Value *src = srcRV.getScalarVal();
- src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
+ src = CGF.Builder.CreateBitCast(src, destType->getPointerElementType(),
"icr.cast");
// Use an ordinary store, not a store-to-lvalue.
@@ -5074,8 +5073,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
#ifndef NDEBUG
// Assert that these structs have equivalent element types.
llvm::StructType *FullTy = CallInfo.getArgStruct();
- llvm::StructType *DeclaredTy = cast<llvm::StructType>(
- cast<llvm::PointerType>(LastParamTy)->getElementType());
+ llvm::StructType *DeclaredTy =
+ cast<llvm::StructType>(LastParamTy->getPointerElementType());
assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
DE = DeclaredTy->element_end(),
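
The pointer rewrites in this file, getElementType() becoming getPointerElementType() or getNonOpaquePointerElementType() and Address values carrying an explicit element type, are part of the migration toward LLVM's opaque pointers, where a pointer value no longer exposes its pointee type. The recurring pattern, sketched in isolation:

    #include "llvm/IR/IRBuilder.h"

    // Pass the element type alongside the pointer instead of asking the
    // pointer for it, so the code still works once pointers are opaque.
    llvm::Value *loadElement(llvm::IRBuilder<> &B, llvm::Value *Ptr,
                             llvm::Type *EltTy) {
      return B.CreateLoad(EltTy, Ptr);
    }
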
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCall.h b/contrib/llvm-project/clang/lib/CodeGen/CGCall.h
index c8594068c3fc..af63e1bddd2d 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCall.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCall.h
@@ -26,17 +26,13 @@
#include "ABIInfo.h"
namespace llvm {
-class AttributeList;
-class Function;
class Type;
class Value;
} // namespace llvm
namespace clang {
-class ASTContext;
class Decl;
class FunctionDecl;
-class ObjCMethodDecl;
class VarDecl;
namespace CodeGen {
@@ -49,11 +45,11 @@ class CGCalleeInfo {
GlobalDecl CalleeDecl;
public:
- explicit CGCalleeInfo() : CalleeProtoTy(nullptr), CalleeDecl() {}
+ explicit CGCalleeInfo() : CalleeProtoTy(nullptr) {}
CGCalleeInfo(const FunctionProtoType *calleeProtoTy, GlobalDecl calleeDecl)
: CalleeProtoTy(calleeProtoTy), CalleeDecl(calleeDecl) {}
CGCalleeInfo(const FunctionProtoType *calleeProtoTy)
- : CalleeProtoTy(calleeProtoTy), CalleeDecl() {}
+ : CalleeProtoTy(calleeProtoTy) {}
CGCalleeInfo(GlobalDecl calleeDecl)
: CalleeProtoTy(nullptr), CalleeDecl(calleeDecl) {}
@@ -116,7 +112,8 @@ public:
assert(functionPtr && "configuring callee without function pointer");
assert(functionPtr->getType()->isPointerTy());
assert(functionPtr->getType()->isOpaquePointerTy() ||
- functionPtr->getType()->getPointerElementType()->isFunctionTy());
+ functionPtr->getType()->getNonOpaquePointerElementType()
+ ->isFunctionTy());
}
static CGCallee forBuiltin(unsigned builtinID,
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp
index 8f99ff0d50ff..520e119ada26 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp
@@ -390,7 +390,7 @@ Address CodeGenFunction::GetAddressOfBaseClass(
llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result");
PHI->addIncoming(Value.getPointer(), notNullBB);
PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB);
- Value = Address(PHI, Value.getAlignment());
+ Value = Value.withPointer(PHI);
}
return Value;
@@ -1983,7 +1983,7 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
CharUnits eltAlignment =
arrayBase.getAlignment()
.alignmentOfArrayElement(getContext().getTypeSizeInChars(type));
- Address curAddr = Address(cur, eltAlignment);
+ Address curAddr = Address(cur, elementType, eltAlignment);
// Zero initialize the storage, if requested.
if (zeroInitialize)
@@ -2852,9 +2852,8 @@ llvm::Value *CodeGenFunction::EmitVTableTypeCheckedLoad(
SanitizerHandler::CFICheckFail, {}, {});
}
- return Builder.CreateBitCast(
- Builder.CreateExtractValue(CheckedLoad, 0),
- cast<llvm::PointerType>(VTable->getType())->getElementType());
+ return Builder.CreateBitCast(Builder.CreateExtractValue(CheckedLoad, 0),
+ VTable->getType()->getPointerElementType());
}
void CodeGenFunction::EmitForwardingCallToLambda(
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h b/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h
index 76f3a48f32f3..079a3e25d6dc 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h
@@ -23,7 +23,6 @@ namespace llvm {
class BasicBlock;
class Value;
class ConstantInt;
-class AllocaInst;
}
namespace clang {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp
index 2041d2a5b4c9..c1763cbbc5a0 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp
@@ -707,6 +707,10 @@ void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) {
if (Stmt *Ret = S.getReturnStmt())
EmitStmt(Ret);
+
+  // LLVM requires the frontend to add the function attribute. See
+ // Coroutines.rst.
+ CurFn->addFnAttr("coroutine.presplit", "0");
}
// Emit coroutine intrinsic and patch up arguments of the token type.
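
EmitCoroutineBody() now tags every emitted coroutine with the "coroutine.presplit" function attribute that the LLVM coroutine passes expect on not-yet-split coroutines (see Coroutines.rst). Any C++20 coroutine picks it up; a minimal example:

    #include <coroutine>

    struct task {
      struct promise_type {
        task get_return_object() { return {}; }
        std::suspend_never initial_suspend() { return {}; }
        std::suspend_never final_suspend() noexcept { return {}; }
        void return_void() {}
        void unhandled_exception() {}
      };
    };

    // The IR for demo() carries coroutine.presplit="0" until CoroSplit runs.
    task demo() { co_return; }
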
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp
index 6e189a61dd20..1a9080604a79 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -354,13 +354,9 @@ CGDebugInfo::computeChecksum(FileID FID, SmallString<32> &Checksum) const {
if (!MemBuffer)
return None;
- llvm::MD5 Hash;
- llvm::MD5::MD5Result Result;
-
- Hash.update(MemBuffer->getBuffer());
- Hash.final(Result);
-
- Hash.stringifyResult(Result, Checksum);
+ llvm::toHex(
+ llvm::MD5::hash(llvm::arrayRefFromStringRef(MemBuffer->getBuffer())),
+ /*LowerCase*/ true, Checksum);
return llvm::DIFile::CSK_MD5;
}
@@ -722,7 +718,7 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
auto *LowerBound =
llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(
llvm::Type::getInt64Ty(CGM.getLLVMContext()), 0));
- SmallVector<int64_t, 9> Expr(
+ SmallVector<uint64_t, 9> Expr(
{llvm::dwarf::DW_OP_constu, NumElemsPerVG, llvm::dwarf::DW_OP_bregx,
/* AArch64::VG */ 46, 0, llvm::dwarf::DW_OP_mul,
llvm::dwarf::DW_OP_constu, 1, llvm::dwarf::DW_OP_minus});
@@ -768,7 +764,7 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
}
// Element count = (VLENB / SEW) x LMUL
- SmallVector<int64_t, 12> Expr(
+ SmallVector<uint64_t, 12> Expr(
// The DW_OP_bregx operation has two operands: a register which is
// specified by an unsigned LEB128 number, followed by a signed LEB128
// offset.
@@ -3690,7 +3686,7 @@ void CGDebugInfo::CollectContainingType(const CXXRecordDecl *RD,
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
if (const CXXRecordDecl *PBase = RL.getPrimaryBase()) {
// Seek non-virtual primary base root.
- while (1) {
+ while (true) {
const ASTRecordLayout &BRL = CGM.getContext().getASTRecordLayout(PBase);
const CXXRecordDecl *PBT = BRL.getPrimaryBase();
if (PBT && !BRL.isPrimaryBaseVirtual())
@@ -4325,7 +4321,7 @@ void CGDebugInfo::CreateLexicalBlock(SourceLocation Loc) {
}
void CGDebugInfo::AppendAddressSpaceXDeref(
- unsigned AddressSpace, SmallVectorImpl<int64_t> &Expr) const {
+ unsigned AddressSpace, SmallVectorImpl<uint64_t> &Expr) const {
Optional<unsigned> DWARFAddressSpace =
CGM.getTarget().getDWARFAddressSpace(AddressSpace);
if (!DWARFAddressSpace)
@@ -4494,7 +4490,7 @@ llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const VarDecl *VD,
Line = getLineNumber(VD->getLocation());
Column = getColumnNumber(VD->getLocation());
}
- SmallVector<int64_t, 13> Expr;
+ SmallVector<uint64_t, 13> Expr;
llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
if (VD->isImplicit())
Flags |= llvm::DINode::FlagArtificial;
@@ -4720,7 +4716,7 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
target.getStructLayout(blockInfo.StructureType)
->getElementOffset(blockInfo.getCapture(VD).getIndex()));
- SmallVector<int64_t, 9> addr;
+ SmallVector<uint64_t, 9> addr;
addr.push_back(llvm::dwarf::DW_OP_deref);
addr.push_back(llvm::dwarf::DW_OP_plus_uconst);
addr.push_back(offset.getQuantity());
@@ -5191,7 +5187,7 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
} else {
auto Align = getDeclAlignIfRequired(D, CGM.getContext());
- SmallVector<int64_t, 4> Expr;
+ SmallVector<uint64_t, 4> Expr;
unsigned AddressSpace =
CGM.getContext().getTargetAddressSpace(D->getType());
if (CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h
index 14ff0eeabd21..a76426e585c8 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h
@@ -40,7 +40,6 @@ class ClassTemplateSpecializationDecl;
class GlobalDecl;
class ModuleMap;
class ObjCInterfaceDecl;
-class ObjCIvarDecl;
class UsingDecl;
class VarDecl;
enum class DynamicInitKind : unsigned;
@@ -363,7 +362,7 @@ class CGDebugInfo {
/// The extended dereferencing mechanism has the following format:
/// DW_OP_constu <DWARF Address Space> DW_OP_swap DW_OP_xderef
void AppendAddressSpaceXDeref(unsigned AddressSpace,
- SmallVectorImpl<int64_t> &Expr) const;
+ SmallVectorImpl<uint64_t> &Expr) const;
/// A helper function to collect debug info for the default elements of a
/// block.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp
index e09279c1d455..18d658436086 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp
@@ -1392,9 +1392,11 @@ void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
else {
// Create an artificial VarDecl to generate debug info for.
IdentifierInfo *NameIdent = VLAExprNames[NameIdx++];
- auto VlaExprTy = VlaSize.NumElts->getType()->getPointerElementType();
+ assert(cast<llvm::PointerType>(VlaSize.NumElts->getType())
+ ->isOpaqueOrPointeeTypeMatches(SizeTy) &&
+ "Number of VLA elements must be SizeTy");
auto QT = getContext().getIntTypeForBitwidth(
- VlaExprTy->getScalarSizeInBits(), false);
+ SizeTy->getScalarSizeInBits(), false);
auto *ArtificialDecl = VarDecl::Create(
getContext(), const_cast<DeclContext *>(D.getDeclContext()),
D.getLocation(), D.getLocation(), NameIdent, QT,
@@ -2250,16 +2252,17 @@ void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
// Shift the address back by one element.
llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
+ llvm::Type *llvmElementType = ConvertTypeForMem(elementType);
llvm::Value *element = Builder.CreateInBoundsGEP(
- elementPast->getType()->getPointerElementType(), elementPast, negativeOne,
- "arraydestroy.element");
+ llvmElementType, elementPast, negativeOne, "arraydestroy.element");
if (useEHCleanup)
pushRegularPartialArrayCleanup(begin, element, elementType, elementAlign,
destroyer);
// Perform the actual destruction there.
- destroyer(*this, Address(element, elementAlign), elementType);
+ destroyer(*this, Address(element, llvmElementType, elementAlign),
+ elementType);
if (useEHCleanup)
PopCleanupBlock();
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp
index 3579761f1429..7b880c1354e1 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp
@@ -136,6 +136,7 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
}
// Otherwise, the standard logic requires a helper function.
} else {
+ Addr = Addr.getElementBitCast(CGF.ConvertTypeForMem(Type));
Func = CodeGenFunction(CGM)
.generateDestroyHelper(Addr, Type, CGF.getDestroyer(DtorKind),
CGF.needsEHCleanup(DtorKind), &D);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp
index 34b4951a7f72..0fb7ec26a85e 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp
@@ -1931,7 +1931,7 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
if (LV.isMatrixElt()) {
llvm::Value *Idx = LV.getMatrixIdx();
if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
- const auto *const MatTy = LV.getType()->getAs<ConstantMatrixType>();
+ const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>();
llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
}
@@ -2077,7 +2077,7 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
if (Dst.isMatrixElt()) {
llvm::Value *Idx = Dst.getMatrixIdx();
if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
- const auto *const MatTy = Dst.getType()->getAs<ConstantMatrixType>();
+ const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
}
@@ -3178,7 +3178,7 @@ static void emitCheckHandlerCall(CodeGenFunction &CGF,
bool MayReturn =
!IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
- llvm::AttrBuilder B;
+ llvm::AttrBuilder B(CGF.getLLVMContext());
if (!MayReturn) {
B.addAttribute(llvm::Attribute::NoReturn)
.addAttribute(llvm::Attribute::NoUnwind);
@@ -4699,12 +4699,9 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
if (LV.isSimple()) {
Address V = LV.getAddress(*this);
if (V.isValid()) {
- llvm::Type *T =
- ConvertTypeForMem(E->getType())
- ->getPointerTo(
- cast<llvm::PointerType>(V.getType())->getAddressSpace());
- if (V.getType() != T)
- LV.setAddress(Builder.CreateBitCast(V, T));
+ llvm::Type *T = ConvertTypeForMem(E->getType());
+ if (V.getElementType() != T)
+ LV.setAddress(Builder.CreateElementBitCast(V, T));
}
}
return LV;
@@ -4763,8 +4760,9 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
CGM.EmitExplicitCastExprType(CE, this);
LValue LV = EmitLValue(E->getSubExpr());
- Address V = Builder.CreateBitCast(LV.getAddress(*this),
- ConvertType(CE->getTypeAsWritten()));
+ Address V = Builder.CreateElementBitCast(
+ LV.getAddress(*this),
+ ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));
if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
EmitVTablePtrCheckForCast(E->getType(), V.getPointer(),
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp
index 3b996b89a1d7..0968afd82064 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp
@@ -614,8 +614,8 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
// every temporary created in a default argument is sequenced before
// the construction of the next array element, if any
CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
- LValue elementLV =
- CGF.MakeAddrLValue(Address(currentElement, elementAlign), elementType);
+ LValue elementLV = CGF.MakeAddrLValue(
+ Address(currentElement, llvmElementType, elementAlign), elementType);
if (filler)
EmitInitializationToLValue(filler, elementLV);
else
@@ -1801,6 +1801,7 @@ void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
CharUnits elementAlign =
destPtr.getAlignment().alignmentOfArrayElement(elementSize);
+ llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);
llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
@@ -1810,8 +1811,8 @@ void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
llvm::PHINode *index =
Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
index->addIncoming(zero, entryBB);
- llvm::Value *element = Builder.CreateInBoundsGEP(
- begin->getType()->getPointerElementType(), begin, index);
+ llvm::Value *element =
+ Builder.CreateInBoundsGEP(llvmElementType, begin, index);
// Prepare for a cleanup.
QualType::DestructionKind dtorKind = elementType.isDestructedType();
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp
index e32462eb635c..4e8933fffe03 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp
@@ -1613,8 +1613,9 @@ ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
if (GlobalConstStr->getType()->getPointerAddressSpace() == ExprAS)
return GlobalConstStr;
- llvm::Type *EltTy = GlobalConstStr->getType()->getPointerElementType();
- llvm::PointerType *NewPtrTy = llvm::PointerType::get(EltTy, ExprAS);
+ llvm::PointerType *PtrTy = cast<llvm::PointerType>(GlobalConstStr->getType());
+ llvm::PointerType *NewPtrTy =
+ llvm::PointerType::getWithSamePointeeType(PtrTy, ExprAS);
return Builder.CreateAddrSpaceCast(GlobalConstStr, NewPtrTy, "usn_addr_cast");
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp
index b5bcf157036d..8cc609186f9e 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp
@@ -847,7 +847,7 @@ static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
static bool hasUnalignedAtomics(llvm::Triple::ArchType arch) {
// FIXME: Allow unaligned atomic load/store on x86. (It is not
// currently supported by the backend.)
- return 0;
+ return false;
}
/// Return the maximum size that permits atomic accesses for the given
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
index b2bf60d2c0fc..52b449090868 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -2347,9 +2347,10 @@ llvm::Value *CGObjCGNU::GetTypedSelector(CodeGenFunction &CGF, Selector Sel,
}
}
if (!SelValue) {
- SelValue = llvm::GlobalAlias::create(
- SelectorTy->getElementType(), 0, llvm::GlobalValue::PrivateLinkage,
- ".objc_selector_" + Sel.getAsString(), &TheModule);
+ SelValue = llvm::GlobalAlias::create(SelectorTy->getPointerElementType(), 0,
+ llvm::GlobalValue::PrivateLinkage,
+ ".objc_selector_" + Sel.getAsString(),
+ &TheModule);
Types.emplace_back(TypeEncoding, SelValue);
}
@@ -2576,14 +2577,16 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
if (IsClassMessage) {
if (!MetaClassPtrAlias) {
MetaClassPtrAlias = llvm::GlobalAlias::create(
- IdTy->getElementType(), 0, llvm::GlobalValue::InternalLinkage,
+ IdTy->getPointerElementType(), 0,
+ llvm::GlobalValue::InternalLinkage,
".objc_metaclass_ref" + Class->getNameAsString(), &TheModule);
}
ReceiverClass = MetaClassPtrAlias;
} else {
if (!ClassPtrAlias) {
ClassPtrAlias = llvm::GlobalAlias::create(
- IdTy->getElementType(), 0, llvm::GlobalValue::InternalLinkage,
+ IdTy->getPointerElementType(), 0,
+ llvm::GlobalValue::InternalLinkage,
".objc_class_ref" + Class->getNameAsString(), &TheModule);
}
ReceiverClass = ClassPtrAlias;
@@ -3706,7 +3709,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
GenerateProtocolHolderCategory();
llvm::StructType *selStructTy =
- dyn_cast<llvm::StructType>(SelectorTy->getElementType());
+ dyn_cast<llvm::StructType>(SelectorTy->getPointerElementType());
llvm::Type *selStructPtrTy = SelectorTy;
if (!selStructTy) {
selStructTy = llvm::StructType::get(CGM.getLLVMContext(),
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp
index 425d1a793439..e7dba4c8feab 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp
@@ -2138,16 +2138,7 @@ CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
const ObjCCommonTypesHelper &ObjCTypes) {
CodeGenTypes &Types = CGM.getTypes();
auto selTy = CGF.getContext().getObjCSelType();
- llvm::Value *SelValue;
-
- if (Method && Method->isDirectMethod()) {
- // Direct methods will synthesize the proper `_cmd` internally,
- // so just don't bother with setting the `_cmd` argument.
- assert(!IsSuper);
- SelValue = llvm::UndefValue::get(Types.ConvertType(selTy));
- } else {
- SelValue = GetSelector(CGF, Sel);
- }
+ llvm::Value *SelValue = llvm::UndefValue::get(Types.ConvertType(selTy));
CallArgList ActualArgs;
if (!IsSuper)
@@ -2168,10 +2159,15 @@ CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
canMessageReceiverBeNull(CGF, Method, IsSuper, ClassReceiver, Arg0);
bool RequiresNullCheck = false;
+ bool RequiresSelValue = true;
llvm::FunctionCallee Fn = nullptr;
if (Method && Method->isDirectMethod()) {
+ assert(!IsSuper);
Fn = GenerateDirectMethod(Method, Method->getClassInterface());
+ // Direct methods will synthesize the proper `_cmd` internally,
+ // so just don't bother with setting the `_cmd` argument.
+ RequiresSelValue = false;
} else if (CGM.ReturnSlotInterferesWithArgs(MSI.CallInfo)) {
if (ReceiverCanBeNull) RequiresNullCheck = true;
Fn = (ObjCABI == 2) ? ObjCTypes.getSendStretFn2(IsSuper)
@@ -2209,6 +2205,12 @@ CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
nullReturn.init(CGF, Arg0);
}
+ // If a selector value needs to be passed, emit the load before the call.
+ if (RequiresSelValue) {
+ SelValue = GetSelector(CGF, Sel);
+ ActualArgs[1] = CallArg(RValue::get(SelValue), selTy);
+ }
+
llvm::CallBase *CallSite;
CGCallee Callee = CGCallee::forDirect(BitcastFn);
RValue rvalue = CGF.EmitCall(MSI.CallInfo, Callee, Return, ActualArgs,
@@ -2487,7 +2489,7 @@ void CGObjCCommonMac::BuildRCRecordLayout(const llvm::StructLayout *RecLayout,
if (FQT->isUnionType())
HasUnion = true;
- BuildRCBlockVarRecordLayout(FQT->getAs<RecordType>(),
+ BuildRCBlockVarRecordLayout(FQT->castAs<RecordType>(),
BytePos + FieldOffset, HasUnion);
continue;
}
@@ -2935,8 +2937,7 @@ CGObjCCommonMac::BuildRCBlockLayout(CodeGenModule &CGM,
std::string CGObjCCommonMac::getRCBlockLayoutStr(CodeGenModule &CGM,
const CGBlockInfo &blockInfo) {
fillRunSkipBlockVars(CGM, blockInfo);
- return getBlockLayoutInfoString(RunSkipBlockVars,
- blockInfo.needsCopyDisposeHelpers());
+ return getBlockLayoutInfoString(RunSkipBlockVars, blockInfo.NeedsCopyDispose);
}
llvm::Constant *CGObjCCommonMac::BuildByrefLayout(CodeGen::CodeGenModule &CGM,
@@ -4370,7 +4371,11 @@ FragileHazards::FragileHazards(CodeGenFunction &CGF) : CGF(CGF) {
void FragileHazards::emitWriteHazard() {
if (Locals.empty()) return;
- CGF.EmitNounwindRuntimeCall(WriteHazard, Locals);
+ llvm::CallInst *Call = CGF.EmitNounwindRuntimeCall(WriteHazard, Locals);
+ for (auto Pair : llvm::enumerate(Locals))
+ Call->addParamAttr(Pair.index(), llvm::Attribute::get(
+ CGF.getLLVMContext(), llvm::Attribute::ElementType,
+ cast<llvm::AllocaInst>(Pair.value())->getAllocatedType()));
}
void FragileHazards::emitReadHazard(CGBuilderTy &Builder) {
@@ -4378,6 +4383,10 @@ void FragileHazards::emitReadHazard(CGBuilderTy &Builder) {
llvm::CallInst *call = Builder.CreateCall(ReadHazard, Locals);
call->setDoesNotThrow();
call->setCallingConv(CGF.getRuntimeCC());
+ for (auto Pair : llvm::enumerate(Locals))
+ call->addParamAttr(Pair.index(), llvm::Attribute::get(
+ Builder.getContext(), llvm::Attribute::ElementType,
+ cast<llvm::AllocaInst>(Pair.value())->getAllocatedType()));
}
/// Emit read hazards in all the protected blocks, i.e. all the blocks
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index e35c15421520..db1c3ca191ca 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -837,12 +837,11 @@ void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
}
llvm::Value *Size;
llvm::Value *SizeInChars;
- auto *ElemType =
- cast<llvm::PointerType>(OrigAddresses[N].first.getPointer(CGF)->getType())
- ->getElementType();
+ auto *ElemType = OrigAddresses[N].first.getAddress(CGF).getElementType();
auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
if (AsArraySection) {
- Size = CGF.Builder.CreatePtrDiff(OrigAddresses[N].second.getPointer(CGF),
+ Size = CGF.Builder.CreatePtrDiff(ElemType,
+ OrigAddresses[N].second.getPointer(CGF),
OrigAddresses[N].first.getPointer(CGF));
Size = CGF.Builder.CreateNUWAdd(
Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
@@ -1008,7 +1007,8 @@ Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
OriginalBaseLValue);
Address SharedAddr = SharedAddresses[N].first.getAddress(CGF);
llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff(
- BaseLValue.getPointer(CGF), SharedAddr.getPointer());
+ SharedAddr.getElementType(), BaseLValue.getPointer(CGF),
+ SharedAddr.getPointer());
llvm::Value *PrivatePointer =
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
PrivateAddr.getPointer(), SharedAddr.getType());
@@ -1429,24 +1429,25 @@ static StringRef getIdentStringFromSourceLocation(CodeGenFunction &CGF,
llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
SourceLocation Loc,
unsigned Flags) {
+ uint32_t SrcLocStrSize;
llvm::Constant *SrcLocStr;
if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo ||
Loc.isInvalid()) {
- SrcLocStr = OMPBuilder.getOrCreateDefaultSrcLocStr();
+ SrcLocStr = OMPBuilder.getOrCreateDefaultSrcLocStr(SrcLocStrSize);
} else {
- std::string FunctionName = "";
+ std::string FunctionName;
if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl))
FunctionName = FD->getQualifiedNameAsString();
PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
const char *FileName = PLoc.getFilename();
unsigned Line = PLoc.getLine();
unsigned Column = PLoc.getColumn();
- SrcLocStr =
- OMPBuilder.getOrCreateSrcLocStr(FunctionName, FileName, Line, Column);
+ SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(FunctionName, FileName, Line,
+ Column, SrcLocStrSize);
}
unsigned Reserved2Flags = getDefaultLocationReserved2Flags();
- return OMPBuilder.getOrCreateIdent(SrcLocStr, llvm::omp::IdentFlag(Flags),
- Reserved2Flags);
+ return OMPBuilder.getOrCreateIdent(
+ SrcLocStr, SrcLocStrSize, llvm::omp::IdentFlag(Flags), Reserved2Flags);
}
llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
@@ -1457,10 +1458,11 @@ llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
if (CGM.getLangOpts().OpenMPIRBuilder) {
SmallString<128> Buffer;
OMPBuilder.updateToLocation(CGF.Builder.saveIP());
+ uint32_t SrcLocStrSize;
auto *SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(
- getIdentStringFromSourceLocation(CGF, Loc, Buffer));
+ getIdentStringFromSourceLocation(CGF, Loc, Buffer), SrcLocStrSize);
return OMPBuilder.getOrCreateThreadID(
- OMPBuilder.getOrCreateIdent(SrcLocStr));
+ OMPBuilder.getOrCreateIdent(SrcLocStr, SrcLocStrSize));
}
llvm::Value *ThreadID = nullptr;
@@ -3464,8 +3466,7 @@ static bool isAllocatableDecl(const VarDecl *VD) {
return false;
const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
// Use the default allocation.
- return !((AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc ||
- AA->getAllocatorType() == OMPAllocateDeclAttr::OMPNullMemAlloc) &&
+ return !(AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc &&
!AA->getAllocator());
}
@@ -8120,7 +8121,7 @@ private:
.getAddress(CGF);
}
Size = CGF.Builder.CreatePtrDiff(
- CGF.EmitCastToVoidPtr(ComponentLB.getPointer()),
+ CGF.Int8Ty, CGF.EmitCastToVoidPtr(ComponentLB.getPointer()),
CGF.EmitCastToVoidPtr(LB.getPointer()));
break;
}
@@ -8141,7 +8142,7 @@ private:
CombinedInfo.BasePointers.push_back(BP.getPointer());
CombinedInfo.Pointers.push_back(LB.getPointer());
Size = CGF.Builder.CreatePtrDiff(
- CGF.Builder.CreateConstGEP(HB, 1).getPointer(),
+ CGF.Int8Ty, CGF.Builder.CreateConstGEP(HB, 1).getPointer(),
CGF.EmitCastToVoidPtr(LB.getPointer()));
CombinedInfo.Sizes.push_back(
CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, /*isSigned=*/true));
@@ -8967,7 +8968,7 @@ public:
CGF.Builder.CreateConstGEP1_32(HBAddr.getElementType(), HB, /*Idx0=*/1);
llvm::Value *CLAddr = CGF.Builder.CreatePointerCast(LB, CGF.VoidPtrTy);
llvm::Value *CHAddr = CGF.Builder.CreatePointerCast(HAddr, CGF.VoidPtrTy);
- llvm::Value *Diff = CGF.Builder.CreatePtrDiff(CHAddr, CLAddr);
+ llvm::Value *Diff = CGF.Builder.CreatePtrDiff(CGF.Int8Ty, CHAddr, CLAddr);
llvm::Value *Size = CGF.Builder.CreateIntCast(Diff, CGF.Int64Ty,
/*isSigned=*/false);
CombinedInfo.Sizes.push_back(Size);
@@ -9527,8 +9528,9 @@ llvm::Constant *
emitMappingInformation(CodeGenFunction &CGF, llvm::OpenMPIRBuilder &OMPBuilder,
MappableExprsHandler::MappingExprInfo &MapExprs) {
+ uint32_t SrcLocStrSize;
if (!MapExprs.getMapDecl() && !MapExprs.getMapExpr())
- return OMPBuilder.getOrCreateDefaultSrcLocStr();
+ return OMPBuilder.getOrCreateDefaultSrcLocStr(SrcLocStrSize);
SourceLocation Loc;
if (!MapExprs.getMapDecl() && MapExprs.getMapExpr()) {
@@ -9540,7 +9542,7 @@ emitMappingInformation(CodeGenFunction &CGF, llvm::OpenMPIRBuilder &OMPBuilder,
Loc = MapExprs.getMapDecl()->getLocation();
}
- std::string ExprName = "";
+ std::string ExprName;
if (MapExprs.getMapExpr()) {
PrintingPolicy P(CGF.getContext().getLangOpts());
llvm::raw_string_ostream OS(ExprName);
@@ -9551,8 +9553,9 @@ emitMappingInformation(CodeGenFunction &CGF, llvm::OpenMPIRBuilder &OMPBuilder,
}
PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
- return OMPBuilder.getOrCreateSrcLocStr(PLoc.getFilename(), ExprName.c_str(),
- PLoc.getLine(), PLoc.getColumn());
+ return OMPBuilder.getOrCreateSrcLocStr(PLoc.getFilename(), ExprName,
+ PLoc.getLine(), PLoc.getColumn(),
+ SrcLocStrSize);
}
/// Emit the arrays used to pass the captures and map information to the
@@ -10216,8 +10219,7 @@ void CGOpenMPRuntime::emitUDMapperArrayInitOrDel(
llvm::Value *Cond;
if (IsInit) {
// base != begin?
- llvm::Value *BaseIsBegin = MapperCGF.Builder.CreateIsNotNull(
- MapperCGF.Builder.CreatePtrDiff(Base, Begin));
+ llvm::Value *BaseIsBegin = MapperCGF.Builder.CreateICmpNE(Base, Begin);
// IsPtrAndObj?
llvm::Value *PtrAndObjBit = MapperCGF.Builder.CreateAnd(
MapType,
@@ -10581,7 +10583,7 @@ void CGOpenMPRuntime::emitTargetCall(
emitOffloadingArraysArgument(
CGF, Info.BasePointersArray, Info.PointersArray, Info.SizesArray,
Info.MapTypesArray, Info.MapNamesArray, Info.MappersArray, Info,
- {/*ForEndTask=*/false});
+ {/*ForEndCall=*/false});
InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
InputInfo.BasePointersArray =
@@ -11463,7 +11465,7 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
emitOffloadingArraysArgument(
CGF, Info.BasePointersArray, Info.PointersArray, Info.SizesArray,
Info.MapTypesArray, Info.MapNamesArray, Info.MappersArray, Info,
- {/*ForEndTask=*/false});
+ {/*ForEndCall=*/false});
InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
InputInfo.BasePointersArray =
Address(Info.BasePointersArray, CGM.getPointerAlign());
@@ -12213,6 +12215,26 @@ Address CGOpenMPRuntime::getParameterAddress(CodeGenFunction &CGF,
return CGF.GetAddrOfLocalVar(NativeParam);
}
+/// Return allocator value from expression, or return a null allocator (default
+/// when no allocator specified).
+static llvm::Value *getAllocatorVal(CodeGenFunction &CGF,
+ const Expr *Allocator) {
+ llvm::Value *AllocVal;
+ if (Allocator) {
+ AllocVal = CGF.EmitScalarExpr(Allocator);
+ // According to the standard, the original allocator type is an enum
+ // (integer). Convert to pointer type, if required.
+ AllocVal = CGF.EmitScalarConversion(AllocVal, Allocator->getType(),
+ CGF.getContext().VoidPtrTy,
+ Allocator->getExprLoc());
+ } else {
+ // If no allocator specified, it defaults to the null allocator.
+ AllocVal = llvm::Constant::getNullValue(
+ CGF.CGM.getTypes().ConvertType(CGF.getContext().VoidPtrTy));
+ }
+ return AllocVal;
+}
+
Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD) {
if (!VD)
@@ -12249,20 +12271,24 @@ Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
}
llvm::Value *ThreadID = getThreadID(CGF, CVD->getBeginLoc());
const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
- assert(AA->getAllocator() &&
- "Expected allocator expression for non-default allocator.");
- llvm::Value *Allocator = CGF.EmitScalarExpr(AA->getAllocator());
- // According to the standard, the original allocator type is a enum
- // (integer). Convert to pointer type, if required.
- Allocator = CGF.EmitScalarConversion(
- Allocator, AA->getAllocator()->getType(), CGF.getContext().VoidPtrTy,
- AA->getAllocator()->getExprLoc());
- llvm::Value *Args[] = {ThreadID, Size, Allocator};
-
- llvm::Value *Addr =
- CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_alloc),
- Args, getName({CVD->getName(), ".void.addr"}));
+ const Expr *Allocator = AA->getAllocator();
+ llvm::Value *AllocVal = getAllocatorVal(CGF, Allocator);
+ llvm::Value *Alignment =
+ AA->getAlignment()
+ ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(AA->getAlignment()),
+ CGM.SizeTy, /*isSigned=*/false)
+ : nullptr;
+ SmallVector<llvm::Value *, 4> Args;
+ Args.push_back(ThreadID);
+ if (Alignment)
+ Args.push_back(Alignment);
+ Args.push_back(Size);
+ Args.push_back(AllocVal);
+ llvm::omp::RuntimeFunction FnID =
+ Alignment ? OMPRTL___kmpc_aligned_alloc : OMPRTL___kmpc_alloc;
+ llvm::Value *Addr = CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), FnID), Args,
+ getName({CVD->getName(), ".void.addr"}));
llvm::FunctionCallee FiniRTLFn = OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_free);
QualType Ty = CGM.getContext().getPointerType(CVD->getType());
@@ -12276,14 +12302,14 @@ Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
llvm::FunctionCallee RTLFn;
SourceLocation::UIntTy LocEncoding;
Address Addr;
- const Expr *Allocator;
+ const Expr *AllocExpr;
public:
OMPAllocateCleanupTy(llvm::FunctionCallee RTLFn,
SourceLocation::UIntTy LocEncoding, Address Addr,
- const Expr *Allocator)
+ const Expr *AllocExpr)
: RTLFn(RTLFn), LocEncoding(LocEncoding), Addr(Addr),
- Allocator(Allocator) {}
+ AllocExpr(AllocExpr) {}
void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
if (!CGF.HaveInsertPoint())
return;
@@ -12292,14 +12318,8 @@ Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
CGF, SourceLocation::getFromRawEncoding(LocEncoding));
Args[1] = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
Addr.getPointer(), CGF.VoidPtrTy);
- llvm::Value *AllocVal = CGF.EmitScalarExpr(Allocator);
- // According to the standard, the original allocator type is a enum
- // (integer). Convert to pointer type, if required.
- AllocVal = CGF.EmitScalarConversion(AllocVal, Allocator->getType(),
- CGF.getContext().VoidPtrTy,
- Allocator->getExprLoc());
+ llvm::Value *AllocVal = getAllocatorVal(CGF, AllocExpr);
Args[2] = AllocVal;
-
CGF.EmitRuntimeCall(RTLFn, Args);
}
};
@@ -12307,7 +12327,7 @@ Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
UntiedRealAddr.isValid() ? UntiedRealAddr : Address(Addr, Align);
CGF.EHStack.pushCleanup<OMPAllocateCleanupTy>(
NormalAndEHCleanup, FiniRTLFn, CVD->getLocation().getRawEncoding(),
- VDAddr, AA->getAllocator());
+ VDAddr, Allocator);
if (UntiedRealAddr.isValid())
if (auto *Region =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
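
The getAddressOfLocalVariable hunks above select __kmpc_aligned_alloc over __kmpc_alloc whenever the allocate attribute carries an alignment. A hedged source-level sketch of code that should take the new branch, assuming OpenMP 5.1 allocate-directive syntax with the align modifier and compilation with -fopenmp; the function name and sizes are illustrative only.

    #include <omp.h>

    int work() {
      int buf[64];
      // An explicit allocator plus the align modifier is what routes the
      // allocation through the new __kmpc_aligned_alloc path.
      #pragma omp allocate(buf) allocator(omp_default_mem_alloc) align(64)
      for (int i = 0; i < 64; ++i)
        buf[i] = i;
      return buf[0];
    }
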
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h
index b83ec78696d1..19754b0cfacc 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h
@@ -35,7 +35,6 @@ class ArrayType;
class Constant;
class FunctionType;
class GlobalVariable;
-class StructType;
class Type;
class Value;
class OpenMPIRBuilder;
@@ -48,7 +47,6 @@ class OMPExecutableDirective;
class OMPLoopDirective;
class VarDecl;
class OMPDeclareReductionDecl;
-class IdentifierInfo;
namespace CodeGen {
class Address;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
index 866454ddeaed..e09ea5e01b1a 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
@@ -1402,10 +1402,14 @@ void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
// Allocate space for the variable to be globalized
llvm::Value *AllocArgs[] = {CGF.getTypeSize(VD->getType())};
- llvm::Instruction *VoidPtr =
+ llvm::CallBase *VoidPtr =
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_alloc_shared),
AllocArgs, VD->getName());
+ // FIXME: We should use the variable's actual alignment as an argument.
+ VoidPtr->addRetAttr(llvm::Attribute::get(
+ CGM.getLLVMContext(), llvm::Attribute::Alignment,
+ CGM.getContext().getTargetInfo().getNewAlign() / 8));
// Cast the void pointer and get the address of the globalized variable.
llvm::PointerType *VarPtrTy = CGF.ConvertTypeForMem(VarTy)->getPointerTo();
@@ -1438,10 +1442,13 @@ void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
// Allocate space for this VLA object to be globalized.
llvm::Value *AllocArgs[] = {CGF.getTypeSize(VD->getType())};
- llvm::Instruction *VoidPtr =
+ llvm::CallBase *VoidPtr =
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_alloc_shared),
AllocArgs, VD->getName());
+ VoidPtr->addRetAttr(
+ llvm::Attribute::get(CGM.getLLVMContext(), llvm::Attribute::Alignment,
+ CGM.getContext().getTargetInfo().getNewAlign()));
I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(
std::pair<llvm::Value *, llvm::Value *>(
@@ -1791,8 +1798,9 @@ static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
Ptr = Address(PhiSrc, Ptr.getAlignment());
ElemPtr = Address(PhiDest, ElemPtr.getAlignment());
llvm::Value *PtrDiff = Bld.CreatePtrDiff(
- PtrEnd.getPointer(), Bld.CreatePointerBitCastOrAddrSpaceCast(
- Ptr.getPointer(), CGF.VoidPtrTy));
+ CGF.Int8Ty, PtrEnd.getPointer(),
+ Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr.getPointer(),
+ CGF.VoidPtrTy));
Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)),
ThenBB, ExitBB);
CGF.EmitBlock(ThenBB);
@@ -3394,12 +3402,13 @@ CGOpenMPRuntimeGPU::getParameterAddress(CodeGenFunction &CGF,
LocalAddr, /*Volatile=*/false, TargetTy, SourceLocation());
// First cast to generic.
TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
- /*AddrSpace=*/0));
+ TargetAddr, llvm::PointerType::getWithSamePointeeType(
+ cast<llvm::PointerType>(TargetAddr->getType()), /*AddrSpace=*/0));
// Cast from generic to native address space.
TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
- NativePointeeAddrSpace));
+ TargetAddr, llvm::PointerType::getWithSamePointeeType(
+ cast<llvm::PointerType>(TargetAddr->getType()),
+ NativePointeeAddrSpace));
Address NativeParamAddr = CGF.CreateMemTemp(NativeParamType);
CGF.EmitStoreOfScalar(TargetAddr, NativeParamAddr, /*Volatile=*/false,
NativeParamType);
@@ -3424,8 +3433,8 @@ void CGOpenMPRuntimeGPU::emitOutlinedFunctionCall(
continue;
}
llvm::Value *TargetArg = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- NativeArg,
- NativeArg->getType()->getPointerElementType()->getPointerTo());
+ NativeArg, llvm::PointerType::getWithSamePointeeType(
+ cast<llvm::PointerType>(NativeArg->getType()), /*AddrSpace*/ 0));
TargetArgs.emplace_back(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TargetArg, TargetType));
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayout.h b/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayout.h
index e6665b72bcba..5a3bcdf72f7b 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayout.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayout.h
@@ -93,8 +93,8 @@ struct CGBitFieldInfo {
CharUnits VolatileStorageOffset;
CGBitFieldInfo()
- : Offset(), Size(), IsSigned(), StorageSize(), StorageOffset(),
- VolatileOffset(), VolatileStorageSize(), VolatileStorageOffset() {}
+ : Offset(), Size(), IsSigned(), StorageSize(), VolatileOffset(),
+ VolatileStorageSize() {}
CGBitFieldInfo(unsigned Offset, unsigned Size, bool IsSigned,
unsigned StorageSize, CharUnits StorageOffset)
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
index cf8313f92587..6f85bca8a201 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -411,7 +411,7 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
continue;
}
llvm::Type *Type =
- Types.ConvertTypeForMem(Field->getType(), /*ForBitFields=*/true);
+ Types.ConvertTypeForMem(Field->getType(), /*ForBitField=*/true);
// If we don't have a run yet, or don't live within the previous run's
// allocated storage then we allocate some storage and start a new run.
if (Run == FieldEnd || BitOffset >= Tail) {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp
index ef0068cd3b0c..520483bc08b6 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp
@@ -2109,42 +2109,35 @@ AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
}
-llvm::Value*
-CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
- LValue InputValue, QualType InputType,
- std::string &ConstraintStr,
- SourceLocation Loc) {
- llvm::Value *Arg;
+std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
+ const TargetInfo::ConstraintInfo &Info, LValue InputValue,
+ QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
if (Info.allowsRegister() || !Info.allowsMemory()) {
- if (CodeGenFunction::hasScalarEvaluationKind(InputType)) {
- Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal();
- } else {
- llvm::Type *Ty = ConvertType(InputType);
- uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
- if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
- getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
- Ty = llvm::IntegerType::get(getLLVMContext(), Size);
- Ty = llvm::PointerType::getUnqual(Ty);
-
- Arg = Builder.CreateLoad(
- Builder.CreateBitCast(InputValue.getAddress(*this), Ty));
- } else {
- Arg = InputValue.getPointer(*this);
- ConstraintStr += '*';
- }
+ if (CodeGenFunction::hasScalarEvaluationKind(InputType))
+ return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};
+
+ llvm::Type *Ty = ConvertType(InputType);
+ uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
+ if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
+ getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
+ Ty = llvm::IntegerType::get(getLLVMContext(), Size);
+ Ty = llvm::PointerType::getUnqual(Ty);
+
+ return {Builder.CreateLoad(
+ Builder.CreateBitCast(InputValue.getAddress(*this), Ty)),
+ nullptr};
}
- } else {
- Arg = InputValue.getPointer(*this);
- ConstraintStr += '*';
}
- return Arg;
+ Address Addr = InputValue.getAddress(*this);
+ ConstraintStr += '*';
+ return {Addr.getPointer(), Addr.getElementType()};
}
-llvm::Value* CodeGenFunction::EmitAsmInput(
- const TargetInfo::ConstraintInfo &Info,
- const Expr *InputExpr,
- std::string &ConstraintStr) {
+std::pair<llvm::Value *, llvm::Type *>
+CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
+ const Expr *InputExpr,
+ std::string &ConstraintStr) {
// If this can't be a register or memory, i.e., has to be a constant
// (immediate or symbolic), try to emit it as such.
if (!Info.allowsRegister() && !Info.allowsMemory()) {
@@ -2155,19 +2148,20 @@ llvm::Value* CodeGenFunction::EmitAsmInput(
llvm::APSInt IntResult;
if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
getContext()))
- return llvm::ConstantInt::get(getLLVMContext(), IntResult);
+ return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
}
Expr::EvalResult Result;
if (InputExpr->EvaluateAsInt(Result, getContext()))
- return llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt());
+ return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
+ nullptr};
}
if (Info.allowsRegister() || !Info.allowsMemory())
if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
- return EmitScalarExpr(InputExpr);
+ return {EmitScalarExpr(InputExpr), nullptr};
if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
- return EmitScalarExpr(InputExpr);
+ return {EmitScalarExpr(InputExpr), nullptr};
InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
LValue Dest = EmitLValue(InputExpr);
return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
@@ -2209,6 +2203,7 @@ static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
bool HasUnwindClobber, bool ReadOnly,
bool ReadNone, bool NoMerge, const AsmStmt &S,
const std::vector<llvm::Type *> &ResultRegTypes,
+ const std::vector<llvm::Type *> &ArgElemTypes,
CodeGenFunction &CGF,
std::vector<llvm::Value *> &RegResults) {
if (!HasUnwindClobber)
@@ -2224,6 +2219,15 @@ static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
Result.addFnAttr(llvm::Attribute::ReadOnly);
}
+ // Add elementtype attribute for indirect constraints.
+ for (auto Pair : llvm::enumerate(ArgElemTypes)) {
+ if (Pair.value()) {
+ auto Attr = llvm::Attribute::get(
+ CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
+ Result.addParamAttr(Pair.index(), Attr);
+ }
+ }
+
// Slap the source location of the inline asm into a !srcloc metadata on the
// call.
if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
@@ -2291,6 +2295,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
std::vector<llvm::Type *> ResultRegTypes;
std::vector<llvm::Type *> ResultTruncRegTypes;
std::vector<llvm::Type *> ArgTypes;
+ std::vector<llvm::Type *> ArgElemTypes;
std::vector<llvm::Value*> Args;
llvm::BitVector ResultTypeRequiresCast;
@@ -2298,6 +2303,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
std::string InOutConstraints;
std::vector<llvm::Value*> InOutArgs;
std::vector<llvm::Type*> InOutArgTypes;
+ std::vector<llvm::Type*> InOutArgElemTypes;
// Keep track of out constraints for tied input operand.
std::vector<std::string> OutputConstraints;
@@ -2399,21 +2405,19 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
std::max((uint64_t)LargestVectorWidth,
VT->getPrimitiveSizeInBits().getKnownMinSize());
} else {
- llvm::Type *DestAddrTy = Dest.getAddress(*this).getType();
- llvm::Value *DestPtr = Dest.getPointer(*this);
+ Address DestAddr = Dest.getAddress(*this);
// Matrix types in memory are represented by arrays, but accessed through
// vector pointers, with the alignment specified on the access operation.
// For inline assembly, update pointer arguments to use vector pointers.
// Otherwise there will be a mis-match if the matrix is also an
// input-argument which is represented as vector.
- if (isa<MatrixType>(OutExpr->getType().getCanonicalType())) {
- DestAddrTy = llvm::PointerType::get(
- ConvertType(OutExpr->getType()),
- cast<llvm::PointerType>(DestAddrTy)->getAddressSpace());
- DestPtr = Builder.CreateBitCast(DestPtr, DestAddrTy);
- }
- ArgTypes.push_back(DestAddrTy);
- Args.push_back(DestPtr);
+ if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
+ DestAddr = Builder.CreateElementBitCast(
+ DestAddr, ConvertType(OutExpr->getType()));
+
+ ArgTypes.push_back(DestAddr.getType());
+ ArgElemTypes.push_back(DestAddr.getElementType());
+ Args.push_back(DestAddr.getPointer());
Constraints += "=*";
Constraints += OutputConstraint;
ReadOnly = ReadNone = false;
@@ -2423,9 +2427,11 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
InOutConstraints += ',';
const Expr *InputExpr = S.getOutputExpr(i);
- llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(),
- InOutConstraints,
- InputExpr->getExprLoc());
+ llvm::Value *Arg;
+ llvm::Type *ArgElemType;
+ std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
+ Info, Dest, InputExpr->getType(), InOutConstraints,
+ InputExpr->getExprLoc());
if (llvm::Type* AdjTy =
getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
@@ -2444,6 +2450,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
InOutConstraints += OutputConstraint;
InOutArgTypes.push_back(Arg->getType());
+ InOutArgElemTypes.push_back(ArgElemType);
InOutArgs.push_back(Arg);
}
}
@@ -2483,7 +2490,9 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
getTarget(), CGM, S, false /* No EarlyClobber */);
std::string ReplaceConstraint (InputConstraint);
- llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);
+ llvm::Value *Arg;
+ llvm::Type *ArgElemType;
+ std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
// If this input argument is tied to a larger output result, extend the
// input to be the same size as the output. The LLVM backend wants to see
@@ -2528,10 +2537,19 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
VT->getPrimitiveSizeInBits().getKnownMinSize());
ArgTypes.push_back(Arg->getType());
+ ArgElemTypes.push_back(ArgElemType);
Args.push_back(Arg);
Constraints += InputConstraint;
}
+ // Append the "input" part of inout constraints.
+ for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
+ ArgTypes.push_back(InOutArgTypes[i]);
+ ArgElemTypes.push_back(InOutArgElemTypes[i]);
+ Args.push_back(InOutArgs[i]);
+ }
+ Constraints += InOutConstraints;
+
// Labels
SmallVector<llvm::BasicBlock *, 16> Transfer;
llvm::BasicBlock *Fallthrough = nullptr;
@@ -2546,21 +2564,15 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
llvm::BlockAddress::get(CurFn, Dest.getBlock());
Args.push_back(BA);
ArgTypes.push_back(BA->getType());
+ ArgElemTypes.push_back(nullptr);
if (!Constraints.empty())
Constraints += ',';
- Constraints += 'X';
+ Constraints += 'i';
}
Fallthrough = createBasicBlock("asm.fallthrough");
}
}
- // Append the "input" part of inout constraints last.
- for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
- ArgTypes.push_back(InOutArgTypes[i]);
- Args.push_back(InOutArgs[i]);
- }
- Constraints += InOutConstraints;
-
bool HasUnwindClobber = false;
// Clobbers
@@ -2647,18 +2659,18 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
EmitBlock(Fallthrough);
UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
- ResultRegTypes, *this, RegResults);
+ ResultRegTypes, ArgElemTypes, *this, RegResults);
} else if (HasUnwindClobber) {
llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone,
- InNoMergeAttributedStmt, S, ResultRegTypes, *this,
- RegResults);
+ InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
+ *this, RegResults);
} else {
llvm::CallInst *Result =
Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
- ResultRegTypes, *this, RegResults);
+ ResultRegTypes, ArgElemTypes, *this, RegResults);
}
assert(RegResults.size() == ResultRegTypes.size());
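
The EmitAsmInput/EmitAsmInputLValue changes above return the pointee type of indirect operands so that UpdateAsmCallInst can attach the elementtype attribute, which opaque pointers require for indirect constraints. A hedged illustration of such an operand (x86-64 GNU inline asm assumed; the function is made up):

    // "m"(*slot) is an indirect constraint: the asm call now carries an
    // elementtype(i64) attribute on the pointer argument, preserving the
    // pointee type that an opaque pointer no longer encodes.
    unsigned long fetch(unsigned long *slot) {
      unsigned long v;
      asm volatile("movq %1, %0" : "=r"(v) : "m"(*slot));
      return v;
    }
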
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp
index 4c11f7d67534..0db59dd2624c 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -2584,7 +2584,67 @@ static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S,
}
}
+static bool isSupportedByOpenMPIRBuilder(const OMPExecutableDirective &S) {
+ // Check for unsupported clauses
+ if (!S.clauses().empty()) {
+ // Currently no clause is supported
+ return false;
+ }
+
+ // Check if we have a statement with the ordered directive.
+ // Visit the statement hierarchy to find a compound statement
+ // with an ordered directive in it.
+ if (const auto *CanonLoop = dyn_cast<OMPCanonicalLoop>(S.getRawStmt())) {
+ if (const Stmt *SyntacticalLoop = CanonLoop->getLoopStmt()) {
+ for (const Stmt *SubStmt : SyntacticalLoop->children()) {
+ if (!SubStmt)
+ continue;
+ if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(SubStmt)) {
+ for (const Stmt *CSSubStmt : CS->children()) {
+ if (!CSSubStmt)
+ continue;
+ if (isa<OMPOrderedDirective>(CSSubStmt)) {
+ return false;
+ }
+ }
+ }
+ }
+ }
+ }
+ return true;
+}
+
void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
+ bool UseOMPIRBuilder =
+ CGM.getLangOpts().OpenMPIRBuilder && isSupportedByOpenMPIRBuilder(S);
+ if (UseOMPIRBuilder) {
+ auto &&CodeGenIRBuilder = [this, &S, UseOMPIRBuilder](CodeGenFunction &CGF,
+ PrePostActionTy &) {
+ // Use the OpenMPIRBuilder if enabled.
+ if (UseOMPIRBuilder) {
+ // Emit the associated statement and get its loop representation.
+ llvm::DebugLoc DL = SourceLocToDebugLoc(S.getBeginLoc());
+ const Stmt *Inner = S.getRawStmt();
+ llvm::CanonicalLoopInfo *CLI =
+ EmitOMPCollapsedCanonicalLoopNest(Inner, 1);
+
+ llvm::OpenMPIRBuilder &OMPBuilder =
+ CGM.getOpenMPRuntime().getOMPBuilder();
+ // Add SIMD specific metadata
+ OMPBuilder.applySimd(DL, CLI);
+ return;
+ }
+ };
+ {
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
+ OMPLexicalScope Scope(*this, S, OMPD_unknown);
+ CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd,
+ CodeGenIRBuilder);
+ }
+ return;
+ }
+
ParentLoopDirectiveForScanRegion ScanRegion(*this, S);
OMPFirstScanLoop = true;
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
@@ -4460,8 +4520,9 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
CGF.getContext().getASTRecordLayout(CaptureRecord);
unsigned Offset =
Layout.getFieldOffset(It->second->getFieldIndex()) / CharWidth;
- (void)DI->EmitDeclareOfAutoVariable(SharedVar, ContextValue,
- CGF.Builder, false);
+ if (CGF.CGM.getCodeGenOpts().hasReducedDebugInfo())
+ (void)DI->EmitDeclareOfAutoVariable(SharedVar, ContextValue,
+ CGF.Builder, false);
llvm::Instruction &Last = CGF.Builder.GetInsertBlock()->back();
// Get the call dbg.declare instruction we just created and update
// its DIExpression to add offset to base address.
@@ -4560,8 +4621,10 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
CGF.getContext().getDeclAlign(Pair.first));
Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
if (auto *DI = CGF.getDebugInfo())
- DI->EmitDeclareOfAutoVariable(Pair.first, Pair.second.getPointer(),
- CGF.Builder, /*UsePointerValue*/ true);
+ if (CGF.CGM.getCodeGenOpts().hasReducedDebugInfo())
+ (void)DI->EmitDeclareOfAutoVariable(
+ Pair.first, Pair.second.getPointer(), CGF.Builder,
+ /*UsePointerValue*/ true);
}
// Adjust mapping for internal locals by mapping actual memory instead of
// a pointer to this memory.
@@ -6046,6 +6109,7 @@ static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
case OMPC_inbranch:
case OMPC_notinbranch:
case OMPC_link:
+ case OMPC_indirect:
case OMPC_use:
case OMPC_novariants:
case OMPC_nocontext:
@@ -6789,7 +6853,7 @@ void CodeGenFunction::EmitOMPTargetDataDirective(
public:
explicit DevicePointerPrivActionTy(bool &PrivatizeDevicePointers)
- : PrePostActionTy(), PrivatizeDevicePointers(PrivatizeDevicePointers) {}
+ : PrivatizeDevicePointers(PrivatizeDevicePointers) {}
void Enter(CodeGenFunction &CGF) override {
PrivatizeDevicePointers = true;
}
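
The new EmitOMPSimdDirective path above only fires for clause-free simd loops with no enclosed ordered directive. A hedged sketch of what falls on each side of isSupportedByOpenMPIRBuilder, assuming the experimental OpenMPIRBuilder codegen is enabled (e.g. via -fopenmp-enable-irbuilder); the functions are illustrative only.

    void taken(float *a, const float *b, int n) {
    #pragma omp simd            // no clauses, no ordered region: IRBuilder path
      for (int i = 0; i < n; ++i)
        a[i] += b[i];
    }

    void not_taken(float *a, const float *b, int n) {
    #pragma omp simd simdlen(4) // any clause falls back to the classic codegen
      for (int i = 0; i < n; ++i)
        a[i] += b[i];
    }
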
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp
index 482499da1b0f..c839376880c4 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp
@@ -1178,7 +1178,7 @@ bool CodeGenModule::HasLTOVisibilityPublicStd(const CXXRecordDecl *RD) {
return false;
const DeclContext *DC = RD;
- while (1) {
+ while (true) {
auto *D = cast<Decl>(DC);
DC = DC->getParent();
if (isa<TranslationUnitDecl>(DC->getRedeclContext())) {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
index e6adec6948af..50e1638924d1 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -740,7 +740,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
#include "clang/Basic/Sanitizers.def"
#undef SANITIZER
- } while (0);
+ } while (false);
if (D) {
bool NoSanitizeCoverage = false;
@@ -882,6 +882,13 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
if (Offset)
Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
}
+ // Instruct that functions for COFF/CodeView targets should start with a
+ // patchable instruction, but only on x86/x64. Don't forward this to ARM/ARM64
+ // backends as they don't need it -- instructions on these architectures are
+ // always atomically patchable at runtime.
+ if (CGM.getCodeGenOpts().HotPatch &&
+ getContext().getTargetInfo().getTriple().isX86())
+ Fn->addFnAttr("patchable-function", "prologue-short-redirect");
// Add no-jump-tables value.
if (CGM.getCodeGenOpts().NoUseJumpTables)
@@ -1595,9 +1602,9 @@ void CodeGenFunction::EmitBranchToCounterBlock(
if (!InstrumentRegions || !isInstrumentedCondition(Cond))
return EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount, LH);
- llvm::BasicBlock *ThenBlock = NULL;
- llvm::BasicBlock *ElseBlock = NULL;
- llvm::BasicBlock *NextBlock = NULL;
+ llvm::BasicBlock *ThenBlock = nullptr;
+ llvm::BasicBlock *ElseBlock = nullptr;
+ llvm::BasicBlock *NextBlock = nullptr;
// Create the block we'll use to increment the appropriate counter.
llvm::BasicBlock *CounterIncrBlock = createBasicBlock("lop.rhscnt");
@@ -2109,6 +2116,7 @@ llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
// Create the actual GEP.
addr = Address(Builder.CreateInBoundsGEP(
addr.getElementType(), addr.getPointer(), gepIndices, "array.begin"),
+ ConvertTypeForMem(eltType),
addr.getAlignment());
}
@@ -2246,32 +2254,36 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
// Unknown size indication requires no size computation.
// Otherwise, evaluate and record it.
- if (const Expr *size = vat->getSizeExpr()) {
+ if (const Expr *sizeExpr = vat->getSizeExpr()) {
// It's possible that we might have emitted this already,
// e.g. with a typedef and a pointer to it.
- llvm::Value *&entry = VLASizeMap[size];
+ llvm::Value *&entry = VLASizeMap[sizeExpr];
if (!entry) {
- llvm::Value *Size = EmitScalarExpr(size);
+ llvm::Value *size = EmitScalarExpr(sizeExpr);
// C11 6.7.6.2p5:
// If the size is an expression that is not an integer constant
// expression [...] each time it is evaluated it shall have a value
// greater than zero.
- if (SanOpts.has(SanitizerKind::VLABound) &&
- size->getType()->isSignedIntegerType()) {
+ if (SanOpts.has(SanitizerKind::VLABound)) {
SanitizerScope SanScope(this);
- llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
+ llvm::Value *Zero = llvm::Constant::getNullValue(size->getType());
+ clang::QualType SEType = sizeExpr->getType();
+ llvm::Value *CheckCondition =
+ SEType->isSignedIntegerType()
+ ? Builder.CreateICmpSGT(size, Zero)
+ : Builder.CreateICmpUGT(size, Zero);
llvm::Constant *StaticArgs[] = {
- EmitCheckSourceLocation(size->getBeginLoc()),
- EmitCheckTypeDescriptor(size->getType())};
- EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
- SanitizerKind::VLABound),
- SanitizerHandler::VLABoundNotPositive, StaticArgs, Size);
+ EmitCheckSourceLocation(sizeExpr->getBeginLoc()),
+ EmitCheckTypeDescriptor(SEType)};
+ EmitCheck(std::make_pair(CheckCondition, SanitizerKind::VLABound),
+ SanitizerHandler::VLABoundNotPositive, StaticArgs, size);
}
// Always zexting here would be wrong if it weren't
// undefined behavior to have a negative bound.
- entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
+ // FIXME: What about when size's type is larger than size_t?
+ entry = Builder.CreateIntCast(size, SizeTy, /*signed*/ false);
}
}
type = vat->getElementType();
@@ -2694,7 +2706,7 @@ void CodeGenFunction::emitAlignmentAssumptionCheck(
SanitizerScope SanScope(this);
if (!OffsetValue)
- OffsetValue = Builder.getInt1(0); // no offset.
+ OffsetValue = Builder.getInt1(false); // no offset.
llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
EmitCheckSourceLocation(SecondaryLoc),
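
The EmitVariablyModifiedType hunk above extends -fsanitize=vla-bound to unsigned size expressions by choosing a signed or unsigned compare as appropriate. A hedged example of code that is newly instrumented (VLAs are a Clang extension in C++; the function is made up):

    // With the change above, calling fill(0) should now trip the
    // VLABoundNotPositive check even though the bound is unsigned.
    void fill(unsigned n) {
      int buf[n];
      for (unsigned i = 0; i < n; ++i)
        buf[i] = 0;
    }
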
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h
index f76ce8a6400d..6db888dcec08 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h
@@ -46,7 +46,6 @@ namespace llvm {
class BasicBlock;
class LLVMContext;
class MDNode;
-class Module;
class SwitchInst;
class Twine;
class Value;
@@ -55,13 +54,11 @@ class CanonicalLoopInfo;
namespace clang {
class ASTContext;
-class BlockDecl;
class CXXDestructorDecl;
class CXXForRangeStmt;
class CXXTryStmt;
class Decl;
class LabelDecl;
-class EnumConstantDecl;
class FunctionDecl;
class FunctionProtoType;
class LabelStmt;
@@ -80,7 +77,6 @@ class ObjCAtSynchronizedStmt;
class ObjCAutoreleasePoolStmt;
class OMPUseDevicePtrClause;
class OMPUseDeviceAddrClause;
-class ReturnsNonNullAttr;
class SVETypeFlags;
class OMPExecutableDirective;
@@ -92,12 +88,10 @@ namespace CodeGen {
class CodeGenTypes;
class CGCallee;
class CGFunctionInfo;
-class CGRecordLayout;
class CGBlockInfo;
class CGCXXABI;
class BlockByrefHelpers;
class BlockByrefInfo;
-class BlockFlags;
class BlockFieldFlags;
class RegionCodeGenTy;
class TargetCodeGenInfo;
@@ -182,6 +176,7 @@ template <> struct DominatingValue<Address> {
struct saved_type {
DominatingLLVMValue::saved_type SavedValue;
+ llvm::Type *ElementType;
CharUnits Alignment;
};
@@ -190,11 +185,11 @@ template <> struct DominatingValue<Address> {
}
static saved_type save(CodeGenFunction &CGF, type value) {
return { DominatingLLVMValue::save(CGF, value.getPointer()),
- value.getAlignment() };
+ value.getElementType(), value.getAlignment() };
}
static type restore(CodeGenFunction &CGF, saved_type value) {
return Address(DominatingLLVMValue::restore(CGF, value.SavedValue),
- value.Alignment);
+ value.ElementType, value.Alignment);
}
};
@@ -241,11 +236,10 @@ public:
/// A jump destination is an abstract label, branching to which may
/// require a jump out through normal cleanups.
struct JumpDest {
- JumpDest() : Block(nullptr), ScopeDepth(), Index(0) {}
- JumpDest(llvm::BasicBlock *Block,
- EHScopeStack::stable_iterator Depth,
+ JumpDest() : Block(nullptr), Index(0) {}
+ JumpDest(llvm::BasicBlock *Block, EHScopeStack::stable_iterator Depth,
unsigned Index)
- : Block(Block), ScopeDepth(Depth), Index(Index) {}
+ : Block(Block), ScopeDepth(Depth), Index(Index) {}
bool isValid() const { return Block != nullptr; }
llvm::BasicBlock *getBlock() const { return Block; }
@@ -4677,13 +4671,14 @@ private:
SmallVectorImpl<llvm::Value *> &IRCallArgs,
unsigned &IRCallArgPos);
- llvm::Value* EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
- const Expr *InputExpr, std::string &ConstraintStr);
+ std::pair<llvm::Value *, llvm::Type *>
+ EmitAsmInput(const TargetInfo::ConstraintInfo &Info, const Expr *InputExpr,
+ std::string &ConstraintStr);
- llvm::Value* EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
- LValue InputValue, QualType InputType,
- std::string &ConstraintStr,
- SourceLocation Loc);
+ std::pair<llvm::Value *, llvm::Type *>
+ EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info, LValue InputValue,
+ QualType InputType, std::string &ConstraintStr,
+ SourceLocation Loc);
/// Attempts to statically evaluate the object size of E. If that
/// fails, emits code to figure the size of E out for us. This is
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp
index 36b7ce87336c..d534cf182f5a 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp
@@ -565,7 +565,9 @@ void CodeGenModule::Release() {
"__amdgpu_device_library_preserve_asan_functions_ptr", nullptr,
llvm::GlobalVariable::NotThreadLocal);
addCompilerUsedGlobal(Var);
- getModule().addModuleFlag(llvm::Module::Override, "amdgpu_hostcall", 1);
+ if (!getModule().getModuleFlag("amdgpu_hostcall")) {
+ getModule().addModuleFlag(llvm::Module::Override, "amdgpu_hostcall", 1);
+ }
}
emitLLVMUsed();
@@ -610,7 +612,7 @@ void CodeGenModule::Release() {
if (Context.getLangOpts().SemanticInterposition)
// Require various optimization to respect semantic interposition.
- getModule().setSemanticInterposition(1);
+ getModule().setSemanticInterposition(true);
if (CodeGenOpts.EmitCodeView) {
// Indicate that we want CodeView in the metadata.
@@ -710,6 +712,9 @@ void CodeGenModule::Release() {
1);
}
+ if (CodeGenOpts.IBTSeal)
+ getModule().addModuleFlag(llvm::Module::Override, "ibt-seal", 1);
+
// Add module metadata for return address signing (ignoring
// non-leaf/all) and stack tagging. These are actually turned on by function
// attributes, but we use module metadata to emit build attributes. This is
@@ -1368,7 +1373,8 @@ static std::string getMangledNameImpl(CodeGenModule &CGM, GlobalDecl GD,
}
void CodeGenModule::UpdateMultiVersionNames(GlobalDecl GD,
- const FunctionDecl *FD) {
+ const FunctionDecl *FD,
+ StringRef &CurName) {
if (!FD->isMultiVersion())
return;
@@ -1400,7 +1406,11 @@ void CodeGenModule::UpdateMultiVersionNames(GlobalDecl GD,
if (ExistingRecord != std::end(Manglings))
Manglings.remove(&(*ExistingRecord));
auto Result = Manglings.insert(std::make_pair(OtherName, OtherGD));
- MangledDeclNames[OtherGD.getCanonicalDecl()] = Result.first->first();
+ StringRef OtherNameRef = MangledDeclNames[OtherGD.getCanonicalDecl()] =
+ Result.first->first();
+ // If the current decl is being created, make sure we update the name.
+ if (GD.getCanonicalDecl() == OtherGD.getCanonicalDecl())
+ CurName = OtherNameRef;
if (llvm::GlobalValue *Entry = GetGlobalValue(NonTargetName))
Entry->setName(OtherName);
}
@@ -1819,7 +1829,7 @@ CodeGenModule::getMostBaseClasses(const CXXRecordDecl *RD) {
void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
llvm::Function *F) {
- llvm::AttrBuilder B;
+ llvm::AttrBuilder B(F->getContext());
if (CodeGenOpts.UnwindTables)
B.addAttribute(llvm::Attribute::UWTable);
@@ -1982,7 +1992,7 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
void CodeGenModule::setLLVMFunctionFEnvAttributes(const FunctionDecl *D,
llvm::Function *F) {
if (D->hasAttr<StrictFPAttr>()) {
- llvm::AttrBuilder FuncAttrs;
+ llvm::AttrBuilder FuncAttrs(F->getContext());
FuncAttrs.addAttribute("strictfp");
F->addFnAttrs(FuncAttrs);
}
@@ -2092,12 +2102,12 @@ void CodeGenModule::setNonAliasAttributes(GlobalDecl GD,
if (!D->getAttr<SectionAttr>())
F->addFnAttr("implicit-section-name", SA->getName());
- llvm::AttrBuilder Attrs;
+ llvm::AttrBuilder Attrs(F->getContext());
if (GetCPUAndFeaturesAttributes(GD, Attrs)) {
// We know that GetCPUAndFeaturesAttributes will always have the
// newest set, since it has the newest possible FunctionDecl, so the
// new ones should replace the old.
- llvm::AttrBuilder RemoveAttrs;
+ llvm::AttributeMask RemoveAttrs;
RemoveAttrs.addAttribute("target-cpu");
RemoveAttrs.addAttribute("target-features");
RemoveAttrs.addAttribute("tune-cpu");
@@ -3479,6 +3489,7 @@ void CodeGenModule::emitMultiVersionFunctions() {
void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
const auto *FD = cast<FunctionDecl>(GD.getDecl());
assert(FD && "Not a FunctionDecl?");
+ assert(FD->isCPUDispatchMultiVersion() && "Not a multiversion function?");
const auto *DD = FD->getAttr<CPUDispatchAttr>();
assert(DD && "Not a cpu_dispatch Function?");
llvm::Type *DeclTy = getTypes().ConvertType(FD->getType());
@@ -3489,14 +3500,16 @@ void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
}
StringRef ResolverName = getMangledName(GD);
+ UpdateMultiVersionNames(GD, FD, ResolverName);
llvm::Type *ResolverType;
GlobalDecl ResolverGD;
- if (getTarget().supportsIFunc())
+ if (getTarget().supportsIFunc()) {
ResolverType = llvm::FunctionType::get(
llvm::PointerType::get(DeclTy,
Context.getTargetAddressSpace(FD->getType())),
false);
+ }
else {
ResolverType = DeclTy;
ResolverGD = GD;
@@ -3688,8 +3701,7 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
}
if (FD->isMultiVersion()) {
- if (FD->hasAttr<TargetAttr>())
- UpdateMultiVersionNames(GD, FD);
+ UpdateMultiVersionNames(GD, FD, MangledName);
if (!IsForDefinition)
return GetOrCreateMultiVersionResolver(GD, Ty, FD);
}
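
UpdateMultiVersionNames now also rewrites the name of the declaration currently being emitted (the new CurName out-parameter) and is invoked from emitCPUDispatchDefinition. A hedged, x86-only sketch of the cpu_dispatch multiversioning this services, using the attribute spelling Clang already accepts; the function name and CPU list are illustrative.

    __attribute__((cpu_specific(ivybridge))) void kernel(void) { /* tuned path */ }
    __attribute__((cpu_specific(generic)))   void kernel(void) { /* fallback   */ }
    // The dispatcher: CodeGen emits a resolver (an ifunc where supported) whose
    // mangled name is what UpdateMultiVersionNames keeps consistent.
    __attribute__((cpu_dispatch(ivybridge, generic))) void kernel(void);
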
@@ -3785,7 +3797,7 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
if (D)
SetFunctionAttributes(GD, F, IsIncompleteFunction, IsThunk);
if (ExtraAttrs.hasFnAttrs()) {
- llvm::AttrBuilder B(ExtraAttrs, llvm::AttributeList::FunctionIndex);
+ llvm::AttrBuilder B(F->getContext(), ExtraAttrs.getFnAttrs());
F->addFnAttrs(B);
}
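
Several hunks in this file track the LLVM 14 attribute-API split: AttrBuilder is now constructed with an LLVMContext, and removals go through the separate AttributeMask class. A small self-contained sketch of the updated usage, assuming LLVM 14 headers; retune is an illustrative helper, not clang code.

    #include "llvm/IR/Attributes.h"
    #include "llvm/IR/Function.h"

    void retune(llvm::Function &F) {
      llvm::AttrBuilder B(F.getContext());     // context-bound builder
      B.addAttribute(llvm::Attribute::NoUnwind);
      B.addAttribute("tune-cpu", "generic");   // string attributes still work
      F.addFnAttrs(B);

      llvm::AttributeMask Remove;              // removals use the mask type now
      Remove.addAttribute("target-cpu");
      F.removeFnAttrs(Remove);
    }
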
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h
index f1565511f98a..e803022508a4 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h
@@ -46,7 +46,6 @@ class GlobalValue;
class DataLayout;
class FunctionType;
class LLVMContext;
-class OpenMPIRBuilder;
class IndexedInstrProfReader;
}
@@ -55,17 +54,13 @@ class ASTContext;
class AtomicType;
class FunctionDecl;
class IdentifierInfo;
-class ObjCMethodDecl;
class ObjCImplementationDecl;
-class ObjCCategoryImplDecl;
-class ObjCProtocolDecl;
class ObjCEncodeExpr;
class BlockExpr;
class CharUnits;
class Decl;
class Expr;
class Stmt;
-class InitListExpr;
class StringLiteral;
class NamedDecl;
class ValueDecl;
@@ -78,13 +73,10 @@ class AnnotateAttr;
class CXXDestructorDecl;
class Module;
class CoverageSourceInfo;
-class TargetAttr;
class InitSegAttr;
-struct ParsedTargetAttr;
namespace CodeGen {
-class CallArgList;
class CodeGenFunction;
class CodeGenTBAA;
class CGCXXABI;
@@ -93,8 +85,6 @@ class CGObjCRuntime;
class CGOpenCLRuntime;
class CGOpenMPRuntime;
class CGCUDARuntime;
-class BlockFieldFlags;
-class FunctionArgList;
class CoverageMappingModuleGen;
class TargetCodeGenInfo;
@@ -311,7 +301,7 @@ private:
const TargetInfo &Target;
std::unique_ptr<CGCXXABI> ABI;
llvm::LLVMContext &VMContext;
- std::string ModuleNameHash = "";
+ std::string ModuleNameHash;
std::unique_ptr<CodeGenTBAA> TBAA;
@@ -345,7 +335,7 @@ private:
/// for emission and therefore should only be output if they are actually
/// used. If a decl is in this, then it is known to have not been referenced
/// yet.
- std::map<StringRef, GlobalDecl> DeferredDecls;
+ llvm::DenseMap<StringRef, GlobalDecl> DeferredDecls;
/// This is a list of deferred decls which we have seen that *are* actually
/// referenced. These get code generated when the module is done.
@@ -1478,7 +1468,8 @@ private:
llvm::Constant *GetOrCreateMultiVersionResolver(GlobalDecl GD,
llvm::Type *DeclTy,
const FunctionDecl *FD);
- void UpdateMultiVersionNames(GlobalDecl GD, const FunctionDecl *FD);
+ void UpdateMultiVersionNames(GlobalDecl GD, const FunctionDecl *FD,
+ StringRef &CurName);
llvm::Constant *
GetOrCreateLLVMGlobal(StringRef MangledName, llvm::Type *Ty, LangAS AddrSpace,
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp
index ab953c2c7d52..6657f2a91e3d 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp
@@ -131,7 +131,7 @@ public:
static_assert(LastHashType <= TooBig, "Too many types in HashType");
PGOHash(PGOHashVersion HashVersion)
- : Working(0), Count(0), HashVersion(HashVersion), MD5() {}
+ : Working(0), Count(0), HashVersion(HashVersion) {}
void combine(HashType Type);
uint64_t finalize();
PGOHashVersion getHashVersion() const { return HashVersion; }
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.h
index e8e006f41616..a65963596fe9 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.h
@@ -29,7 +29,6 @@ namespace clang {
class Type;
namespace CodeGen {
-class CGRecordLayout;
// TBAAAccessKind - A kind of TBAA memory access descriptor.
enum class TBAAAccessKind : unsigned {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp
index 77721510dfd0..4839e22c4b14 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -643,11 +643,7 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
llvm::Type *PointeeType = ConvertTypeForMem(ETy);
if (PointeeType->isVoidTy())
PointeeType = llvm::Type::getInt8Ty(getLLVMContext());
-
- unsigned AS = PointeeType->isFunctionTy()
- ? getDataLayout().getProgramAddressSpace()
- : Context.getTargetAddressSpace(ETy);
-
+ unsigned AS = Context.getTargetAddressSpace(ETy);
ResultType = llvm::PointerType::get(PointeeType, AS);
break;
}
@@ -748,7 +744,13 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
llvm::Type *PointeeType = CGM.getLangOpts().OpenCL
? CGM.getGenericBlockLiteralType()
: ConvertTypeForMem(FTy);
- unsigned AS = Context.getTargetAddressSpace(FTy);
+      // Block pointers lower to a function type. For a function type,
+      // getTargetAddressSpace() returns the default address space for
+      // function pointers, i.e. the program address space. Therefore, for
+      // block pointers, it is important to pass the qualifiers when calling
+      // getTargetAddressSpace(), so that we get the address space for data
+      // pointers and not the one for function pointers.
+ unsigned AS = Context.getTargetAddressSpace(FTy.getQualifiers());
ResultType = llvm::PointerType::get(PointeeType, AS);
break;
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h
index f8f7542e4c83..28b831222943 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h
@@ -31,14 +31,9 @@ namespace clang {
class ASTContext;
template <typename> class CanQual;
class CXXConstructorDecl;
-class CXXDestructorDecl;
class CXXMethodDecl;
class CodeGenOptions;
-class FieldDecl;
class FunctionProtoType;
-class ObjCInterfaceDecl;
-class ObjCIvarDecl;
-class PointerType;
class QualType;
class RecordDecl;
class TagDecl;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp b/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
index 1a15b09c7b2b..2979d92c8417 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -334,59 +334,6 @@ public:
ArrayRef<llvm::Function *> CXXThreadLocalInits,
ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
- bool mayNeedDestruction(const VarDecl *VD) const {
- if (VD->needsDestruction(getContext()))
- return true;
-
- // If the variable has an incomplete class type (or array thereof), it
- // might need destruction.
- const Type *T = VD->getType()->getBaseElementTypeUnsafe();
- if (T->getAs<RecordType>() && T->isIncompleteType())
- return true;
-
- return false;
- }
-
- /// Determine whether we will definitely emit this variable with a constant
- /// initializer, either because the language semantics demand it or because
- /// we know that the initializer is a constant.
- // For weak definitions, any initializer available in the current translation
- // is not necessarily reflective of the initializer used; such initializers
- // are ignored unless if InspectInitForWeakDef is true.
- bool
- isEmittedWithConstantInitializer(const VarDecl *VD,
- bool InspectInitForWeakDef = false) const {
- VD = VD->getMostRecentDecl();
- if (VD->hasAttr<ConstInitAttr>())
- return true;
-
- // All later checks examine the initializer specified on the variable. If
- // the variable is weak, such examination would not be correct.
- if (!InspectInitForWeakDef &&
- (VD->isWeak() || VD->hasAttr<SelectAnyAttr>()))
- return false;
-
- const VarDecl *InitDecl = VD->getInitializingDeclaration();
- if (!InitDecl)
- return false;
-
- // If there's no initializer to run, this is constant initialization.
- if (!InitDecl->hasInit())
- return true;
-
- // If we have the only definition, we don't need a thread wrapper if we
- // will emit the value as a constant.
- if (isUniqueGVALinkage(getContext().GetGVALinkageForVariable(VD)))
- return !mayNeedDestruction(VD) && InitDecl->evaluateValue();
-
- // Otherwise, we need a thread wrapper unless we know that every
- // translation unit will emit the value as a constant. We rely on the
- // variable being constant-initialized in every translation unit if it's
- // constant-initialized in any translation unit, which isn't actually
- // guaranteed by the standard but is necessary for sanity.
- return InitDecl->hasConstantInitialization();
- }
-
bool usesThreadWrapperFunction(const VarDecl *VD) const override {
return !isEmittedWithConstantInitializer(VD) ||
mayNeedDestruction(VD);
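The helpers deleted above (mayNeedDestruction and isEmittedWithConstantInitializer) still back usesThreadWrapperFunction here, and MicrosoftCXXABI starts calling them further down, so they have presumably been hoisted into shared CGCXXABI code by this merge. The rule they encode is visible in plain C++; a minimal sketch:

#include <string>

static std::string makeName() { return "tls"; }

// Constant-initialized and trivially destructible: usesThreadWrapperFunction
// is false for this one, so accesses need no wrapper.
thread_local int counter = 0;

// Dynamic initializer plus a non-trivial destructor: in the Itanium ABI,
// cross-TU accesses go through a thread wrapper that runs the initializer
// lazily on first use in each thread.
thread_local std::string name = makeName();

int main() { return counter + static_cast<int>(name.size()); }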
@@ -697,8 +644,8 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
CharUnits VTablePtrAlign =
CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
CGF.getPointerAlign());
- llvm::Value *VTable =
- CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD);
+ llvm::Value *VTable = CGF.GetVTablePtr(
+ Address(This, ThisAddr.getElementType(), VTablePtrAlign), VTableTy, RD);
// Apply the offset.
// On ARM64, to reserve extra space in virtual member function pointers,
@@ -4525,8 +4472,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
// pad. The best solution is to fix the personality function.
} else {
// Pull the pointer for the reference type off.
- llvm::Type *PtrTy =
- cast<llvm::PointerType>(LLVMCatchTy)->getElementType();
+ llvm::Type *PtrTy = LLVMCatchTy->getPointerElementType();
// Create the temporary and write the adjusted pointer into it.
Address ExnPtrTmp =
diff --git a/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.h b/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.h
index 32906a000269..d249b5b0eb88 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.h
@@ -17,7 +17,6 @@
namespace llvm {
class DIMacroFile;
-class DIMacroNode;
}
namespace clang {
class Preprocessor;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index 5971a7709304..e00ff2b68719 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -401,7 +401,9 @@ public:
ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
bool usesThreadWrapperFunction(const VarDecl *VD) const override {
- return false;
+ return getContext().getLangOpts().isCompatibleWithMSVC(
+ LangOptions::MSVC2019_5) &&
+ (!isEmittedWithConstantInitializer(VD) || mayNeedDestruction(VD));
}
LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
QualType LValType) override;
@@ -2397,11 +2399,97 @@ void MicrosoftCXXABI::EmitThreadLocalInitFuncs(
}
}
+static llvm::GlobalValue *getTlsGuardVar(CodeGenModule &CGM) {
+ // __tls_guard comes from the MSVC runtime and reflects
+ // whether TLS has been initialized for a particular thread.
+ // It is set from within __dyn_tls_init by the runtime.
+ // Every library and executable has its own variable.
+ llvm::Type *VTy = llvm::Type::getInt8Ty(CGM.getLLVMContext());
+ llvm::Constant *TlsGuardConstant =
+ CGM.CreateRuntimeVariable(VTy, "__tls_guard");
+ llvm::GlobalValue *TlsGuard = cast<llvm::GlobalValue>(TlsGuardConstant);
+
+ TlsGuard->setThreadLocal(true);
+
+ return TlsGuard;
+}
+
+static llvm::FunctionCallee getDynTlsOnDemandInitFn(CodeGenModule &CGM) {
+ // __dyn_tls_on_demand_init comes from the MSVC runtime and triggers
+ // dynamic TLS initialization by calling __dyn_tls_init internally.
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()), {},
+ /*isVarArg=*/false);
+ return CGM.CreateRuntimeFunction(
+ FTy, "__dyn_tls_on_demand_init",
+ llvm::AttributeList::get(CGM.getLLVMContext(),
+ llvm::AttributeList::FunctionIndex,
+ llvm::Attribute::NoUnwind),
+ /*Local=*/true);
+}
+
+static void emitTlsGuardCheck(CodeGenFunction &CGF, llvm::GlobalValue *TlsGuard,
+ llvm::BasicBlock *DynInitBB,
+ llvm::BasicBlock *ContinueBB) {
+ llvm::LoadInst *TlsGuardValue =
+ CGF.Builder.CreateLoad(Address(TlsGuard, CharUnits::One()));
+ llvm::Value *CmpResult =
+ CGF.Builder.CreateICmpEQ(TlsGuardValue, CGF.Builder.getInt8(0));
+ CGF.Builder.CreateCondBr(CmpResult, DynInitBB, ContinueBB);
+}
+
+static void emitDynamicTlsInitializationCall(CodeGenFunction &CGF,
+ llvm::GlobalValue *TlsGuard,
+ llvm::BasicBlock *ContinueBB) {
+ llvm::FunctionCallee Initializer = getDynTlsOnDemandInitFn(CGF.CGM);
+ llvm::Function *InitializerFunction =
+ cast<llvm::Function>(Initializer.getCallee());
+ llvm::CallInst *CallVal = CGF.Builder.CreateCall(InitializerFunction);
+ CallVal->setCallingConv(InitializerFunction->getCallingConv());
+
+ CGF.Builder.CreateBr(ContinueBB);
+}
+
+static void emitDynamicTlsInitialization(CodeGenFunction &CGF) {
+ llvm::BasicBlock *DynInitBB =
+ CGF.createBasicBlock("dyntls.dyn_init", CGF.CurFn);
+ llvm::BasicBlock *ContinueBB =
+ CGF.createBasicBlock("dyntls.continue", CGF.CurFn);
+
+ llvm::GlobalValue *TlsGuard = getTlsGuardVar(CGF.CGM);
+
+ emitTlsGuardCheck(CGF, TlsGuard, DynInitBB, ContinueBB);
+ CGF.Builder.SetInsertPoint(DynInitBB);
+ emitDynamicTlsInitializationCall(CGF, TlsGuard, ContinueBB);
+ CGF.Builder.SetInsertPoint(ContinueBB);
+}
+
LValue MicrosoftCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
const VarDecl *VD,
QualType LValType) {
- CGF.CGM.ErrorUnsupported(VD, "thread wrappers");
- return LValue();
+ // Dynamic TLS initialization works by checking the state of a
+ // guard variable (__tls_guard) to see whether TLS initialization
+  // for a thread has happened yet.
+ // If not, the initialization is triggered on-demand
+ // by calling __dyn_tls_on_demand_init.
+ emitDynamicTlsInitialization(CGF);
+
+ // Emit the variable just like any regular global variable.
+
+ llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
+ llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
+
+ unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
+ V = CGF.Builder.CreateBitCast(V, RealVarTy->getPointerTo(AS));
+
+ CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
+ Address Addr(V, Alignment);
+
+ LValue LV = VD->getType()->isReferenceType()
+ ? CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
+ AlignmentSource::Decl)
+ : CGF.MakeAddrLValue(Addr, LValType, AlignmentSource::Decl);
+ return LV;
}
static ConstantAddress getInitThreadEpochPtr(CodeGenModule &CGM) {
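Taken together, the helpers added above make MSVC-style thread_local access emit a guarded on-demand initialization instead of the old "thread wrappers" ErrorUnsupported. The shape of the generated code corresponds roughly to the following sketch; __tls_guard and __dyn_tls_on_demand_init are the CRT symbols named in the comments above, declared here only for illustration:

extern "C" {
// MSVC CRT symbols (per the comments above); declarations for illustration.
extern thread_local char __tls_guard;
void __dyn_tls_on_demand_init();
}

extern thread_local int tls_value; // some dynamically initialized thread_local

int get() {
  // dyntls.dyn_init / dyntls.continue blocks in the emitted IR:
  if (__tls_guard == 0)            // TLS for this thread not initialized yet?
    __dyn_tls_on_demand_init();    // runs __dyn_tls_init internally
  return tls_value;                // then access the variable directly
}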
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp b/contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
index f7b83c45022d..9fe7e5d1f5c3 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
@@ -156,6 +156,7 @@ public:
CodeGenOpts.setDebuggerTuning(CI.getCodeGenOpts().getDebuggerTuning());
CodeGenOpts.DebugPrefixMap =
CI.getInvocation().getCodeGenOpts().DebugPrefixMap;
+ CodeGenOpts.DebugStrictDwarf = CI.getCodeGenOpts().DebugStrictDwarf;
}
~PCHContainerGenerator() override = default;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
index 85089cdb2200..fb81169003fc 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
@@ -855,19 +855,19 @@ public:
if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
if (const auto *Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
llvm::Function *Fn = cast<llvm::Function>(GV);
- llvm::AttrBuilder B;
+ llvm::AttrBuilder B(GV->getContext());
B.addAttribute("wasm-import-module", Attr->getImportModule());
Fn->addFnAttrs(B);
}
if (const auto *Attr = FD->getAttr<WebAssemblyImportNameAttr>()) {
llvm::Function *Fn = cast<llvm::Function>(GV);
- llvm::AttrBuilder B;
+ llvm::AttrBuilder B(GV->getContext());
B.addAttribute("wasm-import-name", Attr->getImportName());
Fn->addFnAttrs(B);
}
if (const auto *Attr = FD->getAttr<WebAssemblyExportNameAttr>()) {
llvm::Function *Fn = cast<llvm::Function>(GV);
- llvm::AttrBuilder B;
+ llvm::AttrBuilder B(GV->getContext());
B.addAttribute("wasm-export-name", Attr->getExportName());
Fn->addFnAttrs(B);
}
@@ -1606,7 +1606,7 @@ static bool isSIMDVectorType(ASTContext &Context, QualType Ty) {
static bool isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty) {
const RecordType *RT = Ty->getAs<RecordType>();
if (!RT)
- return 0;
+ return false;
const RecordDecl *RD = RT->getDecl();
// If this is a C++ record, check the bases first.
@@ -6414,7 +6414,7 @@ public:
// AAPCS guarantees that sp will be 8-byte aligned on any public interface,
// however this is not necessarily true on taking any interrupt. Instruct
// the backend to perform a realignment as part of the function prologue.
- llvm::AttrBuilder B;
+ llvm::AttrBuilder B(Fn->getContext());
B.addStackAlignmentAttr(8);
Fn->addFnAttrs(B);
}
@@ -8282,14 +8282,15 @@ public:
LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
const VarDecl *D) const override {
- // Check if a global/static variable is defined within address space 1
+    // Check if a global/static variable is defined in address space
+    // 1-6 (__flash, __flash1, __flash2, __flash3, __flash4, __flash5)
// but not constant.
LangAS AS = D->getType().getAddressSpace();
- if (isTargetAddressSpace(AS) && toTargetAddressSpace(AS) == 1 &&
- !D->getType().isConstQualified())
+ if (isTargetAddressSpace(AS) && 1 <= toTargetAddressSpace(AS) &&
+ toTargetAddressSpace(AS) <= 6 && !D->getType().isConstQualified())
CGM.getDiags().Report(D->getLocation(),
diag::err_verify_nonconst_addrspace)
- << "__flash";
+ << "__flash*";
return TargetCodeGenInfo::getGlobalVarAddressSpace(CGM, D);
}
@@ -8693,7 +8694,7 @@ Address HexagonABIInfo::EmitVAArgForHexagonLinux(CodeGenFunction &CGF,
llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
"__new_saved_reg_area_pointer");
- llvm::Value *UsingStack = 0;
+ llvm::Value *UsingStack = nullptr;
UsingStack = CGF.Builder.CreateICmpSGT(__new_saved_reg_area_pointer,
__saved_reg_area_end_pointer);
@@ -8935,9 +8936,9 @@ private:
llvm::Type *coerceKernelArgumentType(llvm::Type *Ty, unsigned FromAS,
unsigned ToAS) const {
// Single value types.
- if (Ty->isPointerTy() && Ty->getPointerAddressSpace() == FromAS)
- return llvm::PointerType::get(
- cast<llvm::PointerType>(Ty)->getElementType(), ToAS);
+ auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(Ty);
+ if (PtrTy && PtrTy->getAddressSpace() == FromAS)
+ return llvm::PointerType::getWithSamePointeeType(PtrTy, ToAS);
return Ty;
}
@@ -9304,16 +9305,9 @@ void AMDGPUTargetCodeGenInfo::setTargetAttributes(
if (FD)
setFunctionDeclAttributes(FD, F, M);
- const bool IsOpenCLKernel =
- M.getLangOpts().OpenCL && FD && FD->hasAttr<OpenCLKernelAttr>();
const bool IsHIPKernel =
M.getLangOpts().HIP && FD && FD->hasAttr<CUDAGlobalAttr>();
- const bool IsOpenMP = M.getLangOpts().OpenMP && !FD;
- if ((IsOpenCLKernel || IsHIPKernel || IsOpenMP) &&
- (M.getTriple().getOS() == llvm::Triple::AMDHSA))
- F->addFnAttr("amdgpu-implicitarg-num-bytes", "56");
-
if (IsHIPKernel)
F->addFnAttr("uniform-work-group-size", "true");
@@ -9340,8 +9334,8 @@ llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
return llvm::ConstantPointerNull::get(PT);
auto &Ctx = CGM.getContext();
- auto NPT = llvm::PointerType::get(PT->getElementType(),
- Ctx.getTargetAddressSpace(LangAS::opencl_generic));
+ auto NPT = llvm::PointerType::getWithSamePointeeType(
+ PT, Ctx.getTargetAddressSpace(LangAS::opencl_generic));
return llvm::ConstantExpr::getAddrSpaceCast(
llvm::ConstantPointerNull::get(NPT), PT);
}
@@ -10276,9 +10270,9 @@ ABIArgInfo SPIRVABIInfo::classifyKernelArgumentType(QualType Ty) const {
llvm::Type *LTy = CGT.ConvertType(Ty);
auto DefaultAS = getContext().getTargetAddressSpace(LangAS::Default);
auto GlobalAS = getContext().getTargetAddressSpace(LangAS::cuda_device);
- if (LTy->isPointerTy() && LTy->getPointerAddressSpace() == DefaultAS) {
- LTy = llvm::PointerType::get(
- cast<llvm::PointerType>(LTy)->getElementType(), GlobalAS);
+ auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(LTy);
+ if (PtrTy && PtrTy->getAddressSpace() == DefaultAS) {
+ LTy = llvm::PointerType::getWithSamePointeeType(PtrTy, GlobalAS);
return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
}
}
@@ -11417,7 +11411,7 @@ TargetCodeGenInfo::createEnqueuedBlockKernel(CodeGenFunction &CGF,
auto &C = CGF.getLLVMContext();
std::string Name = Invoke->getName().str() + "_kernel";
auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
- auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
+ auto *F = llvm::Function::Create(FT, llvm::GlobalValue::ExternalLinkage, Name,
&CGF.CGM.getModule());
auto IP = CGF.Builder.saveIP();
auto *BB = llvm::BasicBlock::Create(C, "entry", F);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h
index aa8bbb60a75f..dfdb2f5f55bb 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h
@@ -38,7 +38,6 @@ class ABIInfo;
class CallArgList;
class CodeGenFunction;
class CGBlockInfo;
-class CGFunctionInfo;
/// TargetCodeGenInfo - This class organizes various target-specific
/// codegeneration issues, like target-specific attributes, builtins and so
diff --git a/contrib/llvm-project/clang/lib/Driver/Driver.cpp b/contrib/llvm-project/clang/lib/Driver/Driver.cpp
index 3b551ea94cc2..2e4ebc10e9ba 100644
--- a/contrib/llvm-project/clang/lib/Driver/Driver.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Driver.cpp
@@ -62,6 +62,7 @@
#include "clang/Driver/SanitizerArgs.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
+#include "clang/Driver/Types.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
@@ -170,13 +171,11 @@ Driver::Driver(StringRef ClangExecutable, StringRef TargetTriple,
: Diags(Diags), VFS(std::move(VFS)), Mode(GCCMode),
SaveTemps(SaveTempsNone), BitcodeEmbed(EmbedNone), LTOMode(LTOK_None),
ClangExecutable(ClangExecutable), SysRoot(DEFAULT_SYSROOT),
- DriverTitle(Title), CCPrintStatReportFilename(), CCPrintOptionsFilename(),
- CCPrintHeadersFilename(), CCLogDiagnosticsFilename(),
- CCCPrintBindings(false), CCPrintOptions(false), CCPrintHeaders(false),
- CCLogDiagnostics(false), CCGenDiagnostics(false),
- CCPrintProcessStats(false), TargetTriple(TargetTriple),
- CCCGenericGCCName(""), Saver(Alloc), CheckInputsExist(true),
- GenReproducer(false), SuppressMissingInputWarning(false) {
+ DriverTitle(Title), CCCPrintBindings(false), CCPrintOptions(false),
+ CCPrintHeaders(false), CCLogDiagnostics(false), CCGenDiagnostics(false),
+ CCPrintProcessStats(false), TargetTriple(TargetTriple), Saver(Alloc),
+ CheckInputsExist(true), GenReproducer(false),
+ SuppressMissingInputWarning(false) {
// Provide a sane fallback if no VFS is specified.
if (!this->VFS)
this->VFS = llvm::vfs::getRealFileSystem();
@@ -328,7 +327,8 @@ phases::ID Driver::getFinalPhase(const DerivedArgList &DAL,
(PhaseArg = DAL.getLastArg(options::OPT_rewrite_legacy_objc)) ||
(PhaseArg = DAL.getLastArg(options::OPT__migrate)) ||
(PhaseArg = DAL.getLastArg(options::OPT__analyze)) ||
- (PhaseArg = DAL.getLastArg(options::OPT_emit_ast))) {
+ (PhaseArg = DAL.getLastArg(options::OPT_emit_ast)) ||
+ (PhaseArg = DAL.getLastArg(options::OPT_extract_api))) {
FinalPhase = phases::Compile;
// -S only runs up to the backend.
@@ -369,7 +369,20 @@ DerivedArgList *Driver::TranslateInputArgs(const InputArgList &Args) const {
bool HasNostdlib = Args.hasArg(options::OPT_nostdlib);
bool HasNostdlibxx = Args.hasArg(options::OPT_nostdlibxx);
bool HasNodefaultlib = Args.hasArg(options::OPT_nodefaultlibs);
+ bool IgnoreUnused = false;
for (Arg *A : Args) {
+ if (IgnoreUnused)
+ A->claim();
+
+ if (A->getOption().matches(options::OPT_start_no_unused_arguments)) {
+ IgnoreUnused = true;
+ continue;
+ }
+ if (A->getOption().matches(options::OPT_end_no_unused_arguments)) {
+ IgnoreUnused = false;
+ continue;
+ }
+
// Unfortunately, we have to parse some forwarding options (-Xassembler,
// -Xlinker, -Xpreprocessor) because we either integrate their functionality
// (assembler and preprocessor), or bypass a previous driver ('collect2').
@@ -437,7 +450,7 @@ DerivedArgList *Driver::TranslateInputArgs(const InputArgList &Args) const {
// Enforce -static if -miamcu is present.
if (Args.hasFlag(options::OPT_miamcu, options::OPT_mno_iamcu, false))
- DAL->AddFlagArg(0, Opts.getOption(options::OPT_static));
+ DAL->AddFlagArg(nullptr, Opts.getOption(options::OPT_static));
// Add a default value of -mlinker-version=, if one was given and the user
// didn't specify one.
@@ -763,6 +776,18 @@ void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
llvm::Triple TT(Val);
std::string NormalizedName = TT.normalize();
+    // We want to expand the shortened versions of the triples passed in to
+    // the full values used for the bitcode libraries, for convenience.
+ if (TT.getVendor() == llvm::Triple::UnknownVendor ||
+ TT.getOS() == llvm::Triple::UnknownOS) {
+ if (TT.getArch() == llvm::Triple::nvptx)
+ TT = llvm::Triple("nvptx-nvidia-cuda");
+ else if (TT.getArch() == llvm::Triple::nvptx64)
+ TT = llvm::Triple("nvptx64-nvidia-cuda");
+ else if (TT.getArch() == llvm::Triple::amdgcn)
+ TT = llvm::Triple("amdgcn-amd-amdhsa");
+ }
+
// Make sure we don't have a duplicate triple.
auto Duplicate = FoundNormalizedTriples.find(NormalizedName);
if (Duplicate != FoundNormalizedTriples.end()) {
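The expansion above relies on llvm::Triple leaving vendor and OS as Unknown when only an architecture is given. A small sketch of that check against the Triple API (llvm/ADT/Triple.h in an LLVM 14-era tree):

#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::Triple TT("nvptx64");                 // shorthand: vendor and OS unknown
  if (TT.getVendor() == llvm::Triple::UnknownVendor ||
      TT.getOS() == llvm::Triple::UnknownOS)
    TT = llvm::Triple("nvptx64-nvidia-cuda"); // full triple used for the
                                              // device bitcode libraries
  llvm::outs() << TT.str() << "\n";           // prints nvptx64-nvidia-cuda
  return 0;
}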
@@ -1871,9 +1896,16 @@ bool Driver::HandleImmediateArgs(const Compilation &C) {
}
if (C.getArgs().hasArg(options::OPT_print_runtime_dir)) {
- std::string CandidateRuntimePath = TC.getRuntimePath();
- if (getVFS().exists(CandidateRuntimePath))
- llvm::outs() << CandidateRuntimePath << '\n';
+ std::string RuntimePath;
+ // Get the first existing path, if any.
+ for (auto Path : TC.getRuntimePaths()) {
+ if (getVFS().exists(Path)) {
+ RuntimePath = Path;
+ break;
+ }
+ }
+ if (!RuntimePath.empty())
+ llvm::outs() << RuntimePath << '\n';
else
llvm::outs() << TC.getCompilerRTPath() << '\n';
return false;
@@ -3105,7 +3137,7 @@ class OffloadingActionBuilder final {
// We will pass the device action as a host dependence, so we don't
// need to do anything else with them.
CudaDeviceActions.clear();
- return ABRT_Success;
+ return CompileDeviceOnly ? ABRT_Ignore_Host : ABRT_Success;
}
// By default, we produce an action for each device arch.
@@ -3138,6 +3170,7 @@ class OffloadingActionBuilder final {
assert(DeviceLinkerInputs.size() == GpuArchList.size() &&
"Linker inputs and GPU arch list sizes do not match.");
+ ActionList Actions;
// Append a new link action for each device.
unsigned I = 0;
for (auto &LI : DeviceLinkerInputs) {
@@ -3149,22 +3182,29 @@ class OffloadingActionBuilder final {
OffloadAction::DeviceDependences DeviceLinkDeps;
DeviceLinkDeps.add(*DeviceLinkAction, *ToolChains[0],
GpuArchList[I], AssociatedOffloadKind);
- AL.push_back(C.MakeAction<OffloadAction>(DeviceLinkDeps,
- DeviceLinkAction->getType()));
+ Actions.push_back(C.MakeAction<OffloadAction>(
+ DeviceLinkDeps, DeviceLinkAction->getType()));
++I;
}
DeviceLinkerInputs.clear();
// Create a host object from all the device images by embedding them
- // in a fat binary.
+    // in a fat binary for mixed host-device compilation. For device-only
+    // compilation, only the fat binary is created.
OffloadAction::DeviceDependences DDeps;
- auto *TopDeviceLinkAction =
- C.MakeAction<LinkJobAction>(AL, types::TY_Object);
- DDeps.add(*TopDeviceLinkAction, *ToolChains[0],
- nullptr, AssociatedOffloadKind);
-
- // Offload the host object to the host linker.
- AL.push_back(C.MakeAction<OffloadAction>(DDeps, TopDeviceLinkAction->getType()));
+ if (!CompileDeviceOnly || !BundleOutput.hasValue() ||
+ BundleOutput.getValue()) {
+ auto *TopDeviceLinkAction = C.MakeAction<LinkJobAction>(
+ Actions,
+ CompileDeviceOnly ? types::TY_HIP_FATBIN : types::TY_Object);
+ DDeps.add(*TopDeviceLinkAction, *ToolChains[0], nullptr,
+ AssociatedOffloadKind);
+ // Offload the host object to the host linker.
+ AL.push_back(
+ C.MakeAction<OffloadAction>(DDeps, TopDeviceLinkAction->getType()));
+ } else {
+ AL.append(Actions);
+ }
}
Action* appendLinkHostActions(ActionList &AL) override { return AL.back(); }
@@ -3551,15 +3591,18 @@ public:
return false;
}
- Action* makeHostLinkAction() {
- // Build a list of device linking actions.
- ActionList DeviceAL;
+ void appendDeviceLinkActions(ActionList &AL) {
for (DeviceActionBuilder *SB : SpecializedBuilders) {
if (!SB->isValid())
continue;
- SB->appendLinkDeviceActions(DeviceAL);
+ SB->appendLinkDeviceActions(AL);
}
+ }
+ Action *makeHostLinkAction() {
+ // Build a list of device linking actions.
+ ActionList DeviceAL;
+ appendDeviceLinkActions(DeviceAL);
if (DeviceAL.empty())
return nullptr;
@@ -3775,14 +3818,6 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
}
}
- // FIXME: Linking separate translation units for SPIR-V is not supported yet.
- // It can be done either by LLVM IR linking before conversion of the final
- // linked module to SPIR-V or external SPIR-V linkers can be used e.g.
- // spirv-link.
- if (C.getDefaultToolChain().getTriple().isSPIRV() && Inputs.size() > 1) {
- Diag(clang::diag::warn_drv_spirv_linking_multiple_inputs_unsupported);
- }
-
handleArguments(C, Args, Inputs, Actions);
// Builder to be used to build offloading actions.
@@ -3822,15 +3857,8 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
// Queue linker inputs.
if (Phase == phases::Link) {
assert(Phase == PL.back() && "linking must be final compilation step.");
- // Compilation phases are setup per language, however for SPIR-V the
- // final linking phase is meaningless since the compilation phase
- // produces the final binary.
- // FIXME: OpenCL - we could strip linking phase out from OpenCL
- // compilation phases if we could verify it is not needed by any target.
- if (!C.getDefaultToolChain().getTriple().isSPIRV()) {
- LinkerInputs.push_back(Current);
- Current = nullptr;
- }
+ LinkerInputs.push_back(Current);
+ Current = nullptr;
break;
}
@@ -3888,6 +3916,13 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
}
// Add a link action if necessary.
+
+ if (LinkerInputs.empty()) {
+ Arg *FinalPhaseArg;
+ if (getFinalPhase(Args, &FinalPhaseArg) == phases::Link)
+ OffloadBuilder.appendDeviceLinkActions(Actions);
+ }
+
if (!LinkerInputs.empty()) {
if (Action *Wrapper = OffloadBuilder.makeHostLinkAction())
LinkerInputs.push_back(Wrapper);
@@ -4036,7 +4071,8 @@ Action *Driver::ConstructPhaseAction(
OutputTy = types::TY_ModuleFile;
}
- if (Args.hasArg(options::OPT_fsyntax_only)) {
+ if (Args.hasArg(options::OPT_fsyntax_only) ||
+ Args.hasArg(options::OPT_extract_api)) {
// Syntax checks should not emit a PCH file
OutputTy = types::TY_Nothing;
}
@@ -4064,6 +4100,8 @@ Action *Driver::ConstructPhaseAction(
return C.MakeAction<CompileJobAction>(Input, types::TY_ModuleFile);
if (Args.hasArg(options::OPT_verify_pch))
return C.MakeAction<VerifyPCHJobAction>(Input, types::TY_Nothing);
+ if (Args.hasArg(options::OPT_extract_api))
+ return C.MakeAction<CompileJobAction>(Input, types::TY_API_INFO);
return C.MakeAction<CompileJobAction>(Input, types::TY_LLVM_BC);
}
case phases::Backend: {
diff --git a/contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp b/contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp
index d31529748b62..403fac76f060 100644
--- a/contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp
@@ -16,6 +16,7 @@
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SpecialCaseList.h"
+#include "llvm/Support/AArch64TargetParser.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizerOptions.h"
@@ -641,10 +642,14 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
Args.hasFlag(options::OPT_fsanitize_memory_use_after_dtor,
options::OPT_fno_sanitize_memory_use_after_dtor,
MsanUseAfterDtor);
+ MsanParamRetval = Args.hasFlag(
+ options::OPT_fsanitize_memory_param_retval,
+ options::OPT_fno_sanitize_memory_param_retval, MsanParamRetval);
NeedPIE |= !(TC.getTriple().isOSLinux() &&
TC.getTriple().getArch() == llvm::Triple::x86_64);
} else {
MsanUseAfterDtor = false;
+ MsanParamRetval = false;
}
if (AllAddedKinds & SanitizerKind::Thread) {
@@ -1096,6 +1101,9 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
if (MsanUseAfterDtor)
CmdArgs.push_back("-fsanitize-memory-use-after-dtor");
+ if (MsanParamRetval)
+ CmdArgs.push_back("-fsanitize-memory-param-retval");
+
// FIXME: Pass these parameters as function attributes, not as -llvm flags.
if (!TsanMemoryAccess) {
CmdArgs.push_back("-mllvm");
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChain.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChain.cpp
index 50c89aaadc18..5fef1fb2ee5a 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChain.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChain.cpp
@@ -75,17 +75,16 @@ ToolChain::ToolChain(const Driver &D, const llvm::Triple &T,
const ArgList &Args)
: D(D), Triple(T), Args(Args), CachedRTTIArg(GetRTTIArgument(Args)),
CachedRTTIMode(CalculateRTTIMode(Args, Triple, CachedRTTIArg)) {
- std::string RuntimePath = getRuntimePath();
- if (getVFS().exists(RuntimePath))
- getLibraryPaths().push_back(RuntimePath);
-
- std::string StdlibPath = getStdlibPath();
- if (getVFS().exists(StdlibPath))
- getFilePaths().push_back(StdlibPath);
+ auto addIfExists = [this](path_list &List, const std::string &Path) {
+ if (getVFS().exists(Path))
+ List.push_back(Path);
+ };
- std::string CandidateLibPath = getArchSpecificLibPath();
- if (getVFS().exists(CandidateLibPath))
- getFilePaths().push_back(CandidateLibPath);
+ for (const auto &Path : getRuntimePaths())
+ addIfExists(getLibraryPaths(), Path);
+ for (const auto &Path : getStdlibPaths())
+ addIfExists(getFilePaths(), Path);
+ addIfExists(getFilePaths(), getArchSpecificLibPath());
}
void ToolChain::setTripleEnvironment(llvm::Triple::EnvironmentType Env) {
@@ -110,6 +109,10 @@ bool ToolChain::useRelaxRelocations() const {
return ENABLE_X86_RELAX_RELOCATIONS;
}
+bool ToolChain::defaultToIEEELongDouble() const {
+ return PPC_LINUX_DEFAULT_IEEELONGDOUBLE && getTriple().isOSLinux();
+}
+
SanitizerArgs
ToolChain::getSanitizerArgs(const llvm::opt::ArgList &JobArgs) const {
SanitizerArgs SanArgs(*this, JobArgs, !SanitizerArgsChecked);
@@ -485,16 +488,35 @@ const char *ToolChain::getCompilerRTArgString(const llvm::opt::ArgList &Args,
return Args.MakeArgString(getCompilerRT(Args, Component, Type));
}
-std::string ToolChain::getRuntimePath() const {
- SmallString<128> P(D.ResourceDir);
- llvm::sys::path::append(P, "lib", getTripleString());
- return std::string(P.str());
+ToolChain::path_list ToolChain::getRuntimePaths() const {
+ path_list Paths;
+ auto addPathForTriple = [this, &Paths](const llvm::Triple &Triple) {
+ SmallString<128> P(D.ResourceDir);
+ llvm::sys::path::append(P, "lib", Triple.str());
+ Paths.push_back(std::string(P.str()));
+ };
+
+ addPathForTriple(getTriple());
+
+ // Android targets may include an API level at the end. We still want to fall
+ // back on a path without the API level.
+ if (getTriple().isAndroid() &&
+ getTriple().getEnvironmentName() != "android") {
+ llvm::Triple TripleWithoutLevel = getTriple();
+ TripleWithoutLevel.setEnvironmentName("android");
+ addPathForTriple(TripleWithoutLevel);
+ }
+
+ return Paths;
}
-std::string ToolChain::getStdlibPath() const {
+ToolChain::path_list ToolChain::getStdlibPaths() const {
+ path_list Paths;
SmallString<128> P(D.Dir);
llvm::sys::path::append(P, "..", "lib", getTripleString());
- return std::string(P.str());
+ Paths.push_back(std::string(P.str()));
+
+ return Paths;
}
std::string ToolChain::getArchSpecificLibPath() const {
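getRuntimePaths() above builds candidate directories as ResourceDir/lib/<triple>, adding an extra candidate for Android triples whose environment carries an API level (for example android30) so they fall back to the plain android spelling. A small sketch of the resulting layout, using a hypothetical resource dir:

#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::SmallString<128> P("/usr/lib/clang/14.0.0");  // hypothetical ResourceDir
  llvm::sys::path::append(P, "lib", "aarch64-unknown-linux-android30");
  llvm::outs() << P.str() << "\n";                    // first candidate

  llvm::SmallString<128> Q("/usr/lib/clang/14.0.0");
  llvm::sys::path::append(Q, "lib", "aarch64-unknown-linux-android");
  llvm::outs() << Q.str() << "\n";                    // API-level-free fallback
  return 0;
}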
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp
index f282f04b7931..6899f9360da5 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp
@@ -16,6 +16,7 @@
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Options.h"
+#include "clang/Driver/Tool.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/FormatAdapters.h"
@@ -95,9 +96,9 @@ const char *AMDGCN::OpenMPLinker::constructLLVMLinkCommand(
if (II.isFilename())
CmdArgs.push_back(II.getFilename());
+ bool HasLibm = false;
if (Args.hasArg(options::OPT_l)) {
auto Lm = Args.getAllArgValues(options::OPT_l);
- bool HasLibm = false;
for (auto &Lib : Lm) {
if (Lib == "m") {
HasLibm = true;
@@ -131,9 +132,8 @@ const char *AMDGCN::OpenMPLinker::constructLLVMLinkCommand(
}
AddStaticDeviceLibsLinking(C, *this, JA, Inputs, Args, CmdArgs, "amdgcn",
- SubArchName,
- /* bitcode SDL?*/ true,
- /* PostClang Link? */ false);
+ SubArchName, /*isBitCodeSDL=*/true,
+ /*postClangLink=*/false);
// Add an intermediate output file.
CmdArgs.push_back("-o");
const char *OutputFileName =
@@ -144,6 +144,26 @@ const char *AMDGCN::OpenMPLinker::constructLLVMLinkCommand(
C.addCommand(std::make_unique<Command>(
JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs,
InputInfo(&JA, Args.MakeArgString(OutputFileName))));
+
+  // If we linked in libm definitions late, we run another round of optimizations
+ // to inline the definitions and fold what is foldable.
+ if (HasLibm) {
+ ArgStringList OptCmdArgs;
+ const char *OptOutputFileName =
+ getOutputFileName(C, OutputFilePrefix, "-linked-opt", "bc");
+ addLLCOptArg(Args, OptCmdArgs);
+ OptCmdArgs.push_back(OutputFileName);
+ OptCmdArgs.push_back("-o");
+ OptCmdArgs.push_back(OptOutputFileName);
+ const char *OptExec =
+ Args.MakeArgString(getToolChain().GetProgramPath("opt"));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), OptExec, OptCmdArgs,
+ InputInfo(&JA, Args.MakeArgString(OutputFileName)),
+ InputInfo(&JA, Args.MakeArgString(OptOutputFileName))));
+ OutputFileName = OptOutputFileName;
+ }
+
return OutputFileName;
}
@@ -286,10 +306,22 @@ llvm::opt::DerivedArgList *AMDGPUOpenMPToolChain::TranslateArgs(
const OptTable &Opts = getDriver().getOpts();
- if (DeviceOffloadKind != Action::OFK_OpenMP) {
- for (Arg *A : Args) {
- DAL->append(A);
+ if (DeviceOffloadKind == Action::OFK_OpenMP) {
+ for (Arg *A : Args)
+ if (!llvm::is_contained(*DAL, A))
+ DAL->append(A);
+
+ std::string Arch = DAL->getLastArgValue(options::OPT_march_EQ).str();
+ if (Arch.empty()) {
+ checkSystemForAMDGPU(Args, *this, Arch);
+ DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_march_EQ), Arch);
}
+
+ return DAL;
+ }
+
+ for (Arg *A : Args) {
+ DAL->append(A);
}
if (!BoundArch.empty()) {
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
index be13d6d583ce..ca0ca4bf4eea 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
@@ -11,6 +11,7 @@
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/Support/AArch64TargetParser.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/Host.h"
@@ -98,12 +99,14 @@ static bool DecodeAArch64Features(const Driver &D, StringRef text,
Features.push_back("-sve2-sm4");
}
- // +sve implies +f32mm if the base architecture is v8.6A, v8.7A, v9.1A or
- // v9.2A. It isn't the case in general that sve implies both f64mm and f32mm
+ // +sve implies +f32mm if the base architecture is >= v8.6A (except v9A)
+ // It isn't the case in general that sve implies both f64mm and f32mm
if ((ArchKind == llvm::AArch64::ArchKind::ARMV8_6A ||
ArchKind == llvm::AArch64::ArchKind::ARMV8_7A ||
+ ArchKind == llvm::AArch64::ArchKind::ARMV8_8A ||
ArchKind == llvm::AArch64::ArchKind::ARMV9_1A ||
- ArchKind == llvm::AArch64::ArchKind::ARMV9_2A) &&
+ ArchKind == llvm::AArch64::ArchKind::ARMV9_2A ||
+ ArchKind == llvm::AArch64::ArchKind::ARMV9_3A) &&
Feature == "sve")
Features.push_back("+f32mm");
}
@@ -219,6 +222,7 @@ getAArch64MicroArchFeaturesFromMcpu(const Driver &D, StringRef Mcpu,
void aarch64::getAArch64TargetFeatures(const Driver &D,
const llvm::Triple &Triple,
const ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs,
std::vector<StringRef> &Features,
bool ForAS) {
Arg *A;
@@ -390,9 +394,11 @@ fp16_fml_fallthrough:
}
if (std::find(ItBegin, ItEnd, "+v8.4a") != ItEnd ||
+ std::find(ItBegin, ItEnd, "+v8.8a") != ItEnd ||
std::find(ItBegin, ItEnd, "+v9a") != ItEnd ||
std::find(ItBegin, ItEnd, "+v9.1a") != ItEnd ||
- std::find(ItBegin, ItEnd, "+v9.2a") != ItEnd) {
+ std::find(ItBegin, ItEnd, "+v9.2a") != ItEnd ||
+ std::find(ItBegin, ItEnd, "+v9.3a") != ItEnd) {
if (HasCrypto && !NoCrypto) {
// Check if we have NOT disabled an algorithm with something like:
// +crypto, -algorithm
@@ -451,7 +457,8 @@ fp16_fml_fallthrough:
}
}
- const char *Archs[] = {"+v8.6a", "+v8.7a", "+v9.1a", "+v9.2a"};
+ const char *Archs[] = {"+v8.6a", "+v8.7a", "+v8.8a",
+ "+v9.1a", "+v9.2a", "+v9.3a"};
auto Pos = std::find_first_of(Features.begin(), Features.end(),
std::begin(Archs), std::end(Archs));
if (Pos != std::end(Features))
@@ -459,10 +466,16 @@ fp16_fml_fallthrough:
if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
options::OPT_munaligned_access)) {
- if (A->getOption().matches(options::OPT_mno_unaligned_access))
+ if (A->getOption().matches(options::OPT_mno_unaligned_access)) {
Features.push_back("+strict-align");
- } else if (Triple.isOSOpenBSD())
+ if (!ForAS)
+ CmdArgs.push_back("-Wunaligned-access");
+ }
+ } else if (Triple.isOSOpenBSD()) {
Features.push_back("+strict-align");
+ if (!ForAS)
+ CmdArgs.push_back("-Wunaligned-access");
+ }
if (Args.hasArg(options::OPT_ffixed_x1))
Features.push_back("+reserve-x1");
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.h
index d47c402d4a42..0cdc2ec725e0 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.h
@@ -22,6 +22,7 @@ namespace aarch64 {
void getAArch64TargetFeatures(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs,
std::vector<llvm::StringRef> &Features,
bool ForAS);
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp
index 4013cf230026..16af9f6d7129 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp
@@ -12,6 +12,7 @@
#include "clang/Driver/Options.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/Support/ARMTargetParser.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/Host.h"
@@ -769,10 +770,12 @@ fp16_fml_fallthrough:
}
// Kernel code has more strict alignment requirements.
- if (KernelOrKext)
+ if (KernelOrKext) {
Features.push_back("+strict-align");
- else if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
- options::OPT_munaligned_access)) {
+ if (!ForAS)
+ CmdArgs.push_back("-Wunaligned-access");
+ } else if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
+ options::OPT_munaligned_access)) {
if (A->getOption().matches(options::OPT_munaligned_access)) {
// No v6M core supports unaligned memory access (v6M ARM ARM A3.2).
if (Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v6m)
@@ -781,8 +784,11 @@ fp16_fml_fallthrough:
// access either.
else if (Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v8m_baseline)
D.Diag(diag::err_target_unsupported_unaligned) << "v8m.base";
- } else
+ } else {
Features.push_back("+strict-align");
+ if (!ForAS)
+ CmdArgs.push_back("-Wunaligned-access");
+ }
} else {
// Assume pre-ARMv6 doesn't support unaligned accesses.
//
@@ -801,14 +807,23 @@ fp16_fml_fallthrough:
int VersionNum = getARMSubArchVersionNumber(Triple);
if (Triple.isOSDarwin() || Triple.isOSNetBSD()) {
if (VersionNum < 6 ||
- Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v6m)
+ Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v6m) {
Features.push_back("+strict-align");
+ if (!ForAS)
+ CmdArgs.push_back("-Wunaligned-access");
+ }
} else if (Triple.isOSLinux() || Triple.isOSNaCl() ||
Triple.isOSWindows()) {
- if (VersionNum < 7)
+ if (VersionNum < 7) {
Features.push_back("+strict-align");
- } else
+ if (!ForAS)
+ CmdArgs.push_back("-Wunaligned-access");
+ }
+ } else {
Features.push_back("+strict-align");
+ if (!ForAS)
+ CmdArgs.push_back("-Wunaligned-access");
+ }
}
// llvm does not support reserving registers in general. There is support
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h
index 881b63bd36b9..862a2f2796be 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h
@@ -13,6 +13,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Option/Option.h"
+#include "llvm/Support/ARMTargetParser.h"
#include "llvm/Support/TargetParser.h"
#include <string>
#include <vector>
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp
index 65347a38490e..4386e395bc6c 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp
@@ -33,6 +33,7 @@
#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/SanitizerArgs.h"
+#include "clang/Driver/Types.h"
#include "clang/Driver/XRayArgs.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Config/llvm-config.h"
@@ -346,7 +347,8 @@ static void getTargetFeatures(const Driver &D, const llvm::Triple &Triple,
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_32:
case llvm::Triple::aarch64_be:
- aarch64::getAArch64TargetFeatures(D, Triple, Args, Features, ForAS);
+ aarch64::getAArch64TargetFeatures(D, Triple, Args, CmdArgs, Features,
+ ForAS);
break;
case llvm::Triple::x86:
case llvm::Triple::x86_64:
@@ -1115,7 +1117,7 @@ static void RenderDebugInfoCompressionArgs(const ArgList &Args,
StringRef Value = A->getValue();
if (Value == "none") {
CmdArgs.push_back("--compress-debug-sections=none");
- } else if (Value == "zlib" || Value == "zlib-gnu") {
+ } else if (Value == "zlib") {
if (llvm::zlib::isAvailable()) {
CmdArgs.push_back(
Args.MakeArgString("--compress-debug-sections=" + Twine(Value)));
@@ -1929,6 +1931,11 @@ void Clang::AddMIPSTargetArgs(const ArgList &Args,
}
}
+ if (Args.getLastArg(options::OPT_mfix4300)) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-mfix4300");
+ }
+
if (Arg *A = Args.getLastArg(options::OPT_G)) {
StringRef v = A->getValue();
CmdArgs.push_back("-mllvm");
@@ -2055,7 +2062,7 @@ void Clang::AddPPCTargetArgs(const ArgList &Args,
}
}
- bool IEEELongDouble = false;
+ bool IEEELongDouble = getToolChain().defaultToIEEELongDouble();
for (const Arg *A : Args.filtered(options::OPT_mabi_EQ)) {
StringRef V = A->getValue();
if (V == "ieeelongdouble")
@@ -2897,6 +2904,7 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
AssociativeMath = true;
ReciprocalMath = true;
SignedZeros = false;
+ ApproxFunc = true;
TrappingMath = false;
FPExceptionBehavior = "";
break;
@@ -2904,6 +2912,7 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
AssociativeMath = false;
ReciprocalMath = false;
SignedZeros = true;
+ ApproxFunc = false;
TrappingMath = true;
FPExceptionBehavior = "strict";
@@ -2923,6 +2932,7 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
MathErrno = false;
AssociativeMath = true;
ReciprocalMath = true;
+ ApproxFunc = true;
SignedZeros = false;
TrappingMath = false;
RoundingFPMath = false;
@@ -2938,6 +2948,7 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
MathErrno = TC.IsMathErrnoDefault();
AssociativeMath = false;
ReciprocalMath = false;
+ ApproxFunc = false;
SignedZeros = true;
// -fno_fast_math restores default denormal and fpcontract handling
DenormalFPMath = DefaultDenormalFPMath;
@@ -2956,7 +2967,7 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
// If -ffp-model=strict has been specified on command line but
// subsequent options conflict then emit warning diagnostic.
if (HonorINFs && HonorNaNs && !AssociativeMath && !ReciprocalMath &&
- SignedZeros && TrappingMath && RoundingFPMath &&
+ SignedZeros && TrappingMath && RoundingFPMath && !ApproxFunc &&
DenormalFPMath == llvm::DenormalMode::getIEEE() &&
DenormalFP32Math == llvm::DenormalMode::getIEEE() &&
FPContract.equals("off"))
@@ -2989,7 +3000,7 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
CmdArgs.push_back("-fmath-errno");
if (!MathErrno && AssociativeMath && ReciprocalMath && !SignedZeros &&
- !TrappingMath)
+ ApproxFunc && !TrappingMath)
CmdArgs.push_back("-menable-unsafe-fp-math");
if (!SignedZeros)
@@ -3040,7 +3051,7 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
// -ffast-math enables the __FAST_MATH__ preprocessor macro, but check for the
// individual features enabled by -ffast-math instead of the option itself as
// that's consistent with gcc's behaviour.
- if (!HonorINFs && !HonorNaNs && !MathErrno && AssociativeMath &&
+ if (!HonorINFs && !HonorNaNs && !MathErrno && AssociativeMath && ApproxFunc &&
ReciprocalMath && !SignedZeros && !TrappingMath && !RoundingFPMath) {
CmdArgs.push_back("-ffast-math");
if (FPModel.equals("fast")) {
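As the comment above notes, -ffast-math (and with it the __FAST_MATH__ macro) is now forwarded only when the whole bundle of individual options, including approximate functions, is in effect. The macro is easy to observe from user code:

#include <cstdio>

int main() {
#ifdef __FAST_MATH__
  // Defined when the full fast-math bundle (no INFs/NaNs, no math errno,
  // associative/reciprocal math, approximate functions, no signed zeros,
  // no trapping math, default rounding) is enabled.
  std::puts("__FAST_MATH__ is defined");
#else
  std::puts("__FAST_MATH__ is not defined");
#endif
  return 0;
}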
@@ -3217,9 +3228,7 @@ static void RenderSSPOptions(const Driver &D, const ToolChain &TC,
return;
}
// Check whether the target subarch supports the hardware TLS register
- if (arm::getARMSubArchVersionNumber(EffectiveTriple) < 7 &&
- llvm::ARM::parseArch(EffectiveTriple.getArchName()) !=
- llvm::ARM::ArchKind::ARMV6T2) {
+ if (!arm::isHardTPSupported(EffectiveTriple)) {
D.Diag(diag::err_target_unsupported_tp_hard)
<< EffectiveTriple.getArchName();
return;
@@ -4589,6 +4598,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
} else if (JA.getType() == types::TY_RewrittenLegacyObjC) {
CmdArgs.push_back("-rewrite-objc");
rewriteKind = RK_Fragile;
+ } else if (JA.getType() == types::TY_API_INFO) {
+ CmdArgs.push_back("-extract-api");
} else {
assert(JA.getType() == types::TY_PP_Asm && "Unexpected output type!");
}
@@ -5310,7 +5321,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// as errors, but until then, we can live with a warning being emitted by the
// compiler. This way, Clang can be used to compile code with scalable vectors
// and identify possible issues.
- if (isa<BackendJobAction>(JA)) {
+ if (isa<AssembleJobAction>(JA) || isa<CompileJobAction>(JA) ||
+ isa<BackendJobAction>(JA)) {
CmdArgs.push_back("-mllvm");
CmdArgs.push_back("-treat-scalable-fixed-error-as-warning");
}
@@ -5813,6 +5825,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-ftype-visibility");
CmdArgs.push_back("default");
}
+ } else if (IsOpenMPDevice) {
+ // When compiling for the OpenMP device we want protected visibility by
+    // default. This prevents the device from accidentally preempting code on the
+ // host, makes the system more robust, and improves performance.
+ CmdArgs.push_back("-fvisibility");
+ CmdArgs.push_back("protected");
}
if (!RawTriple.isPS4())
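The new OpenMP-device default above amounts to passing -fvisibility protected to the device cc1. For a single symbol the same ELF semantics can be spelled with an attribute; a minimal sketch:

// Protected visibility: the symbol is still exported from the object, but
// references made inside the defining module bind locally and cannot be
// preempted by another definition (for example, one on the host side).
__attribute__((visibility("protected"))) int device_counter = 0;

int bump() { return ++device_counter; } // binds to the local definition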
@@ -5992,6 +6010,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
+ Args.AddLastArg(CmdArgs, options::OPT_fms_hotpatch);
+
if (TC.SupportsProfiling()) {
Args.AddLastArg(CmdArgs, options::OPT_pg);
@@ -6149,6 +6169,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.MakeArgString(Twine("-fcf-protection=") + A->getValue()));
}
+ if (IsUsingLTO)
+ Args.AddLastArg(CmdArgs, options::OPT_mibt_seal);
+
// Forward -f options with positive and negative forms; we translate these by
// hand. Do not propagate PGO options to the GPU-side compilations as the
// profile info is for the host-side compilation only.
@@ -6663,6 +6686,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_dM);
Args.AddLastArg(CmdArgs, options::OPT_dD);
+ Args.AddLastArg(CmdArgs, options::OPT_dI);
Args.AddLastArg(CmdArgs, options::OPT_fmax_tokens_EQ);
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h
index 00e0490e069b..013cd2341e17 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_Clang_H
-#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_Clang_H
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_CLANG_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_CLANG_H
#include "MSVC.h"
#include "clang/Basic/DebugInfoOptions.h"
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp
index 407f81a2ae09..1d30090ca21c 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp
@@ -286,13 +286,13 @@ void tools::addLinkerCompressDebugSectionsOption(
const ToolChain &TC, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) {
// GNU ld supports --compress-debug-sections=none|zlib|zlib-gnu|zlib-gabi
- // whereas zlib is an alias to zlib-gabi. Therefore -gz=none|zlib|zlib-gnu
- // are translated to --compress-debug-sections=none|zlib|zlib-gnu.
- // -gz is not translated since ld --compress-debug-sections option requires an
+ // whereas zlib is an alias to zlib-gabi and zlib-gnu is obsoleted. Therefore
+ // -gz=none|zlib are translated to --compress-debug-sections=none|zlib. -gz
+ // is not translated since ld --compress-debug-sections option requires an
// argument.
if (const Arg *A = Args.getLastArg(options::OPT_gz_EQ)) {
StringRef V = A->getValue();
- if (V == "none" || V == "zlib" || V == "zlib-gnu")
+ if (V == "none" || V == "zlib")
CmdArgs.push_back(Args.MakeArgString("--compress-debug-sections=" + V));
else
TC.getDriver().Diag(diag::err_drv_unsupported_option_argument)
@@ -832,6 +832,10 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
return;
}
+ // Always link the static runtime for executable.
+ if (SanArgs.needsAsanRt())
+ HelperStaticRuntimes.push_back("asan_static");
+
// Each static runtime that has a DSO counterpart above is excluded below,
// but runtimes that exist only as static are not affected by needsSharedRt.
@@ -1186,10 +1190,9 @@ tools::ParsePICArgs(const ToolChain &ToolChain, const ArgList &Args) {
options::OPT_fpic, options::OPT_fno_pic,
options::OPT_fPIE, options::OPT_fno_PIE,
options::OPT_fpie, options::OPT_fno_pie);
- if (Triple.isOSWindows() && LastPICArg &&
- LastPICArg ==
- Args.getLastArg(options::OPT_fPIC, options::OPT_fpic,
- options::OPT_fPIE, options::OPT_fpie)) {
+ if (Triple.isOSWindows() && !Triple.isOSCygMing() && LastPICArg &&
+ LastPICArg == Args.getLastArg(options::OPT_fPIC, options::OPT_fpic,
+ options::OPT_fPIE, options::OPT_fpie)) {
ToolChain.getDriver().Diag(diag::err_drv_unsupported_opt_for_target)
<< LastPICArg->getSpelling() << Triple.str();
if (Triple.getArch() == llvm::Triple::x86_64)
@@ -1724,7 +1727,7 @@ bool tools::GetSDLFromOffloadArchive(
std::string OutputLib = D.GetTemporaryPath(
Twine(Prefix + Lib + "-" + Arch + "-" + Target).str(), "a");
- C.addTempFile(C.getArgs().MakeArgString(OutputLib.c_str()));
+ C.addTempFile(C.getArgs().MakeArgString(OutputLib));
ArgStringList CmdArgs;
SmallString<128> DeviceTriple;
@@ -1747,20 +1750,20 @@ bool tools::GetSDLFromOffloadArchive(
T.getToolChain().GetProgramPath("clang-offload-bundler"));
ArgStringList UBArgs;
- UBArgs.push_back(C.getArgs().MakeArgString(UnbundleArg.c_str()));
- UBArgs.push_back(C.getArgs().MakeArgString(TypeArg.c_str()));
- UBArgs.push_back(C.getArgs().MakeArgString(InputArg.c_str()));
- UBArgs.push_back(C.getArgs().MakeArgString(OffloadArg.c_str()));
- UBArgs.push_back(C.getArgs().MakeArgString(OutputArg.c_str()));
+ UBArgs.push_back(C.getArgs().MakeArgString(UnbundleArg));
+ UBArgs.push_back(C.getArgs().MakeArgString(TypeArg));
+ UBArgs.push_back(C.getArgs().MakeArgString(InputArg));
+ UBArgs.push_back(C.getArgs().MakeArgString(OffloadArg));
+ UBArgs.push_back(C.getArgs().MakeArgString(OutputArg));
// Add this flag to not exit from clang-offload-bundler if no compatible
// code object is found in heterogenous archive library.
std::string AdditionalArgs("-allow-missing-bundles");
- UBArgs.push_back(C.getArgs().MakeArgString(AdditionalArgs.c_str()));
+ UBArgs.push_back(C.getArgs().MakeArgString(AdditionalArgs));
C.addCommand(std::make_unique<Command>(
JA, T, ResponseFileSupport::AtFileCurCP(), UBProgram, UBArgs, Inputs,
- InputInfo(&JA, C.getArgs().MakeArgString(OutputLib.c_str()))));
+ InputInfo(&JA, C.getArgs().MakeArgString(OutputLib))));
if (postClangLink)
CC1Args.push_back("-mlink-builtin-bitcode");
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp
index ee573b89bed1..7324339efaa6 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp
@@ -612,8 +612,9 @@ void NVPTX::OpenMPLinker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(CubinF);
}
- AddStaticDeviceLibsLinking(C, *this, JA, Inputs, Args, CmdArgs, "nvptx", GPUArch,
- false, false);
+ AddStaticDeviceLibsLinking(C, *this, JA, Inputs, Args, CmdArgs, "nvptx",
+ GPUArch, /*isBitCodeSDL=*/false,
+ /*postClangLink=*/false);
// Find nvlink and pass it as "--nvlink-path=" argument of
// clang-nvlink-wrapper.
@@ -752,8 +753,9 @@ void CudaToolChain::addClangTargetOptions(
addOpenMPDeviceRTL(getDriver(), DriverArgs, CC1Args, BitcodeSuffix,
getTriple());
- AddStaticDeviceLibsPostLinking(getDriver(), DriverArgs, CC1Args, "nvptx", GpuArch,
- /* bitcode SDL?*/ true, /* PostClang Link? */ true);
+ AddStaticDeviceLibsPostLinking(getDriver(), DriverArgs, CC1Args, "nvptx",
+ GpuArch, /*isBitCodeSDL=*/true,
+ /*postClangLink=*/true);
}
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp
index de635f5816cf..05c58a8f43a8 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp
@@ -247,7 +247,8 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
assert(Output.isNothing() && "Invalid output.");
}
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles,
+ options::OPT_r)) {
const char *crt1 = nullptr;
if (!Args.hasArg(options::OPT_shared)) {
if (Args.hasArg(options::OPT_pg))
@@ -295,7 +296,8 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
unsigned Major = ToolChain.getTriple().getOSMajorVersion();
bool Profiling = Args.hasArg(options::OPT_pg) && Major != 0 && Major < 14;
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs,
+ options::OPT_r)) {
// Use the static OpenMP runtime with -static-openmp
bool StaticOpenMP = Args.hasArg(options::OPT_static_openmp) &&
!Args.hasArg(options::OPT_static);
@@ -358,7 +360,8 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles,
+ options::OPT_r)) {
if (Args.hasArg(options::OPT_shared) || IsPIE)
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtendS.o")));
else
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp
index a7afec6963a1..bd1600d060c8 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp
@@ -109,7 +109,8 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles,
+ options::OPT_r)) {
if (!Args.hasArg(options::OPT_shared)) {
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("Scrt1.o")));
}
@@ -131,7 +132,8 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
ToolChain.addProfileRTLibs(Args, CmdArgs);
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs,
+ options::OPT_r)) {
if (Args.hasArg(options::OPT_static))
CmdArgs.push_back("-Bdynamic");
@@ -191,9 +193,11 @@ Fuchsia::Fuchsia(const Driver &D, const llvm::Triple &Triple,
auto FilePaths = [&](const Multilib &M) -> std::vector<std::string> {
std::vector<std::string> FP;
- SmallString<128> P(getStdlibPath());
- llvm::sys::path::append(P, M.gccSuffix());
- FP.push_back(std::string(P.str()));
+ for (const std::string &Path : getStdlibPaths()) {
+ SmallString<128> P(Path);
+ llvm::sys::path::append(P, M.gccSuffix());
+ FP.push_back(std::string(P.str()));
+ }
return FP;
};
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp
index 7aeadd84dfee..7a9570a686f4 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp
@@ -487,7 +487,8 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles,
+ options::OPT_r)) {
if (!isAndroid && !IsIAMCU) {
const char *crt1 = nullptr;
if (!Args.hasArg(options::OPT_shared)) {
@@ -563,7 +564,8 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
getToolChain().addProfileRTLibs(Args, CmdArgs);
if (D.CCCIsCXX() &&
- !Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ !Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs,
+ options::OPT_r)) {
if (ToolChain.ShouldLinkCXXStdlib(Args)) {
bool OnlyLibstdcxxStatic = Args.hasArg(options::OPT_static_libstdcxx) &&
!Args.hasArg(options::OPT_static);
@@ -578,7 +580,7 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// Silence warnings when linking C code with a C++ '-stdlib' argument.
Args.ClaimAllArgs(options::OPT_stdlib_EQ);
- if (!Args.hasArg(options::OPT_nostdlib)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_r)) {
if (!Args.hasArg(options::OPT_nodefaultlibs)) {
if (IsStatic || IsStaticPIE)
CmdArgs.push_back("--start-group");
@@ -692,7 +694,7 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
CmdArgs.push_back("--compress-debug-sections");
} else {
StringRef Value = A->getValue();
- if (Value == "none" || Value == "zlib" || Value == "zlib-gnu") {
+ if (Value == "none" || Value == "zlib") {
CmdArgs.push_back(
Args.MakeArgString("--compress-debug-sections=" + Twine(Value)));
} else {
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp
index e413640abad3..af74b108e04e 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp
@@ -324,6 +324,12 @@ ToolChain::RuntimeLibType Linux::GetDefaultRuntimeLibType() const {
return Generic_ELF::GetDefaultRuntimeLibType();
}
+unsigned Linux::GetDefaultDwarfVersion() const {
+ if (getTriple().isAndroid())
+ return 4;
+ return ToolChain::GetDefaultDwarfVersion();
+}
+
ToolChain::CXXStdlibType Linux::GetDefaultCXXStdlibType() const {
if (getTriple().isAndroid())
return ToolChain::CST_Libcxx;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.h
index a5ec33bd44f1..a5648d79d655 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.h
@@ -40,6 +40,7 @@ public:
void AddIAMCUIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
RuntimeLibType GetDefaultRuntimeLibType() const override;
+ unsigned GetDefaultDwarfVersion() const override;
CXXStdlibType GetDefaultCXXStdlibType() const override;
bool
IsAArch64OutlineAtomicsDefault(const llvm::opt::ArgList &Args) const override;
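
Editorial note: here, and again in the MSVC and WebAssembly headers further down, a toolchain pins its default debug-info format to DWARF v4 through a GetDefaultDwarfVersion() override, while other targets keep the generic driver default (which moves to DWARF v5 around this LLVM release). An illustration of the dispatch, reduced to a self-contained sketch:

    #include <cstdio>

    // Illustration only: the override pattern the Android/Linux, MSVC, and
    // WebAssembly toolchains use in this merge to stay on DWARF v4.
    struct ToolChainBase {
      virtual ~ToolChainBase() = default;
      virtual unsigned GetDefaultDwarfVersion() const { return 5; } // generic default
    };

    struct AndroidLikeToolChain : ToolChainBase {
      unsigned GetDefaultDwarfVersion() const override { return 4; }
    };

    int main() {
      AndroidLikeToolChain TC;
      std::printf("default DWARF version: %u\n", TC.GetDefaultDwarfVersion());
    }
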
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp
index 66e9d8ab525a..18cef288f018 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp
@@ -47,7 +47,14 @@
// Make sure this comes before MSVCSetupApi.h
#include <comdef.h>
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wnon-virtual-dtor"
+#endif
#include "MSVCSetupApi.h"
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
#include "llvm/Support/COM.h"
_COM_SMARTPTR_TYPEDEF(ISetupConfiguration, __uuidof(ISetupConfiguration));
_COM_SMARTPTR_TYPEDEF(ISetupConfiguration2, __uuidof(ISetupConfiguration2));
@@ -511,6 +518,11 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_g_Group, options::OPT__SLASH_Z7))
CmdArgs.push_back("-debug");
+ // If we specify /hotpatch, let the linker add padding in front of each
+ // function, like MSVC does.
+ if (Args.hasArg(options::OPT_fms_hotpatch, options::OPT__SLASH_hotpatch))
+ CmdArgs.push_back("-functionpadmin");
+
// Pass on /Brepro if it was passed to the compiler.
// Note that /Brepro maps to -mno-incremental-linker-compatible.
bool DefaultIncrementalLinkerCompatible =
@@ -1333,6 +1345,15 @@ void MSVCToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, WindowsSDKDir,
"Include", windowsSDKIncludeVersion,
"winrt");
+ if (major >= 10) {
+ llvm::VersionTuple Tuple;
+ if (!Tuple.tryParse(windowsSDKIncludeVersion) &&
+ Tuple.getSubminor().getValueOr(0) >= 17134) {
+ AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, WindowsSDKDir,
+ "Include", windowsSDKIncludeVersion,
+ "cppwinrt");
+ }
+ }
} else {
AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, WindowsSDKDir,
"Include");
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h
index 8f033de09bf6..c842773996ed 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h
@@ -69,6 +69,10 @@ public:
return llvm::DebuggerKind::Default;
}
+ unsigned GetDefaultDwarfVersion() const override {
+ return 4;
+ }
+
enum class SubDirectoryType {
Bin,
Include,
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVCSetupApi.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVCSetupApi.h
index a890b85fd5e9..28e6e3e08e37 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVCSetupApi.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVCSetupApi.h
@@ -28,6 +28,11 @@
#pragma once
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wnon-virtual-dtor"
+#endif
+
// Constants
//
#ifndef E_NOTFOUND
@@ -512,3 +517,7 @@ STDMETHODIMP GetSetupConfiguration(_Out_ ISetupConfiguration **ppConfiguration,
#ifdef __cplusplus
}
#endif
+
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp
index ecce2f062bd7..0501f9737404 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp
@@ -164,6 +164,9 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("--enable-auto-image-base");
}
+ if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
+ CmdArgs.push_back("--no-demangle");
+
CmdArgs.push_back("-o");
const char *OutputFile = Output.getFilename();
// GCC implicitly adds an .exe extension if it is given an output file name
@@ -486,10 +489,7 @@ bool toolchains::MinGW::isPIEDefault(const llvm::opt::ArgList &Args) const {
return false;
}
-bool toolchains::MinGW::isPICDefaultForced() const {
- return getArch() == llvm::Triple::x86_64 ||
- getArch() == llvm::Triple::aarch64;
-}
+bool toolchains::MinGW::isPICDefaultForced() const { return true; }
llvm::ExceptionHandling
toolchains::MinGW::GetExceptionModel(const ArgList &Args) const {
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/PPCLinux.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/PPCLinux.cpp
index af2e3a21a0af..e480d8bd8703 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/PPCLinux.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/PPCLinux.cpp
@@ -8,11 +8,51 @@
#include "PPCLinux.h"
#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
+using namespace clang::driver;
using namespace clang::driver::toolchains;
using namespace llvm::opt;
+using namespace llvm::sys;
+
+// Glibc older than 2.32 doesn't fully support IEEE float128. Here we check
+// glibc version by looking at dynamic linker name.
+static bool GlibcSupportsFloat128(const std::string &Linker) {
+ llvm::SmallVector<char, 16> Path;
+
+ // Resolve potential symlinks to linker.
+ if (fs::real_path(Linker, Path))
+ return false;
+ llvm::StringRef LinkerName =
+ path::filename(llvm::StringRef(Path.data(), Path.size()));
+
+ // Since glibc 2.34, the installed .so file is no longer a symlink, but we
+ // can still safely assume it's newer than 2.32.
+ if (LinkerName.startswith("ld64.so"))
+ return true;
+
+ if (!LinkerName.startswith("ld-2."))
+ return false;
+ unsigned Minor = (LinkerName[5] - '0') * 10 + (LinkerName[6] - '0');
+ if (Minor < 32)
+ return false;
+
+ return true;
+}
+
+PPCLinuxToolChain::PPCLinuxToolChain(const Driver &D,
+ const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args)
+ : Linux(D, Triple, Args) {
+ if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ)) {
+ StringRef ABIName = A->getValue();
+ if (ABIName == "ieeelongdouble" && !SupportIEEEFloat128(D, Triple, Args))
+ D.Diag(diag::warn_drv_unsupported_float_abi_by_lib) << ABIName;
+ }
+}
void PPCLinuxToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
@@ -26,3 +66,20 @@ void PPCLinuxToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
Linux::AddClangSystemIncludeArgs(DriverArgs, CC1Args);
}
+
+bool PPCLinuxToolChain::SupportIEEEFloat128(
+ const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args) const {
+ if (!Triple.isLittleEndian() || !Triple.isPPC64())
+ return false;
+
+ if (Args.hasArg(options::OPT_nostdlib, options::OPT_nostdlibxx))
+ return true;
+
+ bool HasUnsupportedCXXLib =
+ ToolChain::GetCXXStdlibType(Args) == CST_Libcxx &&
+ GCCInstallation.getVersion().isOlderThan(12, 1, 0);
+
+ return GlibcSupportsFloat128(Linux::getDynamicLinker(Args)) &&
+ !(D.CCCIsCXX() && HasUnsupportedCXXLib);
+}
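
Editorial note: PPCLinuxToolChain now warns when -mabi=ieeelongdouble is requested but the environment cannot support it: the triple must be little-endian powerpc64, C++ links against libc++ additionally require a GCC installation of at least 12.1, and glibc must be 2.32 or newer, which the code infers from the dynamic linker's file name. A standalone sketch of that file-name heuristic, mirroring the hunk above:

    #include <cstdio>
    #include <cstring>

    // Standalone sketch of GlibcSupportsFloat128's heuristic: "ld64.so.*" means
    // glibc >= 2.34 (no versioned symlink anymore), otherwise the minor version
    // is read out of an "ld-2.NN.so" style name.
    static bool glibcSupportsFloat128(const char *LinkerName) {
      if (std::strncmp(LinkerName, "ld64.so", 7) == 0)
        return true;
      if (std::strncmp(LinkerName, "ld-2.", 5) != 0 || std::strlen(LinkerName) < 7)
        return false;
      unsigned Minor = (LinkerName[5] - '0') * 10 + (LinkerName[6] - '0');
      return Minor >= 32;
    }

    int main() {
      std::printf("ld-2.31.so -> %d\n", glibcSupportsFloat128("ld-2.31.so")); // 0
      std::printf("ld-2.32.so -> %d\n", glibcSupportsFloat128("ld-2.32.so")); // 1
      std::printf("ld64.so.2  -> %d\n", glibcSupportsFloat128("ld64.so.2"));  // 1
    }
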
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/PPCLinux.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/PPCLinux.h
index b3ef7b61dc3a..e0318ae8a3a2 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/PPCLinux.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/PPCLinux.h
@@ -18,12 +18,15 @@ namespace toolchains {
class LLVM_LIBRARY_VISIBILITY PPCLinuxToolChain : public Linux {
public:
PPCLinuxToolChain(const Driver &D, const llvm::Triple &Triple,
- const llvm::opt::ArgList &Args)
- : Linux(D, Triple, Args) {}
+ const llvm::opt::ArgList &Args);
void
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+
+private:
+ bool SupportIEEEFloat128(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args) const;
};
} // end namespace toolchains
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp
index 5783a733983a..bcf9147833dd 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp
@@ -23,8 +23,6 @@ using namespace clang::driver;
using namespace clang;
using namespace llvm::opt;
-using clang::driver::tools::AddLinkerInputs;
-
void tools::PS4cpu::addProfileRTArgs(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs) {
if ((Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs,
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.cpp
index 50d03e79bbb0..27de69550853 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.cpp
@@ -70,3 +70,24 @@ clang::driver::Tool *SPIRVToolChain::getTool(Action::ActionClass AC) const {
}
return ToolChain::getTool(AC);
}
+clang::driver::Tool *SPIRVToolChain::buildLinker() const {
+ return new tools::SPIRV::Linker(*this);
+}
+
+void SPIRV::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const ToolChain &ToolChain = getToolChain();
+ std::string Linker = ToolChain.GetProgramPath(getShortName());
+ ArgStringList CmdArgs;
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Args.MakeArgString(Linker), CmdArgs,
+ Inputs, Output));
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.h
index 229f7018e3b5..a16ae3ca51fa 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.h
@@ -39,6 +39,17 @@ public:
const char *LinkingOutput) const override;
};
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+public:
+ Linker(const ToolChain &TC) : Tool("SPIRV::Linker", "spirv-link", TC) {}
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
} // namespace SPIRV
} // namespace tools
@@ -68,6 +79,7 @@ public:
protected:
clang::driver::Tool *getTool(Action::ActionClass AC) const override;
+ Tool *buildLinker() const override;
private:
clang::driver::Tool *getTranslator() const;
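
Editorial note: the SPIR-V toolchain gains a link step; buildLinker() returns a SPIRV::Linker whose job simply runs the external spirv-link tool over the linker inputs with "-o <output>". A rough standalone sketch of the command line it assembles (the file names below are hypothetical):

    #include <cstdio>
    #include <string>
    #include <vector>

    // Illustration only: the shape of the command emitted by SPIRV::Linker.
    int main() {
      std::vector<std::string> CmdArgs = {"kernel_a.spv", "kernel_b.spv"};
      CmdArgs.push_back("-o");
      CmdArgs.push_back("linked.spv");
      std::string Cmd = "spirv-link"; // resolved via GetProgramPath() in the driver
      for (const std::string &Arg : CmdArgs)
        Cmd += " " + Arg;
      std::printf("%s\n", Cmd.c_str());
    }
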
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.cpp
index 4cdeec7f9d8a..1e43796be1ff 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.cpp
@@ -48,7 +48,8 @@ VEToolChain::VEToolChain(const Driver &D, const llvm::Triple &Triple,
// ${BINPATH}/../lib/ve-unknown-linux-gnu, (== getStdlibPath)
// ${RESOURCEDIR}/lib/linux/ve, (== getArchSpecificLibPath)
// ${SYSROOT}/opt/nec/ve/lib,
- getFilePaths().push_back(getStdlibPath());
+ for (auto &Path : getStdlibPaths())
+ getFilePaths().push_back(std::move(Path));
getFilePaths().push_back(getArchSpecificLibPath());
getFilePaths().push_back(computeSysRoot() + "/opt/nec/ve/lib");
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp
index a7298a9a71bf..3614272a5f74 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp
@@ -76,7 +76,7 @@ void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
const char *Crt1 = "crt1.o";
- const char *Entry = NULL;
+ const char *Entry = nullptr;
// If crt1-command.o exists, it supports new-style commands, so use it.
// Otherwise, use the old crt1.o. This is a temporary transition measure.
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.h
index c84e59675946..b4c3082a089a 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.h
@@ -51,6 +51,7 @@ private:
bool hasBlocksRuntime() const override;
bool SupportsProfiling() const override;
bool HasNativeLLVMSupport() const override;
+ unsigned GetDefaultDwarfVersion() const override { return 4; }
void
addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
diff --git a/contrib/llvm-project/clang/lib/Driver/Types.cpp b/contrib/llvm-project/clang/lib/Driver/Types.cpp
index 1bd187ad2fc0..8f6adc6c2ad1 100644
--- a/contrib/llvm-project/clang/lib/Driver/Types.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Types.cpp
@@ -143,6 +143,7 @@ bool types::isAcceptedByClang(ID Id) {
case TY_CXXModule: case TY_PP_CXXModule:
case TY_AST: case TY_ModuleFile: case TY_PCH:
case TY_LLVM_IR: case TY_LLVM_BC:
+ case TY_API_INFO:
return true;
}
}
diff --git a/contrib/llvm-project/clang/lib/Edit/RewriteObjCFoundationAPI.cpp b/contrib/llvm-project/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
index a3d388a5ae44..589bf8d216ed 100644
--- a/contrib/llvm-project/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
+++ b/contrib/llvm-project/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
@@ -704,7 +704,7 @@ static bool getLiteralInfo(SourceRange literalRange,
}
};
- while (1) {
+ while (true) {
if (Suff::has("u", text)) {
UpperU = false;
} else if (Suff::has("U", text)) {
diff --git a/contrib/llvm-project/clang/lib/Format/AffectedRangeManager.cpp b/contrib/llvm-project/clang/lib/Format/AffectedRangeManager.cpp
index 7ad1f7070d0a..f69f65c5ddf1 100644
--- a/contrib/llvm-project/clang/lib/Format/AffectedRangeManager.cpp
+++ b/contrib/llvm-project/clang/lib/Format/AffectedRangeManager.cpp
@@ -27,6 +27,7 @@ bool AffectedRangeManager::computeAffectedLines(
const AnnotatedLine *PreviousLine = nullptr;
while (I != E) {
AnnotatedLine *Line = *I;
+ assert(Line->First);
Line->LeadingEmptyLinesAffected = affectsLeadingEmptyLines(*Line->First);
// If a line is part of a preprocessor directive, it needs to be formatted
@@ -59,13 +60,10 @@ bool AffectedRangeManager::computeAffectedLines(
bool AffectedRangeManager::affectsCharSourceRange(
const CharSourceRange &Range) {
- for (SmallVectorImpl<CharSourceRange>::const_iterator I = Ranges.begin(),
- E = Ranges.end();
- I != E; ++I) {
- if (!SourceMgr.isBeforeInTranslationUnit(Range.getEnd(), I->getBegin()) &&
- !SourceMgr.isBeforeInTranslationUnit(I->getEnd(), Range.getBegin()))
+ for (const CharSourceRange &R : Ranges)
+ if (!SourceMgr.isBeforeInTranslationUnit(Range.getEnd(), R.getBegin()) &&
+ !SourceMgr.isBeforeInTranslationUnit(R.getEnd(), Range.getBegin()))
return true;
- }
return false;
}
@@ -116,6 +114,7 @@ bool AffectedRangeManager::nonPPLineAffected(
// affected.
bool SomeFirstChildAffected = false;
+ assert(Line->First);
for (FormatToken *Tok = Line->First; Tok; Tok = Tok->Next) {
// Determine whether 'Tok' was affected.
if (affectsTokenRange(*Tok, *Tok, IncludeLeadingNewlines))
diff --git a/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp b/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp
index 4225d6b67b0e..b66584652bc8 100644
--- a/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp
+++ b/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp
@@ -341,6 +341,8 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
if (State.Stack.back().BreakBeforeClosingBrace &&
Current.closesBlockOrBlockTypeList(Style))
return true;
+ if (State.Stack.back().BreakBeforeClosingParen && Current.is(tok::r_paren))
+ return true;
if (Previous.is(tok::semi) && State.LineContainsContinuedForLoopSection)
return true;
if (Style.Language == FormatStyle::LK_ObjC &&
@@ -485,7 +487,8 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
// different LineFormatter would be used otherwise.
if (Previous.ClosesTemplateDeclaration)
return Style.AlwaysBreakTemplateDeclarations != FormatStyle::BTDS_No;
- if (Previous.is(TT_FunctionAnnotationRParen))
+ if (Previous.is(TT_FunctionAnnotationRParen) &&
+ State.Line->Type != LT_PreprocessorDirective)
return true;
if (Previous.is(TT_LeadingJavaAnnotation) && Current.isNot(tok::l_paren) &&
Current.isNot(TT_LeadingJavaAnnotation))
@@ -540,13 +543,15 @@ unsigned ContinuationIndenter::addTokenToState(LineState &State, bool Newline,
bool DryRun,
unsigned ExtraSpaces) {
const FormatToken &Current = *State.NextToken;
+ assert(State.NextToken->Previous);
+ const FormatToken &Previous = *State.NextToken->Previous;
assert(!State.Stack.empty());
State.NoContinuation = false;
if ((Current.is(TT_ImplicitStringLiteral) &&
- (Current.Previous->Tok.getIdentifierInfo() == nullptr ||
- Current.Previous->Tok.getIdentifierInfo()->getPPKeywordID() ==
+ (Previous.Tok.getIdentifierInfo() == nullptr ||
+ Previous.Tok.getIdentifierInfo()->getPPKeywordID() ==
tok::pp_not_keyword))) {
unsigned EndColumn =
SourceMgr.getSpellingColumnNumber(Current.WhitespaceRange.getEnd());
@@ -576,7 +581,9 @@ unsigned ContinuationIndenter::addTokenToState(LineState &State, bool Newline,
void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
unsigned ExtraSpaces) {
FormatToken &Current = *State.NextToken;
+ assert(State.NextToken->Previous);
const FormatToken &Previous = *State.NextToken->Previous;
+
if (Current.is(tok::equal) &&
(State.Line->First->is(tok::kw_for) || Current.NestingLevel == 0) &&
State.Stack.back().VariablePos == 0) {
@@ -638,10 +645,12 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
State.Stack.back().ColonPos = FirstColonPos;
}
- // In "AlwaysBreak" mode, enforce wrapping directly after the parenthesis by
- // disallowing any further line breaks if there is no line break after the
- // opening parenthesis. Don't break if it doesn't conserve columns.
- if (Style.AlignAfterOpenBracket == FormatStyle::BAS_AlwaysBreak &&
+ // In "AlwaysBreak" or "BlockIndent" mode, enforce wrapping directly after the
+ // parenthesis by disallowing any further line breaks if there is no line
+ // break after the opening parenthesis. Don't break if it doesn't conserve
+ // columns.
+ if ((Style.AlignAfterOpenBracket == FormatStyle::BAS_AlwaysBreak ||
+ Style.AlignAfterOpenBracket == FormatStyle::BAS_BlockIndent) &&
(Previous.isOneOf(tok::l_paren, TT_TemplateOpener, tok::l_square) ||
(Previous.is(tok::l_brace) && Previous.isNot(BK_Block) &&
Style.Cpp11BracedListStyle)) &&
@@ -770,6 +779,7 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
bool DryRun) {
FormatToken &Current = *State.NextToken;
+ assert(State.NextToken->Previous);
const FormatToken &Previous = *State.NextToken->Previous;
// Extra penalty that needs to be added because of the way certain line
@@ -942,6 +952,10 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
opensProtoMessageField(*PreviousNonComment, Style)))
State.Stack.back().BreakBeforeClosingBrace = true;
+ if (PreviousNonComment && PreviousNonComment->is(tok::l_paren))
+ State.Stack.back().BreakBeforeClosingParen =
+ Style.AlignAfterOpenBracket == FormatStyle::BAS_BlockIndent;
+
if (State.Stack.back().AvoidBinPacking) {
// If we are breaking after '(', '{', '<', or this is the break after a ':'
// to start a member initializer list in a constructor, this should not
@@ -1036,6 +1050,9 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
(!Current.Next ||
Current.Next->isOneOf(tok::semi, tok::kw_const, tok::l_brace)))
return State.Stack[State.Stack.size() - 2].LastSpace;
+ if (Style.AlignAfterOpenBracket == FormatStyle::BAS_BlockIndent &&
+ Current.is(tok::r_paren) && State.Stack.size() > 1)
+ return State.Stack[State.Stack.size() - 2].LastSpace;
if (NextNonComment->is(TT_TemplateString) && NextNonComment->closesScope())
return State.Stack[State.Stack.size() - 2].LastSpace;
if (Current.is(tok::identifier) && Current.Next &&
@@ -1288,10 +1305,9 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
State.Stack[i].NoLineBreak = true;
State.Stack[State.Stack.size() - 2].NestedBlockInlined = false;
}
- if (Previous &&
- (Previous->isOneOf(tok::l_paren, tok::comma, tok::colon) ||
- Previous->isOneOf(TT_BinaryOperator, TT_ConditionalExpr)) &&
- !Previous->isOneOf(TT_DictLiteral, TT_ObjCMethodExpr)) {
+ if (Previous && (Previous->isOneOf(TT_BinaryOperator, TT_ConditionalExpr) ||
+ (Previous->isOneOf(tok::l_paren, tok::comma, tok::colon) &&
+ !Previous->isOneOf(TT_DictLiteral, TT_ObjCMethodExpr)))) {
State.Stack.back().NestedBlockInlined =
!Newline && hasNestedBlockInlined(Previous, Current, Style);
}
diff --git a/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.h b/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.h
index b1b2611263a9..0eb53cbd0293 100644
--- a/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.h
+++ b/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.h
@@ -203,15 +203,15 @@ struct ParenState {
bool AvoidBinPacking, bool NoLineBreak)
: Tok(Tok), Indent(Indent), LastSpace(LastSpace),
NestedBlockIndent(Indent), IsAligned(false),
- BreakBeforeClosingBrace(false), AvoidBinPacking(AvoidBinPacking),
- BreakBeforeParameter(false), NoLineBreak(NoLineBreak),
- NoLineBreakInOperand(false), LastOperatorWrapped(true),
- ContainsLineBreak(false), ContainsUnwrappedBuilder(false),
- AlignColons(true), ObjCSelectorNameFound(false),
- HasMultipleNestedBlocks(false), NestedBlockInlined(false),
- IsInsideObjCArrayLiteral(false), IsCSharpGenericTypeConstraint(false),
- IsChainedConditional(false), IsWrappedConditional(false),
- UnindentOperator(false) {}
+ BreakBeforeClosingBrace(false), BreakBeforeClosingParen(false),
+ AvoidBinPacking(AvoidBinPacking), BreakBeforeParameter(false),
+ NoLineBreak(NoLineBreak), NoLineBreakInOperand(false),
+ LastOperatorWrapped(true), ContainsLineBreak(false),
+ ContainsUnwrappedBuilder(false), AlignColons(true),
+ ObjCSelectorNameFound(false), HasMultipleNestedBlocks(false),
+ NestedBlockInlined(false), IsInsideObjCArrayLiteral(false),
+ IsCSharpGenericTypeConstraint(false), IsChainedConditional(false),
+ IsWrappedConditional(false), UnindentOperator(false) {}
/// \brief The token opening this parenthesis level, or nullptr if this level
/// is opened by fake parenthesis.
@@ -277,6 +277,13 @@ struct ParenState {
/// was a newline after the beginning left brace.
bool BreakBeforeClosingBrace : 1;
+ /// Whether a newline needs to be inserted before the block's closing
+ /// paren.
+ ///
+ /// We only want to insert a newline before the closing paren if there also
+ /// was a newline after the beginning left paren.
+ bool BreakBeforeClosingParen : 1;
+
/// Avoid bin packing, i.e. multiple parameters/elements on multiple
/// lines, in this context.
bool AvoidBinPacking : 1;
@@ -362,6 +369,8 @@ struct ParenState {
return IsAligned;
if (BreakBeforeClosingBrace != Other.BreakBeforeClosingBrace)
return BreakBeforeClosingBrace;
+ if (BreakBeforeClosingParen != Other.BreakBeforeClosingParen)
+ return BreakBeforeClosingParen;
if (QuestionColumn != Other.QuestionColumn)
return QuestionColumn < Other.QuestionColumn;
if (AvoidBinPacking != Other.AvoidBinPacking)
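
Editorial note: the ParenState additions above back a new AlignAfterOpenBracket value, BlockIndent, registered in Format.cpp further down. If the formatter breaks after an opening parenthesis, it now also forces a break before the matching closing parenthesis and aligns it with the enclosing level. Roughly, as an illustration rather than verified clang-format output:

    // Illustration only: the layout BlockIndent produces for a call that does
    // not fit on one line, next to the pre-existing AlwaysBreak value.
    static void someVeryLongFunctionName(int firstArgument, int secondArgument) {}

    int main() {
      int firstArgument = 1, secondArgument = 2;
      // AlignAfterOpenBracket: BlockIndent (break after '(' and before ')')
      someVeryLongFunctionName(
          firstArgument, secondArgument
      );
      // AlignAfterOpenBracket: AlwaysBreak (closing ')' stays on the last line)
      someVeryLongFunctionName(
          firstArgument, secondArgument);
    }
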
diff --git a/contrib/llvm-project/clang/lib/Format/DefinitionBlockSeparator.cpp b/contrib/llvm-project/clang/lib/Format/DefinitionBlockSeparator.cpp
new file mode 100644
index 000000000000..827564357f78
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Format/DefinitionBlockSeparator.cpp
@@ -0,0 +1,236 @@
+//===--- DefinitionBlockSeparator.cpp ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements DefinitionBlockSeparator, a TokenAnalyzer that inserts
+/// or removes empty lines separating definition blocks like classes, structs,
+/// functions, enums, and namespaces in between.
+///
+//===----------------------------------------------------------------------===//
+
+#include "DefinitionBlockSeparator.h"
+#include "llvm/Support/Debug.h"
+#define DEBUG_TYPE "definition-block-separator"
+
+namespace clang {
+namespace format {
+std::pair<tooling::Replacements, unsigned> DefinitionBlockSeparator::analyze(
+ TokenAnnotator &Annotator, SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
+ FormatTokenLexer &Tokens) {
+ assert(Style.SeparateDefinitionBlocks != FormatStyle::SDS_Leave);
+ AffectedRangeMgr.computeAffectedLines(AnnotatedLines);
+ tooling::Replacements Result;
+ separateBlocks(AnnotatedLines, Result, Tokens);
+ return {Result, 0};
+}
+
+void DefinitionBlockSeparator::separateBlocks(
+ SmallVectorImpl<AnnotatedLine *> &Lines, tooling::Replacements &Result,
+ FormatTokenLexer &Tokens) {
+ const bool IsNeverStyle =
+ Style.SeparateDefinitionBlocks == FormatStyle::SDS_Never;
+ const AdditionalKeywords &ExtraKeywords = Tokens.getKeywords();
+ auto LikelyDefinition = [this, ExtraKeywords](const AnnotatedLine *Line,
+ bool ExcludeEnum = false) {
+ if ((Line->MightBeFunctionDecl && Line->mightBeFunctionDefinition()) ||
+ Line->startsWithNamespace())
+ return true;
+ FormatToken *CurrentToken = Line->First;
+ while (CurrentToken) {
+ if (CurrentToken->isOneOf(tok::kw_class, tok::kw_struct) ||
+ (Style.isJavaScript() && CurrentToken->is(ExtraKeywords.kw_function)))
+ return true;
+ if (!ExcludeEnum && CurrentToken->is(tok::kw_enum))
+ return true;
+ CurrentToken = CurrentToken->Next;
+ }
+ return false;
+ };
+ unsigned NewlineCount =
+ (Style.SeparateDefinitionBlocks == FormatStyle::SDS_Always ? 1 : 0) + 1;
+ WhitespaceManager Whitespaces(
+ Env.getSourceManager(), Style,
+ Style.DeriveLineEnding
+ ? WhitespaceManager::inputUsesCRLF(
+ Env.getSourceManager().getBufferData(Env.getFileID()),
+ Style.UseCRLF)
+ : Style.UseCRLF);
+ for (unsigned I = 0; I < Lines.size(); ++I) {
+ const auto &CurrentLine = Lines[I];
+ if (CurrentLine->InPPDirective)
+ continue;
+ FormatToken *TargetToken = nullptr;
+ AnnotatedLine *TargetLine;
+ auto OpeningLineIndex = CurrentLine->MatchingOpeningBlockLineIndex;
+ AnnotatedLine *OpeningLine = nullptr;
+ const auto IsAccessSpecifierToken = [](const FormatToken *Token) {
+ return Token->isAccessSpecifier() || Token->isObjCAccessSpecifier();
+ };
+ const auto InsertReplacement = [&](const int NewlineToInsert) {
+ assert(TargetLine);
+ assert(TargetToken);
+
+ // Do not handle EOF newlines.
+ if (TargetToken->is(tok::eof))
+ return;
+ if (IsAccessSpecifierToken(TargetToken) ||
+ (OpeningLineIndex > 0 &&
+ IsAccessSpecifierToken(Lines[OpeningLineIndex - 1]->First)))
+ return;
+ if (!TargetLine->Affected)
+ return;
+ Whitespaces.replaceWhitespace(*TargetToken, NewlineToInsert,
+ TargetToken->OriginalColumn,
+ TargetToken->OriginalColumn);
+ };
+ const auto IsPPConditional = [&](const size_t LineIndex) {
+ const auto &Line = Lines[LineIndex];
+ return Line->First->is(tok::hash) && Line->First->Next &&
+ Line->First->Next->isOneOf(tok::pp_if, tok::pp_ifdef, tok::pp_else,
+ tok::pp_ifndef, tok::pp_elifndef,
+ tok::pp_elifdef, tok::pp_elif,
+ tok::pp_endif);
+ };
+ const auto FollowingOtherOpening = [&]() {
+ return OpeningLineIndex == 0 ||
+ Lines[OpeningLineIndex - 1]->Last->opensScope() ||
+ IsPPConditional(OpeningLineIndex - 1);
+ };
+ const auto HasEnumOnLine = [&]() {
+ FormatToken *CurrentToken = CurrentLine->First;
+ bool FoundEnumKeyword = false;
+ while (CurrentToken) {
+ if (CurrentToken->is(tok::kw_enum))
+ FoundEnumKeyword = true;
+ else if (FoundEnumKeyword && CurrentToken->is(tok::l_brace))
+ return true;
+ CurrentToken = CurrentToken->Next;
+ }
+ return FoundEnumKeyword && I + 1 < Lines.size() &&
+ Lines[I + 1]->First->is(tok::l_brace);
+ };
+
+ bool IsDefBlock = false;
+ const auto MayPrecedeDefinition = [&](const int Direction = -1) {
+ assert(Direction >= -1);
+ assert(Direction <= 1);
+ const size_t OperateIndex = OpeningLineIndex + Direction;
+ assert(OperateIndex < Lines.size());
+ const auto &OperateLine = Lines[OperateIndex];
+ if (LikelyDefinition(OperateLine))
+ return false;
+
+ if (OperateLine->First->is(tok::comment))
+ return true;
+
+ // A single line identifier that is not in the last line.
+ if (OperateLine->First->is(tok::identifier) &&
+ OperateLine->First == OperateLine->Last &&
+ OperateIndex + 1 < Lines.size()) {
+ // UnwrappedLineParser's recognition of free-standing macros such as
+ // Q_OBJECT may also pick up uppercased type names used as return types,
+ // since the two are hard to distinguish purely from token patterns. Here,
+ // we try not to add new lines below such identifiers.
+ AnnotatedLine *NextLine = Lines[OperateIndex + 1];
+ if (NextLine->MightBeFunctionDecl &&
+ NextLine->mightBeFunctionDefinition() &&
+ NextLine->First->NewlinesBefore == 1 &&
+ OperateLine->First->is(TT_FunctionLikeOrFreestandingMacro))
+ return true;
+ }
+
+ if ((Style.isCSharp() && OperateLine->First->is(TT_AttributeSquare)))
+ return true;
+ return false;
+ };
+
+ if (HasEnumOnLine() &&
+ !LikelyDefinition(CurrentLine, /*ExcludeEnum=*/true)) {
+ // We have no scope opening/closing information for enum.
+ IsDefBlock = true;
+ OpeningLineIndex = I;
+ while (OpeningLineIndex > 0 && MayPrecedeDefinition())
+ --OpeningLineIndex;
+ OpeningLine = Lines[OpeningLineIndex];
+ TargetLine = OpeningLine;
+ TargetToken = TargetLine->First;
+ if (!FollowingOtherOpening())
+ InsertReplacement(NewlineCount);
+ else if (IsNeverStyle)
+ InsertReplacement(OpeningLineIndex != 0);
+ TargetLine = CurrentLine;
+ TargetToken = TargetLine->First;
+ while (TargetToken && !TargetToken->is(tok::r_brace))
+ TargetToken = TargetToken->Next;
+ if (!TargetToken) {
+ while (I < Lines.size() && !Lines[I]->First->is(tok::r_brace))
+ ++I;
+ }
+ } else if (CurrentLine->First->closesScope()) {
+ if (OpeningLineIndex > Lines.size())
+ continue;
+ // Handle the case where the opening brace is on its own line, checking
+ // whether the previous line already ended with an opening brace to guard
+ // against misrecognition.
+ if (OpeningLineIndex > 0 &&
+ Lines[OpeningLineIndex]->First->is(tok::l_brace) &&
+ Lines[OpeningLineIndex - 1]->Last->isNot(tok::l_brace))
+ --OpeningLineIndex;
+ OpeningLine = Lines[OpeningLineIndex];
+ // Closing a function definition.
+ if (LikelyDefinition(OpeningLine)) {
+ IsDefBlock = true;
+ while (OpeningLineIndex > 0 && MayPrecedeDefinition())
+ --OpeningLineIndex;
+ OpeningLine = Lines[OpeningLineIndex];
+ TargetLine = OpeningLine;
+ TargetToken = TargetLine->First;
+ if (!FollowingOtherOpening()) {
+ // Avoid duplicated replacement.
+ if (TargetToken->isNot(tok::l_brace))
+ InsertReplacement(NewlineCount);
+ } else if (IsNeverStyle)
+ InsertReplacement(OpeningLineIndex != 0);
+ }
+ }
+
+ // Not the last token.
+ if (IsDefBlock && I + 1 < Lines.size()) {
+ OpeningLineIndex = I + 1;
+ TargetLine = Lines[OpeningLineIndex];
+ TargetToken = TargetLine->First;
+
+ // No empty line for continuously closing scopes. The token will be
+ // handled in another case if the line following is opening a
+ // definition.
+ if (!TargetToken->closesScope() && !IsPPConditional(OpeningLineIndex)) {
+ // Check whether current line may precede a definition line.
+ while (OpeningLineIndex + 1 < Lines.size() &&
+ MayPrecedeDefinition(/*Direction=*/0))
+ ++OpeningLineIndex;
+ TargetLine = Lines[OpeningLineIndex];
+ if (!LikelyDefinition(TargetLine)) {
+ OpeningLineIndex = I + 1;
+ TargetLine = Lines[I + 1];
+ TargetToken = TargetLine->First;
+ InsertReplacement(NewlineCount);
+ }
+ } else if (IsNeverStyle)
+ InsertReplacement(/*NewlineToInsert=*/1);
+ }
+ }
+ for (const auto &R : Whitespaces.generateReplacements())
+ // The add method returns an Error instance which simulates a program exit
+ // code by overloading the boolean operator, so false here indicates
+ // success.
+ if (Result.add(R))
+ return;
+}
+} // namespace format
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Format/DefinitionBlockSeparator.h b/contrib/llvm-project/clang/lib/Format/DefinitionBlockSeparator.h
new file mode 100644
index 000000000000..31c0f34d6e19
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Format/DefinitionBlockSeparator.h
@@ -0,0 +1,41 @@
+//===--- DefinitionBlockSeparator.h -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file declares DefinitionBlockSeparator, a TokenAnalyzer that inserts or
+/// removes empty lines separating definition blocks like classes, structs,
+/// functions, enums, and namespaces in between.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_FORMAT_DEFINITIONBLOCKSEPARATOR_H
+#define LLVM_CLANG_LIB_FORMAT_DEFINITIONBLOCKSEPARATOR_H
+
+#include "TokenAnalyzer.h"
+#include "WhitespaceManager.h"
+
+namespace clang {
+namespace format {
+class DefinitionBlockSeparator : public TokenAnalyzer {
+public:
+ DefinitionBlockSeparator(const Environment &Env, const FormatStyle &Style)
+ : TokenAnalyzer(Env, Style) {}
+
+ std::pair<tooling::Replacements, unsigned>
+ analyze(TokenAnnotator &Annotator,
+ SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
+ FormatTokenLexer &Tokens) override;
+
+private:
+ void separateBlocks(SmallVectorImpl<AnnotatedLine *> &Lines,
+ tooling::Replacements &Result, FormatTokenLexer &Tokens);
+};
+} // namespace format
+} // namespace clang
+
+#endif
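
Editorial note: Format.cpp below wires this pass up behind the new SeparateDefinitionBlocks style option and also exposes a standalone entry point, separateDefinitionBlocks(). A minimal sketch of driving it through that entry point, assuming the matching declaration this merge adds to clang/Format/Format.h and the usual libFormat/libTooling setup:

    #include "clang/Format/Format.h"
    #include "clang/Tooling/Core/Replacement.h"
    #include "llvm/Support/Error.h"
    #include <string>
    #include <vector>

    // Minimal sketch, not from the patch: run only the definition-block pass
    // over a buffer and return the rewritten text (or the input on error).
    static std::string addDefinitionSeparators(llvm::StringRef Code) {
      clang::format::FormatStyle Style = clang::format::getLLVMStyle();
      Style.SeparateDefinitionBlocks = clang::format::FormatStyle::SDS_Always;
      std::vector<clang::tooling::Range> Ranges = {
          clang::tooling::Range(0, static_cast<unsigned>(Code.size()))};
      clang::tooling::Replacements Fixes =
          clang::format::separateDefinitionBlocks(Style, Code, Ranges, "<stdin>");
      llvm::Expected<std::string> Result =
          clang::tooling::applyAllReplacements(Code, Fixes);
      if (!Result) {
        llvm::consumeError(Result.takeError());
        return Code.str();
      }
      return *Result;
    }
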
diff --git a/contrib/llvm-project/clang/lib/Format/Format.cpp b/contrib/llvm-project/clang/lib/Format/Format.cpp
index be01daa38929..04e2915e3af6 100644
--- a/contrib/llvm-project/clang/lib/Format/Format.cpp
+++ b/contrib/llvm-project/clang/lib/Format/Format.cpp
@@ -16,6 +16,7 @@
#include "AffectedRangeManager.h"
#include "BreakableToken.h"
#include "ContinuationIndenter.h"
+#include "DefinitionBlockSeparator.h"
#include "FormatInternal.h"
#include "FormatTokenLexer.h"
#include "NamespaceEndCommentsFixer.h"
@@ -383,6 +384,7 @@ template <> struct ScalarEnumerationTraits<FormatStyle::BracketAlignmentStyle> {
IO.enumCase(Value, "Align", FormatStyle::BAS_Align);
IO.enumCase(Value, "DontAlign", FormatStyle::BAS_DontAlign);
IO.enumCase(Value, "AlwaysBreak", FormatStyle::BAS_AlwaysBreak);
+ IO.enumCase(Value, "BlockIndent", FormatStyle::BAS_BlockIndent);
// For backward compatibility.
IO.enumCase(Value, "true", FormatStyle::BAS_Align);
@@ -430,6 +432,15 @@ template <> struct ScalarEnumerationTraits<FormatStyle::PointerAlignmentStyle> {
};
template <>
+struct ScalarEnumerationTraits<FormatStyle::SeparateDefinitionStyle> {
+ static void enumeration(IO &IO, FormatStyle::SeparateDefinitionStyle &Value) {
+ IO.enumCase(Value, "Leave", FormatStyle::SDS_Leave);
+ IO.enumCase(Value, "Always", FormatStyle::SDS_Always);
+ IO.enumCase(Value, "Never", FormatStyle::SDS_Never);
+ }
+};
+
+template <>
struct ScalarEnumerationTraits<FormatStyle::SpaceAroundPointerQualifiersStyle> {
static void
enumeration(IO &IO, FormatStyle::SpaceAroundPointerQualifiersStyle &Value) {
@@ -756,6 +767,8 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("PenaltyBreakComment", Style.PenaltyBreakComment);
IO.mapOptional("PenaltyBreakFirstLessLess",
Style.PenaltyBreakFirstLessLess);
+ IO.mapOptional("PenaltyBreakOpenParenthesis",
+ Style.PenaltyBreakOpenParenthesis);
IO.mapOptional("PenaltyBreakString", Style.PenaltyBreakString);
IO.mapOptional("PenaltyBreakTemplateDeclaration",
Style.PenaltyBreakTemplateDeclaration);
@@ -769,6 +782,8 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("RawStringFormats", Style.RawStringFormats);
IO.mapOptional("ReferenceAlignment", Style.ReferenceAlignment);
IO.mapOptional("ReflowComments", Style.ReflowComments);
+ IO.mapOptional("RemoveBracesLLVM", Style.RemoveBracesLLVM);
+ IO.mapOptional("SeparateDefinitionBlocks", Style.SeparateDefinitionBlocks);
IO.mapOptional("ShortNamespaceLines", Style.ShortNamespaceLines);
IO.mapOptional("SortIncludes", Style.SortIncludes);
IO.mapOptional("SortJavaStaticImport", Style.SortJavaStaticImport);
@@ -855,6 +870,7 @@ template <> struct MappingTraits<FormatStyle::SpaceBeforeParensCustom> {
IO.mapOptional("AfterFunctionDeclarationName",
Spacing.AfterFunctionDeclarationName);
IO.mapOptional("AfterIfMacros", Spacing.AfterIfMacros);
+ IO.mapOptional("AfterOverloadedOperator", Spacing.AfterOverloadedOperator);
IO.mapOptional("BeforeNonEmptyParentheses",
Spacing.BeforeNonEmptyParentheses);
}
@@ -1193,12 +1209,14 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.ObjCSpaceBeforeProtocolList = true;
LLVMStyle.PointerAlignment = FormatStyle::PAS_Right;
LLVMStyle.ReferenceAlignment = FormatStyle::RAS_Pointer;
+ LLVMStyle.SeparateDefinitionBlocks = FormatStyle::SDS_Leave;
LLVMStyle.ShortNamespaceLines = 1;
LLVMStyle.SpacesBeforeTrailingComments = 1;
LLVMStyle.Standard = FormatStyle::LS_Latest;
LLVMStyle.UseCRLF = false;
LLVMStyle.UseTab = FormatStyle::UT_Never;
LLVMStyle.ReflowComments = true;
+ LLVMStyle.RemoveBracesLLVM = false;
LLVMStyle.SpacesInParentheses = false;
LLVMStyle.SpacesInSquareBrackets = false;
LLVMStyle.SpaceInEmptyBlock = false;
@@ -1232,6 +1250,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.PenaltyExcessCharacter = 1000000;
LLVMStyle.PenaltyReturnTypeOnItsOwnLine = 60;
LLVMStyle.PenaltyBreakBeforeFirstCallParameter = 19;
+ LLVMStyle.PenaltyBreakOpenParenthesis = 0;
LLVMStyle.PenaltyBreakTemplateDeclaration = prec::Relational;
LLVMStyle.PenaltyIndentedWhitespace = 0;
@@ -1732,6 +1751,45 @@ FormatStyle::GetLanguageStyle(FormatStyle::LanguageKind Language) const {
namespace {
+class BracesRemover : public TokenAnalyzer {
+public:
+ BracesRemover(const Environment &Env, const FormatStyle &Style)
+ : TokenAnalyzer(Env, Style) {}
+
+ std::pair<tooling::Replacements, unsigned>
+ analyze(TokenAnnotator &Annotator,
+ SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
+ FormatTokenLexer &Tokens) override {
+ AffectedRangeMgr.computeAffectedLines(AnnotatedLines);
+ tooling::Replacements Result;
+ removeBraces(AnnotatedLines, Result);
+ return {Result, 0};
+ }
+
+private:
+ // Remove optional braces.
+ void removeBraces(SmallVectorImpl<AnnotatedLine *> &Lines,
+ tooling::Replacements &Result) {
+ const auto &SourceMgr = Env.getSourceManager();
+ for (AnnotatedLine *Line : Lines) {
+ removeBraces(Line->Children, Result);
+ if (!Line->Affected)
+ continue;
+ for (FormatToken *Token = Line->First; Token; Token = Token->Next) {
+ if (!Token->Optional)
+ continue;
+ assert(Token->isOneOf(tok::l_brace, tok::r_brace));
+ const auto Start = Token == Line->Last
+ ? Token->WhitespaceRange.getBegin()
+ : Token->Tok.getLocation();
+ const auto Range =
+ CharSourceRange::getCharRange(Start, Token->Tok.getEndLoc());
+ cantFail(Result.add(tooling::Replacement(SourceMgr, Range, "")));
+ }
+ }
+ }
+};
+
class JavaScriptRequoter : public TokenAnalyzer {
public:
JavaScriptRequoter(const Environment &Env, const FormatStyle &Style)
@@ -1840,7 +1898,7 @@ public:
WhitespaceManager Whitespaces(
Env.getSourceManager(), Style,
Style.DeriveLineEnding
- ? inputUsesCRLF(
+ ? WhitespaceManager::inputUsesCRLF(
Env.getSourceManager().getBufferData(Env.getFileID()),
Style.UseCRLF)
: Style.UseCRLF);
@@ -1864,19 +1922,13 @@ public:
}
private:
- static bool inputUsesCRLF(StringRef Text, bool DefaultToCRLF) {
- size_t LF = Text.count('\n');
- size_t CR = Text.count('\r') * 2;
- return LF == CR ? DefaultToCRLF : CR > LF;
- }
-
bool
hasCpp03IncompatibleFormat(const SmallVectorImpl<AnnotatedLine *> &Lines) {
for (const AnnotatedLine *Line : Lines) {
if (hasCpp03IncompatibleFormat(Line->Children))
return true;
for (FormatToken *Tok = Line->First->Next; Tok; Tok = Tok->Next) {
- if (Tok->WhitespaceRange.getBegin() == Tok->WhitespaceRange.getEnd()) {
+ if (!Tok->hasWhitespaceBefore()) {
if (Tok->is(tok::coloncolon) && Tok->Previous->is(TT_TemplateOpener))
return true;
if (Tok->is(TT_TemplateCloser) &&
@@ -1895,10 +1947,8 @@ private:
for (FormatToken *Tok = Line->First; Tok && Tok->Next; Tok = Tok->Next) {
if (!Tok->is(TT_PointerOrReference))
continue;
- bool SpaceBefore =
- Tok->WhitespaceRange.getBegin() != Tok->WhitespaceRange.getEnd();
- bool SpaceAfter = Tok->Next->WhitespaceRange.getBegin() !=
- Tok->Next->WhitespaceRange.getEnd();
+ bool SpaceBefore = Tok->hasWhitespaceBefore();
+ bool SpaceAfter = Tok->Next->hasWhitespaceBefore();
if (SpaceBefore && !SpaceAfter)
++AlignmentDiff;
if (!SpaceBefore && SpaceAfter)
@@ -2204,7 +2254,7 @@ private:
unsigned St = Idx, End = Idx;
while ((End + 1) < Tokens.size() &&
Tokens[End]->Next == Tokens[End + 1]) {
- End++;
+ ++End;
}
auto SR = CharSourceRange::getCharRange(Tokens[St]->Tok.getLocation(),
Tokens[End]->Tok.getEndLoc());
@@ -2440,7 +2490,7 @@ std::string replaceCRLF(const std::string &Code) {
do {
Pos = Code.find("\r\n", LastPos);
if (Pos == LastPos) {
- LastPos++;
+ ++LastPos;
continue;
}
if (Pos == std::string::npos) {
@@ -2586,8 +2636,9 @@ tooling::Replacements sortCppIncludes(const FormatStyle &Style, StringRef Code,
bool MainIncludeFound = false;
bool FormattingOff = false;
+ // '[' must be the first and '-' the last character inside [...].
llvm::Regex RawStringRegex(
- "R\"(([\\[A-Za-z0-9_{}#<>%:;.?*+/^&\\$|~!=,'\\-]|])*)\\(");
+ "R\"([][A-Za-z0-9_{}#<>%:;.?*+/^&\\$|~!=,'-]*)\\(");
SmallVector<StringRef, 2> RawStringMatches;
std::string RawStringTermination = ")\"";
@@ -3038,6 +3089,11 @@ reformat(const FormatStyle &Style, StringRef Code,
});
}
+ if (Style.isCpp() && Style.RemoveBracesLLVM)
+ Passes.emplace_back([&](const Environment &Env) {
+ return BracesRemover(Env, Expanded).process();
+ });
+
if (Style.Language == FormatStyle::LK_Cpp) {
if (Style.FixNamespaceComments)
Passes.emplace_back([&](const Environment &Env) {
@@ -3050,6 +3106,11 @@ reformat(const FormatStyle &Style, StringRef Code,
});
}
+ if (Style.SeparateDefinitionBlocks != FormatStyle::SDS_Leave)
+ Passes.emplace_back([&](const Environment &Env) {
+ return DefinitionBlockSeparator(Env, Expanded).process();
+ });
+
if (Style.isJavaScript() && Style.JavaScriptQuotes != FormatStyle::JSQS_Leave)
Passes.emplace_back([&](const Environment &Env) {
return JavaScriptRequoter(Env, Expanded).process();
@@ -3138,6 +3199,16 @@ tooling::Replacements fixNamespaceEndComments(const FormatStyle &Style,
return NamespaceEndCommentsFixer(*Env, Style).process().first;
}
+tooling::Replacements separateDefinitionBlocks(const FormatStyle &Style,
+ StringRef Code,
+ ArrayRef<tooling::Range> Ranges,
+ StringRef FileName) {
+ auto Env = Environment::make(Code, FileName, Ranges);
+ if (!Env)
+ return {};
+ return DefinitionBlockSeparator(*Env, Style).process().first;
+}
+
tooling::Replacements sortUsingDeclarations(const FormatStyle &Style,
StringRef Code,
ArrayRef<tooling::Range> Ranges,
@@ -3181,6 +3252,8 @@ const char *StyleOptionHelpDescription =
".clang-format file located in one of the parent\n"
"directories of the source file (or current\n"
"directory for stdin).\n"
+ "Use -style=file:<format_file_path> to explicitly specify"
+ "the configuration file.\n"
"Use -style=\"{key: value, ...}\" to set specific\n"
"parameters, e.g.:\n"
" -style=\"{BasedOnStyle: llvm, IndentWidth: 8}\"";
@@ -3233,6 +3306,18 @@ const char *DefaultFormatStyle = "file";
const char *DefaultFallbackStyle = "LLVM";
+llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
+loadAndParseConfigFile(StringRef ConfigFile, llvm::vfs::FileSystem *FS,
+ FormatStyle *Style, bool AllowUnknownOptions) {
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Text =
+ FS->getBufferForFile(ConfigFile.str());
+ if (auto EC = Text.getError())
+ return EC;
+ if (auto EC = parseConfiguration(*Text.get(), Style, AllowUnknownOptions))
+ return EC;
+ return Text;
+}
+
llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
StringRef FallbackStyleName,
StringRef Code, llvm::vfs::FileSystem *FS,
@@ -3263,6 +3348,28 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
return Style;
}
+ // User provided clang-format file using -style=file:path/to/format/file.
+ if (!Style.InheritsParentConfig &&
+ StyleName.startswith_insensitive("file:")) {
+ auto ConfigFile = StyleName.substr(5);
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Text =
+ loadAndParseConfigFile(ConfigFile, FS, &Style, AllowUnknownOptions);
+ if (auto EC = Text.getError())
+ return make_string_error("Error reading " + ConfigFile + ": " +
+ EC.message());
+
+ LLVM_DEBUG(llvm::dbgs()
+ << "Using configuration file " << ConfigFile << "\n");
+
+ if (!Style.InheritsParentConfig)
+ return Style;
+
+ // Search for parent configs starting from the parent directory of
+ // ConfigFile.
+ FileName = ConfigFile;
+ ChildFormatTextToApply.emplace_back(std::move(*Text));
+ }
+
// If the style inherits the parent configuration it is a command line
// configuration, which wants to inherit, so we have to skip the check of the
// StyleName.
@@ -3288,6 +3395,16 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
auto dropDiagnosticHandler = [](const llvm::SMDiagnostic &, void *) {};
+ auto applyChildFormatTexts = [&](FormatStyle *Style) {
+ for (const auto &MemBuf : llvm::reverse(ChildFormatTextToApply)) {
+ auto EC = parseConfiguration(*MemBuf, Style, AllowUnknownOptions,
+ dropDiagnosticHandler);
+ // It was already correctly parsed.
+ assert(!EC);
+ static_cast<void>(EC);
+ }
+ };
+
for (StringRef Directory = Path; !Directory.empty();
Directory = llvm::sys::path::parent_path(Directory)) {
@@ -3308,19 +3425,16 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
if (Status &&
(Status->getType() == llvm::sys::fs::file_type::regular_file)) {
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Text =
- FS->getBufferForFile(ConfigFile.str());
- if (std::error_code EC = Text.getError())
- return make_string_error(EC.message());
- if (std::error_code ec =
- parseConfiguration(*Text.get(), &Style, AllowUnknownOptions)) {
- if (ec == ParseError::Unsuitable) {
+ loadAndParseConfigFile(ConfigFile, FS, &Style, AllowUnknownOptions);
+ if (auto EC = Text.getError()) {
+ if (EC == ParseError::Unsuitable) {
if (!UnsuitableConfigFiles.empty())
UnsuitableConfigFiles.append(", ");
UnsuitableConfigFiles.append(ConfigFile);
continue;
}
return make_string_error("Error reading " + ConfigFile + ": " +
- ec.message());
+ EC.message());
}
LLVM_DEBUG(llvm::dbgs()
<< "Using configuration file " << ConfigFile << "\n");
@@ -3330,14 +3444,7 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
return Style;
LLVM_DEBUG(llvm::dbgs() << "Applying child configurations\n");
-
- for (const auto &MemBuf : llvm::reverse(ChildFormatTextToApply)) {
- auto Ec = parseConfiguration(*MemBuf, &Style, AllowUnknownOptions,
- dropDiagnosticHandler);
- // It was already correctly parsed.
- assert(!Ec);
- static_cast<void>(Ec);
- }
+ applyChildFormatTexts(&Style);
return Style;
}
@@ -3363,17 +3470,9 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
UnsuitableConfigFiles);
if (!ChildFormatTextToApply.empty()) {
- assert(ChildFormatTextToApply.size() == 1);
-
LLVM_DEBUG(llvm::dbgs()
- << "Applying child configuration on fallback style\n");
-
- auto Ec =
- parseConfiguration(*ChildFormatTextToApply.front(), &FallbackStyle,
- AllowUnknownOptions, dropDiagnosticHandler);
- // It was already correctly parsed.
- assert(!Ec);
- static_cast<void>(Ec);
+ << "Applying child configurations on fallback style\n");
+ applyChildFormatTexts(&FallbackStyle);
}
return FallbackStyle;
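
Editorial note: the getStyle() changes above add a third spelling for -style, file:<path>, which loads a named configuration file directly and still honors BasedOnStyle: InheritParentConfig starting from that file's directory. A minimal sketch of the same path through the library API, using a hypothetical configuration path and the real virtual file system:

    #include "clang/Format/Format.h"
    #include "llvm/Support/Error.h"
    #include "llvm/Support/VirtualFileSystem.h"

    // Sketch only: resolve a style the same way -style=file:<path> does.
    static clang::format::FormatStyle loadProjectStyle() {
      llvm::Expected<clang::format::FormatStyle> Style = clang::format::getStyle(
          /*StyleName=*/"file:/path/to/project/.clang-format",
          /*FileName=*/"input.cpp", /*FallbackStyleName=*/"LLVM", /*Code=*/"",
          llvm::vfs::getRealFileSystem().get());
      if (!Style) {
        llvm::consumeError(Style.takeError());
        return clang::format::getLLVMStyle();
      }
      return *Style;
    }
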
diff --git a/contrib/llvm-project/clang/lib/Format/FormatToken.cpp b/contrib/llvm-project/clang/lib/Format/FormatToken.cpp
index 57f8a5a45cbb..59d6f29bb54d 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatToken.cpp
+++ b/contrib/llvm-project/clang/lib/Format/FormatToken.cpp
@@ -189,6 +189,7 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
bool HasSeparatingComment = false;
for (unsigned i = 0, e = Commas.size() + 1; i != e; ++i) {
+ assert(ItemBegin);
// Skip comments on their own line.
while (ItemBegin->HasUnescapedNewline && ItemBegin->isTrailingComment()) {
ItemBegin = ItemBegin->Next;
@@ -296,14 +297,11 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
const CommaSeparatedList::ColumnFormat *
CommaSeparatedList::getColumnFormat(unsigned RemainingCharacters) const {
const ColumnFormat *BestFormat = nullptr;
- for (SmallVector<ColumnFormat, 4>::const_reverse_iterator
- I = Formats.rbegin(),
- E = Formats.rend();
- I != E; ++I) {
- if (I->TotalWidth <= RemainingCharacters || I->Columns == 1) {
- if (BestFormat && I->LineCount > BestFormat->LineCount)
+ for (const ColumnFormat &Format : llvm::reverse(Formats)) {
+ if (Format.TotalWidth <= RemainingCharacters || Format.Columns == 1) {
+ if (BestFormat && Format.LineCount > BestFormat->LineCount)
break;
- BestFormat = &*I;
+ BestFormat = &Format;
}
}
return BestFormat;
diff --git a/contrib/llvm-project/clang/lib/Format/FormatToken.h b/contrib/llvm-project/clang/lib/Format/FormatToken.h
index d410ede32240..a64329802ee3 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatToken.h
+++ b/contrib/llvm-project/clang/lib/Format/FormatToken.h
@@ -51,6 +51,7 @@ namespace format {
TYPE(FunctionAnnotationRParen) \
TYPE(FunctionDeclarationName) \
TYPE(FunctionLBrace) \
+ TYPE(FunctionLikeOrFreestandingMacro) \
TYPE(FunctionTypeLParen) \
TYPE(IfMacro) \
TYPE(ImplicitStringLiteral) \
@@ -95,6 +96,7 @@ namespace format {
TYPE(PointerOrReference) \
TYPE(PureVirtualSpecifier) \
TYPE(RangeBasedForLoopColon) \
+ TYPE(RecordLBrace) \
TYPE(RegexLiteral) \
TYPE(SelectorName) \
TYPE(StartOfName) \
@@ -442,6 +444,9 @@ public:
/// This starts an array initializer.
bool IsArrayInitializer = false;
+ /// Is optional and can be removed.
+ bool Optional = false;
+
/// If this token starts a block, this contains all the unwrapped lines
/// in it.
SmallVector<AnnotatedLine *, 1> Children;
@@ -634,6 +639,12 @@ public:
return WhitespaceRange.getEnd();
}
+ /// Returns \c true if the range of whitespace immediately preceding the \c
+ /// Token is not empty.
+ bool hasWhitespaceBefore() const {
+ return WhitespaceRange.getBegin() != WhitespaceRange.getEnd();
+ }
+
prec::Level getPrecedence() const {
return getBinOpPrecedence(Tok.getKind(), /*GreaterThanIsOperator=*/true,
/*CPlusPlus11=*/true);
diff --git a/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp b/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp
index 7736a7042f86..c9166f4b17aa 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp
+++ b/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp
@@ -429,18 +429,21 @@ bool FormatTokenLexer::tryMergeLessLess() {
if (Tokens.size() < 3)
return false;
- bool FourthTokenIsLess = false;
- if (Tokens.size() > 3)
- FourthTokenIsLess = (Tokens.end() - 4)[0]->is(tok::less);
-
auto First = Tokens.end() - 3;
- if (First[2]->is(tok::less) || First[1]->isNot(tok::less) ||
- First[0]->isNot(tok::less) || FourthTokenIsLess)
+ if (First[0]->isNot(tok::less) || First[1]->isNot(tok::less))
return false;
// Only merge if there currently is no whitespace between the two "<".
- if (First[1]->WhitespaceRange.getBegin() !=
- First[1]->WhitespaceRange.getEnd())
+ if (First[1]->hasWhitespaceBefore())
+ return false;
+
+ auto X = Tokens.size() > 3 ? First[-1] : nullptr;
+ auto Y = First[2];
+ if ((X && X->is(tok::less)) || Y->is(tok::less))
+ return false;
+
+ // Do not remove a whitespace between the two "<" e.g. "operator< <>".
+ if (X && X->is(tok::kw_operator) && Y->is(tok::greater))
return false;
First[0]->Tok.setKind(tok::lessless);
@@ -461,8 +464,7 @@ bool FormatTokenLexer::tryMergeTokens(ArrayRef<tok::TokenKind> Kinds,
return false;
unsigned AddLength = 0;
for (unsigned i = 1; i < Kinds.size(); ++i) {
- if (!First[i]->is(Kinds[i]) || First[i]->WhitespaceRange.getBegin() !=
- First[i]->WhitespaceRange.getEnd())
+ if (!First[i]->is(Kinds[i]) || First[i]->hasWhitespaceBefore())
return false;
AddLength += First[i]->TokenText.size();
}
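
The reworked tryMergeLessLess() still fuses two adjacent '<' tokens into one '<<', but it now refuses to drop the space after 'operator'. A small illustration of the two situations; the declarations are made up, only the "operator< <>" shape comes from the comment in the hunk:

    #include <ostream>

    struct Foo {
      int Value;
    };

    // Two '<' written with no whitespace in between still merge into '<<':
    inline std::ostream &operator<<(std::ostream &OS, const Foo &F) {
      return OS << F.Value;
    }

    template <typename T> bool operator<(const T &, const T &);

    // ...but the space in an explicit specialization of operator< must stay;
    // without it the tokens would lex as 'operator<<' followed by '>'.
    template <> bool operator< <>(const Foo &LHS, const Foo &RHS);
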
diff --git a/contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp b/contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp
index 38ab5b9df76d..0c34c6126c21 100644
--- a/contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp
+++ b/contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp
@@ -28,7 +28,7 @@ std::string computeName(const FormatToken *NamespaceTok) {
assert(NamespaceTok &&
NamespaceTok->isOneOf(tok::kw_namespace, TT_NamespaceMacro) &&
"expecting a namespace token");
- std::string name = "";
+ std::string name;
const FormatToken *Tok = NamespaceTok->getNextNonComment();
if (NamespaceTok->is(TT_NamespaceMacro)) {
// Collects all the non-comment tokens between opening parenthesis
@@ -224,7 +224,7 @@ std::pair<tooling::Replacements, unsigned> NamespaceEndCommentsFixer::analyze(
return {Fixes, 0};
}
- std::string AllNamespaceNames = "";
+ std::string AllNamespaceNames;
size_t StartLineIndex = SIZE_MAX;
StringRef NamespaceTokenText;
unsigned int CompactedNamespacesCount = 0;
@@ -260,8 +260,9 @@ std::pair<tooling::Replacements, unsigned> NamespaceEndCommentsFixer::analyze(
// remove end comment, it will be merged in next one
updateEndComment(EndCommentPrevTok, std::string(), SourceMgr, &Fixes);
}
- CompactedNamespacesCount++;
- AllNamespaceNames = "::" + NamespaceName + AllNamespaceNames;
+ ++CompactedNamespacesCount;
+ if (!NamespaceName.empty())
+ AllNamespaceNames = "::" + NamespaceName + AllNamespaceNames;
continue;
}
NamespaceName += AllNamespaceNames;
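
This code path assembles the combined end comment used with CompactNamespaces; the new empty-name check keeps it from prefixing a stray "::". Roughly the output shape it builds, as an illustration rather than a captured test result:

    namespace out { namespace mid { namespace in {
    int answer() { return 42; }
    }}} // namespace out::mid::in
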
diff --git a/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp b/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp
index 5a89225c7fc8..b3a4684bead1 100644
--- a/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp
+++ b/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp
@@ -88,11 +88,11 @@ std::pair<tooling::Replacements, unsigned> QualifierAlignmentFixer::analyze(
// Don't make replacements that replace nothing.
tooling::Replacements NonNoOpFixes;
- for (auto I = Fixes.begin(), E = Fixes.end(); I != E; ++I) {
- StringRef OriginalCode = Code.substr(I->getOffset(), I->getLength());
+ for (const tooling::Replacement &Fix : Fixes) {
+ StringRef OriginalCode = Code.substr(Fix.getOffset(), Fix.getLength());
- if (!OriginalCode.equals(I->getReplacementText())) {
- auto Err = NonNoOpFixes.add(*I);
+ if (!OriginalCode.equals(Fix.getReplacementText())) {
+ auto Err = NonNoOpFixes.add(Fix);
if (Err)
llvm::errs() << "Error adding replacements : "
<< llvm::toString(std::move(Err)) << "\n";
@@ -204,9 +204,9 @@ static void rotateTokens(const SourceManager &SourceMgr,
replaceToken(SourceMgr, Fixes, Range, NewText);
}
-FormatToken *LeftRightQualifierAlignmentFixer::analyzeRight(
+const FormatToken *LeftRightQualifierAlignmentFixer::analyzeRight(
const SourceManager &SourceMgr, const AdditionalKeywords &Keywords,
- tooling::Replacements &Fixes, FormatToken *Tok,
+ tooling::Replacements &Fixes, const FormatToken *Tok,
const std::string &Qualifier, tok::TokenKind QualifierType) {
// We only need to think about streams that begin with a qualifier.
if (!Tok->is(QualifierType))
@@ -261,10 +261,8 @@ FormatToken *LeftRightQualifierAlignmentFixer::analyzeRight(
// Move to the end of any template class members e.g.
// `Foo<int>::iterator`.
if (Next && Next->startsSequence(TT_TemplateCloser, tok::coloncolon,
- tok::identifier)) {
- Next = Next->Next->Next;
+ tok::identifier))
return Tok;
- }
assert(Next && "Missing template opener");
Next = Next->Next;
}
@@ -281,16 +279,16 @@ FormatToken *LeftRightQualifierAlignmentFixer::analyzeRight(
return Tok;
}
-FormatToken *LeftRightQualifierAlignmentFixer::analyzeLeft(
+const FormatToken *LeftRightQualifierAlignmentFixer::analyzeLeft(
const SourceManager &SourceMgr, const AdditionalKeywords &Keywords,
- tooling::Replacements &Fixes, FormatToken *Tok,
+ tooling::Replacements &Fixes, const FormatToken *Tok,
const std::string &Qualifier, tok::TokenKind QualifierType) {
// if Tok is an identifier and possibly a macro then don't convert.
if (LeftRightQualifierAlignmentFixer::isPossibleMacro(Tok))
return Tok;
- FormatToken *Qual = Tok;
- FormatToken *LastQual = Qual;
+ const FormatToken *Qual = Tok;
+ const FormatToken *LastQual = Qual;
while (Qual && isQualifierOrType(Qual, ConfiguredQualifierTokens)) {
LastQual = Qual;
Qual = Qual->Next;
@@ -326,7 +324,7 @@ FormatToken *LeftRightQualifierAlignmentFixer::analyzeLeft(
Tok->Previous->isOneOf(tok::star, tok::ampamp, tok::amp)) {
return Tok;
}
- FormatToken *Next = Tok->Next;
+ const FormatToken *Next = Tok->Next;
// The case `std::Foo<T> const` -> `const std::Foo<T> &&`
while (Next && Next->isOneOf(tok::identifier, tok::coloncolon))
Next = Next->Next;
@@ -334,6 +332,8 @@ FormatToken *LeftRightQualifierAlignmentFixer::analyzeLeft(
Next->Previous->startsSequence(tok::identifier, TT_TemplateOpener)) {
// Read from the TemplateOpener to the end of the TemplateCloser, e.g.
// const ArrayRef<int> a; const ArrayRef<int> &a;

+ if (Next->is(tok::comment) && Next->getNextNonComment())
+ Next = Next->getNextNonComment();
assert(Next->MatchingParen && "Missing template closer");
Next = Next->MatchingParen->Next;
@@ -394,11 +394,12 @@ LeftRightQualifierAlignmentFixer::analyze(
tok::TokenKind QualifierToken = getTokenFromQualifier(Qualifier);
assert(QualifierToken != tok::identifier && "Unrecognised Qualifier");
- for (size_t I = 0, E = AnnotatedLines.size(); I != E; ++I) {
- FormatToken *First = AnnotatedLines[I]->First;
- const auto *Last = AnnotatedLines[I]->Last;
+ for (AnnotatedLine *Line : AnnotatedLines) {
+ FormatToken *First = Line->First;
+ const auto *Last = Line->Last;
- for (auto *Tok = First; Tok && Tok != Last && Tok->Next; Tok = Tok->Next) {
+ for (const auto *Tok = First; Tok && Tok != Last && Tok->Next;
+ Tok = Tok->Next) {
if (Tok->is(tok::comment))
continue;
if (RightAlign)
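
The fixer these hunks constify walks every annotated line and moves qualifiers such as const to the configured side of the type. A rough before/after for a right-aligned ("east const") configuration; the option names QualifierAlignment/QualifierOrder are taken from the clang-format documentation of this feature, and the result is illustrative:

    #include <string>

    // As written by the author:
    const int *Pointer;
    const std::string &name();

    // Intended result with qualifiers aligned to the right of the type:
    //   int const *Pointer;
    //   std::string const &name();
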
diff --git a/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.h b/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.h
index 7abd25687564..30ef96b8b0a7 100644
--- a/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.h
+++ b/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.h
@@ -72,17 +72,19 @@ public:
static tok::TokenKind getTokenFromQualifier(const std::string &Qualifier);
- FormatToken *analyzeRight(const SourceManager &SourceMgr,
- const AdditionalKeywords &Keywords,
- tooling::Replacements &Fixes, FormatToken *Tok,
- const std::string &Qualifier,
- tok::TokenKind QualifierType);
-
- FormatToken *analyzeLeft(const SourceManager &SourceMgr,
- const AdditionalKeywords &Keywords,
- tooling::Replacements &Fixes, FormatToken *Tok,
- const std::string &Qualifier,
- tok::TokenKind QualifierType);
+ const FormatToken *analyzeRight(const SourceManager &SourceMgr,
+ const AdditionalKeywords &Keywords,
+ tooling::Replacements &Fixes,
+ const FormatToken *Tok,
+ const std::string &Qualifier,
+ tok::TokenKind QualifierType);
+
+ const FormatToken *analyzeLeft(const SourceManager &SourceMgr,
+ const AdditionalKeywords &Keywords,
+ tooling::Replacements &Fixes,
+ const FormatToken *Tok,
+ const std::string &Qualifier,
+ tok::TokenKind QualifierType);
// Is the token a simple type or a qualifier?
static bool isQualifierOrType(const FormatToken *Tok,
diff --git a/contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp b/contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp
index 77dc0d683e5f..e4107525a7ff 100644
--- a/contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp
+++ b/contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp
@@ -78,6 +78,7 @@ struct JsModuleReference {
ABSOLUTE, // from 'something'
RELATIVE_PARENT, // from '../*'
RELATIVE, // from './*'
+ ALIAS, // import X = A.B;
};
ReferenceCategory Category = ReferenceCategory::SIDE_EFFECT;
// The URL imported, e.g. `import .. from 'url';`. Empty for `export {a, b};`.
@@ -105,10 +106,12 @@ bool operator<(const JsModuleReference &LHS, const JsModuleReference &RHS) {
return LHS.IsExport < RHS.IsExport;
if (LHS.Category != RHS.Category)
return LHS.Category < RHS.Category;
- if (LHS.Category == JsModuleReference::ReferenceCategory::SIDE_EFFECT)
- // Side effect imports might be ordering sensitive. Consider them equal so
- // that they maintain their relative order in the stable sort below.
- // This retains transitivity because LHS.Category == RHS.Category here.
+ if (LHS.Category == JsModuleReference::ReferenceCategory::SIDE_EFFECT ||
+ LHS.Category == JsModuleReference::ReferenceCategory::ALIAS)
+ // Side effect imports and aliases might be ordering sensitive. Consider
+ // them equal so that they maintain their relative order in the stable sort
+ // below. This retains transitivity because LHS.Category == RHS.Category
+ // here.
return false;
// Empty URLs sort *last* (for export {...};).
if (LHS.URL.empty() != RHS.URL.empty())
@@ -260,13 +263,13 @@ private:
while (Start != References.end() && Start->FormattingOff) {
// Skip over all imports w/ disabled formatting.
ReferencesSorted.push_back(*Start);
- Start++;
+ ++Start;
}
SmallVector<JsModuleReference, 16> SortChunk;
while (Start != References.end() && !Start->FormattingOff) {
// Skip over all imports w/ disabled formatting.
SortChunk.push_back(*Start);
- Start++;
+ ++Start;
}
llvm::stable_sort(SortChunk);
mergeModuleReferences(SortChunk);
@@ -338,10 +341,12 @@ private:
// Stitch together the module reference start...
Buffer += getSourceText(Reference.Range.getBegin(), Reference.SymbolsStart);
// ... then the references in order ...
- for (auto I = Symbols.begin(), E = Symbols.end(); I != E; ++I) {
- if (I != Symbols.begin())
+ if (!Symbols.empty()) {
+ Buffer += getSourceText(Symbols.front().Range);
+ for (const JsImportedSymbol &Symbol : llvm::drop_begin(Symbols)) {
Buffer += ",";
- Buffer += getSourceText(I->Range);
+ Buffer += getSourceText(Symbol.Range);
+ }
}
// ... followed by the module reference end.
Buffer += getSourceText(Reference.SymbolsEnd, Reference.Range.getEnd());
@@ -359,6 +364,7 @@ private:
bool AnyImportAffected = false;
bool FormattingOff = false;
for (auto *Line : AnnotatedLines) {
+ assert(Line->First);
Current = Line->First;
LineEnd = Line->Last;
// clang-format comments toggle formatting on/off.
@@ -395,6 +401,8 @@ private:
JsModuleReference Reference;
Reference.FormattingOff = FormattingOff;
Reference.Range.setBegin(Start);
+ // References w/o a URL, e.g. export {A}, group with RELATIVE.
+ Reference.Category = JsModuleReference::ReferenceCategory::RELATIVE;
if (!parseModuleReference(Keywords, Reference)) {
if (!FirstNonImportLine)
FirstNonImportLine = Line; // if no comment before.
@@ -410,9 +418,8 @@ private:
<< ", cat: " << Reference.Category
<< ", url: " << Reference.URL
<< ", prefix: " << Reference.Prefix;
- for (size_t I = 0; I < Reference.Symbols.size(); ++I)
- llvm::dbgs() << ", " << Reference.Symbols[I].Symbol << " as "
- << Reference.Symbols[I].Alias;
+ for (const JsImportedSymbol &Symbol : Reference.Symbols)
+ llvm::dbgs() << ", " << Symbol.Symbol << " as " << Symbol.Alias;
llvm::dbgs() << ", text: " << getSourceText(Reference.Range);
llvm::dbgs() << "}\n";
});
@@ -461,9 +468,6 @@ private:
Reference.Category = JsModuleReference::ReferenceCategory::RELATIVE;
else
Reference.Category = JsModuleReference::ReferenceCategory::ABSOLUTE;
- } else {
- // w/o URL groups with "empty".
- Reference.Category = JsModuleReference::ReferenceCategory::RELATIVE;
}
return true;
}
@@ -499,6 +503,21 @@ private:
nextToken();
if (Current->is(Keywords.kw_from))
return true;
+ // import X = A.B.C;
+ if (Current->is(tok::equal)) {
+ Reference.Category = JsModuleReference::ReferenceCategory::ALIAS;
+ nextToken();
+ while (Current->is(tok::identifier)) {
+ nextToken();
+ if (Current->is(tok::semi)) {
+ nextToken();
+ return true;
+ }
+ if (!Current->is(tok::period))
+ return false;
+ nextToken();
+ }
+ }
if (Current->isNot(tok::comma))
return false;
nextToken(); // eat comma.
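
The new ALIAS category covers TypeScript namespace-alias imports of the form "import X = A.B.C;", and the comparator above reports such references as equal so the later stable sort keeps their original order. A small self-contained sketch of that comparator pattern, with toy types instead of the real JsModuleReference:

    #include <algorithm>
    #include <string>
    #include <vector>

    enum class Category { SideEffect, Alias, Absolute, Relative };

    struct Ref {
      Category Cat;
      std::string URL;
    };

    // Order-sensitive categories compare as "equal" so std::stable_sort keeps
    // their relative order; everything else sorts by category, then URL.
    static bool lessRef(const Ref &L, const Ref &R) {
      if (L.Cat != R.Cat)
        return L.Cat < R.Cat;
      if (L.Cat == Category::SideEffect || L.Cat == Category::Alias)
        return false;
      return L.URL < R.URL;
    }

    int main() {
      std::vector<Ref> Refs = {{Category::Alias, "B"}, {Category::Alias, "A"}};
      std::stable_sort(Refs.begin(), Refs.end(), lessRef);
      // Refs keeps its original B-before-A order because aliases compare equal.
      return 0;
    }
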
diff --git a/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp b/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp
index d83e837ca134..d0754e0c1112 100644
--- a/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp
+++ b/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp
@@ -127,11 +127,8 @@ std::pair<tooling::Replacements, unsigned> TokenAnalyzer::process() {
LLVM_DEBUG({
llvm::dbgs() << "Replacements for run " << Run << ":\n";
- for (tooling::Replacements::const_iterator I = RunResult.first.begin(),
- E = RunResult.first.end();
- I != E; ++I) {
- llvm::dbgs() << I->toString() << "\n";
- }
+ for (const tooling::Replacement &Fix : RunResult.first)
+ llvm::dbgs() << Fix.toString() << "\n";
});
for (unsigned i = 0, e = AnnotatedLines.size(); i != e; ++i) {
delete AnnotatedLines[i];
diff --git a/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp b/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp
index 505a7250572b..9d130dbb02eb 100644
--- a/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp
+++ b/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp
@@ -75,7 +75,7 @@ public:
: Style(Style), Line(Line), CurrentToken(Line.First), AutoFound(false),
Keywords(Keywords) {
Contexts.push_back(Context(tok::unknown, 1, /*IsExpression=*/false));
- resetTokenMetadata(CurrentToken);
+ resetTokenMetadata();
}
private:
@@ -780,6 +780,7 @@ private:
unsigned CommaCount = 0;
while (CurrentToken) {
if (CurrentToken->is(tok::r_brace)) {
+ assert(Left->Optional == CurrentToken->Optional);
Left->MatchingParen = CurrentToken;
CurrentToken->MatchingParen = Left;
if (Style.AlignArrayOfStructures != FormatStyle::AIAS_None) {
@@ -1409,8 +1410,8 @@ private:
Tok.Next->Next->Next && Tok.Next->Next->Next->is(tok::l_paren);
}
- void resetTokenMetadata(FormatToken *Token) {
- if (!Token)
+ void resetTokenMetadata() {
+ if (!CurrentToken)
return;
// Reset token type in case we have already looked at it and then
@@ -1422,7 +1423,8 @@ private:
TT_LambdaArrow, TT_NamespaceMacro, TT_OverloadedOperator,
TT_RegexLiteral, TT_TemplateString, TT_ObjCStringLiteral,
TT_UntouchableMacroFunc, TT_ConstraintJunctions,
- TT_StatementAttributeLikeMacro))
+ TT_StatementAttributeLikeMacro, TT_FunctionLikeOrFreestandingMacro,
+ TT_RecordLBrace))
CurrentToken->setType(TT_Unknown);
CurrentToken->Role.reset();
CurrentToken->MatchingParen = nullptr;
@@ -1431,15 +1433,16 @@ private:
}
void next() {
- if (CurrentToken) {
- CurrentToken->NestingLevel = Contexts.size() - 1;
- CurrentToken->BindingStrength = Contexts.back().BindingStrength;
- modifyContext(*CurrentToken);
- determineTokenType(*CurrentToken);
- CurrentToken = CurrentToken->Next;
- }
+ if (!CurrentToken)
+ return;
+
+ CurrentToken->NestingLevel = Contexts.size() - 1;
+ CurrentToken->BindingStrength = Contexts.back().BindingStrength;
+ modifyContext(*CurrentToken);
+ determineTokenType(*CurrentToken);
+ CurrentToken = CurrentToken->Next;
- resetTokenMetadata(CurrentToken);
+ resetTokenMetadata();
}
/// A struct to hold information valid in a specific context, e.g.
@@ -1563,9 +1566,9 @@ private:
int ParenLevel = 0;
while (Current) {
if (Current->is(tok::l_paren))
- ParenLevel++;
+ ++ParenLevel;
if (Current->is(tok::r_paren))
- ParenLevel--;
+ --ParenLevel;
if (ParenLevel < 1)
break;
Current = Current->Next;
@@ -1589,9 +1592,9 @@ private:
break;
}
if (TemplateCloser->is(tok::less))
- NestingLevel++;
+ ++NestingLevel;
if (TemplateCloser->is(tok::greater))
- NestingLevel--;
+ --NestingLevel;
if (NestingLevel < 1)
break;
TemplateCloser = TemplateCloser->Next;
@@ -1882,15 +1885,23 @@ private:
FormatToken *LeftOfParens = Tok.MatchingParen->getPreviousNonComment();
if (LeftOfParens) {
- // If there is a closing parenthesis left of the current parentheses,
- // look past it as these might be chained casts.
- if (LeftOfParens->is(tok::r_paren)) {
+ // If there is a closing parenthesis left of the current
+ // parentheses, look past it as these might be chained casts.
+ if (LeftOfParens->is(tok::r_paren) &&
+ LeftOfParens->isNot(TT_CastRParen)) {
if (!LeftOfParens->MatchingParen ||
!LeftOfParens->MatchingParen->Previous)
return false;
LeftOfParens = LeftOfParens->MatchingParen->Previous;
}
+ // The Condition directly below this one will see the operator arguments
+ // as a (void *foo) cast.
+ // void operator delete(void *foo) ATTRIB;
+ if (LeftOfParens->Tok.getIdentifierInfo() && LeftOfParens->Previous &&
+ LeftOfParens->Previous->is(tok::kw_operator))
+ return false;
+
// If there is an identifier (or with a few exceptions a keyword) right
// before the parentheses, this is unlikely to be a cast.
if (LeftOfParens->Tok.getIdentifierInfo() &&
@@ -2343,9 +2354,10 @@ private:
void TokenAnnotator::setCommentLineLevels(
SmallVectorImpl<AnnotatedLine *> &Lines) {
const AnnotatedLine *NextNonCommentLine = nullptr;
- for (AnnotatedLine *AL : llvm::reverse(Lines)) {
+ for (AnnotatedLine *Line : llvm::reverse(Lines)) {
+ assert(Line->First);
bool CommentLine = true;
- for (const FormatToken *Tok = AL->First; Tok; Tok = Tok->Next) {
+ for (const FormatToken *Tok = Line->First; Tok; Tok = Tok->Next) {
if (!Tok->is(tok::comment)) {
CommentLine = false;
break;
@@ -2357,20 +2369,21 @@ void TokenAnnotator::setCommentLineLevels(
if (NextNonCommentLine && CommentLine &&
NextNonCommentLine->First->NewlinesBefore <= 1 &&
NextNonCommentLine->First->OriginalColumn ==
- AL->First->OriginalColumn) {
+ Line->First->OriginalColumn) {
// Align comments for preprocessor lines with the # in column 0 if
// preprocessor lines are not indented. Otherwise, align with the next
// line.
- AL->Level = (Style.IndentPPDirectives != FormatStyle::PPDIS_BeforeHash &&
- (NextNonCommentLine->Type == LT_PreprocessorDirective ||
- NextNonCommentLine->Type == LT_ImportStatement))
- ? 0
- : NextNonCommentLine->Level;
+ Line->Level =
+ (Style.IndentPPDirectives != FormatStyle::PPDIS_BeforeHash &&
+ (NextNonCommentLine->Type == LT_PreprocessorDirective ||
+ NextNonCommentLine->Type == LT_ImportStatement))
+ ? 0
+ : NextNonCommentLine->Level;
} else {
- NextNonCommentLine = AL->First->isNot(tok::r_brace) ? AL : nullptr;
+ NextNonCommentLine = Line->First->isNot(tok::r_brace) ? Line : nullptr;
}
- setCommentLineLevels(AL->Children);
+ setCommentLineLevels(Line->Children);
}
}
@@ -2549,11 +2562,8 @@ bool TokenAnnotator::mustBreakForReturnType(const AnnotatedLine &Line) const {
}
void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
- for (SmallVectorImpl<AnnotatedLine *>::iterator I = Line.Children.begin(),
- E = Line.Children.end();
- I != E; ++I) {
- calculateFormattingInformation(**I);
- }
+ for (AnnotatedLine *ChildLine : Line.Children)
+ calculateFormattingInformation(*ChildLine);
Line.First->TotalLength =
Line.First->IsMultiline ? Style.ColumnLimit
@@ -2569,9 +2579,9 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
while (Current) {
if (isFunctionDeclarationName(Style.isCpp(), *Current, Line))
Current->setType(TT_FunctionDeclarationName);
+ const FormatToken *Prev = Current->Previous;
if (Current->is(TT_LineComment)) {
- if (Current->Previous->is(BK_BracedInit) &&
- Current->Previous->opensScope())
+ if (Prev->is(BK_BracedInit) && Prev->opensScope())
Current->SpacesRequiredBefore =
(Style.Cpp11BracedListStyle && !Style.SpacesInParentheses) ? 0 : 1;
else
@@ -2612,12 +2622,11 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
Current->CanBreakBefore =
Current->MustBreakBefore || canBreakBefore(Line, *Current);
unsigned ChildSize = 0;
- if (Current->Previous->Children.size() == 1) {
- FormatToken &LastOfChild = *Current->Previous->Children[0]->Last;
+ if (Prev->Children.size() == 1) {
+ FormatToken &LastOfChild = *Prev->Children[0]->Last;
ChildSize = LastOfChild.isTrailingComment() ? Style.ColumnLimit
: LastOfChild.TotalLength + 1;
}
- const FormatToken *Prev = Current->Previous;
if (Current->MustBreakBefore || Prev->Children.size() > 1 ||
(Prev->Children.size() == 1 &&
Prev->Children[0]->First->MustBreakBefore) ||
@@ -2857,6 +2866,8 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
Left.Previous->isOneOf(tok::identifier, tok::greater))
return 500;
+ if (Left.is(tok::l_paren) && Style.PenaltyBreakOpenParenthesis != 0)
+ return Style.PenaltyBreakOpenParenthesis;
if (Left.is(tok::l_paren) && InFunctionDecl &&
Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign)
return 100;
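
PenaltyBreakOpenParenthesis is a new style field consulted here ahead of the existing parenthesis penalties. A minimal sketch of setting it programmatically; the numeric value is arbitrary, and the overall effect still depends on the other penalties in the style:

    #include "clang/Format/Format.h"

    using namespace clang;

    format::FormatStyle penalizedStyle() {
      // Start from the stock LLVM style and discourage breaking right after
      // an opening parenthesis by giving that break a high penalty.
      format::FormatStyle Style = format::getLLVMStyle();
      Style.PenaltyBreakOpenParenthesis = 1000; // value chosen for illustration
      return Style;
    }
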
@@ -2921,9 +2932,15 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
}
bool TokenAnnotator::spaceRequiredBeforeParens(const FormatToken &Right) const {
- return Style.SpaceBeforeParens == FormatStyle::SBPO_Always ||
- (Style.SpaceBeforeParensOptions.BeforeNonEmptyParentheses &&
- Right.ParameterCount > 0);
+ if (Style.SpaceBeforeParens == FormatStyle::SBPO_Always)
+ return true;
+ if (Right.is(TT_OverloadedOperatorLParen) &&
+ Style.SpaceBeforeParensOptions.AfterOverloadedOperator)
+ return true;
+ if (Style.SpaceBeforeParensOptions.BeforeNonEmptyParentheses &&
+ Right.ParameterCount > 0)
+ return true;
+ return false;
}
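
The rewritten helper adds a case for SpaceBeforeParensOptions.AfterOverloadedOperator. Illustratively, enabling that sub-option is meant to add a space between an overloaded operator's name and its parameter list; the expected output below is inferred from the option name rather than captured from the formatter:

    struct Counter;

    // As written:
    bool operator==(const Counter &LHS, const Counter &RHS);

    // Intended shape with AfterOverloadedOperator enabled:
    //   bool operator== (const Counter &LHS, const Counter &RHS);
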
bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
@@ -3290,9 +3307,11 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
const FormatToken &Right) {
const FormatToken &Left = *Right.Previous;
- auto HasExistingWhitespace = [&Right]() {
- return Right.WhitespaceRange.getBegin() != Right.WhitespaceRange.getEnd();
- };
+
+ // If the token is finalized don't touch it (as it could be in a
+ // clang-format-off section).
+ if (Left.Finalized)
+ return Right.hasWhitespaceBefore();
if (Right.Tok.getIdentifierInfo() && Left.Tok.getIdentifierInfo())
return true; // Never ever merge two identifiers.
@@ -3307,7 +3326,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
// or import .....;
if (Left.is(Keywords.kw_import) && Right.isOneOf(tok::less, tok::ellipsis))
return true;
- // No space between module :.
+ // Space between `module :` and `import :`.
if (Left.isOneOf(Keywords.kw_module, Keywords.kw_import) &&
Right.is(TT_ModulePartitionColon))
return true;
@@ -3321,12 +3340,18 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Left.is(tok::ellipsis) && Right.is(tok::identifier) &&
Line.First->is(Keywords.kw_import))
return false;
+ // Space in __attribute__((attr)) ::type.
+ if (Left.is(TT_AttributeParen) && Right.is(tok::coloncolon))
+ return true;
if (Left.is(tok::kw_operator))
return Right.is(tok::coloncolon);
if (Right.is(tok::l_brace) && Right.is(BK_BracedInit) &&
!Left.opensScope() && Style.SpaceBeforeCpp11BracedList)
return true;
+ if (Left.is(tok::less) && Left.is(TT_OverloadedOperator) &&
+ Right.is(TT_TemplateOpener))
+ return true;
} else if (Style.Language == FormatStyle::LK_Proto ||
Style.Language == FormatStyle::LK_TextProto) {
if (Right.is(tok::period) &&
@@ -3351,7 +3376,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
// Preserve the existence of a space before a percent for cases like 0x%04x
// and "%d %d"
if (Left.is(tok::numeric_constant) && Right.is(tok::percent))
- return HasExistingWhitespace();
+ return Right.hasWhitespaceBefore();
} else if (Style.isJson()) {
if (Right.is(tok::colon))
return false;
@@ -3532,7 +3557,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return true;
}
if (Left.is(TT_ImplicitStringLiteral))
- return HasExistingWhitespace();
+ return Right.hasWhitespaceBefore();
if (Line.Type == LT_ObjCMethodDecl) {
if (Left.is(TT_ObjCMethodSpecifier))
return true;
@@ -3617,11 +3642,11 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return Style.SpaceAfterCStyleCast ||
Right.isOneOf(TT_BinaryOperator, TT_SelectorName);
- auto ShouldAddSpacesInAngles = [this, &HasExistingWhitespace]() {
+ auto ShouldAddSpacesInAngles = [this, &Right]() {
if (this->Style.SpacesInAngles == FormatStyle::SIAS_Always)
return true;
if (this->Style.SpacesInAngles == FormatStyle::SIAS_Leave)
- return HasExistingWhitespace();
+ return Right.hasWhitespaceBefore();
return false;
};
@@ -3647,7 +3672,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
// Generally don't remove existing spaces between an identifier and "::".
// The identifier might actually be a macro name such as ALWAYS_INLINE. If
// this turns out to be too lenient, add analysis of the identifier itself.
- return HasExistingWhitespace();
+ return Right.hasWhitespaceBefore();
if (Right.is(tok::coloncolon) &&
!Left.isOneOf(tok::l_brace, tok::comment, tok::l_paren))
// Put a space between < and :: in vector< ::std::string >
@@ -3856,16 +3881,14 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
(Right.NewlinesBefore > 0 && Right.HasUnescapedNewline);
if (Left.isTrailingComment())
return true;
- if (Right.Previous->IsUnterminatedLiteral)
+ if (Left.IsUnterminatedLiteral)
return true;
- if (Right.is(tok::lessless) && Right.Next &&
- Right.Previous->is(tok::string_literal) &&
+ if (Right.is(tok::lessless) && Right.Next && Left.is(tok::string_literal) &&
Right.Next->is(tok::string_literal))
return true;
// Can break after template<> declaration
- if (Right.Previous->ClosesTemplateDeclaration &&
- Right.Previous->MatchingParen &&
- Right.Previous->MatchingParen->NestingLevel == 0) {
+ if (Left.ClosesTemplateDeclaration && Left.MatchingParen &&
+ Left.MatchingParen->NestingLevel == 0) {
// Put concepts on the next line e.g.
// template<typename T>
// concept ...
@@ -3898,9 +3921,8 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
// has made a deliberate choice and might have aligned the contents of the
// string literal accordingly. Thus, we try to keep existing line breaks.
return Right.IsMultiline && Right.NewlinesBefore > 0;
- if ((Right.Previous->is(tok::l_brace) ||
- (Right.Previous->is(tok::less) && Right.Previous->Previous &&
- Right.Previous->Previous->is(tok::equal))) &&
+ if ((Left.is(tok::l_brace) || (Left.is(tok::less) && Left.Previous &&
+ Left.Previous->is(tok::equal))) &&
Right.NestingLevel == 1 && Style.Language == FormatStyle::LK_Proto) {
// Don't put enums or option definitions onto single lines in protocol
// buffers.
@@ -4004,7 +4026,7 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
Right.is(TT_SelectorName) && !Right.is(tok::r_square) && Right.Next) {
// Keep `@submessage` together in:
// @submessage { key: value }
- if (Right.Previous && Right.Previous->is(tok::at))
+ if (Left.is(tok::at))
return false;
// Look for the scope opener after selector in cases like:
// selector { ...
@@ -4300,7 +4322,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
if (Right.is(TT_ImplicitStringLiteral))
return false;
- if (Right.is(tok::r_paren) || Right.is(TT_TemplateCloser))
+ if (Right.is(TT_TemplateCloser))
return false;
if (Right.is(tok::r_square) && Right.MatchingParen &&
Right.MatchingParen->is(TT_LambdaLSquare))
@@ -4311,6 +4333,18 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
if (Right.is(tok::r_brace))
return Right.MatchingParen && Right.MatchingParen->is(BK_Block);
+ // We only break before r_paren if we're in a block indented context.
+ if (Right.is(tok::r_paren)) {
+ if (Style.AlignAfterOpenBracket == FormatStyle::BAS_BlockIndent) {
+ return Right.MatchingParen &&
+ !(Right.MatchingParen->Previous &&
+ (Right.MatchingParen->Previous->is(tok::kw_for) ||
+ Right.MatchingParen->Previous->isIf()));
+ }
+
+ return false;
+ }
+
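
BAS_BlockIndent corresponds to the new AlignAfterOpenBracket: BlockIndent value, which is what makes breaking before a closing parenthesis worthwhile here. The general shape that option aims for, shown on a made-up call rather than captured output:

    int someLongFunctionName(int, int, int);
    extern int firstLongArgumentName, secondLongArgumentName, thirdLongArgumentName;

    // With AlignAfterOpenBracket: BlockIndent the arguments are block-indented
    // and the closing parenthesis may land on its own line:
    int Result = someLongFunctionName(
        firstLongArgumentName, secondLongArgumentName, thirdLongArgumentName
    );
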
// Allow breaking after a trailing annotation, e.g. after a method
// declaration.
if (Left.is(TT_TrailingAnnotation))
diff --git a/contrib/llvm-project/clang/lib/Format/TokenAnnotator.h b/contrib/llvm-project/clang/lib/Format/TokenAnnotator.h
index 6e5e62cd4d82..ecd9dbb0f864 100644
--- a/contrib/llvm-project/clang/lib/Format/TokenAnnotator.h
+++ b/contrib/llvm-project/clang/lib/Format/TokenAnnotator.h
@@ -19,8 +19,6 @@
#include "clang/Format/Format.h"
namespace clang {
-class SourceManager;
-
namespace format {
enum LineType {
@@ -53,10 +51,9 @@ public:
// left them in a different state.
First->Previous = nullptr;
FormatToken *Current = First;
- for (auto I = ++Line.Tokens.begin(), E = Line.Tokens.end(); I != E; ++I) {
- const UnwrappedLineNode &Node = *I;
- Current->Next = I->Tok;
- I->Tok->Previous = Current;
+ for (const UnwrappedLineNode &Node : llvm::drop_begin(Line.Tokens)) {
+ Current->Next = Node.Tok;
+ Node.Tok->Previous = Current;
Current = Current->Next;
Current->Children.clear();
for (const auto &Child : Node.Children) {
diff --git a/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp b/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp
index f652a4e7088f..0172a224335c 100644
--- a/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp
+++ b/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp
@@ -262,14 +262,48 @@ private:
}
}
- // FIXME: TheLine->Level != 0 might or might not be the right check to do.
- // If necessary, change to something smarter.
- bool MergeShortFunctions =
- Style.AllowShortFunctionsOnASingleLine == FormatStyle::SFS_All ||
- (Style.AllowShortFunctionsOnASingleLine >= FormatStyle::SFS_Empty &&
- I[1]->First->is(tok::r_brace)) ||
- (Style.AllowShortFunctionsOnASingleLine & FormatStyle::SFS_InlineOnly &&
- TheLine->Level != 0);
+ auto ShouldMergeShortFunctions =
+ [this, B = AnnotatedLines.begin()](
+ SmallVectorImpl<AnnotatedLine *>::const_iterator I) {
+ if (Style.AllowShortFunctionsOnASingleLine == FormatStyle::SFS_All)
+ return true;
+ if (Style.AllowShortFunctionsOnASingleLine >=
+ FormatStyle::SFS_Empty &&
+ I[1]->First->is(tok::r_brace))
+ return true;
+
+ if (Style.AllowShortFunctionsOnASingleLine &
+ FormatStyle::SFS_InlineOnly) {
+ // Just checking TheLine->Level != 0 is not enough, because it
+ // provokes treating functions inside indented namespaces as short.
+ if (Style.isJavaScript() && (*I)->Last->is(TT_FunctionLBrace))
+ return true;
+
+ if ((*I)->Level != 0) {
+ if (I == B)
+ return false;
+
+ // TODO: Use IndentTracker to avoid loop?
+ // Find the last line with lower level.
+ auto J = I - 1;
+ for (; J != B; --J)
+ if ((*J)->Level < (*I)->Level)
+ break;
+
+ // Check if the found line starts a record.
+ for (const FormatToken *RecordTok = (*J)->Last; RecordTok;
+ RecordTok = RecordTok->Previous)
+ if (RecordTok->is(tok::l_brace))
+ return RecordTok->is(TT_RecordLBrace);
+
+ return false;
+ }
+ }
+
+ return false;
+ };
+
+ bool MergeShortFunctions = ShouldMergeShortFunctions(I);
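
The lambda above refines AllowShortFunctionsOnASingleLine: InlineOnly: an indented line now only counts as inline if the enclosing open brace is a record brace (the new TT_RecordLBrace), so bodies inside indented namespaces are no longer merged. Roughly the distinction being drawn, as a sketch of my reading of the comment above:

    struct Widget {
      int Size = 0;
      // Defined inside a record: InlineOnly may keep this on one line.
      int size() const { return Size; }
    };

    namespace indented {
      // Indented, but the enclosing brace is a namespace brace rather than a
      // record brace, so InlineOnly should leave the body wrapped.
      int size() { return 0; }
    } // namespace indented
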
if (Style.CompactNamespaces) {
if (auto nsToken = TheLine->First->getNamespaceToken()) {
@@ -313,7 +347,8 @@ private:
}
// Try to merge a control statement block with left brace unwrapped
if (TheLine->Last->is(tok::l_brace) && TheLine->First != TheLine->Last &&
- TheLine->First->isOneOf(tok::kw_if, tok::kw_while, tok::kw_for)) {
+ TheLine->First->isOneOf(tok::kw_if, tok::kw_while, tok::kw_for,
+ TT_ForEachMacro)) {
return Style.AllowShortBlocksOnASingleLine != FormatStyle::SBS_Never
? tryMergeSimpleBlock(I, E, Limit)
: 0;
@@ -336,7 +371,7 @@ private:
: 0;
} else if (I[1]->First->is(tok::l_brace) &&
TheLine->First->isOneOf(tok::kw_if, tok::kw_else, tok::kw_while,
- tok::kw_for)) {
+ tok::kw_for, TT_ForEachMacro)) {
return (Style.BraceWrapping.AfterControlStatement ==
FormatStyle::BWACS_Always)
? tryMergeSimpleBlock(I, E, Limit)
@@ -391,7 +426,7 @@ private:
}
}
- // Try to merge a block with left brace wrapped that wasn't yet covered
+ // Try to merge a block with left brace unwrapped that wasn't yet covered
if (TheLine->Last->is(tok::l_brace)) {
const FormatToken *Tok = TheLine->First;
bool ShouldMerge = false;
@@ -451,7 +486,8 @@ private:
? tryMergeSimpleControlStatement(I, E, Limit)
: 0;
}
- if (TheLine->First->isOneOf(tok::kw_for, tok::kw_while, tok::kw_do)) {
+ if (TheLine->First->isOneOf(tok::kw_for, tok::kw_while, tok::kw_do,
+ TT_ForEachMacro)) {
return Style.AllowShortLoopsOnASingleLine
? tryMergeSimpleControlStatement(I, E, Limit)
: 0;
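
Several of these merging checks now list TT_ForEachMacro next to the loop keywords, so macros configured via ForEachMacros follow the same short-loop rules. The macro below is a stand-in for illustration; whether the call actually merges still depends on AllowShortLoopsOnASingleLine and the column limit:

    #include <vector>

    // A typical foreach-style macro; real projects list such names under the
    // ForEachMacros style option so clang-format treats them like loops.
    #define FOREACH(Var, Container) for (auto &Var : Container)

    int sum(const std::vector<int> &Values) {
      int Total = 0;
      FOREACH(V, Values) Total += V; // now subject to the short-loop rules
      return Total;
    }
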
@@ -499,13 +535,14 @@ private:
if (!Line.First->is(tok::kw_do) && !Line.First->is(tok::kw_else) &&
!Line.Last->is(tok::kw_else) && Line.Last->isNot(tok::r_paren))
return 0;
- // Only merge do while if do is the only statement on the line.
+ // Only merge `do while` if `do` is the only statement on the line.
if (Line.First->is(tok::kw_do) && !Line.Last->is(tok::kw_do))
return 0;
if (1 + I[1]->Last->TotalLength > Limit)
return 0;
+ // Don't merge with loops, ifs, a single semicolon or a line comment.
if (I[1]->First->isOneOf(tok::semi, tok::kw_if, tok::kw_for, tok::kw_while,
- TT_LineComment))
+ TT_ForEachMacro, TT_LineComment))
return 0;
// Only inline simple if's (no nested if or else), unless specified
if (Style.AllowShortIfStatementsOnASingleLine ==
@@ -593,8 +630,8 @@ private:
}
if (Line.First->isOneOf(tok::kw_if, tok::kw_else, tok::kw_while, tok::kw_do,
tok::kw_try, tok::kw___try, tok::kw_catch,
- tok::kw___finally, tok::kw_for, tok::r_brace,
- Keywords.kw___except)) {
+ tok::kw___finally, tok::kw_for, TT_ForEachMacro,
+ tok::r_brace, Keywords.kw___except)) {
if (Style.AllowShortBlocksOnASingleLine == FormatStyle::SBS_Never)
return 0;
if (Style.AllowShortBlocksOnASingleLine == FormatStyle::SBS_Empty &&
@@ -614,12 +651,14 @@ private:
I + 2 != E && !I[2]->First->is(tok::r_brace))
return 0;
if (!Style.AllowShortLoopsOnASingleLine &&
- Line.First->isOneOf(tok::kw_while, tok::kw_do, tok::kw_for) &&
+ Line.First->isOneOf(tok::kw_while, tok::kw_do, tok::kw_for,
+ TT_ForEachMacro) &&
!Style.BraceWrapping.AfterControlStatement &&
!I[1]->First->is(tok::r_brace))
return 0;
if (!Style.AllowShortLoopsOnASingleLine &&
- Line.First->isOneOf(tok::kw_while, tok::kw_do, tok::kw_for) &&
+ Line.First->isOneOf(tok::kw_while, tok::kw_do, tok::kw_for,
+ TT_ForEachMacro) &&
Style.BraceWrapping.AfterControlStatement ==
FormatStyle::BWACS_Always &&
I + 2 != E && !I[2]->First->is(tok::r_brace))
@@ -1060,9 +1099,9 @@ private:
FormatDecision LastFormat = Node->State.NextToken->getDecision();
if (LastFormat == FD_Unformatted || LastFormat == FD_Continue)
- addNextStateToQueue(Penalty, Node, /*NewLine=*/false, Count, Queue);
+ addNextStateToQueue(Penalty, Node, /*NewLine=*/false, &Count, &Queue);
if (LastFormat == FD_Unformatted || LastFormat == FD_Break)
- addNextStateToQueue(Penalty, Node, /*NewLine=*/true, Count, Queue);
+ addNextStateToQueue(Penalty, Node, /*NewLine=*/true, &Count, &Queue);
}
if (Queue.empty()) {
@@ -1088,7 +1127,7 @@ private:
/// Assume the current state is \p PreviousNode and has been reached with a
/// penalty of \p Penalty. Insert a line break if \p NewLine is \c true.
void addNextStateToQueue(unsigned Penalty, StateNode *PreviousNode,
- bool NewLine, unsigned &Count, QueueType &Queue) {
+ bool NewLine, unsigned *Count, QueueType *Queue) {
if (NewLine && !Indenter->canBreak(PreviousNode->State))
return;
if (!NewLine && Indenter->mustBreak(PreviousNode->State))
@@ -1101,29 +1140,29 @@ private:
Penalty += Indenter->addTokenToState(Node->State, NewLine, true);
- Queue.push(QueueItem(OrderedPenalty(Penalty, Count), Node));
- ++Count;
+ Queue->push(QueueItem(OrderedPenalty(Penalty, *Count), Node));
+ ++(*Count);
}
/// Applies the best formatting by reconstructing the path in the
/// solution space that leads to \c Best.
void reconstructPath(LineState &State, StateNode *Best) {
- std::deque<StateNode *> Path;
+ llvm::SmallVector<StateNode *> Path;
// We do not need a break before the initial token.
while (Best->Previous) {
- Path.push_front(Best);
+ Path.push_back(Best);
Best = Best->Previous;
}
- for (auto I = Path.begin(), E = Path.end(); I != E; ++I) {
+ for (const auto &Node : llvm::reverse(Path)) {
unsigned Penalty = 0;
- formatChildren(State, (*I)->NewLine, /*DryRun=*/false, Penalty);
- Penalty += Indenter->addTokenToState(State, (*I)->NewLine, false);
+ formatChildren(State, Node->NewLine, /*DryRun=*/false, Penalty);
+ Penalty += Indenter->addTokenToState(State, Node->NewLine, false);
LLVM_DEBUG({
- printLineState((*I)->Previous->State);
- if ((*I)->NewLine) {
+ printLineState(Node->Previous->State);
+ if (Node->NewLine) {
llvm::dbgs() << "Penalty for placing "
- << (*I)->Previous->State.NextToken->Tok.getName()
+ << Node->Previous->State.NextToken->Tok.getName()
<< " on a new line: " << Penalty << "\n";
}
});
@@ -1162,7 +1201,9 @@ unsigned UnwrappedLineFormatter::format(
bool FirstLine = true;
for (const AnnotatedLine *Line =
Joiner.getNextMergedLine(DryRun, IndentTracker);
- Line; Line = NextLine, FirstLine = false) {
+ Line; PrevPrevLine = PreviousLine, PreviousLine = Line, Line = NextLine,
+ FirstLine = false) {
+ assert(Line->First);
const AnnotatedLine &TheLine = *Line;
unsigned Indent = IndentTracker.getIndent();
@@ -1190,7 +1231,7 @@ unsigned UnwrappedLineFormatter::format(
if (ShouldFormat && TheLine.Type != LT_Invalid) {
if (!DryRun) {
- bool LastLine = Line->First->is(tok::eof);
+ bool LastLine = TheLine.First->is(tok::eof);
formatFirstToken(TheLine, PreviousLine, PrevPrevLine, Lines, Indent,
LastLine ? LastStartColumn : NextStartColumn + Indent);
}
@@ -1252,8 +1293,6 @@ unsigned UnwrappedLineFormatter::format(
}
if (!DryRun)
markFinalized(TheLine.First);
- PrevPrevLine = PreviousLine;
- PreviousLine = &TheLine;
}
PenaltyCache[CacheKey] = Penalty;
return Penalty;
diff --git a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp
index b6e55aab708f..35be2fa3eb62 100644
--- a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp
+++ b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp
@@ -14,6 +14,7 @@
#include "UnwrappedLineParser.h"
#include "FormatToken.h"
+#include "TokenAnnotator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -32,7 +33,7 @@ public:
// Returns the next token in the token stream.
virtual FormatToken *getNextToken() = 0;
- // Returns the token precedint the token returned by the last call to
+ // Returns the token preceding the token returned by the last call to
// getNextToken() in the token stream, or nullptr if no such token exists.
virtual FormatToken *getPreviousToken() = 0;
@@ -57,7 +58,7 @@ namespace {
class ScopedDeclarationState {
public:
- ScopedDeclarationState(UnwrappedLine &Line, std::vector<bool> &Stack,
+ ScopedDeclarationState(UnwrappedLine &Line, llvm::BitVector &Stack,
bool MustBeDeclaration)
: Line(Line), Stack(Stack) {
Line.MustBeDeclaration = MustBeDeclaration;
@@ -73,7 +74,7 @@ public:
private:
UnwrappedLine &Line;
- std::vector<bool> &Stack;
+ llvm::BitVector &Stack;
};
static bool isLineComment(const FormatToken &FormatTok) {
@@ -246,8 +247,7 @@ public:
}
FormatToken *getPreviousToken() override {
- assert(Position > 0);
- return Tokens[Position - 1];
+ return Position > 0 ? Tokens[Position - 1] : nullptr;
}
FormatToken *peekNextToken() override {
@@ -316,6 +316,7 @@ void UnwrappedLineParser::reset() {
PreprocessorDirectives.clear();
CurrentLines = &Lines;
DeclarationScopeStack.clear();
+ NestedTooDeep.clear();
PPStack.clear();
Line->FirstStartColumn = FirstStartColumn;
}
@@ -343,11 +344,9 @@ void UnwrappedLineParser::parse() {
pushToken(FormatTok);
addUnwrappedLine();
- for (SmallVectorImpl<UnwrappedLine>::iterator I = Lines.begin(),
- E = Lines.end();
- I != E; ++I) {
- Callback.consumeUnwrappedLine(*I);
- }
+ for (const UnwrappedLine &Line : Lines)
+ Callback.consumeUnwrappedLine(Line);
+
Callback.finishRun();
Lines.clear();
while (!PPLevelBranchIndex.empty() &&
@@ -431,7 +430,47 @@ void UnwrappedLineParser::parseCSharpAttribute() {
} while (!eof());
}
-void UnwrappedLineParser::parseLevel(bool HasOpeningBrace) {
+bool UnwrappedLineParser::precededByCommentOrPPDirective() const {
+ if (!Lines.empty() && Lines.back().InPPDirective)
+ return true;
+
+ const FormatToken *Previous = Tokens->getPreviousToken();
+ return Previous && Previous->is(tok::comment) &&
+ (Previous->IsMultiline || Previous->NewlinesBefore > 0);
+}
+
+bool UnwrappedLineParser::mightFitOnOneLine() const {
+ const auto ColumnLimit = Style.ColumnLimit;
+ if (ColumnLimit == 0)
+ return true;
+
+ if (Lines.empty())
+ return true;
+
+ const auto &PreviousLine = Lines.back();
+ const auto &Tokens = PreviousLine.Tokens;
+ assert(!Tokens.empty());
+ const auto *LastToken = Tokens.back().Tok;
+ assert(LastToken);
+ if (!LastToken->isOneOf(tok::semi, tok::comment))
+ return true;
+
+ AnnotatedLine Line(PreviousLine);
+ assert(Line.Last == LastToken);
+
+ TokenAnnotator Annotator(Style, Keywords);
+ Annotator.annotate(Line);
+ Annotator.calculateFormattingInformation(Line);
+
+ return Line.Level * Style.IndentWidth + LastToken->TotalLength <= ColumnLimit;
+}
+
+// Returns true if the level forms a simple block, i.e. it contains a single
+// statement that fits on a single line; returns false otherwise.
+bool UnwrappedLineParser::parseLevel(bool HasOpeningBrace, IfStmtKind *IfKind) {
+ const bool IsPrecededByCommentOrPPDirective =
+ !Style.RemoveBracesLLVM || precededByCommentOrPPDirective();
+ unsigned StatementCount = 0;
bool SwitchLabelEncountered = false;
do {
tok::TokenKind kind = FormatTok->Tok.getKind();
@@ -452,11 +491,24 @@ void UnwrappedLineParser::parseLevel(bool HasOpeningBrace) {
if (!FormatTok->is(TT_MacroBlockBegin) && tryToParseBracedList())
continue;
parseBlock();
+ ++StatementCount;
+ assert(StatementCount > 0 && "StatementCount overflow!");
addUnwrappedLine();
break;
case tok::r_brace:
- if (HasOpeningBrace)
- return;
+ if (HasOpeningBrace) {
+ if (!Style.RemoveBracesLLVM)
+ return false;
+ if (FormatTok->isNot(tok::r_brace) || StatementCount != 1 ||
+ IsPrecededByCommentOrPPDirective ||
+ precededByCommentOrPPDirective()) {
+ return false;
+ }
+ const FormatToken *Next = Tokens->peekNextToken();
+ if (Next->is(tok::comment) && Next->NewlinesBefore == 0)
+ return false;
+ return mightFitOnOneLine();
+ }
nextToken();
addUnwrappedLine();
break;
@@ -496,10 +548,13 @@ void UnwrappedLineParser::parseLevel(bool HasOpeningBrace) {
}
LLVM_FALLTHROUGH;
default:
- parseStructuralElement(!HasOpeningBrace);
+ parseStructuralElement(IfKind, !HasOpeningBrace);
+ ++StatementCount;
+ assert(StatementCount > 0 && "StatementCount overflow!");
break;
}
} while (!eof());
+ return false;
}
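
The new return value feeds the RemoveBracesLLVM support added in this commit: parseLevel() now reports whether a braced level held exactly one short statement, which is the precondition for treating its braces as optional. In the simplest case the transformation being enabled looks like this (illustrative; the option is opt-in and further guarded above):

    int classify(int X) {
      // With RemoveBracesLLVM, the braces around this single, line-sized
      // statement are treated as optional and can be removed:
      if (X > 0) {
        return 1;
      }
      return 0;
    }
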
void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
@@ -655,11 +710,13 @@ size_t UnwrappedLineParser::computePPHash() const {
return h;
}
-void UnwrappedLineParser::parseBlock(bool MustBeDeclaration, unsigned AddLevels,
- bool MunchSemi,
- bool UnindentWhitesmithsBraces) {
+UnwrappedLineParser::IfStmtKind
+UnwrappedLineParser::parseBlock(bool MustBeDeclaration, unsigned AddLevels,
+ bool MunchSemi,
+ bool UnindentWhitesmithsBraces) {
assert(FormatTok->isOneOf(tok::l_brace, TT_MacroBlockBegin) &&
"'{' or macro block token expected");
+ FormatToken *Tok = FormatTok;
const bool MacroBlock = FormatTok->is(TT_MacroBlockBegin);
FormatTok->setBlockKind(BK_Block);
@@ -694,16 +751,28 @@ void UnwrappedLineParser::parseBlock(bool MustBeDeclaration, unsigned AddLevels,
MustBeDeclaration);
if (AddLevels > 0u && Style.BreakBeforeBraces != FormatStyle::BS_Whitesmiths)
Line->Level += AddLevels;
- parseLevel(/*HasOpeningBrace=*/true);
+
+ IfStmtKind IfKind = IfStmtKind::NotIf;
+ const bool SimpleBlock = parseLevel(/*HasOpeningBrace=*/true, &IfKind);
if (eof())
- return;
+ return IfKind;
if (MacroBlock ? !FormatTok->is(TT_MacroBlockEnd)
: !FormatTok->is(tok::r_brace)) {
Line->Level = InitialLevel;
FormatTok->setBlockKind(BK_Block);
- return;
+ return IfKind;
+ }
+
+ if (SimpleBlock && Tok->is(tok::l_brace)) {
+ assert(FormatTok->is(tok::r_brace));
+ const FormatToken *Previous = Tokens->getPreviousToken();
+ assert(Previous);
+ if (Previous->isNot(tok::r_brace) || Previous->Optional) {
+ Tok->MatchingParen = FormatTok;
+ FormatTok->MatchingParen = Tok;
+ }
}
size_t PPEndHash = computePPHash();
@@ -734,6 +803,8 @@ void UnwrappedLineParser::parseBlock(bool MustBeDeclaration, unsigned AddLevels,
CurrentLines->size() - 1;
}
}
+
+ return IfKind;
}
static bool isGoogScope(const UnwrappedLine &Line) {
@@ -782,6 +853,8 @@ static bool ShouldBreakBeforeBrace(const FormatStyle &Style,
return Style.BraceWrapping.AfterUnion;
if (InitialToken.is(tok::kw_struct))
return Style.BraceWrapping.AfterStruct;
+ if (InitialToken.is(tok::kw_enum))
+ return Style.BraceWrapping.AfterEnum;
return false;
}
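
The added case wires BraceWrapping.AfterEnum into this helper, so enum braces respect the custom brace-wrapping flag the way struct and union braces already did. The intended layouts, inferred from the flag name rather than captured output:

    // AfterEnum: false
    enum class Color { Red, Green, Blue };

    // AfterEnum: true
    enum class Direction
    {
      North,
      South
    };
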
@@ -969,8 +1042,7 @@ void UnwrappedLineParser::parsePPDefine() {
nextToken();
if (FormatTok->Tok.getKind() == tok::l_paren &&
- FormatTok->WhitespaceRange.getBegin() ==
- FormatTok->WhitespaceRange.getEnd()) {
+ !FormatTok->hasWhitespaceBefore()) {
parseParens();
}
if (Style.IndentPPDirectives != FormatStyle::PPDIS_None)
@@ -1186,7 +1258,8 @@ void UnwrappedLineParser::readTokenWithJavaScriptASI() {
return addUnwrappedLine();
}
-void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
+void UnwrappedLineParser::parseStructuralElement(IfStmtKind *IfKind,
+ bool IsTopLevel) {
if (Style.Language == FormatStyle::LK_TableGen &&
FormatTok->is(tok::pp_include)) {
nextToken();
@@ -1229,7 +1302,7 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
if (Style.isJavaScript() && Line->MustBeDeclaration)
// field/method declaration.
break;
- parseIfThenElse();
+ parseIfThenElse(IfKind);
return;
case tok::kw_for:
case tok::kw_while:
@@ -1523,8 +1596,16 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
// structural element.
// FIXME: Figure out cases where this is not true, and add projections
// for them (the one case we know is missing is lambdas).
- if (Style.BraceWrapping.AfterFunction)
+ if (Style.Language == FormatStyle::LK_Java &&
+ Line->Tokens.front().Tok->is(Keywords.kw_synchronized)) {
+ // If necessary, we could set the type to something different than
+ // TT_FunctionLBrace.
+ if (Style.BraceWrapping.AfterControlStatement ==
+ FormatStyle::BWACS_Always)
+ addUnwrappedLine();
+ } else if (Style.BraceWrapping.AfterFunction) {
addUnwrappedLine();
+ }
FormatTok->setType(TT_FunctionLBrace);
parseBlock();
addUnwrappedLine();
@@ -1601,6 +1682,8 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
// See if the following token should start a new unwrapped line.
StringRef Text = FormatTok->TokenText;
+
+ FormatToken *PreviousToken = FormatTok;
nextToken();
// JS doesn't have macros, and within classes colons indicate fields, not
@@ -1629,6 +1712,7 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
if (FollowedByNewline && (Text.size() >= 5 || FunctionLike) &&
tokenCanStartNewLine(*FormatTok) && Text == Text.upper()) {
+ PreviousToken->setType(TT_FunctionLikeOrFreestandingMacro);
addUnwrappedLine();
return;
}
@@ -1772,6 +1856,7 @@ bool UnwrappedLineParser::tryToParseLambda() {
return false;
bool SeenArrow = false;
+ bool InTemplateParameterList = false;
while (FormatTok->isNot(tok::l_brace)) {
if (FormatTok->isSimpleTypeSpecifier()) {
@@ -1784,6 +1869,17 @@ bool UnwrappedLineParser::tryToParseLambda() {
case tok::l_paren:
parseParens();
break;
+ case tok::l_square:
+ parseSquare();
+ break;
+ case tok::kw_class:
+ case tok::kw_template:
+ case tok::kw_typename:
+ assert(FormatTok->Previous);
+ if (FormatTok->Previous->is(tok::less))
+ InTemplateParameterList = true;
+ nextToken();
+ break;
case tok::amp:
case tok::star:
case tok::kw_const:
@@ -1793,11 +1889,8 @@ bool UnwrappedLineParser::tryToParseLambda() {
case tok::identifier:
case tok::numeric_constant:
case tok::coloncolon:
- case tok::kw_class:
case tok::kw_mutable:
case tok::kw_noexcept:
- case tok::kw_template:
- case tok::kw_typename:
nextToken();
break;
// Specialization of a template with an integer parameter can contain
@@ -1834,7 +1927,7 @@ bool UnwrappedLineParser::tryToParseLambda() {
case tok::ellipsis:
case tok::kw_true:
case tok::kw_false:
- if (SeenArrow) {
+ if (SeenArrow || InTemplateParameterList) {
nextToken();
break;
}
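
Splitting class/template/typename into their own case and tracking InTemplateParameterList lets the lambda parser accept C++20 explicit template parameter lists, including defaults that use literals such as true. A small example of the construct this targets; the exact motivating test case is not shown in the diff:

    // C++20 lambda with an explicit template parameter list; the
    // 'bool Strict = true' default is the kind of token sequence the parser
    // now tolerates before the introducing '{'.
    auto Clamp = []<typename T, bool Strict = true>(T Value, T Lo, T Hi) {
      if (Strict && Lo > Hi)
        return Lo;
      return Value < Lo ? Lo : (Value > Hi ? Hi : Value);
    };

    int main() { return Clamp(5, 0, 3); }
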
@@ -2128,7 +2221,39 @@ void UnwrappedLineParser::parseSquare(bool LambdaIntroducer) {
} while (!eof());
}
-void UnwrappedLineParser::parseIfThenElse() {
+void UnwrappedLineParser::keepAncestorBraces() {
+ if (!Style.RemoveBracesLLVM)
+ return;
+
+ const int MaxNestingLevels = 2;
+ const int Size = NestedTooDeep.size();
+ if (Size >= MaxNestingLevels)
+ NestedTooDeep[Size - MaxNestingLevels] = true;
+ NestedTooDeep.push_back(false);
+}
+
+static void markOptionalBraces(FormatToken *LeftBrace) {
+ if (!LeftBrace)
+ return;
+
+ assert(LeftBrace->is(tok::l_brace));
+
+ FormatToken *RightBrace = LeftBrace->MatchingParen;
+ if (!RightBrace) {
+ assert(!LeftBrace->Optional);
+ return;
+ }
+
+ assert(RightBrace->is(tok::r_brace));
+ assert(RightBrace->MatchingParen == LeftBrace);
+ assert(LeftBrace->Optional == RightBrace->Optional);
+
+ LeftBrace->Optional = true;
+ RightBrace->Optional = true;
+}
+
+FormatToken *UnwrappedLineParser::parseIfThenElse(IfStmtKind *IfKind,
+ bool KeepBraces) {
auto HandleAttributes = [this]() {
// Handle AttributeMacro, e.g. `if (x) UNLIKELY`.
if (FormatTok->is(TT_AttributeMacro))
@@ -2145,10 +2270,17 @@ void UnwrappedLineParser::parseIfThenElse() {
if (FormatTok->Tok.is(tok::l_paren))
parseParens();
HandleAttributes();
+
bool NeedsUnwrappedLine = false;
+ keepAncestorBraces();
+
+ FormatToken *IfLeftBrace = nullptr;
+ IfStmtKind IfBlockKind = IfStmtKind::NotIf;
+
if (FormatTok->Tok.is(tok::l_brace)) {
+ IfLeftBrace = FormatTok;
CompoundStatementIndenter Indenter(this, Style, Line->Level);
- parseBlock();
+ IfBlockKind = parseBlock();
if (Style.BraceWrapping.BeforeElse)
addUnwrappedLine();
else
@@ -2159,22 +2291,48 @@ void UnwrappedLineParser::parseIfThenElse() {
parseStructuralElement();
--Line->Level;
}
+
+ bool KeepIfBraces = false;
+ if (Style.RemoveBracesLLVM) {
+ assert(!NestedTooDeep.empty());
+ KeepIfBraces = (IfLeftBrace && !IfLeftBrace->MatchingParen) ||
+ NestedTooDeep.back() || IfBlockKind == IfStmtKind::IfOnly ||
+ IfBlockKind == IfStmtKind::IfElseIf;
+ }
+
+ FormatToken *ElseLeftBrace = nullptr;
+ IfStmtKind Kind = IfStmtKind::IfOnly;
+
if (FormatTok->Tok.is(tok::kw_else)) {
+ if (Style.RemoveBracesLLVM) {
+ NestedTooDeep.back() = false;
+ Kind = IfStmtKind::IfElse;
+ }
nextToken();
HandleAttributes();
if (FormatTok->Tok.is(tok::l_brace)) {
+ ElseLeftBrace = FormatTok;
CompoundStatementIndenter Indenter(this, Style, Line->Level);
- parseBlock();
+ if (parseBlock() == IfStmtKind::IfOnly)
+ Kind = IfStmtKind::IfElseIf;
addUnwrappedLine();
} else if (FormatTok->Tok.is(tok::kw_if)) {
FormatToken *Previous = Tokens->getPreviousToken();
- bool PrecededByComment = Previous && Previous->is(tok::comment);
- if (PrecededByComment) {
+ const bool IsPrecededByComment = Previous && Previous->is(tok::comment);
+ if (IsPrecededByComment) {
addUnwrappedLine();
++Line->Level;
}
- parseIfThenElse();
- if (PrecededByComment)
+ bool TooDeep = true;
+ if (Style.RemoveBracesLLVM) {
+ Kind = IfStmtKind::IfElseIf;
+ TooDeep = NestedTooDeep.pop_back_val();
+ }
+ ElseLeftBrace =
+ parseIfThenElse(/*IfKind=*/nullptr, KeepBraces || KeepIfBraces);
+ if (Style.RemoveBracesLLVM)
+ NestedTooDeep.push_back(TooDeep);
+ if (IsPrecededByComment)
--Line->Level;
} else {
addUnwrappedLine();
@@ -2184,9 +2342,40 @@ void UnwrappedLineParser::parseIfThenElse() {
addUnwrappedLine();
--Line->Level;
}
- } else if (NeedsUnwrappedLine) {
- addUnwrappedLine();
+ } else {
+ if (Style.RemoveBracesLLVM)
+ KeepIfBraces = KeepIfBraces || IfBlockKind == IfStmtKind::IfElse;
+ if (NeedsUnwrappedLine)
+ addUnwrappedLine();
+ }
+
+ if (!Style.RemoveBracesLLVM)
+ return nullptr;
+
+ assert(!NestedTooDeep.empty());
+ const bool KeepElseBraces =
+ (ElseLeftBrace && !ElseLeftBrace->MatchingParen) || NestedTooDeep.back();
+
+ NestedTooDeep.pop_back();
+
+ if (!KeepBraces && !KeepIfBraces && !KeepElseBraces) {
+ markOptionalBraces(IfLeftBrace);
+ markOptionalBraces(ElseLeftBrace);
+ } else if (IfLeftBrace) {
+ FormatToken *IfRightBrace = IfLeftBrace->MatchingParen;
+ if (IfRightBrace) {
+ assert(IfRightBrace->MatchingParen == IfLeftBrace);
+ assert(!IfLeftBrace->Optional);
+ assert(!IfRightBrace->Optional);
+ IfLeftBrace->MatchingParen = nullptr;
+ IfRightBrace->MatchingParen = nullptr;
+ }
}
+
+ if (IfKind)
+ *IfKind = Kind;
+
+ return IfLeftBrace;
}
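
Taken together, keepAncestorBraces(), markOptionalBraces() and the KeepIfBraces/KeepElseBraces bookkeeping above implement the LLVM rule that braces on an if/else chain only become optional when every branch is a single short statement. A rough illustration of a chain whose braces must stay, based on that logic rather than on the test suite:

    void log(const char *Message);

    void report(int Err) {
      // The else branch holds two statements, so its braces are required and
      // the if branch keeps its braces as well.
      if (Err == 0) {
        log("ok");
      } else {
        log("failed");
        log("see diagnostics");
      }
    }
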
void UnwrappedLineParser::parseTryCatch() {
@@ -2224,6 +2413,9 @@ void UnwrappedLineParser::parseTryCatch() {
if (Style.Language == FormatStyle::LK_Java && FormatTok->is(tok::l_paren)) {
parseParens();
}
+
+ keepAncestorBraces();
+
if (FormatTok->is(tok::l_brace)) {
CompoundStatementIndenter Indenter(this, Style, Line->Level);
parseBlock();
@@ -2241,7 +2433,7 @@ void UnwrappedLineParser::parseTryCatch() {
parseStructuralElement();
--Line->Level;
}
- while (1) {
+ while (true) {
if (FormatTok->is(tok::at))
nextToken();
if (!(FormatTok->isOneOf(tok::kw_catch, Keywords.kw___except,
@@ -2257,8 +2449,11 @@ void UnwrappedLineParser::parseTryCatch() {
parseParens();
continue;
}
- if (FormatTok->isOneOf(tok::semi, tok::r_brace, tok::eof))
+ if (FormatTok->isOneOf(tok::semi, tok::r_brace, tok::eof)) {
+ if (Style.RemoveBracesLLVM)
+ NestedTooDeep.pop_back();
return;
+ }
nextToken();
}
NeedsUnwrappedLine = false;
@@ -2269,6 +2464,10 @@ void UnwrappedLineParser::parseTryCatch() {
else
NeedsUnwrappedLine = true;
}
+
+ if (Style.RemoveBracesLLVM)
+ NestedTooDeep.pop_back();
+
if (NeedsUnwrappedLine)
addUnwrappedLine();
}
@@ -2283,7 +2482,8 @@ void UnwrappedLineParser::parseNamespace() {
parseParens();
} else {
while (FormatTok->isOneOf(tok::identifier, tok::coloncolon, tok::kw_inline,
- tok::l_square, tok::period)) {
+ tok::l_square, tok::period) ||
+ (Style.isCSharp() && FormatTok->is(tok::kw_union))) {
if (FormatTok->is(tok::l_square))
parseSquare();
else
@@ -2375,9 +2575,18 @@ void UnwrappedLineParser::parseForOrWhileLoop() {
nextToken();
if (FormatTok->Tok.is(tok::l_paren))
parseParens();
+
+ keepAncestorBraces();
+
if (FormatTok->Tok.is(tok::l_brace)) {
+ FormatToken *LeftBrace = FormatTok;
CompoundStatementIndenter Indenter(this, Style, Line->Level);
parseBlock();
+ if (Style.RemoveBracesLLVM) {
+ assert(!NestedTooDeep.empty());
+ if (!NestedTooDeep.back())
+ markOptionalBraces(LeftBrace);
+ }
addUnwrappedLine();
} else {
addUnwrappedLine();
@@ -2385,11 +2594,17 @@ void UnwrappedLineParser::parseForOrWhileLoop() {
parseStructuralElement();
--Line->Level;
}
+
+ if (Style.RemoveBracesLLVM)
+ NestedTooDeep.pop_back();
}
void UnwrappedLineParser::parseDoWhile() {
assert(FormatTok->Tok.is(tok::kw_do) && "'do' expected");
nextToken();
+
+ keepAncestorBraces();
+
if (FormatTok->Tok.is(tok::l_brace)) {
CompoundStatementIndenter Indenter(this, Style, Line->Level);
parseBlock();
@@ -2402,6 +2617,9 @@ void UnwrappedLineParser::parseDoWhile() {
--Line->Level;
}
+ if (Style.RemoveBracesLLVM)
+ NestedTooDeep.pop_back();
+
// FIXME: Add error handling.
if (!FormatTok->Tok.is(tok::kw_while)) {
addUnwrappedLine();
@@ -2438,7 +2656,7 @@ void UnwrappedLineParser::parseLabel(bool LeftAlignLabel) {
addUnwrappedLine();
if (!Style.IndentCaseBlocks &&
Style.BreakBeforeBraces == FormatStyle::BS_Whitesmiths) {
- Line->Level++;
+ ++Line->Level;
}
}
parseStructuralElement();
@@ -2471,6 +2689,9 @@ void UnwrappedLineParser::parseSwitch() {
nextToken();
if (FormatTok->Tok.is(tok::l_paren))
parseParens();
+
+ keepAncestorBraces();
+
if (FormatTok->Tok.is(tok::l_brace)) {
CompoundStatementIndenter Indenter(this, Style, Line->Level);
parseBlock();
@@ -2481,6 +2702,9 @@ void UnwrappedLineParser::parseSwitch() {
parseStructuralElement();
--Line->Level;
}
+
+ if (Style.RemoveBracesLLVM)
+ NestedTooDeep.pop_back();
}
void UnwrappedLineParser::parseAccessSpecifier() {
@@ -2597,7 +2821,7 @@ void UnwrappedLineParser::parseRequires() {
if (FormatTok->Previous && FormatTok->Previous->is(tok::greater)) {
addUnwrappedLine();
if (Style.IndentRequires) {
- Line->Level++;
+ ++Line->Level;
}
}
nextToken();
@@ -2606,12 +2830,12 @@ void UnwrappedLineParser::parseRequires() {
}
bool UnwrappedLineParser::parseEnum() {
+ const FormatToken &InitialToken = *FormatTok;
+
// Won't be 'enum' for NS_ENUMs.
if (FormatTok->Tok.is(tok::kw_enum))
nextToken();
- const FormatToken &InitialToken = *FormatTok;
-
// In TypeScript, "enum" can also be used as property name, e.g. in interface
// declarations. An "enum" keyword followed by a colon would be a syntax
// error and thus assume it is just an identifier.
@@ -2645,6 +2869,7 @@ bool UnwrappedLineParser::parseEnum() {
// Just a declaration or something is wrong.
if (FormatTok->isNot(tok::l_brace))
return true;
+ FormatTok->setType(TT_RecordLBrace);
FormatTok->setBlockKind(BK_Block);
if (Style.Language == FormatStyle::LK_Java) {
@@ -2864,6 +3089,15 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
if (!tryToParseBracedList())
break;
}
+ if (FormatTok->is(tok::l_square)) {
+ FormatToken *Previous = FormatTok->Previous;
+ if (!Previous || Previous->isNot(tok::r_paren)) {
+ // Don't try parsing a lambda if we had a closing parenthesis before,
+ // it was probably a pointer to an array: int (*)[].
+ if (!tryToParseLambda())
+ break;
+ }
+ }
if (FormatTok->Tok.is(tok::semi))
return;
if (Style.isCSharp() && FormatTok->is(Keywords.kw_where)) {
@@ -2876,6 +3110,7 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
}
}
if (FormatTok->Tok.is(tok::l_brace)) {
+ FormatTok->setType(TT_RecordLBrace);
if (ParseAsExpr) {
parseChildBlock();
} else {
@@ -3264,10 +3499,7 @@ continuesLineCommentSection(const FormatToken &FormatTok,
void UnwrappedLineParser::flushComments(bool NewlineBeforeNext) {
bool JustComments = Line->Tokens.empty();
- for (SmallVectorImpl<FormatToken *>::const_iterator
- I = CommentsBeforeNextToken.begin(),
- E = CommentsBeforeNextToken.end();
- I != E; ++I) {
+ for (FormatToken *Tok : CommentsBeforeNextToken) {
// Line comments that belong to the same line comment section are put on the
// same line since later we might want to reflow content between them.
// Additional fine-grained breaking of line comment sections is controlled
@@ -3276,11 +3508,11 @@ void UnwrappedLineParser::flushComments(bool NewlineBeforeNext) {
//
// FIXME: Consider putting separate line comment sections as children to the
// unwrapped line instead.
- (*I)->ContinuesLineCommentSection =
- continuesLineCommentSection(**I, *Line, CommentPragmasRegex);
- if (isOnNewLine(**I) && JustComments && !(*I)->ContinuesLineCommentSection)
+ Tok->ContinuesLineCommentSection =
+ continuesLineCommentSection(*Tok, *Line, CommentPragmasRegex);
+ if (isOnNewLine(*Tok) && JustComments && !Tok->ContinuesLineCommentSection)
addUnwrappedLine();
- pushToken(*I);
+ pushToken(Tok);
}
if (NewlineBeforeNext && JustComments)
addUnwrappedLine();
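The UnwrappedLineParser hunks above thread a NestedTooDeep stack through the control-statement parsers so that Style.RemoveBracesLLVM only marks braces optional when the statement is not nested too deeply. A hedged before/after sketch of the intended effect, assuming a style with RemoveBracesLLVM enabled (the exact heuristics live in markOptionalBraces and are not reproduced here):

void handle(int);

// Before formatting (sketch):
void before(int x) {
  if (x > 0) {
    handle(x);
  }
}

// After formatting (sketch): the braces around the single-statement body are
// marked optional and removed; bodies that nest further control flow keep
// their braces because NestedTooDeep flags them as non-removable.
void after(int x) {
  if (x > 0)
    handle(x);
}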
diff --git a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h
index 0c79723d50fc..3f64d57c7bff 100644
--- a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h
+++ b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h
@@ -18,6 +18,7 @@
#include "FormatToken.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Format/Format.h"
+#include "llvm/ADT/BitVector.h"
#include "llvm/Support/Regex.h"
#include <stack>
#include <vector>
@@ -81,12 +82,21 @@ public:
void parse();
private:
+ enum class IfStmtKind {
+ NotIf, // Not an if statement.
+ IfOnly, // An if statement without the else clause.
+ IfElse, // An if statement followed by else but not else if.
+ IfElseIf // An if statement followed by else if.
+ };
+
void reset();
void parseFile();
- void parseLevel(bool HasOpeningBrace);
- void parseBlock(bool MustBeDeclaration = false, unsigned AddLevels = 1u,
- bool MunchSemi = true,
- bool UnindentWhitesmithsBraces = false);
+ bool precededByCommentOrPPDirective() const;
+ bool mightFitOnOneLine() const;
+ bool parseLevel(bool HasOpeningBrace, IfStmtKind *IfKind = nullptr);
+ IfStmtKind parseBlock(bool MustBeDeclaration = false, unsigned AddLevels = 1u,
+ bool MunchSemi = true,
+ bool UnindentWhitesmithsBraces = false);
void parseChildBlock();
void parsePPDirective();
void parsePPDefine();
@@ -96,13 +106,15 @@ private:
void parsePPEndIf();
void parsePPUnknown();
void readTokenWithJavaScriptASI();
- void parseStructuralElement(bool IsTopLevel = false);
+ void parseStructuralElement(IfStmtKind *IfKind = nullptr,
+ bool IsTopLevel = false);
bool tryToParseBracedList();
bool parseBracedList(bool ContinueOnSemicolons = false, bool IsEnum = false,
tok::TokenKind ClosingBraceKind = tok::r_brace);
void parseParens();
void parseSquare(bool LambdaIntroducer = false);
- void parseIfThenElse();
+ void keepAncestorBraces();
+ FormatToken *parseIfThenElse(IfStmtKind *IfKind, bool KeepBraces = false);
void parseTryCatch();
void parseForOrWhileLoop();
void parseDoWhile();
@@ -220,7 +232,7 @@ private:
// We store for each line whether it must be a declaration depending on
// whether we are in a compound statement or not.
- std::vector<bool> DeclarationScopeStack;
+ llvm::BitVector DeclarationScopeStack;
const FormatStyle &Style;
const AdditionalKeywords &Keywords;
@@ -235,6 +247,10 @@ private:
// owned outside of and handed into the UnwrappedLineParser.
ArrayRef<FormatToken *> AllTokens;
+ // Keeps a stack of the states of nested control statements (true if the
+ // statement contains more than some predefined number of nested statements).
+ SmallVector<bool, 8> NestedTooDeep;
+
// Represents preprocessor branch type, so we can find matching
// #if/#else/#endif directives.
enum PPBranchKind {
diff --git a/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp b/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp
index 96a66da0f82b..0d2e507ac587 100644
--- a/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp
+++ b/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp
@@ -74,6 +74,12 @@ WhitespaceManager::addReplacement(const tooling::Replacement &Replacement) {
return Replaces.add(Replacement);
}
+bool WhitespaceManager::inputUsesCRLF(StringRef Text, bool DefaultToCRLF) {
+ size_t LF = Text.count('\n');
+ size_t CR = Text.count('\r') * 2;
+ return LF == CR ? DefaultToCRLF : CR > LF;
+}
+
void WhitespaceManager::replaceWhitespaceInToken(
const FormatToken &Tok, unsigned Offset, unsigned ReplaceChars,
StringRef PreviousPostfix, StringRef CurrentPrefix, bool InPPDirective,
@@ -304,7 +310,7 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
unsigned PreviousNonComment = i - 1;
while (PreviousNonComment > Start &&
Changes[PreviousNonComment].Tok->is(tok::comment))
- PreviousNonComment--;
+ --PreviousNonComment;
if (i != Start && Changes[i].indentAndNestingLevel() >
Changes[PreviousNonComment].indentAndNestingLevel())
ScopeStack.push_back(i);
@@ -362,6 +368,13 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
Changes[i].Tok->Previous->is(TT_ConditionalExpr))
return true;
+ // Continued braced list.
+ if (ScopeStart > Start + 1 &&
+ Changes[ScopeStart - 2].Tok->isNot(tok::identifier) &&
+ Changes[ScopeStart - 1].Tok->is(tok::l_brace) &&
+ Changes[i].Tok->isNot(tok::r_brace))
+ return true;
+
return false;
};
@@ -718,6 +731,11 @@ void WhitespaceManager::alignConsecutiveAssignments() {
if (&C != &Changes.back() && (&C + 1)->NewlinesBefore > 0)
return false;
+ // Do not align operator= overloads.
+ FormatToken *Previous = C.Tok->getPreviousNonComment();
+ if (Previous && Previous->is(tok::kw_operator))
+ return false;
+
return C.Tok->is(tok::equal);
},
Changes, /*StartAt=*/0, Style.AlignConsecutiveAssignments);
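The operator= exclusion above keeps consecutive-assignment alignment from treating the '=' that is part of an assignment-operator declaration as an assignment. A hedged illustration, assuming AlignConsecutiveAssignments is enabled:

struct Widget {
  // The '=' forming operator= is skipped by the check above (sketch).
  Widget &operator=(const Widget &);
};

void assignments() {
  int value     = 1; // these '=' remain candidates for column alignment
  int longerOne = 2;
}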
@@ -1160,7 +1178,7 @@ WhitespaceManager::CellDescriptions WhitespaceManager::getCells(unsigned Start,
NextNonComment = NextNonComment->getNextNonComment();
auto j = i;
while (Changes[j].Tok != NextNonComment && j < End)
- j++;
+ ++j;
if (j < End && Changes[j].NewlinesBefore == 0 &&
Changes[j].Tok->isNot(tok::r_brace)) {
Changes[j].NewlinesBefore = 1;
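inputUsesCRLF decides the line-ending style by majority vote: each CRLF ending contributes one '\n' and one '\r', so doubling the CR count and comparing it with the LF count tells whether CRLF endings outnumber bare-LF ones, with a tie falling back to the default. A standalone sketch of the same heuristic, assuming no lone '\r' endings:

#include <algorithm>
#include <string_view>

// Majority-vote sketch of WhitespaceManager::inputUsesCRLF: CRLF wins when
// "\r\n" endings outnumber bare "\n" endings; a tie uses the default.
static bool usesCRLF(std::string_view Text, bool DefaultToCRLF) {
  auto LF = std::count(Text.begin(), Text.end(), '\n');
  auto CR = std::count(Text.begin(), Text.end(), '\r') * 2;
  return LF == CR ? DefaultToCRLF : CR > LF;
}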
diff --git a/contrib/llvm-project/clang/lib/Format/WhitespaceManager.h b/contrib/llvm-project/clang/lib/Format/WhitespaceManager.h
index 029f4159b748..42f958f68ba6 100644
--- a/contrib/llvm-project/clang/lib/Format/WhitespaceManager.h
+++ b/contrib/llvm-project/clang/lib/Format/WhitespaceManager.h
@@ -45,6 +45,9 @@ public:
bool useCRLF() const { return UseCRLF; }
+ /// Infers whether the input is using CRLF.
+ static bool inputUsesCRLF(StringRef Text, bool DefaultToCRLF);
+
/// Replaces the whitespace in front of \p Tok. Only call once for
/// each \c AnnotatedToken.
///
@@ -242,7 +245,7 @@ private:
/// as described by \p CellDescs.
void alignArrayInitializersRightJustified(CellDescriptions &&CellDescs);
- /// Align Array Initializers being careful to leftt justify the columns
+ /// Align Array Initializers being careful to left justify the columns
/// as described by \p CellDescs.
void alignArrayInitializersLeftJustified(CellDescriptions &&CellDescs);
diff --git a/contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp b/contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp
index 52589677ca28..5f587cc1c023 100644
--- a/contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp
@@ -817,7 +817,7 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
AST->Reader = new ASTReader(
PP, *AST->ModuleCache, AST->Ctx.get(), PCHContainerRdr, {},
/*isysroot=*/"",
- /*DisableValidation=*/disableValid, AllowASTWithCompilerErrors);
+ /*DisableValidationKind=*/disableValid, AllowASTWithCompilerErrors);
AST->Reader->setListener(std::make_unique<ASTInfoCollector>(
*AST->PP, AST->Ctx.get(), *AST->HSOpts, *AST->PPOpts, *AST->LangOpts,
@@ -1922,9 +1922,10 @@ namespace {
void ProcessOverloadCandidates(Sema &S, unsigned CurrentArg,
OverloadCandidate *Candidates,
unsigned NumCandidates,
- SourceLocation OpenParLoc) override {
+ SourceLocation OpenParLoc,
+ bool Braced) override {
Next.ProcessOverloadCandidates(S, CurrentArg, Candidates, NumCandidates,
- OpenParLoc);
+ OpenParLoc, Braced);
}
CodeCompletionAllocator &getAllocator() override {
diff --git a/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp b/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp
index 31e7ea3d243d..2465a7e2453b 100644
--- a/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp
@@ -37,6 +37,7 @@
#include "clang/Serialization/ASTReader.h"
#include "clang/Serialization/GlobalModuleIndex.h"
#include "clang/Serialization/InMemoryModuleCache.h"
+#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/BuryPointer.h"
#include "llvm/Support/CrashRecoveryContext.h"
@@ -996,6 +997,11 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
// DesiredStackSpace available.
noteBottomOfStack();
+ auto FinishDiagnosticClient = llvm::make_scope_exit([&]() {
+ // Notify the diagnostic client that all files were processed.
+ getDiagnosticClient().finish();
+ });
+
raw_ostream &OS = getVerboseOutputStream();
if (!Act.PrepareToExecute(*this))
@@ -1034,9 +1040,6 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
}
}
- // Notify the diagnostic client that all files were processed.
- getDiagnostics().getClient()->finish();
-
if (getDiagnosticOpts().ShowCarets) {
// We can have multiple diagnostics sharing one diagnostic client.
// Get the total number of warnings/errors from the client.
@@ -1414,7 +1417,7 @@ static bool compileModuleAndReadASTBehindLock(
StringRef Dir = llvm::sys::path::parent_path(ModuleFileName);
llvm::sys::fs::create_directories(Dir);
- while (1) {
+ while (true) {
llvm::LockFileManager Locked(ModuleFileName);
switch (Locked) {
case llvm::LockFileManager::LFS_Error:
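The llvm::make_scope_exit call added above replaces the explicit getDiagnostics().getClient()->finish() further down, so the diagnostic client is notified on every exit path from ExecuteAction, including early returns. A minimal C++17 scope-guard sketch of the same pattern (not LLVM's implementation):

#include <utility>

// Minimal RAII scope guard: the callback runs when the guard goes out of
// scope, regardless of which return path is taken.
template <typename Callback> class ScopeExit {
  Callback Exit;
public:
  explicit ScopeExit(Callback C) : Exit(std::move(C)) {}
  ~ScopeExit() { Exit(); }
  ScopeExit(const ScopeExit &) = delete;
  ScopeExit &operator=(const ScopeExit &) = delete;
};

template <typename Callback> ScopeExit<Callback> makeScopeExit(Callback C) {
  return ScopeExit<Callback>(std::move(C)); // guaranteed elision in C++17
}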
diff --git a/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp b/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp
index b71addd84bfd..7f1ce3da7e7e 100644
--- a/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp
@@ -438,7 +438,7 @@ static T extractMaskValue(T KeyPath) {
}(EXTRACTOR(KEYPATH)); \
}
-static const StringRef GetInputKindName(InputKind IK);
+static StringRef GetInputKindName(InputKind IK);
static bool FixupInvocation(CompilerInvocation &Invocation,
DiagnosticsEngine &Diags, const ArgList &Args,
@@ -1814,6 +1814,9 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Name;
}
+ if (Opts.PrepareForLTO && Args.hasArg(OPT_mibt_seal))
+ Opts.IBTSeal = 1;
+
for (auto *A :
Args.filtered(OPT_mlink_bitcode_file, OPT_mlink_builtin_bitcode)) {
CodeGenOptions::BitcodeFileToLink F;
@@ -2405,6 +2408,7 @@ static const auto &getFrontendActionTable() {
{frontend::EmitCodeGenOnly, OPT_emit_codegen_only},
{frontend::EmitCodeGenOnly, OPT_emit_codegen_only},
{frontend::EmitObj, OPT_emit_obj},
+ {frontend::ExtractAPI, OPT_extract_api},
{frontend::FixIt, OPT_fixit_EQ},
{frontend::FixIt, OPT_fixit},
@@ -3291,7 +3295,7 @@ static bool IsInputCompatibleWithStandard(InputKind IK,
}
/// Get language name for given input kind.
-static const StringRef GetInputKindName(InputKind IK) {
+static StringRef GetInputKindName(InputKind IK) {
switch (IK.getLanguage()) {
case Language::C:
return "C";
@@ -4144,6 +4148,7 @@ static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) {
case frontend::EmitLLVMOnly:
case frontend::EmitCodeGenOnly:
case frontend::EmitObj:
+ case frontend::ExtractAPI:
case frontend::FixIt:
case frontend::GenerateModule:
case frontend::GenerateModuleInterface:
diff --git a/contrib/llvm-project/clang/lib/Frontend/ExtractAPIConsumer.cpp b/contrib/llvm-project/clang/lib/Frontend/ExtractAPIConsumer.cpp
new file mode 100644
index 000000000000..cdf67f3c327a
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Frontend/ExtractAPIConsumer.cpp
@@ -0,0 +1,32 @@
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Frontend/ASTConsumers.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendActions.h"
+
+using namespace clang;
+
+namespace {
+class ExtractAPIVisitor : public RecursiveASTVisitor<ExtractAPIVisitor> {
+public:
+ bool VisitNamedDecl(NamedDecl *Decl) {
+ llvm::outs() << Decl->getName() << "\n";
+ return true;
+ }
+};
+
+class ExtractAPIConsumer : public ASTConsumer {
+public:
+ void HandleTranslationUnit(ASTContext &Context) override {
+ Visitor.TraverseDecl(Context.getTranslationUnitDecl());
+ }
+
+private:
+ ExtractAPIVisitor Visitor;
+};
+} // namespace
+
+std::unique_ptr<ASTConsumer>
+ExtractAPIAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
+ return std::make_unique<ExtractAPIConsumer>();
+}
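The new consumer is intentionally minimal: it walks the translation unit and prints the name of every NamedDecl it visits, one per line. A hedged illustration of what that means for a small header:

// Illustrative input (sketch): running the ExtractAPI action over this header
// prints each named declaration it traverses, e.g. "Point", "x", "y",
// "distance", "a", "b".
struct Point {
  int x;
  int y;
};
int distance(Point a, Point b);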
diff --git a/contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp b/contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp
index fb8132a5e40a..ad2e6039477f 100644
--- a/contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp
@@ -8,9 +8,10 @@
#include "clang/Frontend/FrontendActions.h"
#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/Decl.h"
#include "clang/Basic/FileManager.h"
-#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/LangStandard.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/ASTConsumers.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendDiagnostic.h"
@@ -23,6 +24,7 @@
#include "clang/Sema/TemplateInstCallback.h"
#include "clang/Serialization/ASTReader.h"
#include "clang/Serialization/ASTWriter.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
@@ -310,9 +312,8 @@ bool GenerateHeaderModuleAction::BeginSourceFileAction(
auto &HS = CI.getPreprocessor().getHeaderSearchInfo();
SmallVector<Module::Header, 16> Headers;
for (StringRef Name : ModuleHeaders) {
- const DirectoryLookup *CurDir = nullptr;
Optional<FileEntryRef> FE = HS.LookupFile(
- Name, SourceLocation(), /*Angled*/ false, nullptr, CurDir, None,
+ Name, SourceLocation(), /*Angled*/ false, nullptr, nullptr, None,
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr);
if (!FE) {
CI.getDiagnostics().Report(diag::err_module_header_file_not_found)
@@ -481,25 +482,92 @@ private:
Out << "---" << YAML << "\n";
}
+ static void printEntryName(const Sema &TheSema, const Decl *Entity,
+ llvm::raw_string_ostream &OS) {
+ auto *NamedTemplate = cast<NamedDecl>(Entity);
+
+ PrintingPolicy Policy = TheSema.Context.getPrintingPolicy();
+ // FIXME: Also ask for FullyQualifiedNames?
+ Policy.SuppressDefaultTemplateArgs = false;
+ NamedTemplate->getNameForDiagnostic(OS, Policy, true);
+
+ if (!OS.str().empty())
+ return;
+
+ Decl *Ctx = Decl::castFromDeclContext(NamedTemplate->getDeclContext());
+ NamedDecl *NamedCtx = dyn_cast_or_null<NamedDecl>(Ctx);
+
+ if (const auto *Decl = dyn_cast<TagDecl>(NamedTemplate)) {
+ if (const auto *R = dyn_cast<RecordDecl>(Decl)) {
+ if (R->isLambda()) {
+ OS << "lambda at ";
+ Decl->getLocation().print(OS, TheSema.getSourceManager());
+ return;
+ }
+ }
+ OS << "unnamed " << Decl->getKindName();
+ return;
+ }
+
+ if (const auto *Decl = dyn_cast<ParmVarDecl>(NamedTemplate)) {
+ OS << "unnamed function parameter " << Decl->getFunctionScopeIndex()
+ << " ";
+ if (Decl->getFunctionScopeDepth() > 0)
+ OS << "(at depth " << Decl->getFunctionScopeDepth() << ") ";
+ OS << "of ";
+ NamedCtx->getNameForDiagnostic(OS, TheSema.getLangOpts(), true);
+ return;
+ }
+
+ if (const auto *Decl = dyn_cast<TemplateTypeParmDecl>(NamedTemplate)) {
+ if (const Type *Ty = Decl->getTypeForDecl()) {
+ if (const auto *TTPT = dyn_cast_or_null<TemplateTypeParmType>(Ty)) {
+ OS << "unnamed template type parameter " << TTPT->getIndex() << " ";
+ if (TTPT->getDepth() > 0)
+ OS << "(at depth " << TTPT->getDepth() << ") ";
+ OS << "of ";
+ NamedCtx->getNameForDiagnostic(OS, TheSema.getLangOpts(), true);
+ return;
+ }
+ }
+ }
+
+ if (const auto *Decl = dyn_cast<NonTypeTemplateParmDecl>(NamedTemplate)) {
+ OS << "unnamed template non-type parameter " << Decl->getIndex() << " ";
+ if (Decl->getDepth() > 0)
+ OS << "(at depth " << Decl->getDepth() << ") ";
+ OS << "of ";
+ NamedCtx->getNameForDiagnostic(OS, TheSema.getLangOpts(), true);
+ return;
+ }
+
+ if (const auto *Decl = dyn_cast<TemplateTemplateParmDecl>(NamedTemplate)) {
+ OS << "unnamed template template parameter " << Decl->getIndex() << " ";
+ if (Decl->getDepth() > 0)
+ OS << "(at depth " << Decl->getDepth() << ") ";
+ OS << "of ";
+ NamedCtx->getNameForDiagnostic(OS, TheSema.getLangOpts(), true);
+ return;
+ }
+
+ llvm_unreachable("Failed to retrieve a name for this entry!");
+ OS << "unnamed identifier";
+ }
+
template <bool BeginInstantiation>
static TemplightEntry getTemplightEntry(const Sema &TheSema,
const CodeSynthesisContext &Inst) {
TemplightEntry Entry;
Entry.Kind = toString(Inst.Kind);
Entry.Event = BeginInstantiation ? "Begin" : "End";
- if (auto *NamedTemplate = dyn_cast_or_null<NamedDecl>(Inst.Entity)) {
- llvm::raw_string_ostream OS(Entry.Name);
- PrintingPolicy Policy = TheSema.Context.getPrintingPolicy();
- // FIXME: Also ask for FullyQualifiedNames?
- Policy.SuppressDefaultTemplateArgs = false;
- NamedTemplate->getNameForDiagnostic(OS, Policy, true);
- const PresumedLoc DefLoc =
+ llvm::raw_string_ostream OS(Entry.Name);
+ printEntryName(TheSema, Inst.Entity, OS);
+ const PresumedLoc DefLoc =
TheSema.getSourceManager().getPresumedLoc(Inst.Entity->getLocation());
- if(!DefLoc.isInvalid())
- Entry.DefinitionLocation = std::string(DefLoc.getFilename()) + ":" +
- std::to_string(DefLoc.getLine()) + ":" +
- std::to_string(DefLoc.getColumn());
- }
+ if (!DefLoc.isInvalid())
+ Entry.DefinitionLocation = std::string(DefLoc.getFilename()) + ":" +
+ std::to_string(DefLoc.getLine()) + ":" +
+ std::to_string(DefLoc.getColumn());
const PresumedLoc PoiLoc =
TheSema.getSourceManager().getPresumedLoc(Inst.PointOfInstantiation);
if (!PoiLoc.isInvalid()) {
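printEntryName gives templight entries a readable name even when the instantiated entity has no identifier, falling back to descriptions such as "unnamed template type parameter N of <context>" or "lambda at <location>". A hedged example of an entity that previously produced an empty name:

// Sketch: the template type parameter below is unnamed, so a templight entry
// referring to it is now rendered along the lines of
// "unnamed template type parameter 0 of f" instead of an empty string.
template <typename> void f() {}
template void f<int>(); // instantiation recorded by -templight-dump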
diff --git a/contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp b/contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp
index 629f99110661..a9023a7a1171 100644
--- a/contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp
@@ -194,7 +194,7 @@ static void DefineType(const Twine &MacroName, TargetInfo::IntType Ty,
Builder.defineMacro(MacroName, TargetInfo::getTypeName(Ty));
}
-static void DefineTypeWidth(StringRef MacroName, TargetInfo::IntType Ty,
+static void DefineTypeWidth(const Twine &MacroName, TargetInfo::IntType Ty,
const TargetInfo &TI, MacroBuilder &Builder) {
Builder.defineMacro(MacroName, Twine(TI.getTypeWidth(Ty)));
}
@@ -205,6 +205,16 @@ static void DefineTypeSizeof(StringRef MacroName, unsigned BitWidth,
Twine(BitWidth / TI.getCharWidth()));
}
+// This will generate a macro based on the prefix with `_MAX__` as the suffix
+// for the max value representable for the type, and a macro with a `_WIDTH__`
+// suffix for the width of the type.
+static void DefineTypeSizeAndWidth(const Twine &Prefix, TargetInfo::IntType Ty,
+ const TargetInfo &TI,
+ MacroBuilder &Builder) {
+ DefineTypeSize(Prefix + "_MAX__", Ty, TI, Builder);
+ DefineTypeWidth(Prefix + "_WIDTH__", Ty, TI, Builder);
+}
+
static void DefineExactWidthIntType(TargetInfo::IntType Ty,
const TargetInfo &TI,
MacroBuilder &Builder) {
@@ -241,6 +251,8 @@ static void DefineExactWidthIntTypeSize(TargetInfo::IntType Ty,
if (TypeWidth == 64)
Ty = IsSigned ? TI.getInt64Type() : TI.getUInt64Type();
+ // We don't need to define a _WIDTH macro for the exact-width types because
+ // we already know the width.
const char *Prefix = IsSigned ? "__INT" : "__UINT";
DefineTypeSize(Prefix + Twine(TypeWidth) + "_MAX__", Ty, TI, Builder);
}
@@ -254,7 +266,12 @@ static void DefineLeastWidthIntType(unsigned TypeWidth, bool IsSigned,
const char *Prefix = IsSigned ? "__INT_LEAST" : "__UINT_LEAST";
DefineType(Prefix + Twine(TypeWidth) + "_TYPE__", Ty, Builder);
- DefineTypeSize(Prefix + Twine(TypeWidth) + "_MAX__", Ty, TI, Builder);
+ // We only want the *_WIDTH macro for the signed types to avoid too many
+ // predefined macros (the unsigned width and the signed width are identical.)
+ if (IsSigned)
+ DefineTypeSizeAndWidth(Prefix + Twine(TypeWidth), Ty, TI, Builder);
+ else
+ DefineTypeSize(Prefix + Twine(TypeWidth) + "_MAX__", Ty, TI, Builder);
DefineFmt(Prefix + Twine(TypeWidth), Ty, TI, Builder);
}
@@ -268,8 +285,12 @@ static void DefineFastIntType(unsigned TypeWidth, bool IsSigned,
const char *Prefix = IsSigned ? "__INT_FAST" : "__UINT_FAST";
DefineType(Prefix + Twine(TypeWidth) + "_TYPE__", Ty, Builder);
- DefineTypeSize(Prefix + Twine(TypeWidth) + "_MAX__", Ty, TI, Builder);
-
+ // We only want the *_WIDTH macro for the signed types to avoid too many
+ // predefined macros (the unsigned width and the signed width are identical.)
+ if (IsSigned)
+ DefineTypeSizeAndWidth(Prefix + Twine(TypeWidth), Ty, TI, Builder);
+ else
+ DefineTypeSize(Prefix + Twine(TypeWidth) + "_MAX__", Ty, TI, Builder);
DefineFmt(Prefix + Twine(TypeWidth), Ty, TI, Builder);
}
@@ -887,20 +908,26 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
assert(TI.getCharWidth() == 8 && "Only support 8-bit char so far");
Builder.defineMacro("__CHAR_BIT__", Twine(TI.getCharWidth()));
+ Builder.defineMacro("__BOOL_WIDTH__", Twine(TI.getBoolWidth()));
+ Builder.defineMacro("__SHRT_WIDTH__", Twine(TI.getShortWidth()));
+ Builder.defineMacro("__INT_WIDTH__", Twine(TI.getIntWidth()));
+ Builder.defineMacro("__LONG_WIDTH__", Twine(TI.getLongWidth()));
+ Builder.defineMacro("__LLONG_WIDTH__", Twine(TI.getLongLongWidth()));
+
DefineTypeSize("__SCHAR_MAX__", TargetInfo::SignedChar, TI, Builder);
DefineTypeSize("__SHRT_MAX__", TargetInfo::SignedShort, TI, Builder);
DefineTypeSize("__INT_MAX__", TargetInfo::SignedInt, TI, Builder);
DefineTypeSize("__LONG_MAX__", TargetInfo::SignedLong, TI, Builder);
DefineTypeSize("__LONG_LONG_MAX__", TargetInfo::SignedLongLong, TI, Builder);
- DefineTypeSize("__WCHAR_MAX__", TI.getWCharType(), TI, Builder);
- DefineTypeSize("__WINT_MAX__", TI.getWIntType(), TI, Builder);
- DefineTypeSize("__INTMAX_MAX__", TI.getIntMaxType(), TI, Builder);
- DefineTypeSize("__SIZE_MAX__", TI.getSizeType(), TI, Builder);
+ DefineTypeSizeAndWidth("__WCHAR", TI.getWCharType(), TI, Builder);
+ DefineTypeSizeAndWidth("__WINT", TI.getWIntType(), TI, Builder);
+ DefineTypeSizeAndWidth("__INTMAX", TI.getIntMaxType(), TI, Builder);
+ DefineTypeSizeAndWidth("__SIZE", TI.getSizeType(), TI, Builder);
- DefineTypeSize("__UINTMAX_MAX__", TI.getUIntMaxType(), TI, Builder);
- DefineTypeSize("__PTRDIFF_MAX__", TI.getPtrDiffType(0), TI, Builder);
- DefineTypeSize("__INTPTR_MAX__", TI.getIntPtrType(), TI, Builder);
- DefineTypeSize("__UINTPTR_MAX__", TI.getUIntPtrType(), TI, Builder);
+ DefineTypeSizeAndWidth("__UINTMAX", TI.getUIntMaxType(), TI, Builder);
+ DefineTypeSizeAndWidth("__PTRDIFF", TI.getPtrDiffType(0), TI, Builder);
+ DefineTypeSizeAndWidth("__INTPTR", TI.getIntPtrType(), TI, Builder);
+ DefineTypeSizeAndWidth("__UINTPTR", TI.getUIntPtrType(), TI, Builder);
DefineTypeSizeof("__SIZEOF_DOUBLE__", TI.getDoubleWidth(), TI, Builder);
DefineTypeSizeof("__SIZEOF_FLOAT__", TI.getFloatWidth(), TI, Builder);
@@ -929,29 +956,29 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
DefineFmt("__UINTMAX", TI.getUIntMaxType(), TI, Builder);
Builder.defineMacro("__UINTMAX_C_SUFFIX__",
TI.getTypeConstantSuffix(TI.getUIntMaxType()));
- DefineTypeWidth("__INTMAX_WIDTH__", TI.getIntMaxType(), TI, Builder);
DefineType("__PTRDIFF_TYPE__", TI.getPtrDiffType(0), Builder);
DefineFmt("__PTRDIFF", TI.getPtrDiffType(0), TI, Builder);
- DefineTypeWidth("__PTRDIFF_WIDTH__", TI.getPtrDiffType(0), TI, Builder);
DefineType("__INTPTR_TYPE__", TI.getIntPtrType(), Builder);
DefineFmt("__INTPTR", TI.getIntPtrType(), TI, Builder);
- DefineTypeWidth("__INTPTR_WIDTH__", TI.getIntPtrType(), TI, Builder);
DefineType("__SIZE_TYPE__", TI.getSizeType(), Builder);
DefineFmt("__SIZE", TI.getSizeType(), TI, Builder);
- DefineTypeWidth("__SIZE_WIDTH__", TI.getSizeType(), TI, Builder);
DefineType("__WCHAR_TYPE__", TI.getWCharType(), Builder);
- DefineTypeWidth("__WCHAR_WIDTH__", TI.getWCharType(), TI, Builder);
DefineType("__WINT_TYPE__", TI.getWIntType(), Builder);
- DefineTypeWidth("__WINT_WIDTH__", TI.getWIntType(), TI, Builder);
- DefineTypeWidth("__SIG_ATOMIC_WIDTH__", TI.getSigAtomicType(), TI, Builder);
- DefineTypeSize("__SIG_ATOMIC_MAX__", TI.getSigAtomicType(), TI, Builder);
+ DefineTypeSizeAndWidth("__SIG_ATOMIC", TI.getSigAtomicType(), TI, Builder);
DefineType("__CHAR16_TYPE__", TI.getChar16Type(), Builder);
DefineType("__CHAR32_TYPE__", TI.getChar32Type(), Builder);
- DefineTypeWidth("__UINTMAX_WIDTH__", TI.getUIntMaxType(), TI, Builder);
DefineType("__UINTPTR_TYPE__", TI.getUIntPtrType(), Builder);
DefineFmt("__UINTPTR", TI.getUIntPtrType(), TI, Builder);
- DefineTypeWidth("__UINTPTR_WIDTH__", TI.getUIntPtrType(), TI, Builder);
+
+ // The C standard requires the width of uintptr_t and intptr_t to be the same,
+ // per 7.20.2.4p1. Same for intmax_t and uintmax_t, per 7.20.2.5p1.
+ assert(TI.getTypeWidth(TI.getUIntPtrType()) ==
+ TI.getTypeWidth(TI.getIntPtrType()) &&
+ "uintptr_t and intptr_t have different widths?");
+ assert(TI.getTypeWidth(TI.getUIntMaxType()) ==
+ TI.getTypeWidth(TI.getIntMaxType()) &&
+ "uintmax_t and intmax_t have different widths?");
if (TI.hasFloat16Type())
DefineFloatMacros(Builder, "FLT16", &TI.getHalfFormat(), "F16");
@@ -1039,6 +1066,9 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__USER_LABEL_PREFIX__", TI.getUserLabelPrefix());
+ if (!LangOpts.MathErrno)
+ Builder.defineMacro("__NO_MATH_ERRNO__");
+
if (LangOpts.FastMath || LangOpts.FiniteMathOnly)
Builder.defineMacro("__FINITE_MATH_ONLY__", "1");
else
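Alongside the existing *_MAX__ macros, the preprocessor now predefines *_WIDTH__ macros for the standard and extended integer types, and asserts that the pointer-sized and max-sized signed/unsigned pairs have equal widths as the C standard requires. A quick self-check sketch, assuming a translation unit compiled by this Clang:

// Sketch: the new width macros are consistent with the corresponding types.
static_assert(__INT_WIDTH__ == sizeof(int) * __CHAR_BIT__,
              "__INT_WIDTH__ matches the width of int");
static_assert(__INTPTR_WIDTH__ == __UINTPTR_WIDTH__,
              "intptr_t and uintptr_t have the same width (C 7.20.2.4p1)");
static_assert(__INTMAX_WIDTH__ == __UINTMAX_WIDTH__,
              "intmax_t and uintmax_t have the same width (C 7.20.2.5p1)");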
diff --git a/contrib/llvm-project/clang/lib/Frontend/MultiplexConsumer.cpp b/contrib/llvm-project/clang/lib/Frontend/MultiplexConsumer.cpp
index 5abbb3a235b4..34bbc365e647 100644
--- a/contrib/llvm-project/clang/lib/Frontend/MultiplexConsumer.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/MultiplexConsumer.cpp
@@ -236,10 +236,10 @@ void MultiplexASTMutationListener::AddedAttributeToRecord(
MultiplexConsumer::MultiplexConsumer(
std::vector<std::unique_ptr<ASTConsumer>> C)
- : Consumers(std::move(C)), MutationListener(), DeserializationListener() {
+ : Consumers(std::move(C)) {
// Collect the mutation listeners and deserialization listeners of all
// children, and create a multiplex listener each if so.
- std::vector<ASTMutationListener*> mutationListeners;
+ std::vector<ASTMutationListener *> mutationListeners;
std::vector<ASTDeserializationListener*> serializationListeners;
for (auto &Consumer : Consumers) {
if (auto *mutationListener = Consumer->GetASTMutationListener())
diff --git a/contrib/llvm-project/clang/lib/Frontend/PrintPreprocessedOutput.cpp b/contrib/llvm-project/clang/lib/Frontend/PrintPreprocessedOutput.cpp
index 45df86ef91cd..1d0022bda474 100644
--- a/contrib/llvm-project/clang/lib/Frontend/PrintPreprocessedOutput.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/PrintPreprocessedOutput.cpp
@@ -792,7 +792,7 @@ static void PrintPreprocessedTokens(Preprocessor &PP, Token &Tok,
bool IsStartOfLine = false;
char Buffer[256];
- while (1) {
+ while (true) {
// Two lines joined with line continuation ('\' as last character on the
// line) must be emitted as one line even though Tok.getLine() returns two
// different values. In this situation Tok.isAtStartOfLine() is false even
diff --git a/contrib/llvm-project/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp b/contrib/llvm-project/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp
index 3f2a78127477..3e8d582f90c2 100644
--- a/contrib/llvm-project/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp
@@ -14,7 +14,6 @@
#include "clang/Rewrite/Frontend/Rewriters.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Frontend/PreprocessorOutputOptions.h"
-#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Pragma.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/SmallString.h"
@@ -31,10 +30,8 @@ class InclusionRewriter : public PPCallbacks {
struct IncludedFile {
FileID Id;
SrcMgr::CharacteristicKind FileType;
- const DirectoryLookup *DirLookup;
- IncludedFile(FileID Id, SrcMgr::CharacteristicKind FileType,
- const DirectoryLookup *DirLookup)
- : Id(Id), FileType(FileType), DirLookup(DirLookup) {}
+ IncludedFile(FileID Id, SrcMgr::CharacteristicKind FileType)
+ : Id(Id), FileType(FileType) {}
};
Preprocessor &PP; ///< Used to find inclusion directives.
SourceManager &SM; ///< Used to read and manage source files.
@@ -57,8 +54,7 @@ class InclusionRewriter : public PPCallbacks {
public:
InclusionRewriter(Preprocessor &PP, raw_ostream &OS, bool ShowLineMarkers,
bool UseLineDirectives);
- void Process(FileID FileId, SrcMgr::CharacteristicKind FileType,
- const DirectoryLookup *DirLookup);
+ void Process(FileID FileId, SrcMgr::CharacteristicKind FileType);
void setPredefinesBuffer(const llvm::MemoryBufferRef &Buf) {
PredefinesBuffer = Buf;
}
@@ -162,8 +158,7 @@ void InclusionRewriter::FileChanged(SourceLocation Loc,
return;
FileID Id = FullSourceLoc(Loc, SM).getFileID();
auto P = FileIncludes.insert(
- std::make_pair(LastInclusionLocation,
- IncludedFile(Id, NewFileType, PP.GetCurDirLookup())));
+ std::make_pair(LastInclusionLocation, IncludedFile(Id, NewFileType)));
(void)P;
assert(P.second && "Unexpected revisitation of the same include directive");
LastInclusionLocation = SourceLocation();
@@ -256,28 +251,12 @@ bool InclusionRewriter::IsIfAtLocationTrue(SourceLocation Loc) const {
return false;
}
-/// Detect the likely line ending style of \p FromFile by examining the first
-/// newline found within it.
-static StringRef DetectEOL(const MemoryBufferRef &FromFile) {
- // Detect what line endings the file uses, so that added content does not mix
- // the style. We need to check for "\r\n" first because "\n\r" will match
- // "\r\n\r\n".
- const char *Pos = strchr(FromFile.getBufferStart(), '\n');
- if (!Pos)
- return "\n";
- if (Pos - 1 >= FromFile.getBufferStart() && Pos[-1] == '\r')
- return "\r\n";
- if (Pos + 1 < FromFile.getBufferEnd() && Pos[1] == '\r')
- return "\n\r";
- return "\n";
-}
-
void InclusionRewriter::detectMainFileEOL() {
Optional<MemoryBufferRef> FromFile = *SM.getBufferOrNone(SM.getMainFileID());
assert(FromFile);
if (!FromFile)
return; // Should never happen, but whatever.
- MainEOL = DetectEOL(*FromFile);
+ MainEOL = FromFile->getBuffer().detectEOL();
}
/// Writes out bytes from \p FromFile, starting at \p NextToWrite and ending at
@@ -371,8 +350,7 @@ StringRef InclusionRewriter::NextIdentifierName(Lexer &RawLex,
/// Use a raw lexer to analyze \p FileId, incrementally copying parts of it
/// and including content of included files recursively.
void InclusionRewriter::Process(FileID FileId,
- SrcMgr::CharacteristicKind FileType,
- const DirectoryLookup *DirLookup) {
+ SrcMgr::CharacteristicKind FileType) {
MemoryBufferRef FromFile;
{
auto B = SM.getBufferOrNone(FileId);
@@ -384,7 +362,7 @@ void InclusionRewriter::Process(FileID FileId,
Lexer RawLex(FileId, FromFile, PP.getSourceManager(), PP.getLangOpts());
RawLex.SetCommentRetentionState(false);
- StringRef LocalEOL = DetectEOL(FromFile);
+ StringRef LocalEOL = FromFile.getBuffer().detectEOL();
// Per the GNU docs: "1" indicates entering a new file.
if (FileId == SM.getMainFileID() || FileId == PP.getPredefinesFileID())
@@ -433,7 +411,7 @@ void InclusionRewriter::Process(FileID FileId,
<< Mod->getFullModuleName(true) << "\n";
// Include and recursively process the file.
- Process(Inc->Id, Inc->FileType, Inc->DirLookup);
+ Process(Inc->Id, Inc->FileType);
if (Mod)
OS << "#pragma clang module end /*"
@@ -559,7 +537,7 @@ void clang::RewriteIncludesInInput(Preprocessor &PP, raw_ostream *OS,
Rewrite->handleModuleBegin(Tok);
} while (Tok.isNot(tok::eof));
Rewrite->setPredefinesBuffer(SM.getBufferOrFake(PP.getPredefinesFileID()));
- Rewrite->Process(PP.getPredefinesFileID(), SrcMgr::C_User, nullptr);
- Rewrite->Process(SM.getMainFileID(), SrcMgr::C_User, nullptr);
+ Rewrite->Process(PP.getPredefinesFileID(), SrcMgr::C_User);
+ Rewrite->Process(SM.getMainFileID(), SrcMgr::C_User);
OS->flush();
}
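The local DetectEOL helper is removed; the rewriter now calls llvm::StringRef::detectEOL(), which applies the same first-newline heuristic (checking for "\r\n" before "\n\r"). A trivial usage sketch:

#include "llvm/ADT/StringRef.h"

// Sketch: detectEOL() returns "\n", "\r\n", or "\n\r" based on the first
// newline found in the buffer, defaulting to "\n".
llvm::StringRef pickEOL(llvm::StringRef Buffer) { return Buffer.detectEOL(); }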
diff --git a/contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp b/contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
index 462aeda6e027..fc8fce4b42b8 100644
--- a/contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
@@ -95,8 +95,7 @@ class SDiagsMerger : SerializedDiagnosticReader {
AbbrevLookup DiagFlagLookup;
public:
- SDiagsMerger(SDiagsWriter &Writer)
- : SerializedDiagnosticReader(), Writer(Writer) {}
+ SDiagsMerger(SDiagsWriter &Writer) : Writer(Writer) {}
std::error_code mergeRecordsFromFile(const char *File) {
return readDiagnostics(File);
diff --git a/contrib/llvm-project/clang/lib/Frontend/TextDiagnostic.cpp b/contrib/llvm-project/clang/lib/Frontend/TextDiagnostic.cpp
index 8df7496c6ddd..1c4a76e68953 100644
--- a/contrib/llvm-project/clang/lib/Frontend/TextDiagnostic.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/TextDiagnostic.cpp
@@ -44,7 +44,7 @@ static const enum raw_ostream::Colors savedColor =
/// Add highlights to differences in template strings.
static void applyTemplateHighlighting(raw_ostream &OS, StringRef Str,
bool &Normal, bool Bold) {
- while (1) {
+ while (true) {
size_t Pos = Str.find(ToggleHighlight);
OS << Str.slice(0, Pos);
if (Pos == StringRef::npos)
diff --git a/contrib/llvm-project/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp b/contrib/llvm-project/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
index 2759625ae254..f67dceea9135 100644
--- a/contrib/llvm-project/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
@@ -541,9 +541,8 @@ static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
ExpectedLoc = SourceLocation();
} else {
// Lookup file via Preprocessor, like a #include.
- const DirectoryLookup *CurDir;
Optional<FileEntryRef> File =
- PP->LookupFile(Pos, Filename, false, nullptr, nullptr, CurDir,
+ PP->LookupFile(Pos, Filename, false, nullptr, nullptr, nullptr,
nullptr, nullptr, nullptr, nullptr, nullptr);
if (!File) {
Diags.Report(Pos.getLocWithOffset(PH.C - PH.Begin),
diff --git a/contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
index 8e18f33af0cb..8a8a13743762 100644
--- a/contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
+++ b/contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
@@ -57,6 +57,8 @@ CreateFrontendBaseAction(CompilerInstance &CI) {
case EmitLLVMOnly: return std::make_unique<EmitLLVMOnlyAction>();
case EmitCodeGenOnly: return std::make_unique<EmitCodeGenOnlyAction>();
case EmitObj: return std::make_unique<EmitObjAction>();
+ case ExtractAPI:
+ return std::make_unique<ExtractAPIAction>();
case FixIt: return std::make_unique<FixItAction>();
case GenerateModule:
return std::make_unique<GenerateModuleFromModuleMapAction>();
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_math.h b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_math.h
index 538556f394da..e447590393ec 100644
--- a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_math.h
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_math.h
@@ -345,4 +345,4 @@ __DEVICE__ float ynf(int __a, float __b) { return __nv_ynf(__a, __b); }
#pragma pop_macro("__DEVICE_VOID__")
#pragma pop_macro("__FAST_OR_SLOW")
-#endif // __CLANG_CUDA_DEVICE_FUNCTIONS_H__
+#endif // __CLANG_CUDA_MATH_H__
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_hip_runtime_wrapper.h b/contrib/llvm-project/clang/lib/Headers/__clang_hip_runtime_wrapper.h
index 73021d256cba..10cec58ed12f 100644
--- a/contrib/llvm-project/clang/lib/Headers/__clang_hip_runtime_wrapper.h
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_hip_runtime_wrapper.h
@@ -50,6 +50,9 @@ extern "C" {
#include <cmath>
#include <cstdlib>
#include <stdlib.h>
+#if __has_include("hip/hip_version.h")
+#include "hip/hip_version.h"
+#endif // __has_include("hip/hip_version.h")
#else
typedef __SIZE_TYPE__ size_t;
// Define macros which are needed to declare HIP device API's without standard
@@ -74,25 +77,35 @@ typedef __SIZE_TYPE__ __hip_size_t;
extern "C" {
#endif //__cplusplus
+#if HIP_VERSION_MAJOR * 100 + HIP_VERSION_MINOR >= 405
+extern "C" __device__ unsigned long long __ockl_dm_alloc(unsigned long long __size);
+extern "C" __device__ void __ockl_dm_dealloc(unsigned long long __addr);
+__attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {
+ return (void *) __ockl_dm_alloc(__size);
+}
+__attribute__((weak)) inline __device__ void free(void *__ptr) {
+ __ockl_dm_dealloc((unsigned long long)__ptr);
+}
+#else // HIP version check
#if __HIP_ENABLE_DEVICE_MALLOC__
__device__ void *__hip_malloc(__hip_size_t __size);
__device__ void *__hip_free(void *__ptr);
__attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {
return __hip_malloc(__size);
}
-__attribute__((weak)) inline __device__ void *free(void *__ptr) {
- return __hip_free(__ptr);
+__attribute__((weak)) inline __device__ void free(void *__ptr) {
+ __hip_free(__ptr);
}
#else
__attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {
__builtin_trap();
return (void *)0;
}
-__attribute__((weak)) inline __device__ void *free(void *__ptr) {
+__attribute__((weak)) inline __device__ void free(void *__ptr) {
__builtin_trap();
- return (void *)0;
}
#endif
+#endif // HIP version check
#ifdef __cplusplus
} // extern "C"
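For HIP 4.5 and newer, the device-side malloc/free wrappers above forward to the ROCm device-library allocator (__ockl_dm_alloc/__ockl_dm_dealloc), and free now correctly returns void. A hypothetical device-code sketch of what compiles against these wrappers:

// Hypothetical HIP device code (sketch): with HIP >= 4.5 the calls below end
// up in __ockl_dm_alloc/__ockl_dm_dealloc via the weak wrappers above.
__global__ void scratch_kernel(int n) {
  int *buf = static_cast<int *>(malloc(static_cast<size_t>(n) * sizeof(int)));
  if (buf)
    free(buf); // now returns void, matching the host signature
}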
diff --git a/contrib/llvm-project/clang/lib/Headers/avx2intrin.h b/contrib/llvm-project/clang/lib/Headers/avx2intrin.h
index 5064c87c2bb1..e33514a60ff3 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx2intrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx2intrin.h
@@ -26,19 +26,19 @@
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_abs_epi8(__m256i __a)
{
- return (__m256i)__builtin_ia32_pabsb256((__v32qi)__a);
+ return (__m256i)__builtin_elementwise_abs((__v32qs)__a);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_abs_epi16(__m256i __a)
{
- return (__m256i)__builtin_ia32_pabsw256((__v16hi)__a);
+ return (__m256i)__builtin_elementwise_abs((__v16hi)__a);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_abs_epi32(__m256i __a)
{
- return (__m256i)__builtin_ia32_pabsd256((__v8si)__a);
+ return (__m256i)__builtin_elementwise_abs((__v8si)__a);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -253,73 +253,73 @@ _mm256_madd_epi16(__m256i __a, __m256i __b)
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epi8(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_pmaxsb256((__v32qi)__a, (__v32qi)__b);
+ return (__m256i)__builtin_elementwise_max((__v32qs)__a, (__v32qs)__b);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epi16(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_pmaxsw256((__v16hi)__a, (__v16hi)__b);
+ return (__m256i)__builtin_elementwise_max((__v16hi)__a, (__v16hi)__b);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epi32(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_pmaxsd256((__v8si)__a, (__v8si)__b);
+ return (__m256i)__builtin_elementwise_max((__v8si)__a, (__v8si)__b);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epu8(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_pmaxub256((__v32qi)__a, (__v32qi)__b);
+ return (__m256i)__builtin_elementwise_max((__v32qu)__a, (__v32qu)__b);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epu16(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_pmaxuw256((__v16hi)__a, (__v16hi)__b);
+ return (__m256i)__builtin_elementwise_max((__v16hu)__a, (__v16hu)__b);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epu32(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_pmaxud256((__v8si)__a, (__v8si)__b);
+ return (__m256i)__builtin_elementwise_max((__v8su)__a, (__v8su)__b);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epi8(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_pminsb256((__v32qi)__a, (__v32qi)__b);
+ return (__m256i)__builtin_elementwise_min((__v32qs)__a, (__v32qs)__b);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epi16(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_pminsw256((__v16hi)__a, (__v16hi)__b);
+ return (__m256i)__builtin_elementwise_min((__v16hi)__a, (__v16hi)__b);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epi32(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_pminsd256((__v8si)__a, (__v8si)__b);
+ return (__m256i)__builtin_elementwise_min((__v8si)__a, (__v8si)__b);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epu8(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_pminub256((__v32qi)__a, (__v32qi)__b);
+ return (__m256i)__builtin_elementwise_min((__v32qu)__a, (__v32qu)__b);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epu16(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_pminuw256 ((__v16hi)__a, (__v16hi)__b);
+ return (__m256i)__builtin_elementwise_min((__v16hu)__a, (__v16hu)__b);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epu32(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_pminud256((__v8si)__a, (__v8si)__b);
+ return (__m256i)__builtin_elementwise_min((__v8su)__a, (__v8su)__b);
}
static __inline__ int __DEFAULT_FN_ATTRS256
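The AVX2 intrinsics above now lower through Clang's target-independent __builtin_elementwise_abs/min/max builtins instead of x86-specific __builtin_ia32_* builtins; the signedness of the vector element type selects the signed or unsigned operation. A small sketch with plain GCC-style vector types:

// Sketch: the generic elementwise builtins work on any vector type; the
// element type's signedness decides signed vs. unsigned min/max.
typedef int v4si __attribute__((vector_size(16)));
typedef unsigned int v4su __attribute__((vector_size(16)));

v4si clamp_signed(v4si x, v4si lo, v4si hi) {
  return __builtin_elementwise_min(__builtin_elementwise_max(x, lo), hi);
}
v4su max_unsigned(v4su a, v4su b) { return __builtin_elementwise_max(a, b); }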
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512bwintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512bwintrin.h
index 6aee8aed8487..522ef100bab1 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512bwintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512bwintrin.h
@@ -485,7 +485,7 @@ _mm512_mask_blend_epi16 (__mmask32 __U, __m512i __A, __m512i __W)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_abs_epi8 (__m512i __A)
{
- return (__m512i)__builtin_ia32_pabsb512((__v64qi)__A);
+ return (__m512i)__builtin_elementwise_abs((__v64qs)__A);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -507,7 +507,7 @@ _mm512_maskz_abs_epi8 (__mmask64 __U, __m512i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_abs_epi16 (__m512i __A)
{
- return (__m512i)__builtin_ia32_pabsw512((__v32hi)__A);
+ return (__m512i)__builtin_elementwise_abs((__v32hi)__A);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -751,7 +751,7 @@ _mm512_maskz_avg_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_max_epi8 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pmaxsb512((__v64qi) __A, (__v64qi) __B);
+ return (__m512i)__builtin_elementwise_max((__v64qs) __A, (__v64qs) __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -773,7 +773,7 @@ _mm512_mask_max_epi8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_max_epi16 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pmaxsw512((__v32hi) __A, (__v32hi) __B);
+ return (__m512i)__builtin_elementwise_max((__v32hi) __A, (__v32hi) __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -796,7 +796,7 @@ _mm512_mask_max_epi16 (__m512i __W, __mmask32 __M, __m512i __A,
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_max_epu8 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pmaxub512((__v64qi)__A, (__v64qi)__B);
+ return (__m512i)__builtin_elementwise_max((__v64qu)__A, (__v64qu)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -818,7 +818,7 @@ _mm512_mask_max_epu8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_max_epu16 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pmaxuw512((__v32hi)__A, (__v32hi)__B);
+ return (__m512i)__builtin_elementwise_max((__v32hu)__A, (__v32hu)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -840,7 +840,7 @@ _mm512_mask_max_epu16 (__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_min_epi8 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pminsb512((__v64qi) __A, (__v64qi) __B);
+ return (__m512i)__builtin_elementwise_min((__v64qs) __A, (__v64qs) __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -862,7 +862,7 @@ _mm512_mask_min_epi8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_min_epi16 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pminsw512((__v32hi) __A, (__v32hi) __B);
+ return (__m512i)__builtin_elementwise_min((__v32hi) __A, (__v32hi) __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -884,7 +884,7 @@ _mm512_mask_min_epi16 (__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_min_epu8 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pminub512((__v64qi)__A, (__v64qi)__B);
+ return (__m512i)__builtin_elementwise_min((__v64qu)__A, (__v64qu)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -906,7 +906,7 @@ _mm512_mask_min_epu8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_min_epu16 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pminuw512((__v32hi)__A, (__v32hi)__B);
+ return (__m512i)__builtin_elementwise_min((__v32hu)__A, (__v32hu)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512fintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512fintrin.h
index df298640523b..50e0e287d9fc 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512fintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512fintrin.h
@@ -26,6 +26,10 @@ typedef unsigned short __v32hu __attribute__((__vector_size__(64)));
typedef unsigned long long __v8du __attribute__((__vector_size__(64)));
typedef unsigned int __v16su __attribute__((__vector_size__(64)));
+/* We need an explicitly signed variant for char. Note that this shouldn't
+ * appear in the interface though. */
+typedef signed char __v64qs __attribute__((__vector_size__(64)));
+
typedef float __m512 __attribute__((__vector_size__(64), __aligned__(64)));
typedef double __m512d __attribute__((__vector_size__(64), __aligned__(64)));
typedef long long __m512i __attribute__((__vector_size__(64), __aligned__(64)));
@@ -1086,7 +1090,7 @@ static __inline __m512i
__DEFAULT_FN_ATTRS512
_mm512_max_epi32(__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pmaxsd512((__v16si)__A, (__v16si)__B);
+ return (__m512i)__builtin_elementwise_max((__v16si)__A, (__v16si)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1108,7 +1112,7 @@ _mm512_maskz_max_epi32 (__mmask16 __M, __m512i __A, __m512i __B)
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_max_epu32(__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pmaxud512((__v16si)__A, (__v16si)__B);
+ return (__m512i)__builtin_elementwise_max((__v16su)__A, (__v16su)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1130,7 +1134,7 @@ _mm512_maskz_max_epu32 (__mmask16 __M, __m512i __A, __m512i __B)
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_max_epi64(__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pmaxsq512((__v8di)__A, (__v8di)__B);
+ return (__m512i)__builtin_elementwise_max((__v8di)__A, (__v8di)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1152,7 +1156,7 @@ _mm512_maskz_max_epi64 (__mmask8 __M, __m512i __A, __m512i __B)
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_max_epu64(__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pmaxuq512((__v8di)__A, (__v8di)__B);
+ return (__m512i)__builtin_elementwise_max((__v8du)__A, (__v8du)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1321,7 +1325,7 @@ static __inline __m512i
__DEFAULT_FN_ATTRS512
_mm512_min_epi32(__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pminsd512((__v16si)__A, (__v16si)__B);
+ return (__m512i)__builtin_elementwise_min((__v16si)__A, (__v16si)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1343,7 +1347,7 @@ _mm512_maskz_min_epi32 (__mmask16 __M, __m512i __A, __m512i __B)
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_min_epu32(__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pminud512((__v16si)__A, (__v16si)__B);
+ return (__m512i)__builtin_elementwise_min((__v16su)__A, (__v16su)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1365,7 +1369,7 @@ _mm512_maskz_min_epu32 (__mmask16 __M, __m512i __A, __m512i __B)
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_min_epi64(__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pminsq512((__v8di)__A, (__v8di)__B);
+ return (__m512i)__builtin_elementwise_min((__v8di)__A, (__v8di)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1387,7 +1391,7 @@ _mm512_maskz_min_epi64 (__mmask8 __M, __m512i __A, __m512i __B)
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_min_epu64(__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pminuq512((__v8di)__A, (__v8di)__B);
+ return (__m512i)__builtin_elementwise_min((__v8du)__A, (__v8du)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1846,7 +1850,7 @@ _mm512_mask_ceil_pd (__m512d __W, __mmask8 __U, __m512d __A)
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_abs_epi64(__m512i __A)
{
- return (__m512i)__builtin_ia32_pabsq512((__v8di)__A);
+ return (__m512i)__builtin_elementwise_abs((__v8di)__A);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1868,7 +1872,7 @@ _mm512_maskz_abs_epi64 (__mmask8 __U, __m512i __A)
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_abs_epi32(__m512i __A)
{
- return (__m512i)__builtin_ia32_pabsd512((__v16si) __A);
+ return (__m512i)__builtin_elementwise_abs((__v16si) __A);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -9320,11 +9324,11 @@ static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_mul_epi64(__m512
}
static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_and_epi64(__m512i __W) {
- return __builtin_ia32_reduce_and_q512(__W);
+ return __builtin_reduce_and((__v8di)__W);
}
static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_or_epi64(__m512i __W) {
- return __builtin_ia32_reduce_or_q512(__W);
+ return __builtin_reduce_or((__v8di)__W);
}
static __inline__ long long __DEFAULT_FN_ATTRS512
@@ -9342,13 +9346,13 @@ _mm512_mask_reduce_mul_epi64(__mmask8 __M, __m512i __W) {
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_and_epi64(__mmask8 __M, __m512i __W) {
__W = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __W);
- return __builtin_ia32_reduce_and_q512(__W);
+ return __builtin_reduce_and((__v8di)__W);
}
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_or_epi64(__mmask8 __M, __m512i __W) {
__W = _mm512_maskz_mov_epi64(__M, __W);
- return __builtin_ia32_reduce_or_q512(__W);
+ return __builtin_reduce_or((__v8di)__W);
}
// -0.0 is used to ignore the start value since it is the neutral value of
@@ -9386,12 +9390,12 @@ _mm512_reduce_mul_epi32(__m512i __W) {
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_and_epi32(__m512i __W) {
- return __builtin_ia32_reduce_and_d512((__v16si)__W);
+ return __builtin_reduce_and((__v16si)__W);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_or_epi32(__m512i __W) {
- return __builtin_ia32_reduce_or_d512((__v16si)__W);
+ return __builtin_reduce_or((__v16si)__W);
}
static __inline__ int __DEFAULT_FN_ATTRS512
@@ -9409,13 +9413,13 @@ _mm512_mask_reduce_mul_epi32( __mmask16 __M, __m512i __W) {
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_and_epi32( __mmask16 __M, __m512i __W) {
__W = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __W);
- return __builtin_ia32_reduce_and_d512((__v16si)__W);
+ return __builtin_reduce_and((__v16si)__W);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_or_epi32(__mmask16 __M, __m512i __W) {
__W = _mm512_maskz_mov_epi32(__M, __W);
- return __builtin_ia32_reduce_or_d512((__v16si)__W);
+ return __builtin_reduce_or((__v16si)__W);
}
static __inline__ float __DEFAULT_FN_ATTRS512
@@ -9442,89 +9446,89 @@ _mm512_mask_reduce_mul_ps(__mmask16 __M, __m512 __W) {
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_reduce_max_epi64(__m512i __V) {
- return __builtin_ia32_reduce_smax_q512(__V);
+ return __builtin_reduce_max((__v8di)__V);
}
static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
_mm512_reduce_max_epu64(__m512i __V) {
- return __builtin_ia32_reduce_umax_q512(__V);
+ return __builtin_reduce_max((__v8du)__V);
}
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_reduce_min_epi64(__m512i __V) {
- return __builtin_ia32_reduce_smin_q512(__V);
+ return __builtin_reduce_min((__v8di)__V);
}
static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
_mm512_reduce_min_epu64(__m512i __V) {
- return __builtin_ia32_reduce_umin_q512(__V);
+ return __builtin_reduce_min((__v8du)__V);
}
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_max_epi64(__mmask8 __M, __m512i __V) {
__V = _mm512_mask_mov_epi64(_mm512_set1_epi64(-__LONG_LONG_MAX__ - 1LL), __M, __V);
- return __builtin_ia32_reduce_smax_q512(__V);
+ return __builtin_reduce_max((__v8di)__V);
}
static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_max_epu64(__mmask8 __M, __m512i __V) {
__V = _mm512_maskz_mov_epi64(__M, __V);
- return __builtin_ia32_reduce_umax_q512(__V);
+ return __builtin_reduce_max((__v8du)__V);
}
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_min_epi64(__mmask8 __M, __m512i __V) {
__V = _mm512_mask_mov_epi64(_mm512_set1_epi64(__LONG_LONG_MAX__), __M, __V);
- return __builtin_ia32_reduce_smin_q512(__V);
+ return __builtin_reduce_min((__v8di)__V);
}
static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_min_epu64(__mmask8 __M, __m512i __V) {
__V = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __V);
- return __builtin_ia32_reduce_umin_q512(__V);
+ return __builtin_reduce_min((__v8du)__V);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_max_epi32(__m512i __V) {
- return __builtin_ia32_reduce_smax_d512((__v16si)__V);
+ return __builtin_reduce_max((__v16si)__V);
}
static __inline__ unsigned int __DEFAULT_FN_ATTRS512
_mm512_reduce_max_epu32(__m512i __V) {
- return __builtin_ia32_reduce_umax_d512((__v16si)__V);
+ return __builtin_reduce_max((__v16su)__V);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_min_epi32(__m512i __V) {
- return __builtin_ia32_reduce_smin_d512((__v16si)__V);
+ return __builtin_reduce_min((__v16si)__V);
}
static __inline__ unsigned int __DEFAULT_FN_ATTRS512
_mm512_reduce_min_epu32(__m512i __V) {
- return __builtin_ia32_reduce_umin_d512((__v16si)__V);
+ return __builtin_reduce_min((__v16su)__V);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_max_epi32(__mmask16 __M, __m512i __V) {
__V = _mm512_mask_mov_epi32(_mm512_set1_epi32(-__INT_MAX__ - 1), __M, __V);
- return __builtin_ia32_reduce_smax_d512((__v16si)__V);
+ return __builtin_reduce_max((__v16si)__V);
}
static __inline__ unsigned int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_max_epu32(__mmask16 __M, __m512i __V) {
__V = _mm512_maskz_mov_epi32(__M, __V);
- return __builtin_ia32_reduce_umax_d512((__v16si)__V);
+ return __builtin_reduce_max((__v16su)__V);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_min_epi32(__mmask16 __M, __m512i __V) {
__V = _mm512_mask_mov_epi32(_mm512_set1_epi32(__INT_MAX__), __M, __V);
- return __builtin_ia32_reduce_smin_d512((__v16si)__V);
+ return __builtin_reduce_min((__v16si)__V);
}
static __inline__ unsigned int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_min_epu32(__mmask16 __M, __m512i __V) {
__V = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __V);
- return __builtin_ia32_reduce_umin_d512((__v16si)__V);
+ return __builtin_reduce_min((__v16su)__V);
}
static __inline__ double __DEFAULT_FN_ATTRS512
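
The reduction intrinsics above keep their documented behaviour; only the lowering changes from the ia32-specific builtins to the generic __builtin_reduce_* family. A minimal usage sketch, assuming an AVX-512F target (-mavx512f); max16 is a hypothetical helper name:

#include <immintrin.h>

/* Horizontal maximum of 16 packed 32-bit ints loaded from memory. */
static int max16(const int *p) {
  __m512i v = _mm512_loadu_si512((const void *)p); /* reads p[0..15] */
  return _mm512_reduce_max_epi32(v);
}
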
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512vlintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512vlintrin.h
index 0519dba59081..178c9dbc0e6e 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512vlintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512vlintrin.h
@@ -2988,7 +2988,7 @@ _mm256_maskz_abs_epi32(__mmask8 __U, __m256i __A) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_abs_epi64 (__m128i __A) {
- return (__m128i)__builtin_ia32_pabsq128((__v2di)__A);
+ return (__m128i)__builtin_elementwise_abs((__v2di)__A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3007,7 +3007,7 @@ _mm_maskz_abs_epi64 (__mmask8 __U, __m128i __A) {
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_abs_epi64 (__m256i __A) {
- return (__m256i)__builtin_ia32_pabsq256 ((__v4di)__A);
+ return (__m256i)__builtin_elementwise_abs((__v4di)__A);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -3054,7 +3054,7 @@ _mm256_mask_max_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_max_epi64 (__m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_pmaxsq128((__v2di)__A, (__v2di)__B);
+ return (__m128i)__builtin_elementwise_max((__v2di)__A, (__v2di)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3073,7 +3073,7 @@ _mm_mask_max_epi64 (__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epi64 (__m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_pmaxsq256((__v4di)__A, (__v4di)__B);
+ return (__m256i)__builtin_elementwise_max((__v4di)__A, (__v4di)__B);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -3120,7 +3120,7 @@ _mm256_mask_max_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_max_epu64 (__m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_pmaxuq128((__v2di)__A, (__v2di)__B);
+ return (__m128i)__builtin_elementwise_max((__v2du)__A, (__v2du)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3139,7 +3139,7 @@ _mm_mask_max_epu64 (__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epu64 (__m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_pmaxuq256((__v4di)__A, (__v4di)__B);
+ return (__m256i)__builtin_elementwise_max((__v4du)__A, (__v4du)__B);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -3186,7 +3186,7 @@ _mm256_mask_min_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_min_epi64 (__m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_pminsq128((__v2di)__A, (__v2di)__B);
+ return (__m128i)__builtin_elementwise_min((__v2di)__A, (__v2di)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3205,7 +3205,7 @@ _mm_maskz_min_epi64 (__mmask8 __M, __m128i __A, __m128i __B) {
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epi64 (__m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_pminsq256((__v4di)__A, (__v4di)__B);
+ return (__m256i)__builtin_elementwise_min((__v4di)__A, (__v4di)__B);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -3252,7 +3252,7 @@ _mm256_mask_min_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_min_epu64 (__m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_pminuq128((__v2di)__A, (__v2di)__B);
+ return (__m128i)__builtin_elementwise_min((__v2du)__A, (__v2du)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3271,7 +3271,7 @@ _mm_maskz_min_epu64 (__mmask8 __M, __m128i __A, __m128i __B) {
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epu64 (__m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_pminuq256((__v4di)__A, (__v4di)__B);
+ return (__m256i)__builtin_elementwise_min((__v4du)__A, (__v4du)__B);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
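
As with the 512-bit reductions, the 128/256-bit abs/min/max intrinsics are unchanged at the source level; only the backing builtin moves to __builtin_elementwise_*. A sketch assuming an AVX-512VL target (-mavx512f -mavx512vl); clamp_nonneg_epi64 is a hypothetical helper:

#include <immintrin.h>

/* Clamp each signed 64-bit lane of v to be non-negative. */
static __m256i clamp_nonneg_epi64(__m256i v) {
  return _mm256_max_epi64(v, _mm256_setzero_si256());
}
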
diff --git a/contrib/llvm-project/clang/lib/Headers/cetintrin.h b/contrib/llvm-project/clang/lib/Headers/cetintrin.h
index 4290e9d7355b..019cab0261e7 100644
--- a/contrib/llvm-project/clang/lib/Headers/cetintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/cetintrin.h
@@ -42,10 +42,20 @@ static __inline__ unsigned int __DEFAULT_FN_ATTRS _rdsspd(unsigned int __a) {
return __builtin_ia32_rdsspd(__a);
}
+static __inline__ unsigned int __DEFAULT_FN_ATTRS _rdsspd_i32() {
+ unsigned int t;
+ return __builtin_ia32_rdsspd(t);
+}
+
#ifdef __x86_64__
static __inline__ unsigned long long __DEFAULT_FN_ATTRS _rdsspq(unsigned long long __a) {
return __builtin_ia32_rdsspq(__a);
}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS _rdsspq_i64() {
+ unsigned long long t;
+ return __builtin_ia32_rdsspq(t);
+}
#endif /* __x86_64__ */
#ifdef __x86_64__
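
The new _rdsspd_i32/_rdsspq_i64 helpers read the shadow-stack pointer without a caller-supplied operand; the indeterminate local they pass to the builtin is just whatever value remains when shadow stacks are inactive, since RDSSP is a no-op in that case. A usage sketch, assuming a CET-enabled x86-64 build (-mshstk); current_ssp is a hypothetical wrapper:

#include <immintrin.h>

/* Return the current shadow-stack pointer via RDSSPQ (x86-64 only). */
static unsigned long long current_ssp(void) {
  return _rdsspq_i64();
}
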
diff --git a/contrib/llvm-project/clang/lib/Headers/cpuid.h b/contrib/llvm-project/clang/lib/Headers/cpuid.h
index 6df1b4a11172..5d262a60735f 100644
--- a/contrib/llvm-project/clang/lib/Headers/cpuid.h
+++ b/contrib/llvm-project/clang/lib/Headers/cpuid.h
@@ -200,7 +200,7 @@
#define bit_AMXINT8 0x02000000
/* Features in %eax for leaf 7 sub-leaf 1 */
-#define bit_AVXVNNI 0x00000008
+#define bit_AVXVNNI 0x00000010
#define bit_AVX512BF16 0x00000020
#define bit_HRESET 0x00400000
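
The corrected value reflects that AVX-VNNI is reported in bit 4 (0x10) of %eax for leaf 7, sub-leaf 1, not bit 3. A feature-detection sketch using the existing __get_cpuid_count helper; has_avxvnni is a hypothetical wrapper:

#include <cpuid.h>

/* Non-zero when the CPU reports AVX-VNNI (CPUID leaf 7, sub-leaf 1, %eax). */
static int has_avxvnni(void) {
  unsigned int eax, ebx, ecx, edx;
  if (!__get_cpuid_count(7, 1, &eax, &ebx, &ecx, &edx))
    return 0;
  return (eax & bit_AVXVNNI) != 0;
}
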
diff --git a/contrib/llvm-project/clang/lib/Headers/emmintrin.h b/contrib/llvm-project/clang/lib/Headers/emmintrin.h
index 6e9c3032c21f..4618b808efc4 100644
--- a/contrib/llvm-project/clang/lib/Headers/emmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/emmintrin.h
@@ -2375,7 +2375,7 @@ _mm_madd_epi16(__m128i __a, __m128i __b)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_max_epi16(__m128i __a, __m128i __b)
{
- return (__m128i)__builtin_ia32_pmaxsw128((__v8hi)__a, (__v8hi)__b);
+ return (__m128i)__builtin_elementwise_max((__v8hi)__a, (__v8hi)__b);
}
/// Compares corresponding elements of two 128-bit unsigned [16 x i8]
@@ -2395,7 +2395,7 @@ _mm_max_epi16(__m128i __a, __m128i __b)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_max_epu8(__m128i __a, __m128i __b)
{
- return (__m128i)__builtin_ia32_pmaxub128((__v16qi)__a, (__v16qi)__b);
+ return (__m128i)__builtin_elementwise_max((__v16qu)__a, (__v16qu)__b);
}
/// Compares corresponding elements of two 128-bit signed [8 x i16]
@@ -2415,7 +2415,7 @@ _mm_max_epu8(__m128i __a, __m128i __b)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_min_epi16(__m128i __a, __m128i __b)
{
- return (__m128i)__builtin_ia32_pminsw128((__v8hi)__a, (__v8hi)__b);
+ return (__m128i)__builtin_elementwise_min((__v8hi)__a, (__v8hi)__b);
}
/// Compares corresponding elements of two 128-bit unsigned [16 x i8]
@@ -2435,7 +2435,7 @@ _mm_min_epi16(__m128i __a, __m128i __b)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_min_epu8(__m128i __a, __m128i __b)
{
- return (__m128i)__builtin_ia32_pminub128((__v16qi)__a, (__v16qi)__b);
+ return (__m128i)__builtin_elementwise_min((__v16qu)__a, (__v16qu)__b);
}
/// Multiplies the corresponding elements of two signed [8 x i16]
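
These SSE2 min/max intrinsics keep their prototypes; the signed/unsigned distinction now flows through the lane types handed to the generic builtin. A short sketch, assuming baseline SSE2; clamp_low_epu8 is a hypothetical helper:

#include <emmintrin.h>

/* Raise each unsigned byte of v to at least the matching byte of floor. */
static __m128i clamp_low_epu8(__m128i v, __m128i floor) {
  return _mm_max_epu8(v, floor);
}
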
diff --git a/contrib/llvm-project/clang/lib/Headers/limits.h b/contrib/llvm-project/clang/lib/Headers/limits.h
index c653580bac4e..c2d3a7cf4353 100644
--- a/contrib/llvm-project/clang/lib/Headers/limits.h
+++ b/contrib/llvm-project/clang/lib/Headers/limits.h
@@ -62,6 +62,24 @@
#define CHAR_BIT __CHAR_BIT__
+/* C2x 5.2.4.2.1 */
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+ in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+#define BOOL_WIDTH __BOOL_WIDTH__
+#define CHAR_WIDTH CHAR_BIT
+#define SCHAR_WIDTH CHAR_BIT
+#define UCHAR_WIDTH CHAR_BIT
+#define USHRT_WIDTH __SHRT_WIDTH__
+#define SHRT_WIDTH __SHRT_WIDTH__
+#define UINT_WIDTH __INT_WIDTH__
+#define INT_WIDTH __INT_WIDTH__
+#define ULONG_WIDTH __LONG_WIDTH__
+#define LONG_WIDTH __LONG_WIDTH__
+#define ULLONG_WIDTH __LLONG_WIDTH__
+#define LLONG_WIDTH __LLONG_WIDTH__
+#endif
+
#ifdef __CHAR_UNSIGNED__ /* -funsigned-char */
#define CHAR_MIN 0
#define CHAR_MAX UCHAR_MAX
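
The new *_WIDTH macros are only visible when the effective standard is C2x (the 202000L check matches Clang's placeholder date noted in the FIXME). A quick probe, assuming -std=c2x and this limits.h:

#include <limits.h>
#include <stdio.h>

int main(void) {
#if defined(INT_WIDTH) && defined(LLONG_WIDTH)
  printf("int is %d bits, long long is %d bits\n", INT_WIDTH, LLONG_WIDTH);
#endif
  return 0;
}
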
diff --git a/contrib/llvm-project/clang/lib/Headers/opencl-c-base.h b/contrib/llvm-project/clang/lib/Headers/opencl-c-base.h
index 9c81ddb5e2a7..06b78da63e69 100644
--- a/contrib/llvm-project/clang/lib/Headers/opencl-c-base.h
+++ b/contrib/llvm-project/clang/lib/Headers/opencl-c-base.h
@@ -68,6 +68,7 @@
// For the SPIR and SPIR-V target all features are supported.
#if defined(__SPIR__) || defined(__SPIRV__)
#define __opencl_c_atomic_scope_all_devices 1
+#define __opencl_c_read_write_images 1
#endif // defined(__SPIR__)
#endif // (__OPENCL_CPP_VERSION__ == 202100 || __OPENCL_C_VERSION__ == 300)
@@ -498,12 +499,14 @@ typedef int clk_profiling_info;
#define MAX_WORK_DIM 3
+#ifdef __opencl_c_device_enqueue
typedef struct {
unsigned int workDimension;
size_t globalWorkOffset[MAX_WORK_DIM];
size_t globalWorkSize[MAX_WORK_DIM];
size_t localWorkSize[MAX_WORK_DIM];
} ndrange_t;
+#endif // __opencl_c_device_enqueue
#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
@@ -600,9 +603,11 @@ typedef struct {
// C++ for OpenCL - __remove_address_space
#if defined(__OPENCL_CPP_VERSION__)
template <typename _Tp> struct __remove_address_space { using type = _Tp; };
+#if defined(__opencl_c_generic_address_space)
template <typename _Tp> struct __remove_address_space<__generic _Tp> {
using type = _Tp;
};
+#endif
template <typename _Tp> struct __remove_address_space<__global _Tp> {
using type = _Tp;
};
diff --git a/contrib/llvm-project/clang/lib/Headers/opencl-c.h b/contrib/llvm-project/clang/lib/Headers/opencl-c.h
index 77a7a8b9bb3a..8fde2fa29899 100644
--- a/contrib/llvm-project/clang/lib/Headers/opencl-c.h
+++ b/contrib/llvm-project/clang/lib/Headers/opencl-c.h
@@ -11,11 +11,11 @@
#include "opencl-c-base.h"
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_images)
#ifndef cl_khr_depth_images
#define cl_khr_depth_images
#endif //cl_khr_depth_images
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_images)
#if __OPENCL_C_VERSION__ < CL_VERSION_2_0
#ifdef cl_khr_3d_image_writes
@@ -15585,7 +15585,7 @@ half4 __purefn __ovld read_imageh(read_only image1d_buffer_t image, int coord);
#endif //cl_khr_fp16
// Image read functions for read_write images
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
float4 __purefn __ovld read_imagef(read_write image1d_t image, int coord);
int4 __purefn __ovld read_imagei(read_write image1d_t image, int coord);
uint4 __purefn __ovld read_imageui(read_write image1d_t image, int coord);
@@ -15628,7 +15628,6 @@ float __purefn __ovld read_imagef(read_write image2d_msaa_depth_t image, int2 co
float __purefn __ovld read_imagef(read_write image2d_array_msaa_depth_t image, int4 coord, int sample);
#endif //cl_khr_gl_msaa_sharing
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
#ifdef cl_khr_mipmap_image
float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float lod);
int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float lod);
@@ -15679,7 +15678,6 @@ int4 __purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler,
uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
#endif //cl_khr_mipmap_image
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
// Image read functions returning half4 type
#ifdef cl_khr_fp16
@@ -15690,7 +15688,7 @@ half4 __purefn __ovld read_imageh(read_write image1d_array_t image, int2 coord);
half4 __purefn __ovld read_imageh(read_write image2d_array_t image, int4 coord);
half4 __purefn __ovld read_imageh(read_write image1d_buffer_t image, int coord);
#endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
/**
* Write color value to location specified by coordinate
@@ -15834,7 +15832,7 @@ void __ovld write_imageh(write_only image1d_buffer_t image, int coord, half4 col
#endif //cl_khr_fp16
// Image write functions for read_write images
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
void __ovld write_imagef(read_write image2d_t image, int2 coord, float4 color);
void __ovld write_imagei(read_write image2d_t image, int2 coord, int4 color);
void __ovld write_imageui(read_write image2d_t image, int2 coord, uint4 color);
@@ -15866,7 +15864,6 @@ void __ovld write_imagef(read_write image2d_depth_t image, int2 coord, float col
void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, float color);
#endif //cl_khr_depth_images
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
#if defined(cl_khr_mipmap_image_writes)
void __ovld write_imagef(read_write image1d_t image, int coord, int lod, float4 color);
void __ovld write_imagei(read_write image1d_t image, int coord, int lod, int4 color);
@@ -15894,7 +15891,6 @@ void __ovld write_imageui(read_write image3d_t image, int4 coord, int lod, uint4
#endif //cl_khr_3d_image_writes
#endif //cl_khr_mipmap_image_writes
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
// Image write functions for half4 type
#ifdef cl_khr_fp16
@@ -15907,7 +15903,7 @@ void __ovld write_imageh(read_write image1d_array_t image, int2 coord, half4 col
void __ovld write_imageh(read_write image2d_array_t image, int4 coord, half4 color);
void __ovld write_imageh(read_write image1d_buffer_t image, int coord, half4 color);
#endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
// Note: In OpenCL v1.0/1.1/1.2, image argument of image query builtin functions does not have
// access qualifier, which by default assume read_only access qualifier. Image query builtin
@@ -15955,7 +15951,7 @@ int __ovld __cnfn get_image_width(write_only image2d_array_msaa_t image);
int __ovld __cnfn get_image_width(write_only image2d_array_msaa_depth_t image);
#endif //cl_khr_gl_msaa_sharing
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
int __ovld __cnfn get_image_width(read_write image1d_t image);
int __ovld __cnfn get_image_width(read_write image1d_buffer_t image);
int __ovld __cnfn get_image_width(read_write image2d_t image);
@@ -15972,7 +15968,7 @@ int __ovld __cnfn get_image_width(read_write image2d_msaa_depth_t image);
int __ovld __cnfn get_image_width(read_write image2d_array_msaa_t image);
int __ovld __cnfn get_image_width(read_write image2d_array_msaa_depth_t image);
#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
/**
* Return the image height in pixels.
@@ -16007,7 +16003,7 @@ int __ovld __cnfn get_image_height(write_only image2d_array_msaa_t image);
int __ovld __cnfn get_image_height(write_only image2d_array_msaa_depth_t image);
#endif //cl_khr_gl_msaa_sharing
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
int __ovld __cnfn get_image_height(read_write image2d_t image);
int __ovld __cnfn get_image_height(read_write image3d_t image);
int __ovld __cnfn get_image_height(read_write image2d_array_t image);
@@ -16021,7 +16017,7 @@ int __ovld __cnfn get_image_height(read_write image2d_msaa_depth_t image);
int __ovld __cnfn get_image_height(read_write image2d_array_msaa_t image);
int __ovld __cnfn get_image_height(read_write image2d_array_msaa_depth_t image);
#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
/**
* Return the image depth in pixels.
@@ -16032,9 +16028,9 @@ int __ovld __cnfn get_image_depth(read_only image3d_t image);
int __ovld __cnfn get_image_depth(write_only image3d_t image);
#endif
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
int __ovld __cnfn get_image_depth(read_write image3d_t image);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
// OpenCL Extension v2.0 s9.18 - Mipmaps
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
@@ -16053,9 +16049,11 @@ int __ovld get_image_num_mip_levels(write_only image2d_t image);
int __ovld get_image_num_mip_levels(write_only image3d_t image);
#endif
+#if defined(__opencl_c_read_write_images)
int __ovld get_image_num_mip_levels(read_write image1d_t image);
int __ovld get_image_num_mip_levels(read_write image2d_t image);
int __ovld get_image_num_mip_levels(read_write image3d_t image);
+#endif //defined(__opencl_c_read_write_images)
int __ovld get_image_num_mip_levels(read_only image1d_array_t image);
int __ovld get_image_num_mip_levels(read_only image2d_array_t image);
@@ -16067,10 +16065,12 @@ int __ovld get_image_num_mip_levels(write_only image2d_array_t image);
int __ovld get_image_num_mip_levels(write_only image2d_array_depth_t image);
int __ovld get_image_num_mip_levels(write_only image2d_depth_t image);
+#if defined(__opencl_c_read_write_images)
int __ovld get_image_num_mip_levels(read_write image1d_array_t image);
int __ovld get_image_num_mip_levels(read_write image2d_array_t image);
int __ovld get_image_num_mip_levels(read_write image2d_array_depth_t image);
int __ovld get_image_num_mip_levels(read_write image2d_depth_t image);
+#endif //defined(__opencl_c_read_write_images)
#endif //cl_khr_mipmap_image
#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
@@ -16130,7 +16130,7 @@ int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_t im
int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_depth_t image);
#endif //cl_khr_gl_msaa_sharing
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
int __ovld __cnfn get_image_channel_data_type(read_write image1d_t image);
int __ovld __cnfn get_image_channel_data_type(read_write image1d_buffer_t image);
int __ovld __cnfn get_image_channel_data_type(read_write image2d_t image);
@@ -16147,7 +16147,7 @@ int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_depth_t im
int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_t image);
int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_depth_t image);
#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
/**
* Return the image channel order. Valid values are:
@@ -16202,7 +16202,7 @@ int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_t image)
int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_depth_t image);
#endif //cl_khr_gl_msaa_sharing
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
int __ovld __cnfn get_image_channel_order(read_write image1d_t image);
int __ovld __cnfn get_image_channel_order(read_write image1d_buffer_t image);
int __ovld __cnfn get_image_channel_order(read_write image2d_t image);
@@ -16219,7 +16219,7 @@ int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_depth_t image)
int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_t image);
int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_depth_t image);
#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
/**
* Return the 2D image width and height as an int2
@@ -16252,7 +16252,7 @@ int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_t image);
int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_depth_t image);
#endif //cl_khr_gl_msaa_sharing
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
int2 __ovld __cnfn get_image_dim(read_write image2d_t image);
int2 __ovld __cnfn get_image_dim(read_write image2d_array_t image);
#ifdef cl_khr_depth_images
@@ -16265,7 +16265,7 @@ int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_depth_t image);
int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_t image);
int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_depth_t image);
#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
/**
* Return the 3D image width, height, and depth as an
@@ -16277,9 +16277,9 @@ int4 __ovld __cnfn get_image_dim(read_only image3d_t image);
#ifdef cl_khr_3d_image_writes
int4 __ovld __cnfn get_image_dim(write_only image3d_t image);
#endif
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
int4 __ovld __cnfn get_image_dim(read_write image3d_t image);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
/**
* Return the image array size.
@@ -16305,7 +16305,7 @@ size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_t image_
size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_depth_t image_array);
#endif //cl_khr_gl_msaa_sharing
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
size_t __ovld __cnfn get_image_array_size(read_write image1d_array_t image_array);
size_t __ovld __cnfn get_image_array_size(read_write image2d_array_t image_array);
#ifdef cl_khr_depth_images
@@ -16315,7 +16315,7 @@ size_t __ovld __cnfn get_image_array_size(read_write image2d_array_depth_t image
size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_t image_array);
size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_depth_t image_array);
#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
/**
* Return the number of samples associated with image
@@ -16331,12 +16331,12 @@ int __ovld get_image_num_samples(write_only image2d_msaa_depth_t image);
int __ovld get_image_num_samples(write_only image2d_array_msaa_t image);
int __ovld get_image_num_samples(write_only image2d_array_msaa_depth_t image);
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
int __ovld get_image_num_samples(read_write image2d_msaa_t image);
int __ovld get_image_num_samples(read_write image2d_msaa_depth_t image);
int __ovld get_image_num_samples(read_write image2d_array_msaa_t image);
int __ovld get_image_num_samples(read_write image2d_array_msaa_depth_t image);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
#endif
// OpenCL v2.0 s6.13.15 - Work-group Functions
@@ -16450,6 +16450,7 @@ bool __ovld is_valid_reserve_id(reserve_id_t reserve_id);
// OpenCL v2.0 s6.13.17 - Enqueue Kernels
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#ifdef __opencl_c_device_enqueue
ndrange_t __ovld ndrange_1D(size_t);
ndrange_t __ovld ndrange_1D(size_t, size_t);
ndrange_t __ovld ndrange_1D(size_t, size_t, size_t);
@@ -16477,6 +16478,7 @@ bool __ovld is_valid_event (clk_event_t event);
void __ovld capture_event_profiling_info(clk_event_t, clk_profiling_info, __global void* value);
queue_t __ovld get_default_queue(void);
+#endif //__opencl_c_device_enqueue
#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
// OpenCL Extension v2.0 s9.17 - Sub-groups
@@ -17572,34 +17574,38 @@ uint16 __ovld __conv intel_sub_group_shuffle_xor( uint16 x, uint c );
long __ovld __conv intel_sub_group_shuffle_xor( long x, uint c );
ulong __ovld __conv intel_sub_group_shuffle_xor( ulong x, uint c );
+#if defined(__opencl_c_images)
uint __ovld __conv intel_sub_group_block_read( read_only image2d_t image, int2 coord );
uint2 __ovld __conv intel_sub_group_block_read2( read_only image2d_t image, int2 coord );
uint4 __ovld __conv intel_sub_group_block_read4( read_only image2d_t image, int2 coord );
uint8 __ovld __conv intel_sub_group_block_read8( read_only image2d_t image, int2 coord );
+#endif
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
uint __ovld __conv intel_sub_group_block_read(read_write image2d_t image, int2 coord);
uint2 __ovld __conv intel_sub_group_block_read2(read_write image2d_t image, int2 coord);
uint4 __ovld __conv intel_sub_group_block_read4(read_write image2d_t image, int2 coord);
uint8 __ovld __conv intel_sub_group_block_read8(read_write image2d_t image, int2 coord);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif // defined(__opencl_c_read_write_images)
uint __ovld __conv intel_sub_group_block_read( const __global uint* p );
uint2 __ovld __conv intel_sub_group_block_read2( const __global uint* p );
uint4 __ovld __conv intel_sub_group_block_read4( const __global uint* p );
uint8 __ovld __conv intel_sub_group_block_read8( const __global uint* p );
+#if defined(__opencl_c_images)
void __ovld __conv intel_sub_group_block_write(write_only image2d_t image, int2 coord, uint data);
void __ovld __conv intel_sub_group_block_write2(write_only image2d_t image, int2 coord, uint2 data);
void __ovld __conv intel_sub_group_block_write4(write_only image2d_t image, int2 coord, uint4 data);
void __ovld __conv intel_sub_group_block_write8(write_only image2d_t image, int2 coord, uint8 data);
+#endif // defined(__opencl_c_images)
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
void __ovld __conv intel_sub_group_block_write(read_write image2d_t image, int2 coord, uint data);
void __ovld __conv intel_sub_group_block_write2(read_write image2d_t image, int2 coord, uint2 data);
void __ovld __conv intel_sub_group_block_write4(read_write image2d_t image, int2 coord, uint4 data);
void __ovld __conv intel_sub_group_block_write8(read_write image2d_t image, int2 coord, uint8 data);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif // defined(__opencl_c_read_write_images)
void __ovld __conv intel_sub_group_block_write( __global uint* p, uint data );
void __ovld __conv intel_sub_group_block_write2( __global uint* p, uint2 data );
@@ -17712,68 +17718,76 @@ ushort __ovld __conv intel_sub_group_scan_inclusive_min( ushort x );
short __ovld __conv intel_sub_group_scan_inclusive_max( short x );
ushort __ovld __conv intel_sub_group_scan_inclusive_max( ushort x );
+#if defined(__opencl_c_images)
uint __ovld __conv intel_sub_group_block_read_ui( read_only image2d_t image, int2 byte_coord );
uint2 __ovld __conv intel_sub_group_block_read_ui2( read_only image2d_t image, int2 byte_coord );
uint4 __ovld __conv intel_sub_group_block_read_ui4( read_only image2d_t image, int2 byte_coord );
uint8 __ovld __conv intel_sub_group_block_read_ui8( read_only image2d_t image, int2 byte_coord );
+#endif // defined(__opencl_c_images)
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
uint __ovld __conv intel_sub_group_block_read_ui( read_write image2d_t image, int2 byte_coord );
uint2 __ovld __conv intel_sub_group_block_read_ui2( read_write image2d_t image, int2 byte_coord );
uint4 __ovld __conv intel_sub_group_block_read_ui4( read_write image2d_t image, int2 byte_coord );
uint8 __ovld __conv intel_sub_group_block_read_ui8( read_write image2d_t image, int2 byte_coord );
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif // defined(__opencl_c_read_write_images)
uint __ovld __conv intel_sub_group_block_read_ui( const __global uint* p );
uint2 __ovld __conv intel_sub_group_block_read_ui2( const __global uint* p );
uint4 __ovld __conv intel_sub_group_block_read_ui4( const __global uint* p );
uint8 __ovld __conv intel_sub_group_block_read_ui8( const __global uint* p );
+#if defined(__opencl_c_images)
void __ovld __conv intel_sub_group_block_write_ui( read_only image2d_t image, int2 byte_coord, uint data );
void __ovld __conv intel_sub_group_block_write_ui2( read_only image2d_t image, int2 byte_coord, uint2 data );
void __ovld __conv intel_sub_group_block_write_ui4( read_only image2d_t image, int2 byte_coord, uint4 data );
void __ovld __conv intel_sub_group_block_write_ui8( read_only image2d_t image, int2 byte_coord, uint8 data );
+#endif //defined(__opencl_c_images)
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
void __ovld __conv intel_sub_group_block_write_ui( read_write image2d_t image, int2 byte_coord, uint data );
void __ovld __conv intel_sub_group_block_write_ui2( read_write image2d_t image, int2 byte_coord, uint2 data );
void __ovld __conv intel_sub_group_block_write_ui4( read_write image2d_t image, int2 byte_coord, uint4 data );
void __ovld __conv intel_sub_group_block_write_ui8( read_write image2d_t image, int2 byte_coord, uint8 data );
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif // defined(__opencl_c_read_write_images)
void __ovld __conv intel_sub_group_block_write_ui( __global uint* p, uint data );
void __ovld __conv intel_sub_group_block_write_ui2( __global uint* p, uint2 data );
void __ovld __conv intel_sub_group_block_write_ui4( __global uint* p, uint4 data );
void __ovld __conv intel_sub_group_block_write_ui8( __global uint* p, uint8 data );
+#if defined(__opencl_c_images)
ushort __ovld __conv intel_sub_group_block_read_us( read_only image2d_t image, int2 coord );
ushort2 __ovld __conv intel_sub_group_block_read_us2( read_only image2d_t image, int2 coord );
ushort4 __ovld __conv intel_sub_group_block_read_us4( read_only image2d_t image, int2 coord );
ushort8 __ovld __conv intel_sub_group_block_read_us8( read_only image2d_t image, int2 coord );
+#endif // defined(__opencl_c_images)
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
ushort __ovld __conv intel_sub_group_block_read_us(read_write image2d_t image, int2 coord);
ushort2 __ovld __conv intel_sub_group_block_read_us2(read_write image2d_t image, int2 coord);
ushort4 __ovld __conv intel_sub_group_block_read_us4(read_write image2d_t image, int2 coord);
ushort8 __ovld __conv intel_sub_group_block_read_us8(read_write image2d_t image, int2 coord);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif // defined(__opencl_c_read_write_images)
ushort __ovld __conv intel_sub_group_block_read_us( const __global ushort* p );
ushort2 __ovld __conv intel_sub_group_block_read_us2( const __global ushort* p );
ushort4 __ovld __conv intel_sub_group_block_read_us4( const __global ushort* p );
ushort8 __ovld __conv intel_sub_group_block_read_us8( const __global ushort* p );
+#if defined(__opencl_c_images)
void __ovld __conv intel_sub_group_block_write_us(write_only image2d_t image, int2 coord, ushort data);
void __ovld __conv intel_sub_group_block_write_us2(write_only image2d_t image, int2 coord, ushort2 data);
void __ovld __conv intel_sub_group_block_write_us4(write_only image2d_t image, int2 coord, ushort4 data);
void __ovld __conv intel_sub_group_block_write_us8(write_only image2d_t image, int2 coord, ushort8 data);
+#endif // defined(__opencl_c_images)
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
void __ovld __conv intel_sub_group_block_write_us(read_write image2d_t image, int2 coord, ushort data);
void __ovld __conv intel_sub_group_block_write_us2(read_write image2d_t image, int2 coord, ushort2 data);
void __ovld __conv intel_sub_group_block_write_us4(read_write image2d_t image, int2 coord, ushort4 data);
void __ovld __conv intel_sub_group_block_write_us8(read_write image2d_t image, int2 coord, ushort8 data);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif // defined(__opencl_c_read_write_images)
void __ovld __conv intel_sub_group_block_write_us( __global ushort* p, ushort data );
void __ovld __conv intel_sub_group_block_write_us2( __global ushort* p, ushort2 data );
@@ -17891,6 +17905,7 @@ short2 __ovld intel_sub_group_avc_ime_adjust_ref_offset(
short2 ref_offset, ushort2 src_coord, ushort2 ref_window_size,
ushort2 image_size);
+#if defined(__opencl_c_images)
intel_sub_group_avc_ime_result_t __ovld
intel_sub_group_avc_ime_evaluate_with_single_reference(
read_only image2d_t src_image, read_only image2d_t ref_image,
@@ -17931,6 +17946,7 @@ intel_sub_group_avc_ime_evaluate_with_dual_reference_streaminout(
read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
intel_sub_group_avc_ime_payload_t payload,
intel_sub_group_avc_ime_dual_reference_streamin_t streamin_components);
+#endif
intel_sub_group_avc_ime_single_reference_streamin_t __ovld
intel_sub_group_avc_ime_get_single_reference_streamin(
@@ -17995,6 +18011,7 @@ intel_sub_group_avc_ref_payload_t __ovld
intel_sub_group_avc_ref_set_bilinear_filter_enable(
intel_sub_group_avc_ref_payload_t payload);
+#if defined(__opencl_c_images)
intel_sub_group_avc_ref_result_t __ovld
intel_sub_group_avc_ref_evaluate_with_single_reference(
read_only image2d_t src_image, read_only image2d_t ref_image,
@@ -18013,6 +18030,7 @@ intel_sub_group_avc_ref_evaluate_with_multi_reference(
read_only image2d_t src_image, uint packed_reference_ids,
uchar packed_reference_field_polarities, sampler_t vme_media_sampler,
intel_sub_group_avc_ref_payload_t payload);
+#endif //defined(__opencl_c_images)
// SIC built-in functions
intel_sub_group_avc_sic_payload_t __ovld
@@ -18063,6 +18081,7 @@ intel_sub_group_avc_sic_set_block_based_raw_skip_sad(
uchar block_based_skip_type,
intel_sub_group_avc_sic_payload_t payload);
+#if defined(__opencl_c_images)
intel_sub_group_avc_sic_result_t __ovld
intel_sub_group_avc_sic_evaluate_ipe(
read_only image2d_t src_image, sampler_t vme_media_sampler,
@@ -18085,6 +18104,7 @@ intel_sub_group_avc_sic_evaluate_with_multi_reference(
read_only image2d_t src_image, uint packed_reference_ids,
uchar packed_reference_field_polarities, sampler_t vme_media_sampler,
intel_sub_group_avc_sic_payload_t payload);
+#endif //defined(__opencl_c_images)
uchar __ovld intel_sub_group_avc_sic_get_ipe_luma_shape(
intel_sub_group_avc_sic_result_t result);
diff --git a/contrib/llvm-project/clang/lib/Headers/smmintrin.h b/contrib/llvm-project/clang/lib/Headers/smmintrin.h
index 710e55aaa120..0df59c5fcc59 100644
--- a/contrib/llvm-project/clang/lib/Headers/smmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/smmintrin.h
@@ -668,7 +668,7 @@ _mm_stream_load_si128 (__m128i const *__V)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_min_epi8 (__m128i __V1, __m128i __V2)
{
- return (__m128i) __builtin_ia32_pminsb128 ((__v16qi) __V1, (__v16qi) __V2);
+ return (__m128i) __builtin_elementwise_min((__v16qs) __V1, (__v16qs) __V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@@ -687,7 +687,7 @@ _mm_min_epi8 (__m128i __V1, __m128i __V2)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_max_epi8 (__m128i __V1, __m128i __V2)
{
- return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi) __V1, (__v16qi) __V2);
+ return (__m128i) __builtin_elementwise_max((__v16qs) __V1, (__v16qs) __V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@@ -706,7 +706,7 @@ _mm_max_epi8 (__m128i __V1, __m128i __V2)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_min_epu16 (__m128i __V1, __m128i __V2)
{
- return (__m128i) __builtin_ia32_pminuw128 ((__v8hi) __V1, (__v8hi) __V2);
+ return (__m128i) __builtin_elementwise_min((__v8hu) __V1, (__v8hu) __V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@@ -725,7 +725,7 @@ _mm_min_epu16 (__m128i __V1, __m128i __V2)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_max_epu16 (__m128i __V1, __m128i __V2)
{
- return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi) __V1, (__v8hi) __V2);
+ return (__m128i) __builtin_elementwise_max((__v8hu) __V1, (__v8hu) __V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@@ -744,7 +744,7 @@ _mm_max_epu16 (__m128i __V1, __m128i __V2)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_min_epi32 (__m128i __V1, __m128i __V2)
{
- return (__m128i) __builtin_ia32_pminsd128 ((__v4si) __V1, (__v4si) __V2);
+ return (__m128i) __builtin_elementwise_min((__v4si) __V1, (__v4si) __V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@@ -763,7 +763,7 @@ _mm_min_epi32 (__m128i __V1, __m128i __V2)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_max_epi32 (__m128i __V1, __m128i __V2)
{
- return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si) __V1, (__v4si) __V2);
+ return (__m128i) __builtin_elementwise_max((__v4si) __V1, (__v4si) __V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@@ -782,7 +782,7 @@ _mm_max_epi32 (__m128i __V1, __m128i __V2)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_min_epu32 (__m128i __V1, __m128i __V2)
{
- return (__m128i) __builtin_ia32_pminud128((__v4si) __V1, (__v4si) __V2);
+ return (__m128i) __builtin_elementwise_min((__v4su) __V1, (__v4su) __V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@@ -801,7 +801,7 @@ _mm_min_epu32 (__m128i __V1, __m128i __V2)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_max_epu32 (__m128i __V1, __m128i __V2)
{
- return (__m128i) __builtin_ia32_pmaxud128((__v4si) __V1, (__v4si) __V2);
+ return (__m128i) __builtin_elementwise_max((__v4su) __V1, (__v4su) __V2);
}
/* SSE4 Insertion and Extraction from XMM Register Instructions. */
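
The SSE4.1 min/max family follows the same pattern: unchanged API, generic elementwise lowering. A sketch assuming -msse4.1; clamp_epu32 is a hypothetical helper:

#include <smmintrin.h>

/* Clamp each unsigned 32-bit lane of v into [lo, hi]. */
static __m128i clamp_epu32(__m128i v, __m128i lo, __m128i hi) {
  return _mm_min_epu32(_mm_max_epu32(v, lo), hi);
}
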
diff --git a/contrib/llvm-project/clang/lib/Headers/stdatomic.h b/contrib/llvm-project/clang/lib/Headers/stdatomic.h
index 1e47bcb2bacf..780bcc2dfea1 100644
--- a/contrib/llvm-project/clang/lib/Headers/stdatomic.h
+++ b/contrib/llvm-project/clang/lib/Headers/stdatomic.h
@@ -44,6 +44,11 @@ extern "C" {
/* 7.17.2 Initialization */
#define ATOMIC_VAR_INIT(value) (value)
+#if (__STDC_VERSION__ >= 201710L || __cplusplus >= 202002L) && \
+ !defined(_CLANG_DISABLE_CRT_DEPRECATION_WARNINGS)
+/* ATOMIC_VAR_INIT was deprecated in C17 and C++20. */
+#pragma clang deprecated(ATOMIC_VAR_INIT)
+#endif
#define atomic_init __c11_atomic_init
/* 7.17.3 Order and consistency */
@@ -153,6 +158,10 @@ typedef _Atomic(uintmax_t) atomic_uintmax_t;
typedef struct atomic_flag { atomic_bool _Value; } atomic_flag;
#define ATOMIC_FLAG_INIT { 0 }
+#if __cplusplus >= 202002L && !defined(_CLANG_DISABLE_CRT_DEPRECATION_WARNINGS)
+/* ATOMIC_FLAG_INIT was deprecated in C++20 but is not deprecated in C. */
+#pragma clang deprecated(ATOMIC_FLAG_INIT)
+#endif
/* These should be provided by the libc implementation. */
#ifdef __cplusplus
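
With these pragmas, expanding ATOMIC_VAR_INIT warns under C17/C++20 (and ATOMIC_FLAG_INIT under C++20) unless _CLANG_DISABLE_CRT_DEPRECATION_WARNINGS is defined. A sketch of the old and the preferred spellings in C:

#include <stdatomic.h>

/* Deprecated since C17: this line is now diagnosed by the pragma above. */
atomic_int old_style = ATOMIC_VAR_INIT(0);

/* Preferred: direct initialization (or atomic_init() for non-static objects). */
atomic_int new_style = 0;
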
diff --git a/contrib/llvm-project/clang/lib/Headers/stdint.h b/contrib/llvm-project/clang/lib/Headers/stdint.h
index 192f653e95a1..4790c25a2774 100644
--- a/contrib/llvm-project/clang/lib/Headers/stdint.h
+++ b/contrib/llvm-project/clang/lib/Headers/stdint.h
@@ -461,6 +461,18 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# define INT64_MAX INT64_C( 9223372036854775807)
# define INT64_MIN (-INT64_C( 9223372036854775807)-1)
# define UINT64_MAX UINT64_C(18446744073709551615)
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+ in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT64_WIDTH 64
+# define INT64_WIDTH UINT64_WIDTH
+
+# define __UINT_LEAST64_WIDTH UINT64_WIDTH
+# define __UINT_LEAST32_WIDTH UINT64_WIDTH
+# define __UINT_LEAST16_WIDTH UINT64_WIDTH
+# define __UINT_LEAST8_WIDTH UINT64_WIDTH
+#endif /* __STDC_VERSION__ */
+
# define __INT_LEAST64_MIN INT64_MIN
# define __INT_LEAST64_MAX INT64_MAX
# define __UINT_LEAST64_MAX UINT64_MAX
@@ -482,6 +494,15 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# define INT_FAST64_MIN __INT_LEAST64_MIN
# define INT_FAST64_MAX __INT_LEAST64_MAX
# define UINT_FAST64_MAX __UINT_LEAST64_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+ in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT_LEAST64_WIDTH __UINT_LEAST64_WIDTH
+# define INT_LEAST64_WIDTH UINT_LEAST64_WIDTH
+# define UINT_FAST64_WIDTH __UINT_LEAST64_WIDTH
+# define INT_FAST64_WIDTH UINT_FAST64_WIDTH
+#endif /* __STDC_VERSION__ */
#endif /* __INT_LEAST64_MIN */
@@ -495,6 +516,7 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# define INT_FAST56_MIN INT56_MIN
# define INT_FAST56_MAX INT56_MAX
# define UINT_FAST56_MAX UINT56_MAX
+
# define __INT_LEAST32_MIN INT56_MIN
# define __INT_LEAST32_MAX INT56_MAX
# define __UINT_LEAST32_MAX UINT56_MAX
@@ -504,6 +526,20 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# define __INT_LEAST8_MIN INT56_MIN
# define __INT_LEAST8_MAX INT56_MAX
# define __UINT_LEAST8_MAX UINT56_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+ in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT56_WIDTH 56
+# define INT56_WIDTH UINT56_WIDTH
+# define UINT_LEAST56_WIDTH UINT56_WIDTH
+# define INT_LEAST56_WIDTH UINT_LEAST56_WIDTH
+# define UINT_FAST56_WIDTH UINT56_WIDTH
+# define INT_FAST56_WIDTH UINT_FAST56_WIDTH
+# define __UINT_LEAST32_WIDTH UINT56_WIDTH
+# define __UINT_LEAST16_WIDTH UINT56_WIDTH
+# define __UINT_LEAST8_WIDTH UINT56_WIDTH
+#endif /* __STDC_VERSION__ */
#endif /* __INT56_TYPE__ */
@@ -517,6 +553,7 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# define INT_FAST48_MIN INT48_MIN
# define INT_FAST48_MAX INT48_MAX
# define UINT_FAST48_MAX UINT48_MAX
+
# define __INT_LEAST32_MIN INT48_MIN
# define __INT_LEAST32_MAX INT48_MAX
# define __UINT_LEAST32_MAX UINT48_MAX
@@ -526,6 +563,20 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# define __INT_LEAST8_MIN INT48_MIN
# define __INT_LEAST8_MAX INT48_MAX
# define __UINT_LEAST8_MAX UINT48_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+ in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+#define UINT48_WIDTH 48
+#define INT48_WIDTH UINT48_WIDTH
+#define UINT_LEAST48_WIDTH UINT48_WIDTH
+#define INT_LEAST48_WIDTH UINT_LEAST48_WIDTH
+#define UINT_FAST48_WIDTH UINT48_WIDTH
+#define INT_FAST48_WIDTH UINT_FAST48_WIDTH
+#define __UINT_LEAST32_WIDTH UINT48_WIDTH
+#define __UINT_LEAST16_WIDTH UINT48_WIDTH
+#define __UINT_LEAST8_WIDTH UINT48_WIDTH
+#endif /* __STDC_VERSION__ */
#endif /* __INT48_TYPE__ */
@@ -539,6 +590,7 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# define INT_FAST40_MIN INT40_MIN
# define INT_FAST40_MAX INT40_MAX
# define UINT_FAST40_MAX UINT40_MAX
+
# define __INT_LEAST32_MIN INT40_MIN
# define __INT_LEAST32_MAX INT40_MAX
# define __UINT_LEAST32_MAX UINT40_MAX
@@ -548,6 +600,20 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# define __INT_LEAST8_MIN INT40_MIN
# define __INT_LEAST8_MAX INT40_MAX
# define __UINT_LEAST8_MAX UINT40_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+ in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT40_WIDTH 40
+# define INT40_WIDTH UINT40_WIDTH
+# define UINT_LEAST40_WIDTH UINT40_WIDTH
+# define INT_LEAST40_WIDTH UINT_LEAST40_WIDTH
+# define UINT_FAST40_WIDTH UINT40_WIDTH
+# define INT_FAST40_WIDTH UINT_FAST40_WIDTH
+# define __UINT_LEAST32_WIDTH UINT40_WIDTH
+# define __UINT_LEAST16_WIDTH UINT40_WIDTH
+# define __UINT_LEAST8_WIDTH UINT40_WIDTH
+#endif /* __STDC_VERSION__ */
#endif /* __INT40_TYPE__ */
@@ -555,6 +621,7 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# define INT32_MAX INT32_C(2147483647)
# define INT32_MIN (-INT32_C(2147483647)-1)
# define UINT32_MAX UINT32_C(4294967295)
+
# define __INT_LEAST32_MIN INT32_MIN
# define __INT_LEAST32_MAX INT32_MAX
# define __UINT_LEAST32_MAX UINT32_MAX
@@ -564,6 +631,16 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# define __INT_LEAST8_MIN INT32_MIN
# define __INT_LEAST8_MAX INT32_MAX
# define __UINT_LEAST8_MAX UINT32_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+ in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT32_WIDTH 32
+# define INT32_WIDTH UINT32_WIDTH
+# define __UINT_LEAST32_WIDTH UINT32_WIDTH
+# define __UINT_LEAST16_WIDTH UINT32_WIDTH
+# define __UINT_LEAST8_WIDTH UINT32_WIDTH
+#endif /* __STDC_VERSION__ */
#endif /* __INT32_TYPE__ */
#ifdef __INT_LEAST32_MIN
@@ -573,6 +650,15 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# define INT_FAST32_MIN __INT_LEAST32_MIN
# define INT_FAST32_MAX __INT_LEAST32_MAX
# define UINT_FAST32_MAX __UINT_LEAST32_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+ in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT_LEAST32_WIDTH __UINT_LEAST32_WIDTH
+# define INT_LEAST32_WIDTH UINT_LEAST32_WIDTH
+# define UINT_FAST32_WIDTH __UINT_LEAST32_WIDTH
+# define INT_FAST32_WIDTH UINT_FAST32_WIDTH
+#endif /* __STDC_VERSION__ */
#endif /* __INT_LEAST32_MIN */
@@ -586,12 +672,26 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# define INT_FAST24_MIN INT24_MIN
# define INT_FAST24_MAX INT24_MAX
# define UINT_FAST24_MAX UINT24_MAX
+
# define __INT_LEAST16_MIN INT24_MIN
# define __INT_LEAST16_MAX INT24_MAX
# define __UINT_LEAST16_MAX UINT24_MAX
# define __INT_LEAST8_MIN INT24_MIN
# define __INT_LEAST8_MAX INT24_MAX
# define __UINT_LEAST8_MAX UINT24_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+ in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT24_WIDTH 24
+# define INT24_WIDTH UINT24_WIDTH
+# define UINT_LEAST24_WIDTH UINT24_WIDTH
+# define INT_LEAST24_WIDTH UINT_LEAST24_WIDTH
+# define UINT_FAST24_WIDTH UINT24_WIDTH
+# define INT_FAST24_WIDTH UINT_FAST24_WIDTH
+# define __UINT_LEAST16_WIDTH UINT24_WIDTH
+# define __UINT_LEAST8_WIDTH UINT24_WIDTH
+#endif /* __STDC_VERSION__ */
#endif /* __INT24_TYPE__ */
@@ -599,12 +699,22 @@ typedef __UINTMAX_TYPE__ uintmax_t;
#define INT16_MAX INT16_C(32767)
#define INT16_MIN (-INT16_C(32767)-1)
#define UINT16_MAX UINT16_C(65535)
+
# define __INT_LEAST16_MIN INT16_MIN
# define __INT_LEAST16_MAX INT16_MAX
# define __UINT_LEAST16_MAX UINT16_MAX
# define __INT_LEAST8_MIN INT16_MIN
# define __INT_LEAST8_MAX INT16_MAX
# define __UINT_LEAST8_MAX UINT16_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+ in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT16_WIDTH 16
+# define INT16_WIDTH UINT16_WIDTH
+# define __UINT_LEAST16_WIDTH UINT16_WIDTH
+# define __UINT_LEAST8_WIDTH UINT16_WIDTH
+#endif /* __STDC_VERSION__ */
#endif /* __INT16_TYPE__ */
#ifdef __INT_LEAST16_MIN
@@ -614,6 +724,15 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# define INT_FAST16_MIN __INT_LEAST16_MIN
# define INT_FAST16_MAX __INT_LEAST16_MAX
# define UINT_FAST16_MAX __UINT_LEAST16_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+ in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT_LEAST16_WIDTH __UINT_LEAST16_WIDTH
+# define INT_LEAST16_WIDTH UINT_LEAST16_WIDTH
+# define UINT_FAST16_WIDTH __UINT_LEAST16_WIDTH
+# define INT_FAST16_WIDTH UINT_FAST16_WIDTH
+#endif /* __STDC_VERSION__ */
#endif /* __INT_LEAST16_MIN */
@@ -621,9 +740,18 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# define INT8_MAX INT8_C(127)
# define INT8_MIN (-INT8_C(127)-1)
# define UINT8_MAX UINT8_C(255)
+
# define __INT_LEAST8_MIN INT8_MIN
# define __INT_LEAST8_MAX INT8_MAX
# define __UINT_LEAST8_MAX UINT8_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+ in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT8_WIDTH 8
+# define INT8_WIDTH UINT8_WIDTH
+# define __UINT_LEAST8_WIDTH UINT8_WIDTH
+#endif /* __STDC_VERSION__ */
#endif /* __INT8_TYPE__ */
#ifdef __INT_LEAST8_MIN
@@ -633,6 +761,15 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# define INT_FAST8_MIN __INT_LEAST8_MIN
# define INT_FAST8_MAX __INT_LEAST8_MAX
# define UINT_FAST8_MAX __UINT_LEAST8_MAX
+
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+ in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+# define UINT_LEAST8_WIDTH __UINT_LEAST8_WIDTH
+# define INT_LEAST8_WIDTH UINT_LEAST8_WIDTH
+# define UINT_FAST8_WIDTH __UINT_LEAST8_WIDTH
+# define INT_FAST8_WIDTH UINT_FAST8_WIDTH
+#endif /* __STDC_VERSION__ */
#endif /* __INT_LEAST8_MIN */
/* Some utility macros */
@@ -652,6 +789,16 @@ typedef __UINTMAX_TYPE__ uintmax_t;
#define PTRDIFF_MAX __PTRDIFF_MAX__
#define SIZE_MAX __SIZE_MAX__
+/* C2x 7.20.2.4 Width of integer types capable of holding object pointers. */
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+ in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+/* NB: The C standard requires that these be the same value, but the compiler
+ exposes separate internal width macros. */
+#define INTPTR_WIDTH __INTPTR_WIDTH__
+#define UINTPTR_WIDTH __UINTPTR_WIDTH__
+#endif
+
/* ISO9899:2011 7.20 (C11 Annex K): Define RSIZE_MAX if __STDC_WANT_LIB_EXT1__
* is enabled. */
#if defined(__STDC_WANT_LIB_EXT1__) && __STDC_WANT_LIB_EXT1__ >= 1
@@ -663,6 +810,16 @@ typedef __UINTMAX_TYPE__ uintmax_t;
#define INTMAX_MAX __INTMAX_MAX__
#define UINTMAX_MAX __UINTMAX_MAX__
+/* C2x 7.20.2.5 Width of greatest-width integer types. */
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+ in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+/* NB: The C standard requires that these be the same value, but the compiler
+ exposes separate internal width macros. */
+#define INTMAX_WIDTH __INTMAX_WIDTH__
+#define UINTMAX_WIDTH __UINTMAX_WIDTH__
+#endif
+
/* C99 7.18.3 Limits of other integer types. */
#define SIG_ATOMIC_MIN __INTN_MIN(__SIG_ATOMIC_WIDTH__)
#define SIG_ATOMIC_MAX __INTN_MAX(__SIG_ATOMIC_WIDTH__)
@@ -689,5 +846,16 @@ typedef __UINTMAX_TYPE__ uintmax_t;
#define INTMAX_C(v) __int_c(v, __INTMAX_C_SUFFIX__)
#define UINTMAX_C(v) __int_c(v, __UINTMAX_C_SUFFIX__)
+/* C2x 7.20.3.x Width of other integer types. */
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+ in C2x mode; switch to the correct values once they've been published. */
+#if __STDC_VERSION__ >= 202000L
+#define PTRDIFF_WIDTH __PTRDIFF_WIDTH__
+#define SIG_ATOMIC_WIDTH __SIG_ATOMIC_WIDTH__
+#define SIZE_WIDTH __SIZE_WIDTH__
+#define WCHAR_WIDTH __WCHAR_WIDTH__
+#define WINT_WIDTH __WINT_WIDTH__
+#endif
+
#endif /* __STDC_HOSTED__ */
#endif /* __CLANG_STDINT_H */
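
As in limits.h, every width macro added here is gated on the C2x placeholder version check. A probe sketch, assuming -std=c2x:

#include <stdint.h>
#include <stdio.h>

int main(void) {
#if defined(INTPTR_WIDTH) && defined(INTMAX_WIDTH)
  printf("intptr_t is %d bits, intmax_t is %d bits\n",
         INTPTR_WIDTH, INTMAX_WIDTH);
#endif
  return 0;
}
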
diff --git a/contrib/llvm-project/clang/lib/Headers/tmmintrin.h b/contrib/llvm-project/clang/lib/Headers/tmmintrin.h
index bcffa8187801..cb9be2349de5 100644
--- a/contrib/llvm-project/clang/lib/Headers/tmmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/tmmintrin.h
@@ -53,7 +53,7 @@ _mm_abs_pi8(__m64 __a)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_abs_epi8(__m128i __a)
{
- return (__m128i)__builtin_ia32_pabsb128((__v16qi)__a);
+ return (__m128i)__builtin_elementwise_abs((__v16qs)__a);
}
/// Computes the absolute value of each of the packed 16-bit signed
@@ -89,7 +89,7 @@ _mm_abs_pi16(__m64 __a)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_abs_epi16(__m128i __a)
{
- return (__m128i)__builtin_ia32_pabsw128((__v8hi)__a);
+ return (__m128i)__builtin_elementwise_abs((__v8hi)__a);
}
/// Computes the absolute value of each of the packed 32-bit signed
@@ -125,7 +125,7 @@ _mm_abs_pi32(__m64 __a)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_abs_epi32(__m128i __a)
{
- return (__m128i)__builtin_ia32_pabsd128((__v4si)__a);
+ return (__m128i)__builtin_elementwise_abs((__v4si)__a);
}
/// Concatenates the two 128-bit integer vector operands, and
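
The SSSE3 absolute-value intrinsics likewise switch to __builtin_elementwise_abs with no visible change to callers. A sketch assuming -mssse3; abs_epi32 is a hypothetical helper:

#include <tmmintrin.h>

/* Absolute value of each signed 32-bit lane. */
static __m128i abs_epi32(__m128i v) {
  return _mm_abs_epi32(v);
}
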
diff --git a/contrib/llvm-project/clang/lib/Headers/vaesintrin.h b/contrib/llvm-project/clang/lib/Headers/vaesintrin.h
index f3c0807bb94a..294dcff2addd 100644
--- a/contrib/llvm-project/clang/lib/Headers/vaesintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/vaesintrin.h
@@ -82,4 +82,4 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS_F
#undef __DEFAULT_FN_ATTRS
#undef __DEFAULT_FN_ATTRS_F
-#endif
+#endif // __VAESINTRIN_H
diff --git a/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.cpp b/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.cpp
index 84eabc3a210f..4ade8b8bb074 100644
--- a/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.cpp
+++ b/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.cpp
@@ -256,7 +256,7 @@ IncrementalParser::Parse(llvm::StringRef input) {
/*LoadedOffset=*/0, NewLoc);
// NewLoc only used for diags.
- if (PP.EnterSourceFile(FID, /*DirLookup=*/0, NewLoc))
+ if (PP.EnterSourceFile(FID, /*DirLookup=*/nullptr, NewLoc))
return llvm::make_error<llvm::StringError>("Parsing failed. "
"Cannot enter source file.",
std::error_code());
diff --git a/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.h b/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.h
index e5ce798025d9..d1f454f21239 100644
--- a/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.h
+++ b/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.h
@@ -30,9 +30,6 @@ class LLVMContext;
namespace clang {
class ASTConsumer;
class CompilerInstance;
-class CodeGenerator;
-class DeclGroupRef;
-class FrontendAction;
class IncrementalAction;
class Parser;
diff --git a/contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp b/contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp
index a0b60118a1a8..39c125c395ef 100644
--- a/contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp
@@ -29,6 +29,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Capacity.h"
#include "llvm/Support/Errc.h"
@@ -89,16 +90,10 @@ HeaderSearch::HeaderSearch(std::shared_ptr<HeaderSearchOptions> HSOpts,
void HeaderSearch::PrintStats() {
llvm::errs() << "\n*** HeaderSearch Stats:\n"
<< FileInfo.size() << " files tracked.\n";
- unsigned NumOnceOnlyFiles = 0, MaxNumIncludes = 0, NumSingleIncludedFiles = 0;
- for (unsigned i = 0, e = FileInfo.size(); i != e; ++i) {
+ unsigned NumOnceOnlyFiles = 0;
+ for (unsigned i = 0, e = FileInfo.size(); i != e; ++i)
NumOnceOnlyFiles += (FileInfo[i].isPragmaOnce || FileInfo[i].isImport);
- if (MaxNumIncludes < FileInfo[i].NumIncludes)
- MaxNumIncludes = FileInfo[i].NumIncludes;
- NumSingleIncludedFiles += FileInfo[i].NumIncludes == 1;
- }
- llvm::errs() << " " << NumOnceOnlyFiles << " #import/#pragma once files.\n"
- << " " << NumSingleIncludedFiles << " included exactly once.\n"
- << " " << MaxNumIncludes << " max times a file is included.\n";
+ llvm::errs() << " " << NumOnceOnlyFiles << " #import/#pragma once files.\n";
llvm::errs() << " " << NumIncluded << " #include/#include_next/#import.\n"
<< " " << NumMultiIncludeFileOptzn
@@ -108,6 +103,30 @@ void HeaderSearch::PrintStats() {
<< NumSubFrameworkLookups << " subframework lookups.\n";
}
+void HeaderSearch::SetSearchPaths(
+ std::vector<DirectoryLookup> dirs, unsigned int angledDirIdx,
+ unsigned int systemDirIdx, bool noCurDirSearch,
+ llvm::DenseMap<unsigned int, unsigned int> searchDirToHSEntry) {
+ assert(angledDirIdx <= systemDirIdx && systemDirIdx <= dirs.size() &&
+ "Directory indices are unordered");
+ SearchDirs = std::move(dirs);
+ SearchDirsUsage.assign(SearchDirs.size(), false);
+ AngledDirIdx = angledDirIdx;
+ SystemDirIdx = systemDirIdx;
+ NoCurDirSearch = noCurDirSearch;
+ SearchDirToHSEntry = std::move(searchDirToHSEntry);
+ //LookupFileCache.clear();
+}
+
+void HeaderSearch::AddSearchPath(const DirectoryLookup &dir, bool isAngled) {
+ unsigned idx = isAngled ? SystemDirIdx : AngledDirIdx;
+ SearchDirs.insert(SearchDirs.begin() + idx, dir);
+ SearchDirsUsage.insert(SearchDirsUsage.begin() + idx, false);
+ if (!isAngled)
+ AngledDirIdx++;
+ SystemDirIdx++;
+}
+
std::vector<bool> HeaderSearch::computeUserEntryUsage() const {
std::vector<bool> UserEntryUsage(HSOpts->UserEntries.size());
for (unsigned I = 0, E = SearchDirsUsage.size(); I < E; ++I) {
@@ -720,7 +739,8 @@ static const char *copyString(StringRef Str, llvm::BumpPtrAllocator &Alloc) {
}
static bool isFrameworkStylePath(StringRef Path, bool &IsPrivateHeader,
- SmallVectorImpl<char> &FrameworkName) {
+ SmallVectorImpl<char> &FrameworkName,
+ SmallVectorImpl<char> &IncludeSpelling) {
using namespace llvm::sys;
path::const_iterator I = path::begin(Path);
path::const_iterator E = path::end(Path);
@@ -736,15 +756,22 @@ static bool isFrameworkStylePath(StringRef Path, bool &IsPrivateHeader,
// and some other variations among these lines.
int FoundComp = 0;
while (I != E) {
- if (*I == "Headers")
+ if (*I == "Headers") {
++FoundComp;
- if (I->endswith(".framework")) {
- FrameworkName.append(I->begin(), I->end());
- ++FoundComp;
- }
- if (*I == "PrivateHeaders") {
+ } else if (*I == "PrivateHeaders") {
++FoundComp;
IsPrivateHeader = true;
+ } else if (I->endswith(".framework")) {
+ StringRef Name = I->drop_back(10); // Drop .framework
+ // Need to reset the strings and counter to support nested frameworks.
+ FrameworkName.clear();
+ FrameworkName.append(Name.begin(), Name.end());
+ IncludeSpelling.clear();
+ IncludeSpelling.append(Name.begin(), Name.end());
+ FoundComp = 1;
+ } else if (FoundComp >= 2) {
+ IncludeSpelling.push_back('/');
+ IncludeSpelling.append(I->begin(), I->end());
}
++I;
}
@@ -759,20 +786,24 @@ diagnoseFrameworkInclude(DiagnosticsEngine &Diags, SourceLocation IncludeLoc,
bool FoundByHeaderMap = false) {
bool IsIncluderPrivateHeader = false;
SmallString<128> FromFramework, ToFramework;
- if (!isFrameworkStylePath(Includer, IsIncluderPrivateHeader, FromFramework))
+ SmallString<128> FromIncludeSpelling, ToIncludeSpelling;
+ if (!isFrameworkStylePath(Includer, IsIncluderPrivateHeader, FromFramework,
+ FromIncludeSpelling))
return;
bool IsIncludeePrivateHeader = false;
- bool IsIncludeeInFramework = isFrameworkStylePath(
- IncludeFE->getName(), IsIncludeePrivateHeader, ToFramework);
+ bool IsIncludeeInFramework =
+ isFrameworkStylePath(IncludeFE->getName(), IsIncludeePrivateHeader,
+ ToFramework, ToIncludeSpelling);
if (!isAngled && !FoundByHeaderMap) {
SmallString<128> NewInclude("<");
if (IsIncludeeInFramework) {
- NewInclude += ToFramework.str().drop_back(10); // drop .framework
- NewInclude += "/";
+ NewInclude += ToIncludeSpelling;
+ NewInclude += ">";
+ } else {
+ NewInclude += IncludeFilename;
+ NewInclude += ">";
}
- NewInclude += IncludeFilename;
- NewInclude += ">";
Diags.Report(IncludeLoc, diag::warn_quoted_include_in_framework_header)
<< IncludeFilename
<< FixItHint::CreateReplacement(IncludeLoc, NewInclude);
@@ -794,12 +825,15 @@ diagnoseFrameworkInclude(DiagnosticsEngine &Diags, SourceLocation IncludeLoc,
/// search is needed. Microsoft mode will pass all \#including files.
Optional<FileEntryRef> HeaderSearch::LookupFile(
StringRef Filename, SourceLocation IncludeLoc, bool isAngled,
- const DirectoryLookup *FromDir, const DirectoryLookup *&CurDir,
+ const DirectoryLookup *FromDir, const DirectoryLookup **CurDirArg,
ArrayRef<std::pair<const FileEntry *, const DirectoryEntry *>> Includers,
SmallVectorImpl<char> *SearchPath, SmallVectorImpl<char> *RelativePath,
Module *RequestingModule, ModuleMap::KnownHeader *SuggestedModule,
bool *IsMapped, bool *IsFrameworkFound, bool SkipCache,
bool BuildSystemModule) {
+ const DirectoryLookup *CurDirLocal = nullptr;
+ const DirectoryLookup *&CurDir = CurDirArg ? *CurDirArg : CurDirLocal;
+
if (IsMapped)
*IsMapped = false;
@@ -1046,7 +1080,7 @@ Optional<FileEntryRef> HeaderSearch::LookupFile(
ScratchFilename += Filename;
Optional<FileEntryRef> File = LookupFile(
- ScratchFilename, IncludeLoc, /*isAngled=*/true, FromDir, CurDir,
+ ScratchFilename, IncludeLoc, /*isAngled=*/true, FromDir, &CurDir,
Includers.front(), SearchPath, RelativePath, RequestingModule,
SuggestedModule, IsMapped, /*IsFrameworkFound=*/nullptr);
@@ -1203,7 +1237,6 @@ static void mergeHeaderFileInfo(HeaderFileInfo &HFI,
HFI.isImport |= OtherHFI.isImport;
HFI.isPragmaOnce |= OtherHFI.isPragmaOnce;
HFI.isModuleHeader |= OtherHFI.isModuleHeader;
- HFI.NumIncludes += OtherHFI.NumIncludes;
if (!HFI.ControllingMacro && !HFI.ControllingMacroID) {
HFI.ControllingMacro = OtherHFI.ControllingMacro;
@@ -1364,7 +1397,7 @@ bool HeaderSearch::ShouldEnterIncludeFile(Preprocessor &PP,
FileInfo.isImport = true;
// Has this already been #import'ed or #include'd?
- if (FileInfo.NumIncludes && !TryEnterImported())
+ if (PP.alreadyIncluded(File) && !TryEnterImported())
return false;
} else {
// Otherwise, if this is a #include of a file that was previously #import'd
@@ -1387,10 +1420,7 @@ bool HeaderSearch::ShouldEnterIncludeFile(Preprocessor &PP,
}
}
- // Increment the number of times this file has been included.
- ++FileInfo.NumIncludes;
-
- IsFirstIncludeOfFile = FileInfo.NumIncludes == 1;
+ IsFirstIncludeOfFile = PP.markIncluded(File);
return true;
}
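In the hunks above the per-file NumIncludes counter is gone; the Preprocessor now answers the same questions through PP.alreadyIncluded(File) and PP.markIncluded(File), where markIncluded reports whether this is the first inclusion. A simplified standalone analogue (plain std::unordered_set, not the clang types), just to pin down the intended semantics:

#include <cassert>
#include <string>
#include <unordered_set>

struct IncludeTracker {
  std::unordered_set<std::string> Included;

  // True only the first time File is recorded, mirroring IsFirstIncludeOfFile.
  bool markIncluded(const std::string &File) {
    return Included.insert(File).second;
  }
  bool alreadyIncluded(const std::string &File) const {
    return Included.count(File) != 0;
  }
};

int main() {
  IncludeTracker T;
  assert(T.markIncluded("a.h"));    // first inclusion
  assert(!T.markIncluded("a.h"));   // later inclusions
  assert(T.alreadyIncluded("a.h") && !T.alreadyIncluded("b.h"));
}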
@@ -1779,11 +1809,8 @@ void HeaderSearch::collectAllModules(SmallVectorImpl<Module *> &Modules) {
}
// Populate the list of modules.
- for (ModuleMap::module_iterator M = ModMap.module_begin(),
- MEnd = ModMap.module_end();
- M != MEnd; ++M) {
- Modules.push_back(M->getValue());
- }
+ llvm::transform(ModMap.modules(), std::back_inserter(Modules),
+ [](const auto &NameAndMod) { return NameAndMod.second; });
}
void HeaderSearch::loadTopLevelSystemModules() {
@@ -1843,9 +1870,9 @@ std::string HeaderSearch::suggestPathToFileForDiagnostics(
using namespace llvm::sys;
unsigned BestPrefixLength = 0;
- // Checks whether Dir and File shares a common prefix, if they do and that's
- // the longest prefix we've seen so for it returns true and updates the
- // BestPrefixLength accordingly.
+ // Checks whether `Dir` is a strict path prefix of `File`. If so and that's
+ // the longest prefix we've seen so for it, returns true and updates the
+ // `BestPrefixLength` accordingly.
auto CheckDir = [&](llvm::StringRef Dir) -> bool {
llvm::SmallString<32> DirPath(Dir.begin(), Dir.end());
if (!WorkingDir.empty() && !path::is_absolute(Dir))
@@ -1880,26 +1907,48 @@ std::string HeaderSearch::suggestPathToFileForDiagnostics(
path::is_separator(NI->front()) && path::is_separator(DI->front()))
continue;
+ // Special case Apple .sdk folders since the search path is typically a
+ // symlink like `iPhoneSimulator14.5.sdk` while the file is instead
+ // located in `iPhoneSimulator.sdk` (the real folder).
+ if (NI->endswith(".sdk") && DI->endswith(".sdk")) {
+ StringRef NBasename = path::stem(*NI);
+ StringRef DBasename = path::stem(*DI);
+ if (DBasename.startswith(NBasename))
+ continue;
+ }
+
if (*NI != *DI)
break;
}
return false;
};
+ bool BestPrefixIsFramework = false;
for (unsigned I = 0; I != SearchDirs.size(); ++I) {
- // FIXME: Support this search within frameworks.
- if (!SearchDirs[I].isNormalDir())
- continue;
-
- StringRef Dir = SearchDirs[I].getDir()->getName();
- if (CheckDir(Dir) && IsSystem)
- *IsSystem = BestPrefixLength ? I >= SystemDirIdx : false;
+ if (SearchDirs[I].isNormalDir()) {
+ StringRef Dir = SearchDirs[I].getDir()->getName();
+ if (CheckDir(Dir)) {
+ if (IsSystem)
+ *IsSystem = BestPrefixLength ? I >= SystemDirIdx : false;
+ BestPrefixIsFramework = false;
+ }
+ } else if (SearchDirs[I].isFramework()) {
+ StringRef Dir = SearchDirs[I].getFrameworkDir()->getName();
+ if (CheckDir(Dir)) {
+ if (IsSystem)
+ *IsSystem = BestPrefixLength ? I >= SystemDirIdx : false;
+ BestPrefixIsFramework = true;
+ }
+ }
}
// Try to shorten include path using TUs directory, if we couldn't find any
// suitable prefix in include search paths.
- if (!BestPrefixLength && CheckDir(path::parent_path(MainFile)) && IsSystem)
- *IsSystem = false;
+ if (!BestPrefixLength && CheckDir(path::parent_path(MainFile))) {
+ if (IsSystem)
+ *IsSystem = false;
+ BestPrefixIsFramework = false;
+ }
// Try resolving resulting filename via reverse search in header maps,
// key from header name is user prefered name for the include file.
@@ -1912,8 +1961,19 @@ std::string HeaderSearch::suggestPathToFileForDiagnostics(
SearchDirs[I].getHeaderMap()->reverseLookupFilename(Filename);
if (!SpelledFilename.empty()) {
Filename = SpelledFilename;
+ BestPrefixIsFramework = false;
break;
}
}
+
+ // If the best prefix is a framework path, we need to compute the proper
+ // include spelling for the framework header.
+ bool IsPrivateHeader;
+ SmallString<128> FrameworkName, IncludeSpelling;
+ if (BestPrefixIsFramework &&
+ isFrameworkStylePath(Filename, IsPrivateHeader, FrameworkName,
+ IncludeSpelling)) {
+ Filename = IncludeSpelling;
+ }
return path::convert_to_slash(Filename);
}
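isFrameworkStylePath now also produces the include spelling rooted at the innermost framework, so the quoted-include fix-it and suggestPathToFileForDiagnostics can propose <Framework/Subdir/Header.h> instead of gluing the framework name onto the written filename. A simplified standalone sketch of that component walk (std::string only, no llvm::sys::path, and it drops the FrameworkName/IsPrivateHeader outputs):

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

static std::string includeSpellingFor(const std::string &Path) {
  std::vector<std::string> Comps;
  std::stringstream In(Path);
  for (std::string C; std::getline(In, C, '/');)
    if (!C.empty())
      Comps.push_back(C);

  const std::string Suffix = ".framework";
  std::string Spelling;
  int FoundComp = 0;
  for (const std::string &C : Comps) {
    if (C == "Headers" || C == "PrivateHeaders") {
      ++FoundComp;
    } else if (C.size() > Suffix.size() &&
               C.compare(C.size() - Suffix.size(), Suffix.size(), Suffix) == 0) {
      // A nested framework resets the spelling to its own name.
      Spelling = C.substr(0, C.size() - Suffix.size());
      FoundComp = 1;
    } else if (FoundComp >= 2) {
      // Everything below Headers/PrivateHeaders becomes part of the spelling.
      Spelling += '/';
      Spelling += C;
    }
  }
  return Spelling;
}

int main() {
  // Prints "Bar/Sub/Baz.h": the innermost framework wins and subdirectories survive.
  std::cout << includeSpellingFor(
                   "/SDK/Foo.framework/Frameworks/Bar.framework/Headers/Sub/Baz.h")
            << "\n";
}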
diff --git a/contrib/llvm-project/clang/lib/Frontend/InitHeaderSearch.cpp b/contrib/llvm-project/clang/lib/Lex/InitHeaderSearch.cpp
index ed1314f3b03d..ec7b1505c7bf 100644
--- a/contrib/llvm-project/clang/lib/Frontend/InitHeaderSearch.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/InitHeaderSearch.cpp
@@ -10,11 +10,10 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Basic/DiagnosticFrontend.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Config/config.h" // C_INCLUDE_DIRS
-#include "clang/Frontend/FrontendDiagnostic.h"
-#include "clang/Frontend/Utils.h"
#include "clang/Lex/HeaderMap.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/HeaderSearchOptions.h"
@@ -354,7 +353,7 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
break;
case llvm::Triple::PS4: {
// <isysroot> gets prepended later in AddPath().
- std::string BaseSDKPath = "";
+ std::string BaseSDKPath;
if (!HasSysroot) {
const char *envValue = getenv("SCE_ORBIS_SDK_DIR");
if (envValue)
diff --git a/contrib/llvm-project/clang/lib/Lex/Lexer.cpp b/contrib/llvm-project/clang/lib/Lex/Lexer.cpp
index 38467a1835d0..89e89c7c1f17 100644
--- a/contrib/llvm-project/clang/lib/Lex/Lexer.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/Lexer.cpp
@@ -2548,9 +2548,9 @@ static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr,
assert(CurPtr[0] == '\n' || CurPtr[0] == '\r');
// Position of the first trigraph in the ending sequence.
- const char *TrigraphPos = 0;
+ const char *TrigraphPos = nullptr;
// Position of the first whitespace after a '\' in the ending sequence.
- const char *SpacePos = 0;
+ const char *SpacePos = nullptr;
while (true) {
// Back up off the newline.
diff --git a/contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp b/contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp
index ef7a5351953e..f3aefdd22b51 100644
--- a/contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp
@@ -818,10 +818,13 @@ Preprocessor::getHeaderToIncludeForDiagnostics(SourceLocation IncLoc,
Optional<FileEntryRef> Preprocessor::LookupFile(
SourceLocation FilenameLoc, StringRef Filename, bool isAngled,
const DirectoryLookup *FromDir, const FileEntry *FromFile,
- const DirectoryLookup *&CurDir, SmallVectorImpl<char> *SearchPath,
+ const DirectoryLookup **CurDirArg, SmallVectorImpl<char> *SearchPath,
SmallVectorImpl<char> *RelativePath,
ModuleMap::KnownHeader *SuggestedModule, bool *IsMapped,
bool *IsFrameworkFound, bool SkipCache) {
+ const DirectoryLookup *CurDirLocal = nullptr;
+ const DirectoryLookup *&CurDir = CurDirArg ? *CurDirArg : CurDirLocal;
+
Module *RequestingModule = getModuleForLocation(FilenameLoc);
bool RequestingModuleIsModuleInterface = !SourceMgr.isInMainFile(FilenameLoc);
@@ -877,7 +880,7 @@ Optional<FileEntryRef> Preprocessor::LookupFile(
const DirectoryLookup *TmpCurDir = CurDir;
const DirectoryLookup *TmpFromDir = nullptr;
while (Optional<FileEntryRef> FE = HeaderInfo.LookupFile(
- Filename, FilenameLoc, isAngled, TmpFromDir, TmpCurDir,
+ Filename, FilenameLoc, isAngled, TmpFromDir, &TmpCurDir,
Includers, SearchPath, RelativePath, RequestingModule,
SuggestedModule, /*IsMapped=*/nullptr,
/*IsFrameworkFound=*/nullptr, SkipCache)) {
@@ -895,7 +898,7 @@ Optional<FileEntryRef> Preprocessor::LookupFile(
// Do a standard file entry lookup.
Optional<FileEntryRef> FE = HeaderInfo.LookupFile(
- Filename, FilenameLoc, isAngled, FromDir, CurDir, Includers, SearchPath,
+ Filename, FilenameLoc, isAngled, FromDir, &CurDir, Includers, SearchPath,
RelativePath, RequestingModule, SuggestedModule, IsMapped,
IsFrameworkFound, SkipCache, BuildSystemModule);
if (FE) {
@@ -1827,7 +1830,7 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
}
Optional<FileEntryRef> Preprocessor::LookupHeaderIncludeOrImport(
- const DirectoryLookup *&CurDir, StringRef& Filename,
+ const DirectoryLookup **CurDir, StringRef& Filename,
SourceLocation FilenameLoc, CharSourceRange FilenameRange,
const Token &FilenameTok, bool &IsFrameworkFound, bool IsImportDecl,
bool &IsMapped, const DirectoryLookup *LookupFrom,
@@ -2028,7 +2031,7 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
}
Optional<FileEntryRef> File = LookupHeaderIncludeOrImport(
- CurDir, Filename, FilenameLoc, FilenameRange, FilenameTok,
+ &CurDir, Filename, FilenameLoc, FilenameRange, FilenameTok,
IsFrameworkFound, IsImportDecl, IsMapped, LookupFrom, LookupFromFile,
LookupFilename, RelativePath, SearchPath, SuggestedModule, isAngled);
@@ -2055,7 +2058,7 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
// include cycle. Don't enter already processed files again as it can lead to
// reaching the max allowed include depth again.
if (Action == Enter && HasReachedMaxIncludeDepth && File &&
- HeaderInfo.getFileInfo(&File->getFileEntry()).NumIncludes)
+ alreadyIncluded(*File))
Action = IncludeLimitReached;
// Determine whether we should try to import the module for this #include, if
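Across Preprocessor::LookupFile, HeaderSearch::LookupFile and LookupHeaderIncludeOrImport the CurDir reference out-parameter becomes an optional pointer, so callers that never look at it (the __has_include and #pragma paths further below) can pass nullptr instead of declaring a dummy local. A generic sketch of the idiom, not clang code:

#include <iostream>

struct DirectoryLookup { const char *Name; };

static const DirectoryLookup Dirs[] = {{"/usr/include"}, {"/usr/local/include"}};

static bool lookupFile(const char *Filename, const DirectoryLookup **CurDirArg) {
  // Local fallback keeps the body written against a plain reference.
  const DirectoryLookup *CurDirLocal = nullptr;
  const DirectoryLookup *&CurDir = CurDirArg ? *CurDirArg : CurDirLocal;

  (void)Filename;      // pretend the lookup succeeded in the second directory
  CurDir = &Dirs[1];
  return true;
}

int main() {
  const DirectoryLookup *Where = nullptr;
  lookupFile("foo.h", &Where);                  // caller that wants the directory
  std::cout << "found under " << Where->Name << "\n";

  lookupFile("bar.h", /*CurDirArg=*/nullptr);   // caller that does not care
}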
diff --git a/contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp b/contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp
index cfee7a3c2513..f6c95a8b67c6 100644
--- a/contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp
@@ -1228,10 +1228,9 @@ static bool EvaluateHasIncludeCommon(Token &Tok,
return false;
// Search include directories.
- const DirectoryLookup *CurDir;
Optional<FileEntryRef> File =
PP.LookupFile(FilenameLoc, Filename, isAngled, LookupFrom, LookupFromFile,
- CurDir, nullptr, nullptr, nullptr, nullptr, nullptr);
+ nullptr, nullptr, nullptr, nullptr, nullptr, nullptr);
if (PPCallbacks *Callbacks = PP.getPPCallbacks()) {
SrcMgr::CharacteristicKind FileType = SrcMgr::C_User;
diff --git a/contrib/llvm-project/clang/lib/Lex/Pragma.cpp b/contrib/llvm-project/clang/lib/Lex/Pragma.cpp
index 67daa5841983..eb7e7cbc4714 100644
--- a/contrib/llvm-project/clang/lib/Lex/Pragma.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/Pragma.cpp
@@ -526,9 +526,8 @@ static llvm::Optional<Token> LexHeader(Preprocessor &PP,
return llvm::None;
// Search include directories for this file.
- const DirectoryLookup *CurDir;
File = PP.LookupFile(FilenameTok.getLocation(), Filename, isAngled, nullptr,
- nullptr, CurDir, nullptr, nullptr, nullptr, nullptr,
+ nullptr, nullptr, nullptr, nullptr, nullptr, nullptr,
nullptr);
if (!File) {
if (!SuppressIncludeNotFoundError)
diff --git a/contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp b/contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp
index b026ae36fc0f..3c338a2b8123 100644
--- a/contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp
@@ -549,7 +549,7 @@ void Preprocessor::EnterMainSourceFile() {
// Tell the header info that the main file was entered. If the file is later
// #imported, it won't be re-entered.
if (const FileEntry *FE = SourceMgr.getFileEntryForID(MainFileID))
- HeaderInfo.IncrementIncludeCount(FE);
+ markIncluded(FE);
}
// Preprocess Predefines to populate the initial preprocessor state.
@@ -566,11 +566,10 @@ void Preprocessor::EnterMainSourceFile() {
if (!PPOpts->PCHThroughHeader.empty()) {
// Lookup and save the FileID for the through header. If it isn't found
// in the search path, it's a fatal error.
- const DirectoryLookup *CurDir;
Optional<FileEntryRef> File = LookupFile(
SourceLocation(), PPOpts->PCHThroughHeader,
- /*isAngled=*/false, /*FromDir=*/nullptr, /*FromFile=*/nullptr, CurDir,
- /*SearchPath=*/nullptr, /*RelativePath=*/nullptr,
+ /*isAngled=*/false, /*FromDir=*/nullptr, /*FromFile=*/nullptr,
+ /*CurDir=*/nullptr, /*SearchPath=*/nullptr, /*RelativePath=*/nullptr,
/*SuggestedModule=*/nullptr, /*IsMapped=*/nullptr,
/*IsFrameworkFound=*/nullptr);
if (!File) {
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp b/contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp
index 19cddc69ebfc..7330c2b7593d 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp
@@ -140,8 +140,22 @@ NamedDecl *Parser::ParseCXXInlineMethodDef(
// function body.
if (ConsumeAndStoreFunctionPrologue(Toks)) {
// We didn't find the left-brace we expected after the
- // constructor initializer; we already printed an error, and it's likely
- // impossible to recover, so don't try to parse this method later.
+ // constructor initializer.
+
+ // If we're code-completing and the completion point was in the broken
+ // initializer, we want to parse it even though that will fail.
+ if (PP.isCodeCompletionEnabled() &&
+ llvm::any_of(Toks, [](const Token &Tok) {
+ return Tok.is(tok::code_completion);
+ })) {
+ // If we gave up at the completion point, the initializer list was
+ // likely truncated, so don't eat more tokens. We'll hit some extra
+ // errors, but they should be ignored in code completion.
+ return FnD;
+ }
+
+ // We already printed an error, and it's likely impossible to recover,
+ // so don't try to parse this method later.
// Skip over the rest of the decl and back to somewhere that looks
// reasonable.
SkipMalformedDecl();
@@ -803,7 +817,7 @@ bool Parser::ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
// We always want this function to consume at least one token if the first
// token isn't T and if not at EOF.
bool isFirstTokenConsumed = true;
- while (1) {
+ while (true) {
// If we found one of the tokens, stop and return true.
if (Tok.is(T1) || Tok.is(T2)) {
if (ConsumeFinalToken) {
@@ -1159,7 +1173,7 @@ bool Parser::ConsumeAndStoreInitializer(CachedTokens &Toks,
unsigned AngleCount = 0;
unsigned KnownTemplateCount = 0;
- while (1) {
+ while (true) {
switch (Tok.getKind()) {
case tok::comma:
// If we might be in a template, perform a tentative parse to check.
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp b/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp
index 0c1f88bc51d1..f21938c81689 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp
@@ -2419,8 +2419,9 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
auto ThisVarDecl = dyn_cast_or_null<VarDecl>(ThisDecl);
auto RunSignatureHelp = [&]() {
QualType PreferredType = Actions.ProduceConstructorSignatureHelp(
- getCurScope(), ThisVarDecl->getType()->getCanonicalTypeInternal(),
- ThisDecl->getLocation(), Exprs, T.getOpenLocation());
+ ThisVarDecl->getType()->getCanonicalTypeInternal(),
+ ThisDecl->getLocation(), Exprs, T.getOpenLocation(),
+ /*Braced=*/false);
CalledSignatureHelp = true;
return PreferredType;
};
@@ -2439,8 +2440,9 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
if (ParseExpressionList(Exprs, CommaLocs, ExpressionStarts)) {
if (ThisVarDecl && PP.isCodeCompletionReached() && !CalledSignatureHelp) {
Actions.ProduceConstructorSignatureHelp(
- getCurScope(), ThisVarDecl->getType()->getCanonicalTypeInternal(),
- ThisDecl->getLocation(), Exprs, T.getOpenLocation());
+ ThisVarDecl->getType()->getCanonicalTypeInternal(),
+ ThisDecl->getLocation(), Exprs, T.getOpenLocation(),
+ /*Braced=*/false);
CalledSignatureHelp = true;
}
Actions.ActOnInitializerError(ThisDecl);
@@ -3078,7 +3080,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
ParsedAttributesWithRange attrs(AttrFactory);
// We use Sema's policy to get bool macros right.
PrintingPolicy Policy = Actions.getPrintingPolicy();
- while (1) {
+ while (true) {
bool isInvalid = false;
bool isStorageClass = false;
const char *PrevSpec = nullptr;
@@ -3574,12 +3576,13 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
}
}
ConsumedEnd = Tok.getLocation();
+ DS.setTypeofParensRange(Tracker.getRange());
// Even if something went wrong above, continue as if we've seen
// `decltype(auto)`.
isInvalid = DS.SetTypeSpecType(TST_decltype_auto, Loc, PrevSpec,
DiagID, TemplateId, Policy);
} else {
- isInvalid = DS.SetTypeSpecType(TST_auto, Loc, PrevSpec, DiagID,
+ isInvalid = DS.SetTypeSpecType(TST_auto, AutoLoc, PrevSpec, DiagID,
TemplateId, Policy);
}
break;
@@ -4269,7 +4272,7 @@ void Parser::ParseStructDeclaration(
// Read struct-declarators until we find the semicolon.
bool FirstDeclarator = true;
SourceLocation CommaLoc;
- while (1) {
+ while (true) {
ParsingFieldDeclarator DeclaratorInfo(*this, DS);
DeclaratorInfo.D.setCommaLoc(CommaLoc);
@@ -4536,7 +4539,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
CXXScopeSpec Spec;
if (ParseOptionalCXXScopeSpecifier(Spec, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/true))
return;
@@ -5420,7 +5423,7 @@ bool Parser::isConstructorDeclarator(bool IsUnqualified, bool DeductionGuide) {
// Parse the C++ scope specifier.
CXXScopeSpec SS;
if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/true)) {
TPA.Revert();
return false;
@@ -5578,7 +5581,7 @@ void Parser::ParseTypeQualifierListOpt(
SourceLocation EndLoc;
- while (1) {
+ while (true) {
bool isInvalid = false;
const char *PrevSpec = nullptr;
unsigned DiagID = 0;
@@ -5802,7 +5805,7 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
D.getContext() == DeclaratorContext::Member;
CXXScopeSpec SS;
ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false, EnteringContext);
+ /*ObjectHasErrors=*/false, EnteringContext);
if (SS.isNotEmpty()) {
if (Tok.isNot(tok::star)) {
@@ -6031,7 +6034,7 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
D.getContext() == DeclaratorContext::Member;
ParseOptionalCXXScopeSpecifier(
D.getCXXScopeSpec(), /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false, EnteringContext);
+ /*ObjectHasErrors=*/false, EnteringContext);
}
if (D.getCXXScopeSpec().isValid()) {
@@ -6270,7 +6273,7 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
if (D.hasName() && !D.getNumTypeObjects())
MaybeParseCXX11Attributes(D);
- while (1) {
+ while (true) {
if (Tok.is(tok::l_paren)) {
bool IsFunctionDeclaration = D.isFunctionDeclaratorAFunctionDeclaration();
// Enter function-declaration scope, limiting any declarators to the
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp b/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp
index f5a6ffcff9e9..c08a586604b1 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp
@@ -291,7 +291,7 @@ Decl *Parser::ParseNamespaceAlias(SourceLocation NamespaceLoc,
CXXScopeSpec SS;
// Parse (optional) nested-name-specifier.
ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false,
/*MayBePseudoDestructor=*/nullptr,
/*IsTypename=*/false,
@@ -529,7 +529,7 @@ Decl *Parser::ParseUsingDirective(DeclaratorContext Context,
CXXScopeSpec SS;
// Parse (optional) nested-name-specifier.
ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false,
/*MayBePseudoDestructor=*/nullptr,
/*IsTypename=*/false,
@@ -598,7 +598,7 @@ bool Parser::ParseUsingDeclarator(DeclaratorContext Context,
// Parse nested-name-specifier.
IdentifierInfo *LastII = nullptr;
if (ParseOptionalCXXScopeSpecifier(D.SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false,
/*MayBePseudoDtor=*/nullptr,
/*IsTypename=*/false,
@@ -1007,6 +1007,9 @@ SourceLocation Parser::ParseDecltypeSpecifier(DeclSpec &DS) {
if (Tok.is(tok::annot_decltype)) {
Result = getExprAnnotation(Tok);
EndLoc = Tok.getAnnotationEndLoc();
+ // Unfortunately, we don't know the LParen source location as the annotated
+ // token doesn't have it.
+ DS.setTypeofParensRange(SourceRange(SourceLocation(), EndLoc));
ConsumeAnnotationToken();
if (Result.isInvalid()) {
DS.SetTypeSpecError();
@@ -1071,6 +1074,7 @@ SourceLocation Parser::ParseDecltypeSpecifier(DeclSpec &DS) {
// Match the ')'
T.consumeClose();
+ DS.setTypeofParensRange(T.getRange());
if (T.getCloseLocation().isInvalid()) {
DS.SetTypeSpecError();
// FIXME: this should return the location of the last token
@@ -1190,7 +1194,7 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
// Parse optional nested-name-specifier
CXXScopeSpec SS;
if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false))
return true;
@@ -1609,7 +1613,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
CXXScopeSpec Spec;
bool HasValidSpec = true;
if (ParseOptionalCXXScopeSpecifier(Spec, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
EnteringContext)) {
DS.SetTypeSpecError();
HasValidSpec = false;
@@ -2605,7 +2609,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// Collect the scope specifier token we annotated earlier.
CXXScopeSpec SS;
ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false);
if (SS.isInvalid()) {
@@ -2905,7 +2909,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// member-declarator
// member-declarator-list ',' member-declarator
- while (1) {
+ while (true) {
InClassInitStyle HasInClassInit = ICIS_NoInit;
bool HasStaticInitializer = false;
if (Tok.isOneOf(tok::equal, tok::l_brace) && PureSpecLoc.isInvalid()) {
@@ -3674,7 +3678,7 @@ MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
// parse '::'[opt] nested-name-specifier[opt]
CXXScopeSpec SS;
if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false))
return true;
@@ -3740,8 +3744,8 @@ MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
if (TemplateTypeTy.isInvalid())
return QualType();
QualType PreferredType = Actions.ProduceCtorInitMemberSignatureHelp(
- getCurScope(), ConstructorDecl, SS, TemplateTypeTy.get(), ArgExprs, II,
- T.getOpenLocation());
+ ConstructorDecl, SS, TemplateTypeTy.get(), ArgExprs, II,
+ T.getOpenLocation(), /*Braced=*/false);
CalledSignatureHelp = true;
return PreferredType;
};
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp b/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp
index 09a3842f5809..fbf79a0a8746 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp
@@ -400,7 +400,7 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
SourceLocation ColonLoc;
auto SavedType = PreferredType;
- while (1) {
+ while (true) {
// Every iteration may rely on a preferred type for the whole expression.
PreferredType = SavedType;
// If this token has a lower precedence than we are allowed to parse (e.g.
@@ -1588,7 +1588,7 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
// cast expression.
CXXScopeSpec SS;
ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false);
AnnotateTemplateIdTokenAsType(SS);
return ParseCastExpression(ParseKind, isAddressOfOperand, NotCastExpr,
@@ -1853,7 +1853,7 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
// parsed, see if there are any postfix-expression pieces here.
SourceLocation Loc;
auto SavedType = PreferredType;
- while (1) {
+ while (true) {
// Each iteration relies on preferred type for the whole expression.
PreferredType = SavedType;
switch (Tok.getKind()) {
@@ -2019,7 +2019,7 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
CommaLocsTy CommaLocs;
auto RunSignatureHelp = [&]() -> QualType {
QualType PreferredType = Actions.ProduceCallSignatureHelp(
- getCurScope(), LHS.get(), ArgExprs, PT.getOpenLocation());
+ LHS.get(), ArgExprs, PT.getOpenLocation());
CalledSignatureHelp = true;
return PreferredType;
};
@@ -2558,7 +2558,7 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
Comps.back().LocStart = Comps.back().LocEnd = ConsumeToken();
// FIXME: This loop leaks the index expressions on error.
- while (1) {
+ while (true) {
if (Tok.is(tok::period)) {
// offsetof-member-designator: offsetof-member-designator '.' identifier
Comps.push_back(Sema::OffsetOfComponent());
@@ -3358,7 +3358,7 @@ bool Parser::ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> ExpressionStarts) {
bool SawError = false;
- while (1) {
+ while (true) {
if (ExpressionStarts)
ExpressionStarts();
@@ -3418,7 +3418,7 @@ bool Parser::ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
bool
Parser::ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs) {
- while (1) {
+ while (true) {
ExprResult Expr = ParseAssignmentExpression();
if (Expr.isInvalid())
return true;
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp b/contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp
index 76c510ddd36c..2d38891c723f 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp
@@ -668,7 +668,7 @@ ExprResult Parser::ParseCXXIdExpression(bool isAddressOfOperand) {
//
CXXScopeSpec SS;
ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false);
Token Replacement;
@@ -1877,8 +1877,8 @@ Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
QualType PreferredType;
if (TypeRep)
PreferredType = Actions.ProduceConstructorSignatureHelp(
- getCurScope(), TypeRep.get()->getCanonicalTypeInternal(),
- DS.getEndLoc(), Exprs, T.getOpenLocation());
+ TypeRep.get()->getCanonicalTypeInternal(), DS.getEndLoc(), Exprs,
+ T.getOpenLocation(), /*Braced=*/false);
CalledSignatureHelp = true;
return PreferredType;
};
@@ -1953,6 +1953,9 @@ Parser::ParseAliasDeclarationInInitStatement(DeclaratorContext Context,
/// \param Loc The location of the start of the statement that requires this
/// condition, e.g., the "for" in a for loop.
///
+/// \param MissingOK Whether an empty condition is acceptable here. Otherwise
+/// it is considered an error to be recovered from.
+///
/// \param FRI If non-null, a for range declaration is permitted, and if
/// present will be parsed and stored here, and a null result will be returned.
///
@@ -1960,11 +1963,10 @@ Parser::ParseAliasDeclarationInInitStatement(DeclaratorContext Context,
/// appropriate moment for a 'for' loop.
///
/// \returns The parsed condition.
-Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
- SourceLocation Loc,
- Sema::ConditionKind CK,
- ForRangeInfo *FRI,
- bool EnterForConditionScope) {
+Sema::ConditionResult
+Parser::ParseCXXCondition(StmtResult *InitStmt, SourceLocation Loc,
+ Sema::ConditionKind CK, bool MissingOK,
+ ForRangeInfo *FRI, bool EnterForConditionScope) {
// Helper to ensure we always enter a continue/break scope if requested.
struct ForConditionScopeRAII {
Scope *S;
@@ -2019,7 +2021,7 @@ Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
}
ConsumeToken();
*InitStmt = Actions.ActOnNullStmt(SemiLoc);
- return ParseCXXCondition(nullptr, Loc, CK);
+ return ParseCXXCondition(nullptr, Loc, CK, MissingOK);
}
// Parse the expression.
@@ -2031,10 +2033,11 @@ Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
WarnOnInit();
*InitStmt = Actions.ActOnExprStmt(Expr.get());
ConsumeToken();
- return ParseCXXCondition(nullptr, Loc, CK);
+ return ParseCXXCondition(nullptr, Loc, CK, MissingOK);
}
- return Actions.ActOnCondition(getCurScope(), Loc, Expr.get(), CK);
+ return Actions.ActOnCondition(getCurScope(), Loc, Expr.get(), CK,
+ MissingOK);
}
case ConditionOrInitStatement::InitStmtDecl: {
@@ -2048,7 +2051,7 @@ Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
DG = ParseSimpleDeclaration(DeclaratorContext::SelectionInit, DeclEnd,
attrs, /*RequireSemi=*/true);
*InitStmt = Actions.ActOnDeclStmt(DG, DeclStart, DeclEnd);
- return ParseCXXCondition(nullptr, Loc, CK);
+ return ParseCXXCondition(nullptr, Loc, CK, MissingOK);
}
case ConditionOrInitStatement::ForRangeDecl: {
@@ -2454,8 +2457,8 @@ bool Parser::ParseUnqualifiedIdTemplateId(
// Parse the enclosed template argument list.
SourceLocation LAngleLoc, RAngleLoc;
TemplateArgList TemplateArgs;
- if (ParseTemplateIdAfterTemplateName(true, LAngleLoc, TemplateArgs,
- RAngleLoc))
+ if (ParseTemplateIdAfterTemplateName(true, LAngleLoc, TemplateArgs, RAngleLoc,
+ Template))
return true;
// If this is a non-template, we already issued a diagnostic.
@@ -3167,8 +3170,9 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
// `new decltype(invalid) (^)`.
if (TypeRep)
PreferredType = Actions.ProduceConstructorSignatureHelp(
- getCurScope(), TypeRep.get()->getCanonicalTypeInternal(),
- DeclaratorInfo.getEndLoc(), ConstructorArgs, ConstructorLParen);
+ TypeRep.get()->getCanonicalTypeInternal(),
+ DeclaratorInfo.getEndLoc(), ConstructorArgs, ConstructorLParen,
+ /*Braced=*/false);
CalledSignatureHelp = true;
return PreferredType;
};
@@ -3585,7 +3589,7 @@ ExprResult Parser::ParseRequiresExpression() {
// We need to consume the typename to allow 'requires { typename a; }'
SourceLocation TypenameKWLoc = ConsumeToken();
- if (TryAnnotateCXXScopeToken()) {
+ if (TryAnnotateOptionalCXXScopeToken()) {
TPA.Commit();
SkipUntil(tok::semi, tok::r_brace, SkipUntilFlags::StopBeforeMatch);
break;
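ParseCXXCondition above now threads a MissingOK flag through its recursive calls: only the for-statement may omit the condition entirely, so ParseForStatement (later in this diff) passes MissingOK=true for the optional second part, while the if/while/switch callers pass false. The grammar point it encodes, as plain C++ (not part of the diff):

int main() {
  int n = 0;
  for (;; ++n) {        // an empty condition is only valid in a for-statement
    if (n == 3)
      break;
  }
  return n - 3;         // exits 0
}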
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseInit.cpp b/contrib/llvm-project/clang/lib/Parse/ParseInit.cpp
index 9d9c03d28a97..fd0faba9c1c1 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseInit.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseInit.cpp
@@ -459,12 +459,22 @@ ExprResult Parser::ParseBraceInitializer() {
Actions, EnterExpressionEvaluationContext::InitList);
bool InitExprsOk = true;
- DesignatorCompletionInfo DesignatorCompletion{
- InitExprs,
- PreferredType.get(T.getOpenLocation()),
+ QualType LikelyType = PreferredType.get(T.getOpenLocation());
+ DesignatorCompletionInfo DesignatorCompletion{InitExprs, LikelyType};
+ bool CalledSignatureHelp = false;
+ auto RunSignatureHelp = [&] {
+ QualType PreferredType;
+ if (!LikelyType.isNull())
+ PreferredType = Actions.ProduceConstructorSignatureHelp(
+ LikelyType->getCanonicalTypeInternal(), T.getOpenLocation(),
+ InitExprs, T.getOpenLocation(), /*Braced=*/true);
+ CalledSignatureHelp = true;
+ return PreferredType;
};
- while (1) {
+ while (true) {
+ PreferredType.enterFunctionArgument(Tok.getLocation(), RunSignatureHelp);
+
// Handle Microsoft __if_exists/if_not_exists if necessary.
if (getLangOpts().MicrosoftExt && (Tok.is(tok::kw___if_exists) ||
Tok.is(tok::kw___if_not_exists))) {
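The signature-help call sites in this diff now carry a Braced flag: false for parenthesized constructor calls, true here in ParseBraceInitializer, because the candidate set genuinely differs between T(args) and T{args}. The standard-library example of why the distinction matters (not part of the diff):

#include <cassert>
#include <vector>

int main() {
  std::vector<int> a(3, 7);   // vector(size_type, value): three sevens
  std::vector<int> b{3, 7};   // initializer_list<int>: the elements 3 and 7
  assert(a.size() == 3 && a[0] == 7);
  assert(b.size() == 2 && b[0] == 3);
}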
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp b/contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp
index 9e145f57d61f..f493ac9b92ca 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp
@@ -134,7 +134,7 @@ Parser::ParseObjCAtClassDeclaration(SourceLocation atLoc) {
SmallVector<SourceLocation, 8> ClassLocs;
SmallVector<ObjCTypeParamList *, 8> ClassTypeParams;
- while (1) {
+ while (true) {
MaybeSkipAttributes(tok::objc_class);
if (expectIdentifier()) {
SkipUntil(tok::semi);
@@ -598,7 +598,7 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
SourceRange AtEnd;
- while (1) {
+ while (true) {
// If this is a method prototype, parse it.
if (Tok.isOneOf(tok::minus, tok::plus)) {
if (Decl *methodPrototype =
@@ -848,7 +848,7 @@ void Parser::ParseObjCPropertyAttribute(ObjCDeclSpec &DS) {
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
- while (1) {
+ while (true) {
if (Tok.is(tok::code_completion)) {
cutOffParsing();
Actions.CodeCompleteObjCPropertyFlags(getCurScope(), DS);
@@ -1149,7 +1149,7 @@ void Parser::ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
assert(Context == DeclaratorContext::ObjCParameter ||
Context == DeclaratorContext::ObjCResult);
- while (1) {
+ while (true) {
if (Tok.is(tok::code_completion)) {
cutOffParsing();
Actions.CodeCompleteObjCPassingType(
@@ -1401,7 +1401,7 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
Scope::FunctionDeclarationScope | Scope::DeclScope);
AttributePool allParamAttrs(AttrFactory);
- while (1) {
+ while (true) {
ParsedAttributes paramAttrs(AttrFactory);
Sema::ObjCArgInfo ArgInfo;
@@ -1531,7 +1531,7 @@ ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &Protocols,
SmallVector<IdentifierLocPair, 8> ProtocolIdents;
- while (1) {
+ while (true) {
if (Tok.is(tok::code_completion)) {
cutOffParsing();
Actions.CodeCompleteObjCProtocolReferences(ProtocolIdents);
@@ -2050,7 +2050,7 @@ Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
ProtocolRefs.push_back(std::make_pair(protocolName, nameLoc));
// Parse the list of forward declarations.
- while (1) {
+ while (true) {
ConsumeToken(); // the ','
if (expectIdentifier()) {
SkipUntil(tok::semi);
@@ -3179,7 +3179,7 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
ExprVector KeyExprs;
if (Tok.is(tok::colon)) {
- while (1) {
+ while (true) {
// Each iteration parses a single keyword argument.
KeyIdents.push_back(selIdent);
KeyLocs.push_back(Loc);
@@ -3599,7 +3599,7 @@ ExprResult Parser::ParseObjCSelectorExpression(SourceLocation AtLoc) {
unsigned nColons = 0;
if (Tok.isNot(tok::r_paren)) {
- while (1) {
+ while (true) {
if (TryConsumeToken(tok::coloncolon)) { // Handle :: in C++.
++nColons;
KeyIdents.push_back(nullptr);
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp b/contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp
index 300b022d83b9..8ad5edb1bcd6 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp
@@ -23,6 +23,7 @@
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/UniqueVector.h"
+#include "llvm/Frontend/OpenMP/OMPAssume.h"
#include "llvm/Frontend/OpenMP/OMPContext.h"
using namespace clang;
@@ -469,8 +470,8 @@ void Parser::ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm) {
SourceLocation LParLoc = T.getOpenLocation();
auto RunSignatureHelp = [this, OmpPrivParm, LParLoc, &Exprs]() {
QualType PreferredType = Actions.ProduceConstructorSignatureHelp(
- getCurScope(), OmpPrivParm->getType()->getCanonicalTypeInternal(),
- OmpPrivParm->getLocation(), Exprs, LParLoc);
+ OmpPrivParm->getType()->getCanonicalTypeInternal(),
+ OmpPrivParm->getLocation(), Exprs, LParLoc, /*Braced=*/false);
CalledSignatureHelp = true;
return PreferredType;
};
@@ -1813,38 +1814,55 @@ parseOpenMPSimpleClause(Parser &P, OpenMPClauseKind Kind) {
void Parser::ParseOMPDeclareTargetClauses(
Sema::DeclareTargetContextInfo &DTCI) {
SourceLocation DeviceTypeLoc;
- bool RequiresToOrLinkClause = false;
- bool HasToOrLinkClause = false;
+ bool RequiresToOrLinkOrIndirectClause = false;
+ bool HasToOrLinkOrIndirectClause = false;
while (Tok.isNot(tok::annot_pragma_openmp_end)) {
OMPDeclareTargetDeclAttr::MapTypeTy MT = OMPDeclareTargetDeclAttr::MT_To;
bool HasIdentifier = Tok.is(tok::identifier);
if (HasIdentifier) {
// If we see any clause we need a to or link clause.
- RequiresToOrLinkClause = true;
+ RequiresToOrLinkOrIndirectClause = true;
IdentifierInfo *II = Tok.getIdentifierInfo();
StringRef ClauseName = II->getName();
bool IsDeviceTypeClause =
getLangOpts().OpenMP >= 50 &&
getOpenMPClauseKind(ClauseName) == OMPC_device_type;
+ bool IsIndirectClause = getLangOpts().OpenMP >= 51 &&
+ getOpenMPClauseKind(ClauseName) == OMPC_indirect;
+ if (DTCI.Indirect.hasValue() && IsIndirectClause) {
+ Diag(Tok, diag::err_omp_more_one_clause)
+ << getOpenMPDirectiveName(OMPD_declare_target)
+ << getOpenMPClauseName(OMPC_indirect) << 0;
+ break;
+ }
bool IsToOrLinkClause =
OMPDeclareTargetDeclAttr::ConvertStrToMapTypeTy(ClauseName, MT);
assert((!IsDeviceTypeClause || !IsToOrLinkClause) && "Cannot be both!");
- if (!IsDeviceTypeClause && DTCI.Kind == OMPD_begin_declare_target) {
+ if (!IsDeviceTypeClause && !IsIndirectClause &&
+ DTCI.Kind == OMPD_begin_declare_target) {
Diag(Tok, diag::err_omp_declare_target_unexpected_clause)
- << ClauseName << 0;
+ << ClauseName << (getLangOpts().OpenMP >= 51 ? 3 : 0);
break;
}
- if (!IsDeviceTypeClause && !IsToOrLinkClause) {
+ if (!IsDeviceTypeClause && !IsToOrLinkClause && !IsIndirectClause) {
Diag(Tok, diag::err_omp_declare_target_unexpected_clause)
- << ClauseName << (getLangOpts().OpenMP >= 50 ? 2 : 1);
+ << ClauseName
+ << (getLangOpts().OpenMP >= 51 ? 4
+ : getLangOpts().OpenMP >= 50 ? 2
+ : 1);
break;
}
- if (IsToOrLinkClause)
- HasToOrLinkClause = true;
+ if (IsToOrLinkClause || IsIndirectClause)
+ HasToOrLinkOrIndirectClause = true;
+ if (IsIndirectClause) {
+ if (!ParseOpenMPIndirectClause(DTCI, /*ParseOnly*/ false))
+ break;
+ continue;
+ }
// Parse 'device_type' clause and go to next clause if any.
if (IsDeviceTypeClause) {
Optional<SimpleClauseData> DevTypeData =
@@ -1910,10 +1928,14 @@ void Parser::ParseOMPDeclareTargetClauses(
ConsumeToken();
}
+ if (DTCI.Indirect.hasValue() && DTCI.DT != OMPDeclareTargetDeclAttr::DT_Any)
+ Diag(DeviceTypeLoc, diag::err_omp_declare_target_indirect_device_type);
+
// For declare target require at least 'to' or 'link' to be present.
- if (DTCI.Kind == OMPD_declare_target && RequiresToOrLinkClause &&
- !HasToOrLinkClause)
- Diag(DTCI.Loc, diag::err_omp_declare_target_missing_to_or_link_clause);
+ if (DTCI.Kind == OMPD_declare_target && RequiresToOrLinkOrIndirectClause &&
+ !HasToOrLinkOrIndirectClause)
+ Diag(DTCI.Loc, diag::err_omp_declare_target_missing_to_or_link_clause)
+ << (getLangOpts().OpenMP >= 51 ? 1 : 0);
SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
}
@@ -2188,12 +2210,12 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
VariantMatchInfo VMI;
TI.getAsVariantMatchInfo(ASTCtx, VMI);
- std::function<void(StringRef)> DiagUnknownTrait = [this, Loc](
- StringRef ISATrait) {
- // TODO Track the selector locations in a way that is accessible here to
- // improve the diagnostic location.
- Diag(Loc, diag::warn_unknown_begin_declare_variant_isa_trait) << ISATrait;
- };
+ std::function<void(StringRef)> DiagUnknownTrait =
+ [this, Loc](StringRef ISATrait) {
+ // TODO Track the selector locations in a way that is accessible here
+ // to improve the diagnostic location.
+ Diag(Loc, diag::warn_unknown_declare_variant_isa_trait) << ISATrait;
+ };
TargetOMPContext OMPCtx(
ASTCtx, std::move(DiagUnknownTrait),
/* CurrentFunctionDecl */ nullptr,
@@ -2282,11 +2304,12 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
case OMPD_declare_target: {
SourceLocation DTLoc = ConsumeAnyToken();
bool HasClauses = Tok.isNot(tok::annot_pragma_openmp_end);
- bool HasImplicitMappings =
- DKind == OMPD_begin_declare_target || !HasClauses;
Sema::DeclareTargetContextInfo DTCI(DKind, DTLoc);
if (HasClauses)
ParseOMPDeclareTargetClauses(DTCI);
+ bool HasImplicitMappings =
+ DKind == OMPD_begin_declare_target || !HasClauses ||
+ (DTCI.ExplicitlyMapped.empty() && DTCI.Indirect.hasValue());
// Skip the last annot_pragma_openmp_end.
ConsumeAnyToken();
@@ -2528,7 +2551,13 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
TPA.Revert();
// End of the first iteration. Parser is reset to the start of metadirective
- TargetOMPContext OMPCtx(ASTContext, /* DiagUnknownTrait */ nullptr,
+ std::function<void(StringRef)> DiagUnknownTrait =
+ [this, Loc](StringRef ISATrait) {
+ // TODO Track the selector locations in a way that is accessible here
+ // to improve the diagnostic location.
+ Diag(Loc, diag::warn_unknown_declare_variant_isa_trait) << ISATrait;
+ };
+ TargetOMPContext OMPCtx(ASTContext, std::move(DiagUnknownTrait),
/* CurrentFunctionDecl */ nullptr,
ArrayRef<llvm::omp::TraitProperty>());
@@ -2936,7 +2965,7 @@ bool Parser::ParseOpenMPSimpleVarList(
if (AllowScopeSpecifier && getLangOpts().CPlusPlus &&
ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false, false)) {
+ /*ObjectHasErrors=*/false, false)) {
IsCorrect = false;
SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
StopBeforeMatch);
@@ -3383,6 +3412,47 @@ OMPClause *Parser::ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
return Actions.ActOnOpenMPSingleExprClause(Kind, Val.get(), Loc, LLoc, RLoc);
}
+/// Parse indirect clause for '#pragma omp declare target' directive.
+/// 'indirect' '[' '(' invoked-by-fptr ')' ']'
+/// where invoked-by-fptr is a constant boolean expression that evaluates to
+/// true or false at compile time.
+bool Parser::ParseOpenMPIndirectClause(Sema::DeclareTargetContextInfo &DTCI,
+ bool ParseOnly) {
+ SourceLocation Loc = ConsumeToken();
+ SourceLocation RLoc;
+
+ if (Tok.isNot(tok::l_paren)) {
+ if (ParseOnly)
+ return false;
+ DTCI.Indirect = nullptr;
+ return true;
+ }
+
+ ExprResult Val =
+ ParseOpenMPParensExpr(getOpenMPClauseName(OMPC_indirect), RLoc);
+ if (Val.isInvalid())
+ return false;
+
+ if (ParseOnly)
+ return false;
+
+ if (!Val.get()->isValueDependent() && !Val.get()->isTypeDependent() &&
+ !Val.get()->isInstantiationDependent() &&
+ !Val.get()->containsUnexpandedParameterPack()) {
+ ExprResult Ret = Actions.CheckBooleanCondition(Loc, Val.get());
+ if (Ret.isInvalid())
+ return false;
+ llvm::APSInt Result;
+ Ret = Actions.VerifyIntegerConstantExpression(Val.get(), &Result,
+ Sema::AllowFold);
+ if (Ret.isInvalid())
+ return false;
+ DTCI.Indirect = Val.get();
+ return true;
+ }
+ return false;
+}
+
/// Parsing of OpenMP clauses that use an interop-var.
///
/// init-clause:
@@ -3817,7 +3887,7 @@ bool Parser::parseMapperModifier(OpenMPVarListDataTy &Data) {
if (getLangOpts().CPlusPlus)
ParseOptionalCXXScopeSpecifier(Data.ReductionOrMapperIdScopeSpec,
/*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false);
if (Tok.isNot(tok::identifier) && Tok.isNot(tok::kw_default)) {
Diag(Tok.getLocation(), diag::err_omp_mapper_illegal_identifier);
@@ -4050,7 +4120,7 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
if (getLangOpts().CPlusPlus)
ParseOptionalCXXScopeSpecifier(Data.ReductionOrMapperIdScopeSpec,
/*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false);
InvalidReductionId = ParseReductionId(
*this, Data.ReductionOrMapperIdScopeSpec, UnqualifiedReductionId);
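ParseOpenMPIndirectClause above accepts the OpenMP 5.1 'indirect' clause on (begin) declare target, with an optional constant boolean argument, and an indirect-only clause list is now treated as implicitly mapped. A usage sketch only, not taken from the commit: it assumes -fopenmp with OpenMP 5.1 enabled plus device support, and device_callee/run are made-up names.

// Illustrative only: indirect device invocation through a function pointer.
#pragma omp begin declare target indirect
void device_callee(void) {}
#pragma omp end declare target

void run(void) {
  void (*fp)(void) = device_callee;
#pragma omp target
  fp();   // with 'indirect', the host pointer is translated for the device
}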
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp b/contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp
index 292ab03e8614..ee07775b6346 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp
@@ -1068,7 +1068,7 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
SourceLocation LabelLoc = ConsumeToken();
SmallVector<Decl *, 8> DeclsInGroup;
- while (1) {
+ while (true) {
if (Tok.isNot(tok::identifier)) {
Diag(Tok, diag::err_expected) << tok::identifier;
break;
@@ -1191,22 +1191,24 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
bool Parser::ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &Cond,
SourceLocation Loc,
- Sema::ConditionKind CK,
+ Sema::ConditionKind CK, bool MissingOK,
SourceLocation *LParenLoc,
SourceLocation *RParenLoc) {
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
+ SourceLocation Start = Tok.getLocation();
- if (getLangOpts().CPlusPlus)
- Cond = ParseCXXCondition(InitStmt, Loc, CK);
- else {
+ if (getLangOpts().CPlusPlus) {
+ Cond = ParseCXXCondition(InitStmt, Loc, CK, MissingOK);
+ } else {
ExprResult CondExpr = ParseExpression();
// If required, convert to a boolean value.
if (CondExpr.isInvalid())
Cond = Sema::ConditionError();
else
- Cond = Actions.ActOnCondition(getCurScope(), Loc, CondExpr.get(), CK);
+ Cond = Actions.ActOnCondition(getCurScope(), Loc, CondExpr.get(), CK,
+ MissingOK);
}
// If the parser was confused by the condition and we don't have a ')', try to
@@ -1220,7 +1222,16 @@ bool Parser::ParseParenExprOrCondition(StmtResult *InitStmt,
return true;
}
- // Otherwise the condition is valid or the rparen is present.
+ if (Cond.isInvalid()) {
+ ExprResult CondExpr = Actions.CreateRecoveryExpr(
+ Start, Tok.getLocation() == Start ? Start : PrevTokLocation, {},
+ Actions.PreferredConditionType(CK));
+ if (!CondExpr.isInvalid())
+ Cond = Actions.ActOnCondition(getCurScope(), Loc, CondExpr.get(), CK,
+ MissingOK);
+ }
+
+ // Either the condition is valid or the rparen is present.
T.consumeClose();
if (LParenLoc != nullptr) {
@@ -1404,7 +1415,7 @@ StmtResult Parser::ParseIfStatement(SourceLocation *TrailingElseLoc) {
if (ParseParenExprOrCondition(&InitStmt, Cond, IfLoc,
IsConstexpr ? Sema::ConditionKind::ConstexprIf
: Sema::ConditionKind::Boolean,
- &LParen, &RParen))
+ /*MissingOK=*/false, &LParen, &RParen))
return StmtError();
if (IsConstexpr)
@@ -1599,7 +1610,8 @@ StmtResult Parser::ParseSwitchStatement(SourceLocation *TrailingElseLoc) {
SourceLocation LParen;
SourceLocation RParen;
if (ParseParenExprOrCondition(&InitStmt, Cond, SwitchLoc,
- Sema::ConditionKind::Switch, &LParen, &RParen))
+ Sema::ConditionKind::Switch,
+ /*MissingOK=*/false, &LParen, &RParen))
return StmtError();
StmtResult Switch = Actions.ActOnStartOfSwitchStmt(
@@ -1689,7 +1701,8 @@ StmtResult Parser::ParseWhileStatement(SourceLocation *TrailingElseLoc) {
SourceLocation LParen;
SourceLocation RParen;
if (ParseParenExprOrCondition(nullptr, Cond, WhileLoc,
- Sema::ConditionKind::Boolean, &LParen, &RParen))
+ Sema::ConditionKind::Boolean,
+ /*MissingOK=*/false, &LParen, &RParen))
return StmtError();
// C99 6.8.5p5 - In C99, the body of the while statement is a scope, even if
@@ -1780,10 +1793,18 @@ StmtResult Parser::ParseDoStatement() {
// A do-while expression is not a condition, so can't have attributes.
DiagnoseAndSkipCXX11Attributes();
+ SourceLocation Start = Tok.getLocation();
ExprResult Cond = ParseExpression();
// Correct the typos in condition before closing the scope.
if (Cond.isUsable())
Cond = Actions.CorrectDelayedTyposInExpr(Cond);
+ else {
+ if (!Tok.isOneOf(tok::r_paren, tok::r_square, tok::r_brace))
+ SkipUntil(tok::semi);
+ Cond = Actions.CreateRecoveryExpr(
+ Start, Start == Tok.getLocation() ? Start : PrevTokLocation, {},
+ Actions.getASTContext().BoolTy);
+ }
T.consumeClose();
DoScope.Exit();
@@ -2038,10 +2059,11 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
// for-range-declaration next.
bool MightBeForRangeStmt = !ForRangeInfo.ParsedForRangeDecl();
ColonProtectionRAIIObject ColonProtection(*this, MightBeForRangeStmt);
- SecondPart =
- ParseCXXCondition(nullptr, ForLoc, Sema::ConditionKind::Boolean,
- MightBeForRangeStmt ? &ForRangeInfo : nullptr,
- /*EnterForConditionScope*/ true);
+ SecondPart = ParseCXXCondition(
+ nullptr, ForLoc, Sema::ConditionKind::Boolean,
+ // FIXME: recovery if we don't see another semi!
+ /*MissingOK=*/true, MightBeForRangeStmt ? &ForRangeInfo : nullptr,
+ /*EnterForConditionScope*/ true);
if (ForRangeInfo.ParsedForRangeDecl()) {
Diag(FirstPart.get() ? FirstPart.get()->getBeginLoc()
@@ -2065,9 +2087,9 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
if (SecondExpr.isInvalid())
SecondPart = Sema::ConditionError();
else
- SecondPart =
- Actions.ActOnCondition(getCurScope(), ForLoc, SecondExpr.get(),
- Sema::ConditionKind::Boolean);
+ SecondPart = Actions.ActOnCondition(
+ getCurScope(), ForLoc, SecondExpr.get(),
+ Sema::ConditionKind::Boolean, /*MissingOK=*/true);
}
}
}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseStmtAsm.cpp b/contrib/llvm-project/clang/lib/Parse/ParseStmtAsm.cpp
index 3e9ce8fd668f..04c3a8700c10 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseStmtAsm.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseStmtAsm.cpp
@@ -222,7 +222,7 @@ ExprResult Parser::ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
CXXScopeSpec SS;
if (getLangOpts().CPlusPlus)
ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false);
// Require an identifier here.
@@ -508,7 +508,7 @@ StmtResult Parser::ParseMicrosoftAsmStatement(SourceLocation AsmLoc) {
TokLoc = Tok.getLocation();
++NumTokensRead;
SkippedStartOfLine = false;
- } while (1);
+ } while (true);
if (BraceNesting && BraceCount != savedBraceCount) {
// __asm without closing brace (this can happen at EOF).
@@ -681,7 +681,7 @@ StmtResult Parser::ParseMicrosoftAsmStatement(SourceLocation AsmLoc) {
/// asm-qualifier
/// asm-qualifier-list asm-qualifier
bool Parser::parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ) {
- while (1) {
+ while (true) {
const GNUAsmQualifiers::AQ A = getGNUAsmQualifier(Tok);
if (A == GNUAsmQualifiers::AQ_unspecified) {
if (Tok.isNot(tok::l_paren)) {
@@ -810,7 +810,7 @@ StmtResult Parser::ParseAsmStatement(bool &msAsm) {
}
// Parse the asm-string list for clobbers if present.
if (!AteExtraColon && isTokenStringLiteral()) {
- while (1) {
+ while (true) {
ExprResult Clobber(ParseAsmStringLiteral(/*ForAsmLabel*/ false));
if (Clobber.isInvalid())
@@ -888,7 +888,7 @@ bool Parser::ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
if (!isTokenStringLiteral() && Tok.isNot(tok::l_square))
return false;
- while (1) {
+ while (true) {
// Read the [id] if present.
if (Tok.is(tok::l_square)) {
BalancedDelimiterTracker T(*this, tok::l_square);
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp b/contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp
index 45af61a3926a..f875e3bf43e8 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp
@@ -391,7 +391,7 @@ Parser::ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
CXXScopeSpec SS;
if (ParseOptionalCXXScopeSpecifier(
SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false, /*EnteringContext=*/false,
+ /*ObjectHasErrors=*/false, /*EnteringContext=*/false,
/*MayBePseudoDestructor=*/nullptr,
/*IsTypename=*/false, /*LastII=*/nullptr, /*OnlyNamespace=*/true) ||
SS.isInvalid()) {
@@ -500,7 +500,7 @@ bool Parser::ParseTemplateParameters(
bool
Parser::ParseTemplateParameterList(const unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams) {
- while (1) {
+ while (true) {
if (NamedDecl *TmpParam
= ParseTemplateParameter(Depth, TemplateParams.size())) {
@@ -715,7 +715,7 @@ bool Parser::TryAnnotateTypeConstraint() {
CXXScopeSpec SS;
bool WasScopeAnnotation = Tok.is(tok::annot_cxxscope);
if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false,
/*MayBePseudoDestructor=*/nullptr,
// If this is not a type-constraint, then
@@ -787,7 +787,7 @@ NamedDecl *Parser::ParseTypeParameter(unsigned Depth, unsigned Position) {
bool TypenameKeyword = false;
SourceLocation KeyLoc;
ParseOptionalCXXScopeSpecifier(TypeConstraintSS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext*/ false);
if (Tok.is(tok::annot_template_id)) {
// Consume the 'type-constraint'.
@@ -1222,7 +1222,6 @@ bool Parser::ParseGreaterThanInTemplateList(SourceLocation LAngleLoc,
return false;
}
-
/// Parses a template-id that after the template name has
/// already been parsed.
///
@@ -1234,11 +1233,13 @@ bool Parser::ParseGreaterThanInTemplateList(SourceLocation LAngleLoc,
/// token that forms the template-id. Otherwise, we will leave the
/// last token in the stream (e.g., so that it can be replaced with an
/// annotation token).
-bool
-Parser::ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
- SourceLocation &LAngleLoc,
- TemplateArgList &TemplateArgs,
- SourceLocation &RAngleLoc) {
+///
+/// \param NameHint is not required, and merely affects code completion.
+bool Parser::ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
+ SourceLocation &LAngleLoc,
+ TemplateArgList &TemplateArgs,
+ SourceLocation &RAngleLoc,
+ TemplateTy Template) {
assert(Tok.is(tok::less) && "Must have already parsed the template-name");
// Consume the '<'.
@@ -1251,7 +1252,7 @@ Parser::ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
if (!Tok.isOneOf(tok::greater, tok::greatergreater,
tok::greatergreatergreater, tok::greaterequal,
tok::greatergreaterequal))
- Invalid = ParseTemplateArgumentList(TemplateArgs);
+ Invalid = ParseTemplateArgumentList(TemplateArgs, Template, LAngleLoc);
if (Invalid) {
// Try to find the closing '>'.
@@ -1332,8 +1333,8 @@ bool Parser::AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
TemplateArgList TemplateArgs;
bool ArgsInvalid = false;
if (!TypeConstraint || Tok.is(tok::less)) {
- ArgsInvalid = ParseTemplateIdAfterTemplateName(false, LAngleLoc,
- TemplateArgs, RAngleLoc);
+ ArgsInvalid = ParseTemplateIdAfterTemplateName(
+ false, LAngleLoc, TemplateArgs, RAngleLoc, Template);
// If we couldn't recover from invalid arguments, don't form an annotation
// token -- we don't know how much to annotate.
// FIXME: This can lead to duplicate diagnostics if we retry parsing this
@@ -1467,7 +1468,7 @@ ParsedTemplateArgument Parser::ParseTemplateTemplateArgument() {
// '>', or (in some cases) '>>'.
CXXScopeSpec SS; // nested-name-specifier, if present
ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false);
ParsedTemplateArgument Result;
@@ -1585,19 +1586,34 @@ ParsedTemplateArgument Parser::ParseTemplateArgument() {
/// template-argument-list: [C++ 14.2]
/// template-argument
/// template-argument-list ',' template-argument
-bool
-Parser::ParseTemplateArgumentList(TemplateArgList &TemplateArgs) {
+///
+/// \param Template is only used for code completion, and may be null.
+bool Parser::ParseTemplateArgumentList(TemplateArgList &TemplateArgs,
+ TemplateTy Template,
+ SourceLocation OpenLoc) {
ColonProtectionRAIIObject ColonProtection(*this, false);
+ auto RunSignatureHelp = [&] {
+ if (!Template)
+ return QualType();
+ CalledSignatureHelp = true;
+ return Actions.ProduceTemplateArgumentSignatureHelp(Template, TemplateArgs,
+ OpenLoc);
+ };
+
do {
+ PreferredType.enterFunctionArgument(Tok.getLocation(), RunSignatureHelp);
ParsedTemplateArgument Arg = ParseTemplateArgument();
SourceLocation EllipsisLoc;
if (TryConsumeToken(tok::ellipsis, EllipsisLoc))
Arg = Actions.ActOnPackExpansion(Arg, EllipsisLoc);
- if (Arg.isInvalid())
+ if (Arg.isInvalid()) {
+ if (PP.isCodeCompletionReached() && !CalledSignatureHelp)
+ RunSignatureHelp();
return true;
+ }
// Save this template argument.
TemplateArgs.push_back(Arg);
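
In the ParseTemplateArgumentList change above, RunSignatureHelp is a lazy callback: it is registered through PreferredType.enterFunctionArgument for each argument position, but the call into Actions.ProduceTemplateArgumentSignatureHelp only happens once parsing stops at a code-completion point. A stripped-down sketch of that register-now, run-on-failure shape, using hypothetical names rather than the Clang API:

    #include <functional>
    #include <iostream>

    struct ArgListParser {
      bool CalledSignatureHelp = false;

      // Parse up to three arguments; pretend the last one hits the completion
      // point, at which time the registered callback is finally invoked.
      void parseArgs(const std::function<void()> &RunSignatureHelp) {
        for (int Arg = 0; Arg < 3; ++Arg) {
          bool StoppedAtCompletionPoint = (Arg == 2);
          if (StoppedAtCompletionPoint && !CalledSignatureHelp) {
            RunSignatureHelp(); // compute signature help only when needed
            CalledSignatureHelp = true;
            return;
          }
        }
      }
    };

    int main() {
      ArgListParser P;
      P.parseArgs([] { std::cout << "signature help computed\n"; });
    }
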
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp b/contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp
index 35c9036fb27e..512993a5278e 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp
@@ -277,7 +277,7 @@ Parser::TPResult Parser::TryParseSimpleDeclaration(bool AllowForRangeDecl) {
/// '{' '}'
///
Parser::TPResult Parser::TryParseInitDeclaratorList() {
- while (1) {
+ while (true) {
// declarator
TPResult TPR = TryParseDeclarator(false/*mayBeAbstract*/);
if (TPR != TPResult::Ambiguous)
@@ -1068,7 +1068,7 @@ Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
if (mayHaveDirectInit)
return TPResult::Ambiguous;
- while (1) {
+ while (true) {
TPResult TPR(TPResult::Ambiguous);
if (Tok.is(tok::l_paren)) {
@@ -1898,7 +1898,7 @@ Parser::TryParseParameterDeclarationClause(bool *InvalidAsDeclaration,
// parameter-declaration
// parameter-declaration-list ',' parameter-declaration
//
- while (1) {
+ while (true) {
// '...'[opt]
if (Tok.is(tok::ellipsis)) {
ConsumeToken();
diff --git a/contrib/llvm-project/clang/lib/Parse/Parser.cpp b/contrib/llvm-project/clang/lib/Parse/Parser.cpp
index 11113fa1a060..ffa1e0f027f1 100644
--- a/contrib/llvm-project/clang/lib/Parse/Parser.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/Parser.cpp
@@ -279,7 +279,7 @@ bool Parser::SkipUntil(ArrayRef<tok::TokenKind> Toks, SkipUntilFlags Flags) {
// We always want this function to skip at least one token if the first token
// isn't T and if not at EOF.
bool isFirstTokenSkipped = true;
- while (1) {
+ while (true) {
// If we found one of the tokens, stop and return true.
for (unsigned i = 0, NumToks = Toks.size(); i != NumToks; ++i) {
if (Tok.is(Toks[i])) {
@@ -1448,7 +1448,7 @@ void Parser::ParseKNRParamDeclarations(Declarator &D) {
ParseDeclarator(ParmDeclarator);
// Handle the full declarator list.
- while (1) {
+ while (true) {
// If attributes are present, parse them.
MaybeParseGNUAttributes(ParmDeclarator);
@@ -1634,7 +1634,7 @@ Parser::TryAnnotateName(CorrectionCandidateCallback *CCC) {
CXXScopeSpec SS;
if (getLangOpts().CPlusPlus &&
ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
EnteringContext))
return ANK_Error;
@@ -1882,7 +1882,7 @@ bool Parser::TryAnnotateTypeOrScopeToken() {
SourceLocation TypenameLoc = ConsumeToken();
CXXScopeSpec SS;
if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false, nullptr,
/*IsTypename*/ true))
return true;
@@ -1953,7 +1953,7 @@ bool Parser::TryAnnotateTypeOrScopeToken() {
CXXScopeSpec SS;
if (getLangOpts().CPlusPlus)
if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext*/ false))
return true;
@@ -2084,7 +2084,7 @@ bool Parser::TryAnnotateCXXScopeToken(bool EnteringContext) {
CXXScopeSpec SS;
if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
EnteringContext))
return true;
if (SS.isEmpty())
@@ -2195,7 +2195,7 @@ bool Parser::ParseMicrosoftIfExistsCondition(IfExistsCondition& Result) {
// Parse nested-name-specifier.
if (getLangOpts().CPlusPlus)
ParseOptionalCXXScopeSpecifier(Result.SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false);
// Check nested-name specifier.
diff --git a/contrib/llvm-project/clang/lib/Rewrite/HTMLRewrite.cpp b/contrib/llvm-project/clang/lib/Rewrite/HTMLRewrite.cpp
index e9b678b69594..70ea2fcd3d04 100644
--- a/contrib/llvm-project/clang/lib/Rewrite/HTMLRewrite.cpp
+++ b/contrib/llvm-project/clang/lib/Rewrite/HTMLRewrite.cpp
@@ -542,7 +542,7 @@ void html::HighlightMacros(Rewriter &R, FileID FID, const Preprocessor& PP) {
// Lex all the tokens in raw mode, to avoid entering #includes or expanding
// macros.
- while (1) {
+ while (true) {
Token Tok;
L.LexFromRawLexer(Tok);
diff --git a/contrib/llvm-project/clang/lib/Rewrite/Rewriter.cpp b/contrib/llvm-project/clang/lib/Rewrite/Rewriter.cpp
index 3b06afc76e16..8950bfb7c4dc 100644
--- a/contrib/llvm-project/clang/lib/Rewrite/Rewriter.cpp
+++ b/contrib/llvm-project/clang/lib/Rewrite/Rewriter.cpp
@@ -223,6 +223,7 @@ std::string Rewriter::getRewrittenText(CharSourceRange Range) const {
RewriteBuffer::iterator Start = RB.begin();
std::advance(Start, StartOff);
RewriteBuffer::iterator End = Start;
+ assert(EndOff >= StartOff && "Invalid iteration distance");
std::advance(End, EndOff-StartOff);
return std::string(Start, End);
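
The new assert in getRewrittenText guards the std::advance that follows: StartOff and EndOff are unsigned, so a reversed range would wrap EndOff-StartOff into a huge distance and the iterator walk would run off the end of the buffer. The same shape in isolation, with a standard container standing in for RewriteBuffer (illustrative only; assumes the offsets are within bounds):

    #include <cassert>
    #include <iterator>
    #include <list>
    #include <string>

    // Returns the [StartOff, EndOff) slice; the assert catches a reversed
    // range before the unsigned subtraction can wrap.
    std::string slice(const std::list<char> &Buf, unsigned StartOff,
                      unsigned EndOff) {
      assert(EndOff >= StartOff && "Invalid iteration distance");
      auto Start = Buf.begin();
      std::advance(Start, StartOff);
      auto End = Start;
      std::advance(End, EndOff - StartOff);
      return std::string(Start, End);
    }
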
diff --git a/contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp b/contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp
index b4dcc9759b99..ac5ad52c0b1d 100644
--- a/contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp
@@ -128,7 +128,7 @@ class LogicalErrorHandler : public CFGCallback {
Sema &S;
public:
- LogicalErrorHandler(Sema &S) : CFGCallback(), S(S) {}
+ LogicalErrorHandler(Sema &S) : S(S) {}
static bool HasMacroID(const Expr *E) {
if (E->getExprLoc().isMacroID())
diff --git a/contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp b/contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp
index 0a2ca54e244a..fefe20941f17 100644
--- a/contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp
@@ -506,11 +506,92 @@ CodeCompleteConsumer::OverloadCandidate::getFunctionType() const {
case CK_FunctionType:
return Type;
+
+ case CK_Template:
+ case CK_Aggregate:
+ return nullptr;
}
llvm_unreachable("Invalid CandidateKind!");
}
+unsigned CodeCompleteConsumer::OverloadCandidate::getNumParams() const {
+ if (Kind == CK_Template)
+ return Template->getTemplateParameters()->size();
+
+ if (Kind == CK_Aggregate) {
+ unsigned Count =
+ std::distance(AggregateType->field_begin(), AggregateType->field_end());
+ if (const auto *CRD = dyn_cast<CXXRecordDecl>(AggregateType))
+ Count += CRD->getNumBases();
+ return Count;
+ }
+
+ if (const auto *FT = getFunctionType())
+ if (const auto *FPT = dyn_cast<FunctionProtoType>(FT))
+ return FPT->getNumParams();
+
+ return 0;
+}
+
+QualType
+CodeCompleteConsumer::OverloadCandidate::getParamType(unsigned N) const {
+ if (Kind == CK_Aggregate) {
+ if (const auto *CRD = dyn_cast<CXXRecordDecl>(AggregateType)) {
+ if (N < CRD->getNumBases())
+ return std::next(CRD->bases_begin(), N)->getType();
+ N -= CRD->getNumBases();
+ }
+ for (const auto *Field : AggregateType->fields())
+ if (N-- == 0)
+ return Field->getType();
+ return QualType();
+ }
+
+ if (Kind == CK_Template) {
+ TemplateParameterList *TPL = getTemplate()->getTemplateParameters();
+ if (N < TPL->size())
+ if (const auto *D = dyn_cast<NonTypeTemplateParmDecl>(TPL->getParam(N)))
+ return D->getType();
+ return QualType();
+ }
+
+ if (const auto *FT = getFunctionType())
+ if (const auto *FPT = dyn_cast<FunctionProtoType>(FT))
+ if (N < FPT->getNumParams())
+ return FPT->getParamType(N);
+ return QualType();
+}
+
+const NamedDecl *
+CodeCompleteConsumer::OverloadCandidate::getParamDecl(unsigned N) const {
+ if (Kind == CK_Aggregate) {
+ if (const auto *CRD = dyn_cast<CXXRecordDecl>(AggregateType)) {
+ if (N < CRD->getNumBases())
+ return std::next(CRD->bases_begin(), N)->getType()->getAsTagDecl();
+ N -= CRD->getNumBases();
+ }
+ for (const auto *Field : AggregateType->fields())
+ if (N-- == 0)
+ return Field;
+ return nullptr;
+ }
+
+ if (Kind == CK_Template) {
+ TemplateParameterList *TPL = getTemplate()->getTemplateParameters();
+ if (N < TPL->size())
+ return TPL->getParam(N);
+ return nullptr;
+ }
+
+ // Note that if we only have a FunctionProtoType, we don't have param decls.
+ if (const auto *FD = getFunction()) {
+ if (N < FD->param_size())
+ return FD->getParamDecl(N);
+ }
+ return nullptr;
+}
+
//===----------------------------------------------------------------------===//
// Code completion consumer implementation
//===----------------------------------------------------------------------===//
@@ -645,7 +726,7 @@ static std::string getOverloadAsString(const CodeCompletionString &CCS) {
void PrintingCodeCompleteConsumer::ProcessOverloadCandidates(
Sema &SemaRef, unsigned CurrentArg, OverloadCandidate *Candidates,
- unsigned NumCandidates, SourceLocation OpenParLoc) {
+ unsigned NumCandidates, SourceLocation OpenParLoc, bool Braced) {
OS << "OPENING_PAREN_LOC: ";
OpenParLoc.print(OS, SemaRef.getSourceManager());
OS << "\n";
@@ -653,7 +734,7 @@ void PrintingCodeCompleteConsumer::ProcessOverloadCandidates(
for (unsigned I = 0; I != NumCandidates; ++I) {
if (CodeCompletionString *CCS = Candidates[I].CreateSignatureString(
CurrentArg, SemaRef, getAllocator(), CCTUInfo,
- includeBriefComments())) {
+ includeBriefComments(), Braced)) {
OS << "OVERLOAD: " << getOverloadAsString(*CCS) << "\n";
}
}
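
getNumParams and getParamType above model an aggregate candidate's "parameters" as its bases followed by its fields, in declaration order, which is the order braced aggregate initialization consumes them. A small standalone sketch of that index mapping (hypothetical types, not the Clang API):

    #include <string>
    #include <vector>

    // For an aggregate such as
    //   struct Base  { int Id; };
    //   struct Point : Base { double X, Y; std::string Label; };
    // the candidate's parameter list is: Base, X, Y, Label, so index 0 names
    // the base class and indices 1..3 name the fields.
    std::vector<std::string> aggregateParamTypes() {
      return {"Base", "double", "double", "std::string"};
    }

    // getParamType(N)-style lookup: bases first, then fields, else "no type".
    std::string paramTypeAt(unsigned N) {
      const auto Params = aggregateParamTypes();
      return N < Params.size() ? Params[N] : std::string();
    }
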
diff --git a/contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td b/contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td
index 38debc5aa9fc..df2f206041c1 100644
--- a/contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td
+++ b/contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td
@@ -80,11 +80,14 @@ def FuncExtKhrLocalInt32ExtendedAtomics : FunctionExtension<"cl_khr_local_int32
def FuncExtKhrInt64BaseAtomics : FunctionExtension<"cl_khr_int64_base_atomics">;
def FuncExtKhrInt64ExtendedAtomics : FunctionExtension<"cl_khr_int64_extended_atomics">;
def FuncExtKhrMipmapImage : FunctionExtension<"cl_khr_mipmap_image">;
+def FuncExtKhrMipmapImageReadWrite : FunctionExtension<"cl_khr_mipmap_image __opencl_c_read_write_images">;
def FuncExtKhrMipmapImageWrites : FunctionExtension<"cl_khr_mipmap_image_writes">;
def FuncExtKhrGlMsaaSharing : FunctionExtension<"cl_khr_gl_msaa_sharing">;
+def FuncExtKhrGlMsaaSharingReadWrite : FunctionExtension<"cl_khr_gl_msaa_sharing __opencl_c_read_write_images">;
def FuncExtOpenCLCPipes : FunctionExtension<"__opencl_c_pipes">;
def FuncExtOpenCLCWGCollectiveFunctions : FunctionExtension<"__opencl_c_work_group_collective_functions">;
+def FuncExtOpenCLCReadWriteImages : FunctionExtension<"__opencl_c_read_write_images">;
def FuncExtFloatAtomicsFp16GlobalLoadStore : FunctionExtension<"cl_ext_float_atomics __opencl_c_ext_fp16_global_atomic_load_store">;
def FuncExtFloatAtomicsFp16LocalLoadStore : FunctionExtension<"cl_ext_float_atomics __opencl_c_ext_fp16_local_atomic_load_store">;
def FuncExtFloatAtomicsFp16GenericLoadStore : FunctionExtension<"cl_ext_float_atomics __opencl_c_ext_fp16_global_atomic_load_store __opencl_c_ext_fp16_local_atomic_load_store">;
@@ -1390,30 +1393,35 @@ foreach coordTy = [Int, Float] in {
}
// --- Table 23: Sampler-less Read Functions ---
+multiclass ImageReadSamplerless<string aQual> {
+ foreach imgTy = [Image2d, Image1dArray] in {
+ def : Builtin<"read_imagef", [VectorType<Float, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>], Attr.Pure>;
+ def : Builtin<"read_imagei", [VectorType<Int, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>], Attr.Pure>;
+ def : Builtin<"read_imageui", [VectorType<UInt, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>], Attr.Pure>;
+ }
+ foreach imgTy = [Image3d, Image2dArray] in {
+ def : Builtin<"read_imagef", [VectorType<Float, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>], Attr.Pure>;
+ def : Builtin<"read_imagei", [VectorType<Int, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>], Attr.Pure>;
+ def : Builtin<"read_imageui", [VectorType<UInt, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>], Attr.Pure>;
+ }
+ foreach imgTy = [Image1d, Image1dBuffer] in {
+ def : Builtin<"read_imagef", [VectorType<Float, 4>, ImageType<imgTy, aQual>, Int], Attr.Pure>;
+ def : Builtin<"read_imagei", [VectorType<Int, 4>, ImageType<imgTy, aQual>, Int], Attr.Pure>;
+ def : Builtin<"read_imageui", [VectorType<UInt, 4>, ImageType<imgTy, aQual>, Int], Attr.Pure>;
+ }
+ def : Builtin<"read_imagef", [Float, ImageType<Image2dDepth, aQual>, VectorType<Int, 2>], Attr.Pure>;
+ def : Builtin<"read_imagef", [Float, ImageType<Image2dArrayDepth, aQual>, VectorType<Int, 4>], Attr.Pure>;
+}
+
let MinVersion = CL12 in {
- foreach aQual = ["RO", "RW"] in {
- foreach imgTy = [Image2d, Image1dArray] in {
- def : Builtin<"read_imagef", [VectorType<Float, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>], Attr.Pure>;
- def : Builtin<"read_imagei", [VectorType<Int, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>], Attr.Pure>;
- def : Builtin<"read_imageui", [VectorType<UInt, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>], Attr.Pure>;
- }
- foreach imgTy = [Image3d, Image2dArray] in {
- def : Builtin<"read_imagef", [VectorType<Float, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>], Attr.Pure>;
- def : Builtin<"read_imagei", [VectorType<Int, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>], Attr.Pure>;
- def : Builtin<"read_imageui", [VectorType<UInt, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>], Attr.Pure>;
- }
- foreach imgTy = [Image1d, Image1dBuffer] in {
- def : Builtin<"read_imagef", [VectorType<Float, 4>, ImageType<imgTy, aQual>, Int], Attr.Pure>;
- def : Builtin<"read_imagei", [VectorType<Int, 4>, ImageType<imgTy, aQual>, Int], Attr.Pure>;
- def : Builtin<"read_imageui", [VectorType<UInt, 4>, ImageType<imgTy, aQual>, Int], Attr.Pure>;
- }
- def : Builtin<"read_imagef", [Float, ImageType<Image2dDepth, aQual>, VectorType<Int, 2>], Attr.Pure>;
- def : Builtin<"read_imagef", [Float, ImageType<Image2dArrayDepth, aQual>, VectorType<Int, 4>], Attr.Pure>;
+ defm : ImageReadSamplerless<"RO">;
+ let Extension = FuncExtOpenCLCReadWriteImages in {
+ defm : ImageReadSamplerless<"RW">;
}
}
// --- Table 24: Image Write Functions ---
-foreach aQual = ["WO", "RW"] in {
+multiclass ImageWrite<string aQual> {
foreach imgTy = [Image2d] in {
def : Builtin<"write_imagef", [Void, ImageType<imgTy, aQual>, VectorType<Int, 2>, VectorType<Float, 4>]>;
def : Builtin<"write_imagei", [Void, ImageType<imgTy, aQual>, VectorType<Int, 2>, VectorType<Int, 4>]>;
@@ -1443,8 +1451,13 @@ foreach aQual = ["WO", "RW"] in {
def : Builtin<"write_imagef", [Void, ImageType<Image2dArrayDepth, aQual>, VectorType<Int, 4>, Float]>;
}
+defm : ImageWrite<"WO">;
+let Extension = FuncExtOpenCLCReadWriteImages in {
+ defm : ImageWrite<"RW">;
+}
+
// --- Table 25: Image Query Functions ---
-foreach aQual = ["RO", "WO", "RW"] in {
+multiclass ImageQuery<string aQual> {
foreach imgTy = [Image1d, Image1dBuffer, Image2d, Image3d,
Image1dArray, Image2dArray, Image2dDepth,
Image2dArrayDepth] in {
@@ -1468,6 +1481,12 @@ foreach aQual = ["RO", "WO", "RW"] in {
}
}
+defm : ImageQuery<"RO">;
+defm : ImageQuery<"WO">;
+let Extension = FuncExtOpenCLCReadWriteImages in {
+ defm : ImageQuery<"RW">;
+}
+
// OpenCL extension v2.0 s5.1.9: Built-in Image Read Functions
// --- Table 8 ---
foreach aQual = ["RO"] in {
@@ -1488,7 +1507,7 @@ foreach aQual = ["RO"] in {
// OpenCL extension v2.0 s5.1.10: Built-in Image Sampler-less Read Functions
// --- Table 9 ---
let MinVersion = CL12 in {
- foreach aQual = ["RO", "RW"] in {
+ multiclass ImageReadHalf<string aQual> {
foreach name = ["read_imageh"] in {
foreach imgTy = [Image2d, Image1dArray] in {
def : Builtin<name, [VectorType<Half, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>], Attr.Pure>;
@@ -1501,10 +1520,14 @@ let MinVersion = CL12 in {
}
}
}
+ defm : ImageReadHalf<"RO">;
+ let Extension = FuncExtOpenCLCReadWriteImages in {
+ defm : ImageReadHalf<"RW">;
+ }
}
// OpenCL extension v2.0 s5.1.11: Built-in Image Write Functions
// --- Table 10 ---
-foreach aQual = ["WO", "RW"] in {
+multiclass ImageWriteHalf<string aQual> {
foreach name = ["write_imageh"] in {
def : Builtin<name, [Void, ImageType<Image2d, aQual>, VectorType<Int, 2>, VectorType<Half, 4>]>;
def : Builtin<name, [Void, ImageType<Image2dArray, aQual>, VectorType<Int, 4>, VectorType<Half, 4>]>;
@@ -1515,6 +1538,12 @@ foreach aQual = ["WO", "RW"] in {
}
}
+defm : ImageWriteHalf<"WO">;
+let Extension = FuncExtOpenCLCReadWriteImages in {
+ defm : ImageWriteHalf<"RW">;
+}
+
+
//--------------------------------------------------------------------
// OpenCL v2.0 s6.13.15 - Work-group Functions
@@ -1688,14 +1717,24 @@ let Extension = FuncExtKhrMipmapImage in {
}
}
}
- // Added to section 6.13.14.5
- foreach aQual = ["RO", "WO", "RW"] in {
- foreach imgTy = [Image1d, Image2d, Image3d, Image1dArray, Image2dArray, Image2dDepth, Image2dArrayDepth] in {
- def : Builtin<"get_image_num_mip_levels", [Int, ImageType<imgTy, aQual>]>;
- }
+}
+
+// Added to section 6.13.14.5
+multiclass ImageQueryNumMipLevels<string aQual> {
+ foreach imgTy = [Image1d, Image2d, Image3d, Image1dArray, Image2dArray, Image2dDepth, Image2dArrayDepth] in {
+ def : Builtin<"get_image_num_mip_levels", [Int, ImageType<imgTy, aQual>]>;
}
}
+let Extension = FuncExtKhrMipmapImage in {
+ defm : ImageQueryNumMipLevels<"RO">;
+ defm : ImageQueryNumMipLevels<"WO">;
+}
+
+let Extension = FuncExtKhrMipmapImageReadWrite in {
+ defm : ImageQueryNumMipLevels<"RW">;
+}
+
// Write functions are enabled using a separate extension.
let Extension = FuncExtKhrMipmapImageWrites in {
// Added to section 6.13.14.4.
@@ -1734,39 +1773,48 @@ let Extension = FuncExtKhrMipmapImageWrites in {
//--------------------------------------------------------------------
// OpenCL Extension v2.0 s18.3 - Creating OpenCL Memory Objects from OpenGL MSAA Textures
-let Extension = FuncExtKhrGlMsaaSharing in {
- // --- Table 6.13.14.3 ---
- foreach aQual = ["RO", "RW"] in {
- foreach imgTy = [Image2dMsaa] in {
- def : Builtin<"read_imagef", [VectorType<Float, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>, Int], Attr.Pure>;
- def : Builtin<"read_imagei", [VectorType<Int, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>, Int], Attr.Pure>;
- def : Builtin<"read_imageui", [VectorType<UInt, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>, Int], Attr.Pure>;
- }
- foreach imgTy = [Image2dArrayMsaa] in {
- def : Builtin<"read_imagef", [VectorType<Float, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int], Attr.Pure>;
- def : Builtin<"read_imagei", [VectorType<Int, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int], Attr.Pure>;
- def : Builtin<"read_imageui", [VectorType<UInt, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int], Attr.Pure>;
- }
- foreach name = ["read_imagef"] in {
- def : Builtin<name, [Float, ImageType<Image2dMsaaDepth, aQual>, VectorType<Int, 2>, Int], Attr.Pure>;
- def : Builtin<name, [Float, ImageType<Image2dArrayMsaaDepth, aQual>, VectorType<Int, 4>, Int], Attr.Pure>;
- }
- }
-
- // --- Table 6.13.14.5 ---
- foreach aQual = ["RO", "WO", "RW"] in {
- foreach imgTy = [Image2dMsaa, Image2dArrayMsaa, Image2dMsaaDepth, Image2dArrayMsaaDepth] in {
- foreach name = ["get_image_width", "get_image_height",
- "get_image_channel_data_type", "get_image_channel_order",
- "get_image_num_samples"] in {
- def : Builtin<name, [Int, ImageType<imgTy, aQual>], Attr.Const>;
- }
- def : Builtin<"get_image_dim", [VectorType<Int, 2>, ImageType<imgTy, aQual>], Attr.Const>;
- }
- foreach imgTy = [Image2dArrayMsaa, Image2dArrayMsaaDepth] in {
- def : Builtin<"get_image_array_size", [Size, ImageType<imgTy, aQual>], Attr.Const>;
+// --- Table 6.13.14.3 ---
+multiclass ImageReadMsaa<string aQual> {
+ foreach imgTy = [Image2dMsaa] in {
+ def : Builtin<"read_imagef", [VectorType<Float, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>, Int], Attr.Pure>;
+ def : Builtin<"read_imagei", [VectorType<Int, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>, Int], Attr.Pure>;
+ def : Builtin<"read_imageui", [VectorType<UInt, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>, Int], Attr.Pure>;
+ }
+ foreach imgTy = [Image2dArrayMsaa] in {
+ def : Builtin<"read_imagef", [VectorType<Float, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int], Attr.Pure>;
+ def : Builtin<"read_imagei", [VectorType<Int, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int], Attr.Pure>;
+ def : Builtin<"read_imageui", [VectorType<UInt, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int], Attr.Pure>;
+ }
+ foreach name = ["read_imagef"] in {
+ def : Builtin<name, [Float, ImageType<Image2dMsaaDepth, aQual>, VectorType<Int, 2>, Int], Attr.Pure>;
+ def : Builtin<name, [Float, ImageType<Image2dArrayMsaaDepth, aQual>, VectorType<Int, 4>, Int], Attr.Pure>;
+ }
+}
+
+// --- Table 6.13.14.5 ---
+multiclass ImageQueryMsaa<string aQual> {
+ foreach imgTy = [Image2dMsaa, Image2dArrayMsaa, Image2dMsaaDepth, Image2dArrayMsaaDepth] in {
+ foreach name = ["get_image_width", "get_image_height",
+ "get_image_channel_data_type", "get_image_channel_order",
+ "get_image_num_samples"] in {
+ def : Builtin<name, [Int, ImageType<imgTy, aQual>], Attr.Const>;
}
+ def : Builtin<"get_image_dim", [VectorType<Int, 2>, ImageType<imgTy, aQual>], Attr.Const>;
}
+ foreach imgTy = [Image2dArrayMsaa, Image2dArrayMsaaDepth] in {
+ def : Builtin<"get_image_array_size", [Size, ImageType<imgTy, aQual>], Attr.Const>;
+ }
+}
+
+let Extension = FuncExtKhrGlMsaaSharing in {
+ defm : ImageReadMsaa<"RO">;
+ defm : ImageQueryMsaa<"RO">;
+ defm : ImageQueryMsaa<"WO">;
+}
+
+let Extension = FuncExtKhrGlMsaaSharingReadWrite in {
+ defm : ImageReadMsaa<"RW">;
+ defm : ImageQueryMsaa<"RW">;
}
//--------------------------------------------------------------------
diff --git a/contrib/llvm-project/clang/lib/Sema/Scope.cpp b/contrib/llvm-project/clang/lib/Sema/Scope.cpp
index 51b0b24e57b7..499279a2659d 100644
--- a/contrib/llvm-project/clang/lib/Sema/Scope.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/Scope.cpp
@@ -91,7 +91,7 @@ void Scope::Init(Scope *parent, unsigned flags) {
UsingDirectives.clear();
Entity = nullptr;
ErrorTrap.reset();
- NRVO.setPointerAndInt(nullptr, 0);
+ NRVO.setPointerAndInt(nullptr, false);
}
bool Scope::containedInPrototypeScope() const {
diff --git a/contrib/llvm-project/clang/lib/Sema/Sema.cpp b/contrib/llvm-project/clang/lib/Sema/Sema.cpp
index 734ed0f62ec6..20b4a9a5d4e6 100644
--- a/contrib/llvm-project/clang/lib/Sema/Sema.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/Sema.cpp
@@ -60,6 +60,16 @@ ModuleLoader &Sema::getModuleLoader() const { return PP.getModuleLoader(); }
DarwinSDKInfo *
Sema::getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
StringRef Platform) {
+ auto *SDKInfo = getDarwinSDKInfoForAvailabilityChecking();
+ if (!SDKInfo && !WarnedDarwinSDKInfoMissing) {
+ Diag(Loc, diag::warn_missing_sdksettings_for_availability_checking)
+ << Platform;
+ WarnedDarwinSDKInfoMissing = true;
+ }
+ return SDKInfo;
+}
+
+DarwinSDKInfo *Sema::getDarwinSDKInfoForAvailabilityChecking() {
if (CachedDarwinSDKInfo)
return CachedDarwinSDKInfo->get();
auto SDKInfo = parseDarwinSDKInfo(
@@ -71,8 +81,6 @@ Sema::getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
}
if (!SDKInfo)
llvm::consumeError(SDKInfo.takeError());
- Diag(Loc, diag::warn_missing_sdksettings_for_availability_checking)
- << Platform;
CachedDarwinSDKInfo = std::unique_ptr<DarwinSDKInfo>();
return nullptr;
}
@@ -187,7 +195,7 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
CodeSegStack(nullptr), FpPragmaStack(FPOptionsOverride()),
CurInitSeg(nullptr), VisContext(nullptr),
PragmaAttributeCurrentTargetDecl(nullptr),
- IsBuildingRecoveryCallExpr(false), Cleanup{}, LateTemplateParser(nullptr),
+ IsBuildingRecoveryCallExpr(false), LateTemplateParser(nullptr),
LateTemplateParserCleanup(nullptr), OpaqueParser(nullptr), IdResolver(pp),
StdExperimentalNamespaceCache(nullptr), StdInitializerList(nullptr),
StdCoroutineTraitsCache(nullptr), CXXTypeInfoDecl(nullptr),
@@ -324,9 +332,12 @@ void Sema::Initialize() {
Context.getTargetInfo().getSupportedOpenCLOpts(), getLangOpts());
addImplicitTypedef("sampler_t", Context.OCLSamplerTy);
addImplicitTypedef("event_t", Context.OCLEventTy);
- if (getLangOpts().getOpenCLCompatibleVersion() >= 200) {
- addImplicitTypedef("clk_event_t", Context.OCLClkEventTy);
- addImplicitTypedef("queue_t", Context.OCLQueueTy);
+ auto OCLCompatibleVersion = getLangOpts().getOpenCLCompatibleVersion();
+ if (OCLCompatibleVersion >= 200) {
+ if (getLangOpts().OpenCLCPlusPlus || getLangOpts().Blocks) {
+ addImplicitTypedef("clk_event_t", Context.OCLClkEventTy);
+ addImplicitTypedef("queue_t", Context.OCLQueueTy);
+ }
if (getLangOpts().OpenCLPipes)
addImplicitTypedef("reserve_id_t", Context.OCLReserveIDTy);
addImplicitTypedef("atomic_int", Context.getAtomicType(Context.IntTy));
@@ -394,7 +405,6 @@ void Sema::Initialize() {
}
}
-
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
if (getOpenCLOptions().isSupported(#Ext, getLangOpts())) { \
addImplicitTypedef(#ExtType, Context.Id##Ty); \
@@ -1858,6 +1868,15 @@ void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
if (isUnevaluatedContext() || Ty.isNull())
return;
+ // The original idea behind checkTypeSupport function is that unused
+ // declarations can be replaced with an array of bytes of the same size during
+ // codegen, such replacement doesn't seem to be possible for types without
+ // constant byte size like zero length arrays. So, do a deep check for SYCL.
+ if (D && LangOpts.SYCLIsDevice) {
+ llvm::DenseSet<QualType> Visited;
+ deepTypeCheckForSYCLDevice(Loc, Visited, D);
+ }
+
Decl *C = cast<Decl>(getCurLexicalContext());
// Memcpy operations for structs containing a member with unsupported type
@@ -1932,7 +1951,8 @@ void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
};
auto CheckType = [&](QualType Ty, bool IsRetTy = false) {
- if (LangOpts.SYCLIsDevice || (LangOpts.OpenMP && LangOpts.OpenMPIsDevice))
+ if (LangOpts.SYCLIsDevice || (LangOpts.OpenMP && LangOpts.OpenMPIsDevice) ||
+ LangOpts.CUDAIsDevice)
CheckDeviceType(Ty);
QualType UnqualTy = Ty.getCanonicalType().getUnqualifiedType();
@@ -2534,6 +2554,11 @@ static bool IsCPUDispatchCPUSpecificMultiVersion(const Expr *E) {
bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain,
bool (*IsPlausibleResult)(QualType)) {
+ if (isSFINAEContext()) {
+ // If this is a SFINAE context, don't try anything that might trigger ADL
+ // prematurely.
+ return false;
+ }
SourceLocation Loc = E.get()->getExprLoc();
SourceRange Range = E.get()->getSourceRange();
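
The Sema.cpp hunks above split the Darwin SDK lookup into a diagnostic-free overload plus a wrapper that warns, and the WarnedDarwinSDKInfoMissing flag makes the "missing SDKSettings" warning fire at most once even though the lookup result stays cached. The warn-once-over-a-cached-lookup shape, reduced to plain C++ with made-up names:

    #include <iostream>
    #include <memory>
    #include <optional>
    #include <string>

    struct SDKInfo { std::string Version; };

    class AvailabilityChecker {
      std::optional<std::unique_ptr<SDKInfo>> Cached; // set once; may hold null
      bool WarnedMissing = false;

      SDKInfo *load() {
        if (Cached)
          return Cached->get();
        Cached = std::unique_ptr<SDKInfo>(); // pretend SDKSettings.json is absent
        return nullptr;
      }

    public:
      SDKInfo *get(const std::string &Platform) {
        SDKInfo *Info = load();
        if (!Info && !WarnedMissing) {
          std::cerr << "warning: SDK settings not found for " << Platform << "\n";
          WarnedMissing = true; // later calls return null silently
        }
        return Info;
      }
    };
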
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp
index 8cecf6c6ab4f..4781d71080c9 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp
@@ -881,7 +881,8 @@ bool Sema::ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
TypeLocBuilder TLB;
DecltypeTypeLoc DecltypeTL = TLB.push<DecltypeTypeLoc>(T);
- DecltypeTL.setNameLoc(DS.getTypeSpecTypeLoc());
+ DecltypeTL.setDecltypeLoc(DS.getTypeSpecTypeLoc());
+ DecltypeTL.setRParenLoc(DS.getTypeofParensRange().getEnd());
SS.Extend(Context, SourceLocation(), TLB.getTypeLocInContext(Context, T),
ColonColonLoc);
return false;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp b/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp
index 4e83fa1fffca..c8fb36b8311a 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp
@@ -499,7 +499,8 @@ public:
1 /* null byte always written by sprintf */) {}
bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
- const char *, unsigned SpecifierLen) override {
+ const char *, unsigned SpecifierLen,
+ const TargetInfo &) override {
const size_t FieldWidth = computeFieldWidth(FS);
const size_t Precision = computePrecision(FS);
@@ -1578,11 +1579,26 @@ static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
return TheCall;
}
+// Emit an error and return true if the current object format type is in the
+// list of unsupported types.
+static bool CheckBuiltinTargetNotInUnsupported(
+ Sema &S, unsigned BuiltinID, CallExpr *TheCall,
+ ArrayRef<llvm::Triple::ObjectFormatType> UnsupportedObjectFormatTypes) {
+ llvm::Triple::ObjectFormatType CurObjFormat =
+ S.getASTContext().getTargetInfo().getTriple().getObjectFormat();
+ if (llvm::is_contained(UnsupportedObjectFormatTypes, CurObjFormat)) {
+ S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
+ << TheCall->getSourceRange();
+ return true;
+ }
+ return false;
+}
+
// Emit an error and return true if the current architecture is not in the list
// of supported architectures.
static bool
-CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
- ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
+CheckBuiltinTargetInSupported(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
+ ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
llvm::Triple::ArchType CurArch =
S.getASTContext().getTargetInfo().getTriple().getArch();
if (llvm::is_contained(SupportedArchs, CurArch))
@@ -1664,6 +1680,12 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
switch (BuiltinID) {
case Builtin::BI__builtin___CFStringMakeConstantString:
+ // CFStringMakeConstantString is currently not implemented for GOFF (i.e.,
+ // on z/OS) and for XCOFF (i.e., on AIX). Emit unsupported
+ if (CheckBuiltinTargetNotInUnsupported(
+ *this, BuiltinID, TheCall,
+ {llvm::Triple::GOFF, llvm::Triple::XCOFF}))
+ return ExprError();
assert(TheCall->getNumArgs() == 1 &&
"Wrong # arguments to builtin CFStringMakeConstantString");
if (CheckObjCString(TheCall->getArg(0)))
@@ -1698,7 +1720,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
case Builtin::BI_interlockedbittestandreset_acq:
case Builtin::BI_interlockedbittestandreset_rel:
case Builtin::BI_interlockedbittestandreset_nf:
- if (CheckBuiltinTargetSupport(
+ if (CheckBuiltinTargetInSupported(
*this, BuiltinID, TheCall,
{llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
return ExprError();
@@ -1711,9 +1733,10 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
case Builtin::BI_bittestandset64:
case Builtin::BI_interlockedbittestandreset64:
case Builtin::BI_interlockedbittestandset64:
- if (CheckBuiltinTargetSupport(*this, BuiltinID, TheCall,
- {llvm::Triple::x86_64, llvm::Triple::arm,
- llvm::Triple::thumb, llvm::Triple::aarch64}))
+ if (CheckBuiltinTargetInSupported(*this, BuiltinID, TheCall,
+ {llvm::Triple::x86_64, llvm::Triple::arm,
+ llvm::Triple::thumb,
+ llvm::Triple::aarch64}))
return ExprError();
break;
@@ -1750,10 +1773,12 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
return ExprError();
break;
case Builtin::BI__builtin_alloca_with_align:
+ case Builtin::BI__builtin_alloca_with_align_uninitialized:
if (SemaBuiltinAllocaWithAlign(TheCall))
return ExprError();
LLVM_FALLTHROUGH;
case Builtin::BI__builtin_alloca:
+ case Builtin::BI__builtin_alloca_uninitialized:
Diag(TheCall->getBeginLoc(), diag::warn_alloca)
<< TheCall->getDirectCallee();
break;
@@ -2189,9 +2214,12 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
break;
}
- // __builtin_elementwise_ceil restricts the element type to floating point
+ // These builtins restrict the element type to floating point
// types only.
- case Builtin::BI__builtin_elementwise_ceil: {
+ case Builtin::BI__builtin_elementwise_ceil:
+ case Builtin::BI__builtin_elementwise_floor:
+ case Builtin::BI__builtin_elementwise_roundeven:
+ case Builtin::BI__builtin_elementwise_trunc: {
if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
return ExprError();
@@ -2232,8 +2260,10 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
break;
}
- // __builtin_reduce_xor supports vector of integers only.
- case Builtin::BI__builtin_reduce_xor: {
+ // These builtins support vectors of integers only.
+ case Builtin::BI__builtin_reduce_xor:
+ case Builtin::BI__builtin_reduce_or:
+ case Builtin::BI__builtin_reduce_and: {
if (PrepareBuiltinReduceMathOneArgCall(TheCall))
return ExprError();
@@ -3946,23 +3976,39 @@ bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
// Check if each required feature is included
for (StringRef F : ReqFeatures) {
- if (TI.hasFeature(F))
- continue;
-
- // If the feature is 64bit, alter the string so it will print better in
- // the diagnostic.
- if (F == "64bit")
- F = "RV64";
-
- // Convert features like "zbr" and "experimental-zbr" to "Zbr".
- F.consume_front("experimental-");
- std::string FeatureStr = F.str();
- FeatureStr[0] = std::toupper(FeatureStr[0]);
+ SmallVector<StringRef> ReqOpFeatures;
+ F.split(ReqOpFeatures, '|');
+ bool HasFeature = false;
+ for (StringRef OF : ReqOpFeatures) {
+ if (TI.hasFeature(OF)) {
+ HasFeature = true;
+ continue;
+ }
+ }
- // Error message
- FeatureMissing = true;
- Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension)
- << TheCall->getSourceRange() << StringRef(FeatureStr);
+ if (!HasFeature) {
+ std::string FeatureStrs = "";
+ for (StringRef OF : ReqOpFeatures) {
+ // If the feature is 64bit, alter the string so it will print better in
+ // the diagnostic.
+ if (OF == "64bit")
+ OF = "RV64";
+
+ // Convert features like "zbr" and "experimental-zbr" to "Zbr".
+ OF.consume_front("experimental-");
+ std::string FeatureStr = OF.str();
+ FeatureStr[0] = std::toupper(FeatureStr[0]);
+ // Combine strings.
+ FeatureStrs += FeatureStrs == "" ? "" : ", ";
+ FeatureStrs += "'";
+ FeatureStrs += FeatureStr;
+ FeatureStrs += "'";
+ }
+ // Error message
+ FeatureMissing = true;
+ Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension)
+ << TheCall->getSourceRange() << StringRef(FeatureStrs);
+ }
}
if (FeatureMissing)
@@ -8880,8 +8926,8 @@ public:
void handleInvalidMaskType(StringRef MaskType) override;
bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
- const char *startSpecifier,
- unsigned specifierLen) override;
+ const char *startSpecifier, unsigned specifierLen,
+ const TargetInfo &Target) override;
bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
const char *StartSpecifier,
unsigned SpecifierLen,
@@ -9140,11 +9186,9 @@ bool CheckPrintfHandler::checkForCStrMembers(
return false;
}
-bool
-CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier
- &FS,
- const char *startSpecifier,
- unsigned specifierLen) {
+bool CheckPrintfHandler::HandlePrintfSpecifier(
+ const analyze_printf::PrintfSpecifier &FS, const char *startSpecifier,
+ unsigned specifierLen, const TargetInfo &Target) {
using namespace analyze_format_string;
using namespace analyze_printf;
@@ -9276,6 +9320,15 @@ CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier
}
}
+ const llvm::Triple &Triple = Target.getTriple();
+ if (CS.getKind() == ConversionSpecifier::nArg &&
+ (Triple.isAndroid() || Triple.isOSFuchsia())) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_narg_not_supported),
+ getLocationOfByte(CS.getStart()),
+ /*IsStringLocation*/ false,
+ getSpecifierRange(startSpecifier, specifierLen));
+ }
+
// Check for invalid use of field width
if (!FS.hasValidFieldWidth()) {
HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0,
@@ -14021,7 +14074,7 @@ class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> {
const Expr *UsageExpr;
SequenceTree::Seq Seq;
- Usage() : UsageExpr(nullptr), Seq() {}
+ Usage() : UsageExpr(nullptr) {}
};
struct UsageInfo {
@@ -14030,7 +14083,7 @@ class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> {
/// Have we issued a diagnostic for this object already?
bool Diagnosed;
- UsageInfo() : Uses(), Diagnosed(false) {}
+ UsageInfo() : Diagnosed(false) {}
};
using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>;
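
In the RISC-V builtin check above, each entry of ReqFeatures may now carry several alternatives separated by '|', and the builtin is accepted if the target has any one of them; only when none match are all alternatives folded into a single diagnostic. A self-contained sketch of that any-of check, with hypothetical feature names:

    #include <iostream>
    #include <set>
    #include <sstream>
    #include <string>
    #include <vector>

    // True if at least one '|'-separated alternative is an enabled feature.
    static bool hasAnyAlternative(const std::string &Entry,
                                  const std::set<std::string> &Enabled) {
      std::stringstream SS(Entry);
      std::string One;
      while (std::getline(SS, One, '|'))
        if (Enabled.count(One))
          return true;
      return false;
    }

    int main() {
      const std::set<std::string> Enabled = {"zbb"};
      const std::vector<std::string> Required = {"zbb|zbkb", "64bit"};
      for (const auto &Entry : Required)
        if (!hasAnyAlternative(Entry, Enabled))
          std::cout << "builtin requires one of: " << Entry << "\n";
    }
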
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp
index 93c07ccc891f..01fdf51c60c3 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp
@@ -36,6 +36,7 @@
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Overload.h"
#include "clang/Sema/ParsedAttr.h"
+#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
@@ -98,7 +99,7 @@ private:
unsigned SingleDeclIndex;
public:
- ShadowMapEntry() : DeclOrVector(), SingleDeclIndex(0) {}
+ ShadowMapEntry() : SingleDeclIndex(0) {}
ShadowMapEntry(const ShadowMapEntry &) = delete;
ShadowMapEntry(ShadowMapEntry &&Move) { *this = std::move(Move); }
ShadowMapEntry &operator=(const ShadowMapEntry &) = delete;
@@ -1437,7 +1438,7 @@ bool ResultBuilder::IsOrdinaryNonTypeName(const NamedDecl *ND) const {
bool ResultBuilder::IsIntegralConstantValue(const NamedDecl *ND) const {
if (!IsOrdinaryNonTypeName(ND))
- return 0;
+ return false;
if (const auto *VD = dyn_cast<ValueDecl>(ND->getUnderlyingDecl()))
if (VD->getType()->isIntegralOrEnumerationType())
@@ -1895,6 +1896,7 @@ static PrintingPolicy getCompletionPrintingPolicy(const ASTContext &Context,
Policy.SuppressStrongLifetime = true;
Policy.SuppressUnwrittenScope = true;
Policy.SuppressScope = true;
+ Policy.CleanUglifiedParameters = true;
return Policy;
}
@@ -2816,14 +2818,18 @@ formatBlockPlaceholder(const PrintingPolicy &Policy, const NamedDecl *BlockDecl,
Optional<ArrayRef<QualType>> ObjCSubsts = None);
static std::string
-FormatFunctionParameter(const PrintingPolicy &Policy, const ParmVarDecl *Param,
- bool SuppressName = false, bool SuppressBlock = false,
+FormatFunctionParameter(const PrintingPolicy &Policy,
+ const DeclaratorDecl *Param, bool SuppressName = false,
+ bool SuppressBlock = false,
Optional<ArrayRef<QualType>> ObjCSubsts = None) {
// Params are unavailable in FunctionTypeLoc if the FunctionType is invalid.
// It would be better to pass in the param Type, which is usually available.
// But this case is rare, so just pretend we fell back to int as elsewhere.
if (!Param)
return "int";
+ Decl::ObjCDeclQualifier ObjCQual = Decl::OBJC_TQ_None;
+ if (const auto *PVD = dyn_cast<ParmVarDecl>(Param))
+ ObjCQual = PVD->getObjCDeclQualifier();
bool ObjCMethodParam = isa<ObjCMethodDecl>(Param->getDeclContext());
if (Param->getType()->isDependentType() ||
!Param->getType()->isBlockPointerType()) {
@@ -2832,18 +2838,17 @@ FormatFunctionParameter(const PrintingPolicy &Policy, const ParmVarDecl *Param,
std::string Result;
if (Param->getIdentifier() && !ObjCMethodParam && !SuppressName)
- Result = std::string(Param->getIdentifier()->getName());
+ Result = std::string(Param->getIdentifier()->deuglifiedName());
QualType Type = Param->getType();
if (ObjCSubsts)
Type = Type.substObjCTypeArgs(Param->getASTContext(), *ObjCSubsts,
ObjCSubstitutionContext::Parameter);
if (ObjCMethodParam) {
- Result =
- "(" + formatObjCParamQualifiers(Param->getObjCDeclQualifier(), Type);
+ Result = "(" + formatObjCParamQualifiers(ObjCQual, Type);
Result += Type.getAsString(Policy) + ")";
if (Param->getIdentifier() && !SuppressName)
- Result += Param->getIdentifier()->getName();
+ Result += Param->getIdentifier()->deuglifiedName();
} else {
Type.getAsStringInternal(Result, Policy);
}
@@ -2871,20 +2876,19 @@ FormatFunctionParameter(const PrintingPolicy &Policy, const ParmVarDecl *Param,
// for the block; just use the parameter type as a placeholder.
std::string Result;
if (!ObjCMethodParam && Param->getIdentifier())
- Result = std::string(Param->getIdentifier()->getName());
+ Result = std::string(Param->getIdentifier()->deuglifiedName());
QualType Type = Param->getType().getUnqualifiedType();
if (ObjCMethodParam) {
Result = Type.getAsString(Policy);
- std::string Quals =
- formatObjCParamQualifiers(Param->getObjCDeclQualifier(), Type);
+ std::string Quals = formatObjCParamQualifiers(ObjCQual, Type);
if (!Quals.empty())
Result = "(" + Quals + " " + Result + ")";
if (Result.back() != ')')
Result += " ";
if (Param->getIdentifier())
- Result += Param->getIdentifier()->getName();
+ Result += Param->getIdentifier()->deuglifiedName();
} else {
Type.getAsStringInternal(Result, Policy);
}
@@ -3079,14 +3083,14 @@ static void AddTemplateParameterChunks(
if (TTP->getIdentifier()) {
PlaceholderStr += ' ';
- PlaceholderStr += TTP->getIdentifier()->getName();
+ PlaceholderStr += TTP->getIdentifier()->deuglifiedName();
}
HasDefaultArg = TTP->hasDefaultArgument();
} else if (NonTypeTemplateParmDecl *NTTP =
dyn_cast<NonTypeTemplateParmDecl>(*P)) {
if (NTTP->getIdentifier())
- PlaceholderStr = std::string(NTTP->getIdentifier()->getName());
+ PlaceholderStr = std::string(NTTP->getIdentifier()->deuglifiedName());
NTTP->getType().getAsStringInternal(PlaceholderStr, Policy);
HasDefaultArg = NTTP->hasDefaultArgument();
} else {
@@ -3098,7 +3102,7 @@ static void AddTemplateParameterChunks(
PlaceholderStr = "template<...> class";
if (TTP->getIdentifier()) {
PlaceholderStr += ' ';
- PlaceholderStr += TTP->getIdentifier()->getName();
+ PlaceholderStr += TTP->getIdentifier()->deuglifiedName();
}
HasDefaultArg = TTP->hasDefaultArgument();
@@ -3688,6 +3692,31 @@ const RawComment *clang::getParameterComment(
return nullptr;
}
+static void AddOverloadAggregateChunks(const RecordDecl *RD,
+ const PrintingPolicy &Policy,
+ CodeCompletionBuilder &Result,
+ unsigned CurrentArg) {
+ unsigned ChunkIndex = 0;
+ auto AddChunk = [&](llvm::StringRef Placeholder) {
+ if (ChunkIndex > 0)
+ Result.AddChunk(CodeCompletionString::CK_Comma);
+ const char *Copy = Result.getAllocator().CopyString(Placeholder);
+ if (ChunkIndex == CurrentArg)
+ Result.AddCurrentParameterChunk(Copy);
+ else
+ Result.AddPlaceholderChunk(Copy);
+ ++ChunkIndex;
+ };
+ // Aggregate initialization has all bases followed by all fields.
+ // (Bases are not legal in C++11 but in that case we never get here).
+ if (auto *CRD = llvm::dyn_cast<CXXRecordDecl>(RD)) {
+ for (const auto &Base : CRD->bases())
+ AddChunk(Base.getType().getAsString(Policy));
+ }
+ for (const auto &Field : RD->fields())
+ AddChunk(FormatFunctionParameter(Policy, Field));
+}
+
/// Add function overload parameter chunks to the given code completion
/// string.
static void AddOverloadParameterChunks(ASTContext &Context,
@@ -3697,6 +3726,11 @@ static void AddOverloadParameterChunks(ASTContext &Context,
CodeCompletionBuilder &Result,
unsigned CurrentArg, unsigned Start = 0,
bool InOptional = false) {
+ if (!Function && !Prototype) {
+ Result.AddChunk(CodeCompletionString::CK_CurrentParameter, "...");
+ return;
+ }
+
bool FirstParameter = true;
unsigned NumParams =
Function ? Function->getNumParams() : Prototype->getNumParams();
@@ -3757,10 +3791,83 @@ static void AddOverloadParameterChunks(ASTContext &Context,
}
}
+static std::string
+formatTemplateParameterPlaceholder(const NamedDecl *Param, bool &Optional,
+ const PrintingPolicy &Policy) {
+ if (const auto *Type = dyn_cast<TemplateTypeParmDecl>(Param)) {
+ Optional = Type->hasDefaultArgument();
+ } else if (const auto *NonType = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ Optional = NonType->hasDefaultArgument();
+ } else if (const auto *Template = dyn_cast<TemplateTemplateParmDecl>(Param)) {
+ Optional = Template->hasDefaultArgument();
+ }
+ std::string Result;
+ llvm::raw_string_ostream OS(Result);
+ Param->print(OS, Policy);
+ return Result;
+}
+
+static std::string templateResultType(const TemplateDecl *TD,
+ const PrintingPolicy &Policy) {
+ if (const auto *CTD = dyn_cast<ClassTemplateDecl>(TD))
+ return CTD->getTemplatedDecl()->getKindName().str();
+ if (const auto *VTD = dyn_cast<VarTemplateDecl>(TD))
+ return VTD->getTemplatedDecl()->getType().getAsString(Policy);
+ if (const auto *FTD = dyn_cast<FunctionTemplateDecl>(TD))
+ return FTD->getTemplatedDecl()->getReturnType().getAsString(Policy);
+ if (isa<TypeAliasTemplateDecl>(TD))
+ return "type";
+ if (isa<TemplateTemplateParmDecl>(TD))
+ return "class";
+ if (isa<ConceptDecl>(TD))
+ return "concept";
+ return "";
+}
+
+static CodeCompletionString *createTemplateSignatureString(
+ const TemplateDecl *TD, CodeCompletionBuilder &Builder, unsigned CurrentArg,
+ const PrintingPolicy &Policy) {
+ llvm::ArrayRef<NamedDecl *> Params = TD->getTemplateParameters()->asArray();
+ CodeCompletionBuilder OptionalBuilder(Builder.getAllocator(),
+ Builder.getCodeCompletionTUInfo());
+ std::string ResultType = templateResultType(TD, Policy);
+ if (!ResultType.empty())
+ Builder.AddResultTypeChunk(Builder.getAllocator().CopyString(ResultType));
+ Builder.AddTextChunk(
+ Builder.getAllocator().CopyString(TD->getNameAsString()));
+ Builder.AddChunk(CodeCompletionString::CK_LeftAngle);
+ // Initially we're writing into the main string. Once we see an optional arg
+ // (with default), we're writing into the nested optional chunk.
+ CodeCompletionBuilder *Current = &Builder;
+ for (unsigned I = 0; I < Params.size(); ++I) {
+ bool Optional = false;
+ std::string Placeholder =
+ formatTemplateParameterPlaceholder(Params[I], Optional, Policy);
+ if (Optional)
+ Current = &OptionalBuilder;
+ if (I > 0)
+ Current->AddChunk(CodeCompletionString::CK_Comma);
+ Current->AddChunk(I == CurrentArg
+ ? CodeCompletionString::CK_CurrentParameter
+ : CodeCompletionString::CK_Placeholder,
+ Current->getAllocator().CopyString(Placeholder));
+ }
+ // Add the optional chunk to the main string if we ever used it.
+ if (Current == &OptionalBuilder)
+ Builder.AddOptionalChunk(OptionalBuilder.TakeString());
+ Builder.AddChunk(CodeCompletionString::CK_RightAngle);
+ // For function templates, ResultType was the function's return type.
+ // Give some clue this is a function. (Don't show the possibly-bulky params).
+ if (isa<FunctionTemplateDecl>(TD))
+ Builder.AddInformativeChunk("()");
+ return Builder.TakeString();
+}
+
CodeCompletionString *
CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
unsigned CurrentArg, Sema &S, CodeCompletionAllocator &Allocator,
- CodeCompletionTUInfo &CCTUInfo, bool IncludeBriefComments) const {
+ CodeCompletionTUInfo &CCTUInfo, bool IncludeBriefComments,
+ bool Braced) const {
PrintingPolicy Policy = getCompletionPrintingPolicy(S);
// Show signatures of constructors as they are declared:
// vector(int n) rather than vector<string>(int n)
@@ -3770,22 +3877,20 @@ CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
// FIXME: Set priority, availability appropriately.
CodeCompletionBuilder Result(Allocator, CCTUInfo, 1,
CXAvailability_Available);
+
+ if (getKind() == CK_Template)
+ return createTemplateSignatureString(getTemplate(), Result, CurrentArg,
+ Policy);
+
FunctionDecl *FDecl = getFunction();
const FunctionProtoType *Proto =
- dyn_cast<FunctionProtoType>(getFunctionType());
- if (!FDecl && !Proto) {
- // Function without a prototype. Just give the return type and a
- // highlighted ellipsis.
- const FunctionType *FT = getFunctionType();
- Result.AddResultTypeChunk(Result.getAllocator().CopyString(
- FT->getReturnType().getAsString(Policy)));
- Result.AddChunk(CodeCompletionString::CK_LeftParen);
- Result.AddChunk(CodeCompletionString::CK_CurrentParameter, "...");
- Result.AddChunk(CodeCompletionString::CK_RightParen);
- return Result.TakeString();
- }
+ dyn_cast_or_null<FunctionProtoType>(getFunctionType());
- if (FDecl) {
+ // First, the name/type of the callee.
+ if (getKind() == CK_Aggregate) {
+ Result.AddTextChunk(
+ Result.getAllocator().CopyString(getAggregate()->getName()));
+ } else if (FDecl) {
if (IncludeBriefComments) {
if (auto RC = getParameterComment(S.getASTContext(), *this, CurrentArg))
Result.addBriefComment(RC->getBriefText(S.getASTContext()));
@@ -3797,14 +3902,21 @@ CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
FDecl->getDeclName().print(OS, Policy);
Result.AddTextChunk(Result.getAllocator().CopyString(OS.str()));
} else {
+ // Function without a declaration. Just give the return type.
Result.AddResultTypeChunk(Result.getAllocator().CopyString(
- Proto->getReturnType().getAsString(Policy)));
+ getFunctionType()->getReturnType().getAsString(Policy)));
}
- Result.AddChunk(CodeCompletionString::CK_LeftParen);
- AddOverloadParameterChunks(S.getASTContext(), Policy, FDecl, Proto, Result,
- CurrentArg);
- Result.AddChunk(CodeCompletionString::CK_RightParen);
+ // Next, the brackets and parameters.
+ Result.AddChunk(Braced ? CodeCompletionString::CK_LeftBrace
+ : CodeCompletionString::CK_LeftParen);
+ if (getKind() == CK_Aggregate)
+ AddOverloadAggregateChunks(getAggregate(), Policy, Result, CurrentArg);
+ else
+ AddOverloadParameterChunks(S.getASTContext(), Policy, FDecl, Proto, Result,
+ CurrentArg);
+ Result.AddChunk(Braced ? CodeCompletionString::CK_RightBrace
+ : CodeCompletionString::CK_RightParen);
return Result.TakeString();
}
@@ -5408,11 +5520,18 @@ QualType getApproximateType(const Expr *E) {
: getApproximateType(CDSME->getBase());
if (CDSME->isArrow() && !Base.isNull())
Base = Base->getPointeeType(); // could handle unique_ptr etc here?
- RecordDecl *RD = Base.isNull() ? nullptr : getAsRecordDecl(Base);
+ auto *RD =
+ Base.isNull()
+ ? nullptr
+ : llvm::dyn_cast_or_null<CXXRecordDecl>(getAsRecordDecl(Base));
if (RD && RD->isCompleteDefinition()) {
- for (const auto *Member : RD->lookup(CDSME->getMember()))
- if (const ValueDecl *VD = llvm::dyn_cast<ValueDecl>(Member))
- return VD->getType().getNonReferenceType();
+ // Look up member heuristically, including in bases.
+ for (const auto *Member : RD->lookupDependentName(
+ CDSME->getMember(), [](const NamedDecl *Member) {
+ return llvm::isa<ValueDecl>(Member);
+ })) {
+ return llvm::cast<ValueDecl>(Member)->getType().getNonReferenceType();
+ }
}
}
return Unresolved;
@@ -5843,36 +5962,37 @@ static QualType getParamType(Sema &SemaRef,
// overload candidates.
QualType ParamType;
for (auto &Candidate : Candidates) {
- if (const auto *FType = Candidate.getFunctionType())
- if (const auto *Proto = dyn_cast<FunctionProtoType>(FType))
- if (N < Proto->getNumParams()) {
- if (ParamType.isNull())
- ParamType = Proto->getParamType(N);
- else if (!SemaRef.Context.hasSameUnqualifiedType(
- ParamType.getNonReferenceType(),
- Proto->getParamType(N).getNonReferenceType()))
- // Otherwise return a default-constructed QualType.
- return QualType();
- }
+ QualType CandidateParamType = Candidate.getParamType(N);
+ if (CandidateParamType.isNull())
+ continue;
+ if (ParamType.isNull()) {
+ ParamType = CandidateParamType;
+ continue;
+ }
+ if (!SemaRef.Context.hasSameUnqualifiedType(
+ ParamType.getNonReferenceType(),
+ CandidateParamType.getNonReferenceType()))
+ // Two conflicting types, give up.
+ return QualType();
}
return ParamType;
}
static QualType
-ProduceSignatureHelp(Sema &SemaRef, Scope *S,
- MutableArrayRef<ResultCandidate> Candidates,
- unsigned CurrentArg, SourceLocation OpenParLoc) {
+ProduceSignatureHelp(Sema &SemaRef, MutableArrayRef<ResultCandidate> Candidates,
+ unsigned CurrentArg, SourceLocation OpenParLoc,
+ bool Braced) {
if (Candidates.empty())
return QualType();
if (SemaRef.getPreprocessor().isCodeCompletionReached())
SemaRef.CodeCompleter->ProcessOverloadCandidates(
- SemaRef, CurrentArg, Candidates.data(), Candidates.size(), OpenParLoc);
+ SemaRef, CurrentArg, Candidates.data(), Candidates.size(), OpenParLoc,
+ Braced);
return getParamType(SemaRef, Candidates, CurrentArg);
}
-QualType Sema::ProduceCallSignatureHelp(Scope *S, Expr *Fn,
- ArrayRef<Expr *> Args,
+QualType Sema::ProduceCallSignatureHelp(Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc) {
Fn = unwrapParenList(Fn);
if (!CodeCompleter || !Fn)
@@ -5969,53 +6089,158 @@ QualType Sema::ProduceCallSignatureHelp(Scope *S, Expr *Fn,
}
}
mergeCandidatesWithResults(*this, Results, CandidateSet, Loc, Args.size());
- QualType ParamType =
- ProduceSignatureHelp(*this, S, Results, Args.size(), OpenParLoc);
+ QualType ParamType = ProduceSignatureHelp(*this, Results, Args.size(),
+ OpenParLoc, /*Braced=*/false);
return !CandidateSet.empty() ? ParamType : QualType();
}
-QualType Sema::ProduceConstructorSignatureHelp(Scope *S, QualType Type,
+// Determine which param to continue aggregate initialization from after
+// a designated initializer.
+//
+// Given struct S { int a,b,c,d,e; }:
+// after `S{.b=1,` we want to suggest c to continue
+// after `S{.b=1, 2,` we continue with d (this is legal C and ext in C++)
+// after `S{.b=1, .a=2,` we continue with b (this is legal C and ext in C++)
+//
+// Possible outcomes:
+// - we saw a designator for a field, and continue from the returned index.
+// Only aggregate initialization is allowed.
+// - we saw a designator, but it was complex or we couldn't find the field.
+// Only aggregate initialization is possible, but we can't assist with it.
+// Returns an out-of-range index.
+// - we saw no designators, just positional arguments.
+// Returns None.
+static llvm::Optional<unsigned>
+getNextAggregateIndexAfterDesignatedInit(const ResultCandidate &Aggregate,
+ ArrayRef<Expr *> Args) {
+ static constexpr unsigned Invalid = std::numeric_limits<unsigned>::max();
+ assert(Aggregate.getKind() == ResultCandidate::CK_Aggregate);
+
+ // Look for designated initializers.
+ // They're in their syntactic form, not yet resolved to fields.
+ IdentifierInfo *DesignatedFieldName = nullptr;
+ unsigned ArgsAfterDesignator = 0;
+ for (const Expr *Arg : Args) {
+ if (const auto *DIE = dyn_cast<DesignatedInitExpr>(Arg)) {
+ if (DIE->size() == 1 && DIE->getDesignator(0)->isFieldDesignator()) {
+ DesignatedFieldName = DIE->getDesignator(0)->getFieldName();
+ ArgsAfterDesignator = 0;
+ } else {
+ return Invalid; // Complicated designator.
+ }
+ } else if (isa<DesignatedInitUpdateExpr>(Arg)) {
+ return Invalid; // Unsupported.
+ } else {
+ ++ArgsAfterDesignator;
+ }
+ }
+ if (!DesignatedFieldName)
+ return llvm::None;
+
+ // Find the index within the class's fields.
+ // (Probing getParamDecl() directly would be quadratic in number of fields).
+ unsigned DesignatedIndex = 0;
+ const FieldDecl *DesignatedField = nullptr;
+ for (const auto *Field : Aggregate.getAggregate()->fields()) {
+ if (Field->getIdentifier() == DesignatedFieldName) {
+ DesignatedField = Field;
+ break;
+ }
+ ++DesignatedIndex;
+ }
+ if (!DesignatedField)
+ return Invalid; // Designator referred to a missing field, give up.
+
+ // Find the index within the aggregate (which may have leading bases).
+ unsigned AggregateSize = Aggregate.getNumParams();
+ while (DesignatedIndex < AggregateSize &&
+ Aggregate.getParamDecl(DesignatedIndex) != DesignatedField)
+ ++DesignatedIndex;
+
+ // Continue from the index after the last named field.
+ return DesignatedIndex + ArgsAfterDesignator + 1;
+}
+
+QualType Sema::ProduceConstructorSignatureHelp(QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
- SourceLocation OpenParLoc) {
+ SourceLocation OpenParLoc,
+ bool Braced) {
if (!CodeCompleter)
return QualType();
+ SmallVector<ResultCandidate, 8> Results;
// A complete type is needed to lookup for constructors.
- CXXRecordDecl *RD =
- isCompleteType(Loc, Type) ? Type->getAsCXXRecordDecl() : nullptr;
+ RecordDecl *RD =
+ isCompleteType(Loc, Type) ? Type->getAsRecordDecl() : nullptr;
if (!RD)
return Type;
+ CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD);
+
+ // Consider aggregate initialization.
+ // We don't check that types so far are correct.
+ // We also don't handle C99/C++17 brace-elision; we assume init-list elements
+ // are 1:1 with fields.
+ // FIXME: it would be nice to support "unwrapping" aggregates that contain
+ // a single subaggregate, like std::array<T, N> -> T __elements[N].
+ if (Braced && !RD->isUnion() &&
+ (!LangOpts.CPlusPlus || (CRD && CRD->isAggregate()))) {
+ ResultCandidate AggregateSig(RD);
+ unsigned AggregateSize = AggregateSig.getNumParams();
+
+ if (auto NextIndex =
+ getNextAggregateIndexAfterDesignatedInit(AggregateSig, Args)) {
+ // A designator was used, only aggregate init is possible.
+ if (*NextIndex >= AggregateSize)
+ return Type;
+ Results.push_back(AggregateSig);
+ return ProduceSignatureHelp(*this, Results, *NextIndex, OpenParLoc,
+ Braced);
+ }
+
+ // Describe aggregate initialization, but also constructors below.
+ if (Args.size() < AggregateSize)
+ Results.push_back(AggregateSig);
+ }
// FIXME: Provide support for member initializers.
// FIXME: Provide support for variadic template constructors.
- OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal);
+ if (CRD) {
+ OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal);
+ for (NamedDecl *C : LookupConstructors(CRD)) {
+ if (auto *FD = dyn_cast<FunctionDecl>(C)) {
+ // FIXME: we can't yet provide correct signature help for initializer
+ // list constructors, so skip them entirely.
+ if (Braced && LangOpts.CPlusPlus && isInitListConstructor(FD))
+ continue;
+ AddOverloadCandidate(FD, DeclAccessPair::make(FD, C->getAccess()), Args,
+ CandidateSet,
+ /*SuppressUserConversions=*/false,
+ /*PartialOverloading=*/true,
+ /*AllowExplicit*/ true);
+ } else if (auto *FTD = dyn_cast<FunctionTemplateDecl>(C)) {
+ if (Braced && LangOpts.CPlusPlus &&
+ isInitListConstructor(FTD->getTemplatedDecl()))
+ continue;
- for (NamedDecl *C : LookupConstructors(RD)) {
- if (auto *FD = dyn_cast<FunctionDecl>(C)) {
- AddOverloadCandidate(FD, DeclAccessPair::make(FD, C->getAccess()), Args,
- CandidateSet,
- /*SuppressUserConversions=*/false,
- /*PartialOverloading=*/true,
- /*AllowExplicit*/ true);
- } else if (auto *FTD = dyn_cast<FunctionTemplateDecl>(C)) {
- AddTemplateOverloadCandidate(
- FTD, DeclAccessPair::make(FTD, C->getAccess()),
- /*ExplicitTemplateArgs=*/nullptr, Args, CandidateSet,
- /*SuppressUserConversions=*/false,
- /*PartialOverloading=*/true);
+ AddTemplateOverloadCandidate(
+ FTD, DeclAccessPair::make(FTD, C->getAccess()),
+ /*ExplicitTemplateArgs=*/nullptr, Args, CandidateSet,
+ /*SuppressUserConversions=*/false,
+ /*PartialOverloading=*/true);
+ }
}
+ mergeCandidatesWithResults(*this, Results, CandidateSet, Loc, Args.size());
}
- SmallVector<ResultCandidate, 8> Results;
- mergeCandidatesWithResults(*this, Results, CandidateSet, Loc, Args.size());
- return ProduceSignatureHelp(*this, S, Results, Args.size(), OpenParLoc);
+ return ProduceSignatureHelp(*this, Results, Args.size(), OpenParLoc, Braced);
}
QualType Sema::ProduceCtorInitMemberSignatureHelp(
- Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy,
- ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc) {
+ Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy,
+ ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc,
+ bool Braced) {
if (!CodeCompleter)
return QualType();
@@ -6026,12 +6251,66 @@ QualType Sema::ProduceCtorInitMemberSignatureHelp(
// FIXME: Add support for Base class constructors as well.
if (ValueDecl *MemberDecl = tryLookupCtorInitMemberDecl(
Constructor->getParent(), SS, TemplateTypeTy, II))
- return ProduceConstructorSignatureHelp(getCurScope(), MemberDecl->getType(),
+ return ProduceConstructorSignatureHelp(MemberDecl->getType(),
MemberDecl->getLocation(), ArgExprs,
- OpenParLoc);
+ OpenParLoc, Braced);
return QualType();
}
+static bool argMatchesTemplateParams(const ParsedTemplateArgument &Arg,
+ unsigned Index,
+ const TemplateParameterList &Params) {
+ const NamedDecl *Param;
+ if (Index < Params.size())
+ Param = Params.getParam(Index);
+ else if (Params.hasParameterPack())
+ Param = Params.asArray().back();
+ else
+ return false; // too many args
+
+ switch (Arg.getKind()) {
+ case ParsedTemplateArgument::Type:
+ return llvm::isa<TemplateTypeParmDecl>(Param); // constraints not checked
+ case ParsedTemplateArgument::NonType:
+ return llvm::isa<NonTypeTemplateParmDecl>(Param); // type not checked
+ case ParsedTemplateArgument::Template:
+ return llvm::isa<TemplateTemplateParmDecl>(Param); // signature not checked
+ }
+ llvm_unreachable("Unhandled switch case");
+}
+
+QualType Sema::ProduceTemplateArgumentSignatureHelp(
+ TemplateTy ParsedTemplate, ArrayRef<ParsedTemplateArgument> Args,
+ SourceLocation LAngleLoc) {
+ if (!CodeCompleter || !ParsedTemplate)
+ return QualType();
+
+ SmallVector<ResultCandidate, 8> Results;
+ auto Consider = [&](const TemplateDecl *TD) {
+ // Only add if the existing args are compatible with the template.
+ bool Matches = true;
+ for (unsigned I = 0; I < Args.size(); ++I) {
+ if (!argMatchesTemplateParams(Args[I], I, *TD->getTemplateParameters())) {
+ Matches = false;
+ break;
+ }
+ }
+ if (Matches)
+ Results.emplace_back(TD);
+ };
+
+ TemplateName Template = ParsedTemplate.get();
+ if (const auto *TD = Template.getAsTemplateDecl()) {
+ Consider(TD);
+ } else if (const auto *OTS = Template.getAsOverloadedTemplate()) {
+ for (const NamedDecl *ND : *OTS)
+ if (const auto *TD = llvm::dyn_cast<TemplateDecl>(ND))
+ Consider(TD);
+ }
+ return ProduceSignatureHelp(*this, Results, Args.size(), LAngleLoc,
+ /*Braced=*/false);
+}
+
static QualType getDesignatedType(QualType BaseType, const Designation &Desig) {
for (unsigned I = 0; I < Desig.getNumDesignators(); ++I) {
if (BaseType.isNull())
@@ -6073,7 +6352,15 @@ void Sema::CodeCompleteDesignator(QualType BaseType,
CodeCompleter->getCodeCompletionTUInfo(), CCC);
Results.EnterNewScope();
- for (const auto *FD : RD->fields()) {
+ for (const Decl *D : RD->decls()) {
+ const FieldDecl *FD;
+ if (auto *IFD = dyn_cast<IndirectFieldDecl>(D))
+ FD = IFD->getAnonField();
+ else if (auto *DFD = dyn_cast<FieldDecl>(D))
+ FD = DFD;
+ else
+ continue;
+
// FIXME: Make use of previous designators to mark any fields before those
// inaccessible, and also compute the next initializer priority.
ResultBuilder::Result Result(FD, Results.getBasePriority(FD));
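A minimal sketch (struct name and values invented) of the user-side scenario that getNextAggregateIndexAfterDesignatedInit handles: after a designated initializer inside a braced init list, signature help continues from the field following the last designator, plus any positional arguments seen since.

  // Given this aggregate, completion after S{.b = 1, suggests c, and
  // after S{.b = 1, 2, it suggests d (mixing is a C extension in C++).
  struct S { int a, b, c, d, e; };
  S s{.b = 1, .c = 2};   // valid C++20: designators in declaration order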
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp b/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp
index 466e37831f66..ce99d4848cca 100755
--- a/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp
@@ -18,7 +18,6 @@
#include "clang/Sema/Template.h"
#include "clang/Sema/Overload.h"
#include "clang/Sema/Initialization.h"
-#include "clang/Sema/SemaInternal.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Basic/OperatorPrecedence.h"
@@ -1058,7 +1057,7 @@ concepts::ExprRequirement::ExprRequirement(
concepts::ExprRequirement::ReturnTypeRequirement::
ReturnTypeRequirement(TemplateParameterList *TPL) :
- TypeConstraintInfo(TPL, 0) {
+ TypeConstraintInfo(TPL, false) {
assert(TPL->size() == 1);
const TypeConstraint *TC =
cast<TemplateTypeParmDecl>(TPL->getParam(0))->getTypeConstraint();
@@ -1070,7 +1069,7 @@ ReturnTypeRequirement(TemplateParameterList *TPL) :
Constraint->getTemplateArgsAsWritten() &&
TemplateSpecializationType::anyInstantiationDependentTemplateArguments(
Constraint->getTemplateArgsAsWritten()->arguments().drop_front(1));
- TypeConstraintInfo.setInt(Dependent ? 1 : 0);
+ TypeConstraintInfo.setInt(Dependent ? true : false);
}
concepts::TypeRequirement::TypeRequirement(TypeSourceInfo *T) :
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp
index e89cecd08cca..e7e60b7e7daf 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp
@@ -653,7 +653,7 @@ static void checkNoThrow(Sema &S, const Stmt *E,
}
if (ThrowingDecls.empty()) {
// [dcl.fct.def.coroutine]p15
- // The expression co_­await promise.final_­suspend() shall not be
+ // The expression co_await promise.final_suspend() shall not be
// potentially-throwing ([except.spec]).
//
// First time seeing an error, emit the error message.
@@ -663,32 +663,32 @@ static void checkNoThrow(Sema &S, const Stmt *E,
ThrowingDecls.insert(D);
}
};
- auto SC = E->getStmtClass();
- if (SC == Expr::CXXConstructExprClass) {
- auto const *Ctor = cast<CXXConstructExpr>(E)->getConstructor();
+
+ if (auto *CE = dyn_cast<CXXConstructExpr>(E)) {
+ CXXConstructorDecl *Ctor = CE->getConstructor();
checkDeclNoexcept(Ctor);
// Check the corresponding destructor of the constructor.
- checkDeclNoexcept(Ctor->getParent()->getDestructor(), true);
- } else if (SC == Expr::CallExprClass || SC == Expr::CXXMemberCallExprClass ||
- SC == Expr::CXXOperatorCallExprClass) {
- if (!cast<CallExpr>(E)->isTypeDependent()) {
- checkDeclNoexcept(cast<CallExpr>(E)->getCalleeDecl());
- auto ReturnType = cast<CallExpr>(E)->getCallReturnType(S.getASTContext());
- // Check the destructor of the call return type, if any.
- if (ReturnType.isDestructedType() ==
- QualType::DestructionKind::DK_cxx_destructor) {
- const auto *T =
- cast<RecordType>(ReturnType.getCanonicalType().getTypePtr());
- checkDeclNoexcept(
- dyn_cast<CXXRecordDecl>(T->getDecl())->getDestructor(), true);
- }
+ checkDeclNoexcept(Ctor->getParent()->getDestructor(), /*IsDtor=*/true);
+ } else if (auto *CE = dyn_cast<CallExpr>(E)) {
+ if (CE->isTypeDependent())
+ return;
+
+ checkDeclNoexcept(CE->getCalleeDecl());
+ QualType ReturnType = CE->getCallReturnType(S.getASTContext());
+ // Check the destructor of the call return type, if any.
+ if (ReturnType.isDestructedType() ==
+ QualType::DestructionKind::DK_cxx_destructor) {
+ const auto *T =
+ cast<RecordType>(ReturnType.getCanonicalType().getTypePtr());
+ checkDeclNoexcept(dyn_cast<CXXRecordDecl>(T->getDecl())->getDestructor(),
+ /*IsDtor=*/true);
+ }
+ } else
+ for (const auto *Child : E->children()) {
+ if (!Child)
+ continue;
+ checkNoThrow(S, Child, ThrowingDecls);
}
- }
- for (const auto *Child : E->children()) {
- if (!Child)
- continue;
- checkNoThrow(S, Child, ThrowingDecls);
- }
}
bool Sema::checkFinalSuspendNoThrow(const Stmt *FinalSuspend) {
@@ -1184,13 +1184,13 @@ bool CoroutineStmtBuilder::makeReturnOnAllocFailure() {
"cannot make statement while the promise type is dependent");
// [dcl.fct.def.coroutine]p10
- // If a search for the name get_­return_­object_­on_­allocation_­failure in
+ // If a search for the name get_return_object_on_allocation_failure in
// the scope of the promise type ([class.member.lookup]) finds any
// declarations, then the result of a call to an allocation function used to
// obtain storage for the coroutine state is assumed to return nullptr if it
// fails to obtain storage, ... If the allocation function returns nullptr,
// ... and the return value is obtained by a call to
- // T::get_­return_­object_­on_­allocation_­failure(), where T is the
+ // T::get_return_object_on_allocation_failure(), where T is the
// promise type.
DeclarationName DN =
S.PP.getIdentifierInfo("get_return_object_on_allocation_failure");
@@ -1433,10 +1433,10 @@ bool CoroutineStmtBuilder::makeOnFallthrough() {
"cannot make statement while the promise type is dependent");
// [dcl.fct.def.coroutine]/p6
- // If searches for the names return_­void and return_­value in the scope of
+ // If searches for the names return_void and return_value in the scope of
// the promise type each find any declarations, the program is ill-formed.
- // [Note 1: If return_­void is found, flowing off the end of a coroutine is
- // equivalent to a co_­return with no operand. Otherwise, flowing off the end
+ // [Note 1: If return_void is found, flowing off the end of a coroutine is
+ // equivalent to a co_return with no operand. Otherwise, flowing off the end
// of a coroutine results in undefined behavior ([stmt.return.coroutine]). —
// end note]
bool HasRVoid, HasRValue;
@@ -1529,7 +1529,7 @@ bool CoroutineStmtBuilder::makeOnException() {
bool CoroutineStmtBuilder::makeReturnObject() {
// [dcl.fct.def.coroutine]p7
- // The expression promise.get_­return_­object() is used to initialize the
+ // The expression promise.get_return_object() is used to initialize the
// returned reference or prvalue result object of a call to a coroutine.
ExprResult ReturnObject =
buildPromiseCall(S, Fn.CoroutinePromise, Loc, "get_return_object", None);
@@ -1740,30 +1740,39 @@ ClassTemplateDecl *Sema::lookupCoroutineTraits(SourceLocation KwLoc,
return nullptr;
}
- if (!InStd) {
- // Found only in std::experimental.
- Diag(KwLoc, diag::warn_deprecated_coroutine_namespace)
- << "coroutine_traits";
- } else if (InExp) {
- // Found in std and std::experimental.
- Diag(KwLoc,
- diag::err_mixed_use_std_and_experimental_namespace_for_coroutine);
- Diag(KwLoc, diag::warn_deprecated_coroutine_namespace)
- << "coroutine_traits";
- return nullptr;
- }
-
// Prefer ::std to std::experimental.
auto &Result = InStd ? ResStd : ResExp;
CoroTraitsNamespaceCache = InStd ? StdSpace : ExpSpace;
// coroutine_traits is required to be a class template.
- if (!(StdCoroutineTraitsCache = Result.getAsSingle<ClassTemplateDecl>())) {
+ StdCoroutineTraitsCache = Result.getAsSingle<ClassTemplateDecl>();
+ if (!StdCoroutineTraitsCache) {
Result.suppressDiagnostics();
NamedDecl *Found = *Result.begin();
Diag(Found->getLocation(), diag::err_malformed_std_coroutine_traits);
return nullptr;
}
+
+ if (InExp) {
+ // Found in std::experimental
+ Diag(KwLoc, diag::warn_deprecated_coroutine_namespace)
+ << "coroutine_traits";
+ ResExp.suppressDiagnostics();
+ auto *Found = *ResExp.begin();
+ Diag(Found->getLocation(), diag::note_entity_declared_at) << Found;
+
+ if (InStd &&
+ StdCoroutineTraitsCache != ResExp.getAsSingle<ClassTemplateDecl>()) {
+ // Also found something different in std
+ Diag(KwLoc,
+ diag::err_mixed_use_std_and_experimental_namespace_for_coroutine);
+ Diag(StdCoroutineTraitsCache->getLocation(),
+ diag::note_entity_declared_at)
+ << StdCoroutineTraitsCache;
+
+ return nullptr;
+ }
+ }
}
Namespace = CoroTraitsNamespaceCache;
return StdCoroutineTraitsCache;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
index 3c58f1d19c04..3252671991b7 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
@@ -1586,10 +1586,13 @@ void Sema::FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
/// We've determined that \p New is a redeclaration of \p Old. Check that they
/// have compatible owning modules.
bool Sema::CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old) {
- // FIXME: The Modules TS is not clear about how friend declarations are
- // to be treated. It's not meaningful to have different owning modules for
- // linkage in redeclarations of the same entity, so for now allow the
- // redeclaration and change the owning modules to match.
+ // [module.interface]p7:
+ // A declaration is attached to a module as follows:
+ // - If the declaration is a non-dependent friend declaration that nominates a
+ // function with a declarator-id that is a qualified-id or template-id or that
+ // nominates a class other than with an elaborated-type-specifier with neither
+ // a nested-name-specifier nor a simple-template-id, it is attached to the
+ // module to which the friend is attached ([basic.link]).
if (New->getFriendObjectKind() &&
Old->getOwningModuleForLinkage() != New->getOwningModuleForLinkage()) {
New->setLocalOwningModule(Old->getOwningModule());
@@ -1628,6 +1631,52 @@ bool Sema::CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old) {
return false;
}
+// [module.interface]p6:
+// A redeclaration of an entity X is implicitly exported if X was introduced by
+// an exported declaration; otherwise it shall not be exported.
+bool Sema::CheckRedeclarationExported(NamedDecl *New, NamedDecl *Old) {
+ // [module.interface]p1:
+ // An export-declaration shall inhabit a namespace scope.
+ //
+ // So it is meaningless to talk about redeclaration which is not at namespace
+ // scope.
+ if (!New->getLexicalDeclContext()
+ ->getNonTransparentContext()
+ ->isFileContext() ||
+ !Old->getLexicalDeclContext()
+ ->getNonTransparentContext()
+ ->isFileContext())
+ return false;
+
+ bool IsNewExported = New->isInExportDeclContext();
+ bool IsOldExported = Old->isInExportDeclContext();
+
+ // It should be irrelevant if both of them are not exported.
+ if (!IsNewExported && !IsOldExported)
+ return false;
+
+ if (IsOldExported)
+ return false;
+
+ assert(IsNewExported);
+
+ Diag(New->getLocation(), diag::err_redeclaration_non_exported) << New;
+ Diag(Old->getLocation(), diag::note_previous_declaration);
+ return true;
+}
+
+// A wrapper function for checking the semantic restrictions of
+// a redeclaration within a module.
+bool Sema::CheckRedeclarationInModule(NamedDecl *New, NamedDecl *Old) {
+ if (CheckRedeclarationModuleOwnership(New, Old))
+ return true;
+
+ if (CheckRedeclarationExported(New, Old))
+ return true;
+
+ return false;
+}
+
static bool isUsingDecl(NamedDecl *D) {
return isa<UsingShadowDecl>(D) ||
isa<UnresolvedUsingTypenameDecl>(D) ||
@@ -3390,7 +3439,7 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
}
}
- if (CheckRedeclarationModuleOwnership(New, Old))
+ if (CheckRedeclarationInModule(New, Old))
return true;
if (!getLangOpts().CPlusPlus) {
@@ -4269,7 +4318,7 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
return New->setInvalidDecl();
}
- if (CheckRedeclarationModuleOwnership(New, Old))
+ if (CheckRedeclarationInModule(New, Old))
return;
// Variables with external linkage are analyzed in FinalizeDeclaratorGroup.
@@ -5759,7 +5808,15 @@ bool Sema::diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
else if (isa<BlockDecl>(Cur))
Diag(Loc, diag::err_invalid_declarator_in_block)
<< Name << SS.getRange();
- else
+ else if (isa<ExportDecl>(Cur)) {
+ if (!isa<NamespaceDecl>(DC))
+ Diag(Loc, diag::err_export_non_namespace_scope_name)
+ << Name << SS.getRange();
+ else
+ // The cases where DC is not a NamespaceDecl should be handled in
+ // CheckRedeclarationExported.
+ return false;
+ } else
Diag(Loc, diag::err_invalid_declarator_scope)
<< Name << cast<NamedDecl>(Cur) << cast<NamedDecl>(DC) << SS.getRange();
@@ -7798,8 +7855,6 @@ void Sema::CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
DeclarationName Name = R.getLookupName();
// Emit warning and note.
- if (getSourceManager().isInSystemMacro(R.getNameLoc()))
- return;
ShadowedDeclKind Kind = computeShadowedDeclKind(ShadowedDecl, OldDC);
Diag(R.getNameLoc(), WarningDiag) << Name << Kind << OldDC;
if (!CaptureLoc.isInvalid())
@@ -9128,6 +9183,13 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
Diag(D.getDeclSpec().getVirtualSpecLoc(), diag::err_virtual_in_union);
NewFD->setInvalidDecl();
}
+ if ((Parent->isClass() || Parent->isStruct()) &&
+ Parent->hasAttr<SYCLSpecialClassAttr>() &&
+ NewFD->getKind() == Decl::Kind::CXXMethod &&
+ NewFD->getName() == "__init" && D.isFunctionDefinition()) {
+ if (auto *Def = Parent->getDefinition())
+ Def->setInitMethod(true);
+ }
}
SetNestedNameSpecifier(*this, NewFD, D);
@@ -9921,7 +9983,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
<< NewFD;
// Turn this into a variadic function with no parameters.
- const FunctionType *FT = NewFD->getType()->getAs<FunctionType>();
+ const auto *FT = NewFD->getType()->castAs<FunctionType>();
FunctionProtoType::ExtProtoInfo EPI(
Context.getDefaultCallingConvention(true, false));
EPI.Variadic = true;
@@ -14632,8 +14694,10 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
Diag(FD->getLocation(), diag::ext_pure_function_definition);
if (!FD->isInvalidDecl()) {
- // Don't diagnose unused parameters of defaulted or deleted functions.
- if (!FD->isDeleted() && !FD->isDefaulted() && !FD->hasSkippedBody())
+ // Don't diagnose unused parameters of defaulted, deleted or naked
+ // functions.
+ if (!FD->isDeleted() && !FD->isDefaulted() && !FD->hasSkippedBody() &&
+ !FD->hasAttr<NakedAttr>())
DiagnoseUnusedParameters(FD->parameters());
DiagnoseSizeOfParametersAndReturnValue(FD->parameters(),
FD->getReturnType(), FD);
@@ -15002,7 +15066,24 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
diag_id = diag::ext_implicit_function_decl;
else
diag_id = diag::warn_implicit_function_decl;
+
+ TypoCorrection Corrected;
+ // Because typo correction is expensive, only do it if the implicit
+ // function declaration is going to be treated as an error.
+ //
+ // Perform the correction before issuing the main diagnostic, as some consumers
+ // use typo-correction callbacks to enhance the main diagnostic.
+ if (S && !ExternCPrev &&
+ (Diags.getDiagnosticLevel(diag_id, Loc) >= DiagnosticsEngine::Error)) {
+ DeclFilterCCC<FunctionDecl> CCC{};
+ Corrected = CorrectTypo(DeclarationNameInfo(&II, Loc), LookupOrdinaryName,
+ S, nullptr, CCC, CTK_NonError);
+ }
+
Diag(Loc, diag_id) << &II;
+ if (Corrected)
+ diagnoseTypo(Corrected, PDiag(diag::note_function_suggestion),
+ /*ErrorRecovery*/ false);
// If we found a prior declaration of this function, don't bother building
// another one. We've already pushed that one into scope, so there's nothing
@@ -15010,18 +15091,6 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
if (ExternCPrev)
return ExternCPrev;
- // Because typo correction is expensive, only do it if the implicit
- // function declaration is going to be treated as an error.
- if (Diags.getDiagnosticLevel(diag_id, Loc) >= DiagnosticsEngine::Error) {
- TypoCorrection Corrected;
- DeclFilterCCC<FunctionDecl> CCC{};
- if (S && (Corrected =
- CorrectTypo(DeclarationNameInfo(&II, Loc), LookupOrdinaryName,
- S, nullptr, CCC, CTK_NonError)))
- diagnoseTypo(Corrected, PDiag(diag::note_function_suggestion),
- /*ErrorRecovery*/false);
- }
-
// Set a Declarator for the implicit definition: int foo();
const char *Dummy;
AttributeFactory attrFactory;
@@ -15191,11 +15260,11 @@ void Sema::AddKnownFunctionAttributes(FunctionDecl *FD) {
Context.BuiltinInfo.isConstWithoutErrno(BuiltinID))
FD->addAttr(ConstAttr::CreateImplicit(Context, FD->getLocation()));
- // We make "fma" on some platforms const because we know it does not set
+ // We make "fma" on GNU or Windows const because we know it does not set
// errno in those environments even though it could set errno based on the
// C standard.
const llvm::Triple &Trip = Context.getTargetInfo().getTriple();
- if ((Trip.isGNUEnvironment() || Trip.isAndroid() || Trip.isOSMSVCRT()) &&
+ if ((Trip.isGNUEnvironment() || Trip.isOSMSVCRT()) &&
!FD->hasAttr<ConstAttr>()) {
switch (BuiltinID) {
case Builtin::BI__builtin_fma:
@@ -16532,7 +16601,7 @@ CreateNewDecl:
SetMemberAccessSpecifier(New, PrevDecl, AS);
if (PrevDecl)
- CheckRedeclarationModuleOwnership(New, PrevDecl);
+ CheckRedeclarationInModule(New, PrevDecl);
if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip))
New->startDefinition();
@@ -16682,8 +16751,21 @@ void Sema::ActOnTagFinishDefinition(Scope *S, Decl *TagD,
RD->completeDefinition();
}
- if (isa<CXXRecordDecl>(Tag)) {
+ if (auto *RD = dyn_cast<CXXRecordDecl>(Tag)) {
FieldCollector->FinishClass();
+ if (RD->hasAttr<SYCLSpecialClassAttr>()) {
+ auto *Def = RD->getDefinition();
+ assert(Def && "The record is expected to have a completed definition");
+ unsigned NumInitMethods = 0;
+ for (auto *Method : Def->methods()) {
+ if (!Method->getIdentifier())
+ continue;
+ if (Method->getName() == "__init")
+ NumInitMethods++;
+ }
+ if (NumInitMethods > 1 || !Def->hasInitMethod())
+ Diag(RD->getLocation(), diag::err_sycl_special_type_num_init_method);
+ }
}
// Exit this scope of this tag's definition.
@@ -18586,7 +18668,7 @@ void Sema::ActOnPragmaRedefineExtname(IdentifierInfo* Name,
AttributeCommonInfo Info(AliasName, SourceRange(AliasNameLoc),
AttributeCommonInfo::AS_Pragma);
AsmLabelAttr *Attr = AsmLabelAttr::CreateImplicit(
- Context, AliasName->getName(), /*LiteralLabel=*/true, Info);
+ Context, AliasName->getName(), /*IsLiteralLabel=*/true, Info);
// If a declaration that:
// 1) declares a function or a variable
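A short sketch, assuming a hypothetical module M, of the [module.interface]p6 rule that the new CheckRedeclarationExported enforces: a redeclaration is implicitly exported only when the entity was introduced by an exported declaration; otherwise it must not be exported.

  // Illustrative module interface unit (names invented).
  export module M;
  export void f();  // f introduced by an exported declaration
  void f();         // OK: implicitly exported redeclaration
  void g();         // g not exported
  export void g();  // error: only the redeclaration is exported
                    // (diag::err_redeclaration_non_exported)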
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp
index b6bd2e69629d..f04236ab96c3 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp
@@ -2625,37 +2625,53 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
NewII = &S.Context.Idents.get("watchos_app_extension");
if (NewII) {
- auto adjustWatchOSVersion = [](VersionTuple Version) -> VersionTuple {
- if (Version.empty())
- return Version;
- auto Major = Version.getMajor();
- auto NewMajor = Major >= 9 ? Major - 7 : 0;
- if (NewMajor >= 2) {
- if (Version.getMinor().hasValue()) {
- if (Version.getSubminor().hasValue())
- return VersionTuple(NewMajor, Version.getMinor().getValue(),
- Version.getSubminor().getValue());
- else
- return VersionTuple(NewMajor, Version.getMinor().getValue());
- }
- return VersionTuple(NewMajor);
+ const auto *SDKInfo = S.getDarwinSDKInfoForAvailabilityChecking();
+ const auto *IOSToWatchOSMapping =
+ SDKInfo ? SDKInfo->getVersionMapping(
+ DarwinSDKInfo::OSEnvPair::iOStoWatchOSPair())
+ : nullptr;
+
+ auto adjustWatchOSVersion =
+ [IOSToWatchOSMapping](VersionTuple Version) -> VersionTuple {
+ if (Version.empty())
+ return Version;
+ auto MinimumWatchOSVersion = VersionTuple(2, 0);
+
+ if (IOSToWatchOSMapping) {
+ if (auto MappedVersion = IOSToWatchOSMapping->map(
+ Version, MinimumWatchOSVersion, None)) {
+ return MappedVersion.getValue();
}
+ }
- return VersionTuple(2, 0);
- };
+ auto Major = Version.getMajor();
+ auto NewMajor = Major >= 9 ? Major - 7 : 0;
+ if (NewMajor >= 2) {
+ if (Version.getMinor().hasValue()) {
+ if (Version.getSubminor().hasValue())
+ return VersionTuple(NewMajor, Version.getMinor().getValue(),
+ Version.getSubminor().getValue());
+ else
+ return VersionTuple(NewMajor, Version.getMinor().getValue());
+ }
+ return VersionTuple(NewMajor);
+ }
- auto NewIntroduced = adjustWatchOSVersion(Introduced.Version);
- auto NewDeprecated = adjustWatchOSVersion(Deprecated.Version);
- auto NewObsoleted = adjustWatchOSVersion(Obsoleted.Version);
-
- AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(
- ND, AL, NewII, true /*Implicit*/, NewIntroduced, NewDeprecated,
- NewObsoleted, IsUnavailable, Str, IsStrict, Replacement,
- Sema::AMK_None,
- PriorityModifier + Sema::AP_InferredFromOtherPlatform);
- if (NewAttr)
- D->addAttr(NewAttr);
- }
+ return MinimumWatchOSVersion;
+ };
+
+ auto NewIntroduced = adjustWatchOSVersion(Introduced.Version);
+ auto NewDeprecated = adjustWatchOSVersion(Deprecated.Version);
+ auto NewObsoleted = adjustWatchOSVersion(Obsoleted.Version);
+
+ AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(
+ ND, AL, NewII, true /*Implicit*/, NewIntroduced, NewDeprecated,
+ NewObsoleted, IsUnavailable, Str, IsStrict, Replacement,
+ Sema::AMK_None,
+ PriorityModifier + Sema::AP_InferredFromOtherPlatform);
+ if (NewAttr)
+ D->addAttr(NewAttr);
+ }
} else if (S.Context.getTargetInfo().getTriple().isTvOS()) {
// Transcribe "ios" to "tvos" (and add a new attribute) if the versioning
// matches before the start of the tvOS platform.
@@ -2666,14 +2682,38 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
NewII = &S.Context.Idents.get("tvos_app_extension");
if (NewII) {
+ const auto *SDKInfo = S.getDarwinSDKInfoForAvailabilityChecking();
+ const auto *IOSToTvOSMapping =
+ SDKInfo ? SDKInfo->getVersionMapping(
+ DarwinSDKInfo::OSEnvPair::iOStoTvOSPair())
+ : nullptr;
+
+ auto AdjustTvOSVersion =
+ [IOSToTvOSMapping](VersionTuple Version) -> VersionTuple {
+ if (Version.empty())
+ return Version;
+
+ if (IOSToTvOSMapping) {
+ if (auto MappedVersion =
+ IOSToTvOSMapping->map(Version, VersionTuple(0, 0), None)) {
+ return MappedVersion.getValue();
+ }
+ }
+ return Version;
+ };
+
+ auto NewIntroduced = AdjustTvOSVersion(Introduced.Version);
+ auto NewDeprecated = AdjustTvOSVersion(Deprecated.Version);
+ auto NewObsoleted = AdjustTvOSVersion(Obsoleted.Version);
+
AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(
- ND, AL, NewII, true /*Implicit*/, Introduced.Version,
- Deprecated.Version, Obsoleted.Version, IsUnavailable, Str, IsStrict,
- Replacement, Sema::AMK_None,
+ ND, AL, NewII, true /*Implicit*/, NewIntroduced, NewDeprecated,
+ NewObsoleted, IsUnavailable, Str, IsStrict, Replacement,
+ Sema::AMK_None,
PriorityModifier + Sema::AP_InferredFromOtherPlatform);
if (NewAttr)
D->addAttr(NewAttr);
- }
+ }
} else if (S.Context.getTargetInfo().getTriple().getOS() ==
llvm::Triple::IOS &&
S.Context.getTargetInfo().getTriple().isMacCatalystEnvironment()) {
@@ -8261,6 +8301,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_SYCLKernel:
handleSYCLKernelAttr(S, D, AL);
break;
+ case ParsedAttr::AT_SYCLSpecialClass:
+ handleSimpleAttribute<SYCLSpecialClassAttr>(S, D, AL);
+ break;
case ParsedAttr::AT_Format:
handleFormatAttr(S, D, AL);
break;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp
index 01f0079198c7..16cdb7e57723 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp
@@ -13012,7 +13012,7 @@ Decl *Sema::ActOnAliasDeclaration(Scope *S, AccessSpecifier AS,
NewDecl->setInvalidDecl();
else if (OldDecl) {
NewDecl->setPreviousDecl(OldDecl);
- CheckRedeclarationModuleOwnership(NewDecl, OldDecl);
+ CheckRedeclarationInModule(NewDecl, OldDecl);
}
NewND = NewDecl;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDeclObjC.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDeclObjC.cpp
index d6e659e17069..d4fefc3d18d8 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDeclObjC.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDeclObjC.cpp
@@ -2212,9 +2212,8 @@ void Sema::CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
Diag(IVI->getLocation(), diag::err_inconsistent_ivar_count);
}
-static void WarnUndefinedMethod(Sema &S, SourceLocation ImpLoc,
- ObjCMethodDecl *method,
- bool &IncompleteImpl,
+static void WarnUndefinedMethod(Sema &S, ObjCImplDecl *Impl,
+ ObjCMethodDecl *method, bool &IncompleteImpl,
unsigned DiagID,
NamedDecl *NeededFor = nullptr) {
// No point warning no definition of method which is 'unavailable'.
@@ -2227,10 +2226,19 @@ static void WarnUndefinedMethod(Sema &S, SourceLocation ImpLoc,
// separate warnings. We will give that approach a try, as that
// matches what we do with protocols.
{
- const Sema::SemaDiagnosticBuilder &B = S.Diag(ImpLoc, DiagID);
+ const Sema::SemaDiagnosticBuilder &B = S.Diag(Impl->getLocation(), DiagID);
B << method;
if (NeededFor)
B << NeededFor;
+
+ // Add an empty definition at the end of the @implementation.
+ std::string FixItStr;
+ llvm::raw_string_ostream Out(FixItStr);
+ method->print(Out, Impl->getASTContext().getPrintingPolicy());
+ Out << " {\n}\n\n";
+
+ SourceLocation Loc = Impl->getAtEndRange().getBegin();
+ B << FixItHint::CreateInsertion(Loc, FixItStr);
}
// Issue a note to the original declaration.
@@ -2679,14 +2687,10 @@ static void findProtocolsWithExplicitImpls(const ObjCInterfaceDecl *Super,
/// CheckProtocolMethodDefs - This routine checks unimplemented methods
/// Declared in protocol, and those referenced by it.
-static void CheckProtocolMethodDefs(Sema &S,
- SourceLocation ImpLoc,
- ObjCProtocolDecl *PDecl,
- bool& IncompleteImpl,
- const Sema::SelectorSet &InsMap,
- const Sema::SelectorSet &ClsMap,
- ObjCContainerDecl *CDecl,
- LazyProtocolNameSet &ProtocolsExplictImpl) {
+static void CheckProtocolMethodDefs(
+ Sema &S, ObjCImplDecl *Impl, ObjCProtocolDecl *PDecl, bool &IncompleteImpl,
+ const Sema::SelectorSet &InsMap, const Sema::SelectorSet &ClsMap,
+ ObjCContainerDecl *CDecl, LazyProtocolNameSet &ProtocolsExplictImpl) {
ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(CDecl);
ObjCInterfaceDecl *IDecl = C ? C->getClassInterface()
: dyn_cast<ObjCInterfaceDecl>(CDecl);
@@ -2773,9 +2777,8 @@ static void CheckProtocolMethodDefs(Sema &S,
if (C || MethodInClass->isPropertyAccessor())
continue;
unsigned DIAG = diag::warn_unimplemented_protocol_method;
- if (!S.Diags.isIgnored(DIAG, ImpLoc)) {
- WarnUndefinedMethod(S, ImpLoc, method, IncompleteImpl, DIAG,
- PDecl);
+ if (!S.Diags.isIgnored(DIAG, Impl->getLocation())) {
+ WarnUndefinedMethod(S, Impl, method, IncompleteImpl, DIAG, PDecl);
}
}
}
@@ -2796,15 +2799,15 @@ static void CheckProtocolMethodDefs(Sema &S,
continue;
unsigned DIAG = diag::warn_unimplemented_protocol_method;
- if (!S.Diags.isIgnored(DIAG, ImpLoc)) {
- WarnUndefinedMethod(S, ImpLoc, method, IncompleteImpl, DIAG, PDecl);
+ if (!S.Diags.isIgnored(DIAG, Impl->getLocation())) {
+ WarnUndefinedMethod(S, Impl, method, IncompleteImpl, DIAG, PDecl);
}
}
}
// Check on this protocols's referenced protocols, recursively.
for (auto *PI : PDecl->protocols())
- CheckProtocolMethodDefs(S, ImpLoc, PI, IncompleteImpl, InsMap, ClsMap,
- CDecl, ProtocolsExplictImpl);
+ CheckProtocolMethodDefs(S, Impl, PI, IncompleteImpl, InsMap, ClsMap, CDecl,
+ ProtocolsExplictImpl);
}
/// MatchAllMethodDeclarations - Check methods declared in interface
@@ -2827,7 +2830,7 @@ void Sema::MatchAllMethodDeclarations(const SelectorSet &InsMap,
if (!I->isPropertyAccessor() &&
!InsMap.count(I->getSelector())) {
if (ImmediateClass)
- WarnUndefinedMethod(*this, IMPDecl->getLocation(), I, IncompleteImpl,
+ WarnUndefinedMethod(*this, IMPDecl, I, IncompleteImpl,
diag::warn_undef_method_impl);
continue;
} else {
@@ -2857,7 +2860,7 @@ void Sema::MatchAllMethodDeclarations(const SelectorSet &InsMap,
if (!I->isPropertyAccessor() &&
!ClsMap.count(I->getSelector())) {
if (ImmediateClass)
- WarnUndefinedMethod(*this, IMPDecl->getLocation(), I, IncompleteImpl,
+ WarnUndefinedMethod(*this, IMPDecl, I, IncompleteImpl,
diag::warn_undef_method_impl);
} else {
ObjCMethodDecl *ImpMethodDecl =
@@ -3024,16 +3027,15 @@ void Sema::ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
if (ObjCInterfaceDecl *I = dyn_cast<ObjCInterfaceDecl> (CDecl)) {
for (auto *PI : I->all_referenced_protocols())
- CheckProtocolMethodDefs(*this, IMPDecl->getLocation(), PI, IncompleteImpl,
- InsMap, ClsMap, I, ExplicitImplProtocols);
+ CheckProtocolMethodDefs(*this, IMPDecl, PI, IncompleteImpl, InsMap,
+ ClsMap, I, ExplicitImplProtocols);
} else if (ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(CDecl)) {
// For extended class, unimplemented methods in its protocols will
// be reported in the primary class.
if (!C->IsClassExtension()) {
for (auto *P : C->protocols())
- CheckProtocolMethodDefs(*this, IMPDecl->getLocation(), P,
- IncompleteImpl, InsMap, ClsMap, CDecl,
- ExplicitImplProtocols);
+ CheckProtocolMethodDefs(*this, IMPDecl, P, IncompleteImpl, InsMap,
+ ClsMap, CDecl, ExplicitImplProtocols);
DiagnoseUnimplementedProperties(S, IMPDecl, CDecl,
/*SynthesizeProperties=*/false);
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp
index 3af4c6f4bc41..29cb4be7b1ba 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp
@@ -391,9 +391,8 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
NewProto->getExtProtoInfo().withExceptionSpec(ESI)));
}
- if (getLangOpts().MSVCCompat && ESI.Type != EST_DependentNoexcept) {
- // Allow missing exception specifications in redeclarations as an extension.
- DiagID = diag::ext_ms_missing_exception_specification;
+ if (getLangOpts().MSVCCompat && isDynamicExceptionSpec(ESI.Type)) {
+ DiagID = diag::ext_missing_exception_specification;
ReturnValueOnError = false;
} else if (New->isReplaceableGlobalAllocationFunction() &&
ESI.Type != EST_DependentNoexcept) {
@@ -402,6 +401,10 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
DiagID = diag::ext_missing_exception_specification;
ReturnValueOnError = false;
} else if (ESI.Type == EST_NoThrow) {
+ // Don't emit any warning for missing 'nothrow' in MSVC.
+ if (getLangOpts().MSVCCompat) {
+ return false;
+ }
// Allow missing attribute 'nothrow' in redeclarations, since this is a very
// common omission.
DiagID = diag::ext_missing_exception_specification;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp
index d32b3f217aa0..7de43705c2b1 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp
@@ -55,7 +55,6 @@
using namespace clang;
using namespace sema;
-using llvm::RoundingMode;
/// Determine whether the use of this declaration is valid, without
/// emitting diagnostics.
@@ -4500,6 +4499,10 @@ Sema::CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
}
// C99 6.5.3.4p4: the type (an unsigned integer type) is size_t.
+ if (isUnevaluatedContext() && ExprKind == UETT_SizeOf &&
+ TInfo->getType()->isVariablyModifiedType())
+ TInfo = TransformToPotentiallyEvaluated(TInfo);
+
return new (Context) UnaryExprOrTypeTraitExpr(
ExprKind, TInfo, Context.getSizeType(), OpLoc, R.getEnd());
}
@@ -4646,6 +4649,38 @@ static bool isMSPropertySubscriptExpr(Sema &S, Expr *Base) {
return isa<MSPropertySubscriptExpr>(BaseNoParens);
}
+// Returns the type used for LHS[RHS], given one of LHS, RHS is type-dependent.
+// Typically this is DependentTy, but can sometimes be more precise.
+//
+// There are cases when we could determine a non-dependent type:
+// - LHS and RHS may have non-dependent types despite being type-dependent
+// (e.g. unbounded array static members of the current instantiation)
+// - one may be a dependent-sized array with known element type
+// - one may be a dependent-typed valid index (enum in current instantiation)
+//
+// We *always* return a dependent type, in such cases it is DependentTy.
+// This avoids creating type-dependent expressions with non-dependent types.
+// FIXME: is this important to avoid? See https://reviews.llvm.org/D107275
+static QualType getDependentArraySubscriptType(Expr *LHS, Expr *RHS,
+ const ASTContext &Ctx) {
+ assert(LHS->isTypeDependent() || RHS->isTypeDependent());
+ QualType LTy = LHS->getType(), RTy = RHS->getType();
+ QualType Result = Ctx.DependentTy;
+ if (RTy->isIntegralOrUnscopedEnumerationType()) {
+ if (const PointerType *PT = LTy->getAs<PointerType>())
+ Result = PT->getPointeeType();
+ else if (const ArrayType *AT = LTy->getAsArrayTypeUnsafe())
+ Result = AT->getElementType();
+ } else if (LTy->isIntegralOrUnscopedEnumerationType()) {
+ if (const PointerType *PT = RTy->getAs<PointerType>())
+ Result = PT->getPointeeType();
+ else if (const ArrayType *AT = RTy->getAsArrayTypeUnsafe())
+ Result = AT->getElementType();
+ }
+ // Ensure we return a dependent type.
+ return Result->isDependentType() ? Result : Ctx.DependentTy;
+}
+
ExprResult
Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
Expr *idx, SourceLocation rbLoc) {
@@ -4738,8 +4773,9 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
// Build an unanalyzed expression if either operand is type-dependent.
if (getLangOpts().CPlusPlus &&
(base->isTypeDependent() || idx->isTypeDependent())) {
- return new (Context) ArraySubscriptExpr(base, idx, Context.DependentTy,
- VK_LValue, OK_Ordinary, rbLoc);
+ return new (Context) ArraySubscriptExpr(
+ base, idx, getDependentArraySubscriptType(base, idx, getASTContext()),
+ VK_LValue, OK_Ordinary, rbLoc);
}
// MSDN, property (C++)
@@ -5493,7 +5529,8 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
if (LHSTy->isDependentType() || RHSTy->isDependentType()) {
BaseExpr = LHSExp;
IndexExpr = RHSExp;
- ResultType = Context.DependentTy;
+ ResultType =
+ getDependentArraySubscriptType(LHSExp, RHSExp, getASTContext());
} else if (const PointerType *PTy = LHSTy->getAs<PointerType>()) {
BaseExpr = LHSExp;
IndexExpr = RHSExp;
@@ -7694,8 +7731,7 @@ Sema::ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
CastExpr = Result.get();
}
- if (getLangOpts().CPlusPlus && !castType->isVoidType() &&
- !getSourceManager().isInSystemMacro(LParenLoc))
+ if (getLangOpts().CPlusPlus && !castType->isVoidType())
Diag(LParenLoc, diag::warn_old_style_cast) << CastExpr->getSourceRange();
CheckTollFreeBridgeCast(castType, CastExpr);
@@ -15913,7 +15949,7 @@ ExprResult Sema::BuildVAArgExpr(SourceLocation BuiltinLoc,
// promoted type and the underlying type are the same except for
// signedness. Ask the AST for the correctly corresponding type and see
// if that's compatible.
- if (!PromoteType.isNull() &&
+ if (!PromoteType.isNull() && !UnderlyingType->isBooleanType() &&
PromoteType->isUnsignedIntegerType() !=
UnderlyingType->isUnsignedIntegerType()) {
UnderlyingType =
@@ -16569,6 +16605,16 @@ ExprResult Sema::TransformToPotentiallyEvaluated(Expr *E) {
return TransformToPE(*this).TransformExpr(E);
}
+TypeSourceInfo *Sema::TransformToPotentiallyEvaluated(TypeSourceInfo *TInfo) {
+ assert(isUnevaluatedContext() &&
+ "Should only transform unevaluated expressions");
+ ExprEvalContexts.back().Context =
+ ExprEvalContexts[ExprEvalContexts.size() - 2].Context;
+ if (isUnevaluatedContext())
+ return TInfo;
+ return TransformToPE(*this).TransformType(TInfo);
+}
+
void
Sema::PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl,
@@ -19176,10 +19222,12 @@ ExprResult Sema::CheckBooleanCondition(SourceLocation Loc, Expr *E,
}
Sema::ConditionResult Sema::ActOnCondition(Scope *S, SourceLocation Loc,
- Expr *SubExpr, ConditionKind CK) {
- // Empty conditions are valid in for-statements.
+ Expr *SubExpr, ConditionKind CK,
+ bool MissingOK) {
+ // MissingOK indicates whether having no condition expression is valid
+ // (for loop) or invalid (e.g. while loop).
if (!SubExpr)
- return ConditionResult();
+ return MissingOK ? ConditionResult() : ConditionError();
ExprResult Cond;
switch (CK) {
@@ -19197,7 +19245,7 @@ Sema::ConditionResult Sema::ActOnCondition(Scope *S, SourceLocation Loc,
}
if (Cond.isInvalid()) {
Cond = CreateRecoveryExpr(SubExpr->getBeginLoc(), SubExpr->getEndLoc(),
- {SubExpr});
+ {SubExpr}, PreferredConditionType(CK));
if (!Cond.get())
return ConditionError();
}
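A sketch with invented names of the subscript case that getDependentArraySubscriptType makes more precise: the base expression is type-dependent, but its element type is already known, so the ArraySubscriptExpr can be given that (still dependent) type instead of a bare DependentTy.

  // Illustrative only: inside the template, Elements[0] is type-dependent,
  // yet its type can be recorded as T rather than an opaque dependent type.
  template <typename T> struct Buffer {
    T Elements[4];
    T first() { return Elements[0]; }
  };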
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp
index 54f0242d2ca1..b34b744d7312 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp
@@ -6614,7 +6614,7 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc,
const Type *ClassOrBound;
Step(Kind K, const Type *ClassOrBound = nullptr)
- : K(K), Quals(), ClassOrBound(ClassOrBound) {}
+ : K(K), ClassOrBound(ClassOrBound) {}
QualType rebuild(ASTContext &Ctx, QualType T) const {
T = Ctx.getQualifiedType(T, Quals);
switch (K) {
@@ -7410,8 +7410,10 @@ ExprResult Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base,
// the member function body.
if (!BaseType->isDependentType() &&
!isThisOutsideMemberFunctionBody(BaseType) &&
- RequireCompleteType(OpLoc, BaseType, diag::err_incomplete_member_access))
- return ExprError();
+ RequireCompleteType(OpLoc, BaseType,
+ diag::err_incomplete_member_access)) {
+ return CreateRecoveryExpr(Base->getBeginLoc(), Base->getEndLoc(), {Base});
+ }
// C++ [basic.lookup.classref]p2:
// If the id-expression in a class member access (5.2.5) is an
@@ -7767,7 +7769,8 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
TypeLocBuilder TLB;
DecltypeTypeLoc DecltypeTL = TLB.push<DecltypeTypeLoc>(T);
- DecltypeTL.setNameLoc(DS.getTypeSpecTypeLoc());
+ DecltypeTL.setDecltypeLoc(DS.getTypeSpecTypeLoc());
+ DecltypeTL.setRParenLoc(DS.getTypeofParensRange().getEnd());
TypeSourceInfo *DestructedTypeInfo = TLB.getTypeSourceInfo(Context, T);
PseudoDestructorTypeStorage Destructed(DestructedTypeInfo);
@@ -8697,7 +8700,7 @@ Sema::ActOnTypeRequirement(SourceLocation TypenameKWLoc, CXXScopeSpec &SS,
if (TypeName) {
QualType T = CheckTypenameType(ETK_Typename, TypenameKWLoc,
SS.getWithLocInContext(Context), *TypeName,
- NameLoc, &TSI, /*DeducedTypeContext=*/false);
+ NameLoc, &TSI, /*DeducedTSTContext=*/false);
if (T.isNull())
return nullptr;
} else {
@@ -8748,7 +8751,7 @@ Sema::ActOnCompoundRequirement(
/*HasTypeConstraint=*/true);
if (BuildTypeConstraint(SS, TypeConstraint, TParam,
- /*EllpsisLoc=*/SourceLocation(),
+ /*EllipsisLoc=*/SourceLocation(),
/*AllowUnexpandedPack=*/true))
// Just produce a requirement with no type requirements.
return BuildExprRequirement(E, /*IsSimple=*/false, NoexceptLoc, {});
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExprMember.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExprMember.cpp
index 83006f9d804a..dfd93aa4638d 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExprMember.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExprMember.cpp
@@ -504,9 +504,12 @@ Sema::ActOnDependentMemberExpr(Expr *BaseExpr, QualType BaseType,
}
}
- assert(BaseType->isDependentType() ||
- NameInfo.getName().isDependentName() ||
- isDependentScopeSpecifier(SS));
+ assert(BaseType->isDependentType() || NameInfo.getName().isDependentName() ||
+ isDependentScopeSpecifier(SS) ||
+ (TemplateArgs && llvm::any_of(TemplateArgs->arguments(),
+ [](const TemplateArgumentLoc &Arg) {
+ return Arg.getArgument().isDependent();
+ })));
// Get the type being accessed in BaseType. If this is an arrow, the BaseExpr
// must have pointer type, and the accessed type is the pointee.
@@ -1642,6 +1645,9 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
<< BaseType << int(IsArrow) << BaseExpr.get()->getSourceRange()
<< FixItHint::CreateReplacement(OpLoc, "->");
+ if (S.isSFINAEContext())
+ return ExprError();
+
// Recurse as an -> access.
IsArrow = true;
return LookupMemberExpr(S, R, BaseExpr, IsArrow, OpLoc, SS,
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp
index bdc8e1e0b336..4702c405fb4e 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp
@@ -1280,11 +1280,11 @@ static ObjCMethodDecl *findMethodInCurrentClass(Sema &S, Selector Sel) {
// whether Sel is potentially direct in this context.
if (ObjCMethodDecl *MD = IFace->lookupMethod(Sel, /*isInstance=*/true))
return MD;
- if (ObjCMethodDecl *MD = IFace->lookupPrivateMethod(Sel, /*isInstance=*/true))
+ if (ObjCMethodDecl *MD = IFace->lookupPrivateMethod(Sel, /*Instance=*/true))
return MD;
if (ObjCMethodDecl *MD = IFace->lookupMethod(Sel, /*isInstance=*/false))
return MD;
- if (ObjCMethodDecl *MD = IFace->lookupPrivateMethod(Sel, /*isInstance=*/false))
+ if (ObjCMethodDecl *MD = IFace->lookupPrivateMethod(Sel, /*Instance=*/false))
return MD;
return nullptr;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp b/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp
index 635e93ba8460..af6ee24240ce 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp
@@ -620,7 +620,7 @@ void LookupResult::resolveKind() {
getSema().diagnoseEquivalentInternalLinkageDeclarations(
getNameLoc(), HasNonFunction, EquivalentNonFunctions);
- Decls.set_size(N);
+ Decls.truncate(N);
if (HasNonFunction && (HasFunction || HasUnresolved))
Ambiguous = true;
@@ -4307,18 +4307,35 @@ void TypoCorrectionConsumer::addCorrection(TypoCorrection Correction) {
if (!CList.empty() && !CList.back().isResolved())
CList.pop_back();
if (NamedDecl *NewND = Correction.getCorrectionDecl()) {
- std::string CorrectionStr = Correction.getAsString(SemaRef.getLangOpts());
- for (TypoResultList::iterator RI = CList.begin(), RIEnd = CList.end();
- RI != RIEnd; ++RI) {
- // If the Correction refers to a decl already in the result list,
- // replace the existing result if the string representation of Correction
- // comes before the current result alphabetically, then stop as there is
- // nothing more to be done to add Correction to the candidate set.
- if (RI->getCorrectionDecl() == NewND) {
- if (CorrectionStr < RI->getAsString(SemaRef.getLangOpts()))
- *RI = Correction;
- return;
- }
+ auto RI = llvm::find_if(CList, [NewND](const TypoCorrection &TypoCorr) {
+ return TypoCorr.getCorrectionDecl() == NewND;
+ });
+ if (RI != CList.end()) {
+ // The Correction refers to a decl already in the list. No insertion is
+ // necessary and all further cases will return.
+
+ auto IsDeprecated = [](Decl *D) {
+ while (D) {
+ if (D->isDeprecated())
+ return true;
+ D = llvm::dyn_cast_or_null<NamespaceDecl>(D->getDeclContext());
+ }
+ return false;
+ };
+
+ // Prefer non deprecated Corrections over deprecated and only then
+ // sort using an alphabetical order.
+ std::pair<bool, std::string> NewKey = {
+ IsDeprecated(Correction.getFoundDecl()),
+ Correction.getAsString(SemaRef.getLangOpts())};
+
+ std::pair<bool, std::string> PrevKey = {
+ IsDeprecated(RI->getFoundDecl()),
+ RI->getAsString(SemaRef.getLangOpts())};
+
+ if (NewKey < PrevKey)
+ *RI = Correction;
+ return;
}
}
if (CList.empty() || Correction.isResolved())
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaModule.cpp b/contrib/llvm-project/clang/lib/Sema/SemaModule.cpp
index a4b9f3c242c1..747734f2d0ff 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaModule.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaModule.cpp
@@ -395,7 +395,7 @@ DeclResult Sema::ActOnModuleImport(SourceLocation StartLoc,
// [module.interface]p1:
// An export-declaration shall inhabit a namespace scope and appear in the
// purview of a module interface unit.
- Diag(ExportLoc, diag::err_export_not_in_module_interface);
+ Diag(ExportLoc, diag::err_export_not_in_module_interface) << 0;
}
return Import;
@@ -527,21 +527,30 @@ Decl *Sema::ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
// Set this temporarily so we know the export-declaration was braced.
D->setRBraceLoc(LBraceLoc);
+ CurContext->addDecl(D);
+ PushDeclContext(S, D);
+
// C++2a [module.interface]p1:
// An export-declaration shall appear only [...] in the purview of a module
// interface unit. An export-declaration shall not appear directly or
// indirectly within [...] a private-module-fragment.
if (ModuleScopes.empty() || !ModuleScopes.back().Module->isModulePurview()) {
Diag(ExportLoc, diag::err_export_not_in_module_interface) << 0;
+ D->setInvalidDecl();
+ return D;
} else if (!ModuleScopes.back().ModuleInterface) {
Diag(ExportLoc, diag::err_export_not_in_module_interface) << 1;
Diag(ModuleScopes.back().BeginLoc,
diag::note_not_module_interface_add_export)
<< FixItHint::CreateInsertion(ModuleScopes.back().BeginLoc, "export ");
+ D->setInvalidDecl();
+ return D;
} else if (ModuleScopes.back().Module->Kind ==
Module::PrivateModuleFragment) {
Diag(ExportLoc, diag::err_export_in_private_module_fragment);
Diag(ModuleScopes.back().BeginLoc, diag::note_private_module_fragment);
+ D->setInvalidDecl();
+ return D;
}
for (const DeclContext *DC = CurContext; DC; DC = DC->getLexicalParent()) {
@@ -553,7 +562,7 @@ Decl *Sema::ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
Diag(ND->getLocation(), diag::note_anonymous_namespace);
// Don't diagnose internal-linkage declarations in this region.
D->setInvalidDecl();
- break;
+ return D;
}
// A declaration is exported if it is [...] a namespace-definition
@@ -572,10 +581,10 @@ Decl *Sema::ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
Diag(ExportLoc, diag::err_export_within_export);
if (ED->hasBraces())
Diag(ED->getLocation(), diag::note_export);
+ D->setInvalidDecl();
+ return D;
}
- CurContext->addDecl(D);
- PushDeclContext(S, D);
D->setModuleOwnershipKind(Decl::ModuleOwnershipKind::VisibleWhenImported);
return D;
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaObjCProperty.cpp b/contrib/llvm-project/clang/lib/Sema/SemaObjCProperty.cpp
index 74c73ace3c5f..118afb81dd72 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaObjCProperty.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaObjCProperty.cpp
@@ -112,8 +112,8 @@ CheckPropertyAgainstProtocol(Sema &S, ObjCPropertyDecl *Prop,
return;
// Look for a property with the same name.
- if (ObjCPropertyDecl *ProtoProp =
- Proto->lookup(Prop->getDeclName()).find_first<ObjCPropertyDecl>()) {
+ if (ObjCPropertyDecl *ProtoProp = Proto->getProperty(
+ Prop->getIdentifier(), Prop->isInstanceProperty())) {
S.DiagnosePropertyMismatch(Prop, ProtoProp, Proto->getIdentifier(), true);
return;
}
@@ -231,8 +231,8 @@ Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
bool FoundInSuper = false;
ObjCInterfaceDecl *CurrentInterfaceDecl = IFace;
while (ObjCInterfaceDecl *Super = CurrentInterfaceDecl->getSuperClass()) {
- if (ObjCPropertyDecl *SuperProp =
- Super->lookup(Res->getDeclName()).find_first<ObjCPropertyDecl>()) {
+ if (ObjCPropertyDecl *SuperProp = Super->getProperty(
+ Res->getIdentifier(), Res->isInstanceProperty())) {
DiagnosePropertyMismatch(Res, SuperProp, Super->getIdentifier(), false);
FoundInSuper = true;
break;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp b/contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp
index ba0481874577..ae91a6470471 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp
@@ -36,6 +36,7 @@
#include "llvm/ADT/PointerEmbeddedInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Frontend/OpenMP/OMPAssume.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <set>
@@ -7051,7 +7052,8 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
QualType AdjustedFnType = FD->getType();
if (NumAppendArgs) {
- if (isa<FunctionNoProtoType>(FD->getType())) {
+ const auto *PTy = AdjustedFnType->getAsAdjusted<FunctionProtoType>();
+ if (!PTy) {
Diag(FD->getLocation(), diag::err_omp_declare_variant_prototype_required)
<< SR;
return None;
@@ -7069,8 +7071,7 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
Diag(SR.getBegin(), diag::err_omp_interop_type_not_found) << SR;
return None;
}
- QualType InteropType = QualType(TD->getTypeForDecl(), 0);
- auto *PTy = cast<FunctionProtoType>(FD->getType());
+ QualType InteropType = Context.getTypeDeclType(TD);
if (PTy->isVariadic()) {
Diag(FD->getLocation(), diag::err_omp_append_args_with_varargs) << SR;
return None;
@@ -13148,7 +13149,7 @@ StmtResult Sema::ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
if (FullClause) {
if (!VerifyPositiveIntegerConstantInClause(
LoopHelper.NumIterations, OMPC_full, /*StrictlyPositive=*/false,
- /*SuppressExprDigs=*/true)
+ /*SuppressExprDiags=*/true)
.isUsable()) {
Diag(AStmt->getBeginLoc(), diag::err_omp_unroll_full_variable_trip_count);
Diag(FullClause->getBeginLoc(), diag::note_omp_directive_here)
@@ -20761,8 +20762,7 @@ Sema::ActOnOpenMPEndDeclareTargetDirective() {
void Sema::ActOnFinishedOpenMPDeclareTargetContext(
DeclareTargetContextInfo &DTCI) {
for (auto &It : DTCI.ExplicitlyMapped)
- ActOnOpenMPDeclareTargetName(It.first, It.second.Loc, It.second.MT,
- DTCI.DT);
+ ActOnOpenMPDeclareTargetName(It.first, It.second.Loc, It.second.MT, DTCI);
}
NamedDecl *Sema::lookupOpenMPDeclareTargetName(Scope *CurScope,
@@ -20799,9 +20799,9 @@ NamedDecl *Sema::lookupOpenMPDeclareTargetName(Scope *CurScope,
return ND;
}
-void Sema::ActOnOpenMPDeclareTargetName(
- NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT,
- OMPDeclareTargetDeclAttr::DevTypeTy DT) {
+void Sema::ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
+ OMPDeclareTargetDeclAttr::MapTypeTy MT,
+ DeclareTargetContextInfo &DTCI) {
assert((isa<VarDecl>(ND) || isa<FunctionDecl>(ND) ||
isa<FunctionTemplateDecl>(ND)) &&
"Expected variable, function or function template.");
@@ -20818,10 +20818,10 @@ void Sema::ActOnOpenMPDeclareTargetName(
auto *VD = cast<ValueDecl>(ND);
llvm::Optional<OMPDeclareTargetDeclAttr *> ActiveAttr =
OMPDeclareTargetDeclAttr::getActiveAttr(VD);
- if (ActiveAttr.hasValue() && ActiveAttr.getValue()->getDevType() != DT &&
+ if (ActiveAttr.hasValue() && ActiveAttr.getValue()->getDevType() != DTCI.DT &&
ActiveAttr.getValue()->getLevel() == Level) {
Diag(Loc, diag::err_omp_device_type_mismatch)
- << OMPDeclareTargetDeclAttr::ConvertDevTypeTyToStr(DT)
+ << OMPDeclareTargetDeclAttr::ConvertDevTypeTyToStr(DTCI.DT)
<< OMPDeclareTargetDeclAttr::ConvertDevTypeTyToStr(
ActiveAttr.getValue()->getDevType());
return;
@@ -20835,8 +20835,16 @@ void Sema::ActOnOpenMPDeclareTargetName(
if (ActiveAttr.hasValue() && ActiveAttr.getValue()->getLevel() == Level)
return;
- auto *A = OMPDeclareTargetDeclAttr::CreateImplicit(Context, MT, DT, Level,
- SourceRange(Loc, Loc));
+ Expr *IndirectE = nullptr;
+ bool IsIndirect = false;
+ if (DTCI.Indirect.hasValue()) {
+ IndirectE = DTCI.Indirect.getValue();
+ if (!IndirectE)
+ IsIndirect = true;
+ }
+ auto *A = OMPDeclareTargetDeclAttr::CreateImplicit(
+ Context, MT, DTCI.DT, IndirectE, IsIndirect, Level,
+ SourceRange(Loc, Loc));
ND->addAttr(A);
if (ASTMutationListener *ML = Context.getASTMutationListener())
ML->DeclarationMarkedOpenMPDeclareTarget(ND, A);
@@ -20927,9 +20935,16 @@ void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
if (ActiveAttr.hasValue() && ActiveAttr.getValue()->getLevel() >= Level)
return;
DeclareTargetContextInfo &DTCI = DeclareTargetNesting.back();
+ Expr *IndirectE = nullptr;
+ bool IsIndirect = false;
+ if (DTCI.Indirect.hasValue()) {
+ IndirectE = DTCI.Indirect.getValue();
+ if (!IndirectE)
+ IsIndirect = true;
+ }
auto *A = OMPDeclareTargetDeclAttr::CreateImplicit(
- Context, OMPDeclareTargetDeclAttr::MT_To, DTCI.DT, Level,
- SourceRange(DTCI.Loc, DTCI.Loc));
+ Context, OMPDeclareTargetDeclAttr::MT_To, DTCI.DT, IndirectE,
+ IsIndirect, Level, SourceRange(DTCI.Loc, DTCI.Loc));
D->addAttr(A);
if (ASTMutationListener *ML = Context.getASTMutationListener())
ML->DeclarationMarkedOpenMPDeclareTarget(D, A);
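
In the SemaOpenMP hunks above, DTCI.Indirect acts as a tri-state: an unset optional means no indirect clause, an optional holding a null expression means a bare indirect clause (so IsIndirect becomes true), and a non-null expression is forwarded to the attribute. A hedged standalone sketch of that decoding, with invented names and std::optional standing in for llvm::Optional:

#include <optional>

struct Expr; // placeholder for an expression node

struct IndirectClauseState {
  Expr *IndirectExpr = nullptr;
  bool IsIndirect = false;
};

// An engaged optional holding a null expression means "indirect" with no
// argument expression, matching the DTCI.Indirect handling above.
IndirectClauseState decodeIndirect(const std::optional<Expr *> &Indirect) {
  IndirectClauseState S;
  if (Indirect.has_value()) {
    S.IndirectExpr = *Indirect;
    if (!S.IndirectExpr)
      S.IsIndirect = true;
  }
  return S;
}
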
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp b/contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp
index 42b1340f9a65..483247aaa7c5 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp
@@ -2405,9 +2405,8 @@ bool Sema::IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
if (FromType->isObjCObjectPointerType() && ToPointeeType->isVoidType() &&
!getLangOpts().ObjCAutoRefCount) {
ConvertedType = BuildSimilarlyQualifiedPointerType(
- FromType->getAs<ObjCObjectPointerType>(),
- ToPointeeType,
- ToType, Context);
+ FromType->castAs<ObjCObjectPointerType>(), ToPointeeType, ToType,
+ Context);
return true;
}
const PointerType *FromTypePtr = FromType->getAs<PointerType>();
@@ -3718,8 +3717,7 @@ compareConversionFunctions(Sema &S, FunctionDecl *Function1,
CallingConv Conv2CC = Conv2FuncRet->getCallConv();
CXXMethodDecl *CallOp = Conv2->getParent()->getLambdaCallOperator();
- const FunctionProtoType *CallOpProto =
- CallOp->getType()->getAs<FunctionProtoType>();
+ const auto *CallOpProto = CallOp->getType()->castAs<FunctionProtoType>();
CallingConv CallOpCC =
CallOp->getType()->castAs<FunctionType>()->getCallConv();
@@ -9416,12 +9414,12 @@ Sema::AddArgumentDependentLookupCandidates(DeclarationName Name,
AddOverloadCandidate(
FD, FoundDecl, Args, CandidateSet, /*SuppressUserConversions=*/false,
PartialOverloading, /*AllowExplicit=*/true,
- /*AllowExplicitConversions=*/false, ADLCallKind::UsesADL);
+ /*AllowExplicitConversion=*/false, ADLCallKind::UsesADL);
if (CandidateSet.getRewriteInfo().shouldAddReversed(Context, FD)) {
AddOverloadCandidate(
FD, FoundDecl, {Args[1], Args[0]}, CandidateSet,
/*SuppressUserConversions=*/false, PartialOverloading,
- /*AllowExplicit=*/true, /*AllowExplicitConversions=*/false,
+ /*AllowExplicit=*/true, /*AllowExplicitConversion=*/false,
ADLCallKind::UsesADL, None, OverloadCandidateParamOrder::Reversed);
}
} else {
@@ -14322,8 +14320,7 @@ ExprResult Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
FoundDecl = MemExpr->getFoundDecl();
Qualifier = MemExpr->getQualifier();
UnbridgedCasts.restore();
- } else {
- UnresolvedMemberExpr *UnresExpr = cast<UnresolvedMemberExpr>(NakedMemExpr);
+ } else if (auto *UnresExpr = dyn_cast<UnresolvedMemberExpr>(NakedMemExpr)) {
Qualifier = UnresExpr->getQualifier();
QualType ObjectType = UnresExpr->getBaseType();
@@ -14436,7 +14433,9 @@ ExprResult Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
}
MemExpr = cast<MemberExpr>(MemExprE->IgnoreParens());
- }
+ } else
+ // Unexpected NakedMemExpr type.
+ return ExprError();
QualType ResultType = Method->getReturnType();
ExprValueKind VK = Expr::getValueKindForType(ResultType);
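
Several SemaOverload changes above swap Type::getAs<T>() for Type::castAs<T>() at call sites where the conversion is already known to succeed: getAs returns null on a mismatch, while castAs asserts and never yields null, so the result needs no check. A rough standalone analogue of that contract, using toy classes rather than Clang's type system:

#include <cassert>

struct Type { virtual ~Type() = default; };
struct FunctionProtoType : Type {};

// getAs-style: may return nullptr, so every caller has to check the result.
template <typename T> const T *getAsLike(const Type *Ty) {
  return dynamic_cast<const T *>(Ty);
}

// castAs-style: documents that the dynamic type is already known; asserts
// instead of handing back a null pointer.
template <typename T> const T *castAsLike(const Type *Ty) {
  const T *P = dynamic_cast<const T *>(Ty);
  assert(P && "castAsLike used on a mismatched type");
  return P;
}

int main() {
  FunctionProtoType FPT;
  const Type *Ty = &FPT;
  const auto *Proto = castAsLike<FunctionProtoType>(Ty); // safe: type is known
  (void)Proto;
  return getAsLike<FunctionProtoType>(Ty) ? 0 : 1;
}
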
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaSYCL.cpp b/contrib/llvm-project/clang/lib/Sema/SemaSYCL.cpp
index 815463307ecc..f8c713c8545d 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaSYCL.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaSYCL.cpp
@@ -48,3 +48,101 @@ bool Sema::checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee) {
return DiagKind != SemaDiagnosticBuilder::K_Immediate &&
DiagKind != SemaDiagnosticBuilder::K_ImmediateWithCallStack;
}
+
+static bool isZeroSizedArray(Sema &SemaRef, QualType Ty) {
+ if (const auto *CAT = SemaRef.getASTContext().getAsConstantArrayType(Ty))
+ return CAT->getSize() == 0;
+ return false;
+}
+
+void Sema::deepTypeCheckForSYCLDevice(SourceLocation UsedAt,
+ llvm::DenseSet<QualType> Visited,
+ ValueDecl *DeclToCheck) {
+ assert(getLangOpts().SYCLIsDevice &&
+ "Should only be called during SYCL compilation");
+ // Emit notes only for the first declaration of an unsupported type that we
+ // discover, to avoid a flood of notes; this flag tracks whether that happened.
+ bool NeedToEmitNotes = true;
+
+ auto Check = [&](QualType TypeToCheck, const ValueDecl *D) {
+ bool ErrorFound = false;
+ if (isZeroSizedArray(*this, TypeToCheck)) {
+ SYCLDiagIfDeviceCode(UsedAt, diag::err_typecheck_zero_array_size) << 1;
+ ErrorFound = true;
+ }
+ // Checks for other types can also be done here.
+ if (ErrorFound) {
+ if (NeedToEmitNotes) {
+ if (auto *FD = dyn_cast<FieldDecl>(D))
+ SYCLDiagIfDeviceCode(FD->getLocation(),
+ diag::note_illegal_field_declared_here)
+ << FD->getType()->isPointerType() << FD->getType();
+ else
+ SYCLDiagIfDeviceCode(D->getLocation(), diag::note_declared_at);
+ }
+ }
+
+ return ErrorFound;
+ };
+
+ // If a record type is used, do a DFS looking for a bad field.
+ SmallVector<const ValueDecl *, 4> StackForRecursion;
+ StackForRecursion.push_back(DeclToCheck);
+
+ // While doing DFS save how we get there to emit a nice set of notes.
+ SmallVector<const FieldDecl *, 4> History;
+ History.push_back(nullptr);
+
+ do {
+ const ValueDecl *Next = StackForRecursion.pop_back_val();
+ if (!Next) {
+ assert(!History.empty());
+ // Found a marker, we have gone up a level.
+ History.pop_back();
+ continue;
+ }
+ QualType NextTy = Next->getType();
+
+ if (!Visited.insert(NextTy).second)
+ continue;
+
+ auto EmitHistory = [&]() {
+ // The first element is always nullptr.
+ for (uint64_t Index = 1; Index < History.size(); ++Index) {
+ SYCLDiagIfDeviceCode(History[Index]->getLocation(),
+ diag::note_within_field_of_type)
+ << History[Index]->getType();
+ }
+ };
+
+ if (Check(NextTy, Next)) {
+ if (NeedToEmitNotes)
+ EmitHistory();
+ NeedToEmitNotes = false;
+ }
+
+ // If a pointer/array/reference type is met, get the pointee type, then
+ // proceed with that type.
+ while (NextTy->isAnyPointerType() || NextTy->isArrayType() ||
+ NextTy->isReferenceType()) {
+ if (NextTy->isArrayType())
+ NextTy = QualType{NextTy->getArrayElementTypeNoTypeQual(), 0};
+ else
+ NextTy = NextTy->getPointeeType();
+ if (Check(NextTy, Next)) {
+ if (NeedToEmitNotes)
+ EmitHistory();
+ NeedToEmitNotes = false;
+ }
+ }
+
+ if (const auto *RecDecl = NextTy->getAsRecordDecl()) {
+ if (auto *NextFD = dyn_cast<FieldDecl>(Next))
+ History.push_back(NextFD);
+ // When nullptr is discovered, this means we've gone back up a level, so
+ // the history should be cleaned.
+ StackForRecursion.push_back(nullptr);
+ llvm::copy(RecDecl->fields(), std::back_inserter(StackForRecursion));
+ }
+ } while (!StackForRecursion.empty());
+}
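
The new Sema::deepTypeCheckForSYCLDevice above walks record fields depth-first while keeping a History of the field path; pushing a nullptr marker on the work stack records where one record's fields end, so popping the marker pops one level of history. A self-contained sketch of that marker technique on a plain field tree (illustrative types, not the SYCL checker):

#include <iostream>
#include <string>
#include <vector>

struct Field {
  std::string Name;
  bool Bad = false;             // stands in for "unsupported type"
  std::vector<Field> Fields;    // nested record members
};

void reportBadFields(const Field &Root) {
  std::vector<const Field *> Stack{&Root};
  std::vector<const Field *> History{nullptr}; // first element is a sentinel

  while (!Stack.empty()) {
    const Field *Next = Stack.back();
    Stack.pop_back();
    if (!Next) {                // marker: we climbed back up one level
      History.pop_back();
      continue;
    }
    if (Next->Bad) {
      std::cout << "bad field: " << Next->Name << " reached via";
      for (size_t I = 1; I < History.size(); ++I) // skip the sentinel
        std::cout << ' ' << History[I]->Name;
      std::cout << '\n';
    }
    if (!Next->Fields.empty()) {
      History.push_back(Next);
      Stack.push_back(nullptr); // pop History once these children are done
      for (const Field &F : Next->Fields)
        Stack.push_back(&F);
    }
  }
}

int main() {
  Field Root{"outer", false, {{"inner", false, {{"zero_sized", true, {}}}}}};
  reportBadFields(Root);
}
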
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp b/contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp
index 1d90759f2406..746eb82a5bdc 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp
@@ -410,9 +410,13 @@ StmtResult Sema::ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr) {
const unsigned NumElts = Elts.size();
- // If we're in C89 mode, check that we don't have any decls after stmts. If
- // so, emit an extension diagnostic.
- if (!getLangOpts().C99 && !getLangOpts().CPlusPlus) {
+ // If we're in C mode, check that we don't have any decls after stmts. If
+ // so, emit an extension diagnostic in C89 and potentially a warning in later
+ // versions.
+ const unsigned MixedDeclsCodeID = getLangOpts().C99
+ ? diag::warn_mixed_decls_code
+ : diag::ext_mixed_decls_code;
+ if (!getLangOpts().CPlusPlus && !Diags.isIgnored(MixedDeclsCodeID, L)) {
// Note that __extension__ can be around a decl.
unsigned i = 0;
// Skip over all declarations.
@@ -425,7 +429,7 @@ StmtResult Sema::ActOnCompoundStmt(SourceLocation L, SourceLocation R,
if (i != NumElts) {
Decl *D = *cast<DeclStmt>(Elts[i])->decl_begin();
- Diag(D->getLocation(), diag::ext_mixed_decls_code);
+ Diag(D->getLocation(), MixedDeclsCodeID);
}
}
@@ -869,12 +873,7 @@ StmtResult Sema::ActOnIfStmt(SourceLocation IfLoc,
Stmt *thenStmt, SourceLocation ElseLoc,
Stmt *elseStmt) {
if (Cond.isInvalid())
- Cond = ConditionResult(
- *this, nullptr,
- MakeFullExpr(new (Context) OpaqueValueExpr(SourceLocation(),
- Context.BoolTy, VK_PRValue),
- IfLoc),
- false);
+ return StmtError();
bool ConstevalOrNegatedConsteval =
StatementKind == IfStatementKind::ConstevalNonNegated ||
@@ -2468,6 +2467,7 @@ StmtResult Sema::ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
Stmt *First, SourceLocation ColonLoc,
Expr *Range, SourceLocation RParenLoc,
BuildForRangeKind Kind) {
+ // FIXME: recover in order to allow the body to be parsed.
if (!First)
return StmtError();
@@ -3878,7 +3878,8 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
RetValExp, nullptr, /*RecoverUncorrectedTypos=*/true);
if (RetVal.isInvalid())
return StmtError();
- StmtResult R = BuildReturnStmt(ReturnLoc, RetVal.get());
+ StmtResult R =
+ BuildReturnStmt(ReturnLoc, RetVal.get(), /*AllowRecovery=*/true);
if (R.isInvalid() || ExprEvalContexts.back().isDiscardedStatementContext())
return R;
@@ -3908,7 +3909,8 @@ static bool CheckSimplerImplicitMovesMSVCWorkaround(const Sema &S,
return false;
}
-StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
+StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
+ bool AllowRecovery) {
// Check for unexpanded parameter packs.
if (RetValExp && DiagnoseUnexpandedParameterPack(RetValExp))
return StmtError();
@@ -3985,11 +3987,25 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
// If we've already decided this function is invalid, e.g. because
// we saw a `return` whose expression had an error, don't keep
// trying to deduce its return type.
- if (FD->isInvalidDecl())
- return StmtError();
- if (DeduceFunctionTypeFromReturnExpr(FD, ReturnLoc, RetValExp, AT)) {
+ // (Some return values may be needlessly wrapped in RecoveryExpr).
+ if (FD->isInvalidDecl() ||
+ DeduceFunctionTypeFromReturnExpr(FD, ReturnLoc, RetValExp, AT)) {
FD->setInvalidDecl();
- return StmtError();
+ if (!AllowRecovery)
+ return StmtError();
+ // The deduction failure is diagnosed and marked, try to recover.
+ if (RetValExp) {
+ // Wrap return value with a recovery expression of the previous type.
+ // If no deduction yet, use DependentTy.
+ auto Recovery = CreateRecoveryExpr(
+ RetValExp->getBeginLoc(), RetValExp->getEndLoc(), RetValExp,
+ AT->isDeduced() ? FnRetType : QualType());
+ if (Recovery.isInvalid())
+ return StmtError();
+ RetValExp = Recovery.get();
+ } else {
+ // Nothing to do: a ReturnStmt with no value is fine recovery.
+ }
} else {
FnRetType = FD->getReturnType();
}
@@ -4002,7 +4018,7 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
ReturnStmt *Result = nullptr;
if (FnRetType->isVoidType()) {
if (RetValExp) {
- if (isa<InitListExpr>(RetValExp)) {
+ if (auto *ILE = dyn_cast<InitListExpr>(RetValExp)) {
// We simply never allow init lists as the return value of void
// functions. This is compatible because this was never allowed before,
// so there's no legacy code to deal with.
@@ -4018,8 +4034,12 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
Diag(ReturnLoc, diag::err_return_init_list)
<< CurDecl << FunctionKind << RetValExp->getSourceRange();
- // Drop the expression.
- RetValExp = nullptr;
+ // Preserve the initializers in the AST.
+ RetValExp = AllowRecovery
+ ? CreateRecoveryExpr(ILE->getLBraceLoc(),
+ ILE->getRBraceLoc(), ILE->inits())
+ .get()
+ : nullptr;
} else if (!RetValExp->isTypeDependent()) {
// C99 6.8.6.4p1 (ext_ since GCC warns)
unsigned D = diag::ext_return_has_expr;
@@ -4116,6 +4136,9 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
InitializedEntity::InitializeResult(ReturnLoc, RetType);
ExprResult Res = PerformMoveOrCopyInitialization(
Entity, NRInfo, RetValExp, SupressSimplerImplicitMoves);
+ if (Res.isInvalid() && AllowRecovery)
+ Res = CreateRecoveryExpr(RetValExp->getBeginLoc(),
+ RetValExp->getEndLoc(), RetValExp, RetType);
if (Res.isInvalid()) {
// FIXME: Clean up temporaries here anyway?
return StmtError();
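
The BuildReturnStmt changes above add an AllowRecovery path: when checking or initializing the return value fails, the expression is wrapped in a RecoveryExpr instead of the whole statement being dropped, so later phases still see the operands. A loose, Clang-independent sketch of that try, then fall back to a recovery wrapper, then give up shape:

#include <optional>
#include <string>

struct Node { std::string Kind; std::string Payload; };

// Stand-ins for "type-check the value" and "wrap it for error recovery".
std::optional<Node> checkReturnValue(const std::string &Text) {
  if (Text.empty())
    return std::nullopt;          // the real check failed
  return Node{"ReturnValue", Text};
}

Node makeRecoveryNode(const std::string &Text) {
  return Node{"Recovery", Text};  // keeps the operands, marks them invalid
}

std::optional<Node> buildReturn(const std::string &Text, bool AllowRecovery) {
  std::optional<Node> Res = checkReturnValue(Text);
  if (!Res && AllowRecovery)
    Res = makeRecoveryNode(Text); // second chance: preserve what we parsed
  if (!Res)
    return std::nullopt;          // no recovery allowed: drop the statement
  return Res;
}
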
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
index 2482f6d404ea..64a0b45feb98 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
@@ -2063,7 +2063,7 @@ DeclResult Sema::CheckClassTemplate(
}
if (PrevClassTemplate)
- CheckRedeclarationModuleOwnership(NewTemplate, PrevClassTemplate);
+ CheckRedeclarationInModule(NewTemplate, PrevClassTemplate);
if (Invalid) {
NewTemplate->setInvalidDecl();
@@ -3672,7 +3672,7 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
SmallVector<TemplateArgument, 4> Converted;
if (CheckTemplateArgumentList(Template, TemplateLoc, TemplateArgs,
false, Converted,
- /*UpdateArgsWithConversion=*/true))
+ /*UpdateArgsWithConversions=*/true))
return QualType();
QualType CanonType;
@@ -4318,7 +4318,7 @@ DeclResult Sema::ActOnVarTemplateSpecialization(
SmallVector<TemplateArgument, 4> Converted;
if (CheckTemplateArgumentList(VarTemplate, TemplateNameLoc, TemplateArgs,
false, Converted,
- /*UpdateArgsWithConversion=*/true))
+ /*UpdateArgsWithConversions=*/true))
return true;
// Find the variable template (partial) specialization declaration that
@@ -4489,7 +4489,7 @@ Sema::CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc,
if (CheckTemplateArgumentList(
Template, TemplateNameLoc,
const_cast<TemplateArgumentListInfo &>(TemplateArgs), false,
- Converted, /*UpdateArgsWithConversion=*/true))
+ Converted, /*UpdateArgsWithConversions=*/true))
return true;
// Produce a placeholder value if the specialization is dependent.
@@ -4677,7 +4677,7 @@ Sema::CheckConceptTemplateId(const CXXScopeSpec &SS,
if (CheckTemplateArgumentList(NamedConcept, ConceptNameInfo.getLoc(),
const_cast<TemplateArgumentListInfo&>(*TemplateArgs),
/*PartialTemplateArgs=*/false, Converted,
- /*UpdateArgsWithConversion=*/false))
+ /*UpdateArgsWithConversions=*/false))
return ExprError();
ConstraintSatisfaction Satisfaction;
@@ -8343,7 +8343,7 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
SmallVector<TemplateArgument, 4> Converted;
if (CheckTemplateArgumentList(ClassTemplate, TemplateNameLoc,
TemplateArgs, false, Converted,
- /*UpdateArgsWithConversion=*/true))
+ /*UpdateArgsWithConversions=*/true))
return true;
// Find the class template (partial) specialization declaration that
@@ -9595,7 +9595,7 @@ DeclResult Sema::ActOnExplicitInstantiation(
SmallVector<TemplateArgument, 4> Converted;
if (CheckTemplateArgumentList(ClassTemplate, TemplateNameLoc,
TemplateArgs, false, Converted,
- /*UpdateArgsWithConversion=*/true))
+ /*UpdateArgsWithConversions=*/true))
return true;
// Find the class template specialization declaration that
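
The SemaTemplate changes above only rename /*UpdateArgsWithConversion=*/ comments to /*UpdateArgsWithConversions=*/ so they spell the callee's parameter name exactly; checks such as clang-tidy's bugprone-argument-comment rely on that match. A trivial illustration with a hypothetical function:

// The comment at a call site must spell the parameter name exactly for
// argument-comment checks to validate it against the declaration.
bool CheckTemplateArgs(bool PartialTemplateArgs, bool UpdateArgsWithConversions) {
  return !PartialTemplateArgs && UpdateArgsWithConversions;
}

bool caller() {
  return CheckTemplateArgs(/*PartialTemplateArgs=*/false,
                           /*UpdateArgsWithConversions=*/true);
}
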
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp
index e9636d2b942e..22dd395d9943 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp
@@ -4452,7 +4452,7 @@ namespace {
public:
SubstituteDeducedTypeTransform(Sema &SemaRef, DependentAuto DA)
- : TreeTransform<SubstituteDeducedTypeTransform>(SemaRef), Replacement(),
+ : TreeTransform<SubstituteDeducedTypeTransform>(SemaRef),
ReplacementIsPack(DA.IsPack), UseTypeSugar(true) {}
SubstituteDeducedTypeTransform(Sema &SemaRef, QualType Replacement,
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp
index 7d4c000e7e90..7c6bb4c8a5f8 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -2790,11 +2790,10 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
CurrentInstantiationScope = I->Scope;
// Allow 'this' within late-parsed attributes.
- NamedDecl *ND = dyn_cast<NamedDecl>(I->NewDecl);
- CXXRecordDecl *ThisContext =
- dyn_cast_or_null<CXXRecordDecl>(ND->getDeclContext());
+ auto *ND = cast<NamedDecl>(I->NewDecl);
+ auto *ThisContext = dyn_cast_or_null<CXXRecordDecl>(ND->getDeclContext());
CXXThisScopeRAII ThisScope(*this, ThisContext, Qualifiers(),
- ND && ND->isCXXInstanceMember());
+ ND->isCXXInstanceMember());
Attr *NewAttr =
instantiateTemplateAttribute(I->TmplAttr, Context, *this, TemplateArgs);
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index 27ac2cd08f2a..1da0dfec3f23 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -3637,7 +3637,7 @@ TemplateDeclInstantiator::VisitClassTemplateSpecializationDecl(
InstTemplateArgs,
false,
Converted,
- /*UpdateArgsWithConversion=*/true))
+ /*UpdateArgsWithConversions=*/true))
return nullptr;
// Figure out where to insert this class template explicit specialization
@@ -3759,7 +3759,7 @@ Decl *TemplateDeclInstantiator::VisitVarTemplateSpecializationDecl(
SmallVector<TemplateArgument, 4> Converted;
if (SemaRef.CheckTemplateArgumentList(InstVarTemplate, D->getLocation(),
VarTemplateArgsInfo, false, Converted,
- /*UpdateArgsWithConversion=*/true))
+ /*UpdateArgsWithConversions=*/true))
return nullptr;
// Check whether we've already seen a declaration of this specialization.
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaType.cpp b/contrib/llvm-project/clang/lib/Sema/SemaType.cpp
index 7a038301a249..959f4903b030 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaType.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaType.cpp
@@ -22,6 +22,7 @@
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeLocVisitor.h"
#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
@@ -1495,8 +1496,8 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
}
case DeclSpec::TST_int128:
if (!S.Context.getTargetInfo().hasInt128Type() &&
- !S.getLangOpts().SYCLIsDevice &&
- !(S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsDevice))
+ !(S.getLangOpts().SYCLIsDevice || S.getLangOpts().CUDAIsDevice ||
+ (S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsDevice)))
S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported)
<< "__int128";
if (DS.getTypeSpecSign() == TypeSpecifierSign::Unsigned)
@@ -2515,7 +2516,7 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Diag(ArraySize->getBeginLoc(),
isSFINAEContext() ? diag::err_typecheck_zero_array_size
: diag::ext_typecheck_zero_array_size)
- << ArraySize->getSourceRange();
+ << 0 << ArraySize->getSourceRange();
}
// Is the array too large?
@@ -5973,6 +5974,11 @@ namespace {
Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
TL.setUnderlyingTInfo(TInfo);
}
+ void VisitDecltypeTypeLoc(DecltypeTypeLoc TL) {
+ assert(DS.getTypeSpecType() == DeclSpec::TST_decltype);
+ TL.setDecltypeLoc(DS.getTypeSpecTypeLoc());
+ TL.setRParenLoc(DS.getTypeofParensRange().getEnd());
+ }
void VisitUnaryTransformTypeLoc(UnaryTransformTypeLoc TL) {
// FIXME: This holds only because we only have one unary transform.
assert(DS.getTypeSpecType() == DeclSpec::TST_underlyingType);
@@ -6036,6 +6042,8 @@ namespace {
DS.getTypeSpecType() == TST_auto_type ||
DS.getTypeSpecType() == TST_unspecified);
TL.setNameLoc(DS.getTypeSpecTypeLoc());
+ if (DS.getTypeSpecType() == TST_decltype_auto)
+ TL.setRParenLoc(DS.getTypeofParensRange().getEnd());
if (!DS.isConstrainedAuto())
return;
TemplateIdAnnotation *TemplateId = DS.getRepAsTemplateId();
diff --git a/contrib/llvm-project/clang/lib/Sema/TreeTransform.h b/contrib/llvm-project/clang/lib/Sema/TreeTransform.h
index 298a3f7a83d8..e43b3ca968eb 100644
--- a/contrib/llvm-project/clang/lib/Sema/TreeTransform.h
+++ b/contrib/llvm-project/clang/lib/Sema/TreeTransform.h
@@ -4076,7 +4076,8 @@ Sema::ConditionResult TreeTransform<Derived>::TransformCondition(
if (CondExpr.isInvalid())
return Sema::ConditionError();
- return getSema().ActOnCondition(nullptr, Loc, CondExpr.get(), Kind);
+ return getSema().ActOnCondition(nullptr, Loc, CondExpr.get(), Kind,
+ /*MissingOK=*/true);
}
return Sema::ConditionResult();
@@ -6228,15 +6229,15 @@ QualType TreeTransform<Derived>::TransformDecltypeType(TypeLocBuilder &TLB,
QualType Result = TL.getType();
if (getDerived().AlwaysRebuild() ||
E.get() != T->getUnderlyingExpr()) {
- Result = getDerived().RebuildDecltypeType(E.get(), TL.getNameLoc());
+ Result = getDerived().RebuildDecltypeType(E.get(), TL.getDecltypeLoc());
if (Result.isNull())
return QualType();
}
else E.get();
DecltypeTypeLoc NewTL = TLB.push<DecltypeTypeLoc>(Result);
- NewTL.setNameLoc(TL.getNameLoc());
-
+ NewTL.setDecltypeLoc(TL.getDecltypeLoc());
+ NewTL.setRParenLoc(TL.getRParenLoc());
return Result;
}
@@ -6634,6 +6635,7 @@ QualType TreeTransform<Derived>::TransformAutoType(TypeLocBuilder &TLB,
NewTL.setFoundDecl(TL.getFoundDecl());
NewTL.setLAngleLoc(TL.getLAngleLoc());
NewTL.setRAngleLoc(TL.getRAngleLoc());
+ NewTL.setRParenLoc(TL.getRParenLoc());
for (unsigned I = 0; I < NewTL.getNumArgs(); ++I)
NewTL.setArgLocInfo(I, NewTemplateArgs.arguments()[I].getLocInfo());
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp
index f93e0d2ed1c4..d806fb9e1949 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp
@@ -142,7 +142,6 @@ using namespace clang;
using namespace clang::serialization;
using namespace clang::serialization::reader;
using llvm::BitstreamCursor;
-using llvm::RoundingMode;
//===----------------------------------------------------------------------===//
// ChainedASTReaderListener implementation
@@ -1888,10 +1887,6 @@ HeaderFileInfoTrait::ReadData(internal_key_ref key, const unsigned char *d,
HFI.isPragmaOnce |= (Flags >> 4) & 0x01;
HFI.DirInfo = (Flags >> 1) & 0x07;
HFI.IndexHeaderMapHeader = Flags & 0x01;
- // FIXME: Find a better way to handle this. Maybe just store a
- // "has been included" flag?
- HFI.NumIncludes = std::max(endian::readNext<uint16_t, little, unaligned>(d),
- HFI.NumIncludes);
HFI.ControllingMacroID = Reader.getGlobalIdentifierID(
M, endian::readNext<uint32_t, little, unaligned>(d));
if (unsigned FrameworkOffset =
@@ -2963,6 +2958,22 @@ ASTReader::ReadControlBlock(ModuleFile &F,
}
}
+void ASTReader::readIncludedFiles(ModuleFile &F, StringRef Blob,
+ Preprocessor &PP) {
+ using namespace llvm::support;
+
+ const unsigned char *D = (const unsigned char *)Blob.data();
+ unsigned FileCount = endian::readNext<uint32_t, little, unaligned>(D);
+
+ for (unsigned I = 0; I < FileCount; ++I) {
+ size_t ID = endian::readNext<uint32_t, little, unaligned>(D);
+ InputFileInfo IFI = readInputFileInfo(F, ID);
+ if (llvm::ErrorOr<const FileEntry *> File =
+ PP.getFileManager().getFile(IFI.Filename))
+ PP.getIncludedFiles().insert(*File);
+ }
+}
+
llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
unsigned ClientLoadCapabilities) {
BitstreamCursor &Stream = F.Stream;
@@ -3701,6 +3712,10 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
break;
}
+ case PP_INCLUDED_FILES:
+ readIncludedFiles(F, Blob, PP);
+ break;
+
case LATE_PARSED_TEMPLATE:
LateParsedTemplates.emplace_back(
std::piecewise_construct, std::forward_as_tuple(&F),
@@ -4762,11 +4777,11 @@ ASTReader::ASTReadResult ASTReader::readUnhashedControlBlockImpl(
break;
unsigned Count = Record[0];
const char *Byte = Blob.data();
- F->SearchPathUsage = llvm::BitVector(Count, 0);
+ F->SearchPathUsage = llvm::BitVector(Count, false);
for (unsigned I = 0; I < Count; ++Byte)
for (unsigned Bit = 0; Bit < 8 && I < Count; ++Bit, ++I)
if (*Byte & (1 << Bit))
- F->SearchPathUsage[I] = 1;
+ F->SearchPathUsage[I] = true;
break;
}
}
@@ -6629,7 +6644,8 @@ void TypeLocReader::VisitTypeOfTypeLoc(TypeOfTypeLoc TL) {
}
void TypeLocReader::VisitDecltypeTypeLoc(DecltypeTypeLoc TL) {
- TL.setNameLoc(readSourceLocation());
+ TL.setDecltypeLoc(readSourceLocation());
+ TL.setRParenLoc(readSourceLocation());
}
void TypeLocReader::VisitUnaryTransformTypeLoc(UnaryTransformTypeLoc TL) {
@@ -6652,6 +6668,8 @@ void TypeLocReader::VisitAutoTypeLoc(AutoTypeLoc TL) {
TL.setArgLocInfo(i, Reader.readTemplateArgumentLocInfo(
TL.getTypePtr()->getArg(i).getKind()));
}
+ if (Reader.readBool())
+ TL.setRParenLoc(readSourceLocation());
}
void TypeLocReader::VisitDeducedTemplateSpecializationTypeLoc(
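
The new PP_INCLUDED_FILES record above is a raw blob: a little-endian uint32 count followed by that many uint32 input-file IDs. ASTReader::readIncludedFiles decodes it with endian::readNext, and the matching writer appears in the ASTWriter.cpp hunk further below. A dependency-free sketch of the same layout, using explicit byte packing instead of LLVM's endian helpers:

#include <cstdint>
#include <vector>

// Append one uint32 in little-endian byte order.
void writeLE32(std::vector<unsigned char> &Out, uint32_t V) {
  for (int Shift = 0; Shift < 32; Shift += 8)
    Out.push_back(static_cast<unsigned char>((V >> Shift) & 0xFF));
}

uint32_t readLE32(const unsigned char *&P) {
  uint32_t V = 0;
  for (int Shift = 0; Shift < 32; Shift += 8)
    V |= static_cast<uint32_t>(*P++) << Shift;
  return V;
}

// Blob layout: count, then `count` file IDs.
std::vector<unsigned char> encodeIncludedFiles(const std::vector<uint32_t> &IDs) {
  std::vector<unsigned char> Blob;
  writeLE32(Blob, static_cast<uint32_t>(IDs.size()));
  for (uint32_t ID : IDs)
    writeLE32(Blob, ID);
  return Blob;
}

std::vector<uint32_t> decodeIncludedFiles(const std::vector<unsigned char> &Blob) {
  const unsigned char *P = Blob.data();
  uint32_t Count = readLE32(P);
  std::vector<uint32_t> IDs;
  IDs.reserve(Count);
  for (uint32_t I = 0; I < Count; ++I)
    IDs.push_back(readLE32(P));
  return IDs;
}
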
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp
index 2144befcdb14..1ab26e58a404 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp
@@ -2945,391 +2945,6 @@ uint64_t ASTReader::getGlobalBitOffset(ModuleFile &M, uint64_t LocalOffset) {
return LocalOffset + M.GlobalBitOffset;
}
-static bool isSameTemplateParameterList(const ASTContext &C,
- const TemplateParameterList *X,
- const TemplateParameterList *Y);
-static bool isSameEntity(NamedDecl *X, NamedDecl *Y);
-
-/// Determine whether two template parameters are similar enough
-/// that they may be used in declarations of the same template.
-static bool isSameTemplateParameter(const NamedDecl *X,
- const NamedDecl *Y) {
- if (X->getKind() != Y->getKind())
- return false;
-
- if (const auto *TX = dyn_cast<TemplateTypeParmDecl>(X)) {
- const auto *TY = cast<TemplateTypeParmDecl>(Y);
- if (TX->isParameterPack() != TY->isParameterPack())
- return false;
- if (TX->hasTypeConstraint() != TY->hasTypeConstraint())
- return false;
- const TypeConstraint *TXTC = TX->getTypeConstraint();
- const TypeConstraint *TYTC = TY->getTypeConstraint();
- if (!TXTC != !TYTC)
- return false;
- if (TXTC && TYTC) {
- auto *NCX = TXTC->getNamedConcept();
- auto *NCY = TYTC->getNamedConcept();
- if (!NCX || !NCY || !isSameEntity(NCX, NCY))
- return false;
- if (TXTC->hasExplicitTemplateArgs() != TYTC->hasExplicitTemplateArgs())
- return false;
- if (TXTC->hasExplicitTemplateArgs()) {
- const auto *TXTCArgs = TXTC->getTemplateArgsAsWritten();
- const auto *TYTCArgs = TYTC->getTemplateArgsAsWritten();
- if (TXTCArgs->NumTemplateArgs != TYTCArgs->NumTemplateArgs)
- return false;
- llvm::FoldingSetNodeID XID, YID;
- for (const auto &ArgLoc : TXTCArgs->arguments())
- ArgLoc.getArgument().Profile(XID, X->getASTContext());
- for (const auto &ArgLoc : TYTCArgs->arguments())
- ArgLoc.getArgument().Profile(YID, Y->getASTContext());
- if (XID != YID)
- return false;
- }
- }
- return true;
- }
-
- if (const auto *TX = dyn_cast<NonTypeTemplateParmDecl>(X)) {
- const auto *TY = cast<NonTypeTemplateParmDecl>(Y);
- return TX->isParameterPack() == TY->isParameterPack() &&
- TX->getASTContext().hasSameType(TX->getType(), TY->getType());
- }
-
- const auto *TX = cast<TemplateTemplateParmDecl>(X);
- const auto *TY = cast<TemplateTemplateParmDecl>(Y);
- return TX->isParameterPack() == TY->isParameterPack() &&
- isSameTemplateParameterList(TX->getASTContext(),
- TX->getTemplateParameters(),
- TY->getTemplateParameters());
-}
-
-static NamespaceDecl *getNamespace(const NestedNameSpecifier *X) {
- if (auto *NS = X->getAsNamespace())
- return NS;
- if (auto *NAS = X->getAsNamespaceAlias())
- return NAS->getNamespace();
- return nullptr;
-}
-
-static bool isSameQualifier(const NestedNameSpecifier *X,
- const NestedNameSpecifier *Y) {
- if (auto *NSX = getNamespace(X)) {
- auto *NSY = getNamespace(Y);
- if (!NSY || NSX->getCanonicalDecl() != NSY->getCanonicalDecl())
- return false;
- } else if (X->getKind() != Y->getKind())
- return false;
-
- // FIXME: For namespaces and types, we're permitted to check that the entity
- // is named via the same tokens. We should probably do so.
- switch (X->getKind()) {
- case NestedNameSpecifier::Identifier:
- if (X->getAsIdentifier() != Y->getAsIdentifier())
- return false;
- break;
- case NestedNameSpecifier::Namespace:
- case NestedNameSpecifier::NamespaceAlias:
- // We've already checked that we named the same namespace.
- break;
- case NestedNameSpecifier::TypeSpec:
- case NestedNameSpecifier::TypeSpecWithTemplate:
- if (X->getAsType()->getCanonicalTypeInternal() !=
- Y->getAsType()->getCanonicalTypeInternal())
- return false;
- break;
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Super:
- return true;
- }
-
- // Recurse into earlier portion of NNS, if any.
- auto *PX = X->getPrefix();
- auto *PY = Y->getPrefix();
- if (PX && PY)
- return isSameQualifier(PX, PY);
- return !PX && !PY;
-}
-
-/// Determine whether two template parameter lists are similar enough
-/// that they may be used in declarations of the same template.
-static bool isSameTemplateParameterList(const ASTContext &C,
- const TemplateParameterList *X,
- const TemplateParameterList *Y) {
- if (X->size() != Y->size())
- return false;
-
- for (unsigned I = 0, N = X->size(); I != N; ++I)
- if (!isSameTemplateParameter(X->getParam(I), Y->getParam(I)))
- return false;
-
- const Expr *XRC = X->getRequiresClause();
- const Expr *YRC = Y->getRequiresClause();
- if (!XRC != !YRC)
- return false;
- if (XRC) {
- llvm::FoldingSetNodeID XRCID, YRCID;
- XRC->Profile(XRCID, C, /*Canonical=*/true);
- YRC->Profile(YRCID, C, /*Canonical=*/true);
- if (XRCID != YRCID)
- return false;
- }
-
- return true;
-}
-
-/// Determine whether the attributes we can overload on are identical for A and
-/// B. Will ignore any overloadable attrs represented in the type of A and B.
-static bool hasSameOverloadableAttrs(const FunctionDecl *A,
- const FunctionDecl *B) {
- // Note that pass_object_size attributes are represented in the function's
- // ExtParameterInfo, so we don't need to check them here.
-
- llvm::FoldingSetNodeID Cand1ID, Cand2ID;
- auto AEnableIfAttrs = A->specific_attrs<EnableIfAttr>();
- auto BEnableIfAttrs = B->specific_attrs<EnableIfAttr>();
-
- for (auto Pair : zip_longest(AEnableIfAttrs, BEnableIfAttrs)) {
- Optional<EnableIfAttr *> Cand1A = std::get<0>(Pair);
- Optional<EnableIfAttr *> Cand2A = std::get<1>(Pair);
-
- // Return false if the number of enable_if attributes is different.
- if (!Cand1A || !Cand2A)
- return false;
-
- Cand1ID.clear();
- Cand2ID.clear();
-
- (*Cand1A)->getCond()->Profile(Cand1ID, A->getASTContext(), true);
- (*Cand2A)->getCond()->Profile(Cand2ID, B->getASTContext(), true);
-
- // Return false if any of the enable_if expressions of A and B are
- // different.
- if (Cand1ID != Cand2ID)
- return false;
- }
- return true;
-}
-
-/// Determine whether the two declarations refer to the same entity.
-static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
- if (X == Y)
- return true;
-
- if (X->getDeclName() != Y->getDeclName())
- return false;
-
- // Must be in the same context.
- //
- // Note that we can't use DeclContext::Equals here, because the DeclContexts
- // could be two different declarations of the same function. (We will fix the
- // semantic DC to refer to the primary definition after merging.)
- if (!declaresSameEntity(cast<Decl>(X->getDeclContext()->getRedeclContext()),
- cast<Decl>(Y->getDeclContext()->getRedeclContext())))
- return false;
-
- // Two typedefs refer to the same entity if they have the same underlying
- // type.
- if (const auto *TypedefX = dyn_cast<TypedefNameDecl>(X))
- if (const auto *TypedefY = dyn_cast<TypedefNameDecl>(Y))
- return X->getASTContext().hasSameType(TypedefX->getUnderlyingType(),
- TypedefY->getUnderlyingType());
-
- // Must have the same kind.
- if (X->getKind() != Y->getKind())
- return false;
-
- // Objective-C classes and protocols with the same name always match.
- if (isa<ObjCInterfaceDecl>(X) || isa<ObjCProtocolDecl>(X))
- return true;
-
- if (isa<ClassTemplateSpecializationDecl>(X)) {
- // No need to handle these here: we merge them when adding them to the
- // template.
- return false;
- }
-
- // Compatible tags match.
- if (const auto *TagX = dyn_cast<TagDecl>(X)) {
- const auto *TagY = cast<TagDecl>(Y);
- return (TagX->getTagKind() == TagY->getTagKind()) ||
- ((TagX->getTagKind() == TTK_Struct || TagX->getTagKind() == TTK_Class ||
- TagX->getTagKind() == TTK_Interface) &&
- (TagY->getTagKind() == TTK_Struct || TagY->getTagKind() == TTK_Class ||
- TagY->getTagKind() == TTK_Interface));
- }
-
- // Functions with the same type and linkage match.
- // FIXME: This needs to cope with merging of prototyped/non-prototyped
- // functions, etc.
- if (const auto *FuncX = dyn_cast<FunctionDecl>(X)) {
- const auto *FuncY = cast<FunctionDecl>(Y);
- if (const auto *CtorX = dyn_cast<CXXConstructorDecl>(X)) {
- const auto *CtorY = cast<CXXConstructorDecl>(Y);
- if (CtorX->getInheritedConstructor() &&
- !isSameEntity(CtorX->getInheritedConstructor().getConstructor(),
- CtorY->getInheritedConstructor().getConstructor()))
- return false;
- }
-
- if (FuncX->isMultiVersion() != FuncY->isMultiVersion())
- return false;
-
- // Multiversioned functions with different feature strings are represented
- // as separate declarations.
- if (FuncX->isMultiVersion()) {
- const auto *TAX = FuncX->getAttr<TargetAttr>();
- const auto *TAY = FuncY->getAttr<TargetAttr>();
- assert(TAX && TAY && "Multiversion Function without target attribute");
-
- if (TAX->getFeaturesStr() != TAY->getFeaturesStr())
- return false;
- }
-
- ASTContext &C = FuncX->getASTContext();
-
- const Expr *XRC = FuncX->getTrailingRequiresClause();
- const Expr *YRC = FuncY->getTrailingRequiresClause();
- if (!XRC != !YRC)
- return false;
- if (XRC) {
- llvm::FoldingSetNodeID XRCID, YRCID;
- XRC->Profile(XRCID, C, /*Canonical=*/true);
- YRC->Profile(YRCID, C, /*Canonical=*/true);
- if (XRCID != YRCID)
- return false;
- }
-
- auto GetTypeAsWritten = [](const FunctionDecl *FD) {
- // Map to the first declaration that we've already merged into this one.
- // The TSI of redeclarations might not match (due to calling conventions
- // being inherited onto the type but not the TSI), but the TSI type of
- // the first declaration of the function should match across modules.
- FD = FD->getCanonicalDecl();
- return FD->getTypeSourceInfo() ? FD->getTypeSourceInfo()->getType()
- : FD->getType();
- };
- QualType XT = GetTypeAsWritten(FuncX), YT = GetTypeAsWritten(FuncY);
- if (!C.hasSameType(XT, YT)) {
- // We can get functions with different types on the redecl chain in C++17
- // if they have differing exception specifications and at least one of
- // the excpetion specs is unresolved.
- auto *XFPT = XT->getAs<FunctionProtoType>();
- auto *YFPT = YT->getAs<FunctionProtoType>();
- if (C.getLangOpts().CPlusPlus17 && XFPT && YFPT &&
- (isUnresolvedExceptionSpec(XFPT->getExceptionSpecType()) ||
- isUnresolvedExceptionSpec(YFPT->getExceptionSpecType())) &&
- C.hasSameFunctionTypeIgnoringExceptionSpec(XT, YT))
- return true;
- return false;
- }
-
- return FuncX->getLinkageInternal() == FuncY->getLinkageInternal() &&
- hasSameOverloadableAttrs(FuncX, FuncY);
- }
-
- // Variables with the same type and linkage match.
- if (const auto *VarX = dyn_cast<VarDecl>(X)) {
- const auto *VarY = cast<VarDecl>(Y);
- if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) {
- ASTContext &C = VarX->getASTContext();
- if (C.hasSameType(VarX->getType(), VarY->getType()))
- return true;
-
- // We can get decls with different types on the redecl chain. Eg.
- // template <typename T> struct S { static T Var[]; }; // #1
- // template <typename T> T S<T>::Var[sizeof(T)]; // #2
- // Only? happens when completing an incomplete array type. In this case
- // when comparing #1 and #2 we should go through their element type.
- const ArrayType *VarXTy = C.getAsArrayType(VarX->getType());
- const ArrayType *VarYTy = C.getAsArrayType(VarY->getType());
- if (!VarXTy || !VarYTy)
- return false;
- if (VarXTy->isIncompleteArrayType() || VarYTy->isIncompleteArrayType())
- return C.hasSameType(VarXTy->getElementType(), VarYTy->getElementType());
- }
- return false;
- }
-
- // Namespaces with the same name and inlinedness match.
- if (const auto *NamespaceX = dyn_cast<NamespaceDecl>(X)) {
- const auto *NamespaceY = cast<NamespaceDecl>(Y);
- return NamespaceX->isInline() == NamespaceY->isInline();
- }
-
- // Identical template names and kinds match if their template parameter lists
- // and patterns match.
- if (const auto *TemplateX = dyn_cast<TemplateDecl>(X)) {
- const auto *TemplateY = cast<TemplateDecl>(Y);
- return isSameEntity(TemplateX->getTemplatedDecl(),
- TemplateY->getTemplatedDecl()) &&
- isSameTemplateParameterList(TemplateX->getASTContext(),
- TemplateX->getTemplateParameters(),
- TemplateY->getTemplateParameters());
- }
-
- // Fields with the same name and the same type match.
- if (const auto *FDX = dyn_cast<FieldDecl>(X)) {
- const auto *FDY = cast<FieldDecl>(Y);
- // FIXME: Also check the bitwidth is odr-equivalent, if any.
- return X->getASTContext().hasSameType(FDX->getType(), FDY->getType());
- }
-
- // Indirect fields with the same target field match.
- if (const auto *IFDX = dyn_cast<IndirectFieldDecl>(X)) {
- const auto *IFDY = cast<IndirectFieldDecl>(Y);
- return IFDX->getAnonField()->getCanonicalDecl() ==
- IFDY->getAnonField()->getCanonicalDecl();
- }
-
- // Enumerators with the same name match.
- if (isa<EnumConstantDecl>(X))
- // FIXME: Also check the value is odr-equivalent.
- return true;
-
- // Using shadow declarations with the same target match.
- if (const auto *USX = dyn_cast<UsingShadowDecl>(X)) {
- const auto *USY = cast<UsingShadowDecl>(Y);
- return USX->getTargetDecl() == USY->getTargetDecl();
- }
-
- // Using declarations with the same qualifier match. (We already know that
- // the name matches.)
- if (const auto *UX = dyn_cast<UsingDecl>(X)) {
- const auto *UY = cast<UsingDecl>(Y);
- return isSameQualifier(UX->getQualifier(), UY->getQualifier()) &&
- UX->hasTypename() == UY->hasTypename() &&
- UX->isAccessDeclaration() == UY->isAccessDeclaration();
- }
- if (const auto *UX = dyn_cast<UnresolvedUsingValueDecl>(X)) {
- const auto *UY = cast<UnresolvedUsingValueDecl>(Y);
- return isSameQualifier(UX->getQualifier(), UY->getQualifier()) &&
- UX->isAccessDeclaration() == UY->isAccessDeclaration();
- }
- if (const auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(X)) {
- return isSameQualifier(
- UX->getQualifier(),
- cast<UnresolvedUsingTypenameDecl>(Y)->getQualifier());
- }
-
- // Using-pack declarations are only created by instantiation, and match if
- // they're instantiated from matching UnresolvedUsing...Decls.
- if (const auto *UX = dyn_cast<UsingPackDecl>(X)) {
- return declaresSameEntity(
- UX->getInstantiatedFromUsingDecl(),
- cast<UsingPackDecl>(Y)->getInstantiatedFromUsingDecl());
- }
-
- // Namespace alias definitions with the same target match.
- if (const auto *NAX = dyn_cast<NamespaceAliasDecl>(X)) {
- const auto *NAY = cast<NamespaceAliasDecl>(Y);
- return NAX->getNamespace()->Equals(NAY->getNamespace());
- }
-
- return false;
-}
-
/// Find the context in which we should search for previous declarations when
/// looking for declarations to merge.
DeclContext *ASTDeclReader::getPrimaryContextForMerging(ASTReader &Reader,
@@ -3511,12 +3126,13 @@ ASTDeclReader::FindExistingResult ASTDeclReader::findExisting(NamedDecl *D) {
return Result;
}
+ ASTContext &C = Reader.getContext();
DeclContext *DC = D->getDeclContext()->getRedeclContext();
if (TypedefNameForLinkage) {
auto It = Reader.ImportedTypedefNamesForLinkage.find(
std::make_pair(DC, TypedefNameForLinkage));
if (It != Reader.ImportedTypedefNamesForLinkage.end())
- if (isSameEntity(It->second, D))
+ if (C.isSameEntity(It->second, D))
return FindExistingResult(Reader, D, It->second, AnonymousDeclNumber,
TypedefNameForLinkage);
// Go on to check in other places in case an existing typedef name
@@ -3528,7 +3144,7 @@ ASTDeclReader::FindExistingResult ASTDeclReader::findExisting(NamedDecl *D) {
// in its context by number.
if (auto *Existing = getAnonymousDeclForMerging(
Reader, D->getLexicalDeclContext(), AnonymousDeclNumber))
- if (isSameEntity(Existing, D))
+ if (C.isSameEntity(Existing, D))
return FindExistingResult(Reader, D, Existing, AnonymousDeclNumber,
TypedefNameForLinkage);
} else if (DC->isTranslationUnit() &&
@@ -3560,7 +3176,7 @@ ASTDeclReader::FindExistingResult ASTDeclReader::findExisting(NamedDecl *D) {
IEnd = IdResolver.end();
I != IEnd; ++I) {
if (NamedDecl *Existing = getDeclForMerging(*I, TypedefNameForLinkage))
- if (isSameEntity(Existing, D))
+ if (C.isSameEntity(Existing, D))
return FindExistingResult(Reader, D, Existing, AnonymousDeclNumber,
TypedefNameForLinkage);
}
@@ -3568,7 +3184,7 @@ ASTDeclReader::FindExistingResult ASTDeclReader::findExisting(NamedDecl *D) {
DeclContext::lookup_result R = MergeDC->noload_lookup(Name);
for (DeclContext::lookup_iterator I = R.begin(), E = R.end(); I != E; ++I) {
if (NamedDecl *Existing = getDeclForMerging(*I, TypedefNameForLinkage))
- if (isSameEntity(Existing, D))
+ if (C.isSameEntity(Existing, D))
return FindExistingResult(Reader, D, Existing, AnonymousDeclNumber,
TypedefNameForLinkage);
}
@@ -4781,10 +4397,12 @@ void ASTDeclReader::UpdateDecl(Decl *D,
case UPD_DECL_MARKED_OPENMP_DECLARETARGET: {
auto MapType = Record.readEnum<OMPDeclareTargetDeclAttr::MapTypeTy>();
auto DevType = Record.readEnum<OMPDeclareTargetDeclAttr::DevTypeTy>();
+ Expr *IndirectE = Record.readExpr();
+ bool Indirect = Record.readBool();
unsigned Level = Record.readInt();
D->addAttr(OMPDeclareTargetDeclAttr::CreateImplicit(
- Reader.getContext(), MapType, DevType, Level, readSourceRange(),
- AttributeCommonInfo::AS_Pragma));
+ Reader.getContext(), MapType, DevType, IndirectE, Indirect, Level,
+ readSourceRange(), AttributeCommonInfo::AS_Pragma));
break;
}
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTReaderInternals.h b/contrib/llvm-project/clang/lib/Serialization/ASTReaderInternals.h
index 265a77fdb215..4a4cfcce156d 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTReaderInternals.h
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTReaderInternals.h
@@ -30,7 +30,6 @@ class ASTReader;
class FileEntry;
struct HeaderFileInfo;
class HeaderSearch;
-class IdentifierTable;
class ObjCMethodDecl;
namespace serialization {
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp
index 65a780e67510..763fc9537c04 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp
@@ -427,7 +427,8 @@ void TypeLocWriter::VisitTypeOfTypeLoc(TypeOfTypeLoc TL) {
}
void TypeLocWriter::VisitDecltypeTypeLoc(DecltypeTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ Record.AddSourceLocation(TL.getDecltypeLoc());
+ Record.AddSourceLocation(TL.getRParenLoc());
}
void TypeLocWriter::VisitUnaryTransformTypeLoc(UnaryTransformTypeLoc TL) {
@@ -451,6 +452,9 @@ void TypeLocWriter::VisitAutoTypeLoc(AutoTypeLoc TL) {
Record.AddTemplateArgumentLocInfo(TL.getTypePtr()->getArg(I).getKind(),
TL.getArgLocInfo(I));
}
+ Record.push_back(TL.isDecltypeAuto());
+ if (TL.isDecltypeAuto())
+ Record.AddSourceLocation(TL.getRParenLoc());
}
void TypeLocWriter::VisitDeducedTemplateSpecializationTypeLoc(
@@ -858,6 +862,7 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(CUDA_PRAGMA_FORCE_HOST_DEVICE_DEPTH);
RECORD(PP_CONDITIONAL_STACK);
RECORD(DECLS_TO_CHECK_FOR_DEFERRED_DIAGS);
+ RECORD(PP_INCLUDED_FILES);
// SourceManager Block.
BLOCK(SOURCE_MANAGER_BLOCK);
@@ -1769,7 +1774,7 @@ namespace {
std::pair<unsigned, unsigned>
EmitKeyDataLength(raw_ostream& Out, key_type_ref key, data_type_ref Data) {
unsigned KeyLen = key.Filename.size() + 1 + 8 + 8;
- unsigned DataLen = 1 + 2 + 4 + 4;
+ unsigned DataLen = 1 + 4 + 4;
for (auto ModInfo : Data.KnownHeaders)
if (Writer.getLocalOrImportedSubmoduleID(ModInfo.getModule()))
DataLen += 4;
@@ -1801,7 +1806,6 @@ namespace {
| (Data.HFI.DirInfo << 1)
| Data.HFI.IndexHeaderMapHeader;
LE.write<uint8_t>(Flags);
- LE.write<uint16_t>(Data.HFI.NumIncludes);
if (!Data.HFI.ControllingMacro)
LE.write<uint32_t>(Data.HFI.ControllingMacroID);
@@ -2250,6 +2254,29 @@ static bool shouldIgnoreMacro(MacroDirective *MD, bool IsModule,
return false;
}
+void ASTWriter::writeIncludedFiles(raw_ostream &Out, const Preprocessor &PP) {
+ using namespace llvm::support;
+
+ const Preprocessor::IncludedFilesSet &IncludedFiles = PP.getIncludedFiles();
+
+ std::vector<uint32_t> IncludedInputFileIDs;
+ IncludedInputFileIDs.reserve(IncludedFiles.size());
+
+ for (const FileEntry *File : IncludedFiles) {
+ auto InputFileIt = InputFileIDs.find(File);
+ if (InputFileIt == InputFileIDs.end())
+ continue;
+ IncludedInputFileIDs.push_back(InputFileIt->second);
+ }
+
+ llvm::sort(IncludedInputFileIDs);
+
+ endian::Writer LE(Out, little);
+ LE.write<uint32_t>(IncludedInputFileIDs.size());
+ for (uint32_t ID : IncludedInputFileIDs)
+ LE.write<uint32_t>(ID);
+}
+
/// Writes the block containing the serialized form of the
/// preprocessor.
void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
@@ -2458,6 +2485,20 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
MacroOffsetsBase - ASTBlockStartOffset};
Stream.EmitRecordWithBlob(MacroOffsetAbbrev, Record, bytes(MacroOffsets));
}
+
+ {
+ auto Abbrev = std::make_shared<BitCodeAbbrev>();
+ Abbrev->Add(BitCodeAbbrevOp(PP_INCLUDED_FILES));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned IncludedFilesAbbrev = Stream.EmitAbbrev(std::move(Abbrev));
+
+ SmallString<2048> Buffer;
+ raw_svector_ostream Out(Buffer);
+ writeIncludedFiles(Out, PP);
+ RecordData::value_type Record[] = {PP_INCLUDED_FILES};
+ Stream.EmitRecordWithBlob(IncludedFilesAbbrev, Record, Buffer.data(),
+ Buffer.size());
+ }
}
void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec,
diff --git a/contrib/llvm-project/clang/lib/Serialization/ModuleManager.cpp b/contrib/llvm-project/clang/lib/Serialization/ModuleManager.cpp
index f4882c7be3f7..4fd217cf7a6e 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ModuleManager.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ModuleManager.cpp
@@ -304,23 +304,22 @@ ModuleManager::addInMemoryBuffer(StringRef FileName,
InMemoryBuffers[Entry] = std::move(Buffer);
}
-ModuleManager::VisitState *ModuleManager::allocateVisitState() {
+std::unique_ptr<ModuleManager::VisitState> ModuleManager::allocateVisitState() {
// Fast path: if we have a cached state, use it.
if (FirstVisitState) {
- VisitState *Result = FirstVisitState;
- FirstVisitState = FirstVisitState->NextState;
- Result->NextState = nullptr;
+ auto Result = std::move(FirstVisitState);
+ FirstVisitState = std::move(Result->NextState);
return Result;
}
// Allocate and return a new state.
- return new VisitState(size());
+ return std::make_unique<VisitState>(size());
}
-void ModuleManager::returnVisitState(VisitState *State) {
+void ModuleManager::returnVisitState(std::unique_ptr<VisitState> State) {
assert(State->NextState == nullptr && "Visited state is in list?");
- State->NextState = FirstVisitState;
- FirstVisitState = State;
+ State->NextState = std::move(FirstVisitState);
+ FirstVisitState = std::move(State);
}
void ModuleManager::setGlobalIndex(GlobalModuleIndex *Index) {
@@ -351,8 +350,6 @@ ModuleManager::ModuleManager(FileManager &FileMgr,
: FileMgr(FileMgr), ModuleCache(&ModuleCache),
PCHContainerRdr(PCHContainerRdr), HeaderSearchInfo(HeaderSearchInfo) {}
-ModuleManager::~ModuleManager() { delete FirstVisitState; }
-
void ModuleManager::visit(llvm::function_ref<bool(ModuleFile &M)> Visitor,
llvm::SmallPtrSetImpl<ModuleFile *> *ModuleFilesHit) {
// If the visitation order vector is the wrong size, recompute the order.
@@ -396,11 +393,10 @@ void ModuleManager::visit(llvm::function_ref<bool(ModuleFile &M)> Visitor,
assert(VisitOrder.size() == N && "Visitation order is wrong?");
- delete FirstVisitState;
FirstVisitState = nullptr;
}
- VisitState *State = allocateVisitState();
+ auto State = allocateVisitState();
unsigned VisitNumber = State->NextVisitNumber++;
// If the caller has provided us with a hit-set that came from the global
@@ -452,7 +448,7 @@ void ModuleManager::visit(llvm::function_ref<bool(ModuleFile &M)> Visitor,
} while (true);
}
- returnVisitState(State);
+ returnVisitState(std::move(State));
}
bool ModuleManager::lookupModuleFile(StringRef FileName, off_t ExpectedSize,
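
The ModuleManager.cpp hunks above convert the cached VisitState free list from raw pointers plus a manual delete in the destructor to std::unique_ptr, with ownership moved back and forth instead. A small standalone sketch of a unique_ptr-linked free list in the same spirit (names are illustrative):

#include <memory>

struct VisitState {
  unsigned NextVisitNumber = 1;
  std::unique_ptr<VisitState> NextState; // links cached states together
};

class StateCache {
  std::unique_ptr<VisitState> FirstVisitState; // owns the whole chain

public:
  std::unique_ptr<VisitState> allocate() {
    if (FirstVisitState) {                     // fast path: reuse a cached one
      auto Result = std::move(FirstVisitState);
      FirstVisitState = std::move(Result->NextState);
      return Result;
    }
    return std::make_unique<VisitState>();
  }

  void release(std::unique_ptr<VisitState> State) {
    State->NextState = std::move(FirstVisitState);
    FirstVisitState = std::move(State);        // no manual delete anywhere
  }
};
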
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
index 7cdd78b8adfb..7841fd82e370 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
@@ -302,7 +302,7 @@ class ExplodedGraphViewer : public Checker< check::EndAnalysis > {
public:
ExplodedGraphViewer() {}
void checkEndAnalysis(ExplodedGraph &G, BugReporter &B,ExprEngine &Eng) const {
- Eng.ViewGraph(0);
+ Eng.ViewGraph(false);
}
};
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
index 66ef781871ec..e2209e3debfd 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
@@ -22,15 +22,14 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "llvm/Support/YAMLTraits.h"
-#include <algorithm>
#include <limits>
#include <memory>
-#include <unordered_map>
#include <utility>
using namespace clang;
@@ -38,577 +37,651 @@ using namespace ento;
using namespace taint;
namespace {
-class GenericTaintChecker : public Checker<check::PreCall, check::PostCall> {
-public:
- static void *getTag() {
- static int Tag;
- return &Tag;
+
+class GenericTaintChecker;
+
+/// Check for CWE-134: Uncontrolled Format String.
+constexpr llvm::StringLiteral MsgUncontrolledFormatString =
+ "Untrusted data is used as a format string "
+ "(CWE-134: Uncontrolled Format String)";
+
+/// Check for:
+/// CERT/STR02-C. "Sanitize data passed to complex subsystems"
+/// CWE-78, "Failure to Sanitize Data into an OS Command"
+constexpr llvm::StringLiteral MsgSanitizeSystemArgs =
+ "Untrusted data is passed to a system call "
+ "(CERT/STR02-C. Sanitize data passed to complex subsystems)";
+
+/// Check if tainted data is used as a buffer size in strn.. functions,
+/// and allocators.
+constexpr llvm::StringLiteral MsgTaintedBufferSize =
+ "Untrusted data is used to specify the buffer size "
+ "(CERT/STR31-C. Guarantee that storage for strings has sufficient space "
+ "for character data and the null terminator)";
+
+/// Check if tainted data is used as a custom sink's parameter.
+constexpr llvm::StringLiteral MsgCustomSink =
+ "Untrusted data is passed to a user-defined sink";
+
+using ArgIdxTy = int;
+using ArgVecTy = llvm::SmallVector<ArgIdxTy, 2>;
+
+/// Denotes the return value.
+constexpr ArgIdxTy ReturnValueIndex{-1};
+
+static ArgIdxTy fromArgumentCount(unsigned Count) {
+ assert(Count <=
+ static_cast<std::size_t>(std::numeric_limits<ArgIdxTy>::max()) &&
+ "ArgIdxTy is not large enough to represent the number of arguments.");
+ return Count;
+}
+
+/// Check if the region the expression evaluates to is the standard input,
+/// and thus, is tainted.
+/// FIXME: Move this to Taint.cpp.
+bool isStdin(SVal Val, const ASTContext &ACtx) {
+ // FIXME: What if Val is NonParamVarRegion?
+
+ // The region should be symbolic; we do not know its value.
+ const auto *SymReg = dyn_cast_or_null<SymbolicRegion>(Val.getAsRegion());
+ if (!SymReg)
+ return false;
+
+ // Get it's symbol and find the declaration region it's pointing to.
+ const auto *Sm = dyn_cast<SymbolRegionValue>(SymReg->getSymbol());
+ if (!Sm)
+ return false;
+ const auto *DeclReg = dyn_cast<DeclRegion>(Sm->getRegion());
+ if (!DeclReg)
+ return false;
+
+ // This region corresponds to a declaration, find out if it's a global/extern
+ // variable named stdin with the proper type.
+ if (const auto *D = dyn_cast_or_null<VarDecl>(DeclReg->getDecl())) {
+ D = D->getCanonicalDecl();
+ // FIXME: This should look for an exact match.
+ if (D->getName().contains("stdin") && D->isExternC()) {
+ const QualType FILETy = ACtx.getFILEType().getCanonicalType();
+ const QualType Ty = D->getType().getCanonicalType();
+
+ if (Ty->isPointerType())
+ return Ty->getPointeeType() == FILETy;
+ }
}
+ return false;
+}
- void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
- void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+SVal getPointeeOf(const CheckerContext &C, Loc LValue) {
+ const QualType ArgTy = LValue.getType(C.getASTContext());
+ if (!ArgTy->isPointerType() || !ArgTy->getPointeeType()->isVoidType())
+ return C.getState()->getSVal(LValue);
- void printState(raw_ostream &Out, ProgramStateRef State, const char *NL,
- const char *Sep) const override;
+ // Do not dereference void pointers. Treat them as byte pointers instead.
+ // FIXME: we might want to consider more than just the first byte.
+ return C.getState()->getSVal(LValue, C.getASTContext().CharTy);
+}
+
+/// Given a pointer/reference argument, return the value it refers to.
+Optional<SVal> getPointeeOf(const CheckerContext &C, SVal Arg) {
+ if (auto LValue = Arg.getAs<Loc>())
+ return getPointeeOf(C, *LValue);
+ return None;
+}
- using ArgVector = SmallVector<unsigned, 2>;
- using SignedArgVector = SmallVector<int, 2>;
+/// Given a pointer, return the SVal of its pointee if the pointee is tainted;
+/// otherwise return the pointer's SVal itself if that is tainted.
+/// Also considers stdin as a taint source.
+Optional<SVal> getTaintedPointeeOrPointer(const CheckerContext &C, SVal Arg) {
+ const ProgramStateRef State = C.getState();
- enum class VariadicType { None, Src, Dst };
+ if (auto Pointee = getPointeeOf(C, Arg))
+ if (isTainted(State, *Pointee)) // FIXME: isTainted(...) ? Pointee : None;
+ return Pointee;
- /// Used to parse the configuration file.
- struct TaintConfiguration {
- using NameScopeArgs = std::tuple<std::string, std::string, ArgVector>;
-
- struct Propagation {
- std::string Name;
- std::string Scope;
- ArgVector SrcArgs;
- SignedArgVector DstArgs;
- VariadicType VarType;
- unsigned VarIndex;
- };
-
- std::vector<Propagation> Propagations;
- std::vector<NameScopeArgs> Filters;
- std::vector<NameScopeArgs> Sinks;
-
- TaintConfiguration() = default;
- TaintConfiguration(const TaintConfiguration &) = default;
- TaintConfiguration(TaintConfiguration &&) = default;
- TaintConfiguration &operator=(const TaintConfiguration &) = default;
- TaintConfiguration &operator=(TaintConfiguration &&) = default;
- };
+ if (isTainted(State, Arg))
+ return Arg;
+
+ // FIXME: This should be done by the isTainted() API.
+ if (isStdin(Arg, C.getASTContext()))
+ return Arg;
+
+ return None;
+}
- /// Convert SignedArgVector to ArgVector.
- ArgVector convertToArgVector(CheckerManager &Mgr, const std::string &Option,
- const SignedArgVector &Args);
+bool isTaintedOrPointsToTainted(const Expr *E, const ProgramStateRef &State,
+ CheckerContext &C) {
+ return getTaintedPointeeOrPointer(C, C.getSVal(E)).hasValue();
+}
- /// Parse the config.
- void parseConfiguration(CheckerManager &Mgr, const std::string &Option,
- TaintConfiguration &&Config);
+/// ArgSet is used to describe arguments relevant for taint detection or
+/// taint application. A discrete set of argument indexes and a variadic
+/// argument list signified by a starting index are supported.
+class ArgSet {
+public:
+ ArgSet() = default;
+ ArgSet(ArgVecTy &&DiscreteArgs, Optional<ArgIdxTy> VariadicIndex = None)
+ : DiscreteArgs(std::move(DiscreteArgs)),
+ VariadicIndex(std::move(VariadicIndex)) {}
- static const unsigned InvalidArgIndex{std::numeric_limits<unsigned>::max()};
- /// Denotes the return vale.
- static const unsigned ReturnValueIndex{std::numeric_limits<unsigned>::max() -
- 1};
+ bool contains(ArgIdxTy ArgIdx) const {
+ if (llvm::is_contained(DiscreteArgs, ArgIdx))
+ return true;
-private:
- mutable std::unique_ptr<BugType> BT;
- void initBugType() const {
- if (!BT)
- BT = std::make_unique<BugType>(this, "Use of Untrusted Data",
- "Untrusted Data");
+ return VariadicIndex && ArgIdx >= *VariadicIndex;
}
- struct FunctionData {
- FunctionData() = delete;
- FunctionData(const FunctionDecl *FDecl, StringRef Name,
- std::string FullName)
- : FDecl(FDecl), Name(Name), FullName(std::move(FullName)) {}
- FunctionData(const FunctionData &) = default;
- FunctionData(FunctionData &&) = default;
- FunctionData &operator=(const FunctionData &) = delete;
- FunctionData &operator=(FunctionData &&) = delete;
-
- static Optional<FunctionData> create(const CallEvent &Call,
- const CheckerContext &C) {
- if (!Call.getDecl())
- return None;
-
- const FunctionDecl *FDecl = Call.getDecl()->getAsFunction();
- if (!FDecl || (FDecl->getKind() != Decl::Function &&
- FDecl->getKind() != Decl::CXXMethod))
- return None;
-
- StringRef Name = C.getCalleeName(FDecl);
- std::string FullName = FDecl->getQualifiedNameAsString();
- if (Name.empty() || FullName.empty())
- return None;
-
- return FunctionData{FDecl, Name, std::move(FullName)};
- }
+ bool isEmpty() const { return DiscreteArgs.empty() && !VariadicIndex; }
- bool isInScope(StringRef Scope) const {
- return StringRef(FullName).startswith(Scope);
+ ArgVecTy ArgsUpTo(ArgIdxTy LastArgIdx) const {
+ ArgVecTy Args;
+ for (ArgIdxTy I = ReturnValueIndex; I <= LastArgIdx; ++I) {
+ if (contains(I))
+ Args.push_back(I);
}
+ return Args;
+ }
- const FunctionDecl *const FDecl;
- const StringRef Name;
- const std::string FullName;
- };
+private:
+ ArgVecTy DiscreteArgs;
+ Optional<ArgIdxTy> VariadicIndex;
+};
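For illustration, a minimal sketch of how ArgSet's two matching modes combine; the concrete indexes below are made up, and only the ArgSet, ArgIdxTy and ReturnValueIndex declarations above are assumed:

    // Minimal illustrative sketch, not from the diff itself.
    ArgSet S(/*DiscreteArgs=*/{0, 2}, /*VariadicIndex=*/4);
    bool InDiscrete = S.contains(2);           // true: listed as a discrete index
    bool BetweenSets = S.contains(3);          // false: not listed, below the variadic start
    bool InVariadicTail = S.contains(7);       // true: 7 >= VariadicIndex
    bool IsRet = S.contains(ReturnValueIndex); // false: -1 must be listed explicitly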
- /// Catch taint related bugs. Check if tainted data is passed to a
- /// system call etc. Returns true on matching.
- bool checkPre(const CallEvent &Call, const FunctionData &FData,
- CheckerContext &C) const;
+/// A struct used to specify taint propagation rules for a function.
+///
+/// If any of the possible taint source arguments is tainted, all of the
+/// destination arguments should also be tainted. If ReturnValueIndex is added
+/// to the dst list, the return value will be tainted.
+class GenericTaintRule {
+ /// Arguments which are taint sinks and should be checked, and a report
+ /// should be emitted if taint reaches these.
+ ArgSet SinkArgs;
+ /// Arguments which should be sanitized on function return.
+ ArgSet FilterArgs;
+ /// Arguments which can participate in taint propagation. If any of the
+ /// arguments in PropSrcArgs is tainted, all arguments in PropDstArgs should
+ /// be tainted.
+ ArgSet PropSrcArgs;
+ ArgSet PropDstArgs;
+
+ /// A message that explains why the call is sensitive to taint.
+ Optional<StringRef> SinkMsg;
+
+ GenericTaintRule() = default;
+
+ GenericTaintRule(ArgSet &&Sink, ArgSet &&Filter, ArgSet &&Src, ArgSet &&Dst,
+ Optional<StringRef> SinkMsg = None)
+ : SinkArgs(std::move(Sink)), FilterArgs(std::move(Filter)),
+ PropSrcArgs(std::move(Src)), PropDstArgs(std::move(Dst)),
+ SinkMsg(SinkMsg) {}
- /// Add taint sources on a pre-visit. Returns true on matching.
- bool addSourcesPre(const CallEvent &Call, const FunctionData &FData,
- CheckerContext &C) const;
+public:
+ /// Make a rule that reports a warning if taint reaches any of \p SinkArgs
+ /// arguments.
+ static GenericTaintRule Sink(ArgSet &&SinkArgs,
+ Optional<StringRef> Msg = None) {
+ return {std::move(SinkArgs), {}, {}, {}, Msg};
+ }
- /// Mark filter's arguments not tainted on a pre-visit. Returns true on
- /// matching.
- bool addFiltersPre(const CallEvent &Call, const FunctionData &FData,
- CheckerContext &C) const;
+ /// Make a rule that sanitizes all FilterArgs arguments.
+ static GenericTaintRule Filter(ArgSet &&FilterArgs) {
+ return {{}, std::move(FilterArgs), {}, {}};
+ }
- /// Propagate taint generated at pre-visit. Returns true on matching.
- static bool propagateFromPre(const CallEvent &Call, CheckerContext &C);
+ /// Make a rule that unconditionally taints all SourceArgs.
+ static GenericTaintRule Source(ArgSet &&SourceArgs) {
+ return {{}, {}, {}, std::move(SourceArgs)};
+ }
- /// Check if the region the expression evaluates to is the standard input,
- /// and thus, is tainted.
- static bool isStdin(const Expr *E, CheckerContext &C);
+ /// Make a rule that taints all PropDstArgs if any of PropSrcArgs is tainted.
+ static GenericTaintRule Prop(ArgSet &&SrcArgs, ArgSet &&DstArgs) {
+ return {{}, {}, std::move(SrcArgs), std::move(DstArgs)};
+ }
- /// Given a pointer argument, return the value it points to.
- static Optional<SVal> getPointeeOf(CheckerContext &C, const Expr *Arg);
+ /// Make a rule that taints all DstArgs if any of SrcArgs is tainted, and also
+ /// reports if taint reaches any of SinkArgs.
+ static GenericTaintRule SinkProp(ArgSet &&SinkArgs, ArgSet &&SrcArgs,
+ ArgSet &&DstArgs,
+ Optional<StringRef> Msg = None) {
+ return {
+ std::move(SinkArgs), {}, std::move(SrcArgs), std::move(DstArgs), Msg};
+ }
- /// Check for CWE-134: Uncontrolled Format String.
- static constexpr llvm::StringLiteral MsgUncontrolledFormatString =
- "Untrusted data is used as a format string "
- "(CWE-134: Uncontrolled Format String)";
- bool checkUncontrolledFormatString(const CallEvent &Call,
- CheckerContext &C) const;
+ /// Process a function which could either be a taint source, a taint sink, a
+ /// taint filter or a taint propagator.
+ void process(const GenericTaintChecker &Checker, const CallEvent &Call,
+ CheckerContext &C) const;
- /// Check for:
- /// CERT/STR02-C. "Sanitize data passed to complex subsystems"
- /// CWE-78, "Failure to Sanitize Data into an OS Command"
- static constexpr llvm::StringLiteral MsgSanitizeSystemArgs =
- "Untrusted data is passed to a system call "
- "(CERT/STR02-C. Sanitize data passed to complex subsystems)";
- bool checkSystemCall(const CallEvent &Call, StringRef Name,
- CheckerContext &C) const;
-
- /// Check if tainted data is used as a buffer size ins strn.. functions,
- /// and allocators.
- static constexpr llvm::StringLiteral MsgTaintedBufferSize =
- "Untrusted data is used to specify the buffer size "
- "(CERT/STR31-C. Guarantee that storage for strings has sufficient space "
- "for character data and the null terminator)";
- bool checkTaintedBufferSize(const CallEvent &Call, CheckerContext &C) const;
-
- /// Check if tainted data is used as a custom sink's parameter.
- static constexpr llvm::StringLiteral MsgCustomSink =
- "Untrusted data is passed to a user-defined sink";
- bool checkCustomSinks(const CallEvent &Call, const FunctionData &FData,
- CheckerContext &C) const;
+ /// Handles the resolution of indexes of type ArgIdxTy to Expr*-s.
+ static const Expr *GetArgExpr(ArgIdxTy ArgIdx, const CallEvent &Call) {
+ return ArgIdx == ReturnValueIndex ? Call.getOriginExpr()
+ : Call.getArgExpr(ArgIdx);
+ };
- /// Generate a report if the expression is tainted or points to tainted data.
- bool generateReportIfTainted(const Expr *E, StringRef Msg,
- CheckerContext &C) const;
+ /// Functions for custom taintedness propagation.
+ static bool UntrustedEnv(CheckerContext &C);
+};
+
+using RuleLookupTy = CallDescriptionMap<GenericTaintRule>;
+
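For illustration, the shape of the entries that initTaintRules() later builds: each pairs a CallDescription with a rule produced by one of the factories above. A minimal sketch, assuming the surrounding declarations; the two entries are copied from the built-in table further down in this hunk:

    // Minimal illustrative sketch, not from the diff itself.
    std::vector<std::pair<CallDescription, GenericTaintRule>> Example{
        // strcpy(dst, src): if arg 1 is tainted, taint arg 0 and the return value.
        {{CDF_MaybeBuiltin, {"strcpy"}},
         GenericTaintRule::Prop({{1}}, {{0, ReturnValueIndex}})},
        // system(cmd): report if taint reaches arg 0.
        {{"system"}, GenericTaintRule::Sink({{0}}, MsgSanitizeSystemArgs)}};
    RuleLookupTy Rules(std::make_move_iterator(Example.begin()),
                       std::make_move_iterator(Example.end()));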
+/// Used to parse the configuration file.
+struct TaintConfiguration {
+ using NameScopeArgs = std::tuple<std::string, std::string, ArgVecTy>;
+ enum class VariadicType { None, Src, Dst };
+
+ struct Common {
+ std::string Name;
+ std::string Scope;
+ };
+
+ struct Sink : Common {
+ ArgVecTy SinkArgs;
+ };
+
+ struct Filter : Common {
+ ArgVecTy FilterArgs;
+ };
- struct TaintPropagationRule;
- template <typename T>
- using ConfigDataMap =
- std::unordered_multimap<std::string, std::pair<std::string, T>>;
- using NameRuleMap = ConfigDataMap<TaintPropagationRule>;
- using NameArgMap = ConfigDataMap<ArgVector>;
-
- /// Find a function with the given name and scope. Returns the first match
- /// or the end of the map.
- template <typename T>
- static auto findFunctionInConfig(const ConfigDataMap<T> &Map,
- const FunctionData &FData);
-
- /// A struct used to specify taint propagation rules for a function.
- ///
- /// If any of the possible taint source arguments is tainted, all of the
- /// destination arguments should also be tainted. Use InvalidArgIndex in the
- /// src list to specify that all of the arguments can introduce taint. Use
- /// InvalidArgIndex in the dst arguments to signify that all the non-const
- /// pointer and reference arguments might be tainted on return. If
- /// ReturnValueIndex is added to the dst list, the return value will be
- /// tainted.
- struct TaintPropagationRule {
- using PropagationFuncType = bool (*)(bool IsTainted, const CallEvent &Call,
- CheckerContext &C);
-
- /// List of arguments which can be taint sources and should be checked.
- ArgVector SrcArgs;
- /// List of arguments which should be tainted on function return.
- ArgVector DstArgs;
- /// Index for the first variadic parameter if exist.
- unsigned VariadicIndex;
- /// Show when a function has variadic parameters. If it has, it marks all
- /// of them as source or destination.
+ struct Propagation : Common {
+ ArgVecTy SrcArgs;
+ ArgVecTy DstArgs;
VariadicType VarType;
- /// Special function for tainted source determination. If defined, it can
- /// override the default behavior.
- PropagationFuncType PropagationFunc;
-
- TaintPropagationRule()
- : VariadicIndex(InvalidArgIndex), VarType(VariadicType::None),
- PropagationFunc(nullptr) {}
-
- TaintPropagationRule(ArgVector &&Src, ArgVector &&Dst,
- VariadicType Var = VariadicType::None,
- unsigned VarIndex = InvalidArgIndex,
- PropagationFuncType Func = nullptr)
- : SrcArgs(std::move(Src)), DstArgs(std::move(Dst)),
- VariadicIndex(VarIndex), VarType(Var), PropagationFunc(Func) {}
-
- /// Get the propagation rule for a given function.
- static TaintPropagationRule
- getTaintPropagationRule(const NameRuleMap &CustomPropagations,
- const FunctionData &FData, CheckerContext &C);
-
- void addSrcArg(unsigned A) { SrcArgs.push_back(A); }
- void addDstArg(unsigned A) { DstArgs.push_back(A); }
-
- bool isNull() const {
- return SrcArgs.empty() && DstArgs.empty() &&
- VariadicType::None == VarType;
- }
+ ArgIdxTy VarIndex;
+ };
- bool isDestinationArgument(unsigned ArgNum) const {
- return llvm::is_contained(DstArgs, ArgNum);
- }
+ std::vector<Propagation> Propagations;
+ std::vector<Filter> Filters;
+ std::vector<Sink> Sinks;
- static bool isTaintedOrPointsToTainted(const Expr *E,
- const ProgramStateRef &State,
- CheckerContext &C) {
- if (isTainted(State, E, C.getLocationContext()) || isStdin(E, C))
- return true;
+ TaintConfiguration() = default;
+ TaintConfiguration(const TaintConfiguration &) = default;
+ TaintConfiguration(TaintConfiguration &&) = default;
+ TaintConfiguration &operator=(const TaintConfiguration &) = default;
+ TaintConfiguration &operator=(TaintConfiguration &&) = default;
+};
- if (!E->getType().getTypePtr()->isPointerType())
- return false;
+struct GenericTaintRuleParser {
+ GenericTaintRuleParser(CheckerManager &Mgr) : Mgr(Mgr) {}
+ /// Container type used to gather call identification objects grouped into
+ /// pairs with their corresponding taint rules. It is temporary as it is used
+ /// to finally initialize RuleLookupTy, which is considered to be immutable.
+ using RulesContTy = std::vector<std::pair<CallDescription, GenericTaintRule>>;
+ RulesContTy parseConfiguration(const std::string &Option,
+ TaintConfiguration &&Config) const;
- Optional<SVal> V = getPointeeOf(C, E);
- return (V && isTainted(State, *V));
- }
+private:
+ using NamePartsTy = llvm::SmallVector<SmallString<32>, 2>;
- /// Pre-process a function which propagates taint according to the
- /// taint rule.
- ProgramStateRef process(const CallEvent &Call, CheckerContext &C) const;
+ /// Validate part of the configuration, which contains a list of argument
+ /// indexes.
+ void validateArgVector(const std::string &Option, const ArgVecTy &Args) const;
- // Functions for custom taintedness propagation.
- static bool postSocket(bool IsTainted, const CallEvent &Call,
- CheckerContext &C);
- };
+ template <typename Config> static NamePartsTy parseNameParts(const Config &C);
- /// Defines a map between the propagation function's name, scope
- /// and TaintPropagationRule.
- NameRuleMap CustomPropagations;
+ // Creates a CallDescription for the given config entry and associates the
+ // given Rule with it.
+ template <typename Config>
+ static void consumeRulesFromConfig(const Config &C, GenericTaintRule &&Rule,
+ RulesContTy &Rules);
- /// Defines a map between the filter function's name, scope and filtering
- /// args.
- NameArgMap CustomFilters;
+ void parseConfig(const std::string &Option, TaintConfiguration::Sink &&P,
+ RulesContTy &Rules) const;
+ void parseConfig(const std::string &Option, TaintConfiguration::Filter &&P,
+ RulesContTy &Rules) const;
+ void parseConfig(const std::string &Option,
+ TaintConfiguration::Propagation &&P,
+ RulesContTy &Rules) const;
- /// Defines a map between the sink function's name, scope and sinking args.
- NameArgMap CustomSinks;
+ CheckerManager &Mgr;
};
-const unsigned GenericTaintChecker::ReturnValueIndex;
-const unsigned GenericTaintChecker::InvalidArgIndex;
+class GenericTaintChecker : public Checker<check::PreCall, check::PostCall> {
+public:
+ static void *getTag() {
+ static int Tag;
+ return &Tag;
+ }
-// FIXME: these lines can be removed in C++17
-constexpr llvm::StringLiteral GenericTaintChecker::MsgUncontrolledFormatString;
-constexpr llvm::StringLiteral GenericTaintChecker::MsgSanitizeSystemArgs;
-constexpr llvm::StringLiteral GenericTaintChecker::MsgTaintedBufferSize;
-constexpr llvm::StringLiteral GenericTaintChecker::MsgCustomSink;
-} // end of anonymous namespace
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
-using TaintConfig = GenericTaintChecker::TaintConfiguration;
+ void printState(raw_ostream &Out, ProgramStateRef State, const char *NL,
+ const char *Sep) const override;
-LLVM_YAML_IS_SEQUENCE_VECTOR(TaintConfig::Propagation)
-LLVM_YAML_IS_SEQUENCE_VECTOR(TaintConfig::NameScopeArgs)
+ /// Generate a report if the expression is tainted or points to tainted data.
+ bool generateReportIfTainted(const Expr *E, StringRef Msg,
+ CheckerContext &C) const;
+
+private:
+ const BugType BT{this, "Use of Untrusted Data", "Untrusted Data"};
+
+ bool checkUncontrolledFormatString(const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void taintUnsafeSocketProtocol(const CallEvent &Call,
+ CheckerContext &C) const;
+
+ /// Default taint rules are initialized with the help of a CheckerContext to
+ /// access the names of built-in functions like memcpy.
+ void initTaintRules(CheckerContext &C) const;
+
+ /// CallDescription currently cannot restrict matches to the global namespace
+ /// only, which is why multiple CallDescriptionMaps are used, as we want to
+ /// disambiguate global C functions from functions inside user-defined
+ /// namespaces.
+ // TODO: Remove separation to simplify matching logic once CallDescriptions
+ // are more expressive.
+
+ mutable Optional<RuleLookupTy> StaticTaintRules;
+ mutable Optional<RuleLookupTy> DynamicTaintRules;
+};
+} // end of anonymous namespace
+
+/// YAML serialization mapping.
+LLVM_YAML_IS_SEQUENCE_VECTOR(TaintConfiguration::Sink)
+LLVM_YAML_IS_SEQUENCE_VECTOR(TaintConfiguration::Filter)
+LLVM_YAML_IS_SEQUENCE_VECTOR(TaintConfiguration::Propagation)
namespace llvm {
namespace yaml {
-template <> struct MappingTraits<TaintConfig> {
- static void mapping(IO &IO, TaintConfig &Config) {
+template <> struct MappingTraits<TaintConfiguration> {
+ static void mapping(IO &IO, TaintConfiguration &Config) {
IO.mapOptional("Propagations", Config.Propagations);
IO.mapOptional("Filters", Config.Filters);
IO.mapOptional("Sinks", Config.Sinks);
}
};
-template <> struct MappingTraits<TaintConfig::Propagation> {
- static void mapping(IO &IO, TaintConfig::Propagation &Propagation) {
+template <> struct MappingTraits<TaintConfiguration::Sink> {
+ static void mapping(IO &IO, TaintConfiguration::Sink &Sink) {
+ IO.mapRequired("Name", Sink.Name);
+ IO.mapOptional("Scope", Sink.Scope);
+ IO.mapRequired("Args", Sink.SinkArgs);
+ }
+};
+
+template <> struct MappingTraits<TaintConfiguration::Filter> {
+ static void mapping(IO &IO, TaintConfiguration::Filter &Filter) {
+ IO.mapRequired("Name", Filter.Name);
+ IO.mapOptional("Scope", Filter.Scope);
+ IO.mapRequired("Args", Filter.FilterArgs);
+ }
+};
+
+template <> struct MappingTraits<TaintConfiguration::Propagation> {
+ static void mapping(IO &IO, TaintConfiguration::Propagation &Propagation) {
IO.mapRequired("Name", Propagation.Name);
IO.mapOptional("Scope", Propagation.Scope);
IO.mapOptional("SrcArgs", Propagation.SrcArgs);
IO.mapOptional("DstArgs", Propagation.DstArgs);
- IO.mapOptional("VariadicType", Propagation.VarType,
- GenericTaintChecker::VariadicType::None);
- IO.mapOptional("VariadicIndex", Propagation.VarIndex,
- GenericTaintChecker::InvalidArgIndex);
- }
-};
-
-template <> struct ScalarEnumerationTraits<GenericTaintChecker::VariadicType> {
- static void enumeration(IO &IO, GenericTaintChecker::VariadicType &Value) {
- IO.enumCase(Value, "None", GenericTaintChecker::VariadicType::None);
- IO.enumCase(Value, "Src", GenericTaintChecker::VariadicType::Src);
- IO.enumCase(Value, "Dst", GenericTaintChecker::VariadicType::Dst);
+ IO.mapOptional("VariadicType", Propagation.VarType);
+ IO.mapOptional("VariadicIndex", Propagation.VarIndex);
}
};
-template <> struct MappingTraits<TaintConfig::NameScopeArgs> {
- static void mapping(IO &IO, TaintConfig::NameScopeArgs &NSA) {
- IO.mapRequired("Name", std::get<0>(NSA));
- IO.mapOptional("Scope", std::get<1>(NSA));
- IO.mapRequired("Args", std::get<2>(NSA));
+template <> struct ScalarEnumerationTraits<TaintConfiguration::VariadicType> {
+ static void enumeration(IO &IO, TaintConfiguration::VariadicType &Value) {
+ IO.enumCase(Value, "None", TaintConfiguration::VariadicType::None);
+ IO.enumCase(Value, "Src", TaintConfiguration::VariadicType::Src);
+ IO.enumCase(Value, "Dst", TaintConfiguration::VariadicType::Dst);
}
};
} // namespace yaml
} // namespace llvm
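Given the traits above, a user-supplied taint configuration is just a YAML document with these keys. A minimal sketch of deserializing one through LLVM's YAML I/O, which is what getConfiguration<> in Yaml.h ultimately does; the function names mySource/mySink and the inline buffer are made up, and the headers pulled in via Yaml.h are assumed:

    // Minimal illustrative sketch, not from the diff itself.
    llvm::StringRef Buf = "Propagations:\n"
                          "  - Name: mySource\n"
                          "    DstArgs: [-1]\n"
                          "Sinks:\n"
                          "  - Name: mySink\n"
                          "    Args: [0]\n";
    TaintConfiguration Config;
    llvm::yaml::Input Input(Buf);
    Input >> Config;
    if (!Input.error()) {
      // Config.Propagations / Config.Sinks now hold the parsed entries and can
      // be handed to GenericTaintRuleParser::parseConfiguration().
    }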
/// A set which is used to pass information from call pre-visit instruction
-/// to the call post-visit. The values are unsigned integers, which are either
+/// to the call post-visit. The values are signed integers, which are either
/// ReturnValueIndex, or indexes of the pointer/reference argument, which
/// points to data, which should be tainted on return.
-REGISTER_SET_WITH_PROGRAMSTATE(TaintArgsOnPostVisit, unsigned)
-
-GenericTaintChecker::ArgVector
-GenericTaintChecker::convertToArgVector(CheckerManager &Mgr,
- const std::string &Option,
- const SignedArgVector &Args) {
- ArgVector Result;
- for (int Arg : Args) {
- if (Arg == -1)
- Result.push_back(ReturnValueIndex);
- else if (Arg < -1) {
- Result.push_back(InvalidArgIndex);
+REGISTER_SET_WITH_PROGRAMSTATE(TaintArgsOnPostVisit, ArgIdxTy)
+
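The set registered above is the hand-off between the two checker callbacks. A minimal sketch of that handshake, reduced to the return-value case, assuming a checker callback scope where C is the CheckerContext and Call the CallEvent:

    // Minimal illustrative sketch, not from the diff itself.
    // Pre-visit: record what must become tainted once the call has returned.
    ProgramStateRef Pre = C.getState()->add<TaintArgsOnPostVisit>(ReturnValueIndex);
    C.addTransition(Pre);

    // Post-visit: apply the recorded taint, then clear the set.
    ProgramStateRef Post = C.getState();
    for (ArgIdxTy I : Post->get<TaintArgsOnPostVisit>())
      if (I == ReturnValueIndex)
        Post = addTaint(Post, Call.getReturnValue());
    Post = Post->remove<TaintArgsOnPostVisit>();
    C.addTransition(Post);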
+void GenericTaintRuleParser::validateArgVector(const std::string &Option,
+ const ArgVecTy &Args) const {
+ for (ArgIdxTy Arg : Args) {
+ if (Arg < ReturnValueIndex) {
Mgr.reportInvalidCheckerOptionValue(
- this, Option,
+ Mgr.getChecker<GenericTaintChecker>(), Option,
"an argument number for propagation rules greater or equal to -1");
- } else
- Result.push_back(static_cast<unsigned>(Arg));
+ }
}
- return Result;
}
-void GenericTaintChecker::parseConfiguration(CheckerManager &Mgr,
- const std::string &Option,
- TaintConfiguration &&Config) {
- for (auto &P : Config.Propagations) {
- GenericTaintChecker::CustomPropagations.emplace(
- P.Name,
- std::make_pair(P.Scope, TaintPropagationRule{
- std::move(P.SrcArgs),
- convertToArgVector(Mgr, Option, P.DstArgs),
- P.VarType, P.VarIndex}));
+template <typename Config>
+GenericTaintRuleParser::NamePartsTy
+GenericTaintRuleParser::parseNameParts(const Config &C) {
+ NamePartsTy NameParts;
+ if (!C.Scope.empty()) {
+ // If the Scope argument contains multiple "::" parts, those are considered
+ // namespace identifiers.
+ llvm::SmallVector<StringRef, 2> NSParts;
+ StringRef{C.Scope}.split(NSParts, "::", /*MaxSplit*/ -1,
+ /*KeepEmpty*/ false);
+ NameParts.append(NSParts.begin(), NSParts.end());
}
+ NameParts.emplace_back(C.Name);
+ return NameParts;
+}
- for (auto &F : Config.Filters) {
- GenericTaintChecker::CustomFilters.emplace(
- std::get<0>(F),
- std::make_pair(std::move(std::get<1>(F)), std::move(std::get<2>(F))));
- }
+template <typename Config>
+void GenericTaintRuleParser::consumeRulesFromConfig(const Config &C,
+ GenericTaintRule &&Rule,
+ RulesContTy &Rules) {
+ NamePartsTy NameParts = parseNameParts(C);
+ llvm::SmallVector<const char *, 2> CallDescParts{NameParts.size()};
+ llvm::transform(NameParts, CallDescParts.begin(),
+ [](SmallString<32> &S) { return S.c_str(); });
+ Rules.emplace_back(CallDescription(CallDescParts), std::move(Rule));
+}
- for (auto &S : Config.Sinks) {
- GenericTaintChecker::CustomSinks.emplace(
- std::get<0>(S),
- std::make_pair(std::move(std::get<1>(S)), std::move(std::get<2>(S))));
- }
+void GenericTaintRuleParser::parseConfig(const std::string &Option,
+ TaintConfiguration::Sink &&S,
+ RulesContTy &Rules) const {
+ validateArgVector(Option, S.SinkArgs);
+ consumeRulesFromConfig(S, GenericTaintRule::Sink(std::move(S.SinkArgs)),
+ Rules);
}
-template <typename T>
-auto GenericTaintChecker::findFunctionInConfig(const ConfigDataMap<T> &Map,
- const FunctionData &FData) {
- auto Range = Map.equal_range(std::string(FData.Name));
- auto It =
- std::find_if(Range.first, Range.second, [&FData](const auto &Entry) {
- const auto &Value = Entry.second;
- StringRef Scope = Value.first;
- return Scope.empty() || FData.isInScope(Scope);
- });
- return It != Range.second ? It : Map.end();
+void GenericTaintRuleParser::parseConfig(const std::string &Option,
+ TaintConfiguration::Filter &&S,
+ RulesContTy &Rules) const {
+ validateArgVector(Option, S.FilterArgs);
+ consumeRulesFromConfig(S, GenericTaintRule::Filter(std::move(S.FilterArgs)),
+ Rules);
}
-GenericTaintChecker::TaintPropagationRule
-GenericTaintChecker::TaintPropagationRule::getTaintPropagationRule(
- const NameRuleMap &CustomPropagations, const FunctionData &FData,
- CheckerContext &C) {
- // TODO: Currently, we might lose precision here: we always mark a return
- // value as tainted even if it's just a pointer, pointing to tainted data.
+void GenericTaintRuleParser::parseConfig(const std::string &Option,
+ TaintConfiguration::Propagation &&P,
+ RulesContTy &Rules) const {
+ validateArgVector(Option, P.SrcArgs);
+ validateArgVector(Option, P.DstArgs);
+ bool IsSrcVariadic = P.VarType == TaintConfiguration::VariadicType::Src;
+ bool IsDstVariadic = P.VarType == TaintConfiguration::VariadicType::Dst;
+ Optional<ArgIdxTy> JustVarIndex = P.VarIndex;
+
+ ArgSet SrcDesc(std::move(P.SrcArgs), IsSrcVariadic ? JustVarIndex : None);
+ ArgSet DstDesc(std::move(P.DstArgs), IsDstVariadic ? JustVarIndex : None);
+
+ consumeRulesFromConfig(
+ P, GenericTaintRule::Prop(std::move(SrcDesc), std::move(DstDesc)), Rules);
+}
+
+GenericTaintRuleParser::RulesContTy
+GenericTaintRuleParser::parseConfiguration(const std::string &Option,
+ TaintConfiguration &&Config) const {
+
+ RulesContTy Rules;
+
+ for (auto &F : Config.Filters)
+ parseConfig(Option, std::move(F), Rules);
+
+ for (auto &S : Config.Sinks)
+ parseConfig(Option, std::move(S), Rules);
+ for (auto &P : Config.Propagations)
+ parseConfig(Option, std::move(P), Rules);
+
+ return Rules;
+}
+
+void GenericTaintChecker::initTaintRules(CheckerContext &C) const {
// Check for exact name match for functions without builtin substitutes.
// Use qualified name, because these are C functions without namespace.
- TaintPropagationRule Rule =
- llvm::StringSwitch<TaintPropagationRule>(FData.FullName)
- // Source functions
- // TODO: Add support for vfscanf & family.
- .Case("fdopen", {{}, {ReturnValueIndex}})
- .Case("fopen", {{}, {ReturnValueIndex}})
- .Case("freopen", {{}, {ReturnValueIndex}})
- .Case("getch", {{}, {ReturnValueIndex}})
- .Case("getchar", {{}, {ReturnValueIndex}})
- .Case("getchar_unlocked", {{}, {ReturnValueIndex}})
- .Case("gets", {{}, {0, ReturnValueIndex}})
- .Case("scanf", {{}, {}, VariadicType::Dst, 1})
- .Case("socket", {{},
- {ReturnValueIndex},
- VariadicType::None,
- InvalidArgIndex,
- &TaintPropagationRule::postSocket})
- .Case("wgetch", {{}, {ReturnValueIndex}})
- // Propagating functions
- .Case("atoi", {{0}, {ReturnValueIndex}})
- .Case("atol", {{0}, {ReturnValueIndex}})
- .Case("atoll", {{0}, {ReturnValueIndex}})
- .Case("fgetc", {{0}, {ReturnValueIndex}})
- .Case("fgetln", {{0}, {ReturnValueIndex}})
- .Case("fgets", {{2}, {0, ReturnValueIndex}})
- .Case("fscanf", {{0}, {}, VariadicType::Dst, 2})
- .Case("sscanf", {{0}, {}, VariadicType::Dst, 2})
- .Case("getc", {{0}, {ReturnValueIndex}})
- .Case("getc_unlocked", {{0}, {ReturnValueIndex}})
- .Case("getdelim", {{3}, {0}})
- .Case("getline", {{2}, {0}})
- .Case("getw", {{0}, {ReturnValueIndex}})
- .Case("pread", {{0, 1, 2, 3}, {1, ReturnValueIndex}})
- .Case("read", {{0, 2}, {1, ReturnValueIndex}})
- .Case("strchr", {{0}, {ReturnValueIndex}})
- .Case("strrchr", {{0}, {ReturnValueIndex}})
- .Case("tolower", {{0}, {ReturnValueIndex}})
- .Case("toupper", {{0}, {ReturnValueIndex}})
- .Default({});
-
- if (!Rule.isNull())
- return Rule;
+
+ if (StaticTaintRules || DynamicTaintRules)
+ return;
+
+ using RulesConstructionTy =
+ std::vector<std::pair<CallDescription, GenericTaintRule>>;
+ using TR = GenericTaintRule;
+
+ const Builtin::Context &BI = C.getASTContext().BuiltinInfo;
+
+ RulesConstructionTy GlobalCRules{
+ // Sources
+ {{"fdopen"}, TR::Source({{ReturnValueIndex}})},
+ {{"fopen"}, TR::Source({{ReturnValueIndex}})},
+ {{"freopen"}, TR::Source({{ReturnValueIndex}})},
+ {{"getch"}, TR::Source({{ReturnValueIndex}})},
+ {{"getchar"}, TR::Source({{ReturnValueIndex}})},
+ {{"getchar_unlocked"}, TR::Source({{ReturnValueIndex}})},
+ {{"gets"}, TR::Source({{0}, ReturnValueIndex})},
+ {{"scanf"}, TR::Source({{}, 1})},
+ {{"wgetch"}, TR::Source({{}, ReturnValueIndex})},
+
+ // Props
+ {{"atoi"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"atol"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"atoll"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"fgetc"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"fgetln"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"fgets"}, TR::Prop({{2}}, {{0}, ReturnValueIndex})},
+ {{"fscanf"}, TR::Prop({{0}}, {{}, 2})},
+ {{"sscanf"}, TR::Prop({{0}}, {{}, 2})},
+ {{"getc"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"getc_unlocked"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"getdelim"}, TR::Prop({{3}}, {{0}})},
+ {{"getline"}, TR::Prop({{2}}, {{0}})},
+ {{"getw"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"pread"}, TR::Prop({{0, 1, 2, 3}}, {{1, ReturnValueIndex}})},
+ {{"read"}, TR::Prop({{0, 2}}, {{1, ReturnValueIndex}})},
+ {{"strchr"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"strrchr"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"tolower"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"toupper"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDF_MaybeBuiltin, {BI.getName(Builtin::BIstrncat)}},
+ TR::Prop({{1, 2}}, {{0, ReturnValueIndex}})},
+ {{CDF_MaybeBuiltin, {BI.getName(Builtin::BIstrlcpy)}},
+ TR::Prop({{1, 2}}, {{0}})},
+ {{CDF_MaybeBuiltin, {BI.getName(Builtin::BIstrlcat)}},
+ TR::Prop({{1, 2}}, {{0}})},
+ {{CDF_MaybeBuiltin, {"snprintf"}},
+ TR::Prop({{1}, 3}, {{0, ReturnValueIndex}})},
+ {{CDF_MaybeBuiltin, {"sprintf"}},
+ TR::Prop({{1}, 2}, {{0, ReturnValueIndex}})},
+ {{CDF_MaybeBuiltin, {"strcpy"}},
+ TR::Prop({{1}}, {{0, ReturnValueIndex}})},
+ {{CDF_MaybeBuiltin, {"stpcpy"}},
+ TR::Prop({{1}}, {{0, ReturnValueIndex}})},
+ {{CDF_MaybeBuiltin, {"strcat"}},
+ TR::Prop({{1}}, {{0, ReturnValueIndex}})},
+ {{CDF_MaybeBuiltin, {"strdup"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDF_MaybeBuiltin, {"strdupa"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDF_MaybeBuiltin, {"wcsdup"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+
+ // Sinks
+ {{"system"}, TR::Sink({{0}}, MsgSanitizeSystemArgs)},
+ {{"popen"}, TR::Sink({{0}}, MsgSanitizeSystemArgs)},
+ {{"execl"}, TR::Sink({{0}}, MsgSanitizeSystemArgs)},
+ {{"execle"}, TR::Sink({{0}}, MsgSanitizeSystemArgs)},
+ {{"execlp"}, TR::Sink({{0}}, MsgSanitizeSystemArgs)},
+ {{"execvp"}, TR::Sink({{0}}, MsgSanitizeSystemArgs)},
+ {{"execvP"}, TR::Sink({{0}}, MsgSanitizeSystemArgs)},
+ {{"execve"}, TR::Sink({{0}}, MsgSanitizeSystemArgs)},
+ {{"dlopen"}, TR::Sink({{0}}, MsgSanitizeSystemArgs)},
+ {{CDF_MaybeBuiltin, {"malloc"}}, TR::Sink({{0}}, MsgTaintedBufferSize)},
+ {{CDF_MaybeBuiltin, {"calloc"}}, TR::Sink({{0}}, MsgTaintedBufferSize)},
+ {{CDF_MaybeBuiltin, {"alloca"}}, TR::Sink({{0}}, MsgTaintedBufferSize)},
+ {{CDF_MaybeBuiltin, {"memccpy"}}, TR::Sink({{3}}, MsgTaintedBufferSize)},
+ {{CDF_MaybeBuiltin, {"realloc"}}, TR::Sink({{1}}, MsgTaintedBufferSize)},
+ {{{"setproctitle"}}, TR::Sink({{0}, 1}, MsgUncontrolledFormatString)},
+ {{{"setproctitle_fast"}},
+ TR::Sink({{0}, 1}, MsgUncontrolledFormatString)},
+
+ // SinkProps
+ {{CDF_MaybeBuiltin, BI.getName(Builtin::BImemcpy)},
+ TR::SinkProp({{2}}, {{1, 2}}, {{0, ReturnValueIndex}},
+ MsgTaintedBufferSize)},
+ {{CDF_MaybeBuiltin, {BI.getName(Builtin::BImemmove)}},
+ TR::SinkProp({{2}}, {{1, 2}}, {{0, ReturnValueIndex}},
+ MsgTaintedBufferSize)},
+ {{CDF_MaybeBuiltin, {BI.getName(Builtin::BIstrncpy)}},
+ TR::SinkProp({{2}}, {{1, 2}}, {{0, ReturnValueIndex}},
+ MsgTaintedBufferSize)},
+ {{CDF_MaybeBuiltin, {BI.getName(Builtin::BIstrndup)}},
+ TR::SinkProp({{1}}, {{0, 1}}, {{ReturnValueIndex}},
+ MsgTaintedBufferSize)},
+ {{CDF_MaybeBuiltin, {"bcopy"}},
+ TR::SinkProp({{2}}, {{0, 2}}, {{1}}, MsgTaintedBufferSize)}};
// `getenv` returns taint only in untrusted environments.
- if (FData.FullName == "getenv") {
- if (C.getAnalysisManager()
- .getAnalyzerOptions()
- .ShouldAssumeControlledEnvironment)
- return {};
- return {{}, {ReturnValueIndex}};
+ if (TR::UntrustedEnv(C)) {
+ // void setproctitle_init(int argc, char *argv[], char *envp[])
+ GlobalCRules.push_back(
+ {{{"setproctitle_init"}}, TR::Sink({{2}}, MsgCustomSink)});
+ GlobalCRules.push_back({{"getenv"}, TR::Source({{ReturnValueIndex}})});
}
- assert(FData.FDecl);
-
- // Check if it's one of the memory setting/copying functions.
- // This check is specialized but faster then calling isCLibraryFunction.
- const FunctionDecl *FDecl = FData.FDecl;
- unsigned BId = 0;
- if ((BId = FDecl->getMemoryFunctionKind())) {
- switch (BId) {
- case Builtin::BImemcpy:
- case Builtin::BImemmove:
- case Builtin::BIstrncpy:
- case Builtin::BIstrncat:
- return {{1, 2}, {0, ReturnValueIndex}};
- case Builtin::BIstrlcpy:
- case Builtin::BIstrlcat:
- return {{1, 2}, {0}};
- case Builtin::BIstrndup:
- return {{0, 1}, {ReturnValueIndex}};
-
- default:
- break;
- }
- }
+ StaticTaintRules.emplace(std::make_move_iterator(GlobalCRules.begin()),
+ std::make_move_iterator(GlobalCRules.end()));
- // Process all other functions which could be defined as builtins.
- if (Rule.isNull()) {
- const auto OneOf = [FDecl](const auto &... Name) {
- // FIXME: use fold expression in C++17
- using unused = int[];
- bool ret = false;
- static_cast<void>(unused{
- 0, (ret |= CheckerContext::isCLibraryFunction(FDecl, Name), 0)...});
- return ret;
- };
- if (OneOf("snprintf"))
- return {{1}, {0, ReturnValueIndex}, VariadicType::Src, 3};
- if (OneOf("sprintf"))
- return {{1}, {0, ReturnValueIndex}, VariadicType::Src, 2};
- if (OneOf("strcpy", "stpcpy", "strcat"))
- return {{1}, {0, ReturnValueIndex}};
- if (OneOf("bcopy"))
- return {{0, 2}, {1}};
- if (OneOf("strdup", "strdupa", "wcsdup"))
- return {{0}, {ReturnValueIndex}};
+ // User-provided taint configuration.
+ CheckerManager *Mgr = C.getAnalysisManager().getCheckerManager();
+ assert(Mgr);
+ GenericTaintRuleParser ConfigParser{*Mgr};
+ std::string Option{"Config"};
+ StringRef ConfigFile =
+ Mgr->getAnalyzerOptions().getCheckerStringOption(this, Option);
+ llvm::Optional<TaintConfiguration> Config =
+ getConfiguration<TaintConfiguration>(*Mgr, this, Option, ConfigFile);
+ if (!Config) {
+ // We don't have external taint config, no parsing required.
+ DynamicTaintRules = RuleLookupTy{};
+ return;
}
- // Skipping the following functions, since they might be used for cleansing or
- // smart memory copy:
- // - memccpy - copying until hitting a special character.
+ GenericTaintRuleParser::RulesContTy Rules{
+ ConfigParser.parseConfiguration(Option, std::move(Config.getValue()))};
- auto It = findFunctionInConfig(CustomPropagations, FData);
- if (It != CustomPropagations.end())
- return It->second.second;
- return {};
+ DynamicTaintRules.emplace(std::make_move_iterator(Rules.begin()),
+ std::make_move_iterator(Rules.end()));
}
void GenericTaintChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
- Optional<FunctionData> FData = FunctionData::create(Call, C);
- if (!FData)
- return;
-
- // Check for taintedness related errors first: system call, uncontrolled
- // format string, tainted buffer size.
- if (checkPre(Call, *FData, C))
- return;
-
- // Marks the function's arguments and/or return value tainted if it present in
- // the list.
- if (addSourcesPre(Call, *FData, C))
- return;
-
- addFiltersPre(Call, *FData, C);
+ initTaintRules(C);
+
+ // FIXME: this should be much simpler.
+ if (const auto *Rule =
+ Call.isGlobalCFunction() ? StaticTaintRules->lookup(Call) : nullptr)
+ Rule->process(*this, Call, C);
+ else if (const auto *Rule = DynamicTaintRules->lookup(Call))
+ Rule->process(*this, Call, C);
+
+ // FIXME: These edge cases are to be eliminated from here eventually.
+ //
+ // Additional check that is not supported by CallDescription.
+ // TODO: Make CallDescription be able to match attributes such as printf-like
+ // arguments.
+ checkUncontrolledFormatString(Call, C);
+
+ // TODO: Modeling sockets should be done in a specific checker.
+ // Socket is a source, which taints the return value.
+ taintUnsafeSocketProtocol(Call, C);
}
void GenericTaintChecker::checkPostCall(const CallEvent &Call,
CheckerContext &C) const {
// Set the marked values as tainted. The return value only accessible from
// checkPostStmt.
- propagateFromPre(Call, C);
-}
-
-void GenericTaintChecker::printState(raw_ostream &Out, ProgramStateRef State,
- const char *NL, const char *Sep) const {
- printTaint(State, Out, NL, Sep);
-}
-
-bool GenericTaintChecker::addSourcesPre(const CallEvent &Call,
- const FunctionData &FData,
- CheckerContext &C) const {
- // First, try generating a propagation rule for this function.
- TaintPropagationRule Rule = TaintPropagationRule::getTaintPropagationRule(
- this->CustomPropagations, FData, C);
- if (!Rule.isNull()) {
- ProgramStateRef State = Rule.process(Call, C);
- if (State) {
- C.addTransition(State);
- return true;
- }
- }
- return false;
-}
-
-bool GenericTaintChecker::addFiltersPre(const CallEvent &Call,
- const FunctionData &FData,
- CheckerContext &C) const {
- auto It = findFunctionInConfig(CustomFilters, FData);
- if (It == CustomFilters.end())
- return false;
-
- ProgramStateRef State = C.getState();
- const auto &Value = It->second;
- const ArgVector &Args = Value.second;
- for (unsigned ArgNum : Args) {
- if (ArgNum >= Call.getNumArgs())
- continue;
-
- const Expr *Arg = Call.getArgExpr(ArgNum);
- Optional<SVal> V = getPointeeOf(C, Arg);
- if (V)
- State = removeTaint(State, *V);
- }
-
- if (State != C.getState()) {
- C.addTransition(State);
- return true;
- }
- return false;
-}
-
-bool GenericTaintChecker::propagateFromPre(const CallEvent &Call,
- CheckerContext &C) {
ProgramStateRef State = C.getState();
// Depending on what was tainted at pre-visit, we determined a set of
@@ -616,9 +689,9 @@ bool GenericTaintChecker::propagateFromPre(const CallEvent &Call,
// stored in the state as TaintArgsOnPostVisit set.
TaintArgsOnPostVisitTy TaintArgs = State->get<TaintArgsOnPostVisit>();
if (TaintArgs.isEmpty())
- return false;
+ return;
- for (unsigned ArgNum : TaintArgs) {
+ for (ArgIdxTy ArgNum : TaintArgs) {
// Special handling for the tainted return value.
if (ArgNum == ReturnValueIndex) {
State = addTaint(State, Call.getReturnValue());
@@ -627,234 +700,147 @@ bool GenericTaintChecker::propagateFromPre(const CallEvent &Call,
// The arguments are pointer arguments. The data they are pointing at is
// tainted after the call.
- if (Call.getNumArgs() < (ArgNum + 1))
- return false;
- const Expr *Arg = Call.getArgExpr(ArgNum);
- Optional<SVal> V = getPointeeOf(C, Arg);
- if (V)
+ if (auto V = getPointeeOf(C, Call.getArgSVal(ArgNum)))
State = addTaint(State, *V);
}
// Clear up the taint info from the state.
State = State->remove<TaintArgsOnPostVisit>();
-
- if (State != C.getState()) {
- C.addTransition(State);
- return true;
- }
- return false;
-}
-
-bool GenericTaintChecker::checkPre(const CallEvent &Call,
- const FunctionData &FData,
- CheckerContext &C) const {
- if (checkUncontrolledFormatString(Call, C))
- return true;
-
- if (checkSystemCall(Call, FData.Name, C))
- return true;
-
- if (checkTaintedBufferSize(Call, C))
- return true;
-
- return checkCustomSinks(Call, FData, C);
+ C.addTransition(State);
}
-Optional<SVal> GenericTaintChecker::getPointeeOf(CheckerContext &C,
- const Expr *Arg) {
- ProgramStateRef State = C.getState();
- SVal AddrVal = C.getSVal(Arg->IgnoreParens());
- if (AddrVal.isUnknownOrUndef())
- return None;
-
- Optional<Loc> AddrLoc = AddrVal.getAs<Loc>();
- if (!AddrLoc)
- return None;
-
- QualType ArgTy = Arg->getType().getCanonicalType();
- if (!ArgTy->isPointerType())
- return State->getSVal(*AddrLoc);
-
- QualType ValTy = ArgTy->getPointeeType();
-
- // Do not dereference void pointers. Treat them as byte pointers instead.
- // FIXME: we might want to consider more than just the first byte.
- if (ValTy->isVoidType())
- ValTy = C.getASTContext().CharTy;
-
- return State->getSVal(*AddrLoc, ValTy);
+void GenericTaintChecker::printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) const {
+ printTaint(State, Out, NL, Sep);
}
-ProgramStateRef
-GenericTaintChecker::TaintPropagationRule::process(const CallEvent &Call,
- CheckerContext &C) const {
+void GenericTaintRule::process(const GenericTaintChecker &Checker,
+ const CallEvent &Call, CheckerContext &C) const {
ProgramStateRef State = C.getState();
+ const ArgIdxTy CallNumArgs = fromArgumentCount(Call.getNumArgs());
- // Check for taint in arguments.
- bool IsTainted = true;
- for (unsigned ArgNum : SrcArgs) {
- if (ArgNum >= Call.getNumArgs())
- continue;
-
- if ((IsTainted =
- isTaintedOrPointsToTainted(Call.getArgExpr(ArgNum), State, C)))
- break;
- }
-
- // Check for taint in variadic arguments.
- if (!IsTainted && VariadicType::Src == VarType) {
- // Check if any of the arguments is tainted
- for (unsigned i = VariadicIndex; i < Call.getNumArgs(); ++i) {
- if ((IsTainted =
- isTaintedOrPointsToTainted(Call.getArgExpr(i), State, C)))
- break;
+ /// Iterate over every call argument and get its corresponding Expr and SVal.
+ const auto ForEachCallArg = [&C, &Call, CallNumArgs](auto &&Fun) {
+ for (ArgIdxTy I = ReturnValueIndex; I < CallNumArgs; ++I) {
+ const Expr *E = GetArgExpr(I, Call);
+ Fun(I, E, C.getSVal(E));
}
- }
+ };
- if (PropagationFunc)
- IsTainted = PropagationFunc(IsTainted, Call, C);
+ /// Check for taint sinks.
+ ForEachCallArg([this, &Checker, &C, &State](ArgIdxTy I, const Expr *E, SVal) {
+ if (SinkArgs.contains(I) && isTaintedOrPointsToTainted(E, State, C))
+ Checker.generateReportIfTainted(E, SinkMsg.getValueOr(MsgCustomSink), C);
+ });
+
+ /// Check for taint filters.
+ ForEachCallArg([this, &C, &State](ArgIdxTy I, const Expr *E, SVal S) {
+ if (FilterArgs.contains(I)) {
+ State = removeTaint(State, S);
+ if (auto P = getPointeeOf(C, S))
+ State = removeTaint(State, *P);
+ }
+ });
+
+ /// Check for taint propagation sources.
+ /// A rule is relevant if PropSrcArgs is empty, or if any of its signified
+ /// args are tainted in the context of the current CallEvent.
+ bool IsMatching = PropSrcArgs.isEmpty();
+ ForEachCallArg(
+ [this, &C, &IsMatching, &State](ArgIdxTy I, const Expr *E, SVal) {
+ IsMatching = IsMatching || (PropSrcArgs.contains(I) &&
+ isTaintedOrPointsToTainted(E, State, C));
+ });
- if (!IsTainted)
- return State;
+ if (!IsMatching)
+ return;
- // Mark the arguments which should be tainted after the function returns.
- for (unsigned ArgNum : DstArgs) {
- // Should mark the return value?
- if (ArgNum == ReturnValueIndex) {
- State = State->add<TaintArgsOnPostVisit>(ReturnValueIndex);
- continue;
- }
+ const auto WouldEscape = [](SVal V, QualType Ty) -> bool {
+ if (!V.getAs<Loc>())
+ return false;
- if (ArgNum >= Call.getNumArgs())
- continue;
+ const bool IsNonConstRef = Ty->isReferenceType() && !Ty.isConstQualified();
+ const bool IsNonConstPtr =
+ Ty->isPointerType() && !Ty->getPointeeType().isConstQualified();
- // Mark the given argument.
- State = State->add<TaintArgsOnPostVisit>(ArgNum);
- }
+ return IsNonConstRef || IsNonConstPtr;
+ };
- // Mark all variadic arguments tainted if present.
- if (VariadicType::Dst == VarType) {
- // For all pointer and references that were passed in:
- // If they are not pointing to const data, mark data as tainted.
- // TODO: So far we are just going one level down; ideally we'd need to
- // recurse here.
- for (unsigned i = VariadicIndex; i < Call.getNumArgs(); ++i) {
- const Expr *Arg = Call.getArgExpr(i);
- // Process pointer argument.
- const Type *ArgTy = Arg->getType().getTypePtr();
- QualType PType = ArgTy->getPointeeType();
- if ((!PType.isNull() && !PType.isConstQualified()) ||
- (ArgTy->isReferenceType() && !Arg->getType().isConstQualified())) {
- State = State->add<TaintArgsOnPostVisit>(i);
- }
- }
- }
+ /// Propagate taint where it is necessary.
+ ForEachCallArg(
+ [this, &State, WouldEscape](ArgIdxTy I, const Expr *E, SVal V) {
+ if (PropDstArgs.contains(I))
+ State = State->add<TaintArgsOnPostVisit>(I);
+
+ // TODO: We should traverse all reachable memory regions via the
+ // escaping parameter. Instead of doing that we simply mark only the
+ // referred memory region as tainted.
+ if (WouldEscape(V, E->getType()))
+ State = State->add<TaintArgsOnPostVisit>(I);
+ });
- return State;
+ C.addTransition(State);
}
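The WouldEscape lambda above is what lets a matching propagation rule also taint pointer outputs that the rule did not list explicitly. A minimal sketch of which argument types it treats as escaping; the signatures are made up, and the check looks at the argument expression's type and SVal:

    // Minimal illustrative sketch, not from the diff itself.
    void f(char *Out);       // non-const pointer argument -> escapes, marked for tainting
    void h(const char *In);  // pointer-to-const           -> does not escape
    void k(int ByValue);     // non-pointer rvalue (no Loc) -> does not escape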
-// If argument 0(protocol domain) is network, the return value should get taint.
-bool GenericTaintChecker::TaintPropagationRule::postSocket(
- bool /*IsTainted*/, const CallEvent &Call, CheckerContext &C) {
- SourceLocation DomLoc = Call.getArgExpr(0)->getExprLoc();
- StringRef DomName = C.getMacroNameOrSpelling(DomLoc);
- // White list the internal communication protocols.
- if (DomName.equals("AF_SYSTEM") || DomName.equals("AF_LOCAL") ||
- DomName.equals("AF_UNIX") || DomName.equals("AF_RESERVED_36"))
- return false;
- return true;
+bool GenericTaintRule::UntrustedEnv(CheckerContext &C) {
+ return !C.getAnalysisManager()
+ .getAnalyzerOptions()
+ .ShouldAssumeControlledEnvironment;
}
-bool GenericTaintChecker::isStdin(const Expr *E, CheckerContext &C) {
- ProgramStateRef State = C.getState();
- SVal Val = C.getSVal(E);
-
- // stdin is a pointer, so it would be a region.
- const MemRegion *MemReg = Val.getAsRegion();
+bool GenericTaintChecker::generateReportIfTainted(const Expr *E, StringRef Msg,
+ CheckerContext &C) const {
+ assert(E);
+ Optional<SVal> TaintedSVal{getTaintedPointeeOrPointer(C, C.getSVal(E))};
- // The region should be symbolic, we do not know it's value.
- const auto *SymReg = dyn_cast_or_null<SymbolicRegion>(MemReg);
- if (!SymReg)
+ if (!TaintedSVal)
return false;
- // Get it's symbol and find the declaration region it's pointing to.
- const auto *Sm = dyn_cast<SymbolRegionValue>(SymReg->getSymbol());
- if (!Sm)
- return false;
- const auto *DeclReg = dyn_cast_or_null<DeclRegion>(Sm->getRegion());
- if (!DeclReg)
- return false;
-
- // This region corresponds to a declaration, find out if it's a global/extern
- // variable named stdin with the proper type.
- if (const auto *D = dyn_cast_or_null<VarDecl>(DeclReg->getDecl())) {
- D = D->getCanonicalDecl();
- if (D->getName().contains("stdin") && D->isExternC()) {
- const auto *PtrTy = dyn_cast<PointerType>(D->getType().getTypePtr());
- if (PtrTy && PtrTy->getPointeeType().getCanonicalType() ==
- C.getASTContext().getFILEType().getCanonicalType())
- return true;
- }
+ // Generate diagnostic.
+ if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
+ auto report = std::make_unique<PathSensitiveBugReport>(BT, Msg, N);
+ report->addRange(E->getSourceRange());
+ report->addVisitor(std::make_unique<TaintBugVisitor>(*TaintedSVal));
+ C.emitReport(std::move(report));
+ return true;
}
return false;
}
+/// TODO: remove checking for printf format attributes and socket whitelisting
+/// from GenericTaintChecker, and that means the following functions:
+/// getPrintfFormatArgumentNum,
+/// GenericTaintChecker::checkUncontrolledFormatString,
+/// GenericTaintChecker::taintUnsafeSocketProtocol
+
static bool getPrintfFormatArgumentNum(const CallEvent &Call,
const CheckerContext &C,
- unsigned &ArgNum) {
+ ArgIdxTy &ArgNum) {
// Find if the function contains a format string argument.
// Handles: fprintf, printf, sprintf, snprintf, vfprintf, vprintf, vsprintf,
// vsnprintf, syslog, custom annotated functions.
- const FunctionDecl *FDecl = Call.getDecl()->getAsFunction();
+ const Decl *CallDecl = Call.getDecl();
+ if (!CallDecl)
+ return false;
+ const FunctionDecl *FDecl = CallDecl->getAsFunction();
if (!FDecl)
return false;
+
+ const ArgIdxTy CallNumArgs = fromArgumentCount(Call.getNumArgs());
+
for (const auto *Format : FDecl->specific_attrs<FormatAttr>()) {
ArgNum = Format->getFormatIdx() - 1;
- if ((Format->getType()->getName() == "printf") &&
- Call.getNumArgs() > ArgNum)
+ if ((Format->getType()->getName() == "printf") && CallNumArgs > ArgNum)
return true;
}
- // Or if a function is named setproctitle (this is a heuristic).
- if (C.getCalleeName(FDecl).contains("setproctitle")) {
- ArgNum = 0;
- return true;
- }
-
- return false;
-}
-
-bool GenericTaintChecker::generateReportIfTainted(const Expr *E, StringRef Msg,
- CheckerContext &C) const {
- assert(E);
-
- // Check for taint.
- ProgramStateRef State = C.getState();
- Optional<SVal> PointedToSVal = getPointeeOf(C, E);
- SVal TaintedSVal;
- if (PointedToSVal && isTainted(State, *PointedToSVal))
- TaintedSVal = *PointedToSVal;
- else if (isTainted(State, E, C.getLocationContext()))
- TaintedSVal = C.getSVal(E);
- else
- return false;
-
- // Generate diagnostic.
- if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
- initBugType();
- auto report = std::make_unique<PathSensitiveBugReport>(*BT, Msg, N);
- report->addRange(E->getSourceRange());
- report->addVisitor(std::make_unique<TaintBugVisitor>(TaintedSVal));
- C.emitReport(std::move(report));
- return true;
- }
return false;
}
bool GenericTaintChecker::checkUncontrolledFormatString(
const CallEvent &Call, CheckerContext &C) const {
// Check if the function contains a format string argument.
- unsigned ArgNum = 0;
+ ArgIdxTy ArgNum = 0;
if (!getPrintfFormatArgumentNum(Call, C, ArgNum))
return false;
@@ -864,102 +850,32 @@ bool GenericTaintChecker::checkUncontrolledFormatString(
MsgUncontrolledFormatString, C);
}
-bool GenericTaintChecker::checkSystemCall(const CallEvent &Call, StringRef Name,
- CheckerContext &C) const {
- // TODO: It might make sense to run this check on demand. In some cases,
- // we should check if the environment has been cleansed here. We also might
- // need to know if the user was reset before these calls(seteuid).
- unsigned ArgNum = llvm::StringSwitch<unsigned>(Name)
- .Case("system", 0)
- .Case("popen", 0)
- .Case("execl", 0)
- .Case("execle", 0)
- .Case("execlp", 0)
- .Case("execv", 0)
- .Case("execvp", 0)
- .Case("execvP", 0)
- .Case("execve", 0)
- .Case("dlopen", 0)
- .Default(InvalidArgIndex);
-
- if (ArgNum == InvalidArgIndex || Call.getNumArgs() < (ArgNum + 1))
- return false;
-
- return generateReportIfTainted(Call.getArgExpr(ArgNum), MsgSanitizeSystemArgs,
- C);
-}
-
-// TODO: Should this check be a part of the CString checker?
-// If yes, should taint be a global setting?
-bool GenericTaintChecker::checkTaintedBufferSize(const CallEvent &Call,
- CheckerContext &C) const {
- const auto *FDecl = Call.getDecl()->getAsFunction();
- // If the function has a buffer size argument, set ArgNum.
- unsigned ArgNum = InvalidArgIndex;
- unsigned BId = 0;
- if ((BId = FDecl->getMemoryFunctionKind())) {
- switch (BId) {
- case Builtin::BImemcpy:
- case Builtin::BImemmove:
- case Builtin::BIstrncpy:
- ArgNum = 2;
- break;
- case Builtin::BIstrndup:
- ArgNum = 1;
- break;
- default:
- break;
- }
- }
+void GenericTaintChecker::taintUnsafeSocketProtocol(const CallEvent &Call,
+ CheckerContext &C) const {
+ if (Call.getNumArgs() < 1)
+ return;
+ const IdentifierInfo *ID = Call.getCalleeIdentifier();
+ if (!ID)
+ return;
+ if (!ID->getName().equals("socket"))
+ return;
- if (ArgNum == InvalidArgIndex) {
- using CCtx = CheckerContext;
- if (CCtx::isCLibraryFunction(FDecl, "malloc") ||
- CCtx::isCLibraryFunction(FDecl, "calloc") ||
- CCtx::isCLibraryFunction(FDecl, "alloca"))
- ArgNum = 0;
- else if (CCtx::isCLibraryFunction(FDecl, "memccpy"))
- ArgNum = 3;
- else if (CCtx::isCLibraryFunction(FDecl, "realloc"))
- ArgNum = 1;
- else if (CCtx::isCLibraryFunction(FDecl, "bcopy"))
- ArgNum = 2;
- }
+ SourceLocation DomLoc = Call.getArgExpr(0)->getExprLoc();
+ StringRef DomName = C.getMacroNameOrSpelling(DomLoc);
+ // Allow internal communication protocols.
+ bool SafeProtocol = DomName.equals("AF_SYSTEM") ||
+ DomName.equals("AF_LOCAL") || DomName.equals("AF_UNIX") ||
+ DomName.equals("AF_RESERVED_36");
+ if (SafeProtocol)
+ return;
- return ArgNum != InvalidArgIndex && Call.getNumArgs() > ArgNum &&
- generateReportIfTainted(Call.getArgExpr(ArgNum), MsgTaintedBufferSize,
- C);
+ C.addTransition(C.getState()->add<TaintArgsOnPostVisit>(ReturnValueIndex));
}
-bool GenericTaintChecker::checkCustomSinks(const CallEvent &Call,
- const FunctionData &FData,
- CheckerContext &C) const {
- auto It = findFunctionInConfig(CustomSinks, FData);
- if (It == CustomSinks.end())
- return false;
-
- const auto &Value = It->second;
- const GenericTaintChecker::ArgVector &Args = Value.second;
- for (unsigned ArgNum : Args) {
- if (ArgNum >= Call.getNumArgs())
- continue;
-
- if (generateReportIfTainted(Call.getArgExpr(ArgNum), MsgCustomSink, C))
- return true;
- }
-
- return false;
-}
+/// Checker registration
void ento::registerGenericTaintChecker(CheckerManager &Mgr) {
- auto *Checker = Mgr.registerChecker<GenericTaintChecker>();
- std::string Option{"Config"};
- StringRef ConfigFile =
- Mgr.getAnalyzerOptions().getCheckerStringOption(Checker, Option);
- llvm::Optional<TaintConfig> Config =
- getConfiguration<TaintConfig>(Mgr, Checker, Option, ConfigFile);
- if (Config)
- Checker->parseConfiguration(Mgr, Option, std::move(Config.getValue()));
+ Mgr.registerChecker<GenericTaintChecker>();
}
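Put together, the built-in table makes the registered checker flag code such as the following minimal example: 'scanf' is a source whose variadic arguments from index 1 on get tainted, and argument 0 of 'system' is a sink reported with MsgSanitizeSystemArgs; the snippet itself is made up for illustration.

    // Minimal illustrative example of source the registered checker should flag.
    #include <stdio.h>
    #include <stdlib.h>

    void vulnerable(void) {
      char cmd[128];
      if (scanf("%127s", cmd) != 1)  // source: taints the data 'cmd' points to
        return;
      system(cmd);                   // sink: "Untrusted data is passed to a system call ..."
    }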
bool ento::shouldRegisterGenericTaintChecker(const CheckerManager &mgr) {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index 10ed6149528c..57080a84451a 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -1643,7 +1643,7 @@ void MallocChecker::checkPostObjCMessage(const ObjCMethodCall &Call,
ProgramStateRef State =
FreeMemAux(C, Call.getArgExpr(0), Call, C.getState(),
/*Hold=*/true, IsKnownToBeAllocatedMemory, AF_Malloc,
- /*RetNullOnFailure=*/true);
+ /*ReturnsNullOnFailure=*/true);
C.addTransition(State);
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
index 517a5d78271b..aa70db041c76 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
@@ -23,7 +23,6 @@
using namespace clang;
using namespace ento;
-using llvm::APSInt;
namespace {
class MmapWriteExecChecker : public Checker<check::PreCall> {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp
index cd502241ef61..cf97439a468d 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp
@@ -59,7 +59,7 @@ private:
} // namespace
static std::string getName(const CallEvent &Call) {
- std::string Name = "";
+ std::string Name;
if (const auto *MD = dyn_cast<CXXMethodDecl>(Call.getDecl()))
if (const CXXRecordDecl *RD = MD->getParent())
Name += RD->getNameAsString() + "::";
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.h
index ec6a7144fa45..e35ea4ef05dd 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.h
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.h
@@ -17,10 +17,6 @@
#include <utility>
namespace clang {
-class CXXRecordDecl;
-class CXXBaseSpecifier;
-class FunctionDecl;
-class CXXMethodDecl;
class Expr;
/// This function de-facto defines a set of transformations that we consider
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h
index 730a59977175..753adea0d14d 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h
@@ -15,7 +15,6 @@ namespace clang {
class CXXBaseSpecifier;
class CXXMethodDecl;
class CXXRecordDecl;
-class Expr;
class FunctionDecl;
class Type;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Yaml.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Yaml.h
index ec612dde3b8b..497189f4c160 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Yaml.h
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Yaml.h
@@ -57,4 +57,4 @@ llvm::Optional<T> getConfiguration(CheckerManager &Mgr, Checker *Chk,
} // namespace ento
} // namespace clang
-#endif // LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MOVE_H
+#endif // LLVM_CLANG_LIB_STATICANALYZER_CHECKER_YAML_H
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
index b957bec7493e..e13387fb1fc8 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -2804,7 +2804,8 @@ bool ConditionBRVisitor::patternMatch(const Expr *Ex,
Out << '\''
<< Lexer::getSourceText(
CharSourceRange::getTokenRange(Ex->getSourceRange()),
- BRC.getSourceManager(), BRC.getASTContext().getLangOpts(), 0)
+ BRC.getSourceManager(), BRC.getASTContext().getLangOpts(),
+ nullptr)
<< '\'';
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
index 637e4edfd778..302a971a15f2 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
@@ -416,7 +416,10 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
case CK_IntegralCast: {
// Delegate to SValBuilder to process.
SVal V = state->getSVal(Ex, LCtx);
- V = svalBuilder.evalIntegralCast(state, V, T, ExTy);
+ if (AMgr.options.ShouldSupportSymbolicIntegerCasts)
+ V = svalBuilder.evalCast(V, T, ExTy);
+ else
+ V = svalBuilder.evalIntegralCast(state, V, T, ExTy);
state = state->BindExpr(CastE, LCtx, V);
Bldr.generateNode(CastE, Pred, state);
continue;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
index 1ccb0de92fba..8d4e0bbb7dec 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
@@ -54,11 +54,7 @@ ProgramState::ProgramState(ProgramStateManager *mgr, const Environment& env,
}
ProgramState::ProgramState(const ProgramState &RHS)
- : llvm::FoldingSetNode(),
- stateMgr(RHS.stateMgr),
- Env(RHS.Env),
- store(RHS.store),
- GDM(RHS.GDM),
+ : stateMgr(RHS.stateMgr), Env(RHS.Env), store(RHS.store), GDM(RHS.GDM),
refCount(0) {
stateMgr->getStoreManager().incrementReferenceCount(store);
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
index 8edcef319088..bb3261bae3bf 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -980,15 +980,19 @@ SVal SValBuilder::evalCastSubKind(nonloc::SymbolVal V, QualType CastTy,
} else {
// Symbol to integer, float.
QualType T = Context.getCanonicalType(SE->getType());
- // If types are the same or both are integers, ignore the cast.
- // FIXME: Remove this hack when we support symbolic truncation/extension.
- // HACK: If both castTy and T are integers, ignore the cast. This is
- // not a permanent solution. Eventually we want to precisely handle
- // extension/truncation of symbolic integers. This prevents us from losing
- // precision when we assign 'x = y' and 'y' is symbolic and x and y are
- // different integer types.
- if (haveSameType(T, CastTy))
- return V;
+
+ // Produce SymbolCast if CastTy and T are different integers.
+ // NOTE: In the end the type of SymbolCast shall be equal to CastTy.
+ if (T->isIntegralOrEnumerationType() &&
+ CastTy->isIntegralOrEnumerationType()) {
+ AnalyzerOptions &Opts =
+ StateMgr.getOwningEngine().getAnalysisManager().getAnalyzerOptions();
+ // If appropriate option is disabled, ignore the cast.
+ // NOTE: ShouldSupportSymbolicIntegerCasts is `false` by default.
+ if (!Opts.ShouldSupportSymbolicIntegerCasts)
+ return V;
+ return simplifySymbolCast(V, CastTy);
+ }
if (!Loc::isLocType(CastTy))
if (!IsUnknownOriginalType || !CastTy->isFloatingType() ||
T->isFloatingType())
@@ -1004,3 +1008,75 @@ SVal SValBuilder::evalCastSubKind(nonloc::PointerToMember V, QualType CastTy,
// Member pointer to whatever.
return V;
}
+
+SVal clang::ento::SValBuilder::simplifySymbolCast(nonloc::SymbolVal V,
+ QualType CastTy) {
+ // We use seven conditions to recognize a simplification case.
+ // For clarity let `CastTy` be `C`, SE->getType() - `T`, root type - `R`,
+ // prefix `u` for unsigned, `s` for signed, no prefix - any sign:
+ // E.g. (char)(short)(uint x)
+ // ( sC )( sT )( uR x)
+ //
+ // C === R (the same type)
+ // (char)(char x) -> (char x)
+ // (long)(long x) -> (long x)
+ // Note: Comparison operators below are for bit width.
+ // C == T
+ // (short)(short)(int x) -> (short)(int x)
+ // (int)(long)(char x) -> (int)(char x) (sizeof(long) == sizeof(int))
+ // (long)(ullong)(char x) -> (long)(char x) (sizeof(long) == sizeof(ullong))
+ // C < T
+ // (short)(int)(char x) -> (short)(char x)
+ // (char)(int)(short x) -> (char)(short x)
+ // (short)(int)(short x) -> (short x)
+ // C > T > uR
+ // (int)(short)(uchar x) -> (int)(uchar x)
+ // (uint)(short)(uchar x) -> (uint)(uchar x)
+ // (int)(ushort)(uchar x) -> (int)(uchar x)
+ // C > sT > sR
+ // (int)(short)(char x) -> (int)(char x)
+ // (uint)(short)(char x) -> (uint)(char x)
+ // C > sT == sR
+ // (int)(char)(char x) -> (int)(char x)
+ // (uint)(short)(short x) -> (uint)(short x)
+ // C > uT == uR
+ // (int)(uchar)(uchar x) -> (int)(uchar x)
+ // (uint)(ushort)(ushort x) -> (uint)(ushort x)
+ // (llong)(ulong)(uint x) -> (llong)(uint x) (sizeof(ulong) == sizeof(uint))
+
+ SymbolRef SE = V.getSymbol();
+ QualType T = Context.getCanonicalType(SE->getType());
+
+ if (T == CastTy)
+ return V;
+
+ if (!isa<SymbolCast>(SE))
+ return makeNonLoc(SE, T, CastTy);
+
+ SymbolRef RootSym = cast<SymbolCast>(SE)->getOperand();
+ QualType RT = RootSym->getType().getCanonicalType();
+
+ BasicValueFactory &BVF = getBasicValueFactory();
+ APSIntType CTy = BVF.getAPSIntType(CastTy);
+ APSIntType TTy = BVF.getAPSIntType(T);
+
+ const auto WC = CTy.getBitWidth();
+ const auto WT = TTy.getBitWidth();
+
+ if (WC <= WT) {
+ const bool isSameType = (RT == CastTy);
+ if (isSameType)
+ return nonloc::SymbolVal(RootSym);
+ return makeNonLoc(RootSym, RT, CastTy);
+ }
+
+ APSIntType RTy = BVF.getAPSIntType(RT);
+ const auto WR = RTy.getBitWidth();
+ const bool UT = TTy.isUnsigned();
+ const bool UR = RTy.isUnsigned();
+
+ if (((WT > WR) && (UR || !UT)) || ((WT == WR) && (UT == UR)))
+ return makeNonLoc(RootSym, RT, CastTy);
+
+ return makeNonLoc(SE, T, CastTy);
+}
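
The cast-folding rules above can be checked in isolation. Below is a minimal standalone sketch — the helper name and test values are ours, not part of the patch — mirroring the final width/sign condition in simplifySymbolCast that decides when (C)(T)(R x) folds to (C)(R x).

#include <cassert>

// Hypothetical helper (illustration only): WC/WT/WR are the bit widths of the
// cast type, the intermediate type and the root type; the flags give the
// signedness of T and R. Returns true when the intermediate cast is droppable.
static bool canDropIntermediateCast(unsigned WC, unsigned WT, unsigned WR,
                                    bool TIsUnsigned, bool RIsUnsigned) {
  if (WC <= WT)
    return true; // The outer cast is at most as wide as T, so T's cast is moot.
  // WC > WT: the intermediate cast can be dropped only when (T)(R x) is
  // value-preserving, i.e. T does not truncate R and does not flip its sign.
  return ((WT > WR) && (RIsUnsigned || !TIsUnsigned)) ||
         ((WT == WR) && (TIsUnsigned == RIsUnsigned));
}

int main() {
  // C > T > uR:  (int)(short)(uchar x) -> (int)(uchar x)
  assert(canDropIntermediateCast(32, 16, 8, /*TIsUnsigned=*/false,
                                 /*RIsUnsigned=*/true));
  // C > sT == sR:  (int)(char)(char x) -> (int)(char x)
  assert(canDropIntermediateCast(32, 8, 8, false, false));
  // (int)(short)(int x): the truncating cast to short must stay.
  assert(!canDropIntermediateCast(32, 16, 32, false, false));
  return 0;
}
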
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
index dad8a7b3caae..0bd47ced15a5 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
@@ -432,7 +432,7 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
return evalCast(lhs, resultTy, QualType{});
}
- while (1) {
+ while (true) {
switch (lhs.getSubKind()) {
default:
return makeSymExprValNN(op, lhs, rhs, resultTy);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index f6ddcb763f9d..b152f9d80ef5 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -143,7 +143,7 @@ public:
}
if (Opts->PrintStats || Opts->ShouldSerializeStats) {
- llvm::EnableStatistics(/* PrintOnExit= */ false);
+ llvm::EnableStatistics(/* DoPrintOnExit= */ false);
}
if (Opts->ShouldDisplayMacroExpansions)
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/ModelInjector.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/ModelInjector.h
index d2016c3b112c..4db26028362f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/ModelInjector.h
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/ModelInjector.h
@@ -29,10 +29,7 @@
namespace clang {
class CompilerInstance;
-class ASTUnit;
-class ASTReader;
class NamedDecl;
-class Module;
namespace ento {
class ModelInjector : public CodeInjector {
diff --git a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
index acceec690c11..80a70252721d 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
@@ -16,10 +16,10 @@ using namespace clang;
using namespace tooling;
using namespace dependencies;
-llvm::ErrorOr<llvm::vfs::Status>
-CachedFileSystemEntry::initFile(StringRef Filename, llvm::vfs::FileSystem &FS) {
+llvm::ErrorOr<DependencyScanningWorkerFilesystem::TentativeEntry>
+DependencyScanningWorkerFilesystem::readFile(StringRef Filename) {
// Load the file and its content from the file system.
- auto MaybeFile = FS.openFileForRead(Filename);
+ auto MaybeFile = getUnderlyingFS().openFileForRead(Filename);
if (!MaybeFile)
return MaybeFile.getError();
auto File = std::move(*MaybeFile);
@@ -34,24 +34,43 @@ CachedFileSystemEntry::initFile(StringRef Filename, llvm::vfs::FileSystem &FS) {
return MaybeBuffer.getError();
auto Buffer = std::move(*MaybeBuffer);
- OriginalContents = std::move(Buffer);
- return Stat;
+ // If the file size changed between read and stat, pretend it didn't.
+ if (Stat.getSize() != Buffer->getBufferSize())
+ Stat = llvm::vfs::Status::copyWithNewSize(Stat, Buffer->getBufferSize());
+
+ return TentativeEntry(Stat, std::move(Buffer));
}
-void CachedFileSystemEntry::minimizeFile() {
- assert(OriginalContents && "minimizing missing contents");
+EntryRef DependencyScanningWorkerFilesystem::minimizeIfNecessary(
+ const CachedFileSystemEntry &Entry, StringRef Filename, bool Disable) {
+ if (Entry.isError() || Entry.isDirectory() || Disable ||
+ !shouldMinimize(Filename, Entry.getUniqueID()))
+ return EntryRef(/*Minimized=*/false, Filename, Entry);
+
+ CachedFileContents *Contents = Entry.getContents();
+ assert(Contents && "contents not initialized");
+
+ // Double-checked locking.
+ if (Contents->MinimizedAccess.load())
+ return EntryRef(/*Minimized=*/true, Filename, Entry);
+
+ std::lock_guard<std::mutex> GuardLock(Contents->ValueLock);
+
+ // Double-checked locking.
+ if (Contents->MinimizedAccess.load())
+ return EntryRef(/*Minimized=*/true, Filename, Entry);
llvm::SmallString<1024> MinimizedFileContents;
// Minimize the file down to directives that might affect the dependencies.
SmallVector<minimize_source_to_dependency_directives::Token, 64> Tokens;
- if (minimizeSourceToDependencyDirectives(OriginalContents->getBuffer(),
+ if (minimizeSourceToDependencyDirectives(Contents->Original->getBuffer(),
MinimizedFileContents, Tokens)) {
// FIXME: Propagate the diagnostic if desired by the client.
// Use the original file if the minimization failed.
- MinimizedContentsStorage =
- llvm::MemoryBuffer::getMemBuffer(*OriginalContents);
- MinimizedContentsAccess.store(MinimizedContentsStorage.get());
- return;
+ Contents->MinimizedStorage =
+ llvm::MemoryBuffer::getMemBuffer(*Contents->Original);
+ Contents->MinimizedAccess.store(Contents->MinimizedStorage.get());
+ return EntryRef(/*Minimized=*/true, Filename, Entry);
}
// The contents produced by the minimizer must be null terminated.
@@ -74,16 +93,17 @@ void CachedFileSystemEntry::minimizeFile() {
}
Mapping[Range.Offset] = Range.Length;
}
- PPSkippedRangeMapping = std::move(Mapping);
+ Contents->PPSkippedRangeMapping = std::move(Mapping);
- MinimizedContentsStorage = std::make_unique<llvm::SmallVectorMemoryBuffer>(
+ Contents->MinimizedStorage = std::make_unique<llvm::SmallVectorMemoryBuffer>(
std::move(MinimizedFileContents));
- // The algorithm in `getOrCreateFileSystemEntry` uses the presence of
- // minimized contents to decide whether an entry is up-to-date or not.
- // If it is up-to-date, the skipped range mappings must be already computed.
- // This is why we need to store the minimized contents **after** storing the
- // skipped range mappings. Failing to do so would lead to a data race.
- MinimizedContentsAccess.store(MinimizedContentsStorage.get());
+ // This function performs double-checked locking using `MinimizedAccess`.
+ // Assigning it must be the last thing this function does. If we were to
+ // assign it before `PPSkippedRangeMapping`, other threads may skip the
+ // critical section (`MinimizedAccess != nullptr`) and access the mappings
+ // that are about to be initialized, leading to a data race.
+ Contents->MinimizedAccess.store(Contents->MinimizedStorage.get());
+ return EntryRef(/*Minimized=*/true, Filename, Entry);
}
DependencyScanningFilesystemSharedCache::
@@ -98,12 +118,70 @@ DependencyScanningFilesystemSharedCache::
CacheShards = std::make_unique<CacheShard[]>(NumShards);
}
-DependencyScanningFilesystemSharedCache::SharedFileSystemEntry &
-DependencyScanningFilesystemSharedCache::get(StringRef Key) {
- CacheShard &Shard = CacheShards[llvm::hash_value(Key) % NumShards];
- std::lock_guard<std::mutex> LockGuard(Shard.CacheLock);
- auto It = Shard.Cache.try_emplace(Key);
- return It.first->getValue();
+DependencyScanningFilesystemSharedCache::CacheShard &
+DependencyScanningFilesystemSharedCache::getShardForFilename(
+ StringRef Filename) const {
+ return CacheShards[llvm::hash_value(Filename) % NumShards];
+}
+
+DependencyScanningFilesystemSharedCache::CacheShard &
+DependencyScanningFilesystemSharedCache::getShardForUID(
+ llvm::sys::fs::UniqueID UID) const {
+ auto Hash = llvm::hash_combine(UID.getDevice(), UID.getFile());
+ return CacheShards[Hash % NumShards];
+}
+
+const CachedFileSystemEntry *
+DependencyScanningFilesystemSharedCache::CacheShard::findEntryByFilename(
+ StringRef Filename) const {
+ std::lock_guard<std::mutex> LockGuard(CacheLock);
+ auto It = EntriesByFilename.find(Filename);
+ return It == EntriesByFilename.end() ? nullptr : It->getValue();
+}
+
+const CachedFileSystemEntry *
+DependencyScanningFilesystemSharedCache::CacheShard::findEntryByUID(
+ llvm::sys::fs::UniqueID UID) const {
+ std::lock_guard<std::mutex> LockGuard(CacheLock);
+ auto It = EntriesByUID.find(UID);
+ return It == EntriesByUID.end() ? nullptr : It->getSecond();
+}
+
+const CachedFileSystemEntry &
+DependencyScanningFilesystemSharedCache::CacheShard::
+ getOrEmplaceEntryForFilename(StringRef Filename,
+ llvm::ErrorOr<llvm::vfs::Status> Stat) {
+ std::lock_guard<std::mutex> LockGuard(CacheLock);
+ auto Insertion = EntriesByFilename.insert({Filename, nullptr});
+ if (Insertion.second)
+ Insertion.first->second =
+ new (EntryStorage.Allocate()) CachedFileSystemEntry(std::move(Stat));
+ return *Insertion.first->second;
+}
+
+const CachedFileSystemEntry &
+DependencyScanningFilesystemSharedCache::CacheShard::getOrEmplaceEntryForUID(
+ llvm::sys::fs::UniqueID UID, llvm::vfs::Status Stat,
+ std::unique_ptr<llvm::MemoryBuffer> Contents) {
+ std::lock_guard<std::mutex> LockGuard(CacheLock);
+ auto Insertion = EntriesByUID.insert({UID, nullptr});
+ if (Insertion.second) {
+ CachedFileContents *StoredContents = nullptr;
+ if (Contents)
+ StoredContents = new (ContentsStorage.Allocate())
+ CachedFileContents(std::move(Contents));
+ Insertion.first->second = new (EntryStorage.Allocate())
+ CachedFileSystemEntry(std::move(Stat), StoredContents);
+ }
+ return *Insertion.first->second;
+}
+
+const CachedFileSystemEntry &
+DependencyScanningFilesystemSharedCache::CacheShard::
+ getOrInsertEntryForFilename(StringRef Filename,
+ const CachedFileSystemEntry &Entry) {
+ std::lock_guard<std::mutex> LockGuard(CacheLock);
+ return *EntriesByFilename.insert({Filename, &Entry}).first->getValue();
}
/// Whitelist file extensions that should be minimized, treating no extension as
@@ -133,68 +211,79 @@ static bool shouldCacheStatFailures(StringRef Filename) {
}
void DependencyScanningWorkerFilesystem::disableMinimization(
- StringRef RawFilename) {
- llvm::SmallString<256> Filename;
- llvm::sys::path::native(RawFilename, Filename);
- NotToBeMinimized.insert(Filename);
+ StringRef Filename) {
+ // Since we're not done setting up `NotToBeMinimized` yet, we need to disable
+ // minimization explicitly.
+ if (llvm::ErrorOr<EntryRef> Result =
+ getOrCreateFileSystemEntry(Filename, /*DisableMinimization=*/true))
+ NotToBeMinimized.insert(Result->getStatus().getUniqueID());
}
-bool DependencyScanningWorkerFilesystem::shouldMinimize(StringRef RawFilename) {
- if (!shouldMinimizeBasedOnExtension(RawFilename))
- return false;
-
- llvm::SmallString<256> Filename;
- llvm::sys::path::native(RawFilename, Filename);
- return !NotToBeMinimized.contains(Filename);
+bool DependencyScanningWorkerFilesystem::shouldMinimize(
+ StringRef Filename, llvm::sys::fs::UniqueID UID) {
+ return shouldMinimizeBasedOnExtension(Filename) &&
+ !NotToBeMinimized.contains(UID);
}
-void CachedFileSystemEntry::init(llvm::ErrorOr<llvm::vfs::Status> &&MaybeStatus,
- StringRef Filename,
- llvm::vfs::FileSystem &FS) {
- if (!MaybeStatus || MaybeStatus->isDirectory())
- MaybeStat = std::move(MaybeStatus);
- else
- MaybeStat = initFile(Filename, FS);
+const CachedFileSystemEntry &
+DependencyScanningWorkerFilesystem::getOrEmplaceSharedEntryForUID(
+ TentativeEntry TEntry) {
+ auto &Shard = SharedCache.getShardForUID(TEntry.Status.getUniqueID());
+ return Shard.getOrEmplaceEntryForUID(TEntry.Status.getUniqueID(),
+ std::move(TEntry.Status),
+ std::move(TEntry.Contents));
}
-llvm::ErrorOr<EntryRef>
-DependencyScanningWorkerFilesystem::getOrCreateFileSystemEntry(
+const CachedFileSystemEntry *
+DependencyScanningWorkerFilesystem::findEntryByFilenameWithWriteThrough(
StringRef Filename) {
- bool ShouldBeMinimized = shouldMinimize(Filename);
-
- const auto *Entry = LocalCache.getCachedEntry(Filename);
- if (Entry && !Entry->needsUpdate(ShouldBeMinimized))
- return EntryRef(ShouldBeMinimized, *Entry);
-
- // FIXME: Handle PCM/PCH files.
- // FIXME: Handle module map files.
-
- auto &SharedCacheEntry = SharedCache.get(Filename);
- {
- std::lock_guard<std::mutex> LockGuard(SharedCacheEntry.ValueLock);
- CachedFileSystemEntry &CacheEntry = SharedCacheEntry.Value;
-
- if (!CacheEntry.isInitialized()) {
- auto MaybeStatus = getUnderlyingFS().status(Filename);
- if (!MaybeStatus && !shouldCacheStatFailures(Filename))
- // HACK: We need to always restat non source files if the stat fails.
- // This is because Clang first looks up the module cache and module
- // files before building them, and then looks for them again. If we
- // cache the stat failure, it won't see them the second time.
- return MaybeStatus.getError();
- CacheEntry.init(std::move(MaybeStatus), Filename, getUnderlyingFS());
- }
+ if (const auto *Entry = LocalCache.findEntryByFilename(Filename))
+ return Entry;
+ auto &Shard = SharedCache.getShardForFilename(Filename);
+ if (const auto *Entry = Shard.findEntryByFilename(Filename))
+ return &LocalCache.insertEntryForFilename(Filename, *Entry);
+ return nullptr;
+}
- // Checking `needsUpdate` verifies the entry represents an opened file.
- // Only checking `needsMinimization` could lead to minimization of files
- // that we failed to load (such files don't have `OriginalContents`).
- if (CacheEntry.needsUpdate(ShouldBeMinimized))
- CacheEntry.minimizeFile();
+llvm::ErrorOr<const CachedFileSystemEntry &>
+DependencyScanningWorkerFilesystem::computeAndStoreResult(StringRef Filename) {
+ llvm::ErrorOr<llvm::vfs::Status> Stat = getUnderlyingFS().status(Filename);
+ if (!Stat) {
+ if (!shouldCacheStatFailures(Filename))
+ return Stat.getError();
+ const auto &Entry =
+ getOrEmplaceSharedEntryForFilename(Filename, Stat.getError());
+ return insertLocalEntryForFilename(Filename, Entry);
}
- // Store the result in the local cache.
- Entry = &SharedCacheEntry.Value;
- return EntryRef(ShouldBeMinimized, *Entry);
+ if (const auto *Entry = findSharedEntryByUID(*Stat))
+ return insertLocalEntryForFilename(Filename, *Entry);
+
+ auto TEntry =
+ Stat->isDirectory() ? TentativeEntry(*Stat) : readFile(Filename);
+
+ const CachedFileSystemEntry *SharedEntry = [&]() {
+ if (TEntry) {
+ const auto &UIDEntry = getOrEmplaceSharedEntryForUID(std::move(*TEntry));
+ return &getOrInsertSharedEntryForFilename(Filename, UIDEntry);
+ }
+ return &getOrEmplaceSharedEntryForFilename(Filename, TEntry.getError());
+ }();
+
+ return insertLocalEntryForFilename(Filename, *SharedEntry);
+}
+
+llvm::ErrorOr<EntryRef>
+DependencyScanningWorkerFilesystem::getOrCreateFileSystemEntry(
+ StringRef Filename, bool DisableMinimization) {
+ if (const auto *Entry = findEntryByFilenameWithWriteThrough(Filename))
+ return minimizeIfNecessary(*Entry, Filename, DisableMinimization)
+ .unwrapError();
+ auto MaybeEntry = computeAndStoreResult(Filename);
+ if (!MaybeEntry)
+ return MaybeEntry.getError();
+ return minimizeIfNecessary(*MaybeEntry, Filename, DisableMinimization)
+ .unwrapError();
}
llvm::ErrorOr<llvm::vfs::Status>
@@ -241,16 +330,16 @@ private:
llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>> MinimizedVFSFile::create(
EntryRef Entry, ExcludedPreprocessorDirectiveSkipMapping *PPSkipMappings) {
+ assert(!Entry.isError() && "error");
+
if (Entry.isDirectory())
return std::make_error_code(std::errc::is_a_directory);
- llvm::ErrorOr<StringRef> Contents = Entry.getContents();
- if (!Contents)
- return Contents.getError();
auto Result = std::make_unique<MinimizedVFSFile>(
- llvm::MemoryBuffer::getMemBuffer(*Contents, Entry.getName(),
+ llvm::MemoryBuffer::getMemBuffer(Entry.getContents(),
+ Entry.getStatus().getName(),
/*RequiresNullTerminator=*/false),
- *Entry.getStatus());
+ Entry.getStatus());
const auto *EntrySkipMappings = Entry.getPPSkippedRangeMapping();
if (EntrySkipMappings && !EntrySkipMappings->empty() && PPSkipMappings)
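
The comments in minimizeIfNecessary above describe a double-checked locking scheme around the MinimizedAccess atomic: the pointer is read before and after taking the lock, and it is published only after every field it guards has been written. A minimal self-contained sketch of the same pattern follows; the struct and function names are illustrative only, and the "minimization" step is a placeholder.

#include <atomic>
#include <memory>
#include <mutex>
#include <string>

struct CachedContents {
  std::mutex ValueLock;
  std::unique_ptr<std::string> MinimizedStorage;
  std::atomic<std::string *> MinimizedAccess{nullptr};
};

std::string *getOrMinimize(CachedContents &C, const std::string &Original) {
  if (std::string *S = C.MinimizedAccess.load()) // fast path, no lock taken
    return S;
  std::lock_guard<std::mutex> Guard(C.ValueLock);
  if (std::string *S = C.MinimizedAccess.load()) // re-check under the lock
    return S;
  // ... initialize any side data guarded by the atomic here ...
  C.MinimizedStorage = std::make_unique<std::string>(Original); // placeholder work
  // Publish last, so readers that skip the lock never observe a partially
  // initialized entry.
  C.MinimizedAccess.store(C.MinimizedStorage.get());
  return C.MinimizedStorage.get();
}
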
diff --git a/contrib/llvm-project/clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp b/contrib/llvm-project/clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp
index 29787b8a8894..75d0d50d851f 100644
--- a/contrib/llvm-project/clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp
@@ -61,7 +61,7 @@ private:
continue;
llvm::BumpPtrAllocator Alloc;
llvm::StringSaver Saver(Alloc);
- llvm::cl::ExpandResponseFiles(Saver, Tokenizer, Argv, false, false,
+ llvm::cl::ExpandResponseFiles(Saver, Tokenizer, Argv, false, false, false,
llvm::StringRef(Cmd.Directory), *FS);
// Don't assign directly, Argv aliases CommandLine.
std::vector<std::string> ExpandedArgv(Argv.begin(), Argv.end());
diff --git a/contrib/llvm-project/clang/lib/Tooling/InterpolatingCompilationDatabase.cpp b/contrib/llvm-project/clang/lib/Tooling/InterpolatingCompilationDatabase.cpp
index c1e25c41f719..51e8439b6b79 100644
--- a/contrib/llvm-project/clang/lib/Tooling/InterpolatingCompilationDatabase.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/InterpolatingCompilationDatabase.cpp
@@ -243,8 +243,7 @@ struct TransferableCommand {
llvm::Twine(ClangCLMode ? "/std:" : "-std=") +
LangStandard::getLangStandardForKind(Std).getName()).str());
}
- if (Filename.startswith("-") || (ClangCLMode && Filename.startswith("/")))
- Result.CommandLine.push_back("--");
+ Result.CommandLine.push_back("--");
Result.CommandLine.push_back(std::string(Filename));
return Result;
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/Syntax/Tree.cpp b/contrib/llvm-project/clang/lib/Tooling/Syntax/Tree.cpp
index c813865e95cd..981bac508f73 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Syntax/Tree.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Syntax/Tree.cpp
@@ -9,6 +9,7 @@
#include "clang/Basic/TokenKinds.h"
#include "clang/Tooling/Syntax/Nodes.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Casting.h"
#include <cassert>
@@ -202,7 +203,7 @@ static void dumpLeaf(raw_ostream &OS, const syntax::Leaf *L,
}
static void dumpNode(raw_ostream &OS, const syntax::Node *N,
- const SourceManager &SM, std::vector<bool> IndentMask) {
+ const SourceManager &SM, llvm::BitVector IndentMask) {
auto DumpExtraInfo = [&OS](const syntax::Node *N) {
if (N->getRole() != syntax::NodeRole::Unknown)
OS << " " << N->getRole();
@@ -228,8 +229,8 @@ static void dumpNode(raw_ostream &OS, const syntax::Node *N,
OS << "\n";
for (const syntax::Node &It : T->getChildren()) {
- for (bool Filled : IndentMask) {
- if (Filled)
+ for (unsigned Idx = 0; Idx < IndentMask.size(); ++Idx) {
+ if (IndentMask[Idx])
OS << "| ";
else
OS << " ";
diff --git a/contrib/llvm-project/clang/lib/Tooling/Transformer/Parsing.cpp b/contrib/llvm-project/clang/lib/Tooling/Transformer/Parsing.cpp
index 242db2a16b43..4f41e2e90def 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Transformer/Parsing.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Transformer/Parsing.cpp
@@ -33,7 +33,6 @@ using namespace transformer;
// much as possible with the AST Matchers parsing.
namespace {
-using llvm::Error;
using llvm::Expected;
template <typename... Ts> using RangeSelectorOp = RangeSelector (*)(Ts...);
diff --git a/contrib/llvm-project/clang/lib/Tooling/Transformer/SourceCodeBuilders.cpp b/contrib/llvm-project/clang/lib/Tooling/Transformer/SourceCodeBuilders.cpp
index a1c99b60216b..7496e968469c 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Transformer/SourceCodeBuilders.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Transformer/SourceCodeBuilders.cpp
@@ -10,6 +10,8 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/ASTMatchers/ASTMatchers.h"
#include "clang/Tooling/Transformer/SourceCode.h"
#include "llvm/ADT/Twine.h"
#include <string>
@@ -60,6 +62,16 @@ bool tooling::needParensAfterUnaryOperator(const Expr &E) {
return false;
}
+bool tooling::isKnownPointerLikeType(QualType Ty, ASTContext &Context) {
+ using namespace ast_matchers;
+ const auto PointerLikeTy = type(hasUnqualifiedDesugaredType(
+ recordType(hasDeclaration(cxxRecordDecl(hasAnyName(
+ "::std::unique_ptr", "::std::shared_ptr", "::std::weak_ptr",
+ "::std::optional", "::absl::optional", "::llvm::Optional",
+ "absl::StatusOr", "::llvm::Expected"))))));
+ return match(PointerLikeTy, Ty, Context).size() > 0;
+}
+
llvm::Optional<std::string> tooling::buildParens(const Expr &E,
const ASTContext &Context) {
StringRef Text = getText(E, Context);
@@ -114,8 +126,10 @@ llvm::Optional<std::string> tooling::buildAddressOf(const Expr &E,
return ("&" + Text).str();
}
-llvm::Optional<std::string> tooling::buildDot(const Expr &E,
- const ASTContext &Context) {
+// Append the appropriate access operation (syntactically) to `E`, assuming `E`
+// is a non-pointer value.
+static llvm::Optional<std::string>
+buildAccessForValue(const Expr &E, const ASTContext &Context) {
if (const auto *Op = llvm::dyn_cast<UnaryOperator>(&E))
if (Op->getOpcode() == UO_Deref) {
// Strip leading '*', add following '->'.
@@ -138,8 +152,10 @@ llvm::Optional<std::string> tooling::buildDot(const Expr &E,
return (Text + ".").str();
}
-llvm::Optional<std::string> tooling::buildArrow(const Expr &E,
- const ASTContext &Context) {
+// Append the appropriate access operation (syntactically) to `E`, assuming `E`
+// is a pointer value.
+static llvm::Optional<std::string>
+buildAccessForPointer(const Expr &E, const ASTContext &Context) {
if (const auto *Op = llvm::dyn_cast<UnaryOperator>(&E))
if (Op->getOpcode() == UO_AddrOf) {
// Strip leading '&', add following '.'.
@@ -160,3 +176,63 @@ llvm::Optional<std::string> tooling::buildArrow(const Expr &E,
return ("(" + Text + ")->").str();
return (Text + "->").str();
}
+
+llvm::Optional<std::string> tooling::buildDot(const Expr &E,
+ const ASTContext &Context) {
+ return buildAccessForValue(E, Context);
+}
+
+llvm::Optional<std::string> tooling::buildArrow(const Expr &E,
+ const ASTContext &Context) {
+ return buildAccessForPointer(E, Context);
+}
+
+// If `E` is an overloaded-operator call of kind `K` on an object `O`, returns
+// `O`. Otherwise, returns `nullptr`.
+static const Expr *maybeGetOperatorObjectArg(const Expr &E,
+ OverloadedOperatorKind K) {
+ if (const auto *OpCall = dyn_cast<clang::CXXOperatorCallExpr>(&E)) {
+ if (OpCall->getOperator() == K && OpCall->getNumArgs() == 1)
+ return OpCall->getArg(0);
+ }
+ return nullptr;
+}
+
+static bool treatLikePointer(QualType Ty, PLTClass C, ASTContext &Context) {
+ switch (C) {
+ case PLTClass::Value:
+ return false;
+ case PLTClass::Pointer:
+ return isKnownPointerLikeType(Ty, Context);
+ }
+ llvm_unreachable("Unknown PLTClass enum");
+}
+
+// FIXME: move over the other `maybe` functionality from Stencil. Should all be
+// in one place.
+llvm::Optional<std::string> tooling::buildAccess(const Expr &RawExpression,
+ ASTContext &Context,
+ PLTClass Classification) {
+ if (RawExpression.isImplicitCXXThis())
+ // Return the empty string, because `None` signifies some sort of failure.
+ return std::string();
+
+ const Expr *E = RawExpression.IgnoreImplicitAsWritten();
+
+ if (E->getType()->isAnyPointerType() ||
+ treatLikePointer(E->getType(), Classification, Context)) {
+ // Strip off operator-> calls. They can only occur inside an actual arrow
+ // member access, so we treat them as equivalent to an actual object
+ // expression.
+ if (const auto *Obj = maybeGetOperatorObjectArg(*E, clang::OO_Arrow))
+ E = Obj;
+ return buildAccessForPointer(*E, Context);
+ }
+
+ if (const auto *Obj = maybeGetOperatorObjectArg(*E, clang::OO_Star)) {
+ if (treatLikePointer(Obj->getType(), Classification, Context))
+ return buildAccessForPointer(*Obj, Context);
+ };
+
+ return buildAccessForValue(*E, Context);
+}
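
buildAccess above unifies the old buildDot/buildArrow choice: raw pointers and known pointer-like wrappers (std::unique_ptr, std::optional, llvm::Optional, ...) get "->", everything else gets ".", with overloaded operator-> calls and leading '*'/'&' stripped first. The toy model below mirrors those string rules for simple identifier expressions only; it is not the clang API, and parenthesization and the implicit-this case are omitted.

#include <cassert>
#include <string>

enum class Kind { Value, RawPointer, PointerLike }; // PointerLike ~ unique_ptr etc.

// Toy reimplementation of the string rules in buildAccessForValue /
// buildAccessForPointer, for plain identifier text only.
static std::string toyBuildAccess(std::string Text, Kind K) {
  if (K == Kind::RawPointer || K == Kind::PointerLike) {
    if (!Text.empty() && Text.front() == '&')   // "&x" -> "x."
      return Text.substr(1) + ".";
    return Text + "->";
  }
  if (!Text.empty() && Text.front() == '*')     // "*p" -> "p->"
    return Text.substr(1) + "->";
  return Text + ".";
}

int main() {
  assert(toyBuildAccess("x", Kind::Value) == "x.");
  assert(toyBuildAccess("*p", Kind::Value) == "p->");
  assert(toyBuildAccess("u", Kind::PointerLike) == "u->"); // std::unique_ptr-like
  assert(toyBuildAccess("&x", Kind::RawPointer) == "x.");
  return 0;
}
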
diff --git a/contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp b/contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp
index 8b20ef34c3ff..348d04dbaf4a 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp
@@ -11,7 +11,6 @@
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Expr.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
-#include "clang/ASTMatchers/ASTMatchers.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Lex/Lexer.h"
#include "clang/Tooling/Transformer/SourceCode.h"
@@ -56,39 +55,6 @@ static Error printNode(StringRef Id, const MatchFinder::MatchResult &Match,
return Error::success();
}
-// FIXME: Consider memoizing this function using the `ASTContext`.
-static bool isSmartPointerType(QualType Ty, ASTContext &Context) {
- using namespace ::clang::ast_matchers;
-
- // Optimization: hard-code common smart-pointer types. This can/should be
- // removed if we start caching the results of this function.
- auto KnownSmartPointer =
- cxxRecordDecl(hasAnyName("::std::unique_ptr", "::std::shared_ptr"));
- const auto QuacksLikeASmartPointer = cxxRecordDecl(
- hasMethod(cxxMethodDecl(hasOverloadedOperatorName("->"),
- returns(qualType(pointsTo(type()))))),
- hasMethod(cxxMethodDecl(hasOverloadedOperatorName("*"),
- returns(qualType(references(type()))))));
- const auto SmartPointer = qualType(hasDeclaration(
- cxxRecordDecl(anyOf(KnownSmartPointer, QuacksLikeASmartPointer))));
- return match(SmartPointer, Ty, Context).size() > 0;
-}
-
-// Identifies use of `operator*` on smart pointers, and returns the underlying
-// smart-pointer expression; otherwise, returns null.
-static const Expr *isSmartDereference(const Expr &E, ASTContext &Context) {
- using namespace ::clang::ast_matchers;
-
- const auto HasOverloadedArrow = cxxRecordDecl(hasMethod(cxxMethodDecl(
- hasOverloadedOperatorName("->"), returns(qualType(pointsTo(type()))))));
- // Verify it is a smart pointer by finding `operator->` in the class
- // declaration.
- auto Deref = cxxOperatorCallExpr(
- hasOverloadedOperatorName("*"), hasUnaryOperand(expr().bind("arg")),
- callee(cxxMethodDecl(ofClass(HasOverloadedArrow))));
- return selectFirst<Expr>("arg", match(Deref, E, Context));
-}
-
namespace {
// An arbitrary fragment of code within a stencil.
class RawTextStencil : public StencilInterface {
@@ -196,7 +162,7 @@ public:
break;
case UnaryNodeOperator::MaybeDeref:
if (E->getType()->isAnyPointerType() ||
- isSmartPointerType(E->getType(), *Match.Context)) {
+ tooling::isKnownPointerLikeType(E->getType(), *Match.Context)) {
// Strip off any operator->. This can only occur inside an actual arrow
// member access, so we treat it as equivalent to an actual object
// expression.
@@ -216,7 +182,7 @@ public:
break;
case UnaryNodeOperator::MaybeAddressOf:
if (E->getType()->isAnyPointerType() ||
- isSmartPointerType(E->getType(), *Match.Context)) {
+ tooling::isKnownPointerLikeType(E->getType(), *Match.Context)) {
// Strip off any operator->. This can only occur inside an actual arrow
// member access, so we treat it as equivalent to an actual object
// expression.
@@ -311,34 +277,12 @@ public:
if (E == nullptr)
return llvm::make_error<StringError>(errc::invalid_argument,
"Id not bound: " + BaseId);
- if (!E->isImplicitCXXThis()) {
- llvm::Optional<std::string> S;
- if (E->getType()->isAnyPointerType() ||
- isSmartPointerType(E->getType(), *Match.Context)) {
- // Strip off any operator->. This can only occur inside an actual arrow
- // member access, so we treat it as equivalent to an actual object
- // expression.
- if (const auto *OpCall = dyn_cast<clang::CXXOperatorCallExpr>(E)) {
- if (OpCall->getOperator() == clang::OO_Arrow &&
- OpCall->getNumArgs() == 1) {
- E = OpCall->getArg(0);
- }
- }
- S = tooling::buildArrow(*E, *Match.Context);
- } else if (const auto *Operand = isSmartDereference(*E, *Match.Context)) {
- // `buildDot` already handles the built-in dereference operator, so we
- // only need to catch overloaded `operator*`.
- S = tooling::buildArrow(*Operand, *Match.Context);
- } else {
- S = tooling::buildDot(*E, *Match.Context);
- }
- if (S.hasValue())
- *Result += *S;
- else
- return llvm::make_error<StringError>(
- errc::invalid_argument,
- "Could not construct object text from ID: " + BaseId);
- }
+ llvm::Optional<std::string> S = tooling::buildAccess(*E, *Match.Context);
+ if (!S.hasValue())
+ return llvm::make_error<StringError>(
+ errc::invalid_argument,
+ "Could not construct object text from ID: " + BaseId);
+ *Result += *S;
return Member->eval(Match, Result);
}
};
diff --git a/contrib/llvm-project/clang/tools/driver/cc1_main.cpp b/contrib/llvm-project/clang/tools/driver/cc1_main.cpp
index fd3b25ccb3cb..f648adeba483 100644
--- a/contrib/llvm-project/clang/tools/driver/cc1_main.cpp
+++ b/contrib/llvm-project/clang/tools/driver/cc1_main.cpp
@@ -237,8 +237,10 @@ int cc1_main(ArrayRef<const char *> Argv, const char *Argv0, void *MainAddr) {
static_cast<void*>(&Clang->getDiagnostics()));
DiagsBuffer->FlushDiagnostics(Clang->getDiagnostics());
- if (!Success)
+ if (!Success) {
+ Clang->getDiagnosticClient().finish();
return 1;
+ }
// Execute the frontend actions.
{
diff --git a/contrib/llvm-project/clang/tools/driver/cc1as_main.cpp b/contrib/llvm-project/clang/tools/driver/cc1as_main.cpp
index db3288d75281..6459d1534b39 100644
--- a/contrib/llvm-project/clang/tools/driver/cc1as_main.cpp
+++ b/contrib/llvm-project/clang/tools/driver/cc1as_main.cpp
@@ -228,7 +228,6 @@ bool AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts,
llvm::StringSwitch<llvm::DebugCompressionType>(A->getValue())
.Case("none", llvm::DebugCompressionType::None)
.Case("zlib", llvm::DebugCompressionType::Z)
- .Case("zlib-gnu", llvm::DebugCompressionType::GNU)
.Default(llvm::DebugCompressionType::None);
}
diff --git a/contrib/llvm-project/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
index 547ec2c82cb3..f4bf4b19911a 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
@@ -1311,6 +1311,11 @@ void clang::EmitClangDiagsDefs(RecordKeeper &Records, raw_ostream &OS,
else
OS << ", false";
+ if (R.getValueAsBit("ShowInSystemMacro"))
+ OS << ", true";
+ else
+ OS << ", false";
+
if (R.getValueAsBit("Deferrable"))
OS << ", true";
else
diff --git a/contrib/llvm-project/clang/utils/TableGen/MveEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/MveEmitter.cpp
index 1a2532fbf53f..cc830c1d5416 100644
--- a/contrib/llvm-project/clang/utils/TableGen/MveEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/MveEmitter.cpp
@@ -1489,8 +1489,7 @@ protected:
class raw_self_contained_string_ostream : private string_holder,
public raw_string_ostream {
public:
- raw_self_contained_string_ostream()
- : string_holder(), raw_string_ostream(S) {}
+ raw_self_contained_string_ostream() : raw_string_ostream(S) {}
};
const char LLVMLicenseHeader[] =
diff --git a/contrib/llvm-project/clang/utils/TableGen/NeonEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/NeonEmitter.cpp
index ff552b66c0e2..a69fbe0e42dc 100644
--- a/contrib/llvm-project/clang/utils/TableGen/NeonEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/NeonEmitter.cpp
@@ -292,7 +292,7 @@ class Variable {
std::string N;
public:
- Variable() : T(Type::getVoid()), N("") {}
+ Variable() : T(Type::getVoid()) {}
Variable(Type T, std::string N) : T(std::move(T)), N(std::move(N)) {}
Type getType() const { return T; }
@@ -1306,7 +1306,7 @@ void Intrinsic::emitBodyAsBuiltinCall() {
if (LocalCK == ClassB) {
Type T2 = T;
T2.makeOneVector();
- T2.makeInteger(8, /*Signed=*/true);
+ T2.makeInteger(8, /*Sign=*/true);
Cast = "(" + T2.str() + ")";
}
@@ -1473,7 +1473,7 @@ Intrinsic::DagEmitter::emitDagCall(DagInit *DI, bool MatchMangledName) {
Intr.Dependencies.insert(&Callee);
// Now create the call itself.
- std::string S = "";
+ std::string S;
if (!Callee.isBigEndianSafe())
S += CallPrefix.str();
S += Callee.getMangledName(true) + "(";
diff --git a/contrib/llvm-project/clang/utils/TableGen/RISCVVEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/RISCVVEmitter.cpp
index 62eef830318f..4b80d6da72fa 100644
--- a/contrib/llvm-project/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -100,6 +100,9 @@ public:
bool isValid() const { return Valid; }
bool isScalar() const { return Scale.hasValue() && Scale.getValue() == 0; }
bool isVector() const { return Scale.hasValue() && Scale.getValue() != 0; }
+ bool isVector(unsigned Width) const {
+ return isVector() && ElementBitwidth == Width;
+ }
bool isFloat() const { return ScalarType == ScalarTypeKind::Float; }
bool isSignedInteger() const {
return ScalarType == ScalarTypeKind::SignedInteger;
@@ -134,13 +137,16 @@ private:
using RVVTypePtr = RVVType *;
using RVVTypes = std::vector<RVVTypePtr>;
+using RISCVPredefinedMacroT = uint8_t;
-enum RISCVExtension : uint8_t {
+enum RISCVPredefinedMacro : RISCVPredefinedMacroT {
Basic = 0,
- F = 1 << 1,
- D = 1 << 2,
- Zfh = 1 << 3,
- Zvlsseg = 1 << 4,
+ V = 1 << 1,
+ Zfh = 1 << 2,
+ RV64 = 1 << 3,
+ VectorMaxELen64 = 1 << 4,
+ VectorMaxELenFp32 = 1 << 5,
+ VectorMaxELenFp64 = 1 << 6,
};
// TODO refactor RVVIntrinsic class design after support all intrinsic
@@ -164,7 +170,7 @@ private:
// The types we use to obtain the specific LLVM intrinsic. They are index of
// InputTypes. -1 means the return type.
std::vector<int64_t> IntrinsicTypes;
- uint8_t RISCVExtensions = 0;
+ RISCVPredefinedMacroT RISCVPredefinedMacros = 0;
unsigned NF = 1;
public:
@@ -174,7 +180,7 @@ public:
bool HasNoMaskedOverloaded, bool HasAutoDef,
StringRef ManualCodegen, const RVVTypes &Types,
const std::vector<int64_t> &IntrinsicTypes,
- StringRef RequiredExtension, unsigned NF);
+ const std::vector<StringRef> &RequiredFeatures, unsigned NF);
~RVVIntrinsic() = default;
StringRef getBuiltinName() const { return BuiltinName; }
@@ -188,7 +194,9 @@ public:
bool isMask() const { return IsMask; }
StringRef getIRName() const { return IRName; }
StringRef getManualCodegen() const { return ManualCodegen; }
- uint8_t getRISCVExtensions() const { return RISCVExtensions; }
+ RISCVPredefinedMacroT getRISCVPredefinedMacros() const {
+ return RISCVPredefinedMacros;
+ }
unsigned getNF() const { return NF; }
const std::vector<int64_t> &getIntrinsicTypes() const {
return IntrinsicTypes;
@@ -251,7 +259,8 @@ private:
// Emit the architecture preprocessor definitions. Return true when it emits a
// non-empty string.
- bool emitExtDefStr(uint8_t Extensions, raw_ostream &o);
+ bool emitMacroRestrictionStr(RISCVPredefinedMacroT PredefinedMacros,
+ raw_ostream &o);
// Slice Prototypes string into sub prototype string and process each sub
// prototype string individually in the Handler.
void parsePrototypes(StringRef Prototypes,
@@ -444,8 +453,8 @@ void RVVType::initBuiltinStr() {
return;
}
BuiltinStr = "q" + utostr(Scale.getValue()) + BuiltinStr;
- // Pointer to vector types. Defined for Zvlsseg load intrinsics.
- // Zvlsseg load intrinsics have pointer type arguments to store the loaded
+ // Pointer to vector types. Defined for segment load intrinsics.
+ // Segment load intrinsics have pointer type arguments to store the loaded
// vector values.
if (IsPointer)
BuiltinStr += "*";
@@ -764,7 +773,8 @@ RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
bool HasNoMaskedOverloaded, bool HasAutoDef,
StringRef ManualCodegen, const RVVTypes &OutInTypes,
const std::vector<int64_t> &NewIntrinsicTypes,
- StringRef RequiredExtension, unsigned NF)
+ const std::vector<StringRef> &RequiredFeatures,
+ unsigned NF)
: IRName(IRName), IsMask(IsMask), HasVL(HasVL), HasPolicy(HasPolicy),
HasNoMaskedOverloaded(HasNoMaskedOverloaded), HasAutoDef(HasAutoDef),
ManualCodegen(ManualCodegen.str()), NF(NF) {
@@ -788,14 +798,23 @@ RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
// Init RISC-V extensions
for (const auto &T : OutInTypes) {
if (T->isFloatVector(16) || T->isFloat(16))
- RISCVExtensions |= RISCVExtension::Zfh;
- else if (T->isFloatVector(32) || T->isFloat(32))
- RISCVExtensions |= RISCVExtension::F;
- else if (T->isFloatVector(64) || T->isFloat(64))
- RISCVExtensions |= RISCVExtension::D;
+ RISCVPredefinedMacros |= RISCVPredefinedMacro::Zfh;
+ if (T->isFloatVector(32))
+ RISCVPredefinedMacros |= RISCVPredefinedMacro::VectorMaxELenFp32;
+ if (T->isFloatVector(64))
+ RISCVPredefinedMacros |= RISCVPredefinedMacro::VectorMaxELenFp64;
+ if (T->isVector(64))
+ RISCVPredefinedMacros |= RISCVPredefinedMacro::VectorMaxELen64;
+ }
+ for (auto Feature : RequiredFeatures) {
+ if (Feature == "RV64")
+ RISCVPredefinedMacros |= RISCVPredefinedMacro::RV64;
+ // Note: Full multiply instructions (mulh, mulhu, mulhsu, smul) for EEW=64
+ // require V.
+ if (Feature == "FullMultiply" &&
+ (RISCVPredefinedMacros & RISCVPredefinedMacro::VectorMaxELen64))
+ RISCVPredefinedMacros |= RISCVPredefinedMacro::V;
}
- if (RequiredExtension == "Zvlsseg")
- RISCVExtensions |= RISCVExtension::Zvlsseg;
// Init OutputType and InputTypes
OutputType = OutInTypes[0];
@@ -978,7 +997,7 @@ void RVVEmitter::createHeader(raw_ostream &OS) {
// The same extensions are included under the same arch guard macro.
llvm::stable_sort(Defs, [](const std::unique_ptr<RVVIntrinsic> &A,
const std::unique_ptr<RVVIntrinsic> &B) {
- return A->getRISCVExtensions() < B->getRISCVExtensions();
+ return A->getRISCVPredefinedMacros() < B->getRISCVPredefinedMacros();
});
OS << "#define __rvv_ai static __inline__\n";
@@ -1021,7 +1040,7 @@ void RVVEmitter::createBuiltins(raw_ostream &OS) {
OS << "#if defined(TARGET_BUILTIN) && !defined(RISCVV_BUILTIN)\n";
OS << "#define RISCVV_BUILTIN(ID, TYPE, ATTRS) TARGET_BUILTIN(ID, TYPE, "
- "ATTRS, \"experimental-v\")\n";
+ "ATTRS, \"zve32x|v\")\n";
OS << "#endif\n";
for (auto &Def : Defs) {
auto P =
@@ -1141,7 +1160,8 @@ void RVVEmitter::createRVVIntrinsics(
StringRef ManualCodegenMask = R->getValueAsString("ManualCodegenMask");
std::vector<int64_t> IntrinsicTypes =
R->getValueAsListOfInts("IntrinsicTypes");
- StringRef RequiredExtension = R->getValueAsString("RequiredExtension");
+ std::vector<StringRef> RequiredFeatures =
+ R->getValueAsListOfStrings("RequiredFeatures");
StringRef IRName = R->getValueAsString("IRName");
StringRef IRNameMask = R->getValueAsString("IRNameMask");
unsigned NF = R->getValueAsInt("NF");
@@ -1209,7 +1229,7 @@ void RVVEmitter::createRVVIntrinsics(
Name, SuffixStr, MangledName, MangledSuffixStr, IRName,
/*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL, HasPolicy,
HasNoMaskedOverloaded, HasAutoDef, ManualCodegen, Types.getValue(),
- IntrinsicTypes, RequiredExtension, NF));
+ IntrinsicTypes, RequiredFeatures, NF));
if (HasMask) {
// Create a mask intrinsic
Optional<RVVTypes> MaskTypes =
@@ -1218,7 +1238,7 @@ void RVVEmitter::createRVVIntrinsics(
Name, SuffixStr, MangledName, MangledSuffixStr, IRNameMask,
/*IsMask=*/true, HasMaskedOffOperand, HasVL, HasPolicy,
HasNoMaskedOverloaded, HasAutoDef, ManualCodegenMask,
- MaskTypes.getValue(), IntrinsicTypes, RequiredExtension, NF));
+ MaskTypes.getValue(), IntrinsicTypes, RequiredFeatures, NF));
}
} // end for Log2LMULList
} // end for TypeRange
@@ -1276,15 +1296,16 @@ Optional<RVVTypePtr> RVVEmitter::computeType(BasicType BT, int Log2LMUL,
void RVVEmitter::emitArchMacroAndBody(
std::vector<std::unique_ptr<RVVIntrinsic>> &Defs, raw_ostream &OS,
std::function<void(raw_ostream &, const RVVIntrinsic &)> PrintBody) {
- uint8_t PrevExt = (*Defs.begin())->getRISCVExtensions();
- bool NeedEndif = emitExtDefStr(PrevExt, OS);
+ RISCVPredefinedMacroT PrevMacros =
+ (*Defs.begin())->getRISCVPredefinedMacros();
+ bool NeedEndif = emitMacroRestrictionStr(PrevMacros, OS);
for (auto &Def : Defs) {
- uint8_t CurExt = Def->getRISCVExtensions();
- if (CurExt != PrevExt) {
+ RISCVPredefinedMacroT CurMacros = Def->getRISCVPredefinedMacros();
+ if (CurMacros != PrevMacros) {
if (NeedEndif)
OS << "#endif\n\n";
- NeedEndif = emitExtDefStr(CurExt, OS);
- PrevExt = CurExt;
+ NeedEndif = emitMacroRestrictionStr(CurMacros, OS);
+ PrevMacros = CurMacros;
}
if (Def->hasAutoDef())
PrintBody(OS, *Def);
@@ -1293,19 +1314,24 @@ void RVVEmitter::emitArchMacroAndBody(
OS << "#endif\n\n";
}
-bool RVVEmitter::emitExtDefStr(uint8_t Extents, raw_ostream &OS) {
- if (Extents == RISCVExtension::Basic)
+bool RVVEmitter::emitMacroRestrictionStr(RISCVPredefinedMacroT PredefinedMacros,
+ raw_ostream &OS) {
+ if (PredefinedMacros == RISCVPredefinedMacro::Basic)
return false;
OS << "#if ";
ListSeparator LS(" && ");
- if (Extents & RISCVExtension::F)
- OS << LS << "defined(__riscv_f)";
- if (Extents & RISCVExtension::D)
- OS << LS << "defined(__riscv_d)";
- if (Extents & RISCVExtension::Zfh)
+ if (PredefinedMacros & RISCVPredefinedMacro::V)
+ OS << LS << "defined(__riscv_v)";
+ if (PredefinedMacros & RISCVPredefinedMacro::Zfh)
OS << LS << "defined(__riscv_zfh)";
- if (Extents & RISCVExtension::Zvlsseg)
- OS << LS << "defined(__riscv_zvlsseg)";
+ if (PredefinedMacros & RISCVPredefinedMacro::RV64)
+ OS << LS << "(__riscv_xlen == 64)";
+ if (PredefinedMacros & RISCVPredefinedMacro::VectorMaxELen64)
+ OS << LS << "(__riscv_v_elen >= 64)";
+ if (PredefinedMacros & RISCVPredefinedMacro::VectorMaxELenFp32)
+ OS << LS << "(__riscv_v_elen_fp >= 32)";
+ if (PredefinedMacros & RISCVPredefinedMacro::VectorMaxELenFp64)
+ OS << LS << "(__riscv_v_elen_fp >= 64)";
OS << "\n";
return true;
}
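
For reference, here is an illustrative guard in the form emitMacroRestrictionStr would now emit for an intrinsic whose types include a 64-bit floating-point vector (which sets both VectorMaxELen64 and VectorMaxELenFp64):

#if (__riscv_v_elen >= 64) && (__riscv_v_elen_fp >= 64)
/* ... intrinsic definitions ... */
#endif

An intrinsic additionally tagged with the FullMultiply feature would prepend defined(__riscv_v) to the same conjunction, per the bit-setting loop above.
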
diff --git a/contrib/llvm-project/compiler-rt/include/profile/InstrProfData.inc b/contrib/llvm-project/compiler-rt/include/profile/InstrProfData.inc
index 44719126b596..0544b6b2ef71 100644
--- a/contrib/llvm-project/compiler-rt/include/profile/InstrProfData.inc
+++ b/contrib/llvm-project/compiler-rt/include/profile/InstrProfData.inc
@@ -128,8 +128,10 @@ INSTR_PROF_VALUE_NODE(PtrToNodeT, llvm::Type::getInt8PtrTy(Ctx), Next, \
INSTR_PROF_RAW_HEADER(uint64_t, Magic, __llvm_profile_get_magic())
INSTR_PROF_RAW_HEADER(uint64_t, Version, __llvm_profile_get_version())
INSTR_PROF_RAW_HEADER(uint64_t, BinaryIdsSize, __llvm_write_binary_ids(NULL))
+/* FIXME: A more accurate name is NumData */
INSTR_PROF_RAW_HEADER(uint64_t, DataSize, DataSize)
INSTR_PROF_RAW_HEADER(uint64_t, PaddingBytesBeforeCounters, PaddingBytesBeforeCounters)
+/* FIXME: A more accurate name is NumCounters */
INSTR_PROF_RAW_HEADER(uint64_t, CountersSize, CountersSize)
INSTR_PROF_RAW_HEADER(uint64_t, PaddingBytesAfterCounters, PaddingBytesAfterCounters)
INSTR_PROF_RAW_HEADER(uint64_t, NamesSize, NamesSize)
@@ -644,6 +646,7 @@ serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
(uint64_t)'p' << 40 | (uint64_t)'r' << 32 | (uint64_t)'o' << 24 | \
(uint64_t)'f' << 16 | (uint64_t)'R' << 8 | (uint64_t)129
+/* FIXME: Please remedy the fixme in the header before bumping the version. */
/* Raw profile format version (start from 1). */
#define INSTR_PROF_RAW_VERSION 8
/* Indexed profile format version (start from 1). */
diff --git a/contrib/llvm-project/compiler-rt/include/sanitizer/dfsan_interface.h b/contrib/llvm-project/compiler-rt/include/sanitizer/dfsan_interface.h
index bc0652c99a14..8e581a67572d 100644
--- a/contrib/llvm-project/compiler-rt/include/sanitizer/dfsan_interface.h
+++ b/contrib/llvm-project/compiler-rt/include/sanitizer/dfsan_interface.h
@@ -27,6 +27,10 @@ typedef uint32_t dfsan_origin;
/// Signature of the callback argument to dfsan_set_write_callback().
typedef void (*dfsan_write_callback_t)(int fd, const void *buf, size_t count);
+/// Signature of the callback argument to dfsan_set_conditional_callback().
+typedef void (*dfsan_conditional_callback_t)(dfsan_label label,
+ dfsan_origin origin);
+
/// Computes the union of \c l1 and \c l2, resulting in a union label.
dfsan_label dfsan_union(dfsan_label l1, dfsan_label l2);
@@ -74,6 +78,19 @@ void dfsan_flush(void);
/// callback executes. Pass in NULL to remove any callback.
void dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback);
+/// Sets a callback to be invoked on any conditional expressions which have a
+/// taint label set. This can be used to find where tainted data influences
+/// the behavior of the program.
+/// These callbacks will only be added when -dfsan-conditional-callbacks=true.
+void dfsan_set_conditional_callback(dfsan_conditional_callback_t callback);
+
+/// Conditional expressions can occur during signal handlers.
+/// Making callbacks that handle signals well is tricky, so when
+/// -dfsan-conditional-callbacks=true, conditional expressions used in signal
+/// handlers will add the labels they see into a global (bitwise-or together).
+/// This function returns all label bits seen in signal handler conditions.
+dfsan_label dfsan_get_labels_in_signal_conditional();
+
/// Interceptor hooks.
/// Whenever a dfsan's custom function is called the corresponding
/// hook is called if it is non-zero. The hooks should be defined by the user.
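
A minimal usage sketch for the new conditional-callback API follows. The callback body and variable names are ours, and the way the documented -dfsan-conditional-callbacks=true option is passed (typically via -mllvm when building with -fsanitize=dataflow) is an assumption, not part of this header.

// Build roughly as (assumption, see note above):
//   clang -fsanitize=dataflow -mllvm -dfsan-conditional-callbacks=true cond.c
#include <sanitizer/dfsan_interface.h>
#include <stdio.h>

static void on_tainted_condition(dfsan_label label, dfsan_origin origin) {
  fprintf(stderr, "tainted branch: label=%u origin=%u\n", (unsigned)label,
          (unsigned)origin);
}

int main(int argc, char **argv) {
  (void)argv;
  dfsan_set_conditional_callback(on_tainted_condition);
  int x = argc - 1;                       // runtime value, tainted below
  dfsan_set_label(/*label=*/1, &x, sizeof(x));
  if (x == 0)                             // condition on tainted data -> callback
    puts("no extra arguments");
  return 0;
}
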
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.cpp
index 1ff7091460ad..f9f1cfcd9f87 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.cpp
@@ -840,12 +840,12 @@ struct Allocator {
quarantine.PrintStats();
}
- void ForceLock() ACQUIRE(fallback_mutex) {
+ void ForceLock() SANITIZER_ACQUIRE(fallback_mutex) {
allocator.ForceLock();
fallback_mutex.Lock();
}
- void ForceUnlock() RELEASE(fallback_mutex) {
+ void ForceUnlock() SANITIZER_RELEASE(fallback_mutex) {
fallback_mutex.Unlock();
allocator.ForceUnlock();
}
@@ -1054,9 +1054,11 @@ uptr asan_mz_size(const void *ptr) {
return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}
-void asan_mz_force_lock() NO_THREAD_SAFETY_ANALYSIS { instance.ForceLock(); }
+void asan_mz_force_lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+ instance.ForceLock();
+}
-void asan_mz_force_unlock() NO_THREAD_SAFETY_ANALYSIS {
+void asan_mz_force_unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
instance.ForceUnlock();
}
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_mapping.h b/contrib/llvm-project/compiler-rt/lib/asan/asan_mapping.h
index 6ca6ee00e5c9..4ff09b103d5f 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_mapping.h
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_mapping.h
@@ -13,6 +13,8 @@
#ifndef ASAN_MAPPING_H
#define ASAN_MAPPING_H
+#include "sanitizer_common/sanitizer_platform.h"
+
// The full explanation of the memory mapping could be found here:
// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
//
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_rtl_static.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_rtl_static.cpp
new file mode 100644
index 000000000000..74e6eb0ddf1c
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_rtl_static.cpp
@@ -0,0 +1,15 @@
+//===-- asan_static_rtl.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Main file of the ASan run-time library.
+//===----------------------------------------------------------------------===//
+
+// This file is empty for now. The main reason to have it is to work around the
+// Windows build, which complains because no files are part of the asan_static lib.
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_win_dll_thunk.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_win_dll_thunk.cpp
index a5671cc9dffd..e3a90f18ed81 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_win_dll_thunk.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_win_dll_thunk.cpp
@@ -56,6 +56,13 @@ INTERCEPT_WRAP_W_W(_expand_dbg)
// TODO(timurrrr): Do we need to add _Crt* stuff here? (see asan_malloc_win.cpp)
+# if defined(_MSC_VER) && !defined(__clang__)
+// Disable warnings such as: 'void memchr(void)': incorrect number of arguments
+// for intrinsic function, expected '3' arguments.
+# pragma warning(push)
+# pragma warning(disable : 4392)
+# endif
+
INTERCEPT_LIBRARY_FUNCTION(atoi);
INTERCEPT_LIBRARY_FUNCTION(atol);
INTERCEPT_LIBRARY_FUNCTION(frexp);
@@ -87,6 +94,10 @@ INTERCEPT_LIBRARY_FUNCTION(strtol);
INTERCEPT_LIBRARY_FUNCTION(wcslen);
INTERCEPT_LIBRARY_FUNCTION(wcsnlen);
+# if defined(_MSC_VER) && !defined(__clang__)
+# pragma warning(pop)
+# endif
+
#ifdef _WIN64
INTERCEPT_LIBRARY_FUNCTION(__C_specific_handler);
#else
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/assembly.h b/contrib/llvm-project/compiler-rt/lib/builtins/assembly.h
index 9c015059af5a..69a3d8620f92 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/assembly.h
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/assembly.h
@@ -14,6 +14,12 @@
#ifndef COMPILERRT_ASSEMBLY_H
#define COMPILERRT_ASSEMBLY_H
+#if defined(__linux__) && defined(__CET__)
+#if __has_include(<cet.h>)
+#include <cet.h>
+#endif
+#endif
+
#if defined(__APPLE__) && defined(__aarch64__)
#define SEPARATOR %%
#else
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model.c b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model.c
index cf12aa021d3d..aa0fd7d85b57 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model.c
@@ -13,6 +13,10 @@
//
//===----------------------------------------------------------------------===//
+#ifndef __has_attribute
+#define __has_attribute(attr) 0
+#endif
+
#if defined(HAVE_INIT_PRIORITY)
#define CONSTRUCTOR_ATTRIBUTE __attribute__((__constructor__ 101))
#elif __has_attribute(__constructor__)
@@ -37,10 +41,6 @@
#include <intrin.h>
#endif
-#ifndef __has_attribute
-#define __has_attribute(attr) 0
-#endif
-
enum VendorSignatures {
SIG_INTEL = 0x756e6547, // Genu
SIG_AMD = 0x68747541, // Auth
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/emutls.c b/contrib/llvm-project/compiler-rt/lib/builtins/emutls.c
index e112fdf51440..3a2908a42484 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/emutls.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/emutls.c
@@ -30,7 +30,7 @@
// MSVC raises a warning about a nonstandard extension being used for the 0
// sized element in this array. Disable this for warn-as-error builds.
#pragma warning(push)
-#pragma warning(disable : 4206)
+#pragma warning(disable : 4200)
#endif
typedef struct emutls_address_array {
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fp_mode.h b/contrib/llvm-project/compiler-rt/lib/builtins/fp_mode.h
index 26a3f4d10942..5b4969a441f2 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fp_mode.h
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fp_mode.h
@@ -13,8 +13,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef FP_MODE
-#define FP_MODE
+#ifndef FP_MODE_H
+#define FP_MODE_H
typedef enum {
CRT_FE_TONEAREST,
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/udivmoddi4.c b/contrib/llvm-project/compiler-rt/lib/builtins/udivmoddi4.c
index 10b41df28f84..ca17b36ce585 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/udivmoddi4.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/udivmoddi4.c
@@ -21,7 +21,7 @@
// MSVC throws a warning about mod 0 here, disable it for builds that
// warn-as-error
#pragma warning(push)
-#pragma warning(disable : 4724)
+#pragma warning(disable : 4723 4724)
#endif
COMPILER_RT_ABI du_int __udivmoddi4(du_int a, du_int b, du_int *rem) {
diff --git a/contrib/llvm-project/compiler-rt/lib/cfi/cfi.cpp b/contrib/llvm-project/compiler-rt/lib/cfi/cfi.cpp
index 65a10c999cc6..22f0b175dd87 100644
--- a/contrib/llvm-project/compiler-rt/lib/cfi/cfi.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/cfi/cfi.cpp
@@ -322,14 +322,14 @@ void InitShadow() {
THREADLOCAL int in_loader;
Mutex shadow_update_lock;
-void EnterLoader() NO_THREAD_SAFETY_ANALYSIS {
+void EnterLoader() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
if (in_loader == 0) {
shadow_update_lock.Lock();
}
++in_loader;
}
-void ExitLoader() NO_THREAD_SAFETY_ANALYSIS {
+void ExitLoader() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
CHECK(in_loader > 0);
--in_loader;
UpdateShadow();
diff --git a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.cpp b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.cpp
index ee7221c7b9a8..afb01c7d889e 100644
--- a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.cpp
@@ -332,9 +332,9 @@ static void MoveOrigin(const void *dst, const void *src, uptr size,
// origins by copying origins in a reverse order; otherwise, copy origins in
// a normal order. The orders of origin transfer are consistent with the
// orders of how memcpy and memmove transfer user data.
- uptr src_aligned_beg = reinterpret_cast<uptr>(src) & ~3UL;
- uptr src_aligned_end = (reinterpret_cast<uptr>(src) + size) & ~3UL;
- uptr dst_aligned_beg = reinterpret_cast<uptr>(dst) & ~3UL;
+ uptr src_aligned_beg = OriginAlignDown((uptr)src);
+ uptr src_aligned_end = OriginAlignDown((uptr)src + size);
+ uptr dst_aligned_beg = OriginAlignDown((uptr)dst);
if (dst_aligned_beg < src_aligned_end && dst_aligned_beg >= src_aligned_beg)
return ReverseCopyOrigin(dst, src, size, stack);
return CopyOrigin(dst, src, size, stack);
@@ -401,12 +401,18 @@ extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_mem_origin_transfer(
MoveOrigin(dst, src, len, &stack);
}
-SANITIZER_INTERFACE_ATTRIBUTE void dfsan_mem_origin_transfer(const void *dst,
- const void *src,
- uptr len) {
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void dfsan_mem_origin_transfer(
+ const void *dst, const void *src, uptr len) {
__dfsan_mem_origin_transfer(dst, src, len);
}
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void dfsan_mem_shadow_transfer(
+ void *dst, const void *src, uptr len) {
+ internal_memcpy((void *)__dfsan::shadow_for(dst),
+ (const void *)__dfsan::shadow_for(src),
+ len * sizeof(dfsan_label));
+}
+
namespace __dfsan {
bool dfsan_inited = false;
@@ -414,8 +420,7 @@ bool dfsan_init_is_running = false;
void dfsan_copy_memory(void *dst, const void *src, uptr size) {
internal_memcpy(dst, src, size);
- internal_memcpy((void *)shadow_for(dst), (const void *)shadow_for(src),
- size * sizeof(dfsan_label));
+ dfsan_mem_shadow_transfer(dst, src, size);
if (dfsan_get_track_origins())
dfsan_mem_origin_transfer(dst, src, size);
}
@@ -600,6 +605,60 @@ dfsan_has_label(dfsan_label label, dfsan_label elem) {
return (label & elem) == elem;
}
+namespace __dfsan {
+
+typedef void (*dfsan_conditional_callback_t)(dfsan_label label,
+ dfsan_origin origin);
+static dfsan_conditional_callback_t conditional_callback = nullptr;
+static dfsan_label labels_in_signal_conditional = 0;
+
+static void ConditionalCallback(dfsan_label label, dfsan_origin origin) {
+  // Programs have many branches. For efficiency, the conditional sink callback
+  // handler needs to ignore as many of them as possible, as early as possible.
+ if (label == 0) {
+ return;
+ }
+ if (conditional_callback == nullptr) {
+ return;
+ }
+
+  // This initial ConditionalCallback handler needs to live here in the dfsan
+  // runtime (rather than being an entirely user-implemented hook) so that it
+  // has access to dfsan thread information.
+ DFsanThread *t = GetCurrentThread();
+  // A callback operation which does useful work (like recording the flow) would
+  // likely take too long to execute in a signal handler.
+ if (t && t->InSignalHandler()) {
+ // Record set of labels used in signal handler for completeness.
+ labels_in_signal_conditional |= label;
+ return;
+ }
+
+ conditional_callback(label, origin);
+}
+
+} // namespace __dfsan
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
+__dfsan_conditional_callback_origin(dfsan_label label, dfsan_origin origin) {
+ __dfsan::ConditionalCallback(label, origin);
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_conditional_callback(
+ dfsan_label label) {
+ __dfsan::ConditionalCallback(label, 0);
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void dfsan_set_conditional_callback(
+ __dfsan::dfsan_conditional_callback_t callback) {
+ __dfsan::conditional_callback = callback;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_label
+dfsan_get_labels_in_signal_conditional() {
+ return __dfsan::labels_in_signal_conditional;
+}
+
class Decorator : public __sanitizer::SanitizerCommonDecorator {
public:
Decorator() : SanitizerCommonDecorator() {}
@@ -898,6 +957,7 @@ extern "C" void dfsan_flush() {
Die();
}
}
+ __dfsan::labels_in_signal_conditional = 0;
}
// TODO: CheckMemoryLayoutSanity is based on msan.
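
The conditional-callback machinery above only covers the runtime side; a program registers the callback itself and can later collect the labels that reached branch conditions while a signal handler was running. A rough consumer-side sketch, assuming a binary built with -fsanitize=dataflow and with the conditional-callback instrumentation enabled in the matching clang (the -mllvm -dfsan-conditional-callbacks option is an assumption here); the extern "C" declarations simply mirror the interface exported above in case the installed sanitizer/dfsan_interface.h predates it.

  #include <sanitizer/dfsan_interface.h>
  #include <cstdio>

  // Mirrors the interface added above; newer dfsan_interface.h headers
  // declare these already.
  extern "C" void dfsan_set_conditional_callback(
      void (*callback)(dfsan_label label, dfsan_origin origin));
  extern "C" dfsan_label dfsan_get_labels_in_signal_conditional();

  // Invoked by the runtime for tainted branch conditions (label != 0),
  // except while a signal handler is running.
  static void on_tainted_branch(dfsan_label label, dfsan_origin origin) {
    std::fprintf(stderr, "tainted branch: label=%u origin=%u\n",
                 (unsigned)label, (unsigned)origin);
  }

  int main() {
    dfsan_set_conditional_callback(on_tainted_branch);

    int secret = 42;
    dfsan_set_label(1, &secret, sizeof(secret));  // taint 'secret' with label 1
    if (secret > 0)  // tainted condition -> on_tainted_branch fires
      std::puts("positive");

    // Labels seen on branches inside signal handlers are only recorded, not
    // reported immediately; fetch them afterwards.
    std::printf("labels seen in signal handlers: %u\n",
                (unsigned)dfsan_get_labels_in_signal_conditional());
    return 0;
  }
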
diff --git a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.h b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.h
index b529e008f300..f3403dd44fa8 100644
--- a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.h
+++ b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.h
@@ -46,6 +46,10 @@ void dfsan_set_label_origin(dfsan_label label, dfsan_origin origin, void *addr,
// Copy or move the origins of the len bytes from src to dst.
void dfsan_mem_origin_transfer(const void *dst, const void *src, uptr len);
+
+// Copy shadow bytes from src to dst.
+// Note this preserves distinct taint labels at specific offsets.
+void dfsan_mem_shadow_transfer(void *dst, const void *src, uptr len);
} // extern "C"
template <typename T>
diff --git a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_custom.cpp b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_custom.cpp
index 217bd35c1c54..1965d0ddff59 100644
--- a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_custom.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_custom.cpp
@@ -497,9 +497,7 @@ static void *dfsan_memmove_with_origin(void *dest, const void *src, size_t n) {
}
static void *dfsan_memcpy(void *dest, const void *src, size_t n) {
- dfsan_label *sdest = shadow_for(dest);
- const dfsan_label *ssrc = shadow_for(src);
- internal_memcpy((void *)sdest, (const void *)ssrc, n * sizeof(dfsan_label));
+ dfsan_mem_shadow_transfer(dest, src, n);
return internal_memcpy(dest, src, n);
}
@@ -584,10 +582,7 @@ SANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strcat(char *dest, const char *src,
dfsan_label *ret_label) {
size_t dest_len = strlen(dest);
char *ret = strcat(dest, src);
- dfsan_label *sdest = shadow_for(dest + dest_len);
- const dfsan_label *ssrc = shadow_for(src);
- internal_memcpy((void *)sdest, (const void *)ssrc,
- strlen(src) * sizeof(dfsan_label));
+ dfsan_mem_shadow_transfer(dest + dest_len, src, strlen(src));
*ret_label = dest_label;
return ret;
}
@@ -598,12 +593,9 @@ SANITIZER_INTERFACE_ATTRIBUTE char *__dfso_strcat(
dfsan_origin *ret_origin) {
size_t dest_len = strlen(dest);
char *ret = strcat(dest, src);
- dfsan_label *sdest = shadow_for(dest + dest_len);
- const dfsan_label *ssrc = shadow_for(src);
size_t src_len = strlen(src);
dfsan_mem_origin_transfer(dest + dest_len, src, src_len);
- internal_memcpy((void *)sdest, (const void *)ssrc,
- src_len * sizeof(dfsan_label));
+ dfsan_mem_shadow_transfer(dest + dest_len, src, src_len);
*ret_label = dest_label;
*ret_origin = dest_origin;
return ret;
@@ -1120,8 +1112,7 @@ char *__dfsw_strcpy(char *dest, const char *src, dfsan_label dst_label,
dfsan_label src_label, dfsan_label *ret_label) {
char *ret = strcpy(dest, src);
if (ret) {
- internal_memcpy(shadow_for(dest), shadow_for(src),
- sizeof(dfsan_label) * (strlen(src) + 1));
+ dfsan_mem_shadow_transfer(dest, src, strlen(src) + 1);
}
*ret_label = dst_label;
return ret;
@@ -1136,8 +1127,7 @@ char *__dfso_strcpy(char *dest, const char *src, dfsan_label dst_label,
if (ret) {
size_t str_len = strlen(src) + 1;
dfsan_mem_origin_transfer(dest, src, str_len);
- internal_memcpy(shadow_for(dest), shadow_for(src),
- sizeof(dfsan_label) * str_len);
+ dfsan_mem_shadow_transfer(dest, src, str_len);
}
*ret_label = dst_label;
*ret_origin = dst_origin;
@@ -2369,9 +2359,8 @@ static int format_buffer(char *str, size_t size, const char *fmt,
formatter.num_written_bytes(retval));
}
va_labels++;
- internal_memcpy(shadow_for(formatter.str_cur()), shadow_for(arg),
- sizeof(dfsan_label) *
- formatter.num_written_bytes(retval));
+ dfsan_mem_shadow_transfer(formatter.str_cur(), arg,
+ formatter.num_written_bytes(retval));
end_fmt = true;
break;
}
diff --git a/contrib/llvm-project/compiler-rt/lib/dfsan/done_abilist.txt b/contrib/llvm-project/compiler-rt/lib/dfsan/done_abilist.txt
index fc2dd02ccf5f..e8fcd83d13bf 100644
--- a/contrib/llvm-project/compiler-rt/lib/dfsan/done_abilist.txt
+++ b/contrib/llvm-project/compiler-rt/lib/dfsan/done_abilist.txt
@@ -46,6 +46,10 @@ fun:dfsan_get_init_origin=uninstrumented
fun:dfsan_get_init_origin=discard
fun:dfsan_get_track_origins=uninstrumented
fun:dfsan_get_track_origins=discard
+fun:dfsan_set_conditional_callback=uninstrumented
+fun:dfsan_set_conditional_callback=discard
+fun:dfsan_get_labels_in_signal_conditional=uninstrumented
+fun:dfsan_get_labels_in_signal_conditional=discard
###############################################################################
# glibc
diff --git a/contrib/llvm-project/compiler-rt/lib/interception/interception_win.cpp b/contrib/llvm-project/compiler-rt/lib/interception/interception_win.cpp
index 38b8c058246a..9289e06b88fc 100644
--- a/contrib/llvm-project/compiler-rt/lib/interception/interception_win.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/interception/interception_win.cpp
@@ -602,6 +602,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0x246c8948: // 48 89 6C 24 XX : mov QWORD ptr [rsp + XX], rbp
case 0x245c8948: // 48 89 5c 24 XX : mov QWORD PTR [rsp + XX], rbx
case 0x24748948: // 48 89 74 24 XX : mov QWORD PTR [rsp + XX], rsi
+ case 0x247c8948: // 48 89 7c 24 XX : mov QWORD PTR [rsp + XX], rdi
case 0x244C8948: // 48 89 4C 24 XX : mov QWORD PTR [rsp + XX], rcx
case 0x24548948: // 48 89 54 24 XX : mov QWORD PTR [rsp + XX], rdx
case 0x244c894c: // 4c 89 4c 24 XX : mov QWORD PTR [rsp + XX], r9
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.h b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.h
index 61b64d4dc30f..6b06c4517cd5 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.h
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.h
@@ -230,8 +230,8 @@ void UnlockAllocator();
// Returns true if [addr, addr + sizeof(void *)) is poisoned.
bool WordIsPoisoned(uptr addr);
// Wrappers for ThreadRegistry access.
-void LockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS;
-void UnlockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS;
+void LockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
+void UnlockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
struct ScopedStopTheWorldLock {
ScopedStopTheWorldLock() {
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_mac.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_mac.cpp
index 6e8a6dfe16b4..a4204740c7fa 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_mac.cpp
@@ -143,16 +143,16 @@ void ProcessGlobalRegions(Frontier *frontier) {
}
void ProcessPlatformSpecificAllocations(Frontier *frontier) {
- unsigned depth = 1;
- vm_size_t size = 0;
vm_address_t address = 0;
kern_return_t err = KERN_SUCCESS;
- mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
InternalMmapVectorNoCtor<RootRegion> const *root_regions = GetRootRegions();
while (err == KERN_SUCCESS) {
+ vm_size_t size = 0;
+ unsigned depth = 1;
struct vm_region_submap_info_64 info;
+ mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
err = vm_region_recurse_64(mach_task_self(), &address, &size, &depth,
(vm_region_info_t)&info, &count);
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.cpp
index adbdf365bc4c..0974b898666b 100644
--- a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.cpp
@@ -524,12 +524,12 @@ struct Allocator {
void PrintStats() { allocator.PrintStats(); }
- void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
+ void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
allocator.ForceLock();
fallback_mutex.Lock();
}
- void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
+ void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
fallback_mutex.Unlock();
allocator.ForceUnlock();
}
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_interceptors.cpp b/contrib/llvm-project/compiler-rt/lib/msan/msan_interceptors.cpp
index df0cdecf79c7..d1b858930a7f 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan_interceptors.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_interceptors.cpp
@@ -990,12 +990,13 @@ static void SignalAction(int signo, void *si, void *uc) {
ScopedThreadLocalStateBackup stlsb;
UnpoisonParam(3);
__msan_unpoison(si, sizeof(__sanitizer_sigaction));
- __msan_unpoison(uc, __sanitizer::ucontext_t_sz);
+ __msan_unpoison(uc, ucontext_t_sz(uc));
typedef void (*sigaction_cb)(int, void *, void *);
sigaction_cb cb =
(sigaction_cb)atomic_load(&sigactions[signo], memory_order_relaxed);
cb(signo, si, uc);
+ CHECK_UNPOISONED(uc, ucontext_t_sz(uc));
}
static void read_sigaction(const __sanitizer_sigaction *act) {
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/adt.h b/contrib/llvm-project/compiler-rt/lib/orc/adt.h
index 33b731082f88..3a422871c47b 100644
--- a/contrib/llvm-project/compiler-rt/lib/orc/adt.h
+++ b/contrib/llvm-project/compiler-rt/lib/orc/adt.h
@@ -110,4 +110,4 @@ inline std::string to_string(string_view SV) {
} // end namespace __orc_rt
-#endif // ORC_RT_COMMON_H
+#endif // ORC_RT_ADT_H
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/debug.cpp b/contrib/llvm-project/compiler-rt/lib/orc/debug.cpp
new file mode 100644
index 000000000000..af20fa4e6f4e
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/debug.cpp
@@ -0,0 +1,83 @@
+//===- debug.cpp ----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime support library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "debug.h"
+
+#include <cassert>
+#include <cstdarg>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+
+
+namespace __orc_rt {
+
+#ifndef NDEBUG
+
+std::atomic<const char *> DebugTypes;
+char DebugTypesAll;
+char DebugTypesNone;
+
+/// Sets the DebugTypes value -- this function may be called concurrently on
+/// multiple threads, but will always assign the same value, so this should be
+/// safe.
+const char *initializeDebug() {
+ if (const char *DT = getenv("ORC_RT_DEBUG")) {
+ // If ORC_RT_DEBUG=1 then log everything.
+ if (strcmp(DT, "1") == 0) {
+ DebugTypes.store(&DebugTypesAll, std::memory_order_relaxed);
+ return &DebugTypesAll;
+ }
+
+ // If ORC_RT_DEBUG is non-empty then record the string for use in
+ // debugTypeEnabled.
+ if (strcmp(DT, "") != 0) {
+ DebugTypes.store(DT, std::memory_order_relaxed);
+ return DT;
+ }
+ }
+
+  // If ORC_RT_DEBUG is undefined or defined as empty then log nothing.
+ DebugTypes.store(&DebugTypesNone, std::memory_order_relaxed);
+ return &DebugTypesNone;
+}
+
+bool debugTypeEnabled(const char *Type, const char *Types) {
+ assert(Types && Types != &DebugTypesAll && Types != &DebugTypesNone &&
+ "Invalid Types value");
+ size_t TypeLen = strlen(Type);
+ const char *Start = Types;
+ const char *End = Start;
+
+ do {
+ if (*End == '\0' || *End == ',') {
+ size_t ItemLen = End - Start;
+ if (ItemLen == TypeLen && memcmp(Type, Start, TypeLen) == 0)
+ return true;
+ if (*End == '\0')
+ return false;
+ Start = End + 1;
+ }
+ ++End;
+ } while (true);
+}
+
+void printdbg(const char *format, ...) {
+ va_list Args;
+ va_start(Args, format);
+ vfprintf(stderr, format, Args);
+ va_end(Args);
+}
+
+#endif // !NDEBUG
+
+} // end namespace __orc_rt
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/debug.h b/contrib/llvm-project/compiler-rt/lib/orc/debug.h
new file mode 100644
index 000000000000..4605d441c7cb
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/debug.h
@@ -0,0 +1,56 @@
+//===- debug.h - Debugging output utilities ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime support library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ORC_RT_DEBUG_H
+#define ORC_RT_DEBUG_H
+
+#include <atomic>
+
+#ifndef NDEBUG
+
+namespace __orc_rt {
+
+extern std::atomic<const char *> DebugTypes;
+extern char DebugTypesAll;
+extern char DebugTypesNone;
+
+const char *initializeDebug();
+bool debugTypeEnabled(const char *Type, const char *Types);
+void printdbg(const char *format, ...);
+
+} // namespace __orc_rt
+
+#define ORC_RT_DEBUG_WITH_TYPE(TYPE, X) \
+ do { \
+ const char *Types = \
+ ::__orc_rt::DebugTypes.load(std::memory_order_relaxed); \
+ if (!Types) \
+ Types = initializeDebug(); \
+ if (Types == &DebugTypesNone) \
+ break; \
+ if (Types == &DebugTypesAll || \
+ ::__orc_rt::debugTypeEnabled(TYPE, Types)) { \
+ X; \
+ } \
+ } while (false)
+
+#else
+
+#define ORC_RT_DEBUG_WITH_TYPE(TYPE, X) \
+ do { \
+ } while (false)
+
+#endif // !NDEBUG
+
+#define ORC_RT_DEBUG(X) ORC_RT_DEBUG_WITH_TYPE(DEBUG_TYPE, X)
+
+#endif // ORC_RT_DEBUG_H
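
debug.h gives the ORC runtime an LLVM_DEBUG-style logging macro driven by the ORC_RT_DEBUG environment variable (ORC_RT_DEBUG=1 enables everything; otherwise it is treated as a comma-separated list of debug types). A minimal usage sketch, assuming the code lives inside namespace __orc_rt, since the macro body names DebugTypesAll/DebugTypesNone unqualified; the DEBUG_TYPE string and the function are illustrative.

  #define DEBUG_TYPE "macho_platform"  // illustrative debug-type string
  #include "debug.h"

  namespace __orc_rt {

  void notifyLoaded(const char *Name) {
    // Expands to nothing in NDEBUG builds; otherwise runs only when
    // ORC_RT_DEBUG=1 or ORC_RT_DEBUG contains "macho_platform".
    ORC_RT_DEBUG({ printdbg("loaded %s\n", Name); });
  }

  } // end namespace __orc_rt
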
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/macho_ehframe_registration.cpp b/contrib/llvm-project/compiler-rt/lib/orc/macho_ehframe_registration.cpp
index d0ea7e70201c..f077cea96820 100644
--- a/contrib/llvm-project/compiler-rt/lib/orc/macho_ehframe_registration.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/orc/macho_ehframe_registration.cpp
@@ -13,6 +13,8 @@
#include "adt.h"
#include "c_api.h"
#include "common.h"
+#include "executor_address.h"
+#include "wrapper_function_utils.h"
using namespace __orc_rt;
@@ -49,20 +51,24 @@ void walkEHFrameSection(span<const char> EHFrameSection,
ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
__orc_rt_macho_register_ehframe_section(char *ArgData, size_t ArgSize) {
- // NOTE: Does not use SPS to deserialize arg buffer, instead the arg buffer
- // is taken to be the range of the eh-frame section.
- bool HasError = false;
- walkEHFrameSection(span<const char>(ArgData, ArgSize), __register_frame);
- return __orc_rt_CreateCWrapperFunctionResultFromRange((char*)&HasError,
- sizeof(HasError));
+ return WrapperFunction<SPSError(SPSExecutorAddrRange)>::handle(
+ ArgData, ArgSize,
+ [](ExecutorAddrRange FrameSection) -> Error {
+ walkEHFrameSection(FrameSection.toSpan<const char>(),
+ __register_frame);
+ return Error::success();
+ })
+ .release();
}
ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
__orc_rt_macho_deregister_ehframe_section(char *ArgData, size_t ArgSize) {
- // NOTE: Does not use SPS to deserialize arg buffer, instead the arg buffer
- // is taken to be the range of the eh-frame section.
- bool HasError = false;
- walkEHFrameSection(span<const char>(ArgData, ArgSize), __deregister_frame);
- return __orc_rt_CreateCWrapperFunctionResultFromRange((char*)&HasError,
- sizeof(HasError));
+ return WrapperFunction<SPSError(SPSExecutorAddrRange)>::handle(
+ ArgData, ArgSize,
+ [](ExecutorAddrRange FrameSection) -> Error {
+ walkEHFrameSection(FrameSection.toSpan<const char>(),
+ __deregister_frame);
+ return Error::success();
+ })
+ .release();
}
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/macho_platform.cpp b/contrib/llvm-project/compiler-rt/lib/orc/macho_platform.cpp
index 32770219e65c..f2cca8eb829a 100644
--- a/contrib/llvm-project/compiler-rt/lib/orc/macho_platform.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/orc/macho_platform.cpp
@@ -568,7 +568,7 @@ void destroyMachOTLVMgr(void *MachOTLVMgr) {
Error runWrapperFunctionCalls(std::vector<WrapperFunctionCall> WFCs) {
for (auto &WFC : WFCs)
- if (auto Err = WFC.runWithSPSRet())
+ if (auto Err = WFC.runWithSPSRet<void>())
return Err;
return Error::success();
}
@@ -593,28 +593,22 @@ __orc_rt_macho_platform_shutdown(char *ArgData, size_t ArgSize) {
ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
__orc_rt_macho_register_thread_data_section(char *ArgData, size_t ArgSize) {
- // NOTE: Does not use SPS to deserialize arg buffer, instead the arg buffer
- // is taken to be the range of the thread data section.
- return WrapperFunction<SPSError()>::handle(
- nullptr, 0,
- [&]() {
+ return WrapperFunction<SPSError(SPSExecutorAddrRange)>::handle(
+ ArgData, ArgSize,
+ [](ExecutorAddrRange R) {
return MachOPlatformRuntimeState::get()
- .registerThreadDataSection(
- span<const char>(ArgData, ArgSize));
+ .registerThreadDataSection(R.toSpan<const char>());
})
.release();
}
ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
__orc_rt_macho_deregister_thread_data_section(char *ArgData, size_t ArgSize) {
- // NOTE: Does not use SPS to deserialize arg buffer, instead the arg buffer
- // is taken to be the range of the thread data section.
- return WrapperFunction<SPSError()>::handle(
- nullptr, 0,
- [&]() {
+ return WrapperFunction<SPSError(SPSExecutorAddrRange)>::handle(
+ ArgData, ArgSize,
+ [](ExecutorAddrRange R) {
return MachOPlatformRuntimeState::get()
- .deregisterThreadDataSection(
- span<const char>(ArgData, ArgSize));
+ .deregisterThreadDataSection(R.toSpan<const char>());
})
.release();
}
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/wrapper_function_utils.h b/contrib/llvm-project/compiler-rt/lib/orc/wrapper_function_utils.h
index 23385e1bd794..02ea37393276 100644
--- a/contrib/llvm-project/compiler-rt/lib/orc/wrapper_function_utils.h
+++ b/contrib/llvm-project/compiler-rt/lib/orc/wrapper_function_utils.h
@@ -395,25 +395,53 @@ makeMethodWrapperHandler(RetT (ClassT::*Method)(ArgTs...)) {
}
/// Represents a call to a wrapper function.
-struct WrapperFunctionCall {
- ExecutorAddr Func;
- ExecutorAddrRange ArgData;
+class WrapperFunctionCall {
+public:
+ // FIXME: Switch to a SmallVector<char, 24> once ORC runtime has a
+ // smallvector.
+ using ArgDataBufferType = std::vector<char>;
+
+ /// Create a WrapperFunctionCall using the given SPS serializer to serialize
+ /// the arguments.
+ template <typename SPSSerializer, typename... ArgTs>
+ static Expected<WrapperFunctionCall> Create(ExecutorAddr FnAddr,
+ const ArgTs &...Args) {
+ ArgDataBufferType ArgData;
+ ArgData.resize(SPSSerializer::size(Args...));
+ SPSOutputBuffer OB(&ArgData[0], ArgData.size());
+ if (SPSSerializer::serialize(OB, Args...))
+ return WrapperFunctionCall(FnAddr, std::move(ArgData));
+    return make_error<StringError>("Cannot serialize arguments for "
+                                   "WrapperFunctionCall");
+ }
WrapperFunctionCall() = default;
- WrapperFunctionCall(ExecutorAddr Func, ExecutorAddrRange ArgData)
- : Func(Func), ArgData(ArgData) {}
- /// Run and return result as WrapperFunctionResult.
- WrapperFunctionResult run() {
- WrapperFunctionResult WFR(
- Func.toPtr<__orc_rt_CWrapperFunctionResult (*)(const char *, size_t)>()(
- ArgData.Start.toPtr<const char *>(),
- static_cast<size_t>(ArgData.size().getValue())));
- return WFR;
+ /// Create a WrapperFunctionCall from a target function and arg buffer.
+ WrapperFunctionCall(ExecutorAddr FnAddr, ArgDataBufferType ArgData)
+ : FnAddr(FnAddr), ArgData(std::move(ArgData)) {}
+
+ /// Returns the address to be called.
+ const ExecutorAddr &getCallee() const { return FnAddr; }
+
+ /// Returns the argument data.
+ const ArgDataBufferType &getArgData() const { return ArgData; }
+
+ /// WrapperFunctionCalls convert to true if the callee is non-null.
+ explicit operator bool() const { return !!FnAddr; }
+
+ /// Run call returning raw WrapperFunctionResult.
+ WrapperFunctionResult run() const {
+ using FnTy =
+ __orc_rt_CWrapperFunctionResult(const char *ArgData, size_t ArgSize);
+ return WrapperFunctionResult(
+ FnAddr.toPtr<FnTy *>()(ArgData.data(), ArgData.size()));
}
/// Run call and deserialize result using SPS.
- template <typename SPSRetT, typename RetT> Error runWithSPSRet(RetT &RetVal) {
+ template <typename SPSRetT, typename RetT>
+ std::enable_if_t<!std::is_same<SPSRetT, void>::value, Error>
+ runWithSPSRet(RetT &RetVal) const {
auto WFR = run();
if (const char *ErrMsg = WFR.getOutOfBandError())
return make_error<StringError>(ErrMsg);
@@ -425,30 +453,49 @@ struct WrapperFunctionCall {
}
/// Overload for SPS functions returning void.
- Error runWithSPSRet() {
+ template <typename SPSRetT>
+ std::enable_if_t<std::is_same<SPSRetT, void>::value, Error>
+ runWithSPSRet() const {
SPSEmpty E;
return runWithSPSRet<SPSEmpty>(E);
}
+
+  /// Run the call and deserialize an SPSError result. The returned SPSError and
+  /// any deserialization failure are merged into the single returned Error.
+ Error runWithSPSRetErrorMerged() const {
+ detail::SPSSerializableError RetErr;
+ if (auto Err = runWithSPSRet<SPSError>(RetErr))
+ return Err;
+ return detail::fromSPSSerializable(std::move(RetErr));
+ }
+
+private:
+ ExecutorAddr FnAddr;
+ std::vector<char> ArgData;
};
-class SPSWrapperFunctionCall {};
+using SPSWrapperFunctionCall = SPSTuple<SPSExecutorAddr, SPSSequence<char>>;
template <>
class SPSSerializationTraits<SPSWrapperFunctionCall, WrapperFunctionCall> {
public:
static size_t size(const WrapperFunctionCall &WFC) {
- return SPSArgList<SPSExecutorAddr, SPSExecutorAddrRange>::size(WFC.Func,
- WFC.ArgData);
+ return SPSArgList<SPSExecutorAddr, SPSSequence<char>>::size(
+ WFC.getCallee(), WFC.getArgData());
}
static bool serialize(SPSOutputBuffer &OB, const WrapperFunctionCall &WFC) {
- return SPSArgList<SPSExecutorAddr, SPSExecutorAddrRange>::serialize(
- OB, WFC.Func, WFC.ArgData);
+ return SPSArgList<SPSExecutorAddr, SPSSequence<char>>::serialize(
+ OB, WFC.getCallee(), WFC.getArgData());
}
static bool deserialize(SPSInputBuffer &IB, WrapperFunctionCall &WFC) {
- return SPSArgList<SPSExecutorAddr, SPSExecutorAddrRange>::deserialize(
- IB, WFC.Func, WFC.ArgData);
+ ExecutorAddr FnAddr;
+ WrapperFunctionCall::ArgDataBufferType ArgData;
+ if (!SPSWrapperFunctionCall::AsArgList::deserialize(IB, FnAddr, ArgData))
+ return false;
+ WFC = WrapperFunctionCall(FnAddr, std::move(ArgData));
+ return true;
}
};
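
With the redesign above, a WrapperFunctionCall owns its serialized argument bytes, so it can be created up front and run later (runWrapperFunctionCalls in macho_platform.cpp relies on exactly that). A rough sketch of the round trip under those APIs, using SPSArgList as the serializer; demoWrapper and buildAndRun are illustrative names and error handling is abbreviated.

  #include "wrapper_function_utils.h"

  using namespace __orc_rt;

  // A wrapper following the __orc_rt_CWrapperFunctionResult convention: it
  // deserializes one uint64_t argument and returns an SPS-serialized Error.
  static __orc_rt_CWrapperFunctionResult demoWrapper(const char *ArgData,
                                                     size_t ArgSize) {
    return WrapperFunction<SPSError(uint64_t)>::handle(
               ArgData, ArgSize,
               [](uint64_t V) -> Error {
                 return V ? Error::success()
                          : make_error<StringError>("zero argument");
               })
        .release();
  }

  static Error buildAndRun() {
    // Serialize the argument now; the resulting call owns its bytes and can
    // be stored and run later.
    auto WFC = WrapperFunctionCall::Create<SPSArgList<uint64_t>>(
        ExecutorAddr::fromPtr(&demoWrapper), uint64_t(42));
    if (!WFC)
      return WFC.takeError();
    // Folds the returned SPSError and any deserialization failure into one.
    return WFC->runWithSPSRetErrorMerged();
  }
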
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.c
index 7c1d357d96fe..557c0da2dbae 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.c
@@ -42,10 +42,10 @@ COMPILER_RT_VISIBILITY uint64_t __llvm_profile_get_version(void) {
}
COMPILER_RT_VISIBILITY void __llvm_profile_reset_counters(void) {
- uint64_t *I = __llvm_profile_begin_counters();
- uint64_t *E = __llvm_profile_end_counters();
+ char *I = __llvm_profile_begin_counters();
+ char *E = __llvm_profile_end_counters();
- memset(I, 0, sizeof(uint64_t) * (E - I));
+ memset(I, 0, E - I);
const __llvm_profile_data *DataBegin = __llvm_profile_begin_data();
const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.h b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.h
index 5b88d7178012..4433d7bd4887 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.h
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.h
@@ -86,8 +86,8 @@ const __llvm_profile_data *__llvm_profile_begin_data(void);
const __llvm_profile_data *__llvm_profile_end_data(void);
const char *__llvm_profile_begin_names(void);
const char *__llvm_profile_end_names(void);
-uint64_t *__llvm_profile_begin_counters(void);
-uint64_t *__llvm_profile_end_counters(void);
+char *__llvm_profile_begin_counters(void);
+char *__llvm_profile_end_counters(void);
ValueProfNode *__llvm_profile_begin_vnodes();
ValueProfNode *__llvm_profile_end_vnodes();
uint32_t *__llvm_profile_begin_orderfile();
@@ -260,17 +260,26 @@ uint64_t __llvm_profile_get_magic(void);
uint64_t __llvm_profile_get_version(void);
/*! \brief Get the number of entries in the profile data section. */
+uint64_t __llvm_profile_get_num_data(const __llvm_profile_data *Begin,
+ const __llvm_profile_data *End);
+
+/*! \brief Get the size of the profile data section in bytes. */
uint64_t __llvm_profile_get_data_size(const __llvm_profile_data *Begin,
const __llvm_profile_data *End);
+/*! \brief Get the size in bytes of a single counter entry. */
+size_t __llvm_profile_counter_entry_size(void);
+
+/*! \brief Get the number of entries in the profile counters section. */
+uint64_t __llvm_profile_get_num_counters(const char *Begin, const char *End);
+
+/*! \brief Get the size of the profile counters section in bytes. */
+uint64_t __llvm_profile_get_counters_size(const char *Begin, const char *End);
+
/* ! \brief Given the sizes of the data and counter information, return the
* number of padding bytes before and after the counters, and after the names,
* in the raw profile.
*
- * Note: In this context, "size" means "number of entries", i.e. the first two
- * arguments must be the result of __llvm_profile_get_data_size() and of
- * (__llvm_profile_end_counters() - __llvm_profile_begin_counters()) resp.
- *
* Note: When mmap() mode is disabled, no padding bytes before/after counters
* are needed. However, in mmap() mode, the counter section in the raw profile
* must be page-aligned: this API computes the number of padding bytes
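
The header now separates entry counts from byte sizes for the counters section. A short sketch, written as if inside the profile runtime, showing how the new helpers relate; the function name is illustrative.

  #include "InstrProfiling.h"

  // Size in bytes of this image's counter section, which by construction
  // equals the entry count times __llvm_profile_counter_entry_size().
  static uint64_t counterSectionBytes(void) {
    char *Begin = __llvm_profile_begin_counters();
    char *End = __llvm_profile_end_counters();
    uint64_t NumCounters = __llvm_profile_get_num_counters(Begin, End);
    uint64_t Bytes = __llvm_profile_get_counters_size(Begin, End);
    (void)NumCounters;  // Bytes == NumCounters * __llvm_profile_counter_entry_size()
    return Bytes;
  }
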
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingBuffer.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingBuffer.c
index 68b4f5cd6f52..f3d15511452e 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingBuffer.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingBuffer.c
@@ -41,8 +41,8 @@ COMPILER_RT_VISIBILITY
uint64_t __llvm_profile_get_size_for_buffer(void) {
const __llvm_profile_data *DataBegin = __llvm_profile_begin_data();
const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
- const uint64_t *CountersBegin = __llvm_profile_begin_counters();
- const uint64_t *CountersEnd = __llvm_profile_end_counters();
+ const char *CountersBegin = __llvm_profile_begin_counters();
+ const char *CountersEnd = __llvm_profile_end_counters();
const char *NamesBegin = __llvm_profile_begin_names();
const char *NamesEnd = __llvm_profile_end_names();
@@ -51,13 +51,36 @@ uint64_t __llvm_profile_get_size_for_buffer(void) {
}
COMPILER_RT_VISIBILITY
-uint64_t __llvm_profile_get_data_size(const __llvm_profile_data *Begin,
- const __llvm_profile_data *End) {
+uint64_t __llvm_profile_get_num_data(const __llvm_profile_data *Begin,
+ const __llvm_profile_data *End) {
intptr_t BeginI = (intptr_t)Begin, EndI = (intptr_t)End;
return ((EndI + sizeof(__llvm_profile_data) - 1) - BeginI) /
sizeof(__llvm_profile_data);
}
+COMPILER_RT_VISIBILITY
+uint64_t __llvm_profile_get_data_size(const __llvm_profile_data *Begin,
+ const __llvm_profile_data *End) {
+ return __llvm_profile_get_num_data(Begin, End) * sizeof(__llvm_profile_data);
+}
+
+COMPILER_RT_VISIBILITY size_t __llvm_profile_counter_entry_size(void) {
+ return sizeof(uint64_t);
+}
+
+COMPILER_RT_VISIBILITY
+uint64_t __llvm_profile_get_num_counters(const char *Begin, const char *End) {
+ intptr_t BeginI = (intptr_t)Begin, EndI = (intptr_t)End;
+ return ((EndI + __llvm_profile_counter_entry_size() - 1) - BeginI) /
+ __llvm_profile_counter_entry_size();
+}
+
+COMPILER_RT_VISIBILITY
+uint64_t __llvm_profile_get_counters_size(const char *Begin, const char *End) {
+ return __llvm_profile_get_num_counters(Begin, End) *
+ __llvm_profile_counter_entry_size();
+}
+
/// Calculate the number of padding bytes needed to add to \p Offset in order
/// for (\p Offset + Padding) to be page-aligned.
static uint64_t calculateBytesNeededToPageAlign(uint64_t Offset) {
@@ -89,24 +112,22 @@ void __llvm_profile_get_padding_sizes_for_counters(
// In continuous mode, the file offsets for headers and for the start of
// counter sections need to be page-aligned.
- uint64_t DataSizeInBytes = DataSize * sizeof(__llvm_profile_data);
- uint64_t CountersSizeInBytes = CountersSize * sizeof(uint64_t);
- *PaddingBytesBeforeCounters = calculateBytesNeededToPageAlign(
- sizeof(__llvm_profile_header) + DataSizeInBytes);
- *PaddingBytesAfterCounters =
- calculateBytesNeededToPageAlign(CountersSizeInBytes);
+ *PaddingBytesBeforeCounters =
+ calculateBytesNeededToPageAlign(sizeof(__llvm_profile_header) + DataSize);
+ *PaddingBytesAfterCounters = calculateBytesNeededToPageAlign(CountersSize);
*PaddingBytesAfterNames = calculateBytesNeededToPageAlign(NamesSize);
}
COMPILER_RT_VISIBILITY
uint64_t __llvm_profile_get_size_for_buffer_internal(
const __llvm_profile_data *DataBegin, const __llvm_profile_data *DataEnd,
- const uint64_t *CountersBegin, const uint64_t *CountersEnd,
- const char *NamesBegin, const char *NamesEnd) {
+ const char *CountersBegin, const char *CountersEnd, const char *NamesBegin,
+ const char *NamesEnd) {
/* Match logic in __llvm_profile_write_buffer(). */
const uint64_t NamesSize = (NamesEnd - NamesBegin) * sizeof(char);
uint64_t DataSize = __llvm_profile_get_data_size(DataBegin, DataEnd);
- uint64_t CountersSize = CountersEnd - CountersBegin;
+ uint64_t CountersSize =
+ __llvm_profile_get_counters_size(CountersBegin, CountersEnd);
/* Determine how much padding is needed before/after the counters and after
* the names. */
@@ -117,9 +138,8 @@ uint64_t __llvm_profile_get_size_for_buffer_internal(
&PaddingBytesAfterCounters, &PaddingBytesAfterNames);
return sizeof(__llvm_profile_header) + __llvm_write_binary_ids(NULL) +
- (DataSize * sizeof(__llvm_profile_data)) + PaddingBytesBeforeCounters +
- (CountersSize * sizeof(uint64_t)) + PaddingBytesAfterCounters +
- NamesSize + PaddingBytesAfterNames;
+ DataSize + PaddingBytesBeforeCounters + CountersSize +
+ PaddingBytesAfterCounters + NamesSize + PaddingBytesAfterNames;
}
COMPILER_RT_VISIBILITY
@@ -136,8 +156,8 @@ COMPILER_RT_VISIBILITY int __llvm_profile_write_buffer(char *Buffer) {
COMPILER_RT_VISIBILITY int __llvm_profile_write_buffer_internal(
char *Buffer, const __llvm_profile_data *DataBegin,
- const __llvm_profile_data *DataEnd, const uint64_t *CountersBegin,
- const uint64_t *CountersEnd, const char *NamesBegin, const char *NamesEnd) {
+ const __llvm_profile_data *DataEnd, const char *CountersBegin,
+ const char *CountersEnd, const char *NamesBegin, const char *NamesEnd) {
ProfDataWriter BufferWriter;
initBufferWriter(&BufferWriter, Buffer);
return lprofWriteDataImpl(&BufferWriter, DataBegin, DataEnd, CountersBegin,
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingFile.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingFile.c
index efd9f06ac6ee..363ded9554ce 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingFile.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingFile.c
@@ -106,13 +106,14 @@ static int mmapForContinuousMode(uint64_t CurrentFileOffset, FILE *File) {
* __llvm_profile_get_size_for_buffer(). */
const __llvm_profile_data *DataBegin = __llvm_profile_begin_data();
const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
- const uint64_t *CountersBegin = __llvm_profile_begin_counters();
- const uint64_t *CountersEnd = __llvm_profile_end_counters();
+ const char *CountersBegin = __llvm_profile_begin_counters();
+ const char *CountersEnd = __llvm_profile_end_counters();
const char *NamesBegin = __llvm_profile_begin_names();
const char *NamesEnd = __llvm_profile_end_names();
const uint64_t NamesSize = (NamesEnd - NamesBegin) * sizeof(char);
uint64_t DataSize = __llvm_profile_get_data_size(DataBegin, DataEnd);
- uint64_t CountersSize = CountersEnd - CountersBegin;
+ uint64_t CountersSize =
+ __llvm_profile_get_counters_size(CountersBegin, CountersEnd);
/* Check that the counter and data sections in this image are
* page-aligned. */
@@ -136,11 +137,10 @@ static int mmapForContinuousMode(uint64_t CurrentFileOffset, FILE *File) {
DataSize, CountersSize, NamesSize, &PaddingBytesBeforeCounters,
&PaddingBytesAfterCounters, &PaddingBytesAfterNames);
- uint64_t PageAlignedCountersLength =
- (CountersSize * sizeof(uint64_t)) + PaddingBytesAfterCounters;
- uint64_t FileOffsetToCounters =
- CurrentFileOffset + sizeof(__llvm_profile_header) +
- (DataSize * sizeof(__llvm_profile_data)) + PaddingBytesBeforeCounters;
+ uint64_t PageAlignedCountersLength = CountersSize + PaddingBytesAfterCounters;
+ uint64_t FileOffsetToCounters = CurrentFileOffset +
+ sizeof(__llvm_profile_header) + DataSize +
+ PaddingBytesBeforeCounters;
uint64_t *CounterMmap = (uint64_t *)mmap(
(void *)CountersBegin, PageAlignedCountersLength, PROT_READ | PROT_WRITE,
MAP_FIXED | MAP_SHARED, Fileno, FileOffsetToCounters);
@@ -195,8 +195,8 @@ static int mmapForContinuousMode(uint64_t CurrentFileOffset, FILE *File) {
* __llvm_profile_get_size_for_buffer(). */
const __llvm_profile_data *DataBegin = __llvm_profile_begin_data();
const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
- const uint64_t *CountersBegin = __llvm_profile_begin_counters();
- const uint64_t *CountersEnd = __llvm_profile_end_counters();
+ const char *CountersBegin = __llvm_profile_begin_counters();
+ const char *CountersEnd = __llvm_profile_end_counters();
uint64_t DataSize = __llvm_profile_get_data_size(DataBegin, DataEnd);
/* Get the file size. */
uint64_t FileSize = 0;
@@ -211,8 +211,7 @@ static int mmapForContinuousMode(uint64_t CurrentFileOffset, FILE *File) {
return 1;
}
const uint64_t CountersOffsetInBiasMode =
- sizeof(__llvm_profile_header) + __llvm_write_binary_ids(NULL) +
- (DataSize * sizeof(__llvm_profile_data));
+ sizeof(__llvm_profile_header) + __llvm_write_binary_ids(NULL) + DataSize;
/* Update the profile fields based on the current mapping. */
INSTR_PROF_PROFILE_COUNTER_BIAS_VAR =
(intptr_t)Profile - (uintptr_t)CountersBegin + CountersOffsetInBiasMode;
@@ -578,9 +577,8 @@ static void initializeProfileForContinuousMode(void) {
}
/* Get the sizes of counter section. */
- const uint64_t *CountersBegin = __llvm_profile_begin_counters();
- const uint64_t *CountersEnd = __llvm_profile_end_counters();
- uint64_t CountersSize = CountersEnd - CountersBegin;
+ uint64_t CountersSize = __llvm_profile_get_counters_size(
+ __llvm_profile_begin_counters(), __llvm_profile_end_counters());
int Length = getCurFilenameLength();
char *FilenameBuf = (char *)COMPILER_RT_ALLOCA(Length + 1);
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingInternal.h b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingInternal.h
index 1394ea8c42f8..b2ce11067abd 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingInternal.h
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingInternal.h
@@ -21,8 +21,8 @@
*/
uint64_t __llvm_profile_get_size_for_buffer_internal(
const __llvm_profile_data *DataBegin, const __llvm_profile_data *DataEnd,
- const uint64_t *CountersBegin, const uint64_t *CountersEnd,
- const char *NamesBegin, const char *NamesEnd);
+ const char *CountersBegin, const char *CountersEnd, const char *NamesBegin,
+ const char *NamesEnd);
/*!
* \brief Write instrumentation data to the given buffer, given explicit
@@ -35,8 +35,8 @@ uint64_t __llvm_profile_get_size_for_buffer_internal(
*/
int __llvm_profile_write_buffer_internal(
char *Buffer, const __llvm_profile_data *DataBegin,
- const __llvm_profile_data *DataEnd, const uint64_t *CountersBegin,
- const uint64_t *CountersEnd, const char *NamesBegin, const char *NamesEnd);
+ const __llvm_profile_data *DataEnd, const char *CountersBegin,
+ const char *CountersEnd, const char *NamesBegin, const char *NamesEnd);
/*!
* The data structure describing the data to be written by the
@@ -152,8 +152,7 @@ int lprofWriteData(ProfDataWriter *Writer, VPDataReaderType *VPDataReader,
int lprofWriteDataImpl(ProfDataWriter *Writer,
const __llvm_profile_data *DataBegin,
const __llvm_profile_data *DataEnd,
- const uint64_t *CountersBegin,
- const uint64_t *CountersEnd,
+ const char *CountersBegin, const char *CountersEnd,
VPDataReaderType *VPDataReader, const char *NamesBegin,
const char *NamesEnd, int SkipNameDataWrite);
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingMerge.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingMerge.c
index bf99521d4da7..3a520f1488a7 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingMerge.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingMerge.c
@@ -23,18 +23,18 @@ COMPILER_RT_VISIBILITY
uint64_t lprofGetLoadModuleSignature() {
/* A very fast way to compute a module signature. */
uint64_t Version = __llvm_profile_get_version();
- uint64_t CounterSize = (uint64_t)(__llvm_profile_end_counters() -
- __llvm_profile_begin_counters());
- uint64_t DataSize = __llvm_profile_get_data_size(__llvm_profile_begin_data(),
- __llvm_profile_end_data());
+ uint64_t NumCounters = __llvm_profile_get_num_counters(
+ __llvm_profile_begin_counters(), __llvm_profile_end_counters());
+ uint64_t NumData = __llvm_profile_get_num_data(__llvm_profile_begin_data(),
+ __llvm_profile_end_data());
uint64_t NamesSize =
(uint64_t)(__llvm_profile_end_names() - __llvm_profile_begin_names());
uint64_t NumVnodes =
(uint64_t)(__llvm_profile_end_vnodes() - __llvm_profile_begin_vnodes());
const __llvm_profile_data *FirstD = __llvm_profile_begin_data();
- return (NamesSize << 40) + (CounterSize << 30) + (DataSize << 20) +
- (NumVnodes << 10) + (DataSize > 0 ? FirstD->NameRef : 0) + Version +
+ return (NamesSize << 40) + (NumCounters << 30) + (NumData << 20) +
+ (NumVnodes << 10) + (NumData > 0 ? FirstD->NameRef : 0) + Version +
__llvm_profile_get_magic();
}
@@ -57,18 +57,20 @@ int __llvm_profile_check_compatibility(const char *ProfileData,
if (Header->Magic != __llvm_profile_get_magic() ||
Header->Version != __llvm_profile_get_version() ||
Header->DataSize !=
- __llvm_profile_get_data_size(__llvm_profile_begin_data(),
- __llvm_profile_end_data()) ||
- Header->CountersSize != (uint64_t)(__llvm_profile_end_counters() -
- __llvm_profile_begin_counters()) ||
+ __llvm_profile_get_num_data(__llvm_profile_begin_data(),
+ __llvm_profile_end_data()) ||
+ Header->CountersSize !=
+ __llvm_profile_get_num_counters(__llvm_profile_begin_counters(),
+ __llvm_profile_end_counters()) ||
Header->NamesSize != (uint64_t)(__llvm_profile_end_names() -
__llvm_profile_begin_names()) ||
Header->ValueKindLast != IPVK_Last)
return 1;
- if (ProfileSize < sizeof(__llvm_profile_header) + Header->BinaryIdsSize +
- Header->DataSize * sizeof(__llvm_profile_data) +
- Header->NamesSize + Header->CountersSize)
+ if (ProfileSize <
+ sizeof(__llvm_profile_header) + Header->BinaryIdsSize +
+ Header->DataSize * sizeof(__llvm_profile_data) + Header->NamesSize +
+ Header->CountersSize * __llvm_profile_counter_entry_size())
return 1;
for (SrcData = SrcDataStart,
@@ -105,7 +107,7 @@ int __llvm_profile_merge_from_buffer(const char *ProfileData,
__llvm_profile_data *SrcDataStart, *SrcDataEnd, *SrcData, *DstData;
__llvm_profile_header *Header = (__llvm_profile_header *)ProfileData;
- uint64_t *SrcCountersStart;
+ char *SrcCountersStart;
const char *SrcNameStart;
const char *SrcValueProfDataStart, *SrcValueProfData;
uintptr_t CountersDelta = Header->CountersDelta;
@@ -114,12 +116,13 @@ int __llvm_profile_merge_from_buffer(const char *ProfileData,
(__llvm_profile_data *)(ProfileData + sizeof(__llvm_profile_header) +
Header->BinaryIdsSize);
SrcDataEnd = SrcDataStart + Header->DataSize;
- SrcCountersStart = (uint64_t *)SrcDataEnd;
- SrcNameStart = (const char *)(SrcCountersStart + Header->CountersSize);
+ SrcCountersStart = (char *)SrcDataEnd;
+ SrcNameStart = SrcCountersStart +
+ Header->CountersSize * __llvm_profile_counter_entry_size();
SrcValueProfDataStart =
SrcNameStart + Header->NamesSize +
__llvm_profile_get_num_padding_bytes(Header->NamesSize);
- if (SrcNameStart < (const char *)SrcCountersStart)
+ if (SrcNameStart < SrcCountersStart)
return 1;
for (SrcData = SrcDataStart,
@@ -130,8 +133,8 @@ int __llvm_profile_merge_from_buffer(const char *ProfileData,
// address of the data to the start address of the counter. On WIN64,
// CounterPtr is a truncated 32-bit value due to COFF limitation. Sign
// extend CounterPtr to get the original value.
- uint64_t *DstCounters =
- (uint64_t *)((uintptr_t)DstData + signextIfWin64(DstData->CounterPtr));
+ char *DstCounters =
+ (char *)((uintptr_t)DstData + signextIfWin64(DstData->CounterPtr));
unsigned NVK = 0;
// SrcData is a serialized representation of the memory image. We need to
@@ -141,21 +144,19 @@ int __llvm_profile_merge_from_buffer(const char *ProfileData,
// CountersDelta computes the offset into the in-buffer counter section.
//
// On WIN64, CountersDelta is truncated as well, so no need for signext.
- uint64_t *SrcCounters =
- SrcCountersStart +
- ((uintptr_t)SrcData->CounterPtr - CountersDelta) / sizeof(uint64_t);
+ char *SrcCounters =
+ SrcCountersStart + ((uintptr_t)SrcData->CounterPtr - CountersDelta);
// CountersDelta needs to be decreased as we advance to the next data
// record.
CountersDelta -= sizeof(*SrcData);
unsigned NC = SrcData->NumCounters;
if (NC == 0)
return 1;
- if (SrcCounters < SrcCountersStart ||
- (const char *)SrcCounters >= SrcNameStart ||
- (const char *)(SrcCounters + NC) > SrcNameStart)
+ if (SrcCounters < SrcCountersStart || SrcCounters >= SrcNameStart ||
+ (SrcCounters + __llvm_profile_counter_entry_size() * NC) > SrcNameStart)
return 1;
for (unsigned I = 0; I < NC; I++)
- DstCounters[I] += SrcCounters[I];
+ ((uint64_t *)DstCounters)[I] += ((uint64_t *)SrcCounters)[I];
/* Now merge value profile data. */
if (!VPMergeHook)
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformDarwin.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformDarwin.c
index c2e7fad98386..d9f2a113f5b0 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformDarwin.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformDarwin.c
@@ -26,11 +26,10 @@ extern char
COMPILER_RT_VISIBILITY
extern char NamesEnd __asm("section$end$__DATA$" INSTR_PROF_NAME_SECT_NAME);
COMPILER_RT_VISIBILITY
-extern uint64_t
+extern char
CountersStart __asm("section$start$__DATA$" INSTR_PROF_CNTS_SECT_NAME);
COMPILER_RT_VISIBILITY
-extern uint64_t
- CountersEnd __asm("section$end$__DATA$" INSTR_PROF_CNTS_SECT_NAME);
+extern char CountersEnd __asm("section$end$__DATA$" INSTR_PROF_CNTS_SECT_NAME);
COMPILER_RT_VISIBILITY
extern uint32_t
OrderFileStart __asm("section$start$__DATA$" INSTR_PROF_ORDERFILE_SECT_NAME);
@@ -53,9 +52,9 @@ const char *__llvm_profile_begin_names(void) { return &NamesStart; }
COMPILER_RT_VISIBILITY
const char *__llvm_profile_end_names(void) { return &NamesEnd; }
COMPILER_RT_VISIBILITY
-uint64_t *__llvm_profile_begin_counters(void) { return &CountersStart; }
+char *__llvm_profile_begin_counters(void) { return &CountersStart; }
COMPILER_RT_VISIBILITY
-uint64_t *__llvm_profile_end_counters(void) { return &CountersEnd; }
+char *__llvm_profile_end_counters(void) { return &CountersEnd; }
COMPILER_RT_VISIBILITY
uint32_t *__llvm_profile_begin_orderfile(void) { return &OrderFileStart; }
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c
index 9bea795e8e3a..ee2bd56b4b53 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c
@@ -116,13 +116,13 @@ void __llvm_profile_initialize(void) {
const __llvm_profile_data *DataBegin = __llvm_profile_begin_data();
const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
- const uint64_t *CountersBegin = __llvm_profile_begin_counters();
- const uint64_t *CountersEnd = __llvm_profile_end_counters();
+ const char *CountersBegin = __llvm_profile_begin_counters();
+ const char *CountersEnd = __llvm_profile_end_counters();
const uint64_t DataSize = __llvm_profile_get_data_size(DataBegin, DataEnd);
const uint64_t CountersOffset =
- sizeof(__llvm_profile_header) + __llvm_write_binary_ids(NULL) +
- (DataSize * sizeof(__llvm_profile_data));
- uint64_t CountersSize = CountersEnd - CountersBegin;
+ sizeof(__llvm_profile_header) + __llvm_write_binary_ids(NULL) + DataSize;
+ uint64_t CountersSize =
+ __llvm_profile_get_counters_size(CountersBegin, CountersEnd);
/* Don't publish a VMO if there are no counters. */
if (!CountersSize)
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformLinux.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformLinux.c
index e61f90b2cef9..592c09b49d4b 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformLinux.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformLinux.c
@@ -43,8 +43,8 @@ extern __llvm_profile_data PROF_DATA_START COMPILER_RT_VISIBILITY
COMPILER_RT_WEAK;
extern __llvm_profile_data PROF_DATA_STOP COMPILER_RT_VISIBILITY
COMPILER_RT_WEAK;
-extern uint64_t PROF_CNTS_START COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
-extern uint64_t PROF_CNTS_STOP COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
+extern char PROF_CNTS_START COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
+extern char PROF_CNTS_STOP COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
extern uint32_t PROF_ORDERFILE_START COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
extern char PROF_NAME_START COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
extern char PROF_NAME_STOP COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
@@ -65,10 +65,10 @@ COMPILER_RT_VISIBILITY const char *__llvm_profile_begin_names(void) {
COMPILER_RT_VISIBILITY const char *__llvm_profile_end_names(void) {
return &PROF_NAME_STOP;
}
-COMPILER_RT_VISIBILITY uint64_t *__llvm_profile_begin_counters(void) {
+COMPILER_RT_VISIBILITY char *__llvm_profile_begin_counters(void) {
return &PROF_CNTS_START;
}
-COMPILER_RT_VISIBILITY uint64_t *__llvm_profile_end_counters(void) {
+COMPILER_RT_VISIBILITY char *__llvm_profile_end_counters(void) {
return &PROF_CNTS_STOP;
}
COMPILER_RT_VISIBILITY uint32_t *__llvm_profile_begin_orderfile(void) {
@@ -125,11 +125,9 @@ static int WriteOneBinaryId(ProfDataWriter *Writer, uint64_t BinaryIdLen,
static int WriteBinaryIdForNote(ProfDataWriter *Writer,
const ElfW(Nhdr) * Note) {
int BinaryIdSize = 0;
-
const char *NoteName = (const char *)Note + sizeof(ElfW(Nhdr));
if (Note->n_type == NT_GNU_BUILD_ID && Note->n_namesz == 4 &&
memcmp(NoteName, "GNU\0", 4) == 0) {
-
uint64_t BinaryIdLen = Note->n_descsz;
const uint8_t *BinaryIdData =
(const uint8_t *)(NoteName + RoundUp(Note->n_namesz, 4));
@@ -151,12 +149,12 @@ static int WriteBinaryIdForNote(ProfDataWriter *Writer,
*/
static int WriteBinaryIds(ProfDataWriter *Writer, const ElfW(Nhdr) * Note,
const ElfW(Nhdr) * NotesEnd) {
- int TotalBinaryIdsSize = 0;
+ int BinaryIdsSize = 0;
while (Note < NotesEnd) {
- int Result = WriteBinaryIdForNote(Writer, Note);
- if (Result == -1)
+ int OneBinaryIdSize = WriteBinaryIdForNote(Writer, Note);
+ if (OneBinaryIdSize == -1)
return -1;
- TotalBinaryIdsSize += Result;
+ BinaryIdsSize += OneBinaryIdSize;
/* Calculate the offset of the next note in notes section. */
size_t NoteOffset = sizeof(ElfW(Nhdr)) + RoundUp(Note->n_namesz, 4) +
@@ -164,7 +162,7 @@ static int WriteBinaryIds(ProfDataWriter *Writer, const ElfW(Nhdr) * Note,
Note = (const ElfW(Nhdr) *)((const char *)(Note) + NoteOffset);
}
- return TotalBinaryIdsSize;
+ return BinaryIdsSize;
}
/*
@@ -178,21 +176,46 @@ COMPILER_RT_VISIBILITY int __llvm_write_binary_ids(ProfDataWriter *Writer) {
const ElfW(Phdr) *ProgramHeader =
(const ElfW(Phdr) *)((uintptr_t)ElfHeader + ElfHeader->e_phoff);
+ int TotalBinaryIdsSize = 0;
uint32_t I;
/* Iterate through entries in the program header. */
for (I = 0; I < ElfHeader->e_phnum; I++) {
- /* Look for the notes section in program header entries. */
+ /* Look for the notes segment in program header entries. */
if (ProgramHeader[I].p_type != PT_NOTE)
continue;
- const ElfW(Nhdr) *Note =
- (const ElfW(Nhdr) *)((uintptr_t)ElfHeader + ProgramHeader[I].p_offset);
- const ElfW(Nhdr) *NotesEnd =
- (const ElfW(Nhdr) *)((const char *)(Note) + ProgramHeader[I].p_filesz);
- return WriteBinaryIds(Writer, Note, NotesEnd);
+    /* There can be multiple notes segments; examine each of them. */
+ const ElfW(Nhdr) * Note;
+ const ElfW(Nhdr) * NotesEnd;
+ /*
+     * When examining notes in the file, use p_offset, the offset within the
+     * ELF file, to find the start of the notes.
+ */
+ if (ProgramHeader[I].p_memsz == 0 ||
+ ProgramHeader[I].p_memsz == ProgramHeader[I].p_filesz) {
+ Note = (const ElfW(Nhdr) *)((uintptr_t)ElfHeader +
+ ProgramHeader[I].p_offset);
+ NotesEnd = (const ElfW(Nhdr) *)((const char *)(Note) +
+ ProgramHeader[I].p_filesz);
+ } else {
+ /*
+       * When examining notes in memory, use p_vaddr, the address of the
+       * segment once it is loaded into memory, to find the start of the notes.
+ */
+ Note =
+ (const ElfW(Nhdr) *)((uintptr_t)ElfHeader + ProgramHeader[I].p_vaddr);
+ NotesEnd =
+ (const ElfW(Nhdr) *)((const char *)(Note) + ProgramHeader[I].p_memsz);
+ }
+
+ int BinaryIdsSize = WriteBinaryIds(Writer, Note, NotesEnd);
+    if (BinaryIdsSize == -1)
+ return -1;
+
+ TotalBinaryIdsSize += BinaryIdsSize;
}
- return 0;
+ return TotalBinaryIdsSize;
}
#else /* !NT_GNU_BUILD_ID */
/*
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformOther.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformOther.c
index 48946ce94253..3e9b3ca0a045 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformOther.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformOther.c
@@ -20,8 +20,8 @@ static const __llvm_profile_data *DataFirst = NULL;
static const __llvm_profile_data *DataLast = NULL;
static const char *NamesFirst = NULL;
static const char *NamesLast = NULL;
-static uint64_t *CountersFirst = NULL;
-static uint64_t *CountersLast = NULL;
+static char *CountersFirst = NULL;
+static char *CountersLast = NULL;
static uint32_t *OrderFileFirst = NULL;
static const void *getMinAddr(const void *A1, const void *A2) {
@@ -46,19 +46,21 @@ void __llvm_profile_register_function(void *Data_) {
if (!DataFirst) {
DataFirst = Data;
DataLast = Data + 1;
- CountersFirst = (uint64_t *)((uintptr_t)Data_ + Data->CounterPtr);
- CountersLast = CountersFirst + Data->NumCounters;
+ CountersFirst = (char *)((uintptr_t)Data_ + Data->CounterPtr);
+ CountersLast =
+ CountersFirst + Data->NumCounters * __llvm_profile_counter_entry_size();
return;
}
DataFirst = (const __llvm_profile_data *)getMinAddr(DataFirst, Data);
- CountersFirst = (uint64_t *)getMinAddr(
- CountersFirst, (uint64_t *)((uintptr_t)Data_ + Data->CounterPtr));
+ CountersFirst = (char *)getMinAddr(
+ CountersFirst, (char *)((uintptr_t)Data_ + Data->CounterPtr));
DataLast = (const __llvm_profile_data *)getMaxAddr(DataLast, Data + 1);
- CountersLast = (uint64_t *)getMaxAddr(
+ CountersLast = (char *)getMaxAddr(
CountersLast,
- (uint64_t *)((uintptr_t)Data_ + Data->CounterPtr) + Data->NumCounters);
+ (char *)((uintptr_t)Data_ + Data->CounterPtr) +
+ Data->NumCounters * __llvm_profile_counter_entry_size());
}
COMPILER_RT_VISIBILITY
@@ -83,9 +85,9 @@ const char *__llvm_profile_begin_names(void) { return NamesFirst; }
COMPILER_RT_VISIBILITY
const char *__llvm_profile_end_names(void) { return NamesLast; }
COMPILER_RT_VISIBILITY
-uint64_t *__llvm_profile_begin_counters(void) { return CountersFirst; }
+char *__llvm_profile_begin_counters(void) { return CountersFirst; }
COMPILER_RT_VISIBILITY
-uint64_t *__llvm_profile_end_counters(void) { return CountersLast; }
+char *__llvm_profile_end_counters(void) { return CountersLast; }
/* TODO: correctly set up OrderFileFirst. */
COMPILER_RT_VISIBILITY
uint32_t *__llvm_profile_begin_orderfile(void) { return OrderFileFirst; }
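The switch from uint64_t* to char* for the counter boundaries reflects that a counter entry is no longer assumed to be 8 bytes; the entry size now comes from __llvm_profile_counter_entry_size(). A hedged sketch of the pointer arithmetic, with kCounterEntrySize standing in for that call (an assumption for illustration only):

#include <cstddef>
#include <cstdint>

// Stand-in: the real value comes from __llvm_profile_counter_entry_size()
// and depends on the counter mode (for example 8 bytes vs. a single byte).
enum { kCounterEntrySize = sizeof(uint64_t) };

static char *CounterRegionEnd(char *CountersBegin, size_t NumCounters) {
  // Byte arithmetic stays correct for any entry size, which is why the
  // runtime now tracks the region with char* instead of uint64_t*.
  return CountersBegin + NumCounters * kCounterEntrySize;
}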
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformWindows.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformWindows.c
index a0192ced4f26..dd576b2f8357 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformWindows.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformWindows.c
@@ -41,8 +41,8 @@ __llvm_profile_data COMPILER_RT_SECTION(".lprfd$Z") DataEnd = {0};
const char COMPILER_RT_SECTION(".lprfn$A") NamesStart = '\0';
const char COMPILER_RT_SECTION(".lprfn$Z") NamesEnd = '\0';
-uint64_t COMPILER_RT_SECTION(".lprfc$A") CountersStart;
-uint64_t COMPILER_RT_SECTION(".lprfc$Z") CountersEnd;
+char COMPILER_RT_SECTION(".lprfc$A") CountersStart;
+char COMPILER_RT_SECTION(".lprfc$Z") CountersEnd;
uint32_t COMPILER_RT_SECTION(".lorderfile$A") OrderFileStart;
ValueProfNode COMPILER_RT_SECTION(".lprfnd$A") VNodesStart;
@@ -56,8 +56,8 @@ const __llvm_profile_data *__llvm_profile_end_data(void) { return &DataEnd; }
const char *__llvm_profile_begin_names(void) { return &NamesStart + 1; }
const char *__llvm_profile_end_names(void) { return &NamesEnd; }
-uint64_t *__llvm_profile_begin_counters(void) { return &CountersStart + 1; }
-uint64_t *__llvm_profile_end_counters(void) { return &CountersEnd; }
+char *__llvm_profile_begin_counters(void) { return &CountersStart + 1; }
+char *__llvm_profile_end_counters(void) { return &CountersEnd; }
uint32_t *__llvm_profile_begin_orderfile(void) { return &OrderFileStart; }
ValueProfNode *__llvm_profile_begin_vnodes(void) { return &VNodesStart + 1; }
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingWriter.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingWriter.c
index e5c0dc1479d9..800103d199ab 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingWriter.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingWriter.c
@@ -244,8 +244,8 @@ COMPILER_RT_VISIBILITY int lprofWriteData(ProfDataWriter *Writer,
/* Match logic in __llvm_profile_write_buffer(). */
const __llvm_profile_data *DataBegin = __llvm_profile_begin_data();
const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
- const uint64_t *CountersBegin = __llvm_profile_begin_counters();
- const uint64_t *CountersEnd = __llvm_profile_end_counters();
+ const char *CountersBegin = __llvm_profile_begin_counters();
+ const char *CountersEnd = __llvm_profile_end_counters();
const char *NamesBegin = __llvm_profile_begin_names();
const char *NamesEnd = __llvm_profile_end_names();
return lprofWriteDataImpl(Writer, DataBegin, DataEnd, CountersBegin,
@@ -256,7 +256,7 @@ COMPILER_RT_VISIBILITY int lprofWriteData(ProfDataWriter *Writer,
COMPILER_RT_VISIBILITY int
lprofWriteDataImpl(ProfDataWriter *Writer, const __llvm_profile_data *DataBegin,
const __llvm_profile_data *DataEnd,
- const uint64_t *CountersBegin, const uint64_t *CountersEnd,
+ const char *CountersBegin, const char *CountersEnd,
VPDataReaderType *VPDataReader, const char *NamesBegin,
const char *NamesEnd, int SkipNameDataWrite) {
int DebugInfoCorrelate =
@@ -265,13 +265,18 @@ lprofWriteDataImpl(ProfDataWriter *Writer, const __llvm_profile_data *DataBegin,
/* Calculate size of sections. */
const uint64_t DataSize =
DebugInfoCorrelate ? 0 : __llvm_profile_get_data_size(DataBegin, DataEnd);
- const uint64_t CountersSize = CountersEnd - CountersBegin;
+ const uint64_t NumData =
+ DebugInfoCorrelate ? 0 : __llvm_profile_get_num_data(DataBegin, DataEnd);
+ const uint64_t CountersSize =
+ __llvm_profile_get_counters_size(CountersBegin, CountersEnd);
+ const uint64_t NumCounters =
+ __llvm_profile_get_num_counters(CountersBegin, CountersEnd);
const uint64_t NamesSize = DebugInfoCorrelate ? 0 : NamesEnd - NamesBegin;
/* Create the header. */
__llvm_profile_header Header;
- if (!DataSize && (!DebugInfoCorrelate || !CountersSize))
+ if (!NumData && (!DebugInfoCorrelate || !NumCounters))
return 0;
/* Determine how much padding is needed before/after the counters and after
@@ -282,9 +287,16 @@ lprofWriteDataImpl(ProfDataWriter *Writer, const __llvm_profile_data *DataBegin,
DataSize, CountersSize, NamesSize, &PaddingBytesBeforeCounters,
&PaddingBytesAfterCounters, &PaddingBytesAfterNames);
+ {
+ // TODO: Unfortunately the header's fields are named DataSize and
+ // CountersSize when they should be named NumData and NumCounters,
+ // respectively.
+ const uint64_t CountersSize = NumCounters;
+ const uint64_t DataSize = NumData;
/* Initialize header structure. */
#define INSTR_PROF_RAW_HEADER(Type, Name, Init) Header.Name = Init;
#include "profile/InstrProfData.inc"
+ }
/* On WIN64, label differences are truncated 32-bit values. Truncate
* CountersDelta to match. */
@@ -309,10 +321,9 @@ lprofWriteDataImpl(ProfDataWriter *Writer, const __llvm_profile_data *DataBegin,
/* Write the profile data. */
ProfDataIOVec IOVecData[] = {
- {DebugInfoCorrelate ? NULL : DataBegin, sizeof(__llvm_profile_data),
- DataSize, 0},
+ {DebugInfoCorrelate ? NULL : DataBegin, sizeof(uint8_t), DataSize, 0},
{NULL, sizeof(uint8_t), PaddingBytesBeforeCounters, 1},
- {CountersBegin, sizeof(uint64_t), CountersSize, 0},
+ {CountersBegin, sizeof(uint8_t), CountersSize, 0},
{NULL, sizeof(uint8_t), PaddingBytesAfterCounters, 1},
{(SkipNameDataWrite || DebugInfoCorrelate) ? NULL : NamesBegin,
sizeof(uint8_t), NamesSize, 0},
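The writer now distinguishes a byte count (CountersSize) from an entry count (NumCounters), and all IOVec lengths are expressed in sizeof(uint8_t) units. A sketch of the relationship, with the entry size passed explicitly for illustration rather than queried from the runtime:

#include <cstddef>
#include <cstdint>

// Illustrative only: derive the entry count the raw-profile header wants
// from the byte range the section iterators provide.
static uint64_t NumCounterEntries(const char *CountersBegin,
                                  const char *CountersEnd,
                                  size_t CounterEntrySize) {
  uint64_t CountersSizeInBytes = (uint64_t)(CountersEnd - CountersBegin);
  return CountersSizeInBytes / CounterEntrySize;
}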
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h
index 7e2fa91089f1..fe48b9caf067 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h
@@ -201,7 +201,8 @@ AddrHashMap<T, kSize>::AddrHashMap() {
}
template <typename T, uptr kSize>
-void AddrHashMap<T, kSize>::acquire(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
+void AddrHashMap<T, kSize>::acquire(Handle *h)
+ SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
uptr addr = h->addr_;
uptr hash = calcHash(addr);
Bucket *b = &table_[hash];
@@ -330,7 +331,8 @@ void AddrHashMap<T, kSize>::acquire(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
}
template <typename T, uptr kSize>
- void AddrHashMap<T, kSize>::release(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
+ void AddrHashMap<T, kSize>::release(Handle *h)
+ SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
if (!h->cell_)
return;
Bucket *b = h->bucket_;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
index c5a5fb7371dd..25a43a59f047 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
@@ -126,12 +126,12 @@ void InternalFree(void *addr, InternalAllocatorCache *cache) {
RawInternalFree(addr, cache);
}
-void InternalAllocatorLock() NO_THREAD_SAFETY_ANALYSIS {
+void InternalAllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
internal_allocator_cache_mu.Lock();
internal_allocator()->ForceLock();
}
-void InternalAllocatorUnlock() NO_THREAD_SAFETY_ANALYSIS {
+void InternalAllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
internal_allocator()->ForceUnlock();
internal_allocator_cache_mu.Unlock();
}
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
index 9a3602f730b3..b92cfa5bf4c4 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -175,12 +175,12 @@ class CombinedAllocator {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
- void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
+ void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
primary_.ForceLock();
secondary_.ForceLock();
}
- void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
+ void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
secondary_.ForceUnlock();
primary_.ForceUnlock();
}
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
index ae1b7e0d5f1c..f2471efced61 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
@@ -238,13 +238,13 @@ class SizeClassAllocator32 {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
- void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
+ void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
for (uptr i = 0; i < kNumClasses; i++) {
GetSizeClassInfo(i)->mutex.Lock();
}
}
- void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
+ void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
for (int i = kNumClasses - 1; i >= 0; i--) {
GetSizeClassInfo(i)->mutex.Unlock();
}
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
index f917310cfebb..66ba71d325da 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -354,13 +354,13 @@ class SizeClassAllocator64 {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
- void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
+ void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
for (uptr i = 0; i < kNumClasses; i++) {
GetRegionInfo(i)->mutex.Lock();
}
}
- void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
+ void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
for (int i = (int)kNumClasses - 1; i >= 0; i--) {
GetRegionInfo(i)->mutex.Unlock();
}
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
index c24354cb5b2a..48afb2a29834 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
@@ -267,9 +267,9 @@ class LargeMmapAllocator {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
- void ForceLock() ACQUIRE(mutex_) { mutex_.Lock(); }
+ void ForceLock() SANITIZER_ACQUIRE(mutex_) { mutex_.Lock(); }
- void ForceUnlock() RELEASE(mutex_) { mutex_.Unlock(); }
+ void ForceUnlock() SANITIZER_RELEASE(mutex_) { mutex_.Unlock(); }
// Iterate over all existing chunks.
// The allocator must be locked when calling this function.
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.h
index 9ddb099a8dbc..139d5a066664 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.h
@@ -238,12 +238,12 @@ void SetPrintfAndReportCallback(void (*callback)(const char *));
// Lock sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
public:
- ScopedErrorReportLock() ACQUIRE(mutex_) { Lock(); }
- ~ScopedErrorReportLock() RELEASE(mutex_) { Unlock(); }
+ ScopedErrorReportLock() SANITIZER_ACQUIRE(mutex_) { Lock(); }
+ ~ScopedErrorReportLock() SANITIZER_RELEASE(mutex_) { Unlock(); }
- static void Lock() ACQUIRE(mutex_);
- static void Unlock() RELEASE(mutex_);
- static void CheckLocked() CHECK_LOCKED(mutex_);
+ static void Lock() SANITIZER_ACQUIRE(mutex_);
+ static void Unlock() SANITIZER_RELEASE(mutex_);
+ static void CheckLocked() SANITIZER_CHECK_LOCKED(mutex_);
private:
static atomic_uintptr_t reporting_thread_;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
index 52fd4f3e6d41..54a7ad3b8b3f 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
@@ -606,8 +606,9 @@ static int AddModuleSegments(const char *module_name, dl_phdr_info *info,
cur_module.addAddressRange(cur_beg, cur_end, executable,
writable);
} else if (phdr->p_type == PT_NOTE) {
+# ifdef NT_GNU_BUILD_ID
uptr off = 0;
- while (off < phdr->p_memsz - sizeof(ElfW(Nhdr))) {
+ while (off + sizeof(ElfW(Nhdr)) < phdr->p_memsz) {
auto *nhdr = reinterpret_cast<const ElfW(Nhdr) *>(info->dlpi_addr +
phdr->p_vaddr + off);
constexpr auto kGnuNamesz = 4; // "GNU" with NUL-byte.
@@ -631,6 +632,7 @@ static int AddModuleSegments(const char *module_name, dl_phdr_info *info,
off += sizeof(*nhdr) + RoundUpTo(nhdr->n_namesz, 4) +
RoundUpTo(nhdr->n_descsz, 4);
}
+# endif
}
}
modules->push_back(cur_module);
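Besides the NT_GNU_BUILD_ID guard, the new loop bound sidesteps unsigned wraparound: with the old form, a p_memsz smaller than sizeof(ElfW(Nhdr)) made the subtraction wrap to a huge value and the loop would read past the segment. A minimal demonstration (12 is the size of Elf64_Nhdr, three 32-bit words):

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t p_memsz = 8;     // segment smaller than one note header
  uint64_t nhdr_size = 12;  // sizeof(Elf64_Nhdr)
  uint64_t off = 0;
  bool old_cond = off < p_memsz - nhdr_size;  // 8 - 12 wraps to 2^64 - 4
  bool new_cond = off + nhdr_size < p_memsz;  // 12 < 8 is simply false
  std::printf("old=%d new=%d\n", old_cond, new_cond);  // prints old=1 new=0
}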
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
index 5ec6efaa6490..c16f5cdc1d71 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
@@ -20,25 +20,27 @@
namespace __sanitizer {
-class MUTEX StaticSpinMutex {
+class SANITIZER_MUTEX StaticSpinMutex {
public:
void Init() {
atomic_store(&state_, 0, memory_order_relaxed);
}
- void Lock() ACQUIRE() {
+ void Lock() SANITIZER_ACQUIRE() {
if (LIKELY(TryLock()))
return;
LockSlow();
}
- bool TryLock() TRY_ACQUIRE(true) {
+ bool TryLock() SANITIZER_TRY_ACQUIRE(true) {
return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
}
- void Unlock() RELEASE() { atomic_store(&state_, 0, memory_order_release); }
+ void Unlock() SANITIZER_RELEASE() {
+ atomic_store(&state_, 0, memory_order_release);
+ }
- void CheckLocked() const CHECK_LOCKED() {
+ void CheckLocked() const SANITIZER_CHECK_LOCKED() {
CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
}
@@ -48,7 +50,7 @@ class MUTEX StaticSpinMutex {
void LockSlow();
};
-class MUTEX SpinMutex : public StaticSpinMutex {
+class SANITIZER_MUTEX SpinMutex : public StaticSpinMutex {
public:
SpinMutex() {
Init();
@@ -156,12 +158,12 @@ class CheckedMutex {
// Derive from CheckedMutex for the purposes of EBO.
// We could make it a field marked with [[no_unique_address]],
// but this attribute is not supported by some older compilers.
-class MUTEX Mutex : CheckedMutex {
+class SANITIZER_MUTEX Mutex : CheckedMutex {
public:
explicit constexpr Mutex(MutexType type = MutexUnchecked)
: CheckedMutex(type) {}
- void Lock() ACQUIRE() {
+ void Lock() SANITIZER_ACQUIRE() {
CheckedMutex::Lock();
u64 reset_mask = ~0ull;
u64 state = atomic_load_relaxed(&state_);
@@ -206,7 +208,7 @@ class MUTEX Mutex : CheckedMutex {
}
}
- void Unlock() RELEASE() {
+ void Unlock() SANITIZER_RELEASE() {
CheckedMutex::Unlock();
bool wake_writer;
u64 wake_readers;
@@ -234,7 +236,7 @@ class MUTEX Mutex : CheckedMutex {
readers_.Post(wake_readers);
}
- void ReadLock() ACQUIRE_SHARED() {
+ void ReadLock() SANITIZER_ACQUIRE_SHARED() {
CheckedMutex::Lock();
u64 reset_mask = ~0ull;
u64 state = atomic_load_relaxed(&state_);
@@ -271,7 +273,7 @@ class MUTEX Mutex : CheckedMutex {
}
}
- void ReadUnlock() RELEASE_SHARED() {
+ void ReadUnlock() SANITIZER_RELEASE_SHARED() {
CheckedMutex::Unlock();
bool wake;
u64 new_state;
@@ -297,13 +299,13 @@ class MUTEX Mutex : CheckedMutex {
// owns the mutex but a child checks that it is locked. Rather than
// maintaining complex state to work around those situations, the check only
// checks that the mutex is owned.
- void CheckWriteLocked() const CHECK_LOCKED() {
+ void CheckWriteLocked() const SANITIZER_CHECK_LOCKED() {
CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);
}
- void CheckLocked() const CHECK_LOCKED() { CheckWriteLocked(); }
+ void CheckLocked() const SANITIZER_CHECK_LOCKED() { CheckWriteLocked(); }
- void CheckReadLocked() const CHECK_LOCKED() {
+ void CheckReadLocked() const SANITIZER_CHECK_LOCKED() {
CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);
}
@@ -361,13 +363,13 @@ void FutexWait(atomic_uint32_t *p, u32 cmp);
void FutexWake(atomic_uint32_t *p, u32 count);
template <typename MutexType>
-class SCOPED_LOCK GenericScopedLock {
+class SANITIZER_SCOPED_LOCK GenericScopedLock {
public:
- explicit GenericScopedLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
+ explicit GenericScopedLock(MutexType *mu) SANITIZER_ACQUIRE(mu) : mu_(mu) {
mu_->Lock();
}
- ~GenericScopedLock() RELEASE() { mu_->Unlock(); }
+ ~GenericScopedLock() SANITIZER_RELEASE() { mu_->Unlock(); }
private:
MutexType *mu_;
@@ -377,13 +379,14 @@ class SCOPED_LOCK GenericScopedLock {
};
template <typename MutexType>
-class SCOPED_LOCK GenericScopedReadLock {
+class SANITIZER_SCOPED_LOCK GenericScopedReadLock {
public:
- explicit GenericScopedReadLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
+ explicit GenericScopedReadLock(MutexType *mu) SANITIZER_ACQUIRE(mu)
+ : mu_(mu) {
mu_->ReadLock();
}
- ~GenericScopedReadLock() RELEASE() { mu_->ReadUnlock(); }
+ ~GenericScopedReadLock() SANITIZER_RELEASE() { mu_->ReadUnlock(); }
private:
MutexType *mu_;
@@ -393,10 +396,10 @@ class SCOPED_LOCK GenericScopedReadLock {
};
template <typename MutexType>
-class SCOPED_LOCK GenericScopedRWLock {
+class SANITIZER_SCOPED_LOCK GenericScopedRWLock {
public:
ALWAYS_INLINE explicit GenericScopedRWLock(MutexType *mu, bool write)
- ACQUIRE(mu)
+ SANITIZER_ACQUIRE(mu)
: mu_(mu), write_(write) {
if (write_)
mu_->Lock();
@@ -404,7 +407,7 @@ class SCOPED_LOCK GenericScopedRWLock {
mu_->ReadLock();
}
- ALWAYS_INLINE ~GenericScopedRWLock() RELEASE() {
+ ALWAYS_INLINE ~GenericScopedRWLock() SANITIZER_RELEASE() {
if (write_)
mu_->Unlock();
else
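As a usage sketch of the renamed annotations together with the RAII wrappers above (not compiler-rt code; it assumes compilation inside the sanitizer tree so the header path and the u64 typedef resolve):

#include "sanitizer_common/sanitizer_mutex.h"

namespace __sanitizer {

class Counter {
 public:
  void Inc() SANITIZER_EXCLUDES(mtx_) {
    // Constructor is SANITIZER_ACQUIRE(mu); destructor is SANITIZER_RELEASE().
    GenericScopedLock<Mutex> lock(&mtx_);
    value_++;
  }

 private:
  Mutex mtx_;
  u64 value_ SANITIZER_GUARDED_BY(mtx_) = 0;
};

}  // namespace __sanitizer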
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp
index 64535805e40d..0d25fa80e2ed 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp
@@ -130,7 +130,7 @@ unsigned struct_sigevent_sz = sizeof(struct sigevent);
unsigned struct_sched_param_sz = sizeof(struct sched_param);
unsigned struct_statfs_sz = sizeof(struct statfs);
unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
-unsigned ucontext_t_sz = sizeof(ucontext_t);
+unsigned ucontext_t_sz(void *ctx) { return sizeof(ucontext_t); }
unsigned struct_rlimit_sz = sizeof(struct rlimit);
unsigned struct_timespec_sz = sizeof(struct timespec);
unsigned struct_utimbuf_sz = sizeof(struct utimbuf);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h
index 649e64fd1a32..9859c52ec69f 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h
@@ -57,7 +57,7 @@ extern unsigned struct_sched_param_sz;
extern unsigned struct_statfs64_sz;
extern unsigned struct_statfs_sz;
extern unsigned struct_sockaddr_sz;
-extern unsigned ucontext_t_sz;
+unsigned ucontext_t_sz(void *ctx);
extern unsigned struct_rlimit_sz;
extern unsigned struct_utimbuf_sz;
extern unsigned struct_timespec_sz;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp
index 531e07f2d4c5..648e502b904a 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp
@@ -554,7 +554,7 @@ unsigned struct_tms_sz = sizeof(struct tms);
unsigned struct_sigevent_sz = sizeof(struct sigevent);
unsigned struct_sched_param_sz = sizeof(struct sched_param);
unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
-unsigned ucontext_t_sz = sizeof(ucontext_t);
+unsigned ucontext_t_sz(void *ctx) { return sizeof(ucontext_t); }
unsigned struct_rlimit_sz = sizeof(struct rlimit);
unsigned struct_timespec_sz = sizeof(struct timespec);
unsigned struct_sembuf_sz = sizeof(struct sembuf);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h
index 9407803fc9c3..dc6eb59b2800 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h
@@ -45,7 +45,7 @@ extern unsigned struct_stack_t_sz;
extern unsigned struct_sched_param_sz;
extern unsigned struct_statfs_sz;
extern unsigned struct_sockaddr_sz;
-extern unsigned ucontext_t_sz;
+unsigned ucontext_t_sz(void *ctx);
extern unsigned struct_rlimit_sz;
extern unsigned struct_utimbuf_sz;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
index a1c452855ae7..82048f0eae2e 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
@@ -170,8 +170,9 @@ typedef struct user_fpregs elf_fpregset_t;
#endif
// Include these after system headers to avoid name clashes and ambiguities.
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_platform_limits_posix.h"
+# include "sanitizer_common.h"
+# include "sanitizer_internal_defs.h"
+# include "sanitizer_platform_limits_posix.h"
namespace __sanitizer {
unsigned struct_utsname_sz = sizeof(struct utsname);
@@ -214,10 +215,24 @@ namespace __sanitizer {
#if !SANITIZER_ANDROID
unsigned struct_statfs_sz = sizeof(struct statfs);
unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
- unsigned ucontext_t_sz = sizeof(ucontext_t);
-#endif // !SANITIZER_ANDROID
-#if SANITIZER_LINUX
+ unsigned ucontext_t_sz(void *ctx) {
+# if SANITIZER_LINUX && SANITIZER_X64
+ // See kernel arch/x86/kernel/fpu/signal.c for details.
+ const auto *fpregs = static_cast<ucontext_t *>(ctx)->uc_mcontext.fpregs;
+ // The member names differ across header versions, but the actual layout
+ // is always the same, so avoid the member names and use arithmetic.
+ const uint32_t *after_xmm =
+ reinterpret_cast<const uint32_t *>(fpregs + 1) - 24;
+ if (after_xmm[12] == FP_XSTATE_MAGIC1)
+ return reinterpret_cast<const char *>(fpregs) + after_xmm[13] -
+ static_cast<const char *>(ctx);
+# endif
+ return sizeof(ucontext_t);
+ }
+# endif // !SANITIZER_ANDROID
+
+# if SANITIZER_LINUX
unsigned struct_epoll_event_sz = sizeof(struct epoll_event);
unsigned struct_sysinfo_sz = sizeof(struct sysinfo);
unsigned __user_cap_header_struct_sz =
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h
index 6de1e8e14a4f..89f323dd2c72 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h
@@ -57,12 +57,12 @@ extern unsigned struct_regmatch_sz;
extern unsigned struct_fstab_sz;
extern unsigned struct_statfs_sz;
extern unsigned struct_sockaddr_sz;
-extern unsigned ucontext_t_sz;
-#endif // !SANITIZER_ANDROID
+unsigned ucontext_t_sz(void *uctx);
+# endif // !SANITIZER_ANDROID
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
-#if defined(__x86_64__)
+# if defined(__x86_64__)
const unsigned struct_kernel_stat_sz = 144;
const unsigned struct_kernel_stat64_sz = 0;
#elif defined(__i386__)
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.cpp
index a113cb0d3490..dad7bde1498a 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.cpp
@@ -89,7 +89,7 @@ namespace __sanitizer {
unsigned struct_sched_param_sz = sizeof(struct sched_param);
unsigned struct_statfs_sz = sizeof(struct statfs);
unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
- unsigned ucontext_t_sz = sizeof(ucontext_t);
+ unsigned ucontext_t_sz(void *ctx) { return sizeof(ucontext_t); }
unsigned struct_timespec_sz = sizeof(struct timespec);
#if SANITIZER_SOLARIS32
unsigned struct_statvfs64_sz = sizeof(struct statvfs64);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.h
index cbab577bcf26..84a81265162c 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.h
@@ -43,7 +43,7 @@ extern unsigned struct_sched_param_sz;
extern unsigned struct_statfs64_sz;
extern unsigned struct_statfs_sz;
extern unsigned struct_sockaddr_sz;
-extern unsigned ucontext_t_sz;
+unsigned ucontext_t_sz(void *ctx);
extern unsigned struct_timespec_sz;
extern unsigned struct_rlimit_sz;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_mac.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_mac.cpp
index 1f53e3e46d8f..62b2e5e03216 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_mac.cpp
@@ -143,16 +143,16 @@ void MemoryMappingLayout::LoadFromCache() {
// early in the process, when dyld is one of the only images loaded,
// so it will be hit after only a few iterations.
static mach_header *get_dyld_image_header() {
- unsigned depth = 1;
- vm_size_t size = 0;
vm_address_t address = 0;
- kern_return_t err = KERN_SUCCESS;
- mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
while (true) {
+ vm_size_t size = 0;
+ unsigned depth = 1;
struct vm_region_submap_info_64 info;
- err = vm_region_recurse_64(mach_task_self(), &address, &size, &depth,
- (vm_region_info_t)&info, &count);
+ mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
+ kern_return_t err =
+ vm_region_recurse_64(mach_task_self(), &address, &size, &depth,
+ (vm_region_info_t)&info, &count);
if (err != KERN_SUCCESS) return nullptr;
if (size >= sizeof(mach_header) && info.protection & kProtectionRead) {
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
index 1a074d2bb700..4aa605485166 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
@@ -149,8 +149,8 @@ class Quarantine {
Cache cache_;
char pad2_[kCacheLineSize];
- void NOINLINE Recycle(uptr min_size, Callback cb) REQUIRES(recycle_mutex_)
- RELEASE(recycle_mutex_) {
+ void NOINLINE Recycle(uptr min_size, Callback cb)
+ SANITIZER_REQUIRES(recycle_mutex_) SANITIZER_RELEASE(recycle_mutex_) {
Cache tmp;
{
SpinMutexLock l(&cache_mutex_);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
index 4791a3a35bdb..148470943b47 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
@@ -234,6 +234,11 @@ static uptr *UncompressLzw(const u8 *from, const u8 *from_end, uptr *to,
return to;
}
+#if defined(_MSC_VER) && !defined(__clang__)
+# pragma warning(push)
+// Disable 'nonstandard extension used: zero-sized array in struct/union'.
+# pragma warning(disable : 4200)
+#endif
namespace {
struct PackedHeader {
uptr size;
@@ -241,6 +246,9 @@ struct PackedHeader {
u8 data[];
};
} // namespace
+#if defined(_MSC_VER) && !defined(__clang__)
+# pragma warning(pop)
+#endif
uptr *StackStore::BlockInfo::GetOrUnpack(StackStore *store) {
SpinMutexLock l(&mtx_);
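The pragma pair silences MSVC's C4200 for the flexible trailing array in PackedHeader. A hypothetical standalone illustration of that allocation pattern (the names are not compiler-rt's):

#include <cstddef>
#include <cstdlib>
#include <cstring>

struct Packed {
  size_t size;
  unsigned char data[];  // flexible array member; a common extension in C++
};

static Packed *MakePacked(const void *payload, size_t n) {
  // Allocate the header plus n trailing payload bytes in one block.
  Packed *p = static_cast<Packed *>(std::malloc(sizeof(Packed) + n));
  if (!p) return nullptr;
  p->size = n;
  std::memcpy(p->data, payload, n);
  return p;
}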
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
index 1bfad811f712..4f1a8caac6ed 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
@@ -97,7 +97,7 @@ class StackStore {
Packed,
Unpacked,
};
- State state GUARDED_BY(mtx_);
+ State state SANITIZER_GUARDED_BY(mtx_);
uptr *Create(StackStore *store);
@@ -109,8 +109,8 @@ class StackStore {
void TestOnlyUnmap(StackStore *store);
bool Stored(uptr n);
bool IsPacked() const;
- void Lock() NO_THREAD_SAFETY_ANALYSIS { mtx_.Lock(); }
- void Unlock() NO_THREAD_SAFETY_ANALYSIS { mtx_.Unlock(); }
+ void Lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { mtx_.Lock(); }
+ void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { mtx_.Unlock(); }
};
BlockInfo blocks_[kBlockCount] = {};
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
index c755b1829d2a..ac87fab3eaf1 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
@@ -94,8 +94,8 @@ class CompressThread {
constexpr CompressThread() = default;
void NewWorkNotify();
void Stop();
- void LockAndStop() NO_THREAD_SAFETY_ANALYSIS;
- void Unlock() NO_THREAD_SAFETY_ANALYSIS;
+ void LockAndStop() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
+ void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
private:
enum class State {
@@ -114,8 +114,8 @@ class CompressThread {
Semaphore semaphore_ = {};
StaticSpinMutex mutex_ = {};
- State state_ GUARDED_BY(mutex_) = State::NotStarted;
- void *thread_ GUARDED_BY(mutex_) = nullptr;
+ State state_ SANITIZER_GUARDED_BY(mutex_) = State::NotStarted;
+ void *thread_ SANITIZER_GUARDED_BY(mutex_) = nullptr;
atomic_uint8_t run_ = {};
};
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_win.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_win.cpp
index e12b9e5bee06..f114acea79c9 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_win.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_win.cpp
@@ -122,7 +122,7 @@ DWORD WINAPI RunThread(void *argument) {
OpenThread(THREAD_ALL_ACCESS, FALSE, thread_entry.th32ThreadID);
CHECK(thread);
- if (SuspendThread(thread) == -1) {
+ if (SuspendThread(thread) == (DWORD)-1) {
DWORD last_error = GetLastError();
VPrintf(1, "Could not suspend thread %lu (error %lu)",
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscalls_netbsd.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscalls_netbsd.inc
index c4a9d99fe2f0..4ce5de062756 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscalls_netbsd.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscalls_netbsd.inc
@@ -2255,13 +2255,13 @@ PRE_SYSCALL(getcontext)(void *ucp_) { /* Nothing to do */ }
POST_SYSCALL(getcontext)(long long res, void *ucp_) { /* Nothing to do */ }
PRE_SYSCALL(setcontext)(void *ucp_) {
if (ucp_) {
- PRE_READ(ucp_, ucontext_t_sz);
+ PRE_READ(ucp_, ucontext_t_sz(ucp_));
}
}
POST_SYSCALL(setcontext)(long long res, void *ucp_) {}
PRE_SYSCALL(_lwp_create)(void *ucp_, long long flags_, void *new_lwp_) {
if (ucp_) {
- PRE_READ(ucp_, ucontext_t_sz);
+ PRE_READ(ucp_, ucontext_t_sz(ucp_));
}
}
POST_SYSCALL(_lwp_create)
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
index 9975d78ec0bb..2c7e5c276fa1 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
@@ -86,7 +86,7 @@ class ThreadContextBase {
typedef ThreadContextBase* (*ThreadContextFactory)(u32 tid);
-class MUTEX ThreadRegistry {
+class SANITIZER_MUTEX ThreadRegistry {
public:
ThreadRegistry(ThreadContextFactory factory);
ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
@@ -95,9 +95,9 @@ class MUTEX ThreadRegistry {
uptr *alive = nullptr);
uptr GetMaxAliveThreads();
- void Lock() ACQUIRE() { mtx_.Lock(); }
- void CheckLocked() const CHECK_LOCKED() { mtx_.CheckLocked(); }
- void Unlock() RELEASE() { mtx_.Unlock(); }
+ void Lock() SANITIZER_ACQUIRE() { mtx_.Lock(); }
+ void CheckLocked() const SANITIZER_CHECK_LOCKED() { mtx_.CheckLocked(); }
+ void Unlock() SANITIZER_RELEASE() { mtx_.Unlock(); }
// Should be guarded by ThreadRegistryLock.
ThreadContextBase *GetThreadLocked(u32 tid) {
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h
index 52b25edaa7a3..c34ea804da20 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h
@@ -16,27 +16,34 @@
#define SANITIZER_THREAD_SAFETY_H
#if defined(__clang__)
-# define THREAD_ANNOTATION(x) __attribute__((x))
+# define SANITIZER_THREAD_ANNOTATION(x) __attribute__((x))
#else
-# define THREAD_ANNOTATION(x)
+# define SANITIZER_THREAD_ANNOTATION(x)
#endif
-#define MUTEX THREAD_ANNOTATION(capability("mutex"))
-#define SCOPED_LOCK THREAD_ANNOTATION(scoped_lockable)
-#define GUARDED_BY(x) THREAD_ANNOTATION(guarded_by(x))
-#define PT_GUARDED_BY(x) THREAD_ANNOTATION(pt_guarded_by(x))
-#define REQUIRES(...) THREAD_ANNOTATION(requires_capability(__VA_ARGS__))
-#define REQUIRES_SHARED(...) \
- THREAD_ANNOTATION(requires_shared_capability(__VA_ARGS__))
-#define ACQUIRE(...) THREAD_ANNOTATION(acquire_capability(__VA_ARGS__))
-#define ACQUIRE_SHARED(...) \
- THREAD_ANNOTATION(acquire_shared_capability(__VA_ARGS__))
-#define TRY_ACQUIRE(...) THREAD_ANNOTATION(try_acquire_capability(__VA_ARGS__))
-#define RELEASE(...) THREAD_ANNOTATION(release_capability(__VA_ARGS__))
-#define RELEASE_SHARED(...) \
- THREAD_ANNOTATION(release_shared_capability(__VA_ARGS__))
-#define EXCLUDES(...) THREAD_ANNOTATION(locks_excluded(__VA_ARGS__))
-#define CHECK_LOCKED(...) THREAD_ANNOTATION(assert_capability(__VA_ARGS__))
-#define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION(no_thread_safety_analysis)
+#define SANITIZER_MUTEX SANITIZER_THREAD_ANNOTATION(capability("mutex"))
+#define SANITIZER_SCOPED_LOCK SANITIZER_THREAD_ANNOTATION(scoped_lockable)
+#define SANITIZER_GUARDED_BY(x) SANITIZER_THREAD_ANNOTATION(guarded_by(x))
+#define SANITIZER_PT_GUARDED_BY(x) SANITIZER_THREAD_ANNOTATION(pt_guarded_by(x))
+#define SANITIZER_REQUIRES(...) \
+ SANITIZER_THREAD_ANNOTATION(requires_capability(__VA_ARGS__))
+#define SANITIZER_REQUIRES_SHARED(...) \
+ SANITIZER_THREAD_ANNOTATION(requires_shared_capability(__VA_ARGS__))
+#define SANITIZER_ACQUIRE(...) \
+ SANITIZER_THREAD_ANNOTATION(acquire_capability(__VA_ARGS__))
+#define SANITIZER_ACQUIRE_SHARED(...) \
+ SANITIZER_THREAD_ANNOTATION(acquire_shared_capability(__VA_ARGS__))
+#define SANITIZER_TRY_ACQUIRE(...) \
+ SANITIZER_THREAD_ANNOTATION(try_acquire_capability(__VA_ARGS__))
+#define SANITIZER_RELEASE(...) \
+ SANITIZER_THREAD_ANNOTATION(release_capability(__VA_ARGS__))
+#define SANITIZER_RELEASE_SHARED(...) \
+ SANITIZER_THREAD_ANNOTATION(release_shared_capability(__VA_ARGS__))
+#define SANITIZER_EXCLUDES(...) \
+ SANITIZER_THREAD_ANNOTATION(locks_excluded(__VA_ARGS__))
+#define SANITIZER_CHECK_LOCKED(...) \
+ SANITIZER_THREAD_ANNOTATION(assert_capability(__VA_ARGS__))
+#define SANITIZER_NO_THREAD_SAFETY_ANALYSIS \
+ SANITIZER_THREAD_ANNOTATION(no_thread_safety_analysis)
#endif
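The renamed macros still expand to the same Clang thread-safety attributes under __clang__ and to nothing elsewhere. A hypothetical standalone example, outside compiler-rt, of what those attributes provide when building with clang++ -Wthread-safety:

// Mu and Guarded are illustrative names, not sanitizer types.
struct __attribute__((capability("mutex"))) Mu {
  void Lock() __attribute__((acquire_capability())) {}
  void Unlock() __attribute__((release_capability())) {}
};

struct Guarded {
  Mu mu;
  int data __attribute__((guarded_by(mu))) = 0;

  void Good() {
    mu.Lock();
    data = 1;   // ok: mu is held here
    mu.Unlock();
  }
  void Bad() {
    data = 1;   // flagged by -Wthread-safety: mu is not held
  }
};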
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_unwind_win.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_unwind_win.cpp
index 7e01c81d0422..afcd01dae0b7 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_unwind_win.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_unwind_win.cpp
@@ -57,30 +57,37 @@ void BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) {
InitializeDbgHelpIfNeeded();
size = 0;
-#if defined(_WIN64)
+# if SANITIZER_WINDOWS64
+# if SANITIZER_ARM64
+ int machine_type = IMAGE_FILE_MACHINE_ARM64;
+ stack_frame.AddrPC.Offset = ctx.Pc;
+ stack_frame.AddrFrame.Offset = ctx.Fp;
+ stack_frame.AddrStack.Offset = ctx.Sp;
+# else
int machine_type = IMAGE_FILE_MACHINE_AMD64;
stack_frame.AddrPC.Offset = ctx.Rip;
stack_frame.AddrFrame.Offset = ctx.Rbp;
stack_frame.AddrStack.Offset = ctx.Rsp;
-#else
+# endif
+# else
int machine_type = IMAGE_FILE_MACHINE_I386;
stack_frame.AddrPC.Offset = ctx.Eip;
stack_frame.AddrFrame.Offset = ctx.Ebp;
stack_frame.AddrStack.Offset = ctx.Esp;
-#endif
+# endif
stack_frame.AddrPC.Mode = AddrModeFlat;
stack_frame.AddrFrame.Mode = AddrModeFlat;
stack_frame.AddrStack.Mode = AddrModeFlat;
while (StackWalk64(machine_type, GetCurrentProcess(), GetCurrentThread(),
- &stack_frame, &ctx, NULL, SymFunctionTableAccess64,
- SymGetModuleBase64, NULL) &&
- size < Min(max_depth, kStackTraceMax)) {
+ &stack_frame, &ctx, NULL, SymFunctionTableAccess64,
+ SymGetModuleBase64, NULL) &&
+ size < Min(max_depth, kStackTraceMax)) {
trace_buffer[size++] = (uptr)stack_frame.AddrPC.Offset;
}
}
-#ifdef __clang__
-#pragma clang diagnostic pop
-#endif
-#endif // #if !SANITIZER_GO
+# ifdef __clang__
+# pragma clang diagnostic pop
+# endif
+# endif // #if !SANITIZER_GO
#endif // SANITIZER_WINDOWS
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
index cfe6cc2b394b..87758a4904ab 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
@@ -950,13 +950,18 @@ void SignalContext::InitPcSpBp() {
CONTEXT *context_record = (CONTEXT *)context;
pc = (uptr)exception_record->ExceptionAddress;
-#ifdef _WIN64
+# if SANITIZER_WINDOWS64
+# if SANITIZER_ARM64
+ bp = (uptr)context_record->Fp;
+ sp = (uptr)context_record->Sp;
+# else
bp = (uptr)context_record->Rbp;
sp = (uptr)context_record->Rsp;
-#else
+# endif
+# else
bp = (uptr)context_record->Ebp;
sp = (uptr)context_record->Esp;
-#endif
+# endif
}
uptr SignalContext::GetAddress() const {
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt
index 0bb38ba951a8..071dbbb279c6 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt
@@ -94,6 +94,7 @@ isxdigit U
log10 U
lseek U
lseek64 U
+madvise U
malloc U
mbrlen U
mbrtowc U
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_allocator.cpp
index 172353fadb1f..5b6ac8b35493 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_allocator.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_allocator.cpp
@@ -299,8 +299,9 @@ struct Allocator {
NOINLINE bool isRssLimitExceeded();
// Allocates a chunk.
- void *allocate(uptr Size, uptr Alignment, AllocType Type,
- bool ForceZeroContents = false) NO_THREAD_SAFETY_ANALYSIS {
+ void *
+ allocate(uptr Size, uptr Alignment, AllocType Type,
+ bool ForceZeroContents = false) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
initThreadMaybe();
if (UNLIKELY(Alignment > MaxAlignment)) {
@@ -404,8 +405,8 @@ struct Allocator {
// Place a chunk in the quarantine or directly deallocate it in the event of
// a zero-sized quarantine, or if the size of the chunk is greater than the
// quarantine chunk size threshold.
- void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header,
- uptr Size) NO_THREAD_SAFETY_ANALYSIS {
+ void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header, uptr Size)
+ SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
const bool BypassQuarantine = !Size || (Size > QuarantineChunksUpToSize);
if (BypassQuarantine) {
UnpackedHeader NewHeader = *Header;
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd.h b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd.h
index e1310974db45..eef4a7ba1e65 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd.h
@@ -29,7 +29,7 @@ struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) ScudoTSD {
void init();
void commitBack();
- inline bool tryLock() TRY_ACQUIRE(true, Mutex) {
+ inline bool tryLock() SANITIZER_TRY_ACQUIRE(true, Mutex) {
if (Mutex.TryLock()) {
atomic_store_relaxed(&Precedence, 0);
return true;
@@ -40,12 +40,12 @@ struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) ScudoTSD {
return false;
}
- inline void lock() ACQUIRE(Mutex) {
+ inline void lock() SANITIZER_ACQUIRE(Mutex) {
atomic_store_relaxed(&Precedence, 0);
Mutex.Lock();
}
- inline void unlock() RELEASE(Mutex) { Mutex.Unlock(); }
+ inline void unlock() SANITIZER_RELEASE(Mutex) { Mutex.Unlock(); }
inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc
index 29db8a2eff1a..dc4d982f2fa8 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc
@@ -34,7 +34,7 @@ ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
}
ALWAYS_INLINE ScudoTSD *
-getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
+getTSDAndLock(bool *UnlockRequired) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
if (UNLIKELY(ScudoThreadState != ThreadInitialized)) {
FallbackTSD.lock();
*UnlockRequired = true;
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_shared.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_shared.cpp
index fd85a7c4017f..fc691b21a213 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_shared.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_shared.cpp
@@ -64,7 +64,7 @@ void initThread(bool MinimalInit) {
setCurrentTSD(&TSDs[Index % NumberOfTSDs]);
}
-ScudoTSD *getTSDAndLockSlow(ScudoTSD *TSD) NO_THREAD_SAFETY_ANALYSIS {
+ScudoTSD *getTSDAndLockSlow(ScudoTSD *TSD) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
if (NumberOfTSDs > 1) {
// Use the Precedence of the current TSD as our random seed. Since we are in
// the slow path, it means that tryLock failed, and as a result it's very
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_shared.inc b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_shared.inc
index e46b044a81f8..b25392a9630e 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_shared.inc
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_shared.inc
@@ -42,7 +42,7 @@ ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
ScudoTSD *getTSDAndLockSlow(ScudoTSD *TSD);
ALWAYS_INLINE ScudoTSD *
-getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
+getTSDAndLock(bool *UnlockRequired) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
ScudoTSD *TSD = getCurrentTSD();
DCHECK(TSD && "No TSD associated with the current thread!");
*UnlockRequired = true;
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl-old/tsan_mman.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl-old/tsan_mman.cpp
index 75044c38d5d2..86a3dcd332b2 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl-old/tsan_mman.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl-old/tsan_mman.cpp
@@ -124,13 +124,13 @@ ScopedGlobalProcessor::~ScopedGlobalProcessor() {
gp->mtx.Unlock();
}
-void AllocatorLock() NO_THREAD_SAFETY_ANALYSIS {
+void AllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
global_proc()->mtx.Lock();
global_proc()->internal_alloc_mtx.Lock();
InternalAllocatorLock();
}
-void AllocatorUnlock() NO_THREAD_SAFETY_ANALYSIS {
+void AllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
InternalAllocatorUnlock();
global_proc()->internal_alloc_mtx.Unlock();
global_proc()->mtx.Unlock();
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl-old/tsan_rtl.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl-old/tsan_rtl.cpp
index c14af9788e32..5b46d5f5e2bc 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl-old/tsan_rtl.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl-old/tsan_rtl.cpp
@@ -521,7 +521,7 @@ int Finalize(ThreadState *thr) {
}
#if !SANITIZER_GO
-void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
+void ForkBefore(ThreadState *thr, uptr pc) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
ctx->thread_registry.Lock();
ctx->report_mtx.Lock();
ScopedErrorReportLock::Lock();
@@ -543,7 +543,8 @@ void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
__tsan_test_only_on_fork();
}
-void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
+void ForkParentAfter(ThreadState *thr,
+ uptr pc) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
thr->suppress_reports--; // Enabled in ForkBefore.
thr->ignore_interceptors--;
thr->ignore_reads_and_writes--;
@@ -554,7 +555,7 @@ void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
}
void ForkChildAfter(ThreadState *thr, uptr pc,
- bool start_thread) NO_THREAD_SAFETY_ANALYSIS {
+ bool start_thread) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
thr->suppress_reports--; // Enabled in ForkBefore.
thr->ignore_interceptors--;
thr->ignore_reads_and_writes--;
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
index 256e92de99d7..425d605d95a7 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
@@ -2209,6 +2209,7 @@ void atfork_child() {
FdOnFork(thr, pc);
}
+#if !SANITIZER_IOS
TSAN_INTERCEPTOR(int, vfork, int fake) {
// Some programs (e.g. openjdk) call close for all file descriptors
// in the child process. Under tsan it leads to false positives, because
@@ -2225,6 +2226,7 @@ TSAN_INTERCEPTOR(int, vfork, int fake) {
// Instead we simply turn vfork into fork.
return WRAP(fork)(fake);
}
+#endif
#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, clone, int (*fn)(void *), void *stack, int flags,
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mman.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
index 7a72efb12263..00cc3a306fd3 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
@@ -124,21 +124,21 @@ ScopedGlobalProcessor::~ScopedGlobalProcessor() {
gp->mtx.Unlock();
}
-void AllocatorLock() NO_THREAD_SAFETY_ANALYSIS {
+void AllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
global_proc()->internal_alloc_mtx.Lock();
InternalAllocatorLock();
}
-void AllocatorUnlock() NO_THREAD_SAFETY_ANALYSIS {
+void AllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
InternalAllocatorUnlock();
global_proc()->internal_alloc_mtx.Unlock();
}
-void GlobalProcessorLock() NO_THREAD_SAFETY_ANALYSIS {
+void GlobalProcessorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
global_proc()->mtx.Lock();
}
-void GlobalProcessorUnlock() NO_THREAD_SAFETY_ANALYSIS {
+void GlobalProcessorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
global_proc()->mtx.Unlock();
}
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
index ed60e250cff8..c068d8e486b0 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
@@ -113,7 +113,7 @@ static TracePart* TracePartAlloc(ThreadState* thr) {
return part;
}
-static void TracePartFree(TracePart* part) REQUIRES(ctx->slot_mtx) {
+static void TracePartFree(TracePart* part) SANITIZER_REQUIRES(ctx->slot_mtx) {
DCHECK(part->trace);
part->trace = nullptr;
ctx->trace_part_recycle.PushFront(part);
@@ -208,7 +208,7 @@ static void DoResetImpl(uptr epoch) {
// Clang does not understand locking all slots in the loop:
// error: expecting mutex 'slot.mtx' to be held at start of each loop
-void DoReset(ThreadState* thr, uptr epoch) NO_THREAD_SAFETY_ANALYSIS {
+void DoReset(ThreadState* thr, uptr epoch) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
{
for (auto& slot : ctx->slots) {
slot.mtx.Lock();
@@ -230,7 +230,7 @@ void DoReset(ThreadState* thr, uptr epoch) NO_THREAD_SAFETY_ANALYSIS {
void FlushShadowMemory() { DoReset(nullptr, 0); }
static TidSlot* FindSlotAndLock(ThreadState* thr)
- ACQUIRE(thr->slot->mtx) NO_THREAD_SAFETY_ANALYSIS {
+ SANITIZER_ACQUIRE(thr->slot->mtx) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
CHECK(!thr->slot);
TidSlot* slot = nullptr;
for (;;) {
@@ -334,7 +334,7 @@ void SlotDetach(ThreadState* thr) {
SlotDetachImpl(thr, true);
}
-void SlotLock(ThreadState* thr) NO_THREAD_SAFETY_ANALYSIS {
+void SlotLock(ThreadState* thr) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
DCHECK(!thr->slot_locked);
#if SANITIZER_DEBUG
// Check these mutexes are not locked.
@@ -756,7 +756,7 @@ int Finalize(ThreadState *thr) {
}
#if !SANITIZER_GO
-void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
+void ForkBefore(ThreadState* thr, uptr pc) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
GlobalProcessorLock();
// Detaching from the slot makes OnUserFree skip writing to the shadow.
// The slot will be locked so any attempts to use it will deadlock anyway.
@@ -783,7 +783,7 @@ void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
__tsan_test_only_on_fork();
}
-static void ForkAfter(ThreadState* thr) NO_THREAD_SAFETY_ANALYSIS {
+static void ForkAfter(ThreadState* thr) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
thr->suppress_reports--; // Enabled in ForkBefore.
thr->ignore_interceptors--;
thr->ignore_reads_and_writes--;
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.h
index d06358b462eb..fbf02806e34b 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.h
@@ -332,12 +332,12 @@ struct Context {
Mutex slot_mtx;
uptr global_epoch; // guarded by slot_mtx and by all slot mutexes
bool resetting; // global reset is in progress
- IList<TidSlot, &TidSlot::node> slot_queue GUARDED_BY(slot_mtx);
+ IList<TidSlot, &TidSlot::node> slot_queue SANITIZER_GUARDED_BY(slot_mtx);
IList<TraceHeader, &TraceHeader::global, TracePart> trace_part_recycle
- GUARDED_BY(slot_mtx);
- uptr trace_part_total_allocated GUARDED_BY(slot_mtx);
- uptr trace_part_recycle_finished GUARDED_BY(slot_mtx);
- uptr trace_part_finished_excess GUARDED_BY(slot_mtx);
+ SANITIZER_GUARDED_BY(slot_mtx);
+ uptr trace_part_total_allocated SANITIZER_GUARDED_BY(slot_mtx);
+ uptr trace_part_recycle_finished SANITIZER_GUARDED_BY(slot_mtx);
+ uptr trace_part_finished_excess SANITIZER_GUARDED_BY(slot_mtx);
};
extern Context *ctx; // The one and the only global runtime context.
@@ -566,10 +566,10 @@ uptr ALWAYS_INLINE HeapEnd() {
}
#endif
-void SlotAttachAndLock(ThreadState *thr) ACQUIRE(thr->slot->mtx);
+void SlotAttachAndLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
void SlotDetach(ThreadState *thr);
-void SlotLock(ThreadState *thr) ACQUIRE(thr->slot->mtx);
-void SlotUnlock(ThreadState *thr) RELEASE(thr->slot->mtx);
+void SlotLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
+void SlotUnlock(ThreadState *thr) SANITIZER_RELEASE(thr->slot->mtx);
void DoReset(ThreadState *thr, uptr epoch);
void FlushShadowMemory();
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
index 940c20fcfa1a..e77bfba277a5 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
@@ -156,7 +156,7 @@ ALWAYS_INLINE void StoreShadow(RawShadow* sp, RawShadow s) {
NOINLINE void DoReportRace(ThreadState* thr, RawShadow* shadow_mem, Shadow cur,
Shadow old,
- AccessType typ) NO_THREAD_SAFETY_ANALYSIS {
+ AccessType typ) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
// For the free shadow markers the first element (that contains kFreeSid)
// triggers the race, but the second element contains info about the freeing
// thread, take it.
diff --git a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers_cxx.h b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers_cxx.h
index f7b9fc54f472..fd534c2573f6 100644
--- a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers_cxx.h
+++ b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers_cxx.h
@@ -51,4 +51,4 @@ __ubsan_handle_function_type_mismatch_v1_abort(FunctionTypeMismatchData *Data,
ValueHandle fnRTTI);
}
-#endif // UBSAN_HANDLERS_H
+#endif // UBSAN_HANDLERS_CXX_H
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/adjacent_find.h b/contrib/llvm-project/libcxx/include/__algorithm/adjacent_find.h
index 621ef5f20f82..29ad83f96810 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/adjacent_find.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/adjacent_find.h
@@ -10,8 +10,8 @@
#ifndef _LIBCPP___ALGORITHM_ADJACENT_FIND_H
#define _LIBCPP___ALGORITHM_ADJACENT_FIND_H
-#include <__config>
#include <__algorithm/comp.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/binary_search.h b/contrib/llvm-project/libcxx/include/__algorithm/binary_search.h
index 8fc55b9becb1..2558dd0b27b9 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/binary_search.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/binary_search.h
@@ -9,10 +9,10 @@
#ifndef _LIBCPP___ALGORITHM_BINARY_SEARCH_H
#define _LIBCPP___ALGORITHM_BINARY_SEARCH_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
#include <__algorithm/lower_bound.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/clamp.h b/contrib/llvm-project/libcxx/include/__algorithm/clamp.h
index db28735e97a3..a51c1015be3f 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/clamp.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/clamp.h
@@ -9,9 +9,9 @@
#ifndef _LIBCPP___ALGORITHM_CLAMP_H
#define _LIBCPP___ALGORITHM_CLAMP_H
+#include <__algorithm/comp.h>
#include <__config>
#include <__debug>
-#include <__algorithm/comp.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/copy.h b/contrib/llvm-project/libcxx/include/__algorithm/copy.h
index e7e8b9e51a3e..65f0e0b0ef7d 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/copy.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/copy.h
@@ -9,8 +9,8 @@
#ifndef _LIBCPP___ALGORITHM_COPY_H
#define _LIBCPP___ALGORITHM_COPY_H
-#include <__config>
#include <__algorithm/unwrap_iter.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#include <cstring>
#include <type_traits>
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/copy_backward.h b/contrib/llvm-project/libcxx/include/__algorithm/copy_backward.h
index 4a2f8c0c49cd..ac733290abe4 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/copy_backward.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/copy_backward.h
@@ -9,8 +9,8 @@
#ifndef _LIBCPP___ALGORITHM_COPY_BACKWARD_H
#define _LIBCPP___ALGORITHM_COPY_BACKWARD_H
-#include <__config>
#include <__algorithm/unwrap_iter.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#include <cstring>
#include <type_traits>
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/copy_if.h b/contrib/llvm-project/libcxx/include/__algorithm/copy_if.h
index 230826f63af4..d32514d999b5 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/copy_if.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/copy_if.h
@@ -10,10 +10,6 @@
#define _LIBCPP___ALGORITHM_COPY_IF_H
#include <__config>
-#include <__algorithm/unwrap_iter.h>
-#include <__iterator/iterator_traits.h>
-#include <cstring>
-#include <type_traits>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/copy_n.h b/contrib/llvm-project/libcxx/include/__algorithm/copy_n.h
index 38a84a4105a4..cdcc0d50dbf1 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/copy_n.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/copy_n.h
@@ -9,11 +9,9 @@
#ifndef _LIBCPP___ALGORITHM_COPY_N_H
#define _LIBCPP___ALGORITHM_COPY_N_H
-#include <__config>
#include <__algorithm/copy.h>
-#include <__algorithm/unwrap_iter.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
-#include <cstring>
#include <type_traits>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/equal.h b/contrib/llvm-project/libcxx/include/__algorithm/equal.h
index 0fe1a21fe526..4c9ad05ad6b8 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/equal.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/equal.h
@@ -10,8 +10,8 @@
#ifndef _LIBCPP___ALGORITHM_EQUAL_H
#define _LIBCPP___ALGORITHM_EQUAL_H
-#include <__config>
#include <__algorithm/comp.h>
+#include <__config>
#include <__iterator/distance.h>
#include <__iterator/iterator_traits.h>
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/equal_range.h b/contrib/llvm-project/libcxx/include/__algorithm/equal_range.h
index 679456e27b43..e13f0bdd9695 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/equal_range.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/equal_range.h
@@ -9,12 +9,12 @@
#ifndef _LIBCPP___ALGORITHM_EQUAL_RANGE_H
#define _LIBCPP___ALGORITHM_EQUAL_RANGE_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
#include <__algorithm/half_positive.h>
#include <__algorithm/lower_bound.h>
#include <__algorithm/upper_bound.h>
+#include <__config>
#include <iterator>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/fill.h b/contrib/llvm-project/libcxx/include/__algorithm/fill.h
index 1fad1de993bf..0cb36b02c831 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/fill.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/fill.h
@@ -9,8 +9,8 @@
#ifndef _LIBCPP___ALGORITHM_FILL_H
#define _LIBCPP___ALGORITHM_FILL_H
-#include <__config>
#include <__algorithm/fill_n.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#include <type_traits>
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/find_end.h b/contrib/llvm-project/libcxx/include/__algorithm/find_end.h
index 5d971c57a4e0..dd0f7d7ac03d 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/find_end.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/find_end.h
@@ -10,8 +10,8 @@
#ifndef _LIBCPP___ALGORITHM_FIND_END_OF_H
#define _LIBCPP___ALGORITHM_FIND_END_OF_H
-#include <__config>
#include <__algorithm/comp.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/find_first_of.h b/contrib/llvm-project/libcxx/include/__algorithm/find_first_of.h
index 79a00acb9ee6..69354f61769f 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/find_first_of.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/find_first_of.h
@@ -10,8 +10,8 @@
#ifndef _LIBCPP___ALGORITHM_FIND_FIRST_OF_H
#define _LIBCPP___ALGORITHM_FIND_FIRST_OF_H
-#include <__config>
#include <__algorithm/comp.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/in_in_result.h b/contrib/llvm-project/libcxx/include/__algorithm/in_in_result.h
new file mode 100644
index 000000000000..fbe53ae4f57e
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__algorithm/in_in_result.h
@@ -0,0 +1,45 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___ALGORITHM_IN_IN_RESULT_H
+#define _LIBCPP___ALGORITHM_IN_IN_RESULT_H
+
+#include <__concepts/convertible_to.h>
+#include <__config>
+#include <__utility/move.h>
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#ifndef _LIBCPP_HAS_NO_RANGES
+
+namespace ranges {
+template <class _I1, class _I2>
+struct in_in_result {
+ [[no_unique_address]] _I1 in1;
+ [[no_unique_address]] _I2 in2;
+
+ template <class _II1, class _II2>
+ requires convertible_to<const _I1&, _II1> && convertible_to<const _I2&, _II2>
+ _LIBCPP_HIDE_FROM_ABI constexpr
+ operator in_in_result<_II1, _II2>() const & {
+ return {in1, in2};
+ }
+
+ template <class _II1, class _II2>
+ requires convertible_to<_I1, _II1> && convertible_to<_I2, _II2>
+ _LIBCPP_HIDE_FROM_ABI constexpr
+ operator in_in_result<_II1, _II2>() && { return {_VSTD::move(in1), _VSTD::move(in2)}; }
+};
+} // namespace ranges
+
+#endif // _LIBCPP_HAS_NO_RANGES
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___ALGORITHM_IN_IN_RESULT_H
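
The in_in_result aggregate added above is the return type used by C++20 range algorithms that walk two input ranges (the standard defines ranges::mismatch_result as an alias for it), and the two converting operators let a result bind to any iterator pair its members convert to. A minimal usage sketch, assuming a C++20 build of a library where ranges::mismatch is available and this snapshot's ranges support is enabled (_LIBCPP_HAS_NO_RANGES not defined):

#include <algorithm>
#include <vector>

int main() {
    std::vector<int> a{1, 2, 3, 4};
    std::vector<int> b{1, 2, 9, 4};
    // ranges::mismatch returns ranges::in_in_result via the mismatch_result alias;
    // structured bindings pick up the in1/in2 members declared above.
    auto [it_a, it_b] = std::ranges::mismatch(a, b);
    return static_cast<int>(it_a - a.begin());   // 2: first index where a and b differ
}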
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/in_out_result.h b/contrib/llvm-project/libcxx/include/__algorithm/in_out_result.h
new file mode 100644
index 000000000000..9d971157200f
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__algorithm/in_out_result.h
@@ -0,0 +1,52 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___ALGORITHM_IN_OUT_RESULT_H
+#define _LIBCPP___ALGORITHM_IN_OUT_RESULT_H
+
+#include <__concepts/convertible_to.h>
+#include <__config>
+#include <__utility/move.h>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#if !defined(_LIBCPP_HAS_NO_RANGES)
+namespace ranges {
+
+template<class _InputIterator, class _OutputIterator>
+struct in_out_result {
+ [[no_unique_address]] _InputIterator in;
+ [[no_unique_address]] _OutputIterator out;
+
+ template <class _InputIterator2, class _OutputIterator2>
+ requires convertible_to<const _InputIterator&, _InputIterator2> && convertible_to<const _OutputIterator&,
+ _OutputIterator2>
+ _LIBCPP_HIDE_FROM_ABI
+ constexpr operator in_out_result<_InputIterator2, _OutputIterator2>() const & {
+ return {in, out};
+ }
+
+ template <class _InputIterator2, class _OutputIterator2>
+ requires convertible_to<_InputIterator, _InputIterator2> && convertible_to<_OutputIterator, _OutputIterator2>
+ _LIBCPP_HIDE_FROM_ABI
+ constexpr operator in_out_result<_InputIterator2, _OutputIterator2>() && {
+ return {_VSTD::move(in), _VSTD::move(out)};
+ }
+};
+
+} // namespace ranges
+#endif // !defined(_LIBCPP_HAS_NO_RANGES)
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___ALGORITHM_IN_OUT_RESULT_H
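
in_out_result is the companion aggregate for algorithms that read one range and write through an output iterator; the standard defines copy_result and move_result as aliases for it, so callers get back both the exhausted input iterator and the advanced output iterator. A minimal sketch under the same assumptions (C++20 build with ranges::copy available):

#include <algorithm>
#include <array>
#include <vector>

int main() {
    std::array<int, 3> src{10, 20, 30};
    std::vector<int> dst(5, 0);
    // ranges::copy returns ranges::in_out_result (alias copy_result): in is the end of
    // the consumed input range, out is one past the last element written.
    auto [in, out] = std::ranges::copy(src, dst.begin());
    return static_cast<int>(out - dst.begin());   // 3 elements were written
}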
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/includes.h b/contrib/llvm-project/libcxx/include/__algorithm/includes.h
index 9cc54d938dcf..9d0bc694c0d3 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/includes.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/includes.h
@@ -9,9 +9,9 @@
#ifndef _LIBCPP___ALGORITHM_INCLUDES_H
#define _LIBCPP___ALGORITHM_INCLUDES_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/inplace_merge.h b/contrib/llvm-project/libcxx/include/__algorithm/inplace_merge.h
index c74633a74cf3..e6f1efc011b1 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/inplace_merge.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/inplace_merge.h
@@ -9,14 +9,14 @@
#ifndef _LIBCPP___ALGORITHM_INPLACE_MERGE_H
#define _LIBCPP___ALGORITHM_INPLACE_MERGE_H
-#include <__config>
-#include <__algorithm/comp_ref_type.h>
#include <__algorithm/comp.h>
+#include <__algorithm/comp_ref_type.h>
#include <__algorithm/lower_bound.h>
#include <__algorithm/min.h>
#include <__algorithm/move.h>
#include <__algorithm/rotate.h>
#include <__algorithm/upper_bound.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#include <__utility/swap.h>
#include <memory>
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/is_heap.h b/contrib/llvm-project/libcxx/include/__algorithm/is_heap.h
index 22c27a66d129..925ba8bfb8bd 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/is_heap.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/is_heap.h
@@ -9,10 +9,10 @@
#ifndef _LIBCPP___ALGORITHM_IS_HEAP_H
#define _LIBCPP___ALGORITHM_IS_HEAP_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
#include <__algorithm/is_heap_until.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/is_heap_until.h b/contrib/llvm-project/libcxx/include/__algorithm/is_heap_until.h
index dd8a62f07fd3..aa23b6d039d3 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/is_heap_until.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/is_heap_until.h
@@ -9,9 +9,9 @@
#ifndef _LIBCPP___ALGORITHM_IS_HEAP_UNTIL_H
#define _LIBCPP___ALGORITHM_IS_HEAP_UNTIL_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/is_sorted_until.h b/contrib/llvm-project/libcxx/include/__algorithm/is_sorted_until.h
index 9a7f275c5400..57cad47761d9 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/is_sorted_until.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/is_sorted_until.h
@@ -9,9 +9,9 @@
#ifndef _LIBCPP___ALGORITHM_IS_SORTED_UNTIL_H
#define _LIBCPP___ALGORITHM_IS_SORTED_UNTIL_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/lexicographical_compare.h b/contrib/llvm-project/libcxx/include/__algorithm/lexicographical_compare.h
index a110a58c01c1..55a1da620125 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/lexicographical_compare.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/lexicographical_compare.h
@@ -9,9 +9,9 @@
#ifndef _LIBCPP___ALGORITHM_LEXICOGRAPHICAL_COMPARE_H
#define _LIBCPP___ALGORITHM_LEXICOGRAPHICAL_COMPARE_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/lower_bound.h b/contrib/llvm-project/libcxx/include/__algorithm/lower_bound.h
index ddaecb045b3e..663a0b16228e 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/lower_bound.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/lower_bound.h
@@ -9,9 +9,9 @@
#ifndef _LIBCPP___ALGORITHM_LOWER_BOUND_H
#define _LIBCPP___ALGORITHM_LOWER_BOUND_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/half_positive.h>
+#include <__config>
#include <iterator>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/make_heap.h b/contrib/llvm-project/libcxx/include/__algorithm/make_heap.h
index b3defd4de072..f489addaf5cc 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/make_heap.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/make_heap.h
@@ -9,10 +9,10 @@
#ifndef _LIBCPP___ALGORITHM_MAKE_HEAP_H
#define _LIBCPP___ALGORITHM_MAKE_HEAP_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
#include <__algorithm/sift_down.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
@@ -32,7 +32,7 @@ __make_heap(_RandomAccessIterator __first, _RandomAccessIterator __last, _Compar
// start from the first parent, there is no need to consider children
for (difference_type __start = (__n - 2) / 2; __start >= 0; --__start)
{
- _VSTD::__sift_down<_Compare>(__first, __last, __comp, __n, __first + __start);
+ _VSTD::__sift_down<_Compare>(__first, __comp, __n, __first + __start);
}
}
}
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/max.h b/contrib/llvm-project/libcxx/include/__algorithm/max.h
index 79cbd2be86b6..0bbc971e0a9c 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/max.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/max.h
@@ -9,10 +9,10 @@
#ifndef _LIBCPP___ALGORITHM_MAX_H
#define _LIBCPP___ALGORITHM_MAX_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
#include <__algorithm/max_element.h>
+#include <__config>
#include <initializer_list>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/max_element.h b/contrib/llvm-project/libcxx/include/__algorithm/max_element.h
index f932ca7049fa..db2937260582 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/max_element.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/max_element.h
@@ -9,9 +9,9 @@
#ifndef _LIBCPP___ALGORITHM_MAX_ELEMENT_H
#define _LIBCPP___ALGORITHM_MAX_ELEMENT_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/merge.h b/contrib/llvm-project/libcxx/include/__algorithm/merge.h
index 480380db6caa..91826493771a 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/merge.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/merge.h
@@ -9,10 +9,10 @@
#ifndef _LIBCPP___ALGORITHM_MERGE_H
#define _LIBCPP___ALGORITHM_MERGE_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
#include <__algorithm/copy.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/min.h b/contrib/llvm-project/libcxx/include/__algorithm/min.h
index 5cacb2f28e7e..ed2d3b87828a 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/min.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/min.h
@@ -9,10 +9,10 @@
#ifndef _LIBCPP___ALGORITHM_MIN_H
#define _LIBCPP___ALGORITHM_MIN_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
#include <__algorithm/min_element.h>
+#include <__config>
#include <initializer_list>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/min_element.h b/contrib/llvm-project/libcxx/include/__algorithm/min_element.h
index 3aebebca91ab..407c7f93336d 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/min_element.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/min_element.h
@@ -9,9 +9,9 @@
#ifndef _LIBCPP___ALGORITHM_MIN_ELEMENT_H
#define _LIBCPP___ALGORITHM_MIN_ELEMENT_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/minmax.h b/contrib/llvm-project/libcxx/include/__algorithm/minmax.h
index a96a5b252c09..0bf88a70b8aa 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/minmax.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/minmax.h
@@ -9,12 +9,11 @@
#ifndef _LIBCPP___ALGORITHM_MINMAX_H
#define _LIBCPP___ALGORITHM_MINMAX_H
-#include <__config>
#include <__algorithm/comp.h>
+#include <__config>
#include <initializer_list>
#include <utility>
-
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
#endif
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/minmax_element.h b/contrib/llvm-project/libcxx/include/__algorithm/minmax_element.h
index d21ff6f8dc5a..5d768603843b 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/minmax_element.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/minmax_element.h
@@ -9,8 +9,8 @@
#ifndef _LIBCPP___ALGORITHM_MINMAX_ELEMENT_H
#define _LIBCPP___ALGORITHM_MINMAX_ELEMENT_H
-#include <__config>
#include <__algorithm/comp.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#include <utility>
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/mismatch.h b/contrib/llvm-project/libcxx/include/__algorithm/mismatch.h
index 7a01a985934a..230ade03df19 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/mismatch.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/mismatch.h
@@ -10,8 +10,8 @@
#ifndef _LIBCPP___ALGORITHM_MISMATCH_H
#define _LIBCPP___ALGORITHM_MISMATCH_H
-#include <__config>
#include <__algorithm/comp.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#include <utility>
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/move.h b/contrib/llvm-project/libcxx/include/__algorithm/move.h
index 7430bf087438..fa118f471669 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/move.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/move.h
@@ -9,12 +9,12 @@
#ifndef _LIBCPP___ALGORITHM_MOVE_H
#define _LIBCPP___ALGORITHM_MOVE_H
-#include <__config>
#include <__algorithm/unwrap_iter.h>
+#include <__config>
#include <__utility/move.h>
#include <cstring>
-#include <utility>
#include <type_traits>
+#include <utility>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/move_backward.h b/contrib/llvm-project/libcxx/include/__algorithm/move_backward.h
index ee72d39764ca..a4e3828b6069 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/move_backward.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/move_backward.h
@@ -9,11 +9,11 @@
#ifndef _LIBCPP___ALGORITHM_MOVE_BACKWARD_H
#define _LIBCPP___ALGORITHM_MOVE_BACKWARD_H
-#include <__config>
#include <__algorithm/unwrap_iter.h>
+#include <__config>
#include <cstring>
-#include <utility>
#include <type_traits>
+#include <utility>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/next_permutation.h b/contrib/llvm-project/libcxx/include/__algorithm/next_permutation.h
index 1d71354eb375..eb81cceb7bbc 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/next_permutation.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/next_permutation.h
@@ -9,10 +9,10 @@
#ifndef _LIBCPP___ALGORITHM_NEXT_PERMUTATION_H
#define _LIBCPP___ALGORITHM_NEXT_PERMUTATION_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
#include <__algorithm/reverse.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#include <__utility/swap.h>
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/nth_element.h b/contrib/llvm-project/libcxx/include/__algorithm/nth_element.h
index 63feba1ea616..3afbd6c61846 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/nth_element.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/nth_element.h
@@ -9,10 +9,10 @@
#ifndef _LIBCPP___ALGORITHM_NTH_ELEMENT_H
#define _LIBCPP___ALGORITHM_NTH_ELEMENT_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
#include <__algorithm/sort.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#include <__utility/swap.h>
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/partial_sort.h b/contrib/llvm-project/libcxx/include/__algorithm/partial_sort.h
index 622624ec4f42..a92a7e56610a 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/partial_sort.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/partial_sort.h
@@ -9,12 +9,12 @@
#ifndef _LIBCPP___ALGORITHM_PARTIAL_SORT_H
#define _LIBCPP___ALGORITHM_PARTIAL_SORT_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
#include <__algorithm/make_heap.h>
#include <__algorithm/sift_down.h>
#include <__algorithm/sort_heap.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#include <__utility/swap.h>
@@ -31,8 +31,10 @@ _LIBCPP_BEGIN_NAMESPACE_STD
template <class _Compare, class _RandomAccessIterator>
_LIBCPP_CONSTEXPR_AFTER_CXX17 void
__partial_sort(_RandomAccessIterator __first, _RandomAccessIterator __middle, _RandomAccessIterator __last,
- _Compare __comp)
+ _Compare __comp)
{
+ if (__first == __middle)
+ return;
_VSTD::__make_heap<_Compare>(__first, __middle, __comp);
typename iterator_traits<_RandomAccessIterator>::difference_type __len = __middle - __first;
for (_RandomAccessIterator __i = __middle; __i != __last; ++__i)
@@ -40,7 +42,7 @@ __partial_sort(_RandomAccessIterator __first, _RandomAccessIterator __middle, _R
if (__comp(*__i, *__first))
{
swap(*__i, *__first);
- _VSTD::__sift_down<_Compare>(__first, __middle, __comp, __len, __first);
+ _VSTD::__sift_down<_Compare>(__first, __comp, __len, __first);
}
}
_VSTD::__sort_heap<_Compare>(__first, __middle, __comp);
@@ -64,7 +66,7 @@ void
partial_sort(_RandomAccessIterator __first, _RandomAccessIterator __middle, _RandomAccessIterator __last)
{
_VSTD::partial_sort(__first, __middle, __last,
- __less<typename iterator_traits<_RandomAccessIterator>::value_type>());
+ __less<typename iterator_traits<_RandomAccessIterator>::value_type>());
}
_LIBCPP_END_NAMESPACE_STD
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/partial_sort_copy.h b/contrib/llvm-project/libcxx/include/__algorithm/partial_sort_copy.h
index 4c0c9f5ad04a..67a62bae1f5a 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/partial_sort_copy.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/partial_sort_copy.h
@@ -9,14 +9,13 @@
#ifndef _LIBCPP___ALGORITHM_PARTIAL_SORT_COPY_H
#define _LIBCPP___ALGORITHM_PARTIAL_SORT_COPY_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
#include <__algorithm/make_heap.h>
#include <__algorithm/sift_down.h>
#include <__algorithm/sort_heap.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
-#include <type_traits> // swap
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
@@ -40,7 +39,7 @@ __partial_sort_copy(_InputIterator __first, _InputIterator __last,
if (__comp(*__first, *__result_first))
{
*__result_first = *__first;
- _VSTD::__sift_down<_Compare>(__result_first, __r, __comp, __len, __result_first);
+ _VSTD::__sift_down<_Compare>(__result_first, __comp, __len, __result_first);
}
_VSTD::__sort_heap<_Compare>(__result_first, __r, __comp);
}
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/partition.h b/contrib/llvm-project/libcxx/include/__algorithm/partition.h
index 2614520ccbcf..131c5d3735d5 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/partition.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/partition.h
@@ -12,7 +12,6 @@
#include <__config>
#include <__iterator/iterator_traits.h>
#include <__utility/swap.h>
-#include <utility> // pair
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/partition_point.h b/contrib/llvm-project/libcxx/include/__algorithm/partition_point.h
index 33aaf33d938c..18e6e6f6812f 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/partition_point.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/partition_point.h
@@ -9,8 +9,8 @@
#ifndef _LIBCPP___ALGORITHM_PARTITION_POINT_H
#define _LIBCPP___ALGORITHM_PARTITION_POINT_H
-#include <__config>
#include <__algorithm/half_positive.h>
+#include <__config>
#include <iterator>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/pop_heap.h b/contrib/llvm-project/libcxx/include/__algorithm/pop_heap.h
index e8c801a5c81f..c1cc8016ac91 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/pop_heap.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/pop_heap.h
@@ -9,10 +9,10 @@
#ifndef _LIBCPP___ALGORITHM_POP_HEAP_H
#define _LIBCPP___ALGORITHM_POP_HEAP_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
#include <__algorithm/sift_down.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#include <__utility/swap.h>
@@ -31,7 +31,7 @@ __pop_heap(_RandomAccessIterator __first, _RandomAccessIterator __last, _Compare
if (__len > 1)
{
swap(*__first, *--__last);
- _VSTD::__sift_down<_Compare>(__first, __last, __comp, __len - 1, __first);
+ _VSTD::__sift_down<_Compare>(__first, __comp, __len - 1, __first);
}
}
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/prev_permutation.h b/contrib/llvm-project/libcxx/include/__algorithm/prev_permutation.h
index 12c1816da37e..457c2695b324 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/prev_permutation.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/prev_permutation.h
@@ -9,10 +9,10 @@
#ifndef _LIBCPP___ALGORITHM_PREV_PERMUTATION_H
#define _LIBCPP___ALGORITHM_PREV_PERMUTATION_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
#include <__algorithm/reverse.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#include <__utility/swap.h>
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/push_heap.h b/contrib/llvm-project/libcxx/include/__algorithm/push_heap.h
index 9327fe05b51d..864d419fa203 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/push_heap.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/push_heap.h
@@ -9,9 +9,9 @@
#ifndef _LIBCPP___ALGORITHM_PUSH_HEAP_H
#define _LIBCPP___ALGORITHM_PUSH_HEAP_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#include <__utility/move.h>
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/remove.h b/contrib/llvm-project/libcxx/include/__algorithm/remove.h
index 171d83284a2e..681b9cc768d9 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/remove.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/remove.h
@@ -9,9 +9,9 @@
#ifndef _LIBCPP___ALGORITHM_REMOVE_H
#define _LIBCPP___ALGORITHM_REMOVE_H
-#include <__config>
#include <__algorithm/find.h>
#include <__algorithm/find_if.h>
+#include <__config>
#include <__utility/move.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/remove_if.h b/contrib/llvm-project/libcxx/include/__algorithm/remove_if.h
index 4df36896afd5..36f817cfa6e1 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/remove_if.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/remove_if.h
@@ -9,8 +9,8 @@
#ifndef _LIBCPP___ALGORITHM_REMOVE_IF_H
#define _LIBCPP___ALGORITHM_REMOVE_IF_H
-#include <__config>
#include <__algorithm/find_if.h>
+#include <__config>
#include <utility>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/reverse.h b/contrib/llvm-project/libcxx/include/__algorithm/reverse.h
index 28bd2e84c8ae..1198aeaf41f4 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/reverse.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/reverse.h
@@ -9,8 +9,8 @@
#ifndef _LIBCPP___ALGORITHM_REVERSE_H
#define _LIBCPP___ALGORITHM_REVERSE_H
-#include <__config>
#include <__algorithm/iter_swap.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/rotate_copy.h b/contrib/llvm-project/libcxx/include/__algorithm/rotate_copy.h
index 4c682ef93d5a..f9e644c88d0e 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/rotate_copy.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/rotate_copy.h
@@ -9,10 +9,8 @@
#ifndef _LIBCPP___ALGORITHM_ROTATE_COPY_H
#define _LIBCPP___ALGORITHM_ROTATE_COPY_H
-#include <__config>
#include <__algorithm/copy.h>
-#include <iterator>
-#include <type_traits>
+#include <__config>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/search_n.h b/contrib/llvm-project/libcxx/include/__algorithm/search_n.h
index 67d066aa43d5..e4576cc76ac4 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/search_n.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/search_n.h
@@ -10,8 +10,8 @@
#ifndef _LIBCPP___ALGORITHM_SEARCH_N_H
#define _LIBCPP___ALGORITHM_SEARCH_N_H
-#include <__config>
#include <__algorithm/comp.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#include <type_traits> // __convert_to_integral
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/set_difference.h b/contrib/llvm-project/libcxx/include/__algorithm/set_difference.h
index d4a9750d6dd7..00f61e070b4b 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/set_difference.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/set_difference.h
@@ -9,10 +9,10 @@
#ifndef _LIBCPP___ALGORITHM_SET_DIFFERENCE_H
#define _LIBCPP___ALGORITHM_SET_DIFFERENCE_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
#include <__algorithm/copy.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/set_intersection.h b/contrib/llvm-project/libcxx/include/__algorithm/set_intersection.h
index 518e5e68b39d..f6aa38217d95 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/set_intersection.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/set_intersection.h
@@ -9,9 +9,9 @@
#ifndef _LIBCPP___ALGORITHM_SET_INTERSECTION_H
#define _LIBCPP___ALGORITHM_SET_INTERSECTION_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/set_symmetric_difference.h b/contrib/llvm-project/libcxx/include/__algorithm/set_symmetric_difference.h
index efdf62725709..5b5c2acff773 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/set_symmetric_difference.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/set_symmetric_difference.h
@@ -9,10 +9,10 @@
#ifndef _LIBCPP___ALGORITHM_SET_SYMMETRIC_DIFFERENCE_H
#define _LIBCPP___ALGORITHM_SET_SYMMETRIC_DIFFERENCE_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
#include <__algorithm/copy.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/set_union.h b/contrib/llvm-project/libcxx/include/__algorithm/set_union.h
index 388f037a73a4..5b3e3af79b4f 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/set_union.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/set_union.h
@@ -9,10 +9,10 @@
#ifndef _LIBCPP___ALGORITHM_SET_UNION_H
#define _LIBCPP___ALGORITHM_SET_UNION_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
#include <__algorithm/copy.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/shift_left.h b/contrib/llvm-project/libcxx/include/__algorithm/shift_left.h
index 8d9bc07d2e48..0466a3188a75 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/shift_left.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/shift_left.h
@@ -9,10 +9,10 @@
#ifndef _LIBCPP___ALGORITHM_SHIFT_LEFT_H
#define _LIBCPP___ALGORITHM_SHIFT_LEFT_H
-#include <__config>
#include <__algorithm/move.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
-#include <type_traits> // swap
+#include <type_traits>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/shift_right.h b/contrib/llvm-project/libcxx/include/__algorithm/shift_right.h
index cee17733a6a2..121712e85532 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/shift_right.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/shift_right.h
@@ -9,12 +9,13 @@
#ifndef _LIBCPP___ALGORITHM_SHIFT_RIGHT_H
#define _LIBCPP___ALGORITHM_SHIFT_RIGHT_H
-#include <__config>
#include <__algorithm/move.h>
#include <__algorithm/move_backward.h>
#include <__algorithm/swap_ranges.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
-#include <type_traits> // swap
+#include <__utility/swap.h>
+#include <type_traits>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/sift_down.h b/contrib/llvm-project/libcxx/include/__algorithm/sift_down.h
index 4d99ff237c96..bf5447698cd6 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/sift_down.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/sift_down.h
@@ -21,8 +21,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD
template <class _Compare, class _RandomAccessIterator>
_LIBCPP_CONSTEXPR_AFTER_CXX11 void
-__sift_down(_RandomAccessIterator __first, _RandomAccessIterator /*__last*/,
- _Compare __comp,
+__sift_down(_RandomAccessIterator __first, _Compare __comp,
typename iterator_traits<_RandomAccessIterator>::difference_type __len,
_RandomAccessIterator __start)
{
@@ -46,7 +45,7 @@ __sift_down(_RandomAccessIterator __first, _RandomAccessIterator /*__last*/,
// check if we are in heap-order
if (__comp(*__child_i, *__start))
- // we are, __start is larger than it's largest child
+ // we are, __start is larger than its largest child
return;
value_type __top(_VSTD::move(*__start));
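
The hunks above drop __sift_down's never-used __last parameter (it was already commented out in the signature), and the callers in make_heap, partial_sort, partial_sort_copy and pop_heap are updated to match; the walk down the heap is bounded by __len alone. A standalone sketch of the same sift-down step on a 0-based array, assuming a less-than comparator and a max-heap (not libc++'s hole-based implementation):

#include <cstddef>
#include <utility>
#include <vector>

template <class T, class Compare>
void sift_down(std::vector<T>& v, Compare comp, std::size_t len, std::size_t start) {
    std::size_t parent = start;
    while (2 * parent + 1 < len) {                   // left child exists within len
        std::size_t child = 2 * parent + 1;
        if (child + 1 < len && comp(v[child], v[child + 1]))
            ++child;                                 // pick the larger of the two children
        if (!comp(v[parent], v[child]))
            return;                                  // heap property already holds here
        std::swap(v[parent], v[child]);
        parent = child;
    }
}

Only len is consulted to locate the children, which is why the end iterator could be removed without changing behavior.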
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/sort.h b/contrib/llvm-project/libcxx/include/__algorithm/sort.h
index bc127689a674..5e09b280080a 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/sort.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/sort.h
@@ -9,12 +9,12 @@
#ifndef _LIBCPP___ALGORITHM_SORT_H
#define _LIBCPP___ALGORITHM_SORT_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
#include <__algorithm/min_element.h>
#include <__algorithm/partial_sort.h>
#include <__algorithm/unwrap_iter.h>
+#include <__config>
#include <__utility/swap.h>
#include <memory>
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/sort_heap.h b/contrib/llvm-project/libcxx/include/__algorithm/sort_heap.h
index bf6200c2a08d..64291ff2e8c9 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/sort_heap.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/sort_heap.h
@@ -9,10 +9,10 @@
#ifndef _LIBCPP___ALGORITHM_SORT_HEAP_H
#define _LIBCPP___ALGORITHM_SORT_HEAP_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
#include <__algorithm/pop_heap.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#include <type_traits> // swap
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/stable_partition.h b/contrib/llvm-project/libcxx/include/__algorithm/stable_partition.h
index 323b323c53dd..331f0fde77dc 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/stable_partition.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/stable_partition.h
@@ -9,8 +9,8 @@
#ifndef _LIBCPP___ALGORITHM_STABLE_PARTITION_H
#define _LIBCPP___ALGORITHM_STABLE_PARTITION_H
-#include <__config>
#include <__algorithm/rotate.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#include <__utility/swap.h>
#include <memory>
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/stable_sort.h b/contrib/llvm-project/libcxx/include/__algorithm/stable_sort.h
index 41e17bde99ef..4ae17e0e4d94 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/stable_sort.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/stable_sort.h
@@ -9,15 +9,15 @@
#ifndef _LIBCPP___ALGORITHM_STABLE_SORT_H
#define _LIBCPP___ALGORITHM_STABLE_SORT_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
#include <__algorithm/inplace_merge.h>
#include <__algorithm/sort.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#include <__utility/swap.h>
#include <memory>
-#include <type_traits> // swap
+#include <type_traits>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/unique.h b/contrib/llvm-project/libcxx/include/__algorithm/unique.h
index 62f0490b6d63..e17ff1567fa9 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/unique.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/unique.h
@@ -9,9 +9,9 @@
#ifndef _LIBCPP___ALGORITHM_UNIQUE_H
#define _LIBCPP___ALGORITHM_UNIQUE_H
-#include <__config>
-#include <__algorithm/comp.h>
#include <__algorithm/adjacent_find.h>
+#include <__algorithm/comp.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#include <__utility/move.h>
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/unique_copy.h b/contrib/llvm-project/libcxx/include/__algorithm/unique_copy.h
index 4c916dc3ada2..4833ae9b59f5 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/unique_copy.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/unique_copy.h
@@ -9,8 +9,8 @@
#ifndef _LIBCPP___ALGORITHM_UNIQUE_COPY_H
#define _LIBCPP___ALGORITHM_UNIQUE_COPY_H
-#include <__config>
#include <__algorithm/comp.h>
+#include <__config>
#include <__iterator/iterator_traits.h>
#include <utility>
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/unwrap_iter.h b/contrib/llvm-project/libcxx/include/__algorithm/unwrap_iter.h
index f77ecca6eee6..35765330bb7f 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/unwrap_iter.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/unwrap_iter.h
@@ -10,8 +10,8 @@
#define _LIBCPP___ALGORITHM_UNWRAP_ITER_H
#include <__config>
-#include <iterator>
#include <__memory/pointer_traits.h>
+#include <iterator>
#include <type_traits>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/upper_bound.h b/contrib/llvm-project/libcxx/include/__algorithm/upper_bound.h
index 4ae7b8f9be1f..c064f1e35b9a 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/upper_bound.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/upper_bound.h
@@ -9,9 +9,9 @@
#ifndef _LIBCPP___ALGORITHM_UPPER_BOUND_H
#define _LIBCPP___ALGORITHM_UPPER_BOUND_H
-#include <__config>
#include <__algorithm/comp.h>
#include <__algorithm/half_positive.h>
+#include <__config>
#include <iterator>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__bit_reference b/contrib/llvm-project/libcxx/include/__bit_reference
index a02492c077dd..a60a5c721907 100644
--- a/contrib/llvm-project/libcxx/include/__bit_reference
+++ b/contrib/llvm-project/libcxx/include/__bit_reference
@@ -10,8 +10,8 @@
#ifndef _LIBCPP___BIT_REFERENCE
#define _LIBCPP___BIT_REFERENCE
-#include <__config>
#include <__bits>
+#include <__config>
#include <algorithm>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__chrono/calendar.h b/contrib/llvm-project/libcxx/include/__chrono/calendar.h
new file mode 100644
index 000000000000..745f7f5cf529
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__chrono/calendar.h
@@ -0,0 +1,1276 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___CHRONO_CALENDAR_H
+#define _LIBCPP___CHRONO_CALENDAR_H
+
+#include <__chrono/duration.h>
+#include <__chrono/system_clock.h>
+#include <__chrono/time_point.h>
+#include <__config>
+#include <limits>
+#include <ratio>
+#include <type_traits>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#pragma GCC system_header
+#endif
+
+_LIBCPP_PUSH_MACROS
+#include <__undef_macros>
+
+#if _LIBCPP_STD_VER > 17
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+namespace chrono
+{
+
+struct local_t {};
+template<class Duration>
+using local_time = time_point<local_t, Duration>;
+using local_seconds = local_time<seconds>;
+using local_days = local_time<days>;
+
+struct last_spec { explicit last_spec() = default; };
+
+class day {
+private:
+ unsigned char __d;
+public:
+ day() = default;
+ explicit inline constexpr day(unsigned __val) noexcept : __d(static_cast<unsigned char>(__val)) {}
+ inline constexpr day& operator++() noexcept { ++__d; return *this; }
+ inline constexpr day operator++(int) noexcept { day __tmp = *this; ++(*this); return __tmp; }
+ inline constexpr day& operator--() noexcept { --__d; return *this; }
+ inline constexpr day operator--(int) noexcept { day __tmp = *this; --(*this); return __tmp; }
+ constexpr day& operator+=(const days& __dd) noexcept;
+ constexpr day& operator-=(const days& __dd) noexcept;
+ explicit inline constexpr operator unsigned() const noexcept { return __d; }
+ inline constexpr bool ok() const noexcept { return __d >= 1 && __d <= 31; }
+ };
+
+
+inline constexpr
+bool operator==(const day& __lhs, const day& __rhs) noexcept
+{ return static_cast<unsigned>(__lhs) == static_cast<unsigned>(__rhs); }
+
+inline constexpr
+bool operator!=(const day& __lhs, const day& __rhs) noexcept
+{ return !(__lhs == __rhs); }
+
+inline constexpr
+bool operator< (const day& __lhs, const day& __rhs) noexcept
+{ return static_cast<unsigned>(__lhs) < static_cast<unsigned>(__rhs); }
+
+inline constexpr
+bool operator> (const day& __lhs, const day& __rhs) noexcept
+{ return __rhs < __lhs; }
+
+inline constexpr
+bool operator<=(const day& __lhs, const day& __rhs) noexcept
+{ return !(__rhs < __lhs);}
+
+inline constexpr
+bool operator>=(const day& __lhs, const day& __rhs) noexcept
+{ return !(__lhs < __rhs); }
+
+inline constexpr
+day operator+ (const day& __lhs, const days& __rhs) noexcept
+{ return day(static_cast<unsigned>(__lhs) + __rhs.count()); }
+
+inline constexpr
+day operator+ (const days& __lhs, const day& __rhs) noexcept
+{ return __rhs + __lhs; }
+
+inline constexpr
+day operator- (const day& __lhs, const days& __rhs) noexcept
+{ return __lhs + -__rhs; }
+
+inline constexpr
+days operator-(const day& __lhs, const day& __rhs) noexcept
+{ return days(static_cast<int>(static_cast<unsigned>(__lhs)) -
+ static_cast<int>(static_cast<unsigned>(__rhs))); }
+
+inline constexpr day& day::operator+=(const days& __dd) noexcept
+{ *this = *this + __dd; return *this; }
+
+inline constexpr day& day::operator-=(const days& __dd) noexcept
+{ *this = *this - __dd; return *this; }
+
+
+class month {
+private:
+ unsigned char __m;
+public:
+ month() = default;
+ explicit inline constexpr month(unsigned __val) noexcept : __m(static_cast<unsigned char>(__val)) {}
+ inline constexpr month& operator++() noexcept { ++__m; return *this; }
+ inline constexpr month operator++(int) noexcept { month __tmp = *this; ++(*this); return __tmp; }
+ inline constexpr month& operator--() noexcept { --__m; return *this; }
+ inline constexpr month operator--(int) noexcept { month __tmp = *this; --(*this); return __tmp; }
+ constexpr month& operator+=(const months& __m1) noexcept;
+ constexpr month& operator-=(const months& __m1) noexcept;
+ explicit inline constexpr operator unsigned() const noexcept { return __m; }
+ inline constexpr bool ok() const noexcept { return __m >= 1 && __m <= 12; }
+};
+
+
+inline constexpr
+bool operator==(const month& __lhs, const month& __rhs) noexcept
+{ return static_cast<unsigned>(__lhs) == static_cast<unsigned>(__rhs); }
+
+inline constexpr
+bool operator!=(const month& __lhs, const month& __rhs) noexcept
+{ return !(__lhs == __rhs); }
+
+inline constexpr
+bool operator< (const month& __lhs, const month& __rhs) noexcept
+{ return static_cast<unsigned>(__lhs) < static_cast<unsigned>(__rhs); }
+
+inline constexpr
+bool operator> (const month& __lhs, const month& __rhs) noexcept
+{ return __rhs < __lhs; }
+
+inline constexpr
+bool operator<=(const month& __lhs, const month& __rhs) noexcept
+{ return !(__rhs < __lhs); }
+
+inline constexpr
+bool operator>=(const month& __lhs, const month& __rhs) noexcept
+{ return !(__lhs < __rhs); }
+
+inline constexpr
+month operator+ (const month& __lhs, const months& __rhs) noexcept
+{
+ auto const __mu = static_cast<long long>(static_cast<unsigned>(__lhs)) + (__rhs.count() - 1);
+ auto const __yr = (__mu >= 0 ? __mu : __mu - 11) / 12;
+ return month{static_cast<unsigned>(__mu - __yr * 12 + 1)};
+}
+
+inline constexpr
+month operator+ (const months& __lhs, const month& __rhs) noexcept
+{ return __rhs + __lhs; }
+
+inline constexpr
+month operator- (const month& __lhs, const months& __rhs) noexcept
+{ return __lhs + -__rhs; }
+
+inline constexpr
+months operator-(const month& __lhs, const month& __rhs) noexcept
+{
+ auto const __dm = static_cast<unsigned>(__lhs) - static_cast<unsigned>(__rhs);
+ return months(__dm <= 11 ? __dm : __dm + 12);
+}
+
+inline constexpr month& month::operator+=(const months& __dm) noexcept
+{ *this = *this + __dm; return *this; }
+
+inline constexpr month& month::operator-=(const months& __dm) noexcept
+{ *this = *this - __dm; return *this; }
+
+
+class year {
+private:
+ short __y;
+public:
+ year() = default;
+ explicit inline constexpr year(int __val) noexcept : __y(static_cast<short>(__val)) {}
+
+ inline constexpr year& operator++() noexcept { ++__y; return *this; }
+ inline constexpr year operator++(int) noexcept { year __tmp = *this; ++(*this); return __tmp; }
+ inline constexpr year& operator--() noexcept { --__y; return *this; }
+ inline constexpr year operator--(int) noexcept { year __tmp = *this; --(*this); return __tmp; }
+ constexpr year& operator+=(const years& __dy) noexcept;
+ constexpr year& operator-=(const years& __dy) noexcept;
+ inline constexpr year operator+() const noexcept { return *this; }
+ inline constexpr year operator-() const noexcept { return year{-__y}; }
+
+ inline constexpr bool is_leap() const noexcept { return __y % 4 == 0 && (__y % 100 != 0 || __y % 400 == 0); }
+ explicit inline constexpr operator int() const noexcept { return __y; }
+ constexpr bool ok() const noexcept;
+ static inline constexpr year min() noexcept { return year{-32767}; }
+ static inline constexpr year max() noexcept { return year{ 32767}; }
+};
+
+
+inline constexpr
+bool operator==(const year& __lhs, const year& __rhs) noexcept
+{ return static_cast<int>(__lhs) == static_cast<int>(__rhs); }
+
+inline constexpr
+bool operator!=(const year& __lhs, const year& __rhs) noexcept
+{ return !(__lhs == __rhs); }
+
+inline constexpr
+bool operator< (const year& __lhs, const year& __rhs) noexcept
+{ return static_cast<int>(__lhs) < static_cast<int>(__rhs); }
+
+inline constexpr
+bool operator> (const year& __lhs, const year& __rhs) noexcept
+{ return __rhs < __lhs; }
+
+inline constexpr
+bool operator<=(const year& __lhs, const year& __rhs) noexcept
+{ return !(__rhs < __lhs); }
+
+inline constexpr
+bool operator>=(const year& __lhs, const year& __rhs) noexcept
+{ return !(__lhs < __rhs); }
+
+inline constexpr
+year operator+ (const year& __lhs, const years& __rhs) noexcept
+{ return year(static_cast<int>(__lhs) + __rhs.count()); }
+
+inline constexpr
+year operator+ (const years& __lhs, const year& __rhs) noexcept
+{ return __rhs + __lhs; }
+
+inline constexpr
+year operator- (const year& __lhs, const years& __rhs) noexcept
+{ return __lhs + -__rhs; }
+
+inline constexpr
+years operator-(const year& __lhs, const year& __rhs) noexcept
+{ return years{static_cast<int>(__lhs) - static_cast<int>(__rhs)}; }
+
+
+inline constexpr year& year::operator+=(const years& __dy) noexcept
+{ *this = *this + __dy; return *this; }
+
+inline constexpr year& year::operator-=(const years& __dy) noexcept
+{ *this = *this - __dy; return *this; }
+
+inline constexpr bool year::ok() const noexcept
+{ return static_cast<int>(min()) <= __y && __y <= static_cast<int>(max()); }
+
+class weekday_indexed;
+class weekday_last;
+
+class weekday {
+private:
+ unsigned char __wd;
+ static constexpr unsigned char __weekday_from_days(int __days) noexcept;
+public:
+ weekday() = default;
+ inline explicit constexpr weekday(unsigned __val) noexcept : __wd(static_cast<unsigned char>(__val == 7 ? 0 : __val)) {}
+ inline constexpr weekday(const sys_days& __sysd) noexcept
+ : __wd(__weekday_from_days(__sysd.time_since_epoch().count())) {}
+ inline explicit constexpr weekday(const local_days& __locd) noexcept
+ : __wd(__weekday_from_days(__locd.time_since_epoch().count())) {}
+
+ inline constexpr weekday& operator++() noexcept { __wd = (__wd == 6 ? 0 : __wd + 1); return *this; }
+ inline constexpr weekday operator++(int) noexcept { weekday __tmp = *this; ++(*this); return __tmp; }
+ inline constexpr weekday& operator--() noexcept { __wd = (__wd == 0 ? 6 : __wd - 1); return *this; }
+ inline constexpr weekday operator--(int) noexcept { weekday __tmp = *this; --(*this); return __tmp; }
+ constexpr weekday& operator+=(const days& __dd) noexcept;
+ constexpr weekday& operator-=(const days& __dd) noexcept;
+ inline constexpr unsigned c_encoding() const noexcept { return __wd; }
+ inline constexpr unsigned iso_encoding() const noexcept { return __wd == 0u ? 7 : __wd; }
+ inline constexpr bool ok() const noexcept { return __wd <= 6; }
+ constexpr weekday_indexed operator[](unsigned __index) const noexcept;
+ constexpr weekday_last operator[](last_spec) const noexcept;
+};
+
+
+// https://howardhinnant.github.io/date_algorithms.html#weekday_from_days
+inline constexpr
+unsigned char weekday::__weekday_from_days(int __days) noexcept
+{
+ return static_cast<unsigned char>(
+ static_cast<unsigned>(__days >= -4 ? (__days+4) % 7 : (__days+5) % 7 + 6)
+ );
+}
+
+inline constexpr
+bool operator==(const weekday& __lhs, const weekday& __rhs) noexcept
+{ return __lhs.c_encoding() == __rhs.c_encoding(); }
+
+inline constexpr
+bool operator!=(const weekday& __lhs, const weekday& __rhs) noexcept
+{ return !(__lhs == __rhs); }
+
+inline constexpr
+bool operator< (const weekday& __lhs, const weekday& __rhs) noexcept
+{ return __lhs.c_encoding() < __rhs.c_encoding(); }
+
+inline constexpr
+bool operator> (const weekday& __lhs, const weekday& __rhs) noexcept
+{ return __rhs < __lhs; }
+
+inline constexpr
+bool operator<=(const weekday& __lhs, const weekday& __rhs) noexcept
+{ return !(__rhs < __lhs);}
+
+inline constexpr
+bool operator>=(const weekday& __lhs, const weekday& __rhs) noexcept
+{ return !(__lhs < __rhs); }
+
+constexpr weekday operator+(const weekday& __lhs, const days& __rhs) noexcept
+{
+ auto const __mu = static_cast<long long>(__lhs.c_encoding()) + __rhs.count();
+ auto const __yr = (__mu >= 0 ? __mu : __mu - 6) / 7;
+ return weekday{static_cast<unsigned>(__mu - __yr * 7)};
+}
+
+constexpr weekday operator+(const days& __lhs, const weekday& __rhs) noexcept
+{ return __rhs + __lhs; }
+
+constexpr weekday operator-(const weekday& __lhs, const days& __rhs) noexcept
+{ return __lhs + -__rhs; }
+
+constexpr days operator-(const weekday& __lhs, const weekday& __rhs) noexcept
+{
+ const int __wdu = __lhs.c_encoding() - __rhs.c_encoding();
+ const int __wk = (__wdu >= 0 ? __wdu : __wdu-6) / 7;
+ return days{__wdu - __wk * 7};
+}
+
+inline constexpr weekday& weekday::operator+=(const days& __dd) noexcept
+{ *this = *this + __dd; return *this; }
+
+inline constexpr weekday& weekday::operator-=(const days& __dd) noexcept
+{ *this = *this - __dd; return *this; }
+
+
+class weekday_indexed {
+private:
+ chrono::weekday __wd;
+ unsigned char __idx;
+public:
+ weekday_indexed() = default;
+ inline constexpr weekday_indexed(const chrono::weekday& __wdval, unsigned __idxval) noexcept
+ : __wd{__wdval}, __idx(__idxval) {}
+ inline constexpr chrono::weekday weekday() const noexcept { return __wd; }
+ inline constexpr unsigned index() const noexcept { return __idx; }
+ inline constexpr bool ok() const noexcept { return __wd.ok() && __idx >= 1 && __idx <= 5; }
+};
+
+inline constexpr
+bool operator==(const weekday_indexed& __lhs, const weekday_indexed& __rhs) noexcept
+{ return __lhs.weekday() == __rhs.weekday() && __lhs.index() == __rhs.index(); }
+
+inline constexpr
+bool operator!=(const weekday_indexed& __lhs, const weekday_indexed& __rhs) noexcept
+{ return !(__lhs == __rhs); }
+
+
+class weekday_last {
+private:
+ chrono::weekday __wd;
+public:
+ explicit constexpr weekday_last(const chrono::weekday& __val) noexcept
+ : __wd{__val} {}
+ constexpr chrono::weekday weekday() const noexcept { return __wd; }
+ constexpr bool ok() const noexcept { return __wd.ok(); }
+};
+
+inline constexpr
+bool operator==(const weekday_last& __lhs, const weekday_last& __rhs) noexcept
+{ return __lhs.weekday() == __rhs.weekday(); }
+
+inline constexpr
+bool operator!=(const weekday_last& __lhs, const weekday_last& __rhs) noexcept
+{ return !(__lhs == __rhs); }
+
+inline constexpr
+weekday_indexed weekday::operator[](unsigned __index) const noexcept { return weekday_indexed{*this, __index}; }
+
+inline constexpr
+weekday_last weekday::operator[](last_spec) const noexcept { return weekday_last{*this}; }
+
+
+inline constexpr last_spec last{};
+inline constexpr weekday Sunday{0};
+inline constexpr weekday Monday{1};
+inline constexpr weekday Tuesday{2};
+inline constexpr weekday Wednesday{3};
+inline constexpr weekday Thursday{4};
+inline constexpr weekday Friday{5};
+inline constexpr weekday Saturday{6};
+
+inline constexpr month January{1};
+inline constexpr month February{2};
+inline constexpr month March{3};
+inline constexpr month April{4};
+inline constexpr month May{5};
+inline constexpr month June{6};
+inline constexpr month July{7};
+inline constexpr month August{8};
+inline constexpr month September{9};
+inline constexpr month October{10};
+inline constexpr month November{11};
+inline constexpr month December{12};
+
+
+class month_day {
+private:
+ chrono::month __m;
+ chrono::day __d;
+public:
+ month_day() = default;
+ constexpr month_day(const chrono::month& __mval, const chrono::day& __dval) noexcept
+ : __m{__mval}, __d{__dval} {}
+ inline constexpr chrono::month month() const noexcept { return __m; }
+ inline constexpr chrono::day day() const noexcept { return __d; }
+ constexpr bool ok() const noexcept;
+};
+
+inline constexpr
+bool month_day::ok() const noexcept
+{
+ if (!__m.ok()) return false;
+ const unsigned __dval = static_cast<unsigned>(__d);
+ if (__dval < 1 || __dval > 31) return false;
+ if (__dval <= 29) return true;
+// Now we've got either 30 or 31
+ const unsigned __mval = static_cast<unsigned>(__m);
+ if (__mval == 2) return false;
+ if (__mval == 4 || __mval == 6 || __mval == 9 || __mval == 11)
+ return __dval == 30;
+ return true;
+}
+
+inline constexpr
+bool operator==(const month_day& __lhs, const month_day& __rhs) noexcept
+{ return __lhs.month() == __rhs.month() && __lhs.day() == __rhs.day(); }
+
+inline constexpr
+bool operator!=(const month_day& __lhs, const month_day& __rhs) noexcept
+{ return !(__lhs == __rhs); }
+
+inline constexpr
+month_day operator/(const month& __lhs, const day& __rhs) noexcept
+{ return month_day{__lhs, __rhs}; }
+
+constexpr
+month_day operator/(const day& __lhs, const month& __rhs) noexcept
+{ return __rhs / __lhs; }
+
+inline constexpr
+month_day operator/(const month& __lhs, int __rhs) noexcept
+{ return __lhs / day(__rhs); }
+
+constexpr
+month_day operator/(int __lhs, const day& __rhs) noexcept
+{ return month(__lhs) / __rhs; }
+
+constexpr
+month_day operator/(const day& __lhs, int __rhs) noexcept
+{ return month(__rhs) / __lhs; }
+
+
+inline constexpr
+bool operator< (const month_day& __lhs, const month_day& __rhs) noexcept
+{ return __lhs.month() != __rhs.month() ? __lhs.month() < __rhs.month() : __lhs.day() < __rhs.day(); }
+
+inline constexpr
+bool operator> (const month_day& __lhs, const month_day& __rhs) noexcept
+{ return __rhs < __lhs; }
+
+inline constexpr
+bool operator<=(const month_day& __lhs, const month_day& __rhs) noexcept
+{ return !(__rhs < __lhs);}
+
+inline constexpr
+bool operator>=(const month_day& __lhs, const month_day& __rhs) noexcept
+{ return !(__lhs < __rhs); }
+
+
+
+class month_day_last {
+private:
+ chrono::month __m;
+public:
+ explicit constexpr month_day_last(const chrono::month& __val) noexcept
+ : __m{__val} {}
+ inline constexpr chrono::month month() const noexcept { return __m; }
+ inline constexpr bool ok() const noexcept { return __m.ok(); }
+};
+
+inline constexpr
+bool operator==(const month_day_last& __lhs, const month_day_last& __rhs) noexcept
+{ return __lhs.month() == __rhs.month(); }
+
+inline constexpr
+bool operator!=(const month_day_last& __lhs, const month_day_last& __rhs) noexcept
+{ return !(__lhs == __rhs); }
+
+inline constexpr
+bool operator< (const month_day_last& __lhs, const month_day_last& __rhs) noexcept
+{ return __lhs.month() < __rhs.month(); }
+
+inline constexpr
+bool operator> (const month_day_last& __lhs, const month_day_last& __rhs) noexcept
+{ return __rhs < __lhs; }
+
+inline constexpr
+bool operator<=(const month_day_last& __lhs, const month_day_last& __rhs) noexcept
+{ return !(__rhs < __lhs);}
+
+inline constexpr
+bool operator>=(const month_day_last& __lhs, const month_day_last& __rhs) noexcept
+{ return !(__lhs < __rhs); }
+
+inline constexpr
+month_day_last operator/(const month& __lhs, last_spec) noexcept
+{ return month_day_last{__lhs}; }
+
+inline constexpr
+month_day_last operator/(last_spec, const month& __rhs) noexcept
+{ return month_day_last{__rhs}; }
+
+inline constexpr
+month_day_last operator/(int __lhs, last_spec) noexcept
+{ return month_day_last{month(__lhs)}; }
+
+inline constexpr
+month_day_last operator/(last_spec, int __rhs) noexcept
+{ return month_day_last{month(__rhs)}; }
+
+
+class month_weekday {
+private:
+ chrono::month __m;
+ chrono::weekday_indexed __wdi;
+public:
+ constexpr month_weekday(const chrono::month& __mval, const chrono::weekday_indexed& __wdival) noexcept
+ : __m{__mval}, __wdi{__wdival} {}
+ inline constexpr chrono::month month() const noexcept { return __m; }
+ inline constexpr chrono::weekday_indexed weekday_indexed() const noexcept { return __wdi; }
+ inline constexpr bool ok() const noexcept { return __m.ok() && __wdi.ok(); }
+};
+
+inline constexpr
+bool operator==(const month_weekday& __lhs, const month_weekday& __rhs) noexcept
+{ return __lhs.month() == __rhs.month() && __lhs.weekday_indexed() == __rhs.weekday_indexed(); }
+
+inline constexpr
+bool operator!=(const month_weekday& __lhs, const month_weekday& __rhs) noexcept
+{ return !(__lhs == __rhs); }
+
+inline constexpr
+month_weekday operator/(const month& __lhs, const weekday_indexed& __rhs) noexcept
+{ return month_weekday{__lhs, __rhs}; }
+
+inline constexpr
+month_weekday operator/(int __lhs, const weekday_indexed& __rhs) noexcept
+{ return month_weekday{month(__lhs), __rhs}; }
+
+inline constexpr
+month_weekday operator/(const weekday_indexed& __lhs, const month& __rhs) noexcept
+{ return month_weekday{__rhs, __lhs}; }
+
+inline constexpr
+month_weekday operator/(const weekday_indexed& __lhs, int __rhs) noexcept
+{ return month_weekday{month(__rhs), __lhs}; }
+
+
+class month_weekday_last {
+ chrono::month __m;
+ chrono::weekday_last __wdl;
+ public:
+ constexpr month_weekday_last(const chrono::month& __mval, const chrono::weekday_last& __wdlval) noexcept
+ : __m{__mval}, __wdl{__wdlval} {}
+ inline constexpr chrono::month month() const noexcept { return __m; }
+ inline constexpr chrono::weekday_last weekday_last() const noexcept { return __wdl; }
+ inline constexpr bool ok() const noexcept { return __m.ok() && __wdl.ok(); }
+};
+
+inline constexpr
+bool operator==(const month_weekday_last& __lhs, const month_weekday_last& __rhs) noexcept
+{ return __lhs.month() == __rhs.month() && __lhs.weekday_last() == __rhs.weekday_last(); }
+
+inline constexpr
+bool operator!=(const month_weekday_last& __lhs, const month_weekday_last& __rhs) noexcept
+{ return !(__lhs == __rhs); }
+
+
+inline constexpr
+month_weekday_last operator/(const month& __lhs, const weekday_last& __rhs) noexcept
+{ return month_weekday_last{__lhs, __rhs}; }
+
+inline constexpr
+month_weekday_last operator/(int __lhs, const weekday_last& __rhs) noexcept
+{ return month_weekday_last{month(__lhs), __rhs}; }
+
+inline constexpr
+month_weekday_last operator/(const weekday_last& __lhs, const month& __rhs) noexcept
+{ return month_weekday_last{__rhs, __lhs}; }
+
+inline constexpr
+month_weekday_last operator/(const weekday_last& __lhs, int __rhs) noexcept
+{ return month_weekday_last{month(__rhs), __lhs}; }
+
+
+class year_month {
+ chrono::year __y;
+ chrono::month __m;
+public:
+ year_month() = default;
+ constexpr year_month(const chrono::year& __yval, const chrono::month& __mval) noexcept
+ : __y{__yval}, __m{__mval} {}
+ inline constexpr chrono::year year() const noexcept { return __y; }
+ inline constexpr chrono::month month() const noexcept { return __m; }
+ inline constexpr year_month& operator+=(const months& __dm) noexcept { this->__m += __dm; return *this; }
+ inline constexpr year_month& operator-=(const months& __dm) noexcept { this->__m -= __dm; return *this; }
+ inline constexpr year_month& operator+=(const years& __dy) noexcept { this->__y += __dy; return *this; }
+ inline constexpr year_month& operator-=(const years& __dy) noexcept { this->__y -= __dy; return *this; }
+ inline constexpr bool ok() const noexcept { return __y.ok() && __m.ok(); }
+};
+
+inline constexpr
+year_month operator/(const year& __y, const month& __m) noexcept { return year_month{__y, __m}; }
+
+inline constexpr
+year_month operator/(const year& __y, int __m) noexcept { return year_month{__y, month(__m)}; }
+
+inline constexpr
+bool operator==(const year_month& __lhs, const year_month& __rhs) noexcept
+{ return __lhs.year() == __rhs.year() && __lhs.month() == __rhs.month(); }
+
+inline constexpr
+bool operator!=(const year_month& __lhs, const year_month& __rhs) noexcept
+{ return !(__lhs == __rhs); }
+
+inline constexpr
+bool operator< (const year_month& __lhs, const year_month& __rhs) noexcept
+{ return __lhs.year() != __rhs.year() ? __lhs.year() < __rhs.year() : __lhs.month() < __rhs.month(); }
+
+inline constexpr
+bool operator> (const year_month& __lhs, const year_month& __rhs) noexcept
+{ return __rhs < __lhs; }
+
+inline constexpr
+bool operator<=(const year_month& __lhs, const year_month& __rhs) noexcept
+{ return !(__rhs < __lhs);}
+
+inline constexpr
+bool operator>=(const year_month& __lhs, const year_month& __rhs) noexcept
+{ return !(__lhs < __rhs); }
+
+constexpr year_month operator+(const year_month& __lhs, const months& __rhs) noexcept
+{
+ int __dmi = static_cast<int>(static_cast<unsigned>(__lhs.month())) - 1 + __rhs.count();
+ const int __dy = (__dmi >= 0 ? __dmi : __dmi-11) / 12;
+ __dmi = __dmi - __dy * 12 + 1;
+ return (__lhs.year() + years(__dy)) / month(static_cast<unsigned>(__dmi));
+}
+
+constexpr year_month operator+(const months& __lhs, const year_month& __rhs) noexcept
+{ return __rhs + __lhs; }
+
+constexpr year_month operator+(const year_month& __lhs, const years& __rhs) noexcept
+{ return (__lhs.year() + __rhs) / __lhs.month(); }
+
+constexpr year_month operator+(const years& __lhs, const year_month& __rhs) noexcept
+{ return __rhs + __lhs; }
+
+constexpr months operator-(const year_month& __lhs, const year_month& __rhs) noexcept
+{ return (__lhs.year() - __rhs.year()) + months(static_cast<unsigned>(__lhs.month()) - static_cast<unsigned>(__rhs.month())); }
+
+constexpr year_month operator-(const year_month& __lhs, const months& __rhs) noexcept
+{ return __lhs + -__rhs; }
+
+constexpr year_month operator-(const year_month& __lhs, const years& __rhs) noexcept
+{ return __lhs + -__rhs; }
+
+class year_month_day_last;
+
+class year_month_day {
+private:
+ chrono::year __y;
+ chrono::month __m;
+ chrono::day __d;
+public:
+ year_month_day() = default;
+ inline constexpr year_month_day(
+ const chrono::year& __yval, const chrono::month& __mval, const chrono::day& __dval) noexcept
+ : __y{__yval}, __m{__mval}, __d{__dval} {}
+ constexpr year_month_day(const year_month_day_last& __ymdl) noexcept;
+ inline constexpr year_month_day(const sys_days& __sysd) noexcept
+ : year_month_day(__from_days(__sysd.time_since_epoch())) {}
+ inline explicit constexpr year_month_day(const local_days& __locd) noexcept
+ : year_month_day(__from_days(__locd.time_since_epoch())) {}
+
+ constexpr year_month_day& operator+=(const months& __dm) noexcept;
+ constexpr year_month_day& operator-=(const months& __dm) noexcept;
+ constexpr year_month_day& operator+=(const years& __dy) noexcept;
+ constexpr year_month_day& operator-=(const years& __dy) noexcept;
+
+ inline constexpr chrono::year year() const noexcept { return __y; }
+ inline constexpr chrono::month month() const noexcept { return __m; }
+ inline constexpr chrono::day day() const noexcept { return __d; }
+ inline constexpr operator sys_days() const noexcept { return sys_days{__to_days()}; }
+ inline explicit constexpr operator local_days() const noexcept { return local_days{__to_days()}; }
+
+ constexpr bool ok() const noexcept;
+
+ static constexpr year_month_day __from_days(days __d) noexcept;
+ constexpr days __to_days() const noexcept;
+};
+
+
+// https://howardhinnant.github.io/date_algorithms.html#civil_from_days
+inline constexpr
+year_month_day
+year_month_day::__from_days(days __d) noexcept
+{
+ static_assert(numeric_limits<unsigned>::digits >= 18, "");
+ static_assert(numeric_limits<int>::digits >= 20 , "");
+ const int __z = __d.count() + 719468;
+ const int __era = (__z >= 0 ? __z : __z - 146096) / 146097;
+ const unsigned __doe = static_cast<unsigned>(__z - __era * 146097); // [0, 146096]
+ const unsigned __yoe = (__doe - __doe/1460 + __doe/36524 - __doe/146096) / 365; // [0, 399]
+ const int __yr = static_cast<int>(__yoe) + __era * 400;
+ const unsigned __doy = __doe - (365 * __yoe + __yoe/4 - __yoe/100); // [0, 365]
+ const unsigned __mp = (5 * __doy + 2)/153; // [0, 11]
+ const unsigned __dy = __doy - (153 * __mp + 2)/5 + 1; // [1, 31]
+ const unsigned __mth = __mp + (__mp < 10 ? 3 : -9); // [1, 12]
+ return year_month_day{chrono::year{__yr + (__mth <= 2)}, chrono::month{__mth}, chrono::day{__dy}};
+}
+
+// https://howardhinnant.github.io/date_algorithms.html#days_from_civil
+inline constexpr days year_month_day::__to_days() const noexcept
+{
+ static_assert(numeric_limits<unsigned>::digits >= 18, "");
+ static_assert(numeric_limits<int>::digits >= 20 , "");
+
+ const int __yr = static_cast<int>(__y) - (__m <= February);
+ const unsigned __mth = static_cast<unsigned>(__m);
+ const unsigned __dy = static_cast<unsigned>(__d);
+
+ const int __era = (__yr >= 0 ? __yr : __yr - 399) / 400;
+ const unsigned __yoe = static_cast<unsigned>(__yr - __era * 400); // [0, 399]
+ const unsigned __doy = (153 * (__mth + (__mth > 2 ? -3 : 9)) + 2) / 5 + __dy-1; // [0, 365]
+ const unsigned __doe = __yoe * 365 + __yoe/4 - __yoe/100 + __doy; // [0, 146096]
+ return days{__era * 146097 + static_cast<int>(__doe) - 719468};
+}
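
A quick sanity check of the two Hinnant algorithms above, written against the public std::chrono names rather than the internal __from_days/__to_days helpers; this sketch is not part of the diff and assumes a C++20 <chrono>:

#include <chrono>

// The Unix epoch is day 0 of the sys_days serial count, and the two
// algorithms are inverses of each other.
constexpr std::chrono::year_month_day epoch = std::chrono::year{1970}/std::chrono::January/1;
constexpr std::chrono::sys_days epoch_days = epoch;                    // goes through __to_days()
static_assert(epoch_days.time_since_epoch() == std::chrono::days{0});
static_assert(std::chrono::year_month_day{epoch_days} == epoch);       // goes through __from_days()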
+
+inline constexpr
+bool operator==(const year_month_day& __lhs, const year_month_day& __rhs) noexcept
+{ return __lhs.year() == __rhs.year() && __lhs.month() == __rhs.month() && __lhs.day() == __rhs.day(); }
+
+inline constexpr
+bool operator!=(const year_month_day& __lhs, const year_month_day& __rhs) noexcept
+{ return !(__lhs == __rhs); }
+
+inline constexpr
+bool operator< (const year_month_day& __lhs, const year_month_day& __rhs) noexcept
+{
+ if (__lhs.year() < __rhs.year()) return true;
+ if (__lhs.year() > __rhs.year()) return false;
+ if (__lhs.month() < __rhs.month()) return true;
+ if (__lhs.month() > __rhs.month()) return false;
+ return __lhs.day() < __rhs.day();
+}
+
+inline constexpr
+bool operator> (const year_month_day& __lhs, const year_month_day& __rhs) noexcept
+{ return __rhs < __lhs; }
+
+inline constexpr
+bool operator<=(const year_month_day& __lhs, const year_month_day& __rhs) noexcept
+{ return !(__rhs < __lhs);}
+
+inline constexpr
+bool operator>=(const year_month_day& __lhs, const year_month_day& __rhs) noexcept
+{ return !(__lhs < __rhs); }
+
+inline constexpr
+year_month_day operator/(const year_month& __lhs, const day& __rhs) noexcept
+{ return year_month_day{__lhs.year(), __lhs.month(), __rhs}; }
+
+inline constexpr
+year_month_day operator/(const year_month& __lhs, int __rhs) noexcept
+{ return __lhs / day(__rhs); }
+
+inline constexpr
+year_month_day operator/(const year& __lhs, const month_day& __rhs) noexcept
+{ return __lhs / __rhs.month() / __rhs.day(); }
+
+inline constexpr
+year_month_day operator/(int __lhs, const month_day& __rhs) noexcept
+{ return year(__lhs) / __rhs; }
+
+inline constexpr
+year_month_day operator/(const month_day& __lhs, const year& __rhs) noexcept
+{ return __rhs / __lhs; }
+
+inline constexpr
+year_month_day operator/(const month_day& __lhs, int __rhs) noexcept
+{ return year(__rhs) / __lhs; }
+
+
+inline constexpr
+year_month_day operator+(const year_month_day& __lhs, const months& __rhs) noexcept
+{ return (__lhs.year()/__lhs.month() + __rhs)/__lhs.day(); }
+
+inline constexpr
+year_month_day operator+(const months& __lhs, const year_month_day& __rhs) noexcept
+{ return __rhs + __lhs; }
+
+inline constexpr
+year_month_day operator-(const year_month_day& __lhs, const months& __rhs) noexcept
+{ return __lhs + -__rhs; }
+
+inline constexpr
+year_month_day operator+(const year_month_day& __lhs, const years& __rhs) noexcept
+{ return (__lhs.year() + __rhs) / __lhs.month() / __lhs.day(); }
+
+inline constexpr
+year_month_day operator+(const years& __lhs, const year_month_day& __rhs) noexcept
+{ return __rhs + __lhs; }
+
+inline constexpr
+year_month_day operator-(const year_month_day& __lhs, const years& __rhs) noexcept
+{ return __lhs + -__rhs; }
+
+inline constexpr year_month_day& year_month_day::operator+=(const months& __dm) noexcept { *this = *this + __dm; return *this; }
+inline constexpr year_month_day& year_month_day::operator-=(const months& __dm) noexcept { *this = *this - __dm; return *this; }
+inline constexpr year_month_day& year_month_day::operator+=(const years& __dy) noexcept { *this = *this + __dy; return *this; }
+inline constexpr year_month_day& year_month_day::operator-=(const years& __dy) noexcept { *this = *this - __dy; return *this; }
+
+class year_month_day_last {
+private:
+ chrono::year __y;
+ chrono::month_day_last __mdl;
+public:
+ constexpr year_month_day_last(const year& __yval, const month_day_last& __mdlval) noexcept
+ : __y{__yval}, __mdl{__mdlval} {}
+
+ constexpr year_month_day_last& operator+=(const months& __m) noexcept;
+ constexpr year_month_day_last& operator-=(const months& __m) noexcept;
+ constexpr year_month_day_last& operator+=(const years& __y) noexcept;
+ constexpr year_month_day_last& operator-=(const years& __y) noexcept;
+
+ inline constexpr chrono::year year() const noexcept { return __y; }
+ inline constexpr chrono::month month() const noexcept { return __mdl.month(); }
+ inline constexpr chrono::month_day_last month_day_last() const noexcept { return __mdl; }
+ constexpr chrono::day day() const noexcept;
+ inline constexpr operator sys_days() const noexcept { return sys_days{year()/month()/day()}; }
+ inline explicit constexpr operator local_days() const noexcept { return local_days{year()/month()/day()}; }
+ inline constexpr bool ok() const noexcept { return __y.ok() && __mdl.ok(); }
+};
+
+inline constexpr
+chrono::day year_month_day_last::day() const noexcept
+{
+ constexpr chrono::day __d[] =
+ {
+ chrono::day(31), chrono::day(28), chrono::day(31),
+ chrono::day(30), chrono::day(31), chrono::day(30),
+ chrono::day(31), chrono::day(31), chrono::day(30),
+ chrono::day(31), chrono::day(30), chrono::day(31)
+ };
+ return (month() != February || !__y.is_leap()) && month().ok() ?
+ __d[static_cast<unsigned>(month()) - 1] : chrono::day{29};
+}
+
+inline constexpr
+bool operator==(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept
+{ return __lhs.year() == __rhs.year() && __lhs.month_day_last() == __rhs.month_day_last(); }
+
+inline constexpr
+bool operator!=(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept
+{ return !(__lhs == __rhs); }
+
+inline constexpr
+bool operator< (const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept
+{
+ if (__lhs.year() < __rhs.year()) return true;
+ if (__lhs.year() > __rhs.year()) return false;
+ return __lhs.month_day_last() < __rhs.month_day_last();
+}
+
+inline constexpr
+bool operator> (const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept
+{ return __rhs < __lhs; }
+
+inline constexpr
+bool operator<=(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept
+{ return !(__rhs < __lhs);}
+
+inline constexpr
+bool operator>=(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept
+{ return !(__lhs < __rhs); }
+
+inline constexpr year_month_day_last operator/(const year_month& __lhs, last_spec) noexcept
+{ return year_month_day_last{__lhs.year(), month_day_last{__lhs.month()}}; }
+
+inline constexpr year_month_day_last operator/(const year& __lhs, const month_day_last& __rhs) noexcept
+{ return year_month_day_last{__lhs, __rhs}; }
+
+inline constexpr year_month_day_last operator/(int __lhs, const month_day_last& __rhs) noexcept
+{ return year_month_day_last{year{__lhs}, __rhs}; }
+
+inline constexpr year_month_day_last operator/(const month_day_last& __lhs, const year& __rhs) noexcept
+{ return __rhs / __lhs; }
+
+inline constexpr year_month_day_last operator/(const month_day_last& __lhs, int __rhs) noexcept
+{ return year{__rhs} / __lhs; }
+
+
+inline constexpr
+year_month_day_last operator+(const year_month_day_last& __lhs, const months& __rhs) noexcept
+{ return (__lhs.year() / __lhs.month() + __rhs) / last; }
+
+inline constexpr
+year_month_day_last operator+(const months& __lhs, const year_month_day_last& __rhs) noexcept
+{ return __rhs + __lhs; }
+
+inline constexpr
+year_month_day_last operator-(const year_month_day_last& __lhs, const months& __rhs) noexcept
+{ return __lhs + (-__rhs); }
+
+inline constexpr
+year_month_day_last operator+(const year_month_day_last& __lhs, const years& __rhs) noexcept
+{ return year_month_day_last{__lhs.year() + __rhs, __lhs.month_day_last()}; }
+
+inline constexpr
+year_month_day_last operator+(const years& __lhs, const year_month_day_last& __rhs) noexcept
+{ return __rhs + __lhs; }
+
+inline constexpr
+year_month_day_last operator-(const year_month_day_last& __lhs, const years& __rhs) noexcept
+{ return __lhs + (-__rhs); }
+
+inline constexpr year_month_day_last& year_month_day_last::operator+=(const months& __dm) noexcept { *this = *this + __dm; return *this; }
+inline constexpr year_month_day_last& year_month_day_last::operator-=(const months& __dm) noexcept { *this = *this - __dm; return *this; }
+inline constexpr year_month_day_last& year_month_day_last::operator+=(const years& __dy) noexcept { *this = *this + __dy; return *this; }
+inline constexpr year_month_day_last& year_month_day_last::operator-=(const years& __dy) noexcept { *this = *this - __dy; return *this; }
+
+inline constexpr year_month_day::year_month_day(const year_month_day_last& __ymdl) noexcept
+ : __y{__ymdl.year()}, __m{__ymdl.month()}, __d{__ymdl.day()} {}
+
+inline constexpr bool year_month_day::ok() const noexcept
+{
+ if (!__y.ok() || !__m.ok()) return false;
+ return chrono::day{1} <= __d && __d <= (__y / __m / last).day();
+}
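
Illustration only (not part of the diff, <chrono> included, C++20): ok() validates the day against the real month length, so February 29 passes only in leap years of the proleptic Gregorian calendar.

static_assert((std::chrono::year{2000}/2/29).ok());    // 2000 is a leap year
static_assert(!(std::chrono::year{1900}/2/29).ok());   // 1900 is not (divisible by 100, not 400)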
+
+class year_month_weekday {
+ chrono::year __y;
+ chrono::month __m;
+ chrono::weekday_indexed __wdi;
+public:
+ year_month_weekday() = default;
+ constexpr year_month_weekday(const chrono::year& __yval, const chrono::month& __mval,
+ const chrono::weekday_indexed& __wdival) noexcept
+ : __y{__yval}, __m{__mval}, __wdi{__wdival} {}
+ constexpr year_month_weekday(const sys_days& __sysd) noexcept
+ : year_month_weekday(__from_days(__sysd.time_since_epoch())) {}
+ inline explicit constexpr year_month_weekday(const local_days& __locd) noexcept
+ : year_month_weekday(__from_days(__locd.time_since_epoch())) {}
+ constexpr year_month_weekday& operator+=(const months& m) noexcept;
+ constexpr year_month_weekday& operator-=(const months& m) noexcept;
+ constexpr year_month_weekday& operator+=(const years& y) noexcept;
+ constexpr year_month_weekday& operator-=(const years& y) noexcept;
+
+ inline constexpr chrono::year year() const noexcept { return __y; }
+ inline constexpr chrono::month month() const noexcept { return __m; }
+ inline constexpr chrono::weekday weekday() const noexcept { return __wdi.weekday(); }
+ inline constexpr unsigned index() const noexcept { return __wdi.index(); }
+ inline constexpr chrono::weekday_indexed weekday_indexed() const noexcept { return __wdi; }
+
+ inline constexpr operator sys_days() const noexcept { return sys_days{__to_days()}; }
+ inline explicit constexpr operator local_days() const noexcept { return local_days{__to_days()}; }
+ inline constexpr bool ok() const noexcept
+ {
+ if (!__y.ok() || !__m.ok() || !__wdi.ok()) return false;
+ if (__wdi.index() <= 4) return true;
+ auto __nth_weekday_day =
+ __wdi.weekday() -
+ chrono::weekday{static_cast<sys_days>(__y / __m / 1)} +
+ days{(__wdi.index() - 1) * 7 + 1};
+ return static_cast<unsigned>(__nth_weekday_day.count()) <=
+ static_cast<unsigned>((__y / __m / last).day());
+ }
+
+ static constexpr year_month_weekday __from_days(days __d) noexcept;
+ constexpr days __to_days() const noexcept;
+};
+
+inline constexpr
+year_month_weekday year_month_weekday::__from_days(days __d) noexcept
+{
+ const sys_days __sysd{__d};
+ const chrono::weekday __wd = chrono::weekday(__sysd);
+ const year_month_day __ymd = year_month_day(__sysd);
+ return year_month_weekday{__ymd.year(), __ymd.month(),
+ __wd[(static_cast<unsigned>(__ymd.day())-1)/7+1]};
+}
+
+inline constexpr
+days year_month_weekday::__to_days() const noexcept
+{
+ const sys_days __sysd = sys_days(__y/__m/1);
+ return (__sysd + (__wdi.weekday() - chrono::weekday(__sysd) + days{(__wdi.index()-1)*7}))
+ .time_since_epoch();
+}
+
+inline constexpr
+bool operator==(const year_month_weekday& __lhs, const year_month_weekday& __rhs) noexcept
+{ return __lhs.year() == __rhs.year() && __lhs.month() == __rhs.month() && __lhs.weekday_indexed() == __rhs.weekday_indexed(); }
+
+inline constexpr
+bool operator!=(const year_month_weekday& __lhs, const year_month_weekday& __rhs) noexcept
+{ return !(__lhs == __rhs); }
+
+inline constexpr
+year_month_weekday operator/(const year_month& __lhs, const weekday_indexed& __rhs) noexcept
+{ return year_month_weekday{__lhs.year(), __lhs.month(), __rhs}; }
+
+inline constexpr
+year_month_weekday operator/(const year& __lhs, const month_weekday& __rhs) noexcept
+{ return year_month_weekday{__lhs, __rhs.month(), __rhs.weekday_indexed()}; }
+
+inline constexpr
+year_month_weekday operator/(int __lhs, const month_weekday& __rhs) noexcept
+{ return year(__lhs) / __rhs; }
+
+inline constexpr
+year_month_weekday operator/(const month_weekday& __lhs, const year& __rhs) noexcept
+{ return __rhs / __lhs; }
+
+inline constexpr
+year_month_weekday operator/(const month_weekday& __lhs, int __rhs) noexcept
+{ return year(__rhs) / __lhs; }
+
+
+inline constexpr
+year_month_weekday operator+(const year_month_weekday& __lhs, const months& __rhs) noexcept
+{ return (__lhs.year() / __lhs.month() + __rhs) / __lhs.weekday_indexed(); }
+
+inline constexpr
+year_month_weekday operator+(const months& __lhs, const year_month_weekday& __rhs) noexcept
+{ return __rhs + __lhs; }
+
+inline constexpr
+year_month_weekday operator-(const year_month_weekday& __lhs, const months& __rhs) noexcept
+{ return __lhs + (-__rhs); }
+
+inline constexpr
+year_month_weekday operator+(const year_month_weekday& __lhs, const years& __rhs) noexcept
+{ return year_month_weekday{__lhs.year() + __rhs, __lhs.month(), __lhs.weekday_indexed()}; }
+
+inline constexpr
+year_month_weekday operator+(const years& __lhs, const year_month_weekday& __rhs) noexcept
+{ return __rhs + __lhs; }
+
+inline constexpr
+year_month_weekday operator-(const year_month_weekday& __lhs, const years& __rhs) noexcept
+{ return __lhs + (-__rhs); }
+
+
+inline constexpr year_month_weekday& year_month_weekday::operator+=(const months& __dm) noexcept { *this = *this + __dm; return *this; }
+inline constexpr year_month_weekday& year_month_weekday::operator-=(const months& __dm) noexcept { *this = *this - __dm; return *this; }
+inline constexpr year_month_weekday& year_month_weekday::operator+=(const years& __dy) noexcept { *this = *this + __dy; return *this; }
+inline constexpr year_month_weekday& year_month_weekday::operator-=(const years& __dy) noexcept { *this = *this - __dy; return *this; }
+
+class year_month_weekday_last {
+private:
+ chrono::year __y;
+ chrono::month __m;
+ chrono::weekday_last __wdl;
+public:
+ constexpr year_month_weekday_last(const chrono::year& __yval, const chrono::month& __mval,
+ const chrono::weekday_last& __wdlval) noexcept
+ : __y{__yval}, __m{__mval}, __wdl{__wdlval} {}
+ constexpr year_month_weekday_last& operator+=(const months& __dm) noexcept;
+ constexpr year_month_weekday_last& operator-=(const months& __dm) noexcept;
+ constexpr year_month_weekday_last& operator+=(const years& __dy) noexcept;
+ constexpr year_month_weekday_last& operator-=(const years& __dy) noexcept;
+
+ inline constexpr chrono::year year() const noexcept { return __y; }
+ inline constexpr chrono::month month() const noexcept { return __m; }
+ inline constexpr chrono::weekday weekday() const noexcept { return __wdl.weekday(); }
+ inline constexpr chrono::weekday_last weekday_last() const noexcept { return __wdl; }
+ inline constexpr operator sys_days() const noexcept { return sys_days{__to_days()}; }
+ inline explicit constexpr operator local_days() const noexcept { return local_days{__to_days()}; }
+ inline constexpr bool ok() const noexcept { return __y.ok() && __m.ok() && __wdl.ok(); }
+
+ constexpr days __to_days() const noexcept;
+
+};
+
+inline constexpr
+days year_month_weekday_last::__to_days() const noexcept
+{
+ const sys_days __last = sys_days{__y/__m/last};
+ return (__last - (chrono::weekday{__last} - __wdl.weekday())).time_since_epoch();
+
+}
+
+inline constexpr
+bool operator==(const year_month_weekday_last& __lhs, const year_month_weekday_last& __rhs) noexcept
+{ return __lhs.year() == __rhs.year() && __lhs.month() == __rhs.month() && __lhs.weekday_last() == __rhs.weekday_last(); }
+
+inline constexpr
+bool operator!=(const year_month_weekday_last& __lhs, const year_month_weekday_last& __rhs) noexcept
+{ return !(__lhs == __rhs); }
+
+
+inline constexpr
+year_month_weekday_last operator/(const year_month& __lhs, const weekday_last& __rhs) noexcept
+{ return year_month_weekday_last{__lhs.year(), __lhs.month(), __rhs}; }
+
+inline constexpr
+year_month_weekday_last operator/(const year& __lhs, const month_weekday_last& __rhs) noexcept
+{ return year_month_weekday_last{__lhs, __rhs.month(), __rhs.weekday_last()}; }
+
+inline constexpr
+year_month_weekday_last operator/(int __lhs, const month_weekday_last& __rhs) noexcept
+{ return year(__lhs) / __rhs; }
+
+inline constexpr
+year_month_weekday_last operator/(const month_weekday_last& __lhs, const year& __rhs) noexcept
+{ return __rhs / __lhs; }
+
+inline constexpr
+year_month_weekday_last operator/(const month_weekday_last& __lhs, int __rhs) noexcept
+{ return year(__rhs) / __lhs; }
+
+
+inline constexpr
+year_month_weekday_last operator+(const year_month_weekday_last& __lhs, const months& __rhs) noexcept
+{ return (__lhs.year() / __lhs.month() + __rhs) / __lhs.weekday_last(); }
+
+inline constexpr
+year_month_weekday_last operator+(const months& __lhs, const year_month_weekday_last& __rhs) noexcept
+{ return __rhs + __lhs; }
+
+inline constexpr
+year_month_weekday_last operator-(const year_month_weekday_last& __lhs, const months& __rhs) noexcept
+{ return __lhs + (-__rhs); }
+
+inline constexpr
+year_month_weekday_last operator+(const year_month_weekday_last& __lhs, const years& __rhs) noexcept
+{ return year_month_weekday_last{__lhs.year() + __rhs, __lhs.month(), __lhs.weekday_last()}; }
+
+inline constexpr
+year_month_weekday_last operator+(const years& __lhs, const year_month_weekday_last& __rhs) noexcept
+{ return __rhs + __lhs; }
+
+inline constexpr
+year_month_weekday_last operator-(const year_month_weekday_last& __lhs, const years& __rhs) noexcept
+{ return __lhs + (-__rhs); }
+
+inline constexpr year_month_weekday_last& year_month_weekday_last::operator+=(const months& __dm) noexcept { *this = *this + __dm; return *this; }
+inline constexpr year_month_weekday_last& year_month_weekday_last::operator-=(const months& __dm) noexcept { *this = *this - __dm; return *this; }
+inline constexpr year_month_weekday_last& year_month_weekday_last::operator+=(const years& __dy) noexcept { *this = *this + __dy; return *this; }
+inline constexpr year_month_weekday_last& year_month_weekday_last::operator-=(const years& __dy) noexcept { *this = *this - __dy; return *this; }
+
+
+template <class _Duration>
+class hh_mm_ss
+{
+private:
+ static_assert(__is_duration<_Duration>::value, "template parameter of hh_mm_ss must be a std::chrono::duration");
+ using __CommonType = common_type_t<_Duration, chrono::seconds>;
+
+ static constexpr uint64_t __pow10(unsigned __exp)
+ {
+ uint64_t __ret = 1;
+ for (unsigned __i = 0; __i < __exp; ++__i)
+ __ret *= 10U;
+ return __ret;
+ }
+
+ static constexpr unsigned __width(uint64_t __n, uint64_t __d = 10, unsigned __w = 0)
+ {
+ if (__n >= 2 && __d != 0 && __w < 19)
+ return 1 + __width(__n, __d % __n * 10, __w+1);
+ return 0;
+ }
+
+public:
+ static unsigned constexpr fractional_width = __width(__CommonType::period::den) < 19 ?
+ __width(__CommonType::period::den) : 6u;
+ using precision = duration<typename __CommonType::rep, ratio<1, __pow10(fractional_width)>>;
+
+ constexpr hh_mm_ss() noexcept : hh_mm_ss{_Duration::zero()} {}
+
+ constexpr explicit hh_mm_ss(_Duration __d) noexcept :
+ __is_neg(__d < _Duration(0)),
+ __h(duration_cast<chrono::hours> (abs(__d))),
+ __m(duration_cast<chrono::minutes>(abs(__d) - hours())),
+ __s(duration_cast<chrono::seconds>(abs(__d) - hours() - minutes())),
+ __f(duration_cast<precision> (abs(__d) - hours() - minutes() - seconds()))
+ {}
+
+ constexpr bool is_negative() const noexcept { return __is_neg; }
+ constexpr chrono::hours hours() const noexcept { return __h; }
+ constexpr chrono::minutes minutes() const noexcept { return __m; }
+ constexpr chrono::seconds seconds() const noexcept { return __s; }
+ constexpr precision subseconds() const noexcept { return __f; }
+
+ constexpr precision to_duration() const noexcept
+ {
+ auto __dur = __h + __m + __s + __f;
+ return __is_neg ? -__dur : __dur;
+ }
+
+ constexpr explicit operator precision() const noexcept { return to_duration(); }
+
+private:
+ bool __is_neg;
+ chrono::hours __h;
+ chrono::minutes __m;
+ chrono::seconds __s;
+ precision __f;
+};
+
+constexpr bool is_am(const hours& __h) noexcept { return __h >= hours( 0) && __h < hours(12); }
+constexpr bool is_pm(const hours& __h) noexcept { return __h >= hours(12) && __h < hours(24); }
+
+constexpr hours make12(const hours& __h) noexcept
+{
+ if (__h == hours( 0)) return hours(12);
+ else if (__h <= hours(12)) return __h;
+ else return __h - hours(12);
+}
+
+constexpr hours make24(const hours& __h, bool __is_pm) noexcept
+{
+ if (__is_pm)
+ return __h == hours(12) ? __h : __h + hours(12);
+ else
+ return __h == hours(12) ? hours(0) : __h;
+}
+
+} // namespace chrono
+
+inline namespace literals
+{
+ inline namespace chrono_literals
+ {
+ constexpr chrono::day operator ""d(unsigned long long __d) noexcept
+ {
+ return chrono::day(static_cast<unsigned>(__d));
+ }
+
+ constexpr chrono::year operator ""y(unsigned long long __y) noexcept
+ {
+ return chrono::year(static_cast<int>(__y));
+ }
+} // namespace chrono_literals
+} // namespace literals
+
+namespace chrono { // hoist the literals into namespace std::chrono
+ using namespace literals::chrono_literals;
+} // namespace chrono
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP_STD_VER > 17
+
+_LIBCPP_POP_MACROS
+
+#endif // _LIBCPP___CHRONO_CALENDAR_H
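A minimal usage sketch of the calendar types added by this header, using only the public C++20 std::chrono API; it is illustrative and not part of the upstream change:

#include <chrono>
using namespace std::chrono;

constexpr year_month_day ymd = 2022y/January/27;    // y/m/d composition via operator/
constexpr sys_days       dp  = ymd;                 // field-based date -> serial days
static_assert(ymd.ok() && weekday{dp} == Thursday);

// "Nth weekday" and "last" forms resolve to concrete dates.
constexpr sys_days tg = 2021y/November/Thursday[4];
static_assert(year_month_day{tg} == 2021y/November/25);   // 4th Thursday of Nov 2021
static_assert((2022y/February/last).day() == 28d);

// hh_mm_ss splits a duration; is_pm/make12 are the 12-hour helpers.
constexpr hh_mm_ss<seconds> hms{45296s};            // 12:34:56
static_assert(hms.hours() == 12h && hms.minutes() == 34min && hms.seconds() == 56s);
static_assert(is_pm(hms.hours()) && make12(hms.hours()) == 12h);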
diff --git a/contrib/llvm-project/libcxx/include/__chrono/convert_to_timespec.h b/contrib/llvm-project/libcxx/include/__chrono/convert_to_timespec.h
new file mode 100644
index 000000000000..0106e6dec3e1
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__chrono/convert_to_timespec.h
@@ -0,0 +1,55 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#ifndef _LIBCPP___CHRONO_CONVERT_TO_TIMESPEC_H
+#define _LIBCPP___CHRONO_CONVERT_TO_TIMESPEC_H
+
+#include <__chrono/duration.h>
+#include <__config>
+#include <limits>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#pragma GCC system_header
+#endif
+
+_LIBCPP_PUSH_MACROS
+#include <__undef_macros>
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+// Convert a nanoseconds duration to the given TimeSpec type, which must have
+// the same properties as std::timespec.
+template <class _TimeSpec>
+_LIBCPP_HIDE_FROM_ABI inline
+_TimeSpec __convert_to_timespec(const chrono::nanoseconds& __ns)
+{
+ using namespace chrono;
+ seconds __s = duration_cast<seconds>(__ns);
+ _TimeSpec __ts;
+ typedef decltype(__ts.tv_sec) __ts_sec;
+ const __ts_sec __ts_sec_max = numeric_limits<__ts_sec>::max();
+
+ if (__s.count() < __ts_sec_max)
+ {
+ __ts.tv_sec = static_cast<__ts_sec>(__s.count());
+ __ts.tv_nsec = static_cast<decltype(__ts.tv_nsec)>((__ns - __s).count());
+ }
+ else
+ {
+ __ts.tv_sec = __ts_sec_max;
+ __ts.tv_nsec = 999999999; // (10^9 - 1)
+ }
+
+ return __ts;
+}
+
+_LIBCPP_END_NAMESPACE_STD
+
+_LIBCPP_POP_MACROS
+
+#endif // _LIBCPP___CHRONO_CONVERT_TO_TIMESPEC_H
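__convert_to_timespec is a libc++-internal helper, so user code should not call it directly; the following standalone sketch (not part of the diff) re-expresses the same split-and-clamp logic with public API only, for readers who want the equivalent conversion outside the library:

#include <chrono>
#include <ctime>
#include <limits>

// Split nanoseconds into whole seconds plus the nanosecond remainder,
// clamping when tv_sec would overflow (mirrors the helper above).
inline std::timespec to_timespec(std::chrono::nanoseconds ns) {
    const auto s = std::chrono::duration_cast<std::chrono::seconds>(ns);
    std::timespec ts;
    if (s.count() < std::numeric_limits<decltype(ts.tv_sec)>::max()) {
        ts.tv_sec  = static_cast<decltype(ts.tv_sec)>(s.count());
        ts.tv_nsec = static_cast<decltype(ts.tv_nsec)>((ns - s).count());
    } else {
        ts.tv_sec  = std::numeric_limits<decltype(ts.tv_sec)>::max();
        ts.tv_nsec = 999999999;
    }
    return ts;
}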
diff --git a/contrib/llvm-project/libcxx/include/__chrono/duration.h b/contrib/llvm-project/libcxx/include/__chrono/duration.h
new file mode 100644
index 000000000000..24801772ec5d
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__chrono/duration.h
@@ -0,0 +1,615 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___CHRONO_DURATION_H
+#define _LIBCPP___CHRONO_DURATION_H
+
+#include <__config>
+#include <limits>
+#include <ratio>
+#include <type_traits>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#pragma GCC system_header
+#endif
+
+_LIBCPP_PUSH_MACROS
+#include <__undef_macros>
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+namespace chrono
+{
+
+template <class _Rep, class _Period = ratio<1> > class _LIBCPP_TEMPLATE_VIS duration;
+
+template <class _Tp>
+struct __is_duration : false_type {};
+
+template <class _Rep, class _Period>
+struct __is_duration<duration<_Rep, _Period> > : true_type {};
+
+template <class _Rep, class _Period>
+struct __is_duration<const duration<_Rep, _Period> > : true_type {};
+
+template <class _Rep, class _Period>
+struct __is_duration<volatile duration<_Rep, _Period> > : true_type {};
+
+template <class _Rep, class _Period>
+struct __is_duration<const volatile duration<_Rep, _Period> > : true_type {};
+
+} // namespace chrono
+
+template <class _Rep1, class _Period1, class _Rep2, class _Period2>
+struct _LIBCPP_TEMPLATE_VIS common_type<chrono::duration<_Rep1, _Period1>,
+ chrono::duration<_Rep2, _Period2> >
+{
+ typedef chrono::duration<typename common_type<_Rep1, _Rep2>::type,
+ typename __ratio_gcd<_Period1, _Period2>::type> type;
+};
+
+namespace chrono {
+
+// duration_cast
+
+template <class _FromDuration, class _ToDuration,
+ class _Period = typename ratio_divide<typename _FromDuration::period, typename _ToDuration::period>::type,
+ bool = _Period::num == 1,
+ bool = _Period::den == 1>
+struct __duration_cast;
+
+template <class _FromDuration, class _ToDuration, class _Period>
+struct __duration_cast<_FromDuration, _ToDuration, _Period, true, true>
+{
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
+ _ToDuration operator()(const _FromDuration& __fd) const
+ {
+ return _ToDuration(static_cast<typename _ToDuration::rep>(__fd.count()));
+ }
+};
+
+template <class _FromDuration, class _ToDuration, class _Period>
+struct __duration_cast<_FromDuration, _ToDuration, _Period, true, false>
+{
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
+ _ToDuration operator()(const _FromDuration& __fd) const
+ {
+ typedef typename common_type<typename _ToDuration::rep, typename _FromDuration::rep, intmax_t>::type _Ct;
+ return _ToDuration(static_cast<typename _ToDuration::rep>(
+ static_cast<_Ct>(__fd.count()) / static_cast<_Ct>(_Period::den)));
+ }
+};
+
+template <class _FromDuration, class _ToDuration, class _Period>
+struct __duration_cast<_FromDuration, _ToDuration, _Period, false, true>
+{
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
+ _ToDuration operator()(const _FromDuration& __fd) const
+ {
+ typedef typename common_type<typename _ToDuration::rep, typename _FromDuration::rep, intmax_t>::type _Ct;
+ return _ToDuration(static_cast<typename _ToDuration::rep>(
+ static_cast<_Ct>(__fd.count()) * static_cast<_Ct>(_Period::num)));
+ }
+};
+
+template <class _FromDuration, class _ToDuration, class _Period>
+struct __duration_cast<_FromDuration, _ToDuration, _Period, false, false>
+{
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
+ _ToDuration operator()(const _FromDuration& __fd) const
+ {
+ typedef typename common_type<typename _ToDuration::rep, typename _FromDuration::rep, intmax_t>::type _Ct;
+ return _ToDuration(static_cast<typename _ToDuration::rep>(
+ static_cast<_Ct>(__fd.count()) * static_cast<_Ct>(_Period::num)
+ / static_cast<_Ct>(_Period::den)));
+ }
+};
+
+template <class _ToDuration, class _Rep, class _Period>
+inline _LIBCPP_INLINE_VISIBILITY
+_LIBCPP_CONSTEXPR
+typename enable_if
+<
+ __is_duration<_ToDuration>::value,
+ _ToDuration
+>::type
+duration_cast(const duration<_Rep, _Period>& __fd)
+{
+ return __duration_cast<duration<_Rep, _Period>, _ToDuration>()(__fd);
+}
+
+template <class _Rep>
+struct _LIBCPP_TEMPLATE_VIS treat_as_floating_point : is_floating_point<_Rep> {};
+
+#if _LIBCPP_STD_VER > 14
+template <class _Rep>
+inline constexpr bool treat_as_floating_point_v = treat_as_floating_point<_Rep>::value;
+#endif
+
+template <class _Rep>
+struct _LIBCPP_TEMPLATE_VIS duration_values
+{
+public:
+ _LIBCPP_INLINE_VISIBILITY static _LIBCPP_CONSTEXPR _Rep zero() _NOEXCEPT {return _Rep(0);}
+ _LIBCPP_INLINE_VISIBILITY static _LIBCPP_CONSTEXPR _Rep max() _NOEXCEPT {return numeric_limits<_Rep>::max();}
+ _LIBCPP_INLINE_VISIBILITY static _LIBCPP_CONSTEXPR _Rep min() _NOEXCEPT {return numeric_limits<_Rep>::lowest();}
+};
+
+#if _LIBCPP_STD_VER > 14
+template <class _ToDuration, class _Rep, class _Period>
+inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
+typename enable_if
+<
+ __is_duration<_ToDuration>::value,
+ _ToDuration
+>::type
+floor(const duration<_Rep, _Period>& __d)
+{
+ _ToDuration __t = duration_cast<_ToDuration>(__d);
+ if (__t > __d)
+ __t = __t - _ToDuration{1};
+ return __t;
+}
+
+template <class _ToDuration, class _Rep, class _Period>
+inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
+typename enable_if
+<
+ __is_duration<_ToDuration>::value,
+ _ToDuration
+>::type
+ceil(const duration<_Rep, _Period>& __d)
+{
+ _ToDuration __t = duration_cast<_ToDuration>(__d);
+ if (__t < __d)
+ __t = __t + _ToDuration{1};
+ return __t;
+}
+
+template <class _ToDuration, class _Rep, class _Period>
+inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
+typename enable_if
+<
+ __is_duration<_ToDuration>::value,
+ _ToDuration
+>::type
+round(const duration<_Rep, _Period>& __d)
+{
+ _ToDuration __lower = floor<_ToDuration>(__d);
+ _ToDuration __upper = __lower + _ToDuration{1};
+ auto __lowerDiff = __d - __lower;
+ auto __upperDiff = __upper - __d;
+ if (__lowerDiff < __upperDiff)
+ return __lower;
+ if (__lowerDiff > __upperDiff)
+ return __upper;
+ return __lower.count() & 1 ? __upper : __lower;
+}
+#endif
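
The rounding helpers above differ only in how they resolve the fractional part: floor truncates toward negative infinity, ceil toward positive infinity, and round breaks exact ties toward the even count. An illustrative check (not part of the diff, C++17 or later):

#include <chrono>

static_assert(std::chrono::floor<std::chrono::seconds>(std::chrono::milliseconds{-1500}).count() == -2);
static_assert(std::chrono::ceil<std::chrono::seconds>(std::chrono::milliseconds{-1500}).count() == -1);
static_assert(std::chrono::round<std::chrono::seconds>(std::chrono::milliseconds{2500}).count() == 2);  // tie -> even
static_assert(std::chrono::round<std::chrono::seconds>(std::chrono::milliseconds{3500}).count() == 4);  // tie -> even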
+
+// duration
+
+template <class _Rep, class _Period>
+class _LIBCPP_TEMPLATE_VIS duration
+{
+ static_assert(!__is_duration<_Rep>::value, "A duration representation can not be a duration");
+ static_assert(__is_ratio<_Period>::value, "Second template parameter of duration must be a std::ratio");
+ static_assert(_Period::num > 0, "duration period must be positive");
+
+ template <class _R1, class _R2>
+ struct __no_overflow
+ {
+ private:
+ static const intmax_t __gcd_n1_n2 = __static_gcd<_R1::num, _R2::num>::value;
+ static const intmax_t __gcd_d1_d2 = __static_gcd<_R1::den, _R2::den>::value;
+ static const intmax_t __n1 = _R1::num / __gcd_n1_n2;
+ static const intmax_t __d1 = _R1::den / __gcd_d1_d2;
+ static const intmax_t __n2 = _R2::num / __gcd_n1_n2;
+ static const intmax_t __d2 = _R2::den / __gcd_d1_d2;
+ static const intmax_t max = -((intmax_t(1) << (sizeof(intmax_t) * CHAR_BIT - 1)) + 1);
+
+ template <intmax_t _Xp, intmax_t _Yp, bool __overflow>
+ struct __mul // __overflow == false
+ {
+ static const intmax_t value = _Xp * _Yp;
+ };
+
+ template <intmax_t _Xp, intmax_t _Yp>
+ struct __mul<_Xp, _Yp, true>
+ {
+ static const intmax_t value = 1;
+ };
+
+ public:
+ static const bool value = (__n1 <= max / __d2) && (__n2 <= max / __d1);
+ typedef ratio<__mul<__n1, __d2, !value>::value,
+ __mul<__n2, __d1, !value>::value> type;
+ };
+
+public:
+ typedef _Rep rep;
+ typedef typename _Period::type period;
+private:
+ rep __rep_;
+public:
+
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
+#ifndef _LIBCPP_CXX03_LANG
+ duration() = default;
+#else
+ duration() {}
+#endif
+
+ template <class _Rep2>
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
+ explicit duration(const _Rep2& __r,
+ typename enable_if
+ <
+ is_convertible<_Rep2, rep>::value &&
+ (treat_as_floating_point<rep>::value ||
+ !treat_as_floating_point<_Rep2>::value)
+ >::type* = nullptr)
+ : __rep_(__r) {}
+
+ // conversions
+ template <class _Rep2, class _Period2>
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
+ duration(const duration<_Rep2, _Period2>& __d,
+ typename enable_if
+ <
+ __no_overflow<_Period2, period>::value && (
+ treat_as_floating_point<rep>::value ||
+ (__no_overflow<_Period2, period>::type::den == 1 &&
+ !treat_as_floating_point<_Rep2>::value))
+ >::type* = nullptr)
+ : __rep_(chrono::duration_cast<duration>(__d).count()) {}
+
+ // observer
+
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR rep count() const {return __rep_;}
+
+ // arithmetic
+
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR typename common_type<duration>::type operator+() const {return typename common_type<duration>::type(*this);}
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR typename common_type<duration>::type operator-() const {return typename common_type<duration>::type(-__rep_);}
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 duration& operator++() {++__rep_; return *this;}
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 duration operator++(int) {return duration(__rep_++);}
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 duration& operator--() {--__rep_; return *this;}
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 duration operator--(int) {return duration(__rep_--);}
+
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 duration& operator+=(const duration& __d) {__rep_ += __d.count(); return *this;}
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 duration& operator-=(const duration& __d) {__rep_ -= __d.count(); return *this;}
+
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 duration& operator*=(const rep& rhs) {__rep_ *= rhs; return *this;}
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 duration& operator/=(const rep& rhs) {__rep_ /= rhs; return *this;}
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 duration& operator%=(const rep& rhs) {__rep_ %= rhs; return *this;}
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 duration& operator%=(const duration& rhs) {__rep_ %= rhs.count(); return *this;}
+
+ // special values
+
+ _LIBCPP_INLINE_VISIBILITY static _LIBCPP_CONSTEXPR duration zero() _NOEXCEPT {return duration(duration_values<rep>::zero());}
+ _LIBCPP_INLINE_VISIBILITY static _LIBCPP_CONSTEXPR duration min() _NOEXCEPT {return duration(duration_values<rep>::min());}
+ _LIBCPP_INLINE_VISIBILITY static _LIBCPP_CONSTEXPR duration max() _NOEXCEPT {return duration(duration_values<rep>::max());}
+};
+
+typedef duration<long long, nano> nanoseconds;
+typedef duration<long long, micro> microseconds;
+typedef duration<long long, milli> milliseconds;
+typedef duration<long long > seconds;
+typedef duration< long, ratio< 60> > minutes;
+typedef duration< long, ratio<3600> > hours;
+#if _LIBCPP_STD_VER > 17
+typedef duration< int, ratio_multiply<ratio<24>, hours::period>> days;
+typedef duration< int, ratio_multiply<ratio<7>, days::period>> weeks;
+typedef duration< int, ratio_multiply<ratio<146097, 400>, days::period>> years;
+typedef duration< int, ratio_divide<years::period, ratio<12>>> months;
+#endif
+// Duration ==
+
+template <class _LhsDuration, class _RhsDuration>
+struct __duration_eq
+{
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
+ bool operator()(const _LhsDuration& __lhs, const _RhsDuration& __rhs) const
+ {
+ typedef typename common_type<_LhsDuration, _RhsDuration>::type _Ct;
+ return _Ct(__lhs).count() == _Ct(__rhs).count();
+ }
+};
+
+template <class _LhsDuration>
+struct __duration_eq<_LhsDuration, _LhsDuration>
+{
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
+ bool operator()(const _LhsDuration& __lhs, const _LhsDuration& __rhs) const
+ {return __lhs.count() == __rhs.count();}
+};
+
+template <class _Rep1, class _Period1, class _Rep2, class _Period2>
+inline _LIBCPP_INLINE_VISIBILITY
+_LIBCPP_CONSTEXPR
+bool
+operator==(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs)
+{
+ return __duration_eq<duration<_Rep1, _Period1>, duration<_Rep2, _Period2> >()(__lhs, __rhs);
+}
+
+// Duration !=
+
+template <class _Rep1, class _Period1, class _Rep2, class _Period2>
+inline _LIBCPP_INLINE_VISIBILITY
+_LIBCPP_CONSTEXPR
+bool
+operator!=(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs)
+{
+ return !(__lhs == __rhs);
+}
+
+// Duration <
+
+template <class _LhsDuration, class _RhsDuration>
+struct __duration_lt
+{
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
+ bool operator()(const _LhsDuration& __lhs, const _RhsDuration& __rhs) const
+ {
+ typedef typename common_type<_LhsDuration, _RhsDuration>::type _Ct;
+ return _Ct(__lhs).count() < _Ct(__rhs).count();
+ }
+};
+
+template <class _LhsDuration>
+struct __duration_lt<_LhsDuration, _LhsDuration>
+{
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
+ bool operator()(const _LhsDuration& __lhs, const _LhsDuration& __rhs) const
+ {return __lhs.count() < __rhs.count();}
+};
+
+template <class _Rep1, class _Period1, class _Rep2, class _Period2>
+inline _LIBCPP_INLINE_VISIBILITY
+_LIBCPP_CONSTEXPR
+bool
+operator< (const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs)
+{
+ return __duration_lt<duration<_Rep1, _Period1>, duration<_Rep2, _Period2> >()(__lhs, __rhs);
+}
+
+// Duration >
+
+template <class _Rep1, class _Period1, class _Rep2, class _Period2>
+inline _LIBCPP_INLINE_VISIBILITY
+_LIBCPP_CONSTEXPR
+bool
+operator> (const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs)
+{
+ return __rhs < __lhs;
+}
+
+// Duration <=
+
+template <class _Rep1, class _Period1, class _Rep2, class _Period2>
+inline _LIBCPP_INLINE_VISIBILITY
+_LIBCPP_CONSTEXPR
+bool
+operator<=(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs)
+{
+ return !(__rhs < __lhs);
+}
+
+// Duration >=
+
+template <class _Rep1, class _Period1, class _Rep2, class _Period2>
+inline _LIBCPP_INLINE_VISIBILITY
+_LIBCPP_CONSTEXPR
+bool
+operator>=(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs)
+{
+ return !(__lhs < __rhs);
+}
+
+// Duration +
+
+template <class _Rep1, class _Period1, class _Rep2, class _Period2>
+inline _LIBCPP_INLINE_VISIBILITY
+_LIBCPP_CONSTEXPR
+typename common_type<duration<_Rep1, _Period1>, duration<_Rep2, _Period2> >::type
+operator+(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs)
+{
+ typedef typename common_type<duration<_Rep1, _Period1>, duration<_Rep2, _Period2> >::type _Cd;
+ return _Cd(_Cd(__lhs).count() + _Cd(__rhs).count());
+}
+
+// Duration -
+
+template <class _Rep1, class _Period1, class _Rep2, class _Period2>
+inline _LIBCPP_INLINE_VISIBILITY
+_LIBCPP_CONSTEXPR
+typename common_type<duration<_Rep1, _Period1>, duration<_Rep2, _Period2> >::type
+operator-(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs)
+{
+ typedef typename common_type<duration<_Rep1, _Period1>, duration<_Rep2, _Period2> >::type _Cd;
+ return _Cd(_Cd(__lhs).count() - _Cd(__rhs).count());
+}
+
+// Duration *
+
+template <class _Rep1, class _Period, class _Rep2>
+inline _LIBCPP_INLINE_VISIBILITY
+_LIBCPP_CONSTEXPR
+typename enable_if
+<
+ is_convertible<_Rep2, typename common_type<_Rep1, _Rep2>::type>::value,
+ duration<typename common_type<_Rep1, _Rep2>::type, _Period>
+>::type
+operator*(const duration<_Rep1, _Period>& __d, const _Rep2& __s)
+{
+ typedef typename common_type<_Rep1, _Rep2>::type _Cr;
+ typedef duration<_Cr, _Period> _Cd;
+ return _Cd(_Cd(__d).count() * static_cast<_Cr>(__s));
+}
+
+template <class _Rep1, class _Period, class _Rep2>
+inline _LIBCPP_INLINE_VISIBILITY
+_LIBCPP_CONSTEXPR
+typename enable_if
+<
+ is_convertible<_Rep1, typename common_type<_Rep1, _Rep2>::type>::value,
+ duration<typename common_type<_Rep1, _Rep2>::type, _Period>
+>::type
+operator*(const _Rep1& __s, const duration<_Rep2, _Period>& __d)
+{
+ return __d * __s;
+}
+
+// Duration /
+
+template <class _Rep1, class _Period, class _Rep2>
+inline _LIBCPP_INLINE_VISIBILITY
+_LIBCPP_CONSTEXPR
+typename enable_if
+<
+ !__is_duration<_Rep2>::value &&
+ is_convertible<_Rep2, typename common_type<_Rep1, _Rep2>::type>::value,
+ duration<typename common_type<_Rep1, _Rep2>::type, _Period>
+>::type
+operator/(const duration<_Rep1, _Period>& __d, const _Rep2& __s)
+{
+ typedef typename common_type<_Rep1, _Rep2>::type _Cr;
+ typedef duration<_Cr, _Period> _Cd;
+ return _Cd(_Cd(__d).count() / static_cast<_Cr>(__s));
+}
+
+template <class _Rep1, class _Period1, class _Rep2, class _Period2>
+inline _LIBCPP_INLINE_VISIBILITY
+_LIBCPP_CONSTEXPR
+typename common_type<_Rep1, _Rep2>::type
+operator/(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs)
+{
+ typedef typename common_type<duration<_Rep1, _Period1>, duration<_Rep2, _Period2> >::type _Ct;
+ return _Ct(__lhs).count() / _Ct(__rhs).count();
+}
+
+// Duration %
+
+template <class _Rep1, class _Period, class _Rep2>
+inline _LIBCPP_INLINE_VISIBILITY
+_LIBCPP_CONSTEXPR
+typename enable_if
+<
+ !__is_duration<_Rep2>::value &&
+ is_convertible<_Rep2, typename common_type<_Rep1, _Rep2>::type>::value,
+ duration<typename common_type<_Rep1, _Rep2>::type, _Period>
+>::type
+operator%(const duration<_Rep1, _Period>& __d, const _Rep2& __s)
+{
+ typedef typename common_type<_Rep1, _Rep2>::type _Cr;
+ typedef duration<_Cr, _Period> _Cd;
+ return _Cd(_Cd(__d).count() % static_cast<_Cr>(__s));
+}
+
+template <class _Rep1, class _Period1, class _Rep2, class _Period2>
+inline _LIBCPP_INLINE_VISIBILITY
+_LIBCPP_CONSTEXPR
+typename common_type<duration<_Rep1, _Period1>, duration<_Rep2, _Period2> >::type
+operator%(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs)
+{
+ typedef typename common_type<_Rep1, _Rep2>::type _Cr;
+ typedef typename common_type<duration<_Rep1, _Period1>, duration<_Rep2, _Period2> >::type _Cd;
+ return _Cd(static_cast<_Cr>(_Cd(__lhs).count()) % static_cast<_Cr>(_Cd(__rhs).count()));
+}
+
+} // namespace chrono
+
+#if _LIBCPP_STD_VER > 11
+// Suffixes for duration literals [time.duration.literals]
+inline namespace literals
+{
+ inline namespace chrono_literals
+ {
+
+ constexpr chrono::hours operator""h(unsigned long long __h)
+ {
+ return chrono::hours(static_cast<chrono::hours::rep>(__h));
+ }
+
+ constexpr chrono::duration<long double, ratio<3600,1>> operator""h(long double __h)
+ {
+ return chrono::duration<long double, ratio<3600,1>>(__h);
+ }
+
+
+ constexpr chrono::minutes operator""min(unsigned long long __m)
+ {
+ return chrono::minutes(static_cast<chrono::minutes::rep>(__m));
+ }
+
+ constexpr chrono::duration<long double, ratio<60,1>> operator""min(long double __m)
+ {
+ return chrono::duration<long double, ratio<60,1>> (__m);
+ }
+
+
+ constexpr chrono::seconds operator""s(unsigned long long __s)
+ {
+ return chrono::seconds(static_cast<chrono::seconds::rep>(__s));
+ }
+
+ constexpr chrono::duration<long double> operator""s(long double __s)
+ {
+ return chrono::duration<long double> (__s);
+ }
+
+
+ constexpr chrono::milliseconds operator""ms(unsigned long long __ms)
+ {
+ return chrono::milliseconds(static_cast<chrono::milliseconds::rep>(__ms));
+ }
+
+ constexpr chrono::duration<long double, milli> operator""ms(long double __ms)
+ {
+ return chrono::duration<long double, milli>(__ms);
+ }
+
+
+ constexpr chrono::microseconds operator""us(unsigned long long __us)
+ {
+ return chrono::microseconds(static_cast<chrono::microseconds::rep>(__us));
+ }
+
+ constexpr chrono::duration<long double, micro> operator""us(long double __us)
+ {
+ return chrono::duration<long double, micro> (__us);
+ }
+
+
+ constexpr chrono::nanoseconds operator""ns(unsigned long long __ns)
+ {
+ return chrono::nanoseconds(static_cast<chrono::nanoseconds::rep>(__ns));
+ }
+
+ constexpr chrono::duration<long double, nano> operator""ns(long double __ns)
+ {
+ return chrono::duration<long double, nano> (__ns);
+ }
+
+} // namespace chrono_literals
+} // namespace literals
+
+namespace chrono { // hoist the literals into namespace std::chrono
+ using namespace literals::chrono_literals;
+} // namespace chrono
+
+#endif // _LIBCPP_STD_VER > 11
+
+_LIBCPP_END_NAMESPACE_STD
+
+_LIBCPP_POP_MACROS
+
+#endif // _LIBCPP___CHRONO_DURATION_H
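A short sketch of the duration machinery defined here (illustrative, not part of the diff): conversions that cannot lose precision are implicit, lossy ones require duration_cast (or floor/ceil/round), and heterogeneous comparisons go through common_type.

#include <chrono>
using namespace std::chrono_literals;

constexpr std::chrono::milliseconds ms = 3s;                           // exact, so implicit
constexpr auto whole = std::chrono::duration_cast<std::chrono::seconds>(3400ms);
static_assert(ms == 3000ms && whole == 3s);                            // duration_cast truncates
static_assert(1.5h == 90min);                                          // compared via common_type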
diff --git a/contrib/llvm-project/libcxx/include/__chrono/file_clock.h b/contrib/llvm-project/libcxx/include/__chrono/file_clock.h
new file mode 100644
index 000000000000..cd8758f81908
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__chrono/file_clock.h
@@ -0,0 +1,85 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___CHRONO_FILE_CLOCK_H
+#define _LIBCPP___CHRONO_FILE_CLOCK_H
+
+#include <__availability>
+#include <__chrono/duration.h>
+#include <__chrono/system_clock.h>
+#include <__chrono/time_point.h>
+#include <__config>
+#include <ratio>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#pragma GCC system_header
+#endif
+
+#ifndef _LIBCPP_CXX03_LANG
+_LIBCPP_BEGIN_NAMESPACE_FILESYSTEM
+struct _FilesystemClock;
+_LIBCPP_END_NAMESPACE_FILESYSTEM
+#endif // !_LIBCPP_CXX03_LANG
+
+#if _LIBCPP_STD_VER > 17
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+namespace chrono
+{
+
+// [time.clock.file], type file_clock
+using file_clock = _VSTD_FS::_FilesystemClock;
+
+template<class _Duration>
+using file_time = time_point<file_clock, _Duration>;
+
+} // namespace chrono
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP_STD_VER > 17
+
+#ifndef _LIBCPP_CXX03_LANG
+_LIBCPP_BEGIN_NAMESPACE_FILESYSTEM
+struct _FilesystemClock {
+#if !defined(_LIBCPP_HAS_NO_INT128)
+ typedef __int128_t rep;
+ typedef nano period;
+#else
+ typedef long long rep;
+ typedef nano period;
+#endif
+
+ typedef chrono::duration<rep, period> duration;
+ typedef chrono::time_point<_FilesystemClock> time_point;
+
+ _LIBCPP_EXPORTED_FROM_ABI
+ static _LIBCPP_CONSTEXPR_AFTER_CXX11 const bool is_steady = false;
+
+ _LIBCPP_AVAILABILITY_FILESYSTEM _LIBCPP_FUNC_VIS static time_point now() noexcept;
+
+#if _LIBCPP_STD_VER > 17
+ template <class _Duration>
+ _LIBCPP_HIDE_FROM_ABI
+ static chrono::sys_time<_Duration> to_sys(const chrono::file_time<_Duration>& __t) {
+ return chrono::sys_time<_Duration>(__t.time_since_epoch());
+ }
+
+ template <class _Duration>
+ _LIBCPP_HIDE_FROM_ABI
+ static chrono::file_time<_Duration> from_sys(const chrono::sys_time<_Duration>& __t) {
+ return chrono::file_time<_Duration>(__t.time_since_epoch());
+ }
+#endif // _LIBCPP_STD_VER > 17
+};
+_LIBCPP_END_NAMESPACE_FILESYSTEM
+#endif // !_LIBCPP_CXX03_LANG
+
+#endif // _LIBCPP___CHRONO_FILE_CLOCK_H
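
A short sketch of the C++20 file_clock conversions declared above; the helper name to_sys_seconds is made up for illustration, and in this implementation to_sys simply reuses the stored epoch offset.

    #include <chrono>
    #include <filesystem>

    // Hypothetical helper: round a filesystem time stamp down to whole seconds
    // on the system clock (requires C++20 library support).
    std::chrono::sys_seconds to_sys_seconds(std::filesystem::file_time_type ft) {
        using namespace std::chrono;
        auto sys = file_clock::to_sys(ft);      // file_time -> sys_time
        return floor<seconds>(sys);             // sys_time<seconds> == sys_seconds
    }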
diff --git a/contrib/llvm-project/libcxx/include/__chrono/high_resolution_clock.h b/contrib/llvm-project/libcxx/include/__chrono/high_resolution_clock.h
new file mode 100644
index 000000000000..f5cde4acb979
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__chrono/high_resolution_clock.h
@@ -0,0 +1,36 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___CHRONO_HIGH_RESOLUTION_CLOCK_H
+#define _LIBCPP___CHRONO_HIGH_RESOLUTION_CLOCK_H
+
+#include <__chrono/steady_clock.h>
+#include <__chrono/system_clock.h>
+#include <__config>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+namespace chrono
+{
+
+#ifndef _LIBCPP_HAS_NO_MONOTONIC_CLOCK
+typedef steady_clock high_resolution_clock;
+#else
+typedef system_clock high_resolution_clock;
+#endif
+
+} // namespace chrono
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___CHRONO_HIGH_RESOLUTION_CLOCK_H
diff --git a/contrib/llvm-project/libcxx/include/__chrono/steady_clock.h b/contrib/llvm-project/libcxx/include/__chrono/steady_clock.h
new file mode 100644
index 000000000000..5ba911df843e
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__chrono/steady_clock.h
@@ -0,0 +1,44 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___CHRONO_STEADY_CLOCK_H
+#define _LIBCPP___CHRONO_STEADY_CLOCK_H
+
+#include <__chrono/duration.h>
+#include <__chrono/time_point.h>
+#include <__config>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+namespace chrono
+{
+
+#ifndef _LIBCPP_HAS_NO_MONOTONIC_CLOCK
+class _LIBCPP_TYPE_VIS steady_clock
+{
+public:
+ typedef nanoseconds duration;
+ typedef duration::rep rep;
+ typedef duration::period period;
+ typedef chrono::time_point<steady_clock, duration> time_point;
+ static _LIBCPP_CONSTEXPR_AFTER_CXX11 const bool is_steady = true;
+
+ static time_point now() _NOEXCEPT;
+};
+#endif
+
+} // namespace chrono
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___CHRONO_STEADY_CLOCK_H
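
A minimal elapsed-time sketch using the steady_clock declared above (C++11 or later); steady_clock is the right choice for measuring intervals because it is never adjusted backwards.

    #include <chrono>
    #include <iostream>
    #include <thread>

    int main() {
        using clock = std::chrono::steady_clock;          // monotonic clock
        auto start = clock::now();
        std::this_thread::sleep_for(std::chrono::milliseconds(50));
        auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(clock::now() - start);
        std::cout << "took " << elapsed.count() << " ms\n";
    }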
diff --git a/contrib/llvm-project/libcxx/include/__chrono/system_clock.h b/contrib/llvm-project/libcxx/include/__chrono/system_clock.h
new file mode 100644
index 000000000000..9c977d369e1b
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__chrono/system_clock.h
@@ -0,0 +1,54 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___CHRONO_SYSTEM_CLOCK_H
+#define _LIBCPP___CHRONO_SYSTEM_CLOCK_H
+
+#include <__chrono/duration.h>
+#include <__chrono/time_point.h>
+#include <__config>
+#include <ctime>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+namespace chrono
+{
+
+class _LIBCPP_TYPE_VIS system_clock
+{
+public:
+ typedef microseconds duration;
+ typedef duration::rep rep;
+ typedef duration::period period;
+ typedef chrono::time_point<system_clock> time_point;
+ static _LIBCPP_CONSTEXPR_AFTER_CXX11 const bool is_steady = false;
+
+ static time_point now() _NOEXCEPT;
+ static time_t to_time_t (const time_point& __t) _NOEXCEPT;
+ static time_point from_time_t(time_t __t) _NOEXCEPT;
+};
+
+#if _LIBCPP_STD_VER > 17
+
+template <class _Duration>
+using sys_time = time_point<system_clock, _Duration>;
+using sys_seconds = sys_time<seconds>;
+using sys_days = sys_time<days>;
+
+#endif
+
+} // namespace chrono
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___CHRONO_SYSTEM_CLOCK_H
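
A small sketch of the system_clock/time_t bridge declared above; note that from_time_t can only restore whole-second precision.

    #include <chrono>
    #include <ctime>
    #include <iostream>

    int main() {
        auto now = std::chrono::system_clock::now();
        std::time_t tt = std::chrono::system_clock::to_time_t(now);   // bridge to C APIs
        std::cout << std::ctime(&tt);                                  // prints a calendar string
        auto back = std::chrono::system_clock::from_time_t(tt);       // whole seconds only
        (void)back;
    }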
diff --git a/contrib/llvm-project/libcxx/include/__chrono/time_point.h b/contrib/llvm-project/libcxx/include/__chrono/time_point.h
new file mode 100644
index 000000000000..c042e125145a
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__chrono/time_point.h
@@ -0,0 +1,249 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___CHRONO_TIME_POINT_H
+#define _LIBCPP___CHRONO_TIME_POINT_H
+
+#include <__chrono/duration.h>
+#include <__config>
+#include <limits>
+#include <type_traits>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#pragma GCC system_header
+#endif
+
+_LIBCPP_PUSH_MACROS
+#include <__undef_macros>
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+namespace chrono
+{
+
+template <class _Clock, class _Duration = typename _Clock::duration>
+class _LIBCPP_TEMPLATE_VIS time_point
+{
+ static_assert(__is_duration<_Duration>::value,
+ "Second template parameter of time_point must be a std::chrono::duration");
+public:
+ typedef _Clock clock;
+ typedef _Duration duration;
+ typedef typename duration::rep rep;
+ typedef typename duration::period period;
+private:
+ duration __d_;
+
+public:
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 time_point() : __d_(duration::zero()) {}
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 explicit time_point(const duration& __d) : __d_(__d) {}
+
+ // conversions
+ template <class _Duration2>
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11
+ time_point(const time_point<clock, _Duration2>& t,
+ typename enable_if
+ <
+ is_convertible<_Duration2, duration>::value
+ >::type* = nullptr)
+ : __d_(t.time_since_epoch()) {}
+
+ // observer
+
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 duration time_since_epoch() const {return __d_;}
+
+ // arithmetic
+
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 time_point& operator+=(const duration& __d) {__d_ += __d; return *this;}
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 time_point& operator-=(const duration& __d) {__d_ -= __d; return *this;}
+
+ // special values
+
+ _LIBCPP_INLINE_VISIBILITY static _LIBCPP_CONSTEXPR time_point min() _NOEXCEPT {return time_point(duration::min());}
+ _LIBCPP_INLINE_VISIBILITY static _LIBCPP_CONSTEXPR time_point max() _NOEXCEPT {return time_point(duration::max());}
+};
+
+} // namespace chrono
+
+template <class _Clock, class _Duration1, class _Duration2>
+struct _LIBCPP_TEMPLATE_VIS common_type<chrono::time_point<_Clock, _Duration1>,
+ chrono::time_point<_Clock, _Duration2> >
+{
+ typedef chrono::time_point<_Clock, typename common_type<_Duration1, _Duration2>::type> type;
+};
+
+namespace chrono {
+
+template <class _ToDuration, class _Clock, class _Duration>
+inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11
+time_point<_Clock, _ToDuration>
+time_point_cast(const time_point<_Clock, _Duration>& __t)
+{
+ return time_point<_Clock, _ToDuration>(chrono::duration_cast<_ToDuration>(__t.time_since_epoch()));
+}
+
+#if _LIBCPP_STD_VER > 14
+template <class _ToDuration, class _Clock, class _Duration>
+inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
+typename enable_if
+<
+ __is_duration<_ToDuration>::value,
+ time_point<_Clock, _ToDuration>
+>::type
+floor(const time_point<_Clock, _Duration>& __t)
+{
+ return time_point<_Clock, _ToDuration>{floor<_ToDuration>(__t.time_since_epoch())};
+}
+
+template <class _ToDuration, class _Clock, class _Duration>
+inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
+typename enable_if
+<
+ __is_duration<_ToDuration>::value,
+ time_point<_Clock, _ToDuration>
+>::type
+ceil(const time_point<_Clock, _Duration>& __t)
+{
+ return time_point<_Clock, _ToDuration>{ceil<_ToDuration>(__t.time_since_epoch())};
+}
+
+template <class _ToDuration, class _Clock, class _Duration>
+inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
+typename enable_if
+<
+ __is_duration<_ToDuration>::value,
+ time_point<_Clock, _ToDuration>
+>::type
+round(const time_point<_Clock, _Duration>& __t)
+{
+ return time_point<_Clock, _ToDuration>{round<_ToDuration>(__t.time_since_epoch())};
+}
+
+template <class _Rep, class _Period>
+inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
+typename enable_if
+<
+ numeric_limits<_Rep>::is_signed,
+ duration<_Rep, _Period>
+>::type
+abs(duration<_Rep, _Period> __d)
+{
+ return __d >= __d.zero() ? +__d : -__d;
+}
+#endif // _LIBCPP_STD_VER > 14
+
+// time_point ==
+
+template <class _Clock, class _Duration1, class _Duration2>
+inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11
+bool
+operator==(const time_point<_Clock, _Duration1>& __lhs, const time_point<_Clock, _Duration2>& __rhs)
+{
+ return __lhs.time_since_epoch() == __rhs.time_since_epoch();
+}
+
+// time_point !=
+
+template <class _Clock, class _Duration1, class _Duration2>
+inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11
+bool
+operator!=(const time_point<_Clock, _Duration1>& __lhs, const time_point<_Clock, _Duration2>& __rhs)
+{
+ return !(__lhs == __rhs);
+}
+
+// time_point <
+
+template <class _Clock, class _Duration1, class _Duration2>
+inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11
+bool
+operator<(const time_point<_Clock, _Duration1>& __lhs, const time_point<_Clock, _Duration2>& __rhs)
+{
+ return __lhs.time_since_epoch() < __rhs.time_since_epoch();
+}
+
+// time_point >
+
+template <class _Clock, class _Duration1, class _Duration2>
+inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11
+bool
+operator>(const time_point<_Clock, _Duration1>& __lhs, const time_point<_Clock, _Duration2>& __rhs)
+{
+ return __rhs < __lhs;
+}
+
+// time_point <=
+
+template <class _Clock, class _Duration1, class _Duration2>
+inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11
+bool
+operator<=(const time_point<_Clock, _Duration1>& __lhs, const time_point<_Clock, _Duration2>& __rhs)
+{
+ return !(__rhs < __lhs);
+}
+
+// time_point >=
+
+template <class _Clock, class _Duration1, class _Duration2>
+inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11
+bool
+operator>=(const time_point<_Clock, _Duration1>& __lhs, const time_point<_Clock, _Duration2>& __rhs)
+{
+ return !(__lhs < __rhs);
+}
+
+// time_point operator+(time_point x, duration y);
+
+template <class _Clock, class _Duration1, class _Rep2, class _Period2>
+inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11
+time_point<_Clock, typename common_type<_Duration1, duration<_Rep2, _Period2> >::type>
+operator+(const time_point<_Clock, _Duration1>& __lhs, const duration<_Rep2, _Period2>& __rhs)
+{
+ typedef time_point<_Clock, typename common_type<_Duration1, duration<_Rep2, _Period2> >::type> _Tr;
+ return _Tr (__lhs.time_since_epoch() + __rhs);
+}
+
+// time_point operator+(duration x, time_point y);
+
+template <class _Rep1, class _Period1, class _Clock, class _Duration2>
+inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11
+time_point<_Clock, typename common_type<duration<_Rep1, _Period1>, _Duration2>::type>
+operator+(const duration<_Rep1, _Period1>& __lhs, const time_point<_Clock, _Duration2>& __rhs)
+{
+ return __rhs + __lhs;
+}
+
+// time_point operator-(time_point x, duration y);
+
+template <class _Clock, class _Duration1, class _Rep2, class _Period2>
+inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11
+time_point<_Clock, typename common_type<_Duration1, duration<_Rep2, _Period2> >::type>
+operator-(const time_point<_Clock, _Duration1>& __lhs, const duration<_Rep2, _Period2>& __rhs)
+{
+ typedef time_point<_Clock, typename common_type<_Duration1, duration<_Rep2, _Period2> >::type> _Ret;
+ return _Ret(__lhs.time_since_epoch() - __rhs);
+}
+
+// duration operator-(time_point x, time_point y);
+
+template <class _Clock, class _Duration1, class _Duration2>
+inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11
+typename common_type<_Duration1, _Duration2>::type
+operator-(const time_point<_Clock, _Duration1>& __lhs, const time_point<_Clock, _Duration2>& __rhs)
+{
+ return __lhs.time_since_epoch() - __rhs.time_since_epoch();
+}
+
+} // namespace chrono
+
+_LIBCPP_END_NAMESPACE_STD
+
+_LIBCPP_POP_MACROS
+
+#endif // _LIBCPP___CHRONO_TIME_POINT_H
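
For reference, a sketch of the time_point arithmetic and rounding helpers defined above (floor for time_points requires C++17):

    #include <chrono>
    #include <iostream>

    int main() {
        using namespace std::chrono;
        auto start    = system_clock::now();
        auto deadline = start + minutes(5);                  // time_point + duration
        auto left     = deadline - system_clock::now();      // time_point - time_point -> duration
        std::cout << duration_cast<seconds>(left).count() << " s remaining\n";
        auto coarse = floor<seconds>(deadline);              // round a time_point down
        (void)coarse;
    }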
diff --git a/contrib/llvm-project/libcxx/include/__compare/compare_three_way.h b/contrib/llvm-project/libcxx/include/__compare/compare_three_way.h
index 3edddf1a1c94..d7f339eda992 100644
--- a/contrib/llvm-project/libcxx/include/__compare/compare_three_way.h
+++ b/contrib/llvm-project/libcxx/include/__compare/compare_three_way.h
@@ -10,8 +10,8 @@
#ifndef _LIBCPP___COMPARE_COMPARE_THREE_WAY_H
#define _LIBCPP___COMPARE_COMPARE_THREE_WAY_H
-#include <__config>
#include <__compare/three_way_comparable.h>
+#include <__config>
#include <__utility/forward.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__compare/synth_three_way.h b/contrib/llvm-project/libcxx/include/__compare/synth_three_way.h
index 3d8e738816dd..0f302c0fa11f 100644
--- a/contrib/llvm-project/libcxx/include/__compare/synth_three_way.h
+++ b/contrib/llvm-project/libcxx/include/__compare/synth_three_way.h
@@ -9,10 +9,10 @@
#ifndef _LIBCPP___COMPARE_SYNTH_THREE_WAY_H
#define _LIBCPP___COMPARE_SYNTH_THREE_WAY_H
-#include <__config>
#include <__compare/ordering.h>
#include <__compare/three_way_comparable.h>
#include <__concepts/boolean_testable.h>
+#include <__config>
#include <__utility/declval.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__concepts/class_or_enum.h b/contrib/llvm-project/libcxx/include/__concepts/class_or_enum.h
index 43c7636d9c81..aa8606a21929 100644
--- a/contrib/llvm-project/libcxx/include/__concepts/class_or_enum.h
+++ b/contrib/llvm-project/libcxx/include/__concepts/class_or_enum.h
@@ -25,6 +25,10 @@ _LIBCPP_BEGIN_NAMESPACE_STD
template<class _Tp>
concept __class_or_enum = is_class_v<_Tp> || is_union_v<_Tp> || is_enum_v<_Tp>;
+// Work around Clang bug https://llvm.org/PR52970
+template<class _Tp>
+concept __workaround_52970 = is_class_v<__uncvref_t<_Tp>> || is_union_v<__uncvref_t<_Tp>>;
+
#endif // _LIBCPP_STD_VER > 17 && !defined(_LIBCPP_HAS_NO_CONCEPTS)
_LIBCPP_END_NAMESPACE_STD
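
A standalone restatement of the class-or-enum idea above using public traits (C++20); the names below are illustrative only, not libc++ internals.

    #include <type_traits>

    template <class _Tp>
    inline constexpr bool class_or_enum_v =
        std::is_class_v<_Tp> || std::is_union_v<_Tp> || std::is_enum_v<_Tp>;

    struct S {};
    enum class E {};
    static_assert(class_or_enum_v<S> && class_or_enum_v<E>);
    static_assert(!class_or_enum_v<int>);
    // The workaround concept additionally strips cv- and reference qualifiers,
    // so a const S& argument still counts as a class type:
    static_assert(std::is_class_v<std::remove_cvref_t<const S&>>);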
diff --git a/contrib/llvm-project/libcxx/include/__concepts/convertible_to.h b/contrib/llvm-project/libcxx/include/__concepts/convertible_to.h
index ec68967106d5..795b0bd7494c 100644
--- a/contrib/llvm-project/libcxx/include/__concepts/convertible_to.h
+++ b/contrib/llvm-project/libcxx/include/__concepts/convertible_to.h
@@ -10,6 +10,7 @@
#define _LIBCPP___CONCEPTS_CONVERTIBLE_TO_H
#include <__config>
+#include <__utility/declval.h>
#include <type_traits>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
@@ -25,8 +26,8 @@ _LIBCPP_BEGIN_NAMESPACE_STD
template<class _From, class _To>
concept convertible_to =
is_convertible_v<_From, _To> &&
- requires (add_rvalue_reference_t<_From> (&__f)()) {
- static_cast<_To>(__f());
+ requires {
+ static_cast<_To>(declval<_From>());
};
#endif // _LIBCPP_STD_VER > 17 && !defined(_LIBCPP_HAS_NO_CONCEPTS)
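
A quick illustration of the convertible_to semantics this hunk reworks: both the implicit conversion (is_convertible_v) and the explicit static_cast must succeed, so a type with only an explicit conversion operator does not model the concept.

    #include <concepts>

    static_assert(std::convertible_to<int, double>);
    static_assert(!std::convertible_to<void*, int>);

    struct OnlyExplicit { explicit operator int() const { return 0; } };
    static_assert(!std::convertible_to<OnlyExplicit, int>);   // no implicit conversion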
diff --git a/contrib/llvm-project/libcxx/include/__config b/contrib/llvm-project/libcxx/include/__config
index 720e12eac0dd..b99cdc38dc9f 100644
--- a/contrib/llvm-project/libcxx/include/__config
+++ b/contrib/llvm-project/libcxx/include/__config
@@ -107,6 +107,15 @@
# define _LIBCPP_ABI_ENABLE_UNIQUE_PTR_TRIVIAL_ABI
// Enable clang::trivial_abi on std::shared_ptr and std::weak_ptr
# define _LIBCPP_ABI_ENABLE_SHARED_PTR_TRIVIAL_ABI
+// std::random_device holds some state when it uses an implementation that gets
+// entropy from a file (see _LIBCPP_USING_DEV_RANDOM). When switching from this
+// implementation to another one on a platform that has already shipped
+// std::random_device, one needs to retain the same object layout to remain ABI
+// compatible. This switch removes these workarounds for platforms that don't care
+// about ABI compatibility.
+# define _LIBCPP_ABI_NO_RANDOM_DEVICE_COMPATIBILITY_LAYOUT
+// Remove basic_string common base
+# define _LIBCPP_ABI_NO_BASIC_STRING_BASE_CLASS
#elif _LIBCPP_ABI_VERSION == 1
# if !defined(_LIBCPP_OBJECT_FORMAT_COFF)
// Enable compiling copies of now inline methods into the dylib to support
@@ -250,6 +259,10 @@
# endif // defined(__GLIBC_PREREQ)
#endif // defined(__linux__)
+#if defined(__MVS__)
+# include <features.h> // for __NATIVE_ASCII_F
+#endif
+
#ifdef __LITTLE_ENDIAN__
# if __LITTLE_ENDIAN__
# define _LIBCPP_LITTLE_ENDIAN
@@ -354,6 +367,12 @@
// When this option is used, the token passed to `std::random_device`'s
// constructor *must* be "/dev/urandom" -- anything else is an error.
//
+// _LIBCPP_USING_FUCHSIA_CPRNG
+// Use Fuchsia's zx_cprng_draw() system call, which is specified to
+// deliver high-quality entropy and cannot fail.
+// When this option is used, the token passed to `std::random_device`'s
+// constructor *must* be "/dev/urandom" -- anything else is an error.
+//
// _LIBCPP_USING_NACL_RANDOM
// NaCl's sandbox (which PNaCl also runs in) doesn't allow filesystem access,
// including accesses to the special files under `/dev`. This implementation
@@ -365,10 +384,12 @@
// Use rand_s(), for use on Windows.
// When this option is used, the token passed to `std::random_device`'s
// constructor *must* be "/dev/urandom" -- anything else is an error.
-#if defined(__OpenBSD__)
+#if defined(__OpenBSD__) || defined(__APPLE__)
# define _LIBCPP_USING_ARC4_RANDOM
-#elif defined(__Fuchsia__) || defined(__wasi__)
+#elif defined(__wasi__)
# define _LIBCPP_USING_GETENTROPY
+#elif defined(__Fuchsia__)
+# define _LIBCPP_USING_FUCHSIA_CPRNG
#elif defined(__native_client__)
# define _LIBCPP_USING_NACL_RANDOM
#elif defined(_LIBCPP_WIN32API)
@@ -1204,9 +1225,9 @@ extern "C" _LIBCPP_FUNC_VIS void __sanitizer_annotate_contiguous_container(
# define _LIBCPP_C_HAS_NO_GETS
#endif
-#if defined(__BIONIC__) || defined(__NuttX__) || \
- defined(__Fuchsia__) || defined(__wasi__) || defined(_LIBCPP_HAS_MUSL_LIBC) || \
- defined(__MVS__) || defined(__OpenBSD__)
+#if defined(__BIONIC__) || defined(__NuttX__) || \
+ defined(__Fuchsia__) || defined(__wasi__) || \
+ defined(_LIBCPP_HAS_MUSL_LIBC) || defined(__OpenBSD__)
#define _LIBCPP_PROVIDES_DEFAULT_RUNE_TABLE
#endif
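
A usage sketch relevant to the entropy-source selection above; per the comments in this hunk, on the arc4random/getentropy/zx_cprng_draw backends the only accepted explicit token is "/dev/urandom".

    #include <random>

    int main() {
        std::random_device rd;                 // backend chosen by the _LIBCPP_USING_* macro above
        // Any explicit token other than "/dev/urandom" throws on those backends.
        std::random_device rd2("/dev/urandom");
        std::mt19937 gen(rd());
        return static_cast<int>(gen() & 1);
    }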
diff --git a/contrib/llvm-project/libcxx/include/__coroutine/noop_coroutine_handle.h b/contrib/llvm-project/libcxx/include/__coroutine/noop_coroutine_handle.h
index 9dbf21aac5e6..a29e202f4e4f 100644
--- a/contrib/llvm-project/libcxx/include/__coroutine/noop_coroutine_handle.h
+++ b/contrib/llvm-project/libcxx/include/__coroutine/noop_coroutine_handle.h
@@ -20,7 +20,8 @@
_LIBCPP_BEGIN_NAMESPACE_STD
-#if __has_builtin(__builtin_coro_noop)
+#if __has_builtin(__builtin_coro_noop) || defined(_LIBCPP_COMPILER_GCC)
+
// [coroutine.noop]
// [coroutine.promise.noop]
struct noop_coroutine_promise {};
@@ -64,20 +65,45 @@ private:
_LIBCPP_HIDE_FROM_ABI
friend coroutine_handle<noop_coroutine_promise> noop_coroutine() noexcept;
+#if __has_builtin(__builtin_coro_noop)
_LIBCPP_HIDE_FROM_ABI coroutine_handle() noexcept {
this->__handle_ = __builtin_coro_noop();
}
void* __handle_ = nullptr;
+
+#elif defined(_LIBCPP_COMPILER_GCC)
+ // GCC doesn't implement __builtin_coro_noop().
+ // Construct the coroutine frame manually instead.
+ struct __noop_coroutine_frame_ty_ {
+ static void __dummy_resume_destroy_func() { }
+
+ void (*__resume_)() = __dummy_resume_destroy_func;
+ void (*__destroy_)() = __dummy_resume_destroy_func;
+ struct noop_coroutine_promise __promise_;
+ };
+
+ static __noop_coroutine_frame_ty_ __noop_coroutine_frame_;
+
+ void* __handle_ = &__noop_coroutine_frame_;
+
+ _LIBCPP_HIDE_FROM_ABI coroutine_handle() noexcept = default;
+
+#endif // __has_builtin(__builtin_coro_noop)
};
using noop_coroutine_handle = coroutine_handle<noop_coroutine_promise>;
+#if defined(_LIBCPP_COMPILER_GCC)
+inline noop_coroutine_handle::__noop_coroutine_frame_ty_
+ noop_coroutine_handle::__noop_coroutine_frame_{};
+#endif
+
// [coroutine.noop.coroutine]
inline _LIBCPP_HIDE_FROM_ABI
noop_coroutine_handle noop_coroutine() noexcept { return noop_coroutine_handle(); }
-#endif // __has_builtin(__builtin_coro_noop)
+#endif // __has_builtin(__builtin_coro_noop) || defined(_LIBCPP_COMPILER_GCC)
_LIBCPP_END_NAMESPACE_STD
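
A sketch of the usual client of noop_coroutine(): an await_suspend that uses symmetric transfer but sometimes has no continuation to resume. The awaiter type below is made up for illustration (C++20 coroutines).

    #include <coroutine>

    struct maybe_transfer {
        std::coroutine_handle<> next{};        // empty when nobody is waiting
        bool await_ready() const noexcept { return false; }
        std::coroutine_handle<> await_suspend(std::coroutine_handle<>) const noexcept {
            if (next)
                return next;                   // symmetric transfer to the continuation
            return std::noop_coroutine();      // resuming this handle does nothing, by design
        }
        void await_resume() const noexcept {}
    };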
diff --git a/contrib/llvm-project/libcxx/include/__debug b/contrib/llvm-project/libcxx/include/__debug
index 42f6cef4c07f..a1e21a703224 100644
--- a/contrib/llvm-project/libcxx/include/__debug
+++ b/contrib/llvm-project/libcxx/include/__debug
@@ -12,6 +12,7 @@
#include <__config>
#include <iosfwd>
+#include <type_traits>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
@@ -22,9 +23,9 @@
#endif
#if _LIBCPP_DEBUG_LEVEL >= 1 || defined(_LIBCPP_BUILDING_LIBRARY)
-# include <cstdlib>
-# include <cstdio>
# include <cstddef>
+# include <cstdio>
+# include <cstdlib>
#endif
#if _LIBCPP_DEBUG_LEVEL == 0
@@ -268,6 +269,26 @@ _LIBCPP_FUNC_VIS const __libcpp_db* __get_const_db();
#endif // _LIBCPP_DEBUG_LEVEL == 2 || defined(_LIBCPP_BUILDING_LIBRARY)
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX11 inline void __debug_db_insert_c(_Tp* __c) {
+#if _LIBCPP_DEBUG_LEVEL == 2
+ if (!__libcpp_is_constant_evaluated())
+ __get_db()->__insert_c(__c);
+#else
+ (void)(__c);
+#endif
+}
+
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX11 inline void __debug_db_insert_i(_Tp* __i) {
+#if _LIBCPP_DEBUG_LEVEL == 2
+ if (!__libcpp_is_constant_evaluated())
+ __get_db()->__insert_i(__i);
+#else
+ (void)(__i);
+#endif
+}
+
_LIBCPP_END_NAMESPACE_STD
#endif // _LIBCPP_DEBUG_H
diff --git a/contrib/llvm-project/libcxx/include/__filesystem/directory_entry.h b/contrib/llvm-project/libcxx/include/__filesystem/directory_entry.h
index 9efe19465428..91dd1a214588 100644
--- a/contrib/llvm-project/libcxx/include/__filesystem/directory_entry.h
+++ b/contrib/llvm-project/libcxx/include/__filesystem/directory_entry.h
@@ -12,17 +12,18 @@
#include <__availability>
#include <__config>
-#include <__filesystem/path.h>
-#include <__filesystem/file_time_type.h>
-#include <__filesystem/filesystem_error.h>
+#include <__errc>
#include <__filesystem/file_status.h>
+#include <__filesystem/file_time_type.h>
#include <__filesystem/file_type.h>
+#include <__filesystem/filesystem_error.h>
#include <__filesystem/operations.h>
+#include <__filesystem/path.h>
#include <__filesystem/perms.h>
-#include <__errc>
#include <chrono>
#include <cstdint>
#include <cstdlib>
+#include <iosfwd>
#include <system_error>
_LIBCPP_PUSH_MACROS
@@ -239,6 +240,12 @@ public:
return __p_ >= __rhs.__p_;
}
+ template <class _CharT, class _Traits>
+ _LIBCPP_INLINE_VISIBILITY
+ friend basic_ostream<_CharT, _Traits>& operator<<(basic_ostream<_CharT, _Traits>& __os, const directory_entry& __d) {
+ return __os << __d.path();
+ }
+
private:
friend class directory_iterator;
friend class recursive_directory_iterator;
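
A small sketch exercising the operator<< added above: streaming a directory_entry forwards to its path, which prints quoted (C++17).

    #include <filesystem>
    #include <iostream>

    int main() {
        for (const auto& entry : std::filesystem::directory_iterator(".")) {
            std::cout << entry << '\n';        // equivalent to streaming entry.path()
        }
    }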
diff --git a/contrib/llvm-project/libcxx/include/__filesystem/directory_iterator.h b/contrib/llvm-project/libcxx/include/__filesystem/directory_iterator.h
index be958e0eb8de..7ea66bbc7ff0 100644
--- a/contrib/llvm-project/libcxx/include/__filesystem/directory_iterator.h
+++ b/contrib/llvm-project/libcxx/include/__filesystem/directory_iterator.h
@@ -12,12 +12,12 @@
#include <__availability>
#include <__config>
+#include <__debug>
#include <__filesystem/directory_entry.h>
#include <__filesystem/directory_options.h>
#include <__filesystem/path.h>
#include <__iterator/iterator_traits.h>
#include <__memory/shared_ptr.h>
-#include <__debug>
#include <__ranges/enable_borrowed_range.h>
#include <__ranges/enable_view.h>
#include <cstddef>
diff --git a/contrib/llvm-project/libcxx/include/__filesystem/filesystem_error.h b/contrib/llvm-project/libcxx/include/__filesystem/filesystem_error.h
index a8c6977b48e9..0b1874b0e50e 100644
--- a/contrib/llvm-project/libcxx/include/__filesystem/filesystem_error.h
+++ b/contrib/llvm-project/libcxx/include/__filesystem/filesystem_error.h
@@ -14,9 +14,9 @@
#include <__config>
#include <__filesystem/path.h>
#include <__memory/shared_ptr.h>
-#include <system_error>
#include <iosfwd>
#include <new>
+#include <system_error>
#include <type_traits>
#ifndef _LIBCPP_CXX03_LANG
diff --git a/contrib/llvm-project/libcxx/include/__filesystem/operations.h b/contrib/llvm-project/libcxx/include/__filesystem/operations.h
index 19d6c2d437f9..918b4f9362e6 100644
--- a/contrib/llvm-project/libcxx/include/__filesystem/operations.h
+++ b/contrib/llvm-project/libcxx/include/__filesystem/operations.h
@@ -30,430 +30,118 @@ _LIBCPP_BEGIN_NAMESPACE_FILESYSTEM
_LIBCPP_AVAILABILITY_FILESYSTEM_PUSH
-_LIBCPP_FUNC_VIS
-path __absolute(const path&, error_code* __ec = nullptr);
-_LIBCPP_FUNC_VIS
-path __canonical(const path&, error_code* __ec = nullptr);
-_LIBCPP_FUNC_VIS
-bool __copy_file(const path& __from, const path& __to, copy_options __opt,
- error_code* __ec = nullptr);
-_LIBCPP_FUNC_VIS
-void __copy_symlink(const path& __existing_symlink, const path& __new_symlink,
- error_code* __ec = nullptr);
-_LIBCPP_FUNC_VIS
-void __copy(const path& __from, const path& __to, copy_options __opt,
- error_code* __ec = nullptr);
-_LIBCPP_FUNC_VIS
-bool __create_directories(const path& p, error_code* ec = nullptr);
-_LIBCPP_FUNC_VIS
-void __create_directory_symlink(const path& __to, const path& __new_symlink,
- error_code* __ec = nullptr);
-_LIBCPP_FUNC_VIS
-bool __create_directory(const path& p, error_code* ec = nullptr);
-_LIBCPP_FUNC_VIS
-bool __create_directory(const path& p, const path& attributes,
- error_code* ec = nullptr);
-_LIBCPP_FUNC_VIS
-void __create_hard_link(const path& __to, const path& __new_hard_link,
- error_code* __ec = nullptr);
-_LIBCPP_FUNC_VIS
-void __create_symlink(const path& __to, const path& __new_symlink,
- error_code* __ec = nullptr);
-_LIBCPP_FUNC_VIS
-path __current_path(error_code* __ec = nullptr);
-_LIBCPP_FUNC_VIS
-void __current_path(const path&, error_code* __ec = nullptr);
-_LIBCPP_FUNC_VIS
-bool __equivalent(const path&, const path&, error_code* __ec = nullptr);
-_LIBCPP_FUNC_VIS
-file_status __status(const path&, error_code* __ec = nullptr);
-_LIBCPP_FUNC_VIS
-uintmax_t __file_size(const path&, error_code* __ec = nullptr);
-_LIBCPP_FUNC_VIS
-uintmax_t __hard_link_count(const path&, error_code* __ec = nullptr);
-_LIBCPP_FUNC_VIS
-file_status __symlink_status(const path&, error_code* __ec = nullptr);
-_LIBCPP_FUNC_VIS
-file_time_type __last_write_time(const path& p, error_code* ec = nullptr);
-_LIBCPP_FUNC_VIS
-void __last_write_time(const path& p, file_time_type new_time,
- error_code* ec = nullptr);
-_LIBCPP_FUNC_VIS
-path __weakly_canonical(path const& __p, error_code* __ec = nullptr);
-_LIBCPP_FUNC_VIS
-path __read_symlink(const path& p, error_code* ec = nullptr);
-_LIBCPP_FUNC_VIS
-uintmax_t __remove_all(const path& p, error_code* ec = nullptr);
-_LIBCPP_FUNC_VIS
-bool __remove(const path& p, error_code* ec = nullptr);
-_LIBCPP_FUNC_VIS
-void __rename(const path& from, const path& to, error_code* ec = nullptr);
-_LIBCPP_FUNC_VIS
-void __resize_file(const path& p, uintmax_t size, error_code* ec = nullptr);
-_LIBCPP_FUNC_VIS
-path __temp_directory_path(error_code* __ec = nullptr);
-
-inline _LIBCPP_INLINE_VISIBILITY path absolute(const path& __p) {
- return __absolute(__p);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY path absolute(const path& __p,
- error_code& __ec) {
- return __absolute(__p, &__ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY path canonical(const path& __p) {
- return __canonical(__p);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY path canonical(const path& __p,
- error_code& __ec) {
- return __canonical(__p, &__ec);
-}
-
-
-inline _LIBCPP_INLINE_VISIBILITY bool copy_file(const path& __from,
- const path& __to) {
- return __copy_file(__from, __to, copy_options::none);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool
-copy_file(const path& __from, const path& __to, error_code& __ec) {
- return __copy_file(__from, __to, copy_options::none, &__ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool
-copy_file(const path& __from, const path& __to, copy_options __opt) {
- return __copy_file(__from, __to, __opt);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool copy_file(const path& __from,
- const path& __to,
- copy_options __opt,
- error_code& __ec) {
- return __copy_file(__from, __to, __opt, &__ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY void copy_symlink(const path& __existing,
- const path& __new) {
- __copy_symlink(__existing, __new);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY void
-copy_symlink(const path& __ext, const path& __new, error_code& __ec) noexcept {
- __copy_symlink(__ext, __new, &__ec);
-}
-
-
-inline _LIBCPP_INLINE_VISIBILITY void copy(const path& __from,
- const path& __to) {
- __copy(__from, __to, copy_options::none);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY void copy(const path& __from, const path& __to,
- error_code& __ec) {
- __copy(__from, __to, copy_options::none, &__ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY void copy(const path& __from, const path& __to,
- copy_options __opt) {
- __copy(__from, __to, __opt);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY void copy(const path& __from, const path& __to,
- copy_options __opt,
- error_code& __ec) {
- __copy(__from, __to, __opt, &__ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool create_directories(const path& __p) {
- return __create_directories(__p);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool create_directories(const path& __p,
- error_code& __ec) {
- return __create_directories(__p, &__ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY void
-create_directory_symlink(const path& __to, const path& __new) {
- __create_directory_symlink(__to, __new);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY void
-create_directory_symlink(const path& __to, const path& __new,
- error_code& __ec) noexcept {
- __create_directory_symlink(__to, __new, &__ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool create_directory(const path& __p) {
- return __create_directory(__p);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool
-create_directory(const path& __p, error_code& __ec) noexcept {
- return __create_directory(__p, &__ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool create_directory(const path& __p,
- const path& __attrs) {
- return __create_directory(__p, __attrs);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool
-create_directory(const path& __p, const path& __attrs,
- error_code& __ec) noexcept {
- return __create_directory(__p, __attrs, &__ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY void create_hard_link(const path& __to,
- const path& __new) {
- __create_hard_link(__to, __new);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY void
-create_hard_link(const path& __to, const path& __new,
- error_code& __ec) noexcept {
- __create_hard_link(__to, __new, &__ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY void create_symlink(const path& __to,
- const path& __new) {
- __create_symlink(__to, __new);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY void
-create_symlink(const path& __to, const path& __new, error_code& __ec) noexcept {
- return __create_symlink(__to, __new, &__ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY path current_path() {
- return __current_path();
-}
-
-inline _LIBCPP_INLINE_VISIBILITY path current_path(error_code& __ec) {
- return __current_path(&__ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY void current_path(const path& __p) {
- __current_path(__p);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY void current_path(const path& __p,
- error_code& __ec) noexcept {
- __current_path(__p, &__ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool equivalent(const path& __p1,
- const path& __p2) {
- return __equivalent(__p1, __p2);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool
-equivalent(const path& __p1, const path& __p2, error_code& __ec) noexcept {
- return __equivalent(__p1, __p2, &__ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool status_known(file_status __s) noexcept {
- return __s.type() != file_type::none;
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool exists(file_status __s) noexcept {
- return status_known(__s) && __s.type() != file_type::not_found;
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool exists(const path& __p) {
- return exists(__status(__p));
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool exists(const path& __p,
- error_code& __ec) noexcept {
+_LIBCPP_FUNC_VIS path __absolute(const path&, error_code* __ec = nullptr);
+_LIBCPP_FUNC_VIS path __canonical(const path&, error_code* __ec = nullptr);
+_LIBCPP_FUNC_VIS bool __copy_file(const path& __from, const path& __to, copy_options __opt, error_code* __ec = nullptr);
+_LIBCPP_FUNC_VIS void __copy_symlink(const path& __existing_symlink, const path& __new_symlink, error_code* __ec = nullptr);
+_LIBCPP_FUNC_VIS void __copy(const path& __from, const path& __to, copy_options __opt, error_code* __ec = nullptr);
+_LIBCPP_FUNC_VIS bool __create_directories(const path& p, error_code* ec = nullptr);
+_LIBCPP_FUNC_VIS void __create_directory_symlink(const path& __to, const path& __new_symlink, error_code* __ec = nullptr);
+_LIBCPP_FUNC_VIS bool __create_directory(const path& p, error_code* ec = nullptr);
+_LIBCPP_FUNC_VIS bool __create_directory(const path& p, const path& attributes, error_code* ec = nullptr);
+_LIBCPP_FUNC_VIS void __create_hard_link(const path& __to, const path& __new_hard_link, error_code* __ec = nullptr);
+_LIBCPP_FUNC_VIS void __create_symlink(const path& __to, const path& __new_symlink, error_code* __ec = nullptr);
+_LIBCPP_FUNC_VIS path __current_path(error_code* __ec = nullptr);
+_LIBCPP_FUNC_VIS void __current_path(const path&, error_code* __ec = nullptr);
+_LIBCPP_FUNC_VIS bool __equivalent(const path&, const path&, error_code* __ec = nullptr);
+_LIBCPP_FUNC_VIS file_status __status(const path&, error_code* __ec = nullptr);
+_LIBCPP_FUNC_VIS uintmax_t __file_size(const path&, error_code* __ec = nullptr);
+_LIBCPP_FUNC_VIS uintmax_t __hard_link_count(const path&, error_code* __ec = nullptr);
+_LIBCPP_FUNC_VIS file_status __symlink_status(const path&, error_code* __ec = nullptr);
+_LIBCPP_FUNC_VIS file_time_type __last_write_time(const path& p, error_code* ec = nullptr);
+_LIBCPP_FUNC_VIS void __last_write_time(const path& p, file_time_type new_time, error_code* ec = nullptr);
+_LIBCPP_FUNC_VIS path __weakly_canonical(path const& __p, error_code* __ec = nullptr);
+_LIBCPP_FUNC_VIS path __read_symlink(const path& p, error_code* ec = nullptr);
+_LIBCPP_FUNC_VIS uintmax_t __remove_all(const path& p, error_code* ec = nullptr);
+_LIBCPP_FUNC_VIS bool __remove(const path& p, error_code* ec = nullptr);
+_LIBCPP_FUNC_VIS void __rename(const path& from, const path& to, error_code* ec = nullptr);
+_LIBCPP_FUNC_VIS void __resize_file(const path& p, uintmax_t size, error_code* ec = nullptr);
+_LIBCPP_FUNC_VIS path __temp_directory_path(error_code* __ec = nullptr);
+
+inline _LIBCPP_INLINE_VISIBILITY path absolute(const path& __p) { return __absolute(__p); }
+inline _LIBCPP_INLINE_VISIBILITY path absolute(const path& __p, error_code& __ec) { return __absolute(__p, &__ec); }
+inline _LIBCPP_INLINE_VISIBILITY path canonical(const path& __p) { return __canonical(__p); }
+inline _LIBCPP_INLINE_VISIBILITY path canonical(const path& __p, error_code& __ec) { return __canonical(__p, &__ec); }
+inline _LIBCPP_INLINE_VISIBILITY bool copy_file(const path& __from, const path& __to) { return __copy_file(__from, __to, copy_options::none); }
+inline _LIBCPP_INLINE_VISIBILITY bool copy_file(const path& __from, const path& __to, error_code& __ec) { return __copy_file(__from, __to, copy_options::none, &__ec); }
+inline _LIBCPP_INLINE_VISIBILITY bool copy_file(const path& __from, const path& __to, copy_options __opt) { return __copy_file(__from, __to, __opt); }
+inline _LIBCPP_INLINE_VISIBILITY bool copy_file(const path& __from, const path& __to, copy_options __opt, error_code& __ec) { return __copy_file(__from, __to, __opt, &__ec); }
+inline _LIBCPP_INLINE_VISIBILITY void copy_symlink(const path& __from, const path& __to) { __copy_symlink(__from, __to); }
+inline _LIBCPP_INLINE_VISIBILITY void copy_symlink(const path& __from, const path& __to, error_code& __ec) noexcept { __copy_symlink(__from, __to, &__ec); }
+inline _LIBCPP_INLINE_VISIBILITY void copy(const path& __from, const path& __to) { __copy(__from, __to, copy_options::none); }
+inline _LIBCPP_INLINE_VISIBILITY void copy(const path& __from, const path& __to, error_code& __ec) { __copy(__from, __to, copy_options::none, &__ec); }
+inline _LIBCPP_INLINE_VISIBILITY void copy(const path& __from, const path& __to, copy_options __opt) { __copy(__from, __to, __opt); }
+inline _LIBCPP_INLINE_VISIBILITY void copy(const path& __from, const path& __to, copy_options __opt, error_code& __ec) { __copy(__from, __to, __opt, &__ec); }
+inline _LIBCPP_INLINE_VISIBILITY bool create_directories(const path& __p) { return __create_directories(__p); }
+inline _LIBCPP_INLINE_VISIBILITY bool create_directories(const path& __p, error_code& __ec) { return __create_directories(__p, &__ec); }
+inline _LIBCPP_INLINE_VISIBILITY void create_directory_symlink(const path& __target, const path& __link) { __create_directory_symlink(__target, __link); }
+inline _LIBCPP_INLINE_VISIBILITY void create_directory_symlink(const path& __target, const path& __link, error_code& __ec) noexcept { __create_directory_symlink(__target, __link, &__ec); }
+inline _LIBCPP_INLINE_VISIBILITY bool create_directory(const path& __p) { return __create_directory(__p); }
+inline _LIBCPP_INLINE_VISIBILITY bool create_directory(const path& __p, error_code& __ec) noexcept { return __create_directory(__p, &__ec); }
+inline _LIBCPP_INLINE_VISIBILITY bool create_directory(const path& __p, const path& __attrs) { return __create_directory(__p, __attrs); }
+inline _LIBCPP_INLINE_VISIBILITY bool create_directory(const path& __p, const path& __attrs, error_code& __ec) noexcept { return __create_directory(__p, __attrs, &__ec); }
+inline _LIBCPP_INLINE_VISIBILITY void create_hard_link(const path& __target, const path& __link) { __create_hard_link(__target, __link); }
+inline _LIBCPP_INLINE_VISIBILITY void create_hard_link(const path& __target, const path& __link, error_code& __ec) noexcept { __create_hard_link(__target, __link, &__ec); }
+inline _LIBCPP_INLINE_VISIBILITY void create_symlink(const path& __target, const path& __link) { __create_symlink(__target, __link); }
+inline _LIBCPP_INLINE_VISIBILITY void create_symlink(const path& __target, const path& __link, error_code& __ec) noexcept { return __create_symlink(__target, __link, &__ec); }
+inline _LIBCPP_INLINE_VISIBILITY path current_path() { return __current_path(); }
+inline _LIBCPP_INLINE_VISIBILITY path current_path(error_code& __ec) { return __current_path(&__ec); }
+inline _LIBCPP_INLINE_VISIBILITY void current_path(const path& __p) { __current_path(__p); }
+inline _LIBCPP_INLINE_VISIBILITY void current_path(const path& __p, error_code& __ec) noexcept { __current_path(__p, &__ec); }
+inline _LIBCPP_INLINE_VISIBILITY bool equivalent(const path& __p1, const path& __p2) { return __equivalent(__p1, __p2); }
+inline _LIBCPP_INLINE_VISIBILITY bool equivalent(const path& __p1, const path& __p2, error_code& __ec) noexcept { return __equivalent(__p1, __p2, &__ec); }
+inline _LIBCPP_INLINE_VISIBILITY bool status_known(file_status __s) noexcept { return __s.type() != file_type::none; }
+inline _LIBCPP_INLINE_VISIBILITY bool exists(file_status __s) noexcept { return status_known(__s) && __s.type() != file_type::not_found; }
+inline _LIBCPP_INLINE_VISIBILITY bool exists(const path& __p) { return exists(__status(__p)); }
+
+inline _LIBCPP_INLINE_VISIBILITY bool exists(const path& __p, error_code& __ec) noexcept {
auto __s = __status(__p, &__ec);
if (status_known(__s))
__ec.clear();
return exists(__s);
}
-inline _LIBCPP_INLINE_VISIBILITY uintmax_t file_size(const path& __p) {
- return __file_size(__p);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY uintmax_t
-file_size(const path& __p, error_code& __ec) noexcept {
- return __file_size(__p, &__ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY uintmax_t hard_link_count(const path& __p) {
- return __hard_link_count(__p);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY uintmax_t
-hard_link_count(const path& __p, error_code& __ec) noexcept {
- return __hard_link_count(__p, &__ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool is_block_file(file_status __s) noexcept {
- return __s.type() == file_type::block;
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool is_block_file(const path& __p) {
- return is_block_file(__status(__p));
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool is_block_file(const path& __p,
- error_code& __ec) noexcept {
- return is_block_file(__status(__p, &__ec));
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool
-is_character_file(file_status __s) noexcept {
- return __s.type() == file_type::character;
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool is_character_file(const path& __p) {
- return is_character_file(__status(__p));
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool
-is_character_file(const path& __p, error_code& __ec) noexcept {
- return is_character_file(__status(__p, &__ec));
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool is_directory(file_status __s) noexcept {
- return __s.type() == file_type::directory;
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool is_directory(const path& __p) {
- return is_directory(__status(__p));
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool is_directory(const path& __p,
- error_code& __ec) noexcept {
- return is_directory(__status(__p, &__ec));
-}
-
-_LIBCPP_FUNC_VIS
-bool __fs_is_empty(const path& p, error_code* ec = nullptr);
-
-inline _LIBCPP_INLINE_VISIBILITY bool is_empty(const path& __p) {
- return __fs_is_empty(__p);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool is_empty(const path& __p,
- error_code& __ec) {
- return __fs_is_empty(__p, &__ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool is_fifo(file_status __s) noexcept {
- return __s.type() == file_type::fifo;
-}
-inline _LIBCPP_INLINE_VISIBILITY bool is_fifo(const path& __p) {
- return is_fifo(__status(__p));
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool is_fifo(const path& __p,
- error_code& __ec) noexcept {
- return is_fifo(__status(__p, &__ec));
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool
-is_regular_file(file_status __s) noexcept {
- return __s.type() == file_type::regular;
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool is_regular_file(const path& __p) {
- return is_regular_file(__status(__p));
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool
-is_regular_file(const path& __p, error_code& __ec) noexcept {
- return is_regular_file(__status(__p, &__ec));
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool is_symlink(file_status __s) noexcept {
- return __s.type() == file_type::symlink;
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool is_symlink(const path& __p) {
- return is_symlink(__symlink_status(__p));
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool is_symlink(const path& __p,
- error_code& __ec) noexcept {
- return is_symlink(__symlink_status(__p, &__ec));
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool is_other(file_status __s) noexcept {
- return exists(__s) && !is_regular_file(__s) && !is_directory(__s) &&
- !is_symlink(__s);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool is_other(const path& __p) {
- return is_other(__status(__p));
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool is_other(const path& __p,
- error_code& __ec) noexcept {
- return is_other(__status(__p, &__ec));
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool is_socket(file_status __s) noexcept {
- return __s.type() == file_type::socket;
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool is_socket(const path& __p) {
- return is_socket(__status(__p));
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool is_socket(const path& __p,
- error_code& __ec) noexcept {
- return is_socket(__status(__p, &__ec));
-}
-
-inline _LIBCPP_INLINE_VISIBILITY file_time_type
-last_write_time(const path& __p) {
- return __last_write_time(__p);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY file_time_type
-last_write_time(const path& __p, error_code& __ec) noexcept {
- return __last_write_time(__p, &__ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY void last_write_time(const path& __p,
- file_time_type __t) {
- __last_write_time(__p, __t);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY void
-last_write_time(const path& __p, file_time_type __t,
- error_code& __ec) noexcept {
- __last_write_time(__p, __t, &__ec);
-}
-
-_LIBCPP_FUNC_VIS
-void __permissions(const path&, perms, perm_options, error_code* = nullptr);
-
-inline _LIBCPP_INLINE_VISIBILITY void
-permissions(const path& __p, perms __prms,
- perm_options __opts = perm_options::replace) {
- __permissions(__p, __prms, __opts);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY void permissions(const path& __p, perms __prms,
- error_code& __ec) noexcept {
- __permissions(__p, __prms, perm_options::replace, &__ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY void permissions(const path& __p, perms __prms,
- perm_options __opts,
- error_code& __ec) {
- __permissions(__p, __prms, __opts, &__ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY path proximate(const path& __p,
- const path& __base,
- error_code& __ec) {
+inline _LIBCPP_INLINE_VISIBILITY uintmax_t file_size(const path& __p) { return __file_size(__p); }
+inline _LIBCPP_INLINE_VISIBILITY uintmax_t file_size(const path& __p, error_code& __ec) noexcept { return __file_size(__p, &__ec); }
+inline _LIBCPP_INLINE_VISIBILITY uintmax_t hard_link_count(const path& __p) { return __hard_link_count(__p); }
+inline _LIBCPP_INLINE_VISIBILITY uintmax_t hard_link_count(const path& __p, error_code& __ec) noexcept { return __hard_link_count(__p, &__ec); }
+inline _LIBCPP_INLINE_VISIBILITY bool is_block_file(file_status __s) noexcept { return __s.type() == file_type::block; }
+inline _LIBCPP_INLINE_VISIBILITY bool is_block_file(const path& __p) { return is_block_file(__status(__p)); }
+inline _LIBCPP_INLINE_VISIBILITY bool is_block_file(const path& __p, error_code& __ec) noexcept { return is_block_file(__status(__p, &__ec)); }
+inline _LIBCPP_INLINE_VISIBILITY bool is_character_file(file_status __s) noexcept { return __s.type() == file_type::character; }
+inline _LIBCPP_INLINE_VISIBILITY bool is_character_file(const path& __p) { return is_character_file(__status(__p)); }
+inline _LIBCPP_INLINE_VISIBILITY bool is_character_file(const path& __p, error_code& __ec) noexcept { return is_character_file(__status(__p, &__ec)); }
+inline _LIBCPP_INLINE_VISIBILITY bool is_directory(file_status __s) noexcept { return __s.type() == file_type::directory; }
+inline _LIBCPP_INLINE_VISIBILITY bool is_directory(const path& __p) { return is_directory(__status(__p)); }
+inline _LIBCPP_INLINE_VISIBILITY bool is_directory(const path& __p, error_code& __ec) noexcept { return is_directory(__status(__p, &__ec)); }
+_LIBCPP_FUNC_VIS bool __fs_is_empty(const path& p, error_code* ec = nullptr);
+inline _LIBCPP_INLINE_VISIBILITY bool is_empty(const path& __p) { return __fs_is_empty(__p); }
+inline _LIBCPP_INLINE_VISIBILITY bool is_empty(const path& __p, error_code& __ec) { return __fs_is_empty(__p, &__ec); }
+inline _LIBCPP_INLINE_VISIBILITY bool is_fifo(file_status __s) noexcept { return __s.type() == file_type::fifo; }
+inline _LIBCPP_INLINE_VISIBILITY bool is_fifo(const path& __p) { return is_fifo(__status(__p)); }
+inline _LIBCPP_INLINE_VISIBILITY bool is_fifo(const path& __p, error_code& __ec) noexcept { return is_fifo(__status(__p, &__ec)); }
+inline _LIBCPP_INLINE_VISIBILITY bool is_regular_file(file_status __s) noexcept { return __s.type() == file_type::regular; }
+inline _LIBCPP_INLINE_VISIBILITY bool is_regular_file(const path& __p) { return is_regular_file(__status(__p)); }
+inline _LIBCPP_INLINE_VISIBILITY bool is_regular_file(const path& __p, error_code& __ec) noexcept { return is_regular_file(__status(__p, &__ec)); }
+inline _LIBCPP_INLINE_VISIBILITY bool is_symlink(file_status __s) noexcept { return __s.type() == file_type::symlink; }
+inline _LIBCPP_INLINE_VISIBILITY bool is_symlink(const path& __p) { return is_symlink(__symlink_status(__p)); }
+inline _LIBCPP_INLINE_VISIBILITY bool is_symlink(const path& __p, error_code& __ec) noexcept { return is_symlink(__symlink_status(__p, &__ec)); }
+inline _LIBCPP_INLINE_VISIBILITY bool is_other(file_status __s) noexcept { return exists(__s) && !is_regular_file(__s) && !is_directory(__s) && !is_symlink(__s); }
+inline _LIBCPP_INLINE_VISIBILITY bool is_other(const path& __p) { return is_other(__status(__p)); }
+inline _LIBCPP_INLINE_VISIBILITY bool is_other(const path& __p, error_code& __ec) noexcept { return is_other(__status(__p, &__ec)); }
+inline _LIBCPP_INLINE_VISIBILITY bool is_socket(file_status __s) noexcept { return __s.type() == file_type::socket; }
+inline _LIBCPP_INLINE_VISIBILITY bool is_socket(const path& __p) { return is_socket(__status(__p)); }
+inline _LIBCPP_INLINE_VISIBILITY bool is_socket(const path& __p, error_code& __ec) noexcept { return is_socket(__status(__p, &__ec)); }
+inline _LIBCPP_INLINE_VISIBILITY file_time_type last_write_time(const path& __p) { return __last_write_time(__p); }
+inline _LIBCPP_INLINE_VISIBILITY file_time_type last_write_time(const path& __p, error_code& __ec) noexcept { return __last_write_time(__p, &__ec); }
+inline _LIBCPP_INLINE_VISIBILITY void last_write_time(const path& __p, file_time_type __t) { __last_write_time(__p, __t); }
+inline _LIBCPP_INLINE_VISIBILITY void last_write_time(const path& __p, file_time_type __t, error_code& __ec) noexcept { __last_write_time(__p, __t, &__ec); }
+_LIBCPP_FUNC_VIS void __permissions(const path&, perms, perm_options, error_code* = nullptr);
+inline _LIBCPP_INLINE_VISIBILITY void permissions(const path& __p, perms __prms, perm_options __opts = perm_options::replace) { __permissions(__p, __prms, __opts); }
+inline _LIBCPP_INLINE_VISIBILITY void permissions(const path& __p, perms __prms, error_code& __ec) noexcept { __permissions(__p, __prms, perm_options::replace, &__ec); }
+inline _LIBCPP_INLINE_VISIBILITY void permissions(const path& __p, perms __prms, perm_options __opts, error_code& __ec) { __permissions(__p, __prms, __opts, &__ec); }
+
+inline _LIBCPP_INLINE_VISIBILITY path proximate(const path& __p, const path& __base, error_code& __ec) {
path __tmp = __weakly_canonical(__p, &__ec);
if (__ec)
return {};
@@ -463,29 +151,12 @@ inline _LIBCPP_INLINE_VISIBILITY path proximate(const path& __p,
return __tmp.lexically_proximate(__tmp_base);
}
-inline _LIBCPP_INLINE_VISIBILITY path proximate(const path& __p,
- error_code& __ec) {
- return proximate(__p, current_path(), __ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY path
-proximate(const path& __p, const path& __base = current_path()) {
- return __weakly_canonical(__p).lexically_proximate(
- __weakly_canonical(__base));
-}
-
-inline _LIBCPP_INLINE_VISIBILITY path read_symlink(const path& __p) {
- return __read_symlink(__p);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY path read_symlink(const path& __p,
- error_code& __ec) {
- return __read_symlink(__p, &__ec);
-}
+inline _LIBCPP_INLINE_VISIBILITY path proximate(const path& __p, error_code& __ec) { return proximate(__p, current_path(), __ec); }
+inline _LIBCPP_INLINE_VISIBILITY path proximate(const path& __p, const path& __base = current_path()) { return __weakly_canonical(__p).lexically_proximate(__weakly_canonical(__base)); }
+inline _LIBCPP_INLINE_VISIBILITY path read_symlink(const path& __p) { return __read_symlink(__p); }
+inline _LIBCPP_INLINE_VISIBILITY path read_symlink(const path& __p, error_code& __ec) { return __read_symlink(__p, &__ec); }
-inline _LIBCPP_INLINE_VISIBILITY path relative(const path& __p,
- const path& __base,
- error_code& __ec) {
+inline _LIBCPP_INLINE_VISIBILITY path relative(const path& __p, const path& __base, error_code& __ec) {
path __tmp = __weakly_canonical(__p, &__ec);
if (__ec)
return path();
@@ -495,100 +166,27 @@ inline _LIBCPP_INLINE_VISIBILITY path relative(const path& __p,
return __tmp.lexically_relative(__tmpbase);
}
-inline _LIBCPP_INLINE_VISIBILITY path relative(const path& __p,
- error_code& __ec) {
- return relative(__p, current_path(), __ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY path
-relative(const path& __p, const path& __base = current_path()) {
- return __weakly_canonical(__p).lexically_relative(__weakly_canonical(__base));
-}
-
-inline _LIBCPP_INLINE_VISIBILITY uintmax_t remove_all(const path& __p) {
- return __remove_all(__p);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY uintmax_t remove_all(const path& __p,
- error_code& __ec) {
- return __remove_all(__p, &__ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool remove(const path& __p) {
- return __remove(__p);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY bool remove(const path& __p,
- error_code& __ec) noexcept {
- return __remove(__p, &__ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY void rename(const path& __from,
- const path& __to) {
- return __rename(__from, __to);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY void
-rename(const path& __from, const path& __to, error_code& __ec) noexcept {
- return __rename(__from, __to, &__ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY void resize_file(const path& __p,
- uintmax_t __ns) {
- return __resize_file(__p, __ns);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY void
-resize_file(const path& __p, uintmax_t __ns, error_code& __ec) noexcept {
- return __resize_file(__p, __ns, &__ec);
-}
-
-_LIBCPP_FUNC_VIS
-space_info __space(const path&, error_code* __ec = nullptr);
-
-inline _LIBCPP_INLINE_VISIBILITY space_info space(const path& __p) {
- return __space(__p);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY space_info space(const path& __p,
- error_code& __ec) noexcept {
- return __space(__p, &__ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY file_status status(const path& __p) {
- return __status(__p);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY file_status status(const path& __p,
- error_code& __ec) noexcept {
- return __status(__p, &__ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY file_status symlink_status(const path& __p) {
- return __symlink_status(__p);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY file_status
-symlink_status(const path& __p, error_code& __ec) noexcept {
- return __symlink_status(__p, &__ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY path temp_directory_path() {
- return __temp_directory_path();
-}
-
-inline _LIBCPP_INLINE_VISIBILITY path temp_directory_path(error_code& __ec) {
- return __temp_directory_path(&__ec);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY path weakly_canonical(path const& __p) {
- return __weakly_canonical(__p);
-}
-
-inline _LIBCPP_INLINE_VISIBILITY path weakly_canonical(path const& __p,
- error_code& __ec) {
- return __weakly_canonical(__p, &__ec);
-}
+inline _LIBCPP_INLINE_VISIBILITY path relative(const path& __p, error_code& __ec) { return relative(__p, current_path(), __ec); }
+inline _LIBCPP_INLINE_VISIBILITY path relative(const path& __p, const path& __base = current_path()) { return __weakly_canonical(__p).lexically_relative(__weakly_canonical(__base)); }
+inline _LIBCPP_INLINE_VISIBILITY uintmax_t remove_all(const path& __p) { return __remove_all(__p); }
+inline _LIBCPP_INLINE_VISIBILITY uintmax_t remove_all(const path& __p, error_code& __ec) { return __remove_all(__p, &__ec); }
+inline _LIBCPP_INLINE_VISIBILITY bool remove(const path& __p) { return __remove(__p); }
+inline _LIBCPP_INLINE_VISIBILITY bool remove(const path& __p, error_code& __ec) noexcept { return __remove(__p, &__ec); }
+inline _LIBCPP_INLINE_VISIBILITY void rename(const path& __from, const path& __to) { return __rename(__from, __to); }
+inline _LIBCPP_INLINE_VISIBILITY void rename(const path& __from, const path& __to, error_code& __ec) noexcept { return __rename(__from, __to, &__ec); }
+inline _LIBCPP_INLINE_VISIBILITY void resize_file(const path& __p, uintmax_t __ns) { return __resize_file(__p, __ns); }
+inline _LIBCPP_INLINE_VISIBILITY void resize_file(const path& __p, uintmax_t __ns, error_code& __ec) noexcept { return __resize_file(__p, __ns, &__ec); }
+_LIBCPP_FUNC_VIS space_info __space(const path&, error_code* __ec = nullptr);
+inline _LIBCPP_INLINE_VISIBILITY space_info space(const path& __p) { return __space(__p); }
+inline _LIBCPP_INLINE_VISIBILITY space_info space(const path& __p, error_code& __ec) noexcept { return __space(__p, &__ec); }
+inline _LIBCPP_INLINE_VISIBILITY file_status status(const path& __p) { return __status(__p); }
+inline _LIBCPP_INLINE_VISIBILITY file_status status(const path& __p, error_code& __ec) noexcept { return __status(__p, &__ec); }
+inline _LIBCPP_INLINE_VISIBILITY file_status symlink_status(const path& __p) { return __symlink_status(__p); }
+inline _LIBCPP_INLINE_VISIBILITY file_status symlink_status(const path& __p, error_code& __ec) noexcept { return __symlink_status(__p, &__ec); }
+inline _LIBCPP_INLINE_VISIBILITY path temp_directory_path() { return __temp_directory_path(); }
+inline _LIBCPP_INLINE_VISIBILITY path temp_directory_path(error_code& __ec) { return __temp_directory_path(&__ec); }
+inline _LIBCPP_INLINE_VISIBILITY path weakly_canonical(path const& __p) { return __weakly_canonical(__p); }
+inline _LIBCPP_INLINE_VISIBILITY path weakly_canonical(path const& __p, error_code& __ec) { return __weakly_canonical(__p, &__ec); }
_LIBCPP_AVAILABILITY_FILESYSTEM_POP
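
Editor's note: the one-line wrappers above all follow the same filesystem convention visible in this hunk: each operation has a throwing overload and an overload that reports failure through an error_code out-parameter, both forwarding to a single __-prefixed implementation function. A minimal caller-side sketch of that dual-overload pattern (the path used here is just a placeholder):

#include <filesystem>
#include <iostream>
#include <system_error>

int main() {
  namespace fs = std::filesystem;

  // error_code overload: failure is reported through `ec`, nothing is thrown.
  std::error_code ec;
  fs::path target = fs::read_symlink("/nonexistent/link", ec);
  if (ec)
    std::cout << "read_symlink failed: " << ec.message() << '\n';

  // Throwing overload: the same failure surfaces as a filesystem_error.
  try {
    target = fs::read_symlink("/nonexistent/link");
  } catch (const fs::filesystem_error& e) {
    std::cout << "read_symlink threw: " << e.what() << '\n';
  }
}
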
diff --git a/contrib/llvm-project/libcxx/include/__filesystem/path.h b/contrib/llvm-project/libcxx/include/__filesystem/path.h
index a6d1ee997d91..77547cbacb7d 100644
--- a/contrib/llvm-project/libcxx/include/__filesystem/path.h
+++ b/contrib/llvm-project/libcxx/include/__filesystem/path.h
@@ -12,16 +12,16 @@
#include <__availability>
#include <__config>
-#include <string>
-#include <type_traits>
#include <__iterator/back_insert_iterator.h>
#include <__iterator/iterator_traits.h>
#include <cstddef>
+#include <string>
#include <string_view>
+#include <type_traits>
#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
-# include <locale>
# include <iomanip> // for quoted
+# include <locale>
#endif
#ifndef _LIBCPP_CXX03_LANG
diff --git a/contrib/llvm-project/libcxx/include/__filesystem/path_iterator.h b/contrib/llvm-project/libcxx/include/__filesystem/path_iterator.h
index 62f8dc6fd357..08039e4c8a36 100644
--- a/contrib/llvm-project/libcxx/include/__filesystem/path_iterator.h
+++ b/contrib/llvm-project/libcxx/include/__filesystem/path_iterator.h
@@ -12,9 +12,9 @@
#include <__availability>
#include <__config>
+#include <__debug>
#include <__filesystem/path.h>
#include <__iterator/iterator_traits.h>
-#include <__debug>
#include <cstddef>
#include <string>
#include <string_view>
@@ -38,15 +38,13 @@ public:
};
public:
- typedef bidirectional_iterator_tag iterator_category;
+ typedef input_iterator_tag iterator_category;
+ typedef bidirectional_iterator_tag iterator_concept;
typedef path value_type;
typedef ptrdiff_t difference_type;
typedef const path* pointer;
- typedef const path& reference;
-
- typedef void
- __stashing_iterator_tag; // See reverse_iterator and __is_stashing_iterator
+ typedef path reference;
public:
_LIBCPP_INLINE_VISIBILITY
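
Editor's note: the path_iterator.h hunk above demotes the advertised iterator_category to input_iterator_tag while exposing bidirectional_iterator_tag through iterator_concept, because operator* now yields a path by value (reference is path), which a C++17 forward iterator is not allowed to do. A hedged sketch of how generic code can honour both signals; the helper below is illustrative, not a library facility:

#include <filesystem>
#include <iostream>
#include <iterator>
#include <type_traits>

// Prefer the iterator's own iterator_concept when it exists (the C++20
// signal), otherwise fall back to iterator_traits::iterator_category.
template <class It, class = void>
struct iter_concept {
  using type = typename std::iterator_traits<It>::iterator_category;
};
template <class It>
struct iter_concept<It, std::void_t<typename It::iterator_concept>> {
  using type = typename It::iterator_concept;
};

int main() {
  using It = std::filesystem::path::iterator;
  using Tag = typename iter_concept<It>::type;
  if constexpr (std::is_base_of_v<std::bidirectional_iterator_tag, Tag>)
    std::cout << "path::iterator advertises bidirectional traversal\n";
  else
    std::cout << "path::iterator only advertises input traversal\n";
}
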
diff --git a/contrib/llvm-project/libcxx/include/__format/format_arg.h b/contrib/llvm-project/libcxx/include/__format/format_arg.h
index 59429c13d415..da829d52fbfe 100644
--- a/contrib/llvm-project/libcxx/include/__format/format_arg.h
+++ b/contrib/llvm-project/libcxx/include/__format/format_arg.h
@@ -14,7 +14,9 @@
#include <__config>
#include <__format/format_error.h>
#include <__format/format_fwd.h>
+#include <__format/format_parse_context.h>
#include <__functional_base>
+#include <__memory/addressof.h>
#include <__variant/monostate.h>
#include <string>
#include <string_view>
@@ -56,7 +58,8 @@ enum class _LIBCPP_ENUM_VIS __arg_t : uint8_t {
__long_double,
__const_char_type_ptr,
__string_view,
- __ptr
+ __ptr,
+ __handle
};
} // namespace __format
@@ -104,6 +107,8 @@ visit_format_arg(_Visitor&& __vis, basic_format_arg<_Context> __arg) {
return _VSTD::invoke(_VSTD::forward<_Visitor>(__vis), __arg.__string_view);
case __format::__arg_t::__ptr:
return _VSTD::invoke(_VSTD::forward<_Visitor>(__vis), __arg.__ptr);
+ case __format::__arg_t::__handle:
+ return _VSTD::invoke(_VSTD::forward<_Visitor>(__vis), __arg.__handle);
}
_LIBCPP_UNREACHABLE();
}
@@ -111,8 +116,7 @@ visit_format_arg(_Visitor&& __vis, basic_format_arg<_Context> __arg) {
template <class _Context>
class _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT basic_format_arg {
public:
- // TODO FMT Define the handle class.
- class handle;
+ class _LIBCPP_TEMPLATE_VIS handle;
_LIBCPP_HIDE_FROM_ABI basic_format_arg() noexcept
: __type_{__format::__arg_t::__none} {}
@@ -161,7 +165,7 @@ private:
const char_type* __const_char_type_ptr;
basic_string_view<char_type> __string_view;
const void* __ptr;
- // TODO FMT Add the handle.
+ handle __handle;
};
__format::__arg_t __type_;
@@ -245,7 +249,37 @@ private:
explicit basic_format_arg(nullptr_t) noexcept
: __ptr(nullptr), __type_(__format::__arg_t::__ptr) {}
- // TODO FMT Implement the _Tp* constructor.
+ template <class _Tp>
+ requires is_void_v<_Tp> _LIBCPP_HIDE_FROM_ABI explicit basic_format_arg(_Tp* __p) noexcept
+ : __ptr(__p), __type_(__format::__arg_t::__ptr) {}
+
+ template <class _Tp>
+ _LIBCPP_HIDE_FROM_ABI explicit basic_format_arg(const _Tp& __v) noexcept
+ : __handle(__v), __type_(__format::__arg_t::__handle) {}
+};
+
+template <class _Context>
+class _LIBCPP_TEMPLATE_VIS basic_format_arg<_Context>::handle {
+ friend class basic_format_arg<_Context>;
+
+public:
+ _LIBCPP_HIDE_FROM_ABI
+ void format(basic_format_parse_context<char_type>& __parse_ctx, _Context& __ctx) const {
+ __format_(__parse_ctx, __ctx, __ptr_);
+ }
+
+private:
+ const void* __ptr_;
+ void (*__format_)(basic_format_parse_context<char_type>&, _Context&, const void*);
+
+ template <class _Tp>
+ _LIBCPP_HIDE_FROM_ABI explicit handle(const _Tp& __v) noexcept
+ : __ptr_(_VSTD::addressof(__v)),
+ __format_([](basic_format_parse_context<char_type>& __parse_ctx, _Context& __ctx, const void* __ptr) {
+ typename _Context::template formatter_type<_Tp> __f;
+ __parse_ctx.advance_to(__f.parse(__parse_ctx));
+ __ctx.advance_to(__f.format(*static_cast<const _Tp*>(__ptr), __ctx));
+ }) {}
};
#endif // !defined(_LIBCPP_HAS_NO_CONCEPTS)
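
Editor's note: the new basic_format_arg<_Context>::handle above is classic type erasure: it stores the argument as a const void* next to a function pointer whose captureless lambda still knows the static type, so the user-provided formatter can be reconstructed at format time. A stand-alone sketch of the same idiom with hypothetical names, not the libc++ types:

#include <iostream>
#include <memory>
#include <string>

// Store a type-erased pointer plus a function pointer that remembers the
// static type; the captureless lambda decays to a plain function pointer.
class erased_printer {
public:
  template <class T>
  explicit erased_printer(const T& value) noexcept
      : ptr_(std::addressof(value)),
        print_([](std::ostream& os, const void* p) {
          os << *static_cast<const T*>(p); // recover the static type here
        }) {}

  void print(std::ostream& os) const { print_(os, ptr_); }

private:
  const void* ptr_;
  void (*print_)(std::ostream&, const void*);
};

int main() {
  std::string s = "hello";
  erased_printer p(s); // like basic_format_arg, `p` does not own `s`
  p.print(std::cout);
  std::cout << '\n';
}
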
diff --git a/contrib/llvm-project/libcxx/include/__format/formatter.h b/contrib/llvm-project/libcxx/include/__format/formatter.h
index 1adce75a8611..38b73bba32f3 100644
--- a/contrib/llvm-project/libcxx/include/__format/formatter.h
+++ b/contrib/llvm-project/libcxx/include/__format/formatter.h
@@ -38,26 +38,20 @@ _LIBCPP_BEGIN_NAMESPACE_STD
// to support compilers with partial C++20 support.
#if !defined(_LIBCPP_HAS_NO_CONCEPTS)
-// Currently not implemented specializations throw an exception when used. This
-// does not conform to the Standard. However not all Standard defined formatters
-// have been implemented yet. Until that time the current behavior is intended.
-// TODO FMT Disable the default template.
+/// The default formatter template.
+///
+/// [format.formatter.spec]/5
+/// If F is a disabled specialization of formatter, these values are false:
+/// - is_default_constructible_v<F>,
+/// - is_copy_constructible_v<F>,
+/// - is_move_constructible_v<F>,
+/// - is_copy_assignable<F>, and
+/// - is_move_assignable<F>.
template <class _Tp, class _CharT>
struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT formatter {
- _LIBCPP_NORETURN _LIBCPP_HIDE_FROM_ABI auto parse(auto& __parse_ctx)
- -> decltype(__parse_ctx.begin()) {
- __throw();
- }
-
- _LIBCPP_NORETURN _LIBCPP_HIDE_FROM_ABI auto format(_Tp, auto& __ctx)
- -> decltype(__ctx.out()) {
- __throw();
- }
-
-private:
- _LIBCPP_NORETURN _LIBCPP_HIDE_FROM_ABI void __throw() {
- __throw_format_error("Argument type not implemented yet");
- }
+ formatter() = delete;
+ formatter(const formatter&) = delete;
+ formatter& operator=(const formatter&) = delete;
};
namespace __format_spec {
@@ -193,6 +187,34 @@ __write(output_iterator<const _CharT&> auto __out_it, const _CharT* __first,
/**
* @overload
*
+ * Writes additional zeros for the precision before the exponent.

+ * This is used when the precision requested in the format string is larger
+ * than the maximum precision of the floating-point type. These precision
+ * digits are always 0.
+ *
+ * @param __exponent The location of the exponent character.
+ * @param __num_trailing_zeros The number of 0's to write before the exponent
+ * character.
+ */
+template <class _CharT, class _Fill>
+_LIBCPP_HIDE_FROM_ABI auto __write(output_iterator<const _CharT&> auto __out_it, const _CharT* __first,
+ const _CharT* __last, size_t __size, size_t __width, _Fill __fill,
+ __format_spec::_Flags::_Alignment __alignment, const _CharT* __exponent,
+ size_t __num_trailing_zeros) -> decltype(__out_it) {
+ _LIBCPP_ASSERT(__first <= __last, "Not a valid range");
+ _LIBCPP_ASSERT(__num_trailing_zeros > 0, "The overload not writing trailing zeros should have been used");
+
+ __padding_size_result __padding = __padding_size(__size + __num_trailing_zeros, __width, __alignment);
+ __out_it = _VSTD::fill_n(_VSTD::move(__out_it), __padding.__before, __fill);
+ __out_it = _VSTD::copy(__first, __exponent, _VSTD::move(__out_it));
+ __out_it = _VSTD::fill_n(_VSTD::move(__out_it), __num_trailing_zeros, _CharT('0'));
+ __out_it = _VSTD::copy(__exponent, __last, _VSTD::move(__out_it));
+ return _VSTD::fill_n(_VSTD::move(__out_it), __padding.__after, __fill);
+}
+
+/**
+ * @overload
+ *
* Uses a transformation operation before writing an element.
*
* TODO FMT Fill will probably change to support Unicode grapheme cluster.
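
Editor's note: the new __write overload added above relies on the observation documented in its comment: digits requested beyond the type's maximum precision are always zero, so rather than storing them in the conversion buffer they are emitted while copying, just before the exponent. A minimal sketch of that copy/fill/copy sequence; the function name and sample buffer are illustrative only:

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <iterator>
#include <string>

template <class OutIt>
OutIt write_with_trailing_zeros(OutIt out, const char* first, const char* exponent,
                                const char* last, std::size_t num_zeros) {
  out = std::copy(first, exponent, out);  // digits up to the exponent
  out = std::fill_n(out, num_zeros, '0'); // zeros that were never stored
  return std::copy(exponent, last, out);  // "e+NN" suffix
}

int main() {
  // Pretend to_chars produced "1.234e+02" but the format string asked for a
  // larger precision: pad with zeros right before the 'e'.
  std::string buf = "1.234e+02";
  const char* exp = buf.data() + buf.find('e');
  std::string out;
  write_with_trailing_zeros(std::back_inserter(out), buf.data(), exp,
                            buf.data() + buf.size(), 4);
  std::cout << out << '\n'; // prints 1.2340000e+02
}
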
diff --git a/contrib/llvm-project/libcxx/include/__format/formatter_bool.h b/contrib/llvm-project/libcxx/include/__format/formatter_bool.h
index fdd1d75355d2..1e40bc0a435a 100644
--- a/contrib/llvm-project/libcxx/include/__format/formatter_bool.h
+++ b/contrib/llvm-project/libcxx/include/__format/formatter_bool.h
@@ -102,7 +102,7 @@ using __formatter_bool = __formatter_integral<__parser_bool<_CharT>>;
// For each charT, for each cv-unqualified arithmetic type ArithmeticT other
// than char, wchar_t, char8_t, char16_t, or char32_t, a specialization
-template <class _CharT>
+template <__formatter::__char_type _CharT>
struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT formatter<bool, _CharT>
: public __format_spec::__formatter_bool<_CharT> {
using _Base = __format_spec::__formatter_bool<_CharT>;
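
Editor's note: this hunk and the later formatter headers tighten the specializations from `class _CharT` to the `__formatter::__char_type` concept, whose definition is not part of this diff; presumably it restricts _CharT to the character types the format library actually supports. A hedged sketch of the general technique of constraining a family of specializations with a concept; every name below is made up:

#include <concepts>
#include <type_traits>

// Illustrative concept in the spirit of __formatter::__char_type; the real
// definition lives elsewhere in libc++ and is assumed, not shown, here.
template <class CharT>
concept format_char_type = std::same_as<CharT, char> || std::same_as<CharT, wchar_t>;

// Constrained primary template: only char and wchar_t can ever match.
template <class T, format_char_type CharT>
struct my_formatter; // primary left undefined, like a disabled formatter

template <format_char_type CharT>
struct my_formatter<bool, CharT> {}; // enabled specialization

static_assert(std::is_default_constructible_v<my_formatter<bool, char>>);
static_assert(std::is_default_constructible_v<my_formatter<bool, wchar_t>>);
// my_formatter<bool, char16_t> would not satisfy the constraint at all.

int main() {}
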
diff --git a/contrib/llvm-project/libcxx/include/__format/formatter_floating_point.h b/contrib/llvm-project/libcxx/include/__format/formatter_floating_point.h
new file mode 100644
index 000000000000..2e710b409deb
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__format/formatter_floating_point.h
@@ -0,0 +1,717 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___FORMAT_FORMATTER_FLOATING_POINT_H
+#define _LIBCPP___FORMAT_FORMATTER_FLOATING_POINT_H
+
+#include <__algorithm/copy.h>
+#include <__algorithm/copy_n.h>
+#include <__algorithm/fill_n.h>
+#include <__algorithm/find.h>
+#include <__algorithm/min.h>
+#include <__algorithm/rotate.h>
+#include <__algorithm/transform.h>
+#include <__concepts/arithmetic.h>
+#include <__config>
+#include <__debug>
+#include <__format/format_error.h>
+#include <__format/format_fwd.h>
+#include <__format/format_string.h>
+#include <__format/formatter.h>
+#include <__format/formatter_integral.h>
+#include <__format/parser_std_format_spec.h>
+#include <__utility/move.h>
+#include <charconv>
+#include <cmath>
+
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+# include <locale>
+#endif
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_PUSH_MACROS
+#include <__undef_macros>
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#if _LIBCPP_STD_VER > 17
+
+// TODO FMT Remove this once we require compilers with proper C++20 support.
+// If the compiler has no concepts support, the format header will be disabled.
+// Without concepts support enable_if needs to be used and that is too much effort
+// to support compilers with partial C++20 support.
+# if !defined(_LIBCPP_HAS_NO_CONCEPTS)
+
+namespace __format_spec {
+
+template <floating_point _Tp>
+_LIBCPP_HIDE_FROM_ABI char* __to_buffer(char* __first, char* __last, _Tp __value) {
+ to_chars_result __r = _VSTD::to_chars(__first, __last, __value);
+ _LIBCPP_ASSERT(__r.ec == errc(0), "Internal buffer too small");
+ return __r.ptr;
+}
+
+template <floating_point _Tp>
+_LIBCPP_HIDE_FROM_ABI char* __to_buffer(char* __first, char* __last, _Tp __value, chars_format __fmt) {
+ to_chars_result __r = _VSTD::to_chars(__first, __last, __value, __fmt);
+ _LIBCPP_ASSERT(__r.ec == errc(0), "Internal buffer too small");
+ return __r.ptr;
+}
+
+template <floating_point _Tp>
+_LIBCPP_HIDE_FROM_ABI char* __to_buffer(char* __first, char* __last, _Tp __value, chars_format __fmt, int __precision) {
+ to_chars_result __r = _VSTD::to_chars(__first, __last, __value, __fmt, __precision);
+ _LIBCPP_ASSERT(__r.ec == errc(0), "Internal buffer too small");
+ return __r.ptr;
+}
+
+// https://en.cppreference.com/w/cpp/language/types#cite_note-1
+// float min subnormal: +/-0x1p-149 max: +/- 3.402,823,4 10^38
+// double min subnormal: +/-0x1p-1074 max +/- 1.797,693,134,862,315,7 10^308
+// long double (x86) min subnormal: +/-0x1p-16446 max: +/- 1.189,731,495,357,231,765,021 10^4932
+//
+// The maximum number of digits required for the integral part is based on the
+// maximum value's power of 10. Every power of 10 requires one additional
+// decimal digit.
+// The maximum number of digits required for the fractional part is based on
+// the minimal subnormal hexadecimal output's power of 10. Every division of a
+// fraction's binary 1 by 2, requires one additional decimal digit.
+//
+// The maximum size of a formatted value depends on the selected output format.
+// Ignoring the fact that the format string can request a precision larger
+// than the value's required maximum, these values are:
+//
+// sign 1 code unit
+// __max_integral
+// radix point 1 code unit
+// __max_fractional
+// exponent character 1 code unit
+// sign 1 code unit
+// __max_fractional_value
+// -----------------------------------
+// total 4 code units extra required.
+//
+// TODO FMT Optimize the storage to avoid storing digits that are known to be zero.
+// https://www.exploringbinary.com/maximum-number-of-decimal-digits-in-binary-floating-point-numbers/
+
+// TODO FMT Add long double specialization when to_chars has proper long double support.
+template <class _Tp>
+struct __traits;
+
+template <floating_point _Fp>
+static constexpr size_t __float_buffer_size(int __precision) {
+ using _Traits = __traits<_Fp>;
+ return 4 + _Traits::__max_integral + __precision + _Traits::__max_fractional_value;
+}
+
+template <>
+struct __traits<float> {
+ static constexpr int __max_integral = 38;
+ static constexpr int __max_fractional = 149;
+ static constexpr int __max_fractional_value = 3;
+ static constexpr size_t __stack_buffer_size = 256;
+
+ static constexpr int __hex_precision_digits = 3;
+};
+
+template <>
+struct __traits<double> {
+ static constexpr int __max_integral = 308;
+ static constexpr int __max_fractional = 1074;
+ static constexpr int __max_fractional_value = 4;
+ static constexpr size_t __stack_buffer_size = 1024;
+
+ static constexpr int __hex_precision_digits = 4;
+};
+
+/// Helper class to store the conversion buffer.
+///
+/// Depending on the maximum size required for a value, the buffer is allocated
+/// on the stack or the heap.
+template <floating_point _Fp>
+class _LIBCPP_TEMPLATE_VIS __float_buffer {
+ using _Traits = __traits<_Fp>;
+
+public:
+ // TODO FMT Improve this constructor to do a better estimate.
+ // When using scientific formatting with a precision of 6 a stack buffer
+ // will always suffice. At the moment that isn't important since floats and
+ // doubles use a stack buffer, unless the precision used in the format string
+ // is large.
+ // When supporting long doubles the __max_integral part becomes 4932 which
+ // may be too much for some platforms. For these cases a better estimate is
+ // required.
+ explicit _LIBCPP_HIDE_FROM_ABI __float_buffer(int __precision)
+ : __precision_(__precision != -1 ? __precision : _Traits::__max_fractional) {
+
+ // When the precision is larger than _Traits::__max_fractional the digits in
+ // the range (_Traits::__max_fractional, precision] will contain the value
+ // zero. There's no need to request to_chars to write these zeros:
+ // - When the value is large a temporary heap buffer needs to be allocated.
+ // - When to_chars writes the values they need to be "copied" to the output:
+ // - char: std::fill on the output iterator is faster than std::copy.
+ // - wchar_t: same argument as char, but additionally std::copy won't work.
+ // The input is always a char buffer, so every char in the buffer needs
+ // to be converted from a char to a wchar_t.
+ if (__precision_ > _Traits::__max_fractional) {
+ __num_trailing_zeros_ = __precision_ - _Traits::__max_fractional;
+ __precision_ = _Traits::__max_fractional;
+ }
+
+ __size_ = __format_spec::__float_buffer_size<_Fp>(__precision_);
+ if (__size_ > _Traits::__stack_buffer_size)
+ // The allocated buffer's contents don't need initialization.
+ __begin_ = allocator<char>{}.allocate(__size_);
+ else
+ __begin_ = __buffer_;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI ~__float_buffer() {
+ if (__size_ > _Traits::__stack_buffer_size)
+ allocator<char>{}.deallocate(__begin_, __size_);
+ }
+ _LIBCPP_HIDE_FROM_ABI __float_buffer(const __float_buffer&) = delete;
+ _LIBCPP_HIDE_FROM_ABI __float_buffer& operator=(const __float_buffer&) = delete;
+
+ _LIBCPP_HIDE_FROM_ABI char* begin() const { return __begin_; }
+ _LIBCPP_HIDE_FROM_ABI char* end() const { return __begin_ + __size_; }
+
+ _LIBCPP_HIDE_FROM_ABI int __precision() const { return __precision_; }
+ _LIBCPP_HIDE_FROM_ABI int __num_trailing_zeros() const { return __num_trailing_zeros_; }
+ _LIBCPP_HIDE_FROM_ABI void __remove_trailing_zeros() { __num_trailing_zeros_ = 0; }
+
+private:
+ int __precision_;
+ int __num_trailing_zeros_{0};
+ size_t __size_;
+ char* __begin_;
+ char __buffer_[_Traits::__stack_buffer_size];
+};
+
+struct __float_result {
+ /// Points at the beginning of the integral part in the buffer.
+ ///
+ /// When there's no sign character this points at the start of the buffer.
+ char* __integral;
+
+ /// Points at the radix point, when not present it's the same as \ref __last.
+ char* __radix_point;
+
+ /// Points at the exponent character, when not present it's the same as \ref __last.
+ char* __exponent;
+
+ /// Points beyond the last written element in the buffer.
+ char* __last;
+};
+
+/// Finds the position of the exponent character 'e' at the end of the buffer.
+///
+/// Assuming there is an exponent, the input will terminate with something
+/// between eSdd and eSdddd (S = sign, d = digit).
+///
+/// \returns a pointer to the exponent or __last when not found.
+constexpr inline _LIBCPP_HIDE_FROM_ABI char* __find_exponent(char* __first, char* __last) {
+ ptrdiff_t __size = __last - __first;
+ if (__size > 4) {
+ __first = __last - _VSTD::min(__size, ptrdiff_t(6));
+ for (; __first != __last - 3; ++__first) {
+ if (*__first == 'e')
+ return __first;
+ }
+ }
+ return __last;
+}
+
+template <class _Fp, class _Tp>
+_LIBCPP_HIDE_FROM_ABI __float_result __format_buffer_default(const __float_buffer<_Fp>& __buffer, _Tp __value,
+ char* __integral) {
+ __float_result __result;
+ __result.__integral = __integral;
+ __result.__last = __format_spec::__to_buffer(__integral, __buffer.end(), __value);
+
+ __result.__exponent = __format_spec::__find_exponent(__result.__integral, __result.__last);
+
+ // Constraints:
+ // - There's at least one decimal digit before the radix point.
+ // - The radix point, when present, is placed before the exponent.
+ __result.__radix_point = _VSTD::find(__result.__integral + 1, __result.__exponent, '.');
+
+ // When the radix point isn't found its position is the exponent instead of
+ // __result.__last.
+ if (__result.__radix_point == __result.__exponent)
+ __result.__radix_point = __result.__last;
+
+ // clang-format off
+ _LIBCPP_ASSERT((__result.__integral != __result.__last) &&
+ (__result.__radix_point == __result.__last || *__result.__radix_point == '.') &&
+ (__result.__exponent == __result.__last || *__result.__exponent == 'e'),
+ "Post-condition failure.");
+ // clang-format on
+
+ return __result;
+}
+
+template <class _Fp, class _Tp>
+_LIBCPP_HIDE_FROM_ABI __float_result __format_buffer_hexadecimal_lower_case(const __float_buffer<_Fp>& __buffer,
+ _Tp __value, int __precision,
+ char* __integral) {
+ __float_result __result;
+ __result.__integral = __integral;
+ if (__precision == -1)
+ __result.__last = __format_spec::__to_buffer(__integral, __buffer.end(), __value, chars_format::hex);
+ else
+ __result.__last = __format_spec::__to_buffer(__integral, __buffer.end(), __value, chars_format::hex, __precision);
+
+ // H = one or more hex-digits
+ // S = sign
+ // D = one or more decimal-digits
+ // When the fractional part is zero and no precision is requested the output
+ // is 0p+0, else the output is 0.HpSD.
+ // So testing the second position can differentiate between these two cases.
+ char* __first = __integral + 1;
+ if (*__first == '.') {
+ __result.__radix_point = __first;
+ // One digit is the minimum
+ // 0.hpSd
+ // ^-- last
+ // ^---- integral = end of search
+ // ^-------- start of search
+ // 0123456
+ //
+ // Four digits is the maximum
+ // 0.hpSdddd
+ // ^-- last
+ // ^---- integral = end of search
+ // ^-------- start of search
+ // 0123456789
+ static_assert(__traits<_Fp>::__hex_precision_digits <= 4, "Guard against possible underflow.");
+
+ char* __last = __result.__last - 2;
+ __first = __last - __traits<_Fp>::__hex_precision_digits;
+ __result.__exponent = _VSTD::find(__first, __last, 'p');
+ } else {
+ __result.__radix_point = __result.__last;
+ __result.__exponent = __first;
+ }
+
+ // clang-format off
+ _LIBCPP_ASSERT((__result.__integral != __result.__last) &&
+ (__result.__radix_point == __result.__last || *__result.__radix_point == '.') &&
+ (__result.__exponent != __result.__last && *__result.__exponent == 'p'),
+ "Post-condition failure.");
+ // clang-format on
+
+ return __result;
+}
+
+template <class _Fp, class _Tp>
+_LIBCPP_HIDE_FROM_ABI __float_result __format_buffer_hexadecimal_upper_case(const __float_buffer<_Fp>& __buffer,
+ _Tp __value, int __precision,
+ char* __integral) {
+ __float_result __result =
+ __format_spec::__format_buffer_hexadecimal_lower_case(__buffer, __value, __precision, __integral);
+ _VSTD::transform(__result.__integral, __result.__exponent, __result.__integral, __hex_to_upper);
+ *__result.__exponent = 'P';
+ return __result;
+}
+
+template <class _Fp, class _Tp>
+_LIBCPP_HIDE_FROM_ABI __float_result __format_buffer_scientific_lower_case(const __float_buffer<_Fp>& __buffer,
+ _Tp __value, int __precision,
+ char* __integral) {
+ __float_result __result;
+ __result.__integral = __integral;
+ __result.__last =
+ __format_spec::__to_buffer(__integral, __buffer.end(), __value, chars_format::scientific, __precision);
+
+ char* __first = __integral + 1;
+ _LIBCPP_ASSERT(__first != __result.__last, "No exponent present");
+ if (*__first == '.') {
+ __result.__radix_point = __first;
+ __result.__exponent = __format_spec::__find_exponent(__first + 1, __result.__last);
+ } else {
+ __result.__radix_point = __result.__last;
+ __result.__exponent = __first;
+ }
+
+ // clang-format off
+ _LIBCPP_ASSERT((__result.__integral != __result.__last) &&
+ (__result.__radix_point == __result.__last || *__result.__radix_point == '.') &&
+ (__result.__exponent != __result.__last && *__result.__exponent == 'e'),
+ "Post-condition failure.");
+ // clang-format on
+ return __result;
+}
+
+template <class _Fp, class _Tp>
+_LIBCPP_HIDE_FROM_ABI __float_result __format_buffer_scientific_upper_case(const __float_buffer<_Fp>& __buffer,
+ _Tp __value, int __precision,
+ char* __integral) {
+ __float_result __result =
+ __format_spec::__format_buffer_scientific_lower_case(__buffer, __value, __precision, __integral);
+ *__result.__exponent = 'E';
+ return __result;
+}
+
+template <class _Fp, class _Tp>
+_LIBCPP_HIDE_FROM_ABI __float_result __format_buffer_fixed(const __float_buffer<_Fp>& __buffer, _Tp __value,
+ int __precision, char* __integral) {
+ __float_result __result;
+ __result.__integral = __integral;
+ __result.__last = __format_spec::__to_buffer(__integral, __buffer.end(), __value, chars_format::fixed, __precision);
+
+ // When there's no precision there's no radix point.
+ // Else the radix point is placed at __precision + 1 from the end.
+ // By converting __precision to a bool the subtraction can be done
+ // unconditionally.
+ __result.__radix_point = __result.__last - (__precision + bool(__precision));
+ __result.__exponent = __result.__last;
+
+ // clang-format off
+ _LIBCPP_ASSERT((__result.__integral != __result.__last) &&
+ (__result.__radix_point == __result.__last || *__result.__radix_point == '.') &&
+ (__result.__exponent == __result.__last),
+ "Post-condition failure.");
+ // clang-format on
+ return __result;
+}
+
+template <class _Fp, class _Tp>
+_LIBCPP_HIDE_FROM_ABI __float_result __format_buffer_general_lower_case(__float_buffer<_Fp>& __buffer, _Tp __value,
+ int __precision, char* __integral) {
+
+ __buffer.__remove_trailing_zeros();
+
+ __float_result __result;
+ __result.__integral = __integral;
+ __result.__last = __format_spec::__to_buffer(__integral, __buffer.end(), __value, chars_format::general, __precision);
+
+ char* __first = __integral + 1;
+ if (__first == __result.__last) {
+ __result.__radix_point = __result.__last;
+ __result.__exponent = __result.__last;
+ } else {
+ __result.__exponent = __format_spec::__find_exponent(__first, __result.__last);
+ if (__result.__exponent != __result.__last)
+ // In scientific mode if there's a radix point it will always be after
+ // the first digit. (This is the position __first points at).
+ __result.__radix_point = *__first == '.' ? __first : __result.__last;
+ else {
+ // In fixed mode the algorithm truncates trailing zeros and possibly the
+ // radix point. There's no good guess for the position of the radix point,
+ // therefore scan the output after the first digit.
+ __result.__radix_point = _VSTD::find(__first, __result.__last, '.');
+ }
+ }
+
+ // clang-format off
+ _LIBCPP_ASSERT((__result.__integral != __result.__last) &&
+ (__result.__radix_point == __result.__last || *__result.__radix_point == '.') &&
+ (__result.__exponent == __result.__last || *__result.__exponent == 'e'),
+ "Post-condition failure.");
+ // clang-format on
+
+ return __result;
+}
+
+template <class _Fp, class _Tp>
+_LIBCPP_HIDE_FROM_ABI __float_result __format_buffer_general_upper_case(__float_buffer<_Fp>& __buffer, _Tp __value,
+ int __precision, char* __integral) {
+ __float_result __result =
+ __format_spec::__format_buffer_general_lower_case(__buffer, __value, __precision, __integral);
+ if (__result.__exponent != __result.__last)
+ *__result.__exponent = 'E';
+ return __result;
+}
+
+# ifndef _LIBCPP_HAS_NO_LOCALIZATION
+template <class _OutIt, class _Fp, class _CharT>
+_LIBCPP_HIDE_FROM_ABI _OutIt __format_locale_specific_form(_OutIt __out_it, const __float_buffer<_Fp>& __buffer,
+ const __float_result& __result, _VSTD::locale __loc,
+ size_t __width, _Flags::_Alignment __alignment,
+ _CharT __fill) {
+ const auto& __np = use_facet<numpunct<_CharT>>(__loc);
+ string __grouping = __np.grouping();
+ char* __first = __result.__integral;
+ // When no radix point or exponent are present __last will be __result.__last.
+ char* __last = _VSTD::min(__result.__radix_point, __result.__exponent);
+
+ ptrdiff_t __digits = __last - __first;
+ if (!__grouping.empty()) {
+ if (__digits <= __grouping[0])
+ __grouping.clear();
+ else
+ __grouping = __determine_grouping(__digits, __grouping);
+ }
+
+ size_t __size = __result.__last - __buffer.begin() + // Formatted string
+ __buffer.__num_trailing_zeros() + // Not yet rendered zeros
+ __grouping.size() - // Grouping contains one
+ !__grouping.empty(); // additional character
+
+ __formatter::__padding_size_result __padding = {0, 0};
+ bool __zero_padding = __alignment == _Flags::_Alignment::__default;
+ if (__size < __width) {
+ if (__zero_padding) {
+ __alignment = _Flags::_Alignment::__right;
+ __fill = _CharT('0');
+ }
+
+ __padding = __formatter::__padding_size(__size, __width, __alignment);
+ }
+
+ // sign and (zero padding or alignment)
+ if (__zero_padding && __first != __buffer.begin())
+ *__out_it++ = *__buffer.begin();
+ __out_it = _VSTD::fill_n(_VSTD::move(__out_it), __padding.__before, __fill);
+ if (!__zero_padding && __first != __buffer.begin())
+ *__out_it++ = *__buffer.begin();
+
+ // integral part
+ if (__grouping.empty()) {
+ __out_it = _VSTD::copy_n(__first, __digits, _VSTD::move(__out_it));
+ } else {
+ auto __r = __grouping.rbegin();
+ auto __e = __grouping.rend() - 1;
+ _CharT __sep = __np.thousands_sep();
+ // The output is divided into small groups of numbers to write:
+ // - A group before the first separator.
+ // - A separator and a group, repeated for the number of separators.
+ // - A group after the last separator.
+ // This loop achieves that process by testing the termination condition
+ // midway in the loop.
+ while (true) {
+ __out_it = _VSTD::copy_n(__first, *__r, _VSTD::move(__out_it));
+ __first += *__r;
+
+ if (__r == __e)
+ break;
+
+ ++__r;
+ *__out_it++ = __sep;
+ }
+ }
+
+ // fractional part
+ if (__result.__radix_point != __result.__last) {
+ *__out_it++ = __np.decimal_point();
+ __out_it = _VSTD::copy(__result.__radix_point + 1, __result.__exponent, _VSTD::move(__out_it));
+ __out_it = _VSTD::fill_n(_VSTD::move(__out_it), __buffer.__num_trailing_zeros(), _CharT('0'));
+ }
+
+ // exponent
+ if (__result.__exponent != __result.__last)
+ __out_it = _VSTD::copy(__result.__exponent, __result.__last, _VSTD::move(__out_it));
+
+ // alignment
+ return _VSTD::fill_n(_VSTD::move(__out_it), __padding.__after, __fill);
+}
+
+# endif // _LIBCPP_HAS_NO_LOCALIZATION
+
+template <__formatter::__char_type _CharT>
+class _LIBCPP_TEMPLATE_VIS __formatter_floating_point : public __parser_floating_point<_CharT> {
+public:
+ template <floating_point _Tp>
+ _LIBCPP_HIDE_FROM_ABI auto format(_Tp __value, auto& __ctx) -> decltype(__ctx.out()) {
+ if (this->__width_needs_substitution())
+ this->__substitute_width_arg_id(__ctx.arg(this->__width));
+
+ bool __negative = _VSTD::signbit(__value);
+
+ if (!_VSTD::isfinite(__value)) [[unlikely]]
+ return __format_non_finite(__ctx.out(), __negative, _VSTD::isnan(__value));
+
+ bool __has_precision = this->__has_precision_field();
+ if (this->__precision_needs_substitution())
+ this->__substitute_precision_arg_id(__ctx.arg(this->__precision));
+
+ // Depending on the std-format-spec string the sign and the value
+ // might not be written together:
+ // - zero-padding may insert additional '0' characters.
+ // Therefore the value is processed as a non-negative value.
+ // The function @ref __insert_sign will insert a '-' when the value was
+ // negative.
+
+ if (__negative)
+ __value = _VSTD::copysign(__value, +1.0);
+
+ // TODO FMT _Fp should just be _Tp when to_chars has proper long double support.
+ using _Fp = conditional_t<same_as<_Tp, long double>, double, _Tp>;
+ // Force the type of the precision to avoid -1 becoming an unsigned value.
+ __float_buffer<_Fp> __buffer(__has_precision ? int(this->__precision) : -1);
+ __float_result __result = __format_buffer(__buffer, __value, __negative, __has_precision);
+
+ if (this->__alternate_form && __result.__radix_point == __result.__last) {
+ *__result.__last++ = '.';
+
+ // When there is an exponent the point needs to be moved before the
+ // exponent. When there's no exponent the rotate does nothing. Since
+ // rotate tests whether the operation is a nop, call it unconditionally.
+ _VSTD::rotate(__result.__exponent, __result.__last - 1, __result.__last);
+ __result.__radix_point = __result.__exponent;
+
+ // The radix point is always placed before the exponent.
+ // - No exponent needs to point to the new last.
+ // - An exponent needs to move one position to the right.
+ // So it's safe to increment the value unconditionally.
+ ++__result.__exponent;
+ }
+
+# ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ if (this->__locale_specific_form)
+ return __format_spec::__format_locale_specific_form(__ctx.out(), __buffer, __result, __ctx.locale(),
+ this->__width, this->__alignment, this->__fill);
+# endif
+
+ ptrdiff_t __size = __result.__last - __buffer.begin();
+ int __num_trailing_zeros = __buffer.__num_trailing_zeros();
+ if (__size + __num_trailing_zeros >= this->__width) {
+ if (__num_trailing_zeros && __result.__exponent != __result.__last)
+ // Insert trailing zeros before exponent character.
+ return _VSTD::copy(__result.__exponent, __result.__last,
+ _VSTD::fill_n(_VSTD::copy(__buffer.begin(), __result.__exponent, __ctx.out()),
+ __num_trailing_zeros, _CharT('0')));
+
+ return _VSTD::fill_n(_VSTD::copy(__buffer.begin(), __result.__last, __ctx.out()), __num_trailing_zeros,
+ _CharT('0'));
+ }
+
+ auto __out_it = __ctx.out();
+ char* __first = __buffer.begin();
+ if (this->__alignment == _Flags::_Alignment::__default) {
+ // When there is a sign, output it before the padding. Note that __size
+ // doesn't need any adjustment, regardless of whether the sign is written
+ // here or in __formatter::__write.
+ if (__first != __result.__integral)
+ *__out_it++ = *__first++;
+ // After the sign is written, zero padding is the same as right alignment
+ // with '0'.
+ this->__alignment = _Flags::_Alignment::__right;
+ this->__fill = _CharT('0');
+ }
+
+ if (__num_trailing_zeros)
+ return __formatter::__write(_VSTD::move(__out_it), __first, __result.__last, __size, this->__width, this->__fill,
+ this->__alignment, __result.__exponent, __num_trailing_zeros);
+
+ return __formatter::__write(_VSTD::move(__out_it), __first, __result.__last, __size, this->__width, this->__fill,
+ this->__alignment);
+ }
+
+private:
+ template <class _OutIt>
+ _LIBCPP_HIDE_FROM_ABI _OutIt __format_non_finite(_OutIt __out_it, bool __negative, bool __isnan) {
+ char __buffer[4];
+ char* __last = __insert_sign(__buffer, __negative, this->__sign);
+
+ // to_chars can return inf, infinity, nan, and nan(n-char-sequence).
+ // The format library requires inf and nan.
+ // All in one expression to avoid dangling references.
+ __last = _VSTD::copy_n(&("infnanINFNAN"[6 * (this->__type == _Flags::_Type::__float_hexadecimal_upper_case ||
+ this->__type == _Flags::_Type::__scientific_upper_case ||
+ this->__type == _Flags::_Type::__fixed_upper_case ||
+ this->__type == _Flags::_Type::__general_upper_case) +
+ 3 * __isnan]),
+ 3, __last);
+
+ // [format.string.std]/13
+ // A zero (0) character preceding the width field pads the field with
+ // leading zeros (following any indication of sign or base) to the field
+ // width, except when applied to an infinity or NaN.
+ if (this->__alignment == _Flags::_Alignment::__default)
+ this->__alignment = _Flags::_Alignment::__right;
+
+ ptrdiff_t __size = __last - __buffer;
+ if (__size >= this->__width)
+ return _VSTD::copy_n(__buffer, __size, _VSTD::move(__out_it));
+
+ return __formatter::__write(_VSTD::move(__out_it), __buffer, __last, __size, this->__width, this->__fill,
+ this->__alignment);
+ }
+
+ /// Fills the buffer with the data based on the requested formatting.
+ ///
+ /// This function, when needed, turns the characters to upper case and
+ /// determines the "interesting" locations which are returned to the caller.
+ ///
+ /// This means the caller never has to convert the contents of the buffer to
+ /// upper case or search for radix points and the location of the exponent.
+ /// This adds a bit of overhead. The original code didn't do that, but due
+ /// to the amount of additional work possibly needed to turn the number into
+ /// the proper output, the code was littered with tests for upper case and
+ /// searches for radix points and exponents.
+ /// - When a precision larger than the type's precision is selected
+ /// additional zero characters need to be written before the exponent.
+ /// - alternate form needs to add a radix point when not present.
+ /// - localization needs to do grouping in the integral part.
+ template <class _Fp, class _Tp>
+ // TODO FMT _Fp should just be _Tp when to_chars has proper long double support.
+ _LIBCPP_HIDE_FROM_ABI __float_result __format_buffer(__float_buffer<_Fp>& __buffer, _Tp __value, bool __negative,
+ bool __has_precision) {
+ char* __first = __insert_sign(__buffer.begin(), __negative, this->__sign);
+ switch (this->__type) {
+ case _Flags::_Type::__default:
+ return __format_spec::__format_buffer_default(__buffer, __value, __first);
+
+ case _Flags::_Type::__float_hexadecimal_lower_case:
+ return __format_spec::__format_buffer_hexadecimal_lower_case(
+ __buffer, __value, __has_precision ? __buffer.__precision() : -1, __first);
+
+ case _Flags::_Type::__float_hexadecimal_upper_case:
+ return __format_spec::__format_buffer_hexadecimal_upper_case(
+ __buffer, __value, __has_precision ? __buffer.__precision() : -1, __first);
+
+ case _Flags::_Type::__scientific_lower_case:
+ return __format_spec::__format_buffer_scientific_lower_case(__buffer, __value, __buffer.__precision(), __first);
+
+ case _Flags::_Type::__scientific_upper_case:
+ return __format_spec::__format_buffer_scientific_upper_case(__buffer, __value, __buffer.__precision(), __first);
+
+ case _Flags::_Type::__fixed_lower_case:
+ case _Flags::_Type::__fixed_upper_case:
+ return __format_spec::__format_buffer_fixed(__buffer, __value, __buffer.__precision(), __first);
+
+ case _Flags::_Type::__general_lower_case:
+ return __format_spec::__format_buffer_general_lower_case(__buffer, __value, __buffer.__precision(), __first);
+
+ case _Flags::_Type::__general_upper_case:
+ return __format_spec::__format_buffer_general_upper_case(__buffer, __value, __buffer.__precision(), __first);
+
+ default:
+ _LIBCPP_ASSERT(false, "The parser should have validated the type");
+ _LIBCPP_UNREACHABLE();
+ }
+ }
+};
+
+} //namespace __format_spec
+
+template <__formatter::__char_type _CharT>
+struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT formatter<float, _CharT>
+ : public __format_spec::__formatter_floating_point<_CharT> {};
+template <__formatter::__char_type _CharT>
+struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT formatter<double, _CharT>
+ : public __format_spec::__formatter_floating_point<_CharT> {};
+template <__formatter::__char_type _CharT>
+struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT formatter<long double, _CharT>
+ : public __format_spec::__formatter_floating_point<_CharT> {};
+
+# endif // !defined(_LIBCPP_HAS_NO_CONCEPTS)
+
+#endif //_LIBCPP_STD_VER > 17
+
+_LIBCPP_END_NAMESPACE_STD
+
+_LIBCPP_POP_MACROS
+
+#endif // _LIBCPP___FORMAT_FORMATTER_FLOATING_POINT_H
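
Editor's note: the whole new formatter_floating_point.h header is built on the pattern visible above: format the value with std::to_chars into a char buffer, then record the "interesting" positions (radix point, exponent, end) once in a __float_result-like struct so the later steps (padding, alternate form, upper-casing, localisation) work on pointers instead of re-parsing the text. A self-contained sketch of that locate step, with illustrative names rather than the header's internals:

#include <algorithm>
#include <charconv>
#include <iostream>
#include <iterator>
#include <string_view>

int main() {
  char buf[128];
  double value = 0.000123456;

  // Step 1: let to_chars produce the shortest round-trip representation.
  std::to_chars_result r = std::to_chars(std::begin(buf), std::end(buf), value);
  char* last = r.ptr; // one past the written characters

  // Step 2: locate the markers once; fall back to `last` when a marker is
  // absent, mirroring the __float_result convention above.
  char* exponent = std::find(buf, last, 'e');
  char* radix_point = std::find(buf, exponent, '.');
  if (radix_point == exponent)
    radix_point = last; // no '.' before the exponent (or at all)

  std::cout << "formatted:       " << std::string_view(buf, last - buf) << '\n'
            << "has radix point: " << (radix_point != last) << '\n'
            << "has exponent:    " << (exponent != last) << '\n';
}
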
diff --git a/contrib/llvm-project/libcxx/include/__format/formatter_integer.h b/contrib/llvm-project/libcxx/include/__format/formatter_integer.h
index 767df36e61eb..e1f3d4e34897 100644
--- a/contrib/llvm-project/libcxx/include/__format/formatter_integer.h
+++ b/contrib/llvm-project/libcxx/include/__format/formatter_integer.h
@@ -81,25 +81,25 @@ using __formatter_integer = __formatter_integral<__parser_integer<_CharT>>;
// than char, wchar_t, char8_t, char16_t, or char32_t, a specialization
// Signed integral types.
-template <class _CharT>
+template <__formatter::__char_type _CharT>
struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT
formatter<signed char, _CharT>
: public __format_spec::__formatter_integer<_CharT> {};
-template <class _CharT>
+template <__formatter::__char_type _CharT>
struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT formatter<short, _CharT>
: public __format_spec::__formatter_integer<_CharT> {};
-template <class _CharT>
+template <__formatter::__char_type _CharT>
struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT formatter<int, _CharT>
: public __format_spec::__formatter_integer<_CharT> {};
-template <class _CharT>
+template <__formatter::__char_type _CharT>
struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT formatter<long, _CharT>
: public __format_spec::__formatter_integer<_CharT> {};
-template <class _CharT>
+template <__formatter::__char_type _CharT>
struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT
formatter<long long, _CharT>
: public __format_spec::__formatter_integer<_CharT> {};
#ifndef _LIBCPP_HAS_NO_INT128
-template <class _CharT>
+template <__formatter::__char_type _CharT>
struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT
formatter<__int128_t, _CharT>
: public __format_spec::__formatter_integer<_CharT> {
@@ -119,28 +119,28 @@ struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT
#endif
// Unsigned integral types.
-template <class _CharT>
+template <__formatter::__char_type _CharT>
struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT
formatter<unsigned char, _CharT>
: public __format_spec::__formatter_integer<_CharT> {};
-template <class _CharT>
+template <__formatter::__char_type _CharT>
struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT
formatter<unsigned short, _CharT>
: public __format_spec::__formatter_integer<_CharT> {};
-template <class _CharT>
+template <__formatter::__char_type _CharT>
struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT
formatter<unsigned, _CharT>
: public __format_spec::__formatter_integer<_CharT> {};
-template <class _CharT>
+template <__formatter::__char_type _CharT>
struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT
formatter<unsigned long, _CharT>
: public __format_spec::__formatter_integer<_CharT> {};
-template <class _CharT>
+template <__formatter::__char_type _CharT>
struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT
formatter<unsigned long long, _CharT>
: public __format_spec::__formatter_integer<_CharT> {};
#ifndef _LIBCPP_HAS_NO_INT128
-template <class _CharT>
+template <__formatter::__char_type _CharT>
struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT
formatter<__uint128_t, _CharT>
: public __format_spec::__formatter_integer<_CharT> {
diff --git a/contrib/llvm-project/libcxx/include/__format/formatter_integral.h b/contrib/llvm-project/libcxx/include/__format/formatter_integral.h
index 5f1353effd77..f164ee610974 100644
--- a/contrib/llvm-project/libcxx/include/__format/formatter_integral.h
+++ b/contrib/llvm-project/libcxx/include/__format/formatter_integral.h
@@ -10,15 +10,15 @@
#ifndef _LIBCPP___FORMAT_FORMATTER_INTEGRAL_H
#define _LIBCPP___FORMAT_FORMATTER_INTEGRAL_H
+#include <__algorithm/copy.h>
+#include <__algorithm/copy_n.h>
+#include <__algorithm/fill_n.h>
+#include <__algorithm/transform.h>
#include <__config>
#include <__format/format_error.h>
#include <__format/format_fwd.h>
#include <__format/formatter.h>
#include <__format/parser_std_format_spec.h>
-#include <__algorithm/copy.h>
-#include <__algorithm/copy_n.h>
-#include <__algorithm/fill_n.h>
-#include <__algorithm/transform.h>
#include <array>
#include <charconv>
#include <concepts>
@@ -82,7 +82,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD
namespace __format_spec {
/** Wrapper around @ref to_chars, returning the output pointer. */
-template <class _Tp>
+template <integral _Tp>
_LIBCPP_HIDE_FROM_ABI char* __to_buffer(char* __first, char* __last,
_Tp __value, int __base) {
// TODO FMT Evaluate code overhead due to not calling the internal function
diff --git a/contrib/llvm-project/libcxx/include/__format/formatter_pointer.h b/contrib/llvm-project/libcxx/include/__format/formatter_pointer.h
new file mode 100644
index 000000000000..aa2eb641c6c6
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__format/formatter_pointer.h
@@ -0,0 +1,91 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___FORMAT_FORMATTER_POINTER_H
+#define _LIBCPP___FORMAT_FORMATTER_POINTER_H
+
+#include <__algorithm/copy.h>
+#include <__availability>
+#include <__config>
+#include <__debug>
+#include <__format/format_error.h>
+#include <__format/format_fwd.h>
+#include <__format/formatter.h>
+#include <__format/formatter_integral.h>
+#include <__format/parser_std_format_spec.h>
+#include <__iterator/access.h>
+#include <__nullptr>
+#include <cstdint>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#if _LIBCPP_STD_VER > 17
+
+// TODO FMT Remove this once we require compilers with proper C++20 support.
+// If the compiler has no concepts support, the format header will be disabled.
+// Without concepts support enable_if needs to be used and that is too much effort
+// to support compilers with partial C++20 support.
+# if !defined(_LIBCPP_HAS_NO_CONCEPTS)
+
+namespace __format_spec {
+
+template <__formatter::__char_type _CharT>
+class _LIBCPP_TEMPLATE_VIS __formatter_pointer : public __parser_pointer<_CharT> {
+public:
+ _LIBCPP_HIDE_FROM_ABI auto format(const void* __ptr, auto& __ctx) -> decltype(__ctx.out()) {
+ _LIBCPP_ASSERT(this->__alignment != _Flags::_Alignment::__default,
+ "The call to parse should have updated the alignment");
+ if (this->__width_needs_substitution())
+ this->__substitute_width_arg_id(__ctx.arg(this->__width));
+
+ // This code looks a lot like the code to format a hexadecimal integral,
+ // but that code isn't public. Making that code public requires some
+ // refactoring.
+ // TODO FMT Remove code duplication.
+ char __buffer[2 + 2 * sizeof(uintptr_t)];
+ __buffer[0] = '0';
+ __buffer[1] = 'x';
+ char* __last = __to_buffer(__buffer + 2, _VSTD::end(__buffer), reinterpret_cast<uintptr_t>(__ptr), 16);
+
+ unsigned __size = __last - __buffer;
+ if (__size >= this->__width)
+ return _VSTD::copy(__buffer, __last, __ctx.out());
+
+ return __formatter::__write(__ctx.out(), __buffer, __last, __size, this->__width, this->__fill, this->__alignment);
+ }
+};
+
+} // namespace __format_spec
+
+// [format.formatter.spec]/2.4
+// For each charT, the pointer type specializations template<>
+// - struct formatter<nullptr_t, charT>;
+// - template<> struct formatter<void*, charT>;
+// - template<> struct formatter<const void*, charT>;
+template <__formatter::__char_type _CharT>
+struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT formatter<nullptr_t, _CharT>
+ : public __format_spec::__formatter_pointer<_CharT> {};
+template <__formatter::__char_type _CharT>
+struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT formatter<void*, _CharT>
+ : public __format_spec::__formatter_pointer<_CharT> {};
+template <__formatter::__char_type _CharT>
+struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT formatter<const void*, _CharT>
+ : public __format_spec::__formatter_pointer<_CharT> {};
+
+# endif // !defined(_LIBCPP_HAS_NO_CONCEPTS)
+
+#endif //_LIBCPP_STD_VER > 17
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___FORMAT_FORMATTER_POINTER_H
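
Editor's note: the pointer formatter added above reuses the integral machinery by hand, as its comment notes: it writes a "0x" prefix, converts the address with to_chars in base 16, and then pads to the requested width. A small stand-alone sketch of just the conversion step; buffer sizing follows the comments above but every name here is hypothetical:

#include <charconv>
#include <cstdint>
#include <iostream>
#include <iterator>
#include <string_view>

int main() {
  int object = 42;
  const void* ptr = &object;

  // Two characters for the "0x" prefix plus two hex digits per byte.
  char buf[2 + 2 * sizeof(std::uintptr_t)];
  buf[0] = '0';
  buf[1] = 'x';

  // Convert the address itself, in the spirit of the formatter converting
  // reinterpret_cast<uintptr_t>(__ptr) in base 16.
  std::to_chars_result r =
      std::to_chars(buf + 2, std::end(buf), reinterpret_cast<std::uintptr_t>(ptr), 16);

  std::cout << std::string_view(buf, r.ptr - buf) << '\n';
}
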
diff --git a/contrib/llvm-project/libcxx/include/__format/formatter_string.h b/contrib/llvm-project/libcxx/include/__format/formatter_string.h
index 75a81f5184a0..04950faa4a21 100644
--- a/contrib/llvm-project/libcxx/include/__format/formatter_string.h
+++ b/contrib/llvm-project/libcxx/include/__format/formatter_string.h
@@ -64,7 +64,7 @@ public:
// [format.formatter.spec]/2.2 For each charT, the string type specializations
// Formatter const char*.
-template <class _CharT>
+template <__formatter::__char_type _CharT>
struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT
formatter<const _CharT*, _CharT>
: public __format_spec::__formatter_string<_CharT> {
@@ -98,7 +98,7 @@ struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT
};
// Formatter char*.
-template <class _CharT>
+template <__formatter::__char_type _CharT>
struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT
formatter<_CharT*, _CharT> : public formatter<const _CharT*, _CharT> {
using _Base = formatter<const _CharT*, _CharT>;
@@ -110,7 +110,7 @@ struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT
};
// Formatter const char[].
-template <class _CharT, size_t _Size>
+template <__formatter::__char_type _CharT, size_t _Size>
struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT
formatter<const _CharT[_Size], _CharT>
: public __format_spec::__formatter_string<_CharT> {
@@ -123,7 +123,7 @@ struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT
};
// Formatter std::string.
-template <class _CharT, class _Traits, class _Allocator>
+template <__formatter::__char_type _CharT, class _Traits, class _Allocator>
struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT
formatter<basic_string<_CharT, _Traits, _Allocator>, _CharT>
: public __format_spec::__formatter_string<_CharT> {
@@ -138,7 +138,7 @@ struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT
};
// Formatter std::string_view.
-template <class _CharT, class _Traits>
+template <__formatter::__char_type _CharT, class _Traits>
struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FORMAT formatter<basic_string_view<_CharT, _Traits>, _CharT>
: public __format_spec::__formatter_string<_CharT> {
using _Base = __format_spec::__formatter_string<_CharT>;
diff --git a/contrib/llvm-project/libcxx/include/__format/parser_std_format_spec.h b/contrib/llvm-project/libcxx/include/__format/parser_std_format_spec.h
index 9b713b811484..9d893e9ced27 100644
--- a/contrib/llvm-project/libcxx/include/__format/parser_std_format_spec.h
+++ b/contrib/llvm-project/libcxx/include/__format/parser_std_format_spec.h
@@ -214,7 +214,7 @@ __parse_arg_id(const _CharT* __begin, const _CharT* __end, auto& __parse_ctx) {
__format::__parse_arg_id(__begin, __end, __parse_ctx);
if (__r.__ptr == __end || *__r.__ptr != _CharT('}'))
- __throw_format_error("A format-spec arg-id should terminate at a '}'");
+ __throw_format_error("Invalid arg-id");
++__r.__ptr;
return __r;
@@ -363,17 +363,6 @@ protected:
if (__begin == __end)
__throw_format_error("End of input while parsing format-spec precision");
- if (*__begin == _CharT('0')) {
- ++__begin;
- if (__begin != __end && *__begin >= '0' && *__begin <= '9')
- __throw_format_error(
- "A format-spec precision field shouldn't have a leading zero");
-
- __precision = 0;
- __precision_as_arg = 0;
- return __begin;
- }
-
if (*__begin == _CharT('{')) {
__format::__parse_number_result __arg_id =
__parse_arg_id(++__begin, __end, __parse_ctx);
@@ -487,6 +476,21 @@ __parse_type(const _CharT* __begin, _Flags& __flags) {
}
/**
+ * Process the parsed alignment and zero-padding state of arithmetic types.
+ *
+ * [format.string.std]/13
+ * If the 0 character and an align option both appear, the 0 character is
+ * ignored.
+ *
+ * For the formatter a @ref __default alignment means zero-padding.
+ */
+_LIBCPP_HIDE_FROM_ABI constexpr void __process_arithmetic_alignment(_Flags& __flags) {
+ __flags.__zero_padding &= __flags.__alignment == _Flags::_Alignment::__default;
+ if (!__flags.__zero_padding && __flags.__alignment == _Flags::_Alignment::__default)
+ __flags.__alignment = _Flags::_Alignment::__right;
+}
+
+/**
* The parser for the std-format-spec.
*
* [format.string.std]/1 specifies the std-format-spec:
@@ -659,23 +663,9 @@ protected:
return __begin;
}
- /**
- * Handles the post-parsing updates for the integer types.
- *
- * Updates the zero-padding and alignment for integer types.
- *
- * [format.string.std]/13
- * If the 0 character and an align option both appear, the 0 character is
- * ignored.
- *
- * For the formatter a @ref __default alignment means zero-padding. Update
- * the alignment based on parsed format string.
- */
+ /** Handles the post-parsing updates for the integer types. */
_LIBCPP_HIDE_FROM_ABI constexpr void __handle_integer() noexcept {
- this->__zero_padding &= this->__alignment == _Flags::_Alignment::__default;
- if (!this->__zero_padding &&
- this->__alignment == _Flags::_Alignment::__default)
- this->__alignment = _Flags::_Alignment::__right;
+ __process_arithmetic_alignment(static_cast<_Flags&>(*this));
}
/**
@@ -712,8 +702,232 @@ protected:
}
};
-// TODO FMT Add a parser for floating-point values.
-// TODO FMT Add a parser for pointer values.
+/**
+ * The parser for the std-format-spec.
+ *
+ * This implements the parser for the floating-point types.
+ *
+ * See @ref __parser_string.
+ */
+template <class _CharT>
+class _LIBCPP_TEMPLATE_VIS __parser_floating_point
+ : public __parser_width, // provides __width(|as_arg)
+ public __parser_precision, // provides __precision(|as_arg)
+ public __parser_fill_align<_CharT>, // provides __fill and uses __flags
+ public _Flags // provides __flags
+{
+public:
+ using char_type = _CharT;
+
+ /**
+ * The low-level std-format-spec parse function.
+ *
+ * @pre __begin points at the beginning of the std-format-spec. This means
+ * directly after the ':'.
+ * @pre The std-format-spec parses the entire input, or the first unmatched
+ * character is a '}'.
+ *
+ * @returns The iterator pointing at the last parsed character.
+ */
+ _LIBCPP_HIDE_FROM_ABI constexpr auto parse(auto& __parse_ctx)
+ -> decltype(__parse_ctx.begin()) {
+ auto __it = __parse(__parse_ctx);
+ __process_arithmetic_alignment(static_cast<_Flags&>(*this));
+ __process_display_type();
+ return __it;
+ }
+protected:
+ /**
+ * The low-level std-format-spec parse function.
+ *
+ * @pre __begin points at the beginning of the std-format-spec. This means
+ * directly after the ':'.
+ * @pre The std-format-spec parses the entire input, or the first unmatched
+ * character is a '}'.
+ *
+ * @returns The iterator pointing at the last parsed character.
+ */
+ _LIBCPP_HIDE_FROM_ABI constexpr auto __parse(auto& __parse_ctx)
+ -> decltype(__parse_ctx.begin()) {
+ auto __begin = __parse_ctx.begin();
+ auto __end = __parse_ctx.end();
+ if (__begin == __end)
+ return __begin;
+
+ __begin = __parser_fill_align<_CharT>::__parse(__begin, __end,
+ static_cast<_Flags&>(*this));
+ if (__begin == __end)
+ return __begin;
+
+ __begin = __parse_sign(__begin, static_cast<_Flags&>(*this));
+ if (__begin == __end)
+ return __begin;
+
+ __begin = __parse_alternate_form(__begin, static_cast<_Flags&>(*this));
+ if (__begin == __end)
+ return __begin;
+
+ __begin = __parse_zero_padding(__begin, static_cast<_Flags&>(*this));
+ if (__begin == __end)
+ return __begin;
+
+ __begin = __parser_width::__parse(__begin, __end, __parse_ctx);
+ if (__begin == __end)
+ return __begin;
+
+ __begin = __parser_precision::__parse(__begin, __end, __parse_ctx);
+ if (__begin == __end)
+ return __begin;
+
+ __begin =
+ __parse_locale_specific_form(__begin, static_cast<_Flags&>(*this));
+ if (__begin == __end)
+ return __begin;
+
+ __begin = __parse_type(__begin, static_cast<_Flags&>(*this));
+
+ if (__begin != __end && *__begin != _CharT('}'))
+ __throw_format_error(
+ "The format-spec should consume the input or end with a '}'");
+
+ return __begin;
+ }
+
+ /** Processes the parsed std-format-spec based on the parsed display type. */
+ _LIBCPP_HIDE_FROM_ABI constexpr void __process_display_type() {
+ switch (this->__type) {
+ case _Flags::_Type::__default:
+      // When no precision is specified the type stays __default, since its
+      // formatting differs from that of the other types.
+ if (this->__has_precision_field())
+ this->__type = _Flags::_Type::__general_lower_case;
+ break;
+ case _Flags::_Type::__float_hexadecimal_lower_case:
+ case _Flags::_Type::__float_hexadecimal_upper_case:
+ // Precision specific behavior will be handled later.
+ break;
+ case _Flags::_Type::__scientific_lower_case:
+ case _Flags::_Type::__scientific_upper_case:
+ case _Flags::_Type::__fixed_lower_case:
+ case _Flags::_Type::__fixed_upper_case:
+ case _Flags::_Type::__general_lower_case:
+ case _Flags::_Type::__general_upper_case:
+ if (!this->__has_precision_field()) {
+ // Set the default precision for the call to to_chars.
+ this->__precision = 6;
+ this->__precision_as_arg = false;
+ }
+ break;
+
+ default:
+ __throw_format_error("The format-spec type has a type not supported for "
+ "a floating-point argument");
+ }
+ }
+};
+
+/**
+ * The parser for the std-format-spec.
+ *
+ * This implements the parser for the pointer types.
+ *
+ * See @ref __parser_string.
+ */
+template <class _CharT>
+class _LIBCPP_TEMPLATE_VIS __parser_pointer : public __parser_width, // provides __width(|as_arg)
+ public __parser_fill_align<_CharT>, // provides __fill and uses __flags
+ public _Flags // provides __flags
+{
+public:
+ using char_type = _CharT;
+
+ _LIBCPP_HIDE_FROM_ABI constexpr __parser_pointer() {
+ // Implements LWG3612 Inconsistent pointer alignment in std::format.
+    // The issue's current status is "Tentatively Ready" and libc++'s format
+    // implementation is still experimental.
+ //
+ // TODO FMT Validate this with the final resolution of LWG3612.
+ this->__alignment = _Flags::_Alignment::__right;
+ }
+
+ /**
+ * The low-level std-format-spec parse function.
+ *
+ * @pre __begin points at the beginning of the std-format-spec. This means
+ * directly after the ':'.
+ * @pre The std-format-spec parses the entire input, or the first unmatched
+ * character is a '}'.
+ *
+ * @returns The iterator pointing at the last parsed character.
+ */
+ _LIBCPP_HIDE_FROM_ABI constexpr auto parse(auto& __parse_ctx) -> decltype(__parse_ctx.begin()) {
+ auto __it = __parse(__parse_ctx);
+ __process_display_type();
+ return __it;
+ }
+
+protected:
+ /**
+ * The low-level std-format-spec parse function.
+ *
+ * @pre __begin points at the beginning of the std-format-spec. This means
+ * directly after the ':'.
+ * @pre The std-format-spec parses the entire input, or the first unmatched
+ * character is a '}'.
+ *
+ * @returns The iterator pointing at the last parsed character.
+ */
+ _LIBCPP_HIDE_FROM_ABI constexpr auto __parse(auto& __parse_ctx) -> decltype(__parse_ctx.begin()) {
+ auto __begin = __parse_ctx.begin();
+ auto __end = __parse_ctx.end();
+ if (__begin == __end)
+ return __begin;
+
+ __begin = __parser_fill_align<_CharT>::__parse(__begin, __end, static_cast<_Flags&>(*this));
+ if (__begin == __end)
+ return __begin;
+
+ // An integer presentation type isn't defined in the Standard.
+ // Since a pointer is formatted as an integer it can be argued it's an
+ // integer presentation type. However there are two LWG-issues asserting it
+ // isn't an integer presentation type:
+ // - LWG3612 Inconsistent pointer alignment in std::format
+ // - LWG3644 std::format does not define "integer presentation type"
+ //
+  // There's a paper that makes additional clarifications on the status of
+  // formatting pointers and proposes making additional fields valid. That
+  // paper hasn't been reviewed by the Committee yet.
+ // - P2510 Formatting pointers
+ //
+ // The current implementation assumes formatting pointers isn't covered by
+ // "integer presentation type".
+ // TODO FMT Apply the LWG-issues/papers after approval/rejection by the Committee.
+
+ __begin = __parser_width::__parse(__begin, __end, __parse_ctx);
+ if (__begin == __end)
+ return __begin;
+
+ __begin = __parse_type(__begin, static_cast<_Flags&>(*this));
+
+ if (__begin != __end && *__begin != _CharT('}'))
+ __throw_format_error("The format-spec should consume the input or end with a '}'");
+
+ return __begin;
+ }
+
+ /** Processes the parsed std-format-spec based on the parsed display type. */
+ _LIBCPP_HIDE_FROM_ABI constexpr void __process_display_type() {
+ switch (this->__type) {
+ case _Flags::_Type::__default:
+ this->__type = _Flags::_Type::__pointer;
+ break;
+ case _Flags::_Type::__pointer:
+ break;
+ default:
+ __throw_format_error("The format-spec type has a type not supported for a pointer argument");
+ }
+ }
+};
/** Helper struct returned from @ref __get_string_alignment. */
template <class _CharT>
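As a user-level illustration of the [format.string.std]/13 rule that __process_arithmetic_alignment applies above (the 0 character is ignored when an align option is present) — a minimal sketch, assuming a complete C++20 <format> rather than the experimental implementation being merged here:

#include <format>
#include <iostream>

int main() {
  // No align option: '0' selects zero padding and right alignment.
  std::cout << std::format("{:06}", 42) << '\n';   // prints "000042"
  // An explicit align option makes the '0' ignored, so the default space
  // fill is used instead ([format.string.std]/13).
  std::cout << std::format("{:<06}", 42) << '\n';  // prints "42    "
  return 0;
}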
diff --git a/contrib/llvm-project/libcxx/include/__function_like.h b/contrib/llvm-project/libcxx/include/__function_like.h
deleted file mode 100644
index 4075355174d9..000000000000
--- a/contrib/llvm-project/libcxx/include/__function_like.h
+++ /dev/null
@@ -1,51 +0,0 @@
-// -*- C++ -*-
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _LIBCPP___ITERATOR_FUNCTION_LIKE_H
-#define _LIBCPP___ITERATOR_FUNCTION_LIKE_H
-
-#include <__config>
-
-#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
-#pragma GCC system_header
-#endif
-
-_LIBCPP_BEGIN_NAMESPACE_STD
-
-#if !defined(_LIBCPP_HAS_NO_RANGES)
-
-namespace ranges {
-// Per [range.iter.ops.general] and [algorithms.requirements], functions in namespace std::ranges
-// can't be found by ADL and inhibit ADL when found by unqualified lookup. The easiest way to
-// facilitate this is to use function objects.
-//
-// Since these are still standard library functions, we use `__function_like` to eliminate most of
-// the properties that function objects get by default (e.g. semiregularity, addressability), to
-// limit the surface area of the unintended public interface, so as to curb the effect of Hyrum's
-// law.
-struct __function_like {
- __function_like() = delete;
- __function_like(__function_like const&) = delete;
- __function_like& operator=(__function_like const&) = delete;
-
- void operator&() const = delete;
-
- struct __tag { };
-
-protected:
- constexpr explicit __function_like(__tag) noexcept {}
- ~__function_like() = default;
-};
-} // namespace ranges
-
-#endif // !defined(_LIBCPP_HAS_NO_RANGES)
-
-_LIBCPP_END_NAMESPACE_STD
-
-#endif // _LIBCPP___ITERATOR_FUNCTION_LIKE_H
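The deleted __function_like helper and the `inline namespace __cpo` pattern that replaces it later in this diff (ranges::advance, ranges::next, ranges::prev) serve the same [range.iter.ops.general] requirement quoted above: the names must be function objects so they cannot be found by ADL and inhibit ADL when found by unqualified lookup. A minimal sketch of that shape with invented names (my_ranges, advance_twice); it is not libc++ code:

#include <iterator>

namespace my_ranges {
namespace detail {
  struct advance_twice_fn {
    template <std::input_or_output_iterator It>
    constexpr void operator()(It& it) const {
      // A member operator() is never an ADL candidate, and because
      // advance_twice names an object, finding it by unqualified lookup
      // suppresses argument-dependent lookup entirely.
      ++it;
      ++it;
    }
  };
} // namespace detail

inline namespace cpo {
  inline constexpr auto advance_twice = detail::advance_twice_fn{};
} // namespace cpo
} // namespace my_ranges

int main() {
  int arr[] = {1, 2, 3};
  int* p = arr;
  my_ranges::advance_twice(p);
  return (*p == 3) ? 0 : 1;
}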
diff --git a/contrib/llvm-project/libcxx/include/__functional/bind.h b/contrib/llvm-project/libcxx/include/__functional/bind.h
index 0eb95c824664..11a51e5957ee 100644
--- a/contrib/llvm-project/libcxx/include/__functional/bind.h
+++ b/contrib/llvm-project/libcxx/include/__functional/bind.h
@@ -11,8 +11,8 @@
#define _LIBCPP___FUNCTIONAL_BIND_H
#include <__config>
-#include <__functional/weak_result_type.h>
#include <__functional/invoke.h>
+#include <__functional/weak_result_type.h>
#include <cstddef>
#include <tuple>
#include <type_traits>
@@ -23,18 +23,24 @@
_LIBCPP_BEGIN_NAMESPACE_STD
-template<class _Tp> struct __is_bind_expression : public false_type {};
-template<class _Tp> struct _LIBCPP_TEMPLATE_VIS is_bind_expression
- : public __is_bind_expression<typename remove_cv<_Tp>::type> {};
+template<class _Tp>
+struct is_bind_expression : _If<
+ _IsSame<_Tp, typename __uncvref<_Tp>::type>::value,
+ false_type,
+ is_bind_expression<typename __uncvref<_Tp>::type>
+> {};
#if _LIBCPP_STD_VER > 14
template <class _Tp>
inline constexpr size_t is_bind_expression_v = is_bind_expression<_Tp>::value;
#endif
-template<class _Tp> struct __is_placeholder : public integral_constant<int, 0> {};
-template<class _Tp> struct _LIBCPP_TEMPLATE_VIS is_placeholder
- : public __is_placeholder<typename remove_cv<_Tp>::type> {};
+template<class _Tp>
+struct is_placeholder : _If<
+ _IsSame<_Tp, typename __uncvref<_Tp>::type>::value,
+ integral_constant<int, 0>,
+ is_placeholder<typename __uncvref<_Tp>::type>
+> {};
#if _LIBCPP_STD_VER > 14
template <class _Tp>
@@ -73,7 +79,7 @@ _LIBCPP_FUNC_VIS extern const __ph<10> _10;
} // namespace placeholders
template<int _Np>
-struct __is_placeholder<placeholders::__ph<_Np> >
+struct is_placeholder<placeholders::__ph<_Np> >
: public integral_constant<int, _Np> {};
@@ -304,7 +310,7 @@ public:
};
template<class _Fp, class ..._BoundArgs>
-struct __is_bind_expression<__bind<_Fp, _BoundArgs...> > : public true_type {};
+struct is_bind_expression<__bind<_Fp, _BoundArgs...> > : public true_type {};
template<class _Rp, class _Fp, class ..._BoundArgs>
class __bind_r
@@ -359,7 +365,7 @@ public:
};
template<class _Rp, class _Fp, class ..._BoundArgs>
-struct __is_bind_expression<__bind_r<_Rp, _Fp, _BoundArgs...> > : public true_type {};
+struct is_bind_expression<__bind_r<_Rp, _Fp, _BoundArgs...> > : public true_type {};
template<class _Fp, class ..._BoundArgs>
inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX17
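With this change the public is_bind_expression / is_placeholder templates are the customization point directly (the internal __is_bind_expression indirection is gone), recursing through __uncvref so cv- and ref-qualified queries resolve to the user's specialization. A small sketch of the documented customization point, with an invented type my_bound_call; the cv/ref static_assert relies on the recursion shown above:

#include <functional>
#include <type_traits>

// A user-provided callable that asks std::bind to treat it as a nested bind
// subexpression, via the standard customization point.
struct my_bound_call {
  int operator()(int x) const { return x + 1; }
};

template <>
struct std::is_bind_expression<my_bound_call> : std::true_type {};

static_assert(std::is_bind_expression_v<my_bound_call>);
// Resolved through the __uncvref recursion above (libc++ behaviour).
static_assert(std::is_bind_expression_v<const my_bound_call&>);
static_assert(std::is_placeholder_v<decltype(std::placeholders::_2)> == 2);

int main() {
  // Because my_bound_call is a bind expression, the outer bind invokes it
  // with the outer call's arguments: plus(my_bound_call{}(5), 10) == 16.
  auto f = std::bind(std::plus<int>{}, my_bound_call{}, 10);
  return f(5) == 16 ? 0 : 1;
}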
diff --git a/contrib/llvm-project/libcxx/include/__functional/bind_front.h b/contrib/llvm-project/libcxx/include/__functional/bind_front.h
index 86d4594b6571..31397ec5400d 100644
--- a/contrib/llvm-project/libcxx/include/__functional/bind_front.h
+++ b/contrib/llvm-project/libcxx/include/__functional/bind_front.h
@@ -11,8 +11,8 @@
#define _LIBCPP___FUNCTIONAL_BIND_FRONT_H
#include <__config>
-#include <__functional/perfect_forward.h>
#include <__functional/invoke.h>
+#include <__functional/perfect_forward.h>
#include <type_traits>
#include <utility>
diff --git a/contrib/llvm-project/libcxx/include/__functional/function.h b/contrib/llvm-project/libcxx/include/__functional/function.h
index 8336d85adf2e..b6d383ce8459 100644
--- a/contrib/llvm-project/libcxx/include/__functional/function.h
+++ b/contrib/llvm-project/libcxx/include/__functional/function.h
@@ -16,6 +16,7 @@
#include <__functional/invoke.h>
#include <__functional/unary_function.h>
#include <__iterator/iterator_traits.h>
+#include <__memory/addressof.h>
#include <__memory/allocator_traits.h>
#include <__memory/compressed_pair.h>
#include <__memory/shared_ptr.h>
@@ -360,7 +361,7 @@ const void*
__func<_Fp, _Alloc, _Rp(_ArgTypes...)>::target(const type_info& __ti) const _NOEXCEPT
{
if (__ti == typeid(_Fp))
- return &__f_.__target();
+ return _VSTD::addressof(__f_.__target());
return nullptr;
}
@@ -1392,7 +1393,7 @@ const void*
__func<_Fp, _Alloc, _Rp()>::target(const type_info& __ti) const
{
if (__ti == typeid(_Fp))
- return &__f_.first();
+ return _VSTD::addressof(__f_.first());
return (const void*)0;
}
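The `&...` to `_VSTD::addressof(...)` substitutions here (and throughout the __hash_table changes below) matter for types that overload unary operator&. A self-contained illustration, not libc++ code:

#include <cassert>
#include <memory>

struct quirky_callable {
  int operator()(int x) const { return x * 2; }
  // A hostile overload: plain '&' no longer yields the object's address.
  const void* operator&() const { return nullptr; }
};

int main() {
  quirky_callable q;
  assert(&q == nullptr);                      // the overload wins
  assert(std::addressof(q) != nullptr);       // always the real address
  assert((*std::addressof(q))(21) == 42);
  return 0;
}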
diff --git a/contrib/llvm-project/libcxx/include/__functional/hash.h b/contrib/llvm-project/libcxx/include/__functional/hash.h
index b1a3ad94ae2d..de0c161f47ec 100644
--- a/contrib/llvm-project/libcxx/include/__functional/hash.h
+++ b/contrib/llvm-project/libcxx/include/__functional/hash.h
@@ -16,9 +16,9 @@
#include <__utility/move.h>
#include <__utility/pair.h>
#include <__utility/swap.h>
+#include <cstddef>
#include <cstdint>
#include <cstring>
-#include <cstddef>
#include <limits>
#include <type_traits>
diff --git a/contrib/llvm-project/libcxx/include/__functional/mem_fn.h b/contrib/llvm-project/libcxx/include/__functional/mem_fn.h
index 1fa070a42cc9..0ec84233439c 100644
--- a/contrib/llvm-project/libcxx/include/__functional/mem_fn.h
+++ b/contrib/llvm-project/libcxx/include/__functional/mem_fn.h
@@ -11,9 +11,9 @@
#define _LIBCPP___FUNCTIONAL_MEM_FN_H
#include <__config>
-#include <__functional/weak_result_type.h>
#include <__functional/binary_function.h>
#include <__functional/invoke.h>
+#include <__functional/weak_result_type.h>
#include <utility>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__functional/mem_fun_ref.h b/contrib/llvm-project/libcxx/include/__functional/mem_fun_ref.h
index 4616da0b0748..830936c1b0e5 100644
--- a/contrib/llvm-project/libcxx/include/__functional/mem_fun_ref.h
+++ b/contrib/llvm-project/libcxx/include/__functional/mem_fun_ref.h
@@ -11,8 +11,8 @@
#define _LIBCPP___FUNCTIONAL_MEM_FUN_REF_H
#include <__config>
-#include <__functional/unary_function.h>
#include <__functional/binary_function.h>
+#include <__functional/unary_function.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/__functional/not_fn.h b/contrib/llvm-project/libcxx/include/__functional/not_fn.h
index 81fe112c88ba..36aab2eb7935 100644
--- a/contrib/llvm-project/libcxx/include/__functional/not_fn.h
+++ b/contrib/llvm-project/libcxx/include/__functional/not_fn.h
@@ -11,8 +11,8 @@
#define _LIBCPP___FUNCTIONAL_NOT_FN_H
#include <__config>
-#include <__functional/perfect_forward.h>
#include <__functional/invoke.h>
+#include <__functional/perfect_forward.h>
#include <utility>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__functional/reference_wrapper.h b/contrib/llvm-project/libcxx/include/__functional/reference_wrapper.h
index e1e4abd80c23..d04d51568beb 100644
--- a/contrib/llvm-project/libcxx/include/__functional/reference_wrapper.h
+++ b/contrib/llvm-project/libcxx/include/__functional/reference_wrapper.h
@@ -34,25 +34,16 @@ public:
private:
type* __f_;
-#ifndef _LIBCPP_CXX03_LANG
static void __fun(_Tp&) _NOEXCEPT;
static void __fun(_Tp&&) = delete;
-#endif
public:
- // construct/copy/destroy
-#ifdef _LIBCPP_CXX03_LANG
- _LIBCPP_INLINE_VISIBILITY
- reference_wrapper(type& __f) _NOEXCEPT
- : __f_(_VSTD::addressof(__f)) {}
-#else
- template <class _Up, class = __enable_if_t<!__is_same_uncvref<_Up, reference_wrapper>::value, decltype(__fun(declval<_Up>())) >>
+ template <class _Up, class = __enable_if_t<!__is_same_uncvref<_Up, reference_wrapper>::value, decltype(__fun(declval<_Up>())) > >
_LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX17
reference_wrapper(_Up&& __u) _NOEXCEPT_(noexcept(__fun(declval<_Up>()))) {
type& __f = static_cast<_Up&&>(__u);
__f_ = _VSTD::addressof(__f);
}
-#endif
// access
_LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX17
@@ -176,7 +167,7 @@ public:
#endif // _LIBCPP_CXX03_LANG
};
-#if _LIBCPP_STD_VER >= 17
+#if _LIBCPP_STD_VER > 14
template <class _Tp>
reference_wrapper(_Tp&) -> reference_wrapper<_Tp>;
#endif
@@ -194,7 +185,7 @@ inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX17
reference_wrapper<_Tp>
ref(reference_wrapper<_Tp> __t) _NOEXCEPT
{
- return _VSTD::ref(__t.get());
+ return __t;
}
template <class _Tp>
@@ -210,13 +201,11 @@ inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX17
reference_wrapper<const _Tp>
cref(reference_wrapper<_Tp> __t) _NOEXCEPT
{
- return _VSTD::cref(__t.get());
+ return __t;
}
-#ifndef _LIBCPP_CXX03_LANG
template <class _Tp> void ref(const _Tp&&) = delete;
template <class _Tp> void cref(const _Tp&&) = delete;
-#endif
_LIBCPP_END_NAMESPACE_STD
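The simplified ref/cref overloads above just return their argument, and the deduction guide is now guarded by _LIBCPP_STD_VER > 14 (C++17 and later). The observable behaviour, as a minimal sketch assuming a C++17-or-later standard library:

#include <cassert>
#include <functional>
#include <type_traits>

int main() {
  int i = 0;

  // Class template argument deduction (the guide guarded above).
  std::reference_wrapper w(i);
  static_assert(std::is_same_v<decltype(w), std::reference_wrapper<int>>);

  // ref() of a reference_wrapper returns it unchanged; it never nests.
  auto r = std::ref(std::ref(i));
  static_assert(std::is_same_v<decltype(r), std::reference_wrapper<int>>);

  // cref() of a reference_wrapper<int> yields reference_wrapper<const int>.
  auto c = std::cref(std::ref(i));
  static_assert(std::is_same_v<decltype(c), std::reference_wrapper<const int>>);

  r.get() = 42;
  assert(i == 42);

  // std::ref(42);   // ill-formed: the rvalue overloads of ref/cref are deleted
  return 0;
}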
diff --git a/contrib/llvm-project/libcxx/include/__hash_table b/contrib/llvm-project/libcxx/include/__hash_table
index 126e1884a664..adc732cffb01 100644
--- a/contrib/llvm-project/libcxx/include/__hash_table
+++ b/contrib/llvm-project/libcxx/include/__hash_table
@@ -288,9 +288,7 @@ public:
typedef typename _NodeTypes::__node_value_type_pointer pointer;
_LIBCPP_INLINE_VISIBILITY __hash_iterator() _NOEXCEPT : __node_(nullptr) {
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_i(this);
-#endif
+ _VSTD::__debug_db_insert_i(this);
}
#if _LIBCPP_DEBUG_LEVEL == 2
@@ -310,9 +308,9 @@ public:
_LIBCPP_INLINE_VISIBILITY
__hash_iterator& operator=(const __hash_iterator& __i)
{
- if (this != &__i)
+ if (this != _VSTD::addressof(__i))
{
- __get_db()->__iterator_copy(this, &__i);
+ __get_db()->__iterator_copy(this, _VSTD::addressof(__i));
__node_ = __i.__node_;
}
return *this;
@@ -400,9 +398,7 @@ public:
_LIBCPP_INLINE_VISIBILITY __hash_const_iterator() _NOEXCEPT : __node_(nullptr) {
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_i(this);
-#endif
+ _VSTD::__debug_db_insert_i(this);
}
_LIBCPP_INLINE_VISIBILITY
@@ -410,7 +406,7 @@ public:
: __node_(__x.__node_)
{
#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__iterator_copy(this, &__x);
+ __get_db()->__iterator_copy(this, _VSTD::addressof(__x));
#endif
}
@@ -419,7 +415,7 @@ public:
__hash_const_iterator(const __hash_const_iterator& __i)
: __node_(__i.__node_)
{
- __get_db()->__iterator_copy(this, &__i);
+ __get_db()->__iterator_copy(this, _VSTD::addressof(__i));
}
_LIBCPP_INLINE_VISIBILITY
@@ -431,9 +427,9 @@ public:
_LIBCPP_INLINE_VISIBILITY
__hash_const_iterator& operator=(const __hash_const_iterator& __i)
{
- if (this != &__i)
+ if (this != _VSTD::addressof(__i))
{
- __get_db()->__iterator_copy(this, &__i);
+ __get_db()->__iterator_copy(this, _VSTD::addressof(__i));
__node_ = __i.__node_;
}
return *this;
@@ -517,9 +513,7 @@ public:
typedef typename _NodeTypes::__node_value_type_pointer pointer;
_LIBCPP_INLINE_VISIBILITY __hash_local_iterator() _NOEXCEPT : __node_(nullptr) {
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_i(this);
-#endif
+ _VSTD::__debug_db_insert_i(this);
}
#if _LIBCPP_DEBUG_LEVEL == 2
@@ -529,7 +523,7 @@ public:
__bucket_(__i.__bucket_),
__bucket_count_(__i.__bucket_count_)
{
- __get_db()->__iterator_copy(this, &__i);
+ __get_db()->__iterator_copy(this, _VSTD::addressof(__i));
}
_LIBCPP_INLINE_VISIBILITY
@@ -541,9 +535,9 @@ public:
_LIBCPP_INLINE_VISIBILITY
__hash_local_iterator& operator=(const __hash_local_iterator& __i)
{
- if (this != &__i)
+ if (this != _VSTD::addressof(__i))
{
- __get_db()->__iterator_copy(this, &__i);
+ __get_db()->__iterator_copy(this, _VSTD::addressof(__i));
__node_ = __i.__node_;
__bucket_ = __i.__bucket_;
__bucket_count_ = __i.__bucket_count_;
@@ -651,9 +645,7 @@ public:
_LIBCPP_INLINE_VISIBILITY __hash_const_local_iterator() _NOEXCEPT : __node_(nullptr) {
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_i(this);
-#endif
+ _VSTD::__debug_db_insert_i(this);
}
_LIBCPP_INLINE_VISIBILITY
@@ -663,7 +655,7 @@ public:
__bucket_count_(__x.__bucket_count_)
{
#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__iterator_copy(this, &__x);
+ __get_db()->__iterator_copy(this, _VSTD::addressof(__x));
#endif
}
@@ -674,7 +666,7 @@ public:
__bucket_(__i.__bucket_),
__bucket_count_(__i.__bucket_count_)
{
- __get_db()->__iterator_copy(this, &__i);
+ __get_db()->__iterator_copy(this, _VSTD::addressof(__i));
}
_LIBCPP_INLINE_VISIBILITY
@@ -686,9 +678,9 @@ public:
_LIBCPP_INLINE_VISIBILITY
__hash_const_local_iterator& operator=(const __hash_const_local_iterator& __i)
{
- if (this != &__i)
+ if (this != _VSTD::addressof(__i))
{
- __get_db()->__iterator_copy(this, &__i);
+ __get_db()->__iterator_copy(this, _VSTD::addressof(__i));
__node_ = __i.__node_;
__bucket_ = __i.__bucket_;
__bucket_count_ = __i.__bucket_count_;
@@ -1623,7 +1615,7 @@ __hash_table<_Tp, _Hash, _Equal, _Alloc>::__move_assign(
__u.size() = 0;
}
#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->swap(this, &__u);
+ __get_db()->swap(this, _VSTD::addressof(__u));
#endif
}
@@ -2029,11 +2021,9 @@ typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_insert_multi(
const_iterator __p, __node_pointer __cp)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(&__p) == this,
- "unordered container::emplace_hint(const_iterator, args...) called with an iterator not"
- " referring to this unordered container");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__p)) == this,
+ "unordered container::emplace_hint(const_iterator, args...) called with an iterator not"
+ " referring to this unordered container");
if (__p != end() && key_eq()(*__p, __cp->__value_))
{
__next_pointer __np = __p.__node_;
@@ -2158,11 +2148,9 @@ typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__emplace_hint_multi(
const_iterator __p, _Args&&... __args)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(&__p) == this,
- "unordered container::emplace_hint(const_iterator, args...) called with an iterator not"
- " referring to this unordered container");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__p)) == this,
+ "unordered container::emplace_hint(const_iterator, args...) called with an iterator not"
+ " referring to this unordered container");
__node_holder __h = __construct_node(_VSTD::forward<_Args>(__args)...);
iterator __r = __node_insert_multi(__p, __h.get());
__h.release();
@@ -2484,12 +2472,12 @@ typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator
__hash_table<_Tp, _Hash, _Equal, _Alloc>::erase(const_iterator __p)
{
__next_pointer __np = __p.__node_;
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__p)) == this,
+ "unordered container erase(iterator) called with an iterator not"
+ " referring to this container");
+ _LIBCPP_DEBUG_ASSERT(__p != end(),
+ "unordered container erase(iterator) called with a non-dereferenceable iterator");
#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(&__p) == this,
- "unordered container erase(iterator) called with an iterator not"
- " referring to this container");
- _LIBCPP_ASSERT(__p != end(),
- "unordered container erase(iterator) called with a non-dereferenceable iterator");
iterator __r(__np, this);
#else
iterator __r(__np);
@@ -2504,14 +2492,12 @@ typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator
__hash_table<_Tp, _Hash, _Equal, _Alloc>::erase(const_iterator __first,
const_iterator __last)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(&__first) == this,
- "unordered container::erase(iterator, iterator) called with an iterator not"
- " referring to this container");
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(&__last) == this,
- "unordered container::erase(iterator, iterator) called with an iterator not"
- " referring to this container");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__first)) == this,
+ "unordered container::erase(iterator, iterator) called with an iterator not"
+ " referring to this container");
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__last)) == this,
+ "unordered container::erase(iterator, iterator) called with an iterator not"
+ " referring to this container");
for (const_iterator __p = __first; __first != __last; __p = __first)
{
++__first;
@@ -2741,7 +2727,7 @@ __hash_table<_Tp, _Hash, _Equal, _Alloc>::swap(__hash_table& __u)
__u.__bucket_list_[__constrain_hash(__u.__p1_.first().__next_->__hash(), __u.bucket_count())] =
__u.__p1_.first().__ptr();
#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->swap(this, &__u);
+ __get_db()->swap(this, _VSTD::addressof(__u));
#endif
}
diff --git a/contrib/llvm-project/libcxx/include/__iterator/advance.h b/contrib/llvm-project/libcxx/include/__iterator/advance.h
index 2236a57936cc..03418979ddbd 100644
--- a/contrib/llvm-project/libcxx/include/__iterator/advance.h
+++ b/contrib/llvm-project/libcxx/include/__iterator/advance.h
@@ -12,13 +12,12 @@
#include <__config>
#include <__debug>
-#include <__function_like.h>
#include <__iterator/concepts.h>
#include <__iterator/incrementable_traits.h>
#include <__iterator/iterator_traits.h>
#include <__utility/move.h>
-#include <cstdlib>
#include <concepts>
+#include <cstdlib>
#include <limits>
#include <type_traits>
@@ -67,17 +66,13 @@ void advance(_InputIter& __i, _Distance __orig_n) {
#if !defined(_LIBCPP_HAS_NO_RANGES)
-namespace ranges {
// [range.iter.op.advance]
-// TODO(varconst): rename `__advance_fn` to `__fn`.
-struct __advance_fn final : private __function_like {
-private:
- template <class _Tp>
- _LIBCPP_HIDE_FROM_ABI
- static constexpr _Tp __magnitude_geq(_Tp __a, _Tp __b) noexcept {
- return __a < 0 ? (__a <= __b) : (__a >= __b);
- }
+namespace ranges {
+namespace __advance {
+
+struct __fn {
+private:
template <class _Ip>
_LIBCPP_HIDE_FROM_ABI
static constexpr void __advance_forward(_Ip& __i, iter_difference_t<_Ip> __n) {
@@ -97,8 +92,6 @@ private:
}
public:
- constexpr explicit __advance_fn(__tag __x) noexcept : __function_like(__x) {}
-
// Preconditions: If `I` does not model `bidirectional_iterator`, `n` is not negative.
template <input_or_output_iterator _Ip>
_LIBCPP_HIDE_FROM_ABI
@@ -156,6 +149,12 @@ public:
// If `S` and `I` model `sized_sentinel_for<S, I>`:
if constexpr (sized_sentinel_for<_Sp, _Ip>) {
// If |n| >= |bound - i|, equivalent to `ranges::advance(i, bound)`.
+ // __magnitude_geq(a, b) returns |a| >= |b|, assuming they have the same sign.
+ auto __magnitude_geq = [](auto __a, auto __b) {
+ return __a == 0 ? __b == 0 :
+ __a > 0 ? __a >= __b :
+ __a <= __b;
+ };
if (const auto __M = __bound - __i; __magnitude_geq(__n, __M)) {
(*this)(__i, __bound);
return __n - __M;
@@ -186,7 +185,11 @@ public:
}
};
-inline constexpr auto advance = __advance_fn(__function_like::__tag());
+} // namespace __advance
+
+inline namespace __cpo {
+ inline constexpr auto advance = __advance::__fn{};
+} // namespace __cpo
} // namespace ranges
#endif // !defined(_LIBCPP_HAS_NO_RANGES)
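The __magnitude_geq lambda introduced above computes |a| >= |b| for same-sign arguments without forming absolute values. Its three branches, checked in isolation (magnitude_geq below is an invented stand-alone copy, not the libc++ symbol):

#include <cassert>

// Stand-alone copy of the lambda's logic: |a| >= |b|, assuming a and b have
// the same sign (as __n and __bound - __i do inside ranges::advance).
constexpr bool magnitude_geq(long a, long b) {
  return a == 0 ? b == 0
       : a > 0  ? a >= b
                : a <= b;
}

int main() {
  static_assert(magnitude_geq(5, 3) && !magnitude_geq(2, 3));     // positive pair
  static_assert(magnitude_geq(-5, -3) && !magnitude_geq(-2, -3)); // negative pair
  static_assert(magnitude_geq(0, 0) && !magnitude_geq(0, -1));    // zero case
  return 0;
}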
diff --git a/contrib/llvm-project/libcxx/include/__iterator/common_iterator.h b/contrib/llvm-project/libcxx/include/__iterator/common_iterator.h
index 9a142769e55a..605071d70928 100644
--- a/contrib/llvm-project/libcxx/include/__iterator/common_iterator.h
+++ b/contrib/llvm-project/libcxx/include/__iterator/common_iterator.h
@@ -29,6 +29,11 @@ _LIBCPP_BEGIN_NAMESPACE_STD
#if !defined(_LIBCPP_HAS_NO_RANGES)
+template<class _Iter>
+concept __can_use_postfix_proxy =
+ constructible_from<iter_value_t<_Iter>, iter_reference_t<_Iter>> &&
+ move_constructible<iter_value_t<_Iter>>;
+
template<input_or_output_iterator _Iter, sentinel_for<_Iter> _Sent>
requires (!same_as<_Iter, _Sent> && copyable<_Iter>)
class common_iterator {
@@ -41,7 +46,7 @@ class common_iterator {
: __value(_VSTD::move(__x)) {}
public:
- const iter_value_t<_Iter>* operator->() const {
+ constexpr const iter_value_t<_Iter>* operator->() const noexcept {
return _VSTD::addressof(__value);
}
};
@@ -54,11 +59,7 @@ class common_iterator {
: __value(_VSTD::forward<iter_reference_t<_Iter>>(__x)) {}
public:
- constexpr static bool __valid_for_iter =
- constructible_from<iter_value_t<_Iter>, iter_reference_t<_Iter>> &&
- move_constructible<iter_value_t<_Iter>>;
-
- const iter_value_t<_Iter>& operator*() const {
+ constexpr const iter_value_t<_Iter>& operator*() const noexcept {
return __value;
}
};
@@ -75,7 +76,7 @@ public:
requires convertible_to<const _I2&, _Iter> && convertible_to<const _S2&, _Sent>
constexpr common_iterator(const common_iterator<_I2, _S2>& __other)
: __hold_([&]() -> variant<_Iter, _Sent> {
- _LIBCPP_ASSERT(!__other.__hold_.valueless_by_exception(), "Constructed from valueless iterator.");
+ _LIBCPP_ASSERT(!__other.__hold_.valueless_by_exception(), "Attempted to construct from a valueless common_iterator");
if (__other.__hold_.index() == 0)
return variant<_Iter, _Sent>{in_place_index<0>, _VSTD::__unchecked_get<0>(__other.__hold_)};
return variant<_Iter, _Sent>{in_place_index<1>, _VSTD::__unchecked_get<1>(__other.__hold_)};
@@ -85,7 +86,7 @@ public:
requires convertible_to<const _I2&, _Iter> && convertible_to<const _S2&, _Sent> &&
assignable_from<_Iter&, const _I2&> && assignable_from<_Sent&, const _S2&>
common_iterator& operator=(const common_iterator<_I2, _S2>& __other) {
- _LIBCPP_ASSERT(!__other.__hold_.valueless_by_exception(), "Assigned from valueless iterator.");
+ _LIBCPP_ASSERT(!__other.__hold_.valueless_by_exception(), "Attempted to assign from a valueless common_iterator");
auto __idx = __hold_.index();
auto __other_idx = __other.__hold_.index();
@@ -105,18 +106,16 @@ public:
return *this;
}
- decltype(auto) operator*()
+ constexpr decltype(auto) operator*()
{
- _LIBCPP_ASSERT(holds_alternative<_Iter>(__hold_),
- "Cannot dereference sentinel. Common iterator not holding an iterator.");
+ _LIBCPP_ASSERT(holds_alternative<_Iter>(__hold_), "Attempted to dereference a non-dereferenceable common_iterator");
return *_VSTD::__unchecked_get<_Iter>(__hold_);
}
- decltype(auto) operator*() const
+ constexpr decltype(auto) operator*() const
requires __dereferenceable<const _Iter>
{
- _LIBCPP_ASSERT(holds_alternative<_Iter>(__hold_),
- "Cannot dereference sentinel. Common iterator not holding an iterator.");
+ _LIBCPP_ASSERT(holds_alternative<_Iter>(__hold_), "Attempted to dereference a non-dereferenceable common_iterator");
return *_VSTD::__unchecked_get<_Iter>(__hold_);
}
@@ -127,9 +126,7 @@ public:
is_reference_v<iter_reference_t<_I2>> ||
constructible_from<iter_value_t<_I2>, iter_reference_t<_I2>>)
{
- _LIBCPP_ASSERT(holds_alternative<_Iter>(__hold_),
- "Cannot dereference sentinel. Common iterator not holding an iterator.");
-
+ _LIBCPP_ASSERT(holds_alternative<_Iter>(__hold_), "Attempted to dereference a non-dereferenceable common_iterator");
if constexpr (is_pointer_v<_Iter> || requires(const _Iter& __i) { __i.operator->(); }) {
return _VSTD::__unchecked_get<_Iter>(__hold_);
} else if constexpr (is_reference_v<iter_reference_t<_Iter>>) {
@@ -141,21 +138,18 @@ public:
}
common_iterator& operator++() {
- _LIBCPP_ASSERT(holds_alternative<_Iter>(__hold_),
- "Cannot increment sentinel. Common iterator not holding an iterator.");
+ _LIBCPP_ASSERT(holds_alternative<_Iter>(__hold_), "Attempted to increment a non-dereferenceable common_iterator");
++_VSTD::__unchecked_get<_Iter>(__hold_); return *this;
}
decltype(auto) operator++(int) {
- _LIBCPP_ASSERT(holds_alternative<_Iter>(__hold_),
- "Cannot increment sentinel. Common iterator not holding an iterator.");
-
+ _LIBCPP_ASSERT(holds_alternative<_Iter>(__hold_), "Attempted to increment a non-dereferenceable common_iterator");
if constexpr (forward_iterator<_Iter>) {
auto __tmp = *this;
++*this;
return __tmp;
} else if constexpr (requires (_Iter& __i) { { *__i++ } -> __referenceable; } ||
- !__postfix_proxy::__valid_for_iter) {
+ !__can_use_postfix_proxy<_Iter>) {
return _VSTD::__unchecked_get<_Iter>(__hold_)++;
} else {
__postfix_proxy __p(**this);
@@ -166,10 +160,9 @@ public:
template<class _I2, sentinel_for<_Iter> _S2>
requires sentinel_for<_Sent, _I2>
- friend bool operator==(const common_iterator& __x, const common_iterator<_I2, _S2>& __y) {
- _LIBCPP_ASSERT(!__x.__hold_.valueless_by_exception() &&
- !__y.__hold_.valueless_by_exception(),
- "One or both common_iterators are valueless. (Cannot compare valueless iterators.)");
+ friend constexpr bool operator==(const common_iterator& __x, const common_iterator<_I2, _S2>& __y) {
+ _LIBCPP_ASSERT(!__x.__hold_.valueless_by_exception(), "Attempted to compare a valueless common_iterator");
+ _LIBCPP_ASSERT(!__y.__hold_.valueless_by_exception(), "Attempted to compare a valueless common_iterator");
auto __x_index = __x.__hold_.index();
auto __y_index = __y.__hold_.index();
@@ -185,10 +178,9 @@ public:
template<class _I2, sentinel_for<_Iter> _S2>
requires sentinel_for<_Sent, _I2> && equality_comparable_with<_Iter, _I2>
- friend bool operator==(const common_iterator& __x, const common_iterator<_I2, _S2>& __y) {
- _LIBCPP_ASSERT(!__x.__hold_.valueless_by_exception() &&
- !__y.__hold_.valueless_by_exception(),
- "One or both common_iterators are valueless. (Cannot compare valueless iterators.)");
+ friend constexpr bool operator==(const common_iterator& __x, const common_iterator<_I2, _S2>& __y) {
+ _LIBCPP_ASSERT(!__x.__hold_.valueless_by_exception(), "Attempted to compare a valueless common_iterator");
+ _LIBCPP_ASSERT(!__y.__hold_.valueless_by_exception(), "Attempted to compare a valueless common_iterator");
auto __x_index = __x.__hold_.index();
auto __y_index = __y.__hold_.index();
@@ -207,10 +199,9 @@ public:
template<sized_sentinel_for<_Iter> _I2, sized_sentinel_for<_Iter> _S2>
requires sized_sentinel_for<_Sent, _I2>
- friend iter_difference_t<_I2> operator-(const common_iterator& __x, const common_iterator<_I2, _S2>& __y) {
- _LIBCPP_ASSERT(!__x.__hold_.valueless_by_exception() &&
- !__y.__hold_.valueless_by_exception(),
- "One or both common_iterators are valueless. (Cannot subtract valueless iterators.)");
+ friend constexpr iter_difference_t<_I2> operator-(const common_iterator& __x, const common_iterator<_I2, _S2>& __y) {
+ _LIBCPP_ASSERT(!__x.__hold_.valueless_by_exception(), "Attempted to subtract from a valueless common_iterator");
+ _LIBCPP_ASSERT(!__y.__hold_.valueless_by_exception(), "Attempted to subtract a valueless common_iterator");
auto __x_index = __x.__hold_.index();
auto __y_index = __y.__hold_.index();
@@ -227,24 +218,21 @@ public:
return _VSTD::__unchecked_get<_Sent>(__x.__hold_) - _VSTD::__unchecked_get<_I2>(__y.__hold_);
}
- friend iter_rvalue_reference_t<_Iter> iter_move(const common_iterator& __i)
+ friend constexpr iter_rvalue_reference_t<_Iter> iter_move(const common_iterator& __i)
noexcept(noexcept(ranges::iter_move(declval<const _Iter&>())))
requires input_iterator<_Iter>
{
- _LIBCPP_ASSERT(holds_alternative<_Iter>(__i.__hold_),
- "Cannot iter_move a sentinel. Common iterator not holding an iterator.");
+ _LIBCPP_ASSERT(holds_alternative<_Iter>(__i.__hold_), "Attempted to iter_move a non-dereferenceable common_iterator");
return ranges::iter_move( _VSTD::__unchecked_get<_Iter>(__i.__hold_));
}
template<indirectly_swappable<_Iter> _I2, class _S2>
- friend void iter_swap(const common_iterator& __x, const common_iterator<_I2, _S2>& __y)
+ friend constexpr void iter_swap(const common_iterator& __x, const common_iterator<_I2, _S2>& __y)
noexcept(noexcept(ranges::iter_swap(declval<const _Iter&>(), declval<const _I2&>())))
{
- _LIBCPP_ASSERT(holds_alternative<_Iter>(__x.__hold_),
- "Cannot swap __y with a sentinel. Common iterator (__x) not holding an iterator.");
- _LIBCPP_ASSERT(holds_alternative<_Iter>(__y.__hold_),
- "Cannot swap __x with a sentinel. Common iterator (__y) not holding an iterator.");
- return ranges::iter_swap( _VSTD::__unchecked_get<_Iter>(__x.__hold_), _VSTD::__unchecked_get<_Iter>(__y.__hold_));
+ _LIBCPP_ASSERT(holds_alternative<_Iter>(__x.__hold_), "Attempted to iter_swap a non-dereferenceable common_iterator");
+ _LIBCPP_ASSERT(holds_alternative<_I2>(__y.__hold_), "Attempted to iter_swap a non-dereferenceable common_iterator");
+ return ranges::iter_swap(_VSTD::__unchecked_get<_Iter>(__x.__hold_), _VSTD::__unchecked_get<_I2>(__y.__hold_));
}
};
@@ -271,10 +259,10 @@ struct __arrow_type_or_void {
template<class _Iter, class _Sent>
requires __common_iter_has_ptr_op<_Iter, _Sent>
struct __arrow_type_or_void<_Iter, _Sent> {
- using type = decltype(declval<const common_iterator<_Iter, _Sent>>().operator->());
+ using type = decltype(declval<const common_iterator<_Iter, _Sent>&>().operator->());
};
-template<class _Iter, class _Sent>
+template<input_iterator _Iter, class _Sent>
struct iterator_traits<common_iterator<_Iter, _Sent>> {
using iterator_concept = _If<forward_iterator<_Iter>,
forward_iterator_tag,
@@ -288,7 +276,6 @@ struct iterator_traits<common_iterator<_Iter, _Sent>> {
using reference = iter_reference_t<_Iter>;
};
-
#endif // !defined(_LIBCPP_HAS_NO_RANGES)
_LIBCPP_END_NAMESPACE_STD
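What common_iterator is for, as a minimal sketch assuming a C++20 library with the <iterator> concepts: it gives an iterator/sentinel pair of different types a single common type that classic two-iterator interfaces can consume.

#include <cassert>
#include <iterator>
#include <numeric>
#include <vector>

int main() {
  std::vector<int> v = {1, 2, 3, 4, 5};

  // counted_iterator pairs with default_sentinel, so begin/end have different
  // types and cannot be passed to std::accumulate directly.
  std::counted_iterator first(v.begin(), 3);

  using CI = std::common_iterator<decltype(first), std::default_sentinel_t>;
  int sum = std::accumulate(CI(first), CI(std::default_sentinel), 0);
  assert(sum == 1 + 2 + 3);
  return 0;
}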
diff --git a/contrib/llvm-project/libcxx/include/__iterator/counted_iterator.h b/contrib/llvm-project/libcxx/include/__iterator/counted_iterator.h
index 55979fe5571e..82d7adcfb02e 100644
--- a/contrib/llvm-project/libcxx/include/__iterator/counted_iterator.h
+++ b/contrib/llvm-project/libcxx/include/__iterator/counted_iterator.h
@@ -13,9 +13,9 @@
#include <__debug>
#include <__iterator/concepts.h>
#include <__iterator/default_sentinel.h>
+#include <__iterator/incrementable_traits.h>
#include <__iterator/iter_move.h>
#include <__iterator/iter_swap.h>
-#include <__iterator/incrementable_traits.h>
#include <__iterator/iterator_traits.h>
#include <__iterator/readable_traits.h>
#include <__memory/pointer_traits.h>
@@ -96,7 +96,7 @@ public:
}
_LIBCPP_HIDE_FROM_ABI
- constexpr const _Iter& base() const& { return __current_; }
+ constexpr const _Iter& base() const& noexcept { return __current_; }
_LIBCPP_HIDE_FROM_ABI
constexpr _Iter base() && { return _VSTD::move(__current_); }
diff --git a/contrib/llvm-project/libcxx/include/__iterator/indirectly_comparable.h b/contrib/llvm-project/libcxx/include/__iterator/indirectly_comparable.h
new file mode 100644
index 000000000000..3129b2dcf65e
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__iterator/indirectly_comparable.h
@@ -0,0 +1,30 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___ITERATOR_INDIRECTLY_COMPARABLE_H
+#define _LIBCPP___ITERATOR_INDIRECTLY_COMPARABLE_H
+
+#include <__config>
+#include <__functional/identity.h>
+#include <__iterator/concepts.h>
+#include <__iterator/projected.h>
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#ifndef _LIBCPP_HAS_NO_RANGES
+
+template <class _I1, class _I2, class _Rp, class _P1 = identity, class _P2 = identity>
+concept indirectly_comparable =
+ indirect_binary_predicate<_Rp, projected<_I1, _P1>, projected<_I2, _P2>>;
+
+#endif // _LIBCPP_HAS_NO_RANGES
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___ITERATOR_INDIRECTLY_COMPARABLE_H
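The new concept in this header can be exercised directly; the pair_iter alias and the pointer-to-member projection below are invented for illustration:

#include <functional>
#include <iterator>
#include <utility>

using pair_iter = std::pair<int, double>*;

// With the default identity projections the concept simply asks whether
// *i1 and *i2 can be compared by the predicate.
static_assert(std::indirectly_comparable<int*, long*, std::ranges::less>);
static_assert(!std::indirectly_comparable<int*, int**, std::ranges::equal_to>);

// With a projection, the projected values are compared instead; this is the
// constraint used by ranges algorithms such as ranges::find_first_of.
static_assert(std::indirectly_comparable<pair_iter, int*, std::ranges::equal_to,
                                         decltype(&std::pair<int, double>::first)>);

int main() { return 0; }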
diff --git a/contrib/llvm-project/libcxx/include/__iterator/move_iterator.h b/contrib/llvm-project/libcxx/include/__iterator/move_iterator.h
index eac9264df30a..29bac864c275 100644
--- a/contrib/llvm-project/libcxx/include/__iterator/move_iterator.h
+++ b/contrib/llvm-project/libcxx/include/__iterator/move_iterator.h
@@ -12,6 +12,7 @@
#include <__config>
#include <__iterator/iterator_traits.h>
+#include <__utility/move.h>
#include <type_traits>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
@@ -23,19 +24,20 @@ _LIBCPP_BEGIN_NAMESPACE_STD
template <class _Iter>
class _LIBCPP_TEMPLATE_VIS move_iterator
{
-private:
- _Iter __i;
public:
- typedef _Iter iterator_type;
+#if _LIBCPP_STD_VER > 17
+ typedef input_iterator_tag iterator_concept;
+#endif
+
+ typedef _Iter iterator_type;
+ typedef _If<
+ __is_cpp17_random_access_iterator<_Iter>::value,
+ random_access_iterator_tag,
+ typename iterator_traits<_Iter>::iterator_category
+ > iterator_category;
typedef typename iterator_traits<iterator_type>::value_type value_type;
typedef typename iterator_traits<iterator_type>::difference_type difference_type;
typedef iterator_type pointer;
- typedef _If<__is_cpp17_random_access_iterator<_Iter>::value,
- random_access_iterator_tag,
- typename iterator_traits<_Iter>::iterator_category> iterator_category;
-#if _LIBCPP_STD_VER > 17
- typedef input_iterator_tag iterator_concept;
-#endif
#ifndef _LIBCPP_CXX03_LANG
typedef typename iterator_traits<iterator_type>::reference __reference;
@@ -48,114 +50,113 @@ public:
typedef typename iterator_traits<iterator_type>::reference reference;
#endif
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
- move_iterator() : __i() {}
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX14
+ move_iterator() : __current_() {}
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
- explicit move_iterator(_Iter __x) : __i(__x) {}
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX14
+ explicit move_iterator(_Iter __i) : __current_(_VSTD::move(__i)) {}
template <class _Up, class = __enable_if_t<
- !is_same<_Up, _Iter>::value && is_convertible<_Up const&, _Iter>::value
+ !is_same<_Up, _Iter>::value && is_convertible<const _Up&, _Iter>::value
> >
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
- move_iterator(const move_iterator<_Up>& __u) : __i(__u.base()) {}
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX14
+ move_iterator(const move_iterator<_Up>& __u) : __current_(__u.base()) {}
template <class _Up, class = __enable_if_t<
!is_same<_Up, _Iter>::value &&
- is_convertible<_Up const&, _Iter>::value &&
- is_assignable<_Iter&, _Up const&>::value
+ is_convertible<const _Up&, _Iter>::value &&
+ is_assignable<_Iter&, const _Up&>::value
> >
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX14
move_iterator& operator=(const move_iterator<_Up>& __u) {
- __i = __u.base();
+ __current_ = __u.base();
return *this;
}
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 _Iter base() const {return __i;}
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
- reference operator*() const { return static_cast<reference>(*__i); }
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
- pointer operator->() const { return __i;}
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
- move_iterator& operator++() {++__i; return *this;}
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
- move_iterator operator++(int) {move_iterator __tmp(*this); ++__i; return __tmp;}
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
- move_iterator& operator--() {--__i; return *this;}
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
- move_iterator operator--(int) {move_iterator __tmp(*this); --__i; return __tmp;}
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
- move_iterator operator+ (difference_type __n) const {return move_iterator(__i + __n);}
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
- move_iterator& operator+=(difference_type __n) {__i += __n; return *this;}
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
- move_iterator operator- (difference_type __n) const {return move_iterator(__i - __n);}
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
- move_iterator& operator-=(difference_type __n) {__i -= __n; return *this;}
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
- reference operator[](difference_type __n) const { return static_cast<reference>(__i[__n]); }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX14
+ _Iter base() const { return __current_; }
+
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX14
+ reference operator*() const { return static_cast<reference>(*__current_); }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX14
+ pointer operator->() const { return __current_; }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX14
+ reference operator[](difference_type __n) const { return static_cast<reference>(__current_[__n]); }
+
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX14
+ move_iterator& operator++() { ++__current_; return *this; }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX14
+ move_iterator operator++(int) { move_iterator __tmp(*this); ++__current_; return __tmp; }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX14
+ move_iterator& operator--() { --__current_; return *this; }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX14
+ move_iterator operator--(int) { move_iterator __tmp(*this); --__current_; return __tmp; }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX14
+ move_iterator operator+(difference_type __n) const { return move_iterator(__current_ + __n); }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX14
+ move_iterator& operator+=(difference_type __n) { __current_ += __n; return *this; }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX14
+ move_iterator operator-(difference_type __n) const { return move_iterator(__current_ - __n); }
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX14
+ move_iterator& operator-=(difference_type __n) { __current_ -= __n; return *this; }
+
+private:
+ _Iter __current_;
};
template <class _Iter1, class _Iter2>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
-bool
-operator==(const move_iterator<_Iter1>& __x, const move_iterator<_Iter2>& __y)
+inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX14
+bool operator==(const move_iterator<_Iter1>& __x, const move_iterator<_Iter2>& __y)
{
return __x.base() == __y.base();
}
template <class _Iter1, class _Iter2>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
-bool
-operator<(const move_iterator<_Iter1>& __x, const move_iterator<_Iter2>& __y)
+inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX14
+bool operator!=(const move_iterator<_Iter1>& __x, const move_iterator<_Iter2>& __y)
{
- return __x.base() < __y.base();
+ return __x.base() != __y.base();
}
template <class _Iter1, class _Iter2>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
-bool
-operator!=(const move_iterator<_Iter1>& __x, const move_iterator<_Iter2>& __y)
+inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX14
+bool operator<(const move_iterator<_Iter1>& __x, const move_iterator<_Iter2>& __y)
{
- return __x.base() != __y.base();
+ return __x.base() < __y.base();
}
template <class _Iter1, class _Iter2>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
-bool
-operator>(const move_iterator<_Iter1>& __x, const move_iterator<_Iter2>& __y)
+inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX14
+bool operator>(const move_iterator<_Iter1>& __x, const move_iterator<_Iter2>& __y)
{
return __x.base() > __y.base();
}
template <class _Iter1, class _Iter2>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
-bool
-operator>=(const move_iterator<_Iter1>& __x, const move_iterator<_Iter2>& __y)
+inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX14
+bool operator<=(const move_iterator<_Iter1>& __x, const move_iterator<_Iter2>& __y)
{
- return __x.base() >= __y.base();
+ return __x.base() <= __y.base();
}
template <class _Iter1, class _Iter2>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
-bool
-operator<=(const move_iterator<_Iter1>& __x, const move_iterator<_Iter2>& __y)
+inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX14
+bool operator>=(const move_iterator<_Iter1>& __x, const move_iterator<_Iter2>& __y)
{
- return __x.base() <= __y.base();
+ return __x.base() >= __y.base();
}
#ifndef _LIBCPP_CXX03_LANG
template <class _Iter1, class _Iter2>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
-auto
-operator-(const move_iterator<_Iter1>& __x, const move_iterator<_Iter2>& __y)
--> decltype(__x.base() - __y.base())
+inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX14
+auto operator-(const move_iterator<_Iter1>& __x, const move_iterator<_Iter2>& __y)
+ -> decltype(__x.base() - __y.base())
{
return __x.base() - __y.base();
}
#else
template <class _Iter1, class _Iter2>
-inline _LIBCPP_INLINE_VISIBILITY
+inline _LIBCPP_HIDE_FROM_ABI
typename move_iterator<_Iter1>::difference_type
operator-(const move_iterator<_Iter1>& __x, const move_iterator<_Iter2>& __y)
{
@@ -164,7 +165,7 @@ operator-(const move_iterator<_Iter1>& __x, const move_iterator<_Iter2>& __y)
#endif
template <class _Iter>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
+inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX14
move_iterator<_Iter>
operator+(typename move_iterator<_Iter>::difference_type __n, const move_iterator<_Iter>& __x)
{
@@ -172,11 +173,11 @@ operator+(typename move_iterator<_Iter>::difference_type __n, const move_iterato
}
template <class _Iter>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
+inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX14
move_iterator<_Iter>
make_move_iterator(_Iter __i)
{
- return move_iterator<_Iter>(__i);
+ return move_iterator<_Iter>(_VSTD::move(__i));
}
_LIBCPP_END_NAMESPACE_STD
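The refactored move_iterator keeps its observable behaviour; a minimal usage sketch, including the iterator_category computed by the _If alias above:

#include <cassert>
#include <iterator>
#include <string>
#include <type_traits>
#include <vector>

int main() {
  // Over a random-access iterator, move_iterator reports
  // random_access_iterator_tag (the _If typedef above).
  static_assert(std::is_same_v<
      std::iterator_traits<std::move_iterator<std::string*>>::iterator_category,
      std::random_access_iterator_tag>);

  std::vector<std::string> src = {"alpha", "beta", "gamma"};
  std::vector<std::string> dst;

  // Dereferencing a move_iterator yields an rvalue reference, so the strings
  // are moved, not copied; src's elements are left valid but unspecified.
  dst.assign(std::make_move_iterator(src.begin()),
             std::make_move_iterator(src.end()));

  assert(dst.size() == 3 && dst[0] == "alpha");
  return 0;
}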
diff --git a/contrib/llvm-project/libcxx/include/__iterator/next.h b/contrib/llvm-project/libcxx/include/__iterator/next.h
index d332abfa8e0e..b9bdd6b27e05 100644
--- a/contrib/llvm-project/libcxx/include/__iterator/next.h
+++ b/contrib/llvm-project/libcxx/include/__iterator/next.h
@@ -12,7 +12,6 @@
#include <__config>
#include <__debug>
-#include <__function_like.h>
#include <__iterator/advance.h>
#include <__iterator/concepts.h>
#include <__iterator/incrementable_traits.h>
@@ -38,12 +37,12 @@ inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
#if !defined(_LIBCPP_HAS_NO_RANGES)
+// [range.iter.op.next]
+
namespace ranges {
-// TODO(varconst): rename `__next_fn` to `__fn`.
-struct __next_fn final : private __function_like {
- _LIBCPP_HIDE_FROM_ABI
- constexpr explicit __next_fn(__tag __x) noexcept : __function_like(__x) {}
+namespace __next {
+struct __fn {
template <input_or_output_iterator _Ip>
_LIBCPP_HIDE_FROM_ABI
constexpr _Ip operator()(_Ip __x) const {
@@ -73,7 +72,11 @@ struct __next_fn final : private __function_like {
}
};
-inline constexpr auto next = __next_fn(__function_like::__tag());
+} // namespace __next
+
+inline namespace __cpo {
+ inline constexpr auto next = __next::__fn{};
+} // namespace __cpo
} // namespace ranges
#endif // !defined(_LIBCPP_HAS_NO_RANGES)
diff --git a/contrib/llvm-project/libcxx/include/__iterator/prev.h b/contrib/llvm-project/libcxx/include/__iterator/prev.h
index 57f2d04a1325..870cbe64eaee 100644
--- a/contrib/llvm-project/libcxx/include/__iterator/prev.h
+++ b/contrib/llvm-project/libcxx/include/__iterator/prev.h
@@ -12,7 +12,6 @@
#include <__config>
#include <__debug>
-#include <__function_like.h>
#include <__iterator/advance.h>
#include <__iterator/concepts.h>
#include <__iterator/incrementable_traits.h>
@@ -37,12 +36,12 @@ inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
#if !defined(_LIBCPP_HAS_NO_RANGES)
+// [range.iter.op.prev]
+
namespace ranges {
-// TODO(varconst): rename `__prev_fn` to `__fn`.
-struct __prev_fn final : private __function_like {
- _LIBCPP_HIDE_FROM_ABI
- constexpr explicit __prev_fn(__tag __x) noexcept : __function_like(__x) {}
+namespace __prev {
+struct __fn {
template <bidirectional_iterator _Ip>
_LIBCPP_HIDE_FROM_ABI
constexpr _Ip operator()(_Ip __x) const {
@@ -65,7 +64,11 @@ struct __prev_fn final : private __function_like {
}
};
-inline constexpr auto prev = __prev_fn(__function_like::__tag());
+} // namespace __prev
+
+inline namespace __cpo {
+ inline constexpr auto prev = __prev::__fn{};
+} // namespace __cpo
} // namespace ranges
#endif // !defined(_LIBCPP_HAS_NO_RANGES)
diff --git a/contrib/llvm-project/libcxx/include/__iterator/readable_traits.h b/contrib/llvm-project/libcxx/include/__iterator/readable_traits.h
index 90121bea8073..13f323e295ba 100644
--- a/contrib/llvm-project/libcxx/include/__iterator/readable_traits.h
+++ b/contrib/llvm-project/libcxx/include/__iterator/readable_traits.h
@@ -57,14 +57,14 @@ template<__has_member_element_type _Tp>
struct indirectly_readable_traits<_Tp>
: __cond_value_type<typename _Tp::element_type> {};
-// Pre-emptively applies LWG3541
template<__has_member_value_type _Tp>
-requires __has_member_element_type<_Tp>
+ requires __has_member_element_type<_Tp>
struct indirectly_readable_traits<_Tp> {};
+
template<__has_member_value_type _Tp>
-requires __has_member_element_type<_Tp> &&
- same_as<remove_cv_t<typename _Tp::element_type>,
- remove_cv_t<typename _Tp::value_type>>
+ requires __has_member_element_type<_Tp> &&
+ same_as<remove_cv_t<typename _Tp::element_type>,
+ remove_cv_t<typename _Tp::value_type>>
struct indirectly_readable_traits<_Tp>
: __cond_value_type<typename _Tp::value_type> {};
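The two specializations above (the now-accepted LWG3541 behaviour) can be observed directly; both struct names below are invented for illustration:

#include <iterator>
#include <type_traits>

struct agreeing_members {
  using value_type = int;
  using element_type = const int;   // same as value_type after remove_cv
};

struct conflicting_members {
  using value_type = int;
  using element_type = long;        // disagrees with value_type
};

template <class T>
concept exposes_value_type =
    requires { typename std::indirectly_readable_traits<T>::value_type; };

// Agreeing member types: value_type is exposed.
static_assert(std::is_same_v<
    std::indirectly_readable_traits<agreeing_members>::value_type, int>);
// Conflicting member types: the empty specialization is chosen, so there is
// no value_type (and iter_value_t would be ill-formed for such a type).
static_assert(!exposes_value_type<conflicting_members>);

int main() { return 0; }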
diff --git a/contrib/llvm-project/libcxx/include/__iterator/reverse_iterator.h b/contrib/llvm-project/libcxx/include/__iterator/reverse_iterator.h
index d06859ee5f39..449eb529aa98 100644
--- a/contrib/llvm-project/libcxx/include/__iterator/reverse_iterator.h
+++ b/contrib/llvm-project/libcxx/include/__iterator/reverse_iterator.h
@@ -10,9 +10,9 @@
#ifndef _LIBCPP___ITERATOR_REVERSE_ITERATOR_H
#define _LIBCPP___ITERATOR_REVERSE_ITERATOR_H
-#include <__config>
#include <__compare/compare_three_way_result.h>
#include <__compare/three_way_comparable.h>
+#include <__config>
#include <__iterator/iterator.h>
#include <__iterator/iterator_traits.h>
#include <__memory/addressof.h>
@@ -24,13 +24,6 @@
_LIBCPP_BEGIN_NAMESPACE_STD
-template <class _Tp, class = void>
-struct __is_stashing_iterator : false_type {};
-
-template <class _Tp>
-struct __is_stashing_iterator<_Tp, typename __void_t<typename _Tp::__stashing_iterator_tag>::type>
- : true_type {};
-
_LIBCPP_SUPPRESS_DEPRECATED_PUSH
template <class _Iter>
class _LIBCPP_TEMPLATE_VIS reverse_iterator
@@ -48,10 +41,6 @@ private:
_Iter __t; // no longer used as of LWG #2360, not removed due to ABI break
#endif
- static_assert(!__is_stashing_iterator<_Iter>::value,
- "The specified iterator type cannot be used with reverse_iterator; "
- "Using stashing iterators with reverse_iterator causes undefined behavior");
-
protected:
_Iter current;
public:
@@ -88,7 +77,7 @@ public:
template <class _Up, class = __enable_if_t<
!is_same<_Up, _Iter>::value &&
is_convertible<_Up const&, _Iter>::value &&
- is_assignable<_Iter, _Up const&>::value
+ is_assignable<_Iter&, _Up const&>::value
> >
_LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
reverse_iterator& operator=(const reverse_iterator<_Up>& __u) {
@@ -113,7 +102,7 @@ public:
template <class _Up, class = __enable_if_t<
!is_same<_Up, _Iter>::value &&
is_convertible<_Up const&, _Iter>::value &&
- is_assignable<_Iter, _Up const&>::value
+ is_assignable<_Iter&, _Up const&>::value
> >
_LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
reverse_iterator& operator=(const reverse_iterator<_Up>& __u) {
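The added '&' in is_assignable<_Iter&, _Up const&> matters because is_assignable uses its first argument exactly as written: without the reference it asks whether an rvalue can be assigned to, which is false for pointer iterators. A stand-alone check, plus the conversion it enables:

#include <iterator>
#include <type_traits>

static_assert(!std::is_assignable_v<int*, int* const&>);   // rvalue pointer: no
static_assert( std::is_assignable_v<int*&, int* const&>);  // lvalue pointer: yes

int main() {
  int arr[3] = {1, 2, 3};
  // Converting assignment between reverse_iterators over pointer types is
  // what the corrected trait models: const int* is assignable from int*.
  std::reverse_iterator<int*> r(arr + 3);
  std::reverse_iterator<const int*> cr(arr + 3);
  cr = r;
  return (*cr == 3) ? 0 : 1;
}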
diff --git a/contrib/llvm-project/libcxx/include/__iterator/wrap_iter.h b/contrib/llvm-project/libcxx/include/__iterator/wrap_iter.h
index cfcc9857b3fc..d9dbee588896 100644
--- a/contrib/llvm-project/libcxx/include/__iterator/wrap_iter.h
+++ b/contrib/llvm-project/libcxx/include/__iterator/wrap_iter.h
@@ -43,10 +43,7 @@ public:
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX11 __wrap_iter() _NOEXCEPT
: __i()
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- __get_db()->__insert_i(this);
-#endif
+ _VSTD::__debug_db_insert_i(this);
}
template <class _Up> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX11
__wrap_iter(const __wrap_iter<_Up>& __u,
@@ -69,9 +66,10 @@ public:
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX11
__wrap_iter& operator=(const __wrap_iter& __x)
{
- if (this != _VSTD::addressof(__x) && !__libcpp_is_constant_evaluated())
+ if (this != _VSTD::addressof(__x))
{
- __get_db()->__iterator_copy(this, _VSTD::addressof(__x));
+ if (!__libcpp_is_constant_evaluated())
+ __get_db()->__iterator_copy(this, _VSTD::addressof(__x));
__i = __x.__i;
}
return *this;
@@ -85,29 +83,20 @@ public:
#endif
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX11 reference operator*() const _NOEXCEPT
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- _LIBCPP_ASSERT(__get_const_db()->__dereferenceable(this),
- "Attempted to dereference a non-dereferenceable iterator");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__dereferenceable(this),
+ "Attempted to dereference a non-dereferenceable iterator");
return *__i;
}
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX11 pointer operator->() const _NOEXCEPT
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- _LIBCPP_ASSERT(__get_const_db()->__dereferenceable(this),
- "Attempted to dereference a non-dereferenceable iterator");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__dereferenceable(this),
+ "Attempted to dereference a non-dereferenceable iterator");
return _VSTD::__to_address(__i);
}
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX11 __wrap_iter& operator++() _NOEXCEPT
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- _LIBCPP_ASSERT(__get_const_db()->__dereferenceable(this),
- "Attempted to increment a non-incrementable iterator");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__dereferenceable(this),
+ "Attempted to increment a non-incrementable iterator");
++__i;
return *this;
}
@@ -116,11 +105,8 @@ public:
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX11 __wrap_iter& operator--() _NOEXCEPT
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- _LIBCPP_ASSERT(__get_const_db()->__decrementable(this),
- "Attempted to decrement a non-decrementable iterator");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__decrementable(this),
+ "Attempted to decrement a non-decrementable iterator");
--__i;
return *this;
}
@@ -130,11 +116,8 @@ public:
{__wrap_iter __w(*this); __w += __n; return __w;}
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX11 __wrap_iter& operator+=(difference_type __n) _NOEXCEPT
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- _LIBCPP_ASSERT(__get_const_db()->__addable(this, __n),
- "Attempted to add/subtract an iterator outside its valid range");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__addable(this, __n),
+ "Attempted to add/subtract an iterator outside its valid range");
__i += __n;
return *this;
}
@@ -144,11 +127,8 @@ public:
{*this += -__n; return *this;}
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX11 reference operator[](difference_type __n) const _NOEXCEPT
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- _LIBCPP_ASSERT(__get_const_db()->__subscriptable(this, __n),
- "Attempted to subscript an iterator outside its valid range");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__subscriptable(this, __n),
+ "Attempted to subscript an iterator outside its valid range");
return __i[__n];
}
@@ -189,11 +169,8 @@ template <class _Iter1>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX11
bool operator<(const __wrap_iter<_Iter1>& __x, const __wrap_iter<_Iter1>& __y) _NOEXCEPT
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- _LIBCPP_ASSERT(__get_const_db()->__less_than_comparable(_VSTD::addressof(__x), _VSTD::addressof(__y)),
- "Attempted to compare incomparable iterators");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__less_than_comparable(_VSTD::addressof(__x), _VSTD::addressof(__y)),
+ "Attempted to compare incomparable iterators");
return __x.base() < __y.base();
}
@@ -201,11 +178,8 @@ template <class _Iter1, class _Iter2>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX11
bool operator<(const __wrap_iter<_Iter1>& __x, const __wrap_iter<_Iter2>& __y) _NOEXCEPT
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- _LIBCPP_ASSERT(__get_const_db()->__less_than_comparable(&__x, &__y),
- "Attempted to compare incomparable iterators");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__less_than_comparable(&__x, &__y),
+ "Attempted to compare incomparable iterators");
return __x.base() < __y.base();
}
@@ -275,11 +249,8 @@ typename __wrap_iter<_Iter1>::difference_type
operator-(const __wrap_iter<_Iter1>& __x, const __wrap_iter<_Iter2>& __y) _NOEXCEPT
#endif // C++03
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- _LIBCPP_ASSERT(__get_const_db()->__less_than_comparable(_VSTD::addressof(__x), _VSTD::addressof(__y)),
- "Attempted to subtract incompatible iterators");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__less_than_comparable(_VSTD::addressof(__x), _VSTD::addressof(__y)),
+ "Attempted to subtract incompatible iterators");
return __x.base() - __y.base();
}
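
The repeated `#if _LIBCPP_DEBUG_LEVEL == 2` blocks removed above all follow one pattern: check a precondition at run time only, and only in a checked build. A hedged sketch of that pattern with placeholder names (the real `_LIBCPP_DEBUG_ASSERT` lives in libc++'s debug machinery and may differ in detail), assuming C++20 for `std::is_constant_evaluated()`:

    #include <cassert>
    #include <type_traits>

    // Placeholder names, not libc++'s: run the check only in a level-2 debug
    // build and never during constant evaluation, where the iterator-debug
    // database cannot be consulted.
    #if defined(MY_DEBUG_LEVEL) && MY_DEBUG_LEVEL == 2
    #  define MY_DEBUG_ASSERT(cond, msg)              \
         do {                                         \
           if (!std::is_constant_evaluated())         \
             assert((cond) && msg);                   \
         } while (0)
    #else
    #  define MY_DEBUG_ASSERT(cond, msg) ((void)0)
    #endif

    int main() {
        int x = 42;
        // Mirrors the usage in operator*() above; a no-op unless MY_DEBUG_LEVEL == 2.
        MY_DEBUG_ASSERT(x == 42, "x must be 42");
        return x == 42 ? 0 : 1;
    }
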
diff --git a/contrib/llvm-project/libcxx/include/__locale b/contrib/llvm-project/libcxx/include/__locale
index 2582a90bb578..98445bd2d8f4 100644
--- a/contrib/llvm-project/libcxx/include/__locale
+++ b/contrib/llvm-project/libcxx/include/__locale
@@ -12,13 +12,14 @@
#include <__availability>
#include <__config>
-#include <string>
-#include <memory>
-#include <utility>
-#include <mutex>
-#include <cstdint>
#include <cctype>
+#include <cstdint>
#include <locale.h>
+#include <memory>
+#include <mutex>
+#include <string>
+#include <utility>
+
#if defined(_LIBCPP_MSVCRT_LIKE)
# include <cstring>
# include <__support/win32/locale_win32.h>
@@ -510,6 +511,33 @@ public:
# define _LIBCPP_CTYPE_MASK_IS_COMPOSITE_PRINT
# define _LIBCPP_CTYPE_MASK_IS_COMPOSITE_ALPHA
# define _LIBCPP_CTYPE_MASK_IS_COMPOSITE_XDIGIT
+#elif defined(__MVS__)
+# if defined(__NATIVE_ASCII_F)
+ typedef unsigned int mask;
+ static const mask space = _ISSPACE_A;
+ static const mask print = _ISPRINT_A;
+ static const mask cntrl = _ISCNTRL_A;
+ static const mask upper = _ISUPPER_A;
+ static const mask lower = _ISLOWER_A;
+ static const mask alpha = _ISALPHA_A;
+ static const mask digit = _ISDIGIT_A;
+ static const mask punct = _ISPUNCT_A;
+ static const mask xdigit = _ISXDIGIT_A;
+ static const mask blank = _ISBLANK_A;
+# else
+ typedef unsigned short mask;
+ static const mask space = __ISSPACE;
+ static const mask print = __ISPRINT;
+ static const mask cntrl = __ISCNTRL;
+ static const mask upper = __ISUPPER;
+ static const mask lower = __ISLOWER;
+ static const mask alpha = __ISALPHA;
+ static const mask digit = __ISDIGIT;
+ static const mask punct = __ISPUNCT;
+ static const mask xdigit = __ISXDIGIT;
+ static const mask blank = __ISBLANK;
+# endif
+ static const mask __regex_word = 0x8000;
#else
# error unknown rune table for this platform -- do you mean to define _LIBCPP_PROVIDES_DEFAULT_RUNE_TABLE?
#endif
@@ -733,6 +761,10 @@ public:
static const short* __classic_upper_table() _NOEXCEPT;
static const short* __classic_lower_table() _NOEXCEPT;
#endif
+#if defined(__MVS__)
+ static const unsigned short* __classic_upper_table() _NOEXCEPT;
+ static const unsigned short* __classic_lower_table() _NOEXCEPT;
+#endif
protected:
~ctype();
diff --git a/contrib/llvm-project/libcxx/include/__memory/construct_at.h b/contrib/llvm-project/libcxx/include/__memory/construct_at.h
index 3b58451c5009..580ce781f482 100644
--- a/contrib/llvm-project/libcxx/include/__memory/construct_at.h
+++ b/contrib/llvm-project/libcxx/include/__memory/construct_at.h
@@ -14,6 +14,7 @@
#include <__debug>
#include <__iterator/access.h>
#include <__memory/addressof.h>
+#include <__memory/voidify.h>
#include <__utility/forward.h>
#include <type_traits>
#include <utility>
@@ -31,10 +32,10 @@ _LIBCPP_BEGIN_NAMESPACE_STD
template<class _Tp, class ..._Args, class = decltype(
::new (declval<void*>()) _Tp(declval<_Args>()...)
)>
-_LIBCPP_INLINE_VISIBILITY
+_LIBCPP_HIDE_FROM_ABI
constexpr _Tp* construct_at(_Tp* __location, _Args&& ...__args) {
_LIBCPP_ASSERT(__location, "null pointer given to construct_at");
- return ::new ((void*)__location) _Tp(_VSTD::forward<_Args>(__args)...);
+ return ::new (_VSTD::__voidify(*__location)) _Tp(_VSTD::forward<_Args>(__args)...);
}
#endif
@@ -46,7 +47,7 @@ constexpr _Tp* construct_at(_Tp* __location, _Args&& ...__args) {
template <class _ForwardIterator>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX17
-void __destroy(_ForwardIterator, _ForwardIterator);
+_ForwardIterator __destroy(_ForwardIterator, _ForwardIterator);
template <class _Tp, typename enable_if<!is_array<_Tp>::value, int>::type = 0>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX17
@@ -66,9 +67,10 @@ void __destroy_at(_Tp* __loc) {
template <class _ForwardIterator>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX17
-void __destroy(_ForwardIterator __first, _ForwardIterator __last) {
+_ForwardIterator __destroy(_ForwardIterator __first, _ForwardIterator __last) {
for (; __first != __last; ++__first)
_VSTD::__destroy_at(_VSTD::addressof(*__first));
+ return __first;
}
#if _LIBCPP_STD_VER > 14
@@ -90,7 +92,7 @@ void destroy_at(_Tp* __loc) {
template <class _ForwardIterator>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX17
void destroy(_ForwardIterator __first, _ForwardIterator __last) {
- _VSTD::__destroy(_VSTD::move(__first), _VSTD::move(__last));
+ (void)_VSTD::__destroy(_VSTD::move(__first), _VSTD::move(__last));
}
template <class _ForwardIterator, class _Size>
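
For context, these internal helpers back the standard `std::construct_at` / `std::destroy` family; the `__voidify` step is the cast to `void*` that routes the construction through the replaceable placement `new`. A small usage sketch of the public API over raw storage:

    #include <memory>
    #include <new>
    #include <string>

    int main() {
        // Raw, suitably aligned storage for three strings; no lifetimes started yet.
        alignas(std::string) unsigned char buf[3 * sizeof(std::string)];
        auto* first = reinterpret_cast<std::string*>(buf);

        for (int i = 0; i < 3; ++i)
            std::construct_at(first + i, "element");  // C++20, constexpr-friendly placement new

        std::destroy(first, first + 3);               // C++17, ends the three lifetimes
    }
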
diff --git a/contrib/llvm-project/libcxx/include/__memory/ranges_construct_at.h b/contrib/llvm-project/libcxx/include/__memory/ranges_construct_at.h
new file mode 100644
index 000000000000..1a72da739682
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__memory/ranges_construct_at.h
@@ -0,0 +1,124 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___MEMORY_RANGES_CONSTRUCT_AT_H
+#define _LIBCPP___MEMORY_RANGES_CONSTRUCT_AT_H
+
+#include <__concepts/destructible.h>
+#include <__config>
+#include <__iterator/incrementable_traits.h>
+#include <__iterator/readable_traits.h>
+#include <__memory/concepts.h>
+#include <__memory/construct_at.h>
+#include <__ranges/access.h>
+#include <__ranges/concepts.h>
+#include <__ranges/dangling.h>
+#include <__utility/declval.h>
+#include <__utility/forward.h>
+#include <__utility/move.h>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#if !defined(_LIBCPP_HAS_NO_RANGES)
+namespace ranges {
+
+// construct_at
+
+namespace __construct_at {
+
+struct __fn {
+ template<class _Tp, class... _Args, class = decltype(
+ ::new (declval<void*>()) _Tp(declval<_Args>()...)
+ )>
+ _LIBCPP_HIDE_FROM_ABI
+ constexpr _Tp* operator()(_Tp* __location, _Args&& ...__args) const {
+ return _VSTD::construct_at(__location, _VSTD::forward<_Args>(__args)...);
+ }
+};
+
+} // namespace __construct_at
+
+inline namespace __cpo {
+ inline constexpr auto construct_at = __construct_at::__fn{};
+} // namespace __cpo
+
+// destroy_at
+
+namespace __destroy_at {
+
+struct __fn {
+ template <destructible _Tp>
+ _LIBCPP_HIDE_FROM_ABI
+ constexpr void operator()(_Tp* __location) const noexcept {
+ _VSTD::destroy_at(__location);
+ }
+};
+
+} // namespace __destroy_at
+
+inline namespace __cpo {
+ inline constexpr auto destroy_at = __destroy_at::__fn{};
+} // namespace __cpo
+
+// destroy
+
+namespace __destroy {
+
+struct __fn {
+ template <__nothrow_input_iterator _InputIterator, __nothrow_sentinel_for<_InputIterator> _Sentinel>
+ requires destructible<iter_value_t<_InputIterator>>
+ _LIBCPP_HIDE_FROM_ABI
+ constexpr _InputIterator operator()(_InputIterator __first, _Sentinel __last) const noexcept {
+ return _VSTD::__destroy(_VSTD::move(__first), _VSTD::move(__last));
+ }
+
+ template <__nothrow_input_range _InputRange>
+ requires destructible<range_value_t<_InputRange>>
+ _LIBCPP_HIDE_FROM_ABI
+ constexpr borrowed_iterator_t<_InputRange> operator()(_InputRange&& __range) const noexcept {
+ return (*this)(ranges::begin(__range), ranges::end(__range));
+ }
+};
+
+} // namespace __destroy
+
+inline namespace __cpo {
+ inline constexpr auto destroy = __destroy::__fn{};
+} // namespace __cpo
+
+// destroy_n
+
+namespace __destroy_n {
+
+struct __fn {
+ template <__nothrow_input_iterator _InputIterator>
+ requires destructible<iter_value_t<_InputIterator>>
+ _LIBCPP_HIDE_FROM_ABI
+ constexpr _InputIterator operator()(_InputIterator __first, iter_difference_t<_InputIterator> __n) const noexcept {
+ return _VSTD::destroy_n(_VSTD::move(__first), __n);
+ }
+};
+
+} // namespace __destroy_n
+
+inline namespace __cpo {
+ inline constexpr auto destroy_n = __destroy_n::__fn{};
+} // namespace __cpo
+
+} // namespace ranges
+
+#endif // !defined(_LIBCPP_HAS_NO_RANGES)
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___MEMORY_RANGES_CONSTRUCT_AT_H
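
The new header only re-exports the existing algorithms as constrained function objects, giving them the `std::ranges` spellings. A brief usage sketch:

    #include <memory>
    #include <string>

    int main() {
        alignas(std::string) unsigned char buf[2 * sizeof(std::string)];
        auto* p = reinterpret_cast<std::string*>(buf);

        std::ranges::construct_at(p,     "hello");
        std::ranges::construct_at(p + 1, "world");

        // ranges::destroy returns the end iterator, matching the new return
        // type of the internal __destroy above.
        std::string* e = std::ranges::destroy(p, p + 2);
        (void)e;
    }
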
diff --git a/contrib/llvm-project/libcxx/include/__memory/ranges_uninitialized_algorithms.h b/contrib/llvm-project/libcxx/include/__memory/ranges_uninitialized_algorithms.h
index 6ec803806c05..6a8f9f070ed7 100644
--- a/contrib/llvm-project/libcxx/include/__memory/ranges_uninitialized_algorithms.h
+++ b/contrib/llvm-project/libcxx/include/__memory/ranges_uninitialized_algorithms.h
@@ -10,10 +10,12 @@
#ifndef _LIBCPP___MEMORY_RANGES_UNINITIALIZED_ALGORITHMS_H
#define _LIBCPP___MEMORY_RANGES_UNINITIALIZED_ALGORITHMS_H
+#include <__algorithm/in_out_result.h>
#include <__concepts/constructible.h>
#include <__config>
-#include <__function_like.h>
+#include <__iterator/concepts.h>
#include <__iterator/incrementable_traits.h>
+#include <__iterator/iter_move.h>
#include <__iterator/iterator_traits.h>
#include <__iterator/readable_traits.h>
#include <__memory/concepts.h>
@@ -37,10 +39,7 @@ namespace ranges {
namespace __uninitialized_default_construct {
-struct __fn final : private __function_like {
-
- constexpr explicit __fn(__tag __x) noexcept : __function_like(__x) {}
-
+struct __fn {
template <__nothrow_forward_iterator _ForwardIterator,
__nothrow_sentinel_for<_ForwardIterator> _Sentinel>
requires default_initializable<iter_value_t<_ForwardIterator>>
@@ -55,25 +54,19 @@ struct __fn final : private __function_like {
borrowed_iterator_t<_ForwardRange> operator()(_ForwardRange&& __range) const {
return (*this)(ranges::begin(__range), ranges::end(__range));
}
-
};
} // namespace __uninitialized_default_construct
inline namespace __cpo {
-inline constexpr auto uninitialized_default_construct =
- __uninitialized_default_construct::__fn(__function_like::__tag());
+ inline constexpr auto uninitialized_default_construct = __uninitialized_default_construct::__fn{};
} // namespace __cpo
// uninitialized_default_construct_n
namespace __uninitialized_default_construct_n {
-struct __fn final : private __function_like {
-
- constexpr explicit __fn(__tag __x) noexcept :
- __function_like(__x) {}
-
+struct __fn {
template <__nothrow_forward_iterator _ForwardIterator>
requires default_initializable<iter_value_t<_ForwardIterator>>
_ForwardIterator operator()(_ForwardIterator __first,
@@ -81,24 +74,19 @@ struct __fn final : private __function_like {
using _ValueType = remove_reference_t<iter_reference_t<_ForwardIterator>>;
return _VSTD::__uninitialized_default_construct_n<_ValueType>(_VSTD::move(__first), __n);
}
-
};
} // namespace __uninitialized_default_construct_n
inline namespace __cpo {
-inline constexpr auto uninitialized_default_construct_n =
- __uninitialized_default_construct_n::__fn(__function_like::__tag());
+ inline constexpr auto uninitialized_default_construct_n = __uninitialized_default_construct_n::__fn{};
} // namespace __cpo
// uninitialized_value_construct
namespace __uninitialized_value_construct {
-struct __fn final : private __function_like {
-
- constexpr explicit __fn(__tag __x) noexcept : __function_like(__x) {}
-
+struct __fn {
template <__nothrow_forward_iterator _ForwardIterator,
__nothrow_sentinel_for<_ForwardIterator> _Sentinel>
requires default_initializable<iter_value_t<_ForwardIterator>>
@@ -113,24 +101,19 @@ struct __fn final : private __function_like {
borrowed_iterator_t<_ForwardRange> operator()(_ForwardRange&& __range) const {
return (*this)(ranges::begin(__range), ranges::end(__range));
}
-
};
} // namespace __uninitialized_value_construct
inline namespace __cpo {
-inline constexpr auto uninitialized_value_construct =
- __uninitialized_value_construct::__fn(__function_like::__tag());
+ inline constexpr auto uninitialized_value_construct = __uninitialized_value_construct::__fn{};
} // namespace __cpo
// uninitialized_value_construct_n
namespace __uninitialized_value_construct_n {
-struct __fn final : private __function_like {
-
- constexpr explicit __fn(__tag __x) noexcept : __function_like(__x) {}
-
+struct __fn {
template <__nothrow_forward_iterator _ForwardIterator>
requires default_initializable<iter_value_t<_ForwardIterator>>
_ForwardIterator operator()(_ForwardIterator __first,
@@ -138,24 +121,19 @@ struct __fn final : private __function_like {
using _ValueType = remove_reference_t<iter_reference_t<_ForwardIterator>>;
return _VSTD::__uninitialized_value_construct_n<_ValueType>(_VSTD::move(__first), __n);
}
-
};
} // namespace __uninitialized_value_construct_n
inline namespace __cpo {
-inline constexpr auto uninitialized_value_construct_n =
- __uninitialized_value_construct_n::__fn(__function_like::__tag());
+ inline constexpr auto uninitialized_value_construct_n = __uninitialized_value_construct_n::__fn{};
} // namespace __cpo
// uninitialized_fill
namespace __uninitialized_fill {
-struct __fn final : private __function_like {
-
- constexpr explicit __fn(__tag __x) noexcept : __function_like(__x) {}
-
+struct __fn {
template <__nothrow_forward_iterator _ForwardIterator,
__nothrow_sentinel_for<_ForwardIterator> _Sentinel,
class _Tp>
@@ -170,23 +148,19 @@ struct __fn final : private __function_like {
borrowed_iterator_t<_ForwardRange> operator()(_ForwardRange&& __range, const _Tp& __x) const {
return (*this)(ranges::begin(__range), ranges::end(__range), __x);
}
-
};
-} // namespace __uninitialized_fil
+} // namespace __uninitialized_fill
inline namespace __cpo {
-inline constexpr auto uninitialized_fill = __uninitialized_fill::__fn(__function_like::__tag());
+ inline constexpr auto uninitialized_fill = __uninitialized_fill::__fn{};
} // namespace __cpo
// uninitialized_fill_n
namespace __uninitialized_fill_n {
-struct __fn final : private __function_like {
-
- constexpr explicit __fn(__tag __x) noexcept : __function_like(__x) {}
-
+struct __fn {
template <__nothrow_forward_iterator _ForwardIterator, class _Tp>
requires constructible_from<iter_value_t<_ForwardIterator>, const _Tp&>
_ForwardIterator operator()(_ForwardIterator __first,
@@ -195,13 +169,143 @@ struct __fn final : private __function_like {
using _ValueType = remove_reference_t<iter_reference_t<_ForwardIterator>>;
return _VSTD::__uninitialized_fill_n<_ValueType>(_VSTD::move(__first), __n, __x);
}
-
};
} // namespace __uninitialized_fill_n
inline namespace __cpo {
-inline constexpr auto uninitialized_fill_n = __uninitialized_fill_n::__fn(__function_like::__tag());
+ inline constexpr auto uninitialized_fill_n = __uninitialized_fill_n::__fn{};
+} // namespace __cpo
+
+// uninitialized_copy
+
+template <class _InputIterator, class _OutputIterator>
+using uninitialized_copy_result = in_out_result<_InputIterator, _OutputIterator>;
+
+namespace __uninitialized_copy {
+
+struct __fn {
+ template <input_iterator _InputIterator,
+ sentinel_for<_InputIterator> _Sentinel1,
+ __nothrow_forward_iterator _OutputIterator,
+ __nothrow_sentinel_for<_OutputIterator> _Sentinel2>
+ requires constructible_from<iter_value_t<_OutputIterator>, iter_reference_t<_InputIterator>>
+ uninitialized_copy_result<_InputIterator, _OutputIterator>
+ operator()(_InputIterator __ifirst, _Sentinel1 __ilast, _OutputIterator __ofirst, _Sentinel2 __olast) const {
+ using _ValueType = remove_reference_t<iter_reference_t<_OutputIterator>>;
+
+ auto __result = _VSTD::__uninitialized_copy<_ValueType>(_VSTD::move(__ifirst), _VSTD::move(__ilast),
+ _VSTD::move(__ofirst), _VSTD::move(__olast));
+ return {_VSTD::move(__result.first), _VSTD::move(__result.second)};
+ }
+
+ template <input_range _InputRange, __nothrow_forward_range _OutputRange>
+ requires constructible_from<range_value_t<_OutputRange>, range_reference_t<_InputRange>>
+ uninitialized_copy_result<borrowed_iterator_t<_InputRange>, borrowed_iterator_t<_OutputRange>>
+ operator()( _InputRange&& __in_range, _OutputRange&& __out_range) const {
+ return (*this)(ranges::begin(__in_range), ranges::end(__in_range),
+ ranges::begin(__out_range), ranges::end(__out_range));
+ }
+};
+
+} // namespace __uninitialized_copy
+
+inline namespace __cpo {
+ inline constexpr auto uninitialized_copy = __uninitialized_copy::__fn{};
+} // namespace __cpo
+
+// uninitialized_copy_n
+
+template <class _InputIterator, class _OutputIterator>
+using uninitialized_copy_n_result = in_out_result<_InputIterator, _OutputIterator>;
+
+namespace __uninitialized_copy_n {
+
+struct __fn {
+ template <input_iterator _InputIterator,
+ __nothrow_forward_iterator _OutputIterator,
+ __nothrow_sentinel_for<_OutputIterator> _Sentinel>
+ requires constructible_from<iter_value_t<_OutputIterator>, iter_reference_t<_InputIterator>>
+ uninitialized_copy_n_result<_InputIterator, _OutputIterator>
+ operator()(_InputIterator __ifirst, iter_difference_t<_InputIterator> __n,
+ _OutputIterator __ofirst, _Sentinel __olast) const {
+ using _ValueType = remove_reference_t<iter_reference_t<_OutputIterator>>;
+ auto __result = _VSTD::__uninitialized_copy_n<_ValueType>(_VSTD::move(__ifirst), __n,
+ _VSTD::move(__ofirst), _VSTD::move(__olast));
+ return {_VSTD::move(__result.first), _VSTD::move(__result.second)};
+ }
+};
+
+} // namespace __uninitialized_copy_n
+
+inline namespace __cpo {
+ inline constexpr auto uninitialized_copy_n = __uninitialized_copy_n::__fn{};
+} // namespace __cpo
+
+// uninitialized_move
+
+template <class _InputIterator, class _OutputIterator>
+using uninitialized_move_result = in_out_result<_InputIterator, _OutputIterator>;
+
+namespace __uninitialized_move {
+
+struct __fn {
+ template <input_iterator _InputIterator,
+ sentinel_for<_InputIterator> _Sentinel1,
+ __nothrow_forward_iterator _OutputIterator,
+ __nothrow_sentinel_for<_OutputIterator> _Sentinel2>
+ requires constructible_from<iter_value_t<_OutputIterator>, iter_reference_t<_InputIterator>>
+ uninitialized_move_result<_InputIterator, _OutputIterator>
+ operator()(_InputIterator __ifirst, _Sentinel1 __ilast, _OutputIterator __ofirst, _Sentinel2 __olast) const {
+ using _ValueType = remove_reference_t<iter_reference_t<_OutputIterator>>;
+ auto __iter_move = [](auto&& __iter) -> decltype(auto) { return ranges::iter_move(__iter); };
+ auto __result = _VSTD::__uninitialized_move<_ValueType>(_VSTD::move(__ifirst), _VSTD::move(__ilast),
+ _VSTD::move(__ofirst), _VSTD::move(__olast), __iter_move);
+ return {_VSTD::move(__result.first), _VSTD::move(__result.second)};
+ }
+
+ template <input_range _InputRange, __nothrow_forward_range _OutputRange>
+ requires constructible_from<range_value_t<_OutputRange>, range_reference_t<_InputRange>>
+ uninitialized_move_result<borrowed_iterator_t<_InputRange>, borrowed_iterator_t<_OutputRange>>
+ operator()(_InputRange&& __in_range, _OutputRange&& __out_range) const {
+ return (*this)(ranges::begin(__in_range), ranges::end(__in_range),
+ ranges::begin(__out_range), ranges::end(__out_range));
+ }
+};
+
+} // namespace __uninitialized_move
+
+inline namespace __cpo {
+ inline constexpr auto uninitialized_move = __uninitialized_move::__fn{};
+} // namespace __cpo
+
+// uninitialized_move_n
+
+template <class _InputIterator, class _OutputIterator>
+using uninitialized_move_n_result = in_out_result<_InputIterator, _OutputIterator>;
+
+namespace __uninitialized_move_n {
+
+struct __fn {
+ template <input_iterator _InputIterator,
+ __nothrow_forward_iterator _OutputIterator,
+ __nothrow_sentinel_for<_OutputIterator> _Sentinel>
+ requires constructible_from<iter_value_t<_OutputIterator>, iter_reference_t<_InputIterator>>
+ uninitialized_move_n_result<_InputIterator, _OutputIterator>
+ operator()(_InputIterator __ifirst, iter_difference_t<_InputIterator> __n,
+ _OutputIterator __ofirst, _Sentinel __olast) const {
+ using _ValueType = remove_reference_t<iter_reference_t<_OutputIterator>>;
+ auto __iter_move = [](auto&& __iter) -> decltype(auto) { return ranges::iter_move(__iter); };
+ auto __result = _VSTD::__uninitialized_move_n<_ValueType>(_VSTD::move(__ifirst), __n,
+ _VSTD::move(__ofirst), _VSTD::move(__olast), __iter_move);
+ return {_VSTD::move(__result.first), _VSTD::move(__result.second)};
+ }
+};
+
+} // namespace __uninitialized_move_n
+
+inline namespace __cpo {
+ inline constexpr auto uninitialized_move_n = __uninitialized_move_n::__fn{};
} // namespace __cpo
} // namespace ranges
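
The copy/move additions are the C++20 `std::ranges` uninitialized algorithms: they are doubly bounded (an input and an output sentinel) and report where each side stopped via `in_out_result`. A usage sketch:

    #include <memory>
    #include <string>
    #include <vector>

    int main() {
        std::vector<std::string> src = {"a", "b", "c"};

        alignas(std::string) unsigned char buf[3 * sizeof(std::string)];
        auto* out = reinterpret_cast<std::string*>(buf);

        // Stops at whichever sentinel is reached first and returns both positions.
        auto [in, end] = std::ranges::uninitialized_copy(src.begin(), src.end(), out, out + 3);
        (void)in;

        std::ranges::destroy(out, end);
    }
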
diff --git a/contrib/llvm-project/libcxx/include/__memory/shared_ptr.h b/contrib/llvm-project/libcxx/include/__memory/shared_ptr.h
index 9c7df8845956..6a4ffc88c866 100644
--- a/contrib/llvm-project/libcxx/include/__memory/shared_ptr.h
+++ b/contrib/llvm-project/libcxx/include/__memory/shared_ptr.h
@@ -12,14 +12,14 @@
#include <__availability>
#include <__config>
-#include <__functional_base>
#include <__functional/binary_function.h>
#include <__functional/operations.h>
#include <__functional/reference_wrapper.h>
+#include <__functional_base>
#include <__memory/addressof.h>
#include <__memory/allocation_guard.h>
-#include <__memory/allocator_traits.h>
#include <__memory/allocator.h>
+#include <__memory/allocator_traits.h>
#include <__memory/compressed_pair.h>
#include <__memory/pointer_traits.h>
#include <__memory/unique_ptr.h>
@@ -28,8 +28,8 @@
#include <cstdlib> // abort
#include <iosfwd>
#include <stdexcept>
-#include <typeinfo>
#include <type_traits>
+#include <typeinfo>
#include <utility>
#if !defined(_LIBCPP_HAS_NO_ATOMIC_HEADER)
# include <atomic>
diff --git a/contrib/llvm-project/libcxx/include/__memory/uninitialized_algorithms.h b/contrib/llvm-project/libcxx/include/__memory/uninitialized_algorithms.h
index 69132633a48e..40e7c79a51e0 100644
--- a/contrib/llvm-project/libcxx/include/__memory/uninitialized_algorithms.h
+++ b/contrib/llvm-project/libcxx/include/__memory/uninitialized_algorithms.h
@@ -23,52 +23,75 @@
_LIBCPP_BEGIN_NAMESPACE_STD
-template <class _InputIterator, class _ForwardIterator>
-_ForwardIterator
-uninitialized_copy(_InputIterator __f, _InputIterator __l, _ForwardIterator __r)
-{
- typedef typename iterator_traits<_ForwardIterator>::value_type value_type;
+// This is a simplified version of C++20 `unreachable_sentinel` that doesn't use concepts and thus can be used in any
+// language mode.
+struct __unreachable_sentinel {
+ template <class _Iter>
+ _LIBCPP_HIDE_FROM_ABI friend _LIBCPP_CONSTEXPR bool operator!=(const _Iter&, __unreachable_sentinel) _NOEXCEPT {
+ return true;
+ }
+};
+
+// uninitialized_copy
+
+template <class _ValueType, class _InputIterator, class _Sentinel1, class _ForwardIterator, class _Sentinel2>
+inline _LIBCPP_HIDE_FROM_ABI pair<_InputIterator, _ForwardIterator>
+__uninitialized_copy(_InputIterator __ifirst, _Sentinel1 __ilast,
+ _ForwardIterator __ofirst, _Sentinel2 __olast) {
+ _ForwardIterator __idx = __ofirst;
#ifndef _LIBCPP_NO_EXCEPTIONS
- _ForwardIterator __s = __r;
- try
- {
+ try {
#endif
- for (; __f != __l; ++__f, (void) ++__r)
- ::new ((void*)_VSTD::addressof(*__r)) value_type(*__f);
+ for (; __ifirst != __ilast && __idx != __olast; ++__ifirst, (void)++__idx)
+ ::new (_VSTD::__voidify(*__idx)) _ValueType(*__ifirst);
#ifndef _LIBCPP_NO_EXCEPTIONS
- }
- catch (...)
- {
- for (; __s != __r; ++__s)
- __s->~value_type();
- throw;
- }
+ } catch (...) {
+ _VSTD::__destroy(__ofirst, __idx);
+ throw;
+ }
#endif
- return __r;
+
+ return pair<_InputIterator, _ForwardIterator>(_VSTD::move(__ifirst), _VSTD::move(__idx));
}
-template <class _InputIterator, class _Size, class _ForwardIterator>
-_ForwardIterator
-uninitialized_copy_n(_InputIterator __f, _Size __n, _ForwardIterator __r)
-{
- typedef typename iterator_traits<_ForwardIterator>::value_type value_type;
+template <class _InputIterator, class _ForwardIterator>
+_ForwardIterator uninitialized_copy(_InputIterator __ifirst, _InputIterator __ilast,
+ _ForwardIterator __ofirst) {
+ typedef typename iterator_traits<_ForwardIterator>::value_type _ValueType;
+ auto __result = _VSTD::__uninitialized_copy<_ValueType>(_VSTD::move(__ifirst), _VSTD::move(__ilast),
+ _VSTD::move(__ofirst), __unreachable_sentinel());
+ return _VSTD::move(__result.second);
+}
+
+// uninitialized_copy_n
+
+template <class _ValueType, class _InputIterator, class _Size, class _ForwardIterator, class _Sentinel>
+inline _LIBCPP_HIDE_FROM_ABI pair<_InputIterator, _ForwardIterator>
+__uninitialized_copy_n(_InputIterator __ifirst, _Size __n,
+ _ForwardIterator __ofirst, _Sentinel __olast) {
+ _ForwardIterator __idx = __ofirst;
#ifndef _LIBCPP_NO_EXCEPTIONS
- _ForwardIterator __s = __r;
- try
- {
+ try {
#endif
- for (; __n > 0; ++__f, (void) ++__r, (void) --__n)
- ::new ((void*)_VSTD::addressof(*__r)) value_type(*__f);
+ for (; __n > 0 && __idx != __olast; ++__ifirst, (void)++__idx, (void)--__n)
+ ::new (_VSTD::__voidify(*__idx)) _ValueType(*__ifirst);
#ifndef _LIBCPP_NO_EXCEPTIONS
- }
- catch (...)
- {
- for (; __s != __r; ++__s)
- __s->~value_type();
- throw;
- }
+ } catch (...) {
+ _VSTD::__destroy(__ofirst, __idx);
+ throw;
+ }
#endif
- return __r;
+
+ return pair<_InputIterator, _ForwardIterator>(_VSTD::move(__ifirst), _VSTD::move(__idx));
+}
+
+template <class _InputIterator, class _Size, class _ForwardIterator>
+inline _LIBCPP_HIDE_FROM_ABI _ForwardIterator uninitialized_copy_n(_InputIterator __ifirst, _Size __n,
+ _ForwardIterator __ofirst) {
+ typedef typename iterator_traits<_ForwardIterator>::value_type _ValueType;
+ auto __result = _VSTD::__uninitialized_copy_n<_ValueType>(_VSTD::move(__ifirst), __n, _VSTD::move(__ofirst),
+ __unreachable_sentinel());
+ return _VSTD::move(__result.second);
}
// uninitialized_fill
@@ -253,43 +276,71 @@ _ForwardIterator uninitialized_value_construct_n(_ForwardIterator __first, _Size
return __uninitialized_value_construct_n<_ValueType>(_VSTD::move(__first), __n);
}
-template <class _InputIt, class _ForwardIt>
-inline _LIBCPP_INLINE_VISIBILITY
-_ForwardIt uninitialized_move(_InputIt __first, _InputIt __last, _ForwardIt __first_res) {
- using _Vt = typename iterator_traits<_ForwardIt>::value_type;
- auto __idx = __first_res;
+// uninitialized_move
+
+template <class _ValueType, class _InputIterator, class _Sentinel1, class _ForwardIterator, class _Sentinel2,
+ class _IterMove>
+inline _LIBCPP_HIDE_FROM_ABI pair<_InputIterator, _ForwardIterator>
+__uninitialized_move(_InputIterator __ifirst, _Sentinel1 __ilast,
+ _ForwardIterator __ofirst, _Sentinel2 __olast, _IterMove __iter_move) {
+ auto __idx = __ofirst;
#ifndef _LIBCPP_NO_EXCEPTIONS
- try {
+ try {
#endif
- for (; __first != __last; ++__idx, (void) ++__first)
- ::new ((void*)_VSTD::addressof(*__idx)) _Vt(_VSTD::move(*__first));
- return __idx;
-#ifndef _LIBCPP_NO_EXCEPTIONS
- } catch (...) {
- _VSTD::destroy(__first_res, __idx);
- throw;
+ for (; __ifirst != __ilast && __idx != __olast; ++__idx, (void)++__ifirst) {
+ ::new (_VSTD::__voidify(*__idx)) _ValueType(__iter_move(__ifirst));
}
+#ifndef _LIBCPP_NO_EXCEPTIONS
+ } catch (...) {
+ _VSTD::__destroy(__ofirst, __idx);
+ throw;
+ }
#endif
+
+ return {_VSTD::move(__ifirst), _VSTD::move(__idx)};
+}
+
+template <class _InputIterator, class _ForwardIterator>
+inline _LIBCPP_HIDE_FROM_ABI _ForwardIterator uninitialized_move(_InputIterator __ifirst, _InputIterator __ilast,
+ _ForwardIterator __ofirst) {
+ using _ValueType = typename iterator_traits<_ForwardIterator>::value_type;
+ auto __iter_move = [](auto&& __iter) -> decltype(auto) { return _VSTD::move(*__iter); };
+
+ auto __result = _VSTD::__uninitialized_move<_ValueType>(_VSTD::move(__ifirst), _VSTD::move(__ilast),
+ _VSTD::move(__ofirst), __unreachable_sentinel(), __iter_move);
+ return _VSTD::move(__result.second);
}
-template <class _InputIt, class _Size, class _ForwardIt>
-inline _LIBCPP_INLINE_VISIBILITY
-pair<_InputIt, _ForwardIt>
-uninitialized_move_n(_InputIt __first, _Size __n, _ForwardIt __first_res) {
- using _Vt = typename iterator_traits<_ForwardIt>::value_type;
- auto __idx = __first_res;
+// uninitialized_move_n
+
+template <class _ValueType, class _InputIterator, class _Size, class _ForwardIterator, class _Sentinel, class _IterMove>
+inline _LIBCPP_HIDE_FROM_ABI pair<_InputIterator, _ForwardIterator>
+__uninitialized_move_n(_InputIterator __ifirst, _Size __n,
+ _ForwardIterator __ofirst, _Sentinel __olast, _IterMove __iter_move) {
+ auto __idx = __ofirst;
#ifndef _LIBCPP_NO_EXCEPTIONS
- try {
+ try {
#endif
- for (; __n > 0; ++__idx, (void) ++__first, --__n)
- ::new ((void*)_VSTD::addressof(*__idx)) _Vt(_VSTD::move(*__first));
- return {__first, __idx};
+ for (; __n > 0 && __idx != __olast; ++__idx, (void)++__ifirst, --__n)
+ ::new (_VSTD::__voidify(*__idx)) _ValueType(__iter_move(__ifirst));
#ifndef _LIBCPP_NO_EXCEPTIONS
- } catch (...) {
- _VSTD::destroy(__first_res, __idx);
- throw;
- }
+ } catch (...) {
+ _VSTD::__destroy(__ofirst, __idx);
+ throw;
+ }
#endif
+
+ return {_VSTD::move(__ifirst), _VSTD::move(__idx)};
+}
+
+template <class _InputIterator, class _Size, class _ForwardIterator>
+inline _LIBCPP_HIDE_FROM_ABI pair<_InputIterator, _ForwardIterator>
+uninitialized_move_n(_InputIterator __ifirst, _Size __n, _ForwardIterator __ofirst) {
+ using _ValueType = typename iterator_traits<_ForwardIterator>::value_type;
+ auto __iter_move = [](auto&& __iter) -> decltype(auto) { return _VSTD::move(*__iter); };
+
+ return _VSTD::__uninitialized_move_n<_ValueType>(_VSTD::move(__ifirst), __n, _VSTD::move(__ofirst),
+ __unreachable_sentinel(), __iter_move);
}
#endif // _LIBCPP_STD_VER > 14
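
The `__unreachable_sentinel` trick above lets one doubly bounded loop serve both the classic singly bounded overloads and the new ranges overloads: passing a sentinel that never compares equal simply disables the output-side bound. A stripped-down illustration with placeholder names:

    #include <utility>

    // A sentinel that compares unequal to everything (placeholder name).
    struct always_unequal {
        template <class It>
        friend constexpr bool operator!=(const It&, always_unequal) noexcept { return true; }
    };

    // Doubly bounded copy: stops at whichever bound is hit first and reports both.
    template <class In, class InSent, class Out, class OutSent>
    constexpr std::pair<In, Out> copy_bounded(In in, InSent ilast, Out out, OutSent olast) {
        for (; in != ilast && out != olast; ++in, (void)++out)
            *out = *in;  // stand-in for the placement new done by the real algorithms
        return {std::move(in), std::move(out)};
    }

    int main() {
        int src[3] = {1, 2, 3};
        int dst[3] = {};
        // Passing always_unequal{} as the output bound recovers the classic
        // "copy everything from the input" behaviour.
        copy_bounded(src, src + 3, dst, always_unequal{});
    }
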
diff --git a/contrib/llvm-project/libcxx/include/__memory/unique_ptr.h b/contrib/llvm-project/libcxx/include/__memory/unique_ptr.h
index 433120394269..e892f3287b93 100644
--- a/contrib/llvm-project/libcxx/include/__memory/unique_ptr.h
+++ b/contrib/llvm-project/libcxx/include/__memory/unique_ptr.h
@@ -11,9 +11,9 @@
#define _LIBCPP___MEMORY_UNIQUE_PTR_H
#include <__config>
-#include <__functional_base>
#include <__functional/hash.h>
#include <__functional/operations.h>
+#include <__functional_base>
#include <__memory/allocator_traits.h> // __pointer
#include <__memory/compressed_pair.h>
#include <__utility/forward.h>
diff --git a/contrib/llvm-project/libcxx/include/__mutex_base b/contrib/llvm-project/libcxx/include/__mutex_base
index 21e2075603b5..8d8c8af7070c 100644
--- a/contrib/llvm-project/libcxx/include/__mutex_base
+++ b/contrib/llvm-project/libcxx/include/__mutex_base
@@ -13,6 +13,7 @@
#include <__config>
#include <__threading_support>
#include <chrono>
+#include <ratio>
#include <system_error>
#include <time.h>
diff --git a/contrib/llvm-project/libcxx/include/__random/chi_squared_distribution.h b/contrib/llvm-project/libcxx/include/__random/chi_squared_distribution.h
index 9cf38971bdde..2aa0762a520c 100644
--- a/contrib/llvm-project/libcxx/include/__random/chi_squared_distribution.h
+++ b/contrib/llvm-project/libcxx/include/__random/chi_squared_distribution.h
@@ -11,8 +11,8 @@
#include <__config>
#include <__random/gamma_distribution.h>
-#include <limits>
#include <iosfwd>
+#include <limits>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/__random/gamma_distribution.h b/contrib/llvm-project/libcxx/include/__random/gamma_distribution.h
index 49d024eafea2..34167e463528 100644
--- a/contrib/llvm-project/libcxx/include/__random/gamma_distribution.h
+++ b/contrib/llvm-project/libcxx/include/__random/gamma_distribution.h
@@ -10,8 +10,8 @@
#define _LIBCPP___RANDOM_GAMMA_DISTRIBUTION_H
#include <__config>
-#include <__random/uniform_real_distribution.h>
#include <__random/exponential_distribution.h>
+#include <__random/uniform_real_distribution.h>
#include <cmath>
#include <iosfwd>
#include <limits>
diff --git a/contrib/llvm-project/libcxx/include/__random/lognormal_distribution.h b/contrib/llvm-project/libcxx/include/__random/lognormal_distribution.h
index 752861c3de0c..8fadb5a1e66a 100644
--- a/contrib/llvm-project/libcxx/include/__random/lognormal_distribution.h
+++ b/contrib/llvm-project/libcxx/include/__random/lognormal_distribution.h
@@ -24,6 +24,8 @@ _LIBCPP_PUSH_MACROS
_LIBCPP_BEGIN_NAMESPACE_STD
+#ifdef _LIBCPP_ABI_OLD_LOGNORMAL_DISTRIBUTION
+
template<class _RealType = double>
class _LIBCPP_TEMPLATE_VIS lognormal_distribution
{
@@ -156,6 +158,140 @@ operator>>(basic_istream<_CharT, _Traits>& __is,
return __is >> __x.__p_.__nd_;
}
+#else // _LIBCPP_ABI_OLD_LOGNORMAL_DISTRIBUTION
+
+template<class _RealType = double>
+class _LIBCPP_TEMPLATE_VIS lognormal_distribution
+{
+public:
+ // types
+ typedef _RealType result_type;
+
+ class _LIBCPP_TEMPLATE_VIS param_type
+ {
+ result_type __m_;
+ result_type __s_;
+ public:
+ typedef lognormal_distribution distribution_type;
+
+ _LIBCPP_INLINE_VISIBILITY
+ explicit param_type(result_type __m = 0, result_type __s = 1)
+ : __m_(__m), __s_(__s) {}
+
+ _LIBCPP_INLINE_VISIBILITY
+ result_type m() const {return __m_;}
+ _LIBCPP_INLINE_VISIBILITY
+ result_type s() const {return __s_;}
+
+ friend _LIBCPP_INLINE_VISIBILITY
+ bool operator==(const param_type& __x, const param_type& __y)
+ {return __x.__m_ == __y.__m_ && __x.__s_ == __y.__s_;}
+ friend _LIBCPP_INLINE_VISIBILITY
+ bool operator!=(const param_type& __x, const param_type& __y)
+ {return !(__x == __y);}
+ };
+
+private:
+ normal_distribution<result_type> __nd_;
+
+public:
+ // constructor and reset functions
+#ifndef _LIBCPP_CXX03_LANG
+ _LIBCPP_INLINE_VISIBILITY
+ lognormal_distribution() : lognormal_distribution(0) {}
+ _LIBCPP_INLINE_VISIBILITY
+ explicit lognormal_distribution(result_type __m, result_type __s = 1)
+ : __nd_(__m, __s) {}
+#else
+ _LIBCPP_INLINE_VISIBILITY
+ explicit lognormal_distribution(result_type __m = 0,
+ result_type __s = 1)
+ : __nd_(__m, __s) {}
+#endif
+ _LIBCPP_INLINE_VISIBILITY
+ explicit lognormal_distribution(const param_type& __p)
+ : __nd_(__p.m(), __p.s()) {}
+ _LIBCPP_INLINE_VISIBILITY
+ void reset() {__nd_.reset();}
+
+ // generating functions
+ template<class _URNG>
+ _LIBCPP_INLINE_VISIBILITY
+ result_type operator()(_URNG& __g)
+ {
+ return _VSTD::exp(__nd_(__g));
+ }
+
+ template<class _URNG>
+ _LIBCPP_INLINE_VISIBILITY
+ result_type operator()(_URNG& __g, const param_type& __p)
+ {
+ typename normal_distribution<result_type>::param_type __pn(__p.m(), __p.s());
+ return _VSTD::exp(__nd_(__g, __pn));
+ }
+
+ // property functions
+ _LIBCPP_INLINE_VISIBILITY
+ result_type m() const {return __nd_.mean();}
+ _LIBCPP_INLINE_VISIBILITY
+ result_type s() const {return __nd_.stddev();}
+
+ _LIBCPP_INLINE_VISIBILITY
+ param_type param() const {return param_type(__nd_.mean(), __nd_.stddev());}
+ _LIBCPP_INLINE_VISIBILITY
+ void param(const param_type& __p)
+ {
+ typename normal_distribution<result_type>::param_type __pn(__p.m(), __p.s());
+ __nd_.param(__pn);
+ }
+
+ _LIBCPP_INLINE_VISIBILITY
+ result_type min() const {return 0;}
+ _LIBCPP_INLINE_VISIBILITY
+ result_type max() const {return numeric_limits<result_type>::infinity();}
+
+ friend _LIBCPP_INLINE_VISIBILITY
+ bool operator==(const lognormal_distribution& __x,
+ const lognormal_distribution& __y)
+ {return __x.__nd_ == __y.__nd_;}
+ friend _LIBCPP_INLINE_VISIBILITY
+ bool operator!=(const lognormal_distribution& __x,
+ const lognormal_distribution& __y)
+ {return !(__x == __y);}
+
+ template <class _CharT, class _Traits, class _RT>
+ friend
+ basic_ostream<_CharT, _Traits>&
+ operator<<(basic_ostream<_CharT, _Traits>& __os,
+ const lognormal_distribution<_RT>& __x);
+
+ template <class _CharT, class _Traits, class _RT>
+ friend
+ basic_istream<_CharT, _Traits>&
+ operator>>(basic_istream<_CharT, _Traits>& __is,
+ lognormal_distribution<_RT>& __x);
+};
+
+template <class _CharT, class _Traits, class _RT>
+inline _LIBCPP_INLINE_VISIBILITY
+basic_ostream<_CharT, _Traits>&
+operator<<(basic_ostream<_CharT, _Traits>& __os,
+ const lognormal_distribution<_RT>& __x)
+{
+ return __os << __x.__nd_;
+}
+
+template <class _CharT, class _Traits, class _RT>
+inline _LIBCPP_INLINE_VISIBILITY
+basic_istream<_CharT, _Traits>&
+operator>>(basic_istream<_CharT, _Traits>& __is,
+ lognormal_distribution<_RT>& __x)
+{
+ return __is >> __x.__nd_;
+}
+
+#endif // _LIBCPP_ABI_OLD_LOGNORMAL_DISTRIBUTION
+
_LIBCPP_END_NAMESPACE_STD
_LIBCPP_POP_MACROS
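
The non-ABI-compat implementation above simply delegates to `normal_distribution` and exponentiates the result, which is the defining relation of the distribution: if N ~ Normal(m, s), then exp(N) is lognormal(m, s). A quick sketch against the public API:

    #include <cmath>
    #include <iostream>
    #include <random>

    int main() {
        std::mt19937_64 gen(42);

        std::lognormal_distribution<double> logn(/*m=*/0.0, /*s=*/0.25);
        std::normal_distribution<double>    norm(/*mean=*/0.0, /*stddev=*/0.25);

        // Samples from the same family: a lognormal draw is exp() of a normal draw.
        std::cout << "lognormal:   " << logn(gen) << '\n';
        std::cout << "exp(normal): " << std::exp(norm(gen)) << '\n';
    }
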
diff --git a/contrib/llvm-project/libcxx/include/__random/random_device.h b/contrib/llvm-project/libcxx/include/__random/random_device.h
index 835f726fdbcc..ef11977f9b77 100644
--- a/contrib/llvm-project/libcxx/include/__random/random_device.h
+++ b/contrib/llvm-project/libcxx/include/__random/random_device.h
@@ -27,7 +27,26 @@ class _LIBCPP_TYPE_VIS random_device
{
#ifdef _LIBCPP_USING_DEV_RANDOM
int __f_;
+#elif !defined(_LIBCPP_ABI_NO_RANDOM_DEVICE_COMPATIBILITY_LAYOUT)
+# if defined(__clang__)
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wunused-private-field"
+# endif
+
+ // Apple platforms used to use the `_LIBCPP_USING_DEV_RANDOM` code path, and now
+ // use `arc4random()` as of this comment. In order to avoid breaking the ABI, we
+ // retain the same layout as before.
+# if defined(__APPLE__)
+ int __padding_; // padding to fake the `__f_` field above
+# endif
+
+ // ... vendors can add workarounds here if they switch to a different representation ...
+
+# if defined(__clang__)
+# pragma clang diagnostic pop
+# endif
#endif
+
public:
// types
typedef unsigned result_type;
diff --git a/contrib/llvm-project/libcxx/include/__random/seed_seq.h b/contrib/llvm-project/libcxx/include/__random/seed_seq.h
index bf27af6627a5..1a0877995650 100644
--- a/contrib/llvm-project/libcxx/include/__random/seed_seq.h
+++ b/contrib/llvm-project/libcxx/include/__random/seed_seq.h
@@ -31,25 +31,24 @@ public:
// types
typedef uint32_t result_type;
-private:
- vector<result_type> __v_;
-
- template<class _InputIterator>
- void init(_InputIterator __first, _InputIterator __last);
-public:
// constructors
_LIBCPP_INLINE_VISIBILITY
seed_seq() _NOEXCEPT {}
#ifndef _LIBCPP_CXX03_LANG
- template<class _Tp>
- _LIBCPP_INLINE_VISIBILITY
- seed_seq(initializer_list<_Tp> __il) {init(__il.begin(), __il.end());}
+ template<class _Tp, __enable_if_t<is_integral<_Tp>::value>* = nullptr>
+ _LIBCPP_INLINE_VISIBILITY
+ seed_seq(initializer_list<_Tp> __il) {
+ __init(__il.begin(), __il.end());
+ }
#endif // _LIBCPP_CXX03_LANG
template<class _InputIterator>
- _LIBCPP_INLINE_VISIBILITY
- seed_seq(_InputIterator __first, _InputIterator __last)
- {init(__first, __last);}
+ _LIBCPP_INLINE_VISIBILITY
+ seed_seq(_InputIterator __first, _InputIterator __last) {
+ static_assert(is_integral<typename iterator_traits<_InputIterator>::value_type>::value,
+ "Mandates: iterator_traits<InputIterator>::value_type is an integer type");
+ __init(__first, __last);
+ }
// generating functions
template<class _RandomAccessIterator>
@@ -68,11 +67,17 @@ public:
_LIBCPP_INLINE_VISIBILITY
static result_type _Tp(result_type __x) {return __x ^ (__x >> 27);}
+
+private:
+ template<class _InputIterator>
+ void __init(_InputIterator __first, _InputIterator __last);
+
+ vector<result_type> __v_;
};
template<class _InputIterator>
void
-seed_seq::init(_InputIterator __first, _InputIterator __last)
+seed_seq::__init(_InputIterator __first, _InputIterator __last)
{
for (_InputIterator __s = __first; __s != __last; ++__s)
__v_.push_back(*__s & 0xFFFFFFFF);
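
The reshuffled `seed_seq` now enforces the standard's Mandates clause that the seed material be integers, via the `static_assert` and the `enable_if` on the initializer-list constructor. Typical conforming use:

    #include <cstdint>
    #include <iterator>
    #include <random>

    int main() {
        // Both constructors require an integer value type.
        std::seed_seq from_list{1u, 2u, 3u, 4u};

        const std::uint32_t material[] = {0xdeadbeefu, 0xcafef00du};
        std::seed_seq from_range(std::begin(material), std::end(material));

        std::mt19937 engine(from_range);  // the engine pulls its state from the seed_seq
        (void)engine;
        (void)from_list;
    }
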
diff --git a/contrib/llvm-project/libcxx/include/__ranges/access.h b/contrib/llvm-project/libcxx/include/__ranges/access.h
index 91dc3055c86d..0b9470fa4017 100644
--- a/contrib/llvm-project/libcxx/include/__ranges/access.h
+++ b/contrib/llvm-project/libcxx/include/__ranges/access.h
@@ -9,6 +9,7 @@
#ifndef _LIBCPP___RANGES_ACCESS_H
#define _LIBCPP___RANGES_ACCESS_H
+#include <__concepts/class_or_enum.h>
#include <__config>
#include <__iterator/concepts.h>
#include <__iterator/readable_traits.h>
@@ -39,6 +40,7 @@ namespace __begin {
template <class _Tp>
concept __member_begin =
__can_borrow<_Tp> &&
+ __workaround_52970<_Tp> &&
requires(_Tp&& __t) {
{ _LIBCPP_AUTO_CAST(__t.begin()) } -> input_or_output_iterator;
};
@@ -102,6 +104,7 @@ namespace __end {
template <class _Tp>
concept __member_end =
__can_borrow<_Tp> &&
+ __workaround_52970<_Tp> &&
requires(_Tp&& __t) {
typename iterator_t<_Tp>;
{ _LIBCPP_AUTO_CAST(__t.end()) } -> sentinel_for<iterator_t<_Tp>>;
@@ -160,20 +163,20 @@ namespace ranges {
namespace __cbegin {
struct __fn {
template <class _Tp>
- requires invocable<decltype(ranges::begin), _Tp const&>
- [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr auto operator()(_Tp& __t) const
- noexcept(noexcept(ranges::begin(_VSTD::as_const(__t))))
- {
- return ranges::begin(_VSTD::as_const(__t));
- }
+ requires is_lvalue_reference_v<_Tp&&>
+ [[nodiscard]] _LIBCPP_HIDE_FROM_ABI
+ constexpr auto operator()(_Tp&& __t) const
+ noexcept(noexcept(ranges::begin(static_cast<const remove_reference_t<_Tp>&>(__t))))
+ -> decltype( ranges::begin(static_cast<const remove_reference_t<_Tp>&>(__t)))
+ { return ranges::begin(static_cast<const remove_reference_t<_Tp>&>(__t)); }
template <class _Tp>
- requires is_rvalue_reference_v<_Tp> && invocable<decltype(ranges::begin), _Tp const&&>
- [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr auto operator()(_Tp&& __t) const
- noexcept(noexcept(ranges::begin(static_cast<_Tp const&&>(__t))))
- {
- return ranges::begin(static_cast<_Tp const&&>(__t));
- }
+ requires is_rvalue_reference_v<_Tp&&>
+ [[nodiscard]] _LIBCPP_HIDE_FROM_ABI
+ constexpr auto operator()(_Tp&& __t) const
+ noexcept(noexcept(ranges::begin(static_cast<const _Tp&&>(__t))))
+ -> decltype( ranges::begin(static_cast<const _Tp&&>(__t)))
+ { return ranges::begin(static_cast<const _Tp&&>(__t)); }
};
}
@@ -188,20 +191,20 @@ namespace ranges {
namespace __cend {
struct __fn {
template <class _Tp>
- requires invocable<decltype(ranges::end), _Tp const&>
- [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr auto operator()(_Tp& __t) const
- noexcept(noexcept(ranges::end(_VSTD::as_const(__t))))
- {
- return ranges::end(_VSTD::as_const(__t));
- }
+ requires is_lvalue_reference_v<_Tp&&>
+ [[nodiscard]] _LIBCPP_HIDE_FROM_ABI
+ constexpr auto operator()(_Tp&& __t) const
+ noexcept(noexcept(ranges::end(static_cast<const remove_reference_t<_Tp>&>(__t))))
+ -> decltype( ranges::end(static_cast<const remove_reference_t<_Tp>&>(__t)))
+ { return ranges::end(static_cast<const remove_reference_t<_Tp>&>(__t)); }
template <class _Tp>
- requires is_rvalue_reference_v<_Tp> && invocable<decltype(ranges::end), _Tp const&&>
- [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr auto operator()(_Tp&& __t) const
- noexcept(noexcept(ranges::end(static_cast<_Tp const&&>(__t))))
- {
- return ranges::end(static_cast<_Tp const&&>(__t));
- }
+ requires is_rvalue_reference_v<_Tp&&>
+ [[nodiscard]] _LIBCPP_HIDE_FROM_ABI
+ constexpr auto operator()(_Tp&& __t) const
+ noexcept(noexcept(ranges::end(static_cast<const _Tp&&>(__t))))
+ -> decltype( ranges::end(static_cast<const _Tp&&>(__t)))
+ { return ranges::end(static_cast<const _Tp&&>(__t)); }
};
}
diff --git a/contrib/llvm-project/libcxx/include/__ranges/all.h b/contrib/llvm-project/libcxx/include/__ranges/all.h
index ccc77258ba10..90327da81460 100644
--- a/contrib/llvm-project/libcxx/include/__ranges/all.h
+++ b/contrib/llvm-project/libcxx/include/__ranges/all.h
@@ -14,9 +14,9 @@
#include <__iterator/iterator_traits.h>
#include <__ranges/access.h>
#include <__ranges/concepts.h>
+#include <__ranges/owning_view.h>
#include <__ranges/range_adaptor.h>
#include <__ranges/ref_view.h>
-#include <__ranges/subrange.h>
#include <__utility/auto_cast.h>
#include <__utility/declval.h>
#include <__utility/forward.h>
@@ -56,12 +56,12 @@ namespace __all {
template<class _Tp>
requires (!ranges::view<decay_t<_Tp>> &&
!requires (_Tp&& __t) { ranges::ref_view{_VSTD::forward<_Tp>(__t)}; } &&
- requires (_Tp&& __t) { ranges::subrange{_VSTD::forward<_Tp>(__t)}; })
+ requires (_Tp&& __t) { ranges::owning_view{_VSTD::forward<_Tp>(__t)}; })
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI
constexpr auto operator()(_Tp&& __t) const
- noexcept(noexcept(ranges::subrange{_VSTD::forward<_Tp>(__t)}))
+ noexcept(noexcept(ranges::owning_view{_VSTD::forward<_Tp>(__t)}))
{
- return ranges::subrange{_VSTD::forward<_Tp>(__t)};
+ return ranges::owning_view{_VSTD::forward<_Tp>(__t)};
}
};
}
diff --git a/contrib/llvm-project/libcxx/include/__ranges/concepts.h b/contrib/llvm-project/libcxx/include/__ranges/concepts.h
index bad23c8c4bfb..a9cb15f9f17c 100644
--- a/contrib/llvm-project/libcxx/include/__ranges/concepts.h
+++ b/contrib/llvm-project/libcxx/include/__ranges/concepts.h
@@ -9,6 +9,9 @@
#ifndef _LIBCPP___RANGES_CONCEPTS_H
#define _LIBCPP___RANGES_CONCEPTS_H
+#include <__concepts/constructible.h>
+#include <__concepts/movable.h>
+#include <__concepts/same_as.h>
#include <__config>
#include <__iterator/concepts.h>
#include <__iterator/incrementable_traits.h>
@@ -20,7 +23,7 @@
#include <__ranges/enable_borrowed_range.h>
#include <__ranges/enable_view.h>
#include <__ranges/size.h>
-#include <concepts>
+#include <initializer_list>
#include <type_traits>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
@@ -80,11 +83,11 @@ namespace ranges {
movable<_Tp> &&
enable_view<_Tp>;
- template<class _Range>
+ template <class _Range>
concept __simple_view =
view<_Range> && range<const _Range> &&
same_as<iterator_t<_Range>, iterator_t<const _Range>> &&
- same_as<sentinel_t<_Range>, iterator_t<const _Range>>;
+ same_as<sentinel_t<_Range>, sentinel_t<const _Range>>;
// [range.refinements], other range refinements
template <class _Rp, class _Tp>
@@ -114,12 +117,20 @@ namespace ranges {
template <class _Tp>
concept common_range = range<_Tp> && same_as<iterator_t<_Tp>, sentinel_t<_Tp>>;
- template<class _Tp>
+ template <class _Tp>
+ inline constexpr bool __is_std_initializer_list = false;
+
+ template <class _Ep>
+ inline constexpr bool __is_std_initializer_list<initializer_list<_Ep>> = true;
+
+ template <class _Tp>
concept viewable_range =
- range<_Tp> && (
- (view<remove_cvref_t<_Tp>> && constructible_from<remove_cvref_t<_Tp>, _Tp>) ||
- (!view<remove_cvref_t<_Tp>> && borrowed_range<_Tp>)
- );
+ range<_Tp> &&
+ ((view<remove_cvref_t<_Tp>> && constructible_from<remove_cvref_t<_Tp>, _Tp>) ||
+ (!view<remove_cvref_t<_Tp>> &&
+ (is_lvalue_reference_v<_Tp> ||
+ (movable<remove_reference_t<_Tp>> && !__is_std_initializer_list<remove_cvref_t<_Tp>>))));
+
} // namespace ranges
#endif // !defined(_LIBCPP_HAS_NO_RANGES)
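
The `viewable_range` rewrite implements P2415: besides lvalues and views, any movable rvalue range except a `std::initializer_list` is now viewable (it can be wrapped in the new `owning_view`). On a library implementing this wording:

    #include <initializer_list>
    #include <ranges>
    #include <vector>

    static_assert(std::ranges::viewable_range<std::vector<int>&>);    // lvalue: ref_view
    static_assert(std::ranges::viewable_range<std::vector<int>&&>);   // movable rvalue: owning_view
    static_assert(!std::ranges::viewable_range<std::initializer_list<int>&&>);  // excluded

    int main() {}
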
diff --git a/contrib/llvm-project/libcxx/include/__ranges/data.h b/contrib/llvm-project/libcxx/include/__ranges/data.h
index cc151c59f3d7..69dfd479c011 100644
--- a/contrib/llvm-project/libcxx/include/__ranges/data.h
+++ b/contrib/llvm-project/libcxx/include/__ranges/data.h
@@ -9,13 +9,13 @@
#ifndef _LIBCPP___RANGES_DATA_H
#define _LIBCPP___RANGES_DATA_H
+#include <__concepts/class_or_enum.h>
#include <__config>
#include <__iterator/concepts.h>
#include <__iterator/iterator_traits.h>
#include <__memory/pointer_traits.h>
#include <__ranges/access.h>
-#include <__utility/forward.h>
-#include <concepts>
+#include <__utility/auto_cast.h>
#include <type_traits>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
@@ -35,34 +35,33 @@ namespace __data {
template <class _Tp>
concept __member_data =
+ __can_borrow<_Tp> &&
+ __workaround_52970<_Tp> &&
requires(_Tp&& __t) {
- { _VSTD::forward<_Tp>(__t) } -> __can_borrow;
- { __t.data() } -> __ptr_to_object;
+ { _LIBCPP_AUTO_CAST(__t.data()) } -> __ptr_to_object;
};
template <class _Tp>
concept __ranges_begin_invocable =
!__member_data<_Tp> &&
+ __can_borrow<_Tp> &&
requires(_Tp&& __t) {
- { _VSTD::forward<_Tp>(__t) } -> __can_borrow;
- { ranges::begin(_VSTD::forward<_Tp>(__t)) } -> contiguous_iterator;
+ { ranges::begin(__t) } -> contiguous_iterator;
};
struct __fn {
template <__member_data _Tp>
- requires __can_borrow<_Tp>
_LIBCPP_HIDE_FROM_ABI
- constexpr __ptr_to_object auto operator()(_Tp&& __t) const
+ constexpr auto operator()(_Tp&& __t) const
noexcept(noexcept(__t.data())) {
return __t.data();
}
template<__ranges_begin_invocable _Tp>
- requires __can_borrow<_Tp>
_LIBCPP_HIDE_FROM_ABI
- constexpr __ptr_to_object auto operator()(_Tp&& __t) const
- noexcept(noexcept(_VSTD::to_address(ranges::begin(_VSTD::forward<_Tp>(__t))))) {
- return _VSTD::to_address(ranges::begin(_VSTD::forward<_Tp>(__t)));
+ constexpr auto operator()(_Tp&& __t) const
+ noexcept(noexcept(_VSTD::to_address(ranges::begin(__t)))) {
+ return _VSTD::to_address(ranges::begin(__t));
}
};
}
@@ -72,6 +71,34 @@ inline namespace __cpo {
} // namespace __cpo
} // namespace ranges
+// [range.prim.cdata]
+
+namespace ranges {
+namespace __cdata {
+ struct __fn {
+ template <class _Tp>
+ requires is_lvalue_reference_v<_Tp&&>
+ [[nodiscard]] _LIBCPP_HIDE_FROM_ABI
+ constexpr auto operator()(_Tp&& __t) const
+ noexcept(noexcept(ranges::data(static_cast<const remove_reference_t<_Tp>&>(__t))))
+ -> decltype( ranges::data(static_cast<const remove_reference_t<_Tp>&>(__t)))
+ { return ranges::data(static_cast<const remove_reference_t<_Tp>&>(__t)); }
+
+ template <class _Tp>
+ requires is_rvalue_reference_v<_Tp&&>
+ [[nodiscard]] _LIBCPP_HIDE_FROM_ABI
+ constexpr auto operator()(_Tp&& __t) const
+ noexcept(noexcept(ranges::data(static_cast<const _Tp&&>(__t))))
+ -> decltype( ranges::data(static_cast<const _Tp&&>(__t)))
+ { return ranges::data(static_cast<const _Tp&&>(__t)); }
+ };
+}
+
+inline namespace __cpo {
+ inline constexpr auto cdata = __cdata::__fn{};
+} // namespace __cpo
+} // namespace ranges
+
#endif // !defined(_LIBCPP_HAS_NO_RANGES)
_LIBCPP_END_NAMESPACE_STD
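
`ranges::cdata` is the const counterpart of `ranges::data`: it const-qualifies the argument before dispatching, so the pointer it yields cannot be used to mutate the range. For example:

    #include <ranges>
    #include <vector>

    int main() {
        std::vector<int> v = {1, 2, 3};

        int*       p = std::ranges::data(v);   // pointer to mutable elements
        const int* c = std::ranges::cdata(v);  // pointer to const, via data(as-const v)
        (void)p; (void)c;
    }
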
diff --git a/contrib/llvm-project/libcxx/include/__ranges/empty.h b/contrib/llvm-project/libcxx/include/__ranges/empty.h
index e8a8aabf4aed..8da0b120f182 100644
--- a/contrib/llvm-project/libcxx/include/__ranges/empty.h
+++ b/contrib/llvm-project/libcxx/include/__ranges/empty.h
@@ -9,6 +9,7 @@
#ifndef _LIBCPP___RANGES_EMPTY_H
#define _LIBCPP___RANGES_EMPTY_H
+#include <__concepts/class_or_enum.h>
#include <__config>
#include <__iterator/concepts.h>
#include <__ranges/access.h>
@@ -28,9 +29,11 @@ _LIBCPP_BEGIN_NAMESPACE_STD
namespace ranges {
namespace __empty {
template <class _Tp>
- concept __member_empty = requires(_Tp&& __t) {
- bool(__t.empty());
- };
+ concept __member_empty =
+ __workaround_52970<_Tp> &&
+ requires(_Tp&& __t) {
+ bool(__t.empty());
+ };
template<class _Tp>
concept __can_invoke_size =
diff --git a/contrib/llvm-project/libcxx/include/__ranges/empty_view.h b/contrib/llvm-project/libcxx/include/__ranges/empty_view.h
index 4a98a6f324e7..f744dcbe92f4 100644
--- a/contrib/llvm-project/libcxx/include/__ranges/empty_view.h
+++ b/contrib/llvm-project/libcxx/include/__ranges/empty_view.h
@@ -10,6 +10,7 @@
#define _LIBCPP___RANGES_EMPTY_VIEW_H
#include <__config>
+#include <__ranges/enable_borrowed_range.h>
#include <__ranges/view_interface.h>
#include <type_traits>
@@ -32,6 +33,9 @@ namespace ranges {
_LIBCPP_HIDE_FROM_ABI static constexpr size_t size() noexcept { return 0; }
_LIBCPP_HIDE_FROM_ABI static constexpr bool empty() noexcept { return true; }
};
+
+ template<class _Tp>
+ inline constexpr bool enable_borrowed_range<empty_view<_Tp>> = true;
} // namespace ranges
#endif // !defined(_LIBCPP_HAS_NO_RANGES)
diff --git a/contrib/llvm-project/libcxx/include/__ranges/enable_view.h b/contrib/llvm-project/libcxx/include/__ranges/enable_view.h
index a09de11da81e..e1daec046fc0 100644
--- a/contrib/llvm-project/libcxx/include/__ranges/enable_view.h
+++ b/contrib/llvm-project/libcxx/include/__ranges/enable_view.h
@@ -12,6 +12,7 @@
#include <__config>
#include <concepts>
+#include <type_traits>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
@@ -25,8 +26,17 @@ namespace ranges {
struct view_base { };
+template<class _Derived>
+ requires is_class_v<_Derived> && same_as<_Derived, remove_cv_t<_Derived>>
+class view_interface;
+
+template<class _Op, class _Yp>
+ requires is_convertible_v<_Op*, view_interface<_Yp>*>
+void __is_derived_from_view_interface(const _Op*, const view_interface<_Yp>*);
+
template <class _Tp>
-inline constexpr bool enable_view = derived_from<_Tp, view_base>;
+inline constexpr bool enable_view = derived_from<_Tp, view_base> ||
+ requires { ranges::__is_derived_from_view_interface((_Tp*)nullptr, (_Tp*)nullptr); };
} // end namespace ranges
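
The `enable_view` change stops requiring the `view_base` tag: deriving from any specialization of `view_interface` now opts a type in automatically. The detection deduces the base's template argument through a pointer conversion; a stripped-down version of the same idiom with placeholder names:

    #include <type_traits>

    template <class Y> struct base {};

    // Viable only if Op* converts to base<Y>* for some deducible Y.
    template <class Op, class Y>
        requires std::is_convertible_v<Op*, base<Y>*>
    void derived_from_some_base(const Op*, const base<Y>*);

    template <class T>
    inline constexpr bool derives_from_base =
        requires { derived_from_some_base((T*)nullptr, (T*)nullptr); };

    struct mine  : base<mine> {};
    struct other {};

    static_assert(derives_from_base<mine>);
    static_assert(!derives_from_base<other>);

    int main() {}
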
diff --git a/contrib/llvm-project/libcxx/include/__ranges/owning_view.h b/contrib/llvm-project/libcxx/include/__ranges/owning_view.h
new file mode 100644
index 000000000000..29182d2d8e46
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__ranges/owning_view.h
@@ -0,0 +1,81 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#ifndef _LIBCPP___RANGES_OWNING_VIEW_H
+#define _LIBCPP___RANGES_OWNING_VIEW_H
+
+#include <__concepts/constructible.h>
+#include <__concepts/movable.h>
+#include <__config>
+#include <__ranges/access.h>
+#include <__ranges/concepts.h>
+#include <__ranges/data.h>
+#include <__ranges/empty.h>
+#include <__ranges/enable_borrowed_range.h>
+#include <__ranges/size.h>
+#include <__ranges/view_interface.h>
+#include <__utility/move.h>
+#include <type_traits>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#if !defined(_LIBCPP_HAS_NO_RANGES)
+
+namespace ranges {
+ template<range _Rp>
+ requires movable<_Rp> && (!__is_std_initializer_list<remove_cvref_t<_Rp>>)
+ class owning_view : public view_interface<owning_view<_Rp>> {
+ _Rp __r_ = _Rp();
+
+public:
+ owning_view() requires default_initializable<_Rp> = default;
+ _LIBCPP_HIDE_FROM_ABI constexpr owning_view(_Rp&& __r) : __r_(_VSTD::move(__r)) {}
+
+ owning_view(owning_view&&) = default;
+ owning_view& operator=(owning_view&&) = default;
+
+ _LIBCPP_HIDE_FROM_ABI constexpr _Rp& base() & noexcept { return __r_; }
+ _LIBCPP_HIDE_FROM_ABI constexpr const _Rp& base() const& noexcept { return __r_; }
+ _LIBCPP_HIDE_FROM_ABI constexpr _Rp&& base() && noexcept { return _VSTD::move(__r_); }
+ _LIBCPP_HIDE_FROM_ABI constexpr const _Rp&& base() const&& noexcept { return _VSTD::move(__r_); }
+
+ _LIBCPP_HIDE_FROM_ABI constexpr iterator_t<_Rp> begin() { return ranges::begin(__r_); }
+ _LIBCPP_HIDE_FROM_ABI constexpr sentinel_t<_Rp> end() { return ranges::end(__r_); }
+ _LIBCPP_HIDE_FROM_ABI constexpr auto begin() const requires range<const _Rp> { return ranges::begin(__r_); }
+ _LIBCPP_HIDE_FROM_ABI constexpr auto end() const requires range<const _Rp> { return ranges::end(__r_); }
+
+ _LIBCPP_HIDE_FROM_ABI constexpr bool empty() requires requires { ranges::empty(__r_); }
+ { return ranges::empty(__r_); }
+ _LIBCPP_HIDE_FROM_ABI constexpr bool empty() const requires requires { ranges::empty(__r_); }
+ { return ranges::empty(__r_); }
+
+ _LIBCPP_HIDE_FROM_ABI constexpr auto size() requires sized_range<_Rp>
+ { return ranges::size(__r_); }
+ _LIBCPP_HIDE_FROM_ABI constexpr auto size() const requires sized_range<const _Rp>
+ { return ranges::size(__r_); }
+
+ _LIBCPP_HIDE_FROM_ABI constexpr auto data() requires contiguous_range<_Rp>
+ { return ranges::data(__r_); }
+ _LIBCPP_HIDE_FROM_ABI constexpr auto data() const requires contiguous_range<const _Rp>
+ { return ranges::data(__r_); }
+ };
+
+ template<class _Tp>
+ inline constexpr bool enable_borrowed_range<owning_view<_Tp>> = enable_borrowed_range<_Tp>;
+
+} // namespace ranges
+
+#endif // !defined(_LIBCPP_HAS_NO_RANGES)
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___RANGES_OWNING_VIEW_H
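
owning_view (P2415) is a move-only view that stores its underlying range by value, so a view can safely outlive the container it was built from. A usage sketch, assuming the facility above is enabled in the toolchain:

#include <concepts>
#include <ranges>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> words = {"one", "two", "three"};

  // The constructor takes the range as an rvalue; the view then owns the elements.
  std::ranges::owning_view<std::vector<std::string>> ov{std::move(words)};

  for (const std::string& w : ov)
    (void)w; // iterate the owned elements

  static_assert(!std::copyable<decltype(ov)>); // single ownership: move-only
  static_assert(std::movable<decltype(ov)>);
}

Under P2415, views::all applied to a non-view rvalue is specified to yield an owning_view (that part of the change is outside this hunk), and the enable_borrowed_range specialization at the end of the header forwards borrowed-ness from the underlying range.
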
diff --git a/contrib/llvm-project/libcxx/include/__ranges/ref_view.h b/contrib/llvm-project/libcxx/include/__ranges/ref_view.h
index 7567ac48f255..283fa2599bff 100644
--- a/contrib/llvm-project/libcxx/include/__ranges/ref_view.h
+++ b/contrib/llvm-project/libcxx/include/__ranges/ref_view.h
@@ -18,6 +18,7 @@
#include <__ranges/concepts.h>
#include <__ranges/data.h>
#include <__ranges/empty.h>
+#include <__ranges/enable_borrowed_range.h>
#include <__ranges/size.h>
#include <__ranges/view_interface.h>
#include <__utility/forward.h>
@@ -74,6 +75,8 @@ public:
template<class _Range>
ref_view(_Range&) -> ref_view<_Range>;
+ template<class _Tp>
+ inline constexpr bool enable_borrowed_range<ref_view<_Tp>> = true;
} // namespace ranges
#endif // !defined(_LIBCPP_HAS_NO_RANGES)
diff --git a/contrib/llvm-project/libcxx/include/__ranges/single_view.h b/contrib/llvm-project/libcxx/include/__ranges/single_view.h
index 412fa9b64643..931ce78da7b9 100644
--- a/contrib/llvm-project/libcxx/include/__ranges/single_view.h
+++ b/contrib/llvm-project/libcxx/include/__ranges/single_view.h
@@ -10,8 +10,8 @@
#define _LIBCPP___RANGES_SINGLE_VIEW_H
#include <__config>
-#include <__ranges/view_interface.h>
#include <__ranges/copyable_box.h>
+#include <__ranges/view_interface.h>
#include <__utility/forward.h>
#include <__utility/in_place.h>
#include <__utility/move.h>
diff --git a/contrib/llvm-project/libcxx/include/__ranges/size.h b/contrib/llvm-project/libcxx/include/__ranges/size.h
index fc6641cf4887..f3de5a8b8410 100644
--- a/contrib/llvm-project/libcxx/include/__ranges/size.h
+++ b/contrib/llvm-project/libcxx/include/__ranges/size.h
@@ -9,6 +9,7 @@
#ifndef _LIBCPP___RANGES_SIZE_H
#define _LIBCPP___RANGES_SIZE_H
+#include <__concepts/class_or_enum.h>
#include <__config>
#include <__iterator/concepts.h>
#include <__iterator/iterator_traits.h>
@@ -41,9 +42,12 @@ namespace __size {
concept __size_enabled = !disable_sized_range<remove_cvref_t<_Tp>>;
template <class _Tp>
- concept __member_size = __size_enabled<_Tp> && requires(_Tp&& __t) {
- { _LIBCPP_AUTO_CAST(__t.size()) } -> __integer_like;
- };
+ concept __member_size =
+ __size_enabled<_Tp> &&
+ __workaround_52970<_Tp> &&
+ requires(_Tp&& __t) {
+ { _LIBCPP_AUTO_CAST(__t.size()) } -> __integer_like;
+ };
template <class _Tp>
concept __unqualified_size =
diff --git a/contrib/llvm-project/libcxx/include/__ranges/subrange.h b/contrib/llvm-project/libcxx/include/__ranges/subrange.h
index 8e984f2bf06c..14716d1fb0ff 100644
--- a/contrib/llvm-project/libcxx/include/__ranges/subrange.h
+++ b/contrib/llvm-project/libcxx/include/__ranges/subrange.h
@@ -40,12 +40,14 @@ _LIBCPP_BEGIN_NAMESPACE_STD
namespace ranges {
template<class _From, class _To>
+ concept __uses_nonqualification_pointer_conversion =
+ is_pointer_v<_From> && is_pointer_v<_To> &&
+ !convertible_to<remove_pointer_t<_From>(*)[], remove_pointer_t<_To>(*)[]>;
+
+ template<class _From, class _To>
concept __convertible_to_non_slicing =
convertible_to<_From, _To> &&
- // If they're both pointers, they must have the same element type.
- !(is_pointer_v<decay_t<_From>> &&
- is_pointer_v<decay_t<_To>> &&
- __different_from<remove_pointer_t<decay_t<_From>>, remove_pointer_t<decay_t<_To>>>);
+ !__uses_nonqualification_pointer_conversion<decay_t<_From>, decay_t<_To>>;
template<class _Tp>
concept __pair_like =
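
The rewritten constraint spells out "non-slicing" in the standard's terms: a pointer conversion is acceptable only when the corresponding array-pointer conversion (From(*)[] to To(*)[]) exists, i.e. when it is a pure qualification conversion. A sketch of the intended behaviour, assuming a conforming C++20 implementation (Base/Derived are hypothetical types):

#include <concepts>
#include <ranges>

struct Base { int x; };
struct Derived : Base { int y; };

// Derived* converts to Base*, but walking a Derived[] through a Base* would
// advance by sizeof(Base) and "slice" the elements, so subrange rejects it:
static_assert(std::convertible_to<Derived*, Base*>);
static_assert(!std::constructible_from<std::ranges::subrange<Base*>, Derived*, Derived*>);

// Adding const is a plain qualification conversion and remains allowed:
static_assert(std::constructible_from<std::ranges::subrange<const int*>, int*, int*>);
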
diff --git a/contrib/llvm-project/libcxx/include/__ranges/transform_view.h b/contrib/llvm-project/libcxx/include/__ranges/transform_view.h
index 0f53fbaa7e68..1506e8b2a7fe 100644
--- a/contrib/llvm-project/libcxx/include/__ranges/transform_view.h
+++ b/contrib/llvm-project/libcxx/include/__ranges/transform_view.h
@@ -195,9 +195,7 @@ public:
: __parent_(__i.__parent_), __current_(_VSTD::move(__i.__current_)) {}
_LIBCPP_HIDE_FROM_ABI
- constexpr iterator_t<_Base> base() const&
- requires copyable<iterator_t<_Base>>
- {
+ constexpr const iterator_t<_Base>& base() const& noexcept {
return __current_;
}
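
After this hunk, transform_view's iterator exposes the wrapped iterator by const reference, unconditionally, rather than returning a copy only when the underlying iterator is copyable. A small sketch (the pipeline below is illustrative, not taken from the patch):

#include <ranges>
#include <vector>

void f() {
  std::vector<int> v = {1, 2, 3};
  auto tv = v | std::views::transform([](int i) { return i * 2; });
  auto it = tv.begin();
  // base() now hands back a reference to the underlying vector iterator and is
  // available even when that iterator is move-only.
  const auto& underlying = it.base();
  (void)underlying;
}
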
diff --git a/contrib/llvm-project/libcxx/include/__ranges/view_interface.h b/contrib/llvm-project/libcxx/include/__ranges/view_interface.h
index 8a1f5d8c9251..c5215cbcb8e3 100644
--- a/contrib/llvm-project/libcxx/include/__ranges/view_interface.h
+++ b/contrib/llvm-project/libcxx/include/__ranges/view_interface.h
@@ -18,7 +18,6 @@
#include <__ranges/access.h>
#include <__ranges/concepts.h>
#include <__ranges/empty.h>
-#include <__ranges/enable_view.h>
#include <concepts>
#include <type_traits>
@@ -40,7 +39,7 @@ void __implicitly_convert_to(type_identity_t<_Tp>) noexcept;
template<class _Derived>
requires is_class_v<_Derived> && same_as<_Derived, remove_cv_t<_Derived>>
-class view_interface : public view_base {
+class view_interface {
_LIBCPP_HIDE_FROM_ABI
constexpr _Derived& __derived() noexcept {
static_assert(sizeof(_Derived) && derived_from<_Derived, view_interface> && view<_Derived>);
diff --git a/contrib/llvm-project/libcxx/include/__string b/contrib/llvm-project/libcxx/include/__string
index 13ff7b35e47d..2249d3a8aec5 100644
--- a/contrib/llvm-project/libcxx/include/__string
+++ b/contrib/llvm-project/libcxx/include/__string
@@ -10,21 +10,21 @@
#ifndef _LIBCPP___STRING
#define _LIBCPP___STRING
-#include <__config>
#include <__algorithm/copy.h>
#include <__algorithm/copy_backward.h>
#include <__algorithm/copy_n.h>
#include <__algorithm/fill_n.h>
-#include <__algorithm/find_first_of.h>
#include <__algorithm/find_end.h>
+#include <__algorithm/find_first_of.h>
#include <__algorithm/min.h>
+#include <__config>
#include <__functional/hash.h> // for __murmur2_or_cityhash
#include <__iterator/iterator_traits.h>
-#include <cstdio> // for EOF
#include <cstdint> // for uint_least16_t
+#include <cstdio> // for EOF
#include <cstring> // for memcpy
-#include <type_traits> // for __libcpp_is_constant_evaluated
#include <iosfwd> // for streampos & friends
+#include <type_traits> // for __libcpp_is_constant_evaluated
#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
# include <cwchar> // for wmemcpy
diff --git a/contrib/llvm-project/libcxx/include/__thread/poll_with_backoff.h b/contrib/llvm-project/libcxx/include/__thread/poll_with_backoff.h
index e1d8a9c90c56..3149a3862890 100644
--- a/contrib/llvm-project/libcxx/include/__thread/poll_with_backoff.h
+++ b/contrib/llvm-project/libcxx/include/__thread/poll_with_backoff.h
@@ -9,6 +9,7 @@
#ifndef _LIBCPP___THREAD_POLL_WITH_BACKOFF_H
#define _LIBCPP___THREAD_POLL_WITH_BACKOFF_H
+#include <__availability>
#include <__config>
#include <chrono>
diff --git a/contrib/llvm-project/libcxx/include/__thread/timed_backoff_policy.h b/contrib/llvm-project/libcxx/include/__thread/timed_backoff_policy.h
new file mode 100644
index 000000000000..d85a34071c6d
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__thread/timed_backoff_policy.h
@@ -0,0 +1,45 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#ifndef _LIBCPP___THREAD_TIMED_BACKOFF_POLICY_H
+#define _LIBCPP___THREAD_TIMED_BACKOFF_POLICY_H
+
+#include <__config>
+
+#ifndef _LIBCPP_HAS_NO_THREADS
+
+#include <__threading_support>
+#include <chrono>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+struct __libcpp_timed_backoff_policy {
+ _LIBCPP_INLINE_VISIBILITY
+ bool operator()(chrono::nanoseconds __elapsed) const
+ {
+ if(__elapsed > chrono::milliseconds(128))
+ __libcpp_thread_sleep_for(chrono::milliseconds(8));
+ else if(__elapsed > chrono::microseconds(64))
+ __libcpp_thread_sleep_for(__elapsed / 2);
+ else if(__elapsed > chrono::microseconds(4))
+ __libcpp_thread_yield();
+ else
+ {} // poll
+ return false;
+ }
+};
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP_HAS_NO_THREADS
+
+#endif // _LIBCPP___THREAD_TIMED_BACKOFF_POLICY_H
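
The backoff policy is lifted verbatim out of <__threading_support> (see the removal further down) into its own header, so headers such as <atomic> and <barrier> can pull it in without the full threading support header. A standalone sketch of the same escalation strategy, written against public <thread>/<chrono> facilities rather than libc++ internals (timed_backoff is an illustrative name):

#include <chrono>
#include <thread>

// Spin first, then yield, then sleep for half the time already spent,
// capped at 8ms naps once 128ms have elapsed -- mirroring the policy above.
struct timed_backoff {
  bool operator()(std::chrono::nanoseconds elapsed) const {
    using namespace std::chrono;
    if (elapsed > milliseconds(128))
      std::this_thread::sleep_for(milliseconds(8));
    else if (elapsed > microseconds(64))
      std::this_thread::sleep_for(elapsed / 2);
    else if (elapsed > microseconds(4))
      std::this_thread::yield();
    // else: busy-poll
    return false; // "keep polling", as in the loop driven by <__thread/poll_with_backoff.h>
  }
};
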
diff --git a/contrib/llvm-project/libcxx/include/__threading_support b/contrib/llvm-project/libcxx/include/__threading_support
index 68f381a62183..bf85d5f5d9f0 100644
--- a/contrib/llvm-project/libcxx/include/__threading_support
+++ b/contrib/llvm-project/libcxx/include/__threading_support
@@ -54,9 +54,6 @@
typedef ::timespec __libcpp_timespec_t;
#endif // !defined(_LIBCPP_HAS_NO_THREADS)
-_LIBCPP_PUSH_MACROS
-#include <__undef_macros>
-
_LIBCPP_BEGIN_NAMESPACE_STD
#if !defined(_LIBCPP_HAS_NO_THREADS)
@@ -252,53 +249,9 @@ int __libcpp_tls_set(__libcpp_tls_key __key, void *__p);
#endif // !defined(_LIBCPP_HAS_THREAD_API_EXTERNAL)
-struct __libcpp_timed_backoff_policy {
- _LIBCPP_INLINE_VISIBILITY
- bool operator()(chrono::nanoseconds __elapsed) const
- {
- if(__elapsed > chrono::milliseconds(128))
- __libcpp_thread_sleep_for(chrono::milliseconds(8));
- else if(__elapsed > chrono::microseconds(64))
- __libcpp_thread_sleep_for(__elapsed / 2);
- else if(__elapsed > chrono::microseconds(4))
- __libcpp_thread_yield();
- else
- {} // poll
- return false;
- }
-};
-
#if (!defined(_LIBCPP_HAS_THREAD_LIBRARY_EXTERNAL) || \
defined(_LIBCPP_BUILDING_THREAD_LIBRARY_EXTERNAL))
-
-namespace __thread_detail {
-
-_LIBCPP_HIDE_FROM_ABI inline
-__libcpp_timespec_t __convert_to_timespec(const chrono::nanoseconds& __ns)
-{
- using namespace chrono;
- seconds __s = duration_cast<seconds>(__ns);
- __libcpp_timespec_t __ts;
- typedef decltype(__ts.tv_sec) __ts_sec;
- const __ts_sec __ts_sec_max = numeric_limits<__ts_sec>::max();
-
- if (__s.count() < __ts_sec_max)
- {
- __ts.tv_sec = static_cast<__ts_sec>(__s.count());
- __ts.tv_nsec = static_cast<decltype(__ts.tv_nsec)>((__ns - __s).count());
- }
- else
- {
- __ts.tv_sec = __ts_sec_max;
- __ts.tv_nsec = 999999999; // (10^9 - 1)
- }
-
- return __ts;
-}
-
-} // namespace __thread_detail
-
#if defined(_LIBCPP_HAS_THREAD_API_PTHREAD)
_LIBCPP_HIDE_FROM_ABI inline
@@ -479,7 +432,7 @@ void __libcpp_thread_yield()
_LIBCPP_HIDE_FROM_ABI inline
void __libcpp_thread_sleep_for(const chrono::nanoseconds& __ns)
{
- __libcpp_timespec_t __ts = __thread_detail::__convert_to_timespec(__ns);
+ __libcpp_timespec_t __ts = _VSTD::__convert_to_timespec<__libcpp_timespec_t>(__ns);
while (nanosleep(&__ts, &__ts) == -1 && errno == EINTR);
}
@@ -664,7 +617,7 @@ void __libcpp_thread_yield()
_LIBCPP_HIDE_FROM_ABI inline
void __libcpp_thread_sleep_for(const chrono::nanoseconds& __ns)
{
- __libcpp_timespec_t __ts = __thread_detail::__convert_to_timespec(__ns);
+ __libcpp_timespec_t __ts = _VSTD::__convert_to_timespec<__libcpp_timespec_t>(__ns);
thrd_sleep(&__ts, nullptr);
}
@@ -776,6 +729,4 @@ get_id() _NOEXCEPT
_LIBCPP_END_NAMESPACE_STD
-_LIBCPP_POP_MACROS
-
#endif // _LIBCPP_THREADING_SUPPORT
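
Alongside the policy move, the nanoseconds-to-timespec helper leaves __thread_detail and becomes the _VSTD::__convert_to_timespec<...> template (per the new <__chrono/convert_to_timespec.h> include visible in the chrono diff below). A standalone sketch of the same saturating conversion, using only public facilities (to_timespec is an illustrative name):

#include <chrono>
#include <ctime>
#include <limits>

// Split a nanosecond count into (tv_sec, tv_nsec), clamping to the largest
// representable tv_sec -- the same logic the removed __convert_to_timespec had.
inline std::timespec to_timespec(std::chrono::nanoseconds ns) {
  using namespace std::chrono;
  std::timespec ts;
  const seconds s = duration_cast<seconds>(ns);
  using sec_t = decltype(ts.tv_sec);
  const sec_t sec_max = std::numeric_limits<sec_t>::max();
  if (s.count() < sec_max) {
    ts.tv_sec = static_cast<sec_t>(s.count());
    ts.tv_nsec = static_cast<decltype(ts.tv_nsec)>((ns - s).count());
  } else {
    ts.tv_sec = sec_max;
    ts.tv_nsec = 999999999; // 10^9 - 1
  }
  return ts;
}
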
diff --git a/contrib/llvm-project/libcxx/include/__utility/swap.h b/contrib/llvm-project/libcxx/include/__utility/swap.h
index 6c07511686f0..20ec1419fc97 100644
--- a/contrib/llvm-project/libcxx/include/__utility/swap.h
+++ b/contrib/llvm-project/libcxx/include/__utility/swap.h
@@ -12,8 +12,8 @@
#include <__config>
#include <__utility/declval.h>
#include <__utility/move.h>
-#include <type_traits>
#include <cstddef>
+#include <type_traits>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/algorithm b/contrib/llvm-project/libcxx/include/algorithm
index b28c8cd49890..03b4faaee284 100644
--- a/contrib/llvm-project/libcxx/include/algorithm
+++ b/contrib/llvm-project/libcxx/include/algorithm
@@ -18,6 +18,11 @@
namespace std
{
+namespace ranges {
+ template <class I1, class I2>
+ struct in_in_result; // since C++20
+}
+
template <class InputIterator, class Predicate>
constexpr bool // constexpr in C++20
all_of(InputIterator first, InputIterator last, Predicate pred);
@@ -641,19 +646,23 @@ template <class BidirectionalIterator, class Compare>
constexpr bool // constexpr in C++20
prev_permutation(BidirectionalIterator first, BidirectionalIterator last, Compare comp);
+namespace ranges {
+// [algorithms.results], algorithm result types
+template<class InputIterator, class OutputIterator>
+ struct in_out_result;
+}
+
} // std
*/
+#include <__bits> // __libcpp_clz
#include <__config>
#include <__debug>
-#include <__bits> // __libcpp_clz
#include <cstddef>
#include <cstring>
#include <functional>
#include <initializer_list>
-#include <utility> // needed to provide swap_ranges.
-#include <memory>
#include <iterator>
#include <memory>
#include <type_traits>
@@ -675,8 +684,8 @@ template <class BidirectionalIterator, class Compare>
#include <__algorithm/count_if.h>
#include <__algorithm/equal.h>
#include <__algorithm/equal_range.h>
-#include <__algorithm/fill_n.h>
#include <__algorithm/fill.h>
+#include <__algorithm/fill_n.h>
#include <__algorithm/find.h>
#include <__algorithm/find_end.h>
#include <__algorithm/find_first_of.h>
@@ -684,9 +693,11 @@ template <class BidirectionalIterator, class Compare>
#include <__algorithm/find_if_not.h>
#include <__algorithm/for_each.h>
#include <__algorithm/for_each_n.h>
-#include <__algorithm/generate_n.h>
#include <__algorithm/generate.h>
+#include <__algorithm/generate_n.h>
#include <__algorithm/half_positive.h>
+#include <__algorithm/in_in_result.h>
+#include <__algorithm/in_out_result.h>
#include <__algorithm/includes.h>
#include <__algorithm/inplace_merge.h>
#include <__algorithm/is_heap.h>
@@ -749,8 +760,8 @@ template <class BidirectionalIterator, class Compare>
#include <__algorithm/stable_sort.h>
#include <__algorithm/swap_ranges.h>
#include <__algorithm/transform.h>
-#include <__algorithm/unique_copy.h>
#include <__algorithm/unique.h>
+#include <__algorithm/unique_copy.h>
#include <__algorithm/unwrap_iter.h>
#include <__algorithm/upper_bound.h>
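
The synopsis and include list now pick up the [algorithms.results] types that the constrained (ranges) algorithms return. A minimal sketch of the shape of in_out_result, assuming the definition from [algorithms.results] (consume is a hypothetical helper):

#include <algorithm>

template <class I, class O>
void consume(std::ranges::in_out_result<I, O> r) {
  // r.in  is one past the last element read from the input range;
  // r.out is one past the last element written through the output iterator.
  (void)r.in;
  (void)r.out;
}
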
diff --git a/contrib/llvm-project/libcxx/include/atomic b/contrib/llvm-project/libcxx/include/atomic
index a293b7865dd7..e1820fca351f 100644
--- a/contrib/llvm-project/libcxx/include/atomic
+++ b/contrib/llvm-project/libcxx/include/atomic
@@ -521,6 +521,7 @@ template <class T>
#include <__availability>
#include <__config>
#include <__thread/poll_with_backoff.h>
+#include <__thread/timed_backoff_policy.h>
#include <cstddef>
#include <cstdint>
#include <cstring>
diff --git a/contrib/llvm-project/libcxx/include/barrier b/contrib/llvm-project/libcxx/include/barrier
index aef88556a011..c7af46271745 100644
--- a/contrib/llvm-project/libcxx/include/barrier
+++ b/contrib/llvm-project/libcxx/include/barrier
@@ -47,6 +47,7 @@ namespace std
#include <__availability>
#include <__config>
+#include <__thread/timed_backoff_policy.h>
#include <atomic>
#ifndef _LIBCPP_HAS_NO_TREE_BARRIER
# include <memory>
diff --git a/contrib/llvm-project/libcxx/include/bitset b/contrib/llvm-project/libcxx/include/bitset
index 8f538e92e7ff..a5b93f90d40f 100644
--- a/contrib/llvm-project/libcxx/include/bitset
+++ b/contrib/llvm-project/libcxx/include/bitset
@@ -112,14 +112,15 @@ template <size_t N> struct hash<std::bitset<N>>;
*/
-#include <__config>
#include <__bit_reference>
+#include <__config>
#include <__functional_base>
#include <climits>
#include <cstddef>
#include <iosfwd>
#include <stdexcept>
#include <string>
+#include <version>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/chrono b/contrib/llvm-project/libcxx/include/chrono
index 90c9b0829a1f..eada2b8520e3 100644
--- a/contrib/llvm-project/libcxx/include/chrono
+++ b/contrib/llvm-project/libcxx/include/chrono
@@ -694,2145 +694,20 @@ constexpr chrono::year operator ""y(unsigned lo
} // std
*/
-#include <__availability>
+#include <__chrono/calendar.h>
+#include <__chrono/convert_to_timespec.h>
+#include <__chrono/duration.h>
+#include <__chrono/file_clock.h>
+#include <__chrono/high_resolution_clock.h>
+#include <__chrono/steady_clock.h>
+#include <__chrono/system_clock.h>
+#include <__chrono/time_point.h>
#include <__config>
#include <compare>
-#include <ctime>
-#include <limits>
-#include <ratio>
-#include <type_traits>
#include <version>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
#endif
-_LIBCPP_PUSH_MACROS
-#include <__undef_macros>
-
-#ifndef _LIBCPP_CXX03_LANG
-_LIBCPP_BEGIN_NAMESPACE_FILESYSTEM
-struct _FilesystemClock;
-_LIBCPP_END_NAMESPACE_FILESYSTEM
-#endif // !_LIBCPP_CXX03_LANG
-
-_LIBCPP_BEGIN_NAMESPACE_STD
-
-namespace chrono
-{
-
-template <class _Rep, class _Period = ratio<1> > class _LIBCPP_TEMPLATE_VIS duration;
-
-template <class _Tp>
-struct __is_duration : false_type {};
-
-template <class _Rep, class _Period>
-struct __is_duration<duration<_Rep, _Period> > : true_type {};
-
-template <class _Rep, class _Period>
-struct __is_duration<const duration<_Rep, _Period> > : true_type {};
-
-template <class _Rep, class _Period>
-struct __is_duration<volatile duration<_Rep, _Period> > : true_type {};
-
-template <class _Rep, class _Period>
-struct __is_duration<const volatile duration<_Rep, _Period> > : true_type {};
-
-} // namespace chrono
-
-template <class _Rep1, class _Period1, class _Rep2, class _Period2>
-struct _LIBCPP_TEMPLATE_VIS common_type<chrono::duration<_Rep1, _Period1>,
- chrono::duration<_Rep2, _Period2> >
-{
- typedef chrono::duration<typename common_type<_Rep1, _Rep2>::type,
- typename __ratio_gcd<_Period1, _Period2>::type> type;
-};
-
-namespace chrono {
-
-// duration_cast
-
-template <class _FromDuration, class _ToDuration,
- class _Period = typename ratio_divide<typename _FromDuration::period, typename _ToDuration::period>::type,
- bool = _Period::num == 1,
- bool = _Period::den == 1>
-struct __duration_cast;
-
-template <class _FromDuration, class _ToDuration, class _Period>
-struct __duration_cast<_FromDuration, _ToDuration, _Period, true, true>
-{
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
- _ToDuration operator()(const _FromDuration& __fd) const
- {
- return _ToDuration(static_cast<typename _ToDuration::rep>(__fd.count()));
- }
-};
-
-template <class _FromDuration, class _ToDuration, class _Period>
-struct __duration_cast<_FromDuration, _ToDuration, _Period, true, false>
-{
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
- _ToDuration operator()(const _FromDuration& __fd) const
- {
- typedef typename common_type<typename _ToDuration::rep, typename _FromDuration::rep, intmax_t>::type _Ct;
- return _ToDuration(static_cast<typename _ToDuration::rep>(
- static_cast<_Ct>(__fd.count()) / static_cast<_Ct>(_Period::den)));
- }
-};
-
-template <class _FromDuration, class _ToDuration, class _Period>
-struct __duration_cast<_FromDuration, _ToDuration, _Period, false, true>
-{
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
- _ToDuration operator()(const _FromDuration& __fd) const
- {
- typedef typename common_type<typename _ToDuration::rep, typename _FromDuration::rep, intmax_t>::type _Ct;
- return _ToDuration(static_cast<typename _ToDuration::rep>(
- static_cast<_Ct>(__fd.count()) * static_cast<_Ct>(_Period::num)));
- }
-};
-
-template <class _FromDuration, class _ToDuration, class _Period>
-struct __duration_cast<_FromDuration, _ToDuration, _Period, false, false>
-{
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
- _ToDuration operator()(const _FromDuration& __fd) const
- {
- typedef typename common_type<typename _ToDuration::rep, typename _FromDuration::rep, intmax_t>::type _Ct;
- return _ToDuration(static_cast<typename _ToDuration::rep>(
- static_cast<_Ct>(__fd.count()) * static_cast<_Ct>(_Period::num)
- / static_cast<_Ct>(_Period::den)));
- }
-};
-
-template <class _ToDuration, class _Rep, class _Period>
-inline _LIBCPP_INLINE_VISIBILITY
-_LIBCPP_CONSTEXPR
-typename enable_if
-<
- __is_duration<_ToDuration>::value,
- _ToDuration
->::type
-duration_cast(const duration<_Rep, _Period>& __fd)
-{
- return __duration_cast<duration<_Rep, _Period>, _ToDuration>()(__fd);
-}
-
-template <class _Rep>
-struct _LIBCPP_TEMPLATE_VIS treat_as_floating_point : is_floating_point<_Rep> {};
-
-#if _LIBCPP_STD_VER > 14
-template <class _Rep>
-inline constexpr bool treat_as_floating_point_v = treat_as_floating_point<_Rep>::value;
-#endif
-
-template <class _Rep>
-struct _LIBCPP_TEMPLATE_VIS duration_values
-{
-public:
- _LIBCPP_INLINE_VISIBILITY static _LIBCPP_CONSTEXPR _Rep zero() _NOEXCEPT {return _Rep(0);}
- _LIBCPP_INLINE_VISIBILITY static _LIBCPP_CONSTEXPR _Rep max() _NOEXCEPT {return numeric_limits<_Rep>::max();}
- _LIBCPP_INLINE_VISIBILITY static _LIBCPP_CONSTEXPR _Rep min() _NOEXCEPT {return numeric_limits<_Rep>::lowest();}
-};
-
-#if _LIBCPP_STD_VER > 14
-template <class _ToDuration, class _Rep, class _Period>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
-typename enable_if
-<
- __is_duration<_ToDuration>::value,
- _ToDuration
->::type
-floor(const duration<_Rep, _Period>& __d)
-{
- _ToDuration __t = duration_cast<_ToDuration>(__d);
- if (__t > __d)
- __t = __t - _ToDuration{1};
- return __t;
-}
-
-template <class _ToDuration, class _Rep, class _Period>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
-typename enable_if
-<
- __is_duration<_ToDuration>::value,
- _ToDuration
->::type
-ceil(const duration<_Rep, _Period>& __d)
-{
- _ToDuration __t = duration_cast<_ToDuration>(__d);
- if (__t < __d)
- __t = __t + _ToDuration{1};
- return __t;
-}
-
-template <class _ToDuration, class _Rep, class _Period>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
-typename enable_if
-<
- __is_duration<_ToDuration>::value,
- _ToDuration
->::type
-round(const duration<_Rep, _Period>& __d)
-{
- _ToDuration __lower = floor<_ToDuration>(__d);
- _ToDuration __upper = __lower + _ToDuration{1};
- auto __lowerDiff = __d - __lower;
- auto __upperDiff = __upper - __d;
- if (__lowerDiff < __upperDiff)
- return __lower;
- if (__lowerDiff > __upperDiff)
- return __upper;
- return __lower.count() & 1 ? __upper : __lower;
-}
-#endif
-
-// duration
-
-template <class _Rep, class _Period>
-class _LIBCPP_TEMPLATE_VIS duration
-{
- static_assert(!__is_duration<_Rep>::value, "A duration representation can not be a duration");
- static_assert(__is_ratio<_Period>::value, "Second template parameter of duration must be a std::ratio");
- static_assert(_Period::num > 0, "duration period must be positive");
-
- template <class _R1, class _R2>
- struct __no_overflow
- {
- private:
- static const intmax_t __gcd_n1_n2 = __static_gcd<_R1::num, _R2::num>::value;
- static const intmax_t __gcd_d1_d2 = __static_gcd<_R1::den, _R2::den>::value;
- static const intmax_t __n1 = _R1::num / __gcd_n1_n2;
- static const intmax_t __d1 = _R1::den / __gcd_d1_d2;
- static const intmax_t __n2 = _R2::num / __gcd_n1_n2;
- static const intmax_t __d2 = _R2::den / __gcd_d1_d2;
- static const intmax_t max = -((intmax_t(1) << (sizeof(intmax_t) * CHAR_BIT - 1)) + 1);
-
- template <intmax_t _Xp, intmax_t _Yp, bool __overflow>
- struct __mul // __overflow == false
- {
- static const intmax_t value = _Xp * _Yp;
- };
-
- template <intmax_t _Xp, intmax_t _Yp>
- struct __mul<_Xp, _Yp, true>
- {
- static const intmax_t value = 1;
- };
-
- public:
- static const bool value = (__n1 <= max / __d2) && (__n2 <= max / __d1);
- typedef ratio<__mul<__n1, __d2, !value>::value,
- __mul<__n2, __d1, !value>::value> type;
- };
-
-public:
- typedef _Rep rep;
- typedef typename _Period::type period;
-private:
- rep __rep_;
-public:
-
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
-#ifndef _LIBCPP_CXX03_LANG
- duration() = default;
-#else
- duration() {}
-#endif
-
- template <class _Rep2>
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
- explicit duration(const _Rep2& __r,
- typename enable_if
- <
- is_convertible<_Rep2, rep>::value &&
- (treat_as_floating_point<rep>::value ||
- !treat_as_floating_point<_Rep2>::value)
- >::type* = nullptr)
- : __rep_(__r) {}
-
- // conversions
- template <class _Rep2, class _Period2>
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
- duration(const duration<_Rep2, _Period2>& __d,
- typename enable_if
- <
- __no_overflow<_Period2, period>::value && (
- treat_as_floating_point<rep>::value ||
- (__no_overflow<_Period2, period>::type::den == 1 &&
- !treat_as_floating_point<_Rep2>::value))
- >::type* = nullptr)
- : __rep_(chrono::duration_cast<duration>(__d).count()) {}
-
- // observer
-
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR rep count() const {return __rep_;}
-
- // arithmetic
-
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR typename common_type<duration>::type operator+() const {return typename common_type<duration>::type(*this);}
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR typename common_type<duration>::type operator-() const {return typename common_type<duration>::type(-__rep_);}
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 duration& operator++() {++__rep_; return *this;}
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 duration operator++(int) {return duration(__rep_++);}
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 duration& operator--() {--__rep_; return *this;}
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 duration operator--(int) {return duration(__rep_--);}
-
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 duration& operator+=(const duration& __d) {__rep_ += __d.count(); return *this;}
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 duration& operator-=(const duration& __d) {__rep_ -= __d.count(); return *this;}
-
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 duration& operator*=(const rep& rhs) {__rep_ *= rhs; return *this;}
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 duration& operator/=(const rep& rhs) {__rep_ /= rhs; return *this;}
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 duration& operator%=(const rep& rhs) {__rep_ %= rhs; return *this;}
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 duration& operator%=(const duration& rhs) {__rep_ %= rhs.count(); return *this;}
-
- // special values
-
- _LIBCPP_INLINE_VISIBILITY static _LIBCPP_CONSTEXPR duration zero() _NOEXCEPT {return duration(duration_values<rep>::zero());}
- _LIBCPP_INLINE_VISIBILITY static _LIBCPP_CONSTEXPR duration min() _NOEXCEPT {return duration(duration_values<rep>::min());}
- _LIBCPP_INLINE_VISIBILITY static _LIBCPP_CONSTEXPR duration max() _NOEXCEPT {return duration(duration_values<rep>::max());}
-};
-
-typedef duration<long long, nano> nanoseconds;
-typedef duration<long long, micro> microseconds;
-typedef duration<long long, milli> milliseconds;
-typedef duration<long long > seconds;
-typedef duration< long, ratio< 60> > minutes;
-typedef duration< long, ratio<3600> > hours;
-#if _LIBCPP_STD_VER > 17
-typedef duration< int, ratio_multiply<ratio<24>, hours::period>> days;
-typedef duration< int, ratio_multiply<ratio<7>, days::period>> weeks;
-typedef duration< int, ratio_multiply<ratio<146097, 400>, days::period>> years;
-typedef duration< int, ratio_divide<years::period, ratio<12>>> months;
-#endif
-// Duration ==
-
-template <class _LhsDuration, class _RhsDuration>
-struct __duration_eq
-{
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
- bool operator()(const _LhsDuration& __lhs, const _RhsDuration& __rhs) const
- {
- typedef typename common_type<_LhsDuration, _RhsDuration>::type _Ct;
- return _Ct(__lhs).count() == _Ct(__rhs).count();
- }
-};
-
-template <class _LhsDuration>
-struct __duration_eq<_LhsDuration, _LhsDuration>
-{
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
- bool operator()(const _LhsDuration& __lhs, const _LhsDuration& __rhs) const
- {return __lhs.count() == __rhs.count();}
-};
-
-template <class _Rep1, class _Period1, class _Rep2, class _Period2>
-inline _LIBCPP_INLINE_VISIBILITY
-_LIBCPP_CONSTEXPR
-bool
-operator==(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs)
-{
- return __duration_eq<duration<_Rep1, _Period1>, duration<_Rep2, _Period2> >()(__lhs, __rhs);
-}
-
-// Duration !=
-
-template <class _Rep1, class _Period1, class _Rep2, class _Period2>
-inline _LIBCPP_INLINE_VISIBILITY
-_LIBCPP_CONSTEXPR
-bool
-operator!=(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs)
-{
- return !(__lhs == __rhs);
-}
-
-// Duration <
-
-template <class _LhsDuration, class _RhsDuration>
-struct __duration_lt
-{
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
- bool operator()(const _LhsDuration& __lhs, const _RhsDuration& __rhs) const
- {
- typedef typename common_type<_LhsDuration, _RhsDuration>::type _Ct;
- return _Ct(__lhs).count() < _Ct(__rhs).count();
- }
-};
-
-template <class _LhsDuration>
-struct __duration_lt<_LhsDuration, _LhsDuration>
-{
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
- bool operator()(const _LhsDuration& __lhs, const _LhsDuration& __rhs) const
- {return __lhs.count() < __rhs.count();}
-};
-
-template <class _Rep1, class _Period1, class _Rep2, class _Period2>
-inline _LIBCPP_INLINE_VISIBILITY
-_LIBCPP_CONSTEXPR
-bool
-operator< (const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs)
-{
- return __duration_lt<duration<_Rep1, _Period1>, duration<_Rep2, _Period2> >()(__lhs, __rhs);
-}
-
-// Duration >
-
-template <class _Rep1, class _Period1, class _Rep2, class _Period2>
-inline _LIBCPP_INLINE_VISIBILITY
-_LIBCPP_CONSTEXPR
-bool
-operator> (const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs)
-{
- return __rhs < __lhs;
-}
-
-// Duration <=
-
-template <class _Rep1, class _Period1, class _Rep2, class _Period2>
-inline _LIBCPP_INLINE_VISIBILITY
-_LIBCPP_CONSTEXPR
-bool
-operator<=(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs)
-{
- return !(__rhs < __lhs);
-}
-
-// Duration >=
-
-template <class _Rep1, class _Period1, class _Rep2, class _Period2>
-inline _LIBCPP_INLINE_VISIBILITY
-_LIBCPP_CONSTEXPR
-bool
-operator>=(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs)
-{
- return !(__lhs < __rhs);
-}
-
-// Duration +
-
-template <class _Rep1, class _Period1, class _Rep2, class _Period2>
-inline _LIBCPP_INLINE_VISIBILITY
-_LIBCPP_CONSTEXPR
-typename common_type<duration<_Rep1, _Period1>, duration<_Rep2, _Period2> >::type
-operator+(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs)
-{
- typedef typename common_type<duration<_Rep1, _Period1>, duration<_Rep2, _Period2> >::type _Cd;
- return _Cd(_Cd(__lhs).count() + _Cd(__rhs).count());
-}
-
-// Duration -
-
-template <class _Rep1, class _Period1, class _Rep2, class _Period2>
-inline _LIBCPP_INLINE_VISIBILITY
-_LIBCPP_CONSTEXPR
-typename common_type<duration<_Rep1, _Period1>, duration<_Rep2, _Period2> >::type
-operator-(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs)
-{
- typedef typename common_type<duration<_Rep1, _Period1>, duration<_Rep2, _Period2> >::type _Cd;
- return _Cd(_Cd(__lhs).count() - _Cd(__rhs).count());
-}
-
-// Duration *
-
-template <class _Rep1, class _Period, class _Rep2>
-inline _LIBCPP_INLINE_VISIBILITY
-_LIBCPP_CONSTEXPR
-typename enable_if
-<
- is_convertible<_Rep2, typename common_type<_Rep1, _Rep2>::type>::value,
- duration<typename common_type<_Rep1, _Rep2>::type, _Period>
->::type
-operator*(const duration<_Rep1, _Period>& __d, const _Rep2& __s)
-{
- typedef typename common_type<_Rep1, _Rep2>::type _Cr;
- typedef duration<_Cr, _Period> _Cd;
- return _Cd(_Cd(__d).count() * static_cast<_Cr>(__s));
-}
-
-template <class _Rep1, class _Period, class _Rep2>
-inline _LIBCPP_INLINE_VISIBILITY
-_LIBCPP_CONSTEXPR
-typename enable_if
-<
- is_convertible<_Rep1, typename common_type<_Rep1, _Rep2>::type>::value,
- duration<typename common_type<_Rep1, _Rep2>::type, _Period>
->::type
-operator*(const _Rep1& __s, const duration<_Rep2, _Period>& __d)
-{
- return __d * __s;
-}
-
-// Duration /
-
-template <class _Rep1, class _Period, class _Rep2>
-inline _LIBCPP_INLINE_VISIBILITY
-_LIBCPP_CONSTEXPR
-typename enable_if
-<
- !__is_duration<_Rep2>::value &&
- is_convertible<_Rep2, typename common_type<_Rep1, _Rep2>::type>::value,
- duration<typename common_type<_Rep1, _Rep2>::type, _Period>
->::type
-operator/(const duration<_Rep1, _Period>& __d, const _Rep2& __s)
-{
- typedef typename common_type<_Rep1, _Rep2>::type _Cr;
- typedef duration<_Cr, _Period> _Cd;
- return _Cd(_Cd(__d).count() / static_cast<_Cr>(__s));
-}
-
-template <class _Rep1, class _Period1, class _Rep2, class _Period2>
-inline _LIBCPP_INLINE_VISIBILITY
-_LIBCPP_CONSTEXPR
-typename common_type<_Rep1, _Rep2>::type
-operator/(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs)
-{
- typedef typename common_type<duration<_Rep1, _Period1>, duration<_Rep2, _Period2> >::type _Ct;
- return _Ct(__lhs).count() / _Ct(__rhs).count();
-}
-
-// Duration %
-
-template <class _Rep1, class _Period, class _Rep2>
-inline _LIBCPP_INLINE_VISIBILITY
-_LIBCPP_CONSTEXPR
-typename enable_if
-<
- !__is_duration<_Rep2>::value &&
- is_convertible<_Rep2, typename common_type<_Rep1, _Rep2>::type>::value,
- duration<typename common_type<_Rep1, _Rep2>::type, _Period>
->::type
-operator%(const duration<_Rep1, _Period>& __d, const _Rep2& __s)
-{
- typedef typename common_type<_Rep1, _Rep2>::type _Cr;
- typedef duration<_Cr, _Period> _Cd;
- return _Cd(_Cd(__d).count() % static_cast<_Cr>(__s));
-}
-
-template <class _Rep1, class _Period1, class _Rep2, class _Period2>
-inline _LIBCPP_INLINE_VISIBILITY
-_LIBCPP_CONSTEXPR
-typename common_type<duration<_Rep1, _Period1>, duration<_Rep2, _Period2> >::type
-operator%(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs)
-{
- typedef typename common_type<_Rep1, _Rep2>::type _Cr;
- typedef typename common_type<duration<_Rep1, _Period1>, duration<_Rep2, _Period2> >::type _Cd;
- return _Cd(static_cast<_Cr>(_Cd(__lhs).count()) % static_cast<_Cr>(_Cd(__rhs).count()));
-}
-
-//////////////////////////////////////////////////////////
-///////////////////// time_point /////////////////////////
-//////////////////////////////////////////////////////////
-
-template <class _Clock, class _Duration = typename _Clock::duration>
-class _LIBCPP_TEMPLATE_VIS time_point
-{
- static_assert(__is_duration<_Duration>::value,
- "Second template parameter of time_point must be a std::chrono::duration");
-public:
- typedef _Clock clock;
- typedef _Duration duration;
- typedef typename duration::rep rep;
- typedef typename duration::period period;
-private:
- duration __d_;
-
-public:
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 time_point() : __d_(duration::zero()) {}
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 explicit time_point(const duration& __d) : __d_(__d) {}
-
- // conversions
- template <class _Duration2>
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11
- time_point(const time_point<clock, _Duration2>& t,
- typename enable_if
- <
- is_convertible<_Duration2, duration>::value
- >::type* = nullptr)
- : __d_(t.time_since_epoch()) {}
-
- // observer
-
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 duration time_since_epoch() const {return __d_;}
-
- // arithmetic
-
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 time_point& operator+=(const duration& __d) {__d_ += __d; return *this;}
- _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 time_point& operator-=(const duration& __d) {__d_ -= __d; return *this;}
-
- // special values
-
- _LIBCPP_INLINE_VISIBILITY static _LIBCPP_CONSTEXPR time_point min() _NOEXCEPT {return time_point(duration::min());}
- _LIBCPP_INLINE_VISIBILITY static _LIBCPP_CONSTEXPR time_point max() _NOEXCEPT {return time_point(duration::max());}
-};
-
-} // namespace chrono
-
-template <class _Clock, class _Duration1, class _Duration2>
-struct _LIBCPP_TEMPLATE_VIS common_type<chrono::time_point<_Clock, _Duration1>,
- chrono::time_point<_Clock, _Duration2> >
-{
- typedef chrono::time_point<_Clock, typename common_type<_Duration1, _Duration2>::type> type;
-};
-
-namespace chrono {
-
-template <class _ToDuration, class _Clock, class _Duration>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11
-time_point<_Clock, _ToDuration>
-time_point_cast(const time_point<_Clock, _Duration>& __t)
-{
- return time_point<_Clock, _ToDuration>(chrono::duration_cast<_ToDuration>(__t.time_since_epoch()));
-}
-
-#if _LIBCPP_STD_VER > 14
-template <class _ToDuration, class _Clock, class _Duration>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
-typename enable_if
-<
- __is_duration<_ToDuration>::value,
- time_point<_Clock, _ToDuration>
->::type
-floor(const time_point<_Clock, _Duration>& __t)
-{
- return time_point<_Clock, _ToDuration>{floor<_ToDuration>(__t.time_since_epoch())};
-}
-
-template <class _ToDuration, class _Clock, class _Duration>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
-typename enable_if
-<
- __is_duration<_ToDuration>::value,
- time_point<_Clock, _ToDuration>
->::type
-ceil(const time_point<_Clock, _Duration>& __t)
-{
- return time_point<_Clock, _ToDuration>{ceil<_ToDuration>(__t.time_since_epoch())};
-}
-
-template <class _ToDuration, class _Clock, class _Duration>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
-typename enable_if
-<
- __is_duration<_ToDuration>::value,
- time_point<_Clock, _ToDuration>
->::type
-round(const time_point<_Clock, _Duration>& __t)
-{
- return time_point<_Clock, _ToDuration>{round<_ToDuration>(__t.time_since_epoch())};
-}
-
-template <class _Rep, class _Period>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
-typename enable_if
-<
- numeric_limits<_Rep>::is_signed,
- duration<_Rep, _Period>
->::type
-abs(duration<_Rep, _Period> __d)
-{
- return __d >= __d.zero() ? +__d : -__d;
-}
-#endif
-
-// time_point ==
-
-template <class _Clock, class _Duration1, class _Duration2>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11
-bool
-operator==(const time_point<_Clock, _Duration1>& __lhs, const time_point<_Clock, _Duration2>& __rhs)
-{
- return __lhs.time_since_epoch() == __rhs.time_since_epoch();
-}
-
-// time_point !=
-
-template <class _Clock, class _Duration1, class _Duration2>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11
-bool
-operator!=(const time_point<_Clock, _Duration1>& __lhs, const time_point<_Clock, _Duration2>& __rhs)
-{
- return !(__lhs == __rhs);
-}
-
-// time_point <
-
-template <class _Clock, class _Duration1, class _Duration2>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11
-bool
-operator<(const time_point<_Clock, _Duration1>& __lhs, const time_point<_Clock, _Duration2>& __rhs)
-{
- return __lhs.time_since_epoch() < __rhs.time_since_epoch();
-}
-
-// time_point >
-
-template <class _Clock, class _Duration1, class _Duration2>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11
-bool
-operator>(const time_point<_Clock, _Duration1>& __lhs, const time_point<_Clock, _Duration2>& __rhs)
-{
- return __rhs < __lhs;
-}
-
-// time_point <=
-
-template <class _Clock, class _Duration1, class _Duration2>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11
-bool
-operator<=(const time_point<_Clock, _Duration1>& __lhs, const time_point<_Clock, _Duration2>& __rhs)
-{
- return !(__rhs < __lhs);
-}
-
-// time_point >=
-
-template <class _Clock, class _Duration1, class _Duration2>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11
-bool
-operator>=(const time_point<_Clock, _Duration1>& __lhs, const time_point<_Clock, _Duration2>& __rhs)
-{
- return !(__lhs < __rhs);
-}
-
-// time_point operator+(time_point x, duration y);
-
-template <class _Clock, class _Duration1, class _Rep2, class _Period2>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11
-time_point<_Clock, typename common_type<_Duration1, duration<_Rep2, _Period2> >::type>
-operator+(const time_point<_Clock, _Duration1>& __lhs, const duration<_Rep2, _Period2>& __rhs)
-{
- typedef time_point<_Clock, typename common_type<_Duration1, duration<_Rep2, _Period2> >::type> _Tr;
- return _Tr (__lhs.time_since_epoch() + __rhs);
-}
-
-// time_point operator+(duration x, time_point y);
-
-template <class _Rep1, class _Period1, class _Clock, class _Duration2>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11
-time_point<_Clock, typename common_type<duration<_Rep1, _Period1>, _Duration2>::type>
-operator+(const duration<_Rep1, _Period1>& __lhs, const time_point<_Clock, _Duration2>& __rhs)
-{
- return __rhs + __lhs;
-}
-
-// time_point operator-(time_point x, duration y);
-
-template <class _Clock, class _Duration1, class _Rep2, class _Period2>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11
-time_point<_Clock, typename common_type<_Duration1, duration<_Rep2, _Period2> >::type>
-operator-(const time_point<_Clock, _Duration1>& __lhs, const duration<_Rep2, _Period2>& __rhs)
-{
- typedef time_point<_Clock, typename common_type<_Duration1, duration<_Rep2, _Period2> >::type> _Ret;
- return _Ret(__lhs.time_since_epoch() -__rhs);
-}
-
-// duration operator-(time_point x, time_point y);
-
-template <class _Clock, class _Duration1, class _Duration2>
-inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11
-typename common_type<_Duration1, _Duration2>::type
-operator-(const time_point<_Clock, _Duration1>& __lhs, const time_point<_Clock, _Duration2>& __rhs)
-{
- return __lhs.time_since_epoch() - __rhs.time_since_epoch();
-}
-
-//////////////////////////////////////////////////////////
-/////////////////////// clocks ///////////////////////////
-//////////////////////////////////////////////////////////
-
-class _LIBCPP_TYPE_VIS system_clock
-{
-public:
- typedef microseconds duration;
- typedef duration::rep rep;
- typedef duration::period period;
- typedef chrono::time_point<system_clock> time_point;
- static _LIBCPP_CONSTEXPR_AFTER_CXX11 const bool is_steady = false;
-
- static time_point now() _NOEXCEPT;
- static time_t to_time_t (const time_point& __t) _NOEXCEPT;
- static time_point from_time_t(time_t __t) _NOEXCEPT;
-};
-
-#ifndef _LIBCPP_HAS_NO_MONOTONIC_CLOCK
-class _LIBCPP_TYPE_VIS steady_clock
-{
-public:
- typedef nanoseconds duration;
- typedef duration::rep rep;
- typedef duration::period period;
- typedef chrono::time_point<steady_clock, duration> time_point;
- static _LIBCPP_CONSTEXPR_AFTER_CXX11 const bool is_steady = true;
-
- static time_point now() _NOEXCEPT;
-};
-
-typedef steady_clock high_resolution_clock;
-#else
-typedef system_clock high_resolution_clock;
-#endif
-
-#if _LIBCPP_STD_VER > 17
-// [time.clock.file], type file_clock
-using file_clock = _VSTD_FS::_FilesystemClock;
-
-template<class _Duration>
-using file_time = time_point<file_clock, _Duration>;
-
-
-template <class _Duration>
-using sys_time = time_point<system_clock, _Duration>;
-using sys_seconds = sys_time<seconds>;
-using sys_days = sys_time<days>;
-
-struct local_t {};
-template<class Duration>
-using local_time = time_point<local_t, Duration>;
-using local_seconds = local_time<seconds>;
-using local_days = local_time<days>;
-
-
-struct last_spec { explicit last_spec() = default; };
-
-class day {
-private:
- unsigned char __d;
-public:
- day() = default;
- explicit inline constexpr day(unsigned __val) noexcept : __d(static_cast<unsigned char>(__val)) {}
- inline constexpr day& operator++() noexcept { ++__d; return *this; }
- inline constexpr day operator++(int) noexcept { day __tmp = *this; ++(*this); return __tmp; }
- inline constexpr day& operator--() noexcept { --__d; return *this; }
- inline constexpr day operator--(int) noexcept { day __tmp = *this; --(*this); return __tmp; }
- constexpr day& operator+=(const days& __dd) noexcept;
- constexpr day& operator-=(const days& __dd) noexcept;
- explicit inline constexpr operator unsigned() const noexcept { return __d; }
- inline constexpr bool ok() const noexcept { return __d >= 1 && __d <= 31; }
- };
-
-
-inline constexpr
-bool operator==(const day& __lhs, const day& __rhs) noexcept
-{ return static_cast<unsigned>(__lhs) == static_cast<unsigned>(__rhs); }
-
-inline constexpr
-bool operator!=(const day& __lhs, const day& __rhs) noexcept
-{ return !(__lhs == __rhs); }
-
-inline constexpr
-bool operator< (const day& __lhs, const day& __rhs) noexcept
-{ return static_cast<unsigned>(__lhs) < static_cast<unsigned>(__rhs); }
-
-inline constexpr
-bool operator> (const day& __lhs, const day& __rhs) noexcept
-{ return __rhs < __lhs; }
-
-inline constexpr
-bool operator<=(const day& __lhs, const day& __rhs) noexcept
-{ return !(__rhs < __lhs);}
-
-inline constexpr
-bool operator>=(const day& __lhs, const day& __rhs) noexcept
-{ return !(__lhs < __rhs); }
-
-inline constexpr
-day operator+ (const day& __lhs, const days& __rhs) noexcept
-{ return day(static_cast<unsigned>(__lhs) + __rhs.count()); }
-
-inline constexpr
-day operator+ (const days& __lhs, const day& __rhs) noexcept
-{ return __rhs + __lhs; }
-
-inline constexpr
-day operator- (const day& __lhs, const days& __rhs) noexcept
-{ return __lhs + -__rhs; }
-
-inline constexpr
-days operator-(const day& __lhs, const day& __rhs) noexcept
-{ return days(static_cast<int>(static_cast<unsigned>(__lhs)) -
- static_cast<int>(static_cast<unsigned>(__rhs))); }
-
-inline constexpr day& day::operator+=(const days& __dd) noexcept
-{ *this = *this + __dd; return *this; }
-
-inline constexpr day& day::operator-=(const days& __dd) noexcept
-{ *this = *this - __dd; return *this; }
-
-
-class month {
-private:
- unsigned char __m;
-public:
- month() = default;
- explicit inline constexpr month(unsigned __val) noexcept : __m(static_cast<unsigned char>(__val)) {}
- inline constexpr month& operator++() noexcept { ++__m; return *this; }
- inline constexpr month operator++(int) noexcept { month __tmp = *this; ++(*this); return __tmp; }
- inline constexpr month& operator--() noexcept { --__m; return *this; }
- inline constexpr month operator--(int) noexcept { month __tmp = *this; --(*this); return __tmp; }
- constexpr month& operator+=(const months& __m1) noexcept;
- constexpr month& operator-=(const months& __m1) noexcept;
- explicit inline constexpr operator unsigned() const noexcept { return __m; }
- inline constexpr bool ok() const noexcept { return __m >= 1 && __m <= 12; }
-};
-
-
-inline constexpr
-bool operator==(const month& __lhs, const month& __rhs) noexcept
-{ return static_cast<unsigned>(__lhs) == static_cast<unsigned>(__rhs); }
-
-inline constexpr
-bool operator!=(const month& __lhs, const month& __rhs) noexcept
-{ return !(__lhs == __rhs); }
-
-inline constexpr
-bool operator< (const month& __lhs, const month& __rhs) noexcept
-{ return static_cast<unsigned>(__lhs) < static_cast<unsigned>(__rhs); }
-
-inline constexpr
-bool operator> (const month& __lhs, const month& __rhs) noexcept
-{ return __rhs < __lhs; }
-
-inline constexpr
-bool operator<=(const month& __lhs, const month& __rhs) noexcept
-{ return !(__rhs < __lhs); }
-
-inline constexpr
-bool operator>=(const month& __lhs, const month& __rhs) noexcept
-{ return !(__lhs < __rhs); }
-
-inline constexpr
-month operator+ (const month& __lhs, const months& __rhs) noexcept
-{
- auto const __mu = static_cast<long long>(static_cast<unsigned>(__lhs)) + (__rhs.count() - 1);
- auto const __yr = (__mu >= 0 ? __mu : __mu - 11) / 12;
- return month{static_cast<unsigned>(__mu - __yr * 12 + 1)};
-}
-
-inline constexpr
-month operator+ (const months& __lhs, const month& __rhs) noexcept
-{ return __rhs + __lhs; }
-
-inline constexpr
-month operator- (const month& __lhs, const months& __rhs) noexcept
-{ return __lhs + -__rhs; }
-
-inline constexpr
-months operator-(const month& __lhs, const month& __rhs) noexcept
-{
- auto const __dm = static_cast<unsigned>(__lhs) - static_cast<unsigned>(__rhs);
- return months(__dm <= 11 ? __dm : __dm + 12);
-}
-
-inline constexpr month& month::operator+=(const months& __dm) noexcept
-{ *this = *this + __dm; return *this; }
-
-inline constexpr month& month::operator-=(const months& __dm) noexcept
-{ *this = *this - __dm; return *this; }
-
-
-class year {
-private:
- short __y;
-public:
- year() = default;
- explicit inline constexpr year(int __val) noexcept : __y(static_cast<short>(__val)) {}
-
- inline constexpr year& operator++() noexcept { ++__y; return *this; }
- inline constexpr year operator++(int) noexcept { year __tmp = *this; ++(*this); return __tmp; }
- inline constexpr year& operator--() noexcept { --__y; return *this; }
- inline constexpr year operator--(int) noexcept { year __tmp = *this; --(*this); return __tmp; }
- constexpr year& operator+=(const years& __dy) noexcept;
- constexpr year& operator-=(const years& __dy) noexcept;
- inline constexpr year operator+() const noexcept { return *this; }
- inline constexpr year operator-() const noexcept { return year{-__y}; }
-
- inline constexpr bool is_leap() const noexcept { return __y % 4 == 0 && (__y % 100 != 0 || __y % 400 == 0); }
- explicit inline constexpr operator int() const noexcept { return __y; }
- constexpr bool ok() const noexcept;
- static inline constexpr year min() noexcept { return year{-32767}; }
- static inline constexpr year max() noexcept { return year{ 32767}; }
-};
-
-
-inline constexpr
-bool operator==(const year& __lhs, const year& __rhs) noexcept
-{ return static_cast<int>(__lhs) == static_cast<int>(__rhs); }
-
-inline constexpr
-bool operator!=(const year& __lhs, const year& __rhs) noexcept
-{ return !(__lhs == __rhs); }
-
-inline constexpr
-bool operator< (const year& __lhs, const year& __rhs) noexcept
-{ return static_cast<int>(__lhs) < static_cast<int>(__rhs); }
-
-inline constexpr
-bool operator> (const year& __lhs, const year& __rhs) noexcept
-{ return __rhs < __lhs; }
-
-inline constexpr
-bool operator<=(const year& __lhs, const year& __rhs) noexcept
-{ return !(__rhs < __lhs); }
-
-inline constexpr
-bool operator>=(const year& __lhs, const year& __rhs) noexcept
-{ return !(__lhs < __rhs); }
-
-inline constexpr
-year operator+ (const year& __lhs, const years& __rhs) noexcept
-{ return year(static_cast<int>(__lhs) + __rhs.count()); }
-
-inline constexpr
-year operator+ (const years& __lhs, const year& __rhs) noexcept
-{ return __rhs + __lhs; }
-
-inline constexpr
-year operator- (const year& __lhs, const years& __rhs) noexcept
-{ return __lhs + -__rhs; }
-
-inline constexpr
-years operator-(const year& __lhs, const year& __rhs) noexcept
-{ return years{static_cast<int>(__lhs) - static_cast<int>(__rhs)}; }
-
-
-inline constexpr year& year::operator+=(const years& __dy) noexcept
-{ *this = *this + __dy; return *this; }
-
-inline constexpr year& year::operator-=(const years& __dy) noexcept
-{ *this = *this - __dy; return *this; }
-
-inline constexpr bool year::ok() const noexcept
-{ return static_cast<int>(min()) <= __y && __y <= static_cast<int>(max()); }
-
-class weekday_indexed;
-class weekday_last;
-
-class weekday {
-private:
- unsigned char __wd;
- static constexpr unsigned char __weekday_from_days(int __days) noexcept;
-public:
- weekday() = default;
- inline explicit constexpr weekday(unsigned __val) noexcept : __wd(static_cast<unsigned char>(__val == 7 ? 0 : __val)) {}
- inline constexpr weekday(const sys_days& __sysd) noexcept
- : __wd(__weekday_from_days(__sysd.time_since_epoch().count())) {}
- inline explicit constexpr weekday(const local_days& __locd) noexcept
- : __wd(__weekday_from_days(__locd.time_since_epoch().count())) {}
-
- inline constexpr weekday& operator++() noexcept { __wd = (__wd == 6 ? 0 : __wd + 1); return *this; }
- inline constexpr weekday operator++(int) noexcept { weekday __tmp = *this; ++(*this); return __tmp; }
- inline constexpr weekday& operator--() noexcept { __wd = (__wd == 0 ? 6 : __wd - 1); return *this; }
- inline constexpr weekday operator--(int) noexcept { weekday __tmp = *this; --(*this); return __tmp; }
- constexpr weekday& operator+=(const days& __dd) noexcept;
- constexpr weekday& operator-=(const days& __dd) noexcept;
- inline constexpr unsigned c_encoding() const noexcept { return __wd; }
- inline constexpr unsigned iso_encoding() const noexcept { return __wd == 0u ? 7 : __wd; }
- inline constexpr bool ok() const noexcept { return __wd <= 6; }
- constexpr weekday_indexed operator[](unsigned __index) const noexcept;
- constexpr weekday_last operator[](last_spec) const noexcept;
-};
-
-
-// https://howardhinnant.github.io/date_algorithms.html#weekday_from_days
-inline constexpr
-unsigned char weekday::__weekday_from_days(int __days) noexcept
-{
- return static_cast<unsigned char>(
- static_cast<unsigned>(__days >= -4 ? (__days+4) % 7 : (__days+5) % 7 + 6)
- );
-}
-
-inline constexpr
-bool operator==(const weekday& __lhs, const weekday& __rhs) noexcept
-{ return __lhs.c_encoding() == __rhs.c_encoding(); }
-
-inline constexpr
-bool operator!=(const weekday& __lhs, const weekday& __rhs) noexcept
-{ return !(__lhs == __rhs); }
-
-inline constexpr
-bool operator< (const weekday& __lhs, const weekday& __rhs) noexcept
-{ return __lhs.c_encoding() < __rhs.c_encoding(); }
-
-inline constexpr
-bool operator> (const weekday& __lhs, const weekday& __rhs) noexcept
-{ return __rhs < __lhs; }
-
-inline constexpr
-bool operator<=(const weekday& __lhs, const weekday& __rhs) noexcept
-{ return !(__rhs < __lhs);}
-
-inline constexpr
-bool operator>=(const weekday& __lhs, const weekday& __rhs) noexcept
-{ return !(__lhs < __rhs); }
-
-constexpr weekday operator+(const weekday& __lhs, const days& __rhs) noexcept
-{
- auto const __mu = static_cast<long long>(__lhs.c_encoding()) + __rhs.count();
- auto const __yr = (__mu >= 0 ? __mu : __mu - 6) / 7;
- return weekday{static_cast<unsigned>(__mu - __yr * 7)};
-}
-
-constexpr weekday operator+(const days& __lhs, const weekday& __rhs) noexcept
-{ return __rhs + __lhs; }
-
-constexpr weekday operator-(const weekday& __lhs, const days& __rhs) noexcept
-{ return __lhs + -__rhs; }
-
-constexpr days operator-(const weekday& __lhs, const weekday& __rhs) noexcept
-{
- const int __wdu = __lhs.c_encoding() - __rhs.c_encoding();
- const int __wk = (__wdu >= 0 ? __wdu : __wdu-6) / 7;
- return days{__wdu - __wk * 7};
-}
-
-inline constexpr weekday& weekday::operator+=(const days& __dd) noexcept
-{ *this = *this + __dd; return *this; }
-
-inline constexpr weekday& weekday::operator-=(const days& __dd) noexcept
-{ *this = *this - __dd; return *this; }
-
-
-class weekday_indexed {
-private:
- chrono::weekday __wd;
- unsigned char __idx;
-public:
- weekday_indexed() = default;
- inline constexpr weekday_indexed(const chrono::weekday& __wdval, unsigned __idxval) noexcept
- : __wd{__wdval}, __idx(__idxval) {}
- inline constexpr chrono::weekday weekday() const noexcept { return __wd; }
- inline constexpr unsigned index() const noexcept { return __idx; }
- inline constexpr bool ok() const noexcept { return __wd.ok() && __idx >= 1 && __idx <= 5; }
-};
-
-inline constexpr
-bool operator==(const weekday_indexed& __lhs, const weekday_indexed& __rhs) noexcept
-{ return __lhs.weekday() == __rhs.weekday() && __lhs.index() == __rhs.index(); }
-
-inline constexpr
-bool operator!=(const weekday_indexed& __lhs, const weekday_indexed& __rhs) noexcept
-{ return !(__lhs == __rhs); }
-
-
-class weekday_last {
-private:
- chrono::weekday __wd;
-public:
- explicit constexpr weekday_last(const chrono::weekday& __val) noexcept
- : __wd{__val} {}
- constexpr chrono::weekday weekday() const noexcept { return __wd; }
- constexpr bool ok() const noexcept { return __wd.ok(); }
-};
-
-inline constexpr
-bool operator==(const weekday_last& __lhs, const weekday_last& __rhs) noexcept
-{ return __lhs.weekday() == __rhs.weekday(); }
-
-inline constexpr
-bool operator!=(const weekday_last& __lhs, const weekday_last& __rhs) noexcept
-{ return !(__lhs == __rhs); }
-
-inline constexpr
-weekday_indexed weekday::operator[](unsigned __index) const noexcept { return weekday_indexed{*this, __index}; }
-
-inline constexpr
-weekday_last weekday::operator[](last_spec) const noexcept { return weekday_last{*this}; }
-
-
-inline constexpr last_spec last{};
-inline constexpr weekday Sunday{0};
-inline constexpr weekday Monday{1};
-inline constexpr weekday Tuesday{2};
-inline constexpr weekday Wednesday{3};
-inline constexpr weekday Thursday{4};
-inline constexpr weekday Friday{5};
-inline constexpr weekday Saturday{6};
-
-inline constexpr month January{1};
-inline constexpr month February{2};
-inline constexpr month March{3};
-inline constexpr month April{4};
-inline constexpr month May{5};
-inline constexpr month June{6};
-inline constexpr month July{7};
-inline constexpr month August{8};
-inline constexpr month September{9};
-inline constexpr month October{10};
-inline constexpr month November{11};
-inline constexpr month December{12};
-
-
-class month_day {
-private:
- chrono::month __m;
- chrono::day __d;
-public:
- month_day() = default;
- constexpr month_day(const chrono::month& __mval, const chrono::day& __dval) noexcept
- : __m{__mval}, __d{__dval} {}
- inline constexpr chrono::month month() const noexcept { return __m; }
- inline constexpr chrono::day day() const noexcept { return __d; }
- constexpr bool ok() const noexcept;
-};
-
-inline constexpr
-bool month_day::ok() const noexcept
-{
- if (!__m.ok()) return false;
- const unsigned __dval = static_cast<unsigned>(__d);
- if (__dval < 1 || __dval > 31) return false;
- if (__dval <= 29) return true;
-// Now we've got either 30 or 31
- const unsigned __mval = static_cast<unsigned>(__m);
- if (__mval == 2) return false;
- if (__mval == 4 || __mval == 6 || __mval == 9 || __mval == 11)
- return __dval == 30;
- return true;
-}
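// Sketch only (not part of this patch): month_day::ok() accepts any month/day
// pair that is valid in at least some year, which is why February 29 passes
// here even though no year is attached yet.
#include <chrono>
using namespace std::chrono;
static_assert((February/29).ok());    // valid in leap years
static_assert(!(February/30).ok());   // never valid
static_assert(!(April/31).ok());      // April has only 30 days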
-
-inline constexpr
-bool operator==(const month_day& __lhs, const month_day& __rhs) noexcept
-{ return __lhs.month() == __rhs.month() && __lhs.day() == __rhs.day(); }
-
-inline constexpr
-bool operator!=(const month_day& __lhs, const month_day& __rhs) noexcept
-{ return !(__lhs == __rhs); }
-
-inline constexpr
-month_day operator/(const month& __lhs, const day& __rhs) noexcept
-{ return month_day{__lhs, __rhs}; }
-
-constexpr
-month_day operator/(const day& __lhs, const month& __rhs) noexcept
-{ return __rhs / __lhs; }
-
-inline constexpr
-month_day operator/(const month& __lhs, int __rhs) noexcept
-{ return __lhs / day(__rhs); }
-
-constexpr
-month_day operator/(int __lhs, const day& __rhs) noexcept
-{ return month(__lhs) / __rhs; }
-
-constexpr
-month_day operator/(const day& __lhs, int __rhs) noexcept
-{ return month(__rhs) / __lhs; }
-
-
-inline constexpr
-bool operator< (const month_day& __lhs, const month_day& __rhs) noexcept
-{ return __lhs.month() != __rhs.month() ? __lhs.month() < __rhs.month() : __lhs.day() < __rhs.day(); }
-
-inline constexpr
-bool operator> (const month_day& __lhs, const month_day& __rhs) noexcept
-{ return __rhs < __lhs; }
-
-inline constexpr
-bool operator<=(const month_day& __lhs, const month_day& __rhs) noexcept
-{ return !(__rhs < __lhs);}
-
-inline constexpr
-bool operator>=(const month_day& __lhs, const month_day& __rhs) noexcept
-{ return !(__lhs < __rhs); }
-
-
-
-class month_day_last {
-private:
- chrono::month __m;
-public:
- explicit constexpr month_day_last(const chrono::month& __val) noexcept
- : __m{__val} {}
- inline constexpr chrono::month month() const noexcept { return __m; }
- inline constexpr bool ok() const noexcept { return __m.ok(); }
-};
-
-inline constexpr
-bool operator==(const month_day_last& __lhs, const month_day_last& __rhs) noexcept
-{ return __lhs.month() == __rhs.month(); }
-
-inline constexpr
-bool operator!=(const month_day_last& __lhs, const month_day_last& __rhs) noexcept
-{ return !(__lhs == __rhs); }
-
-inline constexpr
-bool operator< (const month_day_last& __lhs, const month_day_last& __rhs) noexcept
-{ return __lhs.month() < __rhs.month(); }
-
-inline constexpr
-bool operator> (const month_day_last& __lhs, const month_day_last& __rhs) noexcept
-{ return __rhs < __lhs; }
-
-inline constexpr
-bool operator<=(const month_day_last& __lhs, const month_day_last& __rhs) noexcept
-{ return !(__rhs < __lhs);}
-
-inline constexpr
-bool operator>=(const month_day_last& __lhs, const month_day_last& __rhs) noexcept
-{ return !(__lhs < __rhs); }
-
-inline constexpr
-month_day_last operator/(const month& __lhs, last_spec) noexcept
-{ return month_day_last{__lhs}; }
-
-inline constexpr
-month_day_last operator/(last_spec, const month& __rhs) noexcept
-{ return month_day_last{__rhs}; }
-
-inline constexpr
-month_day_last operator/(int __lhs, last_spec) noexcept
-{ return month_day_last{month(__lhs)}; }
-
-inline constexpr
-month_day_last operator/(last_spec, int __rhs) noexcept
-{ return month_day_last{month(__rhs)}; }
-
-
-class month_weekday {
-private:
- chrono::month __m;
- chrono::weekday_indexed __wdi;
-public:
- month_weekday() = default;
- constexpr month_weekday(const chrono::month& __mval, const chrono::weekday_indexed& __wdival) noexcept
- : __m{__mval}, __wdi{__wdival} {}
- inline constexpr chrono::month month() const noexcept { return __m; }
- inline constexpr chrono::weekday_indexed weekday_indexed() const noexcept { return __wdi; }
- inline constexpr bool ok() const noexcept { return __m.ok() && __wdi.ok(); }
-};
-
-inline constexpr
-bool operator==(const month_weekday& __lhs, const month_weekday& __rhs) noexcept
-{ return __lhs.month() == __rhs.month() && __lhs.weekday_indexed() == __rhs.weekday_indexed(); }
-
-inline constexpr
-bool operator!=(const month_weekday& __lhs, const month_weekday& __rhs) noexcept
-{ return !(__lhs == __rhs); }
-
-inline constexpr
-month_weekday operator/(const month& __lhs, const weekday_indexed& __rhs) noexcept
-{ return month_weekday{__lhs, __rhs}; }
-
-inline constexpr
-month_weekday operator/(int __lhs, const weekday_indexed& __rhs) noexcept
-{ return month_weekday{month(__lhs), __rhs}; }
-
-inline constexpr
-month_weekday operator/(const weekday_indexed& __lhs, const month& __rhs) noexcept
-{ return month_weekday{__rhs, __lhs}; }
-
-inline constexpr
-month_weekday operator/(const weekday_indexed& __lhs, int __rhs) noexcept
-{ return month_weekday{month(__rhs), __lhs}; }
-
-
-class month_weekday_last {
- chrono::month __m;
- chrono::weekday_last __wdl;
- public:
- constexpr month_weekday_last(const chrono::month& __mval, const chrono::weekday_last& __wdlval) noexcept
- : __m{__mval}, __wdl{__wdlval} {}
- inline constexpr chrono::month month() const noexcept { return __m; }
- inline constexpr chrono::weekday_last weekday_last() const noexcept { return __wdl; }
- inline constexpr bool ok() const noexcept { return __m.ok() && __wdl.ok(); }
-};
-
-inline constexpr
-bool operator==(const month_weekday_last& __lhs, const month_weekday_last& __rhs) noexcept
-{ return __lhs.month() == __rhs.month() && __lhs.weekday_last() == __rhs.weekday_last(); }
-
-inline constexpr
-bool operator!=(const month_weekday_last& __lhs, const month_weekday_last& __rhs) noexcept
-{ return !(__lhs == __rhs); }
-
-
-inline constexpr
-month_weekday_last operator/(const month& __lhs, const weekday_last& __rhs) noexcept
-{ return month_weekday_last{__lhs, __rhs}; }
-
-inline constexpr
-month_weekday_last operator/(int __lhs, const weekday_last& __rhs) noexcept
-{ return month_weekday_last{month(__lhs), __rhs}; }
-
-inline constexpr
-month_weekday_last operator/(const weekday_last& __lhs, const month& __rhs) noexcept
-{ return month_weekday_last{__rhs, __lhs}; }
-
-inline constexpr
-month_weekday_last operator/(const weekday_last& __lhs, int __rhs) noexcept
-{ return month_weekday_last{month(__rhs), __lhs}; }
-
-
-class year_month {
- chrono::year __y;
- chrono::month __m;
-public:
- year_month() = default;
- constexpr year_month(const chrono::year& __yval, const chrono::month& __mval) noexcept
- : __y{__yval}, __m{__mval} {}
- inline constexpr chrono::year year() const noexcept { return __y; }
- inline constexpr chrono::month month() const noexcept { return __m; }
- inline constexpr year_month& operator+=(const months& __dm) noexcept { this->__m += __dm; return *this; }
- inline constexpr year_month& operator-=(const months& __dm) noexcept { this->__m -= __dm; return *this; }
- inline constexpr year_month& operator+=(const years& __dy) noexcept { this->__y += __dy; return *this; }
- inline constexpr year_month& operator-=(const years& __dy) noexcept { this->__y -= __dy; return *this; }
- inline constexpr bool ok() const noexcept { return __y.ok() && __m.ok(); }
-};
-
-inline constexpr
-year_month operator/(const year& __y, const month& __m) noexcept { return year_month{__y, __m}; }
-
-inline constexpr
-year_month operator/(const year& __y, int __m) noexcept { return year_month{__y, month(__m)}; }
-
-inline constexpr
-bool operator==(const year_month& __lhs, const year_month& __rhs) noexcept
-{ return __lhs.year() == __rhs.year() && __lhs.month() == __rhs.month(); }
-
-inline constexpr
-bool operator!=(const year_month& __lhs, const year_month& __rhs) noexcept
-{ return !(__lhs == __rhs); }
-
-inline constexpr
-bool operator< (const year_month& __lhs, const year_month& __rhs) noexcept
-{ return __lhs.year() != __rhs.year() ? __lhs.year() < __rhs.year() : __lhs.month() < __rhs.month(); }
-
-inline constexpr
-bool operator> (const year_month& __lhs, const year_month& __rhs) noexcept
-{ return __rhs < __lhs; }
-
-inline constexpr
-bool operator<=(const year_month& __lhs, const year_month& __rhs) noexcept
-{ return !(__rhs < __lhs);}
-
-inline constexpr
-bool operator>=(const year_month& __lhs, const year_month& __rhs) noexcept
-{ return !(__lhs < __rhs); }
-
-constexpr year_month operator+(const year_month& __lhs, const months& __rhs) noexcept
-{
- int __dmi = static_cast<int>(static_cast<unsigned>(__lhs.month())) - 1 + __rhs.count();
- const int __dy = (__dmi >= 0 ? __dmi : __dmi-11) / 12;
- __dmi = __dmi - __dy * 12 + 1;
- return (__lhs.year() + years(__dy)) / month(static_cast<unsigned>(__dmi));
-}
-
-constexpr year_month operator+(const months& __lhs, const year_month& __rhs) noexcept
-{ return __rhs + __lhs; }
-
-constexpr year_month operator+(const year_month& __lhs, const years& __rhs) noexcept
-{ return (__lhs.year() + __rhs) / __lhs.month(); }
-
-constexpr year_month operator+(const years& __lhs, const year_month& __rhs) noexcept
-{ return __rhs + __lhs; }
-
-constexpr months operator-(const year_month& __lhs, const year_month& __rhs) noexcept
-{ return (__lhs.year() - __rhs.year()) + months(static_cast<unsigned>(__lhs.month()) - static_cast<unsigned>(__rhs.month())); }
-
-constexpr year_month operator-(const year_month& __lhs, const months& __rhs) noexcept
-{ return __lhs + -__rhs; }
-
-constexpr year_month operator-(const year_month& __lhs, const years& __rhs) noexcept
-{ return __lhs + -__rhs; }
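// Sketch only (not part of this patch): operator+ above shifts to a zero-based
// month index before dividing by 12, so adding or subtracting months carries
// into the year as expected.
#include <chrono>
using namespace std::chrono;
static_assert(year{2021}/December + months{2} == year{2022}/February);
static_assert(year{2022}/January - months{1} == year{2021}/December);
static_assert(year{2022}/March - year{2021}/March == months{12});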
-
-class year_month_day_last;
-
-class year_month_day {
-private:
- chrono::year __y;
- chrono::month __m;
- chrono::day __d;
-public:
- year_month_day() = default;
- inline constexpr year_month_day(
- const chrono::year& __yval, const chrono::month& __mval, const chrono::day& __dval) noexcept
- : __y{__yval}, __m{__mval}, __d{__dval} {}
- constexpr year_month_day(const year_month_day_last& __ymdl) noexcept;
- inline constexpr year_month_day(const sys_days& __sysd) noexcept
- : year_month_day(__from_days(__sysd.time_since_epoch())) {}
- inline explicit constexpr year_month_day(const local_days& __locd) noexcept
- : year_month_day(__from_days(__locd.time_since_epoch())) {}
-
- constexpr year_month_day& operator+=(const months& __dm) noexcept;
- constexpr year_month_day& operator-=(const months& __dm) noexcept;
- constexpr year_month_day& operator+=(const years& __dy) noexcept;
- constexpr year_month_day& operator-=(const years& __dy) noexcept;
-
- inline constexpr chrono::year year() const noexcept { return __y; }
- inline constexpr chrono::month month() const noexcept { return __m; }
- inline constexpr chrono::day day() const noexcept { return __d; }
- inline constexpr operator sys_days() const noexcept { return sys_days{__to_days()}; }
- inline explicit constexpr operator local_days() const noexcept { return local_days{__to_days()}; }
-
- constexpr bool ok() const noexcept;
-
- static constexpr year_month_day __from_days(days __d) noexcept;
- constexpr days __to_days() const noexcept;
-};
-
-
-// https://howardhinnant.github.io/date_algorithms.html#civil_from_days
-inline constexpr
-year_month_day
-year_month_day::__from_days(days __d) noexcept
-{
- static_assert(numeric_limits<unsigned>::digits >= 18, "");
- static_assert(numeric_limits<int>::digits >= 20 , "");
- const int __z = __d.count() + 719468;
- const int __era = (__z >= 0 ? __z : __z - 146096) / 146097;
- const unsigned __doe = static_cast<unsigned>(__z - __era * 146097); // [0, 146096]
- const unsigned __yoe = (__doe - __doe/1460 + __doe/36524 - __doe/146096) / 365; // [0, 399]
- const int __yr = static_cast<int>(__yoe) + __era * 400;
- const unsigned __doy = __doe - (365 * __yoe + __yoe/4 - __yoe/100); // [0, 365]
- const unsigned __mp = (5 * __doy + 2)/153; // [0, 11]
- const unsigned __dy = __doy - (153 * __mp + 2)/5 + 1; // [1, 31]
- const unsigned __mth = __mp + (__mp < 10 ? 3 : -9); // [1, 12]
- return year_month_day{chrono::year{__yr + (__mth <= 2)}, chrono::month{__mth}, chrono::day{__dy}};
-}
-
-// https://howardhinnant.github.io/date_algorithms.html#days_from_civil
-inline constexpr days year_month_day::__to_days() const noexcept
-{
- static_assert(numeric_limits<unsigned>::digits >= 18, "");
- static_assert(numeric_limits<int>::digits >= 20 , "");
-
- const int __yr = static_cast<int>(__y) - (__m <= February);
- const unsigned __mth = static_cast<unsigned>(__m);
- const unsigned __dy = static_cast<unsigned>(__d);
-
- const int __era = (__yr >= 0 ? __yr : __yr - 399) / 400;
- const unsigned __yoe = static_cast<unsigned>(__yr - __era * 400); // [0, 399]
- const unsigned __doy = (153 * (__mth + (__mth > 2 ? -3 : 9)) + 2) / 5 + __dy-1; // [0, 365]
- const unsigned __doe = __yoe * 365 + __yoe/4 - __yoe/100 + __doy; // [0, 146096]
- return days{__era * 146097 + static_cast<int>(__doe) - 719468};
-}
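// Illustrative sketch (not part of this patch): __from_days()/__to_days() are
// Howard Hinnant's civil-date algorithms, mapping a date to its day count from
// 1970-01-01 in 400-year (146097-day) eras. Exercised via the public conversions:
#include <chrono>
using namespace std::chrono;
static_assert(sys_days{year{1970}/January/1}.time_since_epoch() == days{0});
static_assert(sys_days{year{2000}/March/1}.time_since_epoch() == days{11017});
static_assert(year_month_day{sys_days{days{11017}}} == year{2000}/March/1);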
-
-inline constexpr
-bool operator==(const year_month_day& __lhs, const year_month_day& __rhs) noexcept
-{ return __lhs.year() == __rhs.year() && __lhs.month() == __rhs.month() && __lhs.day() == __rhs.day(); }
-
-inline constexpr
-bool operator!=(const year_month_day& __lhs, const year_month_day& __rhs) noexcept
-{ return !(__lhs == __rhs); }
-
-inline constexpr
-bool operator< (const year_month_day& __lhs, const year_month_day& __rhs) noexcept
-{
- if (__lhs.year() < __rhs.year()) return true;
- if (__lhs.year() > __rhs.year()) return false;
- if (__lhs.month() < __rhs.month()) return true;
- if (__lhs.month() > __rhs.month()) return false;
- return __lhs.day() < __rhs.day();
-}
-
-inline constexpr
-bool operator> (const year_month_day& __lhs, const year_month_day& __rhs) noexcept
-{ return __rhs < __lhs; }
-
-inline constexpr
-bool operator<=(const year_month_day& __lhs, const year_month_day& __rhs) noexcept
-{ return !(__rhs < __lhs);}
-
-inline constexpr
-bool operator>=(const year_month_day& __lhs, const year_month_day& __rhs) noexcept
-{ return !(__lhs < __rhs); }
-
-inline constexpr
-year_month_day operator/(const year_month& __lhs, const day& __rhs) noexcept
-{ return year_month_day{__lhs.year(), __lhs.month(), __rhs}; }
-
-inline constexpr
-year_month_day operator/(const year_month& __lhs, int __rhs) noexcept
-{ return __lhs / day(__rhs); }
-
-inline constexpr
-year_month_day operator/(const year& __lhs, const month_day& __rhs) noexcept
-{ return __lhs / __rhs.month() / __rhs.day(); }
-
-inline constexpr
-year_month_day operator/(int __lhs, const month_day& __rhs) noexcept
-{ return year(__lhs) / __rhs; }
-
-inline constexpr
-year_month_day operator/(const month_day& __lhs, const year& __rhs) noexcept
-{ return __rhs / __lhs; }
-
-inline constexpr
-year_month_day operator/(const month_day& __lhs, int __rhs) noexcept
-{ return year(__rhs) / __lhs; }
-
-
-inline constexpr
-year_month_day operator+(const year_month_day& __lhs, const months& __rhs) noexcept
-{ return (__lhs.year()/__lhs.month() + __rhs)/__lhs.day(); }
-
-inline constexpr
-year_month_day operator+(const months& __lhs, const year_month_day& __rhs) noexcept
-{ return __rhs + __lhs; }
-
-inline constexpr
-year_month_day operator-(const year_month_day& __lhs, const months& __rhs) noexcept
-{ return __lhs + -__rhs; }
-
-inline constexpr
-year_month_day operator+(const year_month_day& __lhs, const years& __rhs) noexcept
-{ return (__lhs.year() + __rhs) / __lhs.month() / __lhs.day(); }
-
-inline constexpr
-year_month_day operator+(const years& __lhs, const year_month_day& __rhs) noexcept
-{ return __rhs + __lhs; }
-
-inline constexpr
-year_month_day operator-(const year_month_day& __lhs, const years& __rhs) noexcept
-{ return __lhs + -__rhs; }
-
-inline constexpr year_month_day& year_month_day::operator+=(const months& __dm) noexcept { *this = *this + __dm; return *this; }
-inline constexpr year_month_day& year_month_day::operator-=(const months& __dm) noexcept { *this = *this - __dm; return *this; }
-inline constexpr year_month_day& year_month_day::operator+=(const years& __dy) noexcept { *this = *this + __dy; return *this; }
-inline constexpr year_month_day& year_month_day::operator-=(const years& __dy) noexcept { *this = *this - __dy; return *this; }
-
-class year_month_day_last {
-private:
- chrono::year __y;
- chrono::month_day_last __mdl;
-public:
- constexpr year_month_day_last(const year& __yval, const month_day_last& __mdlval) noexcept
- : __y{__yval}, __mdl{__mdlval} {}
-
- constexpr year_month_day_last& operator+=(const months& __m) noexcept;
- constexpr year_month_day_last& operator-=(const months& __m) noexcept;
- constexpr year_month_day_last& operator+=(const years& __y) noexcept;
- constexpr year_month_day_last& operator-=(const years& __y) noexcept;
-
- inline constexpr chrono::year year() const noexcept { return __y; }
- inline constexpr chrono::month month() const noexcept { return __mdl.month(); }
- inline constexpr chrono::month_day_last month_day_last() const noexcept { return __mdl; }
- constexpr chrono::day day() const noexcept;
- inline constexpr operator sys_days() const noexcept { return sys_days{year()/month()/day()}; }
- inline explicit constexpr operator local_days() const noexcept { return local_days{year()/month()/day()}; }
- inline constexpr bool ok() const noexcept { return __y.ok() && __mdl.ok(); }
-};
-
-inline constexpr
-chrono::day year_month_day_last::day() const noexcept
-{
- constexpr chrono::day __d[] =
- {
- chrono::day(31), chrono::day(28), chrono::day(31),
- chrono::day(30), chrono::day(31), chrono::day(30),
- chrono::day(31), chrono::day(31), chrono::day(30),
- chrono::day(31), chrono::day(30), chrono::day(31)
- };
- return (month() != February || !__y.is_leap()) && month().ok() ?
- __d[static_cast<unsigned>(month()) - 1] : chrono::day{29};
-}
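// Sketch only (not part of this patch): day() reads the month length from the
// table above and special-cases February of leap years.
#include <chrono>
using namespace std::chrono;
static_assert((year{2020}/February/last).day() == day{29});   // leap year
static_assert((year{2021}/February/last).day() == day{28});
static_assert((year{2021}/April/last).day() == day{30});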
-
-inline constexpr
-bool operator==(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept
-{ return __lhs.year() == __rhs.year() && __lhs.month_day_last() == __rhs.month_day_last(); }
-
-inline constexpr
-bool operator!=(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept
-{ return !(__lhs == __rhs); }
-
-inline constexpr
-bool operator< (const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept
-{
- if (__lhs.year() < __rhs.year()) return true;
- if (__lhs.year() > __rhs.year()) return false;
- return __lhs.month_day_last() < __rhs.month_day_last();
-}
-
-inline constexpr
-bool operator> (const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept
-{ return __rhs < __lhs; }
-
-inline constexpr
-bool operator<=(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept
-{ return !(__rhs < __lhs);}
-
-inline constexpr
-bool operator>=(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept
-{ return !(__lhs < __rhs); }
-
-inline constexpr year_month_day_last operator/(const year_month& __lhs, last_spec) noexcept
-{ return year_month_day_last{__lhs.year(), month_day_last{__lhs.month()}}; }
-
-inline constexpr year_month_day_last operator/(const year& __lhs, const month_day_last& __rhs) noexcept
-{ return year_month_day_last{__lhs, __rhs}; }
-
-inline constexpr year_month_day_last operator/(int __lhs, const month_day_last& __rhs) noexcept
-{ return year_month_day_last{year{__lhs}, __rhs}; }
-
-inline constexpr year_month_day_last operator/(const month_day_last& __lhs, const year& __rhs) noexcept
-{ return __rhs / __lhs; }
-
-inline constexpr year_month_day_last operator/(const month_day_last& __lhs, int __rhs) noexcept
-{ return year{__rhs} / __lhs; }
-
-
-inline constexpr
-year_month_day_last operator+(const year_month_day_last& __lhs, const months& __rhs) noexcept
-{ return (__lhs.year() / __lhs.month() + __rhs) / last; }
-
-inline constexpr
-year_month_day_last operator+(const months& __lhs, const year_month_day_last& __rhs) noexcept
-{ return __rhs + __lhs; }
-
-inline constexpr
-year_month_day_last operator-(const year_month_day_last& __lhs, const months& __rhs) noexcept
-{ return __lhs + (-__rhs); }
-
-inline constexpr
-year_month_day_last operator+(const year_month_day_last& __lhs, const years& __rhs) noexcept
-{ return year_month_day_last{__lhs.year() + __rhs, __lhs.month_day_last()}; }
-
-inline constexpr
-year_month_day_last operator+(const years& __lhs, const year_month_day_last& __rhs) noexcept
-{ return __rhs + __lhs; }
-
-inline constexpr
-year_month_day_last operator-(const year_month_day_last& __lhs, const years& __rhs) noexcept
-{ return __lhs + (-__rhs); }
-
-inline constexpr year_month_day_last& year_month_day_last::operator+=(const months& __dm) noexcept { *this = *this + __dm; return *this; }
-inline constexpr year_month_day_last& year_month_day_last::operator-=(const months& __dm) noexcept { *this = *this - __dm; return *this; }
-inline constexpr year_month_day_last& year_month_day_last::operator+=(const years& __dy) noexcept { *this = *this + __dy; return *this; }
-inline constexpr year_month_day_last& year_month_day_last::operator-=(const years& __dy) noexcept { *this = *this - __dy; return *this; }
-
-inline constexpr year_month_day::year_month_day(const year_month_day_last& __ymdl) noexcept
- : __y{__ymdl.year()}, __m{__ymdl.month()}, __d{__ymdl.day()} {}
-
-inline constexpr bool year_month_day::ok() const noexcept
-{
- if (!__y.ok() || !__m.ok()) return false;
- return chrono::day{1} <= __d && __d <= (__y / __m / last).day();
-}
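// Sketch only (not part of this patch): unlike month_day::ok(), this overload
// knows the year, so it can reject February 29 in non-leap years by comparing
// against (year/month/last).day().
#include <chrono>
using namespace std::chrono;
static_assert((year{2020}/February/29).ok());
static_assert(!(year{2021}/February/29).ok());
static_assert(!(year{2021}/June/31).ok());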
-
-class year_month_weekday {
- chrono::year __y;
- chrono::month __m;
- chrono::weekday_indexed __wdi;
-public:
- year_month_weekday() = default;
- constexpr year_month_weekday(const chrono::year& __yval, const chrono::month& __mval,
- const chrono::weekday_indexed& __wdival) noexcept
- : __y{__yval}, __m{__mval}, __wdi{__wdival} {}
- constexpr year_month_weekday(const sys_days& __sysd) noexcept
- : year_month_weekday(__from_days(__sysd.time_since_epoch())) {}
- inline explicit constexpr year_month_weekday(const local_days& __locd) noexcept
- : year_month_weekday(__from_days(__locd.time_since_epoch())) {}
- constexpr year_month_weekday& operator+=(const months& m) noexcept;
- constexpr year_month_weekday& operator-=(const months& m) noexcept;
- constexpr year_month_weekday& operator+=(const years& y) noexcept;
- constexpr year_month_weekday& operator-=(const years& y) noexcept;
-
- inline constexpr chrono::year year() const noexcept { return __y; }
- inline constexpr chrono::month month() const noexcept { return __m; }
- inline constexpr chrono::weekday weekday() const noexcept { return __wdi.weekday(); }
- inline constexpr unsigned index() const noexcept { return __wdi.index(); }
- inline constexpr chrono::weekday_indexed weekday_indexed() const noexcept { return __wdi; }
-
- inline constexpr operator sys_days() const noexcept { return sys_days{__to_days()}; }
- inline explicit constexpr operator local_days() const noexcept { return local_days{__to_days()}; }
- inline constexpr bool ok() const noexcept
- {
- if (!__y.ok() || !__m.ok() || !__wdi.ok()) return false;
- if (__wdi.index() <= 4) return true;
- auto __nth_weekday_day =
- __wdi.weekday() -
- chrono::weekday{static_cast<sys_days>(__y / __m / 1)} +
- days{(__wdi.index() - 1) * 7 + 1};
- return static_cast<unsigned>(__nth_weekday_day.count()) <=
- static_cast<unsigned>((__y / __m / last).day());
- }
-
- static constexpr year_month_weekday __from_days(days __d) noexcept;
- constexpr days __to_days() const noexcept;
-};
-
-inline constexpr
-year_month_weekday year_month_weekday::__from_days(days __d) noexcept
-{
- const sys_days __sysd{__d};
- const chrono::weekday __wd = chrono::weekday(__sysd);
- const year_month_day __ymd = year_month_day(__sysd);
- return year_month_weekday{__ymd.year(), __ymd.month(),
- __wd[(static_cast<unsigned>(__ymd.day())-1)/7+1]};
-}
-
-inline constexpr
-days year_month_weekday::__to_days() const noexcept
-{
- const sys_days __sysd = sys_days(__y/__m/1);
- return (__sysd + (__wdi.weekday() - chrono::weekday(__sysd) + days{(__wdi.index()-1)*7}))
- .time_since_epoch();
-}
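// Sketch only (not part of this patch): __to_days() locates "the Nth weekday of
// the month" by starting from the 1st of the month and stepping forward to the
// requested weekday and index.
#include <chrono>
using namespace std::chrono;
// The second Tuesday of November 2021 (2021-11-01 was a Monday) is the 9th:
static_assert(sys_days{year{2021}/November/Tuesday[2]} ==
              sys_days{year{2021}/November/9});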
-
-inline constexpr
-bool operator==(const year_month_weekday& __lhs, const year_month_weekday& __rhs) noexcept
-{ return __lhs.year() == __rhs.year() && __lhs.month() == __rhs.month() && __lhs.weekday_indexed() == __rhs.weekday_indexed(); }
-
-inline constexpr
-bool operator!=(const year_month_weekday& __lhs, const year_month_weekday& __rhs) noexcept
-{ return !(__lhs == __rhs); }
-
-inline constexpr
-year_month_weekday operator/(const year_month& __lhs, const weekday_indexed& __rhs) noexcept
-{ return year_month_weekday{__lhs.year(), __lhs.month(), __rhs}; }
-
-inline constexpr
-year_month_weekday operator/(const year& __lhs, const month_weekday& __rhs) noexcept
-{ return year_month_weekday{__lhs, __rhs.month(), __rhs.weekday_indexed()}; }
-
-inline constexpr
-year_month_weekday operator/(int __lhs, const month_weekday& __rhs) noexcept
-{ return year(__lhs) / __rhs; }
-
-inline constexpr
-year_month_weekday operator/(const month_weekday& __lhs, const year& __rhs) noexcept
-{ return __rhs / __lhs; }
-
-inline constexpr
-year_month_weekday operator/(const month_weekday& __lhs, int __rhs) noexcept
-{ return year(__rhs) / __lhs; }
-
-
-inline constexpr
-year_month_weekday operator+(const year_month_weekday& __lhs, const months& __rhs) noexcept
-{ return (__lhs.year() / __lhs.month() + __rhs) / __lhs.weekday_indexed(); }
-
-inline constexpr
-year_month_weekday operator+(const months& __lhs, const year_month_weekday& __rhs) noexcept
-{ return __rhs + __lhs; }
-
-inline constexpr
-year_month_weekday operator-(const year_month_weekday& __lhs, const months& __rhs) noexcept
-{ return __lhs + (-__rhs); }
-
-inline constexpr
-year_month_weekday operator+(const year_month_weekday& __lhs, const years& __rhs) noexcept
-{ return year_month_weekday{__lhs.year() + __rhs, __lhs.month(), __lhs.weekday_indexed()}; }
-
-inline constexpr
-year_month_weekday operator+(const years& __lhs, const year_month_weekday& __rhs) noexcept
-{ return __rhs + __lhs; }
-
-inline constexpr
-year_month_weekday operator-(const year_month_weekday& __lhs, const years& __rhs) noexcept
-{ return __lhs + (-__rhs); }
-
-
-inline constexpr year_month_weekday& year_month_weekday::operator+=(const months& __dm) noexcept { *this = *this + __dm; return *this; }
-inline constexpr year_month_weekday& year_month_weekday::operator-=(const months& __dm) noexcept { *this = *this - __dm; return *this; }
-inline constexpr year_month_weekday& year_month_weekday::operator+=(const years& __dy) noexcept { *this = *this + __dy; return *this; }
-inline constexpr year_month_weekday& year_month_weekday::operator-=(const years& __dy) noexcept { *this = *this - __dy; return *this; }
-
-class year_month_weekday_last {
-private:
- chrono::year __y;
- chrono::month __m;
- chrono::weekday_last __wdl;
-public:
- constexpr year_month_weekday_last(const chrono::year& __yval, const chrono::month& __mval,
- const chrono::weekday_last& __wdlval) noexcept
- : __y{__yval}, __m{__mval}, __wdl{__wdlval} {}
- constexpr year_month_weekday_last& operator+=(const months& __dm) noexcept;
- constexpr year_month_weekday_last& operator-=(const months& __dm) noexcept;
- constexpr year_month_weekday_last& operator+=(const years& __dy) noexcept;
- constexpr year_month_weekday_last& operator-=(const years& __dy) noexcept;
-
- inline constexpr chrono::year year() const noexcept { return __y; }
- inline constexpr chrono::month month() const noexcept { return __m; }
- inline constexpr chrono::weekday weekday() const noexcept { return __wdl.weekday(); }
- inline constexpr chrono::weekday_last weekday_last() const noexcept { return __wdl; }
- inline constexpr operator sys_days() const noexcept { return sys_days{__to_days()}; }
- inline explicit constexpr operator local_days() const noexcept { return local_days{__to_days()}; }
- inline constexpr bool ok() const noexcept { return __y.ok() && __m.ok() && __wdl.ok(); }
-
- constexpr days __to_days() const noexcept;
-
-};
-
-inline constexpr
-days year_month_weekday_last::__to_days() const noexcept
-{
- const sys_days __last = sys_days{__y/__m/last};
-    return (__last - (chrono::weekday{__last} - __wdl.weekday())).time_since_epoch();
-}
-
-inline constexpr
-bool operator==(const year_month_weekday_last& __lhs, const year_month_weekday_last& __rhs) noexcept
-{ return __lhs.year() == __rhs.year() && __lhs.month() == __rhs.month() && __lhs.weekday_last() == __rhs.weekday_last(); }
-
-inline constexpr
-bool operator!=(const year_month_weekday_last& __lhs, const year_month_weekday_last& __rhs) noexcept
-{ return !(__lhs == __rhs); }
-
-
-inline constexpr
-year_month_weekday_last operator/(const year_month& __lhs, const weekday_last& __rhs) noexcept
-{ return year_month_weekday_last{__lhs.year(), __lhs.month(), __rhs}; }
-
-inline constexpr
-year_month_weekday_last operator/(const year& __lhs, const month_weekday_last& __rhs) noexcept
-{ return year_month_weekday_last{__lhs, __rhs.month(), __rhs.weekday_last()}; }
-
-inline constexpr
-year_month_weekday_last operator/(int __lhs, const month_weekday_last& __rhs) noexcept
-{ return year(__lhs) / __rhs; }
-
-inline constexpr
-year_month_weekday_last operator/(const month_weekday_last& __lhs, const year& __rhs) noexcept
-{ return __rhs / __lhs; }
-
-inline constexpr
-year_month_weekday_last operator/(const month_weekday_last& __lhs, int __rhs) noexcept
-{ return year(__rhs) / __lhs; }
-
-
-inline constexpr
-year_month_weekday_last operator+(const year_month_weekday_last& __lhs, const months& __rhs) noexcept
-{ return (__lhs.year() / __lhs.month() + __rhs) / __lhs.weekday_last(); }
-
-inline constexpr
-year_month_weekday_last operator+(const months& __lhs, const year_month_weekday_last& __rhs) noexcept
-{ return __rhs + __lhs; }
-
-inline constexpr
-year_month_weekday_last operator-(const year_month_weekday_last& __lhs, const months& __rhs) noexcept
-{ return __lhs + (-__rhs); }
-
-inline constexpr
-year_month_weekday_last operator+(const year_month_weekday_last& __lhs, const years& __rhs) noexcept
-{ return year_month_weekday_last{__lhs.year() + __rhs, __lhs.month(), __lhs.weekday_last()}; }
-
-inline constexpr
-year_month_weekday_last operator+(const years& __lhs, const year_month_weekday_last& __rhs) noexcept
-{ return __rhs + __lhs; }
-
-inline constexpr
-year_month_weekday_last operator-(const year_month_weekday_last& __lhs, const years& __rhs) noexcept
-{ return __lhs + (-__rhs); }
-
-inline constexpr year_month_weekday_last& year_month_weekday_last::operator+=(const months& __dm) noexcept { *this = *this + __dm; return *this; }
-inline constexpr year_month_weekday_last& year_month_weekday_last::operator-=(const months& __dm) noexcept { *this = *this - __dm; return *this; }
-inline constexpr year_month_weekday_last& year_month_weekday_last::operator+=(const years& __dy) noexcept { *this = *this + __dy; return *this; }
-inline constexpr year_month_weekday_last& year_month_weekday_last::operator-=(const years& __dy) noexcept { *this = *this - __dy; return *this; }
-
-
-template <class _Duration>
-class hh_mm_ss
-{
-private:
- static_assert(__is_duration<_Duration>::value, "template parameter of hh_mm_ss must be a std::chrono::duration");
- using __CommonType = common_type_t<_Duration, chrono::seconds>;
-
- static constexpr uint64_t __pow10(unsigned __exp)
- {
- uint64_t __ret = 1;
- for (unsigned __i = 0; __i < __exp; ++__i)
- __ret *= 10U;
- return __ret;
- }
-
- static constexpr unsigned __width(uint64_t __n, uint64_t __d = 10, unsigned __w = 0)
- {
- if (__n >= 2 && __d != 0 && __w < 19)
- return 1 + __width(__n, __d % __n * 10, __w+1);
- return 0;
- }
-
-public:
- static unsigned constexpr fractional_width = __width(__CommonType::period::den) < 19 ?
- __width(__CommonType::period::den) : 6u;
- using precision = duration<typename __CommonType::rep, ratio<1, __pow10(fractional_width)>>;
-
- constexpr hh_mm_ss() noexcept : hh_mm_ss{_Duration::zero()} {}
-
- constexpr explicit hh_mm_ss(_Duration __d) noexcept :
- __is_neg(__d < _Duration(0)),
- __h(duration_cast<chrono::hours> (abs(__d))),
- __m(duration_cast<chrono::minutes>(abs(__d) - hours())),
- __s(duration_cast<chrono::seconds>(abs(__d) - hours() - minutes())),
- __f(duration_cast<precision> (abs(__d) - hours() - minutes() - seconds()))
- {}
-
- constexpr bool is_negative() const noexcept { return __is_neg; }
- constexpr chrono::hours hours() const noexcept { return __h; }
- constexpr chrono::minutes minutes() const noexcept { return __m; }
- constexpr chrono::seconds seconds() const noexcept { return __s; }
- constexpr precision subseconds() const noexcept { return __f; }
-
- constexpr precision to_duration() const noexcept
- {
- auto __dur = __h + __m + __s + __f;
- return __is_neg ? -__dur : __dur;
- }
-
- constexpr explicit operator precision() const noexcept { return to_duration(); }
-
-private:
- bool __is_neg;
- chrono::hours __h;
- chrono::minutes __m;
- chrono::seconds __s;
- precision __f;
-};
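// Illustrative sketch (not part of this patch): hh_mm_ss splits a signed
// duration into hour/minute/second fields plus a fractional part whose decimal
// precision is derived from fractional_width above (3 digits for milliseconds).
#include <chrono>
using namespace std::chrono;
constexpr hh_mm_ss hms{milliseconds{-5'432'109}};   // -1 h 30 min 32.109 s
static_assert(hms.is_negative());
static_assert(hms.hours() == hours{1} && hms.minutes() == minutes{30});
static_assert(hms.seconds() == seconds{32} && hms.subseconds() == milliseconds{109});
static_assert(decltype(hms)::fractional_width == 3);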
-
-constexpr bool is_am(const hours& __h) noexcept { return __h >= hours( 0) && __h < hours(12); }
-constexpr bool is_pm(const hours& __h) noexcept { return __h >= hours(12) && __h < hours(24); }
-
-constexpr hours make12(const hours& __h) noexcept
-{
- if (__h == hours( 0)) return hours(12);
- else if (__h <= hours(12)) return __h;
- else return __h - hours(12);
-}
-
-constexpr hours make24(const hours& __h, bool __is_pm) noexcept
-{
- if (__is_pm)
- return __h == hours(12) ? __h : __h + hours(12);
- else
- return __h == hours(12) ? hours(0) : __h;
-}
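// Sketch only (not part of this patch): the 12-hour helpers above follow the
// usual convention that 00:00 is 12 AM and 12:00 is 12 PM.
#include <chrono>
using namespace std::chrono;
static_assert(is_am(hours{0}) && is_pm(hours{12}));
static_assert(make12(hours{0}) == hours{12});                    // 00:00 -> 12 AM
static_assert(make12(hours{15}) == hours{3});                    // 15:00 -> 3 PM
static_assert(make24(hours{12}, /*is_pm=*/false) == hours{0});   // 12 AM -> 00:00
static_assert(make24(hours{3},  /*is_pm=*/true)  == hours{15});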
-
-#endif // _LIBCPP_STD_VER > 17
-} // namespace chrono
-
-#if _LIBCPP_STD_VER > 11
-// Suffixes for duration literals [time.duration.literals]
-inline namespace literals
-{
- inline namespace chrono_literals
- {
-
- constexpr chrono::hours operator""h(unsigned long long __h)
- {
- return chrono::hours(static_cast<chrono::hours::rep>(__h));
- }
-
- constexpr chrono::duration<long double, ratio<3600,1>> operator""h(long double __h)
- {
- return chrono::duration<long double, ratio<3600,1>>(__h);
- }
-
-
- constexpr chrono::minutes operator""min(unsigned long long __m)
- {
- return chrono::minutes(static_cast<chrono::minutes::rep>(__m));
- }
-
- constexpr chrono::duration<long double, ratio<60,1>> operator""min(long double __m)
- {
- return chrono::duration<long double, ratio<60,1>> (__m);
- }
-
-
- constexpr chrono::seconds operator""s(unsigned long long __s)
- {
- return chrono::seconds(static_cast<chrono::seconds::rep>(__s));
- }
-
- constexpr chrono::duration<long double> operator""s(long double __s)
- {
- return chrono::duration<long double> (__s);
- }
-
-
- constexpr chrono::milliseconds operator""ms(unsigned long long __ms)
- {
- return chrono::milliseconds(static_cast<chrono::milliseconds::rep>(__ms));
- }
-
- constexpr chrono::duration<long double, milli> operator""ms(long double __ms)
- {
- return chrono::duration<long double, milli>(__ms);
- }
-
-
- constexpr chrono::microseconds operator""us(unsigned long long __us)
- {
- return chrono::microseconds(static_cast<chrono::microseconds::rep>(__us));
- }
-
- constexpr chrono::duration<long double, micro> operator""us(long double __us)
- {
- return chrono::duration<long double, micro> (__us);
- }
-
-
- constexpr chrono::nanoseconds operator""ns(unsigned long long __ns)
- {
- return chrono::nanoseconds(static_cast<chrono::nanoseconds::rep>(__ns));
- }
-
- constexpr chrono::duration<long double, nano> operator""ns(long double __ns)
- {
- return chrono::duration<long double, nano> (__ns);
- }
-
-#if _LIBCPP_STD_VER > 17
- constexpr chrono::day operator ""d(unsigned long long __d) noexcept
- {
- return chrono::day(static_cast<unsigned>(__d));
- }
-
- constexpr chrono::year operator ""y(unsigned long long __y) noexcept
- {
- return chrono::year(static_cast<int>(__y));
- }
-#endif
-} // namespace chrono_literals
-} // namespace literals
-
-namespace chrono { // hoist the literals into namespace std::chrono
- using namespace literals::chrono_literals;
-} // namespace chrono
-
-#endif
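// Illustrative sketch (not part of this patch): the literal suffixes above, plus
// the hoist into std::chrono, allow compact duration and calendar spellings.
#include <chrono>
using namespace std::chrono;   // also brings in the literals via the hoist above
static_assert(90min == 1.5h);
static_assert(2500ms == 2.5s);
static_assert(24h == days{1});
static_assert((2022y/January/15d).ok());   // 'y' and 'd' are the C++20-only suffixes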
-
-_LIBCPP_END_NAMESPACE_STD
-
-#ifndef _LIBCPP_CXX03_LANG
-_LIBCPP_BEGIN_NAMESPACE_FILESYSTEM
-struct _FilesystemClock {
-#if !defined(_LIBCPP_HAS_NO_INT128)
- typedef __int128_t rep;
- typedef nano period;
-#else
- typedef long long rep;
- typedef nano period;
-#endif
-
- typedef chrono::duration<rep, period> duration;
- typedef chrono::time_point<_FilesystemClock> time_point;
-
- _LIBCPP_EXPORTED_FROM_ABI
- static _LIBCPP_CONSTEXPR_AFTER_CXX11 const bool is_steady = false;
-
- _LIBCPP_AVAILABILITY_FILESYSTEM _LIBCPP_FUNC_VIS static time_point now() noexcept;
-
-#if _LIBCPP_STD_VER > 17
- template <class _Duration>
- _LIBCPP_HIDE_FROM_ABI
- static chrono::sys_time<_Duration> to_sys(const chrono::file_time<_Duration>& __t) {
- return chrono::sys_time<_Duration>(__t.time_since_epoch());
- }
-
- template <class _Duration>
- _LIBCPP_HIDE_FROM_ABI
- static chrono::file_time<_Duration> from_sys(const chrono::sys_time<_Duration>& __t) {
- return chrono::file_time<_Duration>(__t.time_since_epoch());
- }
-#endif // _LIBCPP_STD_VER > 17
-};
-_LIBCPP_END_NAMESPACE_FILESYSTEM
-#endif // !_LIBCPP_CXX03_LANG
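// Sketch only (not part of this patch): with to_sys()/from_sys() defined, C++20
// code can move between file-time and system-time time_points; the epochs of
// this clock coincide with the system clock, so only the clock tag changes.
// Assumes the usual std::chrono::file_clock alias for this clock; as_sys() is
// just an illustrative helper name.
#include <chrono>
#include <filesystem>
inline std::chrono::sys_time<std::filesystem::file_time_type::duration>
as_sys(std::filesystem::file_time_type ft) {
    return std::chrono::file_clock::to_sys(ft);   // same tick count, sys_time label
}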
-
-_LIBCPP_POP_MACROS
-
#endif // _LIBCPP_CHRONO
diff --git a/contrib/llvm-project/libcxx/include/cmath b/contrib/llvm-project/libcxx/include/cmath
index b5c332c81ad6..8bd55542ed92 100644
--- a/contrib/llvm-project/libcxx/include/cmath
+++ b/contrib/llvm-project/libcxx/include/cmath
@@ -306,8 +306,8 @@ constexpr long double lerp(long double a, long double b, long double t) noexcept
#include <__config>
#include <math.h>
-#include <version>
#include <type_traits>
+#include <version>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
@@ -636,6 +636,23 @@ lerp(double __a, double __b, double __t) _NOEXCEPT { return __ler
constexpr long double
lerp(long double __a, long double __b, long double __t) _NOEXCEPT { return __lerp(__a, __b, __t); }
+template <class _A1, class _A2, class _A3>
+inline _LIBCPP_HIDE_FROM_ABI
+constexpr typename enable_if_t
+<
+ is_arithmetic<_A1>::value &&
+ is_arithmetic<_A2>::value &&
+ is_arithmetic<_A3>::value,
+ __promote<_A1, _A2, _A3>
+>::type
+lerp(_A1 __a, _A2 __b, _A3 __t) noexcept
+{
+ typedef typename __promote<_A1, _A2, _A3>::type __result_type;
+ static_assert(!(_IsSame<_A1, __result_type>::value &&
+ _IsSame<_A2, __result_type>::value &&
+ _IsSame<_A3, __result_type>::value));
+ return __lerp((__result_type)__a, (__result_type)__b, (__result_type)__t);
+}
#endif // _LIBCPP_STD_VER > 17
_LIBCPP_END_NAMESPACE_STD
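// Illustrative sketch (not part of this patch): the new constrained template
// accepts any mix of arithmetic arguments and evaluates in their promoted type,
// so mixed calls that would otherwise be ambiguous or lossy now resolve cleanly.
#include <cmath>
static_assert(std::lerp(0, 10, 0.25) == 2.5);     // int, int, double -> double
static_assert(std::lerp(1.0f, 2.0, 0.5) == 1.5);  // float, double, double -> double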
diff --git a/contrib/llvm-project/libcxx/include/codecvt b/contrib/llvm-project/libcxx/include/codecvt
index 60d3db882c03..74839d199686 100644
--- a/contrib/llvm-project/libcxx/include/codecvt
+++ b/contrib/llvm-project/libcxx/include/codecvt
@@ -56,6 +56,7 @@ class codecvt_utf8_utf16
#include <__config>
#include <__locale>
+#include <version>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/compare b/contrib/llvm-project/libcxx/include/compare
index 5d07ebaf2fbd..d686b5a369f2 100644
--- a/contrib/llvm-project/libcxx/include/compare
+++ b/contrib/llvm-project/libcxx/include/compare
@@ -145,6 +145,7 @@ namespace std {
#include <__compare/three_way_comparable.h>
#include <__compare/weak_order.h>
#include <__config>
+#include <version>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/condition_variable b/contrib/llvm-project/libcxx/include/condition_variable
index 0569e2254d1d..ecec3ea8c017 100644
--- a/contrib/llvm-project/libcxx/include/condition_variable
+++ b/contrib/llvm-project/libcxx/include/condition_variable
@@ -109,6 +109,7 @@ public:
#include <__config>
#include <__mutex_base>
#include <memory>
+#include <version>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/coroutine b/contrib/llvm-project/libcxx/include/coroutine
index 4e140ab3fed7..6e83fe285bc7 100644
--- a/contrib/llvm-project/libcxx/include/coroutine
+++ b/contrib/llvm-project/libcxx/include/coroutine
@@ -40,8 +40,8 @@ struct suspend_always;
#include <__config>
#include <__coroutine/coroutine_handle.h>
-#include <__coroutine/noop_coroutine_handle.h>
#include <__coroutine/coroutine_traits.h>
+#include <__coroutine/noop_coroutine_handle.h>
#include <__coroutine/trivial_awaitables.h>
#include <version>
diff --git a/contrib/llvm-project/libcxx/include/execution b/contrib/llvm-project/libcxx/include/execution
index c1debcb72ff1..417b11b103a2 100644
--- a/contrib/llvm-project/libcxx/include/execution
+++ b/contrib/llvm-project/libcxx/include/execution
@@ -11,6 +11,7 @@
#define _LIBCPP_EXECUTION
#include <__config>
+#include <version>
#if defined(_LIBCPP_HAS_PARALLEL_ALGORITHMS) && _LIBCPP_STD_VER >= 17
# include <__pstl_execution>
diff --git a/contrib/llvm-project/libcxx/include/experimental/__memory b/contrib/llvm-project/libcxx/include/experimental/__memory
index bd9bf95e5933..33027793895d 100644
--- a/contrib/llvm-project/libcxx/include/experimental/__memory
+++ b/contrib/llvm-project/libcxx/include/experimental/__memory
@@ -10,11 +10,11 @@
#ifndef _LIBCPP_EXPERIMENTAL___MEMORY
#define _LIBCPP_EXPERIMENTAL___MEMORY
+#include <__functional_base>
#include <__memory/allocator_arg_t.h>
#include <__memory/uses_allocator.h>
#include <experimental/__config>
#include <experimental/utility> // for erased_type
-#include <__functional_base>
#include <type_traits>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/experimental/algorithm b/contrib/llvm-project/libcxx/include/experimental/algorithm
index bcf372cafd7a..a7fa18ff5dde 100644
--- a/contrib/llvm-project/libcxx/include/experimental/algorithm
+++ b/contrib/llvm-project/libcxx/include/experimental/algorithm
@@ -31,12 +31,11 @@ ForwardIterator search(ForwardIterator first, ForwardIterator last,
*/
-#include <experimental/__config>
+#include <__debug>
#include <algorithm>
+#include <experimental/__config>
#include <type_traits>
-#include <__debug>
-
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
#endif
diff --git a/contrib/llvm-project/libcxx/include/experimental/coroutine b/contrib/llvm-project/libcxx/include/experimental/coroutine
index 16b4028765bc..4b53e4b62b40 100644
--- a/contrib/llvm-project/libcxx/include/experimental/coroutine
+++ b/contrib/llvm-project/libcxx/include/experimental/coroutine
@@ -45,13 +45,13 @@ template <class P> struct hash<coroutine_handle<P>>;
*/
+#include <__debug>
+#include <cstddef>
#include <experimental/__config>
-#include <new>
-#include <type_traits>
#include <functional>
#include <memory> // for hash<T*>
-#include <cstddef>
-#include <__debug>
+#include <new>
+#include <type_traits>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/experimental/deque b/contrib/llvm-project/libcxx/include/experimental/deque
index 594ddff22f98..41c25d95b9a0 100644
--- a/contrib/llvm-project/libcxx/include/experimental/deque
+++ b/contrib/llvm-project/libcxx/include/experimental/deque
@@ -28,8 +28,8 @@ namespace pmr {
*/
-#include <experimental/__config>
#include <deque>
+#include <experimental/__config>
#include <experimental/memory_resource>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/experimental/forward_list b/contrib/llvm-project/libcxx/include/experimental/forward_list
index 6781424cf2c6..590bef0283a2 100644
--- a/contrib/llvm-project/libcxx/include/experimental/forward_list
+++ b/contrib/llvm-project/libcxx/include/experimental/forward_list
@@ -29,8 +29,8 @@ namespace pmr {
*/
#include <experimental/__config>
-#include <forward_list>
#include <experimental/memory_resource>
+#include <forward_list>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/experimental/functional b/contrib/llvm-project/libcxx/include/experimental/functional
index bcff51e8056f..8977c6354355 100644
--- a/contrib/llvm-project/libcxx/include/experimental/functional
+++ b/contrib/llvm-project/libcxx/include/experimental/functional
@@ -86,16 +86,15 @@ inline namespace fundamentals_v1 {
*/
+#include <__debug>
#include <__memory/uses_allocator.h>
+#include <algorithm>
+#include <array>
#include <experimental/__config>
#include <functional>
-#include <algorithm>
#include <type_traits>
-#include <vector>
-#include <array>
#include <unordered_map>
-
-#include <__debug>
+#include <vector>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
@@ -104,7 +103,6 @@ inline namespace fundamentals_v1 {
_LIBCPP_PUSH_MACROS
#include <__undef_macros>
-
_LIBCPP_BEGIN_NAMESPACE_LFTS
#if _LIBCPP_STD_VER > 11
diff --git a/contrib/llvm-project/libcxx/include/experimental/iterator b/contrib/llvm-project/libcxx/include/experimental/iterator
index 9da08a0cff9d..ab1f54e35eb9 100644
--- a/contrib/llvm-project/libcxx/include/experimental/iterator
+++ b/contrib/llvm-project/libcxx/include/experimental/iterator
@@ -53,8 +53,8 @@ namespace std {
*/
#include <__memory/addressof.h>
-#include <__utility/move.h>
#include <__utility/forward.h>
+#include <__utility/move.h>
#include <experimental/__config>
#include <iterator>
diff --git a/contrib/llvm-project/libcxx/include/experimental/list b/contrib/llvm-project/libcxx/include/experimental/list
index 099d80fd8db5..be43e60f49ad 100644
--- a/contrib/llvm-project/libcxx/include/experimental/list
+++ b/contrib/llvm-project/libcxx/include/experimental/list
@@ -29,8 +29,8 @@ namespace pmr {
*/
#include <experimental/__config>
-#include <list>
#include <experimental/memory_resource>
+#include <list>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/experimental/map b/contrib/llvm-project/libcxx/include/experimental/map
index 27ff7e862e27..704626eed674 100644
--- a/contrib/llvm-project/libcxx/include/experimental/map
+++ b/contrib/llvm-project/libcxx/include/experimental/map
@@ -34,8 +34,8 @@ namespace pmr {
*/
#include <experimental/__config>
-#include <map>
#include <experimental/memory_resource>
+#include <map>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/experimental/memory_resource b/contrib/llvm-project/libcxx/include/experimental/memory_resource
index 71a4f51c50e5..47bb8c58c8de 100644
--- a/contrib/llvm-project/libcxx/include/experimental/memory_resource
+++ b/contrib/llvm-project/libcxx/include/experimental/memory_resource
@@ -64,18 +64,18 @@ namespace pmr {
*/
+#include <__debug>
+#include <__tuple>
+#include <cstddef>
+#include <cstdlib>
#include <experimental/__config>
#include <experimental/__memory>
#include <limits>
#include <memory>
#include <new>
#include <stdexcept>
-#include <__tuple>
#include <type_traits>
#include <utility>
-#include <cstddef>
-#include <cstdlib>
-#include <__debug>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/experimental/propagate_const b/contrib/llvm-project/libcxx/include/experimental/propagate_const
index 12376dcec242..deb4ba98a7fa 100644
--- a/contrib/llvm-project/libcxx/include/experimental/propagate_const
+++ b/contrib/llvm-project/libcxx/include/experimental/propagate_const
@@ -107,19 +107,18 @@
*/
#include <experimental/__config>
+#include <functional>
+#include <type_traits>
+#include <utility>
+
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
#endif
#if _LIBCPP_STD_VER > 11
-#include <type_traits>
-#include <utility>
-#include <functional>
-
_LIBCPP_BEGIN_NAMESPACE_LFTS_V2
-
template <class _Tp>
class propagate_const;
diff --git a/contrib/llvm-project/libcxx/include/experimental/regex b/contrib/llvm-project/libcxx/include/experimental/regex
index ced0e950a127..5bb3dccf9387 100644
--- a/contrib/llvm-project/libcxx/include/experimental/regex
+++ b/contrib/llvm-project/libcxx/include/experimental/regex
@@ -36,9 +36,9 @@ namespace pmr {
*/
#include <experimental/__config>
-#include <regex>
-#include <experimental/string>
#include <experimental/memory_resource>
+#include <experimental/string>
+#include <regex>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/experimental/set b/contrib/llvm-project/libcxx/include/experimental/set
index 891510bbb8d9..12b2ac23b81a 100644
--- a/contrib/llvm-project/libcxx/include/experimental/set
+++ b/contrib/llvm-project/libcxx/include/experimental/set
@@ -34,8 +34,8 @@ namespace pmr {
*/
#include <experimental/__config>
-#include <set>
#include <experimental/memory_resource>
+#include <set>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/experimental/simd b/contrib/llvm-project/libcxx/include/experimental/simd
index 1f17ee96f0b5..475155512d76 100644
--- a/contrib/llvm-project/libcxx/include/experimental/simd
+++ b/contrib/llvm-project/libcxx/include/experimental/simd
@@ -649,10 +649,10 @@ public:
*/
-#include <experimental/__config>
#include <algorithm>
#include <array>
#include <cstddef>
+#include <experimental/__config>
#include <functional>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
@@ -689,7 +689,7 @@ class __simd_storage<_Tp, __simd_abi<_StorageKind::_Array, __num_element>> {
friend struct simd_mask;
public:
- _Tp __get(size_t __index) const noexcept { return __storage_[__index]; };
+ _Tp __get(size_t __index) const noexcept { return __storage_[__index]; }
void __set(size_t __index, _Tp __val) noexcept {
__storage_[__index] = __val;
}
@@ -706,7 +706,7 @@ class __simd_storage<_Tp, __simd_abi<_StorageKind::_Scalar, 1>> {
friend struct simd_mask;
public:
- _Tp __get(size_t __index) const noexcept { return (&__storage_)[__index]; };
+ _Tp __get(size_t __index) const noexcept { return (&__storage_)[__index]; }
void __set(size_t __index, _Tp __val) noexcept {
(&__storage_)[__index] = __val;
}
@@ -770,7 +770,7 @@ struct __vec_ext_traits {
_LIBCPP_SPECIALIZE_VEC_EXT(_TYPE, 29); \
_LIBCPP_SPECIALIZE_VEC_EXT(_TYPE, 30); \
_LIBCPP_SPECIALIZE_VEC_EXT(_TYPE, 31); \
- _LIBCPP_SPECIALIZE_VEC_EXT(_TYPE, 32);
+ _LIBCPP_SPECIALIZE_VEC_EXT(_TYPE, 32)
_LIBCPP_SPECIALIZE_VEC_EXT_32(char);
_LIBCPP_SPECIALIZE_VEC_EXT_32(char16_t);
@@ -808,7 +808,7 @@ class __simd_storage<_Tp, __simd_abi<_StorageKind::_VecExt, __num_element>> {
friend struct simd_mask;
public:
- _Tp __get(size_t __index) const noexcept { return __storage_[__index]; };
+ _Tp __get(size_t __index) const noexcept { return __storage_[__index]; }
void __set(size_t __index, _Tp __val) noexcept {
__storage_[__index] = __val;
}
diff --git a/contrib/llvm-project/libcxx/include/experimental/string b/contrib/llvm-project/libcxx/include/experimental/string
index b881fcf3af1c..1643e84c5a0c 100644
--- a/contrib/llvm-project/libcxx/include/experimental/string
+++ b/contrib/llvm-project/libcxx/include/experimental/string
@@ -38,8 +38,8 @@ namespace pmr {
*/
#include <experimental/__config>
-#include <string>
#include <experimental/memory_resource>
+#include <string>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/experimental/unordered_map b/contrib/llvm-project/libcxx/include/experimental/unordered_map
index fc8cc7f77bf0..80905e68ef36 100644
--- a/contrib/llvm-project/libcxx/include/experimental/unordered_map
+++ b/contrib/llvm-project/libcxx/include/experimental/unordered_map
@@ -40,8 +40,8 @@ namespace pmr {
*/
#include <experimental/__config>
-#include <unordered_map>
#include <experimental/memory_resource>
+#include <unordered_map>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/experimental/unordered_set b/contrib/llvm-project/libcxx/include/experimental/unordered_set
index 39342da5f679..f09b0bd57d9f 100644
--- a/contrib/llvm-project/libcxx/include/experimental/unordered_set
+++ b/contrib/llvm-project/libcxx/include/experimental/unordered_set
@@ -34,8 +34,8 @@ namespace pmr {
*/
#include <experimental/__config>
-#include <unordered_set>
#include <experimental/memory_resource>
+#include <unordered_set>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/experimental/vector b/contrib/llvm-project/libcxx/include/experimental/vector
index a22698ef7ce4..4d31d1ccd85b 100644
--- a/contrib/llvm-project/libcxx/include/experimental/vector
+++ b/contrib/llvm-project/libcxx/include/experimental/vector
@@ -29,8 +29,8 @@ namespace pmr {
*/
#include <experimental/__config>
-#include <vector>
#include <experimental/memory_resource>
+#include <vector>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/ext/__hash b/contrib/llvm-project/libcxx/include/ext/__hash
index 2f998ee40a93..f6e7a486f6b6 100644
--- a/contrib/llvm-project/libcxx/include/ext/__hash
+++ b/contrib/llvm-project/libcxx/include/ext/__hash
@@ -13,8 +13,8 @@
#pragma GCC system_header
#include <__string>
-#include <string>
#include <cstring>
+#include <string>
namespace __gnu_cxx {
diff --git a/contrib/llvm-project/libcxx/include/ext/hash_map b/contrib/llvm-project/libcxx/include/ext/hash_map
index d6ea26c2cf86..a52c6a4066f7 100644
--- a/contrib/llvm-project/libcxx/include/ext/hash_map
+++ b/contrib/llvm-project/libcxx/include/ext/hash_map
@@ -203,10 +203,10 @@ template <class Key, class T, class Hash, class Pred, class Alloc>
#include <__config>
#include <__hash_table>
+#include <ext/__hash>
#include <functional>
#include <stdexcept>
#include <type_traits>
-#include <ext/__hash>
#if defined(__DEPRECATED) && __DEPRECATED
#if defined(_LIBCPP_WARNING)
diff --git a/contrib/llvm-project/libcxx/include/ext/hash_set b/contrib/llvm-project/libcxx/include/ext/hash_set
index 7d19ccd006e2..bca008d172a2 100644
--- a/contrib/llvm-project/libcxx/include/ext/hash_set
+++ b/contrib/llvm-project/libcxx/include/ext/hash_set
@@ -194,8 +194,8 @@ template <class Value, class Hash, class Pred, class Alloc>
#include <__config>
#include <__hash_table>
-#include <functional>
#include <ext/__hash>
+#include <functional>
#if defined(__DEPRECATED) && __DEPRECATED
#if defined(_LIBCPP_WARNING)
diff --git a/contrib/llvm-project/libcxx/include/filesystem b/contrib/llvm-project/libcxx/include/filesystem
index 9f5b42747b34..74f596599a1e 100644
--- a/contrib/llvm-project/libcxx/include/filesystem
+++ b/contrib/llvm-project/libcxx/include/filesystem
@@ -248,8 +248,8 @@ inline constexpr bool std::ranges::enable_view<std::filesystem::recursive_direct
#include <__filesystem/file_type.h>
#include <__filesystem/filesystem_error.h>
#include <__filesystem/operations.h>
-#include <__filesystem/path_iterator.h>
#include <__filesystem/path.h>
+#include <__filesystem/path_iterator.h>
#include <__filesystem/perm_options.h>
#include <__filesystem/perms.h>
#include <__filesystem/recursive_directory_iterator.h>
diff --git a/contrib/llvm-project/libcxx/include/format b/contrib/llvm-project/libcxx/include/format
index 788b9c299abc..4cf146ead17a 100644
--- a/contrib/llvm-project/libcxx/include/format
+++ b/contrib/llvm-project/libcxx/include/format
@@ -14,43 +14,15 @@
namespace std {
// [format.context], class template basic_format_context
- template<class Out, class charT>
- class basic_format_context {
- basic_format_args<basic_format_context> args_; // exposition only
- Out out_; // exposition only
-
- public:
- using iterator = Out;
- using char_type = charT;
- template<class T> using formatter_type = formatter<T, charT>;
-
- basic_format_arg<basic_format_context> arg(size_t id) const;
- std::locale locale();
-
- iterator out();
- void advance_to(iterator it);
- };
+ template<class Out, class charT> class basic_format_context;
using format_context = basic_format_context<unspecified, char>;
using wformat_context = basic_format_context<unspecified, wchar_t>;
// [format.args], class template basic_format_args
- template<class Context>
- class basic_format_args {
- size_t size_; // exposition only
- const basic_format_arg<Context>* data_; // exposition only
-
- public:
- basic_format_args() noexcept;
-
- template<class... Args>
- basic_format_args(const format-arg-store<Context, Args...>& store) noexcept;
-
- basic_format_arg<Context> get(size_t i) const noexcept;
- };
+ template<class Context> class basic_format_args;
using format_args = basic_format_args<format_context>;
using wformat_args = basic_format_args<wformat_context>;
-
// [format.functions], formatting functions
template<class... Args>
string format(string_view fmt, const Args&... args);
@@ -90,8 +62,7 @@ namespace std {
Out out;
iter_difference_t<Out> size;
};
-
- template<class Out, class... Args>
+ template<class Out, class... Args>
format_to_n_result<Out> format_to_n(Out out, iter_difference_t<Out> n,
string_view fmt, const Args&... args);
template<class Out, class... Args>
@@ -116,99 +87,22 @@ namespace std {
size_t formatted_size(const locale& loc, wstring_view fmt, const Args&... args);
// [format.formatter], formatter
- template<> struct formatter<char, char>;
- template<> struct formatter<char, wchar_t>;
- template<> struct formatter<wchar_t, wchar_t>;
-
- template<> struct formatter<charT*, charT>;
- template<> struct formatter<const charT*, charT>;
- template<size_t N> struct formatter<const charT[N], charT>;
- template<class traits, class Allocator>
- struct formatter<basic_string<charT, traits, Allocator>, charT>;
- template<class traits>
- struct formatter<basic_string_view<charT, traits>, charT>;
+ template<class T, class charT = char> struct formatter;
// [format.parse.ctx], class template basic_format_parse_context
- template<class charT>
- class basic_format_parse_context {
- public:
- using char_type = charT;
- using const_iterator = typename basic_string_view<charT>::const_iterator;
- using iterator = const_iterator;
-
- private:
- iterator begin_; // exposition only
- iterator end_; // exposition only
- enum indexing { unknown, manual, automatic }; // exposition only
- indexing indexing_; // exposition only
- size_t next_arg_id_; // exposition only
- size_t num_args_; // exposition only
-
- public:
- constexpr explicit basic_format_parse_context(basic_string_view<charT> fmt,
- size_t num_args = 0) noexcept;
- basic_format_parse_context(const basic_format_parse_context&) = delete;
- basic_format_parse_context& operator=(const basic_format_parse_context&) = delete;
-
- constexpr const_iterator begin() const noexcept;
- constexpr const_iterator end() const noexcept;
- constexpr void advance_to(const_iterator it);
-
- constexpr size_t next_arg_id();
- constexpr void check_arg_id(size_t id);
- };
+ template<class charT> class basic_format_parse_context;
using format_parse_context = basic_format_parse_context<char>;
using wformat_parse_context = basic_format_parse_context<wchar_t>;
// [format.arguments], arguments
// [format.arg], class template basic_format_arg
- template<class Context>
- class basic_format_arg {
- public:
- class handle;
-
- private:
- using char_type = typename Context::char_type; // exposition only
-
- variant<monostate, bool, char_type,
- int, unsigned int, long long int, unsigned long long int,
- float, double, long double,
- const char_type*, basic_string_view<char_type>,
- const void*, handle> value; // exposition only
-
- template<class T> explicit basic_format_arg(const T& v) noexcept; // exposition only
- explicit basic_format_arg(float n) noexcept; // exposition only
- explicit basic_format_arg(double n) noexcept; // exposition only
- explicit basic_format_arg(long double n) noexcept; // exposition only
- explicit basic_format_arg(const char_type* s); // exposition only
-
- template<class traits>
- explicit basic_format_arg(
- basic_string_view<char_type, traits> s) noexcept; // exposition only
-
- template<class traits, class Allocator>
- explicit basic_format_arg(
- const basic_string<char_type, traits, Allocator>& s) noexcept; // exposition only
-
- explicit basic_format_arg(nullptr_t) noexcept; // exposition only
-
- template<class T>
- explicit basic_format_arg(const T* p) noexcept; // exposition only
-
- public:
- basic_format_arg() noexcept;
-
- explicit operator bool() const noexcept;
- };
+ template<class Context> class basic_format_arg;
template<class Visitor, class Context>
see below visit_format_arg(Visitor&& vis, basic_format_arg<Context> arg);
// [format.arg.store], class template format-arg-store
- template<class Context, class... Args>
- struct format-arg-store { // exposition only
- array<basic_format_arg<Context>, sizeof...(Args)> args;
- };
+ template<class Context, class... Args> struct format-arg-store; // exposition only
template<class Context = format_context, class... Args>
format-arg-store<Context, Args...>
@@ -218,43 +112,7 @@ namespace std {
make_wformat_args(const Args&... args);
// [format.error], class format_error
- class format_error : public runtime_error {
- public:
- explicit format_error(const string& what_arg);
- explicit format_error(const char* what_arg);
- };
-
- // [format.parse.ctx], class template basic_format_parse_context
- template<class charT>
- class basic_format_parse_context {
- public:
- using char_type = charT;
- using const_iterator = typename basic_string_view<charT>::const_iterator;
- using iterator = const_iterator;
-
- private:
- iterator begin_; // exposition only
- iterator end_; // exposition only
- enum indexing { unknown, manual, automatic }; // exposition only
- indexing indexing_; // exposition only
- size_t next_arg_id_; // exposition only
- size_t num_args_; // exposition only
-
- public:
- constexpr explicit basic_format_parse_context(basic_string_view<charT> fmt,
- size_t num_args = 0) noexcept;
- basic_format_parse_context(const basic_format_parse_context&) = delete;
- basic_format_parse_context& operator=(const basic_format_parse_context&) = delete;
-
- constexpr const_iterator begin() const noexcept;
- constexpr const_iterator end() const noexcept;
- constexpr void advance_to(const_iterator it);
-
- constexpr size_t next_arg_id();
- constexpr void check_arg_id(size_t id);
- };
- using format_parse_context = basic_format_parse_context<char>;
- using wformat_parse_context = basic_format_parse_context<wchar_t>;
+ class format_error;
}
*/
@@ -277,7 +135,9 @@ namespace std {
#include <__format/formatter.h>
#include <__format/formatter_bool.h>
#include <__format/formatter_char.h>
+#include <__format/formatter_floating_point.h>
#include <__format/formatter_integer.h>
+#include <__format/formatter_pointer.h>
#include <__format/formatter_string.h>
#include <__format/parser_std_format_spec.h>
#include <__variant/monostate.h>
@@ -367,6 +227,8 @@ __handle_replacement_field(const _CharT* __begin, const _CharT* __end,
[&](auto __arg) {
if constexpr (same_as<decltype(__arg), monostate>)
__throw_format_error("Argument index out of bounds");
+ else if constexpr (same_as<decltype(__arg), typename basic_format_arg<_Ctx>::handle>)
+ __arg.format(__parse_ctx, __ctx);
else {
formatter<decltype(__arg), _CharT> __formatter;
__parse_ctx.advance_to(__formatter.parse(__parse_ctx));
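
The <format> hunk above trims the header synopsis down to forward declarations, routes user-defined formatters through basic_format_arg's handle in __handle_replacement_field, and pulls in the new floating-point and pointer formatters. A minimal usage sketch, assuming a toolchain whose (still incomplete at this point) <format> already ships these pieces; Point and its formatter specialization are hypothetical user code that exercises the new handle branch:

#include <format>
#include <iostream>

struct Point { double x, y; };

// Hypothetical user-defined formatter: arguments of this type reach the
// formatting machinery through basic_format_arg's handle, the branch the
// hunk adds to __handle_replacement_field.
template <>
struct std::formatter<Point> {
    constexpr auto parse(std::format_parse_context& ctx) { return ctx.begin(); }
    auto format(const Point& p, std::format_context& ctx) const {
        return std::format_to(ctx.out(), "({}, {})", p.x, p.y);
    }
};

int main() {
    const Point p{1.5, -2.25};
    const void* addr = &p;
    // The double members and the pointer argument rely on the formatters
    // whose headers the hunk adds (formatter_floating_point / formatter_pointer).
    std::cout << std::format("p = {} at {}\n", p, addr);
}
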
diff --git a/contrib/llvm-project/libcxx/include/fstream b/contrib/llvm-project/libcxx/include/fstream
index 3d64adcb23d1..fc0a9204ed60 100644
--- a/contrib/llvm-project/libcxx/include/fstream
+++ b/contrib/llvm-project/libcxx/include/fstream
@@ -187,6 +187,7 @@ typedef basic_fstream<wchar_t> wfstream;
#include <cstdlib>
#include <istream>
#include <ostream>
+#include <version>
#if !defined(_LIBCPP_HAS_NO_FILESYSTEM_LIBRARY)
# include <filesystem>
diff --git a/contrib/llvm-project/libcxx/include/functional b/contrib/llvm-project/libcxx/include/functional
index 53a5f2bc3770..3a24b975881c 100644
--- a/contrib/llvm-project/libcxx/include/functional
+++ b/contrib/llvm-project/libcxx/include/functional
@@ -496,9 +496,9 @@ POLICY: For non-variadic implementations, the number of arguments is limited
#include <__debug>
#include <__functional/binary_function.h> // TODO: deprecate
#include <__functional/binary_negate.h>
+#include <__functional/bind.h>
#include <__functional/bind_back.h>
#include <__functional/bind_front.h>
-#include <__functional/bind.h>
#include <__functional/binder1st.h>
#include <__functional/binder2nd.h>
#include <__functional/compose.h>
diff --git a/contrib/llvm-project/libcxx/include/future b/contrib/llvm-project/libcxx/include/future
index 6b666a70f48e..e35eedf35641 100644
--- a/contrib/llvm-project/libcxx/include/future
+++ b/contrib/llvm-project/libcxx/include/future
@@ -374,6 +374,7 @@ template <class R, class Alloc> struct uses_allocator<packaged_task<R>, Alloc>;
#include <mutex>
#include <system_error>
#include <thread>
+#include <version>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/ios b/contrib/llvm-project/libcxx/include/ios
index 237d146dfb85..74c3c63bd347 100644
--- a/contrib/llvm-project/libcxx/include/ios
+++ b/contrib/llvm-project/libcxx/include/ios
@@ -214,6 +214,7 @@ storage-class-specifier const error_category& iostream_category() noexcept;
#include <__locale>
#include <iosfwd>
#include <system_error>
+#include <version>
#if !defined(_LIBCPP_HAS_NO_ATOMIC_HEADER)
#include <atomic> // for __xindex_
diff --git a/contrib/llvm-project/libcxx/include/iosfwd b/contrib/llvm-project/libcxx/include/iosfwd
index 938d712cf36b..c2ba8ee9e652 100644
--- a/contrib/llvm-project/libcxx/include/iosfwd
+++ b/contrib/llvm-project/libcxx/include/iosfwd
@@ -96,6 +96,7 @@ using u32streampos = fpos<char_traits<char32_t>::state_type>;
#include <__config>
#include <__mbstate_t.h>
+#include <version>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/iostream b/contrib/llvm-project/libcxx/include/iostream
index 7397acfc678b..793f08ab1330 100644
--- a/contrib/llvm-project/libcxx/include/iostream
+++ b/contrib/llvm-project/libcxx/include/iostream
@@ -38,6 +38,7 @@ extern wostream wclog;
#include <istream>
#include <ostream>
#include <streambuf>
+#include <version>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/iterator b/contrib/llvm-project/libcxx/include/iterator
index 4dd9902d79a2..29097a9aa8bf 100644
--- a/contrib/llvm-project/libcxx/include/iterator
+++ b/contrib/llvm-project/libcxx/include/iterator
@@ -140,6 +140,11 @@ template<class In, class Out>
template<class I1, class I2 = I1>
concept indirectly_swappable = see below; // since C++20
+template<class I1, class I2, class R, class P1 = identity,
+ class P2 = identity>
+ concept indirectly_comparable =
+ indirect_binary_predicate<R, projected<I1, P1>, projected<I2, P2>>; // since C++20
+
template<input_or_output_iterator I, sentinel_for<I> S>
requires (!same_as<I, S> && copyable<I>)
class common_iterator; // since C++20
@@ -593,17 +598,18 @@ template <class E> constexpr const E* data(initializer_list<E> il) noexcept;
#include <__iterator/erase_if_container.h>
#include <__iterator/front_insert_iterator.h>
#include <__iterator/incrementable_traits.h>
+#include <__iterator/indirectly_comparable.h>
#include <__iterator/insert_iterator.h>
-#include <__iterator/istreambuf_iterator.h>
#include <__iterator/istream_iterator.h>
-#include <__iterator/iterator.h>
-#include <__iterator/iterator_traits.h>
+#include <__iterator/istreambuf_iterator.h>
#include <__iterator/iter_move.h>
#include <__iterator/iter_swap.h>
+#include <__iterator/iterator.h>
+#include <__iterator/iterator_traits.h>
#include <__iterator/move_iterator.h>
#include <__iterator/next.h>
-#include <__iterator/ostreambuf_iterator.h>
#include <__iterator/ostream_iterator.h>
+#include <__iterator/ostreambuf_iterator.h>
#include <__iterator/prev.h>
#include <__iterator/projected.h>
#include <__iterator/readable_traits.h>
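
The <iterator> hunk above adds the C++20 indirectly_comparable concept and its __iterator/indirectly_comparable.h header, alongside an alphabetical re-sort of the includes. A minimal sketch of how a caller-side algorithm might constrain itself with the concept; any_match is a hypothetical helper, not part of libc++:

#include <functional>
#include <iostream>
#include <iterator>
#include <vector>

// Two iterator types are indirectly comparable when their (projected)
// references can be fed to the binary predicate.
template <std::input_iterator I1, std::input_iterator I2,
          class Pred = std::ranges::equal_to>
    requires std::indirectly_comparable<I1, I2, Pred>
bool any_match(I1 first1, I1 last1, I2 first2, I2 last2, Pred pred = {}) {
    for (; first1 != last1; ++first1)
        for (I2 it = first2; it != last2; ++it)
            if (pred(*first1, *it))
                return true;
    return false;
}

int main() {
    std::vector<int>  a{1, 2, 3};
    std::vector<long> b{30L, 3L};
    std::cout << std::boolalpha
              << any_match(a.begin(), a.end(), b.begin(), b.end()) << '\n'; // true
}
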
diff --git a/contrib/llvm-project/libcxx/include/latch b/contrib/llvm-project/libcxx/include/latch
index e65825991b59..2cc9222baadc 100644
--- a/contrib/llvm-project/libcxx/include/latch
+++ b/contrib/llvm-project/libcxx/include/latch
@@ -43,6 +43,7 @@ namespace std
#include <__availability>
#include <__config>
#include <atomic>
+#include <version>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/list b/contrib/llvm-project/libcxx/include/list
index c9c050a4f1f0..258fe2cee727 100644
--- a/contrib/llvm-project/libcxx/include/list
+++ b/contrib/llvm-project/libcxx/include/list
@@ -319,9 +319,7 @@ public:
_LIBCPP_INLINE_VISIBILITY
__list_iterator() _NOEXCEPT : __ptr_(nullptr)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_i(this);
-#endif
+ _VSTD::__debug_db_insert_i(this);
}
#if _LIBCPP_DEBUG_LEVEL == 2
@@ -355,29 +353,23 @@ public:
_LIBCPP_INLINE_VISIBILITY
reference operator*() const
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__dereferenceable(this),
- "Attempted to dereference a non-dereferenceable list::iterator");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__dereferenceable(this),
+ "Attempted to dereference a non-dereferenceable list::iterator");
return __ptr_->__as_node()->__value_;
}
_LIBCPP_INLINE_VISIBILITY
pointer operator->() const
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__dereferenceable(this),
- "Attempted to dereference a non-dereferenceable list::iterator");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__dereferenceable(this),
+ "Attempted to dereference a non-dereferenceable list::iterator");
return pointer_traits<pointer>::pointer_to(__ptr_->__as_node()->__value_);
}
_LIBCPP_INLINE_VISIBILITY
__list_iterator& operator++()
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__dereferenceable(this),
- "Attempted to increment a non-incrementable list::iterator");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__dereferenceable(this),
+ "Attempted to increment a non-incrementable list::iterator");
__ptr_ = __ptr_->__next_;
return *this;
}
@@ -387,10 +379,8 @@ public:
_LIBCPP_INLINE_VISIBILITY
__list_iterator& operator--()
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__decrementable(this),
- "Attempted to decrement a non-decrementable list::iterator");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__decrementable(this),
+ "Attempted to decrement a non-decrementable list::iterator");
__ptr_ = __ptr_->__prev_;
return *this;
}
@@ -439,9 +429,7 @@ public:
_LIBCPP_INLINE_VISIBILITY
__list_const_iterator() _NOEXCEPT : __ptr_(nullptr)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_i(this);
-#endif
+ _VSTD::__debug_db_insert_i(this);
}
_LIBCPP_INLINE_VISIBILITY
__list_const_iterator(const __list_iterator<_Tp, _VoidPtr>& __p) _NOEXCEPT
@@ -482,29 +470,23 @@ public:
_LIBCPP_INLINE_VISIBILITY
reference operator*() const
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__dereferenceable(this),
- "Attempted to dereference a non-dereferenceable list::const_iterator");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__dereferenceable(this),
+ "Attempted to dereference a non-dereferenceable list::const_iterator");
return __ptr_->__as_node()->__value_;
}
_LIBCPP_INLINE_VISIBILITY
pointer operator->() const
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__dereferenceable(this),
- "Attempted to dereference a non-dereferenceable list::const_iterator");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__dereferenceable(this),
+ "Attempted to dereference a non-dereferenceable list::const_iterator");
return pointer_traits<pointer>::pointer_to(__ptr_->__as_node()->__value_);
}
_LIBCPP_INLINE_VISIBILITY
__list_const_iterator& operator++()
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__dereferenceable(this),
- "Attempted to increment a non-incrementable list::const_iterator");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__dereferenceable(this),
+ "Attempted to increment a non-incrementable list::const_iterator");
__ptr_ = __ptr_->__next_;
return *this;
}
@@ -514,10 +496,8 @@ public:
_LIBCPP_INLINE_VISIBILITY
__list_const_iterator& operator--()
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__decrementable(this),
- "Attempted to decrement a non-decrementable list::const_iterator");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__decrementable(this),
+ "Attempted to decrement a non-decrementable list::const_iterator");
__ptr_ = __ptr_->__prev_;
return *this;
}
@@ -869,16 +849,12 @@ public:
list()
_NOEXCEPT_(is_nothrow_default_constructible<__node_allocator>::value)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
}
_LIBCPP_INLINE_VISIBILITY
explicit list(const allocator_type& __a) : base(__a)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
}
explicit list(size_type __n);
#if _LIBCPP_STD_VER > 11
@@ -888,9 +864,7 @@ public:
template <class = __enable_if_t<__is_allocator<_Alloc>::value> >
list(size_type __n, const value_type& __x, const allocator_type& __a) : base(__a)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
for (; __n > 0; --__n)
push_back(__x);
}
@@ -1217,9 +1191,7 @@ list<_Tp, _Alloc>::__iterator(size_type __n)
template <class _Tp, class _Alloc>
list<_Tp, _Alloc>::list(size_type __n)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
for (; __n > 0; --__n)
#ifndef _LIBCPP_CXX03_LANG
emplace_back();
@@ -1232,9 +1204,7 @@ list<_Tp, _Alloc>::list(size_type __n)
template <class _Tp, class _Alloc>
list<_Tp, _Alloc>::list(size_type __n, const allocator_type& __a) : base(__a)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
for (; __n > 0; --__n)
emplace_back();
}
@@ -1243,9 +1213,7 @@ list<_Tp, _Alloc>::list(size_type __n, const allocator_type& __a) : base(__a)
template <class _Tp, class _Alloc>
list<_Tp, _Alloc>::list(size_type __n, const value_type& __x)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
for (; __n > 0; --__n)
push_back(__x);
}
@@ -1255,9 +1223,7 @@ template <class _InpIter>
list<_Tp, _Alloc>::list(_InpIter __f, _InpIter __l,
typename enable_if<__is_cpp17_input_iterator<_InpIter>::value>::type*)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
for (; __f != __l; ++__f)
__emplace_back(*__f);
}
@@ -1268,9 +1234,7 @@ list<_Tp, _Alloc>::list(_InpIter __f, _InpIter __l, const allocator_type& __a,
typename enable_if<__is_cpp17_input_iterator<_InpIter>::value>::type*)
: base(__a)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
for (; __f != __l; ++__f)
__emplace_back(*__f);
}
@@ -1279,9 +1243,7 @@ template <class _Tp, class _Alloc>
list<_Tp, _Alloc>::list(const list& __c)
: base(__node_alloc_traits::select_on_container_copy_construction(
__c.__node_alloc())) {
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
for (const_iterator __i = __c.begin(), __e = __c.end(); __i != __e; ++__i)
push_back(*__i);
}
@@ -1290,9 +1252,7 @@ template <class _Tp, class _Alloc>
list<_Tp, _Alloc>::list(const list& __c, const __identity_t<allocator_type>& __a)
: base(__a)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
for (const_iterator __i = __c.begin(), __e = __c.end(); __i != __e; ++__i)
push_back(*__i);
}
@@ -1303,9 +1263,7 @@ template <class _Tp, class _Alloc>
list<_Tp, _Alloc>::list(initializer_list<value_type> __il, const allocator_type& __a)
: base(__a)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
for (typename initializer_list<value_type>::const_iterator __i = __il.begin(),
__e = __il.end(); __i != __e; ++__i)
push_back(*__i);
@@ -1314,9 +1272,7 @@ list<_Tp, _Alloc>::list(initializer_list<value_type> __il, const allocator_type&
template <class _Tp, class _Alloc>
list<_Tp, _Alloc>::list(initializer_list<value_type> __il)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
for (typename initializer_list<value_type>::const_iterator __i = __il.begin(),
__e = __il.end(); __i != __e; ++__i)
push_back(*__i);
@@ -1324,11 +1280,9 @@ list<_Tp, _Alloc>::list(initializer_list<value_type> __il)
template <class _Tp, class _Alloc>
inline list<_Tp, _Alloc>::list(list&& __c)
- _NOEXCEPT_(is_nothrow_move_constructible<__node_allocator>::value)
- : base(_VSTD::move(__c.__node_alloc())) {
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _NOEXCEPT_(is_nothrow_move_constructible<__node_allocator>::value)
+ : base(_VSTD::move(__c.__node_alloc())) {
+ _VSTD::__debug_db_insert_c(this);
splice(end(), __c);
}
@@ -1337,9 +1291,7 @@ inline
list<_Tp, _Alloc>::list(list&& __c, const __identity_t<allocator_type>& __a)
: base(__a)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
if (__a == __c.get_allocator())
splice(end(), __c);
else
@@ -1448,11 +1400,8 @@ template <class _Tp, class _Alloc>
typename list<_Tp, _Alloc>::iterator
list<_Tp, _Alloc>::insert(const_iterator __p, const value_type& __x)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__p)) == this,
- "list::insert(iterator, x) called with an iterator not"
- " referring to this list");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__p)) == this,
+ "list::insert(iterator, x) called with an iterator not referring to this list");
__node_allocator& __na = base::__node_alloc();
__hold_pointer __hold = __allocate_node(__na);
__node_alloc_traits::construct(__na, _VSTD::addressof(__hold->__value_), __x);
@@ -1469,10 +1418,9 @@ template <class _Tp, class _Alloc>
typename list<_Tp, _Alloc>::iterator
list<_Tp, _Alloc>::insert(const_iterator __p, size_type __n, const value_type& __x)
{
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__p)) == this,
+ "list::insert(iterator, n, x) called with an iterator not referring to this list");
#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__p)) == this,
- "list::insert(iterator, n, x) called with an iterator not"
- " referring to this list");
iterator __r(__p.__ptr_, this);
#else
iterator __r(__p.__ptr_);
@@ -1535,10 +1483,9 @@ typename list<_Tp, _Alloc>::iterator
list<_Tp, _Alloc>::insert(const_iterator __p, _InpIter __f, _InpIter __l,
typename enable_if<__is_cpp17_input_iterator<_InpIter>::value>::type*)
{
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__p)) == this,
+ "list::insert(iterator, range) called with an iterator not referring to this list");
#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__p)) == this,
- "list::insert(iterator, range) called with an iterator not"
- " referring to this list");
iterator __r(__p.__ptr_, this);
#else
iterator __r(__p.__ptr_);
@@ -1694,11 +1641,8 @@ template <class... _Args>
typename list<_Tp, _Alloc>::iterator
list<_Tp, _Alloc>::emplace(const_iterator __p, _Args&&... __args)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__p)) == this,
- "list::emplace(iterator, args...) called with an iterator not"
- " referring to this list");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__p)) == this,
+ "list::emplace(iterator, args...) called with an iterator not referring to this list");
__node_allocator& __na = base::__node_alloc();
__hold_pointer __hold = __allocate_node(__na);
__node_alloc_traits::construct(__na, _VSTD::addressof(__hold->__value_), _VSTD::forward<_Args>(__args)...);
@@ -1717,11 +1661,8 @@ template <class _Tp, class _Alloc>
typename list<_Tp, _Alloc>::iterator
list<_Tp, _Alloc>::insert(const_iterator __p, value_type&& __x)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__p)) == this,
- "list::insert(iterator, x) called with an iterator not"
- " referring to this list");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__p)) == this,
+ "list::insert(iterator, x) called with an iterator not referring to this list");
__node_allocator& __na = base::__node_alloc();
__hold_pointer __hold = __allocate_node(__na);
__node_alloc_traits::construct(__na, _VSTD::addressof(__hold->__value_), _VSTD::move(__x));
@@ -1800,11 +1741,8 @@ template <class _Tp, class _Alloc>
typename list<_Tp, _Alloc>::iterator
list<_Tp, _Alloc>::erase(const_iterator __p)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__p)) == this,
- "list::erase(iterator) called with an iterator not"
- " referring to this list");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__p)) == this,
+ "list::erase(iterator) called with an iterator not referring to this list");
_LIBCPP_ASSERT(__p != end(),
"list::erase(iterator) called with a non-dereferenceable iterator");
__node_allocator& __na = base::__node_alloc();
@@ -1841,14 +1779,10 @@ template <class _Tp, class _Alloc>
typename list<_Tp, _Alloc>::iterator
list<_Tp, _Alloc>::erase(const_iterator __f, const_iterator __l)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__f)) == this,
- "list::erase(iterator, iterator) called with an iterator not"
- " referring to this list");
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__l)) == this,
- "list::erase(iterator, iterator) called with an iterator not"
- " referring to this list");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__f)) == this,
+ "list::erase(iterator, iterator) called with an iterator not referring to this list");
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__l)) == this,
+ "list::erase(iterator, iterator) called with an iterator not referring to this list");
if (__f != __l)
{
__node_allocator& __na = base::__node_alloc();
@@ -2006,11 +1940,8 @@ list<_Tp, _Alloc>::splice(const_iterator __p, list& __c)
{
_LIBCPP_ASSERT(this != _VSTD::addressof(__c),
"list::splice(iterator, list) called with this == &list");
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__p)) == this,
- "list::splice(iterator, list) called with an iterator not"
- " referring to this list");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__p)) == this,
+ "list::splice(iterator, list) called with an iterator not referring to this list");
if (!__c.empty())
{
__link_pointer __f = __c.__end_.__next_;
@@ -2046,17 +1977,13 @@ template <class _Tp, class _Alloc>
void
list<_Tp, _Alloc>::splice(const_iterator __p, list& __c, const_iterator __i)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__p)) == this,
- "list::splice(iterator, list, iterator) called with the first iterator"
- " not referring to this list");
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__i)) == _VSTD::addressof(__c),
- "list::splice(iterator, list, iterator) called with the second iterator"
- " not referring to the list argument");
- _LIBCPP_ASSERT(__get_const_db()->__dereferenceable(_VSTD::addressof(__i)),
- "list::splice(iterator, list, iterator) called with the second iterator"
- " not dereferenceable");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__p)) == this,
+ "list::splice(iterator, list, iterator) called with the first iterator not referring to this list");
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__i)) == _VSTD::addressof(__c),
+ "list::splice(iterator, list, iterator) called with the second iterator not referring to the list argument");
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__dereferenceable(_VSTD::addressof(__i)),
+ "list::splice(iterator, list, iterator) called with the second iterator not dereferenceable");
+
if (__p.__ptr_ != __i.__ptr_ && __p.__ptr_ != __i.__ptr_->__next_)
{
__link_pointer __f = __i.__ptr_;
@@ -2091,23 +2018,20 @@ template <class _Tp, class _Alloc>
void
list<_Tp, _Alloc>::splice(const_iterator __p, list& __c, const_iterator __f, const_iterator __l)
{
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__p)) == this,
+ "list::splice(iterator, list, iterator, iterator) called with first iterator not referring to this list");
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__f)) == _VSTD::addressof(__c),
+ "list::splice(iterator, list, iterator, iterator) called with second iterator not referring to the list argument");
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__l)) == _VSTD::addressof(__c),
+ "list::splice(iterator, list, iterator, iterator) called with third iterator not referring to the list argument");
+
#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__p)) == this,
- "list::splice(iterator, list, iterator, iterator) called with first iterator not"
- " referring to this list");
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__f)) == _VSTD::addressof(__c),
- "list::splice(iterator, list, iterator, iterator) called with second iterator not"
- " referring to the list argument");
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__l)) == _VSTD::addressof(__c),
- "list::splice(iterator, list, iterator, iterator) called with third iterator not"
- " referring to the list argument");
if (this == _VSTD::addressof(__c))
{
for (const_iterator __i = __f; __i != __l; ++__i)
- _LIBCPP_ASSERT(__i != __p,
- "list::splice(iterator, list, iterator, iterator)"
- " called with the first iterator within the range"
- " of the second and third iterators");
+ _LIBCPP_DEBUG_ASSERT(__i != __p,
+ "list::splice(iterator, list, iterator, iterator)"
+ " called with the first iterator within the range of the second and third iterators");
}
#endif
if (__f != __l)
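
The <list> hunks above collapse the per-call-site "#if _LIBCPP_DEBUG_LEVEL == 2 ... #endif" blocks into single _LIBCPP_DEBUG_ASSERT calls and _VSTD::__debug_db_insert_i/__debug_db_insert_c helpers, so each check compiles away in non-debug builds without a preprocessor block at every site. A hedged sketch of that consolidation pattern; the MY_* names and iterator_stub below are illustrative stand-ins, not libc++'s actual definitions:

#include <cstdio>
#include <cstdlib>

#define MY_DEBUG_LEVEL 2

#if MY_DEBUG_LEVEL == 2
#  define MY_DEBUG_ASSERT(cond, msg)                                    \
       ((cond) ? (void)0                                                \
               : (std::fprintf(stderr, "%s\n", msg), std::abort()))
inline void my_debug_db_insert(const void* it)   // stand-in for __debug_db_insert_i
{ std::printf("registering iterator %p\n", it); }
#else
#  define MY_DEBUG_ASSERT(cond, msg) ((void)0)
inline void my_debug_db_insert(const void*) {}   // compiles away to nothing
#endif

struct iterator_stub {
    const void* container = nullptr;
    iterator_stub() { my_debug_db_insert(this); }  // one call, no #if at the call site
    int operator*() const {
        MY_DEBUG_ASSERT(container != nullptr,
                        "Attempted to dereference a non-dereferenceable iterator");
        return 0;
    }
};

int main() {
    iterator_stub it;
    it.container = &it;   // pretend the iterator now refers into a container
    (void)*it;            // passes the debug check
}
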
diff --git a/contrib/llvm-project/libcxx/include/locale b/contrib/llvm-project/libcxx/include/locale
index 86af26c3e35e..7c2d2361f751 100644
--- a/contrib/llvm-project/libcxx/include/locale
+++ b/contrib/llvm-project/libcxx/include/locale
@@ -416,11 +416,11 @@ struct __num_get
unsigned& __dc, _CharT __thousands_sep, const string& __grouping,
unsigned* __g, unsigned*& __g_end, const _CharT* __atoms);
private:
- template<typename T>
- const T* __do_widen_p(ios_base& __iob, T* __atoms) const
+ template<typename _Tp>
+ const _Tp* __do_widen_p(ios_base& __iob, _Tp* __atoms) const
{
locale __loc = __iob.getloc();
- use_facet<ctype<T> >(__loc).widen(__src, __src + 26, __atoms);
+ use_facet<ctype<_Tp> >(__loc).widen(__src, __src + 26, __atoms);
return __atoms;
}
diff --git a/contrib/llvm-project/libcxx/include/math.h b/contrib/llvm-project/libcxx/include/math.h
index 850cdcfb32f6..e59ac6be11bf 100644
--- a/contrib/llvm-project/libcxx/include/math.h
+++ b/contrib/llvm-project/libcxx/include/math.h
@@ -305,9 +305,9 @@ long double truncl(long double x);
// back to C++ linkage before including these C++ headers.
extern "C++" {
+#include <limits>
#include <stdlib.h>
#include <type_traits>
-#include <limits>
// signbit
diff --git a/contrib/llvm-project/libcxx/include/memory b/contrib/llvm-project/libcxx/include/memory
index 3ed1530f2a75..db59936707e4 100644
--- a/contrib/llvm-project/libcxx/include/memory
+++ b/contrib/llvm-project/libcxx/include/memory
@@ -181,82 +181,189 @@ template <class InputIterator, class ForwardIterator>
ForwardIterator
uninitialized_copy(InputIterator first, InputIterator last, ForwardIterator result);
+namespace ranges {
+
+template<class InputIterator, class OutputIterator>
+using uninitialized_copy_result = in_out_result<InputIterator, OutputIterator>; // since C++20
+
+template<input_iterator InputIterator, sentinel-for<InputIterator> Sentinel1, nothrow-forward-iterator OutputIterator, nothrow-sentinel-for<OutputIterator> Sentinel2>
+ requires constructible_from<iter_value_t<OutputIterator>, iter_reference_t<InputIterator>>
+uninitialized_copy_result<InputIterator, OutputIterator>
+uninitialized_copy(InputIterator ifirst, Sentinel1 ilast, OutputIterator ofirst, Sentinel2 olast); // since C++20
+
+template<input_range InputRange, nothrow-forward-range OutputRange>
+ requires constructible_from<range_value_t<OutputRange>, range_reference_t<InputRange>>
+uninitialized_copy_result<borrowed_iterator_t<InputRange>, borrowed_iterator_t<OutputRange>>
+uninitialized_copy(InputRange&& in_range, OutputRange&& out_range); // since C++20
+
+}
+
template <class InputIterator, class Size, class ForwardIterator>
ForwardIterator
uninitialized_copy_n(InputIterator first, Size n, ForwardIterator result);
+namespace ranges {
+
+template<class InputIterator, class OutputIterator>
+using uninitialized_copy_n_result = in_out_result<InputIterator, OutputIterator>; // since C++20
+
+template<input_iterator InputIterator, nothrow-forward-iterator OutputIterator, nothrow-sentinel-for<OutputIterator> Sentinel>
+ requires constructible_from<iter_value_t<OutputIterator>, iter_reference_t<InputIterator>>
+uninitialized_copy_n_result<InputIterator, OutputIterator>
+uninitialized_copy_n(InputIterator ifirst, iter_difference_t<InputIterator> n, OutputIterator ofirst, Sentinel olast); // since C++20
+
+}
+
template <class ForwardIterator, class T>
void uninitialized_fill(ForwardIterator first, ForwardIterator last, const T& x);
+namespace ranges {
+
template <nothrow-forward-iterator ForwardIterator, nothrow-sentinel-for<ForwardIterator> Sentinel, class T>
requires constructible_from<iter_value_t<ForwardIterator>, const T&>
-ForwardIterator ranges::uninitialized_fill(ForwardIterator first, Sentinel last, const T& x); // since C++20
+ForwardIterator uninitialized_fill(ForwardIterator first, Sentinel last, const T& x); // since C++20
template <nothrow-forward-range ForwardRange, class T>
requires constructible_from<range_value_t<ForwardRange>, const T&>
-borrowed_iterator_t<ForwardRange> ranges::uninitialized_fill(ForwardRange&& range, const T& x); // since C++20
+borrowed_iterator_t<ForwardRange> uninitialized_fill(ForwardRange&& range, const T& x); // since C++20
+
+}
template <class ForwardIterator, class Size, class T>
ForwardIterator
uninitialized_fill_n(ForwardIterator first, Size n, const T& x);
+namespace ranges {
+
template <nothrow-forward-iterator ForwardIterator, class T>
requires constructible_from<iter_value_t<ForwardIterator>, const T&>
-ForwardIterator ranges::uninitialized_fill_n(ForwardIterator first, iter_difference_t<ForwardIterator> n); // since C++20
+ForwardIterator uninitialized_fill_n(ForwardIterator first, iter_difference_t<ForwardIterator> n); // since C++20
+
+}
template <class T, class ...Args>
constexpr T* construct_at(T* location, Args&& ...args); // since C++20
+namespace ranges {
+ template<class T, class... Args>
+ constexpr T* construct_at(T* location, Args&&... args); // since C++20
+}
+
template <class T>
void destroy_at(T* location); // constexpr in C++20
+namespace ranges {
+ template<destructible T>
+ constexpr void destroy_at(T* location) noexcept; // since C++20
+}
+
template <class ForwardIterator>
void destroy(ForwardIterator first, ForwardIterator last); // constexpr in C++20
+namespace ranges {
+ template<nothrow-input-iterator InputIterator, nothrow-sentinel-for<InputIterator> Sentinel>
+ requires destructible<iter_value_t<InputIterator>>
+ constexpr InputIterator destroy(InputIterator first, Sentinel last) noexcept; // since C++20
+ template<nothrow-input-range InputRange>
+ requires destructible<range_value_t<InputRange>>
+ constexpr borrowed_iterator_t<InputRange> destroy(InputRange&& range) noexcept; // since C++20
+}
+
template <class ForwardIterator, class Size>
ForwardIterator destroy_n(ForwardIterator first, Size n); // constexpr in C++20
+namespace ranges {
+ template<nothrow-input-iterator InputIterator>
+ requires destructible<iter_value_t<InputIterator>>
+ constexpr InputIterator destroy_n(InputIterator first, iter_difference_t<InputIterator> n) noexcept; // since C++20
+}
+
template <class InputIterator, class ForwardIterator>
ForwardIterator uninitialized_move(InputIterator first, InputIterator last, ForwardIterator result);
+namespace ranges {
+
+template<class InputIterator, class OutputIterator>
+using uninitialized_move_result = in_out_result<InputIterator, OutputIterator>; // since C++20
+
+template <input_iterator InputIterator, sentinel_for<InputIterator> Sentinel1, nothrow-forward-iterator OutputIterator, nothrow-sentinel-for<O> Sentinel2>
+ requires constructible_from<iter_value_t<OutputIterator>, iter_rvalue_reference_t<InputIterator>>
+uninitialized_move_result<InputIterator, OutputIterator>
+uninitialized_move(InputIterator ifirst, Sentinel1 ilast, OutputIterator ofirst, Sentinel2 olast); // since C++20
+
+template<input_range InputRange, nothrow-forward-range OutputRange>
+ requires constructible_from<range_value_t<OutputRange>, range_rvalue_reference_t<InputRange>>
+uninitialized_move_result<borrowed_iterator_t<InputRange>, borrowed_iterator_t<OutputRange>>
+uninitialized_move(InputRange&& in_range, OutputRange&& out_range); // since C++20
+
+}
+
template <class InputIterator, class Size, class ForwardIterator>
pair<InputIterator,ForwardIterator> uninitialized_move_n(InputIterator first, Size n, ForwardIterator result);
+namespace ranges {
+
+template<class InputIterator, class OutputIterator>
+using uninitialized_move_n_result = in_out_result<InputIterator, OutputIterator>; // since C++20
+
+template<input_iterator InputIterator, nothrow-forward-iterator OutputIterator, nothrow-sentinel-for<OutputIterator> Sentinel>
+ requires constructible_from<iter_value_t<OutputIterator>, iter_rvalue_reference_t<InputIterator>>
+uninitialized_move_n_result<InputIterator, OutputIterator>
+uninitialized_move_n(InputIterator ifirst, iter_difference_t<InputIterator> n, OutputIterator ofirst, Sentinel olast); // since C++20
+
+}
+
template <class ForwardIterator>
void uninitialized_value_construct(ForwardIterator first, ForwardIterator last);
+namespace ranges {
+
template <nothrow-forward-iterator ForwardIterator, nothrow-sentinel-for<ForwardIterator> Sentinel>
requires default_initializable<iter_value_t<ForwardIterator>>
- ForwardIterator ranges::uninitialized_value_construct(ForwardIterator first, Sentinel last); // since C++20
+ ForwardIterator uninitialized_value_construct(ForwardIterator first, Sentinel last); // since C++20
template <nothrow-forward-range ForwardRange>
requires default_initializable<range_value_t<ForwardRange>>
- borrowed_iterator_t<ForwardRange> ranges::uninitialized_value_construct(ForwardRange&& r); // since C++20
+ borrowed_iterator_t<ForwardRange> uninitialized_value_construct(ForwardRange&& r); // since C++20
+
+}
template <class ForwardIterator, class Size>
ForwardIterator uninitialized_value_construct_n(ForwardIterator first, Size n);
+namespace ranges {
+
template <nothrow-forward-iterator ForwardIterator>
requires default_initializable<iter_value_t<ForwardIterator>>
- ForwardIterator ranges::uninitialized_value_construct_n(ForwardIterator first, iter_difference_t<ForwardIterator> n); // since C++20
+ ForwardIterator uninitialized_value_construct_n(ForwardIterator first, iter_difference_t<ForwardIterator> n); // since C++20
+
+}
template <class ForwardIterator>
void uninitialized_default_construct(ForwardIterator first, ForwardIterator last);
+namespace ranges {
+
template <nothrow-forward-iterator ForwardIterator, nothrow-sentinel-for<ForwardIterator> Sentinel>
requires default_initializable<iter_value_t<ForwardIterator>>
- ForwardIterator ranges::uninitialized_default_construct(ForwardIterator first, Sentinel last); // since C++20
+ ForwardIterator uninitialized_default_construct(ForwardIterator first, Sentinel last); // since C++20
template <nothrow-forward-range ForwardRange>
requires default_initializable<range_value_t<ForwardRange>>
- borrowed_iterator_t<ForwardRange> ranges::uninitialized_default_construct(ForwardRange&& r); // since C++20
+ borrowed_iterator_t<ForwardRange> uninitialized_default_construct(ForwardRange&& r); // since C++20
+
+}
template <class ForwardIterator, class Size>
ForwardIterator uninitialized_default_construct_n(ForwardIterator first, Size n);
+namespace ranges {
+
template <nothrow-forward-iterator ForwardIterator>
requires default_initializable<iter_value_t<ForwardIterator>>
- ForwardIterator ranges::uninitialized_default_construct_n(ForwardIterator first, iter_difference_t<ForwardIterator> n); // since C++20
+ ForwardIterator uninitialized_default_construct_n(ForwardIterator first, iter_difference_t<ForwardIterator> n); // since C++20
+
+}
template <class Y> struct auto_ptr_ref {}; // deprecated in C++11, removed in C++17
@@ -708,6 +815,7 @@ void* align(size_t alignment, size_t size, void*& ptr, size_t& space);
#include <__memory/concepts.h>
#include <__memory/construct_at.h>
#include <__memory/pointer_traits.h>
+#include <__memory/ranges_construct_at.h>
#include <__memory/ranges_uninitialized_algorithms.h>
#include <__memory/raw_storage_iterator.h>
#include <__memory/shared_ptr.h>
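
The <memory> hunk above adds the C++20 ranges overloads of the uninitialized-memory algorithms plus ranges::construct_at/destroy to the synopsis and includes the new __memory/ranges_construct_at.h. A minimal usage sketch, assuming a standard library that implements these ranges algorithms:

#include <iostream>
#include <memory>
#include <string>
#include <vector>

int main() {
    std::vector<std::string> src{"alpha", "beta", "gamma"};

    // Raw, uninitialized storage for the same number of strings.
    std::allocator<std::string> alloc;
    std::string* first = alloc.allocate(src.size());
    std::string* last  = first + src.size();

    // Iterator overload from the synopsis: copy-construct into the raw
    // storage, then destroy the constructed objects and free the memory.
    auto [in, out] = std::ranges::uninitialized_copy(src.begin(), src.end(), first, last);
    (void)in;
    for (std::string* p = first; p != out; ++p)
        std::cout << *p << '\n';
    std::ranges::destroy(first, out);
    alloc.deallocate(first, src.size());
}
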
diff --git a/contrib/llvm-project/libcxx/include/module.modulemap b/contrib/llvm-project/libcxx/include/module.modulemap
index 1dc6db406a74..90fae9bb8362 100644
--- a/contrib/llvm-project/libcxx/include/module.modulemap
+++ b/contrib/llvm-project/libcxx/include/module.modulemap
@@ -246,6 +246,8 @@ module std [system] {
module generate { private header "__algorithm/generate.h" }
module generate_n { private header "__algorithm/generate_n.h" }
module half_positive { private header "__algorithm/half_positive.h" }
+ module in_in_result { private header "__algorithm/in_in_result.h" }
+ module in_out_result { private header "__algorithm/in_out_result.h" }
module includes { private header "__algorithm/includes.h" }
module inplace_merge { private header "__algorithm/inplace_merge.h" }
module is_heap { private header "__algorithm/is_heap.h" }
@@ -362,6 +364,17 @@ module std [system] {
module chrono {
header "chrono"
export *
+
+ module __chrono {
+ module calendar { private header "__chrono/calendar.h" }
+ module convert_to_timespec { private header "__chrono/convert_to_timespec.h" }
+ module duration { private header "__chrono/duration.h" }
+ module file_clock { private header "__chrono/file_clock.h" }
+ module high_resolution_clock { private header "__chrono/high_resolution_clock.h" }
+ module steady_clock { private header "__chrono/steady_clock.h" }
+ module system_clock { private header "__chrono/system_clock.h" }
+ module time_point { private header "__chrono/time_point.h" }
+ }
}
module codecvt {
header "codecvt"
@@ -430,8 +443,8 @@ module std [system] {
module __coroutine {
module coroutine_handle { private header "__coroutine/coroutine_handle.h" }
module coroutine_traits { private header "__coroutine/coroutine_traits.h" }
- module trivial_awaitables { private header "__coroutine/trivial_awaitables.h" }
module noop_coroutine_handle { private header "__coroutine/noop_coroutine_handle.h" }
+ module trivial_awaitables { private header "__coroutine/trivial_awaitables.h" }
}
}
module deque {
@@ -461,8 +474,8 @@ module std [system] {
module file_type { private header "__filesystem/file_type.h" }
module filesystem_error { private header "__filesystem/filesystem_error.h" }
module operations { private header "__filesystem/operations.h" }
- module path_iterator { private header "__filesystem/path_iterator.h" }
module path { private header "__filesystem/path.h" }
+ module path_iterator { private header "__filesystem/path_iterator.h" }
module perm_options { private header "__filesystem/perm_options.h" }
module perms { private header "__filesystem/perms.h" }
module recursive_directory_iterator { private header "__filesystem/recursive_directory_iterator.h" }
@@ -475,25 +488,27 @@ module std [system] {
export *
module __format {
- module format_arg { private header "__format/format_arg.h" }
- module format_args { private header "__format/format_args.h" }
+ module format_arg { private header "__format/format_arg.h" }
+ module format_args { private header "__format/format_args.h" }
module format_context {
private header "__format/format_context.h"
export optional
export locale
}
- module format_error { private header "__format/format_error.h" }
- module format_fwd { private header "__format/format_fwd.h" }
- module format_parse_context { private header "__format/format_parse_context.h" }
- module format_string { private header "__format/format_string.h" }
- module format_to_n_result { private header "__format/format_to_n_result.h" }
- module formatter { private header "__format/formatter.h" }
- module formatter_bool { private header "__format/formatter_bool.h" }
- module formatter_char { private header "__format/formatter_char.h" }
- module formatter_integer { private header "__format/formatter_integer.h" }
- module formatter_integral { private header "__format/formatter_integral.h" }
- module formatter_string { private header "__format/formatter_string.h" }
- module parser_std_format_spec { private header "__format/parser_std_format_spec.h" }
+ module format_error { private header "__format/format_error.h" }
+ module format_fwd { private header "__format/format_fwd.h" }
+ module format_parse_context { private header "__format/format_parse_context.h" }
+ module format_string { private header "__format/format_string.h" }
+ module format_to_n_result { private header "__format/format_to_n_result.h" }
+ module formatter { private header "__format/formatter.h" }
+ module formatter_bool { private header "__format/formatter_bool.h" }
+ module formatter_char { private header "__format/formatter_char.h" }
+ module formatter_floating_point { private header "__format/formatter_floating_point.h" }
+ module formatter_integer { private header "__format/formatter_integer.h" }
+ module formatter_integral { private header "__format/formatter_integral.h" }
+ module formatter_pointer { private header "__format/formatter_pointer.h" }
+ module formatter_string { private header "__format/formatter_string.h" }
+ module parser_std_format_spec { private header "__format/parser_std_format_spec.h" }
}
}
module forward_list {
@@ -579,10 +594,7 @@ module std [system] {
module __iterator {
module access { private header "__iterator/access.h" }
- module advance {
- private header "__iterator/advance.h"
- export __function_like
- }
+ module advance { private header "__iterator/advance.h" }
module back_insert_iterator { private header "__iterator/back_insert_iterator.h" }
module common_iterator { private header "__iterator/common_iterator.h" }
module concepts { private header "__iterator/concepts.h" }
@@ -594,6 +606,7 @@ module std [system] {
module erase_if_container { private header "__iterator/erase_if_container.h" }
module front_insert_iterator { private header "__iterator/front_insert_iterator.h" }
module incrementable_traits { private header "__iterator/incrementable_traits.h" }
+ module indirectly_comparable { private header "__iterator/indirectly_comparable.h" }
module insert_iterator { private header "__iterator/insert_iterator.h" }
module istream_iterator { private header "__iterator/istream_iterator.h" }
module istreambuf_iterator { private header "__iterator/istreambuf_iterator.h" }
@@ -602,16 +615,10 @@ module std [system] {
module iterator { private header "__iterator/iterator.h" }
module iterator_traits { private header "__iterator/iterator_traits.h" }
module move_iterator { private header "__iterator/move_iterator.h" }
- module next {
- private header "__iterator/next.h"
- export __function_like
- }
+ module next { private header "__iterator/next.h" }
module ostream_iterator { private header "__iterator/ostream_iterator.h" }
module ostreambuf_iterator { private header "__iterator/ostreambuf_iterator.h" }
- module prev {
- private header "__iterator/prev.h"
- export __function_like
- }
+ module prev { private header "__iterator/prev.h" }
module projected { private header "__iterator/projected.h" }
module readable_traits { private header "__iterator/readable_traits.h" }
module reverse_access { private header "__iterator/reverse_access.h" }
@@ -659,10 +666,8 @@ module std [system] {
module concepts { private header "__memory/concepts.h" }
module construct_at { private header "__memory/construct_at.h" }
module pointer_traits { private header "__memory/pointer_traits.h" }
- module ranges_uninitialized_algorithms {
- private header "__memory/ranges_uninitialized_algorithms.h"
- export __function_like
- }
+ module ranges_construct_at { private header "__memory/ranges_construct_at.h" }
+ module ranges_uninitialized_algorithms { private header "__memory/ranges_uninitialized_algorithms.h" }
module raw_storage_iterator { private header "__memory/raw_storage_iterator.h" }
module shared_ptr { private header "__memory/shared_ptr.h" }
module temporary_buffer { private header "__memory/temporary_buffer.h" }
@@ -793,11 +798,12 @@ module std [system] {
module iota_view { private header "__ranges/iota_view.h" }
module join_view { private header "__ranges/join_view.h" }
module non_propagating_cache { private header "__ranges/non_propagating_cache.h" }
+ module owning_view { private header "__ranges/owning_view.h" }
module range_adaptor { private header "__ranges/range_adaptor.h" }
module ref_view { private header "__ranges/ref_view.h" }
module reverse_view { private header "__ranges/reverse_view.h" }
- module size { private header "__ranges/size.h" }
module single_view { private header "__ranges/single_view.h" }
+ module size { private header "__ranges/size.h" }
module subrange { private header "__ranges/subrange.h" }
module take_view { private header "__ranges/take_view.h" }
module transform_view {
@@ -884,7 +890,8 @@ module std [system] {
export *
module __thread {
- module poll_with_backoff { private header "__thread/poll_with_backoff.h" }
+ module poll_with_backoff { private header "__thread/poll_with_backoff.h" }
+ module timed_backoff_policy { private header "__thread/timed_backoff_policy.h" }
}
}
module tuple {
@@ -968,10 +975,9 @@ module std [system] {
module __bits { private header "__bits" export * }
module __debug { header "__debug" export * }
module __errc { private header "__errc" export * }
- module __function_like { private header "__function_like.h" export * }
module __hash_table { header "__hash_table" export * }
module __locale { private header "__locale" export * }
- module __mbstate { private header "__mbstate_t.h" export * }
+ module __mbstate_t { private header "__mbstate_t.h" export * }
module __mutex_base { private header "__mutex_base" export * }
module __node_handle { private header "__node_handle" export * }
module __nullptr { header "__nullptr" export * }
diff --git a/contrib/llvm-project/libcxx/include/numbers b/contrib/llvm-project/libcxx/include/numbers
index ede4e33c7a88..2ac36695b888 100644
--- a/contrib/llvm-project/libcxx/include/numbers
+++ b/contrib/llvm-project/libcxx/include/numbers
@@ -73,42 +73,42 @@ _LIBCPP_BEGIN_NAMESPACE_STD
namespace numbers {
-template <class T>
+template <class _Tp>
inline constexpr bool __false = false;
-template <class T>
+template <class _Tp>
struct __illformed
{
- static_assert(__false<T>, "A program that instantiates a primary template of a mathematical constant variable template is ill-formed.");
+ static_assert(__false<_Tp>, "A program that instantiates a primary template of a mathematical constant variable template is ill-formed.");
};
-template <class T> inline constexpr T e_v = __illformed<T>{};
-template <class T> inline constexpr T log2e_v = __illformed<T>{};
-template <class T> inline constexpr T log10e_v = __illformed<T>{};
-template <class T> inline constexpr T pi_v = __illformed<T>{};
-template <class T> inline constexpr T inv_pi_v = __illformed<T>{};
-template <class T> inline constexpr T inv_sqrtpi_v = __illformed<T>{};
-template <class T> inline constexpr T ln2_v = __illformed<T>{};
-template <class T> inline constexpr T ln10_v = __illformed<T>{};
-template <class T> inline constexpr T sqrt2_v = __illformed<T>{};
-template <class T> inline constexpr T sqrt3_v = __illformed<T>{};
-template <class T> inline constexpr T inv_sqrt3_v = __illformed<T>{};
-template <class T> inline constexpr T egamma_v = __illformed<T>{};
-template <class T> inline constexpr T phi_v = __illformed<T>{};
-
-template <floating_point T> inline constexpr T e_v<T> = 2.718281828459045235360287471352662;
-template <floating_point T> inline constexpr T log2e_v<T> = 1.442695040888963407359924681001892;
-template <floating_point T> inline constexpr T log10e_v<T> = 0.434294481903251827651128918916605;
-template <floating_point T> inline constexpr T pi_v<T> = 3.141592653589793238462643383279502;
-template <floating_point T> inline constexpr T inv_pi_v<T> = 0.318309886183790671537767526745028;
-template <floating_point T> inline constexpr T inv_sqrtpi_v<T> = 0.564189583547756286948079451560772;
-template <floating_point T> inline constexpr T ln2_v<T> = 0.693147180559945309417232121458176;
-template <floating_point T> inline constexpr T ln10_v<T> = 2.302585092994045684017991454684364;
-template <floating_point T> inline constexpr T sqrt2_v<T> = 1.414213562373095048801688724209698;
-template <floating_point T> inline constexpr T sqrt3_v<T> = 1.732050807568877293527446341505872;
-template <floating_point T> inline constexpr T inv_sqrt3_v<T> = 0.577350269189625764509148780501957;
-template <floating_point T> inline constexpr T egamma_v<T> = 0.577215664901532860606512090082402;
-template <floating_point T> inline constexpr T phi_v<T> = 1.618033988749894848204586834365638;
+template <class _Tp> inline constexpr _Tp e_v = __illformed<_Tp>{};
+template <class _Tp> inline constexpr _Tp log2e_v = __illformed<_Tp>{};
+template <class _Tp> inline constexpr _Tp log10e_v = __illformed<_Tp>{};
+template <class _Tp> inline constexpr _Tp pi_v = __illformed<_Tp>{};
+template <class _Tp> inline constexpr _Tp inv_pi_v = __illformed<_Tp>{};
+template <class _Tp> inline constexpr _Tp inv_sqrtpi_v = __illformed<_Tp>{};
+template <class _Tp> inline constexpr _Tp ln2_v = __illformed<_Tp>{};
+template <class _Tp> inline constexpr _Tp ln10_v = __illformed<_Tp>{};
+template <class _Tp> inline constexpr _Tp sqrt2_v = __illformed<_Tp>{};
+template <class _Tp> inline constexpr _Tp sqrt3_v = __illformed<_Tp>{};
+template <class _Tp> inline constexpr _Tp inv_sqrt3_v = __illformed<_Tp>{};
+template <class _Tp> inline constexpr _Tp egamma_v = __illformed<_Tp>{};
+template <class _Tp> inline constexpr _Tp phi_v = __illformed<_Tp>{};
+
+template <floating_point _Tp> inline constexpr _Tp e_v<_Tp> = 2.718281828459045235360287471352662;
+template <floating_point _Tp> inline constexpr _Tp log2e_v<_Tp> = 1.442695040888963407359924681001892;
+template <floating_point _Tp> inline constexpr _Tp log10e_v<_Tp> = 0.434294481903251827651128918916605;
+template <floating_point _Tp> inline constexpr _Tp pi_v<_Tp> = 3.141592653589793238462643383279502;
+template <floating_point _Tp> inline constexpr _Tp inv_pi_v<_Tp> = 0.318309886183790671537767526745028;
+template <floating_point _Tp> inline constexpr _Tp inv_sqrtpi_v<_Tp> = 0.564189583547756286948079451560772;
+template <floating_point _Tp> inline constexpr _Tp ln2_v<_Tp> = 0.693147180559945309417232121458176;
+template <floating_point _Tp> inline constexpr _Tp ln10_v<_Tp> = 2.302585092994045684017991454684364;
+template <floating_point _Tp> inline constexpr _Tp sqrt2_v<_Tp> = 1.414213562373095048801688724209698;
+template <floating_point _Tp> inline constexpr _Tp sqrt3_v<_Tp> = 1.732050807568877293527446341505872;
+template <floating_point _Tp> inline constexpr _Tp inv_sqrt3_v<_Tp> = 0.577350269189625764509148780501957;
+template <floating_point _Tp> inline constexpr _Tp egamma_v<_Tp> = 0.577215664901532860606512090082402;
+template <floating_point _Tp> inline constexpr _Tp phi_v<_Tp> = 1.618033988749894848204586834365638;
inline constexpr double e = e_v<double>;
inline constexpr double log2e = log2e_v<double>;
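The <numbers> hunk above only renames the template parameters to the reserved _Tp spelling so that user macros named T cannot break the header; the constants and the ill-formed primary templates are unchanged. A minimal usage sketch of the constants themselves (standard names from <numbers>, C++20; the program is illustrative only):

    // numbers_demo.cpp -- compile with -std=c++20.
    #include <iostream>
    #include <numbers>

    int main() {
        // The primary variable templates are ill-formed for non-floating-point
        // types; the floating_point specializations above supply the values.
        constexpr double tau  = 2.0 * std::numbers::pi;          // pi_v<double>
        constexpr float  invp = std::numbers::inv_pi_v<float>;   // converted at compile time

        std::cout << "tau   = " << tau << '\n'
                  << "1/pi  = " << invp << '\n'
                  << "sqrt2 = " << std::numbers::sqrt2_v<long double> << '\n';
    }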
diff --git a/contrib/llvm-project/libcxx/include/optional b/contrib/llvm-project/libcxx/include/optional
index 63753d9f9f03..917e9b5cdb57 100644
--- a/contrib/llvm-project/libcxx/include/optional
+++ b/contrib/llvm-project/libcxx/include/optional
@@ -1167,8 +1167,8 @@ public:
};
#if _LIBCPP_STD_VER >= 17
-template<class T>
- optional(T) -> optional<T>;
+template<class _Tp>
+ optional(_Tp) -> optional<_Tp>;
#endif
// Comparisons between optionals
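The <optional> change above is the same parameter rename; the deduction guide itself is C++17. A quick sketch of what the guide enables (illustrative, -std=c++17 or later):

    #include <optional>
    #include <string>

    int main() {
        std::optional o1 = 42;                  // optional(T) -> optional<T> deduces optional<int>
        std::optional o2 = std::string("hi");   // deduces optional<std::string>
        return o1.value() + static_cast<int>(o2->size());
    }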
diff --git a/contrib/llvm-project/libcxx/include/queue b/contrib/llvm-project/libcxx/include/queue
index 03081eb844ba..9e1257b25e0e 100644
--- a/contrib/llvm-project/libcxx/include/queue
+++ b/contrib/llvm-project/libcxx/include/queue
@@ -41,6 +41,8 @@ public:
explicit queue(const container_type& c);
explicit queue(container_type&& c);
+ template<class InputIterator>
+ queue(InputIterator first, InputIterator last); // since C++23
template <class Alloc>
explicit queue(const Alloc& a);
template <class Alloc>
@@ -51,6 +53,8 @@ public:
queue(const queue& q, const Alloc& a);
template <class Alloc>
queue(queue&& q, const Alloc& a);
+ template <class InputIterator, class Alloc>
+ queue(InputIterator first, InputIterator last, const Alloc&); // since C++23
bool empty() const;
size_type size() const;
@@ -71,9 +75,17 @@ public:
template<class Container>
queue(Container) -> queue<typename Container::value_type, Container>; // C++17
+template<class InputIterator>
+ queue(InputIterator, InputIterator) -> queue<iter-value-type<InputIterator>>; // since C++23
+
template<class Container, class Allocator>
queue(Container, Allocator) -> queue<typename Container::value_type, Container>; // C++17
+template<class InputIterator, class Allocator>
+ queue(InputIterator, InputIterator, Allocator)
+ -> queue<iter-value-type<InputIterator>,
+ deque<iter-value-type<InputIterator>, Allocator>>; // since C++23
+
template <class T, class Container>
bool operator==(const queue<T, Container>& x,const queue<T, Container>& y);
@@ -206,13 +218,16 @@ template <class T, class Container, class Compare>
*/
#include <__config>
+#include <__iterator/iterator_traits.h>
#include <__memory/uses_allocator.h>
#include <__utility/forward.h>
#include <algorithm>
#include <compare>
#include <deque>
#include <functional>
+#include <type_traits>
#include <vector>
+#include <version>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
@@ -255,6 +270,20 @@ public:
_LIBCPP_INLINE_VISIBILITY
queue(const queue& __q) : c(__q.c) {}
+#if _LIBCPP_STD_VER > 20
+ template <class _InputIterator,
+ class = __enable_if_t<__is_cpp17_input_iterator<_InputIterator>::value>>
+ _LIBCPP_HIDE_FROM_ABI
+ queue(_InputIterator __first, _InputIterator __last) : c(__first, __last) {}
+
+ template <class _InputIterator,
+ class _Alloc,
+ class = __enable_if_t<__is_cpp17_input_iterator<_InputIterator>::value>,
+ class = __enable_if_t<uses_allocator<container_type, _Alloc>::value>>
+ _LIBCPP_HIDE_FROM_ABI
+ queue(_InputIterator __first, _InputIterator __second, const _Alloc& __alloc) : c(__first, __second, __alloc) {}
+#endif
+
_LIBCPP_INLINE_VISIBILITY
queue& operator=(const queue& __q) {c = __q.c; return *this;}
@@ -358,7 +387,7 @@ public:
operator< (const queue<_T1, _C1>& __x,const queue<_T1, _C1>& __y);
};
-#if _LIBCPP_STD_VER >= 17
+#if _LIBCPP_STD_VER > 14
template<class _Container,
class = enable_if_t<!__is_allocator<_Container>::value>
>
@@ -374,6 +403,20 @@ queue(_Container, _Alloc)
-> queue<typename _Container::value_type, _Container>;
#endif
+#if _LIBCPP_STD_VER > 20
+template <class _InputIterator,
+ class = __enable_if_t<__is_cpp17_input_iterator<_InputIterator>::value>>
+queue(_InputIterator, _InputIterator)
+ -> queue<__iter_value_type<_InputIterator>>;
+
+template <class _InputIterator,
+ class _Alloc,
+ class = __enable_if_t<__is_cpp17_input_iterator<_InputIterator>::value>,
+ class = __enable_if_t<__is_allocator<_Alloc>::value>>
+queue(_InputIterator, _InputIterator, _Alloc)
+ -> queue<__iter_value_type<_InputIterator>, deque<__iter_value_type<_InputIterator>, _Alloc>>;
+#endif
+
template <class _Tp, class _Container>
inline _LIBCPP_INLINE_VISIBILITY
bool
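The <queue> changes above track P1425 (iterator-pair constructors for the container adaptors, C++23): two new constructors plus matching deduction guides, guarded by _LIBCPP_STD_VER > 20. A hedged usage sketch, assuming a toolchain in C++2b mode that ships this change:

    #include <queue>
    #include <vector>

    int main() {
        std::vector<int> v{1, 2, 3, 4};

        // C++23: build the adaptor straight from an iterator range;
        // the new deduction guide picks queue<int> (backed by deque<int>).
        std::queue q(v.begin(), v.end());

        int front = q.front();      // 1 -- elements keep their original order
        q.pop();
        return front + q.front();   // 1 + 2
    }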
diff --git a/contrib/llvm-project/libcxx/include/random b/contrib/llvm-project/libcxx/include/random
index c88bfce03b19..2e271cec46ad 100644
--- a/contrib/llvm-project/libcxx/include/random
+++ b/contrib/llvm-project/libcxx/include/random
@@ -1715,6 +1715,7 @@ class piecewise_linear_distribution
#include <__random/uniform_real_distribution.h>
#include <__random/weibull_distribution.h>
#include <initializer_list>
+#include <version>
#include <algorithm> // for backward compatibility; TODO remove it
#include <cmath> // for backward compatibility; TODO remove it
diff --git a/contrib/llvm-project/libcxx/include/ranges b/contrib/llvm-project/libcxx/include/ranges
index dd7decf66fa8..eb4492376c5c 100644
--- a/contrib/llvm-project/libcxx/include/ranges
+++ b/contrib/llvm-project/libcxx/include/ranges
@@ -135,6 +135,13 @@ namespace std::ranges {
template<class T>
inline constexpr bool enable_borrowed_range<ref_view<T>> = true;
+ template<range R>
+ requires see below
+ class owning_view;
+
+ template<class T>
+ inline constexpr bool enable_borrowed_range<owning_view<T>> = enable_borrowed_range<T>;
+
// [range.drop], drop view
template<view V>
class drop_view;
@@ -207,8 +214,8 @@ namespace std::ranges {
#include <__ranges/dangling.h>
#include <__ranges/data.h>
#include <__ranges/drop_view.h>
-#include <__ranges/empty_view.h>
#include <__ranges/empty.h>
+#include <__ranges/empty_view.h>
#include <__ranges/enable_borrowed_range.h>
#include <__ranges/enable_view.h>
#include <__ranges/iota_view.h>
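The <ranges> synopsis above gains ranges::owning_view (P2415R2), a move-only view that stores an rvalue range by value instead of referring to it, plus its enable_borrowed_range opt-in. A hedged sketch of the intended use, assuming a C++2b library that already exposes the class:

    #include <ranges>
    #include <vector>

    int main() {
        // owning_view takes the temporary vector by move, so the view remains
        // valid even though the temporary it was built from is gone.
        std::ranges::owning_view ov{std::vector<int>{1, 2, 3}};

        int sum = 0;
        for (int x : ov)
            sum += x;
        return sum;   // 6
    }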
diff --git a/contrib/llvm-project/libcxx/include/ratio b/contrib/llvm-project/libcxx/include/ratio
index 16b45a28ed8b..8859261208d0 100644
--- a/contrib/llvm-project/libcxx/include/ratio
+++ b/contrib/llvm-project/libcxx/include/ratio
@@ -81,6 +81,7 @@ typedef ratio<1000000000000000000000000, 1> yotta; // not supported
#include <climits>
#include <cstdint>
#include <type_traits>
+#include <version>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/regex b/contrib/llvm-project/libcxx/include/regex
index 8203c81659c8..59b2c9a23399 100644
--- a/contrib/llvm-project/libcxx/include/regex
+++ b/contrib/llvm-project/libcxx/include/regex
@@ -1310,19 +1310,51 @@ regex_traits<_CharT>::isctype(char_type __c, char_class_type __m) const
return (__c == '_' && (__m & __regex_word));
}
+inline _LIBCPP_INLINE_VISIBILITY
+bool __is_07(unsigned char c)
+{
+ return (c & 0xF8u) ==
+#if defined(__MVS__) && !defined(__NATIVE_ASCII_F)
+ 0xF0;
+#else
+ 0x30;
+#endif
+}
+
+inline _LIBCPP_INLINE_VISIBILITY
+bool __is_89(unsigned char c)
+{
+ return (c & 0xFEu) ==
+#if defined(__MVS__) && !defined(__NATIVE_ASCII_F)
+ 0xF8;
+#else
+ 0x38;
+#endif
+}
+
+inline _LIBCPP_INLINE_VISIBILITY
+unsigned char __to_lower(unsigned char c)
+{
+#if defined(__MVS__) && !defined(__NATIVE_ASCII_F)
+ return c & 0xBF;
+#else
+ return c | 0x20;
+#endif
+}
+
template <class _CharT>
int
regex_traits<_CharT>::__regex_traits_value(unsigned char __ch, int __radix)
{
- if ((__ch & 0xF8u) == 0x30) // '0' <= __ch && __ch <= '7'
+ if (__is_07(__ch)) // '0' <= __ch && __ch <= '7'
return __ch - '0';
if (__radix != 8)
{
- if ((__ch & 0xFEu) == 0x38) // '8' <= __ch && __ch <= '9'
+ if (__is_89(__ch)) // '8' <= __ch && __ch <= '9'
return __ch - '0';
if (__radix == 16)
{
- __ch |= 0x20; // tolower
+ __ch = __to_lower(__ch); // tolower
if ('a' <= __ch && __ch <= 'f')
return __ch - ('a' - 10);
}
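The three helpers added above exist so the digit tests and the lower-casing trick in regex_traits can switch constants on z/OS, where the native codepage is EBCDIC ('0' is 0xF0 rather than 0x30). A standalone sketch of the ASCII branch, spelling out why the masks work (the constants mirror the hunk; the helper names here are illustrative):

    #include <cassert>

    // ASCII: '0'..'7' are 0x30..0x37, so clearing the low three bits yields 0x30.
    static bool is_0_to_7(unsigned char c) { return (c & 0xF8u) == 0x30; }

    // ASCII: '8' is 0x38 and '9' is 0x39, so clearing only the lowest bit yields 0x38.
    static bool is_8_or_9(unsigned char c) { return (c & 0xFEu) == 0x38; }

    // ASCII: setting bit 0x20 maps 'A'..'Z' onto 'a'..'z' and leaves digits alone.
    static unsigned char ascii_to_lower(unsigned char c) { return c | 0x20; }

    int main() {
        for (unsigned char c = '0'; c <= '7'; ++c)
            assert(is_0_to_7(c));
        assert(!is_0_to_7('8') && is_8_or_9('8') && is_8_or_9('9'));
        assert(ascii_to_lower('F') == 'f' && ascii_to_lower('3') == '3');
        return 0;
    }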
diff --git a/contrib/llvm-project/libcxx/include/semaphore b/contrib/llvm-project/libcxx/include/semaphore
index 2c2518bce46a..7dffc94b13ba 100644
--- a/contrib/llvm-project/libcxx/include/semaphore
+++ b/contrib/llvm-project/libcxx/include/semaphore
@@ -47,8 +47,10 @@ using binary_semaphore = counting_semaphore<1>;
#include <__availability>
#include <__config>
+#include <__thread/timed_backoff_policy.h>
#include <__threading_support>
#include <atomic>
+#include <version>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/sstream b/contrib/llvm-project/libcxx/include/sstream
index e63d1434ac76..6ad624a93a65 100644
--- a/contrib/llvm-project/libcxx/include/sstream
+++ b/contrib/llvm-project/libcxx/include/sstream
@@ -184,6 +184,7 @@ typedef basic_stringstream<wchar_t> wstringstream;
#include <istream>
#include <ostream>
#include <string>
+#include <version>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/stack b/contrib/llvm-project/libcxx/include/stack
index 5d959c33c742..ad65d7b29b4f 100644
--- a/contrib/llvm-project/libcxx/include/stack
+++ b/contrib/llvm-project/libcxx/include/stack
@@ -41,11 +41,14 @@ public:
explicit stack(const container_type& c);
explicit stack(container_type&& c);
+ template <class InputIterator> stack(InputIterator first, InputIterator last); // since C++23
template <class Alloc> explicit stack(const Alloc& a);
template <class Alloc> stack(const container_type& c, const Alloc& a);
template <class Alloc> stack(container_type&& c, const Alloc& a);
template <class Alloc> stack(const stack& c, const Alloc& a);
template <class Alloc> stack(stack&& c, const Alloc& a);
+ template<class InputIterator, class Alloc>
+ stack(InputIterator first, InputIterator last, const Alloc&); // since C++23
bool empty() const;
size_type size() const;
@@ -63,9 +66,17 @@ public:
template<class Container>
stack(Container) -> stack<typename Container::value_type, Container>; // C++17
+template<class InputIterator>
+ stack(InputIterator, InputIterator) -> stack<iter-value-type<InputIterator>>; // since C++23
+
template<class Container, class Allocator>
stack(Container, Allocator) -> stack<typename Container::value_type, Container>; // C++17
+template<class InputIterator, class Allocator>
+ stack(InputIterator, InputIterator, Allocator)
+ -> stack<iter-value-type<InputIterator>,
+ deque<iter-value-type<InputIterator>, Allocator>>; // since C++23
+
template <class T, class Container>
bool operator==(const stack<T, Container>& x, const stack<T, Container>& y);
template <class T, class Container>
@@ -88,9 +99,12 @@ template <class T, class Container>
*/
#include <__config>
+#include <__iterator/iterator_traits.h>
#include <__memory/uses_allocator.h>
#include <__utility/forward.h>
#include <deque>
+#include <type_traits>
+#include <version>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
@@ -183,6 +197,20 @@ public:
: c(_VSTD::move(__s.c), __a) {}
#endif // _LIBCPP_CXX03_LANG
+#if _LIBCPP_STD_VER > 20
+ template <class _InputIterator,
+ class = __enable_if_t<__is_cpp17_input_iterator<_InputIterator>::value>>
+ _LIBCPP_HIDE_FROM_ABI
+ stack(_InputIterator __first, _InputIterator __last) : c(__first, __last) {}
+
+ template <class _InputIterator,
+ class _Alloc,
+ class = __enable_if_t<__is_cpp17_input_iterator<_InputIterator>::value>,
+ class = __enable_if_t<uses_allocator<container_type, _Alloc>::value>>
+ _LIBCPP_HIDE_FROM_ABI
+ stack(_InputIterator __first, _InputIterator __last, const _Alloc& __alloc) : c(__first, __last, __alloc) {}
+#endif
+
_LIBCPP_NODISCARD_AFTER_CXX17 _LIBCPP_INLINE_VISIBILITY
bool empty() const {return c.empty();}
_LIBCPP_INLINE_VISIBILITY
@@ -231,7 +259,7 @@ public:
operator< (const stack<T1, _C1>& __x, const stack<T1, _C1>& __y);
};
-#if _LIBCPP_STD_VER >= 17
+#if _LIBCPP_STD_VER > 14
template<class _Container,
class = enable_if_t<!__is_allocator<_Container>::value>
>
@@ -247,6 +275,20 @@ stack(_Container, _Alloc)
-> stack<typename _Container::value_type, _Container>;
#endif
+#if _LIBCPP_STD_VER > 20
+template<class _InputIterator,
+ class = __enable_if_t<__is_cpp17_input_iterator<_InputIterator>::value>>
+stack(_InputIterator, _InputIterator)
+ -> stack<__iter_value_type<_InputIterator>>;
+
+template<class _InputIterator,
+ class _Alloc,
+ class = __enable_if_t<__is_cpp17_input_iterator<_InputIterator>::value>,
+ class = __enable_if_t<__is_allocator<_Alloc>::value>>
+stack(_InputIterator, _InputIterator, _Alloc)
+ -> stack<__iter_value_type<_InputIterator>, deque<__iter_value_type<_InputIterator>, _Alloc>>;
+#endif
+
template <class _Tp, class _Container>
inline _LIBCPP_INLINE_VISIBILITY
bool
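The <stack> changes mirror the queue ones above (P1425, C++23): iterator-pair constructors and deduction guides behind _LIBCPP_STD_VER > 20. A hedged sketch, under the same assumption of a C++2b toolchain:

    #include <stack>
    #include <vector>

    int main() {
        std::vector<int> v{1, 2, 3};

        // C++23: construct the adaptor from a range; CTAD deduces stack<int>.
        std::stack s(v.begin(), v.end());

        return s.top();   // 3 -- the last element of the range ends up on top
    }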
diff --git a/contrib/llvm-project/libcxx/include/stdexcept b/contrib/llvm-project/libcxx/include/stdexcept
index c1dc05669bb9..5df2050ee4ec 100644
--- a/contrib/llvm-project/libcxx/include/stdexcept
+++ b/contrib/llvm-project/libcxx/include/stdexcept
@@ -42,9 +42,9 @@ public:
*/
#include <__config>
+#include <cstdlib>
#include <exception>
#include <iosfwd> // for string forward decl
-#include <cstdlib>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/streambuf b/contrib/llvm-project/libcxx/include/streambuf
index db3078d809a5..9874ec2ce499 100644
--- a/contrib/llvm-project/libcxx/include/streambuf
+++ b/contrib/llvm-project/libcxx/include/streambuf
@@ -108,8 +108,10 @@ protected:
*/
#include <__config>
+#include <cstdint>
#include <ios>
#include <iosfwd>
+#include <version>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/string b/contrib/llvm-project/libcxx/include/string
index 9049d7acbb9b..3616de8a214d 100644
--- a/contrib/llvm-project/libcxx/include/string
+++ b/contrib/llvm-project/libcxx/include/string
@@ -158,6 +158,9 @@ public:
void resize(size_type n, value_type c);
void resize(size_type n);
+ template<class Operation>
+ constexpr void resize_and_overwrite(size_type n, Operation op); // since C++23
+
void reserve(size_type res_arg);
void reserve(); // deprecated in C++20
void shrink_to_fit();
@@ -615,6 +618,7 @@ operator+(const basic_string<_CharT, _Traits, _Allocator>& __x, _CharT __y);
_LIBCPP_EXTERN_TEMPLATE(_LIBCPP_FUNC_VIS string operator+<char, char_traits<char>, allocator<char> >(char const*, string const&))
+#ifndef _LIBCPP_ABI_NO_BASIC_STRING_BASE_CLASS
template <bool>
struct __basic_string_common;
@@ -624,6 +628,7 @@ struct __basic_string_common<true> {
_LIBCPP_NORETURN _LIBCPP_EXPORTED_FROM_ABI void __throw_length_error() const;
_LIBCPP_NORETURN _LIBCPP_EXPORTED_FROM_ABI void __throw_out_of_range() const;
};
+#endif
template <class _Iter>
struct __string_is_trivial_iterator : public false_type {};
@@ -677,7 +682,9 @@ class
_LIBCPP_PREFERRED_NAME(u32string)
#endif
basic_string
+#ifndef _LIBCPP_ABI_NO_BASIC_STRING_BASE_CLASS
: private __basic_string_common<true> // This base class is historical, but it needs to remain for ABI compatibility
+#endif
{
public:
typedef basic_string __self;
@@ -826,10 +833,7 @@ public:
basic_string(const _CharT* __s) : __r_(__default_init_tag(), __default_init_tag()) {
_LIBCPP_ASSERT(__s != nullptr, "basic_string(const char*) detected nullptr");
__init(__s, traits_type::length(__s));
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
}
template <class = __enable_if_t<__is_allocator<_Allocator>::value, nullptr_t> >
@@ -973,6 +977,16 @@ public:
_LIBCPP_INLINE_VISIBILITY void resize(size_type __n) {resize(__n, value_type());}
void reserve(size_type __requested_capacity);
+
+#if _LIBCPP_STD_VER > 20
+ template <class _Op>
+ _LIBCPP_HIDE_FROM_ABI constexpr
+ void resize_and_overwrite(size_type __n, _Op __op) {
+ __resize_default_init(__n);
+ __erase_to_end(_VSTD::move(__op)(data(), _LIBCPP_AUTO_CAST(__n)));
+ }
+#endif
+
_LIBCPP_INLINE_VISIBILITY void __resize_default_init(size_type __n);
_LIBCPP_DEPRECATED_IN_CXX20 _LIBCPP_INLINE_VISIBILITY
@@ -1455,6 +1469,11 @@ public:
#endif // _LIBCPP_DEBUG_LEVEL == 2
private:
+ _LIBCPP_CONSTEXPR _LIBCPP_HIDE_FROM_ABI static bool __fits_in_sso(size_type __sz) {
+ // SSO is disabled during constant evaluation because `__is_long` isn't constexpr friendly
+ return !__libcpp_is_constant_evaluated() && (__sz < __min_cap);
+ }
+
_LIBCPP_INLINE_VISIBILITY
allocator_type& __alloc() _NOEXCEPT
{return __r_.second();}
@@ -1714,20 +1733,12 @@ private:
_LIBCPP_NORETURN _LIBCPP_HIDE_FROM_ABI
void __throw_length_error() const {
-#ifndef _LIBCPP_NO_EXCEPTIONS
- __basic_string_common<true>::__throw_length_error();
-#else
- _VSTD::abort();
-#endif
+ _VSTD::__throw_length_error("basic_string");
}
_LIBCPP_NORETURN _LIBCPP_HIDE_FROM_ABI
void __throw_out_of_range() const {
-#ifndef _LIBCPP_NO_EXCEPTIONS
- __basic_string_common<true>::__throw_out_of_range();
-#else
- _VSTD::abort();
-#endif
+ _VSTD::__throw_out_of_range("basic_string");
}
friend basic_string operator+<>(const basic_string&, const basic_string&);
@@ -1827,10 +1838,7 @@ basic_string<_CharT, _Traits, _Allocator>::basic_string()
_NOEXCEPT_(is_nothrow_default_constructible<allocator_type>::value)
: __r_(__default_init_tag(), __default_init_tag())
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__zero();
}
@@ -1844,10 +1852,7 @@ basic_string<_CharT, _Traits, _Allocator>::basic_string(const allocator_type& __
#endif
: __r_(__default_init_tag(), __a)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__zero();
}
@@ -1857,9 +1862,9 @@ void basic_string<_CharT, _Traits, _Allocator>::__init(const value_type* __s,
size_type __reserve)
{
if (__reserve > max_size())
- this->__throw_length_error();
+ __throw_length_error();
pointer __p;
- if (__reserve < __min_cap)
+ if (__fits_in_sso(__reserve))
{
__set_short_size(__sz);
__p = __get_short_pointer();
@@ -1881,9 +1886,9 @@ void
basic_string<_CharT, _Traits, _Allocator>::__init(const value_type* __s, size_type __sz)
{
if (__sz > max_size())
- this->__throw_length_error();
+ __throw_length_error();
pointer __p;
- if (__sz < __min_cap)
+ if (__fits_in_sso(__sz))
{
__set_short_size(__sz);
__p = __get_short_pointer();
@@ -1907,10 +1912,7 @@ basic_string<_CharT, _Traits, _Allocator>::basic_string(const _CharT* __s, const
{
_LIBCPP_ASSERT(__s != nullptr, "basic_string(const char*, allocator) detected nullptr");
__init(__s, traits_type::length(__s));
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
}
template <class _CharT, class _Traits, class _Allocator>
@@ -1918,12 +1920,9 @@ inline
basic_string<_CharT, _Traits, _Allocator>::basic_string(const _CharT* __s, size_type __n)
: __r_(__default_init_tag(), __default_init_tag())
{
- _LIBCPP_ASSERT(__n == 0 || __s != nullptr, "basic_string(const char*, n) detected nullptr");
- __init(__s, __n);
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- __get_db()->__insert_c(this);
-#endif
+ _LIBCPP_ASSERT(__n == 0 || __s != nullptr, "basic_string(const char*, n) detected nullptr");
+ __init(__s, __n);
+ _VSTD::__debug_db_insert_c(this);
}
template <class _CharT, class _Traits, class _Allocator>
@@ -1933,10 +1932,7 @@ basic_string<_CharT, _Traits, _Allocator>::basic_string(const _CharT* __s, size_
{
_LIBCPP_ASSERT(__n == 0 || __s != nullptr, "basic_string(const char*, n, allocator) detected nullptr");
__init(__s, __n);
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
}
template <class _CharT, class _Traits, class _Allocator>
@@ -1948,11 +1944,7 @@ basic_string<_CharT, _Traits, _Allocator>::basic_string(const basic_string& __st
else
__init_copy_ctor_external(_VSTD::__to_address(__str.__get_long_pointer()),
__str.__get_long_size());
-
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
}
template <class _CharT, class _Traits, class _Allocator>
@@ -1965,22 +1957,19 @@ basic_string<_CharT, _Traits, _Allocator>::basic_string(
else
__init_copy_ctor_external(_VSTD::__to_address(__str.__get_long_pointer()),
__str.__get_long_size());
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
}
template <class _CharT, class _Traits, class _Allocator>
void basic_string<_CharT, _Traits, _Allocator>::__init_copy_ctor_external(
const value_type* __s, size_type __sz) {
pointer __p;
- if (__sz < __min_cap) {
+ if (__fits_in_sso(__sz)) {
__p = __get_short_pointer();
__set_short_size(__sz);
} else {
if (__sz > max_size())
- this->__throw_length_error();
+ __throw_length_error();
size_t __cap = __recommend(__sz);
__p = __alloc_traits::allocate(__alloc(), __cap + 1);
__set_long_pointer(__p);
@@ -2003,12 +1992,10 @@ basic_string<_CharT, _Traits, _Allocator>::basic_string(basic_string&& __str)
: __r_(_VSTD::move(__str.__r_))
{
__str.__zero();
+ _VSTD::__debug_db_insert_c(this);
#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated()) {
- __get_db()->__insert_c(this);
- if (__is_long())
- __get_db()->swap(this, &__str);
- }
+ if (!__libcpp_is_constant_evaluated() && __is_long())
+ __get_db()->swap(this, &__str);
#endif
}
@@ -2024,12 +2011,10 @@ basic_string<_CharT, _Traits, _Allocator>::basic_string(basic_string&& __str, co
__r_.first().__r = __str.__r_.first().__r;
__str.__zero();
}
+ _VSTD::__debug_db_insert_c(this);
#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated()) {
- __get_db()->__insert_c(this);
- if (__is_long())
- __get_db()->swap(this, &__str);
- }
+ if (!__libcpp_is_constant_evaluated() && __is_long())
+ __get_db()->swap(this, &__str);
#endif
}
@@ -2040,9 +2025,9 @@ void
basic_string<_CharT, _Traits, _Allocator>::__init(size_type __n, value_type __c)
{
if (__n > max_size())
- this->__throw_length_error();
+ __throw_length_error();
pointer __p;
- if (__n < __min_cap)
+ if (__fits_in_sso(__n))
{
__set_short_size(__n);
__p = __get_short_pointer();
@@ -2065,10 +2050,7 @@ basic_string<_CharT, _Traits, _Allocator>::basic_string(size_type __n, _CharT __
: __r_(__default_init_tag(), __default_init_tag())
{
__init(__n, __c);
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
}
template <class _CharT, class _Traits, class _Allocator>
@@ -2077,10 +2059,7 @@ basic_string<_CharT, _Traits, _Allocator>::basic_string(size_type __n, _CharT __
: __r_(__default_init_tag(), __a)
{
__init(__n, __c);
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
}
template <class _CharT, class _Traits, class _Allocator>
@@ -2091,12 +2070,9 @@ basic_string<_CharT, _Traits, _Allocator>::basic_string(const basic_string& __st
{
size_type __str_sz = __str.size();
if (__pos > __str_sz)
- this->__throw_out_of_range();
+ __throw_out_of_range();
__init(__str.data() + __pos, _VSTD::min(__n, __str_sz - __pos));
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
}
template <class _CharT, class _Traits, class _Allocator>
@@ -2107,12 +2083,9 @@ basic_string<_CharT, _Traits, _Allocator>::basic_string(const basic_string& __st
{
size_type __str_sz = __str.size();
if (__pos > __str_sz)
- this->__throw_out_of_range();
+ __throw_out_of_range();
__init(__str.data() + __pos, __str_sz - __pos);
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
}
template <class _CharT, class _Traits, class _Allocator>
@@ -2124,10 +2097,7 @@ basic_string<_CharT, _Traits, _Allocator>::basic_string(
__self_view __sv0 = __t;
__self_view __sv = __sv0.substr(__pos, __n);
__init(__sv.data(), __sv.size());
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
}
template <class _CharT, class _Traits, class _Allocator>
@@ -2137,10 +2107,7 @@ basic_string<_CharT, _Traits, _Allocator>::basic_string(const _Tp & __t)
{
__self_view __sv = __t;
__init(__sv.data(), __sv.size());
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
}
template <class _CharT, class _Traits, class _Allocator>
@@ -2150,10 +2117,7 @@ basic_string<_CharT, _Traits, _Allocator>::basic_string(const _Tp & __t, const _
{
__self_view __sv = __t;
__init(__sv.data(), __sv.size());
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
}
template <class _CharT, class _Traits, class _Allocator>
@@ -2192,9 +2156,9 @@ basic_string<_CharT, _Traits, _Allocator>::__init(_ForwardIterator __first, _For
{
size_type __sz = static_cast<size_type>(_VSTD::distance(__first, __last));
if (__sz > max_size())
- this->__throw_length_error();
+ __throw_length_error();
pointer __p;
- if (__sz < __min_cap)
+ if (__fits_in_sso(__sz))
{
__set_short_size(__sz);
__p = __get_short_pointer();
@@ -2233,10 +2197,7 @@ basic_string<_CharT, _Traits, _Allocator>::basic_string(_InputIterator __first,
: __r_(__default_init_tag(), __default_init_tag())
{
__init(__first, __last);
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
}
template <class _CharT, class _Traits, class _Allocator>
@@ -2247,10 +2208,7 @@ basic_string<_CharT, _Traits, _Allocator>::basic_string(_InputIterator __first,
: __r_(__default_init_tag(), __a)
{
__init(__first, __last);
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
}
#ifndef _LIBCPP_CXX03_LANG
@@ -2262,10 +2220,7 @@ basic_string<_CharT, _Traits, _Allocator>::basic_string(
: __r_(__default_init_tag(), __default_init_tag())
{
__init(__il.begin(), __il.end());
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
}
template <class _CharT, class _Traits, class _Allocator>
@@ -2276,10 +2231,7 @@ basic_string<_CharT, _Traits, _Allocator>::basic_string(
: __r_(__default_init_tag(), __a)
{
__init(__il.begin(), __il.end());
-#if _LIBCPP_DEBUG_LEVEL == 2
- if (!__libcpp_is_constant_evaluated())
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
}
#endif // _LIBCPP_CXX03_LANG
@@ -2303,7 +2255,7 @@ basic_string<_CharT, _Traits, _Allocator>::__grow_by_and_replace
{
size_type __ms = max_size();
if (__delta_cap > __ms - __old_cap - 1)
- this->__throw_length_error();
+ __throw_length_error();
pointer __old_p = __get_pointer();
size_type __cap = __old_cap < __ms / 2 - __alignment ?
__recommend(_VSTD::max(__old_cap + __delta_cap, 2 * __old_cap)) :
@@ -2335,7 +2287,7 @@ basic_string<_CharT, _Traits, _Allocator>::__grow_by(size_type __old_cap, size_t
{
size_type __ms = max_size();
if (__delta_cap > __ms - __old_cap)
- this->__throw_length_error();
+ __throw_length_error();
pointer __old_p = __get_pointer();
size_type __cap = __old_cap < __ms / 2 - __alignment ?
__recommend(_VSTD::max(__old_cap + __delta_cap, 2 * __old_cap)) :
@@ -2398,7 +2350,7 @@ basic_string<_CharT, _Traits, _Allocator>&
basic_string<_CharT, _Traits, _Allocator>::assign(const value_type* __s, size_type __n)
{
_LIBCPP_ASSERT(__n == 0 || __s != nullptr, "string::assign received nullptr");
- return (__builtin_constant_p(__n) && __n < __min_cap)
+ return (__builtin_constant_p(__n) && __fits_in_sso(__n))
? __assign_short(__s, __n)
: __assign_external(__s, __n);
}
@@ -2567,7 +2519,7 @@ basic_string<_CharT, _Traits, _Allocator>::assign(const basic_string& __str, siz
{
size_type __sz = __str.size();
if (__pos > __sz)
- this->__throw_out_of_range();
+ __throw_out_of_range();
return assign(__str.data() + __pos, _VSTD::min(__n, __sz - __pos));
}
@@ -2584,7 +2536,7 @@ basic_string<_CharT, _Traits, _Allocator>::assign(const _Tp & __t, size_type __p
__self_view __sv = __t;
size_type __sz = __sv.size();
if (__pos > __sz)
- this->__throw_out_of_range();
+ __throw_out_of_range();
return assign(__sv.data() + __pos, _VSTD::min(__n, __sz - __pos));
}
@@ -2601,7 +2553,7 @@ basic_string<_CharT, _Traits, _Allocator>::assign(const value_type* __s)
{
_LIBCPP_ASSERT(__s != nullptr, "string::assign received nullptr");
return __builtin_constant_p(*__s)
- ? (traits_type::length(__s) < __min_cap
+ ? (__fits_in_sso(traits_type::length(__s))
? __assign_short(__s, traits_type::length(__s))
: __assign_external(__s, traits_type::length(__s)))
: __assign_external(__s);
@@ -2753,7 +2705,7 @@ basic_string<_CharT, _Traits, _Allocator>::append(const basic_string& __str, siz
{
size_type __sz = __str.size();
if (__pos > __sz)
- this->__throw_out_of_range();
+ __throw_out_of_range();
return append(__str.data() + __pos, _VSTD::min(__n, __sz - __pos));
}
@@ -2769,7 +2721,7 @@ basic_string<_CharT, _Traits, _Allocator>::append(const _Tp & __t, size_type __p
__self_view __sv = __t;
size_type __sz = __sv.size();
if (__pos > __sz)
- this->__throw_out_of_range();
+ __throw_out_of_range();
return append(__sv.data() + __pos, _VSTD::min(__n, __sz - __pos));
}
@@ -2790,7 +2742,7 @@ basic_string<_CharT, _Traits, _Allocator>::insert(size_type __pos, const value_t
_LIBCPP_ASSERT(__n == 0 || __s != nullptr, "string::insert received nullptr");
size_type __sz = size();
if (__pos > __sz)
- this->__throw_out_of_range();
+ __throw_out_of_range();
size_type __cap = capacity();
if (__cap - __sz >= __n)
{
@@ -2821,7 +2773,7 @@ basic_string<_CharT, _Traits, _Allocator>::insert(size_type __pos, size_type __n
{
size_type __sz = size();
if (__pos > __sz)
- this->__throw_out_of_range();
+ __throw_out_of_range();
if (__n)
{
size_type __cap = capacity();
@@ -2927,7 +2879,7 @@ basic_string<_CharT, _Traits, _Allocator>::insert(size_type __pos1, const basic_
{
size_type __str_sz = __str.size();
if (__pos2 > __str_sz)
- this->__throw_out_of_range();
+ __throw_out_of_range();
return insert(__pos1, __str.data() + __pos2, _VSTD::min(__n, __str_sz - __pos2));
}
@@ -2944,7 +2896,7 @@ basic_string<_CharT, _Traits, _Allocator>::insert(size_type __pos1, const _Tp& _
__self_view __sv = __t;
size_type __str_sz = __sv.size();
if (__pos2 > __str_sz)
- this->__throw_out_of_range();
+ __throw_out_of_range();
return insert(__pos1, __sv.data() + __pos2, _VSTD::min(__n, __str_sz - __pos2));
}
@@ -3005,7 +2957,7 @@ basic_string<_CharT, _Traits, _Allocator>::replace(size_type __pos, size_type __
_LIBCPP_ASSERT(__n2 == 0 || __s != nullptr, "string::replace received nullptr");
size_type __sz = size();
if (__pos > __sz)
- this->__throw_out_of_range();
+ __throw_out_of_range();
__n1 = _VSTD::min(__n1, __sz - __pos);
size_type __cap = capacity();
if (__cap - __sz + __n1 >= __n2)
@@ -3052,7 +3004,7 @@ basic_string<_CharT, _Traits, _Allocator>::replace(size_type __pos, size_type __
{
size_type __sz = size();
if (__pos > __sz)
- this->__throw_out_of_range();
+ __throw_out_of_range();
__n1 = _VSTD::min(__n1, __sz - __pos);
size_type __cap = capacity();
value_type* __p;
@@ -3086,7 +3038,7 @@ basic_string<_CharT, _Traits, _Allocator>::replace(const_iterator __i1, const_it
_InputIterator __j1, _InputIterator __j2)
{
const basic_string __temp(__j1, __j2, __alloc());
- return this->replace(__i1, __i2, __temp);
+ return replace(__i1, __i2, __temp);
}
template <class _CharT, class _Traits, class _Allocator>
@@ -3104,7 +3056,7 @@ basic_string<_CharT, _Traits, _Allocator>::replace(size_type __pos1, size_type _
{
size_type __str_sz = __str.size();
if (__pos2 > __str_sz)
- this->__throw_out_of_range();
+ __throw_out_of_range();
return replace(__pos1, __n1, __str.data() + __pos2, _VSTD::min(__n2, __str_sz - __pos2));
}
@@ -3121,7 +3073,7 @@ basic_string<_CharT, _Traits, _Allocator>::replace(size_type __pos1, size_type _
__self_view __sv = __t;
size_type __str_sz = __sv.size();
if (__pos2 > __str_sz)
- this->__throw_out_of_range();
+ __throw_out_of_range();
return replace(__pos1, __n1, __sv.data() + __pos2, _VSTD::min(__n2, __str_sz - __pos2));
}
@@ -3191,7 +3143,8 @@ template <class _CharT, class _Traits, class _Allocator>
basic_string<_CharT, _Traits, _Allocator>&
basic_string<_CharT, _Traits, _Allocator>::erase(size_type __pos,
size_type __n) {
- if (__pos > size()) this->__throw_out_of_range();
+ if (__pos > size())
+ __throw_out_of_range();
if (__n == npos) {
__erase_to_end(__pos);
} else {
@@ -3307,12 +3260,13 @@ void
basic_string<_CharT, _Traits, _Allocator>::reserve(size_type __requested_capacity)
{
if (__requested_capacity > max_size())
- this->__throw_length_error();
+ __throw_length_error();
-#if _LIBCPP_STD_VER > 17
- // Reserve never shrinks as of C++20.
- if (__requested_capacity <= capacity()) return;
-#endif
+ // Make sure reserve(n) never shrinks. This is technically only required in C++20
+ // and later (since P0966R1); however, we provide consistent behavior in all Standard
+ // modes because this function is instantiated in the shared library.
+ if (__requested_capacity <= capacity())
+ return;
size_type __target_capacity = _VSTD::max(__requested_capacity, size());
__target_capacity = __recommend(__target_capacity);
@@ -3413,7 +3367,7 @@ typename basic_string<_CharT, _Traits, _Allocator>::const_reference
basic_string<_CharT, _Traits, _Allocator>::at(size_type __n) const
{
if (__n >= size())
- this->__throw_out_of_range();
+ __throw_out_of_range();
return (*this)[__n];
}
@@ -3422,7 +3376,7 @@ typename basic_string<_CharT, _Traits, _Allocator>::reference
basic_string<_CharT, _Traits, _Allocator>::at(size_type __n)
{
if (__n >= size())
- this->__throw_out_of_range();
+ __throw_out_of_range();
return (*this)[__n];
}
@@ -3468,7 +3422,7 @@ basic_string<_CharT, _Traits, _Allocator>::copy(value_type* __s, size_type __n,
{
size_type __sz = size();
if (__pos > __sz)
- this->__throw_out_of_range();
+ __throw_out_of_range();
size_type __rlen = _VSTD::min(__n, __sz - __pos);
traits_type::copy(__s, data() + __pos, __rlen);
return __rlen;
@@ -3912,7 +3866,7 @@ basic_string<_CharT, _Traits, _Allocator>::compare(size_type __pos1,
_LIBCPP_ASSERT(__n2 == 0 || __s != nullptr, "string::compare(): received nullptr");
size_type __sz = size();
if (__pos1 > __sz || __n2 == npos)
- this->__throw_out_of_range();
+ __throw_out_of_range();
size_type __rlen = _VSTD::min(__n1, __sz - __pos1);
int __r = traits_type::compare(data() + __pos1, __s, _VSTD::min(__rlen, __n2));
if (__r == 0)
@@ -3976,7 +3930,7 @@ basic_string<_CharT, _Traits, _Allocator>::compare(size_type __pos1,
size_type __pos2,
size_type __n2) const
{
- return compare(__pos1, __n1, __self_view(__str), __pos2, __n2);
+ return compare(__pos1, __n1, __self_view(__str), __pos2, __n2);
}
template <class _CharT, class _Traits, class _Allocator>
@@ -4490,16 +4444,16 @@ template<class _CharT, class _Traits, class _Allocator>
bool
basic_string<_CharT, _Traits, _Allocator>::__dereferenceable(const const_iterator* __i) const
{
- return this->data() <= _VSTD::__to_address(__i->base()) &&
- _VSTD::__to_address(__i->base()) < this->data() + this->size();
+ return data() <= _VSTD::__to_address(__i->base()) &&
+ _VSTD::__to_address(__i->base()) < data() + size();
}
template<class _CharT, class _Traits, class _Allocator>
bool
basic_string<_CharT, _Traits, _Allocator>::__decrementable(const const_iterator* __i) const
{
- return this->data() < _VSTD::__to_address(__i->base()) &&
- _VSTD::__to_address(__i->base()) <= this->data() + this->size();
+ return data() < _VSTD::__to_address(__i->base()) &&
+ _VSTD::__to_address(__i->base()) <= data() + size();
}
template<class _CharT, class _Traits, class _Allocator>
@@ -4507,7 +4461,7 @@ bool
basic_string<_CharT, _Traits, _Allocator>::__addable(const const_iterator* __i, ptrdiff_t __n) const
{
const value_type* __p = _VSTD::__to_address(__i->base()) + __n;
- return this->data() <= __p && __p <= this->data() + this->size();
+ return data() <= __p && __p <= data() + size();
}
template<class _CharT, class _Traits, class _Allocator>
@@ -4515,7 +4469,7 @@ bool
basic_string<_CharT, _Traits, _Allocator>::__subscriptable(const const_iterator* __i, ptrdiff_t __n) const
{
const value_type* __p = _VSTD::__to_address(__i->base()) + __n;
- return this->data() <= __p && __p < this->data() + this->size();
+ return data() <= __p && __p < data() + size();
}
#endif // _LIBCPP_DEBUG_LEVEL == 2
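Beyond the debug-database and SSO cleanups, the <string> diff above adds basic_string::resize_and_overwrite (P1072, C++23): the string grows to n writable characters, the callback fills them, and whatever the callback returns becomes the final size. A hedged caller-side sketch; the memcpy filler is illustrative, not part of the patch:

    #include <cstring>
    #include <string>

    int main() {
        std::string s;

        // C++23 resize_and_overwrite: buf points at n writable chars; we fill a
        // prefix and report its length back, which becomes s.size().
        s.resize_and_overwrite(16, [](char* buf, std::size_t n) {
            const char msg[] = "value=42";
            std::size_t len = sizeof(msg) - 1;   // 8, comfortably within the n chars provided
            std::memcpy(buf, msg, len);          // write only what we report back
            return len;                          // becomes the new size()
        });

        return static_cast<int>(s.size());   // 8
    }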
diff --git a/contrib/llvm-project/libcxx/include/strstream b/contrib/llvm-project/libcxx/include/strstream
index a5f17a9dc319..c34a5628b634 100644
--- a/contrib/llvm-project/libcxx/include/strstream
+++ b/contrib/llvm-project/libcxx/include/strstream
@@ -132,6 +132,7 @@ private:
#include <__config>
#include <istream>
#include <ostream>
+#include <version>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/system_error b/contrib/llvm-project/libcxx/include/system_error
index 059fa0e2d511..6d3a6ca65038 100644
--- a/contrib/llvm-project/libcxx/include/system_error
+++ b/contrib/llvm-project/libcxx/include/system_error
@@ -150,6 +150,7 @@ template <> struct hash<std::error_condition>;
#include <stdexcept>
#include <string>
#include <type_traits>
+#include <version>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/thread b/contrib/llvm-project/libcxx/include/thread
index a4632f6fe524..27756e42cdcb 100644
--- a/contrib/llvm-project/libcxx/include/thread
+++ b/contrib/llvm-project/libcxx/include/thread
@@ -87,6 +87,7 @@ void sleep_for(const chrono::duration<Rep, Period>& rel_time);
#include <__functional_base>
#include <__mutex_base>
#include <__thread/poll_with_backoff.h>
+#include <__thread/timed_backoff_policy.h>
#include <__threading_support>
#include <__utility/forward.h>
#include <chrono>
@@ -97,6 +98,7 @@ void sleep_for(const chrono::duration<Rep, Period>& rel_time);
#include <system_error>
#include <tuple>
#include <type_traits>
+#include <version>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/tuple b/contrib/llvm-project/libcxx/include/tuple
index 8ee5c2eef51d..5cf120fec359 100644
--- a/contrib/llvm-project/libcxx/include/tuple
+++ b/contrib/llvm-project/libcxx/include/tuple
@@ -73,6 +73,19 @@ public:
void swap(tuple&) noexcept(AND(swap(declval<T&>(), declval<T&>())...)); // constexpr in C++20
};
+
+template<class... TTypes, class... UTypes, template<class> class TQual, template<class> class UQual> // since C++23
+ requires requires { typename tuple<common_reference_t<TQual<TTypes>, UQual<UTypes>>...>; }
+struct basic_common_reference<tuple<TTypes...>, tuple<UTypes...>, TQual, UQual> {
+ using type = tuple<common_reference_t<TQual<TTypes>, UQual<UTypes>>...>;
+};
+
+template<class... TTypes, class... UTypes> // since C++23
+ requires requires { typename tuple<common_type_t<TTypes, UTypes>...>; }
+struct common_type<tuple<TTypes...>, tuple<UTypes...>> {
+ using type = tuple<common_type_t<TTypes, UTypes>...>;
+};
+
template <class ...T>
tuple(T...) -> tuple<T...>; // since C++17
template <class T1, class T2>
@@ -1103,7 +1116,21 @@ public:
void swap(tuple&) _NOEXCEPT {}
};
-#if _LIBCPP_STD_VER >= 17
+#if _LIBCPP_STD_VER > 20
+template <class... _TTypes, class... _UTypes, template<class> class _TQual, template<class> class _UQual>
+ requires requires { typename tuple<common_reference_t<_TQual<_TTypes>, _UQual<_UTypes>>...>; }
+struct basic_common_reference<tuple<_TTypes...>, tuple<_UTypes...>, _TQual, _UQual> {
+ using type = tuple<common_reference_t<_TQual<_TTypes>, _UQual<_UTypes>>...>;
+};
+
+template <class... _TTypes, class... _UTypes>
+ requires requires { typename tuple<common_type_t<_TTypes, _UTypes>...>; }
+struct common_type<tuple<_TTypes...>, tuple<_UTypes...>> {
+ using type = tuple<common_type_t<_TTypes, _UTypes>...>;
+};
+#endif
+
+#if _LIBCPP_STD_VER > 14
template <class ..._Tp>
tuple(_Tp...) -> tuple<_Tp...>;
template <class _Tp1, class _Tp2>
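The <tuple> hunk above adds the C++23 common_type and basic_common_reference specializations required by P2321 (zip). A compile-time sketch of what they make well-formed, assuming a C++2b toolchain with this change; the asserts exercise only the two new specializations:

    #include <tuple>
    #include <type_traits>

    // common_type of two tuples is the tuple of element-wise common types.
    static_assert(std::is_same_v<
        std::common_type_t<std::tuple<int, long>, std::tuple<long, int>>,
        std::tuple<long, long>>);

    // basic_common_reference combines the tuples element-wise as well.
    static_assert(std::is_same_v<
        std::common_reference_t<std::tuple<int&, const int&>, std::tuple<const int&, int&>>,
        std::tuple<const int&, const int&>>);

    int main() { return 0; }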
diff --git a/contrib/llvm-project/libcxx/include/type_traits b/contrib/llvm-project/libcxx/include/type_traits
index 155b775e4929..b4010851f133 100644
--- a/contrib/llvm-project/libcxx/include/type_traits
+++ b/contrib/llvm-project/libcxx/include/type_traits
@@ -2318,7 +2318,7 @@ struct __common_type_impl {};
// Clang provides variadic templates in C++03 as an extension.
#if !defined(_LIBCPP_CXX03_LANG) || defined(__clang__)
# define _LIBCPP_OPTIONAL_PACK(...) , __VA_ARGS__
-template <class... Tp>
+template <class... _Tp>
struct __common_types;
template <class... _Tp>
struct _LIBCPP_TEMPLATE_VIS common_type;
diff --git a/contrib/llvm-project/libcxx/include/typeindex b/contrib/llvm-project/libcxx/include/typeindex
index 790aea4d4763..ede0c7fb25c2 100644
--- a/contrib/llvm-project/libcxx/include/typeindex
+++ b/contrib/llvm-project/libcxx/include/typeindex
@@ -49,6 +49,7 @@ struct hash<type_index>
#include <__functional_base>
#include <compare>
#include <typeinfo>
+#include <version>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/unordered_map b/contrib/llvm-project/libcxx/include/unordered_map
index 53ddb95663d1..361db707d246 100644
--- a/contrib/llvm-project/libcxx/include/unordered_map
+++ b/contrib/llvm-project/libcxx/include/unordered_map
@@ -517,8 +517,9 @@ template <class Key, class T, class Hash, class Pred, class Alloc>
#include <__config>
#include <__debug>
#include <__functional/is_transparent.h>
-#include <__iterator/iterator_traits.h>
#include <__hash_table>
+#include <__iterator/iterator_traits.h>
+#include <__memory/addressof.h>
#include <__node_handle>
#include <__utility/forward.h>
#include <compare>
@@ -1068,11 +1069,9 @@ public:
_LIBCPP_INLINE_VISIBILITY
unordered_map()
_NOEXCEPT_(is_nothrow_default_constructible<__table>::value)
- {
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
- }
+ {
+ _VSTD::__debug_db_insert_c(this);
+ }
explicit unordered_map(size_type __n, const hasher& __hf = hasher(),
const key_equal& __eql = key_equal());
unordered_map(size_type __n, const hasher& __hf,
@@ -1188,13 +1187,10 @@ public:
{return __table_.__insert_unique(__x);}
iterator insert(const_iterator __p, const value_type& __x) {
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(&__p) == this,
- "unordered_map::insert(const_iterator, const value_type&) called with an iterator not"
- " referring to this unordered_map");
-#else
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__p)) == this,
+ "unordered_map::insert(const_iterator, const value_type&) called with an iterator not "
+ "referring to this unordered_map");
((void)__p);
-#endif
return insert(__x).first;
}
@@ -1212,13 +1208,10 @@ public:
{return __table_.__insert_unique(_VSTD::move(__x));}
iterator insert(const_iterator __p, value_type&& __x) {
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(&__p) == this,
- "unordered_map::insert(const_iterator, const value_type&) called with an iterator not"
- " referring to this unordered_map");
-#else
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__p)) == this,
+ "unordered_map::insert(const_iterator, const value_type&) called with an iterator not"
+ " referring to this unordered_map");
((void)__p);
-#endif
return __table_.__insert_unique(_VSTD::move(__x)).first;
}
@@ -1233,13 +1226,10 @@ public:
_LIBCPP_INLINE_VISIBILITY
iterator insert(const_iterator __p, _Pp&& __x)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(&__p) == this,
- "unordered_map::insert(const_iterator, value_type&&) called with an iterator not"
- " referring to this unordered_map");
-#else
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__p)) == this,
+ "unordered_map::insert(const_iterator, value_type&&) called with an iterator not"
+ " referring to this unordered_map");
((void)__p);
-#endif
return insert(_VSTD::forward<_Pp>(__x)).first;
}
@@ -1252,13 +1242,10 @@ public:
template <class... _Args>
_LIBCPP_INLINE_VISIBILITY
iterator emplace_hint(const_iterator __p, _Args&&... __args) {
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(&__p) == this,
- "unordered_map::emplace_hint(const_iterator, args...) called with an iterator not"
- " referring to this unordered_map");
-#else
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__p)) == this,
+ "unordered_map::emplace_hint(const_iterator, args...) called with an iterator not"
+ " referring to this unordered_map");
((void)__p);
-#endif
return __table_.__emplace_unique(_VSTD::forward<_Args>(__args)...).first;
}
@@ -1287,13 +1274,10 @@ public:
_LIBCPP_INLINE_VISIBILITY
iterator try_emplace(const_iterator __h, const key_type& __k, _Args&&... __args)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(&__h) == this,
- "unordered_map::try_emplace(const_iterator, key, args...) called with an iterator not"
- " referring to this unordered_map");
-#else
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__h)) == this,
+ "unordered_map::try_emplace(const_iterator, key, args...) called with an iterator not"
+ " referring to this unordered_map");
((void)__h);
-#endif
return try_emplace(__k, _VSTD::forward<_Args>(__args)...).first;
}
@@ -1301,13 +1285,10 @@ public:
_LIBCPP_INLINE_VISIBILITY
iterator try_emplace(const_iterator __h, key_type&& __k, _Args&&... __args)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(&__h) == this,
- "unordered_map::try_emplace(const_iterator, key, args...) called with an iterator not"
- " referring to this unordered_map");
-#else
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__h)) == this,
+ "unordered_map::try_emplace(const_iterator, key, args...) called with an iterator not"
+ " referring to this unordered_map");
((void)__h);
-#endif
return try_emplace(_VSTD::move(__k), _VSTD::forward<_Args>(__args)...).first;
}
@@ -1625,9 +1606,7 @@ unordered_map<_Key, _Tp, _Hash, _Pred, _Alloc>::unordered_map(
size_type __n, const hasher& __hf, const key_equal& __eql)
: __table_(__hf, __eql)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__n);
}
@@ -1637,9 +1616,7 @@ unordered_map<_Key, _Tp, _Hash, _Pred, _Alloc>::unordered_map(
const allocator_type& __a)
: __table_(__hf, __eql, typename __table::allocator_type(__a))
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__n);
}
@@ -1649,9 +1626,7 @@ unordered_map<_Key, _Tp, _Hash, _Pred, _Alloc>::unordered_map(
const allocator_type& __a)
: __table_(typename __table::allocator_type(__a))
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
}
template <class _Key, class _Tp, class _Hash, class _Pred, class _Alloc>
@@ -1659,9 +1634,7 @@ template <class _InputIterator>
unordered_map<_Key, _Tp, _Hash, _Pred, _Alloc>::unordered_map(
_InputIterator __first, _InputIterator __last)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
insert(__first, __last);
}
@@ -1672,9 +1645,7 @@ unordered_map<_Key, _Tp, _Hash, _Pred, _Alloc>::unordered_map(
const hasher& __hf, const key_equal& __eql)
: __table_(__hf, __eql)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__n);
insert(__first, __last);
}
@@ -1686,9 +1657,7 @@ unordered_map<_Key, _Tp, _Hash, _Pred, _Alloc>::unordered_map(
const hasher& __hf, const key_equal& __eql, const allocator_type& __a)
: __table_(__hf, __eql, typename __table::allocator_type(__a))
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__n);
insert(__first, __last);
}
@@ -1698,9 +1667,7 @@ unordered_map<_Key, _Tp, _Hash, _Pred, _Alloc>::unordered_map(
const unordered_map& __u)
: __table_(__u.__table_)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__u.bucket_count());
insert(__u.begin(), __u.end());
}
@@ -1710,9 +1677,7 @@ unordered_map<_Key, _Tp, _Hash, _Pred, _Alloc>::unordered_map(
const unordered_map& __u, const allocator_type& __a)
: __table_(__u.__table_, typename __table::allocator_type(__a))
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__u.bucket_count());
insert(__u.begin(), __u.end());
}
@@ -1726,9 +1691,9 @@ unordered_map<_Key, _Tp, _Hash, _Pred, _Alloc>::unordered_map(
_NOEXCEPT_(is_nothrow_move_constructible<__table>::value)
: __table_(_VSTD::move(__u.__table_))
{
+ _VSTD::__debug_db_insert_c(this);
#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
- __get_db()->swap(this, &__u);
+ __get_db()->swap(this, _VSTD::addressof(__u));
#endif
}
@@ -1737,9 +1702,7 @@ unordered_map<_Key, _Tp, _Hash, _Pred, _Alloc>::unordered_map(
unordered_map&& __u, const allocator_type& __a)
: __table_(_VSTD::move(__u.__table_), typename __table::allocator_type(__a))
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
if (__a != __u.get_allocator())
{
iterator __i = __u.begin();
@@ -1750,7 +1713,7 @@ unordered_map<_Key, _Tp, _Hash, _Pred, _Alloc>::unordered_map(
}
#if _LIBCPP_DEBUG_LEVEL == 2
else
- __get_db()->swap(this, &__u);
+ __get_db()->swap(this, _VSTD::addressof(__u));
#endif
}
@@ -1758,9 +1721,7 @@ template <class _Key, class _Tp, class _Hash, class _Pred, class _Alloc>
unordered_map<_Key, _Tp, _Hash, _Pred, _Alloc>::unordered_map(
initializer_list<value_type> __il)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
insert(__il.begin(), __il.end());
}
@@ -1770,9 +1731,7 @@ unordered_map<_Key, _Tp, _Hash, _Pred, _Alloc>::unordered_map(
const key_equal& __eql)
: __table_(__hf, __eql)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__n);
insert(__il.begin(), __il.end());
}
@@ -1783,9 +1742,7 @@ unordered_map<_Key, _Tp, _Hash, _Pred, _Alloc>::unordered_map(
const key_equal& __eql, const allocator_type& __a)
: __table_(__hf, __eql, typename __table::allocator_type(__a))
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__n);
insert(__il.begin(), __il.end());
}
@@ -2003,11 +1960,9 @@ public:
_LIBCPP_INLINE_VISIBILITY
unordered_multimap()
_NOEXCEPT_(is_nothrow_default_constructible<__table>::value)
- {
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
- }
+ {
+ _VSTD::__debug_db_insert_c(this);
+ }
explicit unordered_multimap(size_type __n, const hasher& __hf = hasher(),
const key_equal& __eql = key_equal());
unordered_multimap(size_type __n, const hasher& __hf,
@@ -2427,9 +2382,7 @@ unordered_multimap<_Key, _Tp, _Hash, _Pred, _Alloc>::unordered_multimap(
size_type __n, const hasher& __hf, const key_equal& __eql)
: __table_(__hf, __eql)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__n);
}
@@ -2439,9 +2392,7 @@ unordered_multimap<_Key, _Tp, _Hash, _Pred, _Alloc>::unordered_multimap(
const allocator_type& __a)
: __table_(__hf, __eql, typename __table::allocator_type(__a))
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__n);
}
@@ -2450,9 +2401,7 @@ template <class _InputIterator>
unordered_multimap<_Key, _Tp, _Hash, _Pred, _Alloc>::unordered_multimap(
_InputIterator __first, _InputIterator __last)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
insert(__first, __last);
}
@@ -2463,9 +2412,7 @@ unordered_multimap<_Key, _Tp, _Hash, _Pred, _Alloc>::unordered_multimap(
const hasher& __hf, const key_equal& __eql)
: __table_(__hf, __eql)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__n);
insert(__first, __last);
}
@@ -2477,9 +2424,7 @@ unordered_multimap<_Key, _Tp, _Hash, _Pred, _Alloc>::unordered_multimap(
const hasher& __hf, const key_equal& __eql, const allocator_type& __a)
: __table_(__hf, __eql, typename __table::allocator_type(__a))
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__n);
insert(__first, __last);
}
@@ -2490,9 +2435,7 @@ unordered_multimap<_Key, _Tp, _Hash, _Pred, _Alloc>::unordered_multimap(
const allocator_type& __a)
: __table_(typename __table::allocator_type(__a))
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
}
template <class _Key, class _Tp, class _Hash, class _Pred, class _Alloc>
@@ -2500,9 +2443,7 @@ unordered_multimap<_Key, _Tp, _Hash, _Pred, _Alloc>::unordered_multimap(
const unordered_multimap& __u)
: __table_(__u.__table_)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__u.bucket_count());
insert(__u.begin(), __u.end());
}
@@ -2512,9 +2453,7 @@ unordered_multimap<_Key, _Tp, _Hash, _Pred, _Alloc>::unordered_multimap(
const unordered_multimap& __u, const allocator_type& __a)
: __table_(__u.__table_, typename __table::allocator_type(__a))
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__u.bucket_count());
insert(__u.begin(), __u.end());
}
@@ -2528,9 +2467,9 @@ unordered_multimap<_Key, _Tp, _Hash, _Pred, _Alloc>::unordered_multimap(
_NOEXCEPT_(is_nothrow_move_constructible<__table>::value)
: __table_(_VSTD::move(__u.__table_))
{
+ _VSTD::__debug_db_insert_c(this);
#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
- __get_db()->swap(this, &__u);
+ __get_db()->swap(this, _VSTD::addressof(__u));
#endif
}
@@ -2539,9 +2478,7 @@ unordered_multimap<_Key, _Tp, _Hash, _Pred, _Alloc>::unordered_multimap(
unordered_multimap&& __u, const allocator_type& __a)
: __table_(_VSTD::move(__u.__table_), typename __table::allocator_type(__a))
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
if (__a != __u.get_allocator())
{
iterator __i = __u.begin();
@@ -2553,7 +2490,7 @@ unordered_multimap<_Key, _Tp, _Hash, _Pred, _Alloc>::unordered_multimap(
}
#if _LIBCPP_DEBUG_LEVEL == 2
else
- __get_db()->swap(this, &__u);
+ __get_db()->swap(this, _VSTD::addressof(__u));
#endif
}
@@ -2561,9 +2498,7 @@ template <class _Key, class _Tp, class _Hash, class _Pred, class _Alloc>
unordered_multimap<_Key, _Tp, _Hash, _Pred, _Alloc>::unordered_multimap(
initializer_list<value_type> __il)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
insert(__il.begin(), __il.end());
}
@@ -2573,9 +2508,7 @@ unordered_multimap<_Key, _Tp, _Hash, _Pred, _Alloc>::unordered_multimap(
const key_equal& __eql)
: __table_(__hf, __eql)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__n);
insert(__il.begin(), __il.end());
}
@@ -2586,9 +2519,7 @@ unordered_multimap<_Key, _Tp, _Hash, _Pred, _Alloc>::unordered_multimap(
const key_equal& __eql, const allocator_type& __a)
: __table_(__hf, __eql, typename __table::allocator_type(__a))
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__n);
insert(__il.begin(), __il.end());
}
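A recurring change in the unordered_map hunks above is replacing `&__p` with `_VSTD::addressof(__p)` in the debug assertions and swaps. std::addressof bypasses any user-provided operator&, so the debug database always records the real object address. A small illustrative sketch (the Evil type is made up for the demonstration):

    #include <memory>

    // A pathological type that hijacks unary operator&.
    struct Evil {
        int x = 0;
        int* operator&() { return &x; }   // deliberately returns the "wrong" pointer type
    };

    int main() {
        Evil e;
        int*  hijacked = &e;                  // calls Evil::operator&, yields an int*
        Evil* genuine  = std::addressof(e);   // always the true address, as Evil*
        return static_cast<void*>(hijacked) == static_cast<void*>(genuine) ? 0 : 1;
    }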
diff --git a/contrib/llvm-project/libcxx/include/unordered_set b/contrib/llvm-project/libcxx/include/unordered_set
index 1b62e31bb918..29a19f2f0cb5 100644
--- a/contrib/llvm-project/libcxx/include/unordered_set
+++ b/contrib/llvm-project/libcxx/include/unordered_set
@@ -463,6 +463,7 @@ template <class Value, class Hash, class Pred, class Alloc>
#include <__debug>
#include <__functional/is_transparent.h>
#include <__hash_table>
+#include <__memory/addressof.h>
#include <__node_handle>
#include <__utility/forward.h>
#include <compare>
@@ -524,11 +525,9 @@ public:
_LIBCPP_INLINE_VISIBILITY
unordered_set()
_NOEXCEPT_(is_nothrow_default_constructible<__table>::value)
- {
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
- }
+ {
+ _VSTD::__debug_db_insert_c(this);
+ }
explicit unordered_set(size_type __n, const hasher& __hf = hasher(),
const key_equal& __eql = key_equal());
#if _LIBCPP_STD_VER > 11
@@ -642,7 +641,7 @@ public:
#if _LIBCPP_DEBUG_LEVEL == 2
iterator emplace_hint(const_iterator __p, _Args&&... __args)
{
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(&__p) == this,
+ _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__p)) == this,
"unordered_set::emplace_hint(const_iterator, args...) called with an iterator not"
" referring to this unordered_set");
return __table_.__emplace_unique(_VSTD::forward<_Args>(__args)...).first;
@@ -659,7 +658,7 @@ public:
#if _LIBCPP_DEBUG_LEVEL == 2
iterator insert(const_iterator __p, value_type&& __x)
{
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(&__p) == this,
+ _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__p)) == this,
"unordered_set::insert(const_iterator, value_type&&) called with an iterator not"
" referring to this unordered_set");
return insert(_VSTD::move(__x)).first;
@@ -680,7 +679,7 @@ public:
#if _LIBCPP_DEBUG_LEVEL == 2
iterator insert(const_iterator __p, const value_type& __x)
{
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(&__p) == this,
+ _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__p)) == this,
"unordered_set::insert(const_iterator, const value_type&) called with an iterator not"
" referring to this unordered_set");
return insert(__x).first;
@@ -935,9 +934,7 @@ unordered_set<_Value, _Hash, _Pred, _Alloc>::unordered_set(size_type __n,
const hasher& __hf, const key_equal& __eql)
: __table_(__hf, __eql)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__n);
}
@@ -946,9 +943,7 @@ unordered_set<_Value, _Hash, _Pred, _Alloc>::unordered_set(size_type __n,
const hasher& __hf, const key_equal& __eql, const allocator_type& __a)
: __table_(__hf, __eql, __a)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__n);
}
@@ -957,9 +952,7 @@ template <class _InputIterator>
unordered_set<_Value, _Hash, _Pred, _Alloc>::unordered_set(
_InputIterator __first, _InputIterator __last)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
insert(__first, __last);
}
@@ -970,9 +963,7 @@ unordered_set<_Value, _Hash, _Pred, _Alloc>::unordered_set(
const hasher& __hf, const key_equal& __eql)
: __table_(__hf, __eql)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__n);
insert(__first, __last);
}
@@ -984,9 +975,7 @@ unordered_set<_Value, _Hash, _Pred, _Alloc>::unordered_set(
const hasher& __hf, const key_equal& __eql, const allocator_type& __a)
: __table_(__hf, __eql, __a)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__n);
insert(__first, __last);
}
@@ -997,9 +986,7 @@ unordered_set<_Value, _Hash, _Pred, _Alloc>::unordered_set(
const allocator_type& __a)
: __table_(__a)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
}
template <class _Value, class _Hash, class _Pred, class _Alloc>
@@ -1007,9 +994,7 @@ unordered_set<_Value, _Hash, _Pred, _Alloc>::unordered_set(
const unordered_set& __u)
: __table_(__u.__table_)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__u.bucket_count());
insert(__u.begin(), __u.end());
}
@@ -1019,9 +1004,7 @@ unordered_set<_Value, _Hash, _Pred, _Alloc>::unordered_set(
const unordered_set& __u, const allocator_type& __a)
: __table_(__u.__table_, __a)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__u.bucket_count());
insert(__u.begin(), __u.end());
}
@@ -1035,9 +1018,9 @@ unordered_set<_Value, _Hash, _Pred, _Alloc>::unordered_set(
_NOEXCEPT_(is_nothrow_move_constructible<__table>::value)
: __table_(_VSTD::move(__u.__table_))
{
+ _VSTD::__debug_db_insert_c(this);
#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
- __get_db()->swap(this, &__u);
+ __get_db()->swap(this, _VSTD::addressof(__u));
#endif
}
@@ -1046,9 +1029,7 @@ unordered_set<_Value, _Hash, _Pred, _Alloc>::unordered_set(
unordered_set&& __u, const allocator_type& __a)
: __table_(_VSTD::move(__u.__table_), __a)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
if (__a != __u.get_allocator())
{
iterator __i = __u.begin();
@@ -1057,7 +1038,7 @@ unordered_set<_Value, _Hash, _Pred, _Alloc>::unordered_set(
}
#if _LIBCPP_DEBUG_LEVEL == 2
else
- __get_db()->swap(this, &__u);
+ __get_db()->swap(this, _VSTD::addressof(__u));
#endif
}
@@ -1065,9 +1046,7 @@ template <class _Value, class _Hash, class _Pred, class _Alloc>
unordered_set<_Value, _Hash, _Pred, _Alloc>::unordered_set(
initializer_list<value_type> __il)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
insert(__il.begin(), __il.end());
}
@@ -1077,9 +1056,7 @@ unordered_set<_Value, _Hash, _Pred, _Alloc>::unordered_set(
const key_equal& __eql)
: __table_(__hf, __eql)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__n);
insert(__il.begin(), __il.end());
}
@@ -1090,9 +1067,7 @@ unordered_set<_Value, _Hash, _Pred, _Alloc>::unordered_set(
const key_equal& __eql, const allocator_type& __a)
: __table_(__hf, __eql, __a)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__n);
insert(__il.begin(), __il.end());
}
@@ -1223,11 +1198,9 @@ public:
_LIBCPP_INLINE_VISIBILITY
unordered_multiset()
_NOEXCEPT_(is_nothrow_default_constructible<__table>::value)
- {
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
- }
+ {
+ _VSTD::__debug_db_insert_c(this);
+ }
explicit unordered_multiset(size_type __n, const hasher& __hf = hasher(),
const key_equal& __eql = key_equal());
unordered_multiset(size_type __n, const hasher& __hf,
@@ -1601,9 +1574,7 @@ unordered_multiset<_Value, _Hash, _Pred, _Alloc>::unordered_multiset(
size_type __n, const hasher& __hf, const key_equal& __eql)
: __table_(__hf, __eql)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__n);
}
@@ -1613,9 +1584,7 @@ unordered_multiset<_Value, _Hash, _Pred, _Alloc>::unordered_multiset(
const allocator_type& __a)
: __table_(__hf, __eql, __a)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__n);
}
@@ -1624,9 +1593,7 @@ template <class _InputIterator>
unordered_multiset<_Value, _Hash, _Pred, _Alloc>::unordered_multiset(
_InputIterator __first, _InputIterator __last)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
insert(__first, __last);
}
@@ -1637,9 +1604,7 @@ unordered_multiset<_Value, _Hash, _Pred, _Alloc>::unordered_multiset(
const hasher& __hf, const key_equal& __eql)
: __table_(__hf, __eql)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__n);
insert(__first, __last);
}
@@ -1651,9 +1616,7 @@ unordered_multiset<_Value, _Hash, _Pred, _Alloc>::unordered_multiset(
const hasher& __hf, const key_equal& __eql, const allocator_type& __a)
: __table_(__hf, __eql, __a)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__n);
insert(__first, __last);
}
@@ -1664,9 +1627,7 @@ unordered_multiset<_Value, _Hash, _Pred, _Alloc>::unordered_multiset(
const allocator_type& __a)
: __table_(__a)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
}
template <class _Value, class _Hash, class _Pred, class _Alloc>
@@ -1674,9 +1635,7 @@ unordered_multiset<_Value, _Hash, _Pred, _Alloc>::unordered_multiset(
const unordered_multiset& __u)
: __table_(__u.__table_)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__u.bucket_count());
insert(__u.begin(), __u.end());
}
@@ -1686,9 +1645,7 @@ unordered_multiset<_Value, _Hash, _Pred, _Alloc>::unordered_multiset(
const unordered_multiset& __u, const allocator_type& __a)
: __table_(__u.__table_, __a)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__u.bucket_count());
insert(__u.begin(), __u.end());
}
@@ -1702,9 +1659,9 @@ unordered_multiset<_Value, _Hash, _Pred, _Alloc>::unordered_multiset(
_NOEXCEPT_(is_nothrow_move_constructible<__table>::value)
: __table_(_VSTD::move(__u.__table_))
{
+ _VSTD::__debug_db_insert_c(this);
#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
- __get_db()->swap(this, &__u);
+ __get_db()->swap(this, _VSTD::addressof(__u));
#endif
}
@@ -1713,9 +1670,7 @@ unordered_multiset<_Value, _Hash, _Pred, _Alloc>::unordered_multiset(
unordered_multiset&& __u, const allocator_type& __a)
: __table_(_VSTD::move(__u.__table_), __a)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
if (__a != __u.get_allocator())
{
iterator __i = __u.begin();
@@ -1724,7 +1679,7 @@ unordered_multiset<_Value, _Hash, _Pred, _Alloc>::unordered_multiset(
}
#if _LIBCPP_DEBUG_LEVEL == 2
else
- __get_db()->swap(this, &__u);
+ __get_db()->swap(this, _VSTD::addressof(__u));
#endif
}
@@ -1732,9 +1687,7 @@ template <class _Value, class _Hash, class _Pred, class _Alloc>
unordered_multiset<_Value, _Hash, _Pred, _Alloc>::unordered_multiset(
initializer_list<value_type> __il)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
insert(__il.begin(), __il.end());
}
@@ -1744,9 +1697,7 @@ unordered_multiset<_Value, _Hash, _Pred, _Alloc>::unordered_multiset(
const key_equal& __eql)
: __table_(__hf, __eql)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__n);
insert(__il.begin(), __il.end());
}
@@ -1757,9 +1708,7 @@ unordered_multiset<_Value, _Hash, _Pred, _Alloc>::unordered_multiset(
const key_equal& __eql, const allocator_type& __a)
: __table_(__hf, __eql, __a)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
__table_.rehash(__n);
insert(__il.begin(), __il.end());
}
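
Note on the recurring hunk pattern above: the open-coded "#if _LIBCPP_DEBUG_LEVEL == 2 / __get_db()->__insert_c(this)" blocks in the unordered container constructors are replaced by a single unconditional call to _VSTD::__debug_db_insert_c(this). A minimal sketch of what such a helper can look like, assuming it simply centralizes the debug-level check (this is an illustrative shape, not the exact definition in libc++'s <__debug> header):

// Hypothetical sketch of a centralized debug-registration helper.
// The real libc++ helper may differ in attributes, visibility and naming.
template <class _Cont>
inline void __debug_db_insert_c(_Cont* __c) {
#if _LIBCPP_DEBUG_LEVEL == 2
  __get_db()->__insert_c(__c);   // register the container with the debug database
#else
  (void)__c;                     // no-op when debug iterators are disabled
#endif
}

With a helper of this shape, each constructor body stays free of preprocessor conditionals while still registering itself only in debug-level-2 builds.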
diff --git a/contrib/llvm-project/libcxx/include/valarray b/contrib/llvm-project/libcxx/include/valarray
index 909e0422c476..a55d921872ba 100644
--- a/contrib/llvm-project/libcxx/include/valarray
+++ b/contrib/llvm-project/libcxx/include/valarray
@@ -348,6 +348,7 @@ template <class T> unspecified2 end(const valarray<T>& v);
#include <functional>
#include <initializer_list>
#include <new>
+#include <version>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
diff --git a/contrib/llvm-project/libcxx/include/vector b/contrib/llvm-project/libcxx/include/vector
index 9b0092cfdbd9..fd0fb0db2756 100644
--- a/contrib/llvm-project/libcxx/include/vector
+++ b/contrib/llvm-project/libcxx/include/vector
@@ -271,8 +271,8 @@ erase_if(vector<T, Allocator>& c, Predicate pred); // C++20
*/
-#include <__config>
#include <__bit_reference>
+#include <__config>
#include <__debug>
#include <__functional_base>
#include <__iterator/iterator_traits.h>
@@ -373,11 +373,9 @@ public:
_LIBCPP_INLINE_VISIBILITY
vector() _NOEXCEPT_(is_nothrow_default_constructible<allocator_type>::value)
- {
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
- }
+ {
+ _VSTD::__debug_db_insert_c(this);
+ }
_LIBCPP_INLINE_VISIBILITY explicit vector(const allocator_type& __a)
#if _LIBCPP_STD_VER <= 14
_NOEXCEPT_(is_nothrow_copy_constructible<allocator_type>::value)
@@ -386,9 +384,7 @@ public:
#endif
: __base(__a)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
}
explicit vector(size_type __n);
#if _LIBCPP_STD_VER > 11
@@ -400,9 +396,7 @@ public:
vector(size_type __n, const value_type& __x, const allocator_type& __a)
: __base(__a)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
if (__n > 0)
{
__vallocate(__n);
@@ -1101,9 +1095,7 @@ vector<_Tp, _Allocator>::__append(size_type __n, const_reference __x)
template <class _Tp, class _Allocator>
vector<_Tp, _Allocator>::vector(size_type __n)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
if (__n > 0)
{
__vallocate(__n);
@@ -1116,9 +1108,7 @@ template <class _Tp, class _Allocator>
vector<_Tp, _Allocator>::vector(size_type __n, const allocator_type& __a)
: __base(__a)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
if (__n > 0)
{
__vallocate(__n);
@@ -1130,9 +1120,7 @@ vector<_Tp, _Allocator>::vector(size_type __n, const allocator_type& __a)
template <class _Tp, class _Allocator>
vector<_Tp, _Allocator>::vector(size_type __n, const value_type& __x)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
if (__n > 0)
{
__vallocate(__n);
@@ -1150,9 +1138,7 @@ vector<_Tp, _Allocator>::vector(_InputIterator __first,
typename iterator_traits<_InputIterator>::reference>::value,
_InputIterator>::type __last)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
for (; __first != __last; ++__first)
__emplace_back(*__first);
}
@@ -1167,9 +1153,7 @@ vector<_Tp, _Allocator>::vector(_InputIterator __first, _InputIterator __last, c
typename iterator_traits<_InputIterator>::reference>::value>::type*)
: __base(__a)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
for (; __first != __last; ++__first)
__emplace_back(*__first);
}
@@ -1183,9 +1167,7 @@ vector<_Tp, _Allocator>::vector(_ForwardIterator __first,
typename iterator_traits<_ForwardIterator>::reference>::value,
_ForwardIterator>::type __last)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
size_type __n = static_cast<size_type>(_VSTD::distance(__first, __last));
if (__n > 0)
{
@@ -1203,9 +1185,7 @@ vector<_Tp, _Allocator>::vector(_ForwardIterator __first, _ForwardIterator __las
typename iterator_traits<_ForwardIterator>::reference>::value>::type*)
: __base(__a)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
size_type __n = static_cast<size_type>(_VSTD::distance(__first, __last));
if (__n > 0)
{
@@ -1218,9 +1198,7 @@ template <class _Tp, class _Allocator>
vector<_Tp, _Allocator>::vector(const vector& __x)
: __base(__alloc_traits::select_on_container_copy_construction(__x.__alloc()))
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
size_type __n = __x.size();
if (__n > 0)
{
@@ -1233,9 +1211,7 @@ template <class _Tp, class _Allocator>
vector<_Tp, _Allocator>::vector(const vector& __x, const __identity_t<allocator_type>& __a)
: __base(__a)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
size_type __n = __x.size();
if (__n > 0)
{
@@ -1256,8 +1232,8 @@ vector<_Tp, _Allocator>::vector(vector&& __x)
#endif
: __base(_VSTD::move(__x.__alloc()))
{
+ _VSTD::__debug_db_insert_c(this);
#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
__get_db()->swap(this, _VSTD::addressof(__x));
#endif
this->__begin_ = __x.__begin_;
@@ -1271,9 +1247,7 @@ inline _LIBCPP_INLINE_VISIBILITY
vector<_Tp, _Allocator>::vector(vector&& __x, const __identity_t<allocator_type>& __a)
: __base(__a)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
if (__a == __x.__alloc())
{
this->__begin_ = __x.__begin_;
@@ -1295,9 +1269,7 @@ template <class _Tp, class _Allocator>
inline _LIBCPP_INLINE_VISIBILITY
vector<_Tp, _Allocator>::vector(initializer_list<value_type> __il)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
if (__il.size() > 0)
{
__vallocate(__il.size());
@@ -1310,9 +1282,7 @@ inline _LIBCPP_INLINE_VISIBILITY
vector<_Tp, _Allocator>::vector(initializer_list<value_type> __il, const allocator_type& __a)
: __base(__a)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- __get_db()->__insert_c(this);
-#endif
+ _VSTD::__debug_db_insert_c(this);
if (__il.size() > 0)
{
__vallocate(__il.size());
@@ -1677,11 +1647,8 @@ inline _LIBCPP_INLINE_VISIBILITY
typename vector<_Tp, _Allocator>::iterator
vector<_Tp, _Allocator>::erase(const_iterator __position)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__position)) == this,
- "vector::erase(iterator) called with an iterator not"
- " referring to this vector");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__position)) == this,
+ "vector::erase(iterator) called with an iterator not referring to this vector");
_LIBCPP_ASSERT(__position != end(),
"vector::erase(iterator) called with a non-dereferenceable iterator");
difference_type __ps = __position - cbegin();
@@ -1696,14 +1663,11 @@ template <class _Tp, class _Allocator>
typename vector<_Tp, _Allocator>::iterator
vector<_Tp, _Allocator>::erase(const_iterator __first, const_iterator __last)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__first)) == this,
- "vector::erase(iterator, iterator) called with an iterator not"
- " referring to this vector");
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__last)) == this,
- "vector::erase(iterator, iterator) called with an iterator not"
- " referring to this vector");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__first)) == this,
+ "vector::erase(iterator, iterator) called with an iterator not referring to this vector");
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__last)) == this,
+ "vector::erase(iterator, iterator) called with an iterator not referring to this vector");
+
_LIBCPP_ASSERT(__first <= __last, "vector::erase(first, last) called with invalid range");
pointer __p = this->__begin_ + (__first - begin());
if (__first != __last) {
@@ -1737,11 +1701,8 @@ template <class _Tp, class _Allocator>
typename vector<_Tp, _Allocator>::iterator
vector<_Tp, _Allocator>::insert(const_iterator __position, const_reference __x)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__position)) == this,
- "vector::insert(iterator, x) called with an iterator not"
- " referring to this vector");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__position)) == this,
+ "vector::insert(iterator, x) called with an iterator not referring to this vector");
pointer __p = this->__begin_ + (__position - begin());
if (this->__end_ < this->__end_cap())
{
@@ -1774,11 +1735,8 @@ template <class _Tp, class _Allocator>
typename vector<_Tp, _Allocator>::iterator
vector<_Tp, _Allocator>::insert(const_iterator __position, value_type&& __x)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__position)) == this,
- "vector::insert(iterator, x) called with an iterator not"
- " referring to this vector");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__position)) == this,
+ "vector::insert(iterator, x) called with an iterator not referring to this vector");
pointer __p = this->__begin_ + (__position - begin());
if (this->__end_ < this->__end_cap())
{
@@ -1807,11 +1765,8 @@ template <class... _Args>
typename vector<_Tp, _Allocator>::iterator
vector<_Tp, _Allocator>::emplace(const_iterator __position, _Args&&... __args)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__position)) == this,
- "vector::emplace(iterator, x) called with an iterator not"
- " referring to this vector");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__position)) == this,
+ "vector::emplace(iterator, x) called with an iterator not referring to this vector");
pointer __p = this->__begin_ + (__position - begin());
if (this->__end_ < this->__end_cap())
{
@@ -1842,11 +1797,8 @@ template <class _Tp, class _Allocator>
typename vector<_Tp, _Allocator>::iterator
vector<_Tp, _Allocator>::insert(const_iterator __position, size_type __n, const_reference __x)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__position)) == this,
- "vector::insert(iterator, n, x) called with an iterator not"
- " referring to this vector");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__position)) == this,
+ "vector::insert(iterator, n, x) called with an iterator not referring to this vector");
pointer __p = this->__begin_ + (__position - begin());
if (__n > 0)
{
@@ -1893,11 +1845,8 @@ typename enable_if
>::type
vector<_Tp, _Allocator>::insert(const_iterator __position, _InputIterator __first, _InputIterator __last)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__position)) == this,
- "vector::insert(iterator, range) called with an iterator not"
- " referring to this vector");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__position)) == this,
+ "vector::insert(iterator, range) called with an iterator not referring to this vector");
difference_type __off = __position - begin();
pointer __p = this->__begin_ + __off;
allocator_type& __a = this->__alloc();
@@ -1946,11 +1895,8 @@ typename enable_if
>::type
vector<_Tp, _Allocator>::insert(const_iterator __position, _ForwardIterator __first, _ForwardIterator __last)
{
-#if _LIBCPP_DEBUG_LEVEL == 2
- _LIBCPP_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__position)) == this,
- "vector::insert(iterator, range) called with an iterator not"
- " referring to this vector");
-#endif
+ _LIBCPP_DEBUG_ASSERT(__get_const_db()->__find_c_from_i(_VSTD::addressof(__position)) == this,
+ "vector::insert(iterator, range) called with an iterator not referring to this vector");
pointer __p = this->__begin_ + (__position - begin());
difference_type __n = _VSTD::distance(__first, __last);
if (__n > 0)
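
Note on the vector hunks above: the per-call-site "#if _LIBCPP_DEBUG_LEVEL == 2" wrappers around _LIBCPP_ASSERT in erase/insert/emplace are collapsed into a single _LIBCPP_DEBUG_ASSERT macro that only fires at debug level 2. A sketch of the assumed macro shape (illustrative only; the actual definition in libc++'s debug machinery may differ):

// Assumed shape of a debug-level-2-only assertion macro.
#if _LIBCPP_DEBUG_LEVEL == 2
#  define _LIBCPP_DEBUG_ASSERT(__cond, __msg) _LIBCPP_ASSERT(__cond, __msg)
#else
#  define _LIBCPP_DEBUG_ASSERT(__cond, __msg) ((void)0)
#endif

This keeps the iterator-ownership checks in one line per call site and removes the surrounding conditional blocks without changing release-build behavior.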
diff --git a/contrib/llvm-project/libcxx/include/version b/contrib/llvm-project/libcxx/include/version
index 574dfe47b58f..0c2365612853 100644
--- a/contrib/llvm-project/libcxx/include/version
+++ b/contrib/llvm-project/libcxx/include/version
@@ -14,7 +14,9 @@
version synopsis
Macro name Value Headers
+__cpp_lib_adaptor_iterator_pair_constructor 202106L <queue> <stack>
__cpp_lib_addressof_constexpr 201603L <memory>
+__cpp_lib_allocate_at_least 202106L <memory>
__cpp_lib_allocator_traits_is_always_equal 201411L <deque> <forward_list> <list>
<map> <memory> <scoped_allocator>
<set> <string> <unordered_map>
@@ -24,6 +26,8 @@ __cpp_lib_apply 201603L <tuple>
__cpp_lib_array_constexpr 201811L <array> <iterator>
201603L // C++17
__cpp_lib_as_const 201510L <utility>
+__cpp_lib_associative_heterogeneous_erasure 202110L <map> <set> <unordered_map>
+ <unordered_set>
__cpp_lib_assume_aligned 201811L <memory>
__cpp_lib_atomic_flag_test 201907L <atomic>
__cpp_lib_atomic_float 201711L <atomic>
@@ -60,6 +64,7 @@ __cpp_lib_constexpr_numeric 201911L <numeric>
__cpp_lib_constexpr_string 201811L <string>
__cpp_lib_constexpr_string_view 201811L <string_view>
__cpp_lib_constexpr_tuple 201811L <tuple>
+__cpp_lib_constexpr_typeinfo 202106L <typeinfo>
__cpp_lib_constexpr_utility 201811L <utility>
__cpp_lib_constexpr_vector 201907L <vector>
__cpp_lib_coroutine 201902L <coroutine>
@@ -87,6 +92,7 @@ __cpp_lib_integer_sequence 201304L <utility>
__cpp_lib_integral_constant_callable 201304L <type_traits>
__cpp_lib_interpolate 201902L <cmath> <numeric>
__cpp_lib_invoke 201411L <functional>
+__cpp_lib_invoke_r 202106L <functional>
__cpp_lib_is_aggregate 201703L <type_traits>
__cpp_lib_is_constant_evaluated 201811L <type_traits>
__cpp_lib_is_final 201402L <type_traits>
@@ -110,6 +116,7 @@ __cpp_lib_math_constants 201907L <numbers>
__cpp_lib_math_special_functions 201603L <cmath>
__cpp_lib_memory_resource 201603L <memory_resource>
__cpp_lib_monadic_optional 202110L <optional>
+__cpp_lib_move_only_function 202110L <functional>
__cpp_lib_node_extract 201606L <map> <set> <unordered_map>
<unordered_set>
__cpp_lib_nonmember_container_access 201411L <array> <deque> <forward_list>
@@ -119,11 +126,14 @@ __cpp_lib_nonmember_container_access 201411L <array> <deque>
__cpp_lib_not_fn 201603L <functional>
__cpp_lib_null_iterators 201304L <iterator>
__cpp_lib_optional 201606L <optional>
+__cpp_lib_out_ptr 202106L <memory>
__cpp_lib_parallel_algorithm 201603L <algorithm> <numeric>
-__cpp_lib_polymorphic_allocator 201902L <memory>
+__cpp_lib_polymorphic_allocator 201902L <memory_resource>
__cpp_lib_quoted_string_io 201304L <iomanip>
__cpp_lib_ranges 201811L <algorithm> <functional> <iterator>
<memory> <ranges>
+__cpp_lib_ranges_starts_ends_with 202106L <algorithm>
+__cpp_lib_ranges_zip 202110L <ranges> <tuple> <utility>
__cpp_lib_raw_memory_algorithms 201606L <memory>
__cpp_lib_remove_cvref 201711L <type_traits>
__cpp_lib_result_of_sfinae 201210L <functional> <type_traits>
@@ -139,11 +149,13 @@ __cpp_lib_shift 201806L <algorithm>
__cpp_lib_smart_ptr_for_overwrite 202002L <memory>
__cpp_lib_source_location 201907L <source_location>
__cpp_lib_span 202002L <span>
+__cpp_lib_spanstream 202106L <spanstream>
__cpp_lib_ssize 201902L <iterator>
__cpp_lib_stacktrace 202011L <stacktrace>
__cpp_lib_starts_ends_with 201711L <string> <string_view>
__cpp_lib_stdatomic_h 202011L <stdatomic.h>
__cpp_lib_string_contains 202011L <string> <string_view>
+__cpp_lib_string_resize_and_overwrite 202110L <string>
__cpp_lib_string_udls 201304L <string>
__cpp_lib_string_view 201803L <string> <string_view>
201606L // C++17
@@ -158,6 +170,7 @@ __cpp_lib_transparent_operators 201510L <functional> <me
201210L // C++14
__cpp_lib_tuple_element_t 201402L <tuple>
__cpp_lib_tuples_by_type 201304L <tuple> <utility>
+__cpp_lib_type_identity 201806L <type_traits>
__cpp_lib_type_trait_variable_templates 201510L <type_traits>
__cpp_lib_uncaught_exceptions 201411L <exception>
__cpp_lib_unordered_map_try_emplace 201411L <unordered_map>
@@ -342,16 +355,28 @@ __cpp_lib_void_t 201411L <type_traits>
// # define __cpp_lib_three_way_comparison 201907L
# define __cpp_lib_to_address 201711L
# define __cpp_lib_to_array 201907L
+# define __cpp_lib_type_identity 201806L
# define __cpp_lib_unwrap_ref 201811L
#endif
#if _LIBCPP_STD_VER > 20
+# define __cpp_lib_adaptor_iterator_pair_constructor 202106L
+// # define __cpp_lib_allocate_at_least 202106L
+// # define __cpp_lib_associative_heterogeneous_erasure 202110L
# define __cpp_lib_byteswap 202110L
+// # define __cpp_lib_constexpr_typeinfo 202106L
+// # define __cpp_lib_invoke_r 202106L
# define __cpp_lib_is_scoped_enum 202011L
# define __cpp_lib_monadic_optional 202110L
+// # define __cpp_lib_move_only_function 202110L
+// # define __cpp_lib_out_ptr 202106L
+// # define __cpp_lib_ranges_starts_ends_with 202106L
+// # define __cpp_lib_ranges_zip 202110L
+// # define __cpp_lib_spanstream 202106L
// # define __cpp_lib_stacktrace 202011L
// # define __cpp_lib_stdatomic_h 202011L
# define __cpp_lib_string_contains 202011L
+# define __cpp_lib_string_resize_and_overwrite 202110L
# define __cpp_lib_to_underlying 202102L
#endif
diff --git a/contrib/llvm-project/libcxx/src/atomic.cpp b/contrib/llvm-project/libcxx/src/atomic.cpp
index 9b61a16106c2..250d33e98b02 100644
--- a/contrib/llvm-project/libcxx/src/atomic.cpp
+++ b/contrib/llvm-project/libcxx/src/atomic.cpp
@@ -9,9 +9,10 @@
#include <__config>
#ifndef _LIBCPP_HAS_NO_THREADS
-#include <climits>
#include <atomic>
+#include <climits>
#include <functional>
+#include <thread>
#ifdef __linux__
diff --git a/contrib/llvm-project/libcxx/src/chrono.cpp b/contrib/llvm-project/libcxx/src/chrono.cpp
index 5aa7af75894b..4f2d51042ff1 100644
--- a/contrib/llvm-project/libcxx/src/chrono.cpp
+++ b/contrib/llvm-project/libcxx/src/chrono.cpp
@@ -44,6 +44,10 @@
# endif
#endif // defined(_LIBCPP_WIN32API)
+#if defined(__Fuchsia__)
+# include <zircon/syscalls.h>
+#endif
+
#if __has_include(<mach/mach_time.h>)
# include <mach/mach_time.h>
#endif
@@ -266,7 +270,18 @@ static steady_clock::time_point __libcpp_steady_clock_now() {
return steady_clock::time_point(seconds(ts.tv_sec) + nanoseconds(ts.tv_nsec));
}
-#elif defined(CLOCK_MONOTONIC)
+# elif defined(__Fuchsia__)
+
+static steady_clock::time_point __libcpp_steady_clock_now() noexcept {
+ // Implicitly link against the vDSO system call ABI without
+ // requiring the final link to specify -lzircon explicitly when
+ // statically linking libc++.
+# pragma comment(lib, "zircon")
+
+ return steady_clock::time_point(nanoseconds(_zx_clock_get_monotonic()));
+}
+
+# elif defined(CLOCK_MONOTONIC)
static steady_clock::time_point __libcpp_steady_clock_now() {
struct timespec tp;
@@ -275,9 +290,9 @@ static steady_clock::time_point __libcpp_steady_clock_now() {
return steady_clock::time_point(seconds(tp.tv_sec) + nanoseconds(tp.tv_nsec));
}
-#else
-# error "Monotonic clock not implemented on this platform"
-#endif
+# else
+# error "Monotonic clock not implemented on this platform"
+# endif
const bool steady_clock::is_steady;
diff --git a/contrib/llvm-project/libcxx/src/chrono_system_time_init.h b/contrib/llvm-project/libcxx/src/chrono_system_time_init.h
index 3c5a0c33a56a..b1bdc691b385 100644
--- a/contrib/llvm-project/libcxx/src/chrono_system_time_init.h
+++ b/contrib/llvm-project/libcxx/src/chrono_system_time_init.h
@@ -1,2 +1,2 @@
#pragma GCC system_header
-GetSystemTimeInit GetSystemTimeAsFileTimeFunc _LIBCPP_INIT_PRIORITY_MAX;
\ No newline at end of file
+GetSystemTimeInit GetSystemTimeAsFileTimeFunc _LIBCPP_INIT_PRIORITY_MAX;
diff --git a/contrib/llvm-project/libcxx/src/experimental/memory_resource_init_helper.h b/contrib/llvm-project/libcxx/src/experimental/memory_resource_init_helper.h
index 2e1cae5ecc60..56b9da685878 100644
--- a/contrib/llvm-project/libcxx/src/experimental/memory_resource_init_helper.h
+++ b/contrib/llvm-project/libcxx/src/experimental/memory_resource_init_helper.h
@@ -1,2 +1,2 @@
#pragma GCC system_header
-_LIBCPP_SAFE_STATIC ResourceInitHelper res_init _LIBCPP_INIT_PRIORITY_MAX;
\ No newline at end of file
+_LIBCPP_SAFE_STATIC ResourceInitHelper res_init _LIBCPP_INIT_PRIORITY_MAX;
diff --git a/contrib/llvm-project/libcxx/src/filesystem/directory_iterator.cpp b/contrib/llvm-project/libcxx/src/filesystem/directory_iterator.cpp
index 90b255d9877f..fa793f68295b 100644
--- a/contrib/llvm-project/libcxx/src/filesystem/directory_iterator.cpp
+++ b/contrib/llvm-project/libcxx/src/filesystem/directory_iterator.cpp
@@ -9,97 +9,12 @@
#include "__config"
#include "filesystem"
#include "stack"
-#if defined(_LIBCPP_WIN32API)
-#define WIN32_LEAN_AND_MEAN
-#define NOMINMAX
-#include <windows.h>
-#else
-#include <dirent.h>
-#endif
#include <errno.h>
#include "filesystem_common.h"
_LIBCPP_BEGIN_NAMESPACE_FILESYSTEM
-namespace detail {
-namespace {
-
-#if !defined(_LIBCPP_WIN32API)
-
-#if defined(DT_BLK)
-template <class DirEntT, class = decltype(DirEntT::d_type)>
-static file_type get_file_type(DirEntT* ent, int) {
- switch (ent->d_type) {
- case DT_BLK:
- return file_type::block;
- case DT_CHR:
- return file_type::character;
- case DT_DIR:
- return file_type::directory;
- case DT_FIFO:
- return file_type::fifo;
- case DT_LNK:
- return file_type::symlink;
- case DT_REG:
- return file_type::regular;
- case DT_SOCK:
- return file_type::socket;
- // Unlike in lstat, hitting "unknown" here simply means that the underlying
- // filesystem doesn't support d_type. Report is as 'none' so we correctly
- // set the cache to empty.
- case DT_UNKNOWN:
- break;
- }
- return file_type::none;
-}
-#endif // defined(DT_BLK)
-
-template <class DirEntT>
-static file_type get_file_type(DirEntT* ent, long) {
- return file_type::none;
-}
-
-static pair<string_view, file_type> posix_readdir(DIR* dir_stream,
- error_code& ec) {
- struct dirent* dir_entry_ptr = nullptr;
- errno = 0; // zero errno in order to detect errors
- ec.clear();
- if ((dir_entry_ptr = ::readdir(dir_stream)) == nullptr) {
- if (errno)
- ec = capture_errno();
- return {};
- } else {
- return {dir_entry_ptr->d_name, get_file_type(dir_entry_ptr, 0)};
- }
-}
-#else
-// defined(_LIBCPP_WIN32API)
-
-static file_type get_file_type(const WIN32_FIND_DATAW& data) {
- if (data.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT &&
- data.dwReserved0 == IO_REPARSE_TAG_SYMLINK)
- return file_type::symlink;
- if (data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)
- return file_type::directory;
- return file_type::regular;
-}
-static uintmax_t get_file_size(const WIN32_FIND_DATAW& data) {
- return (static_cast<uint64_t>(data.nFileSizeHigh) << 32) + data.nFileSizeLow;
-}
-static file_time_type get_write_time(const WIN32_FIND_DATAW& data) {
- ULARGE_INTEGER tmp;
- const FILETIME& time = data.ftLastWriteTime;
- tmp.u.LowPart = time.dwLowDateTime;
- tmp.u.HighPart = time.dwHighDateTime;
- return file_time_type(file_time_type::duration(tmp.QuadPart));
-}
-
-#endif
-
-} // namespace
-} // namespace detail
-
using detail::ErrorHandler;
#if defined(_LIBCPP_WIN32API)
@@ -197,9 +112,9 @@ public:
: __stream_(nullptr), __root_(root) {
if ((__stream_ = ::opendir(root.c_str())) == nullptr) {
ec = detail::capture_errno();
- const bool allow_eacess =
+ const bool allow_eacces =
bool(opts & directory_options::skip_permission_denied);
- if (allow_eacess && ec.value() == EACCES)
+ if (allow_eacces && ec.value() == EACCES)
ec.clear();
return;
}
diff --git a/contrib/llvm-project/libcxx/src/filesystem/filesystem_common.h b/contrib/llvm-project/libcxx/src/filesystem/filesystem_common.h
index 717894e537d2..99bba2760dce 100644
--- a/contrib/llvm-project/libcxx/src/filesystem/filesystem_common.h
+++ b/contrib/llvm-project/libcxx/src/filesystem/filesystem_common.h
@@ -17,14 +17,22 @@
#include "cstdlib"
#include "ctime"
#include "filesystem"
+#include "ratio"
#include "system_error"
+#if defined(_LIBCPP_WIN32API)
+# define WIN32_LEAN_AND_MEAN
+# define NOMINMAX
+# include <windows.h>
+#endif
+
#if !defined(_LIBCPP_WIN32API)
-# include <unistd.h>
+# include <dirent.h> // for DIR & friends
+# include <fcntl.h> /* values for fchmodat */
# include <sys/stat.h>
# include <sys/statvfs.h>
# include <sys/time.h> // for ::utimes as used in __last_write_time
-# include <fcntl.h> /* values for fchmodat */
+# include <unistd.h>
#endif
#include "../include/apple_availability.h"
@@ -526,7 +534,76 @@ bool set_file_times(const path& p, std::array<TimeSpec, 2> const& TS,
return posix_utimensat(p, TS, ec);
#endif
}
-#endif /* !_LIBCPP_WIN32API */
+
+#if defined(DT_BLK)
+template <class DirEntT, class = decltype(DirEntT::d_type)>
+static file_type get_file_type(DirEntT* ent, int) {
+ switch (ent->d_type) {
+ case DT_BLK:
+ return file_type::block;
+ case DT_CHR:
+ return file_type::character;
+ case DT_DIR:
+ return file_type::directory;
+ case DT_FIFO:
+ return file_type::fifo;
+ case DT_LNK:
+ return file_type::symlink;
+ case DT_REG:
+ return file_type::regular;
+ case DT_SOCK:
+ return file_type::socket;
+ // Unlike in lstat, hitting "unknown" here simply means that the underlying
+ // filesystem doesn't support d_type. Report is as 'none' so we correctly
+ // set the cache to empty.
+ case DT_UNKNOWN:
+ break;
+ }
+ return file_type::none;
+}
+#endif // defined(DT_BLK)
+
+template <class DirEntT>
+static file_type get_file_type(DirEntT*, long) {
+ return file_type::none;
+}
+
+static pair<string_view, file_type> posix_readdir(DIR* dir_stream,
+ error_code& ec) {
+ struct dirent* dir_entry_ptr = nullptr;
+ errno = 0; // zero errno in order to detect errors
+ ec.clear();
+ if ((dir_entry_ptr = ::readdir(dir_stream)) == nullptr) {
+ if (errno)
+ ec = capture_errno();
+ return {};
+ } else {
+ return {dir_entry_ptr->d_name, get_file_type(dir_entry_ptr, 0)};
+ }
+}
+
+#else // _LIBCPP_WIN32API
+
+static file_type get_file_type(const WIN32_FIND_DATAW& data) {
+ if (data.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT &&
+ data.dwReserved0 == IO_REPARSE_TAG_SYMLINK)
+ return file_type::symlink;
+ if (data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)
+ return file_type::directory;
+ return file_type::regular;
+}
+static uintmax_t get_file_size(const WIN32_FIND_DATAW& data) {
+ return (static_cast<uint64_t>(data.nFileSizeHigh) << 32) + data.nFileSizeLow;
+}
+static file_time_type get_write_time(const WIN32_FIND_DATAW& data) {
+ ULARGE_INTEGER tmp;
+ const FILETIME& time = data.ftLastWriteTime;
+ tmp.u.LowPart = time.dwLowDateTime;
+ tmp.u.HighPart = time.dwHighDateTime;
+ return file_time_type(file_time_type::duration(tmp.QuadPart));
+}
+
+#endif // !_LIBCPP_WIN32API
} // namespace
} // end namespace detail
diff --git a/contrib/llvm-project/libcxx/src/iostream_init.h b/contrib/llvm-project/libcxx/src/iostream_init.h
index b0a60f42a67c..7d1bb5c2d7d8 100644
--- a/contrib/llvm-project/libcxx/src/iostream_init.h
+++ b/contrib/llvm-project/libcxx/src/iostream_init.h
@@ -1,2 +1,2 @@
#pragma GCC system_header
-_LIBCPP_HIDDEN ios_base::Init __start_std_streams _LIBCPP_INIT_PRIORITY_MAX;
\ No newline at end of file
+_LIBCPP_HIDDEN ios_base::Init __start_std_streams _LIBCPP_INIT_PRIORITY_MAX;
diff --git a/contrib/llvm-project/libcxx/src/locale.cpp b/contrib/llvm-project/libcxx/src/locale.cpp
index 79f03b85fab3..2234784769dd 100644
--- a/contrib/llvm-project/libcxx/src/locale.cpp
+++ b/contrib/llvm-project/libcxx/src/locale.cpp
@@ -898,7 +898,7 @@ ctype<wchar_t>::do_toupper(char_type c) const
#ifdef _LIBCPP_HAS_DEFAULTRUNELOCALE
return isascii(c) ? _DefaultRuneLocale.__mapupper[c] : c;
#elif defined(__GLIBC__) || defined(__EMSCRIPTEN__) || \
- defined(__NetBSD__)
+ defined(__NetBSD__) || defined(__MVS__)
return isascii(c) ? ctype<char>::__classic_upper_table()[c] : c;
#else
return (isascii(c) && iswlower_l(c, _LIBCPP_GET_C_LOCALE)) ? c-L'a'+L'A' : c;
@@ -912,7 +912,7 @@ ctype<wchar_t>::do_toupper(char_type* low, const char_type* high) const
#ifdef _LIBCPP_HAS_DEFAULTRUNELOCALE
*low = isascii(*low) ? _DefaultRuneLocale.__mapupper[*low] : *low;
#elif defined(__GLIBC__) || defined(__EMSCRIPTEN__) || \
- defined(__NetBSD__)
+ defined(__NetBSD__) || defined(__MVS__)
*low = isascii(*low) ? ctype<char>::__classic_upper_table()[*low]
: *low;
#else
@@ -927,7 +927,7 @@ ctype<wchar_t>::do_tolower(char_type c) const
#ifdef _LIBCPP_HAS_DEFAULTRUNELOCALE
return isascii(c) ? _DefaultRuneLocale.__maplower[c] : c;
#elif defined(__GLIBC__) || defined(__EMSCRIPTEN__) || \
- defined(__NetBSD__)
+ defined(__NetBSD__) || defined(__MVS__)
return isascii(c) ? ctype<char>::__classic_lower_table()[c] : c;
#else
return (isascii(c) && isupper_l(c, _LIBCPP_GET_C_LOCALE)) ? c-L'A'+'a' : c;
@@ -941,7 +941,7 @@ ctype<wchar_t>::do_tolower(char_type* low, const char_type* high) const
#ifdef _LIBCPP_HAS_DEFAULTRUNELOCALE
*low = isascii(*low) ? _DefaultRuneLocale.__maplower[*low] : *low;
#elif defined(__GLIBC__) || defined(__EMSCRIPTEN__) || \
- defined(__NetBSD__)
+ defined(__NetBSD__) || defined(__MVS__)
*low = isascii(*low) ? ctype<char>::__classic_lower_table()[*low]
: *low;
#else
@@ -1013,7 +1013,7 @@ ctype<char>::do_toupper(char_type c) const
static_cast<char>(_DefaultRuneLocale.__mapupper[static_cast<ptrdiff_t>(c)]) : c;
#elif defined(__NetBSD__)
return static_cast<char>(__classic_upper_table()[static_cast<unsigned char>(c)]);
-#elif defined(__GLIBC__) || defined(__EMSCRIPTEN__)
+#elif defined(__GLIBC__) || defined(__EMSCRIPTEN__) || defined(__MVS__)
return isascii(c) ?
static_cast<char>(__classic_upper_table()[static_cast<unsigned char>(c)]) : c;
#else
@@ -1030,7 +1030,7 @@ ctype<char>::do_toupper(char_type* low, const char_type* high) const
static_cast<char>(_DefaultRuneLocale.__mapupper[static_cast<ptrdiff_t>(*low)]) : *low;
#elif defined(__NetBSD__)
*low = static_cast<char>(__classic_upper_table()[static_cast<unsigned char>(*low)]);
-#elif defined(__GLIBC__) || defined(__EMSCRIPTEN__)
+#elif defined(__GLIBC__) || defined(__EMSCRIPTEN__) || defined(__MVS__)
*low = isascii(*low) ?
static_cast<char>(__classic_upper_table()[static_cast<size_t>(*low)]) : *low;
#else
@@ -1047,7 +1047,7 @@ ctype<char>::do_tolower(char_type c) const
static_cast<char>(_DefaultRuneLocale.__maplower[static_cast<ptrdiff_t>(c)]) : c;
#elif defined(__NetBSD__)
return static_cast<char>(__classic_lower_table()[static_cast<unsigned char>(c)]);
-#elif defined(__GLIBC__) || defined(__EMSCRIPTEN__)
+#elif defined(__GLIBC__) || defined(__EMSCRIPTEN__) || defined(__MVS__)
return isascii(c) ?
static_cast<char>(__classic_lower_table()[static_cast<size_t>(c)]) : c;
#else
@@ -1063,7 +1063,7 @@ ctype<char>::do_tolower(char_type* low, const char_type* high) const
*low = isascii(*low) ? static_cast<char>(_DefaultRuneLocale.__maplower[static_cast<ptrdiff_t>(*low)]) : *low;
#elif defined(__NetBSD__)
*low = static_cast<char>(__classic_lower_table()[static_cast<unsigned char>(*low)]);
-#elif defined(__GLIBC__) || defined(__EMSCRIPTEN__)
+#elif defined(__GLIBC__) || defined(__EMSCRIPTEN__) || defined(__MVS__)
*low = isascii(*low) ? static_cast<char>(__classic_lower_table()[static_cast<size_t>(*low)]) : *low;
#else
*low = (isascii(*low) && isupper_l(*low, _LIBCPP_GET_C_LOCALE)) ? *low-'A'+'a' : *low;
@@ -1211,6 +1211,12 @@ ctype<char>::classic_table() noexcept
return _ctype_ + 1;
#elif defined(_AIX)
return (const unsigned int *)__lc_ctype_ptr->obj->mask;
+#elif defined(__MVS__)
+# if defined(__NATIVE_ASCII_F)
+ return const_cast<const ctype<char>::mask*> (__OBJ_DATA(__lc_ctype_a)->mask);
+# else
+ return const_cast<const ctype<char>::mask*> (__ctypec);
+# endif
#else
// Platform not supported: abort so the person doing the port knows what to
// fix
@@ -1259,7 +1265,26 @@ ctype<char>::__classic_upper_table() noexcept
{
return *__ctype_toupper_loc();
}
-#endif // __GLIBC__ || __NETBSD__ || __EMSCRIPTEN__
+#elif defined(__MVS__)
+const unsigned short*
+ctype<char>::__classic_lower_table() _NOEXCEPT
+{
+# if defined(__NATIVE_ASCII_F)
+ return const_cast<const unsigned short*>(__OBJ_DATA(__lc_ctype_a)->lower);
+# else
+ return const_cast<const unsigned short*>(__ctype + __TOLOWER_INDEX);
+# endif
+}
+const unsigned short *
+ctype<char>::__classic_upper_table() _NOEXCEPT
+{
+# if defined(__NATIVE_ASCII_F)
+ return const_cast<const unsigned short*>(__OBJ_DATA(__lc_ctype_a)->upper);
+# else
+ return const_cast<const unsigned short*>(__ctype + __TOUPPER_INDEX);
+# endif
+}
+#endif // __GLIBC__ || __NETBSD__ || __EMSCRIPTEN__ || __MVS__
// template <> class ctype_byname<char>
diff --git a/contrib/llvm-project/libcxx/src/random.cpp b/contrib/llvm-project/libcxx/src/random.cpp
index 286a45785154..6472a8dbcba3 100644
--- a/contrib/llvm-project/libcxx/src/random.cpp
+++ b/contrib/llvm-project/libcxx/src/random.cpp
@@ -36,6 +36,8 @@
# endif
#elif defined(_LIBCPP_USING_NACL_RANDOM)
# include <nacl/nacl_random.h>
+#elif defined(_LIBCPP_USING_FUCHSIA_CPRNG)
+# include <zircon/syscalls.h>
#endif
@@ -66,10 +68,8 @@ random_device::operator()()
#elif defined(_LIBCPP_USING_ARC4_RANDOM)
-random_device::random_device(const string& __token)
+random_device::random_device(const string&)
{
- if (__token != "/dev/urandom")
- __throw_system_error(ENOENT, ("random device not supported " + __token).c_str());
}
random_device::~random_device()
@@ -170,6 +170,27 @@ random_device::operator()()
return r;
}
+#elif defined(_LIBCPP_USING_FUCHSIA_CPRNG)
+
+random_device::random_device(const string& __token) {
+ if (__token != "/dev/urandom")
+ __throw_system_error(ENOENT, ("random device not supported " + __token).c_str());
+}
+
+random_device::~random_device() {}
+
+unsigned random_device::operator()() {
+ // Implicitly link against the vDSO system call ABI without
+ // requiring the final link to specify -lzircon explicitly when
+ // statically linking libc++.
+# pragma comment(lib, "zircon")
+
+ // The system call cannot fail. It returns only when the bits are ready.
+ unsigned r;
+ _zx_cprng_draw(&r, sizeof(r));
+ return r;
+}
+
#else
#error "Random device not implemented for this architecture"
#endif
@@ -189,7 +210,7 @@ random_device::entropy() const noexcept
return std::numeric_limits<result_type>::digits;
return ent;
-#elif defined(__OpenBSD__)
+#elif defined(__OpenBSD__) || defined(_LIBCPP_USING_FUCHSIA_CPRNG)
return std::numeric_limits<result_type>::digits;
#else
return 0;
diff --git a/contrib/llvm-project/libcxx/src/regex.cpp b/contrib/llvm-project/libcxx/src/regex.cpp
index 425339a5c9b8..16ad8f0effdb 100644
--- a/contrib/llvm-project/libcxx/src/regex.cpp
+++ b/contrib/llvm-project/libcxx/src/regex.cpp
@@ -76,6 +76,125 @@ struct collationnames
char char_;
};
+#if defined(__MVS__) && !defined(__NATIVE_ASCII_F)
+// EBCDIC IBM-1047
+// Sorted via the EBCDIC collating sequence
+const collationnames collatenames[] =
+{
+ {"a", 0x81},
+ {"alert", 0x2f},
+ {"ampersand", 0x50},
+ {"apostrophe", 0x7d},
+ {"asterisk", 0x5c},
+ {"b", 0x82},
+ {"backslash", 0xe0},
+ {"backspace", 0x16},
+ {"c", 0x83},
+ {"carriage-return", 0xd},
+ {"circumflex", 0x5f},
+ {"circumflex-accent", 0x5f},
+ {"colon", 0x7a},
+ {"comma", 0x6b},
+ {"commercial-at", 0x7c},
+ {"d", 0x84},
+ {"dollar-sign", 0x5b},
+ {"e", 0x85},
+ {"eight", 0xf8},
+ {"equals-sign", 0x7e},
+ {"exclamation-mark", 0x5a},
+ {"f", 0x86},
+ {"five", 0xf5},
+ {"form-feed", 0xc},
+ {"four", 0xf4},
+ {"full-stop", 0x4b},
+ {"g", 0x87},
+ {"grave-accent", 0x79},
+ {"greater-than-sign", 0x6e},
+ {"h", 0x88},
+ {"hyphen", 0x60},
+ {"hyphen-minus", 0x60},
+ {"i", 0x89},
+ {"j", 0x91},
+ {"k", 0x92},
+ {"l", 0x93},
+ {"left-brace", 0xc0},
+ {"left-curly-bracket", 0xc0},
+ {"left-parenthesis", 0x4d},
+ {"left-square-bracket", 0xad},
+ {"less-than-sign", 0x4c},
+ {"low-line", 0x6d},
+ {"m", 0x94},
+ {"n", 0x95},
+ {"newline", 0x15},
+ {"nine", 0xf9},
+ {"number-sign", 0x7b},
+ {"o", 0x96},
+ {"one", 0xf1},
+ {"p", 0x97},
+ {"percent-sign", 0x6c},
+ {"period", 0x4b},
+ {"plus-sign", 0x4e},
+ {"q", 0x98},
+ {"question-mark", 0x6f},
+ {"quotation-mark", 0x7f},
+ {"r", 0x99},
+ {"reverse-solidus", 0xe0},
+ {"right-brace", 0xd0},
+ {"right-curly-bracket", 0xd0},
+ {"right-parenthesis", 0x5d},
+ {"right-square-bracket", 0xbd},
+ {"s", 0xa2},
+ {"semicolon", 0x5e},
+ {"seven", 0xf7},
+ {"six", 0xf6},
+ {"slash", 0x61},
+ {"solidus", 0x61},
+ {"space", 0x40},
+ {"t", 0xa3},
+ {"tab", 0x5},
+ {"three", 0xf3},
+ {"tilde", 0xa1},
+ {"two", 0xf2},
+ {"u", 0xa4},
+ {"underscore", 0x6d},
+ {"v", 0xa5},
+ {"vertical-line", 0x4f},
+ {"vertical-tab", 0xb},
+ {"w", 0xa6},
+ {"x", 0xa7},
+ {"y", 0xa8},
+ {"z", 0xa9},
+ {"zero", 0xf0},
+ {"A", 0xc1},
+ {"B", 0xc2},
+ {"C", 0xc3},
+ {"D", 0xc4},
+ {"E", 0xc5},
+ {"F", 0xc6},
+ {"G", 0xc7},
+ {"H", 0xc8},
+ {"I", 0xc9},
+ {"J", 0xd1},
+ {"K", 0xd2},
+ {"L", 0xd3},
+ {"M", 0xd4},
+ {"N", 0xd5},
+ {"NUL", 0},
+ {"O", 0xd6},
+ {"P", 0xd7},
+ {"Q", 0xd8},
+ {"R", 0xd9},
+ {"S", 0xe2},
+ {"T", 0xe3},
+ {"U", 0xe4},
+ {"V", 0xe5},
+ {"W", 0xe6},
+ {"X", 0xe7},
+ {"Y", 0xe8},
+ {"Z", 0xe9}
+};
+#else
+// ASCII
const collationnames collatenames[] =
{
{"A", 0x41},
@@ -190,6 +309,7 @@ const collationnames collatenames[] =
{"z", 0x7a},
{"zero", 0x30}
};
+#endif
struct classnames
{
diff --git a/contrib/llvm-project/libcxx/src/string.cpp b/contrib/llvm-project/libcxx/src/string.cpp
index 608dcb2c5863..3c63f408240d 100644
--- a/contrib/llvm-project/libcxx/src/string.cpp
+++ b/contrib/llvm-project/libcxx/src/string.cpp
@@ -21,6 +21,7 @@
_LIBCPP_BEGIN_NAMESPACE_STD
+#ifndef _LIBCPP_ABI_NO_BASIC_STRING_BASE_CLASS
void __basic_string_common<true>::__throw_length_error() const {
_VSTD::__throw_length_error("basic_string");
}
@@ -28,6 +29,7 @@ void __basic_string_common<true>::__throw_length_error() const {
void __basic_string_common<true>::__throw_out_of_range() const {
_VSTD::__throw_out_of_range("basic_string");
}
+#endif
#define _LIBCPP_EXTERN_TEMPLATE_DEFINE(...) template __VA_ARGS__;
#ifdef _LIBCPP_ABI_STRING_OPTIMIZED_EXTERNAL_INSTANTIATION
diff --git a/contrib/llvm-project/libunwind/src/AddressSpace.hpp b/contrib/llvm-project/libunwind/src/AddressSpace.hpp
index cfceac29537f..0c4dfeb4e683 100644
--- a/contrib/llvm-project/libunwind/src/AddressSpace.hpp
+++ b/contrib/llvm-project/libunwind/src/AddressSpace.hpp
@@ -121,23 +121,23 @@ struct UnwindInfoSections {
uintptr_t dso_base;
#endif
#if defined(_LIBUNWIND_USE_DL_ITERATE_PHDR)
- uintptr_t text_segment_length;
+ size_t text_segment_length;
#endif
#if defined(_LIBUNWIND_SUPPORT_DWARF_UNWIND)
uintptr_t dwarf_section;
- uintptr_t dwarf_section_length;
+ size_t dwarf_section_length;
#endif
#if defined(_LIBUNWIND_SUPPORT_DWARF_INDEX)
uintptr_t dwarf_index_section;
- uintptr_t dwarf_index_section_length;
+ size_t dwarf_index_section_length;
#endif
#if defined(_LIBUNWIND_SUPPORT_COMPACT_UNWIND)
uintptr_t compact_unwind_section;
- uintptr_t compact_unwind_section_length;
+ size_t compact_unwind_section_length;
#endif
#if defined(_LIBUNWIND_ARM_EHABI)
uintptr_t arm_section;
- uintptr_t arm_section_length;
+ size_t arm_section_length;
#endif
};
@@ -430,7 +430,7 @@ static bool checkForUnwindInfoSegment(const Elf_Phdr *phdr, size_t image_base,
// .eh_frame_hdr records the start of .eh_frame, but not its size.
// Rely on a zero terminator to find the end of the section.
cbdata->sects->dwarf_section = hdrInfo.eh_frame_ptr;
- cbdata->sects->dwarf_section_length = UINTPTR_MAX;
+ cbdata->sects->dwarf_section_length = SIZE_MAX;
return true;
}
}
@@ -506,22 +506,22 @@ inline bool LocalAddressSpace::findUnwindSections(pint_t targetAddr,
info.dso_base = (uintptr_t)dyldInfo.mh;
#if defined(_LIBUNWIND_SUPPORT_DWARF_UNWIND)
info.dwarf_section = (uintptr_t)dyldInfo.dwarf_section;
- info.dwarf_section_length = dyldInfo.dwarf_section_length;
+ info.dwarf_section_length = (size_t)dyldInfo.dwarf_section_length;
#endif
info.compact_unwind_section = (uintptr_t)dyldInfo.compact_unwind_section;
- info.compact_unwind_section_length = dyldInfo.compact_unwind_section_length;
+ info.compact_unwind_section_length = (size_t)dyldInfo.compact_unwind_section_length;
return true;
}
#elif defined(_LIBUNWIND_SUPPORT_DWARF_UNWIND) && defined(_LIBUNWIND_IS_BAREMETAL)
info.dso_base = 0;
// Bare metal is statically linked, so no need to ask the dynamic loader
- info.dwarf_section_length = (uintptr_t)(&__eh_frame_end - &__eh_frame_start);
+ info.dwarf_section_length = (size_t)(&__eh_frame_end - &__eh_frame_start);
info.dwarf_section = (uintptr_t)(&__eh_frame_start);
_LIBUNWIND_TRACE_UNWINDING("findUnwindSections: section %p length %p",
(void *)info.dwarf_section, (void *)info.dwarf_section_length);
#if defined(_LIBUNWIND_SUPPORT_DWARF_INDEX)
info.dwarf_index_section = (uintptr_t)(&__eh_frame_hdr_start);
- info.dwarf_index_section_length = (uintptr_t)(&__eh_frame_hdr_end - &__eh_frame_hdr_start);
+ info.dwarf_index_section_length = (size_t)(&__eh_frame_hdr_end - &__eh_frame_hdr_start);
_LIBUNWIND_TRACE_UNWINDING("findUnwindSections: index section %p length %p",
(void *)info.dwarf_index_section, (void *)info.dwarf_index_section_length);
#endif
@@ -530,7 +530,7 @@ inline bool LocalAddressSpace::findUnwindSections(pint_t targetAddr,
#elif defined(_LIBUNWIND_ARM_EHABI) && defined(_LIBUNWIND_IS_BAREMETAL)
// Bare metal is statically linked, so no need to ask the dynamic loader
info.arm_section = (uintptr_t)(&__exidx_start);
- info.arm_section_length = (uintptr_t)(&__exidx_end - &__exidx_start);
+ info.arm_section_length = (size_t)(&__exidx_end - &__exidx_start);
_LIBUNWIND_TRACE_UNWINDING("findUnwindSections: section %p length %p",
(void *)info.arm_section, (void *)info.arm_section_length);
if (info.arm_section && info.arm_section_length)
@@ -584,7 +584,7 @@ inline bool LocalAddressSpace::findUnwindSections(pint_t targetAddr,
int length = 0;
info.arm_section =
(uintptr_t)dl_unwind_find_exidx((_Unwind_Ptr)targetAddr, &length);
- info.arm_section_length = (uintptr_t)length * sizeof(EHABIIndexEntry);
+ info.arm_section_length = (size_t)length * sizeof(EHABIIndexEntry);
if (info.arm_section && info.arm_section_length)
return true;
#elif defined(_LIBUNWIND_USE_DL_ITERATE_PHDR)
diff --git a/contrib/llvm-project/libunwind/src/DwarfParser.hpp b/contrib/llvm-project/libunwind/src/DwarfParser.hpp
index 9515fb7dfd52..abf7f613a359 100644
--- a/contrib/llvm-project/libunwind/src/DwarfParser.hpp
+++ b/contrib/llvm-project/libunwind/src/DwarfParser.hpp
@@ -150,7 +150,7 @@ public:
};
static bool findFDE(A &addressSpace, pint_t pc, pint_t ehSectionStart,
- uintptr_t sectionLength, pint_t fdeHint, FDE_Info *fdeInfo,
+ size_t sectionLength, pint_t fdeHint, FDE_Info *fdeInfo,
CIE_Info *cieInfo);
static const char *decodeFDE(A &addressSpace, pint_t fdeStart,
FDE_Info *fdeInfo, CIE_Info *cieInfo,
@@ -229,11 +229,11 @@ const char *CFI_Parser<A>::decodeFDE(A &addressSpace, pint_t fdeStart,
/// Scan an eh_frame section to find an FDE for a pc
template <typename A>
bool CFI_Parser<A>::findFDE(A &addressSpace, pint_t pc, pint_t ehSectionStart,
- uintptr_t sectionLength, pint_t fdeHint,
+ size_t sectionLength, pint_t fdeHint,
FDE_Info *fdeInfo, CIE_Info *cieInfo) {
//fprintf(stderr, "findFDE(0x%llX)\n", (long long)pc);
pint_t p = (fdeHint != 0) ? fdeHint : ehSectionStart;
- const pint_t ehSectionEnd = (sectionLength == UINTPTR_MAX)
+ const pint_t ehSectionEnd = (sectionLength == SIZE_MAX)
? static_cast<pint_t>(-1)
: (ehSectionStart + sectionLength);
while (p < ehSectionEnd) {
diff --git a/contrib/llvm-project/lld/COFF/COFFLinkerContext.h b/contrib/llvm-project/lld/COFF/COFFLinkerContext.h
index e5223da86ef8..a3a6f94a9413 100644
--- a/contrib/llvm-project/lld/COFF/COFFLinkerContext.h
+++ b/contrib/llvm-project/lld/COFF/COFFLinkerContext.h
@@ -15,12 +15,13 @@
#include "InputFiles.h"
#include "SymbolTable.h"
#include "Writer.h"
+#include "lld/Common/CommonLinkerContext.h"
#include "lld/Common/Timer.h"
namespace lld {
namespace coff {
-class COFFLinkerContext {
+class COFFLinkerContext : public CommonLinkerContext {
public:
COFFLinkerContext();
COFFLinkerContext(const COFFLinkerContext &) = delete;
diff --git a/contrib/llvm-project/lld/COFF/Chunks.cpp b/contrib/llvm-project/lld/COFF/Chunks.cpp
index 0bff11f450d1..6cabb22d98cf 100644
--- a/contrib/llvm-project/lld/COFF/Chunks.cpp
+++ b/contrib/llvm-project/lld/COFF/Chunks.cpp
@@ -12,7 +12,6 @@
#include "SymbolTable.h"
#include "Symbols.h"
#include "Writer.h"
-#include "lld/Common/ErrorHandler.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/COFF.h"
#include "llvm/Object/COFF.h"
@@ -430,7 +429,7 @@ void SectionChunk::sortRelocations() {
return;
warn("some relocations in " + file->getName() + " are not sorted");
MutableArrayRef<coff_relocation> newRelocs(
- bAlloc.Allocate<coff_relocation>(relocsSize), relocsSize);
+ bAlloc().Allocate<coff_relocation>(relocsSize), relocsSize);
memcpy(newRelocs.data(), relocsData, relocsSize * sizeof(coff_relocation));
llvm::sort(newRelocs, cmpByVa);
setRelocs(newRelocs);
@@ -635,7 +634,7 @@ void SectionChunk::printDiscardedMessage() const {
// Removed by dead-stripping. If it's removed by ICF, ICF already
// printed out the name, so don't repeat that here.
if (sym && this == repl)
- message("Discarded " + sym->getName());
+ log("Discarded " + sym->getName());
}
StringRef SectionChunk::getDebugName() const {
diff --git a/contrib/llvm-project/lld/COFF/Config.h b/contrib/llvm-project/lld/COFF/Config.h
index 3917975e165d..4bb42c88aa93 100644
--- a/contrib/llvm-project/lld/COFF/Config.h
+++ b/contrib/llvm-project/lld/COFF/Config.h
@@ -43,6 +43,7 @@ static const auto I386 = llvm::COFF::IMAGE_FILE_MACHINE_I386;
struct Export {
StringRef name; // N in /export:N or /export:E=N
StringRef extName; // E in /export:E=N
+ StringRef aliasTarget; // GNU specific: N in "alias == N"
Symbol *sym = nullptr;
uint16_t ordinal = 0;
bool noname = false;
@@ -63,6 +64,7 @@ struct Export {
bool operator==(const Export &e) {
return (name == e.name && extName == e.extName &&
+ aliasTarget == e.aliasTarget &&
ordinal == e.ordinal && noname == e.noname &&
data == e.data && isPrivate == e.isPrivate);
}
@@ -282,7 +284,7 @@ struct Configuration {
bool stdcallFixup = false;
};
-extern Configuration *config;
+extern std::unique_ptr<Configuration> config;
} // namespace coff
} // namespace lld
diff --git a/contrib/llvm-project/lld/COFF/DLL.cpp b/contrib/llvm-project/lld/COFF/DLL.cpp
index 6fec9df5617d..bfa2a6910e2b 100644
--- a/contrib/llvm-project/lld/COFF/DLL.cpp
+++ b/contrib/llvm-project/lld/COFF/DLL.cpp
@@ -659,14 +659,14 @@ void DelayLoadContents::create(COFFLinkerContext &ctx, Defined *h) {
// Add a synthetic symbol for this load thunk, using the "__imp_load"
// prefix, in case this thunk needs to be added to the list of valid
// call targets for Control Flow Guard.
- StringRef symName = saver.save("__imp_load_" + extName);
+ StringRef symName = saver().save("__imp_load_" + extName);
s->loadThunkSym =
cast<DefinedSynthetic>(ctx.symtab.addSynthetic(symName, t));
}
}
thunks.push_back(tm);
StringRef tmName =
- saver.save("__tailMerge_" + syms[0]->getDLLName().lower());
+ saver().save("__tailMerge_" + syms[0]->getDLLName().lower());
ctx.symtab.addSynthetic(tmName, tm);
// Terminate with null values.
addresses.push_back(make<NullChunk>(8));
diff --git a/contrib/llvm-project/lld/COFF/Driver.cpp b/contrib/llvm-project/lld/COFF/Driver.cpp
index 07b60673577e..1546291e16c6 100644
--- a/contrib/llvm-project/lld/COFF/Driver.cpp
+++ b/contrib/llvm-project/lld/COFF/Driver.cpp
@@ -19,9 +19,7 @@
#include "Writer.h"
#include "lld/Common/Args.h"
#include "lld/Common/Driver.h"
-#include "lld/Common/ErrorHandler.h"
#include "lld/Common/Filesystem.h"
-#include "lld/Common/Memory.h"
#include "lld/Common/Timer.h"
#include "lld/Common/Version.h"
#include "llvm/ADT/Optional.h"
@@ -60,39 +58,25 @@ using namespace llvm::sys;
namespace lld {
namespace coff {
-Configuration *config;
-LinkerDriver *driver;
+std::unique_ptr<Configuration> config;
+std::unique_ptr<LinkerDriver> driver;
-bool link(ArrayRef<const char *> args, bool canExitEarly, raw_ostream &stdoutOS,
- raw_ostream &stderrOS) {
- lld::stdoutOS = &stdoutOS;
- lld::stderrOS = &stderrOS;
+bool link(ArrayRef<const char *> args, llvm::raw_ostream &stdoutOS,
+ llvm::raw_ostream &stderrOS, bool exitEarly, bool disableOutput) {
+ // This driver-specific context will be freed later by lldMain().
+ auto *ctx = new COFFLinkerContext;
- errorHandler().cleanupCallback = []() {
- freeArena();
- };
-
- errorHandler().logName = args::getFilenameWithoutExe(args[0]);
- errorHandler().errorLimitExceededMsg =
- "too many errors emitted, stopping now"
- " (use /errorlimit:0 to see all errors)";
- errorHandler().exitEarly = canExitEarly;
- stderrOS.enable_colors(stderrOS.has_colors());
+ ctx->e.initialize(stdoutOS, stderrOS, exitEarly, disableOutput);
+ ctx->e.logName = args::getFilenameWithoutExe(args[0]);
+ ctx->e.errorLimitExceededMsg = "too many errors emitted, stopping now"
+ " (use /errorlimit:0 to see all errors)";
- COFFLinkerContext ctx;
- config = make<Configuration>();
- driver = make<LinkerDriver>(ctx);
+ config = std::make_unique<Configuration>();
+ driver = std::make_unique<LinkerDriver>(*ctx);
driver->linkerMain(args);
- // Call exit() if we can to avoid calling destructors.
- if (canExitEarly)
- exitLld(errorCount() ? 1 : 0);
-
- bool ret = errorCount() == 0;
- if (!canExitEarly)
- errorHandler().reset();
- return ret;
+ return errorCount() == 0;
}
// Parse options of the form "old;new".
@@ -162,7 +146,7 @@ static std::future<MBErrPair> createFutureForFile(std::string path) {
static StringRef mangle(StringRef sym) {
assert(config->machine != IMAGE_FILE_MACHINE_UNKNOWN);
if (config->machine == I386)
- return saver.save("_" + sym);
+ return saver().save("_" + sym);
return sym;
}
@@ -208,17 +192,11 @@ void LinkerDriver::addBuffer(std::unique_ptr<MemoryBuffer> mb,
ctx.symtab.addFile(make<ArchiveFile>(ctx, mbref));
break;
case file_magic::bitcode:
- if (lazy)
- ctx.symtab.addFile(make<LazyObjFile>(ctx, mbref));
- else
- ctx.symtab.addFile(make<BitcodeFile>(ctx, mbref, "", 0));
+ ctx.symtab.addFile(make<BitcodeFile>(ctx, mbref, "", 0, lazy));
break;
case file_magic::coff_object:
case file_magic::coff_import_library:
- if (lazy)
- ctx.symtab.addFile(make<LazyObjFile>(ctx, mbref));
- else
- ctx.symtab.addFile(make<ObjFile>(ctx, mbref));
+ ctx.symtab.addFile(make<ObjFile>(ctx, mbref, lazy));
break;
case file_magic::pdb:
ctx.symtab.addFile(make<PDBInputFile>(ctx, mbref));
@@ -282,7 +260,8 @@ void LinkerDriver::addArchiveBuffer(MemoryBufferRef mb, StringRef symName,
if (magic == file_magic::coff_object) {
obj = make<ObjFile>(ctx, mb);
} else if (magic == file_magic::bitcode) {
- obj = make<BitcodeFile>(ctx, mb, parentName, offsetInArchive);
+ obj =
+ make<BitcodeFile>(ctx, mb, parentName, offsetInArchive, /*lazy=*/false);
} else {
error("unknown file type: " + mb.getBufferIdentifier());
return;
@@ -363,9 +342,9 @@ void LinkerDriver::parseDirectives(InputFile *file) {
Export exp = parseExport(e);
if (config->machine == I386 && config->mingw) {
if (!isDecorated(exp.name))
- exp.name = saver.save("_" + exp.name);
+ exp.name = saver().save("_" + exp.name);
if (!exp.extName.empty() && !isDecorated(exp.extName))
- exp.extName = saver.save("_" + exp.extName);
+ exp.extName = saver().save("_" + exp.extName);
}
exp.directives = true;
config->exports.push_back(exp);
@@ -447,11 +426,11 @@ StringRef LinkerDriver::doFindFile(StringRef filename) {
SmallString<128> path = dir;
sys::path::append(path, filename);
if (sys::fs::exists(path.str()))
- return saver.save(path.str());
+ return saver().save(path.str());
if (!hasExt) {
path.append(".obj");
if (sys::fs::exists(path.str()))
- return saver.save(path.str());
+ return saver().save(path.str());
}
}
return filename;
@@ -488,7 +467,7 @@ StringRef LinkerDriver::doFindLibMinGW(StringRef filename) {
SmallString<128> s = filename;
sys::path::replace_extension(s, ".a");
- StringRef libName = saver.save("lib" + s.str());
+ StringRef libName = saver().save("lib" + s.str());
return doFindFile(libName);
}
@@ -497,7 +476,7 @@ StringRef LinkerDriver::doFindLib(StringRef filename) {
// Add ".lib" to Filename if that has no file extension.
bool hasExt = filename.contains('.');
if (!hasExt)
- filename = saver.save(filename + ".lib");
+ filename = saver().save(filename + ".lib");
StringRef ret = doFindFile(filename);
// For MinGW, if the find above didn't turn up anything, try
// looking for a MinGW formatted library name.
@@ -530,7 +509,7 @@ void LinkerDriver::addLibSearchPaths() {
Optional<std::string> envOpt = Process::GetEnv("LIB");
if (!envOpt.hasValue())
return;
- StringRef env = saver.save(*envOpt);
+ StringRef env = saver().save(*envOpt);
while (!env.empty()) {
StringRef path;
std::tie(path, env) = env.split(';');
@@ -815,6 +794,7 @@ static void createImportLibrary(bool asLib) {
e2.Name = std::string(e1.name);
e2.SymbolName = std::string(e1.symbolName);
e2.ExtName = std::string(e1.extName);
+ e2.AliasTarget = std::string(e1.aliasTarget);
e2.Ordinal = e1.ordinal;
e2.Noname = e1.noname;
e2.Data = e1.data;
@@ -877,8 +857,8 @@ static void parseModuleDefs(StringRef path) {
driver->takeBuffer(std::move(mb));
if (config->outputFile.empty())
- config->outputFile = std::string(saver.save(m.OutputFile));
- config->importName = std::string(saver.save(m.ImportName));
+ config->outputFile = std::string(saver().save(m.OutputFile));
+ config->importName = std::string(saver().save(m.ImportName));
if (m.ImageBase)
config->imageBase = m.ImageBase;
if (m.StackReserve)
@@ -906,13 +886,14 @@ static void parseModuleDefs(StringRef path) {
// DLL instead. This is supported by both MS and GNU linkers.
if (!e1.ExtName.empty() && e1.ExtName != e1.Name &&
StringRef(e1.Name).contains('.')) {
- e2.name = saver.save(e1.ExtName);
- e2.forwardTo = saver.save(e1.Name);
+ e2.name = saver().save(e1.ExtName);
+ e2.forwardTo = saver().save(e1.Name);
config->exports.push_back(e2);
continue;
}
- e2.name = saver.save(e1.Name);
- e2.extName = saver.save(e1.ExtName);
+ e2.name = saver().save(e1.Name);
+ e2.extName = saver().save(e1.ExtName);
+ e2.aliasTarget = saver().save(e1.AliasTarget);
e2.ordinal = e1.Ordinal;
e2.noname = e1.Noname;
e2.data = e1.Data;
@@ -1909,9 +1890,9 @@ void LinkerDriver::linkerMain(ArrayRef<const char *> argsArr) {
Export e = parseExport(arg->getValue());
if (config->machine == I386) {
if (!isDecorated(e.name))
- e.name = saver.save("_" + e.name);
+ e.name = saver().save("_" + e.name);
if (!e.extName.empty() && !isDecorated(e.extName))
- e.extName = saver.save("_" + e.extName);
+ e.extName = saver().save("_" + e.extName);
}
config->exports.push_back(e);
}
diff --git a/contrib/llvm-project/lld/COFF/Driver.h b/contrib/llvm-project/lld/COFF/Driver.h
index 77e67b282665..518ec1470677 100644
--- a/contrib/llvm-project/lld/COFF/Driver.h
+++ b/contrib/llvm-project/lld/COFF/Driver.h
@@ -30,8 +30,7 @@
namespace lld {
namespace coff {
-class LinkerDriver;
-extern LinkerDriver *driver;
+extern std::unique_ptr<class LinkerDriver> driver;
using llvm::COFF::MachineTypes;
using llvm::COFF::WindowsSubsystem;
diff --git a/contrib/llvm-project/lld/COFF/DriverUtils.cpp b/contrib/llvm-project/lld/COFF/DriverUtils.cpp
index 0921c8e27f5a..ac0f1f972c79 100644
--- a/contrib/llvm-project/lld/COFF/DriverUtils.cpp
+++ b/contrib/llvm-project/lld/COFF/DriverUtils.cpp
@@ -48,17 +48,17 @@ const uint16_t RT_MANIFEST = 24;
class Executor {
public:
- explicit Executor(StringRef s) : prog(saver.save(s)) {}
- void add(StringRef s) { args.push_back(saver.save(s)); }
- void add(std::string &s) { args.push_back(saver.save(s)); }
- void add(Twine s) { args.push_back(saver.save(s)); }
- void add(const char *s) { args.push_back(saver.save(s)); }
+ explicit Executor(StringRef s) : prog(saver().save(s)) {}
+ void add(StringRef s) { args.push_back(saver().save(s)); }
+ void add(std::string &s) { args.push_back(saver().save(s)); }
+ void add(Twine s) { args.push_back(saver().save(s)); }
+ void add(const char *s) { args.push_back(saver().save(s)); }
void run() {
ErrorOr<std::string> exeOrErr = sys::findProgramByName(prog);
if (auto ec = exeOrErr.getError())
fatal("unable to find " + prog + " in PATH: " + ec.message());
- StringRef exe = saver.save(*exeOrErr);
+ StringRef exe = saver().save(*exeOrErr);
args.insert(args.begin(), exe);
if (sys::ExecuteAndWait(args[0], args) != 0)
@@ -636,14 +636,14 @@ static StringRef killAt(StringRef sym, bool prefix) {
sym = sym.substr(0, sym.find('@', 1));
if (!sym.startswith("@")) {
if (prefix && !sym.startswith("_"))
- return saver.save("_" + sym);
+ return saver().save("_" + sym);
return sym;
}
// For fastcall, remove the leading @ and replace it with an
// underscore, if prefixes are used.
sym = sym.substr(1);
if (prefix)
- sym = saver.save("_" + sym);
+ sym = saver().save("_" + sym);
return sym;
}
@@ -854,7 +854,7 @@ opt::InputArgList ArgParser::parse(ArrayRef<const char *> argv) {
argv.data() + argv.size());
if (!args.hasArg(OPT_lldignoreenv))
addLINK(expandedArgv);
- cl::ExpandResponseFiles(saver, getQuotingStyle(args), expandedArgv);
+ cl::ExpandResponseFiles(saver(), getQuotingStyle(args), expandedArgv);
args = optTable.ParseArgs(makeArrayRef(expandedArgv).drop_front(),
missingIndex, missingCount);
@@ -901,7 +901,7 @@ ParsedDirectives ArgParser::parseDirectives(StringRef s) {
// Handle /EXPORT and /INCLUDE in a fast path. These directives can appear for
// potentially every symbol in the object, so they must be handled quickly.
SmallVector<StringRef, 16> tokens;
- cl::TokenizeWindowsCommandLineNoCopy(s, saver, tokens);
+ cl::TokenizeWindowsCommandLineNoCopy(s, saver(), tokens);
for (StringRef tok : tokens) {
if (tok.startswith_insensitive("/export:") ||
tok.startswith_insensitive("-export:"))
@@ -914,7 +914,7 @@ ParsedDirectives ArgParser::parseDirectives(StringRef s) {
// already copied quoted arguments for us, so those do not need to be
// copied again.
bool HasNul = tok.end() != s.end() && tok.data()[tok.size()] == '\0';
- rest.push_back(HasNul ? tok.data() : saver.save(tok).data());
+ rest.push_back(HasNul ? tok.data() : saver().save(tok).data());
}
}
@@ -948,7 +948,7 @@ void ArgParser::addLINK(SmallVector<const char *, 256> &argv) {
std::vector<const char *> ArgParser::tokenize(StringRef s) {
SmallVector<const char *, 16> tokens;
- cl::TokenizeWindowsCommandLine(s, saver, tokens);
+ cl::TokenizeWindowsCommandLine(s, saver(), tokens);
return std::vector<const char *>(tokens.begin(), tokens.end());
}
diff --git a/contrib/llvm-project/lld/COFF/InputFiles.cpp b/contrib/llvm-project/lld/COFF/InputFiles.cpp
index 4b38e3d1a99b..0f3f5e0ffe7c 100644
--- a/contrib/llvm-project/lld/COFF/InputFiles.cpp
+++ b/contrib/llvm-project/lld/COFF/InputFiles.cpp
@@ -15,8 +15,6 @@
#include "SymbolTable.h"
#include "Symbols.h"
#include "lld/Common/DWARF.h"
-#include "lld/Common/ErrorHandler.h"
-#include "lld/Common/Memory.h"
#include "llvm-c/lto.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
@@ -135,31 +133,7 @@ std::vector<MemoryBufferRef> lld::coff::getArchiveMembers(Archive *file) {
return v;
}
-void LazyObjFile::fetch() {
- if (mb.getBuffer().empty())
- return;
-
- InputFile *file;
- if (isBitcode(mb))
- file = make<BitcodeFile>(ctx, mb, "", 0, std::move(symbols));
- else
- file = make<ObjFile>(ctx, mb, std::move(symbols));
- mb = {};
- ctx.symtab.addFile(file);
-}
-
-void LazyObjFile::parse() {
- if (isBitcode(this->mb)) {
- // Bitcode file.
- std::unique_ptr<lto::InputFile> obj =
- CHECK(lto::InputFile::create(this->mb), this);
- for (const lto::InputFile::Symbol &sym : obj->symbols()) {
- if (!sym.isUndefined())
- ctx.symtab.addLazyObject(this, sym.getName());
- }
- return;
- }
-
+void ObjFile::parseLazy() {
// Native object file.
std::unique_ptr<Binary> coffObjPtr = CHECK(createBinary(mb), this);
COFFObjectFile *coffObj = cast<COFFObjectFile>(coffObjPtr.get());
@@ -929,7 +903,7 @@ ObjFile::getVariableLocation(StringRef var) {
Optional<std::pair<std::string, unsigned>> ret = dwarf->getVariableLoc(var);
if (!ret)
return None;
- return std::make_pair(saver.save(ret->first), ret->second);
+ return std::make_pair(saver().save(ret->first), ret->second);
}
// Used only for DWARF debug info, which is not common (except in MinGW
@@ -964,8 +938,8 @@ void ImportFile::parse() {
fatal("broken import library");
// Read names and create an __imp_ symbol.
- StringRef name = saver.save(StringRef(buf + sizeof(*hdr)));
- StringRef impName = saver.save("__imp_" + name);
+ StringRef name = saver().save(StringRef(buf + sizeof(*hdr)));
+ StringRef impName = saver().save("__imp_" + name);
const char *nameStart = buf + sizeof(coff_import_header) + name.size() + 1;
dllName = std::string(StringRef(nameStart));
StringRef extName;
@@ -1006,13 +980,9 @@ void ImportFile::parse() {
}
BitcodeFile::BitcodeFile(COFFLinkerContext &ctx, MemoryBufferRef mb,
- StringRef archiveName, uint64_t offsetInArchive)
- : BitcodeFile(ctx, mb, archiveName, offsetInArchive, {}) {}
-
-BitcodeFile::BitcodeFile(COFFLinkerContext &ctx, MemoryBufferRef mb,
StringRef archiveName, uint64_t offsetInArchive,
- std::vector<Symbol *> &&symbols)
- : InputFile(ctx, BitcodeKind, mb), symbols(std::move(symbols)) {
+ bool lazy)
+ : InputFile(ctx, BitcodeKind, mb, lazy) {
std::string path = mb.getBufferIdentifier().str();
if (config->thinLTOIndexOnly)
path = replaceThinLTOSuffix(mb.getBufferIdentifier());
@@ -1023,11 +993,12 @@ BitcodeFile::BitcodeFile(COFFLinkerContext &ctx, MemoryBufferRef mb,
// into consideration at LTO time (which very likely causes undefined
// symbols later in the link stage). So we append file offset to make
// filename unique.
- MemoryBufferRef mbref(
- mb.getBuffer(),
- saver.save(archiveName.empty() ? path
- : archiveName + sys::path::filename(path) +
- utostr(offsetInArchive)));
+ MemoryBufferRef mbref(mb.getBuffer(),
+ saver().save(archiveName.empty()
+ ? path
+ : archiveName +
+ sys::path::filename(path) +
+ utostr(offsetInArchive)));
obj = check(lto::InputFile::create(mbref));
}
@@ -1063,6 +1034,7 @@ FakeSectionChunk ltoDataSectionChunk(&ltoDataSection.section);
} // namespace
void BitcodeFile::parse() {
+ llvm::StringSaver &saver = lld::saver();
std::vector<std::pair<Symbol *, bool>> comdat(obj->getComdatTable().size());
for (size_t i = 0; i != obj->getComdatTable().size(); ++i)
// FIXME: Check nodeduplicate
@@ -1107,6 +1079,12 @@ void BitcodeFile::parse() {
directives = obj->getCOFFLinkerOpts();
}
+void BitcodeFile::parseLazy() {
+ for (const lto::InputFile::Symbol &sym : obj->symbols())
+ if (!sym.isUndefined())
+ ctx.symtab.addLazyObject(this, sym.getName());
+}
+
MachineTypes BitcodeFile::getMachineType() {
switch (Triple(obj->getTargetTriple()).getArch()) {
case Triple::x86_64:
@@ -1178,11 +1156,11 @@ void DLLFile::parse() {
s->nameType = ImportNameType::IMPORT_NAME;
if (coffObj->getMachine() == I386) {
- s->symbolName = symbolName = saver.save("_" + symbolName);
+ s->symbolName = symbolName = saver().save("_" + symbolName);
s->nameType = ImportNameType::IMPORT_NAME_NOPREFIX;
}
- StringRef impName = saver.save("__imp_" + symbolName);
+ StringRef impName = saver().save("__imp_" + symbolName);
ctx.symtab.addLazyDLLSymbol(this, s, impName);
if (code)
ctx.symtab.addLazyDLLSymbol(this, s, symbolName);
@@ -1201,7 +1179,7 @@ void DLLFile::makeImport(DLLFile::Symbol *s) {
size_t impSize = s->dllName.size() + s->symbolName.size() + 2; // +2 for NULs
size_t size = sizeof(coff_import_header) + impSize;
- char *buf = bAlloc.Allocate<char>(size);
+ char *buf = bAlloc().Allocate<char>(size);
memset(buf, 0, size);
char *p = buf;
auto *imp = reinterpret_cast<coff_import_header *>(p);
diff --git a/contrib/llvm-project/lld/COFF/InputFiles.h b/contrib/llvm-project/lld/COFF/InputFiles.h
index 801c668d3ae4..2cabb54cb386 100644
--- a/contrib/llvm-project/lld/COFF/InputFiles.h
+++ b/contrib/llvm-project/lld/COFF/InputFiles.h
@@ -95,13 +95,17 @@ public:
COFFLinkerContext &ctx;
protected:
- InputFile(COFFLinkerContext &c, Kind k, MemoryBufferRef m)
- : mb(m), ctx(c), fileKind(k) {}
+ InputFile(COFFLinkerContext &c, Kind k, MemoryBufferRef m, bool lazy = false)
+ : mb(m), ctx(c), fileKind(k), lazy(lazy) {}
StringRef directives;
private:
const Kind fileKind;
+
+public:
+ // True if this is a lazy ObjFile or BitcodeFile.
+ bool lazy = false;
};
// .lib or .a file.
@@ -121,33 +125,14 @@ private:
llvm::DenseSet<uint64_t> seen;
};
-// .obj or .o file between -start-lib and -end-lib.
-class LazyObjFile : public InputFile {
-public:
- explicit LazyObjFile(COFFLinkerContext &ctx, MemoryBufferRef m)
- : InputFile(ctx, LazyObjectKind, m) {}
- static bool classof(const InputFile *f) {
- return f->kind() == LazyObjectKind;
- }
- // Makes this object file part of the link.
- void fetch();
- // Adds the symbols in this file to the symbol table as LazyObject symbols.
- void parse() override;
-
-private:
- std::vector<Symbol *> symbols;
-};
-
// .obj or .o file. This may be a member of an archive file.
class ObjFile : public InputFile {
public:
- explicit ObjFile(COFFLinkerContext &ctx, MemoryBufferRef m)
- : InputFile(ctx, ObjectKind, m) {}
- explicit ObjFile(COFFLinkerContext &ctx, MemoryBufferRef m,
- std::vector<Symbol *> &&symbols)
- : InputFile(ctx, ObjectKind, m), symbols(std::move(symbols)) {}
+ explicit ObjFile(COFFLinkerContext &ctx, MemoryBufferRef m, bool lazy = false)
+ : InputFile(ctx, ObjectKind, m, lazy) {}
static bool classof(const InputFile *f) { return f->kind() == ObjectKind; }
void parse() override;
+ void parseLazy();
MachineTypes getMachineType() override;
ArrayRef<Chunk *> getChunks() { return chunks; }
ArrayRef<SectionChunk *> getDebugChunks() { return debugChunks; }
@@ -380,15 +365,14 @@ public:
// Used for LTO.
class BitcodeFile : public InputFile {
public:
- BitcodeFile(COFFLinkerContext &ctx, MemoryBufferRef mb, StringRef archiveName,
- uint64_t offsetInArchive);
- explicit BitcodeFile(COFFLinkerContext &ctx, MemoryBufferRef m,
+ explicit BitcodeFile(COFFLinkerContext &ctx, MemoryBufferRef mb,
StringRef archiveName, uint64_t offsetInArchive,
- std::vector<Symbol *> &&symbols);
+ bool lazy);
~BitcodeFile();
static bool classof(const InputFile *f) { return f->kind() == BitcodeKind; }
ArrayRef<Symbol *> getSymbols() { return symbols; }
MachineTypes getMachineType() override;
+ void parseLazy();
std::unique_ptr<llvm::lto::InputFile> obj;
private:
diff --git a/contrib/llvm-project/lld/COFF/LTO.cpp b/contrib/llvm-project/lld/COFF/LTO.cpp
index f117b62192c8..2dbe7b146402 100644
--- a/contrib/llvm-project/lld/COFF/LTO.cpp
+++ b/contrib/llvm-project/lld/COFF/LTO.cpp
@@ -11,7 +11,7 @@
#include "InputFiles.h"
#include "Symbols.h"
#include "lld/Common/Args.h"
-#include "lld/Common/ErrorHandler.h"
+#include "lld/Common/CommonLinkerContext.h"
#include "lld/Common/Strings.h"
#include "lld/Common/TargetOptionsCommandFlags.h"
#include "llvm/ADT/STLExtras.h"
@@ -209,8 +209,8 @@ std::vector<InputFile *> BitcodeCompiler::compile(COFFLinkerContext &ctx) {
// - foo.exe.lto.1.obj
// - ...
StringRef ltoObjName =
- saver.save(Twine(config->outputFile) + ".lto" +
- (i == 0 ? Twine("") : Twine('.') + Twine(i)) + ".obj");
+ saver().save(Twine(config->outputFile) + ".lto" +
+ (i == 0 ? Twine("") : Twine('.') + Twine(i)) + ".obj");
// Get the native object contents either from the cache or from memory. Do
// not use the cached MemoryBuffer directly, or the PDB will not be
diff --git a/contrib/llvm-project/lld/COFF/MinGW.cpp b/contrib/llvm-project/lld/COFF/MinGW.cpp
index 148ebe5eea66..7a3a3853572f 100644
--- a/contrib/llvm-project/lld/COFF/MinGW.cpp
+++ b/contrib/llvm-project/lld/COFF/MinGW.cpp
@@ -11,7 +11,6 @@
#include "Driver.h"
#include "InputFiles.h"
#include "SymbolTable.h"
-#include "lld/Common/ErrorHandler.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/Object/COFF.h"
@@ -184,8 +183,8 @@ void lld::coff::writeDefFile(StringRef name) {
static StringRef mangle(Twine sym) {
assert(config->machine != IMAGE_FILE_MACHINE_UNKNOWN);
if (config->machine == I386)
- return saver.save("_" + sym);
- return saver.save(sym);
+ return saver().save("_" + sym);
+ return saver().save(sym);
}
// Handles -wrap option.
@@ -249,7 +248,7 @@ void lld::coff::wrapSymbols(COFFLinkerContext &ctx,
// referenced it or not, though.)
if (imp) {
DefinedLocalImport *wrapimp = make<DefinedLocalImport>(
- saver.save("__imp_" + w.wrap->getName()), d);
+ saver().save("__imp_" + w.wrap->getName()), d);
ctx.symtab.localImportChunks.push_back(wrapimp->getChunk());
map[imp] = wrapimp;
}
diff --git a/contrib/llvm-project/lld/COFF/PDB.cpp b/contrib/llvm-project/lld/COFF/PDB.cpp
index a4cef1d0df3b..dea84eca5b12 100644
--- a/contrib/llvm-project/lld/COFF/PDB.cpp
+++ b/contrib/llvm-project/lld/COFF/PDB.cpp
@@ -16,7 +16,6 @@
#include "Symbols.h"
#include "TypeMerger.h"
#include "Writer.h"
-#include "lld/Common/ErrorHandler.h"
#include "lld/Common/Timer.h"
#include "llvm/DebugInfo/CodeView/DebugFrameDataSubsection.h"
#include "llvm/DebugInfo/CodeView/DebugSubsectionRecord.h"
@@ -75,7 +74,7 @@ class PDBLinker {
public:
PDBLinker(COFFLinkerContext &ctx)
- : builder(bAlloc), tMerger(ctx, bAlloc), ctx(ctx) {
+ : builder(bAlloc()), tMerger(ctx, bAlloc()), ctx(ctx) {
// This isn't strictly necessary, but link.exe usually puts an empty string
// as the first "valid" string in the string table, so we do the same in
// order to maintain as much byte-for-byte compatibility as possible.
@@ -501,7 +500,7 @@ static void addGlobalSymbol(pdb::GSIStreamBuilder &builder, uint16_t modIndex,
case SymbolKind::S_LPROCREF: {
// sym is a temporary object, so we have to copy and reallocate the record
// to stabilize it.
- uint8_t *mem = bAlloc.Allocate<uint8_t>(sym.length());
+ uint8_t *mem = bAlloc().Allocate<uint8_t>(sym.length());
memcpy(mem, sym.data().data(), sym.length());
builder.addGlobalSymbol(CVSymbol(makeArrayRef(mem, sym.length())));
break;
@@ -1003,7 +1002,7 @@ static void warnUnusable(InputFile *f, Error e) {
// Allocate memory for a .debug$S / .debug$F section and relocate it.
static ArrayRef<uint8_t> relocateDebugChunk(SectionChunk &debugChunk) {
- uint8_t *buffer = bAlloc.Allocate<uint8_t>(debugChunk.getSize());
+ uint8_t *buffer = bAlloc().Allocate<uint8_t>(debugChunk.getSize());
assert(debugChunk.getOutputSectionIdx() == 0 &&
"debug sections should not be in output sections");
debugChunk.writeTo(buffer);
@@ -1417,6 +1416,7 @@ static void addCommonLinkerModuleSymbols(StringRef path,
ebs.Fields.push_back(path);
ebs.Fields.push_back("cmd");
ebs.Fields.push_back(argStr);
+ llvm::BumpPtrAllocator &bAlloc = lld::bAlloc();
mod.addSymbol(codeview::SymbolSerializer::writeOneSymbol(
ons, bAlloc, CodeViewContainer::Pdb));
mod.addSymbol(codeview::SymbolSerializer::writeOneSymbol(
@@ -1448,7 +1448,7 @@ static void addLinkerModuleCoffGroup(PartialSection *sec,
cgs.Characteristics |= llvm::COFF::IMAGE_SCN_MEM_WRITE;
mod.addSymbol(codeview::SymbolSerializer::writeOneSymbol(
- cgs, bAlloc, CodeViewContainer::Pdb));
+ cgs, bAlloc(), CodeViewContainer::Pdb));
}
static void addLinkerModuleSectionSymbol(pdb::DbiModuleDescriptorBuilder &mod,
@@ -1461,7 +1461,7 @@ static void addLinkerModuleSectionSymbol(pdb::DbiModuleDescriptorBuilder &mod,
sym.Rva = os.getRVA();
sym.SectionNumber = os.sectionIndex;
mod.addSymbol(codeview::SymbolSerializer::writeOneSymbol(
- sym, bAlloc, CodeViewContainer::Pdb));
+ sym, bAlloc(), CodeViewContainer::Pdb));
// Skip COFF groups in MinGW because it adds a significant footprint to the
// PDB, due to each function being in its own section
@@ -1536,6 +1536,7 @@ void PDBLinker::addImportFilesToPDB() {
ts.Segment = thunkOS->sectionIndex;
ts.Offset = thunkChunk->getRVA() - thunkOS->getRVA();
+ llvm::BumpPtrAllocator &bAlloc = lld::bAlloc();
mod->addSymbol(codeview::SymbolSerializer::writeOneSymbol(
ons, bAlloc, CodeViewContainer::Pdb));
mod->addSymbol(codeview::SymbolSerializer::writeOneSymbol(
diff --git a/contrib/llvm-project/lld/COFF/SymbolTable.cpp b/contrib/llvm-project/lld/COFF/SymbolTable.cpp
index 679c91ad06e6..db2db9c9272e 100644
--- a/contrib/llvm-project/lld/COFF/SymbolTable.cpp
+++ b/contrib/llvm-project/lld/COFF/SymbolTable.cpp
@@ -37,7 +37,21 @@ StringRef ltrim1(StringRef s, const char *chars) {
void SymbolTable::addFile(InputFile *file) {
log("Reading " + toString(file));
- file->parse();
+ if (file->lazy) {
+ if (auto *f = dyn_cast<BitcodeFile>(file))
+ f->parseLazy();
+ else
+ cast<ObjFile>(file)->parseLazy();
+ } else {
+ file->parse();
+ if (auto *f = dyn_cast<ObjFile>(file)) {
+ ctx.objFileInstances.push_back(f);
+ } else if (auto *f = dyn_cast<BitcodeFile>(file)) {
+ ctx.bitcodeFileInstances.push_back(f);
+ } else if (auto *f = dyn_cast<ImportFile>(file)) {
+ ctx.importFileInstances.push_back(f);
+ }
+ }
MachineTypes mt = file->getMachineType();
if (config->machine == IMAGE_FILE_MACHINE_UNKNOWN) {
@@ -48,14 +62,6 @@ void SymbolTable::addFile(InputFile *file) {
return;
}
- if (auto *f = dyn_cast<ObjFile>(file)) {
- ctx.objFileInstances.push_back(f);
- } else if (auto *f = dyn_cast<BitcodeFile>(file)) {
- ctx.bitcodeFileInstances.push_back(f);
- } else if (auto *f = dyn_cast<ImportFile>(file)) {
- ctx.importFileInstances.push_back(f);
- }
-
driver->parseDirectives(file);
}
@@ -75,9 +81,11 @@ static void forceLazy(Symbol *s) {
l->file->addMember(l->sym);
break;
}
- case Symbol::Kind::LazyObjectKind:
- cast<LazyObject>(s)->file->fetch();
+ case Symbol::Kind::LazyObjectKind: {
+ InputFile *file = cast<LazyObject>(s)->file;
+ file->ctx.symtab.addFile(file);
break;
+ }
case Symbol::Kind::LazyDLLSymbolKind: {
auto *l = cast<LazyDLLSymbol>(s);
l->file->makeImport(l->sym);
@@ -126,7 +134,7 @@ getFileLineDwarf(const SectionChunk *c, uint32_t addr) {
const DILineInfo &lineInfo = *optionalLineInfo;
if (lineInfo.FileName == DILineInfo::BadString)
return None;
- return std::make_pair(saver.save(lineInfo.FileName), lineInfo.Line);
+ return std::make_pair(saver().save(lineInfo.FileName), lineInfo.Line);
}
static Optional<std::pair<StringRef, uint32_t>>
@@ -562,7 +570,8 @@ void SymbolTable::addLazyArchive(ArchiveFile *f, const Archive::Symbol &sym) {
f->addMember(sym);
}
-void SymbolTable::addLazyObject(LazyObjFile *f, StringRef n) {
+void SymbolTable::addLazyObject(InputFile *f, StringRef n) {
+ assert(f->lazy);
Symbol *s;
bool wasInserted;
std::tie(s, wasInserted) = insert(n, f);
@@ -574,7 +583,8 @@ void SymbolTable::addLazyObject(LazyObjFile *f, StringRef n) {
if (!u || u->weakAlias || s->pendingArchiveLoad)
return;
s->pendingArchiveLoad = true;
- f->fetch();
+ f->lazy = false;
+ addFile(f);
}
void SymbolTable::addLazyDLLSymbol(DLLFile *f, DLLFile::Symbol *sym,
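The hunks above replace the dedicated LazyObjFile class with a `lazy` flag carried by ObjFile and BitcodeFile: addFile() either just indexes the file's defined symbols (parseLazy) or pulls the file fully into the link, and the first reference to a lazy symbol clears the flag and re-adds the file. A minimal standalone sketch of that control flow follows; DemoFile and DemoSymtab are invented names, not lld's real classes.

  #include <cassert>
  #include <iostream>
  #include <map>
  #include <string>
  #include <vector>

  // Invented stand-ins; only the lazy/extract control flow mirrors the hunks above.
  struct DemoFile {
    std::string name;
    bool lazy;
    std::vector<std::string> defs; // symbols this file defines
  };

  struct DemoSymtab {
    std::map<std::string, DemoFile *> lazySyms; // known definitions, not yet pulled in
    std::vector<DemoFile *> linked;             // files that are part of the link

    void addFile(DemoFile *f) {
      if (f->lazy) {
        // parseLazy(): only remember where each defined symbol could come from.
        for (const std::string &s : f->defs)
          lazySyms.emplace(s, f);
      } else {
        // parse(): the file becomes part of the link for real.
        linked.push_back(f);
        for (const std::string &s : f->defs)
          lazySyms.erase(s);
      }
    }

    // A reference to `name` forces extraction: clear the flag and re-add the
    // file, which now takes the full-parse branch (compare addLazyObject and
    // forceLazy in the hunks above).
    void reference(const std::string &name) {
      auto it = lazySyms.find(name);
      if (it == lazySyms.end())
        return;
      DemoFile *f = it->second;
      assert(f->lazy);
      f->lazy = false;
      addFile(f);
    }
  };

  int main() {
    DemoFile a{"a.obj", /*lazy=*/true, {"foo", "bar"}};
    DemoSymtab st;
    st.addFile(&a);                        // only indexed
    std::cout << st.linked.size() << '\n'; // 0
    st.reference("foo");                   // first use forces extraction
    std::cout << st.linked.size() << '\n'; // 1
  }

The payoff of the flag-based design is that there is no second file object per archive member: the same object is parsed once lazily and, if needed, once for real, with the flag deciding which path runs.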
diff --git a/contrib/llvm-project/lld/COFF/SymbolTable.h b/contrib/llvm-project/lld/COFF/SymbolTable.h
index 3e76b416d1a0..47f3238fd75b 100644
--- a/contrib/llvm-project/lld/COFF/SymbolTable.h
+++ b/contrib/llvm-project/lld/COFF/SymbolTable.h
@@ -91,7 +91,7 @@ public:
Symbol *addUndefined(StringRef name, InputFile *f, bool isWeakAlias);
void addLazyArchive(ArchiveFile *f, const Archive::Symbol &sym);
- void addLazyObject(LazyObjFile *f, StringRef n);
+ void addLazyObject(InputFile *f, StringRef n);
void addLazyDLLSymbol(DLLFile *f, DLLFile::Symbol *sym, StringRef n);
Symbol *addAbsolute(StringRef n, COFFSymbolRef s);
Symbol *addRegular(InputFile *f, StringRef n,
diff --git a/contrib/llvm-project/lld/COFF/Symbols.cpp b/contrib/llvm-project/lld/COFF/Symbols.cpp
index 8a6a9b27d45f..a03cb03f8d17 100644
--- a/contrib/llvm-project/lld/COFF/Symbols.cpp
+++ b/contrib/llvm-project/lld/COFF/Symbols.cpp
@@ -36,9 +36,9 @@ static std::string maybeDemangleSymbol(StringRef symName) {
StringRef demangleInput = prefixless;
if (config->machine == I386)
demangleInput.consume_front("_");
- std::string demangled = demangle(std::string(demangleInput));
+ std::string demangled = demangle(demangleInput, true);
if (demangled != demangleInput)
- return prefix + demangle(std::string(demangleInput));
+ return prefix + demangle(demangleInput, true);
return (prefix + prefixless).str();
}
return std::string(symName);
diff --git a/contrib/llvm-project/lld/COFF/Symbols.h b/contrib/llvm-project/lld/COFF/Symbols.h
index bb911171b1f5..c8865d128fb8 100644
--- a/contrib/llvm-project/lld/COFF/Symbols.h
+++ b/contrib/llvm-project/lld/COFF/Symbols.h
@@ -305,10 +305,9 @@ public:
class LazyObject : public Symbol {
public:
- LazyObject(LazyObjFile *f, StringRef n)
- : Symbol(LazyObjectKind, n), file(f) {}
+ LazyObject(InputFile *f, StringRef n) : Symbol(LazyObjectKind, n), file(f) {}
static bool classof(const Symbol *s) { return s->kind() == LazyObjectKind; }
- LazyObjFile *file;
+ InputFile *file;
};
// MinGW only.
diff --git a/contrib/llvm-project/lld/COFF/Writer.cpp b/contrib/llvm-project/lld/COFF/Writer.cpp
index 0788f3519f4e..12db942f1db5 100644
--- a/contrib/llvm-project/lld/COFF/Writer.cpp
+++ b/contrib/llvm-project/lld/COFF/Writer.cpp
@@ -485,7 +485,7 @@ static bool createThunks(OutputSection *os, int margin) {
MutableArrayRef<coff_relocation> newRelocs;
if (originalRelocs.data() == curRelocs.data()) {
newRelocs = makeMutableArrayRef(
- bAlloc.Allocate<coff_relocation>(originalRelocs.size()),
+ bAlloc().Allocate<coff_relocation>(originalRelocs.size()),
originalRelocs.size());
} else {
newRelocs = makeMutableArrayRef(
@@ -1246,7 +1246,7 @@ void Writer::mergeSections() {
if (p.first == toName)
continue;
StringSet<> names;
- while (1) {
+ while (true) {
if (!names.insert(toName).second)
fatal("/merge: cycle found for section '" + p.first + "'");
auto i = config->merge.find(toName);
diff --git a/contrib/llvm-project/lld/Common/CommonLinkerContext.cpp b/contrib/llvm-project/lld/Common/CommonLinkerContext.cpp
new file mode 100644
index 000000000000..50ccbb37c796
--- /dev/null
+++ b/contrib/llvm-project/lld/Common/CommonLinkerContext.cpp
@@ -0,0 +1,45 @@
+//===- CommonLinkerContext.cpp --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "lld/Common/CommonLinkerContext.h"
+#include "lld/Common/ErrorHandler.h"
+#include "lld/Common/Memory.h"
+
+using namespace llvm;
+using namespace lld;
+
+// Reference to the current LLD instance. This is a temporary situation, until
+// we pass this context everywhere by reference, or we make it a thread_local,
+// as in https://reviews.llvm.org/D108850?id=370678 where each thread can be
+// associated with an LLD instance. Only then will LLD be free of global
+// state.
+static CommonLinkerContext *lctx;
+
+CommonLinkerContext::CommonLinkerContext() { lctx = this; }
+
+CommonLinkerContext::~CommonLinkerContext() {
+ assert(lctx);
+ // Explicitly call the destructors since we created the objects with placement
+ // new in SpecificAlloc::create().
+ for (auto &it : instances)
+ it.second->~SpecificAllocBase();
+ lctx = nullptr;
+}
+
+CommonLinkerContext &lld::commonContext() {
+ assert(lctx);
+ return *lctx;
+}
+
+bool lld::hasContext() { return lctx != nullptr; }
+
+void CommonLinkerContext::destroy() {
+ if (lctx == nullptr)
+ return;
+ delete lctx;
+}
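The new CommonLinkerContext above centralizes what used to be process-wide state: construction registers the instance in a static pointer, and the destructor explicitly runs the destructors of objects that were placement-new'ed into the context's allocator (the loop over `instances`). Below is a rough standalone sketch of that ownership pattern; DemoContext and its make() are invented names, and plain operator new stands in for the BumpPtrAllocator.

  #include <cassert>
  #include <new>
  #include <utility>
  #include <vector>

  class DemoContext {
  public:
    DemoContext() {
      assert(!current && "another context is already active");
      current = this;
    }

    ~DemoContext() {
      // The objects were built with placement new into raw storage, so their
      // destructors must be invoked explicitly before the storage is released
      // (the real context walks its `instances` map the same way).
      for (auto it = objects.rbegin(); it != objects.rend(); ++it) {
        it->destroy(it->storage);
        ::operator delete(it->storage);
      }
      current = nullptr;
    }

    static DemoContext &get() {
      assert(current && "no active context");
      return *current;
    }

    // Construct a T whose lifetime is tied to the context (loosely the role of
    // lld::make<T>(); over-aligned types are ignored in this sketch).
    template <typename T, typename... Args> T *make(Args &&...args) {
      void *mem = ::operator new(sizeof(T));
      T *obj = new (mem) T(std::forward<Args>(args)...);
      objects.push_back({mem, [](void *p) { static_cast<T *>(p)->~T(); }});
      return obj;
    }

  private:
    struct Entry {
      void *storage;
      void (*destroy)(void *);
    };
    std::vector<Entry> objects;
    static DemoContext *current;
  };

  DemoContext *DemoContext::current = nullptr;

  int main() {
    DemoContext ctx;
    auto *v = ctx.make<std::vector<int>>(3, 1); // destroyed when ctx goes away
    return v->size() == 3 ? 0 : 1;
  }

A driver creates one such context per link invocation (the COFFLinkerContext in this merge derives from it), so allocations made through it cannot accumulate across library-mode link calls.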
diff --git a/contrib/llvm-project/lld/Common/ErrorHandler.cpp b/contrib/llvm-project/lld/Common/ErrorHandler.cpp
index 399b6cac7547..15b3bd058ee9 100644
--- a/contrib/llvm-project/lld/Common/ErrorHandler.cpp
+++ b/contrib/llvm-project/lld/Common/ErrorHandler.cpp
@@ -10,6 +10,7 @@
#include "llvm/Support/Parallel.h"
+#include "lld/Common/CommonLinkerContext.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
@@ -18,51 +19,69 @@
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/raw_ostream.h"
-#include <mutex>
#include <regex>
using namespace llvm;
using namespace lld;
-// The functions defined in this file can be called from multiple threads,
-// but lld::outs() or lld::errs() are not thread-safe. We protect them using a
-// mutex.
-static std::mutex mu;
-
-// We want to separate multi-line messages with a newline. `sep` is "\n"
-// if the last messages was multi-line. Otherwise "".
-static StringRef sep;
-
static StringRef getSeparator(const Twine &msg) {
if (StringRef(msg.str()).contains('\n'))
return "\n";
return "";
}
-raw_ostream *lld::stdoutOS;
-raw_ostream *lld::stderrOS;
+ErrorHandler::~ErrorHandler() {
+ if (cleanupCallback)
+ cleanupCallback();
+}
+
+void ErrorHandler::initialize(llvm::raw_ostream &stdoutOS,
+ llvm::raw_ostream &stderrOS, bool exitEarly,
+ bool disableOutput) {
+ this->stdoutOS = &stdoutOS;
+ this->stderrOS = &stderrOS;
+ stderrOS.enable_colors(stderrOS.has_colors());
+ this->exitEarly = exitEarly;
+ this->disableOutput = disableOutput;
+}
-ErrorHandler &lld::errorHandler() {
- static ErrorHandler handler;
- return handler;
+void ErrorHandler::flushStreams() {
+ std::lock_guard<std::mutex> lock(mu);
+ outs().flush();
+ errs().flush();
}
+ErrorHandler &lld::errorHandler() { return context().e; }
+
raw_ostream &lld::outs() {
- if (errorHandler().disableOutput)
+ ErrorHandler &e = errorHandler();
+ return e.outs();
+}
+
+raw_ostream &lld::errs() {
+ ErrorHandler &e = errorHandler();
+ return e.errs();
+}
+
+raw_ostream &ErrorHandler::outs() {
+ if (disableOutput)
return llvm::nulls();
return stdoutOS ? *stdoutOS : llvm::outs();
}
-raw_ostream &lld::errs() {
- if (errorHandler().disableOutput)
+raw_ostream &ErrorHandler::errs() {
+ if (disableOutput)
return llvm::nulls();
return stderrOS ? *stderrOS : llvm::errs();
}
void lld::exitLld(int val) {
- // Delete any temporary file, while keeping the memory mapping open.
- if (errorHandler().outputBuffer)
- errorHandler().outputBuffer->discard();
+ if (hasContext()) {
+ ErrorHandler &e = errorHandler();
+ // Delete any temporary file, while keeping the memory mapping open.
+ if (e.outputBuffer)
+ e.outputBuffer->discard();
+ }
// Re-throw a possible signal or exception once/if it was caught by
// safeLldMain().
@@ -75,11 +94,9 @@ void lld::exitLld(int val) {
if (!CrashRecoveryContext::GetCurrent())
llvm_shutdown();
- {
- std::lock_guard<std::mutex> lock(mu);
- lld::outs().flush();
- lld::errs().flush();
- }
+ if (hasContext())
+ lld::errorHandler().flushStreams();
+
// When running inside safeLldMain(), restore the control flow back to the
// CrashRecoveryContext. Otherwise simply use _exit(), meaning no cleanup,
// since we want to avoid further crashes on shutdown.
diff --git a/contrib/llvm-project/lld/Common/Memory.cpp b/contrib/llvm-project/lld/Common/Memory.cpp
index c53e1d3e6cfc..7c90ff1d799c 100644
--- a/contrib/llvm-project/lld/Common/Memory.cpp
+++ b/contrib/llvm-project/lld/Common/Memory.cpp
@@ -7,16 +7,19 @@
//===----------------------------------------------------------------------===//
#include "lld/Common/Memory.h"
+#include "lld/Common/CommonLinkerContext.h"
using namespace llvm;
using namespace lld;
-BumpPtrAllocator lld::bAlloc;
-StringSaver lld::saver{bAlloc};
-std::vector<SpecificAllocBase *> lld::SpecificAllocBase::instances;
-
-void lld::freeArena() {
- for (SpecificAllocBase *alloc : SpecificAllocBase::instances)
- alloc->reset();
- bAlloc.Reset();
+SpecificAllocBase *
+lld::SpecificAllocBase::getOrCreate(void *tag, size_t size, size_t align,
+ SpecificAllocBase *(&creator)(void *)) {
+ auto &instances = context().instances;
+ auto &instance = instances[tag];
+ if (instance == nullptr) {
+ void *storage = context().bAlloc.Allocate(size, align);
+ instance = creator(storage);
+ }
+ return instance;
}
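This hunk, together with the new CommonLinkerContext, explains the mechanical saver → saver() and bAlloc → bAlloc() edits throughout the COFF and ELF diffs: the string saver and bump allocator are no longer globals but members of the active context, reached through small accessor functions. A compact sketch of the same shape, with invented names and a std::deque standing in for the allocator-backed StringSaver:

  #include <deque>
  #include <string>

  // Context-owned replacement for what used to be a global `saver` object.
  struct DemoSaver {
    // Copies the string into context-owned storage and returns a reference that
    // stays valid for the context's lifetime (a deque never relocates existing
    // elements on push_back).
    const std::string &save(std::string s) {
      storage.push_back(std::move(s));
      return storage.back();
    }
    std::deque<std::string> storage;
  };

  struct DemoLinkerContext {
    DemoSaver stringSaver;
    static DemoLinkerContext *active;
  };
  DemoLinkerContext *DemoLinkerContext::active = nullptr;

  // What call sites now use: saver().save(...) instead of saver.save(...).
  DemoSaver &saver() { return DemoLinkerContext::active->stringSaver; }

  int main() {
    DemoLinkerContext ctx;
    DemoLinkerContext::active = &ctx;
    return saver().save("_symbol").size() == 7 ? 0 : 1;
  }

The extra parentheses are the visible cost at call sites; the benefit is that a fresh context gets a fresh saver and allocator instead of state accumulating in globals across links.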
diff --git a/contrib/llvm-project/lld/Common/Strings.cpp b/contrib/llvm-project/lld/Common/Strings.cpp
index 7bf336490dae..6e5478e335ca 100644
--- a/contrib/llvm-project/lld/Common/Strings.cpp
+++ b/contrib/llvm-project/lld/Common/Strings.cpp
@@ -9,7 +9,6 @@
#include "lld/Common/Strings.h"
#include "lld/Common/ErrorHandler.h"
#include "lld/Common/LLVM.h"
-#include "llvm/Demangle/Demangle.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/GlobPattern.h"
#include <algorithm>
@@ -19,18 +18,6 @@
using namespace llvm;
using namespace lld;
-// Returns the demangled C++ symbol name for name.
-std::string lld::demangleItanium(StringRef name) {
- // demangleItanium() can be called for all symbols. Only demangle C++ symbols,
- // to avoid getting unexpected result for a C symbol that happens to match a
- // mangled type name such as "Pi" (which would demangle to "int*").
- if (!name.startswith("_Z") && !name.startswith("__Z") &&
- !name.startswith("___Z") && !name.startswith("____Z"))
- return std::string(name);
-
- return demangle(std::string(name));
-}
-
SingleStringMatcher::SingleStringMatcher(StringRef Pattern) {
if (Pattern.size() > 2 && Pattern.startswith("\"") &&
Pattern.endswith("\"")) {
diff --git a/contrib/llvm-project/lld/Common/TargetOptionsCommandFlags.cpp b/contrib/llvm-project/lld/Common/TargetOptionsCommandFlags.cpp
index d39477ed89ad..b7749c4a2032 100644
--- a/contrib/llvm-project/lld/Common/TargetOptionsCommandFlags.cpp
+++ b/contrib/llvm-project/lld/Common/TargetOptionsCommandFlags.cpp
@@ -7,12 +7,9 @@
//===----------------------------------------------------------------------===//
#include "lld/Common/TargetOptionsCommandFlags.h"
-
#include "llvm/CodeGen/CommandFlags.h"
#include "llvm/Target/TargetOptions.h"
-static llvm::codegen::RegisterCodeGenFlags CGF;
-
llvm::TargetOptions lld::initTargetOptionsFromCodeGenFlags() {
return llvm::codegen::InitTargetOptionsFromCodeGenFlags(llvm::Triple());
}
diff --git a/contrib/llvm-project/lld/Common/Timer.cpp b/contrib/llvm-project/lld/Common/Timer.cpp
index 40fecd8892c1..29838c9720b7 100644
--- a/contrib/llvm-project/lld/Common/Timer.cpp
+++ b/contrib/llvm-project/lld/Common/Timer.cpp
@@ -9,6 +9,7 @@
#include "lld/Common/Timer.h"
#include "lld/Common/ErrorHandler.h"
#include "llvm/Support/Format.h"
+#include <ratio>
using namespace lld;
using namespace llvm;
diff --git a/contrib/llvm-project/lld/ELF/AArch64ErrataFix.cpp b/contrib/llvm-project/lld/ELF/AArch64ErrataFix.cpp
index 50d4c237778b..d45edf9bd8ff 100644
--- a/contrib/llvm-project/lld/ELF/AArch64ErrataFix.cpp
+++ b/contrib/llvm-project/lld/ELF/AArch64ErrataFix.cpp
@@ -33,7 +33,7 @@
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
-#include "lld/Common/Memory.h"
+#include "lld/Common/CommonLinkerContext.h"
#include "lld/Common/Strings.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/raw_ostream.h"
@@ -398,9 +398,9 @@ Patch843419Section::Patch843419Section(InputSection *p, uint64_t off)
patchee(p), patcheeOffset(off) {
this->parent = p->getParent();
patchSym = addSyntheticLocal(
- saver.save("__CortexA53843419_" + utohexstr(getLDSTAddr())), STT_FUNC, 0,
- getSize(), *this);
- addSyntheticLocal(saver.save("$x"), STT_NOTYPE, 0, 0, *this);
+ saver().save("__CortexA53843419_" + utohexstr(getLDSTAddr())), STT_FUNC,
+ 0, getSize(), *this);
+ addSyntheticLocal(saver().save("$x"), STT_NOTYPE, 0, 0, *this);
}
uint64_t Patch843419Section::getLDSTAddr() const {
@@ -512,7 +512,7 @@ void AArch64Err843419Patcher::insertPatches(
// determine the insertion point. This is ok as we only merge into an
// InputSectionDescription once per pass, and at the end of the pass
// assignAddresses() will recalculate all the outSecOff values.
- std::vector<InputSection *> tmp;
+ SmallVector<InputSection *, 0> tmp;
tmp.reserve(isd.sections.size() + patches.size());
auto mergeCmp = [](const InputSection *a, const InputSection *b) {
if (a->outSecOff != b->outSecOff)
diff --git a/contrib/llvm-project/lld/ELF/ARMErrataFix.cpp b/contrib/llvm-project/lld/ELF/ARMErrataFix.cpp
index 5ad55f1326b3..25b47b90cef8 100644
--- a/contrib/llvm-project/lld/ELF/ARMErrataFix.cpp
+++ b/contrib/llvm-project/lld/ELF/ARMErrataFix.cpp
@@ -22,7 +22,7 @@
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
-#include "lld/Common/Memory.h"
+#include "lld/Common/CommonLinkerContext.h"
#include "lld/Common/Strings.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/raw_ostream.h"
@@ -142,9 +142,9 @@ Patch657417Section::Patch657417Section(InputSection *p, uint64_t off,
patchee(p), patcheeOffset(off), instr(instr), isARM(isARM) {
parent = p->getParent();
patchSym = addSyntheticLocal(
- saver.save("__CortexA8657417_" + utohexstr(getBranchAddr())), STT_FUNC,
+ saver().save("__CortexA8657417_" + utohexstr(getBranchAddr())), STT_FUNC,
isARM ? 0 : 1, getSize(), *this);
- addSyntheticLocal(saver.save(isARM ? "$a" : "$t"), STT_NOTYPE, 0, 0, *this);
+ addSyntheticLocal(saver().save(isARM ? "$a" : "$t"), STT_NOTYPE, 0, 0, *this);
}
uint64_t Patch657417Section::getBranchAddr() const {
@@ -395,7 +395,7 @@ void ARMErr657417Patcher::insertPatches(
// determine the insertion point. This is ok as we only merge into an
// InputSectionDescription once per pass, and at the end of the pass
// assignAddresses() will recalculate all the outSecOff values.
- std::vector<InputSection *> tmp;
+ SmallVector<InputSection *, 0> tmp;
tmp.reserve(isd.sections.size() + patches.size());
auto mergeCmp = [](const InputSection *a, const InputSection *b) {
if (a->outSecOff != b->outSecOff)
diff --git a/contrib/llvm-project/lld/ELF/Arch/AArch64.cpp b/contrib/llvm-project/lld/ELF/Arch/AArch64.cpp
index 2a1791b178bd..805a5665729f 100644
--- a/contrib/llvm-project/lld/ELF/Arch/AArch64.cpp
+++ b/contrib/llvm-project/lld/ELF/Arch/AArch64.cpp
@@ -575,6 +575,98 @@ void AArch64::relaxTlsIeToLe(uint8_t *loc, const Relocation &rel,
llvm_unreachable("invalid relocation for TLS IE to LE relaxation");
}
+AArch64Relaxer::AArch64Relaxer(ArrayRef<Relocation> relocs) {
+ if (!config->relax || config->emachine != EM_AARCH64) {
+ safeToRelaxAdrpLdr = false;
+ return;
+ }
+ // Check if R_AARCH64_ADR_GOT_PAGE and R_AARCH64_LD64_GOT_LO12_NC
+ // always appear in pairs.
+ size_t i = 0;
+ const size_t size = relocs.size();
+ for (; i != size; ++i) {
+ if (relocs[i].type == R_AARCH64_ADR_GOT_PAGE) {
+ if (i + 1 < size && relocs[i + 1].type == R_AARCH64_LD64_GOT_LO12_NC) {
+ ++i;
+ continue;
+ }
+ break;
+ } else if (relocs[i].type == R_AARCH64_LD64_GOT_LO12_NC) {
+ break;
+ }
+ }
+ safeToRelaxAdrpLdr = i == size;
+}
+
+bool AArch64Relaxer::tryRelaxAdrpLdr(const Relocation &adrpRel,
+ const Relocation &ldrRel, uint64_t secAddr,
+ uint8_t *buf) const {
+ if (!safeToRelaxAdrpLdr)
+ return false;
+
+ // When the definition of sym is not preemptible then we may
+ // be able to relax
+ // ADRP xn, :got: sym
+ // LDR xn, [ xn :got_lo12: sym]
+ // to
+ // ADRP xn, sym
+ // ADD xn, xn, :lo_12: sym
+
+ if (adrpRel.type != R_AARCH64_ADR_GOT_PAGE ||
+ ldrRel.type != R_AARCH64_LD64_GOT_LO12_NC)
+ return false;
+ // Check if the relocations apply to consecutive instructions.
+ if (adrpRel.offset + 4 != ldrRel.offset)
+ return false;
+ // Check if the relocations reference the same symbol and
+ // skip undefined, preemptible and STT_GNU_IFUNC symbols.
+ if (!adrpRel.sym || adrpRel.sym != ldrRel.sym || !adrpRel.sym->isDefined() ||
+ adrpRel.sym->isPreemptible || adrpRel.sym->isGnuIFunc())
+ return false;
+ // Check if the addends of both relocations are zero.
+ if (adrpRel.addend != 0 || ldrRel.addend != 0)
+ return false;
+ uint32_t adrpInstr = read32le(buf + adrpRel.offset);
+ uint32_t ldrInstr = read32le(buf + ldrRel.offset);
+ // Check if the first instruction is ADRP and the second instruction is LDR.
+ if ((adrpInstr & 0x9f000000) != 0x90000000 ||
+ (ldrInstr & 0x3b000000) != 0x39000000)
+ return false;
+ // Check the value of the sf bit.
+ if (!(ldrInstr >> 31))
+ return false;
+ uint32_t adrpDestReg = adrpInstr & 0x1f;
+ uint32_t ldrDestReg = ldrInstr & 0x1f;
+ uint32_t ldrSrcReg = (ldrInstr >> 5) & 0x1f;
+ // Check if ADRP and LDR use the same register.
+ if (adrpDestReg != ldrDestReg || adrpDestReg != ldrSrcReg)
+ return false;
+
+ Symbol &sym = *adrpRel.sym;
+ // Check if the address difference is within 4GB range.
+ int64_t val =
+ getAArch64Page(sym.getVA()) - getAArch64Page(secAddr + adrpRel.offset);
+ if (val != llvm::SignExtend64(val, 33))
+ return false;
+
+ Relocation adrpSymRel = {R_AARCH64_PAGE_PC, R_AARCH64_ADR_PREL_PG_HI21,
+ adrpRel.offset, /*addend=*/0, &sym};
+ Relocation addRel = {R_ABS, R_AARCH64_ADD_ABS_LO12_NC, ldrRel.offset,
+ /*addend=*/0, &sym};
+
+ // adrp x_<dest_reg>
+ write32le(buf + adrpSymRel.offset, 0x90000000 | adrpDestReg);
+ // add x_<dest reg>, x_<dest reg>
+ write32le(buf + addRel.offset, 0x91000000 | adrpDestReg | (adrpDestReg << 5));
+
+ target->relocate(buf + adrpSymRel.offset, adrpSymRel,
+ SignExtend64(getAArch64Page(sym.getVA()) -
+ getAArch64Page(secAddr + adrpSymRel.offset),
+ 64));
+ target->relocate(buf + addRel.offset, addRel, SignExtend64(sym.getVA(), 64));
+ return true;
+}
+
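The bit tests in tryRelaxAdrpLdr above can be exercised on their own. The following is a small standalone sketch (not lld code) that applies the same masks to a candidate instruction pair; the encodings in main are assumed to be `adrp x0, <page>` (0x90000000), `ldr x0, [x0]` (0xf9400000), and a deliberately mismatched `ldr x1, [x0]` (0xf9400001).

  #include <cstdint>
  #include <cstdio>

  // Mirrors the encoding checks in AArch64Relaxer::tryRelaxAdrpLdr: the pair is
  // only a candidate for the ADRP+ADD rewrite when the first word really is an
  // ADRP and the second an LDR (unsigned immediate) on the same register.
  static bool looksLikeRelaxableAdrpLdr(uint32_t adrp, uint32_t ldr) {
    // ADRP: bit 31 set, bits 28..24 == 10000.
    if ((adrp & 0x9f000000) != 0x90000000)
      return false;
    // Load/store register (unsigned immediate) class: bits 29..27 == 111,
    // bits 25..24 == 01.
    if ((ldr & 0x3b000000) != 0x39000000)
      return false;
    // sf bit (bit 31) must be set, as in the hunk above.
    if (!(ldr >> 31))
      return false;
    uint32_t adrpDst = adrp & 0x1f;       // Rd of ADRP
    uint32_t ldrDst = ldr & 0x1f;         // Rt of LDR
    uint32_t ldrBase = (ldr >> 5) & 0x1f; // Rn of LDR
    return adrpDst == ldrDst && adrpDst == ldrBase;
  }

  int main() {
    const uint32_t adrpX0 = 0x90000000; // adrp x0, <page>
    const uint32_t ldrX0 = 0xf9400000;  // ldr  x0, [x0]
    const uint32_t ldrX1 = 0xf9400001;  // ldr  x1, [x0] -- wrong register
    std::printf("%d\n", looksLikeRelaxableAdrpLdr(adrpX0, ldrX0)); // 1
    std::printf("%d\n", looksLikeRelaxableAdrpLdr(adrpX0, ldrX1)); // 0
  }

When all checks (plus the symbol and range checks in the hunk) pass, lld rewrites the pair to `adrp xN, sym` / `add xN, xN, :lo12: sym`, turning the GOT load into a direct address computation.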
// AArch64 may use security features in variant PLT sequences. These are:
// Pointer Authentication (PAC), introduced in armv8.3-a and Branch Target
// Indicator (BTI) introduced in armv8.5-a. The additional instructions used
diff --git a/contrib/llvm-project/lld/ELF/Arch/PPC.cpp b/contrib/llvm-project/lld/ELF/Arch/PPC.cpp
index 828206478b25..47c31e3a3b94 100644
--- a/contrib/llvm-project/lld/ELF/Arch/PPC.cpp
+++ b/contrib/llvm-project/lld/ELF/Arch/PPC.cpp
@@ -193,7 +193,7 @@ void PPC::writeGotHeader(uint8_t *buf) const {
void PPC::writeGotPlt(uint8_t *buf, const Symbol &s) const {
// Address of the symbol resolver stub in .glink .
- write32(buf, in.plt->getVA() + in.plt->headerSize + 4 * s.pltIndex);
+ write32(buf, in.plt->getVA() + in.plt->headerSize + 4 * s.getPltIdx());
}
bool PPC::needsThunk(RelExpr expr, RelType type, const InputFile *file,
diff --git a/contrib/llvm-project/lld/ELF/Arch/PPC64.cpp b/contrib/llvm-project/lld/ELF/Arch/PPC64.cpp
index dbda7d48a70a..094478548fad 100644
--- a/contrib/llvm-project/lld/ELF/Arch/PPC64.cpp
+++ b/contrib/llvm-project/lld/ELF/Arch/PPC64.cpp
@@ -11,8 +11,7 @@
#include "SyntheticSections.h"
#include "Target.h"
#include "Thunks.h"
-#include "lld/Common/ErrorHandler.h"
-#include "lld/Common/Memory.h"
+#include "lld/Common/CommonLinkerContext.h"
#include "llvm/Support/Endian.h"
using namespace llvm;
@@ -197,7 +196,7 @@ static bool addOptional(StringRef name, uint64_t value,
Symbol *sym = symtab->find(name);
if (!sym || sym->isDefined())
return false;
- sym->resolve(Defined{/*file=*/nullptr, saver.save(name), STB_GLOBAL,
+ sym->resolve(Defined{/*file=*/nullptr, saver().save(name), STB_GLOBAL,
STV_HIDDEN, STT_FUNC, value,
/*size=*/0, /*section=*/nullptr});
defined.push_back(cast<Defined>(sym));
@@ -1106,7 +1105,7 @@ void PPC64::writePltHeader(uint8_t *buf) const {
void PPC64::writePlt(uint8_t *buf, const Symbol &sym,
uint64_t /*pltEntryAddr*/) const {
- int32_t offset = pltHeaderSize + sym.pltIndex * pltEntrySize;
+ int32_t offset = pltHeaderSize + sym.getPltIdx() * pltEntrySize;
// bl __glink_PLTresolve
write32(buf, 0x48000000 | ((-offset) & 0x03FFFFFc));
}
diff --git a/contrib/llvm-project/lld/ELF/Arch/X86.cpp b/contrib/llvm-project/lld/ELF/Arch/X86.cpp
index 2560dc883257..e084bd646912 100644
--- a/contrib/llvm-project/lld/ELF/Arch/X86.cpp
+++ b/contrib/llvm-project/lld/ELF/Arch/X86.cpp
@@ -220,7 +220,7 @@ void X86::writePltHeader(uint8_t *buf) const {
void X86::writePlt(uint8_t *buf, const Symbol &sym,
uint64_t pltEntryAddr) const {
- unsigned relOff = in.relaPlt->entsize * sym.pltIndex;
+ unsigned relOff = in.relaPlt->entsize * sym.getPltIdx();
if (config->isPic) {
const uint8_t inst[] = {
0xff, 0xa3, 0, 0, 0, 0, // jmp *foo@GOT(%ebx)
@@ -502,7 +502,7 @@ IntelIBT::IntelIBT() { pltHeaderSize = 0; }
void IntelIBT::writeGotPlt(uint8_t *buf, const Symbol &s) const {
uint64_t va =
- in.ibtPlt->getVA() + IBTPltHeaderSize + s.pltIndex * pltEntrySize;
+ in.ibtPlt->getVA() + IBTPltHeaderSize + s.getPltIdx() * pltEntrySize;
write32le(buf, va);
}
@@ -600,7 +600,7 @@ void RetpolinePic::writePltHeader(uint8_t *buf) const {
void RetpolinePic::writePlt(uint8_t *buf, const Symbol &sym,
uint64_t pltEntryAddr) const {
- unsigned relOff = in.relaPlt->entsize * sym.pltIndex;
+ unsigned relOff = in.relaPlt->entsize * sym.getPltIdx();
const uint8_t insn[] = {
0x50, // pushl %eax
0x8b, 0x83, 0, 0, 0, 0, // mov foo@GOT(%ebx), %eax
@@ -659,7 +659,7 @@ void RetpolineNoPic::writePltHeader(uint8_t *buf) const {
void RetpolineNoPic::writePlt(uint8_t *buf, const Symbol &sym,
uint64_t pltEntryAddr) const {
- unsigned relOff = in.relaPlt->entsize * sym.pltIndex;
+ unsigned relOff = in.relaPlt->entsize * sym.getPltIdx();
const uint8_t insn[] = {
0x50, // 0: pushl %eax
0xa1, 0, 0, 0, 0, // 1: mov foo_in_GOT, %eax
diff --git a/contrib/llvm-project/lld/ELF/Arch/X86_64.cpp b/contrib/llvm-project/lld/ELF/Arch/X86_64.cpp
index 614b5ed59218..f1360f50f8ac 100644
--- a/contrib/llvm-project/lld/ELF/Arch/X86_64.cpp
+++ b/contrib/llvm-project/lld/ELF/Arch/X86_64.cpp
@@ -282,12 +282,12 @@ bool X86_64::deleteFallThruJmpInsn(InputSection &is, InputFile *file,
const unsigned sizeOfJmpCCInsn = 6;
// To flip, there must be at least one JmpCC and one direct jmp.
if (is.getSize() < sizeOfDirectJmpInsn + sizeOfJmpCCInsn)
- return 0;
+ return false;
unsigned rbIndex =
getRelocationWithOffset(is, (is.getSize() - sizeOfDirectJmpInsn - 4));
if (rbIndex == is.relocations.size())
- return 0;
+ return false;
Relocation &rB = is.relocations[rbIndex];
@@ -304,7 +304,8 @@ bool X86_64::deleteFallThruJmpInsn(InputSection &is, InputFile *file,
JmpInsnOpcode jInvert = invertJmpOpcode(jmpOpcodeB);
if (jInvert == J_UNKNOWN)
return false;
- is.jumpInstrMods.push_back({jInvert, (rB.offset - 1), 4});
+ is.jumpInstrMod = make<JumpInstrMod>();
+ *is.jumpInstrMod = {rB.offset - 1, jInvert, 4};
// Move R's values to rB except the offset.
rB = {r.expr, r.type, rB.offset, r.addend, r.sym};
// Cancel R
@@ -416,7 +417,7 @@ void X86_64::writePlt(uint8_t *buf, const Symbol &sym,
memcpy(buf, inst, sizeof(inst));
write32le(buf + 2, sym.getGotPltVA() - pltEntryAddr - 6);
- write32le(buf + 7, sym.pltIndex);
+ write32le(buf + 7, sym.getPltIdx());
write32le(buf + 12, in.plt->getVA() - pltEntryAddr - 16);
}
@@ -946,7 +947,7 @@ void X86_64::relaxGot(uint8_t *loc, const Relocation &rel, uint64_t val) const {
bool X86_64::adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end,
uint8_t stOther) const {
if (!config->is64) {
- error("Target doesn't support split stacks.");
+ error("target doesn't support split stacks");
return false;
}
@@ -994,7 +995,7 @@ IntelIBT::IntelIBT() { pltHeaderSize = 0; }
void IntelIBT::writeGotPlt(uint8_t *buf, const Symbol &s) const {
uint64_t va =
- in.ibtPlt->getVA() + IBTPltHeaderSize + s.pltIndex * pltEntrySize;
+ in.ibtPlt->getVA() + IBTPltHeaderSize + s.getPltIdx() * pltEntrySize;
write64le(buf, va);
}
@@ -1106,7 +1107,7 @@ void Retpoline::writePlt(uint8_t *buf, const Symbol &sym,
write32le(buf + 3, sym.getGotPltVA() - pltEntryAddr - 7);
write32le(buf + 8, -off - 12 + 32);
write32le(buf + 13, -off - 17 + 18);
- write32le(buf + 18, sym.pltIndex);
+ write32le(buf + 18, sym.getPltIdx());
write32le(buf + 23, -off - 27);
}
diff --git a/contrib/llvm-project/lld/ELF/Config.h b/contrib/llvm-project/lld/ELF/Config.h
index b3d5219ff57b..47bbed125cb1 100644
--- a/contrib/llvm-project/lld/ELF/Config.h
+++ b/contrib/llvm-project/lld/ELF/Config.h
@@ -87,8 +87,8 @@ struct SymbolVersion {
struct VersionDefinition {
llvm::StringRef name;
uint16_t id;
- std::vector<SymbolVersion> nonLocalPatterns;
- std::vector<SymbolVersion> localPatterns;
+ SmallVector<SymbolVersion, 0> nonLocalPatterns;
+ SmallVector<SymbolVersion, 0> localPatterns;
};
// This struct contains the global configuration for the linker.
diff --git a/contrib/llvm-project/lld/ELF/Driver.cpp b/contrib/llvm-project/lld/ELF/Driver.cpp
index 6b689f50cce7..de26afddd28b 100644
--- a/contrib/llvm-project/lld/ELF/Driver.cpp
+++ b/contrib/llvm-project/lld/ELF/Driver.cpp
@@ -77,16 +77,17 @@ std::unique_ptr<LinkerDriver> elf::driver;
static void setConfigs(opt::InputArgList &args);
static void readConfigs(opt::InputArgList &args);
-bool elf::link(ArrayRef<const char *> args, bool canExitEarly,
- raw_ostream &stdoutOS, raw_ostream &stderrOS) {
- lld::stdoutOS = &stdoutOS;
- lld::stderrOS = &stderrOS;
-
- errorHandler().cleanupCallback = []() {
- freeArena();
-
+bool elf::link(ArrayRef<const char *> args, llvm::raw_ostream &stdoutOS,
+ llvm::raw_ostream &stderrOS, bool exitEarly,
+ bool disableOutput) {
+ // This driver-specific context will be freed later by lldMain().
+ auto *ctx = new CommonLinkerContext;
+
+ ctx->e.initialize(stdoutOS, stderrOS, exitEarly, disableOutput);
+ ctx->e.cleanupCallback = []() {
inputSections.clear();
outputSections.clear();
+ memoryBuffers.clear();
archiveFiles.clear();
binaryFiles.clear();
bitcodeFiles.clear();
@@ -95,43 +96,33 @@ bool elf::link(ArrayRef<const char *> args, bool canExitEarly,
sharedFiles.clear();
backwardReferences.clear();
whyExtract.clear();
+ symAux.clear();
tar = nullptr;
- memset(&in, 0, sizeof(in));
+ in.reset();
- partitions = {Partition()};
+ partitions.clear();
+ partitions.emplace_back();
SharedFile::vernauxNum = 0;
};
-
- errorHandler().logName = args::getFilenameWithoutExe(args[0]);
- errorHandler().errorLimitExceededMsg =
- "too many errors emitted, stopping now (use "
- "-error-limit=0 to see all errors)";
- errorHandler().exitEarly = canExitEarly;
- stderrOS.enable_colors(stderrOS.has_colors());
+ ctx->e.logName = args::getFilenameWithoutExe(args[0]);
+ ctx->e.errorLimitExceededMsg = "too many errors emitted, stopping now (use "
+ "-error-limit=0 to see all errors)";
config = std::make_unique<Configuration>();
driver = std::make_unique<LinkerDriver>();
script = std::make_unique<LinkerScript>();
symtab = std::make_unique<SymbolTable>();
- partitions = {Partition()};
+ partitions.clear();
+ partitions.emplace_back();
config->progName = args[0];
driver->linkerMain(args);
- // Exit immediately if we don't need to return to the caller.
- // This saves time because the overhead of calling destructors
- // for all globally-allocated objects is not negligible.
- if (canExitEarly)
- exitLld(errorCount() ? 1 : 0);
-
- bool ret = errorCount() == 0;
- if (!canExitEarly)
- errorHandler().reset();
- return ret;
+ return errorCount() == 0;
}
// Parses a linker -m option.
@@ -232,23 +223,22 @@ void LinkerDriver::addFile(StringRef path, bool withLOption) {
std::unique_ptr<Archive> file =
CHECK(Archive::create(mbref), path + ": failed to parse archive");
- // If an archive file has no symbol table, it is likely that a user
- // is attempting LTO and using a default ar command that doesn't
- // understand the LLVM bitcode file. It is a pretty common error, so
- // we'll handle it as if it had a symbol table.
+ // If an archive file has no symbol table, it may be intentional (used as a
+ // group of lazy object files where the symbol table is not useful), or the
+ // user is attempting LTO and using a default ar command that doesn't
+ // understand the LLVM bitcode file. Treat the archive as a group of lazy
+ // object files.
if (!file->isEmpty() && !file->hasSymbolTable()) {
- // Check if all members are bitcode files. If not, ignore, which is the
- // default action without the LTO hack described above.
- for (const std::pair<MemoryBufferRef, uint64_t> &p :
- getArchiveMembers(mbref))
- if (identify_magic(p.first.getBuffer()) != file_magic::bitcode) {
- error(path + ": archive has no index; run ranlib to add one");
- return;
- }
-
for (const std::pair<MemoryBufferRef, uint64_t> &p :
- getArchiveMembers(mbref))
- files.push_back(createLazyFile(p.first, path, p.second));
+ getArchiveMembers(mbref)) {
+ auto magic = identify_magic(p.first.getBuffer());
+ if (magic == file_magic::bitcode ||
+ magic == file_magic::elf_relocatable)
+ files.push_back(createLazyFile(p.first, path, p.second));
+ else
+ error(path + ": archive member '" + p.first.getBufferIdentifier() +
+ "' is neither ET_REL nor LLVM bitcode");
+ }
return;
}
@@ -1256,7 +1246,7 @@ static void readConfigs(opt::InputArgList &args) {
// Parse LTO options.
if (auto *arg = args.getLastArg(OPT_plugin_opt_mcpu_eq))
- parseClangOption(saver.save("-mcpu=" + StringRef(arg->getValue())),
+ parseClangOption(saver().save("-mcpu=" + StringRef(arg->getValue())),
arg->getSpelling());
for (opt::Arg *arg : args.filtered(OPT_plugin_opt_eq_minus))
@@ -1724,7 +1714,7 @@ static void handleUndefinedGlob(StringRef arg) {
// symbols to the symbol table, invalidating the current iterator.
std::vector<Symbol *> syms;
for (Symbol *sym : symtab->symbols())
- if (pat->match(sym->getName()))
+ if (!sym->isPlaceholder() && pat->match(sym->getName()))
syms.push_back(sym);
for (Symbol *sym : syms)
@@ -1847,8 +1837,7 @@ static void demoteSharedSymbols() {
llvm::TimeTraceScope timeScope("Demote shared symbols");
for (Symbol *sym : symtab->symbols()) {
auto *s = dyn_cast<SharedSymbol>(sym);
- if (!((s && !s->getFile().isNeeded) ||
- (sym->isLazy() && sym->isUsedInRegularObj)))
+ if (!(s && !s->getFile().isNeeded) && !sym->isLazy())
continue;
bool used = sym->used;
@@ -1985,6 +1974,28 @@ static Symbol *addUnusedUndefined(StringRef name,
return symtab->addSymbol(sym);
}
+static void markBuffersAsDontNeed(bool skipLinkedOutput) {
+ // With --thinlto-index-only, all buffers are nearly unused from now on
+ // (except symbol/section names used by infrequent passes). Mark input file
+ // buffers as MADV_DONTNEED so that these pages can be reused by the expensive
+ // thin link, saving memory.
+ if (skipLinkedOutput) {
+ for (MemoryBuffer &mb : llvm::make_pointee_range(memoryBuffers))
+ mb.dontNeedIfMmap();
+ return;
+ }
+
+ // Otherwise, just mark MemoryBuffers backing BitcodeFiles.
+ DenseSet<const char *> bufs;
+ for (BitcodeFile *file : bitcodeFiles)
+ bufs.insert(file->mb.getBufferStart());
+ for (BitcodeFile *file : lazyBitcodeFiles)
+ bufs.insert(file->mb.getBufferStart());
+ for (MemoryBuffer &mb : llvm::make_pointee_range(memoryBuffers))
+ if (bufs.count(mb.getBufferStart()))
+ mb.dontNeedIfMmap();
+}
+
// This function is where all the optimizations of link-time
// optimization take place. When LTO is in use, some input files are
// not in native object file format but in the LLVM bitcode format.
@@ -1992,13 +2003,17 @@ static Symbol *addUnusedUndefined(StringRef name,
// using LLVM functions and replaces bitcode symbols with the results.
// Because all bitcode files that the program consists of are passed to
// the compiler at once, it can do a whole-program optimization.
-template <class ELFT> void LinkerDriver::compileBitcodeFiles() {
+template <class ELFT>
+void LinkerDriver::compileBitcodeFiles(bool skipLinkedOutput) {
llvm::TimeTraceScope timeScope("LTO");
// Compile bitcode files and replace bitcode symbols.
lto.reset(new BitcodeCompiler);
for (BitcodeFile *file : bitcodeFiles)
lto->add(*file);
+ if (!bitcodeFiles.empty())
+ markBuffersAsDontNeed(skipLinkedOutput);
+
for (InputFile *file : lto->compile()) {
auto *obj = cast<ObjFile<ELFT>>(file);
obj->parse(/*ignoreComdats=*/true);
@@ -2006,7 +2021,8 @@ template <class ELFT> void LinkerDriver::compileBitcodeFiles() {
// Parse '@' in symbol names for non-relocatable output.
if (!config->relocatable)
for (Symbol *sym : obj->getGlobalSymbols())
- sym->parseSymbolVersion();
+ if (sym->hasVersionSuffix)
+ sym->parseSymbolVersion();
objectFiles.push_back(obj);
}
}
@@ -2043,9 +2059,9 @@ static std::vector<WrappedSymbol> addWrappedSymbols(opt::InputArgList &args) {
if (!sym)
continue;
- Symbol *real = addUnusedUndefined(saver.save("__real_" + name));
+ Symbol *real = addUnusedUndefined(saver().save("__real_" + name));
Symbol *wrap =
- addUnusedUndefined(saver.save("__wrap_" + name), sym->binding);
+ addUnusedUndefined(saver().save("__wrap_" + name), sym->binding);
v.push_back({sym, real, wrap});
// We want to tell LTO not to inline symbols to be overwritten
@@ -2080,8 +2096,10 @@ static void redirectSymbols(ArrayRef<WrappedSymbol> wrapped) {
map[w.real] = w.sym;
}
for (Symbol *sym : symtab->symbols()) {
- // Enumerate symbols with a non-default version (foo@v1).
- StringRef name = sym->getName();
+ // Enumerate symbols with a non-default version (foo@v1). hasVersionSuffix
+ // filters out most symbols but is not sufficient.
+ if (!sym->hasVersionSuffix)
+ continue;
const char *suffix1 = sym->getVersionSuffix();
if (suffix1[0] != '@' || suffix1[1] == '@')
continue;
@@ -2090,7 +2108,7 @@ static void redirectSymbols(ArrayRef<WrappedSymbol> wrapped) {
//
// * There is a definition of foo@v1 and foo@@v1.
// * There is a definition of foo@v1 and foo.
- Defined *sym2 = dyn_cast_or_null<Defined>(symtab->find(name));
+ Defined *sym2 = dyn_cast_or_null<Defined>(symtab->find(sym->getName()));
if (!sym2)
continue;
const char *suffix2 = sym2->getVersionSuffix();
@@ -2103,6 +2121,7 @@ static void redirectSymbols(ArrayRef<WrappedSymbol> wrapped) {
sym2->resolve(*sym);
// Eliminate foo@v1 from the symbol table.
sym->symbolKind = Symbol::PlaceholderKind;
+ sym->isUsedInRegularObj = false;
} else if (auto *sym1 = dyn_cast<Defined>(sym)) {
if (sym2->versionId > VER_NDX_GLOBAL
? config->versionDefinitions[sym2->versionId].name == suffix1 + 1
@@ -2115,6 +2134,7 @@ static void redirectSymbols(ArrayRef<WrappedSymbol> wrapped) {
// defined in the same place.
map.try_emplace(sym2, sym);
sym2->symbolKind = Symbol::PlaceholderKind;
+ sym2->isUsedInRegularObj = false;
}
}
}
@@ -2357,39 +2377,45 @@ template <class ELFT> void LinkerDriver::link(opt::InputArgList &args) {
symtab->scanVersionScript();
}
+ // Skip the normal linked output if some LTO options are specified.
+ //
+ // For --thinlto-index-only, index file creation is performed in
+ // compileBitcodeFiles, so we are done afterwards. --plugin-opt=emit-llvm and
+ // --plugin-opt=emit-asm create output files in bitcode or assembly code,
+ // respectively. When only certain thinLTO modules are specified for
+ // compilation, the intermediate object files are the expected output.
+ const bool skipLinkedOutput = config->thinLTOIndexOnly || config->emitLLVM ||
+ config->ltoEmitAsm ||
+ !config->thinLTOModulesToCompile.empty();
+
// Do link-time optimization if given files are LLVM bitcode files.
// This compiles bitcode files into real object files.
//
// With this the symbol table should be complete. After this, no new names
// except a few linker-synthesized ones will be added to the symbol table.
- compileBitcodeFiles<ELFT>();
-
- // Handle --exclude-libs again because lto.tmp may reference additional
- // libcalls symbols defined in an excluded archive. This may override
- // versionId set by scanVersionScript().
- if (args.hasArg(OPT_exclude_libs))
- excludeLibs(args);
+ compileBitcodeFiles<ELFT>(skipLinkedOutput);
// Symbol resolution finished. Report backward reference problems.
reportBackrefs();
if (errorCount())
return;
- // If --thinlto-index-only is given, we should create only "index
- // files" and not object files. Index file creation is already done
- // in compileBitcodeFiles, so we are done if that's the case.
- // Likewise, --plugin-opt=emit-llvm and --plugin-opt=emit-asm are the
- // options to create output files in bitcode or assembly code
- // respectively. No object files are generated.
- // Also bail out here when only certain thinLTO modules are specified for
- // compilation. The intermediate object file are the expected output.
- if (config->thinLTOIndexOnly || config->emitLLVM || config->ltoEmitAsm ||
- !config->thinLTOModulesToCompile.empty())
+ // Bail out if normal linked output is skipped due to LTO.
+ if (skipLinkedOutput)
return;
+ // Handle --exclude-libs again because lto.tmp may reference additional
+ // libcalls symbols defined in an excluded archive. This may override
+ // versionId set by scanVersionScript().
+ if (args.hasArg(OPT_exclude_libs))
+ excludeLibs(args);
+
// Apply symbol renames for --wrap and combine foo@v1 and foo@@v1.
redirectSymbols(wrapped);
+ // Replace common symbols with regular symbols.
+ replaceCommonSymbols();
+
{
llvm::TimeTraceScope timeScope("Aggregate sections");
// Now that we have a complete list of input files.
@@ -2474,9 +2500,6 @@ template <class ELFT> void LinkerDriver::link(opt::InputArgList &args) {
if (!config->relocatable)
inputSections.push_back(createCommentSection());
- // Replace common symbols with regular symbols.
- replaceCommonSymbols();
-
// Split SHF_MERGE and .eh_frame sections into pieces in preparation for garbage collection.
splitSections<ELFT>();
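
A minimal sketch of the mechanism behind markBuffersAsDontNeed() above: as its comment says, MemoryBuffer::dontNeedIfMmap() marks file-backed pages so the kernel may reclaim them during the thin link. The sketch restates that idea with plain POSIX madvise(); MappedRegion and adviseDontNeed are illustrative names, not lld or LLVM API.

#include <sys/mman.h>
#include <cstddef>

struct MappedRegion {
  void *addr;    // page-aligned base returned by mmap()
  size_t length; // mapped size in bytes
};

// Advise the kernel that these pages are no longer needed. For a file-backed
// mapping they can be dropped now and re-read from the file if touched again,
// which is the point here: release memory to the thin link while keeping the
// data reachable.
static bool adviseDontNeed(const MappedRegion &r) {
  return madvise(r.addr, r.length, MADV_DONTNEED) == 0;
}
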
diff --git a/contrib/llvm-project/lld/ELF/Driver.h b/contrib/llvm-project/lld/ELF/Driver.h
index 5961e1f69472..b8cbb3b19268 100644
--- a/contrib/llvm-project/lld/ELF/Driver.h
+++ b/contrib/llvm-project/lld/ELF/Driver.h
@@ -34,7 +34,7 @@ private:
void createFiles(llvm::opt::InputArgList &args);
void inferMachineType();
template <class ELFT> void link(llvm::opt::InputArgList &args);
- template <class ELFT> void compileBitcodeFiles();
+ template <class ELFT> void compileBitcodeFiles(bool skipLinkedOutput);
// True if we are in --whole-archive and --no-whole-archive.
bool inWholeArchive = false;
diff --git a/contrib/llvm-project/lld/ELF/DriverUtils.cpp b/contrib/llvm-project/lld/ELF/DriverUtils.cpp
index 54d2d0ae6fb9..ac29b47abcc9 100644
--- a/contrib/llvm-project/lld/ELF/DriverUtils.cpp
+++ b/contrib/llvm-project/lld/ELF/DriverUtils.cpp
@@ -13,8 +13,7 @@
//===----------------------------------------------------------------------===//
#include "Driver.h"
-#include "lld/Common/ErrorHandler.h"
-#include "lld/Common/Memory.h"
+#include "lld/Common/CommonLinkerContext.h"
#include "lld/Common/Reproduce.h"
#include "lld/Common/Version.h"
#include "llvm/ADT/Optional.h"
@@ -102,7 +101,7 @@ static void concatLTOPluginOptions(SmallVectorImpl<const char *> &args) {
for (size_t i = 0, e = args.size(); i != e; ++i) {
StringRef s = args[i];
if ((s == "-plugin-opt" || s == "--plugin-opt") && i + 1 != e) {
- v.push_back(saver.save(s + "=" + args[i + 1]).data());
+ v.push_back(saver().save(s + "=" + args[i + 1]).data());
++i;
} else {
v.push_back(args[i]);
@@ -125,7 +124,7 @@ opt::InputArgList ELFOptTable::parse(ArrayRef<const char *> argv) {
// Expand response files (arguments in the form of @<filename>)
// and then parse the argument again.
- cl::ExpandResponseFiles(saver, getQuotingStyle(args), vec);
+ cl::ExpandResponseFiles(saver(), getQuotingStyle(args), vec);
concatLTOPluginOptions(vec);
args = this->ParseArgs(vec, missingIndex, missingCount);
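
The saver() calls above return the StringSaver held by the per-link CommonLinkerContext; the important property is that concatenated option strings get stable storage for the rest of the link. A standalone sketch of that pattern, assuming the caller owns the BumpPtrAllocator (makePluginOpt is a hypothetical helper, not lld code):

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/StringSaver.h"

// Twine concatenation produces a temporary; save() copies it into the
// allocator, so the returned StringRef stays valid as long as the allocator.
static llvm::StringRef makePluginOpt(llvm::StringSaver &saver,
                                     llvm::StringRef key,
                                     llvm::StringRef value) {
  return saver.save(key + "=" + value);
}

// Usage sketch:
//   llvm::BumpPtrAllocator alloc;
//   llvm::StringSaver saver(alloc);
//   llvm::StringRef opt = makePluginOpt(saver, "-plugin-opt", "mcpu=x86-64");
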
diff --git a/contrib/llvm-project/lld/ELF/EhFrame.cpp b/contrib/llvm-project/lld/ELF/EhFrame.cpp
index 578c640f0214..9ac2ed772073 100644
--- a/contrib/llvm-project/lld/ELF/EhFrame.cpp
+++ b/contrib/llvm-project/lld/ELF/EhFrame.cpp
@@ -36,7 +36,6 @@ namespace {
class EhReader {
public:
EhReader(InputSectionBase *s, ArrayRef<uint8_t> d) : isec(s), d(d) {}
- size_t readEhRecordSize();
uint8_t getFdeEncoding();
bool hasLSDA();
@@ -58,28 +57,6 @@ private:
};
}
-size_t elf::readEhRecordSize(InputSectionBase *s, size_t off) {
- return EhReader(s, s->data().slice(off)).readEhRecordSize();
-}
-
-// .eh_frame section is a sequence of records. Each record starts with
-// a 4 byte length field. This function reads the length.
-size_t EhReader::readEhRecordSize() {
- if (d.size() < 4)
- failOn(d.data(), "CIE/FDE too small");
-
- // First 4 bytes of CIE/FDE is the size of the record.
- // If it is 0xFFFFFFFF, the next 8 bytes contain the size instead,
- // but we do not support that format yet.
- uint64_t v = read32(d.data());
- if (v == UINT32_MAX)
- failOn(d.data(), "CIE/FDE too large");
- uint64_t size = v + 4;
- if (size > d.size())
- failOn(d.data(), "CIE/FDE ends past the end of the section");
- return size;
-}
-
// Read a byte and advance D by one byte.
uint8_t EhReader::readByte() {
if (d.empty())
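
The record-size scanning deleted here (readEhRecordSize) now happens inline in EhInputSection::split() further down. As the removed comments state, each .eh_frame record begins with a 4-byte length field, and the 0xffffffff escape to a 64-bit length is not supported. A simplified sketch of that check, assuming host and target share endianness (the real code reads the field with the target's endianness and reports errors through the error handler):

#include <cstdint>
#include <cstring>
#include <optional>

// Returns the total record size (length field + 4), or nullopt for the cases
// the linker rejects: truncated record, unsupported 64-bit length form, or a
// record running past the end of the section data.
static std::optional<uint64_t> ehRecordSize(const uint8_t *d, size_t avail) {
  if (avail < 4)
    return std::nullopt;            // CIE/FDE too small
  uint32_t v;
  std::memcpy(&v, d, 4);            // host-endian read; sketch only
  if (v == UINT32_MAX)
    return std::nullopt;            // 64-bit length form, not supported
  uint64_t size = uint64_t(v) + 4;  // the length field does not count itself
  if (size > avail)
    return std::nullopt;            // CIE/FDE ends past the end of the section
  return size;
}
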
diff --git a/contrib/llvm-project/lld/ELF/EhFrame.h b/contrib/llvm-project/lld/ELF/EhFrame.h
index e44832317b3a..3b144648cf8f 100644
--- a/contrib/llvm-project/lld/ELF/EhFrame.h
+++ b/contrib/llvm-project/lld/ELF/EhFrame.h
@@ -16,7 +16,6 @@ namespace elf {
class InputSectionBase;
struct EhSectionPiece;
-size_t readEhRecordSize(InputSectionBase *s, size_t off);
uint8_t getFdeEncoding(EhSectionPiece *p);
bool hasLSDA(const EhSectionPiece &p);
} // namespace elf
diff --git a/contrib/llvm-project/lld/ELF/ICF.cpp b/contrib/llvm-project/lld/ELF/ICF.cpp
index ec63d2ef4d6f..3b991e8d3470 100644
--- a/contrib/llvm-project/lld/ELF/ICF.cpp
+++ b/contrib/llvm-project/lld/ELF/ICF.cpp
@@ -461,8 +461,9 @@ template <class ELFT> void ICF<ELFT>::run() {
// Compute isPreemptible early. We may add more symbols later, so this loop
// cannot be merged with the later computeIsPreemptible() pass which is used
// by scanRelocations().
- for (Symbol *sym : symtab->symbols())
- sym->isPreemptible = computeIsPreemptible(*sym);
+ if (config->hasDynSymTab)
+ for (Symbol *sym : symtab->symbols())
+ sym->isPreemptible = computeIsPreemptible(*sym);
// Two text sections may have identical content and relocations but different
// LSDA, e.g. the two functions may have catch blocks of different types. If a
diff --git a/contrib/llvm-project/lld/ELF/InputFiles.cpp b/contrib/llvm-project/lld/ELF/InputFiles.cpp
index e321b0d82920..4da371c619f4 100644
--- a/contrib/llvm-project/lld/ELF/InputFiles.cpp
+++ b/contrib/llvm-project/lld/ELF/InputFiles.cpp
@@ -13,9 +13,8 @@
#include "SymbolTable.h"
#include "Symbols.h"
#include "SyntheticSections.h"
+#include "lld/Common/CommonLinkerContext.h"
#include "lld/Common/DWARF.h"
-#include "lld/Common/ErrorHandler.h"
-#include "lld/Common/Memory.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/IR/LLVMContext.h"
@@ -43,6 +42,7 @@ using namespace lld::elf;
bool InputFile::isInGroup;
uint32_t InputFile::nextGroupId;
+SmallVector<std::unique_ptr<MemoryBuffer>> elf::memoryBuffers;
SmallVector<ArchiveFile *, 0> elf::archiveFiles;
SmallVector<BinaryFile *, 0> elf::binaryFiles;
SmallVector<BitcodeFile *, 0> elf::bitcodeFiles;
@@ -110,7 +110,7 @@ Optional<MemoryBufferRef> elf::readFile(StringRef path) {
// The --chroot option changes our virtual root directory.
// This is useful when you are dealing with files created by --reproduce.
if (!config->chroot.empty() && path.startswith("/"))
- path = saver.save(config->chroot + path);
+ path = saver().save(config->chroot + path);
log(path);
config->dependencyFiles.insert(llvm::CachedHashString(path));
@@ -122,9 +122,8 @@ Optional<MemoryBufferRef> elf::readFile(StringRef path) {
return None;
}
- std::unique_ptr<MemoryBuffer> &mb = *mbOrErr;
- MemoryBufferRef mbref = mb->getMemBufferRef();
- make<std::unique_ptr<MemoryBuffer>>(std::move(mb)); // take MB ownership
+ MemoryBufferRef mbref = (*mbOrErr)->getMemBufferRef();
+ memoryBuffers.push_back(std::move(*mbOrErr)); // take MB ownership
if (tar)
tar->append(relativeToRoot(path), mbref.getBuffer());
@@ -552,7 +551,7 @@ void ObjFile<ELFT>::initializeSections(bool ignoreComdats) {
std::vector<ArrayRef<Elf_Word>> selectedGroups;
- for (size_t i = 0, e = objSections.size(); i < e; ++i) {
+ for (size_t i = 0; i != size; ++i) {
if (this->sections[i] == &InputSection::discarded)
continue;
const Elf_Shdr &sec = objSections[i];
@@ -638,21 +637,61 @@ void ObjFile<ELFT>::initializeSections(bool ignoreComdats) {
// such cases, the relocation section would attempt to reference a target
// section that has not yet been created. For simplicity, delay creation of
// relocation sections until now.
- for (size_t i = 0, e = objSections.size(); i < e; ++i) {
+ for (size_t i = 0; i != size; ++i) {
if (this->sections[i] == &InputSection::discarded)
continue;
const Elf_Shdr &sec = objSections[i];
- if (sec.sh_type == SHT_REL || sec.sh_type == SHT_RELA)
- this->sections[i] = createInputSection(i, sec, shstrtab);
+ if (sec.sh_type == SHT_REL || sec.sh_type == SHT_RELA) {
+ // Find a relocation target section and associate this section with that.
+ // Target may have been discarded if it is in a different section group
+ // and the group is discarded, even though it's a violation of the spec.
+ // We handle that situation gracefully by discarding dangling relocation
+ // sections.
+ const uint32_t info = sec.sh_info;
+ InputSectionBase *s = getRelocTarget(i, sec, info);
+ if (!s)
+ continue;
+
+ // ELF spec allows mergeable sections with relocations, but they are rare,
+ // and it is in practice hard to merge such sections by contents, because
+ // applying relocations at end of linking changes section contents. So, we
+ // simply handle such sections as non-mergeable ones. Degrading like this
+ // is acceptable because section merging is optional.
+ if (auto *ms = dyn_cast<MergeInputSection>(s)) {
+ s = make<InputSection>(ms->file, ms->flags, ms->type, ms->alignment,
+ ms->data(), ms->name);
+ sections[info] = s;
+ }
+
+ if (s->relSecIdx != 0)
+ error(
+ toString(s) +
+ ": multiple relocation sections to one section are not supported");
+ s->relSecIdx = i;
+
+ // Relocation sections are usually removed from the output, so return
+ // `nullptr` for the normal case. However, if -r or --emit-relocs is
+ // specified, we need to copy them to the output. (Some post link analysis
+ // tools specify --emit-relocs to obtain the information.)
+ if (config->copyRelocs) {
+ auto *isec = make<InputSection>(
+ *this, sec, check(obj.getSectionName(sec, shstrtab)));
+ // If the relocated section is discarded (due to /DISCARD/ or
+ // --gc-sections), the relocation section should be discarded as well.
+ s->dependentSections.push_back(isec);
+ sections[i] = isec;
+ }
+ continue;
+ }
// A SHF_LINK_ORDER section with sh_link=0 is handled as if it did not have
// the flag.
- if (!(sec.sh_flags & SHF_LINK_ORDER) || !sec.sh_link)
+ if (!sec.sh_link || !(sec.sh_flags & SHF_LINK_ORDER))
continue;
InputSectionBase *linkSec = nullptr;
- if (sec.sh_link < this->sections.size())
+ if (sec.sh_link < size)
linkSec = this->sections[sec.sh_link];
if (!linkSec)
fatal(toString(this) + ": invalid sh_link index: " + Twine(sec.sh_link));
@@ -828,9 +867,9 @@ template <class ELFT> static uint32_t readAndFeatures(const InputSection &sec) {
}
template <class ELFT>
-InputSectionBase *ObjFile<ELFT>::getRelocTarget(uint32_t idx, StringRef name,
- const Elf_Shdr &sec) {
- uint32_t info = sec.sh_info;
+InputSectionBase *ObjFile<ELFT>::getRelocTarget(uint32_t idx,
+ const Elf_Shdr &sec,
+ uint32_t info) {
if (info < this->sections.size()) {
InputSectionBase *target = this->sections[info];
@@ -844,18 +883,11 @@ InputSectionBase *ObjFile<ELFT>::getRelocTarget(uint32_t idx, StringRef name,
return target;
}
- error(toString(this) + Twine(": relocation section ") + name + " (index " +
- Twine(idx) + ") has invalid sh_info (" + Twine(info) + ")");
+ error(toString(this) + Twine(": relocation section (index ") + Twine(idx) +
+ ") has invalid sh_info (" + Twine(info) + ")");
return nullptr;
}
-// Create a regular InputSection class that has the same contents
-// as a given section.
-static InputSection *toRegularSection(MergeInputSection *sec) {
- return make<InputSection>(sec->file, sec->flags, sec->type, sec->alignment,
- sec->data(), sec->name);
-}
-
template <class ELFT>
InputSectionBase *ObjFile<ELFT>::createInputSection(uint32_t idx,
const Elf_Shdr &sec,
@@ -879,8 +911,8 @@ InputSectionBase *ObjFile<ELFT>::createInputSection(uint32_t idx,
// to work. In a full implementation we would merge all attribute
// sections.
if (in.attributes == nullptr) {
- in.attributes = make<InputSection>(*this, sec, name);
- return in.attributes;
+ in.attributes = std::make_unique<InputSection>(*this, sec, name);
+ return in.attributes.get();
}
return &InputSection::discarded;
}
@@ -901,17 +933,14 @@ InputSectionBase *ObjFile<ELFT>::createInputSection(uint32_t idx,
// standard extensions to enable. In a full implementation we would merge
// all attribute sections.
if (in.attributes == nullptr) {
- in.attributes = make<InputSection>(*this, sec, name);
- return in.attributes;
+ in.attributes = std::make_unique<InputSection>(*this, sec, name);
+ return in.attributes.get();
}
return &InputSection::discarded;
}
}
- switch (sec.sh_type) {
- case SHT_LLVM_DEPENDENT_LIBRARIES: {
- if (config->relocatable)
- break;
+ if (sec.sh_type == SHT_LLVM_DEPENDENT_LIBRARIES && !config->relocatable) {
ArrayRef<char> data =
CHECK(this->getObj().template getSectionContentsAsArray<char>(sec), this);
if (!data.empty() && data.back() != '\0') {
@@ -927,45 +956,6 @@ InputSectionBase *ObjFile<ELFT>::createInputSection(uint32_t idx,
}
return &InputSection::discarded;
}
- case SHT_RELA:
- case SHT_REL: {
- // Find a relocation target section and associate this section with that.
- // Target may have been discarded if it is in a different section group
- // and the group is discarded, even though it's a violation of the
- // spec. We handle that situation gracefully by discarding dangling
- // relocation sections.
- InputSectionBase *target = getRelocTarget(idx, name, sec);
- if (!target)
- return nullptr;
-
- // ELF spec allows mergeable sections with relocations, but they are
- // rare, and it is in practice hard to merge such sections by contents,
- // because applying relocations at end of linking changes section
- // contents. So, we simply handle such sections as non-mergeable ones.
- // Degrading like this is acceptable because section merging is optional.
- if (auto *ms = dyn_cast<MergeInputSection>(target)) {
- target = toRegularSection(ms);
- this->sections[sec.sh_info] = target;
- }
-
- if (target->relSecIdx != 0)
- fatal(toString(this) +
- ": multiple relocation sections to one section are not supported");
- target->relSecIdx = idx;
-
- // Relocation sections are usually removed from the output, so return
- // `nullptr` for the normal case. However, if -r or --emit-relocs is
- // specified, we need to copy them to the output. (Some post link analysis
- // tools specify --emit-relocs to obtain the information.)
- if (!config->copyRelocs)
- return nullptr;
- InputSection *relocSec = make<InputSection>(*this, sec, name);
- // If the relocated section is discarded (due to /DISCARD/ or
- // --gc-sections), the relocation section should be discarded as well.
- target->dependentSections.push_back(relocSec);
- return relocSec;
- }
- }
if (name.startswith(".n")) {
// The GNU linker uses .note.GNU-stack section as a marker indicating
@@ -1076,7 +1066,7 @@ template <class ELFT> void ObjFile<ELFT>::initializeSymbols() {
sourceFile = CHECK(eSym.getName(stringTable), this);
if (LLVM_UNLIKELY(stringTable.size() <= eSym.st_name))
fatal(toString(this) + ": invalid symbol name offset");
- StringRefZ name = stringTable.data() + eSym.st_name;
+ StringRef name(stringTable.data() + eSym.st_name);
symbols[i] = reinterpret_cast<Symbol *>(locals + i);
if (eSym.st_shndx == SHN_UNDEF || sec == &InputSection::discarded)
@@ -1460,9 +1450,10 @@ template <class ELFT> void SharedFile::parse() {
}
// DSOs are uniquified not by filename but by soname.
- DenseMap<StringRef, SharedFile *>::iterator it;
+ DenseMap<CachedHashStringRef, SharedFile *>::iterator it;
bool wasInserted;
- std::tie(it, wasInserted) = symtab->soNames.try_emplace(soName, this);
+ std::tie(it, wasInserted) =
+ symtab->soNames.try_emplace(CachedHashStringRef(soName), this);
// If a DSO appears more than once on the command line with and without
// --as-needed, --no-as-needed takes precedence over --as-needed because a
@@ -1526,8 +1517,8 @@ template <class ELFT> void SharedFile::parse() {
}
StringRef verName = stringTable.data() + verneeds[idx];
versionedNameBuffer.clear();
- name =
- saver.save((name + "@" + verName).toStringRef(versionedNameBuffer));
+ name = saver().save(
+ (name + "@" + verName).toStringRef(versionedNameBuffer));
}
Symbol *s = symtab.addSymbol(
Undefined{this, name, sym.getBinding(), sym.st_other, sym.getType()});
@@ -1569,7 +1560,7 @@ template <class ELFT> void SharedFile::parse() {
reinterpret_cast<const Elf_Verdef *>(verdefs[idx])->getAux()->vda_name;
versionedNameBuffer.clear();
name = (name + "@" + verName).toStringRef(versionedNameBuffer);
- symtab.addSymbol(SharedSymbol{*this, saver.save(name), sym.getBinding(),
+ symtab.addSymbol(SharedSymbol{*this, saver().save(name), sym.getBinding(),
sym.st_other, sym.getType(), sym.st_value,
sym.st_size, alignment, idx});
}
@@ -1652,11 +1643,10 @@ BitcodeFile::BitcodeFile(MemoryBufferRef mb, StringRef archiveName,
// into consideration at LTO time (which very likely causes undefined
// symbols later in the link stage). So we append file offset to make
// filename unique.
- StringRef name =
- archiveName.empty()
- ? saver.save(path)
- : saver.save(archiveName + "(" + path::filename(path) + " at " +
- utostr(offsetInArchive) + ")");
+ StringRef name = archiveName.empty()
+ ? saver().save(path)
+ : saver().save(archiveName + "(" + path::filename(path) +
+ " at " + utostr(offsetInArchive) + ")");
MemoryBufferRef mbref(mb.getBuffer(), name);
obj = CHECK(lto::InputFile::create(mbref), this);
@@ -1680,34 +1670,42 @@ static uint8_t mapVisibility(GlobalValue::VisibilityTypes gvVisibility) {
}
template <class ELFT>
-static Symbol *createBitcodeSymbol(const std::vector<bool> &keptComdats,
- const lto::InputFile::Symbol &objSym,
- BitcodeFile &f) {
- StringRef name = saver.save(objSym.getName());
+static void
+createBitcodeSymbol(Symbol *&sym, const std::vector<bool> &keptComdats,
+ const lto::InputFile::Symbol &objSym, BitcodeFile &f) {
uint8_t binding = objSym.isWeak() ? STB_WEAK : STB_GLOBAL;
uint8_t type = objSym.isTLS() ? STT_TLS : STT_NOTYPE;
uint8_t visibility = mapVisibility(objSym.getVisibility());
bool canOmitFromDynSym = objSym.canBeOmittedFromSymbolTable();
+ StringRef name;
+ if (sym) {
+ name = sym->getName();
+ } else {
+ name = saver().save(objSym.getName());
+ sym = symtab->insert(name);
+ }
+
int c = objSym.getComdatIndex();
if (objSym.isUndefined() || (c != -1 && !keptComdats[c])) {
Undefined newSym(&f, name, binding, visibility, type);
if (canOmitFromDynSym)
newSym.exportDynamic = false;
- Symbol *ret = symtab->addSymbol(newSym);
- ret->referenced = true;
- return ret;
+ sym->resolve(newSym);
+ sym->referenced = true;
+ return;
}
- if (objSym.isCommon())
- return symtab->addSymbol(
- CommonSymbol{&f, name, binding, visibility, STT_OBJECT,
- objSym.getCommonAlignment(), objSym.getCommonSize()});
-
- Defined newSym(&f, name, binding, visibility, type, 0, 0, nullptr);
- if (canOmitFromDynSym)
- newSym.exportDynamic = false;
- return symtab->addSymbol(newSym);
+ if (objSym.isCommon()) {
+ sym->resolve(CommonSymbol{&f, name, binding, visibility, STT_OBJECT,
+ objSym.getCommonAlignment(),
+ objSym.getCommonSize()});
+ } else {
+ Defined newSym(&f, name, binding, visibility, type, 0, 0, nullptr);
+ if (canOmitFromDynSym)
+ newSym.exportDynamic = false;
+ sym->resolve(newSym);
+ }
}
template <class ELFT> void BitcodeFile::parse() {
@@ -1719,10 +1717,11 @@ template <class ELFT> void BitcodeFile::parse() {
.second);
}
- symbols.assign(obj->symbols().size(), nullptr);
- for (auto it : llvm::enumerate(obj->symbols()))
- symbols[it.index()] =
- createBitcodeSymbol<ELFT>(keptComdats, it.value(), *this);
+ symbols.resize(obj->symbols().size());
+ for (auto it : llvm::enumerate(obj->symbols())) {
+ Symbol *&sym = symbols[it.index()];
+ createBitcodeSymbol<ELFT>(sym, keptComdats, it.value(), *this);
+ }
for (auto l : obj->getDependentLibraries())
addDependentLibrary(l, this);
@@ -1730,9 +1729,11 @@ template <class ELFT> void BitcodeFile::parse() {
void BitcodeFile::parseLazy() {
SymbolTable &symtab = *elf::symtab;
- for (const lto::InputFile::Symbol &sym : obj->symbols())
- if (!sym.isUndefined())
- symtab.addSymbol(LazyObject{*this, saver.save(sym.getName())});
+ symbols.resize(obj->symbols().size());
+ for (auto it : llvm::enumerate(obj->symbols()))
+ if (!it.value().isUndefined())
+ symbols[it.index()] = symtab.addSymbol(
+ LazyObject{*this, saver().save(it.value().getName())});
}
void BinaryFile::parse() {
@@ -1750,6 +1751,8 @@ void BinaryFile::parse() {
if (!isAlnum(s[i]))
s[i] = '_';
+ llvm::StringSaver &saver = lld::saver();
+
symtab->addSymbol(Defined{nullptr, saver.save(s + "_start"), STB_GLOBAL,
STV_DEFAULT, STT_OBJECT, 0, 0, section});
symtab->addSymbol(Defined{nullptr, saver.save(s + "_end"), STB_GLOBAL,
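
The readFile() change above replaces the old make<std::unique_ptr<MemoryBuffer>> trick with an explicit owner: the new elf::memoryBuffers vector keeps every buffer alive while callers hold only a non-owning MemoryBufferRef. A standalone sketch of that lifetime pattern (openFile and ownedBuffers are illustrative names; error reporting is elided):

#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/MemoryBuffer.h"
#include <memory>
#include <utility>

static llvm::SmallVector<std::unique_ptr<llvm::MemoryBuffer>, 0> ownedBuffers;

static llvm::Optional<llvm::MemoryBufferRef> openFile(llvm::StringRef path) {
  auto mbOrErr = llvm::MemoryBuffer::getFile(path);
  if (!mbOrErr)
    return llvm::None;              // the real code logs the error here
  llvm::MemoryBufferRef ref = (*mbOrErr)->getMemBufferRef();
  // Transfer ownership to the long-lived vector; `ref` stays valid because the
  // underlying buffer is not destroyed until the vector is cleared.
  ownedBuffers.push_back(std::move(*mbOrErr));
  return ref;
}
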
diff --git a/contrib/llvm-project/lld/ELF/InputFiles.h b/contrib/llvm-project/lld/ELF/InputFiles.h
index d622390fcade..6febea3f437d 100644
--- a/contrib/llvm-project/lld/ELF/InputFiles.h
+++ b/contrib/llvm-project/lld/ELF/InputFiles.h
@@ -66,7 +66,6 @@ public:
enum Kind : uint8_t {
ObjKind,
SharedKind,
- LazyObjKind,
ArchiveKind,
BitcodeKind,
BinaryKind,
@@ -287,8 +286,8 @@ private:
void initializeSymbols();
void initializeJustSymbols();
- InputSectionBase *getRelocTarget(uint32_t idx, StringRef name,
- const Elf_Shdr &sec);
+ InputSectionBase *getRelocTarget(uint32_t idx, const Elf_Shdr &sec,
+ uint32_t info);
InputSectionBase *createInputSection(uint32_t idx, const Elf_Shdr &sec,
StringRef shstrtab);
@@ -408,6 +407,7 @@ inline bool isBitcode(MemoryBufferRef mb) {
std::string replaceThinLTOSuffix(StringRef path);
+extern SmallVector<std::unique_ptr<MemoryBuffer>> memoryBuffers;
extern SmallVector<ArchiveFile *, 0> archiveFiles;
extern SmallVector<BinaryFile *, 0> binaryFiles;
extern SmallVector<BitcodeFile *, 0> bitcodeFiles;
diff --git a/contrib/llvm-project/lld/ELF/InputSection.cpp b/contrib/llvm-project/lld/ELF/InputSection.cpp
index e1ee3def89f3..943cf18e6cf0 100644
--- a/contrib/llvm-project/lld/ELF/InputSection.cpp
+++ b/contrib/llvm-project/lld/ELF/InputSection.cpp
@@ -18,8 +18,7 @@
#include "SyntheticSections.h"
#include "Target.h"
#include "Thunks.h"
-#include "lld/Common/ErrorHandler.h"
-#include "lld/Common/Memory.h"
+#include "lld/Common/CommonLinkerContext.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Compression.h"
#include "llvm/Support/Endian.h"
@@ -143,7 +142,7 @@ void InputSectionBase::uncompress() const {
{
static std::mutex mu;
std::lock_guard<std::mutex> lock(mu);
- uncompressedBuf = bAlloc.Allocate<char>(size);
+ uncompressedBuf = bAlloc().Allocate<char>(size);
}
if (Error e = zlib::uncompress(toStringRef(rawData), uncompressedBuf, size))
@@ -153,12 +152,6 @@ void InputSectionBase::uncompress() const {
uncompressedSize = -1;
}
-uint64_t InputSectionBase::getOffsetInFile() const {
- const uint8_t *fileStart = (const uint8_t *)file->mb.getBufferStart();
- const uint8_t *secStart = data().begin();
- return secStart - fileStart;
-}
-
template <class ELFT> RelsOrRelas<ELFT> InputSectionBase::relsOrRelas() const {
if (relSecIdx == 0)
return {};
@@ -225,7 +218,8 @@ OutputSection *SectionBase::getOutputSection() {
// `uncompressedSize` member and remove the header from `rawData`.
template <typename ELFT> void InputSectionBase::parseCompressedHeader() {
// Old-style header
- if (name.startswith(".zdebug")) {
+ if (!(flags & SHF_COMPRESSED)) {
+ assert(name.startswith(".zdebug"));
if (!toStringRef(rawData).startswith("ZLIB")) {
error(toString(this) + ": corrupted compressed section header");
return;
@@ -242,11 +236,10 @@ template <typename ELFT> void InputSectionBase::parseCompressedHeader() {
// Restore the original section name.
// (e.g. ".zdebug_info" -> ".debug_info")
- name = saver.save("." + name.substr(2));
+ name = saver().save("." + name.substr(2));
return;
}
- assert(flags & SHF_COMPRESSED);
flags &= ~(uint64_t)SHF_COMPRESSED;
// New-style header
@@ -274,7 +267,6 @@ InputSection *InputSectionBase::getLinkOrderDep() const {
}
// Find a function symbol that encloses a given location.
-template <class ELFT>
Defined *InputSectionBase::getEnclosingFunction(uint64_t offset) {
for (Symbol *b : file->getSymbols())
if (Defined *d = dyn_cast<Defined>(b))
@@ -285,20 +277,19 @@ Defined *InputSectionBase::getEnclosingFunction(uint64_t offset) {
}
// Returns an object file location string. Used to construct an error message.
-template <class ELFT>
std::string InputSectionBase::getLocation(uint64_t offset) {
std::string secAndOffset =
(name + "+0x" + Twine::utohexstr(offset) + ")").str();
// We don't have file for synthetic sections.
- if (getFile<ELFT>() == nullptr)
+ if (file == nullptr)
return (config->outputFile + ":(" + secAndOffset).str();
- std::string file = toString(getFile<ELFT>());
- if (Defined *d = getEnclosingFunction<ELFT>(offset))
- return file + ":(function " + toString(*d) + ": " + secAndOffset;
+ std::string filename = toString(file);
+ if (Defined *d = getEnclosingFunction(offset))
+ return filename + ":(function " + toString(*d) + ": " + secAndOffset;
- return file + ":(" + secAndOffset;
+ return filename + ":(" + secAndOffset;
}
// This function is intended to be used for constructing an error message.
@@ -593,7 +584,7 @@ static Relocation *getRISCVPCRelHi20(const Symbol *sym, uint64_t addend) {
InputSection *isec = cast<InputSection>(d->section);
if (addend != 0)
- warn("Non-zero addend in R_RISCV_PCREL_LO12 relocation to " +
+ warn("non-zero addend in R_RISCV_PCREL_LO12 relocation to " +
isec->getObjMsg(d->value) + " is ignored");
// Relocations are sorted by offset, so we can use std::equal_range to do
@@ -830,14 +821,13 @@ uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type,
case R_SIZE:
return sym.getSize() + a;
case R_TLSDESC:
- return in.got->getGlobalDynAddr(sym) + a;
+ return in.got->getTlsDescAddr(sym) + a;
case R_TLSDESC_PC:
- return in.got->getGlobalDynAddr(sym) + a - p;
+ return in.got->getTlsDescAddr(sym) + a - p;
case R_TLSDESC_GOTPLT:
- return in.got->getGlobalDynAddr(sym) + a - in.gotPlt->getVA();
+ return in.got->getTlsDescAddr(sym) + a - in.gotPlt->getVA();
case R_AARCH64_TLSDESC_PAGE:
- return getAArch64Page(in.got->getGlobalDynAddr(sym) + a) -
- getAArch64Page(p);
+ return getAArch64Page(in.got->getTlsDescAddr(sym) + a) - getAArch64Page(p);
case R_TLSGD_GOT:
return in.got->getGlobalDynOffset(sym) + a;
case R_TLSGD_GOTPLT:
@@ -898,38 +888,6 @@ void InputSection::relocateNonAlloc(uint8_t *buf, ArrayRef<RelTy> rels) {
if (expr == R_NONE)
continue;
- if (expr == R_SIZE) {
- target.relocateNoSym(bufLoc, type,
- SignExtend64<bits>(sym.getSize() + addend));
- continue;
- }
-
- // R_ABS/R_DTPREL and some other relocations can be used from non-SHF_ALLOC
- // sections.
- if (expr != R_ABS && expr != R_DTPREL && expr != R_GOTPLTREL &&
- expr != R_RISCV_ADD) {
- std::string msg = getLocation<ELFT>(offset) +
- ": has non-ABS relocation " + toString(type) +
- " against symbol '" + toString(sym) + "'";
- if (expr != R_PC && expr != R_ARM_PCA) {
- error(msg);
- return;
- }
-
- // If the control reaches here, we found a PC-relative relocation in a
- // non-ALLOC section. Since non-ALLOC section is not loaded into memory
- // at runtime, the notion of PC-relative doesn't make sense here. So,
- // this is a usage error. However, GNU linkers historically accept such
- // relocations without any errors and relocate them as if they were at
- // address 0. For bug-compatibilty, we accept them with warnings. We
- // know Steel Bank Common Lisp as of 2018 have this bug.
- warn(msg);
- target.relocateNoSym(
- bufLoc, type,
- SignExtend64<bits>(sym.getVA(addend - offset - outSecOff)));
- continue;
- }
-
if (tombstone ||
(isDebug && (type == target.symbolicRel || expr == R_DTPREL))) {
// Resolve relocations in .debug_* referencing (discarded symbols or ICF
@@ -969,7 +927,44 @@ void InputSection::relocateNonAlloc(uint8_t *buf, ArrayRef<RelTy> rels) {
continue;
}
}
- target.relocateNoSym(bufLoc, type, SignExtend64<bits>(sym.getVA(addend)));
+
+ // For a relocatable link, only tombstone values are applied.
+ if (config->relocatable)
+ continue;
+
+ if (expr == R_SIZE) {
+ target.relocateNoSym(bufLoc, type,
+ SignExtend64<bits>(sym.getSize() + addend));
+ continue;
+ }
+
+ // R_ABS/R_DTPREL and some other relocations can be used from non-SHF_ALLOC
+ // sections.
+ if (expr == R_ABS || expr == R_DTPREL || expr == R_GOTPLTREL ||
+ expr == R_RISCV_ADD) {
+ target.relocateNoSym(bufLoc, type, SignExtend64<bits>(sym.getVA(addend)));
+ continue;
+ }
+
+ std::string msg = getLocation(offset) + ": has non-ABS relocation " +
+ toString(type) + " against symbol '" + toString(sym) +
+ "'";
+ if (expr != R_PC && expr != R_ARM_PCA) {
+ error(msg);
+ return;
+ }
+
+ // If the control reaches here, we found a PC-relative relocation in a
+ // non-ALLOC section. Since a non-ALLOC section is not loaded into memory
+ // at runtime, the notion of PC-relative doesn't make sense here. So,
+ // this is a usage error. However, GNU linkers historically accept such
+ // relocations without any errors and relocate them as if they were at
+ // address 0. For bug-compatibility, we accept them with warnings. We
+ // know Steel Bank Common Lisp as of 2018 has this bug.
+ warn(msg);
+ target.relocateNoSym(
+ bufLoc, type,
+ SignExtend64<bits>(sym.getVA(addend - offset - outSecOff)));
}
}
@@ -1001,15 +996,15 @@ void InputSectionBase::relocate(uint8_t *buf, uint8_t *bufEnd) {
}
auto *sec = cast<InputSection>(this);
- if (config->relocatable) {
+ if (config->relocatable)
relocateNonAllocForRelocatable(sec, buf);
- } else {
- const RelsOrRelas<ELFT> rels = sec->template relsOrRelas<ELFT>();
- if (rels.areRelocsRel())
- sec->relocateNonAlloc<ELFT>(buf, rels.rels);
- else
- sec->relocateNonAlloc<ELFT>(buf, rels.relas);
- }
+ // For a relocatable link, also call relocateNonAlloc() to rewrite applicable
+ // locations with tombstone values.
+ const RelsOrRelas<ELFT> rels = sec->template relsOrRelas<ELFT>();
+ if (rels.areRelocsRel())
+ sec->relocateNonAlloc<ELFT>(buf, rels.rels);
+ else
+ sec->relocateNonAlloc<ELFT>(buf, rels.relas);
}
void InputSectionBase::relocateAlloc(uint8_t *buf, uint8_t *bufEnd) {
@@ -1017,25 +1012,35 @@ void InputSectionBase::relocateAlloc(uint8_t *buf, uint8_t *bufEnd) {
const unsigned bits = config->wordsize * 8;
const TargetInfo &target = *elf::target;
uint64_t lastPPCRelaxedRelocOff = UINT64_C(-1);
-
- for (const Relocation &rel : relocations) {
+ AArch64Relaxer aarch64relaxer(relocations);
+ for (size_t i = 0, size = relocations.size(); i != size; ++i) {
+ const Relocation &rel = relocations[i];
if (rel.expr == R_NONE)
continue;
uint64_t offset = rel.offset;
uint8_t *bufLoc = buf + offset;
- uint64_t addrLoc = getOutputSection()->addr + offset;
+ uint64_t secAddr = getOutputSection()->addr;
if (auto *sec = dyn_cast<InputSection>(this))
- addrLoc += sec->outSecOff;
+ secAddr += sec->outSecOff;
+ const uint64_t addrLoc = secAddr + offset;
const uint64_t targetVA =
SignExtend64(getRelocTargetVA(file, rel.type, rel.addend, addrLoc,
- *rel.sym, rel.expr), bits);
-
+ *rel.sym, rel.expr),
+ bits);
switch (rel.expr) {
case R_RELAX_GOT_PC:
case R_RELAX_GOT_PC_NOPIC:
target.relaxGot(bufLoc, rel, targetVA);
break;
+ case R_AARCH64_GOT_PAGE_PC:
+ if (i + 1 < size && aarch64relaxer.tryRelaxAdrpLdr(
+ rel, relocations[i + 1], secAddr, buf)) {
+ ++i;
+ continue;
+ }
+ target.relocate(bufLoc, rel, targetVA);
+ break;
case R_PPC64_RELAX_GOT_PC: {
// The R_PPC64_PCREL_OPT relocation must appear immediately after
// R_PPC64_GOT_PCREL34 in the relocations table at the same offset.
@@ -1113,12 +1118,9 @@ void InputSectionBase::relocateAlloc(uint8_t *buf, uint8_t *bufEnd) {
// a jmp insn must be modified to shrink the jmp insn or to flip the jmp
// insn. This is primarily used to relax and optimize jumps created with
// basic block sections.
- if (isa<InputSection>(this)) {
- for (const JumpInstrMod &jumpMod : jumpInstrMods) {
- uint64_t offset = jumpMod.offset;
- uint8_t *bufLoc = buf + offset;
- target.applyJumpInstrMod(bufLoc, jumpMod.original, jumpMod.size);
- }
+ if (jumpInstrMod) {
+ target.applyJumpInstrMod(buf + jumpInstrMod->offset, jumpInstrMod->original,
+ jumpInstrMod->size);
}
}
@@ -1132,7 +1134,7 @@ static void switchMorestackCallsToMorestackNonSplit(
// __morestack_non_split.
Symbol *moreStackNonSplit = symtab->find("__morestack_non_split");
if (!moreStackNonSplit) {
- error("Mixing split-stack objects requires a definition of "
+ error("mixing split-stack objects requires a definition of "
"__morestack_non_split");
return;
}
@@ -1178,11 +1180,6 @@ void InputSectionBase::adjustSplitStackFunctionPrologues(uint8_t *buf,
std::vector<Relocation *> morestackCalls;
for (Relocation &rel : relocations) {
- // Local symbols can't possibly be cross-calls, and should have been
- // resolved long before this line.
- if (rel.sym->isLocal())
- continue;
-
// Ignore calls into the split-stack api.
if (rel.sym->getName().startswith("__morestack")) {
if (rel.sym->getName().equals("__morestack"))
@@ -1209,7 +1206,7 @@ void InputSectionBase::adjustSplitStackFunctionPrologues(uint8_t *buf,
if (enclosingPrologueAttempted(rel.offset, prologues))
continue;
- if (Defined *f = getEnclosingFunction<ELFT>(rel.offset)) {
+ if (Defined *f = getEnclosingFunction(rel.offset)) {
prologues.insert(f);
if (target->adjustPrologueForCrossSplitStack(buf + f->value, end,
f->stOther))
@@ -1227,7 +1224,7 @@ void InputSectionBase::adjustSplitStackFunctionPrologues(uint8_t *buf,
template <class ELFT> void InputSection::writeTo(uint8_t *buf) {
if (auto *s = dyn_cast<SyntheticSection>(this)) {
- s->writeTo(buf + outSecOff);
+ s->writeTo(buf);
return;
}
@@ -1236,17 +1233,17 @@ template <class ELFT> void InputSection::writeTo(uint8_t *buf) {
// If -r or --emit-relocs is given, then an InputSection
// may be a relocation section.
if (LLVM_UNLIKELY(type == SHT_RELA)) {
- copyRelocations<ELFT>(buf + outSecOff, getDataAs<typename ELFT::Rela>());
+ copyRelocations<ELFT>(buf, getDataAs<typename ELFT::Rela>());
return;
}
if (LLVM_UNLIKELY(type == SHT_REL)) {
- copyRelocations<ELFT>(buf + outSecOff, getDataAs<typename ELFT::Rel>());
+ copyRelocations<ELFT>(buf, getDataAs<typename ELFT::Rel>());
return;
}
// If -r is given, we may have a SHT_GROUP section.
if (LLVM_UNLIKELY(type == SHT_GROUP)) {
- copyShtGroup<ELFT>(buf + outSecOff);
+ copyShtGroup<ELFT>(buf);
return;
}
@@ -1254,20 +1251,18 @@ template <class ELFT> void InputSection::writeTo(uint8_t *buf) {
// to the buffer.
if (uncompressedSize >= 0) {
size_t size = uncompressedSize;
- if (Error e = zlib::uncompress(toStringRef(rawData),
- (char *)(buf + outSecOff), size))
+ if (Error e = zlib::uncompress(toStringRef(rawData), (char *)buf, size))
fatal(toString(this) +
": uncompress failed: " + llvm::toString(std::move(e)));
- uint8_t *bufEnd = buf + outSecOff + size;
- relocate<ELFT>(buf + outSecOff, bufEnd);
+ uint8_t *bufEnd = buf + size;
+ relocate<ELFT>(buf, bufEnd);
return;
}
// Copy section contents from source object file to output file
// and then apply relocations.
- memcpy(buf + outSecOff, data().data(), data().size());
- uint8_t *bufEnd = buf + outSecOff + data().size();
- relocate<ELFT>(buf + outSecOff, bufEnd);
+ memcpy(buf, rawData.data(), rawData.size());
+ relocate<ELFT>(buf, buf + rawData.size());
}
void InputSection::replace(InputSection *other) {
@@ -1320,28 +1315,47 @@ static unsigned getReloc(IntTy begin, IntTy size, const ArrayRef<RelTy> &rels,
// This function splits an input section into records and returns them.
template <class ELFT> void EhInputSection::split() {
const RelsOrRelas<ELFT> rels = relsOrRelas<ELFT>();
- if (rels.areRelocsRel())
- split<ELFT>(rels.rels);
- else
- split<ELFT>(rels.relas);
+ // getReloc expects the relocations to be sorted by r_offset. See the comment
+ // in scanRelocs.
+ if (rels.areRelocsRel()) {
+ SmallVector<typename ELFT::Rel, 0> storage;
+ split<ELFT>(sortRels(rels.rels, storage));
+ } else {
+ SmallVector<typename ELFT::Rela, 0> storage;
+ split<ELFT>(sortRels(rels.relas, storage));
+ }
}
template <class ELFT, class RelTy>
void EhInputSection::split(ArrayRef<RelTy> rels) {
- // getReloc expects the relocations to be sorted by r_offset. See the comment
- // in scanRelocs.
- SmallVector<RelTy, 0> storage;
- rels = sortRels(rels, storage);
-
+ ArrayRef<uint8_t> d = rawData;
+ const char *msg = nullptr;
unsigned relI = 0;
- for (size_t off = 0, end = data().size(); off != end;) {
- size_t size = readEhRecordSize(this, off);
- pieces.emplace_back(off, this, size, getReloc(off, size, rels, relI));
- // The empty record is the end marker.
- if (size == 4)
+ while (!d.empty()) {
+ if (d.size() < 4) {
+ msg = "CIE/FDE too small";
break;
- off += size;
+ }
+ uint64_t size = endian::read32<ELFT::TargetEndianness>(d.data());
+ // If it is 0xFFFFFFFF, the next 8 bytes contain the size instead,
+ // but we do not support that format yet.
+ if (size == UINT32_MAX) {
+ msg = "CIE/FDE too large";
+ break;
+ }
+ size += 4;
+ if (size > d.size()) {
+ msg = "CIE/FDE ends past the end of the section";
+ break;
+ }
+
+ uint64_t off = d.data() - rawData.data();
+ pieces.emplace_back(off, this, size, getReloc(off, size, rels, relI));
+ d = d.slice(size);
}
+ if (msg)
+ errorOrWarn("corrupted .eh_frame: " + Twine(msg) + "\n>>> defined in " +
+ getObjMsg(d.data() - rawData.data()));
}
static size_t findNull(StringRef s, size_t entSize) {
@@ -1451,11 +1465,6 @@ template InputSection::InputSection(ObjFile<ELF64LE> &, const ELF64LE::Shdr &,
template InputSection::InputSection(ObjFile<ELF64BE> &, const ELF64BE::Shdr &,
StringRef);
-template std::string InputSectionBase::getLocation<ELF32LE>(uint64_t);
-template std::string InputSectionBase::getLocation<ELF32BE>(uint64_t);
-template std::string InputSectionBase::getLocation<ELF64LE>(uint64_t);
-template std::string InputSectionBase::getLocation<ELF64BE>(uint64_t);
-
template void InputSection::writeTo<ELF32LE>(uint8_t *);
template void InputSection::writeTo<ELF32BE>(uint8_t *);
template void InputSection::writeTo<ELF64LE>(uint8_t *);
diff --git a/contrib/llvm-project/lld/ELF/InputSection.h b/contrib/llvm-project/lld/ELF/InputSection.h
index 5319830b5d80..d7dea9d2587a 100644
--- a/contrib/llvm-project/lld/ELF/InputSection.h
+++ b/contrib/llvm-project/lld/ELF/InputSection.h
@@ -130,13 +130,16 @@ public:
// one or two jump instructions at the end that could be relaxed to a smaller
// instruction. The members below help trimming the trailing jump instruction
// and shrinking a section.
- unsigned bytesDropped = 0;
+ uint8_t bytesDropped = 0;
// Whether the section needs to be padded with a NOP filler due to
// deleteFallThruJmpInsn.
bool nopFiller = false;
- void drop_back(uint64_t num) { bytesDropped += num; }
+ void drop_back(unsigned num) {
+ assert(bytesDropped + num < 256);
+ bytesDropped += num;
+ }
void push_back(uint64_t num) {
assert(bytesDropped >= num);
@@ -156,8 +159,6 @@ public:
return rawData;
}
- uint64_t getOffsetInFile() const;
-
// Input sections are part of an output section. Special sections
// like .eh_frame and merge sections are first combined into a
// synthetic section that is then added to an output section. In all
@@ -180,11 +181,10 @@ public:
// Get the function symbol that encloses this offset from within the
// section.
- template <class ELFT>
Defined *getEnclosingFunction(uint64_t offset);
// Returns a source location string. Used to construct an error message.
- template <class ELFT> std::string getLocation(uint64_t offset);
+ std::string getLocation(uint64_t offset);
std::string getSrcMsg(const Symbol &sym, uint64_t offset);
std::string getObjMsg(uint64_t offset);
@@ -206,7 +206,7 @@ public:
// block sections are enabled. Basic block sections creates opportunities to
// relax jump instructions at basic block boundaries after reordering the
// basic blocks.
- SmallVector<JumpInstrMod, 0> jumpInstrMods;
+ JumpInstrMod *jumpInstrMod = nullptr;
// A function compiled with -fsplit-stack calling a function
// compiled without -fsplit-stack needs its prologue adjusted. Find
@@ -324,7 +324,7 @@ public:
// Splittable sections are handled as a sequence of data
// rather than a single large blob of data.
- std::vector<EhSectionPiece> pieces;
+ SmallVector<EhSectionPiece, 0> pieces;
SyntheticSection *getParent() const;
};
@@ -379,11 +379,7 @@ private:
template <class ELFT> void copyShtGroup(uint8_t *buf);
};
-#ifdef _WIN32
-static_assert(sizeof(InputSection) <= 184, "InputSection is too big");
-#else
-static_assert(sizeof(InputSection) <= 176, "InputSection is too big");
-#endif
+static_assert(sizeof(InputSection) <= 160, "InputSection is too big");
inline bool isDebugSection(const InputSectionBase &sec) {
return (sec.flags & llvm::ELF::SHF_ALLOC) == 0 &&
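
The tightened static_assert above holds because the hunks in this header shrink the hot InputSection members: bytesDropped goes from unsigned to uint8_t and the jumpInstrMods vector becomes a single pointer. A sketch of the same size-guard idiom on a made-up struct (SectionSketch is not lld code; the 32-byte cap is arbitrary):

#include <cstdint>

struct SectionSketch {
  uint64_t flags;
  uint32_t alignment;
  uint8_t bytesDropped; // one byte is enough for the jump-relaxation counter
  bool nopFiller;
  void *jumpInstrMod;   // a single pointer instead of a vector of entries
};

// Fails the build if a future field silently grows every section object in a
// large link, mirroring the sizeof(InputSection) assert in this header.
static_assert(sizeof(SectionSketch) <= 32, "SectionSketch grew unexpectedly");
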
diff --git a/contrib/llvm-project/lld/ELF/LTO.cpp b/contrib/llvm-project/lld/ELF/LTO.cpp
index 65b943c4a54c..5b7ac6a5e925 100644
--- a/contrib/llvm-project/lld/ELF/LTO.cpp
+++ b/contrib/llvm-project/lld/ELF/LTO.cpp
@@ -207,6 +207,8 @@ BitcodeCompiler::BitcodeCompiler() {
if (bitcodeFiles.empty())
return;
for (Symbol *sym : symtab->symbols()) {
+ if (sym->isPlaceholder())
+ continue;
StringRef s = sym->getName();
for (StringRef prefix : {"__start_", "__stop_"})
if (s.startswith(prefix))
diff --git a/contrib/llvm-project/lld/ELF/LinkerScript.cpp b/contrib/llvm-project/lld/ELF/LinkerScript.cpp
index e8f2ce4fdf1f..bfb583453735 100644
--- a/contrib/llvm-project/lld/ELF/LinkerScript.cpp
+++ b/contrib/llvm-project/lld/ELF/LinkerScript.cpp
@@ -19,7 +19,7 @@
#include "SyntheticSections.h"
#include "Target.h"
#include "Writer.h"
-#include "lld/Common/Memory.h"
+#include "lld/Common/CommonLinkerContext.h"
#include "lld/Common/Strings.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
@@ -64,8 +64,8 @@ static StringRef getOutputSectionName(const InputSectionBase *s) {
if (InputSectionBase *rel = isec->getRelocatedSection()) {
OutputSection *out = rel->getOutputSection();
if (s->type == SHT_RELA)
- return saver.save(".rela" + out->name);
- return saver.save(".rel" + out->name);
+ return saver().save(".rela" + out->name);
+ return saver().save(".rel" + out->name);
}
}
@@ -135,7 +135,7 @@ uint64_t ExprValue::getSectionOffset() const {
OutputSection *LinkerScript::createOutputSection(StringRef name,
StringRef location) {
- OutputSection *&secRef = nameToOutputSection[name];
+ OutputSection *&secRef = nameToOutputSection[CachedHashStringRef(name)];
OutputSection *sec;
if (secRef && secRef->location.empty()) {
// There was a forward reference.
@@ -150,7 +150,7 @@ OutputSection *LinkerScript::createOutputSection(StringRef name,
}
OutputSection *LinkerScript::getOrCreateOutputSection(StringRef name) {
- OutputSection *&cmdRef = nameToOutputSection[name];
+ OutputSection *&cmdRef = nameToOutputSection[CachedHashStringRef(name)];
if (!cmdRef)
cmdRef = make<OutputSection>(name, SHT_PROGBITS, 0);
return cmdRef;
@@ -270,8 +270,8 @@ using SymbolAssignmentMap =
// Collect section/value pairs of linker-script-defined symbols. This is used to
// check whether symbol values converge.
-static SymbolAssignmentMap getSymbolAssignmentValues(
- const std::vector<SectionCommand *> &sectionCommands) {
+static SymbolAssignmentMap
+getSymbolAssignmentValues(ArrayRef<SectionCommand *> sectionCommands) {
SymbolAssignmentMap ret;
for (SectionCommand *cmd : sectionCommands) {
if (auto *assign = dyn_cast<SymbolAssignment>(cmd)) {
@@ -306,7 +306,7 @@ getChangedSymbolAssignment(const SymbolAssignmentMap &oldValues) {
// Process INSERT [AFTER|BEFORE] commands. For each command, we move the
// specified output section to the designated place.
void LinkerScript::processInsertCommands() {
- std::vector<OutputSection *> moves;
+ SmallVector<OutputSection *, 0> moves;
for (const InsertCommand &cmd : insertCommands) {
for (StringRef name : cmd.names) {
// If base is empty, it may have been discarded by
@@ -486,11 +486,11 @@ static void sortInputSections(MutableArrayRef<InputSectionBase *> vec,
}
// Compute and remember which sections the InputSectionDescription matches.
-std::vector<InputSectionBase *>
+SmallVector<InputSectionBase *, 0>
LinkerScript::computeInputSections(const InputSectionDescription *cmd,
ArrayRef<InputSectionBase *> sections) {
- std::vector<InputSectionBase *> ret;
- std::vector<size_t> indexes;
+ SmallVector<InputSectionBase *, 0> ret;
+ SmallVector<size_t, 0> indexes;
DenseSet<size_t> seen;
auto sortByPositionThenCommandLine = [&](size_t begin, size_t end) {
llvm::sort(MutableArrayRef<size_t>(indexes).slice(begin, end - begin));
@@ -561,17 +561,9 @@ LinkerScript::computeInputSections(const InputSectionDescription *cmd,
}
void LinkerScript::discard(InputSectionBase &s) {
- if (&s == in.shStrTab || &s == mainPart->relrDyn)
+ if (&s == in.shStrTab.get())
error("discarding " + s.name + " section is not allowed");
- // You can discard .hash and .gnu.hash sections by linker scripts. Since
- // they are synthesized sections, we need to handle them differently than
- // other regular sections.
- if (&s == mainPart->gnuHashTab)
- mainPart->gnuHashTab = nullptr;
- if (&s == mainPart->hashTab)
- mainPart->hashTab = nullptr;
-
s.markDead();
s.parent = nullptr;
for (InputSection *sec : s.dependentSections)
@@ -582,21 +574,19 @@ void LinkerScript::discardSynthetic(OutputSection &outCmd) {
for (Partition &part : partitions) {
if (!part.armExidx || !part.armExidx->isLive())
continue;
- std::vector<InputSectionBase *> secs(part.armExidx->exidxSections.begin(),
- part.armExidx->exidxSections.end());
+ SmallVector<InputSectionBase *, 0> secs(
+ part.armExidx->exidxSections.begin(),
+ part.armExidx->exidxSections.end());
for (SectionCommand *cmd : outCmd.commands)
- if (auto *isd = dyn_cast<InputSectionDescription>(cmd)) {
- std::vector<InputSectionBase *> matches =
- computeInputSections(isd, secs);
- for (InputSectionBase *s : matches)
+ if (auto *isd = dyn_cast<InputSectionDescription>(cmd))
+ for (InputSectionBase *s : computeInputSections(isd, secs))
discard(*s);
- }
}
}
-std::vector<InputSectionBase *>
+SmallVector<InputSectionBase *, 0>
LinkerScript::createInputSectionList(OutputSection &outCmd) {
- std::vector<InputSectionBase *> ret;
+ SmallVector<InputSectionBase *, 0> ret;
for (SectionCommand *cmd : outCmd.commands) {
if (auto *isd = dyn_cast<InputSectionDescription>(cmd)) {
@@ -612,7 +602,7 @@ LinkerScript::createInputSectionList(OutputSection &outCmd) {
// Create output sections described by SECTIONS commands.
void LinkerScript::processSectionCommands() {
auto process = [this](OutputSection *osec) {
- std::vector<InputSectionBase *> v = createInputSectionList(*osec);
+ SmallVector<InputSectionBase *, 0> v = createInputSectionList(*osec);
// The output section name `/DISCARD/' is special.
// Any input section assigned to it is discarded.
@@ -656,14 +646,16 @@ void LinkerScript::processSectionCommands() {
// Process OVERWRITE_SECTIONS first so that it can overwrite the main script
// or orphans.
- DenseMap<StringRef, OutputSection *> map;
+ DenseMap<CachedHashStringRef, OutputSection *> map;
size_t i = 0;
for (OutputSection *osec : overwriteSections)
- if (process(osec) && !map.try_emplace(osec->name, osec).second)
+ if (process(osec) &&
+ !map.try_emplace(CachedHashStringRef(osec->name), osec).second)
warn("OVERWRITE_SECTIONS specifies duplicate " + osec->name);
for (SectionCommand *&base : sectionCommands)
if (auto *osec = dyn_cast<OutputSection>(base)) {
- if (OutputSection *overwrite = map.lookup(osec->name)) {
+ if (OutputSection *overwrite =
+ map.lookup(CachedHashStringRef(osec->name))) {
log(overwrite->location + " overwrites " + osec->name);
overwrite->sectionIndex = i++;
base = overwrite;
@@ -830,10 +822,9 @@ addInputSec(StringMap<TinyPtrVector<OutputSection *>> &map,
// Add sections that didn't match any sections command.
void LinkerScript::addOrphanSections() {
StringMap<TinyPtrVector<OutputSection *>> map;
- std::vector<OutputSection *> v;
+ SmallVector<OutputSection *, 0> v;
- std::function<void(InputSectionBase *)> add;
- add = [&](InputSectionBase *s) {
+ auto add = [&](InputSectionBase *s) {
if (s->isLive() && !s->parent) {
orphanSections.push_back(s);
@@ -849,11 +840,6 @@ void LinkerScript::addOrphanSections() {
s->getOutputSection()->sectionIndex == UINT32_MAX);
}
}
-
- if (config->relocatable)
- for (InputSectionBase *depSec : s->dependentSections)
- if (depSec->flags & SHF_LINK_ORDER)
- add(depSec);
};
// For further --emit-reloc handling code we need target output section
@@ -872,6 +858,10 @@ void LinkerScript::addOrphanSections() {
if (auto *relIS = dyn_cast_or_null<InputSectionBase>(rel->parent))
add(relIS);
add(isec);
+ if (config->relocatable)
+ for (InputSectionBase *depSec : isec->dependentSections)
+ if (depSec->flags & SHF_LINK_ORDER)
+ add(depSec);
}
// If no SECTIONS command was given, we should insert sections commands
@@ -1113,7 +1103,7 @@ bool LinkerScript::isDiscarded(const OutputSection *sec) const {
}
static void maybePropagatePhdrs(OutputSection &sec,
- std::vector<StringRef> &phdrs) {
+ SmallVector<StringRef, 0> &phdrs) {
if (sec.phdrs.empty()) {
// To match the bfd linker script behaviour, only propagate program
// headers to sections that are allocated.
@@ -1147,7 +1137,7 @@ void LinkerScript::adjustSectionsBeforeSorting() {
// the previous sections. Only a few flags are needed to keep the impact low.
uint64_t flags = SHF_ALLOC;
- std::vector<StringRef> defPhdrs;
+ SmallVector<StringRef, 0> defPhdrs;
for (SectionCommand *&cmd : sectionCommands) {
auto *sec = dyn_cast<OutputSection>(cmd);
if (!sec)
@@ -1218,7 +1208,7 @@ void LinkerScript::adjustSectionsAfterSorting() {
// Below is an example of such linker script:
// PHDRS { seg PT_LOAD; }
// SECTIONS { .aaa : { *(.aaa) } }
- std::vector<StringRef> defPhdrs;
+ SmallVector<StringRef, 0> defPhdrs;
auto firstPtLoad = llvm::find_if(phdrsCommands, [](const PhdrsCommand &cmd) {
return cmd.type == PT_LOAD;
});
@@ -1248,7 +1238,7 @@ static uint64_t computeBase(uint64_t min, bool allocateHeaders) {
// We check if the headers fit below the first allocated section. If there isn't
// enough space for these sections, we'll remove them from the PT_LOAD segment,
// and we'll also remove the PT_PHDR segment.
-void LinkerScript::allocateHeaders(std::vector<PhdrEntry *> &phdrs) {
+void LinkerScript::allocateHeaders(SmallVector<PhdrEntry *, 0> &phdrs) {
uint64_t min = std::numeric_limits<uint64_t>::max();
for (OutputSection *sec : outputSections)
if (sec->flags & SHF_ALLOC)
@@ -1332,8 +1322,8 @@ const Defined *LinkerScript::assignAddresses() {
}
// Creates program headers as instructed by PHDRS linker script command.
-std::vector<PhdrEntry *> LinkerScript::createPhdrs() {
- std::vector<PhdrEntry *> ret;
+SmallVector<PhdrEntry *, 0> LinkerScript::createPhdrs() {
+ SmallVector<PhdrEntry *, 0> ret;
// Process PHDRS and FILEHDR keywords because they are not
// real output sections and cannot be added in the following loop.
@@ -1415,8 +1405,8 @@ static Optional<size_t> getPhdrIndex(ArrayRef<PhdrsCommand> vec,
// Returns indices of ELF headers containing specific section. Each index is a
// zero based number of ELF header listed within PHDRS {} script block.
-std::vector<size_t> LinkerScript::getPhdrIndices(OutputSection *cmd) {
- std::vector<size_t> ret;
+SmallVector<size_t, 0> LinkerScript::getPhdrIndices(OutputSection *cmd) {
+ SmallVector<size_t, 0> ret;
for (StringRef s : cmd->phdrs) {
if (Optional<size_t> idx = getPhdrIndex(phdrsCommands, s))
diff --git a/contrib/llvm-project/lld/ELF/LinkerScript.h b/contrib/llvm-project/lld/ELF/LinkerScript.h
index f385c8320978..d2a6f5e9acb1 100644
--- a/contrib/llvm-project/lld/ELF/LinkerScript.h
+++ b/contrib/llvm-project/lld/ELF/LinkerScript.h
@@ -203,20 +203,20 @@ public:
// Input sections that match at least one of the SectionPatterns
// will be associated with this InputSectionDescription.
- std::vector<SectionPattern> sectionPatterns;
+ SmallVector<SectionPattern, 0> sectionPatterns;
// Includes InputSections and MergeInputSections. Used temporarily during
// assignment of input sections to output sections.
- std::vector<InputSectionBase *> sectionBases;
+ SmallVector<InputSectionBase *, 0> sectionBases;
// Used after the finalizeInputSections() pass. MergeInputSections have been
// merged into MergeSyntheticSections.
- std::vector<InputSection *> sections;
+ SmallVector<InputSection *, 0> sections;
// Temporary record of synthetic ThunkSection instances and the pass that
// they were created in. This is used to insert newly created ThunkSections
// into Sections at the end of a createThunks() pass.
- std::vector<std::pair<ThunkSection *, uint32_t>> thunkSections;
+ SmallVector<std::pair<ThunkSection *, uint32_t>, 0> thunkSections;
// SectionPatterns can be filtered with the INPUT_SECTION_FLAGS command.
uint64_t withFlags;
@@ -244,7 +244,7 @@ struct ByteCommand : SectionCommand {
};
struct InsertCommand {
- std::vector<StringRef> names;
+ SmallVector<StringRef, 0> names;
bool isAfter;
StringRef where;
};
@@ -271,7 +271,8 @@ class LinkerScript final {
uint64_t tbssAddr = 0;
};
- llvm::DenseMap<StringRef, OutputSection *> nameToOutputSection;
+ llvm::DenseMap<llvm::CachedHashStringRef, OutputSection *>
+ nameToOutputSection;
void addSymbol(SymbolAssignment *cmd);
void assignSymbol(SymbolAssignment *cmd, bool inSec);
@@ -279,15 +280,15 @@ class LinkerScript final {
void expandOutputSection(uint64_t size);
void expandMemoryRegions(uint64_t size);
- std::vector<InputSectionBase *>
+ SmallVector<InputSectionBase *, 0>
computeInputSections(const InputSectionDescription *,
ArrayRef<InputSectionBase *>);
- std::vector<InputSectionBase *> createInputSectionList(OutputSection &cmd);
+ SmallVector<InputSectionBase *, 0> createInputSectionList(OutputSection &cmd);
void discardSynthetic(OutputSection &);
- std::vector<size_t> getPhdrIndices(OutputSection *sec);
+ SmallVector<size_t, 0> getPhdrIndices(OutputSection *sec);
std::pair<MemoryRegion *, MemoryRegion *>
findMemoryRegion(OutputSection *sec, MemoryRegion *hint);
@@ -321,12 +322,12 @@ public:
void adjustSectionsBeforeSorting();
void adjustSectionsAfterSorting();
- std::vector<PhdrEntry *> createPhdrs();
+ SmallVector<PhdrEntry *, 0> createPhdrs();
bool needsInterpSection();
bool shouldKeep(InputSectionBase *s);
const Defined *assignAddresses();
- void allocateHeaders(std::vector<PhdrEntry *> &phdrs);
+ void allocateHeaders(SmallVector<PhdrEntry *, 0> &phdrs);
void processSectionCommands();
void processSymbolAssignments();
void declareSymbols();
@@ -337,33 +338,33 @@ public:
void processInsertCommands();
// SECTIONS command list.
- std::vector<SectionCommand *> sectionCommands;
+ SmallVector<SectionCommand *, 0> sectionCommands;
// PHDRS command list.
- std::vector<PhdrsCommand> phdrsCommands;
+ SmallVector<PhdrsCommand, 0> phdrsCommands;
bool hasSectionsCommand = false;
bool errorOnMissingSection = false;
// List of section patterns specified with KEEP commands. They will
// be kept even if they are unused and --gc-sections is specified.
- std::vector<InputSectionDescription *> keptSections;
+ SmallVector<InputSectionDescription *, 0> keptSections;
// A map from memory region name to a memory region descriptor.
llvm::MapVector<llvm::StringRef, MemoryRegion *> memoryRegions;
// A list of symbols referenced by the script.
- std::vector<llvm::StringRef> referencedSymbols;
+ SmallVector<llvm::StringRef, 0> referencedSymbols;
// Used to implement INSERT [AFTER|BEFORE]. Contains output sections that need
// to be reordered.
- std::vector<InsertCommand> insertCommands;
+ SmallVector<InsertCommand, 0> insertCommands;
// OutputSections specified by OVERWRITE_SECTIONS.
- std::vector<OutputSection *> overwriteSections;
+ SmallVector<OutputSection *, 0> overwriteSections;
// Sections that will be warned/errored by --orphan-handling.
- std::vector<const InputSectionBase *> orphanSections;
+ SmallVector<const InputSectionBase *, 0> orphanSections;
};
extern std::unique_ptr<LinkerScript> script;
diff --git a/contrib/llvm-project/lld/ELF/MapFile.cpp b/contrib/llvm-project/lld/ELF/MapFile.cpp
index 1998192bfba6..c7d8967358ad 100644
--- a/contrib/llvm-project/lld/ELF/MapFile.cpp
+++ b/contrib/llvm-project/lld/ELF/MapFile.cpp
@@ -37,7 +37,8 @@ using namespace llvm::object;
using namespace lld;
using namespace lld::elf;
-using SymbolMapTy = DenseMap<const SectionBase *, SmallVector<Defined *, 4>>;
+using SymbolMapTy = DenseMap<const SectionBase *,
+ SmallVector<std::pair<Defined *, uint64_t>, 0>>;
static constexpr char indent8[] = " "; // 8 spaces
static constexpr char indent16[] = " "; // 16 spaces
@@ -67,7 +68,7 @@ static std::vector<Defined *> getSymbols() {
static SymbolMapTy getSectionSyms(ArrayRef<Defined *> syms) {
SymbolMapTy ret;
for (Defined *dr : syms)
- ret[dr->section].push_back(dr);
+ ret[dr->section].emplace_back(dr, dr->getVA());
// Sort symbols by address. We want to print out symbols in the
// order in the output file rather than the order they appeared
@@ -76,12 +77,11 @@ static SymbolMapTy getSectionSyms(ArrayRef<Defined *> syms) {
for (auto &it : ret) {
// Deduplicate symbols which need a canonical PLT entry/copy relocation.
set.clear();
- llvm::erase_if(it.second,
- [&](Defined *sym) { return !set.insert(sym).second; });
-
- llvm::stable_sort(it.second, [](Defined *a, Defined *b) {
- return a->getVA() < b->getVA();
+ llvm::erase_if(it.second, [&](std::pair<Defined *, uint64_t> a) {
+ return !set.insert(a.first).second;
});
+
+ llvm::stable_sort(it.second, llvm::less_second());
}
return ret;
}
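The change above caches each symbol's virtual address next to the symbol pointer so the sort comparator does not recompute getVA(), and llvm::less_second then orders the pairs by that cached key. Below is a standalone sketch of the same decorate-and-sort pattern using only the standard library; the Sym struct and names are made up for illustration and are not lld code.

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <string>
#include <utility>
#include <vector>

struct Sym {
  std::string name;
  uint64_t addr; // stands in for an expensive Defined::getVA() call
};

int main() {
  std::vector<Sym> syms = {{"b", 0x30}, {"a", 0x10}, {"c", 0x20}};

  // Decorate: pair each symbol with its cached sort key.
  std::vector<std::pair<Sym *, uint64_t>> decorated;
  for (Sym &s : syms)
    decorated.emplace_back(&s, s.addr);

  // Sort by the cached key, analogous to
  // llvm::stable_sort(it.second, llvm::less_second()).
  std::stable_sort(decorated.begin(), decorated.end(),
                   [](const auto &a, const auto &b) { return a.second < b.second; });

  assert(decorated[0].first->name == "a" && decorated[2].first->name == "b");
  return 0;
}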
@@ -91,9 +91,9 @@ static SymbolMapTy getSectionSyms(ArrayRef<Defined *> syms) {
// we do that in batch using parallel-for.
static DenseMap<Symbol *, std::string>
getSymbolStrings(ArrayRef<Defined *> syms) {
- std::vector<std::string> str(syms.size());
+ auto strs = std::make_unique<std::string[]>(syms.size());
parallelForEachN(0, syms.size(), [&](size_t i) {
- raw_string_ostream os(str[i]);
+ raw_string_ostream os(strs[i]);
OutputSection *osec = syms[i]->getOutputSection();
uint64_t vma = syms[i]->getVA();
uint64_t lma = osec ? osec->getLMA() + vma - osec->getVA(0) : 0;
@@ -103,7 +103,7 @@ getSymbolStrings(ArrayRef<Defined *> syms) {
DenseMap<Symbol *, std::string> ret;
for (size_t i = 0, e = syms.size(); i < e; ++i)
- ret[syms[i]] = std::move(str[i]);
+ ret[syms[i]] = std::move(strs[i]);
return ret;
}
@@ -184,7 +184,7 @@ static void writeMapFile(raw_fd_ostream &os) {
writeHeader(os, isec->getVA(), osec->getLMA() + isec->outSecOff,
isec->getSize(), isec->alignment);
os << indent8 << toString(isec) << '\n';
- for (Symbol *sym : sectionSyms[isec])
+ for (Symbol *sym : llvm::make_first_range(sectionSyms[isec]))
os << symStr[sym] << '\n';
}
continue;
diff --git a/contrib/llvm-project/lld/ELF/MarkLive.cpp b/contrib/llvm-project/lld/ELF/MarkLive.cpp
index b63f2beb9dcb..597c0684b8b2 100644
--- a/contrib/llvm-project/lld/ELF/MarkLive.cpp
+++ b/contrib/llvm-project/lld/ELF/MarkLive.cpp
@@ -27,7 +27,7 @@
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
-#include "lld/Common/Memory.h"
+#include "lld/Common/CommonLinkerContext.h"
#include "lld/Common/Strings.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Object/ELF.h"
@@ -68,8 +68,8 @@ private:
SmallVector<InputSection *, 0> queue;
// There are normally few input sections whose names are valid C
- // identifiers, so we just store a std::vector instead of a multimap.
- DenseMap<StringRef, std::vector<InputSectionBase *>> cNamedSections;
+ // identifiers, so we just store a SmallVector instead of a multimap.
+ DenseMap<StringRef, SmallVector<InputSectionBase *, 0>> cNamedSections;
};
} // namespace
@@ -177,11 +177,12 @@ static bool isReserved(InputSectionBase *sec) {
// SHT_NOTE sections in a group are subject to garbage collection.
return !sec->nextInSectionGroup;
default:
- // Support SHT_PROGBITS .init_array for a while
- // (https://golang.org/issue/50295).
+ // Support SHT_PROGBITS .init_array (https://golang.org/issue/50295) and
+ // .init_array.N (https://github.com/rust-lang/rust/issues/92181) for a
+ // while.
StringRef s = sec->name;
- return s == ".init" || s == ".fini" || s == ".init_array" || s == ".jcr" ||
- s.startswith(".ctors") || s.startswith(".dtors");
+ return s == ".init" || s == ".fini" || s.startswith(".init_array") ||
+ s == ".jcr" || s.startswith(".ctors") || s.startswith(".dtors");
}
}
@@ -307,8 +308,8 @@ template <class ELFT> void MarkLive<ELFT>::run() {
// As a workaround for glibc libc.a before 2.34
// (https://sourceware.org/PR27492), retain __libc_atexit and similar
// sections regardless of zStartStopGC.
- cNamedSections[saver.save("__start_" + sec->name)].push_back(sec);
- cNamedSections[saver.save("__stop_" + sec->name)].push_back(sec);
+ cNamedSections[saver().save("__start_" + sec->name)].push_back(sec);
+ cNamedSections[saver().save("__stop_" + sec->name)].push_back(sec);
}
}
diff --git a/contrib/llvm-project/lld/ELF/Options.td b/contrib/llvm-project/lld/ELF/Options.td
index bddf13a3cb42..ca9fdcde791f 100644
--- a/contrib/llvm-project/lld/ELF/Options.td
+++ b/contrib/llvm-project/lld/ELF/Options.td
@@ -119,10 +119,10 @@ defm call_graph_profile_sort: BB<"call-graph-profile-sort",
// --chroot doesn't have a help text because it is an internal option.
def chroot: Separate<["--"], "chroot">;
-defm color_diagnostics: B<"color-diagnostics",
+defm color_diagnostics: BB<"color-diagnostics",
"Alias for --color-diagnostics=always",
"Alias for --color-diagnostics=never">;
-def color_diagnostics_eq: J<"color-diagnostics=">,
+def color_diagnostics_eq: JJ<"color-diagnostics=">,
HelpText<"Use colors in diagnostics (default: auto)">,
MetaVarName<"[auto,always,never]">;
@@ -344,10 +344,10 @@ defm print_symbol_order: Eq<"print-symbol-order",
"Print a symbol order specified by --call-graph-ordering-file into the specified file">;
def pop_state: F<"pop-state">,
- HelpText<"Undo the effect of -push-state">;
+ HelpText<"Restore the states saved by --push-state">;
def push_state: F<"push-state">,
- HelpText<"Save the current state of -as-needed, -static and -whole-archive">;
+ HelpText<"Save the current state of --as-needed, -static and --whole-archive">;
def print_map: F<"print-map">,
HelpText<"Print a link map to the standard output">;
@@ -395,7 +395,7 @@ def strip_all: F<"strip-all">, HelpText<"Strip all symbols. Implies --strip-debu
def strip_debug: F<"strip-debug">, HelpText<"Strip debugging information">;
defm symbol_ordering_file:
- Eq<"symbol-ordering-file", "Layout sections to place symbols in the order specified by symbol ordering file">;
+ EEq<"symbol-ordering-file", "Layout sections to place symbols in the order specified by symbol ordering file">;
defm sysroot: Eq<"sysroot", "Set the system root">;
@@ -445,7 +445,7 @@ defm undefined_version: B<"undefined-version",
"Allow unused version in version script (default)",
"Report version scripts that refer undefined symbols">;
-defm rsp_quoting: Eq<"rsp-quoting", "Quoting style for response files">,
+defm rsp_quoting: EEq<"rsp-quoting", "Quoting style for response files">,
MetaVarName<"[posix,windows]">;
def v: Flag<["-"], "v">, HelpText<"Display the version number">;
diff --git a/contrib/llvm-project/lld/ELF/OutputSections.cpp b/contrib/llvm-project/lld/ELF/OutputSections.cpp
index 4a03ac387814..c73d6e439238 100644
--- a/contrib/llvm-project/lld/ELF/OutputSections.cpp
+++ b/contrib/llvm-project/lld/ELF/OutputSections.cpp
@@ -15,7 +15,7 @@
#include "lld/Common/Memory.h"
#include "lld/Common/Strings.h"
#include "llvm/BinaryFormat/Dwarf.h"
-#include "llvm/Support/Compression.h"
+#include "llvm/Config/config.h" // LLVM_ENABLE_ZLIB
#include "llvm/Support/MD5.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Parallel.h"
@@ -23,6 +23,9 @@
#include "llvm/Support/TimeProfiler.h"
#include <regex>
#include <unordered_set>
+#if LLVM_ENABLE_ZLIB
+#include <zlib.h>
+#endif
using namespace llvm;
using namespace llvm::dwarf;
@@ -284,38 +287,94 @@ static void fill(uint8_t *buf, size_t size,
memcpy(buf + i, filler.data(), size - i);
}
+#if LLVM_ENABLE_ZLIB
+static SmallVector<uint8_t, 0> deflateShard(ArrayRef<uint8_t> in, int level,
+ int flush) {
+ // 15 and 8 are default. windowBits=-15 is negative to generate raw deflate
+ // data with no zlib header or trailer.
+ z_stream s = {};
+ deflateInit2(&s, level, Z_DEFLATED, -15, 8, Z_DEFAULT_STRATEGY);
+ s.next_in = const_cast<uint8_t *>(in.data());
+ s.avail_in = in.size();
+
+ // Allocate a buffer of half of the input size, and grow it by 1.5x if
+ // insufficient.
+ SmallVector<uint8_t, 0> out;
+ size_t pos = 0;
+ out.resize_for_overwrite(std::max<size_t>(in.size() / 2, 64));
+ do {
+ if (pos == out.size())
+ out.resize_for_overwrite(out.size() * 3 / 2);
+ s.next_out = out.data() + pos;
+ s.avail_out = out.size() - pos;
+ (void)deflate(&s, flush);
+ pos = s.next_out - out.data();
+ } while (s.avail_out == 0);
+ assert(s.avail_in == 0);
+
+ out.truncate(pos);
+ deflateEnd(&s);
+ return out;
+}
+#endif
+
// Compress section contents if this section contains debug info.
template <class ELFT> void OutputSection::maybeCompress() {
+#if LLVM_ENABLE_ZLIB
using Elf_Chdr = typename ELFT::Chdr;
// Compress only DWARF debug sections.
if (!config->compressDebugSections || (flags & SHF_ALLOC) ||
- !name.startswith(".debug_"))
+ !name.startswith(".debug_") || size == 0)
return;
llvm::TimeTraceScope timeScope("Compress debug sections");
- // Create a section header.
- zDebugHeader.resize(sizeof(Elf_Chdr));
- auto *hdr = reinterpret_cast<Elf_Chdr *>(zDebugHeader.data());
- hdr->ch_type = ELFCOMPRESS_ZLIB;
- hdr->ch_size = size;
- hdr->ch_addralign = alignment;
-
- // Write section contents to a temporary buffer and compress it.
- std::vector<uint8_t> buf(size);
- writeTo<ELFT>(buf.data());
- // We chose 1 as the default compression level because it is the fastest. If
- // -O2 is given, we use level 6 to compress debug info more by ~15%. We found
- // that level 7 to 9 doesn't make much difference (~1% more compression) while
- // they take significant amount of time (~2x), so level 6 seems enough.
- if (Error e = zlib::compress(toStringRef(buf), compressedData,
- config->optimize >= 2 ? 6 : 1))
- fatal("compress failed: " + llvm::toString(std::move(e)));
-
- // Update section headers.
- size = sizeof(Elf_Chdr) + compressedData.size();
+ // Write uncompressed data to a temporary zero-initialized buffer.
+ auto buf = std::make_unique<uint8_t[]>(size);
+ writeTo<ELFT>(buf.get());
+ // We chose 1 (Z_BEST_SPEED) as the default compression level because it is
+ // the fastest. If -O2 is given, we use level 6 to compress debug info more by
+ // ~15%. We found that levels 7 to 9 don't make much difference (~1% more
+ // compression) while they take a significant amount of time (~2x), so level 6
+ // seems enough.
+ const int level = config->optimize >= 2 ? 6 : Z_BEST_SPEED;
+
+ // Split input into 1-MiB shards.
+ constexpr size_t shardSize = 1 << 20;
+ const size_t numShards = (size + shardSize - 1) / shardSize;
+ auto shardsIn = std::make_unique<ArrayRef<uint8_t>[]>(numShards);
+ for (size_t i = 0, start = 0, end; start != size; ++i, start = end) {
+ end = std::min(start + shardSize, (size_t)size);
+ shardsIn[i] = makeArrayRef<uint8_t>(buf.get() + start, end - start);
+ }
+
+ // Compress shards and compute Adler-32 checksums. Use Z_SYNC_FLUSH for all
+ // shards but the last to flush the output to a byte boundary to be
+ // concatenated with the next shard.
+ auto shardsOut = std::make_unique<SmallVector<uint8_t, 0>[]>(numShards);
+ auto shardsAdler = std::make_unique<uint32_t[]>(numShards);
+ parallelForEachN(0, numShards, [&](size_t i) {
+ shardsOut[i] = deflateShard(shardsIn[i], level,
+ i != numShards - 1 ? Z_SYNC_FLUSH : Z_FINISH);
+ shardsAdler[i] = adler32(1, shardsIn[i].data(), shardsIn[i].size());
+ });
+
+ // Update section size and combine Adler-32 checksums.
+ uint32_t checksum = 1; // Initial Adler-32 value
+ compressed.uncompressedSize = size;
+ size = sizeof(Elf_Chdr) + 2; // Elf_Chdr and zlib header
+ for (size_t i = 0; i != numShards; ++i) {
+ size += shardsOut[i].size();
+ checksum = adler32_combine(checksum, shardsAdler[i], shardsIn[i].size());
+ }
+ size += 4; // checksum
+
+ compressed.shards = std::move(shardsOut);
+ compressed.numShards = numShards;
+ compressed.checksum = checksum;
flags |= SHF_COMPRESSED;
+#endif
}
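For reference, the per-shard Adler-32 values computed above can be merged into a whole-stream checksum with zlib's adler32_combine(), which is what makes the concatenation of a 2-byte zlib header, the raw-deflate shards, and a big-endian Adler-32 trailer a valid zlib stream. The following is a standalone sketch of that layout using only zlib's public API; it is an illustration under those assumptions, not lld code, and the shard sizes and data are made up.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>
#include <zlib.h>

static std::vector<uint8_t> deflateOneShard(const uint8_t *in, size_t size,
                                            int flush) {
  z_stream s = {};
  // windowBits=-15 requests raw deflate data with no zlib header or trailer.
  deflateInit2(&s, Z_BEST_SPEED, Z_DEFLATED, -15, 8, Z_DEFAULT_STRATEGY);
  s.next_in = const_cast<uint8_t *>(in);
  s.avail_in = size;
  // deflateBound plus a little slack for the Z_SYNC_FLUSH marker.
  std::vector<uint8_t> out(deflateBound(&s, size) + 16);
  s.next_out = out.data();
  s.avail_out = out.size();
  // Z_SYNC_FLUSH flushes to a byte boundary; Z_FINISH terminates the stream.
  deflate(&s, flush);
  out.resize(out.size() - s.avail_out);
  deflateEnd(&s);
  return out;
}

int main() {
  std::vector<uint8_t> data(1 << 16, 'A');
  size_t half = data.size() / 2;
  std::vector<uint8_t> shard0 = deflateOneShard(data.data(), half, Z_SYNC_FLUSH);
  std::vector<uint8_t> shard1 =
      deflateOneShard(data.data() + half, data.size() - half, Z_FINISH);

  // Combine per-shard checksums, starting from the initial Adler-32 value 1,
  // the same way the loop in maybeCompress() does.
  uint32_t checksum = 1;
  checksum = adler32_combine(checksum, adler32(1, data.data(), half), half);
  checksum = adler32_combine(checksum,
                             adler32(1, data.data() + half, data.size() - half),
                             data.size() - half);

  // zlib stream = 2-byte header (0x78 0x01: deflate, fastest), the raw-deflate
  // shards back to back, then the big-endian Adler-32 of the uncompressed data.
  std::vector<uint8_t> stream = {0x78, 0x01};
  stream.insert(stream.end(), shard0.begin(), shard0.end());
  stream.insert(stream.end(), shard1.begin(), shard1.end());
  for (int i = 3; i >= 0; --i)
    stream.push_back(uint8_t(checksum >> (8 * i)));

  // The combined value equals the Adler-32 of the whole uncompressed input.
  assert(checksum == adler32(1, data.data(), data.size()));
  return 0;
}

The final assert checks the property the linker relies on: combining per-shard checksums with adler32_combine() yields the Adler-32 of the whole uncompressed input, so shards can be compressed in parallel.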
static void writeInt(uint8_t *buf, uint64_t data, uint64_t size) {
@@ -339,15 +398,32 @@ template <class ELFT> void OutputSection::writeTo(uint8_t *buf) {
// If --compress-debug-section is specified and if this is a debug section,
// we've already compressed section contents. If that's the case,
// just write it down.
- if (!compressedData.empty()) {
- memcpy(buf, zDebugHeader.data(), zDebugHeader.size());
- memcpy(buf + zDebugHeader.size(), compressedData.data(),
- compressedData.size());
+ if (compressed.shards) {
+ auto *chdr = reinterpret_cast<typename ELFT::Chdr *>(buf);
+ chdr->ch_type = ELFCOMPRESS_ZLIB;
+ chdr->ch_size = compressed.uncompressedSize;
+ chdr->ch_addralign = alignment;
+ buf += sizeof(*chdr);
+
+ // Compute shard offsets.
+ auto offsets = std::make_unique<size_t[]>(compressed.numShards);
+ offsets[0] = 2; // zlib header
+ for (size_t i = 1; i != compressed.numShards; ++i)
+ offsets[i] = offsets[i - 1] + compressed.shards[i - 1].size();
+
+ buf[0] = 0x78; // CMF
+ buf[1] = 0x01; // FLG: best speed
+ parallelForEachN(0, compressed.numShards, [&](size_t i) {
+ memcpy(buf + offsets[i], compressed.shards[i].data(),
+ compressed.shards[i].size());
+ });
+
+ write32be(buf + (size - sizeof(*chdr) - 4), compressed.checksum);
return;
}
// Write leading padding.
- std::vector<InputSection *> sections = getInputSections(this);
+ SmallVector<InputSection *, 0> sections = getInputSections(*this);
std::array<uint8_t, 4> filler = getFiller();
bool nonZeroFiller = read32(filler.data()) != 0;
if (nonZeroFiller)
@@ -355,7 +431,7 @@ template <class ELFT> void OutputSection::writeTo(uint8_t *buf) {
parallelForEachN(0, sections.size(), [&](size_t i) {
InputSection *isec = sections[i];
- isec->writeTo<ELFT>(buf);
+ isec->writeTo<ELFT>(buf + isec->outSecOff);
// Fill gaps between sections.
if (nonZeroFiller) {
@@ -520,9 +596,9 @@ InputSection *elf::getFirstInputSection(const OutputSection *os) {
return nullptr;
}
-std::vector<InputSection *> elf::getInputSections(const OutputSection *os) {
- std::vector<InputSection *> ret;
- for (SectionCommand *cmd : os->commands)
+SmallVector<InputSection *, 0> elf::getInputSections(const OutputSection &os) {
+ SmallVector<InputSection *, 0> ret;
+ for (SectionCommand *cmd : os.commands)
if (auto *isd = dyn_cast<InputSectionDescription>(cmd))
ret.insert(ret.end(), isd->sections.begin(), isd->sections.end());
return ret;
@@ -550,7 +626,7 @@ std::array<uint8_t, 4> OutputSection::getFiller() {
void OutputSection::checkDynRelAddends(const uint8_t *bufStart) {
assert(config->writeAddends && config->checkDynamicRelocs);
assert(type == SHT_REL || type == SHT_RELA);
- std::vector<InputSection *> sections = getInputSections(this);
+ SmallVector<InputSection *, 0> sections = getInputSections(*this);
parallelForEachN(0, sections.size(), [&](size_t i) {
// When linking with -r or --emit-relocs we might also call this function
// for input .rel[a].<sec> sections which we simply pass through to the
diff --git a/contrib/llvm-project/lld/ELF/OutputSections.h b/contrib/llvm-project/lld/ELF/OutputSections.h
index fb3eb0059909..ad656e49ceff 100644
--- a/contrib/llvm-project/lld/ELF/OutputSections.h
+++ b/contrib/llvm-project/lld/ELF/OutputSections.h
@@ -25,6 +25,13 @@ struct PhdrEntry;
class InputSection;
class InputSectionBase;
+struct CompressedData {
+ std::unique_ptr<SmallVector<uint8_t, 0>[]> shards;
+ uint32_t numShards = 0;
+ uint32_t checksum = 0;
+ uint64_t uncompressedSize;
+};
+
// This represents a section in an output file.
// It is composed of multiple InputSections.
// The writer creates multiple OutputSections and assign them unique,
@@ -82,8 +89,8 @@ public:
Expr alignExpr;
Expr lmaExpr;
Expr subalignExpr;
- std::vector<SectionCommand *> commands;
- std::vector<StringRef> phdrs;
+ SmallVector<SectionCommand *, 0> commands;
+ SmallVector<StringRef, 0> phdrs;
llvm::Optional<std::array<uint8_t, 4>> filler;
ConstraintKind constraint = ConstraintKind::NoConstraint;
std::string location;
@@ -112,8 +119,7 @@ public:
private:
// Used for implementation of --compress-debug-sections option.
- std::vector<uint8_t> zDebugHeader;
- llvm::SmallVector<char, 0> compressedData;
+ CompressedData compressed;
std::array<uint8_t, 4> getFiller();
};
@@ -121,7 +127,7 @@ private:
int getPriority(StringRef s);
InputSection *getFirstInputSection(const OutputSection *os);
-std::vector<InputSection *> getInputSections(const OutputSection *os);
+SmallVector<InputSection *, 0> getInputSections(const OutputSection &os);
// All output sections that are handled by the linker specially are
// globally accessible. Writer initializes them, so don't use them
diff --git a/contrib/llvm-project/lld/ELF/Relocations.cpp b/contrib/llvm-project/lld/ELF/Relocations.cpp
index 47a5e91c5d1f..074bc7150991 100644
--- a/contrib/llvm-project/lld/ELF/Relocations.cpp
+++ b/contrib/llvm-project/lld/ELF/Relocations.cpp
@@ -100,11 +100,11 @@ void elf::reportRangeError(uint8_t *loc, const Relocation &rel, const Twine &v,
int64_t min, uint64_t max) {
ErrorPlace errPlace = getErrorPlace(loc);
std::string hint;
- if (rel.sym && !rel.sym->isLocal())
+ if (rel.sym && !rel.sym->isSection())
hint = "; references " + lld::toString(*rel.sym);
if (!errPlace.srcLoc.empty())
hint += "\n>>> referenced by " + errPlace.srcLoc;
- if (rel.sym && !rel.sym->isLocal())
+ if (rel.sym && !rel.sym->isSection())
hint += getDefinedLocation(*rel.sym);
if (errPlace.isec && errPlace.isec->name.startswith(".debug"))
@@ -302,8 +302,7 @@ static void replaceWithDefined(Symbol &sym, SectionBase &sec, uint64_t value,
sym.replace(Defined{sym.file, sym.getName(), sym.binding, sym.stOther,
sym.type, value, size, &sec});
- sym.pltIndex = old.pltIndex;
- sym.gotIndex = old.gotIndex;
+ sym.auxIdx = old.auxIdx;
sym.verdefIndex = old.verdefIndex;
sym.exportDynamic = true;
sym.isUsedInRegularObj = true;
@@ -404,14 +403,89 @@ static void addCopyRelSymbol(SharedSymbol &ss) {
}
}
+// .eh_frame sections are mergeable input sections, so their input
+// offsets are not linearly mapped to output section. For each input
+// offset, we need to find a section piece containing the offset and
+// add the piece's base address to the input offset to compute the
+// output offset. That isn't cheap.
+//
+// This class is to speed up the offset computation. When we process
+// relocations, we access offsets in the monotonically increasing
+// order. So we can optimize for that access pattern.
+//
+// For sections other than .eh_frame, this class doesn't do anything.
+namespace {
+class OffsetGetter {
+public:
+ explicit OffsetGetter(InputSectionBase &sec) {
+ if (auto *eh = dyn_cast<EhInputSection>(&sec))
+ pieces = eh->pieces;
+ }
+
+ // Translates offsets in input sections to offsets in output sections.
+ // Given offset must increase monotonically. We assume that Piece is
+ // sorted by inputOff.
+ uint64_t get(uint64_t off) {
+ if (pieces.empty())
+ return off;
+
+ while (i != pieces.size() && pieces[i].inputOff + pieces[i].size <= off)
+ ++i;
+ if (i == pieces.size())
+ fatal(".eh_frame: relocation is not in any piece");
+
+ // Pieces must be contiguous, so there must be no holes in between.
+ assert(pieces[i].inputOff <= off && "Relocation not in any piece");
+
+ // Offset -1 means that the piece is dead (i.e. garbage collected).
+ if (pieces[i].outputOff == -1)
+ return -1;
+ return pieces[i].outputOff + off - pieces[i].inputOff;
+ }
+
+private:
+ ArrayRef<EhSectionPiece> pieces;
+ size_t i = 0;
+};
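The comment above describes the key trick in OffsetGetter: because relocation offsets are visited in increasing order, a single forward-moving index over the sorted piece list suffices instead of a binary search per relocation. Below is a minimal standalone sketch of that idea, with a hypothetical Piece struct standing in for EhSectionPiece; it is not lld code.

#include <cassert>
#include <cstdint>
#include <vector>

struct Piece {
  uint64_t inputOff;
  uint64_t size;
  int64_t outputOff; // -1 means the piece was garbage-collected
};

class SimpleOffsetGetter {
public:
  explicit SimpleOffsetGetter(std::vector<Piece> pieces)
      : pieces(std::move(pieces)) {}

  // Offsets must be queried in increasing order, so `i` only moves forward and
  // the scan over all relocations stays linear in #pieces + #queries.
  int64_t get(uint64_t off) {
    while (i != pieces.size() && pieces[i].inputOff + pieces[i].size <= off)
      ++i;
    assert(i != pieces.size() && pieces[i].inputOff <= off &&
           "offset not in any piece");
    if (pieces[i].outputOff == -1)
      return -1; // dead piece
    return pieces[i].outputOff + (off - pieces[i].inputOff);
  }

private:
  std::vector<Piece> pieces;
  size_t i = 0;
};

int main() {
  SimpleOffsetGetter g({{0, 16, 0}, {16, 16, -1}, {32, 16, 16}});
  assert(g.get(4) == 4);   // first piece kept at output offset 0
  assert(g.get(20) == -1); // second piece discarded
  assert(g.get(40) == 24); // third piece moved to output offset 16
  return 0;
}

Queries must stay monotonic; violating that would silently skip pieces because the index never moves backwards.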
+
+// This class encapsulates states needed to scan relocations for one
+// InputSectionBase.
+class RelocationScanner {
+public:
+ explicit RelocationScanner(InputSectionBase &sec)
+ : sec(sec), getter(sec), config(elf::config.get()), target(*elf::target) {
+ }
+ template <class ELFT, class RelTy> void scan(ArrayRef<RelTy> rels);
+
+private:
+ InputSectionBase &sec;
+ OffsetGetter getter;
+ const Configuration *const config;
+ const TargetInfo &target;
+
+ // End of relocations, used by Mips/PPC64.
+ const void *end = nullptr;
+
+ template <class RelTy> RelType getMipsN32RelType(RelTy *&rel) const;
+ template <class ELFT, class RelTy>
+ int64_t computeMipsAddend(const RelTy &rel, RelExpr expr, bool isLocal) const;
+ template <class ELFT, class RelTy>
+ int64_t computeAddend(const RelTy &rel, RelExpr expr, bool isLocal) const;
+ bool isStaticLinkTimeConstant(RelExpr e, RelType type, const Symbol &sym,
+ uint64_t relOff) const;
+ void processAux(RelExpr expr, RelType type, uint64_t offset, Symbol &sym,
+ int64_t addend) const;
+ template <class ELFT, class RelTy> void scanOne(RelTy *&i);
+};
+} // namespace
+
// MIPS has an odd notion of "paired" relocations to calculate addends.
// For example, if a relocation is of R_MIPS_HI16, there must be a
// R_MIPS_LO16 relocation after that, and an addend is calculated using
// the two relocations.
template <class ELFT, class RelTy>
-static int64_t computeMipsAddend(const RelTy &rel, const RelTy *end,
- InputSectionBase &sec, RelExpr expr,
- bool isLocal) {
+int64_t RelocationScanner::computeMipsAddend(const RelTy &rel, RelExpr expr,
+ bool isLocal) const {
if (expr == R_MIPS_GOTREL && isLocal)
return sec.getFile<ELFT>()->mipsGp0;
@@ -430,10 +504,10 @@ static int64_t computeMipsAddend(const RelTy &rel, const RelTy *end,
// To make things worse, paired relocations might not be contiguous in
// the relocation table, so we need to do linear search. *sigh*
- for (const RelTy *ri = &rel; ri != end; ++ri)
+ for (const RelTy *ri = &rel; ri != static_cast<const RelTy *>(end); ++ri)
if (ri->getType(config->isMips64EL) == pairTy &&
ri->getSymbol(config->isMips64EL) == symIndex)
- return target->getImplicitAddend(buf + ri->r_offset, pairTy);
+ return target.getImplicitAddend(buf + ri->r_offset, pairTy);
warn("can't find matching " + toString(pairTy) + " relocation for " +
toString(type));
@@ -444,9 +518,8 @@ static int64_t computeMipsAddend(const RelTy &rel, const RelTy *end,
// is in a relocation itself. If it is REL, we need to read it from an
// input section.
template <class ELFT, class RelTy>
-static int64_t computeAddend(const RelTy &rel, const RelTy *end,
- InputSectionBase &sec, RelExpr expr,
- bool isLocal) {
+int64_t RelocationScanner::computeAddend(const RelTy &rel, RelExpr expr,
+ bool isLocal) const {
int64_t addend;
RelType type = rel.getType(config->isMips64EL);
@@ -454,13 +527,13 @@ static int64_t computeAddend(const RelTy &rel, const RelTy *end,
addend = getAddend<ELFT>(rel);
} else {
const uint8_t *buf = sec.data().data();
- addend = target->getImplicitAddend(buf + rel.r_offset, type);
+ addend = target.getImplicitAddend(buf + rel.r_offset, type);
}
if (config->emachine == EM_PPC64 && config->isPic && type == R_PPC64_TOC)
addend += getPPC64TocBase();
if (config->emachine == EM_MIPS)
- addend += computeMipsAddend<ELFT>(rel, end, sec, expr, isLocal);
+ addend += computeMipsAddend<ELFT>(rel, expr, isLocal);
return addend;
}
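The paired-addend rule mentioned above comes from the MIPS ABI convention that a 32-bit addend is split across the 16-bit immediates of a HI16/LO16 instruction pair, with the combined value AHL = (AHI << 16) + (short)ALO, the LO16 half being sign-extended. A small standalone illustration of that reassembly follows; it shows only the arithmetic, not lld's implementation, which reads the immediates out of the instruction stream via getImplicitAddend.

#include <cassert>
#include <cstdint>

// Combine a HI16/LO16 immediate pair into the effective addend AHL.
static int64_t combineMipsHiLo(uint16_t ahi, uint16_t alo) {
  return (int64_t(ahi) << 16) + int16_t(alo); // LO16 half is sign-extended
}

int main() {
  // Splitting 0x12348000: the low half (0x8000) is negative when sign-extended,
  // so the high half must be biased by one (0x1235) to compensate.
  assert(combineMipsHiLo(0x1235, 0x8000) == 0x12348000);
  // A value whose low half fits in a positive int16_t needs no bias.
  assert(combineMipsHiLo(0x1234, 0x7fff) == 0x12347fff);
  return 0;
}

Because the low half is sign-extended, the split and the combine must always be done as a matched pair, which is why an unmatched HI16 triggers the "can't find matching relocation" warning above.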
@@ -503,7 +576,7 @@ static std::string maybeReportDiscarded(Undefined &sym) {
// them are known, so that some postprocessing on the list of undefined symbols
// can happen before lld emits diagnostics.
struct UndefinedDiag {
- Symbol *sym;
+ Undefined *sym;
struct Loc {
InputSectionBase *sec;
uint64_t offset;
@@ -532,12 +605,12 @@ static bool canSuggestExternCForCXX(StringRef ref, StringRef def) {
// Suggest an alternative spelling of an "undefined symbol" diagnostic. Returns
// the suggested symbol, which is either in the symbol table, or in the same
// file as sym.
-template <class ELFT>
static const Symbol *getAlternativeSpelling(const Undefined &sym,
std::string &pre_hint,
std::string &post_hint) {
DenseMap<StringRef, const Symbol *> map;
- if (auto *file = dyn_cast_or_null<ObjFile<ELFT>>(sym.file)) {
+ if (sym.file && sym.file->kind() == InputFile::ObjKind) {
+ auto *file = cast<ELFFileBase>(sym.file);
// If sym is a symbol defined in a discarded section, maybeReportDiscarded()
// will give an error. Don't suggest an alternative spelling.
if (file && sym.discardedSecIdx != 0 &&
@@ -649,7 +722,7 @@ static const Symbol *getAlternativeSpelling(const Undefined &sym,
template <class ELFT>
static void reportUndefinedSymbol(const UndefinedDiag &undef,
bool correctSpelling) {
- Symbol &sym = *undef.sym;
+ Undefined &sym = *undef.sym;
auto visibility = [&]() -> std::string {
switch (sym.visibility) {
@@ -664,7 +737,7 @@ static void reportUndefinedSymbol(const UndefinedDiag &undef,
}
};
- std::string msg = maybeReportDiscarded<ELFT>(cast<Undefined>(sym));
+ std::string msg = maybeReportDiscarded<ELFT>(sym);
if (msg.empty())
msg = "undefined " + visibility() + "symbol: " + toString(sym);
@@ -690,8 +763,8 @@ static void reportUndefinedSymbol(const UndefinedDiag &undef,
if (correctSpelling) {
std::string pre_hint = ": ", post_hint;
- if (const Symbol *corrected = getAlternativeSpelling<ELFT>(
- cast<Undefined>(sym), pre_hint, post_hint)) {
+ if (const Symbol *corrected =
+ getAlternativeSpelling(sym, pre_hint, post_hint)) {
msg += "\n>>> did you mean" + pre_hint + toString(*corrected) + post_hint;
if (corrected->file)
msg += "\n>>> defined in: " + toString(corrected->file);
@@ -737,11 +810,11 @@ template <class ELFT> void elf::reportUndefinedSymbols() {
// Report an undefined symbol if necessary.
// Returns true if the undefined symbol will produce an error message.
-static bool maybeReportUndefined(Symbol &sym, InputSectionBase &sec,
+static bool maybeReportUndefined(Undefined &sym, InputSectionBase &sec,
uint64_t offset) {
// If versioned, issue an error (even if the symbol is weak) because we don't
// know the defining filename which is required to construct a Verneed entry.
- if (*sym.getVersionSuffix() == '@') {
+ if (sym.hasVersionSuffix) {
undefs.push_back({&sym, {{&sec, offset}}, false});
return true;
}
@@ -761,8 +834,7 @@ static bool maybeReportUndefined(Symbol &sym, InputSectionBase &sec,
// PPC32 .got2 is similar but cannot be fixed. Multiple .got2 is infeasible
// because .LC0-.LTOC is not representable if the two labels are in different
// .got2
- if (cast<Undefined>(sym).discardedSecIdx != 0 &&
- (sec.name == ".got2" || sec.name == ".toc"))
+ if (sym.discardedSecIdx != 0 && (sec.name == ".got2" || sec.name == ".toc"))
return false;
bool isWarning =
@@ -777,62 +849,17 @@ static bool maybeReportUndefined(Symbol &sym, InputSectionBase &sec,
// packs all relocations into a single relocation record. Here we emulate
// this for the N32 ABI. Iterate over relocations with the same offset and put
// their types into a single bit-set.
-template <class RelTy> static RelType getMipsN32RelType(RelTy *&rel, RelTy *end) {
+template <class RelTy>
+RelType RelocationScanner::getMipsN32RelType(RelTy *&rel) const {
RelType type = 0;
uint64_t offset = rel->r_offset;
int n = 0;
- while (rel != end && rel->r_offset == offset)
+ while (rel != static_cast<const RelTy *>(end) && rel->r_offset == offset)
type |= (rel++)->getType(config->isMips64EL) << (8 * n++);
return type;
}
-// .eh_frame sections are mergeable input sections, so their input
-// offsets are not linearly mapped to output section. For each input
-// offset, we need to find a section piece containing the offset and
-// add the piece's base address to the input offset to compute the
-// output offset. That isn't cheap.
-//
-// This class is to speed up the offset computation. When we process
-// relocations, we access offsets in the monotonically increasing
-// order. So we can optimize for that access pattern.
-//
-// For sections other than .eh_frame, this class doesn't do anything.
-namespace {
-class OffsetGetter {
-public:
- explicit OffsetGetter(InputSectionBase &sec) {
- if (auto *eh = dyn_cast<EhInputSection>(&sec))
- pieces = eh->pieces;
- }
-
- // Translates offsets in input sections to offsets in output sections.
- // Given offset must increase monotonically. We assume that Piece is
- // sorted by inputOff.
- uint64_t get(uint64_t off) {
- if (pieces.empty())
- return off;
-
- while (i != pieces.size() && pieces[i].inputOff + pieces[i].size <= off)
- ++i;
- if (i == pieces.size())
- fatal(".eh_frame: relocation is not in any piece");
-
- // Pieces must be contiguous, so there must be no holes in between.
- assert(pieces[i].inputOff <= off && "Relocation not in any piece");
-
- // Offset -1 means that the piece is dead (i.e. garbage collected).
- if (pieces[i].outputOff == -1)
- return -1;
- return pieces[i].outputOff + off - pieces[i].inputOff;
- }
-
-private:
- ArrayRef<EhSectionPiece> pieces;
- size_t i = 0;
-};
-} // namespace
-
static void addRelativeReloc(InputSectionBase &isec, uint64_t offsetInSec,
Symbol &sym, int64_t addend, RelExpr expr,
RelType type) {
@@ -870,7 +897,7 @@ static void addGotEntry(Symbol &sym) {
// If preemptible, emit a GLOB_DAT relocation.
if (sym.isPreemptible) {
- mainPart->relaDyn->addReloc({target->gotRel, in.got, off,
+ mainPart->relaDyn->addReloc({target->gotRel, in.got.get(), off,
DynamicReloc::AgainstSymbol, sym, 0, R_ABS});
return;
}
@@ -924,8 +951,9 @@ static bool canDefineSymbolInExecutable(Symbol &sym) {
//
// If this function returns false, that means we need to emit a
// dynamic relocation so that the relocation will be fixed at load-time.
-static bool isStaticLinkTimeConstant(RelExpr e, RelType type, const Symbol &sym,
- InputSectionBase &s, uint64_t relOff) {
+bool RelocationScanner::isStaticLinkTimeConstant(RelExpr e, RelType type,
+ const Symbol &sym,
+ uint64_t relOff) const {
// These expressions always compute a constant
if (oneof<R_GOTPLT, R_GOT_OFF, R_MIPS_GOT_LOCAL_PAGE, R_MIPS_GOTREL,
R_MIPS_GOT_OFF, R_MIPS_GOT_OFF32, R_MIPS_GOT_GP_PC,
@@ -937,7 +965,7 @@ static bool isStaticLinkTimeConstant(RelExpr e, RelType type, const Symbol &sym,
// These never do, except if the entire file is position dependent or if
// only the low bits are used.
if (e == R_GOT || e == R_PLT)
- return target->usesOnlyLowPageBits(type) || !config->isPic;
+ return target.usesOnlyLowPageBits(type) || !config->isPic;
if (sym.isPreemptible)
return false;
@@ -957,7 +985,7 @@ static bool isStaticLinkTimeConstant(RelExpr e, RelType type, const Symbol &sym,
if (!absVal && relE)
return true;
if (!absVal && !relE)
- return target->usesOnlyLowPageBits(type);
+ return target.usesOnlyLowPageBits(type);
assert(absVal && relE);
@@ -975,7 +1003,7 @@ static bool isStaticLinkTimeConstant(RelExpr e, RelType type, const Symbol &sym,
return true;
error("relocation " + toString(type) + " cannot refer to absolute symbol: " +
- toString(sym) + getLocation(s, sym, relOff));
+ toString(sym) + getLocation(sec, sym, relOff));
return true;
}
@@ -992,9 +1020,8 @@ static bool isStaticLinkTimeConstant(RelExpr e, RelType type, const Symbol &sym,
// sections. Given that it is ro, we will need an extra PT_LOAD. This
// complicates things for the dynamic linker and means we would have to reserve
// space for the extra PT_LOAD even if we end up not using it.
-template <class ELFT>
-static void processRelocAux(InputSectionBase &sec, RelExpr expr, RelType type,
- uint64_t offset, Symbol &sym, int64_t addend) {
+void RelocationScanner::processAux(RelExpr expr, RelType type, uint64_t offset,
+ Symbol &sym, int64_t addend) const {
// If the relocation is known to be a link-time constant, we know no dynamic
// relocation will be created, pass the control to relocateAlloc() or
// relocateNonAlloc() to resolve it.
@@ -1009,7 +1036,7 @@ static void processRelocAux(InputSectionBase &sec, RelExpr expr, RelType type,
// -shared matches the spirit of its -z undefs default. -pie has freedom on
// choices, and we choose dynamic relocations to be consistent with the
// handling of GOT-generating relocations.
- if (isStaticLinkTimeConstant(expr, type, sym, sec, offset) ||
+ if (isStaticLinkTimeConstant(expr, type, sym, offset) ||
(!config->isPic && sym.isUndefWeak())) {
sec.relocations.push_back({expr, type, offset, addend, &sym});
return;
@@ -1017,13 +1044,13 @@ static void processRelocAux(InputSectionBase &sec, RelExpr expr, RelType type,
bool canWrite = (sec.flags & SHF_WRITE) || !config->zText;
if (canWrite) {
- RelType rel = target->getDynRel(type);
- if (expr == R_GOT || (rel == target->symbolicRel && !sym.isPreemptible)) {
+ RelType rel = target.getDynRel(type);
+ if (expr == R_GOT || (rel == target.symbolicRel && !sym.isPreemptible)) {
addRelativeReloc(sec, offset, sym, addend, expr, type);
return;
} else if (rel != 0) {
- if (config->emachine == EM_MIPS && rel == target->symbolicRel)
- rel = target->relativeRel;
+ if (config->emachine == EM_MIPS && rel == target.symbolicRel)
+ rel = target.relativeRel;
sec.getPartition().relaDyn->addSymbolReloc(rel, sec, offset, sym, addend,
type);
@@ -1145,10 +1172,9 @@ static unsigned handleMipsTlsRelocation(RelType type, Symbol &sym,
// symbol in TLS block.
//
// Returns the number of relocations processed.
-template <class ELFT>
-static unsigned
-handleTlsRelocation(RelType type, Symbol &sym, InputSectionBase &c,
- typename ELFT::uint offset, int64_t addend, RelExpr expr) {
+static unsigned handleTlsRelocation(RelType type, Symbol &sym,
+ InputSectionBase &c, uint64_t offset,
+ int64_t addend, RelExpr expr) {
if (!sym.isTls())
return 0;
@@ -1262,9 +1288,7 @@ handleTlsRelocation(RelType type, Symbol &sym, InputSectionBase &c,
return 0;
}
-template <class ELFT, class RelTy>
-static void scanReloc(InputSectionBase &sec, OffsetGetter &getOffset, RelTy *&i,
- RelTy *start, RelTy *end) {
+template <class ELFT, class RelTy> void RelocationScanner::scanOne(RelTy *&i) {
const RelTy &rel = *i;
uint32_t symIndex = rel.getSymbol(config->isMips64EL);
Symbol &sym = sec.getFile<ELFT>()->getSymbol(symIndex);
@@ -1272,32 +1296,32 @@ static void scanReloc(InputSectionBase &sec, OffsetGetter &getOffset, RelTy *&i,
// Deal with MIPS oddity.
if (config->mipsN32Abi) {
- type = getMipsN32RelType(i, end);
+ type = getMipsN32RelType(i);
} else {
type = rel.getType(config->isMips64EL);
++i;
}
// Get an offset in an output section this relocation is applied to.
- uint64_t offset = getOffset.get(rel.r_offset);
+ uint64_t offset = getter.get(rel.r_offset);
if (offset == uint64_t(-1))
return;
// Error if the target symbol is undefined. Symbol index 0 may be used by
// marker relocations, e.g. R_*_NONE and R_ARM_V4BX. Don't error on them.
if (sym.isUndefined() && symIndex != 0 &&
- maybeReportUndefined(sym, sec, rel.r_offset))
+ maybeReportUndefined(cast<Undefined>(sym), sec, offset))
return;
- const uint8_t *relocatedAddr = sec.data().begin() + rel.r_offset;
- RelExpr expr = target->getRelExpr(type, sym, relocatedAddr);
+ const uint8_t *relocatedAddr = sec.data().begin() + offset;
+ RelExpr expr = target.getRelExpr(type, sym, relocatedAddr);
// Ignore R_*_NONE and other marker relocations.
if (expr == R_NONE)
return;
// Read an addend.
- int64_t addend = computeAddend<ELFT>(rel, end, sec, expr, sym.isLocal());
+ int64_t addend = computeAddend<ELFT>(rel, expr, sym.isLocal());
if (config->emachine == EM_PPC64) {
// We can separate the small code model relocations into 2 categories:
@@ -1346,7 +1370,7 @@ static void scanReloc(InputSectionBase &sec, OffsetGetter &getOffset, RelTy *&i,
}
// Process TLS relocations, including relaxing TLS relocations. Note that
- // R_TPREL and R_TPREL_NEG relocations are resolved in processRelocAux.
+ // R_TPREL and R_TPREL_NEG relocations are resolved in processAux.
if (expr == R_TPREL || expr == R_TPREL_NEG) {
if (config->shared) {
errorOrWarn("relocation " + toString(type) + " against " + toString(sym) +
@@ -1354,8 +1378,8 @@ static void scanReloc(InputSectionBase &sec, OffsetGetter &getOffset, RelTy *&i,
getLocation(sec, sym, offset));
return;
}
- } else if (unsigned processed = handleTlsRelocation<ELFT>(
- type, sym, sec, offset, addend, expr)) {
+ } else if (unsigned processed =
+ handleTlsRelocation(type, sym, sec, offset, addend, expr)) {
i += (processed - 1);
return;
}
@@ -1382,7 +1406,7 @@ static void scanReloc(InputSectionBase &sec, OffsetGetter &getOffset, RelTy *&i,
type == R_HEX_GD_PLT_B32_PCREL_X)))
expr = fromPlt(expr);
} else if (!isAbsoluteValue(sym)) {
- expr = target->adjustGotPcExpr(type, addend, relocatedAddr);
+ expr = target.adjustGotPcExpr(type, addend, relocatedAddr);
}
}
@@ -1413,7 +1437,7 @@ static void scanReloc(InputSectionBase &sec, OffsetGetter &getOffset, RelTy *&i,
sym.hasDirectReloc = true;
}
- processRelocAux<ELFT>(sec, expr, type, offset, sym, addend);
+ processAux(expr, type, offset, sym, addend);
}
// R_PPC64_TLSGD/R_PPC64_TLSLD is required to mark `bl __tls_get_addr` for
@@ -1454,9 +1478,7 @@ static void checkPPC64TLSRelax(InputSectionBase &sec, ArrayRef<RelTy> rels) {
}
template <class ELFT, class RelTy>
-static void scanRelocs(InputSectionBase &sec, ArrayRef<RelTy> rels) {
- OffsetGetter getOffset(sec);
-
+void RelocationScanner::scan(ArrayRef<RelTy> rels) {
// Not all relocations end up in Sec.Relocations, but a lot do.
sec.relocations.reserve(rels.size());
@@ -1470,8 +1492,9 @@ static void scanRelocs(InputSectionBase &sec, ArrayRef<RelTy> rels) {
if (isa<EhInputSection>(sec))
rels = sortRels(rels, storage);
- for (auto i = rels.begin(), end = rels.end(); i != end;)
- scanReloc<ELFT>(sec, getOffset, i, rels.begin(), end);
+ end = static_cast<const void *>(rels.end());
+ for (auto i = rels.begin(); i != end;)
+ scanOne<ELFT>(i);
// Sort relocations by offset for more efficient searching for
// R_RISCV_PCREL_HI20 and R_PPC64_ADDR64.
@@ -1484,11 +1507,12 @@ static void scanRelocs(InputSectionBase &sec, ArrayRef<RelTy> rels) {
}
template <class ELFT> void elf::scanRelocations(InputSectionBase &s) {
+ RelocationScanner scanner(s);
const RelsOrRelas<ELFT> rels = s.template relsOrRelas<ELFT>();
if (rels.areRelocsRel())
- scanRelocs<ELFT>(s, rels.rels);
+ scanner.template scan<ELFT>(rels.rels);
else
- scanRelocs<ELFT>(s, rels.relas);
+ scanner.template scan<ELFT>(rels.relas);
}
static bool handleNonPreemptibleIfunc(Symbol &sym) {
@@ -1545,15 +1569,17 @@ static bool handleNonPreemptibleIfunc(Symbol &sym) {
// may alter section/value, so create a copy of the symbol to make
// section/value fixed.
auto *directSym = makeDefined(cast<Defined>(sym));
+ directSym->allocateAux();
addPltEntry(*in.iplt, *in.igotPlt, *in.relaIplt, target->iRelativeRel,
*directSym);
- sym.pltIndex = directSym->pltIndex;
+ sym.allocateAux();
+ symAux.back().pltIdx = symAux[directSym->auxIdx].pltIdx;
if (sym.hasDirectReloc) {
// Change the value to the IPLT and redirect all references to it.
auto &d = cast<Defined>(sym);
- d.section = in.iplt;
- d.value = sym.pltIndex * target->ipltEntrySize;
+ d.section = in.iplt.get();
+ d.value = d.getPltIdx() * target->ipltEntrySize;
d.size = 0;
// It's important to set the symbol type here so that dynamic loaders
// don't try to call the PLT as if it were an ifunc resolver.
@@ -1572,6 +1598,10 @@ void elf::postScanRelocations() {
auto fn = [](Symbol &sym) {
if (handleNonPreemptibleIfunc(sym))
return;
+ if (!sym.needsDynReloc())
+ return;
+ sym.allocateAux();
+
if (sym.needsGot)
addGotEntry(sym);
if (sym.needsPlt)
@@ -1585,9 +1615,10 @@ void elf::postScanRelocations() {
} else {
assert(sym.isFunc() && sym.needsPlt);
if (!sym.isDefined()) {
- replaceWithDefined(
- sym, *in.plt,
- target->pltHeaderSize + target->pltEntrySize * sym.pltIndex, 0);
+ replaceWithDefined(sym, *in.plt,
+ target->pltHeaderSize +
+ target->pltEntrySize * sym.getPltIdx(),
+ 0);
sym.needsCopy = true;
if (config->emachine == EM_PPC) {
// PPC32 canonical PLT entries are at the beginning of .glink
@@ -1604,13 +1635,12 @@ void elf::postScanRelocations() {
bool isLocalInExecutable = !sym.isPreemptible && !config->shared;
if (sym.needsTlsDesc) {
- in.got->addDynTlsEntry(sym);
+ in.got->addTlsDescEntry(sym);
mainPart->relaDyn->addAddendOnlyRelocIfNonPreemptible(
- target->tlsDescRel, *in.got, in.got->getGlobalDynOffset(sym), sym,
+ target->tlsDescRel, *in.got, in.got->getTlsDescOffset(sym), sym,
target->tlsDescRel);
}
- if (sym.needsTlsGd && !sym.needsTlsDesc) {
- // TODO Support mixed TLSDESC and TLS GD.
+ if (sym.needsTlsGd) {
in.got->addDynTlsEntry(sym);
uint64_t off = in.got->getGlobalDynOffset(sym);
if (isLocalInExecutable)
@@ -1642,8 +1672,8 @@ void elf::postScanRelocations() {
in.got->relocations.push_back(
{R_ADDEND, target->symbolicRel, in.got->getTlsIndexOff(), 1, &sym});
else
- mainPart->relaDyn->addReloc(
- {target->tlsModuleIndexRel, in.got, in.got->getTlsIndexOff()});
+ mainPart->relaDyn->addReloc({target->tlsModuleIndexRel, in.got.get(),
+ in.got->getTlsIndexOff()});
}
if (sym.needsGotDtprel) {
in.got->addEntry(sym);
@@ -1654,13 +1684,15 @@ void elf::postScanRelocations() {
if (sym.needsTlsIe && !sym.needsTlsGdToIe)
addTpOffsetGotEntry(sym);
};
+
+ assert(symAux.empty());
for (Symbol *sym : symtab->symbols())
fn(*sym);
// Local symbols may need the aforementioned non-preemptible ifunc and GOT
// handling. They don't need regular PLT.
for (ELFFileBase *file : objectFiles)
- for (Symbol *sym : cast<ELFFileBase>(file)->getLocalSymbols())
+ for (Symbol *sym : file->getLocalSymbols())
fn(*sym);
}
@@ -1817,7 +1849,7 @@ void ThunkCreator::mergeThunks(ArrayRef<OutputSection *> outputSections) {
});
// Merge sorted vectors of Thunks and InputSections by outSecOff
- std::vector<InputSection *> tmp;
+ SmallVector<InputSection *, 0> tmp;
tmp.reserve(isd->sections.size() + newThunks.size());
std::merge(isd->sections.begin(), isd->sections.end(),
@@ -2175,6 +2207,7 @@ void elf::hexagonTLSSymbolUpdate(ArrayRef<OutputSection *> outputSections) {
for (Relocation &rel : isec->relocations)
if (rel.sym->type == llvm::ELF::STT_TLS && rel.expr == R_PLT_PC) {
if (needEntry) {
+ sym->allocateAux();
addPltEntry(*in.plt, *in.gotPlt, *in.relaPlt, target->pltRel,
*sym);
needEntry = false;
diff --git a/contrib/llvm-project/lld/ELF/Relocations.h b/contrib/llvm-project/lld/ELF/Relocations.h
index c652c0a5f70f..73f6970b80f1 100644
--- a/contrib/llvm-project/lld/ELF/Relocations.h
+++ b/contrib/llvm-project/lld/ELF/Relocations.h
@@ -11,6 +11,7 @@
#include "lld/Common/LLVM.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
#include <map>
#include <vector>
@@ -117,8 +118,8 @@ struct Relocation {
// jump instruction opcodes at basic block boundaries and are particularly
// useful when basic block sections are enabled.
struct JumpInstrMod {
- JumpModType original;
uint64_t offset;
+ JumpModType original;
unsigned size;
};
diff --git a/contrib/llvm-project/lld/ELF/ScriptParser.cpp b/contrib/llvm-project/lld/ELF/ScriptParser.cpp
index d3b0296acab0..7331d1156f27 100644
--- a/contrib/llvm-project/lld/ELF/ScriptParser.cpp
+++ b/contrib/llvm-project/lld/ELF/ScriptParser.cpp
@@ -20,7 +20,7 @@
#include "ScriptLexer.h"
#include "Symbols.h"
#include "Target.h"
-#include "lld/Common/Memory.h"
+#include "lld/Common/CommonLinkerContext.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
@@ -93,12 +93,12 @@ private:
void readSectionAddressType(OutputSection *cmd);
OutputSection *readOverlaySectionDescription();
OutputSection *readOutputSectionDescription(StringRef outSec);
- std::vector<SectionCommand *> readOverlay();
- std::vector<StringRef> readOutputSectionPhdrs();
+ SmallVector<SectionCommand *, 0> readOverlay();
+ SmallVector<StringRef, 0> readOutputSectionPhdrs();
std::pair<uint64_t, uint64_t> readInputSectionFlags();
InputSectionDescription *readInputSectionDescription(StringRef tok);
StringMatcher readFilePatterns();
- std::vector<SectionPattern> readInputSectionsList();
+ SmallVector<SectionPattern, 0> readInputSectionsList();
InputSectionDescription *readInputSectionRules(StringRef filePattern,
uint64_t withFlags,
uint64_t withoutFlags);
@@ -125,11 +125,11 @@ private:
Expr readParenExpr();
// For parsing version script.
- std::vector<SymbolVersion> readVersionExtern();
+ SmallVector<SymbolVersion, 0> readVersionExtern();
void readAnonymousDeclaration();
void readVersionDeclaration(StringRef verStr);
- std::pair<std::vector<SymbolVersion>, std::vector<SymbolVersion>>
+ std::pair<SmallVector<SymbolVersion, 0>, SmallVector<SymbolVersion, 0>>
readSymbols();
// True if a script being read is in the --sysroot directory.
@@ -181,8 +181,8 @@ static ExprValue bitOr(ExprValue a, ExprValue b) {
void ScriptParser::readDynamicList() {
expect("{");
- std::vector<SymbolVersion> locals;
- std::vector<SymbolVersion> globals;
+ SmallVector<SymbolVersion, 0> locals;
+ SmallVector<SymbolVersion, 0> globals;
std::tie(locals, globals) = readSymbols();
expect(";");
@@ -290,7 +290,7 @@ void ScriptParser::addFile(StringRef s) {
SmallString<128> pathData;
StringRef path = (config->sysroot + s).toStringRef(pathData);
if (sys::fs::exists(path))
- driver->addFile(saver.save(path), /*withLOption=*/false);
+ driver->addFile(saver().save(path), /*withLOption=*/false);
else
setError("cannot find " + s + " inside " + config->sysroot);
return;
@@ -304,7 +304,7 @@ void ScriptParser::addFile(StringRef s) {
if (config->sysroot.empty())
driver->addFile(s.substr(1), /*withLOption=*/false);
else
- driver->addFile(saver.save(config->sysroot + "/" + s.substr(1)),
+ driver->addFile(saver().save(config->sysroot + "/" + s.substr(1)),
/*withLOption=*/false);
} else if (s.startswith("-l")) {
// Case 3: search in the list of library paths.
@@ -327,7 +327,7 @@ void ScriptParser::addFile(StringRef s) {
} else {
// Finally, search in the list of library paths.
if (Optional<std::string> path = findFromSearchPaths(s))
- driver->addFile(saver.save(*path), /*withLOption=*/true);
+ driver->addFile(saver().save(*path), /*withLOption=*/true);
else
setError("unable to find " + s);
}
@@ -519,7 +519,7 @@ void ScriptParser::readSearchDir() {
// sections that use the same virtual memory range and normally would trigger
// linker's sections sanity check failures.
// https://sourceware.org/binutils/docs/ld/Overlay-Description.html#Overlay-Description
-std::vector<SectionCommand *> ScriptParser::readOverlay() {
+SmallVector<SectionCommand *, 0> ScriptParser::readOverlay() {
// VA and LMA expressions are optional, though for simplicity of
// implementation we assume they are not. That is what OVERLAY was designed
// for first of all: to allow sections with overlapping VAs at different LMAs.
@@ -529,7 +529,7 @@ std::vector<SectionCommand *> ScriptParser::readOverlay() {
Expr lmaExpr = readParenExpr();
expect("{");
- std::vector<SectionCommand *> v;
+ SmallVector<SectionCommand *, 0> v;
OutputSection *prev = nullptr;
while (!errorCount() && !consume("}")) {
// VA is the same for all sections. The LMAs are consecutive in memory
@@ -566,7 +566,7 @@ void ScriptParser::readOverwriteSections() {
void ScriptParser::readSections() {
expect("{");
- std::vector<SectionCommand *> v;
+ SmallVector<SectionCommand *, 0> v;
while (!errorCount() && !consume("}")) {
StringRef tok = next();
if (tok == "OVERLAY") {
@@ -597,7 +597,7 @@ void ScriptParser::readSections() {
else if (!consume("BEFORE"))
setError("expected AFTER/BEFORE, but got '" + next() + "'");
StringRef where = next();
- std::vector<StringRef> names;
+ SmallVector<StringRef, 0> names;
for (SectionCommand *cmd : v)
if (auto *os = dyn_cast<OutputSection>(cmd))
names.push_back(os->name);
@@ -672,8 +672,8 @@ SortSectionPolicy ScriptParser::readSortKind() {
// is parsed as ".foo", ".bar" with "a.o", and ".baz" with "b.o".
// The semantics of that is section .foo in any file, section .bar in
// any file but a.o, and section .baz in any file but b.o.
-std::vector<SectionPattern> ScriptParser::readInputSectionsList() {
- std::vector<SectionPattern> ret;
+SmallVector<SectionPattern, 0> ScriptParser::readInputSectionsList() {
+ SmallVector<SectionPattern, 0> ret;
while (!errorCount() && peek() != ")") {
StringMatcher excludeFilePat;
if (consume("EXCLUDE_FILE")) {
@@ -718,7 +718,7 @@ ScriptParser::readInputSectionRules(StringRef filePattern, uint64_t withFlags,
while (!errorCount() && !consume(")")) {
SortSectionPolicy outer = readSortKind();
SortSectionPolicy inner = SortSectionPolicy::Default;
- std::vector<SectionPattern> v;
+ SmallVector<SectionPattern, 0> v;
if (outer != SortSectionPolicy::Default) {
expect("(");
inner = readSortKind();
@@ -1452,8 +1452,8 @@ Expr ScriptParser::readParenExpr() {
return e;
}
-std::vector<StringRef> ScriptParser::readOutputSectionPhdrs() {
- std::vector<StringRef> phdrs;
+SmallVector<StringRef, 0> ScriptParser::readOutputSectionPhdrs() {
+ SmallVector<StringRef, 0> phdrs;
while (!errorCount() && peek().startswith(":")) {
StringRef tok = next();
phdrs.push_back((tok.size() == 1) ? next() : tok.substr(1));
@@ -1494,8 +1494,8 @@ unsigned ScriptParser::readPhdrType() {
// Reads an anonymous version declaration.
void ScriptParser::readAnonymousDeclaration() {
- std::vector<SymbolVersion> locals;
- std::vector<SymbolVersion> globals;
+ SmallVector<SymbolVersion, 0> locals;
+ SmallVector<SymbolVersion, 0> globals;
std::tie(locals, globals) = readSymbols();
for (const SymbolVersion &pat : locals)
config->versionDefinitions[VER_NDX_LOCAL].localPatterns.push_back(pat);
@@ -1509,8 +1509,8 @@ void ScriptParser::readAnonymousDeclaration() {
// e.g. "VerStr { global: foo; bar; local: *; };".
void ScriptParser::readVersionDeclaration(StringRef verStr) {
// Read a symbol list.
- std::vector<SymbolVersion> locals;
- std::vector<SymbolVersion> globals;
+ SmallVector<SymbolVersion, 0> locals;
+ SmallVector<SymbolVersion, 0> globals;
std::tie(locals, globals) = readSymbols();
// Create a new version definition and add that to the global symbols.
@@ -1535,11 +1535,11 @@ bool elf::hasWildcard(StringRef s) {
}
// Reads a list of symbols, e.g. "{ global: foo; bar; local: *; };".
-std::pair<std::vector<SymbolVersion>, std::vector<SymbolVersion>>
+std::pair<SmallVector<SymbolVersion, 0>, SmallVector<SymbolVersion, 0>>
ScriptParser::readSymbols() {
- std::vector<SymbolVersion> locals;
- std::vector<SymbolVersion> globals;
- std::vector<SymbolVersion> *v = &globals;
+ SmallVector<SymbolVersion, 0> locals;
+ SmallVector<SymbolVersion, 0> globals;
+ SmallVector<SymbolVersion, 0> *v = &globals;
while (!errorCount()) {
if (consume("}"))
@@ -1554,7 +1554,7 @@ ScriptParser::readSymbols() {
}
if (consume("extern")) {
- std::vector<SymbolVersion> ext = readVersionExtern();
+ SmallVector<SymbolVersion, 0> ext = readVersionExtern();
v->insert(v->end(), ext.begin(), ext.end());
} else {
StringRef tok = next();
@@ -1570,14 +1570,14 @@ ScriptParser::readSymbols() {
//
// The last semicolon is optional. E.g. this is OK:
// "extern "C++" { ns::*; "f(int, double)" };"
-std::vector<SymbolVersion> ScriptParser::readVersionExtern() {
+SmallVector<SymbolVersion, 0> ScriptParser::readVersionExtern() {
StringRef tok = next();
bool isCXX = tok == "\"C++\"";
if (!isCXX && tok != "\"C\"")
setError("Unknown language");
expect("{");
- std::vector<SymbolVersion> ret;
+ SmallVector<SymbolVersion, 0> ret;
while (!errorCount() && peek() != "}") {
StringRef tok = next();
ret.push_back(
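The ScriptParser hunks above replace std::vector with llvm::SmallVector<T, 0>. With zero inline elements the container keeps no in-object storage, so the object itself stays small (roughly a pointer plus two 32-bit counts) while still growing on the heap like std::vector. A minimal, self-contained sketch of the idiom (the element type and function here are illustrative, not part of the patch):

#include "llvm/ADT/SmallVector.h"
#include <cstdio>

// SmallVector<T, 0>: no inline storage, heap growth only. Handy when the
// vector is a member of a frequently-allocated object and the common case
// is "empty or heap-allocated anyway".
static llvm::SmallVector<int, 0> collectEven(int n) {
  llvm::SmallVector<int, 0> out;
  for (int i = 0; i < n; ++i)
    if (i % 2 == 0)
      out.push_back(i);
  return out;
}

int main() {
  for (int v : collectEven(10))
    std::printf("%d ", v);
  std::printf("\n");
}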
diff --git a/contrib/llvm-project/lld/ELF/SymbolTable.cpp b/contrib/llvm-project/lld/ELF/SymbolTable.cpp
index a12c5f22c4fe..ec425cd7e1d1 100644
--- a/contrib/llvm-project/lld/ELF/SymbolTable.cpp
+++ b/contrib/llvm-project/lld/ELF/SymbolTable.cpp
@@ -72,8 +72,10 @@ Symbol *SymbolTable::insert(StringRef name) {
auto p = symMap.insert({CachedHashStringRef(stem), (int)symVector.size()});
if (!p.second) {
Symbol *sym = symVector[p.first->second];
- if (stem.size() != name.size())
+ if (stem.size() != name.size()) {
sym->setName(name);
+ sym->hasVersionSuffix = true;
+ }
return sym;
}
@@ -93,6 +95,8 @@ Symbol *SymbolTable::insert(StringRef name) {
sym->referenced = false;
sym->traced = false;
sym->scriptDefined = false;
+ if (pos != StringRef::npos)
+ sym->hasVersionSuffix = true;
sym->partition = 1;
return sym;
}
@@ -139,12 +143,13 @@ StringMap<SmallVector<Symbol *, 0>> &SymbolTable::getDemangledSyms() {
StringRef name = sym->getName();
size_t pos = name.find('@');
if (pos == std::string::npos)
- demangled = demangleItanium(name);
+ demangled = demangle(name, config->demangle);
else if (pos + 1 == name.size() || name[pos + 1] == '@')
- demangled = demangleItanium(name.substr(0, pos));
+ demangled = demangle(name.substr(0, pos), config->demangle);
else
- demangled =
- (demangleItanium(name.substr(0, pos)) + name.substr(pos)).str();
+ demangled = (demangle(name.substr(0, pos), config->demangle) +
+ name.substr(pos))
+ .str();
(*demangledSyms)[demangled].push_back(sym);
}
}
@@ -316,7 +321,8 @@ void SymbolTable::scanVersionScript() {
// can contain versions in the form of <name>@<version>.
// Let them parse and update their names to exclude version suffix.
for (Symbol *sym : symVector)
- sym->parseSymbolVersion();
+ if (sym->hasVersionSuffix)
+ sym->parseSymbolVersion();
// isPreemptible is false at this point. To correctly compute the binding of a
// Defined (which is used by includeInDynsym()), we need to know if it is
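The SymbolTable.cpp hunks record a hasVersionSuffix bit when a symbol name containing '@' is inserted, so that scanVersionScript() later calls parseSymbolVersion() only on symbols that can actually carry a version suffix instead of on every entry in symVector. A standalone sketch of that gating pattern with simplified types (Sym and its helpers are illustrative, not lld's):

#include <cstdio>
#include <string>
#include <vector>

struct Sym {
  std::string name;
  bool hasVersionSuffix = false;
  void parseSymbolVersion() {
    // Truncate "foo@VER" / "foo@@VER" to "foo" (simplified).
    name.resize(name.find('@'));
  }
};

int main() {
  std::vector<Sym> syms;
  for (const char *n : {"printf", "memcpy@GLIBC_2.2.5", "bar@@V2"}) {
    Sym s{n};
    // Record the '@' check once, at insertion time.
    s.hasVersionSuffix = s.name.find('@') != std::string::npos;
    syms.push_back(s);
  }
  // Later pass: only touch symbols that can actually carry a version.
  for (Sym &s : syms)
    if (s.hasVersionSuffix)
      s.parseSymbolVersion();
  for (const Sym &s : syms)
    std::printf("%s\n", s.name.c_str());
}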
diff --git a/contrib/llvm-project/lld/ELF/SymbolTable.h b/contrib/llvm-project/lld/ELF/SymbolTable.h
index 84d93a3dc786..04c81f642fe0 100644
--- a/contrib/llvm-project/lld/ELF/SymbolTable.h
+++ b/contrib/llvm-project/lld/ELF/SymbolTable.h
@@ -32,17 +32,8 @@ namespace elf {
// add*() functions, which are called by input files as they are parsed. There
// is one add* function per symbol type.
class SymbolTable {
- struct FilterOutPlaceholder {
- bool operator()(Symbol *S) const { return !S->isPlaceholder(); }
- };
- using iterator =
- llvm::filter_iterator<SmallVector<Symbol *, 0>::const_iterator,
- FilterOutPlaceholder>;
-
public:
- llvm::iterator_range<iterator> symbols() const {
- return llvm::make_filter_range(symVector, FilterOutPlaceholder());
- }
+ ArrayRef<Symbol *> symbols() const { return symVector; }
void wrap(Symbol *sym, Symbol *real, Symbol *wrap);
@@ -57,7 +48,7 @@ public:
void handleDynamicList();
// Set of .so files to not link the same shared object file more than once.
- llvm::DenseMap<StringRef, SharedFile *> soNames;
+ llvm::DenseMap<llvm::CachedHashStringRef, SharedFile *> soNames;
// Comdat groups define "link once" sections. If two comdat groups have the
// same name, only one of them is linked, and the other is ignored. This map
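soNames above is re-keyed from StringRef to llvm::CachedHashStringRef, so the hash is computed when the key is built and travels with it; the DenseMap never recomputes it, for example when the table grows. A small sketch of the pattern using only LLVM ADT headers (the map contents are illustrative):

#include "llvm/ADT/CachedHashString.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
#include <cstdio>

int main() {
  llvm::DenseMap<llvm::CachedHashStringRef, int> seen;
  llvm::StringRef names[] = {"libc.so.6", "libm.so.6", "libc.so.6"};
  for (llvm::StringRef n : names) {
    // The hash is computed in the key's constructor and stored with it,
    // so the map reuses it instead of rehashing (e.g. on growth).
    auto it = seen.try_emplace(llvm::CachedHashStringRef(n), 0);
    ++it.first->second;
  }
  for (auto &kv : seen)
    std::printf("%s -> %d\n", kv.first.val().str().c_str(), kv.second);
}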
diff --git a/contrib/llvm-project/lld/ELF/Symbols.cpp b/contrib/llvm-project/lld/ELF/Symbols.cpp
index 20301497a059..d943c1996422 100644
--- a/contrib/llvm-project/lld/ELF/Symbols.cpp
+++ b/contrib/llvm-project/lld/ELF/Symbols.cpp
@@ -26,16 +26,9 @@ using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;
-// Returns a symbol for an error message.
-static std::string demangle(StringRef symName) {
- if (elf::config->demangle)
- return demangleItanium(symName);
- return std::string(symName);
-}
-
std::string lld::toString(const elf::Symbol &sym) {
StringRef name = sym.getName();
- std::string ret = demangle(name);
+ std::string ret = demangle(name, config->demangle);
const char *suffix = sym.getVersionSuffix();
if (*suffix == '@')
@@ -44,7 +37,7 @@ std::string lld::toString(const elf::Symbol &sym) {
}
std::string lld::toELFString(const Archive::Symbol &b) {
- return demangle(b.getName());
+ return demangle(b.getName(), config->demangle);
}
Defined *ElfSym::bss;
@@ -66,6 +59,7 @@ DenseMap<const Symbol *, std::pair<const InputFile *, const InputFile *>>
elf::backwardReferences;
SmallVector<std::tuple<std::string, const InputFile *, const Symbol &>, 0>
elf::whyExtract;
+SmallVector<SymbolAux, 0> elf::symAux;
static uint64_t getSymVA(const Symbol &sym, int64_t addend) {
switch (sym.kind()) {
@@ -140,8 +134,7 @@ static uint64_t getSymVA(const Symbol &sym, int64_t addend) {
return 0;
case Symbol::LazyArchiveKind:
case Symbol::LazyObjectKind:
- assert(sym.isUsedInRegularObj && "lazy symbol reached writer");
- return 0;
+ llvm_unreachable("lazy symbol reached writer");
case Symbol::CommonKind:
llvm_unreachable("common symbol reached writer");
case Symbol::PlaceholderKind:
@@ -161,7 +154,7 @@ uint64_t Symbol::getGotVA() const {
}
uint64_t Symbol::getGotOffset() const {
- return gotIndex * target->gotEntrySize;
+ return getGotIdx() * target->gotEntrySize;
}
uint64_t Symbol::getGotPltVA() const {
@@ -172,15 +165,15 @@ uint64_t Symbol::getGotPltVA() const {
uint64_t Symbol::getGotPltOffset() const {
if (isInIplt)
- return pltIndex * target->gotEntrySize;
- return (pltIndex + target->gotPltHeaderEntriesNum) * target->gotEntrySize;
+ return getPltIdx() * target->gotEntrySize;
+ return (getPltIdx() + target->gotPltHeaderEntriesNum) * target->gotEntrySize;
}
uint64_t Symbol::getPltVA() const {
uint64_t outVA = isInIplt
- ? in.iplt->getVA() + pltIndex * target->ipltEntrySize
+ ? in.iplt->getVA() + getPltIdx() * target->ipltEntrySize
: in.plt->getVA() + in.plt->headerSize +
- pltIndex * target->pltEntrySize;
+ getPltIdx() * target->pltEntrySize;
// While linking microMIPS code PLT code are always microMIPS
// code. Set the less-significant bit to track that fact.
@@ -216,12 +209,13 @@ void Symbol::parseSymbolVersion() {
if (pos == StringRef::npos)
return;
StringRef verstr = s.substr(pos + 1);
- if (verstr.empty())
- return;
// Truncate the symbol name so that it doesn't include the version string.
nameSize = pos;
+ if (verstr.empty())
+ return;
+
// If this is not in this DSO, it is not a definition.
if (!isDefined())
return;
@@ -274,19 +268,15 @@ MemoryBufferRef LazyArchive::getMemberBuffer() {
}
uint8_t Symbol::computeBinding() const {
- if (config->relocatable)
- return binding;
if ((visibility != STV_DEFAULT && visibility != STV_PROTECTED) ||
- (versionId == VER_NDX_LOCAL && !isLazy()))
+ versionId == VER_NDX_LOCAL)
return STB_LOCAL;
- if (!config->gnuUnique && binding == STB_GNU_UNIQUE)
+ if (binding == STB_GNU_UNIQUE && !config->gnuUnique)
return STB_GLOBAL;
return binding;
}
bool Symbol::includeInDynsym() const {
- if (!config->hasDynSymTab)
- return false;
if (computeBinding() == STB_LOCAL)
return false;
if (!isDefined() && !isCommon())
@@ -294,7 +284,7 @@ bool Symbol::includeInDynsym() const {
// expects undefined weak symbols not to exist in .dynsym, e.g.
// __pthread_mutex_lock reference in _dl_add_to_namespace_list,
// __pthread_initialize_minimal reference in csu/libc-start.c.
- return !(config->noDynamicLinker && isUndefWeak());
+ return !(isUndefWeak() && config->noDynamicLinker);
return exportDynamic || inDynamicList;
}
@@ -354,7 +344,7 @@ void elf::maybeWarnUnorderableSymbol(const Symbol *sym) {
// Returns true if a symbol can be replaced at load-time by a symbol
// with the same name defined in other ELF executable or DSO.
bool elf::computeIsPreemptible(const Symbol &sym) {
- assert(!sym.isLocal());
+ assert(!sym.isLocal() || sym.isPlaceholder());
// Only symbols with default visibility that appear in dynsym can be
// preempted. Symbols with protected visibility cannot be preempted.
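Symbols.cpp drops its file-local demangle() wrapper in favour of a shared demangle(name, config->demangle) helper. A sketch of what such a flag-gated wrapper looks like, assuming llvm::demangle() from llvm/Demangle/Demangle.h; the wrapper name here is illustrative, not lld's exact API:

#include "llvm/ADT/StringRef.h"
#include "llvm/Demangle/Demangle.h"
#include <cstdio>
#include <string>

// Flag-gated demangler. llvm::demangle() returns the input unchanged when
// the name is not a recognized mangling, so this is safe for plain C names.
static std::string maybeDemangle(llvm::StringRef name, bool shouldDemangle) {
  if (shouldDemangle)
    return llvm::demangle(name.str());
  return name.str();
}

int main() {
  std::printf("%s\n", maybeDemangle("_Z3fooi", true).c_str());  // foo(int)
  std::printf("%s\n", maybeDemangle("_Z3fooi", false).c_str()); // _Z3fooi
}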
diff --git a/contrib/llvm-project/lld/ELF/Symbols.h b/contrib/llvm-project/lld/ELF/Symbols.h
index 27c36eedce80..cd5d4b280e79 100644
--- a/contrib/llvm-project/lld/ELF/Symbols.h
+++ b/contrib/llvm-project/lld/ELF/Symbols.h
@@ -42,20 +42,17 @@ class SharedSymbol;
class Symbol;
class Undefined;
-// This is a StringRef-like container that doesn't run strlen().
-//
-// ELF string tables contain a lot of null-terminated strings. Most of them
-// are not necessary for the linker because they are names of local symbols,
-// and the linker doesn't use local symbol names for name resolution. So, we
-// use this class to represents strings read from string tables.
-struct StringRefZ {
- StringRefZ(const char *s) : data(s), size(-1) {}
- StringRefZ(StringRef s) : data(s.data()), size(s.size()) {}
-
- const char *data;
- const uint32_t size;
+// Some index properties of a symbol are stored separately in this auxiliary
+// struct to decrease sizeof(SymbolUnion) in the majority of cases.
+struct SymbolAux {
+ uint32_t gotIdx = -1;
+ uint32_t pltIdx = -1;
+ uint32_t tlsDescIdx = -1;
+ uint32_t tlsGdIdx = -1;
};
+extern SmallVector<SymbolAux, 0> symAux;
+
// The base class for real symbol classes.
class Symbol {
public:
@@ -76,14 +73,14 @@ public:
protected:
const char *nameData;
- mutable uint32_t nameSize;
+ // 32-bit size saves space.
+ uint32_t nameSize;
public:
+ // A symAux index used to access GOT/PLT entry indexes. This is allocated in
+ // postScanRelocations().
+ uint32_t auxIdx = -1;
uint32_t dynsymIndex = 0;
- uint32_t gotIndex = -1;
- uint32_t pltIndex = -1;
-
- uint32_t globalDynIndex = -1;
// This field is a index to the symbol's version definition.
uint16_t verdefIndex = -1;
@@ -144,6 +141,9 @@ public:
// True if this symbol is specified by --trace-symbol option.
uint8_t traced : 1;
+ // True if the name contains '@'.
+ uint8_t hasVersionSuffix : 1;
+
inline void replace(const Symbol &newSym);
bool includeInDynsym() const;
@@ -166,11 +166,7 @@ public:
// all input files have been added.
bool isUndefWeak() const { return isWeak() && isUndefined(); }
- StringRef getName() const {
- if (nameSize == (uint32_t)-1)
- nameSize = strlen(nameData);
- return {nameData, nameSize};
- }
+ StringRef getName() const { return {nameData, nameSize}; }
void setName(StringRef s) {
nameData = s.data();
@@ -183,13 +179,23 @@ public:
//
// For @@, the name has been truncated by insert(). For @, the name has been
// truncated by Symbol::parseSymbolVersion().
- const char *getVersionSuffix() const {
- (void)getName();
- return nameData + nameSize;
+ const char *getVersionSuffix() const { return nameData + nameSize; }
+
+ uint32_t getGotIdx() const {
+ return auxIdx == uint32_t(-1) ? uint32_t(-1) : symAux[auxIdx].gotIdx;
+ }
+ uint32_t getPltIdx() const {
+ return auxIdx == uint32_t(-1) ? uint32_t(-1) : symAux[auxIdx].pltIdx;
+ }
+ uint32_t getTlsDescIdx() const {
+ return auxIdx == uint32_t(-1) ? uint32_t(-1) : symAux[auxIdx].tlsDescIdx;
+ }
+ uint32_t getTlsGdIdx() const {
+ return auxIdx == uint32_t(-1) ? uint32_t(-1) : symAux[auxIdx].tlsGdIdx;
}
- bool isInGot() const { return gotIndex != -1U; }
- bool isInPlt() const { return pltIndex != -1U; }
+ bool isInGot() const { return getGotIdx() != uint32_t(-1); }
+ bool isInPlt() const { return getPltIdx() != uint32_t(-1); }
uint64_t getVA(int64_t addend = 0) const;
@@ -240,16 +246,18 @@ private:
inline size_t getSymbolSize() const;
protected:
- Symbol(Kind k, InputFile *file, StringRefZ name, uint8_t binding,
+ Symbol(Kind k, InputFile *file, StringRef name, uint8_t binding,
uint8_t stOther, uint8_t type)
- : file(file), nameData(name.data), nameSize(name.size), binding(binding),
- type(type), stOther(stOther), symbolKind(k), visibility(stOther & 3),
+ : file(file), nameData(name.data()), nameSize(name.size()),
+ binding(binding), type(type), stOther(stOther), symbolKind(k),
+ visibility(stOther & 3),
isUsedInRegularObj(!file || file->kind() == InputFile::ObjKind),
exportDynamic(isExportDynamic(k, visibility)), inDynamicList(false),
- canInline(false), referenced(false), traced(false), isInIplt(false),
- gotInIgot(false), isPreemptible(false), used(!config->gcSections),
- folded(false), needsTocRestore(false), scriptDefined(false),
- needsCopy(false), needsGot(false), needsPlt(false), needsTlsDesc(false),
+ canInline(false), referenced(false), traced(false),
+ hasVersionSuffix(false), isInIplt(false), gotInIgot(false),
+ isPreemptible(false), used(!config->gcSections), folded(false),
+ needsTocRestore(false), scriptDefined(false), needsCopy(false),
+ needsGot(false), needsPlt(false), needsTlsDesc(false),
needsTlsGd(false), needsTlsGdToIe(false), needsTlsLd(false),
needsGotDtprel(false), needsTlsIe(false), hasDirectReloc(false) {}
@@ -297,6 +305,16 @@ public:
uint8_t needsTlsIe : 1;
uint8_t hasDirectReloc : 1;
+ bool needsDynReloc() const {
+ return needsCopy || needsGot || needsPlt || needsTlsDesc || needsTlsGd ||
+ needsTlsGdToIe || needsTlsLd || needsGotDtprel || needsTlsIe;
+ }
+ void allocateAux() {
+ assert(auxIdx == uint32_t(-1));
+ auxIdx = symAux.size();
+ symAux.emplace_back();
+ }
+
// The partition whose dynamic symbol table contains this symbol's definition.
uint8_t partition = 1;
@@ -311,7 +329,7 @@ public:
// Represents a symbol that is defined in the current output file.
class Defined : public Symbol {
public:
- Defined(InputFile *file, StringRefZ name, uint8_t binding, uint8_t stOther,
+ Defined(InputFile *file, StringRef name, uint8_t binding, uint8_t stOther,
uint8_t type, uint64_t value, uint64_t size, SectionBase *section)
: Symbol(DefinedKind, file, name, binding, stOther, type), value(value),
size(size), section(section) {}
@@ -346,7 +364,7 @@ public:
// section. (Therefore, the later passes don't see any CommonSymbols.)
class CommonSymbol : public Symbol {
public:
- CommonSymbol(InputFile *file, StringRefZ name, uint8_t binding,
+ CommonSymbol(InputFile *file, StringRef name, uint8_t binding,
uint8_t stOther, uint8_t type, uint64_t alignment, uint64_t size)
: Symbol(CommonKind, file, name, binding, stOther, type),
alignment(alignment), size(size) {}
@@ -359,7 +377,7 @@ public:
class Undefined : public Symbol {
public:
- Undefined(InputFile *file, StringRefZ name, uint8_t binding, uint8_t stOther,
+ Undefined(InputFile *file, StringRef name, uint8_t binding, uint8_t stOther,
uint8_t type, uint32_t discardedSecIdx = 0)
: Symbol(UndefinedKind, file, name, binding, stOther, type),
discardedSecIdx(discardedSecIdx) {}
@@ -500,9 +518,9 @@ union SymbolUnion {
};
// It is important to keep the size of SymbolUnion small for performance and
-// memory usage reasons. 80 bytes is a soft limit based on the size of Defined
+// memory usage reasons. 72 bytes is a soft limit based on the size of Defined
// on a 64-bit system.
-static_assert(sizeof(SymbolUnion) <= 80, "SymbolUnion too large");
+static_assert(sizeof(SymbolUnion) <= 72, "SymbolUnion too large");
template <typename T> struct AssertSymbol {
static_assert(std::is_trivially_destructible<T>(),
@@ -575,6 +593,7 @@ void Symbol::replace(const Symbol &newSym) {
canInline = old.canInline;
referenced = old.referenced;
traced = old.traced;
+ hasVersionSuffix = old.hasVersionSuffix;
isPreemptible = old.isPreemptible;
scriptDefined = old.scriptDefined;
partition = old.partition;
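The Symbols.h change moves the GOT/PLT/TLS indices out of Symbol into a parallel symAux vector addressed through Symbol::auxIdx, so only symbols that actually get such entries pay for the fields, and the SymbolUnion size assertion tightens from 80 to 72 bytes. A self-contained sketch of the side-table pattern with simplified types (the names mirror the patch but the code is illustrative):

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <vector>

struct Aux {
  uint32_t gotIdx = UINT32_MAX;
  uint32_t pltIdx = UINT32_MAX;
};
static std::vector<Aux> auxTable;

struct Sym {
  uint32_t auxIdx = UINT32_MAX; // set only for symbols that need GOT/PLT data
  void allocateAux() {
    assert(auxIdx == UINT32_MAX);
    auxIdx = auxTable.size();
    auxTable.emplace_back();
  }
  uint32_t getGotIdx() const {
    return auxIdx == UINT32_MAX ? UINT32_MAX : auxTable[auxIdx].gotIdx;
  }
  bool isInGot() const { return getGotIdx() != UINT32_MAX; }
};

int main() {
  std::vector<Sym> syms(1000); // most symbols never touch the side table
  Sym &needsGot = syms[42];
  needsGot.allocateAux();     // one Aux slot, appended at the end
  auxTable.back().gotIdx = 7; // same convention as symAux.back() in the patch
  std::printf("in GOT: %d, idx: %u\n", syms[42].isInGot(), syms[42].getGotIdx());
  std::printf("sizeof(Sym) = %zu\n", sizeof(Sym)); // the hot array stays small
}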
diff --git a/contrib/llvm-project/lld/ELF/SyntheticSections.cpp b/contrib/llvm-project/lld/ELF/SyntheticSections.cpp
index e480118f5ae9..f125e3f0a51a 100644
--- a/contrib/llvm-project/lld/ELF/SyntheticSections.cpp
+++ b/contrib/llvm-project/lld/ELF/SyntheticSections.cpp
@@ -22,9 +22,8 @@
#include "Symbols.h"
#include "Target.h"
#include "Writer.h"
+#include "lld/Common/CommonLinkerContext.h"
#include "lld/Common/DWARF.h"
-#include "lld/Common/ErrorHandler.h"
-#include "lld/Common/Memory.h"
#include "lld/Common/Strings.h"
#include "lld/Common/Version.h"
#include "llvm/ADT/SetOperations.h"
@@ -73,7 +72,7 @@ static ArrayRef<uint8_t> getVersion() {
// This is only for testing.
StringRef s = getenv("LLD_VERSION");
if (s.empty())
- s = saver.save(Twine("Linker: ") + getLLDVersion());
+ s = saver().save(Twine("Linker: ") + getLLDVersion());
// +1 to include the terminating '\0'.
return {(const uint8_t *)s.data(), s.size() + 1};
@@ -171,7 +170,7 @@ MipsOptionsSection<ELFT> *MipsOptionsSection<ELFT>::create() {
if (!ELFT::Is64Bits)
return nullptr;
- std::vector<InputSectionBase *> sections;
+ SmallVector<InputSectionBase *, 0> sections;
for (InputSectionBase *sec : inputSections)
if (sec->type == SHT_MIPS_OPTIONS)
sections.push_back(sec);
@@ -228,7 +227,7 @@ MipsReginfoSection<ELFT> *MipsReginfoSection<ELFT>::create() {
if (ELFT::Is64Bits)
return nullptr;
- std::vector<InputSectionBase *> sections;
+ SmallVector<InputSectionBase *, 0> sections;
for (InputSectionBase *sec : inputSections)
if (sec->type == SHT_MIPS_REGINFO)
sections.push_back(sec);
@@ -255,7 +254,7 @@ MipsReginfoSection<ELFT> *MipsReginfoSection<ELFT>::create() {
InputSection *elf::createInterpSection() {
// StringSaver guarantees that the returned string ends with '\0'.
- StringRef s = saver.save(config->dynamicLinker);
+ StringRef s = saver().save(config->dynamicLinker);
ArrayRef<uint8_t> contents = {(const uint8_t *)s.data(), s.size() + 1};
return make<InputSection>(nullptr, SHF_ALLOC, SHT_PROGBITS, 1, contents,
@@ -496,9 +495,8 @@ static void writeCieFde(uint8_t *buf, ArrayRef<uint8_t> d) {
memcpy(buf, d.data(), d.size());
size_t aligned = alignTo(d.size(), config->wordsize);
-
- // Zero-clear trailing padding if it exists.
- memset(buf + d.size(), 0, aligned - d.size());
+ assert(std::all_of(buf + d.size(), buf + aligned,
+ [](uint8_t c) { return c == 0; }));
// Fix the size field. -4 since size does not include the size field itself.
write32(buf, aligned - 4);
@@ -551,9 +549,9 @@ void EhFrameSection::finalizeContents() {
// Returns data for .eh_frame_hdr. .eh_frame_hdr is a binary search table
// to get an FDE from an address to which FDE is applied. This function
// returns a list of such pairs.
-std::vector<EhFrameSection::FdeData> EhFrameSection::getFdeData() const {
+SmallVector<EhFrameSection::FdeData, 0> EhFrameSection::getFdeData() const {
uint8_t *buf = Out::bufferStart + getParent()->offset + outSecOff;
- std::vector<FdeData> ret;
+ SmallVector<FdeData, 0> ret;
uint64_t va = getPartition().ehFrameHdr->getVA();
for (CieRecord *rec : cieRecords) {
@@ -650,14 +648,20 @@ GotSection::GotSection()
}
void GotSection::addEntry(Symbol &sym) {
- sym.gotIndex = numEntries;
- ++numEntries;
+ assert(sym.auxIdx == symAux.size() - 1);
+ symAux.back().gotIdx = numEntries++;
+}
+
+bool GotSection::addTlsDescEntry(Symbol &sym) {
+ assert(sym.auxIdx == symAux.size() - 1);
+ symAux.back().tlsDescIdx = numEntries;
+ numEntries += 2;
+ return true;
}
bool GotSection::addDynTlsEntry(Symbol &sym) {
- if (sym.globalDynIndex != -1U)
- return false;
- sym.globalDynIndex = numEntries;
+ assert(sym.auxIdx == symAux.size() - 1);
+ symAux.back().tlsGdIdx = numEntries;
// Global Dynamic TLS entries take two GOT slots.
numEntries += 2;
return true;
@@ -673,12 +677,20 @@ bool GotSection::addTlsIndex() {
return true;
}
+uint32_t GotSection::getTlsDescOffset(const Symbol &sym) const {
+ return sym.getTlsDescIdx() * config->wordsize;
+}
+
+uint64_t GotSection::getTlsDescAddr(const Symbol &sym) const {
+ return getVA() + getTlsDescOffset(sym);
+}
+
uint64_t GotSection::getGlobalDynAddr(const Symbol &b) const {
- return this->getVA() + b.globalDynIndex * config->wordsize;
+ return this->getVA() + b.getTlsGdIdx() * config->wordsize;
}
uint64_t GotSection::getGlobalDynOffset(const Symbol &b) const {
- return b.globalDynIndex * config->wordsize;
+ return b.getTlsGdIdx() * config->wordsize;
}
void GotSection::finalizeContents() {
@@ -972,12 +984,18 @@ void MipsGotSection::build() {
}
}
- // Update Symbol::gotIndex field to use this
+ // Update SymbolAux::gotIdx field to use this
// value later in the `sortMipsSymbols` function.
- for (auto &p : primGot->global)
- p.first->gotIndex = p.second;
- for (auto &p : primGot->relocs)
- p.first->gotIndex = p.second;
+ for (auto &p : primGot->global) {
+ if (p.first->auxIdx == uint32_t(-1))
+ p.first->allocateAux();
+ symAux.back().gotIdx = p.second;
+ }
+ for (auto &p : primGot->relocs) {
+ if (p.first->auxIdx == uint32_t(-1))
+ p.first->allocateAux();
+ symAux.back().gotIdx = p.second;
+ }
// Create dynamic relocations.
for (FileGot &got : gots) {
@@ -1145,7 +1163,8 @@ GotPltSection::GotPltSection()
}
void GotPltSection::addEntry(Symbol &sym) {
- assert(sym.pltIndex == entries.size());
+ assert(sym.auxIdx == symAux.size() - 1 &&
+ symAux.back().pltIdx == entries.size());
entries.push_back(&sym);
}
@@ -1190,7 +1209,7 @@ IgotPltSection::IgotPltSection()
target->gotEntrySize, getIgotPltName()) {}
void IgotPltSection::addEntry(Symbol &sym) {
- assert(sym.pltIndex == entries.size());
+ assert(symAux.back().pltIdx == entries.size());
entries.push_back(&sym);
}
@@ -1218,7 +1237,7 @@ StringTableSection::StringTableSection(StringRef name, bool dynamic)
// them with some other string that happens to be the same.
unsigned StringTableSection::addString(StringRef s, bool hashIt) {
if (hashIt) {
- auto r = stringMap.insert(std::make_pair(s, this->size));
+ auto r = stringMap.try_emplace(CachedHashStringRef(s), size);
if (!r.second)
return r.first->second;
}
@@ -1265,11 +1284,11 @@ DynamicSection<ELFT>::DynamicSection()
// .rela.dyn
//
// DT_RELASZ is the total size of the included sections.
-static uint64_t addRelaSz(RelocationBaseSection *relaDyn) {
- size_t size = relaDyn->getSize();
- if (in.relaIplt->getParent() == relaDyn->getParent())
+static uint64_t addRelaSz(const RelocationBaseSection &relaDyn) {
+ size_t size = relaDyn.getSize();
+ if (in.relaIplt->getParent() == relaDyn.getParent())
size += in.relaIplt->getSize();
- if (in.relaPlt->getParent() == relaDyn->getParent())
+ if (in.relaPlt->getParent() == relaDyn.getParent())
size += in.relaPlt->getSize();
return size;
}
@@ -1375,7 +1394,8 @@ DynamicSection<ELFT>::computeContents() {
(in.relaIplt->isNeeded() &&
part.relaDyn->getParent() == in.relaIplt->getParent())) {
addInSec(part.relaDyn->dynamicTag, *part.relaDyn);
- entries.emplace_back(part.relaDyn->sizeDynamicTag, addRelaSz(part.relaDyn));
+ entries.emplace_back(part.relaDyn->sizeDynamicTag,
+ addRelaSz(*part.relaDyn));
bool isRela = config->isRela;
addInt(isRela ? DT_RELAENT : DT_RELENT,
@@ -1390,7 +1410,8 @@ DynamicSection<ELFT>::computeContents() {
addInt(isRela ? DT_RELACOUNT : DT_RELCOUNT, numRelativeRels);
}
}
- if (part.relrDyn && !part.relrDyn->relocs.empty()) {
+ if (part.relrDyn && part.relrDyn->getParent() &&
+ !part.relrDyn->relocs.empty()) {
addInSec(config->useAndroidRelrTags ? DT_ANDROID_RELR : DT_RELR,
*part.relrDyn);
addInt(config->useAndroidRelrTags ? DT_ANDROID_RELRSZ : DT_RELRSZ,
@@ -1441,9 +1462,9 @@ DynamicSection<ELFT>::computeContents() {
addInt(DT_STRSZ, part.dynStrTab->getSize());
if (!config->zText)
addInt(DT_TEXTREL, 0);
- if (part.gnuHashTab)
+ if (part.gnuHashTab && part.gnuHashTab->getParent())
addInSec(DT_GNU_HASH, *part.gnuHashTab);
- if (part.hashTab)
+ if (part.hashTab && part.hashTab->getParent())
addInSec(DT_HASH, *part.hashTab);
if (isMain) {
@@ -1626,7 +1647,7 @@ void RelocationBaseSection::addReloc(const DynamicReloc &reloc) {
}
void RelocationBaseSection::finalizeContents() {
- SymbolTableBaseSection *symTab = getPartition().dynSymTab;
+ SymbolTableBaseSection *symTab = getPartition().dynSymTab.get();
// When linking glibc statically, .rel{,a}.plt contains R_*_IRELATIVE
// relocations due to IFUNC (e.g. strcpy). sh_link will be set to 0 in that
@@ -1636,11 +1657,11 @@ void RelocationBaseSection::finalizeContents() {
else
getParent()->link = 0;
- if (in.relaPlt == this && in.gotPlt->getParent()) {
+ if (in.relaPlt.get() == this && in.gotPlt->getParent()) {
getParent()->flags |= ELF::SHF_INFO_LINK;
getParent()->info = in.gotPlt->getParent()->sectionIndex;
}
- if (in.relaIplt == this && in.igotPlt->getParent()) {
+ if (in.relaIplt.get() == this && in.igotPlt->getParent()) {
getParent()->flags |= ELF::SHF_INFO_LINK;
getParent()->info = in.igotPlt->getParent()->sectionIndex;
}
@@ -1677,7 +1698,7 @@ RelocationSection<ELFT>::RelocationSection(StringRef name, bool sort)
}
template <class ELFT> void RelocationSection<ELFT>::writeTo(uint8_t *buf) {
- SymbolTableBaseSection *symTab = getPartition().dynSymTab;
+ SymbolTableBaseSection *symTab = getPartition().dynSymTab.get();
parallelForEach(relocs,
[symTab](DynamicReloc &rel) { rel.computeRaw(symTab); });
@@ -1686,9 +1707,13 @@ template <class ELFT> void RelocationSection<ELFT>::writeTo(uint8_t *buf) {
// is to make results easier to read.
if (sort) {
const RelType relativeRel = target->relativeRel;
- parallelSort(relocs, [&](const DynamicReloc &a, const DynamicReloc &b) {
- return std::make_tuple(a.type != relativeRel, a.r_sym, a.r_offset) <
- std::make_tuple(b.type != relativeRel, b.r_sym, b.r_offset);
+ auto nonRelative =
+ llvm::partition(relocs, [=](auto &r) { return r.type == relativeRel; });
+ parallelSort(relocs.begin(), nonRelative,
+ [&](auto &a, auto &b) { return a.r_offset < b.r_offset; });
+ // Non-relative relocations are few, so don't bother with parallelSort.
+ std::sort(nonRelative, relocs.end(), [&](auto &a, auto &b) {
+ return std::tie(a.r_sym, a.r_offset) < std::tie(b.r_sym, b.r_offset);
});
}
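The hunk above replaces one parallel sort over a three-part key with a partition: relative relocations, which dominate, are grouped first and sorted by r_offset alone, while the remaining few are sorted by (r_sym, r_offset). A reduced sketch of the same partition-then-sort shape using standard algorithms only (the Reloc struct and values are illustrative):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <tuple>
#include <vector>

struct Reloc {
  uint32_t type; // 8 stands in for the target's relative relocation type
  uint32_t r_sym;
  uint64_t r_offset;
};

int main() {
  const uint32_t relativeRel = 8;
  std::vector<Reloc> relocs = {
      {8, 0, 0x30}, {1, 5, 0x10}, {8, 0, 0x20}, {1, 3, 0x40}};
  // Group the relative relocations first; 'mid' points at the first
  // non-relative entry.
  auto mid = std::partition(relocs.begin(), relocs.end(),
                            [&](const Reloc &r) { return r.type == relativeRel; });
  // Relative relocations: sort by offset only (lld sorts this part in parallel).
  std::sort(relocs.begin(), mid,
            [](const Reloc &a, const Reloc &b) { return a.r_offset < b.r_offset; });
  // The remaining few: sort by (symbol, offset).
  std::sort(mid, relocs.end(), [](const Reloc &a, const Reloc &b) {
    return std::tie(a.r_sym, a.r_offset) < std::tie(b.r_sym, b.r_offset);
  });
  for (const Reloc &r : relocs)
    std::printf("type=%u sym=%u off=%#llx\n", r.type, r.r_sym,
                (unsigned long long)r.r_offset);
}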
@@ -1772,8 +1797,8 @@ bool AndroidPackedRelocationSection<ELFT>::updateAllocSize() {
for (const DynamicReloc &rel : relocs) {
Elf_Rela r;
r.r_offset = rel.getOffset();
- r.setSymbolAndType(rel.getSymIndex(getPartition().dynSymTab), rel.type,
- false);
+ r.setSymbolAndType(rel.getSymIndex(getPartition().dynSymTab.get()),
+ rel.type, false);
if (config->isRela)
r.r_addend = rel.computeAddend();
@@ -1999,42 +2024,30 @@ template <class ELFT> bool RelrSection<ELFT>::updateAllocSize() {
const size_t nBits = wordsize * 8 - 1;
// Get offsets for all relative relocations and sort them.
- std::vector<uint64_t> offsets;
- for (const RelativeReloc &rel : relocs)
- offsets.push_back(rel.getOffset());
- llvm::sort(offsets);
+ std::unique_ptr<uint64_t[]> offsets(new uint64_t[relocs.size()]);
+ for (auto it : llvm::enumerate(relocs))
+ offsets[it.index()] = it.value().getOffset();
+ std::sort(offsets.get(), offsets.get() + relocs.size());
// For each leading relocation, find following ones that can be folded
// as a bitmap and fold them.
- for (size_t i = 0, e = offsets.size(); i < e;) {
+ for (size_t i = 0, e = relocs.size(); i != e;) {
// Add a leading relocation.
relrRelocs.push_back(Elf_Relr(offsets[i]));
uint64_t base = offsets[i] + wordsize;
++i;
// Find foldable relocations to construct bitmaps.
- while (i < e) {
+ for (;;) {
uint64_t bitmap = 0;
-
- while (i < e) {
- uint64_t delta = offsets[i] - base;
-
- // If it is too far, it cannot be folded.
- if (delta >= nBits * wordsize)
- break;
-
- // If it is not a multiple of wordsize away, it cannot be folded.
- if (delta % wordsize)
+ for (; i != e; ++i) {
+ uint64_t d = offsets[i] - base;
+ if (d >= nBits * wordsize || d % wordsize)
break;
-
- // Fold it.
- bitmap |= 1ULL << (delta / wordsize);
- ++i;
+ bitmap |= uint64_t(1) << (d / wordsize);
}
-
if (!bitmap)
break;
-
relrRelocs.push_back(Elf_Relr((bitmap << 1) | 1));
base += nBits * wordsize;
}
@@ -2068,7 +2081,7 @@ static bool sortMipsSymbols(const SymbolTableEntry &l,
// Sort entries related to non-local preemptible symbols by GOT indexes.
// All other entries go to the beginning of a dynsym in arbitrary order.
if (l.sym->isInGot() && r.sym->isInGot())
- return l.sym->gotIndex < r.sym->gotIndex;
+ return l.sym->getGotIdx() < r.sym->getGotIdx();
if (!l.sym->isInGot() && !r.sym->isInGot())
return false;
return !l.sym->isInGot();
@@ -2099,7 +2112,7 @@ void SymbolTableBaseSection::finalizeContents() {
// Only the main partition's dynsym indexes are stored in the symbols
// themselves. All other partitions use a lookup table.
- if (this == mainPart->dynSymTab) {
+ if (this == mainPart->dynSymTab.get()) {
size_t i = 0;
for (const SymbolTableEntry &s : symbols)
s.sym->dynsymIndex = ++i;
@@ -2116,9 +2129,8 @@ void SymbolTableBaseSection::finalizeContents() {
void SymbolTableBaseSection::sortSymTabSymbols() {
// Move all local symbols before global symbols.
auto e = std::stable_partition(
- symbols.begin(), symbols.end(), [](const SymbolTableEntry &s) {
- return s.sym->isLocal() || s.sym->computeBinding() == STB_LOCAL;
- });
+ symbols.begin(), symbols.end(),
+ [](const SymbolTableEntry &s) { return s.sym->isLocal(); });
size_t numLocals = e - symbols.begin();
getParent()->info = numLocals + 1;
@@ -2127,12 +2139,12 @@ void SymbolTableBaseSection::sortSymTabSymbols() {
// symbols, they are already naturally placed first in each group. That
// happens because STT_FILE is always the first symbol in the object and hence
// precede all other local symbols we add for a file.
- MapVector<InputFile *, std::vector<SymbolTableEntry>> arr;
+ MapVector<InputFile *, SmallVector<SymbolTableEntry, 0>> arr;
for (const SymbolTableEntry &s : llvm::make_range(symbols.begin(), e))
arr[s.sym->file].push_back(s);
auto i = symbols.begin();
- for (std::pair<InputFile *, std::vector<SymbolTableEntry>> &p : arr)
+ for (auto &p : arr)
for (SymbolTableEntry &entry : p.second)
*i++ = entry;
}
@@ -2146,7 +2158,7 @@ void SymbolTableBaseSection::addSymbol(Symbol *b) {
}
size_t SymbolTableBaseSection::getSymbolIndex(Symbol *sym) {
- if (this == mainPart->dynSymTab)
+ if (this == mainPart->dynSymTab.get())
return sym->dynsymIndex;
// Initializes symbol lookup tables lazily. This is used only for -r,
@@ -2183,8 +2195,6 @@ static BssSection *getCommonSec(Symbol *sym) {
}
static uint32_t getSymSectionIndex(Symbol *sym) {
- if (getCommonSec(sym))
- return SHN_COMMON;
assert(!(sym->needsCopy && sym->isObject()));
if (!isa<Defined>(sym) || sym->needsCopy)
return SHN_UNDEF;
@@ -2197,7 +2207,6 @@ static uint32_t getSymSectionIndex(Symbol *sym) {
// Write the internal symbol table contents to the output symbol table.
template <class ELFT> void SymbolTableSection<ELFT>::writeTo(uint8_t *buf) {
// The first entry is a null entry as per the ELF spec.
- memset(buf, 0, sizeof(Elf_Sym));
buf += sizeof(Elf_Sym);
auto *eSym = reinterpret_cast<Elf_Sym *>(buf);
@@ -2206,14 +2215,10 @@ template <class ELFT> void SymbolTableSection<ELFT>::writeTo(uint8_t *buf) {
Symbol *sym = ent.sym;
bool isDefinedHere = type == SHT_SYMTAB || sym->partition == partition;
- // Set st_info and st_other.
- eSym->st_other = 0;
- if (sym->isLocal()) {
- eSym->setBindingAndType(STB_LOCAL, sym->type);
- } else {
- eSym->setBindingAndType(sym->computeBinding(), sym->type);
- eSym->setVisibility(sym->visibility);
- }
+ // Set st_name, st_info and st_other.
+ eSym->st_name = ent.strTabOffset;
+ eSym->setBindingAndType(sym->binding, sym->type);
+ eSym->st_other = sym->visibility;
// The 3 most significant bits of st_other are used by OpenPOWER ABI.
// See getPPC64GlobalEntryToLocalEntryOffset() for more details.
@@ -2224,30 +2229,29 @@ template <class ELFT> void SymbolTableSection<ELFT>::writeTo(uint8_t *buf) {
else if (config->emachine == EM_AARCH64)
eSym->st_other |= sym->stOther & STO_AARCH64_VARIANT_PCS;
- eSym->st_name = ent.strTabOffset;
- if (isDefinedHere)
- eSym->st_shndx = getSymSectionIndex(ent.sym);
- else
- eSym->st_shndx = 0;
-
- // Copy symbol size if it is a defined symbol. st_size is not significant
- // for undefined symbols, so whether copying it or not is up to us if that's
- // the case. We'll leave it as zero because by not setting a value, we can
- // get the exact same outputs for two sets of input files that differ only
- // in undefined symbol size in DSOs.
- if (eSym->st_shndx == SHN_UNDEF || !isDefinedHere)
- eSym->st_size = 0;
- else
- eSym->st_size = sym->getSize();
-
- // st_value is usually an address of a symbol, but that has a special
- // meaning for uninstantiated common symbols (--no-define-common).
- if (BssSection *commonSec = getCommonSec(ent.sym))
+ if (BssSection *commonSec = getCommonSec(sym)) {
+ // st_value is usually an address of a symbol, but that has a special
+ // meaning for uninstantiated common symbols (--no-define-common).
+ eSym->st_shndx = SHN_COMMON;
eSym->st_value = commonSec->alignment;
- else if (isDefinedHere)
- eSym->st_value = sym->getVA();
- else
- eSym->st_value = 0;
+ eSym->st_size = cast<Defined>(sym)->size;
+ } else {
+ const uint32_t shndx = getSymSectionIndex(sym);
+ if (isDefinedHere) {
+ eSym->st_shndx = shndx;
+ eSym->st_value = sym->getVA();
+ // Copy symbol size if it is a defined symbol. st_size is not
+ // significant for undefined symbols, so whether copying it or not is up
+ // to us if that's the case. We'll leave it as zero because by not
+ // setting a value, we can get the exact same outputs for two sets of
+ // input files that differ only in undefined symbol size in DSOs.
+ eSym->st_size = shndx != SHN_UNDEF ? cast<Defined>(sym)->size : 0;
+ } else {
+ eSym->st_shndx = 0;
+ eSym->st_value = 0;
+ eSym->st_size = 0;
+ }
+ }
++eSym;
}
@@ -2298,7 +2302,7 @@ void SymtabShndxSection::writeTo(uint8_t *buf) {
// we need to write actual index, otherwise, we must write SHN_UNDEF(0).
buf += 4; // Ignore .symtab[0] entry.
for (const SymbolTableEntry &entry : in.symTab->getSymbols()) {
- if (getSymSectionIndex(entry.sym) == SHN_XINDEX)
+ if (!getCommonSec(entry.sym) && getSymSectionIndex(entry.sym) == SHN_XINDEX)
write32(buf, entry.sym->getOutputSection()->sectionIndex);
buf += 4;
}
@@ -2379,11 +2383,6 @@ void GnuHashTableSection::finalizeContents() {
}
void GnuHashTableSection::writeTo(uint8_t *buf) {
- // The output buffer is not guaranteed to be zero-cleared because we pre-
- // fill executable sections with trap instructions. This is a precaution
- // for that case, which happens only when --no-rosegment is given.
- memset(buf, 0, size);
-
// Write a header.
write32(buf, nBuckets);
write32(buf + 4, getPartition().dynSymTab->getNumSymbols() - symbols.size());
@@ -2466,8 +2465,9 @@ void GnuHashTableSection::addSymbols(SmallVectorImpl<SymbolTableEntry> &v) {
symbols.push_back({b, ent.strTabOffset, hash, bucketIdx});
}
- llvm::stable_sort(symbols, [](const Entry &l, const Entry &r) {
- return l.bucketIdx < r.bucketIdx;
+ llvm::sort(symbols, [](const Entry &l, const Entry &r) {
+ return std::tie(l.bucketIdx, l.strTabOffset) <
+ std::tie(r.bucketIdx, r.strTabOffset);
});
v.erase(mid, v.end());
@@ -2481,7 +2481,7 @@ HashTableSection::HashTableSection()
}
void HashTableSection::finalizeContents() {
- SymbolTableBaseSection *symTab = getPartition().dynSymTab;
+ SymbolTableBaseSection *symTab = getPartition().dynSymTab.get();
if (OutputSection *sec = symTab->getParent())
getParent()->link = sec->sectionIndex;
@@ -2495,11 +2495,7 @@ void HashTableSection::finalizeContents() {
}
void HashTableSection::writeTo(uint8_t *buf) {
- SymbolTableBaseSection *symTab = getPartition().dynSymTab;
-
- // See comment in GnuHashTableSection::writeTo.
- memset(buf, 0, size);
-
+ SymbolTableBaseSection *symTab = getPartition().dynSymTab.get();
unsigned numSymbols = symTab->getNumSymbols();
uint32_t *p = reinterpret_cast<uint32_t *>(buf);
@@ -2553,7 +2549,8 @@ void PltSection::writeTo(uint8_t *buf) {
}
void PltSection::addEntry(Symbol &sym) {
- sym.pltIndex = entries.size();
+ assert(sym.auxIdx == symAux.size() - 1);
+ symAux.back().pltIdx = entries.size();
entries.push_back(&sym);
}
@@ -2599,7 +2596,8 @@ size_t IpltSection::getSize() const {
}
void IpltSection::addEntry(Symbol &sym) {
- sym.pltIndex = entries.size();
+ assert(sym.auxIdx == symAux.size() - 1);
+ symAux.back().pltIdx = entries.size();
entries.push_back(&sym);
}
@@ -2808,7 +2806,7 @@ createSymbols(ArrayRef<std::vector<GdbIndexSection::NameAttrEntry>> nameAttrs,
// For each chunk, compute the number of compilation units preceding it.
uint32_t cuIdx = 0;
- std::vector<uint32_t> cuIdxs(chunks.size());
+ std::unique_ptr<uint32_t[]> cuIdxs(new uint32_t[chunks.size()]);
for (uint32_t i = 0, e = chunks.size(); i != e; ++i) {
cuIdxs[i] = cuIdx;
cuIdx += chunks[i].compilationUnits.size();
@@ -2824,11 +2822,13 @@ createSymbols(ArrayRef<std::vector<GdbIndexSection::NameAttrEntry>> nameAttrs,
numShards));
// A sharded map to uniquify symbols by name.
- std::vector<DenseMap<CachedHashStringRef, size_t>> map(numShards);
+ auto map =
+ std::make_unique<DenseMap<CachedHashStringRef, size_t>[]>(numShards);
size_t shift = 32 - countTrailingZeros(numShards);
// Instantiate GdbSymbols while uniqufying them by name.
- std::vector<std::vector<GdbSymbol>> symbols(numShards);
+ auto symbols = std::make_unique<std::vector<GdbSymbol>[]>(numShards);
+
parallelForEachN(0, concurrency, [&](size_t threadId) {
uint32_t i = 0;
for (ArrayRef<NameAttrEntry> entries : nameAttrs) {
@@ -2852,14 +2852,15 @@ createSymbols(ArrayRef<std::vector<GdbIndexSection::NameAttrEntry>> nameAttrs,
});
size_t numSymbols = 0;
- for (ArrayRef<GdbSymbol> v : symbols)
+ for (ArrayRef<GdbSymbol> v : makeArrayRef(symbols.get(), numShards))
numSymbols += v.size();
// The return type is a flattened vector, so we'll copy each vector
// contents to Ret.
std::vector<GdbSymbol> ret;
ret.reserve(numSymbols);
- for (std::vector<GdbSymbol> &vec : symbols)
+ for (std::vector<GdbSymbol> &vec :
+ makeMutableArrayRef(symbols.get(), numShards))
for (GdbSymbol &sym : vec)
ret.push_back(std::move(sym));
@@ -3019,8 +3020,7 @@ void EhFrameHeader::writeTo(uint8_t *buf) {
void EhFrameHeader::write() {
uint8_t *buf = Out::bufferStart + getParent()->offset + outSecOff;
using FdeData = EhFrameSection::FdeData;
-
- std::vector<FdeData> fdes = getPartition().ehFrame->getFdeData();
+ SmallVector<FdeData, 0> fdes = getPartition().ehFrame->getFdeData();
buf[0] = 1;
buf[1] = DW_EH_PE_pcrel | DW_EH_PE_sdata4;
@@ -3708,10 +3708,6 @@ static uint8_t getAbiVersion() {
}
template <typename ELFT> void elf::writeEhdr(uint8_t *buf, Partition &part) {
- // For executable segments, the trap instructions are written before writing
- // the header. Setting Elf header bytes to zero ensures that any unused bytes
- // in header are zero-cleared, instead of having trap instructions.
- memset(buf, 0, sizeof(typename ELFT::Ehdr));
memcpy(buf, "\177ELF", 4);
auto *eHdr = reinterpret_cast<typename ELFT::Ehdr *>(buf);
@@ -3799,8 +3795,9 @@ void PartitionIndexSection::writeTo(uint8_t *buf) {
write32(buf, mainPart->dynStrTab->getVA() + partitions[i].nameStrTab - va);
write32(buf + 4, partitions[i].elfHeader->getVA() - (va + 4));
- SyntheticSection *next =
- i == partitions.size() - 1 ? in.partEnd : partitions[i + 1].elfHeader;
+ SyntheticSection *next = i == partitions.size() - 1
+ ? in.partEnd.get()
+ : partitions[i + 1].elfHeader.get();
write32(buf + 8, next->getVA() - partitions[i].elfHeader->getVA());
va += 12;
@@ -3808,6 +3805,30 @@ void PartitionIndexSection::writeTo(uint8_t *buf) {
}
}
+void InStruct::reset() {
+ attributes.reset();
+ bss.reset();
+ bssRelRo.reset();
+ got.reset();
+ gotPlt.reset();
+ igotPlt.reset();
+ ppc64LongBranchTarget.reset();
+ mipsGot.reset();
+ mipsRldMap.reset();
+ partEnd.reset();
+ partIndex.reset();
+ plt.reset();
+ iplt.reset();
+ ppc32Got2.reset();
+ ibtPlt.reset();
+ relaPlt.reset();
+ relaIplt.reset();
+ shStrTab.reset();
+ strTab.reset();
+ symTab.reset();
+ symTabShndx.reset();
+}
+
InStruct elf::in;
std::vector<Partition> elf::partitions;
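The SyntheticSections changes turn the raw pointers in InStruct and Partition into std::unique_ptr and add InStruct::reset(), making ownership of the singleton synthetic sections explicit, presumably so a driver can tear the global state down between runs. A sketch of that ownership shape with illustrative stand-in types:

#include <cstdio>
#include <memory>

// Illustrative stand-in for a synthetic-section singleton.
struct StringTableSection {
  explicit StringTableSection(const char *name) { std::printf("create %s\n", name); }
  ~StringTableSection() { std::printf("destroy\n"); }
};

struct InLike {
  std::unique_ptr<StringTableSection> shStrTab;
  std::unique_ptr<StringTableSection> strTab;
  // Dropping the unique_ptrs destroys the sections and nulls the fields,
  // so a second link starts from a clean slate.
  void reset() {
    shStrTab.reset();
    strTab.reset();
  }
};

static InLike in;

int main() {
  in.shStrTab = std::make_unique<StringTableSection>(".shstrtab");
  in.strTab = std::make_unique<StringTableSection>(".strtab");
  in.reset(); // between-link cleanup
}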
diff --git a/contrib/llvm-project/lld/ELF/SyntheticSections.h b/contrib/llvm-project/lld/ELF/SyntheticSections.h
index c35e19cf2fb4..9f4073048ce5 100644
--- a/contrib/llvm-project/lld/ELF/SyntheticSections.h
+++ b/contrib/llvm-project/lld/ELF/SyntheticSections.h
@@ -34,7 +34,6 @@ namespace elf {
class Defined;
struct PhdrEntry;
class SymbolTableBaseSection;
-class VersionNeedBaseSection;
class SyntheticSection : public InputSection {
public:
@@ -87,7 +86,7 @@ public:
uint32_t fdeVARel;
};
- std::vector<FdeData> getFdeData() const;
+ SmallVector<FdeData, 0> getFdeData() const;
ArrayRef<CieRecord *> getCieRecords() const { return cieRecords; }
template <class ELFT>
void iterateFDEWithLSDA(llvm::function_ref<void(InputSection &)> fn);
@@ -130,8 +129,11 @@ public:
void writeTo(uint8_t *buf) override;
void addEntry(Symbol &sym);
+ bool addTlsDescEntry(Symbol &sym);
bool addDynTlsEntry(Symbol &sym);
bool addTlsIndex();
+ uint32_t getTlsDescOffset(const Symbol &sym) const;
+ uint64_t getTlsDescAddr(const Symbol &sym) const;
uint64_t getGlobalDynAddr(const Symbol &b) const;
uint64_t getGlobalDynOffset(const Symbol &b) const;
@@ -419,7 +421,7 @@ private:
uint64_t size = 0;
- llvm::DenseMap<StringRef, unsigned> stringMap;
+ llvm::DenseMap<llvm::CachedHashStringRef, unsigned> stringMap;
SmallVector<StringRef, 0> strings;
};
@@ -1203,24 +1205,24 @@ struct Partition {
StringRef name;
uint64_t nameStrTab;
- SyntheticSection *elfHeader;
- SyntheticSection *programHeaders;
- std::vector<PhdrEntry *> phdrs;
-
- ARMExidxSyntheticSection *armExidx;
- BuildIdSection *buildId;
- SyntheticSection *dynamic;
- StringTableSection *dynStrTab;
- SymbolTableBaseSection *dynSymTab;
- EhFrameHeader *ehFrameHdr;
- EhFrameSection *ehFrame;
- GnuHashTableSection *gnuHashTab;
- HashTableSection *hashTab;
- RelocationBaseSection *relaDyn;
- RelrBaseSection *relrDyn;
- VersionDefinitionSection *verDef;
- SyntheticSection *verNeed;
- VersionTableSection *verSym;
+ std::unique_ptr<SyntheticSection> elfHeader;
+ std::unique_ptr<SyntheticSection> programHeaders;
+ SmallVector<PhdrEntry *, 0> phdrs;
+
+ std::unique_ptr<ARMExidxSyntheticSection> armExidx;
+ std::unique_ptr<BuildIdSection> buildId;
+ std::unique_ptr<SyntheticSection> dynamic;
+ std::unique_ptr<StringTableSection> dynStrTab;
+ std::unique_ptr<SymbolTableBaseSection> dynSymTab;
+ std::unique_ptr<EhFrameHeader> ehFrameHdr;
+ std::unique_ptr<EhFrameSection> ehFrame;
+ std::unique_ptr<GnuHashTableSection> gnuHashTab;
+ std::unique_ptr<HashTableSection> hashTab;
+ std::unique_ptr<RelocationBaseSection> relaDyn;
+ std::unique_ptr<RelrBaseSection> relrDyn;
+ std::unique_ptr<VersionDefinitionSection> verDef;
+ std::unique_ptr<SyntheticSection> verNeed;
+ std::unique_ptr<VersionTableSection> verSym;
unsigned getNumber() const { return this - &partitions[0] + 1; }
};
@@ -1235,27 +1237,29 @@ inline Partition &SectionBase::getPartition() const {
// Linker generated sections which can be used as inputs and are not specific to
// a partition.
struct InStruct {
- InputSection *attributes;
- BssSection *bss;
- BssSection *bssRelRo;
- GotSection *got;
- GotPltSection *gotPlt;
- IgotPltSection *igotPlt;
- PPC64LongBranchTargetSection *ppc64LongBranchTarget;
- MipsGotSection *mipsGot;
- MipsRldMapSection *mipsRldMap;
- SyntheticSection *partEnd;
- SyntheticSection *partIndex;
- PltSection *plt;
- IpltSection *iplt;
- PPC32Got2Section *ppc32Got2;
- IBTPltSection *ibtPlt;
- RelocationBaseSection *relaPlt;
- RelocationBaseSection *relaIplt;
- StringTableSection *shStrTab;
- StringTableSection *strTab;
- SymbolTableBaseSection *symTab;
- SymtabShndxSection *symTabShndx;
+ std::unique_ptr<InputSection> attributes;
+ std::unique_ptr<BssSection> bss;
+ std::unique_ptr<BssSection> bssRelRo;
+ std::unique_ptr<GotSection> got;
+ std::unique_ptr<GotPltSection> gotPlt;
+ std::unique_ptr<IgotPltSection> igotPlt;
+ std::unique_ptr<PPC64LongBranchTargetSection> ppc64LongBranchTarget;
+ std::unique_ptr<MipsGotSection> mipsGot;
+ std::unique_ptr<MipsRldMapSection> mipsRldMap;
+ std::unique_ptr<SyntheticSection> partEnd;
+ std::unique_ptr<SyntheticSection> partIndex;
+ std::unique_ptr<PltSection> plt;
+ std::unique_ptr<IpltSection> iplt;
+ std::unique_ptr<PPC32Got2Section> ppc32Got2;
+ std::unique_ptr<IBTPltSection> ibtPlt;
+ std::unique_ptr<RelocationBaseSection> relaPlt;
+ std::unique_ptr<RelocationBaseSection> relaIplt;
+ std::unique_ptr<StringTableSection> shStrTab;
+ std::unique_ptr<StringTableSection> strTab;
+ std::unique_ptr<SymbolTableBaseSection> symTab;
+ std::unique_ptr<SymtabShndxSection> symTabShndx;
+
+ void reset();
};
extern InStruct in;
diff --git a/contrib/llvm-project/lld/ELF/Target.cpp b/contrib/llvm-project/lld/ELF/Target.cpp
index 88d3006f9a2d..f0e7ebfc64df 100644
--- a/contrib/llvm-project/lld/ELF/Target.cpp
+++ b/contrib/llvm-project/lld/ELF/Target.cpp
@@ -107,7 +107,7 @@ template <class ELFT> static ErrorPlace getErrPlace(const uint8_t *loc) {
continue;
}
if (isecLoc <= loc && loc < isecLoc + isec->getSize()) {
- auto objLoc = isec->template getLocation<ELFT>(loc - isecLoc);
+ std::string objLoc = isec->getLocation(loc - isecLoc);
// Return object file location and source file location.
// TODO: Refactor getSrcMsg not to take a variable.
Undefined dummy(nullptr, "", STB_LOCAL, 0, 0);
diff --git a/contrib/llvm-project/lld/ELF/Target.h b/contrib/llvm-project/lld/ELF/Target.h
index e0e97301ca98..f7b947ec3aa2 100644
--- a/contrib/llvm-project/lld/ELF/Target.h
+++ b/contrib/llvm-project/lld/ELF/Target.h
@@ -221,6 +221,16 @@ void addPPC64SaveRestore();
uint64_t getPPC64TocBase();
uint64_t getAArch64Page(uint64_t expr);
+class AArch64Relaxer {
+ bool safeToRelaxAdrpLdr = true;
+
+public:
+ explicit AArch64Relaxer(ArrayRef<Relocation> relocs);
+
+ bool tryRelaxAdrpLdr(const Relocation &adrpRel, const Relocation &ldrRel,
+ uint64_t secAddr, uint8_t *buf) const;
+};
+
extern const TargetInfo *target;
TargetInfo *getTarget();
diff --git a/contrib/llvm-project/lld/ELF/Thunks.cpp b/contrib/llvm-project/lld/ELF/Thunks.cpp
index 38de4db191f4..ae740810acb5 100644
--- a/contrib/llvm-project/lld/ELF/Thunks.cpp
+++ b/contrib/llvm-project/lld/ELF/Thunks.cpp
@@ -27,8 +27,7 @@
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
-#include "lld/Common/ErrorHandler.h"
-#include "lld/Common/Memory.h"
+#include "lld/Common/CommonLinkerContext.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Endian.h"
@@ -434,7 +433,7 @@ void AArch64ABSLongThunk::writeTo(uint8_t *buf) {
}
void AArch64ABSLongThunk::addSymbols(ThunkSection &isec) {
- addSymbol(saver.save("__AArch64AbsLongThunk_" + destination.getName()),
+ addSymbol(saver().save("__AArch64AbsLongThunk_" + destination.getName()),
STT_FUNC, 0, isec);
addSymbol("$x", STT_NOTYPE, 0, isec);
addSymbol("$d", STT_NOTYPE, 8, isec);
@@ -460,8 +459,8 @@ void AArch64ADRPThunk::writeTo(uint8_t *buf) {
}
void AArch64ADRPThunk::addSymbols(ThunkSection &isec) {
- addSymbol(saver.save("__AArch64ADRPThunk_" + destination.getName()), STT_FUNC,
- 0, isec);
+ addSymbol(saver().save("__AArch64ADRPThunk_" + destination.getName()),
+ STT_FUNC, 0, isec);
addSymbol("$x", STT_NOTYPE, 0, isec);
}
@@ -560,7 +559,7 @@ void ARMV7ABSLongThunk::writeLong(uint8_t *buf) {
}
void ARMV7ABSLongThunk::addSymbols(ThunkSection &isec) {
- addSymbol(saver.save("__ARMv7ABSLongThunk_" + destination.getName()),
+ addSymbol(saver().save("__ARMv7ABSLongThunk_" + destination.getName()),
STT_FUNC, 0, isec);
addSymbol("$a", STT_NOTYPE, 0, isec);
}
@@ -578,7 +577,7 @@ void ThumbV7ABSLongThunk::writeLong(uint8_t *buf) {
}
void ThumbV7ABSLongThunk::addSymbols(ThunkSection &isec) {
- addSymbol(saver.save("__Thumbv7ABSLongThunk_" + destination.getName()),
+ addSymbol(saver().save("__Thumbv7ABSLongThunk_" + destination.getName()),
STT_FUNC, 1, isec);
addSymbol("$t", STT_NOTYPE, 0, isec);
}
@@ -599,8 +598,8 @@ void ARMV7PILongThunk::writeLong(uint8_t *buf) {
}
void ARMV7PILongThunk::addSymbols(ThunkSection &isec) {
- addSymbol(saver.save("__ARMV7PILongThunk_" + destination.getName()), STT_FUNC,
- 0, isec);
+ addSymbol(saver().save("__ARMV7PILongThunk_" + destination.getName()),
+ STT_FUNC, 0, isec);
addSymbol("$a", STT_NOTYPE, 0, isec);
}
@@ -620,7 +619,7 @@ void ThumbV7PILongThunk::writeLong(uint8_t *buf) {
}
void ThumbV7PILongThunk::addSymbols(ThunkSection &isec) {
- addSymbol(saver.save("__ThumbV7PILongThunk_" + destination.getName()),
+ addSymbol(saver().save("__ThumbV7PILongThunk_" + destination.getName()),
STT_FUNC, 1, isec);
addSymbol("$t", STT_NOTYPE, 0, isec);
}
@@ -635,7 +634,7 @@ void ARMV5ABSLongThunk::writeLong(uint8_t *buf) {
}
void ARMV5ABSLongThunk::addSymbols(ThunkSection &isec) {
- addSymbol(saver.save("__ARMv5ABSLongThunk_" + destination.getName()),
+ addSymbol(saver().save("__ARMv5ABSLongThunk_" + destination.getName()),
STT_FUNC, 0, isec);
addSymbol("$a", STT_NOTYPE, 0, isec);
addSymbol("$d", STT_NOTYPE, 4, isec);
@@ -661,8 +660,8 @@ void ARMV5PILongThunk::writeLong(uint8_t *buf) {
}
void ARMV5PILongThunk::addSymbols(ThunkSection &isec) {
- addSymbol(saver.save("__ARMV5PILongThunk_" + destination.getName()), STT_FUNC,
- 0, isec);
+ addSymbol(saver().save("__ARMV5PILongThunk_" + destination.getName()),
+ STT_FUNC, 0, isec);
addSymbol("$a", STT_NOTYPE, 0, isec);
addSymbol("$d", STT_NOTYPE, 12, isec);
}
@@ -691,7 +690,7 @@ void ThumbV6MABSLongThunk::writeLong(uint8_t *buf) {
}
void ThumbV6MABSLongThunk::addSymbols(ThunkSection &isec) {
- addSymbol(saver.save("__Thumbv6MABSLongThunk_" + destination.getName()),
+ addSymbol(saver().save("__Thumbv6MABSLongThunk_" + destination.getName()),
STT_FUNC, 1, isec);
addSymbol("$t", STT_NOTYPE, 0, isec);
addSymbol("$d", STT_NOTYPE, 8, isec);
@@ -717,7 +716,7 @@ void ThumbV6MPILongThunk::writeLong(uint8_t *buf) {
}
void ThumbV6MPILongThunk::addSymbols(ThunkSection &isec) {
- addSymbol(saver.save("__Thumbv6MPILongThunk_" + destination.getName()),
+ addSymbol(saver().save("__Thumbv6MPILongThunk_" + destination.getName()),
STT_FUNC, 1, isec);
addSymbol("$t", STT_NOTYPE, 0, isec);
addSymbol("$d", STT_NOTYPE, 12, isec);
@@ -735,7 +734,7 @@ void MipsThunk::writeTo(uint8_t *buf) {
}
void MipsThunk::addSymbols(ThunkSection &isec) {
- addSymbol(saver.save("__LA25Thunk_" + destination.getName()), STT_FUNC, 0,
+ addSymbol(saver().save("__LA25Thunk_" + destination.getName()), STT_FUNC, 0,
isec);
}
@@ -758,8 +757,9 @@ void MicroMipsThunk::writeTo(uint8_t *buf) {
}
void MicroMipsThunk::addSymbols(ThunkSection &isec) {
- Defined *d = addSymbol(
- saver.save("__microLA25Thunk_" + destination.getName()), STT_FUNC, 0, isec);
+ Defined *d =
+ addSymbol(saver().save("__microLA25Thunk_" + destination.getName()),
+ STT_FUNC, 0, isec);
d->stOther |= STO_MIPS_MICROMIPS;
}
@@ -782,8 +782,9 @@ void MicroMipsR6Thunk::writeTo(uint8_t *buf) {
}
void MicroMipsR6Thunk::addSymbols(ThunkSection &isec) {
- Defined *d = addSymbol(
- saver.save("__microLA25Thunk_" + destination.getName()), STT_FUNC, 0, isec);
+ Defined *d =
+ addSymbol(saver().save("__microLA25Thunk_" + destination.getName()),
+ STT_FUNC, 0, isec);
d->stOther |= STO_MIPS_MICROMIPS;
}
@@ -843,7 +844,7 @@ void PPC32PltCallStub::addSymbols(ThunkSection &isec) {
else
os << ".plt_pic32.";
os << destination.getName();
- addSymbol(saver.save(os.str()), STT_FUNC, 0, isec);
+ addSymbol(saver().save(os.str()), STT_FUNC, 0, isec);
}
bool PPC32PltCallStub::isCompatibleWith(const InputSection &isec,
@@ -852,7 +853,7 @@ bool PPC32PltCallStub::isCompatibleWith(const InputSection &isec,
}
void PPC32LongThunk::addSymbols(ThunkSection &isec) {
- addSymbol(saver.save("__LongThunk_" + destination.getName()), STT_FUNC, 0,
+ addSymbol(saver().save("__LongThunk_" + destination.getName()), STT_FUNC, 0,
isec);
}
@@ -896,8 +897,8 @@ void PPC64PltCallStub::writeTo(uint8_t *buf) {
}
void PPC64PltCallStub::addSymbols(ThunkSection &isec) {
- Defined *s = addSymbol(saver.save("__plt_" + destination.getName()), STT_FUNC,
- 0, isec);
+ Defined *s = addSymbol(saver().save("__plt_" + destination.getName()),
+ STT_FUNC, 0, isec);
s->needsTocRestore = true;
s->file = destination.file;
}
@@ -947,7 +948,7 @@ void PPC64R2SaveStub::writeTo(uint8_t *buf) {
}
void PPC64R2SaveStub::addSymbols(ThunkSection &isec) {
- Defined *s = addSymbol(saver.save("__toc_save_" + destination.getName()),
+ Defined *s = addSymbol(saver().save("__toc_save_" + destination.getName()),
STT_FUNC, 0, isec);
s->needsTocRestore = true;
}
@@ -983,7 +984,7 @@ void PPC64R12SetupStub::writeTo(uint8_t *buf) {
}
void PPC64R12SetupStub::addSymbols(ThunkSection &isec) {
- addSymbol(saver.save("__gep_setup_" + destination.getName()), STT_FUNC, 0,
+ addSymbol(saver().save("__gep_setup_" + destination.getName()), STT_FUNC, 0,
isec);
}
@@ -1019,7 +1020,7 @@ void PPC64PCRelPLTStub::writeTo(uint8_t *buf) {
}
void PPC64PCRelPLTStub::addSymbols(ThunkSection &isec) {
- addSymbol(saver.save("__plt_pcrel_" + destination.getName()), STT_FUNC, 0,
+ addSymbol(saver().save("__plt_pcrel_" + destination.getName()), STT_FUNC, 0,
isec);
}
@@ -1035,7 +1036,7 @@ void PPC64LongBranchThunk::writeTo(uint8_t *buf) {
}
void PPC64LongBranchThunk::addSymbols(ThunkSection &isec) {
- addSymbol(saver.save("__long_branch_" + destination.getName()), STT_FUNC, 0,
+ addSymbol(saver().save("__long_branch_" + destination.getName()), STT_FUNC, 0,
isec);
}
diff --git a/contrib/llvm-project/lld/ELF/Writer.cpp b/contrib/llvm-project/lld/ELF/Writer.cpp
index 497e56886b72..69fcad390d61 100644
--- a/contrib/llvm-project/lld/ELF/Writer.cpp
+++ b/contrib/llvm-project/lld/ELF/Writer.cpp
@@ -20,8 +20,8 @@
#include "SyntheticSections.h"
#include "Target.h"
#include "lld/Common/Arrays.h"
+#include "lld/Common/CommonLinkerContext.h"
#include "lld/Common/Filesystem.h"
-#include "lld/Common/Memory.h"
#include "lld/Common/Strings.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSwitch.h"
@@ -55,7 +55,6 @@ public:
private:
void copyLocalSymbols();
void addSectionSymbols();
- void forEachRelSec(llvm::function_ref<void(InputSectionBase &)> fn);
void sortSections();
void resolveShfLinkOrder();
void finalizeAddressDependentContent();
@@ -65,7 +64,7 @@ private:
void checkExecuteOnly();
void setReservedSymbolSections();
- std::vector<PhdrEntry *> createPhdrs(Partition &part);
+ SmallVector<PhdrEntry *, 0> createPhdrs(Partition &part);
void addPhdrForSection(Partition &part, unsigned shType, unsigned pType,
unsigned pFlags);
void assignFileOffsets();
@@ -100,7 +99,7 @@ template <class ELFT> void elf::writeResult() {
Writer<ELFT>().run();
}
-static void removeEmptyPTLoad(std::vector<PhdrEntry *> &phdrs) {
+static void removeEmptyPTLoad(SmallVector<PhdrEntry *, 0> &phdrs) {
auto it = std::stable_partition(
phdrs.begin(), phdrs.end(), [&](const PhdrEntry *p) {
if (p->p_type != PT_LOAD)
@@ -121,7 +120,7 @@ static void removeEmptyPTLoad(std::vector<PhdrEntry *> &phdrs) {
}
void elf::copySectionsIntoPartitions() {
- std::vector<InputSectionBase *> newSections;
+ SmallVector<InputSectionBase *, 0> newSections;
for (unsigned part = 2; part != partitions.size() + 1; ++part) {
for (InputSectionBase *s : inputSections) {
if (!(s->flags & SHF_ALLOC) || !s->isLive())
@@ -299,18 +298,18 @@ template <class ELFT> void elf::createSyntheticSections() {
auto add = [](SyntheticSection &sec) { inputSections.push_back(&sec); };
- in.shStrTab = make<StringTableSection>(".shstrtab", false);
+ in.shStrTab = std::make_unique<StringTableSection>(".shstrtab", false);
Out::programHeaders = make<OutputSection>("", 0, SHF_ALLOC);
Out::programHeaders->alignment = config->wordsize;
if (config->strip != StripPolicy::All) {
- in.strTab = make<StringTableSection>(".strtab", false);
- in.symTab = make<SymbolTableSection<ELFT>>(*in.strTab);
- in.symTabShndx = make<SymtabShndxSection>();
+ in.strTab = std::make_unique<StringTableSection>(".strtab", false);
+ in.symTab = std::make_unique<SymbolTableSection<ELFT>>(*in.strTab);
+ in.symTabShndx = std::make_unique<SymtabShndxSection>();
}
- in.bss = make<BssSection>(".bss", 0, 1);
+ in.bss = std::make_unique<BssSection>(".bss", 0, 1);
add(*in.bss);
// If there is a SECTIONS command and a .data.rel.ro section name use name
@@ -318,14 +317,14 @@ template <class ELFT> void elf::createSyntheticSections() {
// This makes sure our relro is contiguous.
bool hasDataRelRo =
script->hasSectionsCommand && findSection(".data.rel.ro", 0);
- in.bssRelRo =
- make<BssSection>(hasDataRelRo ? ".data.rel.ro.bss" : ".bss.rel.ro", 0, 1);
+ in.bssRelRo = std::make_unique<BssSection>(
+ hasDataRelRo ? ".data.rel.ro.bss" : ".bss.rel.ro", 0, 1);
add(*in.bssRelRo);
// Add MIPS-specific sections.
if (config->emachine == EM_MIPS) {
if (!config->shared && config->hasDynSymTab) {
- in.mipsRldMap = make<MipsRldMapSection>();
+ in.mipsRldMap = std::make_unique<MipsRldMapSection>();
add(*in.mipsRldMap);
}
if (auto *sec = MipsAbiFlagsSection<ELFT>::create())
@@ -345,49 +344,52 @@ template <class ELFT> void elf::createSyntheticSections() {
};
if (!part.name.empty()) {
- part.elfHeader = make<PartitionElfHeaderSection<ELFT>>();
+ part.elfHeader = std::make_unique<PartitionElfHeaderSection<ELFT>>();
part.elfHeader->name = part.name;
add(*part.elfHeader);
- part.programHeaders = make<PartitionProgramHeadersSection<ELFT>>();
+ part.programHeaders =
+ std::make_unique<PartitionProgramHeadersSection<ELFT>>();
add(*part.programHeaders);
}
if (config->buildId != BuildIdKind::None) {
- part.buildId = make<BuildIdSection>();
+ part.buildId = std::make_unique<BuildIdSection>();
add(*part.buildId);
}
- part.dynStrTab = make<StringTableSection>(".dynstr", true);
- part.dynSymTab = make<SymbolTableSection<ELFT>>(*part.dynStrTab);
- part.dynamic = make<DynamicSection<ELFT>>();
+ part.dynStrTab = std::make_unique<StringTableSection>(".dynstr", true);
+ part.dynSymTab =
+ std::make_unique<SymbolTableSection<ELFT>>(*part.dynStrTab);
+ part.dynamic = std::make_unique<DynamicSection<ELFT>>();
if (config->androidPackDynRelocs)
- part.relaDyn = make<AndroidPackedRelocationSection<ELFT>>(relaDynName);
- else
part.relaDyn =
- make<RelocationSection<ELFT>>(relaDynName, config->zCombreloc);
+ std::make_unique<AndroidPackedRelocationSection<ELFT>>(relaDynName);
+ else
+ part.relaDyn = std::make_unique<RelocationSection<ELFT>>(
+ relaDynName, config->zCombreloc);
if (config->hasDynSymTab) {
add(*part.dynSymTab);
- part.verSym = make<VersionTableSection>();
+ part.verSym = std::make_unique<VersionTableSection>();
add(*part.verSym);
if (!namedVersionDefs().empty()) {
- part.verDef = make<VersionDefinitionSection>();
+ part.verDef = std::make_unique<VersionDefinitionSection>();
add(*part.verDef);
}
- part.verNeed = make<VersionNeedSection<ELFT>>();
+ part.verNeed = std::make_unique<VersionNeedSection<ELFT>>();
add(*part.verNeed);
if (config->gnuHash) {
- part.gnuHashTab = make<GnuHashTableSection>();
+ part.gnuHashTab = std::make_unique<GnuHashTableSection>();
add(*part.gnuHashTab);
}
if (config->sysvHash) {
- part.hashTab = make<HashTableSection>();
+ part.hashTab = std::make_unique<HashTableSection>();
add(*part.hashTab);
}
@@ -397,23 +399,23 @@ template <class ELFT> void elf::createSyntheticSections() {
}
if (config->relrPackDynRelocs) {
- part.relrDyn = make<RelrSection<ELFT>>();
+ part.relrDyn = std::make_unique<RelrSection<ELFT>>();
add(*part.relrDyn);
}
if (!config->relocatable) {
if (config->ehFrameHdr) {
- part.ehFrameHdr = make<EhFrameHeader>();
+ part.ehFrameHdr = std::make_unique<EhFrameHeader>();
add(*part.ehFrameHdr);
}
- part.ehFrame = make<EhFrameSection>();
+ part.ehFrame = std::make_unique<EhFrameSection>();
add(*part.ehFrame);
}
if (config->emachine == EM_ARM && !config->relocatable) {
// The ARMExidxsyntheticsection replaces all the individual .ARM.exidx
// InputSections.
- part.armExidx = make<ARMExidxSyntheticSection>();
+ part.armExidx = std::make_unique<ARMExidxSyntheticSection>();
add(*part.armExidx);
}
}
@@ -422,13 +424,14 @@ template <class ELFT> void elf::createSyntheticSections() {
// Create the partition end marker. This needs to be in partition number 255
// so that it is sorted after all other partitions. It also has other
// special handling (see createPhdrs() and combineEhSections()).
- in.partEnd = make<BssSection>(".part.end", config->maxPageSize, 1);
+ in.partEnd =
+ std::make_unique<BssSection>(".part.end", config->maxPageSize, 1);
in.partEnd->partition = 255;
add(*in.partEnd);
- in.partIndex = make<PartitionIndexSection>();
- addOptionalRegular("__part_index_begin", in.partIndex, 0);
- addOptionalRegular("__part_index_end", in.partIndex,
+ in.partIndex = std::make_unique<PartitionIndexSection>();
+ addOptionalRegular("__part_index_begin", in.partIndex.get(), 0);
+ addOptionalRegular("__part_index_end", in.partIndex.get(),
in.partIndex->getSize());
add(*in.partIndex);
}
@@ -436,26 +439,26 @@ template <class ELFT> void elf::createSyntheticSections() {
// Add .got. MIPS' .got is so different from the other archs,
// it has its own class.
if (config->emachine == EM_MIPS) {
- in.mipsGot = make<MipsGotSection>();
+ in.mipsGot = std::make_unique<MipsGotSection>();
add(*in.mipsGot);
} else {
- in.got = make<GotSection>();
+ in.got = std::make_unique<GotSection>();
add(*in.got);
}
if (config->emachine == EM_PPC) {
- in.ppc32Got2 = make<PPC32Got2Section>();
+ in.ppc32Got2 = std::make_unique<PPC32Got2Section>();
add(*in.ppc32Got2);
}
if (config->emachine == EM_PPC64) {
- in.ppc64LongBranchTarget = make<PPC64LongBranchTargetSection>();
+ in.ppc64LongBranchTarget = std::make_unique<PPC64LongBranchTargetSection>();
add(*in.ppc64LongBranchTarget);
}
- in.gotPlt = make<GotPltSection>();
+ in.gotPlt = std::make_unique<GotPltSection>();
add(*in.gotPlt);
- in.igotPlt = make<IgotPltSection>();
+ in.igotPlt = std::make_unique<IgotPltSection>();
add(*in.igotPlt);
// _GLOBAL_OFFSET_TABLE_ is defined relative to either .got.plt or .got. Treat
@@ -472,7 +475,7 @@ template <class ELFT> void elf::createSyntheticSections() {
// We always need to add rel[a].plt to output if it has entries.
// Even for static linking it can contain R_[*]_IRELATIVE relocations.
- in.relaPlt = make<RelocationSection<ELFT>>(
+ in.relaPlt = std::make_unique<RelocationSection<ELFT>>(
config->isRela ? ".rela.plt" : ".rel.plt", /*sort=*/false);
add(*in.relaPlt);
@@ -482,21 +485,23 @@ template <class ELFT> void elf::createSyntheticSections() {
// that would cause a section type mismatch. However, because the Android
// dynamic loader reads .rel.plt after .rel.dyn, we can get the desired
// behaviour by placing the iplt section in .rel.plt.
- in.relaIplt = make<RelocationSection<ELFT>>(
+ in.relaIplt = std::make_unique<RelocationSection<ELFT>>(
config->androidPackDynRelocs ? in.relaPlt->name : relaDynName,
/*sort=*/false);
add(*in.relaIplt);
if ((config->emachine == EM_386 || config->emachine == EM_X86_64) &&
(config->andFeatures & GNU_PROPERTY_X86_FEATURE_1_IBT)) {
- in.ibtPlt = make<IBTPltSection>();
+ in.ibtPlt = std::make_unique<IBTPltSection>();
add(*in.ibtPlt);
}
- in.plt = config->emachine == EM_PPC ? make<PPC32GlinkSection>()
- : make<PltSection>();
+ if (config->emachine == EM_PPC)
+ in.plt = std::make_unique<PPC32GlinkSection>();
+ else
+ in.plt = std::make_unique<PltSection>();
add(*in.plt);
- in.iplt = make<IpltSection>();
+ in.iplt = std::make_unique<IpltSection>();
add(*in.iplt);
if (config->andFeatures)
@@ -532,8 +537,6 @@ template <class ELFT> void Writer<ELFT>::run() {
// finalizeSections does that.
finalizeSections();
checkExecuteOnly();
- if (errorCount())
- return;
// If --compressed-debug-sections is specified, compress .debug_* sections.
// Do it right now because it changes the size of output sections.
@@ -557,9 +560,6 @@ template <class ELFT> void Writer<ELFT>::run() {
for (Partition &part : partitions)
setPhdrs(part);
- if (config->relocatable)
- for (OutputSection *sec : outputSections)
- sec->addr = 0;
// Handle --print-map(-M)/--Map, --why-extract=, --cref and
// --print-archive-stats=. Dump them before checkSections() because the files
@@ -1030,26 +1030,6 @@ template <class ELFT> void Writer<ELFT>::addRelIpltSymbols() {
Out::elfHeader, 0, STV_HIDDEN);
}
-template <class ELFT>
-void Writer<ELFT>::forEachRelSec(
- llvm::function_ref<void(InputSectionBase &)> fn) {
- // Scan all relocations. Each relocation goes through a series
- // of tests to determine if it needs special treatment, such as
- // creating GOT, PLT, copy relocations, etc.
- // Note that relocations for non-alloc sections are directly
- // processed by InputSection::relocateNonAlloc.
- for (InputSectionBase *isec : inputSections)
- if (isec->isLive() && isa<InputSection>(isec) && (isec->flags & SHF_ALLOC))
- fn(*isec);
- for (Partition &part : partitions) {
- for (EhInputSection *es : part.ehFrame->sections)
- fn(*es);
- if (part.armExidx && part.armExidx->isLive())
- for (InputSection *ex : part.armExidx->exidxSections)
- fn(*ex);
- }
-}
-
// This function generates assignments for predefined symbols (e.g. _end or
// _etext) and inserts them into the commands sequence to be processed at the
// appropriate time. This ensures that the value is going to be correct by the
@@ -1059,17 +1039,17 @@ template <class ELFT> void Writer<ELFT>::setReservedSymbolSections() {
if (ElfSym::globalOffsetTable) {
// The _GLOBAL_OFFSET_TABLE_ symbol is defined by target convention usually
// to the start of the .got or .got.plt section.
- InputSection *gotSection = in.gotPlt;
+ InputSection *sec = in.gotPlt.get();
if (!target->gotBaseSymInGotPlt)
- gotSection = in.mipsGot ? cast<InputSection>(in.mipsGot)
- : cast<InputSection>(in.got);
- ElfSym::globalOffsetTable->section = gotSection;
+ sec = in.mipsGot.get() ? cast<InputSection>(in.mipsGot.get())
+ : cast<InputSection>(in.got.get());
+ ElfSym::globalOffsetTable->section = sec;
}
// .rela_iplt_{start,end} mark the start and the end of in.relaIplt.
if (ElfSym::relaIpltStart && in.relaIplt->isNeeded()) {
- ElfSym::relaIpltStart->section = in.relaIplt;
- ElfSym::relaIpltEnd->section = in.relaIplt;
+ ElfSym::relaIpltStart->section = in.relaIplt.get();
+ ElfSym::relaIpltEnd->section = in.relaIplt.get();
ElfSym::relaIpltEnd->value = in.relaIplt->getSize();
}
@@ -1170,9 +1150,9 @@ static bool shouldSkip(SectionCommand *cmd) {
// We want to place orphan sections so that they share as much
// characteristics with their neighbors as possible. For example, if
// both are rw, or both are tls.
-static std::vector<SectionCommand *>::iterator
-findOrphanPos(std::vector<SectionCommand *>::iterator b,
- std::vector<SectionCommand *>::iterator e) {
+static SmallVectorImpl<SectionCommand *>::iterator
+findOrphanPos(SmallVectorImpl<SectionCommand *>::iterator b,
+ SmallVectorImpl<SectionCommand *>::iterator e) {
OutputSection *sec = cast<OutputSection>(*e);
// Find the first element that has as close a rank as possible.
@@ -1209,9 +1189,9 @@ findOrphanPos(std::vector<SectionCommand *>::iterator b,
auto *os = dyn_cast<OutputSection>(cmd);
return os && os->hasInputSections;
};
- auto j = std::find_if(llvm::make_reverse_iterator(i),
- llvm::make_reverse_iterator(b),
- isOutputSecWithInputSections);
+ auto j =
+ std::find_if(std::make_reverse_iterator(i), std::make_reverse_iterator(b),
+ isOutputSecWithInputSections);
i = j.base();
// As a special case, if the orphan section is the last section, put
@@ -1284,14 +1264,14 @@ static DenseMap<const InputSectionBase *, int> buildSectionOrder() {
// Build a map from symbols to their priorities. Symbols that didn't
// appear in the symbol ordering file have the lowest priority 0.
// All explicitly mentioned symbols have negative (higher) priorities.
- DenseMap<StringRef, SymbolOrderEntry> symbolOrder;
+ DenseMap<CachedHashStringRef, SymbolOrderEntry> symbolOrder;
int priority = -config->symbolOrderingFile.size();
for (StringRef s : config->symbolOrderingFile)
- symbolOrder.insert({s, {priority++, false}});
+ symbolOrder.insert({CachedHashStringRef(s), {priority++, false}});
// Build a map from sections to their priorities.
auto addSym = [&](Symbol &sym) {
- auto it = symbolOrder.find(sym.getName());
+ auto it = symbolOrder.find(CachedHashStringRef(sym.getName()));
if (it == symbolOrder.end())
return;
SymbolOrderEntry &ent = it->second;
@@ -1310,20 +1290,16 @@ static DenseMap<const InputSectionBase *, int> buildSectionOrder() {
// We want both global and local symbols. We get the global ones from the
// symbol table and iterate the object files for the local ones.
for (Symbol *sym : symtab->symbols())
- if (!sym->isLazy())
- addSym(*sym);
+ addSym(*sym);
for (ELFFileBase *file : objectFiles)
- for (Symbol *sym : file->getSymbols()) {
- if (!sym->isLocal())
- break;
+ for (Symbol *sym : file->getLocalSymbols())
addSym(*sym);
- }
if (config->warnSymbolOrdering)
for (auto orderEntry : symbolOrder)
if (!orderEntry.second.present)
- warn("symbol ordering file: no such symbol: " + orderEntry.first);
+ warn("symbol ordering file: no such symbol: " + orderEntry.first.val());
return sectionOrder;
}
@@ -1332,8 +1308,8 @@ static DenseMap<const InputSectionBase *, int> buildSectionOrder() {
static void
sortISDBySectionOrder(InputSectionDescription *isd,
const DenseMap<const InputSectionBase *, int> &order) {
- std::vector<InputSection *> unorderedSections;
- std::vector<std::pair<InputSection *, int>> orderedSections;
+ SmallVector<InputSection *, 0> unorderedSections;
+ SmallVector<std::pair<InputSection *, int>, 0> orderedSections;
uint64_t unorderedSize = 0;
for (InputSection *isec : isd->sections) {
@@ -1646,7 +1622,7 @@ template <class ELFT> void Writer<ELFT>::finalizeAddressDependentContent() {
// can assign Virtual Addresses to OutputSections that are not monotonically
// increasing.
for (Partition &part : partitions)
- finalizeSynthetic(part.armExidx);
+ finalizeSynthetic(part.armExidx.get());
resolveShfLinkOrder();
// Converts call x@GDPLT to call __tls_get_addr
@@ -1699,6 +1675,10 @@ template <class ELFT> void Writer<ELFT>::finalizeAddressDependentContent() {
}
}
+ if (config->relocatable)
+ for (OutputSection *sec : outputSections)
+ sec->addr = 0;
+
// If addrExpr is set, the address may not be a multiple of the alignment.
// Warn because this is error-prone.
for (SectionCommand *cmd : script->sectionCommands)
@@ -1766,21 +1746,19 @@ template <class ELFT> void Writer<ELFT>::optimizeBasicBlockJumps() {
// jump to the following section as it is not required.
// 2. If there are two consecutive jump instructions, it checks
// if they can be flipped and one can be deleted.
- for (OutputSection *os : outputSections) {
- if (!(os->flags & SHF_EXECINSTR))
+ for (OutputSection *osec : outputSections) {
+ if (!(osec->flags & SHF_EXECINSTR))
continue;
- std::vector<InputSection *> sections = getInputSections(os);
- std::vector<unsigned> result(sections.size());
+ SmallVector<InputSection *, 0> sections = getInputSections(*osec);
+ size_t numDeleted = 0;
// Delete all fall through jump instructions. Also, check if two
// consecutive jump instructions can be flipped so that a fall
// through jmp instruction can be deleted.
- parallelForEachN(0, sections.size(), [&](size_t i) {
+ for (size_t i = 0, e = sections.size(); i != e; ++i) {
InputSection *next = i + 1 < sections.size() ? sections[i + 1] : nullptr;
- InputSection &is = *sections[i];
- result[i] =
- target->deleteFallThruJmpInsn(is, is.getFile<ELFT>(), next) ? 1 : 0;
- });
- size_t numDeleted = std::count(result.begin(), result.end(), 1);
+ InputSection &sec = *sections[i];
+ numDeleted += target->deleteFallThruJmpInsn(sec, sec.file, next);
+ }
if (numDeleted > 0) {
script->assignAddresses();
LLVM_DEBUG(llvm::dbgs()
@@ -1790,11 +1768,9 @@ template <class ELFT> void Writer<ELFT>::optimizeBasicBlockJumps() {
fixSymbolsAfterShrinking();
- for (OutputSection *os : outputSections) {
- std::vector<InputSection *> sections = getInputSections(os);
- for (InputSection *is : sections)
+ for (OutputSection *osec : outputSections)
+ for (InputSection *is : getInputSections(*osec))
is->trim();
- }
}
// In order to allow users to manipulate linker-synthesized sections,
@@ -1866,9 +1842,9 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
// Even the author of gold doesn't remember why gold behaves that way.
// https://sourceware.org/ml/binutils/2002-03/msg00360.html
if (mainPart->dynamic->parent)
- symtab->addSymbol(Defined{/*file=*/nullptr, "_DYNAMIC", STB_WEAK,
- STV_HIDDEN, STT_NOTYPE,
- /*value=*/0, /*size=*/0, mainPart->dynamic});
+ symtab->addSymbol(
+ Defined{/*file=*/nullptr, "_DYNAMIC", STB_WEAK, STV_HIDDEN, STT_NOTYPE,
+ /*value=*/0, /*size=*/0, mainPart->dynamic.get()});
// Define __rel[a]_iplt_{start,end} symbols if needed.
addRelIpltSymbols();
@@ -1911,11 +1887,14 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
// pieces. The relocation scan uses those pieces, so this has to be
// earlier.
for (Partition &part : partitions)
- finalizeSynthetic(part.ehFrame);
+ finalizeSynthetic(part.ehFrame.get());
}
- for (Symbol *sym : symtab->symbols())
- sym->isPreemptible = computeIsPreemptible(*sym);
+ if (config->hasDynSymTab) {
+ parallelForEach(symtab->symbols(), [](Symbol *sym) {
+ sym->isPreemptible = computeIsPreemptible(*sym);
+ });
+ }
// Change values of linker-script-defined symbols from placeholders (assigned
// by declareSymbols) to actual definitions.
@@ -1929,7 +1908,21 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
// a linker-script-defined symbol is absolute.
ppc64noTocRelax.clear();
if (!config->relocatable) {
- forEachRelSec(scanRelocations<ELFT>);
+ // Scan all relocations. Each relocation goes through a series of tests to
+ // determine if it needs special treatment, such as creating GOT, PLT,
+ // copy relocations, etc. Note that relocations for non-alloc sections are
+ // directly processed by InputSection::relocateNonAlloc.
+ for (InputSectionBase *sec : inputSections)
+ if (sec->isLive() && isa<InputSection>(sec) && (sec->flags & SHF_ALLOC))
+ scanRelocations<ELFT>(*sec);
+ for (Partition &part : partitions) {
+ for (EhInputSection *sec : part.ehFrame->sections)
+ scanRelocations<ELFT>(*sec);
+ if (part.armExidx && part.armExidx->isLive())
+ for (InputSection *sec : part.armExidx->exidxSections)
+ scanRelocations<ELFT>(*sec);
+ }
+
reportUndefinedSymbols<ELFT>();
postScanRelocations();
}
@@ -1955,7 +1948,7 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
for (SharedFile *file : sharedFiles) {
bool allNeededIsKnown =
llvm::all_of(file->dtNeeded, [&](StringRef needed) {
- return symtab->soNames.count(needed);
+ return symtab->soNames.count(CachedHashStringRef(needed));
});
if (!allNeededIsKnown)
continue;
@@ -1973,6 +1966,8 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
for (Symbol *sym : symtab->symbols()) {
if (!sym->isUsedInRegularObj || !includeInSymtab(*sym))
continue;
+ if (!config->relocatable)
+ sym->binding = sym->computeBinding();
if (in.symTab)
in.symTab->addSymbol(sym);
@@ -1997,10 +1992,6 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
}
}
- // Do not proceed if there was an undefined symbol.
- if (errorCount())
- return;
-
if (in.mipsGot)
in.mipsGot->build();
@@ -2076,35 +2067,35 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
{
llvm::TimeTraceScope timeScope("Finalize synthetic sections");
- finalizeSynthetic(in.bss);
- finalizeSynthetic(in.bssRelRo);
- finalizeSynthetic(in.symTabShndx);
- finalizeSynthetic(in.shStrTab);
- finalizeSynthetic(in.strTab);
- finalizeSynthetic(in.got);
- finalizeSynthetic(in.mipsGot);
- finalizeSynthetic(in.igotPlt);
- finalizeSynthetic(in.gotPlt);
- finalizeSynthetic(in.relaIplt);
- finalizeSynthetic(in.relaPlt);
- finalizeSynthetic(in.plt);
- finalizeSynthetic(in.iplt);
- finalizeSynthetic(in.ppc32Got2);
- finalizeSynthetic(in.partIndex);
+ finalizeSynthetic(in.bss.get());
+ finalizeSynthetic(in.bssRelRo.get());
+ finalizeSynthetic(in.symTabShndx.get());
+ finalizeSynthetic(in.shStrTab.get());
+ finalizeSynthetic(in.strTab.get());
+ finalizeSynthetic(in.got.get());
+ finalizeSynthetic(in.mipsGot.get());
+ finalizeSynthetic(in.igotPlt.get());
+ finalizeSynthetic(in.gotPlt.get());
+ finalizeSynthetic(in.relaIplt.get());
+ finalizeSynthetic(in.relaPlt.get());
+ finalizeSynthetic(in.plt.get());
+ finalizeSynthetic(in.iplt.get());
+ finalizeSynthetic(in.ppc32Got2.get());
+ finalizeSynthetic(in.partIndex.get());
// Dynamic section must be the last one in this list and dynamic
// symbol table section (dynSymTab) must be the first one.
for (Partition &part : partitions) {
- finalizeSynthetic(part.dynSymTab);
- finalizeSynthetic(part.gnuHashTab);
- finalizeSynthetic(part.hashTab);
- finalizeSynthetic(part.verDef);
- finalizeSynthetic(part.relaDyn);
- finalizeSynthetic(part.relrDyn);
- finalizeSynthetic(part.ehFrameHdr);
- finalizeSynthetic(part.verSym);
- finalizeSynthetic(part.verNeed);
- finalizeSynthetic(part.dynamic);
+ finalizeSynthetic(part.dynSymTab.get());
+ finalizeSynthetic(part.gnuHashTab.get());
+ finalizeSynthetic(part.hashTab.get());
+ finalizeSynthetic(part.verDef.get());
+ finalizeSynthetic(part.relaDyn.get());
+ finalizeSynthetic(part.relrDyn.get());
+ finalizeSynthetic(part.ehFrameHdr.get());
+ finalizeSynthetic(part.verSym.get());
+ finalizeSynthetic(part.verNeed.get());
+ finalizeSynthetic(part.dynamic.get());
}
}
@@ -2133,6 +2124,8 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
// sometimes using forward symbol declarations. We want to set the correct
// values. They also might change after adding the thunks.
finalizeAddressDependentContent();
+
+ // All information needed for OutputSection part of Map file is available.
if (errorCount())
return;
@@ -2140,8 +2133,8 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
llvm::TimeTraceScope timeScope("Finalize synthetic sections");
// finalizeAddressDependentContent may have added local symbols to the
// static symbol table.
- finalizeSynthetic(in.symTab);
- finalizeSynthetic(in.ppc64LongBranchTarget);
+ finalizeSynthetic(in.symTab.get());
+ finalizeSynthetic(in.ppc64LongBranchTarget.get());
}
// Relaxation to delete inter-basic block jumps created by basic block
@@ -2164,12 +2157,13 @@ template <class ELFT> void Writer<ELFT>::checkExecuteOnly() {
if (!config->executeOnly)
return;
- for (OutputSection *os : outputSections)
- if (os->flags & SHF_EXECINSTR)
- for (InputSection *isec : getInputSections(os))
+ for (OutputSection *osec : outputSections)
+ if (osec->flags & SHF_EXECINSTR)
+ for (InputSection *isec : getInputSections(*osec))
if (!(isec->flags & SHF_EXECINSTR))
- error("cannot place " + toString(isec) + " into " + toString(os->name) +
- ": -execute-only does not support intermingling data and code");
+ error("cannot place " + toString(isec) + " into " +
+ toString(osec->name) +
+ ": --execute-only does not support intermingling data and code");
}
// The linker is expected to define SECNAME_start and SECNAME_end
@@ -2223,9 +2217,9 @@ void Writer<ELFT>::addStartStopSymbols(OutputSection *sec) {
StringRef s = sec->name;
if (!isValidCIdentifier(s))
return;
- addOptionalRegular(saver.save("__start_" + s), sec, 0,
+ addOptionalRegular(saver().save("__start_" + s), sec, 0,
config->zStartStopVisibility);
- addOptionalRegular(saver.save("__stop_" + s), sec, -1,
+ addOptionalRegular(saver().save("__stop_" + s), sec, -1,
config->zStartStopVisibility);
}
@@ -2258,8 +2252,8 @@ static uint64_t computeFlags(uint64_t flags) {
// Decide which program headers to create and which sections to include in each
// one.
template <class ELFT>
-std::vector<PhdrEntry *> Writer<ELFT>::createPhdrs(Partition &part) {
- std::vector<PhdrEntry *> ret;
+SmallVector<PhdrEntry *, 0> Writer<ELFT>::createPhdrs(Partition &part) {
+ SmallVector<PhdrEntry *, 0> ret;
auto addHdr = [&](unsigned type, unsigned flags) -> PhdrEntry * {
ret.push_back(make<PhdrEntry>(type, flags));
return ret.back();
@@ -2858,8 +2852,8 @@ template <class ELFT> void Writer<ELFT>::writeTrapInstr() {
// Fill the last page.
for (PhdrEntry *p : part.phdrs)
if (p->p_type == PT_LOAD && (p->p_flags & PF_X))
- fillTrap(Out::bufferStart + alignDown(p->firstSec->offset + p->p_filesz,
- config->maxPageSize),
+ fillTrap(Out::bufferStart +
+ alignDown(p->firstSec->offset + p->p_filesz, 4),
Out::bufferStart + alignTo(p->firstSec->offset + p->p_filesz,
config->maxPageSize));
@@ -2909,15 +2903,16 @@ computeHash(llvm::MutableArrayRef<uint8_t> hashBuf,
llvm::ArrayRef<uint8_t> data,
std::function<void(uint8_t *dest, ArrayRef<uint8_t> arr)> hashFn) {
std::vector<ArrayRef<uint8_t>> chunks = split(data, 1024 * 1024);
- std::vector<uint8_t> hashes(chunks.size() * hashBuf.size());
+ const size_t hashesSize = chunks.size() * hashBuf.size();
+ std::unique_ptr<uint8_t[]> hashes(new uint8_t[hashesSize]);
// Compute hash values.
parallelForEachN(0, chunks.size(), [&](size_t i) {
- hashFn(hashes.data() + i * hashBuf.size(), chunks[i]);
+ hashFn(hashes.get() + i * hashBuf.size(), chunks[i]);
});
// Write to the final output buffer.
- hashFn(hashBuf.data(), hashes);
+ hashFn(hashBuf.data(), makeArrayRef(hashes.get(), hashesSize));
}
template <class ELFT> void Writer<ELFT>::writeBuildId() {
@@ -2932,34 +2927,35 @@ template <class ELFT> void Writer<ELFT>::writeBuildId() {
// Compute a hash of all sections of the output file.
size_t hashSize = mainPart->buildId->hashSize;
- std::vector<uint8_t> buildId(hashSize);
- llvm::ArrayRef<uint8_t> buf{Out::bufferStart, size_t(fileSize)};
+ std::unique_ptr<uint8_t[]> buildId(new uint8_t[hashSize]);
+ MutableArrayRef<uint8_t> output(buildId.get(), hashSize);
+ llvm::ArrayRef<uint8_t> input{Out::bufferStart, size_t(fileSize)};
switch (config->buildId) {
case BuildIdKind::Fast:
- computeHash(buildId, buf, [](uint8_t *dest, ArrayRef<uint8_t> arr) {
+ computeHash(output, input, [](uint8_t *dest, ArrayRef<uint8_t> arr) {
write64le(dest, xxHash64(arr));
});
break;
case BuildIdKind::Md5:
- computeHash(buildId, buf, [&](uint8_t *dest, ArrayRef<uint8_t> arr) {
+ computeHash(output, input, [&](uint8_t *dest, ArrayRef<uint8_t> arr) {
memcpy(dest, MD5::hash(arr).data(), hashSize);
});
break;
case BuildIdKind::Sha1:
- computeHash(buildId, buf, [&](uint8_t *dest, ArrayRef<uint8_t> arr) {
+ computeHash(output, input, [&](uint8_t *dest, ArrayRef<uint8_t> arr) {
memcpy(dest, SHA1::hash(arr).data(), hashSize);
});
break;
case BuildIdKind::Uuid:
- if (auto ec = llvm::getRandomBytes(buildId.data(), hashSize))
+ if (auto ec = llvm::getRandomBytes(buildId.get(), hashSize))
error("entropy source failure: " + ec.message());
break;
default:
llvm_unreachable("unknown BuildIdKind");
}
for (Partition &part : partitions)
- part.buildId->writeBuildId(buildId);
+ part.buildId->writeBuildId(output);
}
template void elf::createSyntheticSections<ELF32LE>();
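
Editor's note: the computeHash()/writeBuildId() hunks just above keep the existing two-level build-ID scheme while replacing std::vector buffers with raw arrays: the output image is split into 1 MiB chunks, each chunk is hashed in parallel, and the concatenated per-chunk digests are hashed once more into the final ID. A rough sketch of that scheme under the same assumptions, with the chunking done inline instead of via lld's split() helper:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/Parallel.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>

static void hashInChunks(
    llvm::MutableArrayRef<uint8_t> out, llvm::ArrayRef<uint8_t> data,
    std::function<void(uint8_t *dest, llvm::ArrayRef<uint8_t> arr)> hashFn) {
  const size_t chunkSize = 1024 * 1024;
  const size_t numChunks = (data.size() + chunkSize - 1) / chunkSize;
  // One digest of out.size() bytes per chunk, computed in parallel.
  std::unique_ptr<uint8_t[]> digests(new uint8_t[numChunks * out.size()]);
  llvm::parallelForEachN(0, numChunks, [&](size_t i) {
    size_t len = std::min(chunkSize, data.size() - i * chunkSize);
    hashFn(digests.get() + i * out.size(), data.slice(i * chunkSize, len));
  });
  // The final build ID is the hash of all per-chunk digests.
  hashFn(out.data(), llvm::makeArrayRef(digests.get(), numChunks * out.size()));
}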
diff --git a/contrib/llvm-project/lld/MachO/Arch/ARM.cpp b/contrib/llvm-project/lld/MachO/Arch/ARM.cpp
index 42c7b8930868..4dda94d7b0f3 100644
--- a/contrib/llvm-project/lld/MachO/Arch/ARM.cpp
+++ b/contrib/llvm-project/lld/MachO/Arch/ARM.cpp
@@ -116,7 +116,7 @@ void ARM::relocateOne(uint8_t *loc, const Reloc &r, uint64_t value,
return;
} else if (isBlx && !defined->thumb) {
Bitfield::set<Cond>(base, 0xe); // unconditional BL
- Bitfield::set<BitfieldFlag<24>>(base, 1);
+ Bitfield::set<BitfieldFlag<24>>(base, true);
isBlx = false;
}
} else {
diff --git a/contrib/llvm-project/lld/MachO/ConcatOutputSection.cpp b/contrib/llvm-project/lld/MachO/ConcatOutputSection.cpp
index 9f71e81b073a..4fae93469b5f 100644
--- a/contrib/llvm-project/lld/MachO/ConcatOutputSection.cpp
+++ b/contrib/llvm-project/lld/MachO/ConcatOutputSection.cpp
@@ -13,8 +13,7 @@
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
-#include "lld/Common/ErrorHandler.h"
-#include "lld/Common/Memory.h"
+#include "lld/Common/CommonLinkerContext.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/TimeProfiler.h"
@@ -244,7 +243,7 @@ void ConcatOutputSection::finalize() {
// contains several branch instructions in succession, then the distance
// from the current position to the position where the thunks are inserted
// grows. So leave room for a bunch of thunks.
- unsigned slop = 100 * thunkSize;
+ unsigned slop = 256 * thunkSize;
while (finalIdx < endIdx && isecAddr + inputs[finalIdx]->getSize() <
isecVA + forwardBranchRange - slop)
finalizeOne(inputs[finalIdx++]);
@@ -322,8 +321,8 @@ void ConcatOutputSection::finalize() {
// get written are happy.
thunkInfo.isec->live = true;
- StringRef thunkName = saver.save(funcSym->getName() + ".thunk." +
- std::to_string(thunkInfo.sequence++));
+ StringRef thunkName = saver().save(funcSym->getName() + ".thunk." +
+ std::to_string(thunkInfo.sequence++));
r.referent = thunkInfo.sym = symtab->addDefined(
thunkName, /*file=*/nullptr, thunkInfo.isec, /*value=*/0,
/*size=*/thunkSize, /*isWeakDef=*/false, /*isPrivateExtern=*/true,
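
Editor's note: the ConcatOutputSection hunk raises the thunk reserve ("slop") from 100 to 256 thunk-sizes. A small sketch of the budget check that reserve feeds into, with made-up types and finalizeOne() reduced to an address bump; this only illustrates the range arithmetic, not MachO thunk creation itself:

#include <cstddef>
#include <cstdint>
#include <vector>

struct InputChunk { uint64_t size; }; // placeholder for an input section

size_t countDirectlyReachable(const std::vector<InputChunk> &inputs,
                              uint64_t sectionVA, uint64_t forwardBranchRange,
                              uint64_t thunkSize) {
  const uint64_t slop = 256 * thunkSize;   // room for a burst of thunks
  uint64_t addr = sectionVA;
  size_t finalIdx = 0;
  // Keep finalizing while sections stay comfortably inside forward branch
  // range; everything past finalIdx may still need a thunk inserted.
  while (finalIdx < inputs.size() &&
         addr + inputs[finalIdx].size < sectionVA + forwardBranchRange - slop) {
    addr += inputs[finalIdx].size;         // stand-in for finalizeOne()
    ++finalIdx;
  }
  return finalIdx;
}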
diff --git a/contrib/llvm-project/lld/MachO/Config.h b/contrib/llvm-project/lld/MachO/Config.h
index 42528185e57c..e6849a39d03a 100644
--- a/contrib/llvm-project/lld/MachO/Config.h
+++ b/contrib/llvm-project/lld/MachO/Config.h
@@ -12,6 +12,7 @@
#include "llvm/ADT/CachedHashString.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/BinaryFormat/MachO.h"
@@ -27,6 +28,7 @@
namespace lld {
namespace macho {
+class InputSection;
class Symbol;
struct SymbolPriorityEntry;
@@ -169,6 +171,12 @@ struct Configuration {
std::vector<SegmentProtection> segmentProtections;
llvm::DenseMap<llvm::StringRef, SymbolPriorityEntry> priorities;
+ llvm::MapVector<std::pair<const InputSection *, const InputSection *>,
+ uint64_t>
+ callGraphProfile;
+ bool callGraphProfileSort = false;
+ llvm::StringRef printSymbolOrder;
+
SectionRenameMap sectionRenameMap;
SegmentRenameMap segmentRenameMap;
@@ -181,7 +189,7 @@ struct Configuration {
llvm::MachO::Architecture arch() const { return platformInfo.target.Arch; }
- llvm::MachO::PlatformKind platform() const {
+ llvm::MachO::PlatformType platform() const {
return platformInfo.target.Platform;
}
};
@@ -207,7 +215,7 @@ enum class ForceLoad {
No, // Never load the archive, regardless of other flags
};
-extern Configuration *config;
+extern std::unique_ptr<Configuration> config;
} // namespace macho
} // namespace lld
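
Editor's note: the Config.h hunk adds a callGraphProfile MapVector keyed by (caller section, callee section) plus a callGraphProfileSort flag. A minimal sketch of how such a profile is accumulated, with InputSection reduced to an empty placeholder; the real entries are produced by the __cg_profile parsing added to InputFiles.cpp later in this patch:

#include "llvm/ADT/MapVector.h"
#include <cstdint>
#include <utility>

struct InputSection {}; // placeholder only

using CallGraphProfile =
    llvm::MapVector<std::pair<const InputSection *, const InputSection *>,
                    uint64_t>;

// Edge weights from multiple object files are summed; MapVector keeps
// first-insertion order, which makes the result deterministic.
void addCallGraphEdge(CallGraphProfile &profile, const InputSection *from,
                      const InputSection *to, uint64_t count) {
  profile[{from, to}] += count;
}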
diff --git a/contrib/llvm-project/lld/MachO/Driver.cpp b/contrib/llvm-project/lld/MachO/Driver.cpp
index e9d65d3c73f2..e4c9f4dd6024 100644
--- a/contrib/llvm-project/lld/MachO/Driver.cpp
+++ b/contrib/llvm-project/lld/MachO/Driver.cpp
@@ -15,6 +15,7 @@
#include "ObjC.h"
#include "OutputSection.h"
#include "OutputSegment.h"
+#include "SectionPriorities.h"
#include "SymbolTable.h"
#include "Symbols.h"
#include "SyntheticSections.h"
@@ -59,8 +60,8 @@ using namespace llvm::sys;
using namespace lld;
using namespace lld::macho;
-Configuration *macho::config;
-DependencyTracker *macho::depTracker;
+std::unique_ptr<Configuration> macho::config;
+std::unique_ptr<DependencyTracker> macho::depTracker;
static HeaderFileType getOutputType(const InputArgList &args) {
// TODO: -r, -dylinker, -preload...
@@ -128,7 +129,7 @@ static Optional<StringRef> findFramework(StringRef name) {
// only append suffix if realpath() succeeds
Twine suffixed = location + suffix;
if (fs::exists(suffixed))
- return resolvedFrameworks[key] = saver.save(suffixed.str());
+ return resolvedFrameworks[key] = saver().save(suffixed.str());
}
// Suffix lookup failed, fall through to the no-suffix case.
}
@@ -165,7 +166,7 @@ getSearchPaths(unsigned optionCode, InputArgList &args,
path::append(buffer, path);
// Do not warn about paths that are computed via the syslib roots
if (fs::is_directory(buffer)) {
- paths.push_back(saver.save(buffer.str()));
+ paths.push_back(saver().save(buffer.str()));
found = true;
}
}
@@ -183,7 +184,7 @@ getSearchPaths(unsigned optionCode, InputArgList &args,
SmallString<261> buffer(root);
path::append(buffer, path);
if (fs::is_directory(buffer))
- paths.push_back(saver.save(buffer.str()));
+ paths.push_back(saver().save(buffer.str()));
}
}
return paths;
@@ -249,7 +250,8 @@ static llvm::CachePruningPolicy getLTOCachePolicy(InputArgList &args) {
static DenseMap<StringRef, ArchiveFile *> loadedArchives;
static InputFile *addFile(StringRef path, ForceLoad forceLoadArchive,
- bool isExplicit = true, bool isBundleLoader = false) {
+ bool isLazy = false, bool isExplicit = true,
+ bool isBundleLoader = false) {
Optional<MemoryBufferRef> buffer = readFile(path);
if (!buffer)
return nullptr;
@@ -319,7 +321,7 @@ static InputFile *addFile(StringRef path, ForceLoad forceLoadArchive,
break;
}
case file_magic::macho_object:
- newFile = make<ObjFile>(mbref, getModTime(path), "");
+ newFile = make<ObjFile>(mbref, getModTime(path), "", isLazy);
break;
case file_magic::macho_dynamically_linked_shared_lib:
case file_magic::macho_dynamically_linked_shared_lib_stub:
@@ -331,7 +333,7 @@ static InputFile *addFile(StringRef path, ForceLoad forceLoadArchive,
}
break;
case file_magic::bitcode:
- newFile = make<BitcodeFile>(mbref, "", 0);
+ newFile = make<BitcodeFile>(mbref, "", 0, isLazy);
break;
case file_magic::macho_executable:
case file_magic::macho_bundle:
@@ -346,9 +348,20 @@ static InputFile *addFile(StringRef path, ForceLoad forceLoadArchive,
error(path + ": unhandled file type");
}
if (newFile && !isa<DylibFile>(newFile)) {
+ if ((isa<ObjFile>(newFile) || isa<BitcodeFile>(newFile)) && newFile->lazy &&
+ config->forceLoadObjC) {
+ for (Symbol *sym : newFile->symbols)
+ if (sym && sym->getName().startswith(objc::klass)) {
+ extract(*newFile, "-ObjC");
+ break;
+ }
+ if (newFile->lazy && hasObjCSection(mbref))
+ extract(*newFile, "-ObjC");
+ }
+
// printArchiveMemberLoad() prints both .a and .o names, so no need to
- // print the .a name here.
- if (config->printEachFile && magic != file_magic::archive)
+ // print the .a name here. Similarly skip lazy files.
+ if (config->printEachFile && magic != file_magic::archive && !isLazy)
message(toString(newFile));
inputFiles.insert(newFile);
}
@@ -360,7 +373,7 @@ static void addLibrary(StringRef name, bool isNeeded, bool isWeak,
ForceLoad forceLoadArchive) {
if (Optional<StringRef> path = findLibrary(name)) {
if (auto *dylibFile = dyn_cast_or_null<DylibFile>(
- addFile(*path, forceLoadArchive, isExplicit))) {
+ addFile(*path, forceLoadArchive, /*isLazy=*/false, isExplicit))) {
if (isNeeded)
dylibFile->forceNeeded = true;
if (isWeak)
@@ -380,7 +393,7 @@ static void addFramework(StringRef name, bool isNeeded, bool isWeak,
ForceLoad forceLoadArchive) {
if (Optional<StringRef> path = findFramework(name)) {
if (auto *dylibFile = dyn_cast_or_null<DylibFile>(
- addFile(*path, forceLoadArchive, isExplicit))) {
+ addFile(*path, forceLoadArchive, /*isLazy=*/false, isExplicit))) {
if (isNeeded)
dylibFile->forceNeeded = true;
if (isWeak)
@@ -425,13 +438,13 @@ void macho::parseLCLinkerOption(InputFile *f, unsigned argc, StringRef data) {
}
}
-static void addFileList(StringRef path) {
+static void addFileList(StringRef path, bool isLazy) {
Optional<MemoryBufferRef> buffer = readFile(path);
if (!buffer)
return;
MemoryBufferRef mbref = *buffer;
for (StringRef path : args::getLines(mbref))
- addFile(rerootPath(path), ForceLoad::Default);
+ addFile(rerootPath(path), ForceLoad::Default, isLazy);
}
// An order file has one entry per line, in the following format:
@@ -448,60 +461,6 @@ static void addFileList(StringRef path) {
// entry (the one nearest to the front of the list.)
//
// The file can also have line comments that start with '#'.
-static void parseOrderFile(StringRef path) {
- Optional<MemoryBufferRef> buffer = readFile(path);
- if (!buffer) {
- error("Could not read order file at " + path);
- return;
- }
-
- MemoryBufferRef mbref = *buffer;
- size_t priority = std::numeric_limits<size_t>::max();
- for (StringRef line : args::getLines(mbref)) {
- StringRef objectFile, symbol;
- line = line.take_until([](char c) { return c == '#'; }); // ignore comments
- line = line.ltrim();
-
- CPUType cpuType = StringSwitch<CPUType>(line)
- .StartsWith("i386:", CPU_TYPE_I386)
- .StartsWith("x86_64:", CPU_TYPE_X86_64)
- .StartsWith("arm:", CPU_TYPE_ARM)
- .StartsWith("arm64:", CPU_TYPE_ARM64)
- .StartsWith("ppc:", CPU_TYPE_POWERPC)
- .StartsWith("ppc64:", CPU_TYPE_POWERPC64)
- .Default(CPU_TYPE_ANY);
-
- if (cpuType != CPU_TYPE_ANY && cpuType != target->cpuType)
- continue;
-
- // Drop the CPU type as well as the colon
- if (cpuType != CPU_TYPE_ANY)
- line = line.drop_until([](char c) { return c == ':'; }).drop_front();
-
- constexpr std::array<StringRef, 2> fileEnds = {".o:", ".o):"};
- for (StringRef fileEnd : fileEnds) {
- size_t pos = line.find(fileEnd);
- if (pos != StringRef::npos) {
- // Split the string around the colon
- objectFile = line.take_front(pos + fileEnd.size() - 1);
- line = line.drop_front(pos + fileEnd.size());
- break;
- }
- }
- symbol = line.trim();
-
- if (!symbol.empty()) {
- SymbolPriorityEntry &entry = config->priorities[symbol];
- if (!objectFile.empty())
- entry.objectFiles.insert(std::make_pair(objectFile, priority));
- else
- entry.anyObjectFile = std::max(entry.anyObjectFile, priority);
- }
-
- --priority;
- }
-}
-
// We expect sub-library names of the form "libfoo", which will match a dylib
// with a path of .*/libfoo.{dylib, tbd}.
// XXX ld64 seems to ignore the extension entirely when matching sub-libraries;
@@ -545,7 +504,8 @@ static void compileBitcodeFiles() {
auto *lto = make<BitcodeCompiler>();
for (InputFile *file : inputFiles)
if (auto *bitcodeFile = dyn_cast<BitcodeFile>(file))
- lto->add(*bitcodeFile);
+ if (!file->lazy)
+ lto->add(*bitcodeFile);
for (ObjFile *file : lto->compile())
inputFiles.insert(file);
@@ -629,11 +589,11 @@ static std::string lowerDash(StringRef s) {
}
// Has the side-effect of setting Config::platformInfo.
-static PlatformKind parsePlatformVersion(const ArgList &args) {
+static PlatformType parsePlatformVersion(const ArgList &args) {
const Arg *arg = args.getLastArg(OPT_platform_version);
if (!arg) {
error("must specify -platform_version");
- return PlatformKind::unknown;
+ return PLATFORM_UNKNOWN;
}
StringRef platformStr = arg->getValue(0);
@@ -641,20 +601,20 @@ static PlatformKind parsePlatformVersion(const ArgList &args) {
StringRef sdkVersionStr = arg->getValue(2);
// TODO(compnerd) see if we can generate this case list via XMACROS
- PlatformKind platform =
- StringSwitch<PlatformKind>(lowerDash(platformStr))
- .Cases("macos", "1", PlatformKind::macOS)
- .Cases("ios", "2", PlatformKind::iOS)
- .Cases("tvos", "3", PlatformKind::tvOS)
- .Cases("watchos", "4", PlatformKind::watchOS)
- .Cases("bridgeos", "5", PlatformKind::bridgeOS)
- .Cases("mac-catalyst", "6", PlatformKind::macCatalyst)
- .Cases("ios-simulator", "7", PlatformKind::iOSSimulator)
- .Cases("tvos-simulator", "8", PlatformKind::tvOSSimulator)
- .Cases("watchos-simulator", "9", PlatformKind::watchOSSimulator)
- .Cases("driverkit", "10", PlatformKind::driverKit)
- .Default(PlatformKind::unknown);
- if (platform == PlatformKind::unknown)
+ PlatformType platform =
+ StringSwitch<PlatformType>(lowerDash(platformStr))
+ .Cases("macos", "1", PLATFORM_MACOS)
+ .Cases("ios", "2", PLATFORM_IOS)
+ .Cases("tvos", "3", PLATFORM_TVOS)
+ .Cases("watchos", "4", PLATFORM_WATCHOS)
+ .Cases("bridgeos", "5", PLATFORM_BRIDGEOS)
+ .Cases("mac-catalyst", "6", PLATFORM_MACCATALYST)
+ .Cases("ios-simulator", "7", PLATFORM_IOSSIMULATOR)
+ .Cases("tvos-simulator", "8", PLATFORM_TVOSSIMULATOR)
+ .Cases("watchos-simulator", "9", PLATFORM_WATCHOSSIMULATOR)
+ .Cases("driverkit", "10", PLATFORM_DRIVERKIT)
+ .Default(PLATFORM_UNKNOWN);
+ if (platform == PLATFORM_UNKNOWN)
error(Twine("malformed platform: ") + platformStr);
// TODO: check validity of version strings, which varies by platform
// NOTE: ld64 accepts version strings with 5 components
@@ -675,7 +635,7 @@ static TargetInfo *createTargetInfo(InputArgList &args) {
return nullptr;
}
- PlatformKind platform = parsePlatformVersion(args);
+ PlatformType platform = parsePlatformVersion(args);
config->platformInfo.target =
MachO::Target(getArchitectureFromName(archName), platform);
@@ -861,27 +821,27 @@ static std::vector<SectionAlign> parseSectAlign(const opt::InputArgList &args) {
return sectAligns;
}
-PlatformKind macho::removeSimulator(PlatformKind platform) {
+PlatformType macho::removeSimulator(PlatformType platform) {
switch (platform) {
- case PlatformKind::iOSSimulator:
- return PlatformKind::iOS;
- case PlatformKind::tvOSSimulator:
- return PlatformKind::tvOS;
- case PlatformKind::watchOSSimulator:
- return PlatformKind::watchOS;
+ case PLATFORM_IOSSIMULATOR:
+ return PLATFORM_IOS;
+ case PLATFORM_TVOSSIMULATOR:
+ return PLATFORM_TVOS;
+ case PLATFORM_WATCHOSSIMULATOR:
+ return PLATFORM_WATCHOS;
default:
return platform;
}
}
static bool dataConstDefault(const InputArgList &args) {
- static const std::vector<std::pair<PlatformKind, VersionTuple>> minVersion = {
- {PlatformKind::macOS, VersionTuple(10, 15)},
- {PlatformKind::iOS, VersionTuple(13, 0)},
- {PlatformKind::tvOS, VersionTuple(13, 0)},
- {PlatformKind::watchOS, VersionTuple(6, 0)},
- {PlatformKind::bridgeOS, VersionTuple(4, 0)}};
- PlatformKind platform = removeSimulator(config->platformInfo.target.Platform);
+ static const std::vector<std::pair<PlatformType, VersionTuple>> minVersion = {
+ {PLATFORM_MACOS, VersionTuple(10, 15)},
+ {PLATFORM_IOS, VersionTuple(13, 0)},
+ {PLATFORM_TVOS, VersionTuple(13, 0)},
+ {PLATFORM_WATCHOS, VersionTuple(6, 0)},
+ {PLATFORM_BRIDGEOS, VersionTuple(4, 0)}};
+ PlatformType platform = removeSimulator(config->platformInfo.target.Platform);
auto it = llvm::find_if(minVersion,
[&](const auto &p) { return p.first == platform; });
if (it != minVersion.end())
@@ -962,6 +922,7 @@ static void createFiles(const InputArgList &args) {
TimeTraceScope timeScope("Load input files");
// This loop should be reserved for options whose exact ordering matters.
// Other options should be handled via filtered() and/or getLastArg().
+ bool isLazy = false;
for (const Arg *arg : args) {
const Option &opt = arg->getOption();
warnIfDeprecatedOption(opt);
@@ -969,7 +930,7 @@ static void createFiles(const InputArgList &args) {
switch (opt.getID()) {
case OPT_INPUT:
- addFile(rerootPath(arg->getValue()), ForceLoad::Default);
+ addFile(rerootPath(arg->getValue()), ForceLoad::Default, isLazy);
break;
case OPT_needed_library:
if (auto *dylibFile = dyn_cast_or_null<DylibFile>(
@@ -989,7 +950,7 @@ static void createFiles(const InputArgList &args) {
dylibFile->forceWeakImport = true;
break;
case OPT_filelist:
- addFileList(arg->getValue());
+ addFileList(arg->getValue(), isLazy);
break;
case OPT_force_load:
addFile(rerootPath(arg->getValue()), ForceLoad::Yes);
@@ -1011,6 +972,16 @@ static void createFiles(const InputArgList &args) {
opt.getID() == OPT_reexport_framework, /*isExplicit=*/true,
ForceLoad::Default);
break;
+ case OPT_start_lib:
+ if (isLazy)
+ error("nested --start-lib");
+ isLazy = true;
+ break;
+ case OPT_end_lib:
+ if (!isLazy)
+ error("stray --end-lib");
+ isLazy = false;
+ break;
default:
break;
}
@@ -1083,14 +1054,14 @@ static void referenceStubBinder() {
symtab->addUndefined("dyld_stub_binder", /*file=*/nullptr, /*isWeak=*/false);
}
-bool macho::link(ArrayRef<const char *> argsArr, bool canExitEarly,
- raw_ostream &stdoutOS, raw_ostream &stderrOS) {
- lld::stdoutOS = &stdoutOS;
- lld::stderrOS = &stderrOS;
-
- errorHandler().cleanupCallback = []() {
- freeArena();
+bool macho::link(ArrayRef<const char *> argsArr, llvm::raw_ostream &stdoutOS,
+ llvm::raw_ostream &stderrOS, bool exitEarly,
+ bool disableOutput) {
+ // This driver-specific context will be freed later by lldMain().
+ auto *ctx = new CommonLinkerContext;
+ ctx->e.initialize(stdoutOS, stderrOS, exitEarly, disableOutput);
+ ctx->e.cleanupCallback = []() {
resolvedFrameworks.clear();
resolvedLibraries.clear();
cachedReads.clear();
@@ -1111,17 +1082,15 @@ bool macho::link(ArrayRef<const char *> argsArr, bool canExitEarly,
InputFile::resetIdCount();
};
- errorHandler().logName = args::getFilenameWithoutExe(argsArr[0]);
- stderrOS.enable_colors(stderrOS.has_colors());
+ ctx->e.logName = args::getFilenameWithoutExe(argsArr[0]);
MachOOptTable parser;
InputArgList args = parser.parse(argsArr.slice(1));
- errorHandler().errorLimitExceededMsg =
- "too many errors emitted, stopping now "
- "(use --error-limit=0 to see all errors)";
- errorHandler().errorLimit = args::getInteger(args, OPT_error_limit_eq, 20);
- errorHandler().verbose = args.hasArg(OPT_verbose);
+ ctx->e.errorLimitExceededMsg = "too many errors emitted, stopping now "
+ "(use --error-limit=0 to see all errors)";
+ ctx->e.errorLimit = args::getInteger(args, OPT_error_limit_eq, 20);
+ ctx->e.verbose = args.hasArg(OPT_verbose);
if (args.hasArg(OPT_help_hidden)) {
parser.printHelp(argsArr[0], /*showHidden=*/true);
@@ -1136,11 +1105,11 @@ bool macho::link(ArrayRef<const char *> argsArr, bool canExitEarly,
return true;
}
- config = make<Configuration>();
- symtab = make<SymbolTable>();
+ config = std::make_unique<Configuration>();
+ symtab = std::make_unique<SymbolTable>();
target = createTargetInfo(args);
- depTracker =
- make<DependencyTracker>(args.getLastArgValue(OPT_dependency_info));
+ depTracker = std::make_unique<DependencyTracker>(
+ args.getLastArgValue(OPT_dependency_info));
if (errorCount())
return false;
@@ -1165,7 +1134,7 @@ bool macho::link(ArrayRef<const char *> argsArr, bool canExitEarly,
// these are meaningful for our text based stripping
if (config->osoPrefix.equals(".") || config->osoPrefix.endswith(sep))
expanded += sep;
- config->osoPrefix = saver.save(expanded.str());
+ config->osoPrefix = saver().save(expanded.str());
}
}
@@ -1228,7 +1197,8 @@ bool macho::link(ArrayRef<const char *> argsArr, bool canExitEarly,
if (const Arg *arg = args.getLastArg(OPT_bundle_loader)) {
if (config->outputType != MH_BUNDLE)
error("-bundle_loader can only be used with MachO bundle output");
- addFile(arg->getValue(), ForceLoad::Default, /*isExplicit=*/false,
+ addFile(arg->getValue(), ForceLoad::Default, /*isLazy=*/false,
+ /*isExplicit=*/false,
/*isBundleLoader=*/true);
}
if (const Arg *arg = args.getLastArg(OPT_umbrella)) {
@@ -1246,7 +1216,7 @@ bool macho::link(ArrayRef<const char *> argsArr, bool canExitEarly,
config->thinLTOCacheDir = args.getLastArgValue(OPT_cache_path_lto);
config->thinLTOCachePolicy = getLTOCachePolicy(args);
config->runtimePaths = args::getStrings(args, OPT_rpath);
- config->allLoad = args.hasArg(OPT_all_load);
+ config->allLoad = args.hasFlag(OPT_all_load, OPT_noall_load, false);
config->archMultiple = args.hasArg(OPT_arch_multiple);
config->applicationExtension = args.hasFlag(
OPT_application_extension, OPT_no_application_extension, false);
@@ -1262,16 +1232,20 @@ bool macho::link(ArrayRef<const char *> argsArr, bool canExitEarly,
config->emitDataInCodeInfo =
args.hasFlag(OPT_data_in_code_info, OPT_no_data_in_code_info, true);
config->icfLevel = getICFLevel(args);
- config->dedupLiterals = args.hasArg(OPT_deduplicate_literals) ||
- config->icfLevel != ICFLevel::none;
+ config->dedupLiterals =
+ args.hasFlag(OPT_deduplicate_literals, OPT_icf_eq, false) ||
+ config->icfLevel != ICFLevel::none;
config->warnDylibInstallName = args.hasFlag(
OPT_warn_dylib_install_name, OPT_no_warn_dylib_install_name, false);
+ config->callGraphProfileSort = args.hasFlag(
+ OPT_call_graph_profile_sort, OPT_no_call_graph_profile_sort, true);
+ config->printSymbolOrder = args.getLastArgValue(OPT_print_symbol_order);
// FIXME: Add a commandline flag for this too.
config->zeroModTime = getenv("ZERO_AR_DATE");
- std::array<PlatformKind, 3> encryptablePlatforms{
- PlatformKind::iOS, PlatformKind::watchOS, PlatformKind::tvOS};
+ std::array<PlatformType, 3> encryptablePlatforms{
+ PLATFORM_IOS, PLATFORM_WATCHOS, PLATFORM_TVOS};
config->emitEncryptionInfo =
args.hasFlag(OPT_encryptable, OPT_no_encryption,
is_contained(encryptablePlatforms, config->platform()));
@@ -1386,7 +1360,7 @@ bool macho::link(ArrayRef<const char *> argsArr, bool canExitEarly,
config->adhocCodesign = args.hasFlag(
OPT_adhoc_codesign, OPT_no_adhoc_codesign,
(config->arch() == AK_arm64 || config->arch() == AK_arm64e) &&
- config->platform() == PlatformKind::macOS);
+ config->platform() == PLATFORM_MACOS);
if (args.hasArg(OPT_v)) {
message(getLLDVersion(), lld::errs());
@@ -1448,7 +1422,7 @@ bool macho::link(ArrayRef<const char *> argsArr, bool canExitEarly,
// Parse LTO options.
if (const Arg *arg = args.getLastArg(OPT_mcpu))
- parseClangOption(saver.save("-mcpu=" + StringRef(arg->getValue())),
+ parseClangOption(saver().save("-mcpu=" + StringRef(arg->getValue())),
arg->getSpelling());
for (const Arg *arg : args.filtered(OPT_mllvm))
@@ -1458,8 +1432,10 @@ bool macho::link(ArrayRef<const char *> argsArr, bool canExitEarly,
replaceCommonSymbols();
StringRef orderFile = args.getLastArgValue(OPT_order_file);
- if (!orderFile.empty())
+ if (!orderFile.empty()) {
parseOrderFile(orderFile);
+ config->callGraphProfileSort = false;
+ }
referenceStubBinder();
@@ -1509,6 +1485,8 @@ bool macho::link(ArrayRef<const char *> argsArr, bool canExitEarly,
}
gatherInputSections();
+ if (config->callGraphProfileSort)
+ extractCallGraphProfile();
if (config->deadStrip)
markLive();
@@ -1535,11 +1513,5 @@ bool macho::link(ArrayRef<const char *> argsArr, bool canExitEarly,
timeTraceProfilerCleanup();
}
-
- if (canExitEarly)
- exitLld(errorCount() ? 1 : 0);
-
- bool ret = errorCount() == 0;
- errorHandler().reset();
- return ret;
+ return errorCount() == 0;
}
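
Editor's note: the Driver.cpp hunks thread a new isLazy flag through addFile()/addFileList() and add OPT_start_lib/OPT_end_lib handling, so object and bitcode files between the two flags are loaded lazily, like archive members. A simplified sketch of that toggle, with the argument model and the reportError/addInput helpers invented here purely for illustration:

#include <cstdio>
#include <string>
#include <vector>

static void reportError(const char *msg) {
  std::fprintf(stderr, "error: %s\n", msg);
}

static void addInput(const std::string &path, bool isLazy) {
  // Stand-in for addFile(rerootPath(path), ForceLoad::Default, isLazy).
  std::printf("%s lazy=%d\n", path.c_str(), isLazy ? 1 : 0);
}

struct Arg { std::string opt, value; };

static void scanInputArgs(const std::vector<Arg> &args) {
  bool isLazy = false;                 // toggled by --start-lib / --end-lib
  for (const Arg &a : args) {
    if (a.opt == "--start-lib") {
      if (isLazy)
        reportError("nested --start-lib");
      isLazy = true;
    } else if (a.opt == "--end-lib") {
      if (!isLazy)
        reportError("stray --end-lib");
      isLazy = false;
    } else {
      addInput(a.value, isLazy);       // lazy files behave like archive members
    }
  }
}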
diff --git a/contrib/llvm-project/lld/MachO/Driver.h b/contrib/llvm-project/lld/MachO/Driver.h
index 4a970ac8a084..c2933344e611 100644
--- a/contrib/llvm-project/lld/MachO/Driver.h
+++ b/contrib/llvm-project/lld/MachO/Driver.h
@@ -13,19 +13,13 @@
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Option/OptTable.h"
#include "llvm/Support/MemoryBuffer.h"
#include <set>
#include <type_traits>
-namespace llvm {
-namespace MachO {
-class InterfaceFile;
-enum class PlatformKind : unsigned;
-} // namespace MachO
-} // namespace llvm
-
namespace lld {
namespace macho {
@@ -74,7 +68,7 @@ uint32_t getModTime(llvm::StringRef path);
void printArchiveMemberLoad(StringRef reason, const InputFile *);
// Map simulator platforms to their underlying device platform.
-llvm::MachO::PlatformKind removeSimulator(llvm::MachO::PlatformKind platform);
+llvm::MachO::PlatformType removeSimulator(llvm::MachO::PlatformType platform);
// Helper class to export dependency info.
class DependencyTracker {
@@ -115,7 +109,7 @@ private:
std::set<std::string> notFounds;
};
-extern DependencyTracker *depTracker;
+extern std::unique_ptr<DependencyTracker> depTracker;
} // namespace macho
} // namespace lld
diff --git a/contrib/llvm-project/lld/MachO/DriverUtils.cpp b/contrib/llvm-project/lld/MachO/DriverUtils.cpp
index 3c5440544614..83940b54486f 100644
--- a/contrib/llvm-project/lld/MachO/DriverUtils.cpp
+++ b/contrib/llvm-project/lld/MachO/DriverUtils.cpp
@@ -13,8 +13,7 @@
#include "Target.h"
#include "lld/Common/Args.h"
-#include "lld/Common/ErrorHandler.h"
-#include "lld/Common/Memory.h"
+#include "lld/Common/CommonLinkerContext.h"
#include "lld/Common/Reproduce.h"
#include "llvm/ADT/CachedHashString.h"
#include "llvm/ADT/DenseMap.h"
@@ -82,7 +81,7 @@ InputArgList MachOOptTable::parse(ArrayRef<const char *> argv) {
// Expand response files (arguments in the form of @<filename>)
// and then parse the argument again.
- cl::ExpandResponseFiles(saver, cl::TokenizeGNUCommandLine, vec);
+ cl::ExpandResponseFiles(saver(), cl::TokenizeGNUCommandLine, vec);
InputArgList args = ParseArgs(vec, missingIndex, missingCount);
// Handle -fatal_warnings early since it converts missing argument warnings
@@ -191,12 +190,12 @@ Optional<StringRef> macho::resolveDylibPath(StringRef dylibPath) {
bool tbdExists = fs::exists(tbdPath);
searchedDylib(tbdPath, tbdExists);
if (tbdExists)
- return saver.save(tbdPath.str());
+ return saver().save(tbdPath.str());
bool dylibExists = fs::exists(dylibPath);
searchedDylib(dylibPath, dylibExists);
if (dylibExists)
- return saver.save(dylibPath);
+ return saver().save(dylibPath);
return {};
}
@@ -261,7 +260,7 @@ macho::findPathCombination(const Twine &name,
bool exists = fs::exists(location);
searchedDylib(location, exists);
if (exists)
- return saver.save(location.str());
+ return saver().save(location.str());
}
}
return {};
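
Editor's note: the DriverUtils.cpp hunks only switch to saver(), but they show the lookup order resolveDylibPath keeps: a text-based stub (.tbd) next to a dylib wins over the binary if both exist. A hedged sketch of that preference, omitting the string-saving and search-path reporting done by the real code:

#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include <string>

// Given /path/libfoo.dylib, prefer /path/libfoo.tbd when it exists.
llvm::Optional<std::string> resolveDylib(llvm::StringRef dylibPath) {
  llvm::SmallString<261> tbdPath(dylibPath);
  llvm::sys::path::replace_extension(tbdPath, ".tbd");
  if (llvm::sys::fs::exists(tbdPath))
    return tbdPath.str().str();
  if (llvm::sys::fs::exists(dylibPath))
    return dylibPath.str();
  return llvm::None;
}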
diff --git a/contrib/llvm-project/lld/MachO/InputFiles.cpp b/contrib/llvm-project/lld/MachO/InputFiles.cpp
index c596b8c1ff32..bbeb2dc09bf0 100644
--- a/contrib/llvm-project/lld/MachO/InputFiles.cpp
+++ b/contrib/llvm-project/lld/MachO/InputFiles.cpp
@@ -56,17 +56,18 @@
#include "SyntheticSections.h"
#include "Target.h"
+#include "lld/Common/CommonLinkerContext.h"
#include "lld/Common/DWARF.h"
-#include "lld/Common/ErrorHandler.h"
-#include "lld/Common/Memory.h"
#include "lld/Common/Reproduce.h"
#include "llvm/ADT/iterator.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/LTO/LTO.h"
+#include "llvm/Support/BinaryStreamReader.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/TarWriter.h"
+#include "llvm/Support/TimeProfiler.h"
#include "llvm/TextAPI/Architecture.h"
#include "llvm/TextAPI/InterfaceFile.h"
@@ -114,7 +115,7 @@ static std::vector<PlatformInfo> getPlatformInfos(const InputFile *input) {
std::vector<PlatformInfo> platformInfos;
for (auto *cmd : findCommands<build_version_command>(hdr, LC_BUILD_VERSION)) {
PlatformInfo info;
- info.target.Platform = static_cast<PlatformKind>(cmd->platform);
+ info.target.Platform = static_cast<PlatformType>(cmd->platform);
info.minimum = decodeVersion(cmd->minos);
platformInfos.emplace_back(std::move(info));
}
@@ -124,16 +125,16 @@ static std::vector<PlatformInfo> getPlatformInfos(const InputFile *input) {
PlatformInfo info;
switch (cmd->cmd) {
case LC_VERSION_MIN_MACOSX:
- info.target.Platform = PlatformKind::macOS;
+ info.target.Platform = PLATFORM_MACOS;
break;
case LC_VERSION_MIN_IPHONEOS:
- info.target.Platform = PlatformKind::iOS;
+ info.target.Platform = PLATFORM_IOS;
break;
case LC_VERSION_MIN_TVOS:
- info.target.Platform = PlatformKind::tvOS;
+ info.target.Platform = PLATFORM_TVOS;
break;
case LC_VERSION_MIN_WATCHOS:
- info.target.Platform = PlatformKind::watchOS;
+ info.target.Platform = PLATFORM_WATCHOS;
break;
}
info.minimum = decodeVersion(cmd->version);
@@ -208,6 +209,8 @@ Optional<MemoryBufferRef> macho::readFile(StringRef path) {
return cachedReads[key] = mbref;
}
+ llvm::BumpPtrAllocator &bAlloc = lld::bAlloc();
+
// Object files and archive files may be fat files, which contain multiple
// real files for different CPU ISAs. Here, we search for a file that matches
// with the current link target and returns it as a MemoryBufferRef.
@@ -239,7 +242,7 @@ Optional<MemoryBufferRef> macho::readFile(StringRef path) {
}
InputFile::InputFile(Kind kind, const InterfaceFile &interface)
- : id(idCount++), fileKind(kind), name(saver.save(interface.getPath())) {}
+ : id(idCount++), fileKind(kind), name(saver().save(interface.getPath())) {}
// Some sections comprise of fixed-size records, so instead of splitting them at
// symbol boundaries, we split them based on size. Records are distinct from
@@ -325,6 +328,25 @@ void ObjFile::parseSections(ArrayRef<SectionHeader> sectionHeaders) {
if (name == section_names::compactUnwind)
compactUnwindSection = &sections.back();
} else if (segname == segment_names::llvm) {
+ if (name == "__cg_profile" && config->callGraphProfileSort) {
+ TimeTraceScope timeScope("Parsing call graph section");
+ BinaryStreamReader reader(data, support::little);
+ while (!reader.empty()) {
+ uint32_t fromIndex, toIndex;
+ uint64_t count;
+ if (Error err = reader.readInteger(fromIndex))
+ fatal(toString(this) + ": Expected 32-bit integer");
+ if (Error err = reader.readInteger(toIndex))
+ fatal(toString(this) + ": Expected 32-bit integer");
+ if (Error err = reader.readInteger(count))
+ fatal(toString(this) + ": Expected 64-bit integer");
+ callGraph.emplace_back();
+ CallGraphEntry &entry = callGraph.back();
+ entry.fromIndex = fromIndex;
+ entry.toIndex = toIndex;
+ entry.count = count;
+ }
+ }
// ld64 does not appear to emit contents from sections within the __LLVM
// segment. Symbols within those sections point to bitcode metadata
// instead of actual symbols. Global symbols within those sections could
@@ -815,13 +837,21 @@ OpaqueFile::OpaqueFile(MemoryBufferRef mb, StringRef segName,
sections.back().subsections.push_back({0, isec});
}
-ObjFile::ObjFile(MemoryBufferRef mb, uint32_t modTime, StringRef archiveName)
- : InputFile(ObjKind, mb), modTime(modTime) {
+ObjFile::ObjFile(MemoryBufferRef mb, uint32_t modTime, StringRef archiveName,
+ bool lazy)
+ : InputFile(ObjKind, mb, lazy), modTime(modTime) {
this->archiveName = std::string(archiveName);
- if (target->wordSize == 8)
- parse<LP64>();
- else
- parse<ILP32>();
+ if (lazy) {
+ if (target->wordSize == 8)
+ parseLazy<LP64>();
+ else
+ parseLazy<ILP32>();
+ } else {
+ if (target->wordSize == 8)
+ parse<LP64>();
+ else
+ parse<ILP32>();
+ }
}
template <class LP> void ObjFile::parse() {
@@ -883,6 +913,32 @@ template <class LP> void ObjFile::parse() {
registerCompactUnwind();
}
+template <class LP> void ObjFile::parseLazy() {
+ using Header = typename LP::mach_header;
+ using NList = typename LP::nlist;
+
+ auto *buf = reinterpret_cast<const uint8_t *>(mb.getBufferStart());
+ auto *hdr = reinterpret_cast<const Header *>(mb.getBufferStart());
+ const load_command *cmd = findCommand(hdr, LC_SYMTAB);
+ if (!cmd)
+ return;
+ auto *c = reinterpret_cast<const symtab_command *>(cmd);
+ ArrayRef<NList> nList(reinterpret_cast<const NList *>(buf + c->symoff),
+ c->nsyms);
+ const char *strtab = reinterpret_cast<const char *>(buf) + c->stroff;
+ symbols.resize(nList.size());
+ for (auto it : llvm::enumerate(nList)) {
+ const NList &sym = it.value();
+ if ((sym.n_type & N_EXT) && !isUndef(sym)) {
+ // TODO: Bounds checking
+ StringRef name = strtab + sym.n_strx;
+ symbols[it.index()] = symtab->addLazyObject(name, *this);
+ if (!lazy)
+ break;
+ }
+ }
+}
+
void ObjFile::parseDebugInfo() {
std::unique_ptr<DwarfObject> dObj = DwarfObject::create(this);
if (!dObj)
@@ -1156,7 +1212,7 @@ DylibFile::DylibFile(MemoryBufferRef mb, DylibFile *umbrella,
// Find all the $ld$* symbols to process first.
parseTrie(buf + c->export_off, c->export_size,
[&](const Twine &name, uint64_t flags) {
- StringRef savedName = saver.save(name);
+ StringRef savedName = saver().save(name);
if (handleLDSymbol(savedName))
return;
entries.push_back({savedName, flags});
@@ -1230,7 +1286,7 @@ DylibFile::DylibFile(const InterfaceFile &interface, DylibFile *umbrella,
umbrella = this;
this->umbrella = umbrella;
- installName = saver.save(interface.getInstallName());
+ installName = saver().save(interface.getInstallName());
compatibilityVersion = interface.getCompatibilityVersion().rawValue();
currentVersion = interface.getCurrentVersion().rawValue();
@@ -1249,7 +1305,7 @@ DylibFile::DylibFile(const InterfaceFile &interface, DylibFile *umbrella,
exportingFile = isImplicitlyLinked(installName) ? this : umbrella;
auto addSymbol = [&](const Twine &name) -> void {
- StringRef savedName = saver.save(name);
+ StringRef savedName = saver().save(name);
if (exportingFile->hiddenSymbols.contains(CachedHashStringRef(savedName)))
return;
@@ -1368,7 +1424,7 @@ void DylibFile::handleLDPreviousSymbol(StringRef name, StringRef originalName) {
config->platformInfo.minimum >= end)
return;
- this->installName = saver.save(installName);
+ this->installName = saver().save(installName);
if (!compatVersion.empty()) {
VersionTuple cVersion;
@@ -1390,7 +1446,7 @@ void DylibFile::handleLDInstallNameSymbol(StringRef name,
if (!condition.consume_front("os") || version.tryParse(condition))
warn("failed to parse os version, symbol '" + originalName + "' ignored");
else if (version == config->platformInfo.minimum)
- this->installName = saver.save(installName);
+ this->installName = saver().save(installName);
}
void DylibFile::handleLDHideSymbol(StringRef name, StringRef originalName) {
@@ -1426,7 +1482,7 @@ ArchiveFile::ArchiveFile(std::unique_ptr<object::Archive> &&f)
void ArchiveFile::addLazySymbols() {
for (const object::Archive::Symbol &sym : file->symbols())
- symtab->addLazy(sym.getName(), this, sym);
+ symtab->addLazyArchive(sym.getName(), this, sym);
}
static Expected<InputFile *> loadArchiveMember(MemoryBufferRef mb,
@@ -1495,7 +1551,7 @@ void ArchiveFile::fetch(const object::Archive::Symbol &sym) {
static macho::Symbol *createBitcodeSymbol(const lto::InputFile::Symbol &objSym,
BitcodeFile &file) {
- StringRef name = saver.save(objSym.getName());
+ StringRef name = saver().save(objSym.getName());
if (objSym.isUndefined())
return symtab->addUndefined(name, &file, /*isWeakRef=*/objSym.isWeak());
@@ -1527,8 +1583,8 @@ static macho::Symbol *createBitcodeSymbol(const lto::InputFile::Symbol &objSym,
}
BitcodeFile::BitcodeFile(MemoryBufferRef mb, StringRef archiveName,
- uint64_t offsetInArchive)
- : InputFile(BitcodeKind, mb) {
+ uint64_t offsetInArchive, bool lazy)
+ : InputFile(BitcodeKind, mb, lazy) {
this->archiveName = std::string(archiveName);
std::string path = mb.getBufferIdentifier().str();
// ThinLTO assumes that all MemoryBufferRefs given to it have a unique
@@ -1537,19 +1593,55 @@ BitcodeFile::BitcodeFile(MemoryBufferRef mb, StringRef archiveName,
// So, we append the archive name to disambiguate two members with the same
// name from multiple different archives, and offset within the archive to
// disambiguate two members of the same name from a single archive.
- MemoryBufferRef mbref(
- mb.getBuffer(),
- saver.save(archiveName.empty() ? path
- : archiveName + sys::path::filename(path) +
- utostr(offsetInArchive)));
+ MemoryBufferRef mbref(mb.getBuffer(),
+ saver().save(archiveName.empty()
+ ? path
+ : archiveName +
+ sys::path::filename(path) +
+ utostr(offsetInArchive)));
obj = check(lto::InputFile::create(mbref));
+ if (lazy)
+ parseLazy();
+ else
+ parse();
+}
+void BitcodeFile::parse() {
// Convert LTO Symbols to LLD Symbols in order to perform resolution. The
// "winning" symbol will then be marked as Prevailing at LTO compilation
// time.
+ symbols.clear();
for (const lto::InputFile::Symbol &objSym : obj->symbols())
symbols.push_back(createBitcodeSymbol(objSym, *this));
}
+void BitcodeFile::parseLazy() {
+ symbols.resize(obj->symbols().size());
+ for (auto it : llvm::enumerate(obj->symbols())) {
+ const lto::InputFile::Symbol &objSym = it.value();
+ if (!objSym.isUndefined()) {
+ symbols[it.index()] =
+ symtab->addLazyObject(saver().save(objSym.getName()), *this);
+ if (!lazy)
+ break;
+ }
+ }
+}
+
+void macho::extract(InputFile &file, StringRef reason) {
+ assert(file.lazy);
+ file.lazy = false;
+ printArchiveMemberLoad(reason, &file);
+ if (auto *bitcode = dyn_cast<BitcodeFile>(&file)) {
+ bitcode->parse();
+ } else {
+ auto &f = cast<ObjFile>(file);
+ if (target->wordSize == 8)
+ f.parse<LP64>();
+ else
+ f.parse<ILP32>();
+ }
+}
+
template void ObjFile::parse<LP64>();
diff --git a/contrib/llvm-project/lld/MachO/InputFiles.h b/contrib/llvm-project/lld/MachO/InputFiles.h
index 36da70011170..6a4a4fdb43b6 100644
--- a/contrib/llvm-project/lld/MachO/InputFiles.h
+++ b/contrib/llvm-project/lld/MachO/InputFiles.h
@@ -65,6 +65,16 @@ struct Section {
Section(uint64_t addr) : address(addr){};
};
+// Represents a call graph profile edge.
+struct CallGraphEntry {
+ // The index of the caller in the symbol table.
+ uint32_t fromIndex;
+ // The index of the callee in the symbol table.
+ uint32_t toIndex;
+ // Number of calls from the caller to the callee recorded in the profile.
+ uint64_t count;
+};
+
class InputFile {
public:
enum Kind {
@@ -84,16 +94,21 @@ public:
std::vector<Symbol *> symbols;
std::vector<Section> sections;
- // Provides an easy way to sort InputFiles deterministically.
- const int id;
// If not empty, this stores the name of the archive containing this file.
// We use this string for creating error messages.
std::string archiveName;
+ // Provides an easy way to sort InputFiles deterministically.
+ const int id;
+
+ // True if this is a lazy ObjFile or BitcodeFile.
+ bool lazy = false;
+
protected:
- InputFile(Kind kind, MemoryBufferRef mb)
- : mb(mb), id(idCount++), fileKind(kind), name(mb.getBufferIdentifier()) {}
+ InputFile(Kind kind, MemoryBufferRef mb, bool lazy = false)
+ : mb(mb), id(idCount++), lazy(lazy), fileKind(kind),
+ name(mb.getBufferIdentifier()) {}
InputFile(Kind, const llvm::MachO::InterfaceFile &);
@@ -107,19 +122,22 @@ private:
// .o file
class ObjFile final : public InputFile {
public:
- ObjFile(MemoryBufferRef mb, uint32_t modTime, StringRef archiveName);
+ ObjFile(MemoryBufferRef mb, uint32_t modTime, StringRef archiveName,
+ bool lazy = false);
ArrayRef<llvm::MachO::data_in_code_entry> getDataInCode() const;
+ template <class LP> void parse();
static bool classof(const InputFile *f) { return f->kind() == ObjKind; }
llvm::DWARFUnit *compileUnit = nullptr;
const uint32_t modTime;
std::vector<ConcatInputSection *> debugSections;
+ std::vector<CallGraphEntry> callGraph;
private:
Section *compactUnwindSection = nullptr;
- template <class LP> void parse();
+ template <class LP> void parseLazy();
template <class SectionHeader> void parseSections(ArrayRef<SectionHeader>);
template <class LP>
void parseSymbols(ArrayRef<typename LP::section> sectionHeaders,
@@ -218,10 +236,14 @@ private:
class BitcodeFile final : public InputFile {
public:
explicit BitcodeFile(MemoryBufferRef mb, StringRef archiveName,
- uint64_t offsetInArchive);
+ uint64_t offsetInArchive, bool lazy = false);
static bool classof(const InputFile *f) { return f->kind() == BitcodeKind; }
+ void parse();
std::unique_ptr<llvm::lto::InputFile> obj;
+
+private:
+ void parseLazy();
};
extern llvm::SetVector<InputFile *> inputFiles;
@@ -229,6 +251,8 @@ extern llvm::DenseMap<llvm::CachedHashStringRef, MemoryBufferRef> cachedReads;
llvm::Optional<MemoryBufferRef> readFile(StringRef path);
+void extract(InputFile &file, StringRef reason);
+
namespace detail {
template <class CommandType, class... Types>
diff --git a/contrib/llvm-project/lld/MachO/InputSection.h b/contrib/llvm-project/lld/MachO/InputSection.h
index fa137223c426..5535e871c539 100644
--- a/contrib/llvm-project/lld/MachO/InputSection.h
+++ b/contrib/llvm-project/lld/MachO/InputSection.h
@@ -53,6 +53,7 @@ public:
virtual bool isLive(uint64_t off) const = 0;
virtual void markLive(uint64_t off) = 0;
virtual InputSection *canonical() { return this; }
+ virtual const InputSection *canonical() const { return this; }
OutputSection *parent = nullptr;
@@ -124,6 +125,9 @@ public:
ConcatInputSection *canonical() override {
return replacement ? replacement : this;
}
+ const InputSection *canonical() const override {
+ return replacement ? replacement : this;
+ }
static bool classof(const InputSection *isec) {
return isec->kind() == ConcatKind;
@@ -234,7 +238,9 @@ public:
bool isLive(uint64_t off) const override {
return live[off >> power2LiteralSize];
}
- void markLive(uint64_t off) override { live[off >> power2LiteralSize] = 1; }
+ void markLive(uint64_t off) override {
+ live[off >> power2LiteralSize] = true;
+ }
static bool classof(const InputSection *isec) {
return isec->kind() == WordLiteralKind;
diff --git a/contrib/llvm-project/lld/MachO/LTO.cpp b/contrib/llvm-project/lld/MachO/LTO.cpp
index c71ea33d2896..fd49a09229d1 100644
--- a/contrib/llvm-project/lld/MachO/LTO.cpp
+++ b/contrib/llvm-project/lld/MachO/LTO.cpp
@@ -14,7 +14,7 @@
#include "Target.h"
#include "lld/Common/Args.h"
-#include "lld/Common/ErrorHandler.h"
+#include "lld/Common/CommonLinkerContext.h"
#include "lld/Common/Strings.h"
#include "lld/Common/TargetOptionsCommandFlags.h"
#include "llvm/LTO/Config.h"
@@ -148,7 +148,7 @@ std::vector<ObjFile *> BitcodeCompiler::compile() {
modTime = getModTime(filePath);
}
ret.push_back(make<ObjFile>(
- MemoryBufferRef(buf[i], saver.save(filePath.str())), modTime, ""));
+ MemoryBufferRef(buf[i], saver().save(filePath.str())), modTime, ""));
}
for (std::unique_ptr<MemoryBuffer> &file : files)
if (file)
diff --git a/contrib/llvm-project/lld/MachO/MapFile.cpp b/contrib/llvm-project/lld/MachO/MapFile.cpp
index 79471eecbd52..93abea2ed08b 100644
--- a/contrib/llvm-project/lld/MachO/MapFile.cpp
+++ b/contrib/llvm-project/lld/MachO/MapFile.cpp
@@ -40,26 +40,6 @@ using namespace llvm::sys;
using namespace lld;
using namespace lld::macho;
-using SymbolMapTy = DenseMap<const InputSection *, SmallVector<Defined *, 4>>;
-
-// Returns a map from sections to their symbols.
-static SymbolMapTy getSectionSyms(ArrayRef<Defined *> syms) {
- SymbolMapTy ret;
- for (Defined *dr : syms)
- ret[dr->isec].push_back(dr);
-
- // Sort symbols by address. We want to print out symbols in the order they
- // appear in the output file rather than the order they appeared in the input
- // files.
- for (auto &it : ret)
- parallelSort(
- it.second.begin(), it.second.end(), [](Defined *a, Defined *b) {
- return a->getVA() != b->getVA() ? a->getVA() < b->getVA()
- : a->getName() < b->getName();
- });
- return ret;
-}
-
// Returns a list of all symbols that we want to print out.
static std::vector<Defined *> getSymbols() {
std::vector<Defined *> v;
@@ -126,7 +106,10 @@ void macho::writeMapFile() {
// Collect symbol info that we want to print out.
std::vector<Defined *> syms = getSymbols();
- SymbolMapTy sectionSyms = getSectionSyms(syms);
+ parallelSort(syms.begin(), syms.end(), [](Defined *a, Defined *b) {
+ return a->getVA() != b->getVA() ? a->getVA() < b->getVA()
+ : a->getName() < b->getName();
+ });
DenseMap<Symbol *, std::string> symStr = getSymbolStrings(syms);
// Dump table of sections
@@ -144,15 +127,9 @@ void macho::writeMapFile() {
// Dump table of symbols
os << "# Symbols:\n";
os << "# Address\t File Name\n";
- for (InputSection *isec : inputSections) {
- auto symsIt = sectionSyms.find(isec);
- assert(!shouldOmitFromOutput(isec) || (symsIt == sectionSyms.end()));
- if (symsIt == sectionSyms.end())
- continue;
- for (Symbol *sym : symsIt->second) {
- os << format("0x%08llX\t[%3u] %s\n", sym->getVA(),
- readerToFileOrdinal[sym->getFile()], symStr[sym].c_str());
- }
+ for (Symbol *sym : syms) {
+ os << format("0x%08llX\t[%3u] %s\n", sym->getVA(),
+ readerToFileOrdinal[sym->getFile()], symStr[sym].c_str());
}
// TODO: when we implement -dead_strip, we should dump dead stripped symbols
diff --git a/contrib/llvm-project/lld/MachO/Options.td b/contrib/llvm-project/lld/MachO/Options.td
index 7cd4d01a0a5a..3d1d97641d71 100644
--- a/contrib/llvm-project/lld/MachO/Options.td
+++ b/contrib/llvm-project/lld/MachO/Options.td
@@ -73,12 +73,25 @@ def thinlto_cache_policy: Joined<["--"], "thinlto-cache-policy=">,
Group<grp_lld>;
def O : JoinedOrSeparate<["-"], "O">,
HelpText<"Optimize output file size">;
+def start_lib: Flag<["--"], "start-lib">,
+ HelpText<"Start a grouping of objects that should be treated as if they were together in an archive">;
+def end_lib: Flag<["--"], "end-lib">,
+ HelpText<"End a grouping of objects that should be treated as if they were together in an archive">;
def no_warn_dylib_install_name: Joined<["--"], "no-warn-dylib-install-name">,
HelpText<"Do not warn on -install-name if -dylib is not passed (default)">,
Group<grp_lld>;
def warn_dylib_install_name: Joined<["--"], "warn-dylib-install-name">,
HelpText<"Warn on -install-name if -dylib is not passed">,
Group<grp_lld>;
+def call_graph_profile_sort: Flag<["--"], "call-graph-profile-sort">,
+ HelpText<"Reorder sections with call graph profile (default)">,
+ Group<grp_lld>;
+def no_call_graph_profile_sort : Flag<["--"], "no-call-graph-profile-sort">,
+ HelpText<"Do not reorder sections with call graph profile">,
+ Group<grp_lld>;
+def print_symbol_order: Joined<["--"], "print-symbol-order=">,
+ HelpText<"Print a symbol order specified by --call-graph-profile-sort into the specified file">,
+ Group<grp_lld>;
// This is a complete Options.td compiled from Apple's ld(1) manpage
// dated 2018-03-07 and cross checked with ld64 source code in repo
@@ -214,6 +227,9 @@ def F : JoinedOrSeparate<["-"], "F">,
def all_load : Flag<["-"], "all_load">,
HelpText<"Load all members of all static archive libraries">,
Group<grp_libs>;
+def noall_load : Flag<["-"], "noall_load">,
+ HelpText<"Don't load all static members from archives, this is the default, this negates -all_load">,
+ Group<grp_libs>;
def ObjC : Flag<["-"], "ObjC">,
HelpText<"Load all members of static archives that are an Objective-C class or category.">,
Group<grp_libs>;
@@ -980,10 +996,6 @@ def no_dead_strip_inits_and_terms : Flag<["-"], "no_dead_strip_inits_and_terms">
HelpText<"Unnecessary option: initialization and termination are roots of the dead strip graph, so never dead stripped">,
Flags<[HelpHidden]>,
Group<grp_deprecated>;
-def noall_load : Flag<["-"], "noall_load">,
- HelpText<"Unnecessary option: this is already the default">,
- Flags<[HelpHidden]>,
- Group<grp_deprecated>;
def grp_obsolete : OptionGroup<"obsolete">, HelpText<"OBSOLETE">;
diff --git a/contrib/llvm-project/lld/MachO/SectionPriorities.cpp b/contrib/llvm-project/lld/MachO/SectionPriorities.cpp
new file mode 100644
index 000000000000..35510d7338e8
--- /dev/null
+++ b/contrib/llvm-project/lld/MachO/SectionPriorities.cpp
@@ -0,0 +1,379 @@
+//===- SectionPriorities.cpp ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// This is based on the ELF port; see ELF/CallGraphSort.cpp for the details
+/// of the algorithm.
+///
+//===----------------------------------------------------------------------===//
+
+#include "SectionPriorities.h"
+#include "Config.h"
+#include "InputFiles.h"
+#include "Symbols.h"
+#include "Target.h"
+#include "lld/Common/Args.h"
+#include "lld/Common/CommonLinkerContext.h"
+#include "lld/Common/ErrorHandler.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/TimeProfiler.h"
+#include "llvm/Support/raw_ostream.h"
+#include <numeric>
+
+using namespace llvm;
+using namespace llvm::MachO;
+using namespace llvm::sys;
+using namespace lld;
+using namespace lld::macho;
+
+namespace {
+struct Edge {
+ int from;
+ uint64_t weight;
+};
+
+struct Cluster {
+ Cluster(int sec, size_t s) : next(sec), prev(sec), size(s) {}
+
+ double getDensity() const {
+ if (size == 0)
+ return 0;
+ return double(weight) / double(size);
+ }
+
+ int next;
+ int prev;
+ uint64_t size;
+ uint64_t weight = 0;
+ uint64_t initialWeight = 0;
+ Edge bestPred = {-1, 0};
+};
+
+class CallGraphSort {
+public:
+ CallGraphSort();
+
+ DenseMap<const InputSection *, size_t> run();
+
+private:
+ std::vector<Cluster> clusters;
+ std::vector<const InputSection *> sections;
+};
+// Maximum factor by which the combined cluster's density may be worse than the
+// original cluster's density for a merge to still be considered.
+constexpr int MAX_DENSITY_DEGRADATION = 8;
+} // end anonymous namespace
+
+using SectionPair = std::pair<const InputSection *, const InputSection *>;
+
+// Take the edge list in config->callGraphProfile, canonicalize the referenced
+// InputSections, and build a graph between them with the provided weights.
+CallGraphSort::CallGraphSort() {
+ MapVector<SectionPair, uint64_t> &profile = config->callGraphProfile;
+ DenseMap<const InputSection *, int> secToCluster;
+
+ auto getOrCreateCluster = [&](const InputSection *isec) -> int {
+ auto res = secToCluster.try_emplace(isec, clusters.size());
+ if (res.second) {
+ sections.push_back(isec);
+ clusters.emplace_back(clusters.size(), isec->getSize());
+ }
+ return res.first->second;
+ };
+
+ // Create the graph
+ for (std::pair<SectionPair, uint64_t> &c : profile) {
+ const auto fromSec = c.first.first->canonical();
+ const auto toSec = c.first.second->canonical();
+ uint64_t weight = c.second;
+ // Ignore edges between input sections belonging to different output
+ // sections. This is done because otherwise we would end up with clusters
+ // containing input sections that can't actually be placed adjacently in the
+ // output. This messes with the cluster size and density calculations. We
+ // would also end up moving input sections in other output sections without
+ // moving them closer to what calls them.
+ if (fromSec->parent != toSec->parent)
+ continue;
+
+ int from = getOrCreateCluster(fromSec);
+ int to = getOrCreateCluster(toSec);
+
+ clusters[to].weight += weight;
+
+ if (from == to)
+ continue;
+
+ // Remember the best edge.
+ Cluster &toC = clusters[to];
+ if (toC.bestPred.from == -1 || toC.bestPred.weight < weight) {
+ toC.bestPred.from = from;
+ toC.bestPred.weight = weight;
+ }
+ }
+ for (Cluster &c : clusters)
+ c.initialWeight = c.weight;
+}
+
+// It's bad to merge clusters which would degrade the density too much.
+static bool isNewDensityBad(Cluster &a, Cluster &b) {
+ double newDensity = double(a.weight + b.weight) / double(a.size + b.size);
+ return newDensity < a.getDensity() / MAX_DENSITY_DEGRADATION;
+}
+
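
A quick worked example of the degradation check above; this is an illustration
only (MiniCluster and the numbers are made up, and maxDegradation stands in for
MAX_DENSITY_DEGRADATION = 8):

#include <cassert>
#include <cstdint>

struct MiniCluster {
  uint64_t size;
  uint64_t weight;
};

// Restates isNewDensityBad() for two standalone clusters.
static bool newDensityBad(const MiniCluster &a, const MiniCluster &b,
                          int maxDegradation = 8) {
  double newDensity = double(a.weight + b.weight) / double(a.size + b.size);
  double oldDensity = a.size ? double(a.weight) / double(a.size) : 0.0;
  return newDensity < oldDensity / maxDegradation;
}

int main() {
  MiniCluster hot = {10, 100}; // density 10.0
  MiniCluster cold = {90, 1};  // density ~0.01
  // Merged density is 101/100 ~= 1.01, which is below 10.0 / 8 = 1.25, so
  // the merge would dilute the hot cluster too much and is rejected.
  assert(newDensityBad(hot, cold));
  return 0;
}
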
+// Find the leader of v's cluster (clusters are represented as equivalence
+// classes). We apply the union-find path-halving technique (simple to
+// implement) along the way, since it decreases tree depth and improves the
+// time complexity.
+static int getLeader(std::vector<int> &leaders, int v) {
+ while (leaders[v] != v) {
+ leaders[v] = leaders[leaders[v]];
+ v = leaders[v];
+ }
+ return v;
+}
+
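
For intuition, a tiny standalone demonstration of the path-halving behavior;
findLeader below simply mirrors getLeader() above and is not lld code:

#include <cassert>
#include <vector>

static int findLeader(std::vector<int> &leaders, int v) {
  while (leaders[v] != v) {
    leaders[v] = leaders[leaders[v]]; // path halving: skip one level per step
    v = leaders[v];
  }
  return v;
}

int main() {
  // A chain 3 -> 2 -> 1 -> 0, where each element stores its parent.
  std::vector<int> leaders = {0, 0, 1, 2};
  assert(findLeader(leaders, 3) == 0);
  // The lookup also shortened the chain: 3 now points directly at 1.
  assert(leaders[3] == 1);
  return 0;
}
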
+static void mergeClusters(std::vector<Cluster> &cs, Cluster &into, int intoIdx,
+ Cluster &from, int fromIdx) {
+ int tail1 = into.prev, tail2 = from.prev;
+ into.prev = tail2;
+ cs[tail2].next = intoIdx;
+ from.prev = tail1;
+ cs[tail1].next = fromIdx;
+ into.size += from.size;
+ into.weight += from.weight;
+ from.size = 0;
+ from.weight = 0;
+}
+
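
Since the next/prev indices form circular lists, mergeClusters() above is a
constant-time ring splice. A minimal illustration (a sketch only; Node stands
in for Cluster):

#include <cassert>
#include <vector>

struct Node {
  int next;
  int prev;
};

// Same pointer surgery as mergeClusters(), minus the size/weight bookkeeping.
static void splice(std::vector<Node> &ns, int intoIdx, int fromIdx) {
  int tail1 = ns[intoIdx].prev, tail2 = ns[fromIdx].prev;
  ns[intoIdx].prev = tail2;
  ns[tail2].next = intoIdx;
  ns[fromIdx].prev = tail1;
  ns[tail1].next = fromIdx;
}

int main() {
  // Two singleton rings: {0} and {1}.
  std::vector<Node> ns = {{0, 0}, {1, 1}};
  splice(ns, /*intoIdx=*/0, /*fromIdx=*/1);
  // The ring starting at 0 now visits 0 -> 1 -> 0.
  assert(ns[0].next == 1 && ns[1].next == 0);
  return 0;
}
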
+// Group InputSections into clusters using the Call-Chain Clustering heuristic,
+// then sort the clusters by density.
+DenseMap<const InputSection *, size_t> CallGraphSort::run() {
+ const uint64_t maxClusterSize = target->getPageSize();
+
+ // Cluster indices sorted by density.
+ std::vector<int> sorted(clusters.size());
+ // For union-find.
+ std::vector<int> leaders(clusters.size());
+
+ std::iota(leaders.begin(), leaders.end(), 0);
+ std::iota(sorted.begin(), sorted.end(), 0);
+
+ llvm::stable_sort(sorted, [&](int a, int b) {
+ return clusters[a].getDensity() > clusters[b].getDensity();
+ });
+
+ for (int l : sorted) {
+ // The cluster index is the same as the index of its leader here because
+ // clusters[L] has not been merged into another cluster yet.
+ Cluster &c = clusters[l];
+
+ // Don't consider merging if the edge is unlikely.
+ if (c.bestPred.from == -1 || c.bestPred.weight * 10 <= c.initialWeight)
+ continue;
+
+ int predL = getLeader(leaders, c.bestPred.from);
+ // Already in the same cluster.
+ if (l == predL)
+ continue;
+
+ Cluster *predC = &clusters[predL];
+ if (c.size + predC->size > maxClusterSize)
+ continue;
+
+ if (isNewDensityBad(*predC, c))
+ continue;
+
+ leaders[l] = predL;
+ mergeClusters(clusters, *predC, predL, c, l);
+ }
+ // Sort remaining non-empty clusters by density.
+ sorted.clear();
+ for (int i = 0, e = (int)clusters.size(); i != e; ++i)
+ if (clusters[i].size > 0)
+ sorted.push_back(i);
+ llvm::stable_sort(sorted, [&](int a, int b) {
+ return clusters[a].getDensity() > clusters[b].getDensity();
+ });
+
+ DenseMap<const InputSection *, size_t> orderMap;
+
+ // Sections will be sorted in decreasing order of priority. Sections absent
+ // from the profile get priority 0 and are placed at the end.
+ // NB: This is the opposite of COFF/ELF, to stay compatible with the existing
+ // order-file code.
+ int curOrder = clusters.size();
+ for (int leader : sorted) {
+ for (int i = leader;;) {
+ orderMap[sections[i]] = curOrder--;
+ i = clusters[i].next;
+ if (i == leader)
+ break;
+ }
+ }
+ if (!config->printSymbolOrder.empty()) {
+ std::error_code ec;
+ raw_fd_ostream os(config->printSymbolOrder, ec, sys::fs::OF_None);
+ if (ec) {
+ error("cannot open " + config->printSymbolOrder + ": " + ec.message());
+ return orderMap;
+ }
+ // Print the symbols ordered by C3, in decreasing order of curOrder.
+ // Instead of sorting the entire orderMap, just repeat the loops above.
+ for (int leader : sorted)
+ for (int i = leader;;) {
+ const InputSection *isec = sections[i];
+ // Search all the symbols in the section's file and print every
+ // Defined symbol whose definition lies within this section.
+ for (Symbol *sym : isec->getFile()->symbols) {
+ if (auto *d = dyn_cast_or_null<Defined>(sym)) {
+ if (d->isec == isec)
+ os << sym->getName() << "\n";
+ }
+ }
+ i = clusters[i].next;
+ if (i == leader)
+ break;
+ }
+ }
+
+ return orderMap;
+}
+
+static size_t getSymbolPriority(const SymbolPriorityEntry &entry,
+ const InputFile *f) {
+ // We don't use toString(InputFile *) here because it returns the full path
+ // for object files, and we only want the basename.
+ StringRef filename;
+ if (f->archiveName.empty())
+ filename = path::filename(f->getName());
+ else
+ filename = saver().save(path::filename(f->archiveName) + "(" +
+ path::filename(f->getName()) + ")");
+ return std::max(entry.objectFiles.lookup(filename), entry.anyObjectFile);
+}
+
+void macho::extractCallGraphProfile() {
+ TimeTraceScope timeScope("Extract call graph profile");
+ for (const InputFile *file : inputFiles) {
+ auto *obj = dyn_cast_or_null<ObjFile>(file);
+ if (!obj)
+ continue;
+ for (const CallGraphEntry &entry : obj->callGraph) {
+ assert(entry.fromIndex < obj->symbols.size() &&
+ entry.toIndex < obj->symbols.size());
+ auto *fromSym = dyn_cast_or_null<Defined>(obj->symbols[entry.fromIndex]);
+ auto *toSym = dyn_cast_or_null<Defined>(obj->symbols[entry.toIndex]);
+
+ if (!fromSym || !toSym)
+ continue;
+ config->callGraphProfile[{fromSym->isec, toSym->isec}] += entry.count;
+ }
+ }
+}
+
+void macho::parseOrderFile(StringRef path) {
+ Optional<MemoryBufferRef> buffer = readFile(path);
+ if (!buffer) {
+ error("Could not read order file at " + path);
+ return;
+ }
+
+ MemoryBufferRef mbref = *buffer;
+ size_t priority = std::numeric_limits<size_t>::max();
+ for (StringRef line : args::getLines(mbref)) {
+ StringRef objectFile, symbol;
+ line = line.take_until([](char c) { return c == '#'; }); // ignore comments
+ line = line.ltrim();
+
+ CPUType cpuType = StringSwitch<CPUType>(line)
+ .StartsWith("i386:", CPU_TYPE_I386)
+ .StartsWith("x86_64:", CPU_TYPE_X86_64)
+ .StartsWith("arm:", CPU_TYPE_ARM)
+ .StartsWith("arm64:", CPU_TYPE_ARM64)
+ .StartsWith("ppc:", CPU_TYPE_POWERPC)
+ .StartsWith("ppc64:", CPU_TYPE_POWERPC64)
+ .Default(CPU_TYPE_ANY);
+
+ if (cpuType != CPU_TYPE_ANY && cpuType != target->cpuType)
+ continue;
+
+ // Drop the CPU type as well as the colon
+ if (cpuType != CPU_TYPE_ANY)
+ line = line.drop_until([](char c) { return c == ':'; }).drop_front();
+
+ constexpr std::array<StringRef, 2> fileEnds = {".o:", ".o):"};
+ for (StringRef fileEnd : fileEnds) {
+ size_t pos = line.find(fileEnd);
+ if (pos != StringRef::npos) {
+ // Split the string around the colon
+ objectFile = line.take_front(pos + fileEnd.size() - 1);
+ line = line.drop_front(pos + fileEnd.size());
+ break;
+ }
+ }
+ symbol = line.trim();
+
+ if (!symbol.empty()) {
+ SymbolPriorityEntry &entry = config->priorities[symbol];
+ if (!objectFile.empty())
+ entry.objectFiles.insert(std::make_pair(objectFile, priority));
+ else
+ entry.anyObjectFile = std::max(entry.anyObjectFile, priority);
+ }
+
+ --priority;
+ }
+}
+
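
As a rough illustration of the line format handled above, here is a simplified
split of a single order-file entry. It is a sketch only (std::string instead of
StringRef, a shortened CPU list, and it ignores the colon-in-symbol-name
subtlety the real parser copes with); splitOrderLine is not lld API.

#include <cassert>
#include <cstddef>
#include <string>

struct OrderLine {
  std::string cpu;        // empty if no CPU prefix was given
  std::string objectFile; // empty if no object-file prefix was given
  std::string symbol;
};

static OrderLine splitOrderLine(std::string line) {
  OrderLine out;
  line = line.substr(0, line.find('#'));        // drop trailing comments
  line.erase(0, line.find_first_not_of(" \t")); // ltrim

  // Optional "<cpu>:" prefix.
  for (const char *prefix : {"i386:", "x86_64:", "arm:", "arm64:"}) {
    std::string p = prefix;
    if (line.compare(0, p.size(), p) == 0) {
      out.cpu = p.substr(0, p.size() - 1);
      line.erase(0, p.size());
      break;
    }
  }
  // Optional "<object file>:" prefix, ending in ".o:" or ".o):".
  for (const char *end : {".o):", ".o:"}) {
    std::string e = end;
    size_t pos = line.find(e);
    if (pos != std::string::npos) {
      out.objectFile = line.substr(0, pos + e.size() - 1);
      line.erase(0, pos + e.size());
      break;
    }
  }
  // Whatever remains, trimmed, is the symbol name.
  size_t b = line.find_first_not_of(" \t");
  size_t e = line.find_last_not_of(" \t");
  out.symbol = (b == std::string::npos) ? "" : line.substr(b, e - b + 1);
  return out;
}

int main() {
  OrderLine l = splitOrderLine("x86_64:foo.o:_main  # entry point");
  assert(l.cpu == "x86_64" && l.objectFile == "foo.o" && l.symbol == "_main");
  return 0;
}
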
+// Sort sections by the profile data provided by __LLVM,__cg_profile sections.
+//
+// This first builds a call graph based on the profile data, then merges sections
+// according to the C³ heuristic. All clusters are then sorted by a density
+// metric to further improve locality.
+static DenseMap<const InputSection *, size_t> computeCallGraphProfileOrder() {
+ TimeTraceScope timeScope("Call graph profile sort");
+ return CallGraphSort().run();
+}
+
+// Each section gets assigned the priority of the highest-priority symbol it
+// contains.
+DenseMap<const InputSection *, size_t> macho::buildInputSectionPriorities() {
+ if (config->callGraphProfileSort)
+ return computeCallGraphProfileOrder();
+ DenseMap<const InputSection *, size_t> sectionPriorities;
+
+ if (config->priorities.empty())
+ return sectionPriorities;
+
+ auto addSym = [&](Defined &sym) {
+ if (sym.isAbsolute())
+ return;
+
+ auto it = config->priorities.find(sym.getName());
+ if (it == config->priorities.end())
+ return;
+
+ SymbolPriorityEntry &entry = it->second;
+ size_t &priority = sectionPriorities[sym.isec];
+ priority =
+ std::max(priority, getSymbolPriority(entry, sym.isec->getFile()));
+ };
+
+ // TODO: Make sure this handles weak symbols correctly.
+ for (const InputFile *file : inputFiles) {
+ if (isa<ObjFile>(file))
+ for (Symbol *sym : file->symbols)
+ if (auto *d = dyn_cast_or_null<Defined>(sym))
+ addSym(*d);
+ }
+
+ return sectionPriorities;
+}
diff --git a/contrib/llvm-project/lld/MachO/SectionPriorities.h b/contrib/llvm-project/lld/MachO/SectionPriorities.h
new file mode 100644
index 000000000000..9cc4eff958cd
--- /dev/null
+++ b/contrib/llvm-project/lld/MachO/SectionPriorities.h
@@ -0,0 +1,55 @@
+//===- SectionPriorities.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLD_MACHO_SECTION_PRIORITIES_H
+#define LLD_MACHO_SECTION_PRIORITIES_H
+
+#include "InputSection.h"
+#include "llvm/ADT/DenseMap.h"
+
+namespace lld {
+namespace macho {
+
+// Reads every input section's call graph profile, and combines them into
+// config->callGraphProfile. If an order file is present, any edges where one
+// or both of the vertices are specified in the order file are discarded.
+void extractCallGraphProfile();
+
+// Reads the order file at `path` into config->priorities.
+//
+// An order file has one entry per line, in the following format:
+//
+// <cpu>:<object file>:<symbol name>
+//
+// <cpu> and <object file> are optional. If not specified, then that entry
+// matches any symbol of that name. Parsing this format is not quite
+// straightforward because the symbol name itself can contain colons, so when
+// encountering a colon, we consider the preceding characters to decide if it
+// can be a valid CPU type or file path.
+//
+// If a symbol is matched by multiple entries, then it takes the lowest-ordered
+// entry (the one nearest to the front of the list).
+//
+// The file can also have line comments that start with '#'.
+void parseOrderFile(StringRef path);
+
+// Returns layout priorities for some or all input sections. Sections are laid
+// out in decreasing order; that is, a higher priority section will be closer
+// to the beginning of its output section.
+//
+// If either an order file or a call graph profile is present, it is used
+// as the source of priorities. If both are present, the order file takes
+// precedence. If neither is present, an empty map is returned.
+//
+// Each section gets assigned the priority of the highest-priority symbol it
+// contains.
+llvm::DenseMap<const InputSection *, size_t> buildInputSectionPriorities();
+} // namespace macho
+} // namespace lld
+
+#endif
diff --git a/contrib/llvm-project/lld/MachO/SymbolTable.cpp b/contrib/llvm-project/lld/MachO/SymbolTable.cpp
index cec717e0cd31..c7017c8a000d 100644
--- a/contrib/llvm-project/lld/MachO/SymbolTable.cpp
+++ b/contrib/llvm-project/lld/MachO/SymbolTable.cpp
@@ -113,8 +113,10 @@ Symbol *SymbolTable::addUndefined(StringRef name, InputFile *file,
if (wasInserted)
replaceSymbol<Undefined>(s, name, file, refState);
- else if (auto *lazy = dyn_cast<LazySymbol>(s))
+ else if (auto *lazy = dyn_cast<LazyArchive>(s))
lazy->fetchArchiveMember();
+ else if (isa<LazyObject>(s))
+ extract(*s->getFile(), s->getName());
else if (auto *dynsym = dyn_cast<DylibSymbol>(s))
dynsym->reference(refState);
else if (auto *undefined = dyn_cast<Undefined>(s))
@@ -178,14 +180,14 @@ Symbol *SymbolTable::addDynamicLookup(StringRef name) {
return addDylib(name, /*file=*/nullptr, /*isWeakDef=*/false, /*isTlv=*/false);
}
-Symbol *SymbolTable::addLazy(StringRef name, ArchiveFile *file,
- const object::Archive::Symbol &sym) {
+Symbol *SymbolTable::addLazyArchive(StringRef name, ArchiveFile *file,
+ const object::Archive::Symbol &sym) {
Symbol *s;
bool wasInserted;
std::tie(s, wasInserted) = insert(name, file);
if (wasInserted) {
- replaceSymbol<LazySymbol>(s, file, sym);
+ replaceSymbol<LazyArchive>(s, file, sym);
} else if (isa<Undefined>(s)) {
file->fetch(sym);
} else if (auto *dysym = dyn_cast<DylibSymbol>(s)) {
@@ -193,7 +195,27 @@ Symbol *SymbolTable::addLazy(StringRef name, ArchiveFile *file,
if (dysym->getRefState() != RefState::Unreferenced)
file->fetch(sym);
else
- replaceSymbol<LazySymbol>(s, file, sym);
+ replaceSymbol<LazyArchive>(s, file, sym);
+ }
+ }
+ return s;
+}
+
+Symbol *SymbolTable::addLazyObject(StringRef name, InputFile &file) {
+ Symbol *s;
+ bool wasInserted;
+ std::tie(s, wasInserted) = insert(name, &file);
+
+ if (wasInserted) {
+ replaceSymbol<LazyObject>(s, file, name);
+ } else if (isa<Undefined>(s)) {
+ extract(file, name);
+ } else if (auto *dysym = dyn_cast<DylibSymbol>(s)) {
+ if (dysym->isWeakDef()) {
+ if (dysym->getRefState() != RefState::Unreferenced)
+ extract(file, name);
+ else
+ replaceSymbol<LazyObject>(s, file, name);
}
}
return s;
@@ -319,4 +341,4 @@ void lld::macho::treatUndefinedSymbol(const Undefined &sym, StringRef source) {
}
}
-SymbolTable *macho::symtab;
+std::unique_ptr<SymbolTable> macho::symtab;
diff --git a/contrib/llvm-project/lld/MachO/SymbolTable.h b/contrib/llvm-project/lld/MachO/SymbolTable.h
index 625f78aa6141..5f844170efe0 100644
--- a/contrib/llvm-project/lld/MachO/SymbolTable.h
+++ b/contrib/llvm-project/lld/MachO/SymbolTable.h
@@ -51,8 +51,9 @@ public:
Symbol *addDylib(StringRef name, DylibFile *file, bool isWeakDef, bool isTlv);
Symbol *addDynamicLookup(StringRef name);
- Symbol *addLazy(StringRef name, ArchiveFile *file,
- const llvm::object::Archive::Symbol &sym);
+ Symbol *addLazyArchive(StringRef name, ArchiveFile *file,
+ const llvm::object::Archive::Symbol &sym);
+ Symbol *addLazyObject(StringRef name, InputFile &file);
Defined *addSynthetic(StringRef name, InputSection *, uint64_t value,
bool isPrivateExtern, bool includeInSymtab,
@@ -70,7 +71,7 @@ private:
void treatUndefinedSymbol(const Undefined &, StringRef source = "");
-extern SymbolTable *symtab;
+extern std::unique_ptr<SymbolTable> symtab;
} // namespace macho
} // namespace lld
diff --git a/contrib/llvm-project/lld/MachO/Symbols.cpp b/contrib/llvm-project/lld/MachO/Symbols.cpp
index bb6d073dcf30..7bf8c6dd56d6 100644
--- a/contrib/llvm-project/lld/MachO/Symbols.cpp
+++ b/contrib/llvm-project/lld/MachO/Symbols.cpp
@@ -9,6 +9,7 @@
#include "Symbols.h"
#include "InputFiles.h"
#include "SyntheticSections.h"
+#include "lld/Common/Strings.h"
using namespace llvm;
using namespace lld;
@@ -27,17 +28,12 @@ static_assert(sizeof(void *) != 8 || sizeof(Defined) == 80,
static_assert(sizeof(SymbolUnion) == sizeof(Defined),
"Defined should be the largest Symbol kind");
-// Returns a symbol for an error message.
-static std::string demangle(StringRef symName) {
- if (config->demangle)
- return demangleItanium(symName);
- return std::string(symName);
+std::string lld::toString(const Symbol &sym) {
+ return demangle(sym.getName(), config->demangle);
}
-std::string lld::toString(const Symbol &sym) { return demangle(sym.getName()); }
-
std::string lld::toMachOString(const object::Archive::Symbol &b) {
- return demangle(b.getName());
+ return demangle(b.getName(), config->demangle);
}
uint64_t Symbol::getStubVA() const { return in.stubs->getVA(stubsIndex); }
@@ -105,4 +101,4 @@ uint64_t DylibSymbol::getVA() const {
return isInStubs() ? getStubVA() : Symbol::getVA();
}
-void LazySymbol::fetchArchiveMember() { getFile()->fetch(sym); }
+void LazyArchive::fetchArchiveMember() { getFile()->fetch(sym); }
diff --git a/contrib/llvm-project/lld/MachO/Symbols.h b/contrib/llvm-project/lld/MachO/Symbols.h
index d1182a0a2d32..b3d86b0d3cad 100644
--- a/contrib/llvm-project/lld/MachO/Symbols.h
+++ b/contrib/llvm-project/lld/MachO/Symbols.h
@@ -37,7 +37,8 @@ public:
UndefinedKind,
CommonKind,
DylibKind,
- LazyKind,
+ LazyArchiveKind,
+ LazyObjectKind,
};
virtual ~Symbol() {}
@@ -51,6 +52,9 @@ public:
}
bool isLive() const { return used; }
+ bool isLazy() const {
+ return symbolKind == LazyArchiveKind || symbolKind == LazyObjectKind;
+ }
virtual uint64_t getVA() const { return 0; }
@@ -280,26 +284,39 @@ private:
const bool tlv : 1;
};
-class LazySymbol : public Symbol {
+class LazyArchive : public Symbol {
public:
- LazySymbol(ArchiveFile *file, const llvm::object::Archive::Symbol &sym)
- : Symbol(LazyKind, sym.getName(), file), sym(sym) {}
+ LazyArchive(ArchiveFile *file, const llvm::object::Archive::Symbol &sym)
+ : Symbol(LazyArchiveKind, sym.getName(), file), sym(sym) {}
ArchiveFile *getFile() const { return cast<ArchiveFile>(file); }
void fetchArchiveMember();
- static bool classof(const Symbol *s) { return s->kind() == LazyKind; }
+ static bool classof(const Symbol *s) { return s->kind() == LazyArchiveKind; }
private:
const llvm::object::Archive::Symbol sym;
};
+// A defined symbol in an ObjFile/BitcodeFile surrounded by --start-lib and
+// --end-lib.
+class LazyObject : public Symbol {
+public:
+ LazyObject(InputFile &file, StringRef name)
+ : Symbol(LazyObjectKind, name, &file) {
+ isUsedInRegularObj = false;
+ }
+
+ static bool classof(const Symbol *s) { return s->kind() == LazyObjectKind; }
+};
+
union SymbolUnion {
alignas(Defined) char a[sizeof(Defined)];
alignas(Undefined) char b[sizeof(Undefined)];
alignas(CommonSymbol) char c[sizeof(CommonSymbol)];
alignas(DylibSymbol) char d[sizeof(DylibSymbol)];
- alignas(LazySymbol) char e[sizeof(LazySymbol)];
+ alignas(LazyArchive) char e[sizeof(LazyArchive)];
+ alignas(LazyObject) char f[sizeof(LazyObject)];
};
template <typename T, typename... ArgT>
diff --git a/contrib/llvm-project/lld/MachO/SyntheticSections.cpp b/contrib/llvm-project/lld/MachO/SyntheticSections.cpp
index b64a9db485c5..3fe551f5684b 100644
--- a/contrib/llvm-project/lld/MachO/SyntheticSections.cpp
+++ b/contrib/llvm-project/lld/MachO/SyntheticSections.cpp
@@ -16,8 +16,7 @@
#include "SymbolTable.h"
#include "Symbols.h"
-#include "lld/Common/ErrorHandler.h"
-#include "lld/Common/Memory.h"
+#include "lld/Common/CommonLinkerContext.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/EndianStream.h"
@@ -85,7 +84,7 @@ static uint32_t cpuSubtype() {
if (config->outputType == MH_EXECUTE && !config->staticLink &&
target->cpuSubtype == CPU_SUBTYPE_X86_64_ALL &&
- config->platform() == PlatformKind::macOS &&
+ config->platform() == PLATFORM_MACOS &&
config->platformInfo.minimum >= VersionTuple(10, 5))
subtype |= CPU_SUBTYPE_LIB64;
@@ -834,7 +833,7 @@ void SymtabSection::emitBeginSourceStab(DWARFUnit *compileUnit) {
if (!dir.endswith(sep))
dir += sep;
stab.strx = stringTableSection.addString(
- saver.save(dir + compileUnit->getUnitDIE().getShortName()));
+ saver().save(dir + compileUnit->getUnitDIE().getShortName()));
stabs.emplace_back(std::move(stab));
}
@@ -856,7 +855,7 @@ void SymtabSection::emitObjectFileStab(ObjFile *file) {
if (!file->archiveName.empty())
path.append({"(", file->getName(), ")"});
- StringRef adjustedPath = saver.save(path.str());
+ StringRef adjustedPath = saver().save(path.str());
adjustedPath.consume_front(config->osoPrefix);
stab.strx = stringTableSection.addString(adjustedPath);
@@ -1283,7 +1282,10 @@ void BitcodeBundleSection::finalize() {
using namespace llvm::sys::fs;
CHECK_EC(createTemporaryFile("bitcode-bundle", "xar", xarPath));
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
xar_t xar(xar_open(xarPath.data(), O_RDWR));
+#pragma clang diagnostic pop
if (!xar)
fatal("failed to open XAR temporary file at " + xarPath);
CHECK_EC(xar_opt_set(xar, XAR_OPT_COMPRESSION, XAR_OPT_VAL_NONE));
@@ -1379,26 +1381,15 @@ DeduplicatedCStringSection::DeduplicatedCStringSection()
void DeduplicatedCStringSection::finalizeContents() {
// Add all string pieces to the string table builder to create section
// contents.
- for (const CStringInputSection *isec : inputs)
+ for (CStringInputSection *isec : inputs) {
for (size_t i = 0, e = isec->pieces.size(); i != e; ++i)
if (isec->pieces[i].live)
- builder.add(isec->getCachedHashStringRef(i));
+ isec->pieces[i].outSecOff =
+ builder.add(isec->getCachedHashStringRef(i));
+ isec->isFinal = true;
+ }
- // Fix the string table content. After this, the contents will never change.
builder.finalizeInOrder();
-
- // finalize() fixed tail-optimized strings, so we can now get
- // offsets of strings. Get an offset for each string and save it
- // to a corresponding SectionPiece for easy access.
- for (CStringInputSection *isec : inputs) {
- for (size_t i = 0, e = isec->pieces.size(); i != e; ++i) {
- if (!isec->pieces[i].live)
- continue;
- isec->pieces[i].outSecOff =
- builder.getOffset(isec->getCachedHashStringRef(i));
- isec->isFinal = true;
- }
- }
}
// This section is actually emitted as __TEXT,__const by ld64, but clang may
@@ -1474,7 +1465,7 @@ void WordLiteralSection::writeTo(uint8_t *buf) const {
void macho::createSyntheticSymbols() {
auto addHeaderSymbol = [](const char *name) {
symtab->addSynthetic(name, in.header->isec, /*value=*/0,
- /*privateExtern=*/true, /*includeInSymtab=*/false,
+ /*isPrivateExtern=*/true, /*includeInSymtab=*/false,
/*referencedDynamically=*/false);
};
@@ -1487,11 +1478,11 @@ void macho::createSyntheticSymbols() {
// Otherwise, it's an absolute symbol.
if (config->isPic)
symtab->addSynthetic("__mh_execute_header", in.header->isec, /*value=*/0,
- /*privateExtern=*/false, /*includeInSymtab=*/true,
+ /*isPrivateExtern=*/false, /*includeInSymtab=*/true,
/*referencedDynamically=*/true);
else
symtab->addSynthetic("__mh_execute_header", /*isec=*/nullptr, /*value=*/0,
- /*privateExtern=*/false, /*includeInSymtab=*/true,
+ /*isPrivateExtern=*/false, /*includeInSymtab=*/true,
/*referencedDynamically=*/true);
break;
diff --git a/contrib/llvm-project/lld/MachO/UnwindInfoSection.cpp b/contrib/llvm-project/lld/MachO/UnwindInfoSection.cpp
index d28c7a33ff36..49af2f6ad9a8 100644
--- a/contrib/llvm-project/lld/MachO/UnwindInfoSection.cpp
+++ b/contrib/llvm-project/lld/MachO/UnwindInfoSection.cpp
@@ -226,7 +226,7 @@ void UnwindInfoSectionImpl<Ptr>::prepareRelocations(ConcatInputSection *isec) {
// (See discussions/alternatives already considered on D107533)
if (!defined->isExternal())
if (Symbol *sym = symtab->find(defined->getName()))
- if (sym->kind() != Symbol::LazyKind)
+ if (!sym->isLazy())
r.referent = s = sym;
}
if (auto *undefined = dyn_cast<Undefined>(s)) {
diff --git a/contrib/llvm-project/lld/MachO/Writer.cpp b/contrib/llvm-project/lld/MachO/Writer.cpp
index 8903f0189ef9..2c0794e08ae3 100644
--- a/contrib/llvm-project/lld/MachO/Writer.cpp
+++ b/contrib/llvm-project/lld/MachO/Writer.cpp
@@ -14,6 +14,7 @@
#include "MapFile.h"
#include "OutputSection.h"
#include "OutputSegment.h"
+#include "SectionPriorities.h"
#include "SymbolTable.h"
#include "Symbols.h"
#include "SyntheticSections.h"
@@ -21,8 +22,7 @@
#include "UnwindInfoSection.h"
#include "lld/Common/Arrays.h"
-#include "lld/Common/ErrorHandler.h"
-#include "lld/Common/Memory.h"
+#include "lld/Common/CommonLinkerContext.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/LEB128.h"
@@ -415,19 +415,19 @@ public:
void writeTo(uint8_t *buf) const override {
auto *c = reinterpret_cast<version_min_command *>(buf);
switch (platformInfo.target.Platform) {
- case PlatformKind::macOS:
+ case PLATFORM_MACOS:
c->cmd = LC_VERSION_MIN_MACOSX;
break;
- case PlatformKind::iOS:
- case PlatformKind::iOSSimulator:
+ case PLATFORM_IOS:
+ case PLATFORM_IOSSIMULATOR:
c->cmd = LC_VERSION_MIN_IPHONEOS;
break;
- case PlatformKind::tvOS:
- case PlatformKind::tvOSSimulator:
+ case PLATFORM_TVOS:
+ case PLATFORM_TVOSSIMULATOR:
c->cmd = LC_VERSION_MIN_TVOS;
break;
- case PlatformKind::watchOS:
- case PlatformKind::watchOSSimulator:
+ case PLATFORM_WATCHOS:
+ case PLATFORM_WATCHOSSIMULATOR:
c->cmd = LC_VERSION_MIN_WATCHOS;
break;
default:
@@ -610,7 +610,7 @@ static bool needsBinding(const Symbol *sym) {
}
static void prepareSymbolRelocation(Symbol *sym, const InputSection *isec,
- const Reloc &r) {
+ const lld::macho::Reloc &r) {
assert(sym->isLive());
const RelocAttrs &relocAttrs = target->getRelocAttrs(r.type);
@@ -643,7 +643,7 @@ void Writer::scanRelocations() {
continue;
for (auto it = isec->relocs.begin(); it != isec->relocs.end(); ++it) {
- Reloc &r = *it;
+ lld::macho::Reloc &r = *it;
if (target->hasAttr(r.type, RelocAttrBits::SUBTRAHEND)) {
// Skip over the following UNSIGNED relocation -- it's just there as the
// minuend, and doesn't have the usual UNSIGNED semantics. We don't want
@@ -709,14 +709,14 @@ void Writer::scanSymbols() {
// TODO: ld64 enforces the old load commands in a few other cases.
static bool useLCBuildVersion(const PlatformInfo &platformInfo) {
- static const std::vector<std::pair<PlatformKind, VersionTuple>> minVersion = {
- {PlatformKind::macOS, VersionTuple(10, 14)},
- {PlatformKind::iOS, VersionTuple(12, 0)},
- {PlatformKind::iOSSimulator, VersionTuple(13, 0)},
- {PlatformKind::tvOS, VersionTuple(12, 0)},
- {PlatformKind::tvOSSimulator, VersionTuple(13, 0)},
- {PlatformKind::watchOS, VersionTuple(5, 0)},
- {PlatformKind::watchOSSimulator, VersionTuple(6, 0)}};
+ static const std::vector<std::pair<PlatformType, VersionTuple>> minVersion = {
+ {PLATFORM_MACOS, VersionTuple(10, 14)},
+ {PLATFORM_IOS, VersionTuple(12, 0)},
+ {PLATFORM_IOSSIMULATOR, VersionTuple(13, 0)},
+ {PLATFORM_TVOS, VersionTuple(12, 0)},
+ {PLATFORM_TVOSSIMULATOR, VersionTuple(13, 0)},
+ {PLATFORM_WATCHOS, VersionTuple(5, 0)},
+ {PLATFORM_WATCHOSSIMULATOR, VersionTuple(6, 0)}};
auto it = llvm::find_if(minVersion, [&](const auto &p) {
return p.first == platformInfo.target.Platform;
});
@@ -849,52 +849,6 @@ template <class LP> void Writer::createLoadCommands() {
: 0));
}
-static size_t getSymbolPriority(const SymbolPriorityEntry &entry,
- const InputFile *f) {
- // We don't use toString(InputFile *) here because it returns the full path
- // for object files, and we only want the basename.
- StringRef filename;
- if (f->archiveName.empty())
- filename = path::filename(f->getName());
- else
- filename = saver.save(path::filename(f->archiveName) + "(" +
- path::filename(f->getName()) + ")");
- return std::max(entry.objectFiles.lookup(filename), entry.anyObjectFile);
-}
-
-// Each section gets assigned the priority of the highest-priority symbol it
-// contains.
-static DenseMap<const InputSection *, size_t> buildInputSectionPriorities() {
- DenseMap<const InputSection *, size_t> sectionPriorities;
-
- if (config->priorities.empty())
- return sectionPriorities;
-
- auto addSym = [&](Defined &sym) {
- if (sym.isAbsolute())
- return;
-
- auto it = config->priorities.find(sym.getName());
- if (it == config->priorities.end())
- return;
-
- SymbolPriorityEntry &entry = it->second;
- size_t &priority = sectionPriorities[sym.isec];
- priority =
- std::max(priority, getSymbolPriority(entry, sym.isec->getFile()));
- };
-
- // TODO: Make sure this handles weak symbols correctly.
- for (const InputFile *file : inputFiles) {
- if (isa<ObjFile>(file))
- for (Symbol *sym : file->symbols)
- if (auto *d = dyn_cast_or_null<Defined>(sym))
- addSym(*d);
- }
-
- return sectionPriorities;
-}
-
// Sorting can only happen once all outputs have been collected. Here we sort
// segments, output sections within each segment, and input sections within each
// output segment.
@@ -908,13 +862,28 @@ static void sortSegmentsAndSections() {
uint32_t sectionIndex = 0;
for (OutputSegment *seg : outputSegments) {
seg->sortOutputSections();
+ // References from thread-local variable sections are treated as offsets
+ // relative to the start of the thread-local data memory area, which
+ // is initialized via copying all the TLV data sections (which are all
+ // contiguous). If later data sections require a greater alignment than
+ // earlier ones, the offsets of data within those sections won't be
+ // guaranteed to be aligned unless we normalize alignments. We therefore use
+ // the largest alignment for all TLV data sections.
+ uint32_t tlvAlign = 0;
+ for (const OutputSection *osec : seg->getSections())
+ if (isThreadLocalData(osec->flags) && osec->align > tlvAlign)
+ tlvAlign = osec->align;
+
for (OutputSection *osec : seg->getSections()) {
// Now that the output sections are sorted, assign the final
// output section indices.
if (!osec->isHidden())
osec->index = ++sectionIndex;
- if (!firstTLVDataSection && isThreadLocalData(osec->flags))
- firstTLVDataSection = osec;
+ if (isThreadLocalData(osec->flags)) {
+ if (!firstTLVDataSection)
+ firstTLVDataSection = osec;
+ osec->align = tlvAlign;
+ }
if (!isecPriorities.empty()) {
if (auto *merged = dyn_cast<ConcatOutputSection>(osec)) {
@@ -1108,7 +1077,7 @@ void Writer::writeUuid() {
threadFutures.reserve(chunks.size());
for (size_t i = 0; i < chunks.size(); ++i)
threadFutures.emplace_back(threadPool.async(
- [&](size_t i) { hashes[i] = xxHash64(chunks[i]); }, i));
+ [&](size_t j) { hashes[j] = xxHash64(chunks[j]); }, i));
for (std::shared_future<void> &future : threadFutures)
future.wait();
@@ -1160,7 +1129,13 @@ template <class LP> void Writer::run() {
sortSegmentsAndSections();
createLoadCommands<LP>();
finalizeAddresses();
- threadPool.async(writeMapFile);
+ threadPool.async([&] {
+ if (LLVM_ENABLE_THREADS && config->timeTraceEnabled)
+ timeTraceProfilerInitialize(config->timeTraceGranularity, "writeMapFile");
+ writeMapFile();
+ if (LLVM_ENABLE_THREADS && config->timeTraceEnabled)
+ timeTraceProfilerFinishThread();
+ });
finalizeLinkEditSegment();
writeOutputFile();
}
@@ -1192,7 +1167,7 @@ void macho::createSyntheticSections() {
// This section contains space for just a single word, and will be used by
// dyld to cache an address to the image loader it uses.
- uint8_t *arr = bAlloc.Allocate<uint8_t>(target->wordSize);
+ uint8_t *arr = bAlloc().Allocate<uint8_t>(target->wordSize);
memset(arr, 0, target->wordSize);
in.imageLoaderCache = make<ConcatInputSection>(
segment_names::data, section_names::data, /*file=*/nullptr,
diff --git a/contrib/llvm-project/lld/MachO/ld64-vs-lld.rst b/contrib/llvm-project/lld/MachO/ld64-vs-lld.rst
index 14fb72a2ac8f..b31b4bb0ea89 100644
--- a/contrib/llvm-project/lld/MachO/ld64-vs-lld.rst
+++ b/contrib/llvm-project/lld/MachO/ld64-vs-lld.rst
@@ -4,6 +4,22 @@ LD64 vs LLD-MACHO
This doc lists all significant deliberate differences in behavior between LD64 and LLD-MachO.
+String literal deduplication
+****************************
+LD64 always deduplicates string literals. LLD only does so when the ``--icf=``
+or ``--deduplicate-literals`` flag is passed. Omitting deduplication by default
+ensures that our link is as fast as possible. However, it may also break some
+programs which have (incorrectly) relied on string deduplication always
+occurring. In particular, programs which compare string literals via pointer
+equality must be fixed to use value equality instead.
+
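
A minimal sketch of the kind of code this can break (illustration only, not
taken from any real program):

#include <cassert>
#include <cstring>

static const char *greetingA() { return "hello"; }
static const char *greetingB() { return "hello"; }

int main() {
  // Whether two identical literals share an address is unspecified: a compiler
  // may merge them within one translation unit, but across object files this
  // only happens if the linker deduplicates literals.
  bool samePointer = greetingA() == greetingB();
  (void)samePointer; // may be true or false
  // Value equality is the portable check such programs should use instead.
  assert(std::strcmp(greetingA(), greetingB()) == 0);
  return 0;
}
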
+``-no_deduplicate`` Flag
+************************
+- LD64:
+ * This turns off ICF (deduplication pass) in the linker.
+- LLD:
+ * This turns off ICF and string merging in the linker.
+
ObjC symbols treatment
**********************
There are differences in how LLD and LD64 handle ObjC symbols loaded from archives.
diff --git a/contrib/llvm-project/lld/docs/_templates/indexsidebar.html b/contrib/llvm-project/lld/docs/_templates/indexsidebar.html
index 588be9309bde..f9ecb724153d 100644
--- a/contrib/llvm-project/lld/docs/_templates/indexsidebar.html
+++ b/contrib/llvm-project/lld/docs/_templates/indexsidebar.html
@@ -1,4 +1,9 @@
<h3>Bugs</h3>
-<p>lld bugs should be reported at the
- LLVM <a href="https://bugs.llvm.org/">Bugzilla</a>.</p>
+<p>
+To report bugs, please visit
+<a href="https://github.com/llvm/llvm-project/labels/lld:COFF">PE/COFF</a>,
+<a href="https://github.com/llvm/llvm-project/labels/lld:ELF">ELF</a>,
+<a href="https://github.com/llvm/llvm-project/labels/lld:MachO">Mach-O</a>, or
+<a href="https://github.com/llvm/llvm-project/labels/lld:wasm">WebAssembly</a>.
+</p>
diff --git a/contrib/llvm-project/lld/docs/ld.lld.1 b/contrib/llvm-project/lld/docs/ld.lld.1
index 04f0982b4ced..da43cf0ef7ab 100644
--- a/contrib/llvm-project/lld/docs/ld.lld.1
+++ b/contrib/llvm-project/lld/docs/ld.lld.1
@@ -486,7 +486,7 @@ Save the current state of
and
.Fl -whole-archive.
.It Fl -pop-state
-Undo the effect of
+Restore the states saved by
.Fl -push-state.
.It Fl -relocatable , Fl r
Create relocatable object file.
diff --git a/contrib/llvm-project/lld/include/lld/Common/CommonLinkerContext.h b/contrib/llvm-project/lld/include/lld/Common/CommonLinkerContext.h
new file mode 100644
index 000000000000..3954d38ded63
--- /dev/null
+++ b/contrib/llvm-project/lld/include/lld/Common/CommonLinkerContext.h
@@ -0,0 +1,65 @@
+//===- CommonLinkerContext.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Entry point for all global state in lldCommon. The objective is for LLD to be
+// used "as a library" in a thread-safe manner.
+//
+// Instead of program-wide globals or function-local statics, we prefer
+// aggregating all "global" states into a heap-based structure
+// (CommonLinkerContext). This also achieves deterministic initialization &
+// shutdown for all "global" states.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLD_COMMON_COMMONLINKINGCONTEXT_H
+#define LLD_COMMON_COMMONLINKINGCONTEXT_H
+
+#include "lld/Common/ErrorHandler.h"
+#include "lld/Common/Memory.h"
+#include "llvm/CodeGen/CommandFlags.h"
+#include "llvm/Support/StringSaver.h"
+
+namespace llvm {
+class raw_ostream;
+} // namespace llvm
+
+namespace lld {
+struct SpecificAllocBase;
+class CommonLinkerContext {
+public:
+ CommonLinkerContext();
+ virtual ~CommonLinkerContext();
+
+ static void destroy();
+
+ llvm::BumpPtrAllocator bAlloc;
+ llvm::StringSaver saver{bAlloc};
+ llvm::DenseMap<void *, SpecificAllocBase *> instances;
+
+ ErrorHandler e;
+
+private:
+ llvm::codegen::RegisterCodeGenFlags cgf;
+};
+
+// Retrieve the global state. Currently only one state can exist per process,
+// but in the future we plan on supporting an arbitrary number of LLD instances
+// in a single process.
+CommonLinkerContext &commonContext();
+
+template <typename T = CommonLinkerContext> T &context() {
+ return static_cast<T &>(commonContext());
+}
+
+bool hasContext();
+
+inline llvm::StringSaver &saver() { return context().saver; }
+inline llvm::BumpPtrAllocator &bAlloc() { return context().bAlloc; }
+} // namespace lld
+
+#endif
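
A minimal usage sketch of the new per-context accessors (assumes a live
CommonLinkerContext; internString and zeroedScratch are hypothetical helpers,
not part of the patch):

#include "lld/Common/CommonLinkerContext.h"
#include "llvm/ADT/Twine.h"

#include <cstddef>
#include <cstdint>
#include <cstring>

// Intern a string into the per-context arena, as the ports now do via
// saver().save(...) instead of the old global `saver`.
static llvm::StringRef internString(const llvm::Twine &s) {
  return lld::saver().save(s);
}

// Carve zero-initialized scratch space out of the per-context bump allocator,
// mirroring the bAlloc().Allocate<uint8_t>(...) call in the Writer.cpp hunk
// earlier in this patch.
static uint8_t *zeroedScratch(size_t n) {
  uint8_t *buf = lld::bAlloc().Allocate<uint8_t>(n);
  std::memset(buf, 0, n);
  return buf;
}
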
diff --git a/contrib/llvm-project/lld/include/lld/Common/Driver.h b/contrib/llvm-project/lld/include/lld/Common/Driver.h
index 0e505a16463e..91cb91b9f808 100644
--- a/contrib/llvm-project/lld/include/lld/Common/Driver.h
+++ b/contrib/llvm-project/lld/include/lld/Common/Driver.h
@@ -9,6 +9,7 @@
#ifndef LLD_COMMON_DRIVER_H
#define LLD_COMMON_DRIVER_H
+#include "lld/Common/CommonLinkerContext.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/raw_ostream.h"
@@ -28,28 +29,28 @@ SafeReturn safeLldMain(int argc, const char **argv, llvm::raw_ostream &stdoutOS,
llvm::raw_ostream &stderrOS);
namespace coff {
-bool link(llvm::ArrayRef<const char *> args, bool canExitEarly,
- llvm::raw_ostream &stdoutOS, llvm::raw_ostream &stderrOS);
+bool link(llvm::ArrayRef<const char *> args, llvm::raw_ostream &stdoutOS,
+ llvm::raw_ostream &stderrOS, bool exitEarly, bool disableOutput);
}
namespace mingw {
-bool link(llvm::ArrayRef<const char *> args, bool canExitEarly,
- llvm::raw_ostream &stdoutOS, llvm::raw_ostream &stderrOS);
+bool link(llvm::ArrayRef<const char *> args, llvm::raw_ostream &stdoutOS,
+ llvm::raw_ostream &stderrOS, bool exitEarly, bool disableOutput);
}
namespace elf {
-bool link(llvm::ArrayRef<const char *> args, bool canExitEarly,
- llvm::raw_ostream &stdoutOS, llvm::raw_ostream &stderrOS);
+bool link(llvm::ArrayRef<const char *> args, llvm::raw_ostream &stdoutOS,
+ llvm::raw_ostream &stderrOS, bool exitEarly, bool disableOutput);
}
namespace macho {
-bool link(llvm::ArrayRef<const char *> args, bool canExitEarly,
- llvm::raw_ostream &stdoutOS, llvm::raw_ostream &stderrOS);
+bool link(llvm::ArrayRef<const char *> args, llvm::raw_ostream &stdoutOS,
+ llvm::raw_ostream &stderrOS, bool exitEarly, bool disableOutput);
}
namespace wasm {
-bool link(llvm::ArrayRef<const char *> args, bool canExitEarly,
- llvm::raw_ostream &stdoutOS, llvm::raw_ostream &stderrOS);
+bool link(llvm::ArrayRef<const char *> args, llvm::raw_ostream &stdoutOS,
+ llvm::raw_ostream &stderrOS, bool exitEarly, bool disableOutput);
}
}
diff --git a/contrib/llvm-project/lld/include/lld/Common/ErrorHandler.h b/contrib/llvm-project/lld/include/lld/Common/ErrorHandler.h
index d95a2537c1f2..ce077290d60b 100644
--- a/contrib/llvm-project/lld/include/lld/Common/ErrorHandler.h
+++ b/contrib/llvm-project/lld/include/lld/Common/ErrorHandler.h
@@ -73,6 +73,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FileOutputBuffer.h"
+#include <mutex>
namespace llvm {
class DiagnosticInfo;
@@ -81,11 +82,6 @@ class raw_ostream;
namespace lld {
-// We wrap stdout and stderr so that you can pass alternative stdout/stderr as
-// arguments to lld::*::link() functions.
-extern llvm::raw_ostream *stdoutOS;
-extern llvm::raw_ostream *stderrOS;
-
llvm::raw_ostream &outs();
llvm::raw_ostream &errs();
@@ -93,6 +89,11 @@ enum class ErrorTag { LibNotFound, SymbolNotFound };
class ErrorHandler {
public:
+ ~ErrorHandler();
+
+ void initialize(llvm::raw_ostream &stdoutOS, llvm::raw_ostream &stderrOS,
+ bool exitEarly, bool disableOutput);
+
uint64_t errorCount = 0;
uint64_t errorLimit = 20;
StringRef errorLimitExceededMsg = "too many errors emitted, stopping now";
@@ -112,11 +113,9 @@ public:
void message(const Twine &msg, llvm::raw_ostream &s);
void warn(const Twine &msg);
- void reset() {
- if (cleanupCallback)
- cleanupCallback();
- *this = ErrorHandler();
- }
+ raw_ostream &outs();
+ raw_ostream &errs();
+ void flushStreams();
std::unique_ptr<llvm::FileOutputBuffer> outputBuffer;
@@ -126,6 +125,19 @@ private:
std::string getLocation(const Twine &msg);
void reportDiagnostic(StringRef location, Colors c, StringRef diagKind,
const Twine &msg);
+
+ // We want to separate multi-line messages with a newline. `sep` is "\n"
+ // if the last message was multi-line. Otherwise "".
+ llvm::StringRef sep;
+
+ // We wrap stdout and stderr so that you can pass alternative stdout/stderr as
+ // arguments to lld::*::link() functions. Since lld::outs() or lld::errs() can
+ // be indirectly called from multiple threads, we protect them using a mutex.
+ // In the future, we plan on supporting several concurrent linker contexts,
+ // which explains why the mutex is not a global but part of this context.
+ std::mutex mu;
+ llvm::raw_ostream *stdoutOS{};
+ llvm::raw_ostream *stderrOS{};
};
/// Returns the default error handler.
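A hedged sketch (not from the upstream change): with the streams now stored per context, a driver wires them into the error handler once via initialize() before emitting diagnostics. It assumes the errorHandler() accessor that the doc comment above introduces.

void setUpDiagnostics(llvm::raw_ostream &out, llvm::raw_ostream &err) {
  lld::ErrorHandler &e = lld::errorHandler();
  // Store the streams and behavior flags in this context's handler.
  e.initialize(out, err, /*exitEarly=*/false, /*disableOutput=*/false);
  e.errorLimit = 50; // public field shown above
}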
diff --git a/contrib/llvm-project/lld/include/lld/Common/Memory.h b/contrib/llvm-project/lld/include/lld/Common/Memory.h
index f516a327cfb2..0b2f474c3013 100644
--- a/contrib/llvm-project/lld/include/lld/Common/Memory.h
+++ b/contrib/llvm-project/lld/include/lld/Common/Memory.h
@@ -22,42 +22,41 @@
#define LLD_COMMON_MEMORY_H
#include "llvm/Support/Allocator.h"
-#include "llvm/Support/StringSaver.h"
-#include <vector>
namespace lld {
-
-// Use this arena if your object doesn't have a destructor.
-extern llvm::BumpPtrAllocator bAlloc;
-extern llvm::StringSaver saver;
-
-void freeArena();
-
-// These two classes are hack to keep track of all
-// SpecificBumpPtrAllocator instances.
+// A base class only used by the CommonLinkerContext to keep track of the
+// SpecificAlloc<> instances.
struct SpecificAllocBase {
- SpecificAllocBase() { instances.push_back(this); }
virtual ~SpecificAllocBase() = default;
- virtual void reset() = 0;
- static std::vector<SpecificAllocBase *> instances;
+ static SpecificAllocBase *getOrCreate(void *tag, size_t size, size_t align,
+ SpecificAllocBase *(&creator)(void *));
};
+// An arena of specific types T, created on-demand.
template <class T> struct SpecificAlloc : public SpecificAllocBase {
- void reset() override { alloc.DestroyAll(); }
+ static SpecificAllocBase *create(void *storage) {
+ return new (storage) SpecificAlloc<T>();
+ }
llvm::SpecificBumpPtrAllocator<T> alloc;
+ static int tag;
};
-// Use a static local for these singletons so they are only registered if an
-// object of this instance is ever constructed. Otherwise we will create and
-// register ELF allocators for COFF and the reverse.
+// The address of this static member is only used as a key in
+// CommonLinkerContext::instances. Its value does not matter.
+template <class T> int SpecificAlloc<T>::tag = 0;
+
+// Creates the arena on-demand on the first call, or returns it if it was
+// already created.
template <typename T>
inline llvm::SpecificBumpPtrAllocator<T> &getSpecificAllocSingleton() {
- static SpecificAlloc<T> instance;
- return instance.alloc;
+ SpecificAllocBase *instance = SpecificAllocBase::getOrCreate(
+ &SpecificAlloc<T>::tag, sizeof(SpecificAlloc<T>),
+ alignof(SpecificAlloc<T>), SpecificAlloc<T>::create);
+ return ((SpecificAlloc<T> *)instance)->alloc;
}
-// Use this arena if your object has a destructor.
-// Your destructor will be invoked from freeArena().
+// Creates new instances of T off an (almost) contiguous arena/object pool. The
+// instances are destroyed when lldMain() exits.
template <typename T, typename... U> T *make(U &&... args) {
return new (getSpecificAllocSingleton<T>().Allocate())
T(std::forward<U>(args)...);
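A brief sketch (not from the upstream change): make<T>() now draws from a per-type arena owned by the CommonLinkerContext, so the object outlives the calling scope and is destroyed together with the context. Chunk is a hypothetical type used only for illustration.

#include "lld/Common/Memory.h"

struct Chunk {
  explicit Chunk(int n) : n(n) {}
  int n;
};

// The returned pointer stays valid until CommonLinkerContext::destroy().
Chunk *newChunk(int n) { return lld::make<Chunk>(n); }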
diff --git a/contrib/llvm-project/lld/include/lld/Common/Strings.h b/contrib/llvm-project/lld/include/lld/Common/Strings.h
index 71126f615017..ece801892767 100644
--- a/contrib/llvm-project/lld/include/lld/Common/Strings.h
+++ b/contrib/llvm-project/lld/include/lld/Common/Strings.h
@@ -12,14 +12,19 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Demangle/Demangle.h"
#include "llvm/Support/GlobPattern.h"
#include <string>
#include <vector>
namespace lld {
-// Returns a demangled C++ symbol name. If Name is not a mangled
-// name, it returns name.
-std::string demangleItanium(llvm::StringRef name);
+// Returns a demangled symbol name. If symName is not a mangled name, it is
+// returned unchanged.
+inline std::string demangle(llvm::StringRef symName, bool shouldDemangle) {
+ if (shouldDemangle)
+ return llvm::demangle(symName.str().c_str());
+ return std::string(symName);
+}
std::vector<uint8_t> parseHex(llvm::StringRef s);
bool isValidCIdentifier(llvm::StringRef s);
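A usage sketch (not from the upstream change); the new helper defers to llvm::demangle() when the caller asks for demangling and otherwise echoes the symbol back.

#include "lld/Common/Strings.h"

std::string printableName(llvm::StringRef sym, bool demangleNames) {
  return lld::demangle(sym, demangleNames);
}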
diff --git a/contrib/llvm-project/lld/include/lld/Core/LinkingContext.h b/contrib/llvm-project/lld/include/lld/Core/LinkingContext.h
index e090ff990231..091369e14319 100644
--- a/contrib/llvm-project/lld/include/lld/Core/LinkingContext.h
+++ b/contrib/llvm-project/lld/include/lld/Core/LinkingContext.h
@@ -9,6 +9,7 @@
#ifndef LLD_CORE_LINKING_CONTEXT_H
#define LLD_CORE_LINKING_CONTEXT_H
+#include "lld/Common/CommonLinkerContext.h"
#include "lld/Core/Node.h"
#include "lld/Core/Reader.h"
#include "llvm/ADT/ArrayRef.h"
@@ -34,7 +35,7 @@ class SharedLibraryFile;
/// The base class LinkingContext contains the options needed by core linking.
/// Subclasses of LinkingContext have additional options needed by specific
/// Writers.
-class LinkingContext {
+class LinkingContext : public CommonLinkerContext {
public:
virtual ~LinkingContext();
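A sketch (not from the upstream change) of the intended pattern: a port-specific context derives from CommonLinkerContext, and code retrieves it through the templated context<T>() accessor. MyPortContext is hypothetical; the cast is only valid if the active context really was created as that type.

class MyPortContext : public lld::CommonLinkerContext {
public:
  bool verbose = false;
};

// Assumes the driver constructed a MyPortContext for this link.
bool isVerbose() { return lld::context<MyPortContext>().verbose; }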
diff --git a/contrib/llvm-project/lld/tools/lld/lld.cpp b/contrib/llvm-project/lld/tools/lld/lld.cpp
index 1929e16c7220..a6ed022e1fed 100644
--- a/contrib/llvm-project/lld/tools/lld/lld.cpp
+++ b/contrib/llvm-project/lld/tools/lld/lld.cpp
@@ -87,6 +87,8 @@ static bool isPETarget(std::vector<const char *> &v) {
// Expand response files (arguments in the form of @<filename>)
// to allow detecting the -m argument from arguments in them.
SmallVector<const char *, 256> expandedArgs(v.data(), v.data() + v.size());
+ BumpPtrAllocator a;
+ StringSaver saver(a);
cl::ExpandResponseFiles(saver, getDefaultQuotingStyle(), expandedArgs);
for (auto it = expandedArgs.begin(); it + 1 != expandedArgs.end(); ++it) {
if (StringRef(*it) != "-m")
@@ -134,32 +136,47 @@ static Flavor parseFlavor(std::vector<const char *> &v) {
return parseProgname(arg0);
}
+bool inTestOutputDisabled = false;
+
/// Universal linker main(). This linker emulates the gnu, darwin, or
/// windows linker based on the argv[0] or -flavor option.
static int lldMain(int argc, const char **argv, llvm::raw_ostream &stdoutOS,
llvm::raw_ostream &stderrOS, bool exitEarly = true) {
std::vector<const char *> args(argv, argv + argc);
+ auto link = [&args]() {
#if 1
- /* On FreeBSD we only build the ELF linker. */
- return !elf::link(args, exitEarly, stdoutOS, stderrOS);
+ // On FreeBSD we only build the ELF linker.
+ return elf::link;
#else
- switch (parseFlavor(args)) {
- case Gnu:
- if (isPETarget(args))
- return !mingw::link(args, exitEarly, stdoutOS, stderrOS);
- return !elf::link(args, exitEarly, stdoutOS, stderrOS);
- case WinLink:
- return !coff::link(args, exitEarly, stdoutOS, stderrOS);
- case Darwin:
- return !macho::link(args, exitEarly, stdoutOS, stderrOS);
- case Wasm:
- return !lld::wasm::link(args, exitEarly, stdoutOS, stderrOS);
- default:
- die("lld is a generic driver.\n"
- "Invoke ld.lld (Unix), ld64.lld (macOS), lld-link (Windows), wasm-ld"
- " (WebAssembly) instead");
- }
+ Flavor f = parseFlavor(args);
+ if (f == Gnu && isPETarget(args))
+ return mingw::link;
+ else if (f == Gnu)
+ return elf::link;
+ else if (f == WinLink)
+ return coff::link;
+ else if (f == Darwin)
+ return macho::link;
+ else if (f == Wasm)
+ return lld::wasm::link;
+ else
+ die("lld is a generic driver.\n"
+ "Invoke ld.lld (Unix), ld64.lld (macOS), lld-link (Windows), wasm-ld"
+ " (WebAssembly) instead");
#endif
+ };
+ // Run the driver. If an error occurs, false will be returned.
+ bool r = link()(args, stdoutOS, stderrOS, exitEarly, inTestOutputDisabled);
+
+ // Call exit() if we can to avoid calling destructors.
+ if (exitEarly)
+ exitLld(!r ? 1 : 0);
+
+ // Delete the global context and clear the global context pointer, so that it
+ // cannot be accessed anymore.
+ CommonLinkerContext::destroy();
+
+ return !r ? 1 : 0;
}
// Similar to lldMain except that exceptions are caught.
@@ -181,7 +198,7 @@ SafeReturn lld::safeLldMain(int argc, const char **argv,
// Clean up memory and reset everything back to a pristine condition. This path
// is only taken when LLD is in test, or when it is used as a library.
llvm::CrashRecoveryContext crc;
- if (!crc.RunSafely([&]() { errorHandler().reset(); })) {
+ if (!crc.RunSafely([&]() { CommonLinkerContext::destroy(); })) {
// The memory is corrupted beyond any possible recovery.
return {r, /*canRunAgain=*/false};
}
@@ -212,8 +229,7 @@ int main(int argc, const char **argv) {
for (unsigned i = inTestVerbosity(); i > 0; --i) {
// Disable stdout/stderr for all iterations but the last one.
- if (i != 1)
- errorHandler().disableOutput = true;
+ inTestOutputDisabled = (i != 1);
// Execute one iteration.
auto r = safeLldMain(argc, argv, llvm::outs(), llvm::errs());
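A sketch (not from the upstream change) of the dispatch idiom used above: because every per-flavor link() now shares one signature, the flavor switch can hand back a plain function pointer and the call site is identical for all drivers. runDriver is a hypothetical helper mirroring the lambda in lldMain().

using LinkFn = bool (*)(llvm::ArrayRef<const char *>, llvm::raw_ostream &,
                        llvm::raw_ostream &, bool, bool);

bool runDriver(LinkFn fn, llvm::ArrayRef<const char *> args) {
  // All drivers are invoked the same way; only the pointer differs.
  return fn(args, llvm::outs(), llvm::errs(), /*exitEarly=*/false,
            /*disableOutput=*/false);
}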
diff --git a/contrib/llvm-project/lldb/bindings/interface/SBModule.i b/contrib/llvm-project/lldb/bindings/interface/SBModule.i
index 606c9a5bbd0c..bda602d15690 100644
--- a/contrib/llvm-project/lldb/bindings/interface/SBModule.i
+++ b/contrib/llvm-project/lldb/bindings/interface/SBModule.i
@@ -137,6 +137,13 @@ public:
void
Clear();
+ %feature("docstring", "Check if the module is file backed.
+ @return
+ True, if the module is backed by an object file on disk.
+ False, if the module is backed by an object file in memory.") IsFileBacked;
+ bool
+ IsFileBacked() const;
+
%feature("docstring", "
Get const accessor for the module file specification.
diff --git a/contrib/llvm-project/lldb/bindings/interface/SBPlatform.i b/contrib/llvm-project/lldb/bindings/interface/SBPlatform.i
index 65615be7a361..6413784e69e7 100644
--- a/contrib/llvm-project/lldb/bindings/interface/SBPlatform.i
+++ b/contrib/llvm-project/lldb/bindings/interface/SBPlatform.i
@@ -177,6 +177,9 @@ public:
uint32_t
GetOSUpdateVersion ();
+ void
+ SetSDKRoot(const char *sysroot);
+
lldb::SBError
Get (lldb::SBFileSpec &src, lldb::SBFileSpec &dst);
diff --git a/contrib/llvm-project/lldb/bindings/interface/SBThread.i b/contrib/llvm-project/lldb/bindings/interface/SBThread.i
index d847d38f0d66..ba7f5b3fdf76 100644
--- a/contrib/llvm-project/lldb/bindings/interface/SBThread.i
+++ b/contrib/llvm-project/lldb/bindings/interface/SBThread.i
@@ -405,6 +405,12 @@ public:
bool
SafeToCallFunctions ();
+ %feature("autodoc","
+ Returns an SBValue object representing the siginfo for the current signal.
+ ") GetSiginfo;
+ lldb::SBValue
+ GetSiginfo(SBError &error);
+
STRING_EXTENSION(SBThread)
#ifdef SWIGPYTHON
diff --git a/contrib/llvm-project/lldb/bindings/python/python-swigsafecast.swig b/contrib/llvm-project/lldb/bindings/python/python-swigsafecast.swig
index 7d639e664f53..eb684133abef 100644
--- a/contrib/llvm-project/lldb/bindings/python/python-swigsafecast.swig
+++ b/contrib/llvm-project/lldb/bindings/python/python-swigsafecast.swig
@@ -1,19 +1,32 @@
namespace lldb_private {
namespace python {
-PyObject *SBTypeToSWIGWrapper(lldb::SBEvent &event_sb) {
- return SWIG_NewPointerObj(&event_sb, SWIGTYPE_p_lldb__SBEvent, 0);
-}
-
-PyObject *SBTypeToSWIGWrapper(lldb::SBCommandReturnObject &cmd_ret_obj_sb) {
- return SWIG_NewPointerObj(&cmd_ret_obj_sb,
- SWIGTYPE_p_lldb__SBCommandReturnObject, 0);
-}
-
PythonObject ToSWIGHelper(void *obj, swig_type_info *info) {
return {PyRefType::Owned, SWIG_NewPointerObj(obj, info, SWIG_POINTER_OWN)};
}
+/// A class that automatically clears an SB object when it goes out of scope.
+/// Use for cases where the SB object points to a temporary/unowned entity.
+template <typename T> class ScopedPythonObject : PythonObject {
+public:
+ ScopedPythonObject(T *sb, swig_type_info *info)
+ : PythonObject(ToSWIGHelper(sb, info)), m_sb(sb) {}
+ ~ScopedPythonObject() {
+ if (m_sb)
+ *m_sb = T();
+ }
+ ScopedPythonObject(ScopedPythonObject &&rhs)
+ : PythonObject(std::move(rhs)), m_sb(std::exchange(rhs.m_sb, nullptr)) {}
+ ScopedPythonObject(const ScopedPythonObject &) = delete;
+ ScopedPythonObject &operator=(const ScopedPythonObject &) = delete;
+ ScopedPythonObject &operator=(ScopedPythonObject &&) = delete;
+
+ const PythonObject &obj() const { return *this; }
+
+private:
+ T *m_sb;
+};
+
PythonObject ToSWIGWrapper(std::unique_ptr<lldb::SBValue> value_sb) {
return ToSWIGHelper(value_sb.release(), SWIGTYPE_p_lldb__SBValue);
}
@@ -94,5 +107,17 @@ PythonObject ToSWIGWrapper(const SymbolContext &sym_ctx) {
SWIGTYPE_p_lldb__SBSymbolContext);
}
+ScopedPythonObject<lldb::SBCommandReturnObject>
+ToSWIGWrapper(CommandReturnObject &cmd_retobj) {
+ return ScopedPythonObject<lldb::SBCommandReturnObject>(
+ new lldb::SBCommandReturnObject(cmd_retobj),
+ SWIGTYPE_p_lldb__SBCommandReturnObject);
+}
+
+ScopedPythonObject<lldb::SBEvent> ToSWIGWrapper(Event *event) {
+ return ScopedPythonObject<lldb::SBEvent>(new lldb::SBEvent(event),
+ SWIGTYPE_p_lldb__SBEvent);
+}
+
} // namespace python
} // namespace lldb_private
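A small sketch (not from the upstream change) of how the RAII wrapper above is used later in python-wrapper.swig: the temporary SBEvent is handed to Python only for the duration of the call and cleared when the ScopedPythonObject is destroyed.

void callWithEvent(lldb_private::python::PythonCallable &pfunc,
                   lldb_private::Event *event) {
  // ToSWIGWrapper(Event *) returns a ScopedPythonObject<lldb::SBEvent>.
  auto event_arg = lldb_private::python::ToSWIGWrapper(event);
  pfunc(event_arg.obj()); // hand Python a borrowed view of the SBEvent
}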
diff --git a/contrib/llvm-project/lldb/bindings/python/python-wrapper.swig b/contrib/llvm-project/lldb/bindings/python/python-wrapper.swig
index a2c1f756a0a2..626fc47bebb9 100644
--- a/contrib/llvm-project/lldb/bindings/python/python-wrapper.swig
+++ b/contrib/llvm-project/lldb/bindings/python/python-wrapper.swig
@@ -152,12 +152,12 @@ bool lldb_private::LLDBSwigPythonCallTypeScript(
return true;
}
-void *lldb_private::LLDBSwigPythonCreateSyntheticProvider(
+PythonObject lldb_private::LLDBSwigPythonCreateSyntheticProvider(
const char *python_class_name, const char *session_dictionary_name,
const lldb::ValueObjectSP &valobj_sp) {
if (python_class_name == NULL || python_class_name[0] == '\0' ||
!session_dictionary_name)
- Py_RETURN_NONE;
+ return PythonObject();
PyErr_Cleaner py_err_cleaner(true);
@@ -167,29 +167,29 @@ void *lldb_private::LLDBSwigPythonCreateSyntheticProvider(
python_class_name, dict);
if (!pfunc.IsAllocated())
- Py_RETURN_NONE;
+ return PythonObject();
auto sb_value = std::make_unique<lldb::SBValue>(valobj_sp);
sb_value->SetPreferSyntheticValue(false);
PythonObject val_arg = ToSWIGWrapper(std::move(sb_value));
if (!val_arg.IsAllocated())
- Py_RETURN_NONE;
+ return PythonObject();
PythonObject result = pfunc(val_arg, dict);
if (result.IsAllocated())
- return result.release();
+ return result;
- Py_RETURN_NONE;
+ return PythonObject();
}
-void *lldb_private::LLDBSwigPythonCreateCommandObject(
+PythonObject lldb_private::LLDBSwigPythonCreateCommandObject(
const char *python_class_name, const char *session_dictionary_name,
lldb::DebuggerSP debugger_sp) {
if (python_class_name == NULL || python_class_name[0] == '\0' ||
!session_dictionary_name)
- Py_RETURN_NONE;
+ return PythonObject();
PyErr_Cleaner py_err_cleaner(true);
auto dict = PythonModule::MainModule().ResolveName<PythonDictionary>(
@@ -198,24 +198,19 @@ void *lldb_private::LLDBSwigPythonCreateCommandObject(
python_class_name, dict);
if (!pfunc.IsAllocated())
- return nullptr;
-
- PythonObject result = pfunc(ToSWIGWrapper(std::move(debugger_sp)), dict);
+ return PythonObject();
- if (result.IsAllocated())
- return result.release();
-
- Py_RETURN_NONE;
+ return pfunc(ToSWIGWrapper(std::move(debugger_sp)), dict);
}
-void *lldb_private::LLDBSwigPythonCreateScriptedProcess(
+PythonObject lldb_private::LLDBSwigPythonCreateScriptedProcess(
const char *python_class_name, const char *session_dictionary_name,
const lldb::TargetSP &target_sp,
const lldb_private::StructuredDataImpl &args_impl,
std::string &error_string) {
if (python_class_name == NULL || python_class_name[0] == '\0' ||
!session_dictionary_name)
- Py_RETURN_NONE;
+ return PythonObject();
PyErr_Cleaner py_err_cleaner(true);
@@ -227,7 +222,7 @@ void *lldb_private::LLDBSwigPythonCreateScriptedProcess(
if (!pfunc.IsAllocated()) {
error_string.append("could not find script class: ");
error_string.append(python_class_name);
- return nullptr;
+ return PythonObject();
}
PythonObject target_arg = ToSWIGWrapper(target_sp);
@@ -240,7 +235,7 @@ void *lldb_private::LLDBSwigPythonCreateScriptedProcess(
[&](const llvm::ErrorInfoBase &E) {
error_string.append(E.message());
});
- Py_RETURN_NONE;
+ return PythonObject();
}
PythonObject result = {};
@@ -249,21 +244,17 @@ void *lldb_private::LLDBSwigPythonCreateScriptedProcess(
} else {
error_string.assign("wrong number of arguments in __init__, should be 2 "
"(not including self)");
- Py_RETURN_NONE;
}
-
- if (result.IsAllocated())
- return result.release();
- Py_RETURN_NONE;
+ return result;
}
-void *lldb_private::LLDBSwigPythonCreateScriptedThread(
+PythonObject lldb_private::LLDBSwigPythonCreateScriptedThread(
const char *python_class_name, const char *session_dictionary_name,
const lldb::ProcessSP &process_sp, const StructuredDataImpl &args_impl,
std::string &error_string) {
if (python_class_name == NULL || python_class_name[0] == '\0' ||
!session_dictionary_name)
- Py_RETURN_NONE;
+ return PythonObject();
PyErr_Cleaner py_err_cleaner(true);
@@ -275,7 +266,7 @@ void *lldb_private::LLDBSwigPythonCreateScriptedThread(
if (!pfunc.IsAllocated()) {
error_string.append("could not find script class: ");
error_string.append(python_class_name);
- return nullptr;
+ return PythonObject();
}
llvm::Expected<PythonCallable::ArgInfo> arg_info = pfunc.GetArgInfo();
@@ -286,30 +277,24 @@ void *lldb_private::LLDBSwigPythonCreateScriptedThread(
[&](const llvm::ErrorInfoBase &E) {
error_string.append(E.message());
});
- Py_RETURN_NONE;
+ return PythonObject();
}
- PythonObject result = {};
- if (arg_info.get().max_positional_args == 2) {
- result = pfunc(ToSWIGWrapper(process_sp), ToSWIGWrapper(args_impl));
- } else {
- error_string.assign("wrong number of arguments in __init__, should be 2 "
- "(not including self)");
- Py_RETURN_NONE;
- }
+ if (arg_info.get().max_positional_args == 2)
+ return pfunc(ToSWIGWrapper(process_sp), ToSWIGWrapper(args_impl));
- if (result.IsAllocated())
- return result.release();
- Py_RETURN_NONE;
+ error_string.assign("wrong number of arguments in __init__, should be 2 "
+ "(not including self)");
+ return PythonObject();
}
-void *lldb_private::LLDBSwigPythonCreateScriptedThreadPlan(
+PythonObject lldb_private::LLDBSwigPythonCreateScriptedThreadPlan(
const char *python_class_name, const char *session_dictionary_name,
const lldb_private::StructuredDataImpl &args_impl,
std::string &error_string, const lldb::ThreadPlanSP &thread_plan_sp) {
if (python_class_name == NULL || python_class_name[0] == '\0' ||
!session_dictionary_name)
- Py_RETURN_NONE;
+ return PythonObject();
PyErr_Cleaner py_err_cleaner(true);
@@ -321,7 +306,7 @@ void *lldb_private::LLDBSwigPythonCreateScriptedThreadPlan(
if (!pfunc.IsAllocated()) {
error_string.append("could not find script class: ");
error_string.append(python_class_name);
- return nullptr;
+ return PythonObject();
}
PythonObject tp_arg = ToSWIGWrapper(thread_plan_sp);
@@ -334,7 +319,7 @@ void *lldb_private::LLDBSwigPythonCreateScriptedThreadPlan(
[&](const llvm::ErrorInfoBase &E) {
error_string.append(E.message());
});
- Py_RETURN_NONE;
+ return PythonObject();
}
PythonObject result = {};
@@ -343,7 +328,7 @@ void *lldb_private::LLDBSwigPythonCreateScriptedThreadPlan(
if (args_sb->IsValid()) {
error_string.assign(
"args passed, but __init__ does not take an args dictionary");
- Py_RETURN_NONE;
+ return PythonObject();
}
result = pfunc(tp_arg, dict);
} else if (arg_info.get().max_positional_args >= 3) {
@@ -351,15 +336,13 @@ void *lldb_private::LLDBSwigPythonCreateScriptedThreadPlan(
} else {
error_string.assign("wrong number of arguments in __init__, should be 2 or "
"3 (not including self)");
- Py_RETURN_NONE;
+ return PythonObject();
}
// FIXME: At this point we should check that the class we found supports all
// the methods that we need.
- if (result.IsAllocated())
- return result.release();
- Py_RETURN_NONE;
+ return result;
}
bool lldb_private::LLDBSWIGPythonCallThreadPlan(
@@ -376,9 +359,8 @@ bool lldb_private::LLDBSWIGPythonCallThreadPlan(
PythonObject result;
if (event != nullptr) {
- lldb::SBEvent sb_event(event);
- PythonObject event_arg(PyRefType::Owned, SBTypeToSWIGWrapper(sb_event));
- result = pfunc(event_arg);
+ ScopedPythonObject<SBEvent> event_arg = ToSWIGWrapper(event);
+ result = pfunc(event_arg.obj());
} else
result = pfunc();
@@ -401,14 +383,14 @@ bool lldb_private::LLDBSWIGPythonCallThreadPlan(
return false;
}
-void *lldb_private::LLDBSwigPythonCreateScriptedBreakpointResolver(
+PythonObject lldb_private::LLDBSwigPythonCreateScriptedBreakpointResolver(
const char *python_class_name, const char *session_dictionary_name,
const StructuredDataImpl &args_impl,
const lldb::BreakpointSP &breakpoint_sp) {
if (python_class_name == NULL || python_class_name[0] == '\0' ||
!session_dictionary_name)
- Py_RETURN_NONE;
+ return PythonObject();
PyErr_Cleaner py_err_cleaner(true);
@@ -418,7 +400,7 @@ void *lldb_private::LLDBSwigPythonCreateScriptedBreakpointResolver(
python_class_name, dict);
if (!pfunc.IsAllocated())
- return nullptr;
+ return PythonObject();
PythonObject result =
pfunc(ToSWIGWrapper(breakpoint_sp), ToSWIGWrapper(args_impl), dict);
@@ -429,11 +411,9 @@ void *lldb_private::LLDBSwigPythonCreateScriptedBreakpointResolver(
// Check that __callback__ is defined:
auto callback_func = result.ResolveName<PythonCallable>("__callback__");
if (callback_func.IsAllocated())
- return result.release();
- else
- result.release();
+ return result;
}
- Py_RETURN_NONE;
+ return PythonObject();
}
unsigned int lldb_private::LLDBSwigPythonCallBreakpointResolver(
@@ -475,17 +455,17 @@ unsigned int lldb_private::LLDBSwigPythonCallBreakpointResolver(
return ret_val;
}
-void *lldb_private::LLDBSwigPythonCreateScriptedStopHook(
+PythonObject lldb_private::LLDBSwigPythonCreateScriptedStopHook(
lldb::TargetSP target_sp, const char *python_class_name,
const char *session_dictionary_name, const StructuredDataImpl &args_impl,
Status &error) {
if (python_class_name == NULL || python_class_name[0] == '\0') {
error.SetErrorString("Empty class name.");
- Py_RETURN_NONE;
+ return PythonObject();
}
if (!session_dictionary_name) {
error.SetErrorString("No session dictionary");
- Py_RETURN_NONE;
+ return PythonObject();
}
PyErr_Cleaner py_err_cleaner(true);
@@ -498,7 +478,7 @@ void *lldb_private::LLDBSwigPythonCreateScriptedStopHook(
if (!pfunc.IsAllocated()) {
error.SetErrorStringWithFormat("Could not find class: %s.",
python_class_name);
- return nullptr;
+ return PythonObject();
}
PythonObject result =
@@ -515,23 +495,22 @@ void *lldb_private::LLDBSwigPythonCreateScriptedStopHook(
"Wrong number of args for "
"handle_stop callback, should be 2 (excluding self), got: %zu",
num_args);
- Py_RETURN_NONE;
+ return PythonObject();
} else
- return result.release();
+ return result;
} else {
error.SetErrorString("Couldn't get num arguments for handle_stop "
"callback.");
- Py_RETURN_NONE;
+ return PythonObject();
}
- return result.release();
+ return result;
} else {
error.SetErrorStringWithFormat("Class \"%s\" is missing the required "
"handle_stop callback.",
python_class_name);
- result.release();
}
}
- Py_RETURN_NONE;
+ return PythonObject();
}
bool lldb_private::LLDBSwigPythonStopHookCallHandleStop(
@@ -795,7 +774,6 @@ bool lldb_private::LLDBSwigPythonCallCommand(
lldb::DebuggerSP debugger, const char *args,
lldb_private::CommandReturnObject &cmd_retobj,
lldb::ExecutionContextRefSP exe_ctx_ref_sp) {
- lldb::SBCommandReturnObject cmd_retobj_sb(cmd_retobj);
PyErr_Cleaner py_err_cleaner(true);
auto dict = PythonModule::MainModule().ResolveName<PythonDictionary>(
@@ -812,14 +790,13 @@ bool lldb_private::LLDBSwigPythonCallCommand(
return false;
}
PythonObject debugger_arg = ToSWIGWrapper(std::move(debugger));
- PythonObject cmd_retobj_arg(PyRefType::Owned,
- SBTypeToSWIGWrapper(cmd_retobj_sb));
+ auto cmd_retobj_arg = ToSWIGWrapper(cmd_retobj);
if (argc.get().max_positional_args < 5u)
- pfunc(debugger_arg, PythonString(args), cmd_retobj_arg, dict);
+ pfunc(debugger_arg, PythonString(args), cmd_retobj_arg.obj(), dict);
else
pfunc(debugger_arg, PythonString(args),
- ToSWIGWrapper(std::move(exe_ctx_ref_sp)), cmd_retobj_arg, dict);
+ ToSWIGWrapper(std::move(exe_ctx_ref_sp)), cmd_retobj_arg.obj(), dict);
return true;
}
@@ -828,7 +805,6 @@ bool lldb_private::LLDBSwigPythonCallCommandObject(
PyObject *implementor, lldb::DebuggerSP debugger, const char *args,
lldb_private::CommandReturnObject &cmd_retobj,
lldb::ExecutionContextRefSP exe_ctx_ref_sp) {
- lldb::SBCommandReturnObject cmd_retobj_sb(cmd_retobj);
PyErr_Cleaner py_err_cleaner(true);
@@ -838,21 +814,20 @@ bool lldb_private::LLDBSwigPythonCallCommandObject(
if (!pfunc.IsAllocated())
return false;
- PythonObject cmd_retobj_arg(PyRefType::Owned,
- SBTypeToSWIGWrapper(cmd_retobj_sb));
+ auto cmd_retobj_arg = ToSWIGWrapper(cmd_retobj);
pfunc(ToSWIGWrapper(std::move(debugger)), PythonString(args),
- ToSWIGWrapper(exe_ctx_ref_sp), cmd_retobj_arg);
+ ToSWIGWrapper(exe_ctx_ref_sp), cmd_retobj_arg.obj());
return true;
}
-void *lldb_private::LLDBSWIGPythonCreateOSPlugin(
+PythonObject lldb_private::LLDBSWIGPythonCreateOSPlugin(
const char *python_class_name, const char *session_dictionary_name,
const lldb::ProcessSP &process_sp) {
if (python_class_name == NULL || python_class_name[0] == '\0' ||
!session_dictionary_name)
- Py_RETURN_NONE;
+ return PythonObject();
PyErr_Cleaner py_err_cleaner(true);
@@ -862,21 +837,16 @@ void *lldb_private::LLDBSWIGPythonCreateOSPlugin(
python_class_name, dict);
if (!pfunc.IsAllocated())
- Py_RETURN_NONE;
-
- auto result = pfunc(ToSWIGWrapper(process_sp));
-
- if (result.IsAllocated())
- return result.release();
+ return PythonObject();
- Py_RETURN_NONE;
+ return pfunc(ToSWIGWrapper(process_sp));
}
-void *lldb_private::LLDBSWIGPython_CreateFrameRecognizer(
+PythonObject lldb_private::LLDBSWIGPython_CreateFrameRecognizer(
const char *python_class_name, const char *session_dictionary_name) {
if (python_class_name == NULL || python_class_name[0] == '\0' ||
!session_dictionary_name)
- Py_RETURN_NONE;
+ return PythonObject();
PyErr_Cleaner py_err_cleaner(true);
@@ -886,14 +856,9 @@ void *lldb_private::LLDBSWIGPython_CreateFrameRecognizer(
python_class_name, dict);
if (!pfunc.IsAllocated())
- Py_RETURN_NONE;
-
- auto result = pfunc();
-
- if (result.IsAllocated())
- return result.release();
+ return PythonObject();
- Py_RETURN_NONE;
+ return pfunc();
}
PyObject *lldb_private::LLDBSwigPython_GetRecognizedArguments(
diff --git a/contrib/llvm-project/lldb/bindings/python/python.swig b/contrib/llvm-project/lldb/bindings/python/python.swig
index 5dcbd68d8544..cb80e1be61a8 100644
--- a/contrib/llvm-project/lldb/bindings/python/python.swig
+++ b/contrib/llvm-project/lldb/bindings/python/python.swig
@@ -133,15 +133,8 @@ using namespace lldb;
%include "python-wrapper.swig"
%pythoncode%{
-_initialize = True
-try:
- import lldbconfig
- _initialize = lldbconfig.INITIALIZE
-except ImportError:
- pass
debugger_unique_id = 0
-if _initialize:
- SBDebugger.Initialize()
+SBDebugger.Initialize()
debugger = None
target = None
process = None
diff --git a/contrib/llvm-project/lldb/include/lldb/API/SBDebugger.h b/contrib/llvm-project/lldb/include/lldb/API/SBDebugger.h
index 1c771330cddc..a82c147053eb 100644
--- a/contrib/llvm-project/lldb/include/lldb/API/SBDebugger.h
+++ b/contrib/llvm-project/lldb/include/lldb/API/SBDebugger.h
@@ -306,6 +306,10 @@ public:
void SetScriptLanguage(lldb::ScriptLanguage script_lang);
+ lldb::LanguageType GetREPLLanguage() const;
+
+ void SetREPLLanguage(lldb::LanguageType repl_lang);
+
bool GetCloseInputOnEOF() const;
void SetCloseInputOnEOF(bool b);
diff --git a/contrib/llvm-project/lldb/include/lldb/API/SBDefines.h b/contrib/llvm-project/lldb/include/lldb/API/SBDefines.h
index 3ab10ad8e061..ecf1dc34d8c5 100644
--- a/contrib/llvm-project/lldb/include/lldb/API/SBDefines.h
+++ b/contrib/llvm-project/lldb/include/lldb/API/SBDefines.h
@@ -15,6 +15,18 @@
#include "lldb/lldb-types.h"
#include "lldb/lldb-versioning.h"
+#ifndef LLDB_API
+#if defined(_WIN32)
+#if defined(LLDB_IN_LIBLLDB)
+#define LLDB_API __declspec(dllexport)
+#else
+#define LLDB_API __declspec(dllimport)
+#endif
+#else // defined (_WIN32)
+#define LLDB_API
+#endif
+#endif
+
// Forward Declarations
namespace lldb {
diff --git a/contrib/llvm-project/lldb/include/lldb/API/SBModule.h b/contrib/llvm-project/lldb/include/lldb/API/SBModule.h
index dd783fe4107d..7200a1ef53fd 100644
--- a/contrib/llvm-project/lldb/include/lldb/API/SBModule.h
+++ b/contrib/llvm-project/lldb/include/lldb/API/SBModule.h
@@ -37,6 +37,8 @@ public:
void Clear();
+ bool IsFileBacked() const;
+
/// Get const accessor for the module file specification.
///
/// This function returns the file for the module on the host system
diff --git a/contrib/llvm-project/lldb/include/lldb/API/SBPlatform.h b/contrib/llvm-project/lldb/include/lldb/API/SBPlatform.h
index 98291f18247d..4f5d04a24e95 100644
--- a/contrib/llvm-project/lldb/include/lldb/API/SBPlatform.h
+++ b/contrib/llvm-project/lldb/include/lldb/API/SBPlatform.h
@@ -137,6 +137,8 @@ public:
uint32_t GetOSUpdateVersion();
+ void SetSDKRoot(const char *sysroot);
+
SBError Put(SBFileSpec &src, SBFileSpec &dst);
SBError Get(SBFileSpec &src, SBFileSpec &dst);
@@ -170,6 +172,7 @@ public:
protected:
friend class SBDebugger;
friend class SBTarget;
+ friend class SBThread;
lldb::PlatformSP GetSP() const;
diff --git a/contrib/llvm-project/lldb/include/lldb/API/SBTarget.h b/contrib/llvm-project/lldb/include/lldb/API/SBTarget.h
index abd9ebf07407..9e75b5e503a8 100644
--- a/contrib/llvm-project/lldb/include/lldb/API/SBTarget.h
+++ b/contrib/llvm-project/lldb/include/lldb/API/SBTarget.h
@@ -875,6 +875,7 @@ protected:
friend class SBSection;
friend class SBSourceManager;
friend class SBSymbol;
+ friend class SBThread;
friend class SBValue;
friend class SBVariablesOptions;
diff --git a/contrib/llvm-project/lldb/include/lldb/API/SBThread.h b/contrib/llvm-project/lldb/include/lldb/API/SBThread.h
index ac1b8407a220..76f794c25d9e 100644
--- a/contrib/llvm-project/lldb/include/lldb/API/SBThread.h
+++ b/contrib/llvm-project/lldb/include/lldb/API/SBThread.h
@@ -208,6 +208,8 @@ public:
bool SafeToCallFunctions();
+ SBValue GetSiginfo(SBError &error);
+
private:
friend class SBBreakpoint;
friend class SBBreakpointLocation;
diff --git a/contrib/llvm-project/lldb/include/lldb/API/SBType.h b/contrib/llvm-project/lldb/include/lldb/API/SBType.h
index 529b4d0eeffc..5885432d0624 100644
--- a/contrib/llvm-project/lldb/include/lldb/API/SBType.h
+++ b/contrib/llvm-project/lldb/include/lldb/API/SBType.h
@@ -225,6 +225,7 @@ protected:
friend class SBFunction;
friend class SBModule;
friend class SBTarget;
+ friend class SBThread;
friend class SBTypeEnumMember;
friend class SBTypeEnumMemberList;
friend class SBTypeNameSpecifier;
diff --git a/contrib/llvm-project/lldb/include/lldb/Breakpoint/Breakpoint.h b/contrib/llvm-project/lldb/include/lldb/Breakpoint/Breakpoint.h
index 40435d5c3d0f..113f8c4905e1 100644
--- a/contrib/llvm-project/lldb/include/lldb/Breakpoint/Breakpoint.h
+++ b/contrib/llvm-project/lldb/include/lldb/Breakpoint/Breakpoint.h
@@ -581,7 +581,7 @@ public:
llvm::json::Value GetStatistics();
/// Get the time it took to resolve all locations in this breakpoint.
- StatsDuration GetResolveTime() const { return m_resolve_time; }
+ StatsDuration::Duration GetResolveTime() const { return m_resolve_time; }
protected:
friend class Target;
@@ -660,7 +660,7 @@ private:
BreakpointName::Permissions m_permissions;
- StatsDuration m_resolve_time{0.0};
+ StatsDuration m_resolve_time;
void SendBreakpointChangedEvent(lldb::BreakpointEventType eventKind);
diff --git a/contrib/llvm-project/lldb/include/lldb/Breakpoint/BreakpointOptions.h b/contrib/llvm-project/lldb/include/lldb/Breakpoint/BreakpointOptions.h
index 7b78265920a2..c2456024d919 100644
--- a/contrib/llvm-project/lldb/include/lldb/Breakpoint/BreakpointOptions.h
+++ b/contrib/llvm-project/lldb/include/lldb/Breakpoint/BreakpointOptions.h
@@ -43,11 +43,10 @@ public:
| eCondition | eAutoContinue)
};
struct CommandData {
- CommandData() : user_source(), script_source() {}
+ CommandData() {}
CommandData(const StringList &user_source, lldb::ScriptLanguage interp)
- : user_source(user_source), script_source(), interpreter(interp),
- stop_on_error(true) {}
+ : user_source(user_source), interpreter(interp), stop_on_error(true) {}
virtual ~CommandData() = default;
@@ -195,8 +194,8 @@ public:
/// The commands will be appended to this list.
///
/// \return
- /// \btrue if the command callback is a command-line callback,
- /// \bfalse otherwise.
+ /// \b true if the command callback is a command-line callback,
+ /// \b false otherwise.
bool GetCommandLineCallbacks(StringList &command_list);
/// Remove the callback from this option set.
diff --git a/contrib/llvm-project/lldb/include/lldb/Breakpoint/WatchpointOptions.h b/contrib/llvm-project/lldb/include/lldb/Breakpoint/WatchpointOptions.h
index fbfcb91c4249..c5ad90c334c4 100644
--- a/contrib/llvm-project/lldb/include/lldb/Breakpoint/WatchpointOptions.h
+++ b/contrib/llvm-project/lldb/include/lldb/Breakpoint/WatchpointOptions.h
@@ -166,7 +166,7 @@ public:
lldb::user_id_t watch_id);
struct CommandData {
- CommandData() : user_source(), script_source() {}
+ CommandData() {}
~CommandData() = default;
diff --git a/contrib/llvm-project/lldb/include/lldb/Core/Address.h b/contrib/llvm-project/lldb/include/lldb/Core/Address.h
index dc50e27ca277..f77ffc2fd25e 100644
--- a/contrib/llvm-project/lldb/include/lldb/Core/Address.h
+++ b/contrib/llvm-project/lldb/include/lldb/Core/Address.h
@@ -116,7 +116,7 @@ public:
///
/// Initialize with a invalid section (NULL) and an invalid offset
/// (LLDB_INVALID_ADDRESS).
- Address() : m_section_wp() {}
+ Address() {}
/// Copy constructor
///
diff --git a/contrib/llvm-project/lldb/include/lldb/Core/Debugger.h b/contrib/llvm-project/lldb/include/lldb/Core/Debugger.h
index 1ab21bec54c9..f9a1f1eea54f 100644
--- a/contrib/llvm-project/lldb/include/lldb/Core/Debugger.h
+++ b/contrib/llvm-project/lldb/include/lldb/Core/Debugger.h
@@ -306,6 +306,10 @@ public:
bool SetScriptLanguage(lldb::ScriptLanguage script_lang);
+ lldb::LanguageType GetREPLLanguage() const;
+
+ bool SetREPLLanguage(lldb::LanguageType repl_lang);
+
uint32_t GetTerminalWidth() const;
bool SetTerminalWidth(uint32_t term_width);
diff --git a/contrib/llvm-project/lldb/include/lldb/Core/Declaration.h b/contrib/llvm-project/lldb/include/lldb/Core/Declaration.h
index f81de21bc8f8..6ae21eb65eb3 100644
--- a/contrib/llvm-project/lldb/include/lldb/Core/Declaration.h
+++ b/contrib/llvm-project/lldb/include/lldb/Core/Declaration.h
@@ -24,7 +24,7 @@ namespace lldb_private {
class Declaration {
public:
/// Default constructor.
- Declaration() : m_file() {}
+ Declaration() {}
/// Construct with file specification, and optional line and column.
///
@@ -45,7 +45,7 @@ public:
/// Construct with a pointer to another Declaration object.
Declaration(const Declaration *decl_ptr)
- : m_file(), m_line(0), m_column(LLDB_INVALID_COLUMN_NUMBER) {
+ : m_line(0), m_column(LLDB_INVALID_COLUMN_NUMBER) {
if (decl_ptr)
*this = *decl_ptr;
}
@@ -152,8 +152,6 @@ public:
/// The number of bytes that this object occupies in memory.
/// The returned value does not include the bytes for any
/// shared string values.
- ///
- /// \see ConstString::StaticMemorySize ()
size_t MemorySize() const;
/// Set accessor for the declaration file specification.
diff --git a/contrib/llvm-project/lldb/include/lldb/Core/Disassembler.h b/contrib/llvm-project/lldb/include/lldb/Core/Disassembler.h
index 0925bf358b9c..87979fd92584 100644
--- a/contrib/llvm-project/lldb/include/lldb/Core/Disassembler.h
+++ b/contrib/llvm-project/lldb/include/lldb/Core/Disassembler.h
@@ -465,7 +465,7 @@ protected:
uint32_t line = LLDB_INVALID_LINE_NUMBER;
uint32_t column = 0;
- SourceLine() : file() {}
+ SourceLine() {}
bool operator==(const SourceLine &rhs) const {
return file == rhs.file && line == rhs.line && rhs.column == column;
@@ -489,7 +489,7 @@ protected:
// Whether to print a blank line at the end of the source lines.
bool print_source_context_end_eol = true;
- SourceLinesToDisplay() : lines() {}
+ SourceLinesToDisplay() {}
};
// Get the function's declaration line number, hopefully a line number
diff --git a/contrib/llvm-project/lldb/include/lldb/Core/DumpDataExtractor.h b/contrib/llvm-project/lldb/include/lldb/Core/DumpDataExtractor.h
index 12188609e8c0..08501b512a2b 100644
--- a/contrib/llvm-project/lldb/include/lldb/Core/DumpDataExtractor.h
+++ b/contrib/llvm-project/lldb/include/lldb/Core/DumpDataExtractor.h
@@ -76,6 +76,15 @@ class Stream;
/// same integer value. If the items being displayed are not
/// bitfields, this value should be zero.
///
+/// \param[in] exe_scope
+/// If provided, this will be used to look up language-specific
+/// information, address information, and memory tags
+/// (if they are requested by the other options).
+///
+/// \param[in] show_memory_tags
+/// If exe_scope and base_addr are valid, include memory tags
+/// in the output. This does not apply to certain formats.
+///
/// \return
/// The offset at which dumping ended.
lldb::offset_t
@@ -83,7 +92,8 @@ DumpDataExtractor(const DataExtractor &DE, Stream *s, lldb::offset_t offset,
lldb::Format item_format, size_t item_byte_size,
size_t item_count, size_t num_per_line, uint64_t base_addr,
uint32_t item_bit_size, uint32_t item_bit_offset,
- ExecutionContextScope *exe_scope = nullptr);
+ ExecutionContextScope *exe_scope = nullptr,
+ bool show_memory_tags = false);
void DumpHexBytes(Stream *s, const void *src, size_t src_len,
uint32_t bytes_per_line, lldb::addr_t base_addr);
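A call sketch (not from the upstream change) showing the two new defaulted parameters; only callers that want memory-tag annotations need to pass them, existing call sites compile unchanged.

lldb::offset_t dumpWithTags(const lldb_private::DataExtractor &data,
                            lldb_private::Stream &s, lldb::addr_t base_addr,
                            lldb_private::ExecutionContextScope *exe_scope) {
  return lldb_private::DumpDataExtractor(
      data, &s, /*offset=*/0, lldb::eFormatHex, /*item_byte_size=*/1,
      /*item_count=*/data.GetByteSize(), /*num_per_line=*/16, base_addr,
      /*item_bit_size=*/0, /*item_bit_offset=*/0, exe_scope,
      /*show_memory_tags=*/true);
}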
diff --git a/contrib/llvm-project/lldb/include/lldb/Core/FileSpecList.h b/contrib/llvm-project/lldb/include/lldb/Core/FileSpecList.h
index cab8e9b9b43f..117c6b691498 100644
--- a/contrib/llvm-project/lldb/include/lldb/Core/FileSpecList.h
+++ b/contrib/llvm-project/lldb/include/lldb/Core/FileSpecList.h
@@ -152,8 +152,6 @@ public:
///
/// \return
/// The number of bytes that this object occupies in memory.
- ///
- /// \see ConstString::StaticMemorySize ()
size_t MemorySize() const;
bool IsEmpty() const { return m_files.empty(); }
diff --git a/contrib/llvm-project/lldb/include/lldb/Core/FormatEntity.h b/contrib/llvm-project/lldb/include/lldb/Core/FormatEntity.h
index ecae8500df40..cd9019d55554 100644
--- a/contrib/llvm-project/lldb/include/lldb/Core/FormatEntity.h
+++ b/contrib/llvm-project/lldb/include/lldb/Core/FormatEntity.h
@@ -148,7 +148,7 @@ public:
Entry(Type t = Type::Invalid, const char *s = nullptr,
const char *f = nullptr)
- : string(s ? s : ""), printf_format(f ? f : ""), children(), type(t) {}
+ : string(s ? s : ""), printf_format(f ? f : ""), type(t) {}
Entry(llvm::StringRef s);
Entry(char ch);
diff --git a/contrib/llvm-project/lldb/include/lldb/Core/LoadedModuleInfoList.h b/contrib/llvm-project/lldb/include/lldb/Core/LoadedModuleInfoList.h
index ad6da2bd7559..3f65ddc06d73 100644
--- a/contrib/llvm-project/lldb/include/lldb/Core/LoadedModuleInfoList.h
+++ b/contrib/llvm-project/lldb/include/lldb/Core/LoadedModuleInfoList.h
@@ -101,7 +101,7 @@ public:
lldb::addr_t m_dynamic;
};
- LoadedModuleInfoList() : m_list() {}
+ LoadedModuleInfoList() {}
void add(const LoadedModuleInfo &mod) { m_list.push_back(mod); }
diff --git a/contrib/llvm-project/lldb/include/lldb/Core/Mangled.h b/contrib/llvm-project/lldb/include/lldb/Core/Mangled.h
index 6c92591a0881..4e4f75e18e34 100644
--- a/contrib/llvm-project/lldb/include/lldb/Core/Mangled.h
+++ b/contrib/llvm-project/lldb/include/lldb/Core/Mangled.h
@@ -72,10 +72,10 @@ public:
return !(*this == rhs);
}
- /// Convert to pointer operator.
+ /// Convert to bool operator.
///
- /// This allows code to check a Mangled object to see if it contains a valid
- /// mangled name using code such as:
+ /// This allows code to check any Mangled objects to see if they contain
+ /// anything valid using code such as:
///
/// \code
/// Mangled mangled(...);
@@ -84,25 +84,9 @@ public:
/// \endcode
///
/// \return
- /// A pointer to this object if either the mangled or unmangled
- /// name is set, NULL otherwise.
- operator void *() const;
-
- /// Logical NOT operator.
- ///
- /// This allows code to check a Mangled object to see if it contains an
- /// empty mangled name using code such as:
- ///
- /// \code
- /// Mangled mangled(...);
- /// if (!mangled)
- /// { ...
- /// \endcode
- ///
- /// \return
- /// Returns \b true if the object has an empty mangled and
- /// unmangled name, \b false otherwise.
- bool operator!() const;
+ /// Returns \b true if either the mangled or unmangled name is set,
+ /// \b false if the object has an empty mangled and unmangled name.
+ explicit operator bool() const;
/// Clear the mangled and demangled values.
void Clear();
@@ -199,8 +183,6 @@ public:
///
/// \return
/// The number of bytes that this object occupies in memory.
- ///
- /// \see ConstString::StaticMemorySize ()
size_t MemorySize() const;
/// Set the string value in this object.
diff --git a/contrib/llvm-project/lldb/include/lldb/Core/Module.h b/contrib/llvm-project/lldb/include/lldb/Core/Module.h
index 6b1e4f8f3704..f6c32586eda8 100644
--- a/contrib/llvm-project/lldb/include/lldb/Core/Module.h
+++ b/contrib/llvm-project/lldb/include/lldb/Core/Module.h
@@ -905,7 +905,7 @@ public:
/// correctly.
class LookupInfo {
public:
- LookupInfo() : m_name(), m_lookup_name() {}
+ LookupInfo() {}
LookupInfo(ConstString name, lldb::FunctionNameType name_type_mask,
lldb::LanguageType language);
@@ -1047,11 +1047,11 @@ protected:
/// We store a symbol table parse time duration here because we might have
/// an object file and a symbol file which both have symbol tables. The parse
/// time for the symbol tables can be aggregated here.
- StatsDuration m_symtab_parse_time{0.0};
+ StatsDuration m_symtab_parse_time;
/// We store a symbol named index time duration here because we might have
/// an object file and a symbol file which both have symbol tables. The parse
/// time for the symbol tables can be aggregated here.
- StatsDuration m_symtab_index_time{0.0};
+ StatsDuration m_symtab_index_time;
/// Resolve a file or load virtual address.
///
diff --git a/contrib/llvm-project/lldb/include/lldb/Core/ModuleSpec.h b/contrib/llvm-project/lldb/include/lldb/Core/ModuleSpec.h
index 8e5deebbab9a..bcf4ec30a673 100644
--- a/contrib/llvm-project/lldb/include/lldb/Core/ModuleSpec.h
+++ b/contrib/llvm-project/lldb/include/lldb/Core/ModuleSpec.h
@@ -25,18 +25,14 @@ namespace lldb_private {
class ModuleSpec {
public:
- ModuleSpec()
- : m_file(), m_platform_file(), m_symbol_file(), m_arch(), m_uuid(),
- m_object_name(), m_source_mappings() {}
+ ModuleSpec() {}
/// If the \c data argument is passed, its contents will be used
/// as the module contents instead of trying to read them from
/// \c file_spec .
ModuleSpec(const FileSpec &file_spec, const UUID &uuid = UUID(),
lldb::DataBufferSP data = lldb::DataBufferSP())
- : m_file(file_spec), m_platform_file(), m_symbol_file(), m_arch(),
- m_uuid(uuid), m_object_name(), m_object_offset(0), m_source_mappings(),
- m_data(data) {
+ : m_file(file_spec), m_uuid(uuid), m_object_offset(0), m_data(data) {
if (data)
m_object_size = data->GetByteSize();
else if (m_file)
@@ -44,10 +40,8 @@ public:
}
ModuleSpec(const FileSpec &file_spec, const ArchSpec &arch)
- : m_file(file_spec), m_platform_file(), m_symbol_file(), m_arch(arch),
- m_uuid(), m_object_name(), m_object_offset(0),
- m_object_size(FileSystem::Instance().GetByteSize(file_spec)),
- m_source_mappings() {}
+ : m_file(file_spec), m_arch(arch), m_object_offset(0),
+ m_object_size(FileSystem::Instance().GetByteSize(file_spec)) {}
FileSpec *GetFileSpecPtr() { return (m_file ? &m_file : nullptr); }
@@ -279,9 +273,9 @@ protected:
class ModuleSpecList {
public:
- ModuleSpecList() : m_specs(), m_mutex() {}
+ ModuleSpecList() {}
- ModuleSpecList(const ModuleSpecList &rhs) : m_specs(), m_mutex() {
+ ModuleSpecList(const ModuleSpecList &rhs) {
std::lock_guard<std::recursive_mutex> lhs_guard(m_mutex);
std::lock_guard<std::recursive_mutex> rhs_guard(rhs.m_mutex);
m_specs = rhs.m_specs;
diff --git a/contrib/llvm-project/lldb/include/lldb/Core/StructuredDataImpl.h b/contrib/llvm-project/lldb/include/lldb/Core/StructuredDataImpl.h
index 8930ebff8166..355aa77c4e5c 100644
--- a/contrib/llvm-project/lldb/include/lldb/Core/StructuredDataImpl.h
+++ b/contrib/llvm-project/lldb/include/lldb/Core/StructuredDataImpl.h
@@ -25,7 +25,7 @@ namespace lldb_private {
class StructuredDataImpl {
public:
- StructuredDataImpl() : m_plugin_wp(), m_data_sp() {}
+ StructuredDataImpl() {}
StructuredDataImpl(const StructuredDataImpl &rhs) = default;
diff --git a/contrib/llvm-project/lldb/include/lldb/Core/ThreadSafeValue.h b/contrib/llvm-project/lldb/include/lldb/Core/ThreadSafeValue.h
index 51820ec9cd9d..979f008b3170 100644
--- a/contrib/llvm-project/lldb/include/lldb/Core/ThreadSafeValue.h
+++ b/contrib/llvm-project/lldb/include/lldb/Core/ThreadSafeValue.h
@@ -18,7 +18,7 @@ namespace lldb_private {
template <class T> class ThreadSafeValue {
public:
ThreadSafeValue() = default;
- ThreadSafeValue(const T &value) : m_value(value), m_mutex() {}
+ ThreadSafeValue(const T &value) : m_value(value) {}
~ThreadSafeValue() = default;
diff --git a/contrib/llvm-project/lldb/include/lldb/Core/UniqueCStringMap.h b/contrib/llvm-project/lldb/include/lldb/Core/UniqueCStringMap.h
index e37027a0150a..9c8b8081d374 100644
--- a/contrib/llvm-project/lldb/include/lldb/Core/UniqueCStringMap.h
+++ b/contrib/llvm-project/lldb/include/lldb/Core/UniqueCStringMap.h
@@ -165,7 +165,21 @@ public:
// my_map.Append (UniqueCStringMap::Entry(GetName(...), GetValue(...)));
// }
// my_map.Sort();
- void Sort() { llvm::sort(m_map.begin(), m_map.end(), Compare()); }
+ void Sort() {
+ Sort([](const T &, const T &) { return false; });
+ }
+
+ /// Sort contents of this map using the provided comparator to break ties for
+ /// entries with the same string value.
+ template <typename TCompare> void Sort(TCompare tc) {
+ Compare c;
+ llvm::sort(m_map, [&](const Entry &lhs, const Entry &rhs) -> bool {
+ int result = c.ThreeWay(lhs.cstring, rhs.cstring);
+ if (result == 0)
+ return tc(lhs.value, rhs.value);
+ return result < 0;
+ });
+ }
// Since we are using a vector to contain our items it will always double its
// memory consumption as things are added to the vector, so if you intend to
@@ -205,13 +219,24 @@ protected:
return operator()(lhs, rhs.cstring);
}
+ bool operator()(ConstString lhs, ConstString rhs) {
+ return ThreeWay(lhs, rhs) < 0;
+ }
+
// This is only for uniqueness, not lexicographical ordering, so we can
// just compare pointers. *However*, comparing pointers from different
// allocations is UB, so we need to compare their integral values instead.
- bool operator()(ConstString lhs, ConstString rhs) {
- return uintptr_t(lhs.GetCString()) < uintptr_t(rhs.GetCString());
+ int ThreeWay(ConstString lhs, ConstString rhs) {
+ auto lhsint = uintptr_t(lhs.GetCString());
+ auto rhsint = uintptr_t(rhs.GetCString());
+ if (lhsint < rhsint)
+ return -1;
+ if (lhsint > rhsint)
+ return 1;
+ return 0;
}
};
+
collection m_map;
};
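A short sketch (not from the upstream change): the new overload lets callers make the order of entries sharing the same ConstString key deterministic by breaking ties on the value.

void sortStable(lldb_private::UniqueCStringMap<uint32_t> &map) {
  // Entries with the same string key are ordered by ascending value.
  map.Sort([](uint32_t lhs, uint32_t rhs) { return lhs < rhs; });
}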
diff --git a/contrib/llvm-project/lldb/include/lldb/Core/Value.h b/contrib/llvm-project/lldb/include/lldb/Core/Value.h
index 1ee4fe639e6e..9f00a14251e2 100644
--- a/contrib/llvm-project/lldb/include/lldb/Core/Value.h
+++ b/contrib/llvm-project/lldb/include/lldb/Core/Value.h
@@ -156,7 +156,7 @@ protected:
class ValueList {
public:
- ValueList() : m_values() {}
+ ValueList() {}
ValueList(const ValueList &rhs);
diff --git a/contrib/llvm-project/lldb/include/lldb/Core/ValueObject.h b/contrib/llvm-project/lldb/include/lldb/Core/ValueObject.h
index 192149f05436..dac7aac06246 100644
--- a/contrib/llvm-project/lldb/include/lldb/Core/ValueObject.h
+++ b/contrib/llvm-project/lldb/include/lldb/Core/ValueObject.h
@@ -795,7 +795,7 @@ protected:
class ChildrenManager {
public:
- ChildrenManager() : m_mutex(), m_children() {}
+ ChildrenManager() {}
bool HasChildAtIndex(size_t idx) {
std::lock_guard<std::recursive_mutex> guard(m_mutex);
diff --git a/contrib/llvm-project/lldb/include/lldb/DataFormatters/FormatClasses.h b/contrib/llvm-project/lldb/include/lldb/DataFormatters/FormatClasses.h
index b8540de3d740..2b47bcdf3a69 100644
--- a/contrib/llvm-project/lldb/include/lldb/DataFormatters/FormatClasses.h
+++ b/contrib/llvm-project/lldb/include/lldb/DataFormatters/FormatClasses.h
@@ -105,23 +105,23 @@ private:
class TypeNameSpecifierImpl {
public:
- TypeNameSpecifierImpl() : m_type() {}
+ TypeNameSpecifierImpl() {}
TypeNameSpecifierImpl(llvm::StringRef name, bool is_regex)
- : m_is_regex(is_regex), m_type() {
+ : m_is_regex(is_regex) {
m_type.m_type_name = std::string(name);
}
// if constructing with a given type, is_regex cannot be true since we are
// giving an exact type to match
- TypeNameSpecifierImpl(lldb::TypeSP type) : m_is_regex(false), m_type() {
+ TypeNameSpecifierImpl(lldb::TypeSP type) : m_is_regex(false) {
if (type) {
m_type.m_type_name = std::string(type->GetName().GetStringRef());
m_type.m_compiler_type = type->GetForwardCompilerType();
}
}
- TypeNameSpecifierImpl(CompilerType type) : m_is_regex(false), m_type() {
+ TypeNameSpecifierImpl(CompilerType type) : m_is_regex(false) {
if (type.IsValid()) {
m_type.m_type_name.assign(type.GetTypeName().GetCString());
m_type.m_compiler_type = type;
diff --git a/contrib/llvm-project/lldb/include/lldb/DataFormatters/TypeSynthetic.h b/contrib/llvm-project/lldb/include/lldb/DataFormatters/TypeSynthetic.h
index 24322bd51a0c..3f58297a529b 100644
--- a/contrib/llvm-project/lldb/include/lldb/DataFormatters/TypeSynthetic.h
+++ b/contrib/llvm-project/lldb/include/lldb/DataFormatters/TypeSynthetic.h
@@ -279,11 +279,11 @@ class TypeFilterImpl : public SyntheticChildren {
public:
TypeFilterImpl(const SyntheticChildren::Flags &flags)
- : SyntheticChildren(flags), m_expression_paths() {}
+ : SyntheticChildren(flags) {}
TypeFilterImpl(const SyntheticChildren::Flags &flags,
const std::initializer_list<const char *> items)
- : SyntheticChildren(flags), m_expression_paths() {
+ : SyntheticChildren(flags) {
for (auto path : items)
AddExpressionPath(path);
}
@@ -391,7 +391,7 @@ class ScriptedSyntheticChildren : public SyntheticChildren {
public:
ScriptedSyntheticChildren(const SyntheticChildren::Flags &flags,
const char *pclass, const char *pcode = nullptr)
- : SyntheticChildren(flags), m_python_class(), m_python_code() {
+ : SyntheticChildren(flags) {
if (pclass)
m_python_class = pclass;
if (pcode)
diff --git a/contrib/llvm-project/lldb/include/lldb/Expression/IRExecutionUnit.h b/contrib/llvm-project/lldb/include/lldb/Expression/IRExecutionUnit.h
index bb43851e17c9..ef53ca512d75 100644
--- a/contrib/llvm-project/lldb/include/lldb/Expression/IRExecutionUnit.h
+++ b/contrib/llvm-project/lldb/include/lldb/Expression/IRExecutionUnit.h
@@ -347,10 +347,9 @@ private:
AllocationRecord(uintptr_t host_address, uint32_t permissions,
lldb::SectionType sect_type, size_t size,
unsigned alignment, unsigned section_id, const char *name)
- : m_name(), m_process_address(LLDB_INVALID_ADDRESS),
- m_host_address(host_address), m_permissions(permissions),
- m_sect_type(sect_type), m_size(size), m_alignment(alignment),
- m_section_id(section_id) {
+ : m_process_address(LLDB_INVALID_ADDRESS), m_host_address(host_address),
+ m_permissions(permissions), m_sect_type(sect_type), m_size(size),
+ m_alignment(alignment), m_section_id(section_id) {
if (name && name[0])
m_name = name;
}
diff --git a/contrib/llvm-project/lldb/include/lldb/Host/Debug.h b/contrib/llvm-project/lldb/include/lldb/Host/Debug.h
index 7da59dd04a66..e4e410474c02 100644
--- a/contrib/llvm-project/lldb/include/lldb/Host/Debug.h
+++ b/contrib/llvm-project/lldb/include/lldb/Host/Debug.h
@@ -32,15 +32,13 @@ struct ResumeAction {
// send a signal to the thread when the action is run or step.
class ResumeActionList {
public:
- ResumeActionList() : m_actions(), m_signal_handled() {}
+ ResumeActionList() {}
- ResumeActionList(lldb::StateType default_action, int signal)
- : m_actions(), m_signal_handled() {
+ ResumeActionList(lldb::StateType default_action, int signal) {
SetDefaultThreadActionIfNeeded(default_action, signal);
}
- ResumeActionList(const ResumeAction *actions, size_t num_actions)
- : m_actions(), m_signal_handled() {
+ ResumeActionList(const ResumeAction *actions, size_t num_actions) {
if (actions && num_actions) {
m_actions.assign(actions, actions + num_actions);
m_signal_handled.assign(num_actions, false);
diff --git a/contrib/llvm-project/lldb/include/lldb/Host/FileSystem.h b/contrib/llvm-project/lldb/include/lldb/Host/FileSystem.h
index 54f7ac5ddb58..9f863e8a9d7e 100644
--- a/contrib/llvm-project/lldb/include/lldb/Host/FileSystem.h
+++ b/contrib/llvm-project/lldb/include/lldb/Host/FileSystem.h
@@ -31,16 +31,13 @@ public:
static const char *DEV_NULL;
static const char *PATH_CONVERSION_ERROR;
- FileSystem()
- : m_fs(llvm::vfs::getRealFileSystem()), m_collector(nullptr),
- m_home_directory() {}
+ FileSystem() : m_fs(llvm::vfs::getRealFileSystem()), m_collector(nullptr) {}
FileSystem(std::shared_ptr<llvm::FileCollectorBase> collector)
: m_fs(llvm::vfs::getRealFileSystem()), m_collector(std::move(collector)),
- m_home_directory(), m_mapped(false) {}
+ m_mapped(false) {}
FileSystem(llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> fs,
bool mapped = false)
- : m_fs(std::move(fs)), m_collector(nullptr), m_home_directory(),
- m_mapped(mapped) {}
+ : m_fs(std::move(fs)), m_collector(nullptr), m_mapped(mapped) {}
FileSystem(const FileSystem &fs) = delete;
FileSystem &operator=(const FileSystem &fs) = delete;
diff --git a/contrib/llvm-project/lldb/include/lldb/Host/HostNativeThreadBase.h b/contrib/llvm-project/lldb/include/lldb/Host/HostNativeThreadBase.h
index bfd70d745593..1c581c279e1e 100644
--- a/contrib/llvm-project/lldb/include/lldb/Host/HostNativeThreadBase.h
+++ b/contrib/llvm-project/lldb/include/lldb/Host/HostNativeThreadBase.h
@@ -46,7 +46,7 @@ protected:
ThreadCreateTrampoline(lldb::thread_arg_t arg);
lldb::thread_t m_thread = LLDB_INVALID_HOST_THREAD;
- lldb::thread_result_t m_result = 0;
+ lldb::thread_result_t m_result = 0; // NOLINT(modernize-use-nullptr)
};
}
diff --git a/contrib/llvm-project/lldb/include/lldb/Host/XML.h b/contrib/llvm-project/lldb/include/lldb/Host/XML.h
index 9edf46bf09df..da0f9cd7aa8c 100644
--- a/contrib/llvm-project/lldb/include/lldb/Host/XML.h
+++ b/contrib/llvm-project/lldb/include/lldb/Host/XML.h
@@ -76,8 +76,8 @@ public:
XMLNode GetChild() const;
- llvm::StringRef GetAttributeValue(const char *name,
- const char *fail_value = nullptr) const;
+ std::string GetAttributeValue(const char *name,
+ const char *fail_value = nullptr) const;
bool GetAttributeValueAsUnsigned(const char *name, uint64_t &value,
uint64_t fail_value = 0, int base = 0) const;
diff --git a/contrib/llvm-project/lldb/include/lldb/Host/common/NativeProcessProtocol.h b/contrib/llvm-project/lldb/include/lldb/Host/common/NativeProcessProtocol.h
index 7c3458527616..c7494ba6b195 100644
--- a/contrib/llvm-project/lldb/include/lldb/Host/common/NativeProcessProtocol.h
+++ b/contrib/llvm-project/lldb/include/lldb/Host/common/NativeProcessProtocol.h
@@ -204,7 +204,7 @@ public:
void SetCurrentThreadID(lldb::tid_t tid) { m_current_thread_id = tid; }
- lldb::tid_t GetCurrentThreadID() { return m_current_thread_id; }
+ lldb::tid_t GetCurrentThreadID() const { return m_current_thread_id; }
NativeThreadProtocol *GetCurrentThread() {
return GetThreadByID(m_current_thread_id);
@@ -251,8 +251,9 @@ public:
libraries_svr4 = (1u << 5),
memory_tagging = (1u << 6),
savecore = (1u << 7),
+ siginfo_read = (1u << 8),
- LLVM_MARK_AS_BITMASK_ENUM(savecore)
+ LLVM_MARK_AS_BITMASK_ENUM(siginfo_read)
};
class Factory {
diff --git a/contrib/llvm-project/lldb/include/lldb/Host/common/NativeThreadProtocol.h b/contrib/llvm-project/lldb/include/lldb/Host/common/NativeThreadProtocol.h
index 5cf26bd95939..35ccc48d62d8 100644
--- a/contrib/llvm-project/lldb/include/lldb/Host/common/NativeThreadProtocol.h
+++ b/contrib/llvm-project/lldb/include/lldb/Host/common/NativeThreadProtocol.h
@@ -12,9 +12,13 @@
#include <memory>
#include "lldb/Host/Debug.h"
+#include "lldb/Utility/UnimplementedError.h"
#include "lldb/lldb-private-forward.h"
#include "lldb/lldb-types.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/MemoryBuffer.h"
+
namespace lldb_private {
// NativeThreadProtocol
class NativeThreadProtocol {
@@ -47,6 +51,11 @@ public:
virtual Status RemoveHardwareBreakpoint(lldb::addr_t addr) = 0;
+ virtual llvm::Expected<std::unique_ptr<llvm::MemoryBuffer>>
+ GetSiginfo() const {
+ return llvm::make_error<UnimplementedError>();
+ }
+
protected:
NativeProcessProtocol &m_process;
lldb::tid_t m_tid;
diff --git a/contrib/llvm-project/lldb/include/lldb/Interpreter/CommandInterpreter.h b/contrib/llvm-project/lldb/include/lldb/Interpreter/CommandInterpreter.h
index e6f0d5f9c4d4..3efb59fc0564 100644
--- a/contrib/llvm-project/lldb/include/lldb/Interpreter/CommandInterpreter.h
+++ b/contrib/llvm-project/lldb/include/lldb/Interpreter/CommandInterpreter.h
@@ -649,7 +649,7 @@ private:
void FindCommandsForApropos(llvm::StringRef word, StringList &commands_found,
StringList &commands_help,
- CommandObject::CommandMap &command_map);
+ const CommandObject::CommandMap &command_map);
// An interruptible wrapper around the stream output
void PrintCommandOutput(Stream &stream, llvm::StringRef str);
diff --git a/contrib/llvm-project/lldb/include/lldb/Interpreter/CommandObject.h b/contrib/llvm-project/lldb/include/lldb/Interpreter/CommandObject.h
index 89cc161993a9..bfddf559e5fe 100644
--- a/contrib/llvm-project/lldb/include/lldb/Interpreter/CommandObject.h
+++ b/contrib/llvm-project/lldb/include/lldb/Interpreter/CommandObject.h
@@ -172,11 +172,6 @@ public:
return nullptr;
}
- virtual void AproposAllSubCommands(llvm::StringRef prefix,
- llvm::StringRef search_word,
- StringList &commands_found,
- StringList &commands_help) {}
-
void FormatLongHelpText(Stream &output_strm, llvm::StringRef long_help);
void GenerateHelpText(CommandReturnObject &result);
diff --git a/contrib/llvm-project/lldb/include/lldb/Interpreter/CommandObjectMultiword.h b/contrib/llvm-project/lldb/include/lldb/Interpreter/CommandObjectMultiword.h
index a0e8d163c4b6..ab580c11eb4f 100644
--- a/contrib/llvm-project/lldb/include/lldb/Interpreter/CommandObjectMultiword.h
+++ b/contrib/llvm-project/lldb/include/lldb/Interpreter/CommandObjectMultiword.h
@@ -51,11 +51,6 @@ public:
CommandObject *GetSubcommandObject(llvm::StringRef sub_cmd,
StringList *matches = nullptr) override;
- void AproposAllSubCommands(llvm::StringRef prefix,
- llvm::StringRef search_word,
- StringList &commands_found,
- StringList &commands_help) override;
-
bool WantsRawCommandString() override { return false; }
void HandleCompletion(CompletionRequest &request) override;
@@ -110,11 +105,6 @@ public:
CommandObject *GetSubcommandObject(llvm::StringRef sub_cmd,
StringList *matches = nullptr) override;
- void AproposAllSubCommands(llvm::StringRef prefix,
- llvm::StringRef search_word,
- StringList &commands_found,
- StringList &commands_help) override;
-
bool LoadSubCommand(llvm::StringRef cmd_name,
const lldb::CommandObjectSP &command_obj) override;
diff --git a/contrib/llvm-project/lldb/include/lldb/Interpreter/OptionValueArray.h b/contrib/llvm-project/lldb/include/lldb/Interpreter/OptionValueArray.h
index 011eefc34251..af43887635e9 100644
--- a/contrib/llvm-project/lldb/include/lldb/Interpreter/OptionValueArray.h
+++ b/contrib/llvm-project/lldb/include/lldb/Interpreter/OptionValueArray.h
@@ -18,7 +18,7 @@ namespace lldb_private {
class OptionValueArray : public Cloneable<OptionValueArray, OptionValue> {
public:
OptionValueArray(uint32_t type_mask = UINT32_MAX, bool raw_value_dump = false)
- : m_type_mask(type_mask), m_values(), m_raw_value_dump(raw_value_dump) {}
+ : m_type_mask(type_mask), m_raw_value_dump(raw_value_dump) {}
~OptionValueArray() override = default;
diff --git a/contrib/llvm-project/lldb/include/lldb/Interpreter/OptionValueProperties.h b/contrib/llvm-project/lldb/include/lldb/Interpreter/OptionValueProperties.h
index 6fa5403ac142..3e5685b7f0bf 100644
--- a/contrib/llvm-project/lldb/include/lldb/Interpreter/OptionValueProperties.h
+++ b/contrib/llvm-project/lldb/include/lldb/Interpreter/OptionValueProperties.h
@@ -114,6 +114,9 @@ public:
GetPropertyAtIndexAsOptionValueLanguage(const ExecutionContext *exe_ctx,
uint32_t idx) const;
+ bool SetPropertyAtIndexAsLanguage(const ExecutionContext *exe_ctx,
+ uint32_t idx, lldb::LanguageType lang);
+
bool GetPropertyAtIndexAsArgs(const ExecutionContext *exe_ctx, uint32_t idx,
Args &args) const;
diff --git a/contrib/llvm-project/lldb/include/lldb/Interpreter/Options.h b/contrib/llvm-project/lldb/include/lldb/Interpreter/Options.h
index 6bf5c21fe98e..5899a9edc47f 100644
--- a/contrib/llvm-project/lldb/include/lldb/Interpreter/Options.h
+++ b/contrib/llvm-project/lldb/include/lldb/Interpreter/Options.h
@@ -169,7 +169,7 @@ public:
/// user wants returned.
///
/// \return
- /// \btrue if we were in an option, \bfalse otherwise.
+ /// \b true if we were in an option, \b false otherwise.
bool HandleOptionCompletion(lldb_private::CompletionRequest &request,
OptionElementVector &option_map,
CommandInterpreter &interpreter);
diff --git a/contrib/llvm-project/lldb/include/lldb/Interpreter/ScriptedInterface.h b/contrib/llvm-project/lldb/include/lldb/Interpreter/ScriptedInterface.h
index 427fa3f4f793..9de5e60cfea3 100644
--- a/contrib/llvm-project/lldb/include/lldb/Interpreter/ScriptedInterface.h
+++ b/contrib/llvm-project/lldb/include/lldb/Interpreter/ScriptedInterface.h
@@ -27,12 +27,13 @@ public:
virtual StructuredData::GenericSP
CreatePluginObject(llvm::StringRef class_name, ExecutionContext &exe_ctx,
- StructuredData::DictionarySP args_sp) = 0;
+ StructuredData::DictionarySP args_sp,
+ StructuredData::Generic *script_obj = nullptr) = 0;
template <typename Ret>
- Ret ErrorWithMessage(llvm::StringRef caller_name, llvm::StringRef error_msg,
- Status &error,
- uint32_t log_caterogy = LIBLLDB_LOG_PROCESS) {
+ static Ret ErrorWithMessage(llvm::StringRef caller_name,
+ llvm::StringRef error_msg, Status &error,
+ LLDBLog log_caterogy = LLDBLog::Process) {
LLDB_LOGF(GetLogIfAllCategoriesSet(log_caterogy), "%s ERROR = %s",
caller_name.data(), error_msg.data());
error.SetErrorString(llvm::Twine(caller_name + llvm::Twine(" ERROR = ") +
diff --git a/contrib/llvm-project/lldb/include/lldb/Interpreter/ScriptedProcessInterface.h b/contrib/llvm-project/lldb/include/lldb/Interpreter/ScriptedProcessInterface.h
index 26fd956f96bb..0712b3bf4a3e 100644
--- a/contrib/llvm-project/lldb/include/lldb/Interpreter/ScriptedProcessInterface.h
+++ b/contrib/llvm-project/lldb/include/lldb/Interpreter/ScriptedProcessInterface.h
@@ -23,7 +23,8 @@ class ScriptedProcessInterface : virtual public ScriptedInterface {
public:
StructuredData::GenericSP
CreatePluginObject(llvm::StringRef class_name, ExecutionContext &exe_ctx,
- StructuredData::DictionarySP args_sp) override {
+ StructuredData::DictionarySP args_sp,
+ StructuredData::Generic *script_obj = nullptr) override {
return nullptr;
}
@@ -41,6 +42,8 @@ public:
return {};
}
+ virtual StructuredData::DictionarySP GetThreadsInfo() { return nullptr; }
+
virtual StructuredData::DictionarySP GetThreadWithID(lldb::tid_t tid) {
return nullptr;
}
@@ -66,18 +69,17 @@ public:
protected:
friend class ScriptedThread;
- virtual lldb::ScriptedThreadInterfaceSP GetScriptedThreadInterface() {
+ virtual lldb::ScriptedThreadInterfaceSP CreateScriptedThreadInterface() {
return nullptr;
}
-
- lldb::ScriptedThreadInterfaceSP m_scripted_thread_interface_sp = nullptr;
};
class ScriptedThreadInterface : virtual public ScriptedInterface {
public:
StructuredData::GenericSP
CreatePluginObject(llvm::StringRef class_name, ExecutionContext &exe_ctx,
- StructuredData::DictionarySP args_sp) override {
+ StructuredData::DictionarySP args_sp,
+ StructuredData::Generic *script_obj = nullptr) override {
return nullptr;
}
diff --git a/contrib/llvm-project/lldb/include/lldb/Symbol/CompactUnwindInfo.h b/contrib/llvm-project/lldb/include/lldb/Symbol/CompactUnwindInfo.h
index ceb501e1c05f..402155cbe08d 100644
--- a/contrib/llvm-project/lldb/include/lldb/Symbol/CompactUnwindInfo.h
+++ b/contrib/llvm-project/lldb/include/lldb/Symbol/CompactUnwindInfo.h
@@ -86,7 +86,7 @@ private:
// valid for (start of the function)
uint32_t valid_range_offset_end =
0; // the offset of the start of the next function
- FunctionInfo() : lsda_address(), personality_ptr_address() {}
+ FunctionInfo() {}
};
struct UnwindHeader {
diff --git a/contrib/llvm-project/lldb/include/lldb/Symbol/CompileUnit.h b/contrib/llvm-project/lldb/include/lldb/Symbol/CompileUnit.h
index 34e34e5514df..44e1d673f1fd 100644
--- a/contrib/llvm-project/lldb/include/lldb/Symbol/CompileUnit.h
+++ b/contrib/llvm-project/lldb/include/lldb/Symbol/CompileUnit.h
@@ -208,9 +208,9 @@ public:
/// unit file.
///
/// \param[in] exact
- /// If \btrue match only if there is a line table entry for this line
+ /// If \b true match only if there is a line table entry for this line
/// number.
- /// If \bfalse, find the line table entry equal to or after this line
+ /// If \b false, find the line table entry equal to or after this line
/// number.
///
/// \param[out] line_entry
diff --git a/contrib/llvm-project/lldb/include/lldb/Symbol/DWARFCallFrameInfo.h b/contrib/llvm-project/lldb/include/lldb/Symbol/DWARFCallFrameInfo.h
index f85bc7e844a0..43baf4dd39a1 100644
--- a/contrib/llvm-project/lldb/include/lldb/Symbol/DWARFCallFrameInfo.h
+++ b/contrib/llvm-project/lldb/include/lldb/Symbol/DWARFCallFrameInfo.h
@@ -107,7 +107,7 @@ private:
: cie_offset(offset), version(-1), code_align(0), data_align(0),
return_addr_reg_num(LLDB_INVALID_REGNUM), inst_offset(0),
inst_length(0), ptr_encoding(0), lsda_addr_encoding(DW_EH_PE_omit),
- personality_loc(LLDB_INVALID_ADDRESS), initial_row() {}
+ personality_loc(LLDB_INVALID_ADDRESS) {}
};
typedef std::shared_ptr<CIE> CIESP;
diff --git a/contrib/llvm-project/lldb/include/lldb/Symbol/Function.h b/contrib/llvm-project/lldb/include/lldb/Symbol/Function.h
index b703617773ff..83f9979c3c52 100644
--- a/contrib/llvm-project/lldb/include/lldb/Symbol/Function.h
+++ b/contrib/llvm-project/lldb/include/lldb/Symbol/Function.h
@@ -110,8 +110,6 @@ public:
/// The number of bytes that this object occupies in memory.
/// The returned value does not include the bytes for any
/// shared string values.
- ///
- /// \see ConstString::StaticMemorySize ()
virtual size_t MemorySize() const;
protected:
@@ -238,8 +236,6 @@ public:
/// The number of bytes that this object occupies in memory.
/// The returned value does not include the bytes for any
/// shared string values.
- ///
- /// \see ConstString::StaticMemorySize ()
size_t MemorySize() const override;
private:
@@ -595,8 +591,6 @@ public:
/// The number of bytes that this object occupies in memory.
/// The returned value does not include the bytes for any
/// shared string values.
- ///
- /// \see ConstString::StaticMemorySize ()
size_t MemorySize() const;
/// Get whether compiler optimizations were enabled for this function
diff --git a/contrib/llvm-project/lldb/include/lldb/Symbol/LineTable.h b/contrib/llvm-project/lldb/include/lldb/Symbol/LineTable.h
index 5cd22bd831ee..b5121b29fe02 100644
--- a/contrib/llvm-project/lldb/include/lldb/Symbol/LineTable.h
+++ b/contrib/llvm-project/lldb/include/lldb/Symbol/LineTable.h
@@ -206,7 +206,6 @@ public:
LineTable *LinkLineTable(const FileRangeMap &file_range_map);
-protected:
struct Entry {
Entry()
: line(0), is_start_of_statement(false), is_start_of_basic_block(false),
@@ -303,6 +302,7 @@ protected:
uint16_t file_idx = 0;
};
+protected:
struct EntrySearchInfo {
LineTable *line_table;
lldb_private::Section *a_section;
diff --git a/contrib/llvm-project/lldb/include/lldb/Symbol/ObjectContainer.h b/contrib/llvm-project/lldb/include/lldb/Symbol/ObjectContainer.h
index 1e01e93da9d2..4f0c73693295 100644
--- a/contrib/llvm-project/lldb/include/lldb/Symbol/ObjectContainer.h
+++ b/contrib/llvm-project/lldb/include/lldb/Symbol/ObjectContainer.h
@@ -39,7 +39,7 @@ public:
lldb::DataBufferSP &data_sp, lldb::offset_t data_offset)
: ModuleChild(module_sp),
m_file(), // This file can be different than the module's file spec
- m_offset(file_offset), m_length(length), m_data() {
+ m_offset(file_offset), m_length(length) {
if (file)
m_file = *file;
if (data_sp)
diff --git a/contrib/llvm-project/lldb/include/lldb/Symbol/SymbolFile.h b/contrib/llvm-project/lldb/include/lldb/Symbol/SymbolFile.h
index 7c0365483c12..6cdb2bfb686b 100644
--- a/contrib/llvm-project/lldb/include/lldb/Symbol/SymbolFile.h
+++ b/contrib/llvm-project/lldb/include/lldb/Symbol/SymbolFile.h
@@ -67,8 +67,7 @@ public:
// Constructors and Destructors
SymbolFile(lldb::ObjectFileSP objfile_sp)
- : m_objfile_sp(std::move(objfile_sp)), m_abilities(0),
- m_calculated_abilities(false) {}
+ : m_objfile_sp(std::move(objfile_sp)) {}
~SymbolFile() override = default;
@@ -317,14 +316,37 @@ public:
///
/// \returns 0.0 if no information has been parsed or if there is
/// no computational cost to parsing the debug information.
- virtual StatsDuration GetDebugInfoParseTime() { return StatsDuration(0.0); }
+ virtual StatsDuration::Duration GetDebugInfoParseTime() { return {}; }
/// Return the time it took to index the debug information in the object
/// file.
///
/// \returns 0.0 if the file doesn't need to be indexed or if it
/// hasn't been indexed yet, or a valid duration if it has.
- virtual StatsDuration GetDebugInfoIndexTime() { return StatsDuration(0.0); }
+ virtual StatsDuration::Duration GetDebugInfoIndexTime() { return {}; }
+
+ /// Accessors for the bool that indicates if the debug info index was loaded
+ /// from, or saved to the module index cache.
+ ///
+ /// In statistics it is handy to know if a module's debug info was loaded from
+ /// or saved to the cache. When the debug info index is loaded from the cache
+ /// startup times can be faster. When the cache is enabled and the debug info
+ /// index is saved to the cache, debug sessions can be slower. These accessors
+ /// can be accessed by the statistics and emitted to help track these costs.
+ /// \{
+ bool GetDebugInfoIndexWasLoadedFromCache() const {
+ return m_index_was_loaded_from_cache;
+ }
+ void SetDebugInfoIndexWasLoadedFromCache() {
+ m_index_was_loaded_from_cache = true;
+ }
+ bool GetDebugInfoIndexWasSavedToCache() const {
+ return m_index_was_saved_to_cache;
+ }
+ void SetDebugInfoIndexWasSavedToCache() {
+ m_index_was_saved_to_cache = true;
+ }
+ /// \}
protected:
void AssertModuleLock();
@@ -341,8 +363,10 @@ protected:
llvm::Optional<std::vector<lldb::CompUnitSP>> m_compile_units;
TypeList m_type_list;
Symtab *m_symtab = nullptr;
- uint32_t m_abilities;
- bool m_calculated_abilities;
+ uint32_t m_abilities = 0;
+ bool m_calculated_abilities = false;
+ bool m_index_was_loaded_from_cache = false;
+ bool m_index_was_saved_to_cache = false;
private:
SymbolFile(const SymbolFile &) = delete;
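A brief sketch of how the new SymbolFile cache flags above are meant to be driven; the wrapper function and its callers are illustrative, not part of this change.

#include "lldb/Symbol/SymbolFile.h"

// Illustrative only: an index implementation flips the flag when its on-disk
// index cache is hit or written, and the statistics code later reads it back.
void RecordIndexCacheUse(lldb_private::SymbolFile &symfile,
                         bool loaded_from_cache, bool saved_to_cache) {
  if (loaded_from_cache)
    symfile.SetDebugInfoIndexWasLoadedFromCache();
  if (saved_to_cache)
    symfile.SetDebugInfoIndexWasSavedToCache();
}
// Statistics emission then only needs the getters:
//   symfile.GetDebugInfoIndexWasLoadedFromCache();
//   symfile.GetDebugInfoIndexWasSavedToCache();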
diff --git a/contrib/llvm-project/lldb/include/lldb/Symbol/Symtab.h b/contrib/llvm-project/lldb/include/lldb/Symbol/Symtab.h
index fe0a82306c4f..504b49c02674 100644
--- a/contrib/llvm-project/lldb/include/lldb/Symbol/Symtab.h
+++ b/contrib/llvm-project/lldb/include/lldb/Symbol/Symtab.h
@@ -212,6 +212,30 @@ public:
/// false if the symbol table wasn't cached or was out of date.
bool LoadFromCache();
+
+ /// Accessors for the bool that indicates if the debug info index was loaded
+ /// from, or saved to the module index cache.
+ ///
+ /// In statistics it is handy to know if a module's debug info was loaded from
+ /// or saved to the cache. When the debug info index is loaded from the cache
+ /// startup times can be faster. When the cache is enabled and the debug info
+ /// index is saved to the cache, debug sessions can be slower. These accessors
+ /// can be accessed by the statistics and emitted to help track these costs.
+ /// \{
+ bool GetWasLoadedFromCache() const {
+ return m_loaded_from_cache;
+ }
+ void SetWasLoadedFromCache() {
+ m_loaded_from_cache = true;
+ }
+ bool GetWasSavedToCache() const {
+ return m_saved_to_cache;
+ }
+ void SetWasSavedToCache() {
+ m_saved_to_cache = true;
+ }
+ /// \}
+
protected:
typedef std::vector<Symbol> collection;
typedef collection::iterator iterator;
@@ -252,7 +276,8 @@ protected:
m_name_to_symbol_indices;
mutable std::recursive_mutex
m_mutex; // Provide thread safety for this symbol table
- bool m_file_addr_to_index_computed : 1, m_name_indexes_computed : 1;
+ bool m_file_addr_to_index_computed : 1, m_name_indexes_computed : 1,
+ m_loaded_from_cache : 1, m_saved_to_cache : 1;
private:
UniqueCStringMap<uint32_t> &
diff --git a/contrib/llvm-project/lldb/include/lldb/Symbol/Type.h b/contrib/llvm-project/lldb/include/lldb/Symbol/Type.h
index 7754e0299096..24b94921df94 100644
--- a/contrib/llvm-project/lldb/include/lldb/Symbol/Type.h
+++ b/contrib/llvm-project/lldb/include/lldb/Symbol/Type.h
@@ -314,7 +314,7 @@ private:
class TypeListImpl {
public:
- TypeListImpl() : m_content() {}
+ TypeListImpl() {}
void Append(const lldb::TypeImplSP &type) { m_content.push_back(type); }
@@ -345,10 +345,7 @@ private:
class TypeMemberImpl {
public:
- TypeMemberImpl()
- : m_type_impl_sp(), m_name()
-
- {}
+ TypeMemberImpl() {}
TypeMemberImpl(const lldb::TypeImplSP &type_impl_sp, uint64_t bit_offset,
ConstString name, uint32_t bitfield_bit_size = 0,
@@ -357,7 +354,7 @@ public:
m_bitfield_bit_size(bitfield_bit_size), m_is_bitfield(is_bitfield) {}
TypeMemberImpl(const lldb::TypeImplSP &type_impl_sp, uint64_t bit_offset)
- : m_type_impl_sp(type_impl_sp), m_bit_offset(bit_offset), m_name(),
+ : m_type_impl_sp(type_impl_sp), m_bit_offset(bit_offset),
m_bitfield_bit_size(0), m_is_bitfield(false) {
if (m_type_impl_sp)
m_name = m_type_impl_sp->GetName();
@@ -440,7 +437,7 @@ private:
class TypeMemberFunctionImpl {
public:
- TypeMemberFunctionImpl() : m_type(), m_decl(), m_name() {}
+ TypeMemberFunctionImpl() {}
TypeMemberFunctionImpl(const CompilerType &type, const CompilerDecl &decl,
const std::string &name,
@@ -477,7 +474,7 @@ private:
class TypeEnumMemberImpl {
public:
- TypeEnumMemberImpl() : m_integer_type_sp(), m_name("<invalid>"), m_value() {}
+ TypeEnumMemberImpl() : m_name("<invalid>") {}
TypeEnumMemberImpl(const lldb::TypeImplSP &integer_type_sp, ConstString name,
const llvm::APSInt &value);
@@ -505,7 +502,7 @@ protected:
class TypeEnumMemberListImpl {
public:
- TypeEnumMemberListImpl() : m_content() {}
+ TypeEnumMemberListImpl() {}
void Append(const lldb::TypeEnumMemberImplSP &type) {
m_content.push_back(type);
diff --git a/contrib/llvm-project/lldb/include/lldb/Symbol/UnwindPlan.h b/contrib/llvm-project/lldb/include/lldb/Symbol/UnwindPlan.h
index cc2302d25831..ebb0ec421da7 100644
--- a/contrib/llvm-project/lldb/include/lldb/Symbol/UnwindPlan.h
+++ b/contrib/llvm-project/lldb/include/lldb/Symbol/UnwindPlan.h
@@ -395,12 +395,10 @@ public:
typedef std::shared_ptr<Row> RowSP;
UnwindPlan(lldb::RegisterKind reg_kind)
- : m_row_list(), m_plan_valid_address_range(), m_register_kind(reg_kind),
- m_return_addr_register(LLDB_INVALID_REGNUM), m_source_name(),
+ : m_register_kind(reg_kind), m_return_addr_register(LLDB_INVALID_REGNUM),
m_plan_is_sourced_from_compiler(eLazyBoolCalculate),
m_plan_is_valid_at_all_instruction_locations(eLazyBoolCalculate),
- m_plan_is_for_signal_trap(eLazyBoolCalculate),
- m_lsda_address(), m_personality_func_addr() {}
+ m_plan_is_for_signal_trap(eLazyBoolCalculate) {}
// Performs a deep copy of the plan, including all the rows (expensive).
UnwindPlan(const UnwindPlan &rhs)
@@ -442,7 +440,7 @@ public:
m_return_addr_register = regnum;
}
- uint32_t GetReturnAddressRegister(void) { return m_return_addr_register; }
+ uint32_t GetReturnAddressRegister() { return m_return_addr_register; }
uint32_t GetInitialCFARegister() const {
if (m_row_list.empty())
diff --git a/contrib/llvm-project/lldb/include/lldb/Target/InstrumentationRuntime.h b/contrib/llvm-project/lldb/include/lldb/Target/InstrumentationRuntime.h
index eeec91f36af4..a6121c24b956 100644
--- a/contrib/llvm-project/lldb/include/lldb/Target/InstrumentationRuntime.h
+++ b/contrib/llvm-project/lldb/include/lldb/Target/InstrumentationRuntime.h
@@ -42,8 +42,7 @@ class InstrumentationRuntime
protected:
InstrumentationRuntime(const lldb::ProcessSP &process_sp)
- : m_process_wp(), m_runtime_module(), m_breakpoint_id(0),
- m_is_active(false) {
+ : m_breakpoint_id(0), m_is_active(false) {
if (process_sp)
m_process_wp = process_sp;
}
diff --git a/contrib/llvm-project/lldb/include/lldb/Target/Language.h b/contrib/llvm-project/lldb/include/lldb/Target/Language.h
index 0b0891c14029..ce2d273a8277 100644
--- a/contrib/llvm-project/lldb/include/lldb/Target/Language.h
+++ b/contrib/llvm-project/lldb/include/lldb/Target/Language.h
@@ -57,8 +57,7 @@ public:
class ImageListTypeScavenger : public TypeScavenger {
class Result : public Language::TypeScavenger::Result {
public:
- Result(CompilerType type)
- : Language::TypeScavenger::Result(), m_compiler_type(type) {}
+ Result(CompilerType type) : m_compiler_type(type) {}
bool IsValid() override { return m_compiler_type.IsValid(); }
@@ -95,7 +94,7 @@ public:
template <typename... ScavengerTypes>
class EitherTypeScavenger : public TypeScavenger {
public:
- EitherTypeScavenger() : TypeScavenger(), m_scavengers() {
+ EitherTypeScavenger() : TypeScavenger() {
for (std::shared_ptr<TypeScavenger> scavenger : { std::shared_ptr<TypeScavenger>(new ScavengerTypes())... }) {
if (scavenger)
m_scavengers.push_back(scavenger);
@@ -118,7 +117,7 @@ public:
template <typename... ScavengerTypes>
class UnionTypeScavenger : public TypeScavenger {
public:
- UnionTypeScavenger() : TypeScavenger(), m_scavengers() {
+ UnionTypeScavenger() : TypeScavenger() {
for (std::shared_ptr<TypeScavenger> scavenger : { std::shared_ptr<TypeScavenger>(new ScavengerTypes())... }) {
if (scavenger)
m_scavengers.push_back(scavenger);
diff --git a/contrib/llvm-project/lldb/include/lldb/Target/MemoryTagManager.h b/contrib/llvm-project/lldb/include/lldb/Target/MemoryTagManager.h
index a5e0deba14a9..859c43cb437a 100644
--- a/contrib/llvm-project/lldb/include/lldb/Target/MemoryTagManager.h
+++ b/contrib/llvm-project/lldb/include/lldb/Target/MemoryTagManager.h
@@ -35,8 +35,8 @@ public:
// you get will have been shifted down 56 before being returned.
virtual lldb::addr_t GetLogicalTag(lldb::addr_t addr) const = 0;
- // Remove non address bits from a pointer
- virtual lldb::addr_t RemoveNonAddressBits(lldb::addr_t addr) const = 0;
+ // Remove tag bits from a pointer
+ virtual lldb::addr_t RemoveTagBits(lldb::addr_t addr) const = 0;
// Return the difference between two addresses, ignoring any logical tags they
// have. If your tags are just part of a larger set of ignored bits, this
@@ -64,7 +64,7 @@ public:
// (which may include one or more memory regions)
//
// If so, return a modified range which will have been expanded
- // to be granule aligned.
+ // to be granule aligned. Otherwise return an error.
//
// Tags in the input addresses are ignored and not present
// in the returned range.
@@ -72,6 +72,23 @@ public:
lldb::addr_t addr, lldb::addr_t end_addr,
const lldb_private::MemoryRegionInfos &memory_regions) const = 0;
+ // Given a range addr to end_addr, check that end_addr >= addr.
+ // If it is not, return an error saying so.
+ // Otherwise, granule align it and return a set of ranges representing
+ // subsections of the aligned range that have memory tagging enabled.
+ //
+ // Basically a sparse version of MakeTaggedRange. Use this when you
+ // want to know which parts of a larger range have memory tagging.
+ //
+ // Regions in memory_regions should be sorted in ascending order and
+ // not overlap. (use Process GetMemoryRegions)
+ //
+ // Tags in the input addresses are ignored and not present
+ // in the returned ranges.
+ virtual llvm::Expected<std::vector<TagRange>> MakeTaggedRanges(
+ lldb::addr_t addr, lldb::addr_t end_addr,
+ const lldb_private::MemoryRegionInfos &memory_regions) const = 0;
+
// Return the type value to use in GDB protocol qMemTags packets to read
// allocation tags. This is named "Allocation" specifically because the spec
// allows for logical tags to be read the same way, though we do not use that.
diff --git a/contrib/llvm-project/lldb/include/lldb/Target/MemoryTagMap.h b/contrib/llvm-project/lldb/include/lldb/Target/MemoryTagMap.h
new file mode 100644
index 000000000000..acf3825bfb3f
--- /dev/null
+++ b/contrib/llvm-project/lldb/include/lldb/Target/MemoryTagMap.h
@@ -0,0 +1,98 @@
+//===-- MemoryTagMap.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLDB_TARGET_MEMORYTAGMAP_H
+#define LLDB_TARGET_MEMORYTAGMAP_H
+
+#include "lldb/Target/MemoryTagManager.h"
+#include "lldb/lldb-private.h"
+#include "llvm/ADT/Optional.h"
+#include <map>
+
+namespace lldb_private {
+
+/// MemoryTagMap provides a way to give a sparse read result
+/// when reading memory tags for a range. This is useful when
+/// you want to annotate some large memory dump that might include
+/// tagged memory but you don't know that it is all tagged.
+class MemoryTagMap {
+public:
+ /// Init an empty tag map
+ ///
+ /// \param [in] manager
+ /// Non-null pointer to a memory tag manager.
+ MemoryTagMap(const MemoryTagManager *manager);
+
+ /// Insert tags into the map starting from addr.
+ ///
+ /// \param [in] addr
+ /// Start address of the range to insert tags for.
+ /// This address should be granule aligned and have had
+ /// any non address bits removed.
+ /// (ideally you would use the base of the range you used
+ /// to read the tags in the first place)
+ ///
+ /// \param [in] tags
+ /// Vector of tags to insert. The first tag will be inserted
+ /// at addr, the next at addr+granule size and so on until
+ /// all tags have been inserted.
+ void InsertTags(lldb::addr_t addr, const std::vector<lldb::addr_t> tags);
+
+ bool Empty() const;
+
+ /// Lookup memory tags for a range of memory from addr to addr+len.
+ ///
+ /// \param [in] addr
+ /// The start of the range. This may include non address bits and
+ /// does not have to be granule aligned.
+ ///
+ /// \param [in] len
+ /// The length in bytes of the range to read tags for. This does
+ /// not need to be multiple of the granule size.
+ ///
+ /// \return
+ /// A vector containing the tags found for the granules in the
+ /// range. (which is the result of granule aligning the given range)
+ ///
+ /// Each item in the vector is an optional tag. Meaning that if
+ /// it is valid then the granule had a tag and if not, it didn't.
+ ///
+ /// If the range had no tags at all, the vector will be empty.
+ /// If some of the range was tagged it will have items and some
+ /// of them may be llvm::None.
+ /// (this saves the caller checking whether all items are llvm::None)
+ std::vector<llvm::Optional<lldb::addr_t>> GetTags(lldb::addr_t addr,
+ size_t len) const;
+
+private:
+ /// Lookup the tag for address
+ ///
+ /// \param [in] address
+ /// The address to lookup a tag for. This should be aligned
+ /// to a granule boundary.
+ ///
+ /// \return
+ /// The tag for the granule that address refers to, or llvm::None
+ /// if it has no memory tag.
+ llvm::Optional<lldb::addr_t> GetTag(lldb::addr_t addr) const;
+
+ // A map of granule aligned addresses to their memory tag
+ std::map<lldb::addr_t, lldb::addr_t> m_addr_to_tag;
+
+ // Memory tag manager used to align addresses and get granule size.
+ // Ideally this would be a const& but only certain architectures will
+ // have a memory tag manager class to provide here. So for a method
+ // returning a MemoryTagMap, Optional<MemoryTagMap> allows it to handle
+ // architectures without memory tagging. Optionals cannot hold references
+ // so we go with a pointer that we assume will be not be null.
+ const MemoryTagManager *m_manager;
+};
+
+} // namespace lldb_private
+
+#endif // LLDB_TARGET_MEMORYTAGMAP_H
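A minimal usage sketch for the new MemoryTagMap, assuming manager is a concrete MemoryTagManager (for example the AArch64 MTE one) and tags were previously read for a granule-aligned tagged_base; the wrapper function is hypothetical.

#include "lldb/Target/MemoryTagMap.h"

std::vector<llvm::Optional<lldb::addr_t>>
TagsForDumpRange(const lldb_private::MemoryTagManager *manager,
                 lldb::addr_t tagged_base,
                 const std::vector<lldb::addr_t> &tags,
                 lldb::addr_t dump_addr, size_t dump_len) {
  lldb_private::MemoryTagMap tag_map(manager);
  // tagged_base should already be granule aligned with non-address bits
  // removed, as the header comments above require.
  tag_map.InsertTags(tagged_base, tags);
  // One entry per granule of the (granule-aligned) dump range; untagged
  // granules come back as llvm::None, an entirely untagged range as an
  // empty vector.
  return tag_map.GetTags(dump_addr, dump_len);
}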
diff --git a/contrib/llvm-project/lldb/include/lldb/Target/Platform.h b/contrib/llvm-project/lldb/include/lldb/Target/Platform.h
index 26127359a322..4ef6c9d82b1a 100644
--- a/contrib/llvm-project/lldb/include/lldb/Target/Platform.h
+++ b/contrib/llvm-project/lldb/include/lldb/Target/Platform.h
@@ -863,6 +863,8 @@ public:
return nullptr;
}
+ virtual CompilerType GetSiginfoType(const llvm::Triple &triple);
+
protected:
/// Create a list of ArchSpecs with the given OS and a architectures. The
/// vendor field is left as an "unspecified unknown".
@@ -958,7 +960,7 @@ private:
class PlatformList {
public:
- PlatformList() : m_mutex(), m_platforms(), m_selected_platform_sp() {}
+ PlatformList() {}
~PlatformList() = default;
diff --git a/contrib/llvm-project/lldb/include/lldb/Target/Process.h b/contrib/llvm-project/lldb/include/lldb/Target/Process.h
index e27cb8cbf2aa..12ed1e09227c 100644
--- a/contrib/llvm-project/lldb/include/lldb/Target/Process.h
+++ b/contrib/llvm-project/lldb/include/lldb/Target/Process.h
@@ -112,15 +112,12 @@ protected:
class ProcessAttachInfo : public ProcessInstanceInfo {
public:
- ProcessAttachInfo()
- : ProcessInstanceInfo(), m_listener_sp(), m_hijack_listener_sp(),
- m_plugin_name() {}
+ ProcessAttachInfo() {}
ProcessAttachInfo(const ProcessLaunchInfo &launch_info)
- : ProcessInstanceInfo(), m_listener_sp(), m_hijack_listener_sp(),
- m_plugin_name(), m_resume_count(0), m_wait_for_launch(false),
- m_ignore_existing(true), m_continue_once_attached(false),
- m_detach_on_error(true), m_async(false) {
+ : m_resume_count(0), m_wait_for_launch(false), m_ignore_existing(true),
+ m_continue_once_attached(false), m_detach_on_error(true),
+ m_async(false) {
ProcessInfo::operator=(launch_info);
SetProcessPluginName(launch_info.GetProcessPluginName());
SetResumeCount(launch_info.GetResumeCount());
@@ -1887,7 +1884,7 @@ public:
/// want to deallocate.
///
/// \return
- /// \btrue if the memory was deallocated, \bfalse otherwise.
+ /// \b true if the memory was deallocated, \b false otherwise.
virtual Status DoDeallocateMemory(lldb::addr_t ptr) {
Status error;
error.SetErrorStringWithFormatv(
@@ -1906,7 +1903,7 @@ public:
/// want to deallocate.
///
/// \return
- /// \btrue if the memory was deallocated, \bfalse otherwise.
+ /// \b true if the memory was deallocated, \b false otherwise.
Status DeallocateMemory(lldb::addr_t ptr);
/// Get any available STDOUT.
diff --git a/contrib/llvm-project/lldb/include/lldb/Target/RegisterCheckpoint.h b/contrib/llvm-project/lldb/include/lldb/Target/RegisterCheckpoint.h
index daf20db999b3..8681fb962801 100644
--- a/contrib/llvm-project/lldb/include/lldb/Target/RegisterCheckpoint.h
+++ b/contrib/llvm-project/lldb/include/lldb/Target/RegisterCheckpoint.h
@@ -31,8 +31,7 @@ public:
eDataBackup
};
- RegisterCheckpoint(Reason reason)
- : UserID(0), m_data_sp(), m_reason(reason) {}
+ RegisterCheckpoint(Reason reason) : UserID(0), m_reason(reason) {}
~RegisterCheckpoint() = default;
diff --git a/contrib/llvm-project/lldb/include/lldb/Target/SectionLoadHistory.h b/contrib/llvm-project/lldb/include/lldb/Target/SectionLoadHistory.h
index dd024301d0cf..0240cebda85f 100644
--- a/contrib/llvm-project/lldb/include/lldb/Target/SectionLoadHistory.h
+++ b/contrib/llvm-project/lldb/include/lldb/Target/SectionLoadHistory.h
@@ -24,7 +24,7 @@ public:
eStopIDNow = UINT32_MAX
};
// Constructors and Destructors
- SectionLoadHistory() : m_stop_id_to_section_load_list(), m_mutex() {}
+ SectionLoadHistory() {}
~SectionLoadHistory() {
// Call clear since this takes a lock and clears the section load list in
diff --git a/contrib/llvm-project/lldb/include/lldb/Target/SectionLoadList.h b/contrib/llvm-project/lldb/include/lldb/Target/SectionLoadList.h
index e8535e7ac51a..548d44a181a7 100644
--- a/contrib/llvm-project/lldb/include/lldb/Target/SectionLoadList.h
+++ b/contrib/llvm-project/lldb/include/lldb/Target/SectionLoadList.h
@@ -22,7 +22,7 @@ namespace lldb_private {
class SectionLoadList {
public:
// Constructors and Destructors
- SectionLoadList() : m_addr_to_sect(), m_sect_to_addr(), m_mutex() {}
+ SectionLoadList() {}
SectionLoadList(const SectionLoadList &rhs);
diff --git a/contrib/llvm-project/lldb/include/lldb/Target/StackFrame.h b/contrib/llvm-project/lldb/include/lldb/Target/StackFrame.h
index 1a9aaad1a4db..1b0485b22cac 100644
--- a/contrib/llvm-project/lldb/include/lldb/Target/StackFrame.h
+++ b/contrib/llvm-project/lldb/include/lldb/Target/StackFrame.h
@@ -171,7 +171,7 @@ public:
/// functions looking up symbolic information for a given pc value multiple
/// times.
///
- /// \params [in] resolve_scope
+ /// \param [in] resolve_scope
/// Flags from the SymbolContextItem enumerated type which specify what
/// type of symbol context is needed by this caller.
///
@@ -408,10 +408,10 @@ public:
/// Create a ValueObject for a given Variable in this StackFrame.
///
- /// \params [in] variable_sp
+ /// \param [in] variable_sp
/// The Variable to base this ValueObject on
///
- /// \params [in] use_dynamic
+ /// \param [in] use_dynamic
/// Whether the correct dynamic type of the variable should be
/// determined before creating the ValueObject, or if the static type
/// is sufficient. One of the DynamicValueType enumerated values.
@@ -437,7 +437,7 @@ public:
/// the current instruction. The ExpressionPath should indicate how to get
/// to this value using "frame variable."
///
- /// \params [in] addr
+ /// \param [in] addr
/// The raw address.
///
/// \return
@@ -448,10 +448,10 @@ public:
/// given register plus an offset. The ExpressionPath should indicate how
/// to get to this value using "frame variable."
///
- /// \params [in] reg
+ /// \param [in] reg
/// The name of the register.
///
- /// \params [in] offset
+ /// \param [in] offset
/// The offset from the register. Particularly important for sp...
///
/// \return
@@ -465,7 +465,7 @@ public:
/// PC in the stack frame and traverse through all parent blocks stopping at
/// inlined function boundaries.
///
- /// \params [in] name
+ /// \param [in] name
/// The name of the variable.
///
/// \return
diff --git a/contrib/llvm-project/lldb/include/lldb/Target/Statistics.h b/contrib/llvm-project/lldb/include/lldb/Target/Statistics.h
index 087fbee26328..d2b8f746a38c 100644
--- a/contrib/llvm-project/lldb/include/lldb/Target/Statistics.h
+++ b/contrib/llvm-project/lldb/include/lldb/Target/Statistics.h
@@ -9,20 +9,41 @@
#ifndef LLDB_TARGET_STATISTICS_H
#define LLDB_TARGET_STATISTICS_H
-#include <chrono>
-#include <string>
-#include <vector>
-
+#include "lldb/Utility/ConstString.h"
#include "lldb/Utility/Stream.h"
#include "lldb/lldb-forward.h"
#include "llvm/Support/JSON.h"
+#include <atomic>
+#include <chrono>
+#include <ratio>
+#include <string>
+#include <vector>
namespace lldb_private {
using StatsClock = std::chrono::high_resolution_clock;
-using StatsDuration = std::chrono::duration<double>;
using StatsTimepoint = std::chrono::time_point<StatsClock>;
+class StatsDuration {
+public:
+ using Duration = std::chrono::duration<double>;
+
+ Duration get() const {
+ return Duration(InternalDuration(value.load(std::memory_order_relaxed)));
+ }
+ operator Duration() const { return get(); }
+
+ StatsDuration &operator+=(Duration dur) {
+ value.fetch_add(std::chrono::duration_cast<InternalDuration>(dur).count(),
+ std::memory_order_relaxed);
+ return *this;
+ }
+
+private:
+ using InternalDuration = std::chrono::duration<uint64_t, std::micro>;
+ std::atomic<uint64_t> value{0};
+};
+
/// A class that measures elapsed time in an exception safe way.
///
/// This is a RAII class is designed to help gather timing statistics within
@@ -54,7 +75,7 @@ public:
m_start_time = StatsClock::now();
}
~ElapsedTime() {
- StatsDuration elapsed = StatsClock::now() - m_start_time;
+ StatsClock::duration elapsed = StatsClock::now() - m_start_time;
m_elapsed_time += elapsed;
}
};
@@ -84,6 +105,15 @@ struct ModuleStats {
double debug_parse_time = 0.0;
double debug_index_time = 0.0;
uint64_t debug_info_size = 0;
+ bool symtab_loaded_from_cache = false;
+ bool symtab_saved_to_cache = false;
+ bool debug_info_index_loaded_from_cache = false;
+ bool debug_info_index_saved_to_cache = false;
+};
+
+struct ConstStringStats {
+ llvm::json::Value ToJSON() const;
+ ConstString::MemoryStats stats = ConstString::GetMemoryStats();
};
/// A class that represents statistics for a since lldb_private::Target.
@@ -100,7 +130,7 @@ public:
StatsSuccessFail &GetFrameVariableStats() { return m_frame_var; }
protected:
- StatsDuration m_create_time{0.0};
+ StatsDuration m_create_time;
llvm::Optional<StatsTimepoint> m_launch_or_attach_time;
llvm::Optional<StatsTimepoint> m_first_private_stop_time;
llvm::Optional<StatsTimepoint> m_first_public_stop_time;
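A short sketch of the reworked StatsDuration in use, following the ElapsedTime RAII pattern shown in the hunk above (constructed with a reference to the duration it should add to); the function being timed is hypothetical.

lldb_private::StatsDuration g_parse_time; // accumulates atomically across threads

void ParseSomething() {
  lldb_private::ElapsedTime elapsed(g_parse_time); // starts the clock
  // ... work being measured ...
}                                                  // destructor adds the elapsed time

// Consumers read it back as a std::chrono::duration<double>:
//   double seconds = g_parse_time.get().count();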
diff --git a/contrib/llvm-project/lldb/include/lldb/Target/Target.h b/contrib/llvm-project/lldb/include/lldb/Target/Target.h
index b85839c15b2f..2c8b36d1e3d9 100644
--- a/contrib/llvm-project/lldb/include/lldb/Target/Target.h
+++ b/contrib/llvm-project/lldb/include/lldb/Target/Target.h
@@ -991,7 +991,7 @@ public:
/// manually set following this function call).
///
/// \return
- /// \b true if the architecture was successfully set, \bfalse otherwise.
+ /// \b true if the architecture was successfully set, \b false otherwise.
bool SetArchitecture(const ArchSpec &arch_spec, bool set_platform = false);
bool MergeArchitecture(const ArchSpec &arch_spec);
diff --git a/contrib/llvm-project/lldb/include/lldb/Target/Thread.h b/contrib/llvm-project/lldb/include/lldb/Target/Thread.h
index 91feed310eb9..f1d4e6c7ef01 100644
--- a/contrib/llvm-project/lldb/include/lldb/Target/Thread.h
+++ b/contrib/llvm-project/lldb/include/lldb/Target/Thread.h
@@ -22,6 +22,7 @@
#include "lldb/Utility/CompletionRequest.h"
#include "lldb/Utility/Event.h"
#include "lldb/Utility/StructuredData.h"
+#include "lldb/Utility/UnimplementedError.h"
#include "lldb/Utility/UserID.h"
#include "lldb/lldb-private.h"
@@ -1184,6 +1185,11 @@ public:
lldb::ThreadSP GetCurrentExceptionBacktrace();
+ virtual llvm::Expected<std::unique_ptr<llvm::MemoryBuffer>>
+ GetSiginfo(size_t max_size) const {
+ return llvm::make_error<UnimplementedError>();
+ }
+
protected:
friend class ThreadPlan;
friend class ThreadList;
@@ -1244,7 +1250,7 @@ protected:
// the stop info was checked against
// the stop info override
const uint32_t m_index_id; ///< A unique 1 based index assigned to each thread
- ///for easy UI/command line access.
+ /// for easy UI/command line access.
lldb::RegisterContextSP m_reg_context_sp; ///< The register context for this
///thread's current register state.
lldb::StateType m_state; ///< The state of our process.
diff --git a/contrib/llvm-project/lldb/include/lldb/Target/Unwind.h b/contrib/llvm-project/lldb/include/lldb/Target/Unwind.h
index 3faef139b00a..105383ddae8a 100644
--- a/contrib/llvm-project/lldb/include/lldb/Target/Unwind.h
+++ b/contrib/llvm-project/lldb/include/lldb/Target/Unwind.h
@@ -18,7 +18,7 @@ namespace lldb_private {
class Unwind {
protected:
// Classes that inherit from Unwind can see and modify these
- Unwind(Thread &thread) : m_thread(thread), m_unwind_mutex() {}
+ Unwind(Thread &thread) : m_thread(thread) {}
public:
virtual ~Unwind() = default;
diff --git a/contrib/llvm-project/lldb/include/lldb/Target/UnwindLLDB.h b/contrib/llvm-project/lldb/include/lldb/Target/UnwindLLDB.h
index f6750171c54a..939226c8c5b9 100644
--- a/contrib/llvm-project/lldb/include/lldb/Target/UnwindLLDB.h
+++ b/contrib/llvm-project/lldb/include/lldb/Target/UnwindLLDB.h
@@ -119,7 +119,7 @@ private:
RegisterContextLLDBSP
reg_ctx_lldb_sp; // These are all RegisterContextUnwind's
- Cursor() : sctx(), reg_ctx_lldb_sp() {}
+ Cursor() {}
private:
Cursor(const Cursor &) = delete;
diff --git a/contrib/llvm-project/lldb/include/lldb/Utility/ConstString.h b/contrib/llvm-project/lldb/include/lldb/Utility/ConstString.h
index 2756f1fd7203..31887d7f6115 100644
--- a/contrib/llvm-project/lldb/include/lldb/Utility/ConstString.h
+++ b/contrib/llvm-project/lldb/include/lldb/Utility/ConstString.h
@@ -394,19 +394,17 @@ public:
///
/// \return
/// The number of bytes that this object occupies in memory.
- ///
- /// \see ConstString::StaticMemorySize ()
size_t MemorySize() const { return sizeof(ConstString); }
- /// Get the size in bytes of the current global string pool.
- ///
- /// Reports the size in bytes of all shared C string values, containers and
- /// any other values as a byte size for the entire string pool.
- ///
- /// \return
- /// The number of bytes that the global string pool occupies
- /// in memory.
- static size_t StaticMemorySize();
+ struct MemoryStats {
+ size_t GetBytesTotal() const { return bytes_total; }
+ size_t GetBytesUsed() const { return bytes_used; }
+ size_t GetBytesUnused() const { return bytes_total - bytes_used; }
+ size_t bytes_total = 0;
+ size_t bytes_used = 0;
+ };
+
+ static MemoryStats GetMemoryStats();
protected:
template <typename T, typename Enable> friend struct ::llvm::DenseMapInfo;
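The new MemoryStats struct replaces the removed StaticMemorySize(); querying it is a one-liner, sketched here for reference.

lldb_private::ConstString::MemoryStats stats =
    lldb_private::ConstString::GetMemoryStats();
size_t total = stats.GetBytesTotal();   // bytes allocated by the string pool
size_t used = stats.GetBytesUsed();     // bytes holding interned strings
size_t unused = stats.GetBytesUnused(); // total - used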
diff --git a/contrib/llvm-project/lldb/include/lldb/Utility/Environment.h b/contrib/llvm-project/lldb/include/lldb/Utility/Environment.h
index 24cbee246f83..c1549a3d60a6 100644
--- a/contrib/llvm-project/lldb/include/lldb/Utility/Environment.h
+++ b/contrib/llvm-project/lldb/include/lldb/Utility/Environment.h
@@ -56,7 +56,7 @@ public:
using Base::try_emplace;
using Base::operator[];
- Environment() : Base() {}
+ Environment() {}
Environment(const Environment &RHS) : Base(RHS) {}
Environment(Environment &&RHS) : Base(std::move(RHS)) {}
Environment(char *const *Env)
diff --git a/contrib/llvm-project/lldb/include/lldb/Utility/Event.h b/contrib/llvm-project/lldb/include/lldb/Utility/Event.h
index 4e38f98a02f3..d3176216115a 100644
--- a/contrib/llvm-project/lldb/include/lldb/Utility/Event.h
+++ b/contrib/llvm-project/lldb/include/lldb/Utility/Event.h
@@ -99,7 +99,7 @@ private:
class EventDataReceipt : public EventData {
public:
- EventDataReceipt() : EventData(), m_predicate(false) {}
+ EventDataReceipt() : m_predicate(false) {}
~EventDataReceipt() override = default;
diff --git a/contrib/llvm-project/lldb/include/lldb/Utility/FileSpec.h b/contrib/llvm-project/lldb/include/lldb/Utility/FileSpec.h
index 305cd04f95c0..ec473a2afeec 100644
--- a/contrib/llvm-project/lldb/include/lldb/Utility/FileSpec.h
+++ b/contrib/llvm-project/lldb/include/lldb/Utility/FileSpec.h
@@ -334,8 +334,6 @@ public:
///
/// \return
/// The number of bytes that this object occupies in memory.
- ///
- /// \see ConstString::StaticMemorySize ()
size_t MemorySize() const;
/// Change the file specified with a new path.
diff --git a/contrib/llvm-project/lldb/include/lldb/Utility/GDBRemote.h b/contrib/llvm-project/lldb/include/lldb/Utility/GDBRemote.h
index f658818de806..451181624c87 100644
--- a/contrib/llvm-project/lldb/include/lldb/Utility/GDBRemote.h
+++ b/contrib/llvm-project/lldb/include/lldb/Utility/GDBRemote.h
@@ -55,7 +55,7 @@ struct GDBRemotePacket {
enum Type { ePacketTypeInvalid = 0, ePacketTypeSend, ePacketTypeRecv };
- GDBRemotePacket() : packet() {}
+ GDBRemotePacket() {}
void Clear() {
packet.data.clear();
diff --git a/contrib/llvm-project/lldb/include/lldb/Utility/Instrumentation.h b/contrib/llvm-project/lldb/include/lldb/Utility/Instrumentation.h
new file mode 100644
index 000000000000..ff6591d63437
--- /dev/null
+++ b/contrib/llvm-project/lldb/include/lldb/Utility/Instrumentation.h
@@ -0,0 +1,105 @@
+//===-- Instrumentation.h ---------------------------------------*- C++ -*-===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLDB_UTILITY_INSTRUMENTATION_H
+#define LLDB_UTILITY_INSTRUMENTATION_H
+
+#include "lldb/Utility/FileSpec.h"
+#include "lldb/Utility/Log.h"
+#include "lldb/Utility/Logging.h"
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/ErrorHandling.h"
+
+#include <map>
+#include <thread>
+#include <type_traits>
+
+namespace lldb_private {
+namespace instrumentation {
+
+template <typename T,
+ typename std::enable_if<std::is_fundamental<T>::value, int>::type = 0>
+inline void stringify_append(llvm::raw_string_ostream &ss, const T &t) {
+ ss << t;
+}
+
+template <typename T, typename std::enable_if<!std::is_fundamental<T>::value,
+ int>::type = 0>
+inline void stringify_append(llvm::raw_string_ostream &ss, const T &t) {
+ ss << &t;
+}
+
+template <typename T>
+inline void stringify_append(llvm::raw_string_ostream &ss, T *t) {
+ ss << reinterpret_cast<void *>(t);
+}
+
+template <typename T>
+inline void stringify_append(llvm::raw_string_ostream &ss, const T *t) {
+ ss << reinterpret_cast<const void *>(t);
+}
+
+template <>
+inline void stringify_append<char>(llvm::raw_string_ostream &ss,
+ const char *t) {
+ ss << '\"' << t << '\"';
+}
+
+template <>
+inline void stringify_append<std::nullptr_t>(llvm::raw_string_ostream &ss,
+ const std::nullptr_t &t) {
+ ss << "\"nullptr\"";
+}
+
+template <typename Head>
+inline void stringify_helper(llvm::raw_string_ostream &ss, const Head &head) {
+ stringify_append(ss, head);
+}
+
+template <typename Head, typename... Tail>
+inline void stringify_helper(llvm::raw_string_ostream &ss, const Head &head,
+ const Tail &...tail) {
+ stringify_append(ss, head);
+ ss << ", ";
+ stringify_helper(ss, tail...);
+}
+
+template <typename... Ts> inline std::string stringify_args(const Ts &...ts) {
+ std::string buffer;
+ llvm::raw_string_ostream ss(buffer);
+ stringify_helper(ss, ts...);
+ return ss.str();
+}
+
+/// RAII object for instrumenting LLDB API functions.
+class Instrumenter {
+public:
+ Instrumenter(llvm::StringRef pretty_func, std::string &&pretty_args = {});
+ ~Instrumenter();
+
+private:
+ void UpdateBoundary();
+
+ llvm::StringRef m_pretty_func;
+
+ /// Whether this function call was the one crossing the API boundary.
+ bool m_local_boundary = false;
+};
+} // namespace instrumentation
+} // namespace lldb_private
+
+#define LLDB_INSTRUMENT() \
+ lldb_private::instrumentation::Instrumenter _instr(LLVM_PRETTY_FUNCTION);
+
+#define LLDB_INSTRUMENT_VA(...) \
+ lldb_private::instrumentation::Instrumenter _instr( \
+ LLVM_PRETTY_FUNCTION, \
+ lldb_private::instrumentation::stringify_args(__VA_ARGS__));
+
+#endif // LLDB_UTILITY_INSTRUMENTATION_H
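A sketch of how an API entry point is expected to use the new macros; SBExample and its methods are placeholders, not real LLDB classes.

#include "lldb/Utility/Instrumentation.h"

struct SBExample {              // placeholder class for illustration
  void SetVerbose(bool verbose);
  void Clear();
};

void SBExample::SetVerbose(bool verbose) {
  // Records the call and (via stringify_args) its arguments; the RAII
  // Instrumenter also tracks whether this call crossed the API boundary.
  LLDB_INSTRUMENT_VA(this, verbose);
  // ... method body ...
}

void SBExample::Clear() {
  LLDB_INSTRUMENT(); // no-argument form, records just the call
}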
diff --git a/contrib/llvm-project/lldb/include/lldb/Utility/Log.h b/contrib/llvm-project/lldb/include/lldb/Utility/Log.h
index 01edec044565..09fd2cb3a7e6 100644
--- a/contrib/llvm-project/lldb/include/lldb/Utility/Log.h
+++ b/contrib/llvm-project/lldb/include/lldb/Utility/Log.h
@@ -10,7 +10,6 @@
#define LLDB_UTILITY_LOG_H
#include "lldb/Utility/Flags.h"
-#include "lldb/Utility/Logging.h"
#include "lldb/lldb-defines.h"
#include "llvm/ADT/ArrayRef.h"
@@ -48,11 +47,31 @@ namespace lldb_private {
class Log final {
public:
+ /// The underlying type of all log channel enums. Declare them as:
+ /// enum class MyLog : MaskType {
+ /// Channel0 = Log::ChannelFlag<0>,
+ /// Channel1 = Log::ChannelFlag<1>,
+ /// ...,
+ /// LLVM_MARK_AS_BITMASK_ENUM(LastChannel),
+ /// };
+ using MaskType = uint64_t;
+
+ template <MaskType Bit>
+ static constexpr MaskType ChannelFlag = MaskType(1) << Bit;
+
// Description of a log channel category.
struct Category {
llvm::StringLiteral name;
llvm::StringLiteral description;
- uint32_t flag;
+ MaskType flag;
+
+ template <typename Cat>
+ constexpr Category(llvm::StringLiteral name,
+ llvm::StringLiteral description, Cat mask)
+ : name(name), description(description), flag(MaskType(mask)) {
+ static_assert(
+ std::is_same<Log::MaskType, std::underlying_type_t<Cat>>::value, "");
+ }
};
// This class describes a log channel. It also encapsulates the behavior
@@ -63,18 +82,22 @@ public:
public:
const llvm::ArrayRef<Category> categories;
- const uint32_t default_flags;
+ const MaskType default_flags;
+ template <typename Cat>
constexpr Channel(llvm::ArrayRef<Log::Category> categories,
- uint32_t default_flags)
+ Cat default_flags)
: log_ptr(nullptr), categories(categories),
- default_flags(default_flags) {}
+ default_flags(MaskType(default_flags)) {
+ static_assert(
+ std::is_same<Log::MaskType, std::underlying_type_t<Cat>>::value, "");
+ }
// This function is safe to call at any time. If the channel is disabled
// after (or concurrently with) this function returning a non-null Log
// pointer, it is still safe to attempt to write to the Log object -- the
// output will be discarded.
- Log *GetLogIfAll(uint32_t mask) {
+ Log *GetLogIfAll(MaskType mask) {
Log *log = log_ptr.load(std::memory_order_relaxed);
if (log && log->GetMask().AllSet(mask))
return log;
@@ -85,7 +108,7 @@ public:
// after (or concurrently with) this function returning a non-null Log
// pointer, it is still safe to attempt to write to the Log object -- the
// output will be discarded.
- Log *GetLogIfAny(uint32_t mask) {
+ Log *GetLogIfAny(MaskType mask) {
Log *log = log_ptr.load(std::memory_order_relaxed);
if (log && log->GetMask().AnySet(mask))
return log;
@@ -180,7 +203,7 @@ private:
std::shared_ptr<llvm::raw_ostream> m_stream_sp;
std::atomic<uint32_t> m_options{0};
- std::atomic<uint32_t> m_mask{0};
+ std::atomic<MaskType> m_mask{0};
void WriteHeader(llvm::raw_ostream &OS, llvm::StringRef file,
llvm::StringRef function);
@@ -211,12 +234,23 @@ private:
static uint32_t GetFlags(llvm::raw_ostream &stream, const ChannelMap::value_type &entry,
llvm::ArrayRef<const char *> categories);
- static void DisableLoggingChild();
-
Log(const Log &) = delete;
void operator=(const Log &) = delete;
};
+// Must be specialized for a particular log type.
+template <typename Cat> Log::Channel &LogChannelFor() = delete;
+
+/// Retrieve the Log object for the channel associated with the given log enum.
+///
+/// Returns a valid Log object if any of the provided categories are enabled.
+/// Otherwise, returns nullptr.
+template <typename Cat> Log *GetLog(Cat mask) {
+ static_assert(std::is_same<Log::MaskType, std::underlying_type_t<Cat>>::value,
+ "");
+ return LogChannelFor<Cat>().GetLogIfAny(Log::MaskType(mask));
+}
+
} // namespace lldb_private
/// The LLDB_LOG* macros defined below are the way to emit log messages.
@@ -274,3 +308,6 @@ private:
} while (0)
#endif // LLDB_UTILITY_LOG_H
+
+// TODO: Remove this and fix includes everywhere.
+#include "lldb/Utility/Logging.h"
diff --git a/contrib/llvm-project/lldb/include/lldb/Utility/Logging.h b/contrib/llvm-project/lldb/include/lldb/Utility/Logging.h
index 1a8a1022c5c0..db84da244954 100644
--- a/contrib/llvm-project/lldb/include/lldb/Utility/Logging.h
+++ b/contrib/llvm-project/lldb/include/lldb/Utility/Logging.h
@@ -9,57 +9,89 @@
#ifndef LLDB_UTILITY_LOGGING_H
#define LLDB_UTILITY_LOGGING_H
+#include "lldb/Utility/Log.h"
+#include "llvm/ADT/BitmaskEnum.h"
#include <cstdint>
-// Log Bits specific to logging in lldb
-#define LIBLLDB_LOG_PROCESS (1u << 1)
-#define LIBLLDB_LOG_THREAD (1u << 2)
-#define LIBLLDB_LOG_DYNAMIC_LOADER (1u << 3)
-#define LIBLLDB_LOG_EVENTS (1u << 4)
-#define LIBLLDB_LOG_BREAKPOINTS (1u << 5)
-#define LIBLLDB_LOG_WATCHPOINTS (1u << 6)
-#define LIBLLDB_LOG_STEP (1u << 7)
-#define LIBLLDB_LOG_EXPRESSIONS (1u << 8)
-#define LIBLLDB_LOG_TEMPORARY (1u << 9)
-#define LIBLLDB_LOG_STATE (1u << 10)
-#define LIBLLDB_LOG_OBJECT (1u << 11)
-#define LIBLLDB_LOG_COMMUNICATION (1u << 12)
-#define LIBLLDB_LOG_CONNECTION (1u << 13)
-#define LIBLLDB_LOG_HOST (1u << 14)
-#define LIBLLDB_LOG_UNWIND (1u << 15)
-#define LIBLLDB_LOG_API (1u << 16)
-#define LIBLLDB_LOG_SCRIPT (1u << 17)
-#define LIBLLDB_LOG_COMMANDS (1U << 18)
-#define LIBLLDB_LOG_TYPES (1u << 19)
-#define LIBLLDB_LOG_SYMBOLS (1u << 20)
-#define LIBLLDB_LOG_MODULES (1u << 21)
-#define LIBLLDB_LOG_TARGET (1u << 22)
-#define LIBLLDB_LOG_MMAP (1u << 23)
-#define LIBLLDB_LOG_OS (1u << 24)
-#define LIBLLDB_LOG_PLATFORM (1u << 25)
-#define LIBLLDB_LOG_SYSTEM_RUNTIME (1u << 26)
-#define LIBLLDB_LOG_JIT_LOADER (1u << 27)
-#define LIBLLDB_LOG_LANGUAGE (1u << 28)
-#define LIBLLDB_LOG_DATAFORMATTERS (1u << 29)
-#define LIBLLDB_LOG_DEMANGLE (1u << 30)
-#define LIBLLDB_LOG_AST (1u << 31)
-#define LIBLLDB_LOG_ALL (UINT32_MAX)
-#define LIBLLDB_LOG_DEFAULT \
- (LIBLLDB_LOG_PROCESS | LIBLLDB_LOG_THREAD | LIBLLDB_LOG_DYNAMIC_LOADER | \
- LIBLLDB_LOG_BREAKPOINTS | LIBLLDB_LOG_WATCHPOINTS | LIBLLDB_LOG_STEP | \
- LIBLLDB_LOG_STATE | LIBLLDB_LOG_SYMBOLS | LIBLLDB_LOG_TARGET | \
- LIBLLDB_LOG_COMMANDS)
-
namespace lldb_private {
-class Log;
+enum class LLDBLog : Log::MaskType {
+ API = Log::ChannelFlag<0>,
+ AST = Log::ChannelFlag<1>,
+ Breakpoints = Log::ChannelFlag<2>,
+ Commands = Log::ChannelFlag<3>,
+ Communication = Log::ChannelFlag<4>,
+ Connection = Log::ChannelFlag<5>,
+ DataFormatters = Log::ChannelFlag<6>,
+ Demangle = Log::ChannelFlag<7>,
+ DynamicLoader = Log::ChannelFlag<8>,
+ Events = Log::ChannelFlag<9>,
+ Expressions = Log::ChannelFlag<10>,
+ Host = Log::ChannelFlag<11>,
+ JITLoader = Log::ChannelFlag<12>,
+ Language = Log::ChannelFlag<13>,
+ MMap = Log::ChannelFlag<14>,
+ Modules = Log::ChannelFlag<15>,
+ Object = Log::ChannelFlag<16>,
+ OS = Log::ChannelFlag<17>,
+ Platform = Log::ChannelFlag<18>,
+ Process = Log::ChannelFlag<19>,
+ Script = Log::ChannelFlag<20>,
+ State = Log::ChannelFlag<21>,
+ Step = Log::ChannelFlag<22>,
+ Symbols = Log::ChannelFlag<23>,
+ SystemRuntime = Log::ChannelFlag<24>,
+ Target = Log::ChannelFlag<25>,
+ Temporary = Log::ChannelFlag<26>,
+ Thread = Log::ChannelFlag<27>,
+ Types = Log::ChannelFlag<28>,
+ Unwind = Log::ChannelFlag<29>,
+ Watchpoints = Log::ChannelFlag<30>,
+ LLVM_MARK_AS_BITMASK_ENUM(Watchpoints),
+};
+
+LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
+
+// Log Bits specific to logging in lldb
+#define LIBLLDB_LOG_PROCESS ::lldb_private::LLDBLog::Process
+#define LIBLLDB_LOG_THREAD ::lldb_private::LLDBLog::Thread
+#define LIBLLDB_LOG_DYNAMIC_LOADER ::lldb_private::LLDBLog::DynamicLoader
+#define LIBLLDB_LOG_EVENTS ::lldb_private::LLDBLog::Events
+#define LIBLLDB_LOG_BREAKPOINTS ::lldb_private::LLDBLog::Breakpoints
+#define LIBLLDB_LOG_WATCHPOINTS ::lldb_private::LLDBLog::Watchpoints
+#define LIBLLDB_LOG_STEP ::lldb_private::LLDBLog::Step
+#define LIBLLDB_LOG_EXPRESSIONS ::lldb_private::LLDBLog::Expressions
+#define LIBLLDB_LOG_TEMPORARY ::lldb_private::LLDBLog::Temporary
+#define LIBLLDB_LOG_STATE ::lldb_private::LLDBLog::State
+#define LIBLLDB_LOG_OBJECT ::lldb_private::LLDBLog::Object
+#define LIBLLDB_LOG_COMMUNICATION ::lldb_private::LLDBLog::Communication
+#define LIBLLDB_LOG_CONNECTION ::lldb_private::LLDBLog::Connection
+#define LIBLLDB_LOG_HOST ::lldb_private::LLDBLog::Host
+#define LIBLLDB_LOG_UNWIND ::lldb_private::LLDBLog::Unwind
+#define LIBLLDB_LOG_API ::lldb_private::LLDBLog::API
+#define LIBLLDB_LOG_SCRIPT ::lldb_private::LLDBLog::Script
+#define LIBLLDB_LOG_COMMANDS ::lldb_private::LLDBLog::Commands
+#define LIBLLDB_LOG_TYPES ::lldb_private::LLDBLog::Types
+#define LIBLLDB_LOG_SYMBOLS ::lldb_private::LLDBLog::Symbols
+#define LIBLLDB_LOG_MODULES ::lldb_private::LLDBLog::Modules
+#define LIBLLDB_LOG_TARGET ::lldb_private::LLDBLog::Target
+#define LIBLLDB_LOG_MMAP ::lldb_private::LLDBLog::MMap
+#define LIBLLDB_LOG_OS ::lldb_private::LLDBLog::OS
+#define LIBLLDB_LOG_PLATFORM ::lldb_private::LLDBLog::Platform
+#define LIBLLDB_LOG_SYSTEM_RUNTIME ::lldb_private::LLDBLog::SystemRuntime
+#define LIBLLDB_LOG_JIT_LOADER ::lldb_private::LLDBLog::JITLoader
+#define LIBLLDB_LOG_LANGUAGE ::lldb_private::LLDBLog::Language
+#define LIBLLDB_LOG_DATAFORMATTERS ::lldb_private::LLDBLog::DataFormatters
+#define LIBLLDB_LOG_DEMANGLE ::lldb_private::LLDBLog::Demangle
+#define LIBLLDB_LOG_AST ::lldb_private::LLDBLog::AST
-Log *GetLogIfAllCategoriesSet(uint32_t mask);
+Log *GetLogIfAllCategoriesSet(LLDBLog mask);
-Log *GetLogIfAnyCategoriesSet(uint32_t mask);
+Log *GetLogIfAnyCategoriesSet(LLDBLog mask);
void InitializeLldbChannel();
+template <> Log::Channel &LogChannelFor<LLDBLog>();
} // namespace lldb_private
#endif // LLDB_UTILITY_LOGGING_H
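The intended call-site pattern, as the declarations above suggest: the old mask-based lookup keeps compiling through the compatibility #defines, while new code can use the enum directly.

// Old style (still works, since LIBLLDB_LOG_PROCESS now expands to the enum):
lldb_private::Log *log =
    lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS);

// New style, via the GetLog template from Log.h:
lldb_private::Log *log2 = lldb_private::GetLog(lldb_private::LLDBLog::Process);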
diff --git a/contrib/llvm-project/lldb/include/lldb/Utility/Predicate.h b/contrib/llvm-project/lldb/include/lldb/Utility/Predicate.h
index af16abc1a1d3..e5d80acb11bc 100644
--- a/contrib/llvm-project/lldb/include/lldb/Utility/Predicate.h
+++ b/contrib/llvm-project/lldb/include/lldb/Utility/Predicate.h
@@ -44,7 +44,7 @@ public:
///
/// Initializes the mutex, condition and value with their default
/// constructors.
- Predicate() : m_value(), m_mutex(), m_condition() {}
+ Predicate() : m_value() {}
/// Construct with initial T value \a initial_value.
///
@@ -53,8 +53,7 @@ public:
///
/// \param[in] initial_value
/// The initial value for our T object.
- Predicate(T initial_value)
- : m_value(initial_value), m_mutex(), m_condition() {}
+ Predicate(T initial_value) : m_value(initial_value) {}
/// Destructor.
///
diff --git a/contrib/llvm-project/lldb/include/lldb/Utility/ProcessInfo.h b/contrib/llvm-project/lldb/include/lldb/Utility/ProcessInfo.h
index 3c5956926391..fc8b12768999 100644
--- a/contrib/llvm-project/lldb/include/lldb/Utility/ProcessInfo.h
+++ b/contrib/llvm-project/lldb/include/lldb/Utility/ProcessInfo.h
@@ -107,7 +107,7 @@ protected:
// to that process.
class ProcessInstanceInfo : public ProcessInfo {
public:
- ProcessInstanceInfo() : ProcessInfo() {}
+ ProcessInstanceInfo() {}
ProcessInstanceInfo(const char *name, const ArchSpec &arch, lldb::pid_t pid)
: ProcessInfo(name, arch, pid), m_euid(UINT32_MAX), m_egid(UINT32_MAX),
@@ -162,12 +162,11 @@ typedef std::vector<ProcessInstanceInfo> ProcessInstanceInfoList;
class ProcessInstanceInfoMatch {
public:
- ProcessInstanceInfoMatch() : m_match_info() {}
+ ProcessInstanceInfoMatch() {}
ProcessInstanceInfoMatch(const char *process_name,
NameMatch process_name_match_type)
- : m_match_info(), m_name_match_type(process_name_match_type),
- m_match_all_users(false) {
+ : m_name_match_type(process_name_match_type), m_match_all_users(false) {
m_match_info.GetExecutableFile().SetFile(process_name,
FileSpec::Style::native);
}
diff --git a/contrib/llvm-project/lldb/include/lldb/Utility/ReproducerInstrumentation.h b/contrib/llvm-project/lldb/include/lldb/Utility/ReproducerInstrumentation.h
deleted file mode 100644
index 6c5d27879d36..000000000000
--- a/contrib/llvm-project/lldb/include/lldb/Utility/ReproducerInstrumentation.h
+++ /dev/null
@@ -1,1105 +0,0 @@
-//===-- ReproducerInstrumentation.h -----------------------------*- C++ -*-===//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLDB_UTILITY_REPRODUCERINSTRUMENTATION_H
-#define LLDB_UTILITY_REPRODUCERINSTRUMENTATION_H
-
-#include "lldb/Utility/FileSpec.h"
-#include "lldb/Utility/Log.h"
-#include "lldb/Utility/Logging.h"
-
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/ErrorHandling.h"
-
-#include <map>
-#include <thread>
-#include <type_traits>
-
-template <typename T,
- typename std::enable_if<std::is_fundamental<T>::value, int>::type = 0>
-inline void stringify_append(llvm::raw_string_ostream &ss, const T &t) {
- ss << t;
-}
-
-template <typename T, typename std::enable_if<!std::is_fundamental<T>::value,
- int>::type = 0>
-inline void stringify_append(llvm::raw_string_ostream &ss, const T &t) {
- ss << &t;
-}
-
-template <typename T>
-inline void stringify_append(llvm::raw_string_ostream &ss, T *t) {
- ss << reinterpret_cast<void *>(t);
-}
-
-template <typename T>
-inline void stringify_append(llvm::raw_string_ostream &ss, const T *t) {
- ss << reinterpret_cast<const void *>(t);
-}
-
-template <>
-inline void stringify_append<char>(llvm::raw_string_ostream &ss,
- const char *t) {
- ss << '\"' << t << '\"';
-}
-
-template <>
-inline void stringify_append<std::nullptr_t>(llvm::raw_string_ostream &ss,
- const std::nullptr_t &t) {
- ss << "\"nullptr\"";
-}
-
-template <typename Head>
-inline void stringify_helper(llvm::raw_string_ostream &ss, const Head &head) {
- stringify_append(ss, head);
-}
-
-template <typename Head, typename... Tail>
-inline void stringify_helper(llvm::raw_string_ostream &ss, const Head &head,
- const Tail &... tail) {
- stringify_append(ss, head);
- ss << ", ";
- stringify_helper(ss, tail...);
-}
-
-template <typename... Ts> inline std::string stringify_args(const Ts &... ts) {
- std::string buffer;
- llvm::raw_string_ostream ss(buffer);
- stringify_helper(ss, ts...);
- return ss.str();
-}
-
-// Define LLDB_REPRO_INSTR_TRACE to trace to stderr instead of LLDB's log
-// infrastructure. This is useful when you need to see traces before the logger
-// is initialized or enabled.
-// #define LLDB_REPRO_INSTR_TRACE
-
-#ifdef LLDB_REPRO_INSTR_TRACE
-inline llvm::raw_ostream &this_thread_id() {
- size_t tid = std::hash<std::thread::id>{}(std::this_thread::get_id());
- return llvm::errs().write_hex(tid) << " :: ";
-}
-#endif
-
-#define LLDB_REGISTER_CONSTRUCTOR(Class, Signature) \
- R.Register<Class * Signature>(&construct<Class Signature>::record, "", \
- #Class, #Class, #Signature)
-
-#define LLDB_REGISTER_METHOD(Result, Class, Method, Signature) \
- R.Register( \
- &invoke<Result(Class::*) Signature>::method<(&Class::Method)>::record, \
- #Result, #Class, #Method, #Signature)
-
-#define LLDB_REGISTER_METHOD_CONST(Result, Class, Method, Signature) \
- R.Register(&invoke<Result(Class::*) \
- Signature const>::method<(&Class::Method)>::record, \
- #Result, #Class, #Method, #Signature)
-
-#define LLDB_REGISTER_STATIC_METHOD(Result, Class, Method, Signature) \
- R.Register(&invoke<Result(*) Signature>::method<(&Class::Method)>::record, \
- #Result, #Class, #Method, #Signature)
-
-#define LLDB_REGISTER_CHAR_PTR_METHOD_STATIC(Result, Class, Method) \
- R.Register( \
- &invoke<Result (*)(char *, size_t)>::method<(&Class::Method)>::record, \
- &invoke_char_ptr<Result (*)(char *, \
- size_t)>::method<(&Class::Method)>::record, \
- #Result, #Class, #Method, "(char*, size_t");
-
-#define LLDB_REGISTER_CHAR_PTR_METHOD(Result, Class, Method) \
- R.Register(&invoke<Result (Class::*)(char *, size_t)>::method<( \
- &Class::Method)>::record, \
- &invoke_char_ptr<Result (Class::*)(char *, size_t)>::method<( \
- &Class::Method)>::record, \
- #Result, #Class, #Method, "(char*, size_t");
-
-#define LLDB_REGISTER_CHAR_PTR_METHOD_CONST(Result, Class, Method) \
- R.Register(&invoke<Result (Class::*)(char *, size_t) \
- const>::method<(&Class::Method)>::record, \
- &invoke_char_ptr<Result (Class::*)(char *, size_t) \
- const>::method<(&Class::Method)>::record, \
- #Result, #Class, #Method, "(char*, size_t");
-
-#define LLDB_CONSTRUCT_(T, Class, ...) \
- lldb_private::repro::Recorder _recorder(LLVM_PRETTY_FUNCTION); \
- lldb_private::repro::construct<T>::handle(LLDB_GET_INSTRUMENTATION_DATA(), \
- _recorder, Class, __VA_ARGS__);
-
-#define LLDB_RECORD_CONSTRUCTOR(Class, Signature, ...) \
- LLDB_CONSTRUCT_(Class Signature, this, __VA_ARGS__)
-
-#define LLDB_RECORD_CONSTRUCTOR_NO_ARGS(Class) \
- LLDB_CONSTRUCT_(Class(), this, lldb_private::repro::EmptyArg())
-
-#define LLDB_RECORD_(T1, T2, ...) \
- lldb_private::repro::Recorder _recorder(LLVM_PRETTY_FUNCTION, \
- stringify_args(__VA_ARGS__)); \
- if (lldb_private::repro::InstrumentationData _data = \
- LLDB_GET_INSTRUMENTATION_DATA()) { \
- if (lldb_private::repro::Serializer *_serializer = \
- _data.GetSerializer()) { \
- _recorder.Record(*_serializer, _data.GetRegistry(), \
- &lldb_private::repro::invoke<T1>::method<T2>::record, \
- __VA_ARGS__); \
- } else if (lldb_private::repro::Deserializer *_deserializer = \
- _data.GetDeserializer()) { \
- if (_recorder.ShouldCapture()) { \
- return lldb_private::repro::invoke<T1>::method<T2>::replay( \
- _recorder, *_deserializer, _data.GetRegistry()); \
- } \
- } \
- }
-
-#define LLDB_RECORD_METHOD(Result, Class, Method, Signature, ...) \
- LLDB_RECORD_(Result(Class::*) Signature, (&Class::Method), this, __VA_ARGS__)
-
-#define LLDB_RECORD_METHOD_CONST(Result, Class, Method, Signature, ...) \
- LLDB_RECORD_(Result(Class::*) Signature const, (&Class::Method), this, \
- __VA_ARGS__)
-
-#define LLDB_RECORD_METHOD_NO_ARGS(Result, Class, Method) \
- LLDB_RECORD_(Result (Class::*)(), (&Class::Method), this)
-
-#define LLDB_RECORD_METHOD_CONST_NO_ARGS(Result, Class, Method) \
- LLDB_RECORD_(Result (Class::*)() const, (&Class::Method), this)
-
-#define LLDB_RECORD_STATIC_METHOD(Result, Class, Method, Signature, ...) \
- LLDB_RECORD_(Result(*) Signature, (&Class::Method), __VA_ARGS__)
-
-#define LLDB_RECORD_STATIC_METHOD_NO_ARGS(Result, Class, Method) \
- LLDB_RECORD_(Result (*)(), (&Class::Method), lldb_private::repro::EmptyArg())
-
-#define LLDB_RECORD_CHAR_PTR_(T1, T2, StrOut, ...) \
- lldb_private::repro::Recorder _recorder(LLVM_PRETTY_FUNCTION, \
- stringify_args(__VA_ARGS__)); \
- if (lldb_private::repro::InstrumentationData _data = \
- LLDB_GET_INSTRUMENTATION_DATA()) { \
- if (lldb_private::repro::Serializer *_serializer = \
- _data.GetSerializer()) { \
- _recorder.Record(*_serializer, _data.GetRegistry(), \
- &lldb_private::repro::invoke<T1>::method<(T2)>::record, \
- __VA_ARGS__); \
- } else if (lldb_private::repro::Deserializer *_deserializer = \
- _data.GetDeserializer()) { \
- if (_recorder.ShouldCapture()) { \
- return lldb_private::repro::invoke_char_ptr<T1>::method<T2>::replay( \
- _recorder, *_deserializer, _data.GetRegistry(), StrOut); \
- } \
- } \
- }
-
-#define LLDB_RECORD_CHAR_PTR_METHOD(Result, Class, Method, Signature, StrOut, \
- ...) \
- LLDB_RECORD_CHAR_PTR_(Result(Class::*) Signature, (&Class::Method), StrOut, \
- this, __VA_ARGS__)
-
-#define LLDB_RECORD_CHAR_PTR_METHOD_CONST(Result, Class, Method, Signature, \
- StrOut, ...) \
- LLDB_RECORD_CHAR_PTR_(Result(Class::*) Signature const, (&Class::Method), \
- StrOut, this, __VA_ARGS__)
-
-#define LLDB_RECORD_CHAR_PTR_STATIC_METHOD(Result, Class, Method, Signature, \
- StrOut, ...) \
- LLDB_RECORD_CHAR_PTR_(Result(*) Signature, (&Class::Method), StrOut, \
- __VA_ARGS__)
-
-#define LLDB_RECORD_RESULT(Result) _recorder.RecordResult(Result, true);
-
-/// The LLDB_RECORD_DUMMY macro is special because it doesn't actually record
-/// anything. It's used to track API boundaries when we cannot record for
-/// technical reasons.
-#define LLDB_RECORD_DUMMY(Result, Class, Method, Signature, ...) \
- lldb_private::repro::Recorder _recorder;
-
-#define LLDB_RECORD_DUMMY_NO_ARGS(Result, Class, Method) \
- lldb_private::repro::Recorder _recorder;
-
-namespace lldb_private {
-namespace repro {
-
-template <class T>
-struct is_trivially_serializable
- : std::integral_constant<bool, std::is_fundamental<T>::value ||
- std::is_enum<T>::value> {};
-
-/// Mapping between serialized indices and their corresponding objects.
-///
-/// This class is used during replay to map indices back to in-memory objects.
-///
-/// When objects are constructed, they are added to this mapping using
-/// AddObjectForIndex.
-///
-/// When an object is passed to a function, its index is deserialized and
-/// AddObjectForIndex returns the corresponding object. If there is no object
-/// for the given index, a nullptr is returned. The latter is valid when custom
-/// replay code is in place and the actual object is ignored.
-class IndexToObject {
-public:
- /// Returns an object as a pointer for the given index or nullptr if not
- /// present in the map.
- template <typename T> T *GetObjectForIndex(unsigned idx) {
- assert(idx != 0 && "Cannot get object for sentinel");
- void *object = GetObjectForIndexImpl(idx);
- return static_cast<T *>(object);
- }
-
- /// Adds a pointer to an object to the mapping for the given index.
- template <typename T> T *AddObjectForIndex(unsigned idx, T *object) {
- AddObjectForIndexImpl(
- idx, static_cast<void *>(
- const_cast<typename std::remove_const<T>::type *>(object)));
- return object;
- }
-
- /// Adds a reference to an object to the mapping for the given index.
- template <typename T> T &AddObjectForIndex(unsigned idx, T &object) {
- AddObjectForIndexImpl(
- idx, static_cast<void *>(
- const_cast<typename std::remove_const<T>::type *>(&object)));
- return object;
- }
-
- /// Get all objects sorted by their index.
- std::vector<void *> GetAllObjects() const;
-
-private:
- /// Helper method that does the actual lookup. The void* result is later cast
- /// by the caller.
- void *GetObjectForIndexImpl(unsigned idx);
-
- /// Helper method that does the actual insertion.
- void AddObjectForIndexImpl(unsigned idx, void *object);
-
- /// Keeps a mapping between indices and their corresponding object.
- llvm::DenseMap<unsigned, void *> m_mapping;
-};
-
-/// We need to differentiate between pointers to fundamental and
-/// non-fundamental types. See the corresponding Deserializer::Read method
-/// for the reason why.
-struct PointerTag {};
-struct ReferenceTag {};
-struct ValueTag {};
-struct FundamentalPointerTag {};
-struct FundamentalReferenceTag {};
-
-/// Return the deserialization tag for the given type T.
-template <class T> struct serializer_tag {
- typedef typename std::conditional<std::is_trivially_copyable<T>::value,
- ValueTag, ReferenceTag>::type type;
-};
-template <class T> struct serializer_tag<T *> {
- typedef
- typename std::conditional<std::is_fundamental<T>::value,
- FundamentalPointerTag, PointerTag>::type type;
-};
-template <class T> struct serializer_tag<T &> {
- typedef typename std::conditional<std::is_fundamental<T>::value,
- FundamentalReferenceTag, ReferenceTag>::type
- type;
-};
-
-/// Deserializes data from a buffer. It is used to deserialize function indices
-/// to replay, their arguments and return values.
-///
-/// Fundamental types and strings are read by value. Objects are read by their
-/// index, which gets translated by the IndexToObject mapping maintained in
-/// this class.
-///
-/// Additional bookkeeping with regard to the IndexToObject is required to
-/// deserialize objects. When a constructor is run or an object is returned by
-/// value, we need to capture the object and add it to the mapping together
-/// with its index. This is the job of HandleReplayResult(Void).
-class Deserializer {
-public:
- Deserializer(llvm::StringRef buffer) : m_buffer(buffer) {}
-
- /// Returns true when the buffer has unread data.
- bool HasData(unsigned size) { return size <= m_buffer.size(); }
-
- /// Deserialize and interpret value as T.
- template <typename T> T Deserialize() {
- T t = Read<T>(typename serializer_tag<T>::type());
-#ifdef LLDB_REPRO_INSTR_TRACE
- llvm::errs() << "Deserializing with " << LLVM_PRETTY_FUNCTION << " -> "
- << stringify_args(t) << "\n";
-#endif
- return t;
- }
-
- template <typename T> const T &HandleReplayResult(const T &t) {
- CheckSequence(Deserialize<unsigned>());
- unsigned result = Deserialize<unsigned>();
- if (is_trivially_serializable<T>::value)
- return t;
- // We need to make a copy as the original object might go out of scope.
- return *m_index_to_object.AddObjectForIndex(result, new T(t));
- }
-
- /// Store the returned value in the index-to-object mapping.
- template <typename T> T &HandleReplayResult(T &t) {
- CheckSequence(Deserialize<unsigned>());
- unsigned result = Deserialize<unsigned>();
- if (is_trivially_serializable<T>::value)
- return t;
- // We need to make a copy as the original object might go out of scope.
- return *m_index_to_object.AddObjectForIndex(result, new T(t));
- }
-
- /// Store the returned value in the index-to-object mapping.
- template <typename T> T *HandleReplayResult(T *t) {
- CheckSequence(Deserialize<unsigned>());
- unsigned result = Deserialize<unsigned>();
- if (is_trivially_serializable<T>::value)
- return t;
- return m_index_to_object.AddObjectForIndex(result, t);
- }
-
- /// All returned types are recorded, even when the function returns a void.
- /// The latter requires special handling.
- void HandleReplayResultVoid() {
- CheckSequence(Deserialize<unsigned>());
- unsigned result = Deserialize<unsigned>();
- assert(result == 0);
- (void)result;
- }
-
- std::vector<void *> GetAllObjects() const {
- return m_index_to_object.GetAllObjects();
- }
-
- void SetExpectedSequence(unsigned sequence) {
- m_expected_sequence = sequence;
- }
-
-private:
- template <typename T> T Read(ValueTag) {
- assert(HasData(sizeof(T)));
- T t;
- std::memcpy(reinterpret_cast<char *>(&t), m_buffer.data(), sizeof(T));
- m_buffer = m_buffer.drop_front(sizeof(T));
- return t;
- }
-
- template <typename T> T Read(PointerTag) {
- typedef typename std::remove_pointer<T>::type UnderlyingT;
- return m_index_to_object.template GetObjectForIndex<UnderlyingT>(
- Deserialize<unsigned>());
- }
-
- template <typename T> T Read(ReferenceTag) {
- typedef typename std::remove_reference<T>::type UnderlyingT;
- // If this is a reference to a fundamental type we just read its value.
- return *m_index_to_object.template GetObjectForIndex<UnderlyingT>(
- Deserialize<unsigned>());
- }
-
- /// This method is used to parse pointers to fundamental types. Because
- /// they're not recorded in the object table we have serialized their value.
- /// We read the value, allocate a copy on the heap, and return a pointer to
- /// the copy.
- template <typename T> T Read(FundamentalPointerTag) {
- typedef typename std::remove_pointer<T>::type UnderlyingT;
- return new UnderlyingT(Deserialize<UnderlyingT>());
- }
-
- /// This method is used to parse references to fundamental types. Because
- /// they're not recorded in the object table we have serialized their value.
- /// We read its value, allocate a copy on the heap, and return a reference to
- /// the copy.
- template <typename T> T Read(FundamentalReferenceTag) {
- // If this is a reference to a fundamental type we just read its value.
- typedef typename std::remove_reference<T>::type UnderlyingT;
- return *(new UnderlyingT(Deserialize<UnderlyingT>()));
- }
-
- /// Verify that the given sequence number matches what we expect.
- void CheckSequence(unsigned sequence);
-
- /// Mapping of indices to objects.
- IndexToObject m_index_to_object;
-
- /// Buffer containing the serialized data.
- llvm::StringRef m_buffer;
-
- /// The result's expected sequence number.
- llvm::Optional<unsigned> m_expected_sequence;
-};
-
-/// Partial specialization for C-style strings. We read the string value
-/// instead of treating it as a pointer.
-template <> const char *Deserializer::Deserialize<const char *>();
-template <> const char **Deserializer::Deserialize<const char **>();
-template <> const uint8_t *Deserializer::Deserialize<const uint8_t *>();
-template <> const void *Deserializer::Deserialize<const void *>();
-template <> char *Deserializer::Deserialize<char *>();
-template <> void *Deserializer::Deserialize<void *>();
-
-/// Helpers to auto-synthesize function replay code. It deserializes the replay
-/// function's arguments one by one and finally calls the corresponding
-/// function.
-template <typename... Remaining> struct DeserializationHelper;
-
-template <typename Head, typename... Tail>
-struct DeserializationHelper<Head, Tail...> {
- template <typename Result, typename... Deserialized> struct deserialized {
- static Result doit(Deserializer &deserializer,
- Result (*f)(Deserialized..., Head, Tail...),
- Deserialized... d) {
- return DeserializationHelper<Tail...>::
- template deserialized<Result, Deserialized..., Head>::doit(
- deserializer, f, d..., deserializer.Deserialize<Head>());
- }
- };
-};
-
-template <> struct DeserializationHelper<> {
- template <typename Result, typename... Deserialized> struct deserialized {
- static Result doit(Deserializer &deserializer, Result (*f)(Deserialized...),
- Deserialized... d) {
- return f(d...);
- }
- };
-};
-
-/// The replayer interface.
-struct Replayer {
- virtual ~Replayer() = default;
- virtual void operator()(Deserializer &deserializer) const = 0;
-};
-
-/// The default replayer deserializes the arguments and calls the function.
-template <typename Signature> struct DefaultReplayer;
-template <typename Result, typename... Args>
-struct DefaultReplayer<Result(Args...)> : public Replayer {
- DefaultReplayer(Result (*f)(Args...)) : Replayer(), f(f) {}
-
- void operator()(Deserializer &deserializer) const override {
- Replay(deserializer);
- }
-
- Result Replay(Deserializer &deserializer) const {
- return deserializer.HandleReplayResult(
- DeserializationHelper<Args...>::template deserialized<Result>::doit(
- deserializer, f));
- }
-
- Result (*f)(Args...);
-};
-
-/// Partial specialization for function returning a void type. It ignores the
-/// (absent) return value.
-template <typename... Args>
-struct DefaultReplayer<void(Args...)> : public Replayer {
- DefaultReplayer(void (*f)(Args...)) : Replayer(), f(f) {}
-
- void operator()(Deserializer &deserializer) const override {
- Replay(deserializer);
- }
-
- void Replay(Deserializer &deserializer) const {
- DeserializationHelper<Args...>::template deserialized<void>::doit(
- deserializer, f);
- deserializer.HandleReplayResultVoid();
- }
-
- void (*f)(Args...);
-};
-
-/// The registry contains a unique mapping between functions and their ID. The
-/// IDs can be serialized and deserialized to replay a function. Functions need
-/// to be registered with the registry for this to work.
-class Registry {
-private:
- struct SignatureStr {
- SignatureStr(llvm::StringRef result = {}, llvm::StringRef scope = {},
- llvm::StringRef name = {}, llvm::StringRef args = {})
- : result(result), scope(scope), name(name), args(args) {}
-
- std::string ToString() const;
-
- llvm::StringRef result;
- llvm::StringRef scope;
- llvm::StringRef name;
- llvm::StringRef args;
- };
-
-public:
- Registry() = default;
- virtual ~Registry() = default;
-
- /// Register a default replayer for a function.
- template <typename Signature>
- void Register(Signature *f, llvm::StringRef result = {},
- llvm::StringRef scope = {}, llvm::StringRef name = {},
- llvm::StringRef args = {}) {
- DoRegister(uintptr_t(f), std::make_unique<DefaultReplayer<Signature>>(f),
- SignatureStr(result, scope, name, args));
- }
-
- /// Register a replayer that invokes a custom function with the same
- /// signature as the replayed function.
- template <typename Signature>
- void Register(Signature *f, Signature *g, llvm::StringRef result = {},
- llvm::StringRef scope = {}, llvm::StringRef name = {},
- llvm::StringRef args = {}) {
- DoRegister(uintptr_t(f), std::make_unique<DefaultReplayer<Signature>>(g),
- SignatureStr(result, scope, name, args));
- }
-
- /// Replay functions from a file.
- bool Replay(const FileSpec &file);
-
- /// Replay functions from a buffer.
- bool Replay(llvm::StringRef buffer);
-
- /// Replay functions from a deserializer.
- bool Replay(Deserializer &deserializer);
-
- /// Returns the ID for a given function address.
- unsigned GetID(uintptr_t addr);
-
- /// Get the replayer matching the given ID.
- Replayer *GetReplayer(unsigned id);
-
- std::string GetSignature(unsigned id);
-
- void CheckID(unsigned expected, unsigned actual);
-
-protected:
- /// Register the given replayer for a function (and the ID mapping).
- void DoRegister(uintptr_t RunID, std::unique_ptr<Replayer> replayer,
- SignatureStr signature);
-
-private:
- /// Mapping of function addresses to replayers and their ID.
- std::map<uintptr_t, std::pair<std::unique_ptr<Replayer>, unsigned>>
- m_replayers;
-
- /// Mapping of IDs to replayer instances.
- std::map<unsigned, std::pair<Replayer *, SignatureStr>> m_ids;
-};
-
-/// Maps an object to an index for serialization. Indices are unique and
-/// incremented for every new object.
-///
-/// Indices start at 1 in order to differentiate from an invalid index (0) in
-/// the serialized buffer.
-class ObjectToIndex {
-public:
- template <typename T> unsigned GetIndexForObject(T *t) {
- return GetIndexForObjectImpl(static_cast<const void *>(t));
- }
-
-private:
- unsigned GetIndexForObjectImpl(const void *object);
-
- llvm::DenseMap<const void *, unsigned> m_mapping;
-};
-
-/// Serializes functions, their arguments and their return type to a stream.
-class Serializer {
-public:
- Serializer(llvm::raw_ostream &stream = llvm::outs()) : m_stream(stream) {}
-
- /// Recursively serialize all the given arguments.
- template <typename Head, typename... Tail>
- void SerializeAll(const Head &head, const Tail &... tail) {
- Serialize(head);
- SerializeAll(tail...);
- }
-
- void SerializeAll() { m_stream.flush(); }
-
-private:
- /// Serialize pointers. We need to differentiate between pointers to
- /// fundamental types (in which case we serialize their value) and pointers to
- /// objects (in which case we serialize their index).
- template <typename T> void Serialize(T *t) {
-#ifdef LLDB_REPRO_INSTR_TRACE
- this_thread_id() << "Serializing with " << LLVM_PRETTY_FUNCTION << " -> "
- << stringify_args(t) << "\n";
-#endif
- if (std::is_fundamental<T>::value) {
- Serialize(*t);
- } else {
- unsigned idx = m_tracker.GetIndexForObject(t);
- Serialize(idx);
- }
- }
-
- /// Serialize references. We need to differentiate between references to
- /// fundamental types (in which case we serialize their value) and references
- /// to objects (in which case we serialize their index).
- template <typename T> void Serialize(T &t) {
-#ifdef LLDB_REPRO_INSTR_TRACE
- this_thread_id() << "Serializing with " << LLVM_PRETTY_FUNCTION << " -> "
- << stringify_args(t) << "\n";
-#endif
- if (is_trivially_serializable<T>::value) {
- m_stream.write(reinterpret_cast<const char *>(&t), sizeof(T));
- } else {
- unsigned idx = m_tracker.GetIndexForObject(&t);
- Serialize(idx);
- }
- }
-
- void Serialize(const void *v) {
- // FIXME: Support void*
- }
-
- void Serialize(void *v) {
- // FIXME: Support void*
- }
-
- void Serialize(const char *t) {
-#ifdef LLDB_REPRO_INSTR_TRACE
- this_thread_id() << "Serializing with " << LLVM_PRETTY_FUNCTION << " -> "
- << stringify_args(t) << "\n";
-#endif
- const size_t size = t ? strlen(t) : std::numeric_limits<size_t>::max();
- Serialize(size);
- if (t) {
- m_stream << t;
- m_stream.write(0x0);
- }
- }
-
- void Serialize(const char **t) {
- size_t size = 0;
- if (!t) {
- Serialize(size);
- return;
- }
-
- // Compute the size of the array.
- const char *const *temp = t;
- while (*temp++)
- size++;
- Serialize(size);
-
- // Serialize the content of the array.
- while (*t)
- Serialize(*t++);
- }
-
- /// Serialization stream.
- llvm::raw_ostream &m_stream;
-
- /// Mapping of objects to indices.
- ObjectToIndex m_tracker;
-}; // namespace repro
-
-class InstrumentationData {
-public:
- Serializer *GetSerializer() { return m_serializer; }
- Deserializer *GetDeserializer() { return m_deserializer; }
- Registry &GetRegistry() { return *m_registry; }
-
- operator bool() {
- return (m_serializer != nullptr || m_deserializer != nullptr) &&
- m_registry != nullptr;
- }
-
- static void Initialize(Serializer &serializer, Registry &registry);
- static void Initialize(Deserializer &serializer, Registry &registry);
- static InstrumentationData &Instance();
-
-protected:
- friend llvm::optional_detail::OptionalStorage<InstrumentationData, true>;
- friend llvm::Optional<InstrumentationData>;
-
- InstrumentationData() = default;
- InstrumentationData(Serializer &serializer, Registry &registry)
- : m_serializer(&serializer), m_deserializer(nullptr),
- m_registry(&registry) {}
- InstrumentationData(Deserializer &deserializer, Registry &registry)
- : m_serializer(nullptr), m_deserializer(&deserializer),
- m_registry(&registry) {}
-
-private:
- static llvm::Optional<InstrumentationData> &InstanceImpl();
-
- Serializer *m_serializer = nullptr;
- Deserializer *m_deserializer = nullptr;
- Registry *m_registry = nullptr;
-};
-
-struct EmptyArg {};
-
-/// RAII object that records function invocations and their return value.
-///
-/// API calls are only captured when the API boundary is crossed. Once we're in
-/// the API layer, and another API function is called, it doesn't need to be
-/// recorded.
-///
-/// When a call is recorded, its result is always recorded as well, even if the
-/// function returns void. For functions that return by value, RecordResult
-/// should be used. Otherwise a sentinel value (0) will be serialized.
-///
-/// Because of the functional overlap between logging and recording API calls,
-/// this class is also used for logging.
-class Recorder {
-public:
- Recorder();
- Recorder(llvm::StringRef pretty_func, std::string &&pretty_args = {});
- ~Recorder();
-
- /// Records a single function call.
- template <typename Result, typename... FArgs, typename... RArgs>
- void Record(Serializer &serializer, Registry &registry, Result (*f)(FArgs...),
- const RArgs &... args) {
- m_serializer = &serializer;
- if (!ShouldCapture())
- return;
-
- std::lock_guard<std::mutex> lock(g_mutex);
- unsigned sequence = GetSequenceNumber();
- unsigned id = registry.GetID(uintptr_t(f));
-
-#ifdef LLDB_REPRO_INSTR_TRACE
- Log(id);
-#endif
-
- serializer.SerializeAll(sequence);
- serializer.SerializeAll(id);
- serializer.SerializeAll(args...);
-
- if (std::is_class<typename std::remove_pointer<
- typename std::remove_reference<Result>::type>::type>::value) {
- m_result_recorded = false;
- } else {
- serializer.SerializeAll(sequence);
- serializer.SerializeAll(0);
- m_result_recorded = true;
- }
- }
-
- /// Records a single function call.
- template <typename... Args>
- void Record(Serializer &serializer, Registry &registry, void (*f)(Args...),
- const Args &... args) {
- m_serializer = &serializer;
- if (!ShouldCapture())
- return;
-
- std::lock_guard<std::mutex> lock(g_mutex);
- unsigned sequence = GetSequenceNumber();
- unsigned id = registry.GetID(uintptr_t(f));
-
-#ifdef LLDB_REPRO_INSTR_TRACE
- Log(id);
-#endif
-
- serializer.SerializeAll(sequence);
- serializer.SerializeAll(id);
- serializer.SerializeAll(args...);
-
- // Record result.
- serializer.SerializeAll(sequence);
- serializer.SerializeAll(0);
- m_result_recorded = true;
- }
-
- /// Specializations for the no-argument methods. These are passed an empty
- /// dummy argument so the same variadic macro can be used. These methods
- /// strip the arguments before forwarding them.
- template <typename Result>
- void Record(Serializer &serializer, Registry &registry, Result (*f)(),
- const EmptyArg &arg) {
- Record(serializer, registry, f);
- }
-
- /// Record the result of a function call.
- template <typename Result>
- Result RecordResult(Result &&r, bool update_boundary) {
- // When recording the result from the LLDB_RECORD_RESULT macro, we need to
- // update the boundary so we capture the copy constructor. However, when
- // called to record the this pointer of the (copy) constructor, the
- // boundary should not be toggled, because it is called from the
- // LLDB_RECORD_CONSTRUCTOR macro, which might be followed by other API
- // calls.
- if (update_boundary)
- UpdateBoundary();
- if (m_serializer && ShouldCapture()) {
- std::lock_guard<std::mutex> lock(g_mutex);
- assert(!m_result_recorded);
- m_serializer->SerializeAll(GetSequenceNumber());
- m_serializer->SerializeAll(r);
- m_result_recorded = true;
- }
- return std::forward<Result>(r);
- }
-
- template <typename Result, typename T>
- Result Replay(Deserializer &deserializer, Registry &registry, uintptr_t addr,
- bool update_boundary) {
- deserializer.SetExpectedSequence(deserializer.Deserialize<unsigned>());
- unsigned actual_id = registry.GetID(addr);
- unsigned id = deserializer.Deserialize<unsigned>();
- registry.CheckID(id, actual_id);
- return ReplayResult<Result>(
- static_cast<DefaultReplayer<T> *>(registry.GetReplayer(id))
- ->Replay(deserializer),
- update_boundary);
- }
-
- void Replay(Deserializer &deserializer, Registry &registry, uintptr_t addr) {
- deserializer.SetExpectedSequence(deserializer.Deserialize<unsigned>());
- unsigned actual_id = registry.GetID(addr);
- unsigned id = deserializer.Deserialize<unsigned>();
- registry.CheckID(id, actual_id);
- registry.GetReplayer(id)->operator()(deserializer);
- }
-
- template <typename Result>
- Result ReplayResult(Result &&r, bool update_boundary) {
- if (update_boundary)
- UpdateBoundary();
- return std::forward<Result>(r);
- }
-
- bool ShouldCapture() { return m_local_boundary; }
-
- /// Mark the current thread as a private thread and pretend that everything
- /// on this thread is happening behind the API boundary.
- static void PrivateThread();
-
-private:
- static unsigned GetNextSequenceNumber() { return g_sequence++; }
- unsigned GetSequenceNumber() const;
-
- template <typename T> friend struct replay;
- void UpdateBoundary();
-
-#ifdef LLDB_REPRO_INSTR_TRACE
- void Log(unsigned id) {
- this_thread_id() << "Recording " << id << ": " << m_pretty_func << " ("
- << m_pretty_args << ")\n";
- }
-#endif
-
- Serializer *m_serializer = nullptr;
-
- /// Pretty function for logging.
- llvm::StringRef m_pretty_func;
- std::string m_pretty_args;
-
- /// Whether this function call was the one crossing the API boundary.
- bool m_local_boundary = false;
-
- /// Whether the return value was recorded explicitly.
- bool m_result_recorded = true;
-
- /// The sequence number for this pair of function and result.
- unsigned m_sequence;
-
- /// Global mutex to protect concurrent access.
- static std::mutex g_mutex;
-
- /// Unique, monotonically increasing sequence number.
- static std::atomic<unsigned> g_sequence;
-};
-
-/// To be used as the "Runtime ID" of a constructor. It also invokes the
-/// constructor when called.
-template <typename Signature> struct construct;
-template <typename Class, typename... Args> struct construct<Class(Args...)> {
- static Class *handle(lldb_private::repro::InstrumentationData data,
- lldb_private::repro::Recorder &recorder, Class *c,
- const EmptyArg &) {
- return handle(data, recorder, c);
- }
-
- static Class *handle(lldb_private::repro::InstrumentationData data,
- lldb_private::repro::Recorder &recorder, Class *c,
- Args... args) {
- if (!data)
- return nullptr;
-
- if (Serializer *serializer = data.GetSerializer()) {
- recorder.Record(*serializer, data.GetRegistry(), &record, args...);
- recorder.RecordResult(c, false);
- } else if (Deserializer *deserializer = data.GetDeserializer()) {
- if (recorder.ShouldCapture()) {
- replay(recorder, *deserializer, data.GetRegistry());
- }
- }
-
- return nullptr;
- }
-
- static Class *record(Args... args) { return new Class(args...); }
-
- static Class *replay(Recorder &recorder, Deserializer &deserializer,
- Registry &registry) {
- return recorder.Replay<Class *, Class *(Args...)>(
- deserializer, registry, uintptr_t(&record), false);
- }
-};
-
-/// To be used as the "Runtime ID" of a member function. It also invokes the
-/// member function when called.
-template <typename Signature> struct invoke;
-template <typename Result, typename Class, typename... Args>
-struct invoke<Result (Class::*)(Args...)> {
- template <Result (Class::*m)(Args...)> struct method {
- static Result record(Class *c, Args... args) { return (c->*m)(args...); }
-
- static Result replay(Recorder &recorder, Deserializer &deserializer,
- Registry &registry) {
- return recorder.Replay<Result, Result(Class *, Args...)>(
- deserializer, registry, uintptr_t(&record), true);
- }
- };
-};
-
-template <typename Class, typename... Args>
-struct invoke<void (Class::*)(Args...)> {
- template <void (Class::*m)(Args...)> struct method {
- static void record(Class *c, Args... args) { (c->*m)(args...); }
- static void replay(Recorder &recorder, Deserializer &deserializer,
- Registry &registry) {
- recorder.Replay(deserializer, registry, uintptr_t(&record));
- }
- };
-};
-
-template <typename Result, typename Class, typename... Args>
-struct invoke<Result (Class::*)(Args...) const> {
- template <Result (Class::*m)(Args...) const> struct method {
- static Result record(Class *c, Args... args) { return (c->*m)(args...); }
- static Result replay(Recorder &recorder, Deserializer &deserializer,
- Registry &registry) {
- return recorder.Replay<Result, Result(Class *, Args...)>(
- deserializer, registry, uintptr_t(&record), true);
- }
- };
-};
-
-template <typename Class, typename... Args>
-struct invoke<void (Class::*)(Args...) const> {
- template <void (Class::*m)(Args...) const> struct method {
- static void record(Class *c, Args... args) { return (c->*m)(args...); }
- static void replay(Recorder &recorder, Deserializer &deserializer,
- Registry &registry) {
- recorder.Replay(deserializer, registry, uintptr_t(&record));
- }
- };
-};
-
-template <typename Signature> struct replay;
-
-template <typename Result, typename Class, typename... Args>
-struct replay<Result (Class::*)(Args...)> {
- template <Result (Class::*m)(Args...)> struct method {};
-};
-
-template <typename Result, typename... Args>
-struct invoke<Result (*)(Args...)> {
- template <Result (*m)(Args...)> struct method {
- static Result record(Args... args) { return (*m)(args...); }
- static Result replay(Recorder &recorder, Deserializer &deserializer,
- Registry &registry) {
- return recorder.Replay<Result, Result(Args...)>(deserializer, registry,
- uintptr_t(&record), true);
- }
- };
-};
-
-template <typename... Args> struct invoke<void (*)(Args...)> {
- template <void (*m)(Args...)> struct method {
- static void record(Args... args) { return (*m)(args...); }
- static void replay(Recorder &recorder, Deserializer &deserializer,
- Registry &registry) {
- recorder.Replay(deserializer, registry, uintptr_t(&record));
- }
- };
-};
-
-/// Special handling for functions returning strings as (char*, size_t).
-/// {
-
-/// For inline replay, we ignore the arguments and use the ones from the
-/// serializer instead. This doesn't work for methods that use a char* and a
-/// size to return a string. For one, these functions have a custom replayer to
-/// prevent overriding the input buffer. Furthermore, the template-generated
-/// deserialization is not easy to hook into.
-///
-/// The specializations below hand-implement the serialization logic for the
-/// inline replay. Instead of using the function from the registry, it uses the
-/// one passed into the macro.
-template <typename Signature> struct invoke_char_ptr;
-template <typename Result, typename Class, typename... Args>
-struct invoke_char_ptr<Result (Class::*)(Args...) const> {
- template <Result (Class::*m)(Args...) const> struct method {
- static Result record(Class *c, char *s, size_t l) {
- char *buffer = reinterpret_cast<char *>(calloc(l, sizeof(char)));
- return (c->*m)(buffer, l);
- }
-
- static Result replay(Recorder &recorder, Deserializer &deserializer,
- Registry &registry, char *str) {
- deserializer.SetExpectedSequence(deserializer.Deserialize<unsigned>());
- deserializer.Deserialize<unsigned>();
- Class *c = deserializer.Deserialize<Class *>();
- deserializer.Deserialize<const char *>();
- size_t l = deserializer.Deserialize<size_t>();
- return recorder.ReplayResult(
- std::move(deserializer.HandleReplayResult((c->*m)(str, l))), true);
- }
- };
-};
-
-template <typename Signature> struct invoke_char_ptr;
-template <typename Result, typename Class, typename... Args>
-struct invoke_char_ptr<Result (Class::*)(Args...)> {
- template <Result (Class::*m)(Args...)> struct method {
- static Result record(Class *c, char *s, size_t l) {
- char *buffer = reinterpret_cast<char *>(calloc(l, sizeof(char)));
- return (c->*m)(buffer, l);
- }
-
- static Result replay(Recorder &recorder, Deserializer &deserializer,
- Registry &registry, char *str) {
- deserializer.SetExpectedSequence(deserializer.Deserialize<unsigned>());
- deserializer.Deserialize<unsigned>();
- Class *c = deserializer.Deserialize<Class *>();
- deserializer.Deserialize<const char *>();
- size_t l = deserializer.Deserialize<size_t>();
- return recorder.ReplayResult(
- std::move(deserializer.HandleReplayResult((c->*m)(str, l))), true);
- }
- };
-};
-
-template <typename Result, typename... Args>
-struct invoke_char_ptr<Result (*)(Args...)> {
- template <Result (*m)(Args...)> struct method {
- static Result record(char *s, size_t l) {
- char *buffer = reinterpret_cast<char *>(calloc(l, sizeof(char)));
- return (*m)(buffer, l);
- }
-
- static Result replay(Recorder &recorder, Deserializer &deserializer,
- Registry &registry, char *str) {
- deserializer.SetExpectedSequence(deserializer.Deserialize<unsigned>());
- deserializer.Deserialize<unsigned>();
- deserializer.Deserialize<const char *>();
- size_t l = deserializer.Deserialize<size_t>();
- return recorder.ReplayResult(
- std::move(deserializer.HandleReplayResult((*m)(str, l))), true);
- }
- };
-};
-/// }
-
-} // namespace repro
-} // namespace lldb_private
-
-#endif // LLDB_UTILITY_REPRODUCERINSTRUMENTATION_H
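
For reference, the header deleted above also carried the small stringify_args helpers used to pretty-print recorded arguments: fundamental values are streamed directly, C strings are quoted, and any other pointer or reference is shown as an address. A self-contained sketch of that technique follows; it swaps llvm::raw_string_ostream for std::ostringstream so it compiles outside the LLVM tree, and is illustrative only, not a replacement for the removed code:

    #include <sstream>
    #include <string>
    #include <type_traits>

    // Fundamental types are printed by value.
    template <typename T,
              typename std::enable_if<std::is_fundamental<T>::value, int>::type = 0>
    void stringify_append(std::ostringstream &ss, const T &t) {
      ss << t;
    }

    // Any other object is identified by its address.
    template <typename T,
              typename std::enable_if<!std::is_fundamental<T>::value, int>::type = 0>
    void stringify_append(std::ostringstream &ss, const T &t) {
      ss << &t;
    }

    // Pointers are printed as addresses ...
    template <typename T> void stringify_append(std::ostringstream &ss, T *t) {
      ss << reinterpret_cast<void *>(t);
    }
    template <typename T> void stringify_append(std::ostringstream &ss, const T *t) {
      ss << reinterpret_cast<const void *>(t);
    }
    // ... except C strings, which are quoted.
    template <>
    void stringify_append<char>(std::ostringstream &ss, const char *t) {
      ss << '"' << (t ? t : "nullptr") << '"';
    }

    template <typename Head>
    void stringify_helper(std::ostringstream &ss, const Head &head) {
      stringify_append(ss, head);
    }
    template <typename Head, typename... Tail>
    void stringify_helper(std::ostringstream &ss, const Head &head,
                          const Tail &...tail) {
      stringify_append(ss, head);
      ss << ", ";
      stringify_helper(ss, tail...);
    }

    // Builds a comma-separated string; e.g. with `const char *s = "hi";`
    // stringify_args(47, s) yields: 47, "hi"
    template <typename... Ts> std::string stringify_args(const Ts &...ts) {
      std::ostringstream ss;
      stringify_helper(ss, ts...);
      return ss.str();
    }
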
diff --git a/contrib/llvm-project/lldb/include/lldb/Utility/ReproducerProvider.h b/contrib/llvm-project/lldb/include/lldb/Utility/ReproducerProvider.h
index db7378069a87..56aadf92369e 100644
--- a/contrib/llvm-project/lldb/include/lldb/Utility/ReproducerProvider.h
+++ b/contrib/llvm-project/lldb/include/lldb/Utility/ReproducerProvider.h
@@ -222,8 +222,7 @@ public:
/// Provider for mapping UUIDs to symbol and executable files.
class SymbolFileProvider : public Provider<SymbolFileProvider> {
public:
- SymbolFileProvider(const FileSpec &directory)
- : Provider(directory), m_symbol_files() {}
+ SymbolFileProvider(const FileSpec &directory) : Provider(directory) {}
void AddSymbolFile(const UUID *uuid, const FileSpec &module_path,
const FileSpec &symbol_path);
diff --git a/contrib/llvm-project/lldb/include/lldb/Utility/SharedCluster.h b/contrib/llvm-project/lldb/include/lldb/Utility/SharedCluster.h
index 375c1c131a09..b3f41dbaa64b 100644
--- a/contrib/llvm-project/lldb/include/lldb/Utility/SharedCluster.h
+++ b/contrib/llvm-project/lldb/include/lldb/Utility/SharedCluster.h
@@ -48,7 +48,7 @@ public:
}
private:
- ClusterManager() : m_objects(), m_mutex() {}
+ ClusterManager() : m_objects() {}
llvm::SmallVector<T *, 16> m_objects;
std::mutex m_mutex;
diff --git a/contrib/llvm-project/lldb/include/lldb/Utility/StreamTee.h b/contrib/llvm-project/lldb/include/lldb/Utility/StreamTee.h
index b5d3b9679e91..5695586171f3 100644
--- a/contrib/llvm-project/lldb/include/lldb/Utility/StreamTee.h
+++ b/contrib/llvm-project/lldb/include/lldb/Utility/StreamTee.h
@@ -19,18 +19,15 @@ namespace lldb_private {
class StreamTee : public Stream {
public:
- StreamTee(bool colors = false)
- : Stream(colors), m_streams_mutex(), m_streams() {}
+ StreamTee(bool colors = false) : Stream(colors) {}
- StreamTee(lldb::StreamSP &stream_sp)
- : Stream(), m_streams_mutex(), m_streams() {
+ StreamTee(lldb::StreamSP &stream_sp) {
// No need to lock mutex during construction
if (stream_sp)
m_streams.push_back(stream_sp);
}
- StreamTee(lldb::StreamSP &stream_sp, lldb::StreamSP &stream_2_sp)
- : Stream(), m_streams_mutex(), m_streams() {
+ StreamTee(lldb::StreamSP &stream_sp, lldb::StreamSP &stream_2_sp) {
// No need to lock mutex during construction
if (stream_sp)
m_streams.push_back(stream_sp);
@@ -38,8 +35,7 @@ public:
m_streams.push_back(stream_2_sp);
}
- StreamTee(const StreamTee &rhs)
- : Stream(rhs), m_streams_mutex(), m_streams() {
+ StreamTee(const StreamTee &rhs) : Stream(rhs) {
// Don't copy until we lock down "rhs"
std::lock_guard<std::recursive_mutex> guard(rhs.m_streams_mutex);
m_streams = rhs.m_streams;
diff --git a/contrib/llvm-project/lldb/include/lldb/Utility/StringExtractorGDBRemote.h b/contrib/llvm-project/lldb/include/lldb/Utility/StringExtractorGDBRemote.h
index 1712c113d396..459adae04452 100644
--- a/contrib/llvm-project/lldb/include/lldb/Utility/StringExtractorGDBRemote.h
+++ b/contrib/llvm-project/lldb/include/lldb/Utility/StringExtractorGDBRemote.h
@@ -23,7 +23,7 @@ public:
typedef bool (*ResponseValidatorCallback)(
void *baton, const StringExtractorGDBRemote &response);
- StringExtractorGDBRemote() : StringExtractor() {}
+ StringExtractorGDBRemote() {}
StringExtractorGDBRemote(llvm::StringRef str)
: StringExtractor(str), m_validator(nullptr) {}
diff --git a/contrib/llvm-project/lldb/include/lldb/Utility/StringList.h b/contrib/llvm-project/lldb/include/lldb/Utility/StringList.h
index 70f4654a6ac9..1357cf17173a 100644
--- a/contrib/llvm-project/lldb/include/lldb/Utility/StringList.h
+++ b/contrib/llvm-project/lldb/include/lldb/Utility/StringList.h
@@ -10,6 +10,7 @@
#define LLDB_UTILITY_STRINGLIST_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
#include <cstddef>
#include <string>
@@ -44,6 +45,8 @@ public:
void AppendString(llvm::StringRef str);
+ void AppendString(const llvm::Twine &str);
+
void AppendList(const char **strv, int strc);
void AppendList(StringList strings);
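
The StringList hunk above adds an llvm::Twine overload of AppendString next to the existing StringRef one. A hedged sketch of a caller taking advantage of it; the helper function and its names are invented for illustration, while StringList::AppendString and the Twine include come from the hunk:

    #include "lldb/Utility/StringList.h"
    #include "llvm/ADT/Twine.h"

    // Appends "<base>_<index>" without materializing an intermediate std::string;
    // the Twine is consumed inside the full expression, which keeps it valid.
    static void AppendNumberedName(lldb_private::StringList &list,
                                   llvm::StringRef base, unsigned index) {
      list.AppendString(base + "_" + llvm::Twine(index));
    }
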
diff --git a/contrib/llvm-project/lldb/include/lldb/Utility/StructuredData.h b/contrib/llvm-project/lldb/include/lldb/Utility/StructuredData.h
index c1d136db1c2e..11eee92f8c3b 100644
--- a/contrib/llvm-project/lldb/include/lldb/Utility/StructuredData.h
+++ b/contrib/llvm-project/lldb/include/lldb/Utility/StructuredData.h
@@ -351,10 +351,9 @@ public:
class Dictionary : public Object {
public:
- Dictionary() : Object(lldb::eStructuredDataTypeDictionary), m_dict() {}
+ Dictionary() : Object(lldb::eStructuredDataTypeDictionary) {}
- Dictionary(ObjectSP obj_sp)
- : Object(lldb::eStructuredDataTypeDictionary), m_dict() {
+ Dictionary(ObjectSP obj_sp) : Object(lldb::eStructuredDataTypeDictionary) {
if (!obj_sp || obj_sp->GetType() != lldb::eStructuredDataTypeDictionary) {
SetType(lldb::eStructuredDataTypeInvalid);
return;
diff --git a/contrib/llvm-project/lldb/include/lldb/Utility/UserIDResolver.h b/contrib/llvm-project/lldb/include/lldb/Utility/UserIDResolver.h
index e0f7b69cc075..15afafd65a1a 100644
--- a/contrib/llvm-project/lldb/include/lldb/Utility/UserIDResolver.h
+++ b/contrib/llvm-project/lldb/include/lldb/Utility/UserIDResolver.h
@@ -10,6 +10,7 @@
#define LLDB_UTILITY_USERIDRESOLVER_H
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"
#include <mutex>
diff --git a/contrib/llvm-project/lldb/include/lldb/lldb-defines.h b/contrib/llvm-project/lldb/include/lldb/lldb-defines.h
index 4bf01c3f86c6..339071bbfc31 100644
--- a/contrib/llvm-project/lldb/include/lldb/lldb-defines.h
+++ b/contrib/llvm-project/lldb/include/lldb/lldb-defines.h
@@ -11,18 +11,6 @@
#include "lldb/lldb-types.h"
-#if defined(_WIN32)
-#if defined(EXPORT_LIBLLDB)
-#define LLDB_API __declspec(dllexport)
-#elif defined(IMPORT_LIBLLDB)
-#define LLDB_API __declspec(dllimport)
-#else
-#define LLDB_API
-#endif
-#else // defined (_WIN32)
-#define LLDB_API
-#endif
-
#if !defined(INT32_MAX)
#define INT32_MAX 2147483647
#endif
diff --git a/contrib/llvm-project/lldb/include/lldb/module.modulemap b/contrib/llvm-project/lldb/include/lldb/module.modulemap
index c0d467a6505e..303d6b15e808 100644
--- a/contrib/llvm-project/lldb/include/lldb/module.modulemap
+++ b/contrib/llvm-project/lldb/include/lldb/module.modulemap
@@ -2,7 +2,7 @@
module lldb_API {
requires cplusplus
- textual header "Utility/ReproducerInstrumentation.h"
+ textual header "Utility/Instrumentation.h"
umbrella "API"
module * { export * }
diff --git a/contrib/llvm-project/lldb/source/API/SBAddress.cpp b/contrib/llvm-project/lldb/source/API/SBAddress.cpp
index 7c102270a87c..e519f0bcc83c 100644
--- a/contrib/llvm-project/lldb/source/API/SBAddress.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBAddress.cpp
@@ -7,7 +7,6 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBAddress.h"
-#include "SBReproducerPrivate.h"
#include "Utils.h"
#include "lldb/API/SBProcess.h"
#include "lldb/API/SBSection.h"
@@ -16,35 +15,34 @@
#include "lldb/Core/Module.h"
#include "lldb/Symbol/LineEntry.h"
#include "lldb/Target/Target.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Utility/StreamString.h"
using namespace lldb;
using namespace lldb_private;
SBAddress::SBAddress() : m_opaque_up(new Address()) {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBAddress);
+ LLDB_INSTRUMENT_VA(this);
}
SBAddress::SBAddress(const Address &address)
: m_opaque_up(std::make_unique<Address>(address)) {}
SBAddress::SBAddress(const SBAddress &rhs) : m_opaque_up(new Address()) {
- LLDB_RECORD_CONSTRUCTOR(SBAddress, (const lldb::SBAddress &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_up = clone(rhs.m_opaque_up);
}
SBAddress::SBAddress(lldb::SBSection section, lldb::addr_t offset)
: m_opaque_up(new Address(section.GetSP(), offset)) {
- LLDB_RECORD_CONSTRUCTOR(SBAddress, (lldb::SBSection, lldb::addr_t), section,
- offset);
+ LLDB_INSTRUMENT_VA(this, section, offset);
}
// Create an address by resolving a load address using the supplied target
SBAddress::SBAddress(lldb::addr_t load_addr, lldb::SBTarget &target)
: m_opaque_up(new Address()) {
- LLDB_RECORD_CONSTRUCTOR(SBAddress, (lldb::addr_t, lldb::SBTarget &),
- load_addr, target);
+ LLDB_INSTRUMENT_VA(this, load_addr, target);
SetLoadAddress(load_addr, target);
}
@@ -52,12 +50,11 @@ SBAddress::SBAddress(lldb::addr_t load_addr, lldb::SBTarget &target)
SBAddress::~SBAddress() = default;
const SBAddress &SBAddress::operator=(const SBAddress &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBAddress &,
- SBAddress, operator=,(const lldb::SBAddress &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_up = clone(rhs.m_opaque_up);
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
bool lldb::operator==(const SBAddress &lhs, const SBAddress &rhs) {
@@ -67,31 +64,29 @@ bool lldb::operator==(const SBAddress &lhs, const SBAddress &rhs) {
}
bool SBAddress::operator!=(const SBAddress &rhs) const {
- LLDB_RECORD_METHOD_CONST(bool, SBAddress, operator!=,(const SBAddress &),
- &rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
return !(*this == rhs);
}
bool SBAddress::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBAddress, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBAddress::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBAddress, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up != nullptr && m_opaque_up->IsValid();
}
void SBAddress::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBAddress, Clear);
+ LLDB_INSTRUMENT_VA(this);
m_opaque_up = std::make_unique<Address>();
}
void SBAddress::SetAddress(lldb::SBSection section, lldb::addr_t offset) {
- LLDB_RECORD_METHOD(void, SBAddress, SetAddress,
- (lldb::SBSection, lldb::addr_t), section, offset);
+ LLDB_INSTRUMENT_VA(this, section, offset);
Address &addr = ref();
addr.SetSection(section.GetSP());
@@ -101,7 +96,7 @@ void SBAddress::SetAddress(lldb::SBSection section, lldb::addr_t offset) {
void SBAddress::SetAddress(const Address &address) { ref() = address; }
lldb::addr_t SBAddress::GetFileAddress() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::addr_t, SBAddress, GetFileAddress);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_up->IsValid())
return m_opaque_up->GetFileAddress();
@@ -110,8 +105,7 @@ lldb::addr_t SBAddress::GetFileAddress() const {
}
lldb::addr_t SBAddress::GetLoadAddress(const SBTarget &target) const {
- LLDB_RECORD_METHOD_CONST(lldb::addr_t, SBAddress, GetLoadAddress,
- (const lldb::SBTarget &), target);
+ LLDB_INSTRUMENT_VA(this, target);
lldb::addr_t addr = LLDB_INVALID_ADDRESS;
TargetSP target_sp(target.GetSP());
@@ -126,8 +120,7 @@ lldb::addr_t SBAddress::GetLoadAddress(const SBTarget &target) const {
}
void SBAddress::SetLoadAddress(lldb::addr_t load_addr, lldb::SBTarget &target) {
- LLDB_RECORD_METHOD(void, SBAddress, SetLoadAddress,
- (lldb::addr_t, lldb::SBTarget &), load_addr, target);
+ LLDB_INSTRUMENT_VA(this, load_addr, target);
// Create the address object if we don't already have one
ref();
@@ -144,7 +137,7 @@ void SBAddress::SetLoadAddress(lldb::addr_t load_addr, lldb::SBTarget &target) {
}
bool SBAddress::OffsetAddress(addr_t offset) {
- LLDB_RECORD_METHOD(bool, SBAddress, OffsetAddress, (lldb::addr_t), offset);
+ LLDB_INSTRUMENT_VA(this, offset);
if (m_opaque_up->IsValid()) {
addr_t addr_offset = m_opaque_up->GetOffset();
@@ -157,16 +150,16 @@ bool SBAddress::OffsetAddress(addr_t offset) {
}
lldb::SBSection SBAddress::GetSection() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBSection, SBAddress, GetSection);
+ LLDB_INSTRUMENT_VA(this);
lldb::SBSection sb_section;
if (m_opaque_up->IsValid())
sb_section.SetSP(m_opaque_up->GetSection());
- return LLDB_RECORD_RESULT(sb_section);
+ return sb_section;
}
lldb::addr_t SBAddress::GetOffset() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::addr_t, SBAddress, GetOffset);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_up->IsValid())
return m_opaque_up->GetOffset();
@@ -193,8 +186,7 @@ const Address &SBAddress::ref() const {
Address *SBAddress::get() { return m_opaque_up.get(); }
bool SBAddress::GetDescription(SBStream &description) {
- LLDB_RECORD_METHOD(bool, SBAddress, GetDescription, (lldb::SBStream &),
- description);
+ LLDB_INSTRUMENT_VA(this, description);
// Call "ref()" on the stream to make sure it creates a backing stream in
// case there isn't one already...
@@ -209,63 +201,62 @@ bool SBAddress::GetDescription(SBStream &description) {
}
SBModule SBAddress::GetModule() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBModule, SBAddress, GetModule);
+ LLDB_INSTRUMENT_VA(this);
SBModule sb_module;
if (m_opaque_up->IsValid())
sb_module.SetSP(m_opaque_up->GetModule());
- return LLDB_RECORD_RESULT(sb_module);
+ return sb_module;
}
SBSymbolContext SBAddress::GetSymbolContext(uint32_t resolve_scope) {
- LLDB_RECORD_METHOD(lldb::SBSymbolContext, SBAddress, GetSymbolContext,
- (uint32_t), resolve_scope);
+ LLDB_INSTRUMENT_VA(this, resolve_scope);
SBSymbolContext sb_sc;
SymbolContextItem scope = static_cast<SymbolContextItem>(resolve_scope);
if (m_opaque_up->IsValid())
m_opaque_up->CalculateSymbolContext(&sb_sc.ref(), scope);
- return LLDB_RECORD_RESULT(sb_sc);
+ return sb_sc;
}
SBCompileUnit SBAddress::GetCompileUnit() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBCompileUnit, SBAddress, GetCompileUnit);
+ LLDB_INSTRUMENT_VA(this);
SBCompileUnit sb_comp_unit;
if (m_opaque_up->IsValid())
sb_comp_unit.reset(m_opaque_up->CalculateSymbolContextCompileUnit());
- return LLDB_RECORD_RESULT(sb_comp_unit);
+ return sb_comp_unit;
}
SBFunction SBAddress::GetFunction() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBFunction, SBAddress, GetFunction);
+ LLDB_INSTRUMENT_VA(this);
SBFunction sb_function;
if (m_opaque_up->IsValid())
sb_function.reset(m_opaque_up->CalculateSymbolContextFunction());
- return LLDB_RECORD_RESULT(sb_function);
+ return sb_function;
}
SBBlock SBAddress::GetBlock() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBBlock, SBAddress, GetBlock);
+ LLDB_INSTRUMENT_VA(this);
SBBlock sb_block;
if (m_opaque_up->IsValid())
sb_block.SetPtr(m_opaque_up->CalculateSymbolContextBlock());
- return LLDB_RECORD_RESULT(sb_block);
+ return sb_block;
}
SBSymbol SBAddress::GetSymbol() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBSymbol, SBAddress, GetSymbol);
+ LLDB_INSTRUMENT_VA(this);
SBSymbol sb_symbol;
if (m_opaque_up->IsValid())
sb_symbol.reset(m_opaque_up->CalculateSymbolContextSymbol());
- return LLDB_RECORD_RESULT(sb_symbol);
+ return sb_symbol;
}
SBLineEntry SBAddress::GetLineEntry() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBLineEntry, SBAddress, GetLineEntry);
+ LLDB_INSTRUMENT_VA(this);
SBLineEntry sb_line_entry;
if (m_opaque_up->IsValid()) {
@@ -273,45 +264,5 @@ SBLineEntry SBAddress::GetLineEntry() {
if (m_opaque_up->CalculateSymbolContextLineEntry(line_entry))
sb_line_entry.SetLineEntry(line_entry);
}
- return LLDB_RECORD_RESULT(sb_line_entry);
-}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBAddress>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBAddress, ());
- LLDB_REGISTER_CONSTRUCTOR(SBAddress, (const lldb::SBAddress &));
- LLDB_REGISTER_CONSTRUCTOR(SBAddress, (lldb::SBSection, lldb::addr_t));
- LLDB_REGISTER_CONSTRUCTOR(SBAddress, (lldb::addr_t, lldb::SBTarget &));
- LLDB_REGISTER_METHOD(const lldb::SBAddress &,
- SBAddress, operator=,(const lldb::SBAddress &));
- LLDB_REGISTER_METHOD_CONST(bool,
- SBAddress, operator!=,(const lldb::SBAddress &));
- LLDB_REGISTER_METHOD_CONST(bool, SBAddress, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBAddress, operator bool, ());
- LLDB_REGISTER_METHOD(void, SBAddress, Clear, ());
- LLDB_REGISTER_METHOD(void, SBAddress, SetAddress,
- (lldb::SBSection, lldb::addr_t));
- LLDB_REGISTER_METHOD_CONST(lldb::addr_t, SBAddress, GetFileAddress, ());
- LLDB_REGISTER_METHOD_CONST(lldb::addr_t, SBAddress, GetLoadAddress,
- (const lldb::SBTarget &));
- LLDB_REGISTER_METHOD(void, SBAddress, SetLoadAddress,
- (lldb::addr_t, lldb::SBTarget &));
- LLDB_REGISTER_METHOD(bool, SBAddress, OffsetAddress, (lldb::addr_t));
- LLDB_REGISTER_METHOD(lldb::SBSection, SBAddress, GetSection, ());
- LLDB_REGISTER_METHOD(lldb::addr_t, SBAddress, GetOffset, ());
- LLDB_REGISTER_METHOD(bool, SBAddress, GetDescription, (lldb::SBStream &));
- LLDB_REGISTER_METHOD(lldb::SBModule, SBAddress, GetModule, ());
- LLDB_REGISTER_METHOD(lldb::SBSymbolContext, SBAddress, GetSymbolContext,
- (uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBCompileUnit, SBAddress, GetCompileUnit, ());
- LLDB_REGISTER_METHOD(lldb::SBFunction, SBAddress, GetFunction, ());
- LLDB_REGISTER_METHOD(lldb::SBBlock, SBAddress, GetBlock, ());
- LLDB_REGISTER_METHOD(lldb::SBSymbol, SBAddress, GetSymbol, ());
- LLDB_REGISTER_METHOD(lldb::SBLineEntry, SBAddress, GetLineEntry, ());
-}
-
-}
+ return sb_line_entry;
}
diff --git a/contrib/llvm-project/lldb/source/API/SBAttachInfo.cpp b/contrib/llvm-project/lldb/source/API/SBAttachInfo.cpp
index b21589cf2708..edb4f7104d41 100644
--- a/contrib/llvm-project/lldb/source/API/SBAttachInfo.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBAttachInfo.cpp
@@ -7,29 +7,29 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBAttachInfo.h"
-#include "SBReproducerPrivate.h"
#include "Utils.h"
#include "lldb/API/SBFileSpec.h"
#include "lldb/API/SBListener.h"
#include "lldb/Target/Process.h"
+#include "lldb/Utility/Instrumentation.h"
using namespace lldb;
using namespace lldb_private;
SBAttachInfo::SBAttachInfo() : m_opaque_sp(new ProcessAttachInfo()) {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBAttachInfo);
+ LLDB_INSTRUMENT_VA(this);
}
SBAttachInfo::SBAttachInfo(lldb::pid_t pid)
: m_opaque_sp(new ProcessAttachInfo()) {
- LLDB_RECORD_CONSTRUCTOR(SBAttachInfo, (lldb::pid_t), pid);
+ LLDB_INSTRUMENT_VA(this, pid);
m_opaque_sp->SetProcessID(pid);
}
SBAttachInfo::SBAttachInfo(const char *path, bool wait_for)
: m_opaque_sp(new ProcessAttachInfo()) {
- LLDB_RECORD_CONSTRUCTOR(SBAttachInfo, (const char *, bool), path, wait_for);
+ LLDB_INSTRUMENT_VA(this, path, wait_for);
if (path && path[0])
m_opaque_sp->GetExecutableFile().SetFile(path, FileSpec::Style::native);
@@ -38,8 +38,7 @@ SBAttachInfo::SBAttachInfo(const char *path, bool wait_for)
SBAttachInfo::SBAttachInfo(const char *path, bool wait_for, bool async)
: m_opaque_sp(new ProcessAttachInfo()) {
- LLDB_RECORD_CONSTRUCTOR(SBAttachInfo, (const char *, bool, bool), path,
- wait_for, async);
+ LLDB_INSTRUMENT_VA(this, path, wait_for, async);
if (path && path[0])
m_opaque_sp->GetExecutableFile().SetFile(path, FileSpec::Style::native);
@@ -49,7 +48,7 @@ SBAttachInfo::SBAttachInfo(const char *path, bool wait_for, bool async)
SBAttachInfo::SBAttachInfo(const SBAttachInfo &rhs)
: m_opaque_sp(new ProcessAttachInfo()) {
- LLDB_RECORD_CONSTRUCTOR(SBAttachInfo, (const lldb::SBAttachInfo &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_sp = clone(rhs.m_opaque_sp);
}
@@ -59,53 +58,51 @@ SBAttachInfo::~SBAttachInfo() = default;
lldb_private::ProcessAttachInfo &SBAttachInfo::ref() { return *m_opaque_sp; }
SBAttachInfo &SBAttachInfo::operator=(const SBAttachInfo &rhs) {
- LLDB_RECORD_METHOD(lldb::SBAttachInfo &,
- SBAttachInfo, operator=,(const lldb::SBAttachInfo &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_sp = clone(rhs.m_opaque_sp);
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
lldb::pid_t SBAttachInfo::GetProcessID() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::pid_t, SBAttachInfo, GetProcessID);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GetProcessID();
}
void SBAttachInfo::SetProcessID(lldb::pid_t pid) {
- LLDB_RECORD_METHOD(void, SBAttachInfo, SetProcessID, (lldb::pid_t), pid);
+ LLDB_INSTRUMENT_VA(this, pid);
m_opaque_sp->SetProcessID(pid);
}
uint32_t SBAttachInfo::GetResumeCount() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBAttachInfo, GetResumeCount);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GetResumeCount();
}
void SBAttachInfo::SetResumeCount(uint32_t c) {
- LLDB_RECORD_METHOD(void, SBAttachInfo, SetResumeCount, (uint32_t), c);
+ LLDB_INSTRUMENT_VA(this, c);
m_opaque_sp->SetResumeCount(c);
}
const char *SBAttachInfo::GetProcessPluginName() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBAttachInfo, GetProcessPluginName);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GetProcessPluginName();
}
void SBAttachInfo::SetProcessPluginName(const char *plugin_name) {
- LLDB_RECORD_METHOD(void, SBAttachInfo, SetProcessPluginName, (const char *),
- plugin_name);
+ LLDB_INSTRUMENT_VA(this, plugin_name);
return m_opaque_sp->SetProcessPluginName(plugin_name);
}
void SBAttachInfo::SetExecutable(const char *path) {
- LLDB_RECORD_METHOD(void, SBAttachInfo, SetExecutable, (const char *), path);
+ LLDB_INSTRUMENT_VA(this, path);
if (path && path[0])
m_opaque_sp->GetExecutableFile().SetFile(path, FileSpec::Style::native);
@@ -114,8 +111,7 @@ void SBAttachInfo::SetExecutable(const char *path) {
}
void SBAttachInfo::SetExecutable(SBFileSpec exe_file) {
- LLDB_RECORD_METHOD(void, SBAttachInfo, SetExecutable, (lldb::SBFileSpec),
- exe_file);
+ LLDB_INSTRUMENT_VA(this, exe_file);
if (exe_file.IsValid())
m_opaque_sp->GetExecutableFile() = exe_file.ref();
@@ -124,185 +120,134 @@ void SBAttachInfo::SetExecutable(SBFileSpec exe_file) {
}
bool SBAttachInfo::GetWaitForLaunch() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBAttachInfo, GetWaitForLaunch);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GetWaitForLaunch();
}
void SBAttachInfo::SetWaitForLaunch(bool b) {
- LLDB_RECORD_METHOD(void, SBAttachInfo, SetWaitForLaunch, (bool), b);
+ LLDB_INSTRUMENT_VA(this, b);
m_opaque_sp->SetWaitForLaunch(b);
}
void SBAttachInfo::SetWaitForLaunch(bool b, bool async) {
- LLDB_RECORD_METHOD(void, SBAttachInfo, SetWaitForLaunch, (bool, bool), b,
- async);
+ LLDB_INSTRUMENT_VA(this, b, async);
m_opaque_sp->SetWaitForLaunch(b);
m_opaque_sp->SetAsync(async);
}
bool SBAttachInfo::GetIgnoreExisting() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBAttachInfo, GetIgnoreExisting);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GetIgnoreExisting();
}
void SBAttachInfo::SetIgnoreExisting(bool b) {
- LLDB_RECORD_METHOD(void, SBAttachInfo, SetIgnoreExisting, (bool), b);
+ LLDB_INSTRUMENT_VA(this, b);
m_opaque_sp->SetIgnoreExisting(b);
}
uint32_t SBAttachInfo::GetUserID() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBAttachInfo, GetUserID);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GetUserID();
}
uint32_t SBAttachInfo::GetGroupID() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBAttachInfo, GetGroupID);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GetGroupID();
}
bool SBAttachInfo::UserIDIsValid() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBAttachInfo, UserIDIsValid);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->UserIDIsValid();
}
bool SBAttachInfo::GroupIDIsValid() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBAttachInfo, GroupIDIsValid);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GroupIDIsValid();
}
void SBAttachInfo::SetUserID(uint32_t uid) {
- LLDB_RECORD_METHOD(void, SBAttachInfo, SetUserID, (uint32_t), uid);
+ LLDB_INSTRUMENT_VA(this, uid);
m_opaque_sp->SetUserID(uid);
}
void SBAttachInfo::SetGroupID(uint32_t gid) {
- LLDB_RECORD_METHOD(void, SBAttachInfo, SetGroupID, (uint32_t), gid);
+ LLDB_INSTRUMENT_VA(this, gid);
m_opaque_sp->SetGroupID(gid);
}
uint32_t SBAttachInfo::GetEffectiveUserID() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBAttachInfo, GetEffectiveUserID);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GetEffectiveUserID();
}
uint32_t SBAttachInfo::GetEffectiveGroupID() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBAttachInfo, GetEffectiveGroupID);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GetEffectiveGroupID();
}
bool SBAttachInfo::EffectiveUserIDIsValid() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBAttachInfo, EffectiveUserIDIsValid);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->EffectiveUserIDIsValid();
}
bool SBAttachInfo::EffectiveGroupIDIsValid() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBAttachInfo, EffectiveGroupIDIsValid);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->EffectiveGroupIDIsValid();
}
void SBAttachInfo::SetEffectiveUserID(uint32_t uid) {
- LLDB_RECORD_METHOD(void, SBAttachInfo, SetEffectiveUserID, (uint32_t), uid);
+ LLDB_INSTRUMENT_VA(this, uid);
m_opaque_sp->SetEffectiveUserID(uid);
}
void SBAttachInfo::SetEffectiveGroupID(uint32_t gid) {
- LLDB_RECORD_METHOD(void, SBAttachInfo, SetEffectiveGroupID, (uint32_t), gid);
+ LLDB_INSTRUMENT_VA(this, gid);
m_opaque_sp->SetEffectiveGroupID(gid);
}
lldb::pid_t SBAttachInfo::GetParentProcessID() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::pid_t, SBAttachInfo, GetParentProcessID);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GetParentProcessID();
}
void SBAttachInfo::SetParentProcessID(lldb::pid_t pid) {
- LLDB_RECORD_METHOD(void, SBAttachInfo, SetParentProcessID, (lldb::pid_t),
- pid);
+ LLDB_INSTRUMENT_VA(this, pid);
m_opaque_sp->SetParentProcessID(pid);
}
bool SBAttachInfo::ParentProcessIDIsValid() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBAttachInfo, ParentProcessIDIsValid);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->ParentProcessIDIsValid();
}
SBListener SBAttachInfo::GetListener() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBListener, SBAttachInfo, GetListener);
+ LLDB_INSTRUMENT_VA(this);
- return LLDB_RECORD_RESULT(SBListener(m_opaque_sp->GetListener()));
+ return SBListener(m_opaque_sp->GetListener());
}
void SBAttachInfo::SetListener(SBListener &listener) {
- LLDB_RECORD_METHOD(void, SBAttachInfo, SetListener, (lldb::SBListener &),
- listener);
+ LLDB_INSTRUMENT_VA(this, listener);
m_opaque_sp->SetListener(listener.GetSP());
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBAttachInfo>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBAttachInfo, ());
- LLDB_REGISTER_CONSTRUCTOR(SBAttachInfo, (lldb::pid_t));
- LLDB_REGISTER_CONSTRUCTOR(SBAttachInfo, (const char *, bool));
- LLDB_REGISTER_CONSTRUCTOR(SBAttachInfo, (const char *, bool, bool));
- LLDB_REGISTER_CONSTRUCTOR(SBAttachInfo, (const lldb::SBAttachInfo &));
- LLDB_REGISTER_METHOD(lldb::SBAttachInfo &,
- SBAttachInfo, operator=,(const lldb::SBAttachInfo &));
- LLDB_REGISTER_METHOD(lldb::pid_t, SBAttachInfo, GetProcessID, ());
- LLDB_REGISTER_METHOD(void, SBAttachInfo, SetProcessID, (lldb::pid_t));
- LLDB_REGISTER_METHOD(uint32_t, SBAttachInfo, GetResumeCount, ());
- LLDB_REGISTER_METHOD(void, SBAttachInfo, SetResumeCount, (uint32_t));
- LLDB_REGISTER_METHOD(const char *, SBAttachInfo, GetProcessPluginName, ());
- LLDB_REGISTER_METHOD(void, SBAttachInfo, SetProcessPluginName,
- (const char *));
- LLDB_REGISTER_METHOD(void, SBAttachInfo, SetExecutable, (const char *));
- LLDB_REGISTER_METHOD(void, SBAttachInfo, SetExecutable, (lldb::SBFileSpec));
- LLDB_REGISTER_METHOD(bool, SBAttachInfo, GetWaitForLaunch, ());
- LLDB_REGISTER_METHOD(void, SBAttachInfo, SetWaitForLaunch, (bool));
- LLDB_REGISTER_METHOD(void, SBAttachInfo, SetWaitForLaunch, (bool, bool));
- LLDB_REGISTER_METHOD(bool, SBAttachInfo, GetIgnoreExisting, ());
- LLDB_REGISTER_METHOD(void, SBAttachInfo, SetIgnoreExisting, (bool));
- LLDB_REGISTER_METHOD(uint32_t, SBAttachInfo, GetUserID, ());
- LLDB_REGISTER_METHOD(uint32_t, SBAttachInfo, GetGroupID, ());
- LLDB_REGISTER_METHOD(bool, SBAttachInfo, UserIDIsValid, ());
- LLDB_REGISTER_METHOD(bool, SBAttachInfo, GroupIDIsValid, ());
- LLDB_REGISTER_METHOD(void, SBAttachInfo, SetUserID, (uint32_t));
- LLDB_REGISTER_METHOD(void, SBAttachInfo, SetGroupID, (uint32_t));
- LLDB_REGISTER_METHOD(uint32_t, SBAttachInfo, GetEffectiveUserID, ());
- LLDB_REGISTER_METHOD(uint32_t, SBAttachInfo, GetEffectiveGroupID, ());
- LLDB_REGISTER_METHOD(bool, SBAttachInfo, EffectiveUserIDIsValid, ());
- LLDB_REGISTER_METHOD(bool, SBAttachInfo, EffectiveGroupIDIsValid, ());
- LLDB_REGISTER_METHOD(void, SBAttachInfo, SetEffectiveUserID, (uint32_t));
- LLDB_REGISTER_METHOD(void, SBAttachInfo, SetEffectiveGroupID, (uint32_t));
- LLDB_REGISTER_METHOD(lldb::pid_t, SBAttachInfo, GetParentProcessID, ());
- LLDB_REGISTER_METHOD(void, SBAttachInfo, SetParentProcessID, (lldb::pid_t));
- LLDB_REGISTER_METHOD(bool, SBAttachInfo, ParentProcessIDIsValid, ());
- LLDB_REGISTER_METHOD(lldb::SBListener, SBAttachInfo, GetListener, ());
- LLDB_REGISTER_METHOD(void, SBAttachInfo, SetListener, (lldb::SBListener &));
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBBlock.cpp b/contrib/llvm-project/lldb/source/API/SBBlock.cpp
index 5c49053dd972..7d7565340836 100644
--- a/contrib/llvm-project/lldb/source/API/SBBlock.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBBlock.cpp
@@ -7,7 +7,6 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBBlock.h"
-#include "SBReproducerPrivate.h"
#include "lldb/API/SBAddress.h"
#include "lldb/API/SBFileSpec.h"
#include "lldb/API/SBFrame.h"
@@ -21,41 +20,41 @@
#include "lldb/Symbol/VariableList.h"
#include "lldb/Target/StackFrame.h"
#include "lldb/Target/Target.h"
+#include "lldb/Utility/Instrumentation.h"
using namespace lldb;
using namespace lldb_private;
-SBBlock::SBBlock() { LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBBlock); }
+SBBlock::SBBlock() { LLDB_INSTRUMENT_VA(this); }
SBBlock::SBBlock(lldb_private::Block *lldb_object_ptr)
: m_opaque_ptr(lldb_object_ptr) {}
SBBlock::SBBlock(const SBBlock &rhs) : m_opaque_ptr(rhs.m_opaque_ptr) {
- LLDB_RECORD_CONSTRUCTOR(SBBlock, (const lldb::SBBlock &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
const SBBlock &SBBlock::operator=(const SBBlock &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBBlock &,
- SBBlock, operator=,(const lldb::SBBlock &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_ptr = rhs.m_opaque_ptr;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBBlock::~SBBlock() { m_opaque_ptr = nullptr; }
bool SBBlock::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBBlock, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBBlock::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBBlock, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_ptr != nullptr;
}
bool SBBlock::IsInlined() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBBlock, IsInlined);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_ptr)
return m_opaque_ptr->GetInlinedFunctionInfo() != nullptr;
@@ -63,7 +62,7 @@ bool SBBlock::IsInlined() const {
}
const char *SBBlock::GetInlinedName() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBBlock, GetInlinedName);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_ptr) {
const InlineFunctionInfo *inlined_info =
@@ -76,8 +75,7 @@ const char *SBBlock::GetInlinedName() const {
}
SBFileSpec SBBlock::GetInlinedCallSiteFile() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBFileSpec, SBBlock,
- GetInlinedCallSiteFile);
+ LLDB_INSTRUMENT_VA(this);
SBFileSpec sb_file;
if (m_opaque_ptr) {
@@ -86,11 +84,11 @@ SBFileSpec SBBlock::GetInlinedCallSiteFile() const {
if (inlined_info)
sb_file.SetFileSpec(inlined_info->GetCallSite().GetFile());
}
- return LLDB_RECORD_RESULT(sb_file);
+ return sb_file;
}
uint32_t SBBlock::GetInlinedCallSiteLine() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBBlock, GetInlinedCallSiteLine);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_ptr) {
const InlineFunctionInfo *inlined_info =
@@ -102,7 +100,7 @@ uint32_t SBBlock::GetInlinedCallSiteLine() const {
}
uint32_t SBBlock::GetInlinedCallSiteColumn() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBBlock, GetInlinedCallSiteColumn);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_ptr) {
const InlineFunctionInfo *inlined_info =
@@ -123,39 +121,39 @@ void SBBlock::AppendVariables(bool can_create, bool get_parent_variables,
}
SBBlock SBBlock::GetParent() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBBlock, SBBlock, GetParent);
+ LLDB_INSTRUMENT_VA(this);
SBBlock sb_block;
if (m_opaque_ptr)
sb_block.m_opaque_ptr = m_opaque_ptr->GetParent();
- return LLDB_RECORD_RESULT(sb_block);
+ return sb_block;
}
lldb::SBBlock SBBlock::GetContainingInlinedBlock() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBBlock, SBBlock, GetContainingInlinedBlock);
+ LLDB_INSTRUMENT_VA(this);
SBBlock sb_block;
if (m_opaque_ptr)
sb_block.m_opaque_ptr = m_opaque_ptr->GetContainingInlinedBlock();
- return LLDB_RECORD_RESULT(sb_block);
+ return sb_block;
}
SBBlock SBBlock::GetSibling() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBBlock, SBBlock, GetSibling);
+ LLDB_INSTRUMENT_VA(this);
SBBlock sb_block;
if (m_opaque_ptr)
sb_block.m_opaque_ptr = m_opaque_ptr->GetSibling();
- return LLDB_RECORD_RESULT(sb_block);
+ return sb_block;
}
SBBlock SBBlock::GetFirstChild() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBBlock, SBBlock, GetFirstChild);
+ LLDB_INSTRUMENT_VA(this);
SBBlock sb_block;
if (m_opaque_ptr)
sb_block.m_opaque_ptr = m_opaque_ptr->GetFirstChild();
- return LLDB_RECORD_RESULT(sb_block);
+ return sb_block;
}
lldb_private::Block *SBBlock::GetPtr() { return m_opaque_ptr; }
@@ -163,8 +161,7 @@ lldb_private::Block *SBBlock::GetPtr() { return m_opaque_ptr; }
void SBBlock::SetPtr(lldb_private::Block *block) { m_opaque_ptr = block; }
bool SBBlock::GetDescription(SBStream &description) {
- LLDB_RECORD_METHOD(bool, SBBlock, GetDescription, (lldb::SBStream &),
- description);
+ LLDB_INSTRUMENT_VA(this, description);
Stream &strm = description.ref();
@@ -188,7 +185,7 @@ bool SBBlock::GetDescription(SBStream &description) {
}
uint32_t SBBlock::GetNumRanges() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBBlock, GetNumRanges);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_ptr)
return m_opaque_ptr->GetNumRanges();
@@ -196,8 +193,7 @@ uint32_t SBBlock::GetNumRanges() {
}
lldb::SBAddress SBBlock::GetRangeStartAddress(uint32_t idx) {
- LLDB_RECORD_METHOD(lldb::SBAddress, SBBlock, GetRangeStartAddress, (uint32_t),
- idx);
+ LLDB_INSTRUMENT_VA(this, idx);
lldb::SBAddress sb_addr;
if (m_opaque_ptr) {
@@ -206,12 +202,11 @@ lldb::SBAddress SBBlock::GetRangeStartAddress(uint32_t idx) {
sb_addr.ref() = range.GetBaseAddress();
}
}
- return LLDB_RECORD_RESULT(sb_addr);
+ return sb_addr;
}
lldb::SBAddress SBBlock::GetRangeEndAddress(uint32_t idx) {
- LLDB_RECORD_METHOD(lldb::SBAddress, SBBlock, GetRangeEndAddress, (uint32_t),
- idx);
+ LLDB_INSTRUMENT_VA(this, idx);
lldb::SBAddress sb_addr;
if (m_opaque_ptr) {
@@ -221,12 +216,11 @@ lldb::SBAddress SBBlock::GetRangeEndAddress(uint32_t idx) {
sb_addr.ref().Slide(range.GetByteSize());
}
}
- return LLDB_RECORD_RESULT(sb_addr);
+ return sb_addr;
}
uint32_t SBBlock::GetRangeIndexForBlockAddress(lldb::SBAddress block_addr) {
- LLDB_RECORD_METHOD(uint32_t, SBBlock, GetRangeIndexForBlockAddress,
- (lldb::SBAddress), block_addr);
+ LLDB_INSTRUMENT_VA(this, block_addr);
if (m_opaque_ptr && block_addr.IsValid()) {
return m_opaque_ptr->GetRangeIndexContainingAddress(block_addr.ref());
@@ -238,10 +232,7 @@ uint32_t SBBlock::GetRangeIndexForBlockAddress(lldb::SBAddress block_addr) {
lldb::SBValueList SBBlock::GetVariables(lldb::SBFrame &frame, bool arguments,
bool locals, bool statics,
lldb::DynamicValueType use_dynamic) {
- LLDB_RECORD_METHOD(
- lldb::SBValueList, SBBlock, GetVariables,
- (lldb::SBFrame &, bool, bool, bool, lldb::DynamicValueType), frame,
- arguments, locals, statics, use_dynamic);
+ LLDB_INSTRUMENT_VA(this, frame, arguments, locals, statics, use_dynamic);
Block *block = GetPtr();
SBValueList value_list;
@@ -289,14 +280,12 @@ lldb::SBValueList SBBlock::GetVariables(lldb::SBFrame &frame, bool arguments,
}
}
}
- return LLDB_RECORD_RESULT(value_list);
+ return value_list;
}
lldb::SBValueList SBBlock::GetVariables(lldb::SBTarget &target, bool arguments,
bool locals, bool statics) {
- LLDB_RECORD_METHOD(lldb::SBValueList, SBBlock, GetVariables,
- (lldb::SBTarget &, bool, bool, bool), target, arguments,
- locals, statics);
+ LLDB_INSTRUMENT_VA(this, target, arguments, locals, statics);
Block *block = GetPtr();
@@ -341,44 +330,5 @@ lldb::SBValueList SBBlock::GetVariables(lldb::SBTarget &target, bool arguments,
}
}
}
- return LLDB_RECORD_RESULT(value_list);
-}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBBlock>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBBlock, ());
- LLDB_REGISTER_CONSTRUCTOR(SBBlock, (const lldb::SBBlock &));
- LLDB_REGISTER_METHOD(const lldb::SBBlock &,
- SBBlock, operator=,(const lldb::SBBlock &));
- LLDB_REGISTER_METHOD_CONST(bool, SBBlock, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBBlock, operator bool, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBBlock, IsInlined, ());
- LLDB_REGISTER_METHOD_CONST(const char *, SBBlock, GetInlinedName, ());
- LLDB_REGISTER_METHOD_CONST(lldb::SBFileSpec, SBBlock,
- GetInlinedCallSiteFile, ());
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBBlock, GetInlinedCallSiteLine, ());
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBBlock, GetInlinedCallSiteColumn, ());
- LLDB_REGISTER_METHOD(lldb::SBBlock, SBBlock, GetParent, ());
- LLDB_REGISTER_METHOD(lldb::SBBlock, SBBlock, GetContainingInlinedBlock, ());
- LLDB_REGISTER_METHOD(lldb::SBBlock, SBBlock, GetSibling, ());
- LLDB_REGISTER_METHOD(lldb::SBBlock, SBBlock, GetFirstChild, ());
- LLDB_REGISTER_METHOD(bool, SBBlock, GetDescription, (lldb::SBStream &));
- LLDB_REGISTER_METHOD(uint32_t, SBBlock, GetNumRanges, ());
- LLDB_REGISTER_METHOD(lldb::SBAddress, SBBlock, GetRangeStartAddress,
- (uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBAddress, SBBlock, GetRangeEndAddress,
- (uint32_t));
- LLDB_REGISTER_METHOD(uint32_t, SBBlock, GetRangeIndexForBlockAddress,
- (lldb::SBAddress));
- LLDB_REGISTER_METHOD(
- lldb::SBValueList, SBBlock, GetVariables,
- (lldb::SBFrame &, bool, bool, bool, lldb::DynamicValueType));
- LLDB_REGISTER_METHOD(lldb::SBValueList, SBBlock, GetVariables,
- (lldb::SBTarget &, bool, bool, bool));
-}
-
-}
+ return value_list;
}
diff --git a/contrib/llvm-project/lldb/source/API/SBBreakpoint.cpp b/contrib/llvm-project/lldb/source/API/SBBreakpoint.cpp
index 0f0a93519993..5fe8f7fe0583 100644
--- a/contrib/llvm-project/lldb/source/API/SBBreakpoint.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBBreakpoint.cpp
@@ -7,7 +7,6 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBBreakpoint.h"
-#include "SBReproducerPrivate.h"
#include "lldb/API/SBBreakpointLocation.h"
#include "lldb/API/SBDebugger.h"
#include "lldb/API/SBEvent.h"
@@ -16,6 +15,7 @@
#include "lldb/API/SBStringList.h"
#include "lldb/API/SBStructuredData.h"
#include "lldb/API/SBThread.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Breakpoint/Breakpoint.h"
#include "lldb/Breakpoint/BreakpointIDList.h"
@@ -45,54 +45,51 @@
using namespace lldb;
using namespace lldb_private;
-SBBreakpoint::SBBreakpoint() { LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBBreakpoint); }
+SBBreakpoint::SBBreakpoint() { LLDB_INSTRUMENT_VA(this); }
SBBreakpoint::SBBreakpoint(const SBBreakpoint &rhs)
: m_opaque_wp(rhs.m_opaque_wp) {
- LLDB_RECORD_CONSTRUCTOR(SBBreakpoint, (const lldb::SBBreakpoint &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
SBBreakpoint::SBBreakpoint(const lldb::BreakpointSP &bp_sp)
: m_opaque_wp(bp_sp) {
- LLDB_RECORD_CONSTRUCTOR(SBBreakpoint, (const lldb::BreakpointSP &), bp_sp);
+ LLDB_INSTRUMENT_VA(this, bp_sp);
}
SBBreakpoint::~SBBreakpoint() = default;
const SBBreakpoint &SBBreakpoint::operator=(const SBBreakpoint &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBBreakpoint &,
- SBBreakpoint, operator=,(const lldb::SBBreakpoint &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_wp = rhs.m_opaque_wp;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
bool SBBreakpoint::operator==(const lldb::SBBreakpoint &rhs) {
- LLDB_RECORD_METHOD(
- bool, SBBreakpoint, operator==,(const lldb::SBBreakpoint &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
return m_opaque_wp.lock() == rhs.m_opaque_wp.lock();
}
bool SBBreakpoint::operator!=(const lldb::SBBreakpoint &rhs) {
- LLDB_RECORD_METHOD(
- bool, SBBreakpoint, operator!=,(const lldb::SBBreakpoint &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
return m_opaque_wp.lock() != rhs.m_opaque_wp.lock();
}
SBTarget SBBreakpoint::GetTarget() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBTarget, SBBreakpoint, GetTarget);
+ LLDB_INSTRUMENT_VA(this);
BreakpointSP bkpt_sp = GetSP();
if (bkpt_sp)
- return LLDB_RECORD_RESULT(SBTarget(bkpt_sp->GetTargetSP()));
+ return SBTarget(bkpt_sp->GetTargetSP());
- return LLDB_RECORD_RESULT(SBTarget());
+ return SBTarget();
}
break_id_t SBBreakpoint::GetID() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::break_id_t, SBBreakpoint, GetID);
+ LLDB_INSTRUMENT_VA(this);
break_id_t break_id = LLDB_INVALID_BREAK_ID;
BreakpointSP bkpt_sp = GetSP();
@@ -103,11 +100,11 @@ break_id_t SBBreakpoint::GetID() const {
}
bool SBBreakpoint::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBBreakpoint, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBBreakpoint::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBBreakpoint, operator bool);
+ LLDB_INSTRUMENT_VA(this);
BreakpointSP bkpt_sp = GetSP();
if (!bkpt_sp)
@@ -119,7 +116,7 @@ SBBreakpoint::operator bool() const {
}
void SBBreakpoint::ClearAllBreakpointSites() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBBreakpoint, ClearAllBreakpointSites);
+ LLDB_INSTRUMENT_VA(this);
BreakpointSP bkpt_sp = GetSP();
if (bkpt_sp) {
@@ -130,8 +127,7 @@ void SBBreakpoint::ClearAllBreakpointSites() {
}
SBBreakpointLocation SBBreakpoint::FindLocationByAddress(addr_t vm_addr) {
- LLDB_RECORD_METHOD(lldb::SBBreakpointLocation, SBBreakpoint,
- FindLocationByAddress, (lldb::addr_t), vm_addr);
+ LLDB_INSTRUMENT_VA(this, vm_addr);
SBBreakpointLocation sb_bp_location;
@@ -148,12 +144,11 @@ SBBreakpointLocation SBBreakpoint::FindLocationByAddress(addr_t vm_addr) {
sb_bp_location.SetLocation(bkpt_sp->FindLocationByAddress(address));
}
}
- return LLDB_RECORD_RESULT(sb_bp_location);
+ return sb_bp_location;
}
break_id_t SBBreakpoint::FindLocationIDByAddress(addr_t vm_addr) {
- LLDB_RECORD_METHOD(lldb::break_id_t, SBBreakpoint, FindLocationIDByAddress,
- (lldb::addr_t), vm_addr);
+ LLDB_INSTRUMENT_VA(this, vm_addr);
break_id_t break_id = LLDB_INVALID_BREAK_ID;
BreakpointSP bkpt_sp = GetSP();
@@ -173,8 +168,7 @@ break_id_t SBBreakpoint::FindLocationIDByAddress(addr_t vm_addr) {
}
SBBreakpointLocation SBBreakpoint::FindLocationByID(break_id_t bp_loc_id) {
- LLDB_RECORD_METHOD(lldb::SBBreakpointLocation, SBBreakpoint, FindLocationByID,
- (lldb::break_id_t), bp_loc_id);
+ LLDB_INSTRUMENT_VA(this, bp_loc_id);
SBBreakpointLocation sb_bp_location;
BreakpointSP bkpt_sp = GetSP();
@@ -185,12 +179,11 @@ SBBreakpointLocation SBBreakpoint::FindLocationByID(break_id_t bp_loc_id) {
sb_bp_location.SetLocation(bkpt_sp->FindLocationByID(bp_loc_id));
}
- return LLDB_RECORD_RESULT(sb_bp_location);
+ return sb_bp_location;
}
SBBreakpointLocation SBBreakpoint::GetLocationAtIndex(uint32_t index) {
- LLDB_RECORD_METHOD(lldb::SBBreakpointLocation, SBBreakpoint,
- GetLocationAtIndex, (uint32_t), index);
+ LLDB_INSTRUMENT_VA(this, index);
SBBreakpointLocation sb_bp_location;
BreakpointSP bkpt_sp = GetSP();
@@ -201,11 +194,11 @@ SBBreakpointLocation SBBreakpoint::GetLocationAtIndex(uint32_t index) {
sb_bp_location.SetLocation(bkpt_sp->GetLocationAtIndex(index));
}
- return LLDB_RECORD_RESULT(sb_bp_location);
+ return sb_bp_location;
}
void SBBreakpoint::SetEnabled(bool enable) {
- LLDB_RECORD_METHOD(void, SBBreakpoint, SetEnabled, (bool), enable);
+ LLDB_INSTRUMENT_VA(this, enable);
BreakpointSP bkpt_sp = GetSP();
@@ -217,7 +210,7 @@ void SBBreakpoint::SetEnabled(bool enable) {
}
bool SBBreakpoint::IsEnabled() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBBreakpoint, IsEnabled);
+ LLDB_INSTRUMENT_VA(this);
BreakpointSP bkpt_sp = GetSP();
if (bkpt_sp) {
@@ -229,7 +222,7 @@ bool SBBreakpoint::IsEnabled() {
}
void SBBreakpoint::SetOneShot(bool one_shot) {
- LLDB_RECORD_METHOD(void, SBBreakpoint, SetOneShot, (bool), one_shot);
+ LLDB_INSTRUMENT_VA(this, one_shot);
BreakpointSP bkpt_sp = GetSP();
@@ -241,7 +234,7 @@ void SBBreakpoint::SetOneShot(bool one_shot) {
}
bool SBBreakpoint::IsOneShot() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBBreakpoint, IsOneShot);
+ LLDB_INSTRUMENT_VA(this);
BreakpointSP bkpt_sp = GetSP();
if (bkpt_sp) {
@@ -253,7 +246,7 @@ bool SBBreakpoint::IsOneShot() const {
}
bool SBBreakpoint::IsInternal() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBBreakpoint, IsInternal);
+ LLDB_INSTRUMENT_VA(this);
BreakpointSP bkpt_sp = GetSP();
if (bkpt_sp) {
@@ -265,7 +258,7 @@ bool SBBreakpoint::IsInternal() {
}
void SBBreakpoint::SetIgnoreCount(uint32_t count) {
- LLDB_RECORD_METHOD(void, SBBreakpoint, SetIgnoreCount, (uint32_t), count);
+ LLDB_INSTRUMENT_VA(this, count);
BreakpointSP bkpt_sp = GetSP();
@@ -277,8 +270,7 @@ void SBBreakpoint::SetIgnoreCount(uint32_t count) {
}
void SBBreakpoint::SetCondition(const char *condition) {
- LLDB_RECORD_METHOD(void, SBBreakpoint, SetCondition, (const char *),
- condition);
+ LLDB_INSTRUMENT_VA(this, condition);
BreakpointSP bkpt_sp = GetSP();
if (bkpt_sp) {
@@ -289,7 +281,7 @@ void SBBreakpoint::SetCondition(const char *condition) {
}
const char *SBBreakpoint::GetCondition() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBBreakpoint, GetCondition);
+ LLDB_INSTRUMENT_VA(this);
BreakpointSP bkpt_sp = GetSP();
if (bkpt_sp) {
@@ -301,8 +293,7 @@ const char *SBBreakpoint::GetCondition() {
}
void SBBreakpoint::SetAutoContinue(bool auto_continue) {
- LLDB_RECORD_METHOD(void, SBBreakpoint, SetAutoContinue, (bool),
- auto_continue);
+ LLDB_INSTRUMENT_VA(this, auto_continue);
BreakpointSP bkpt_sp = GetSP();
if (bkpt_sp) {
@@ -313,7 +304,7 @@ void SBBreakpoint::SetAutoContinue(bool auto_continue) {
}
bool SBBreakpoint::GetAutoContinue() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBBreakpoint, GetAutoContinue);
+ LLDB_INSTRUMENT_VA(this);
BreakpointSP bkpt_sp = GetSP();
if (bkpt_sp) {
@@ -325,7 +316,7 @@ bool SBBreakpoint::GetAutoContinue() {
}
uint32_t SBBreakpoint::GetHitCount() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBBreakpoint, GetHitCount);
+ LLDB_INSTRUMENT_VA(this);
uint32_t count = 0;
BreakpointSP bkpt_sp = GetSP();
@@ -339,7 +330,7 @@ uint32_t SBBreakpoint::GetHitCount() const {
}
uint32_t SBBreakpoint::GetIgnoreCount() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBBreakpoint, GetIgnoreCount);
+ LLDB_INSTRUMENT_VA(this);
uint32_t count = 0;
BreakpointSP bkpt_sp = GetSP();
@@ -353,7 +344,7 @@ uint32_t SBBreakpoint::GetIgnoreCount() const {
}
void SBBreakpoint::SetThreadID(tid_t tid) {
- LLDB_RECORD_METHOD(void, SBBreakpoint, SetThreadID, (lldb::tid_t), tid);
+ LLDB_INSTRUMENT_VA(this, tid);
BreakpointSP bkpt_sp = GetSP();
if (bkpt_sp) {
@@ -364,7 +355,7 @@ void SBBreakpoint::SetThreadID(tid_t tid) {
}
tid_t SBBreakpoint::GetThreadID() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::tid_t, SBBreakpoint, GetThreadID);
+ LLDB_INSTRUMENT_VA(this);
tid_t tid = LLDB_INVALID_THREAD_ID;
BreakpointSP bkpt_sp = GetSP();
@@ -378,7 +369,7 @@ tid_t SBBreakpoint::GetThreadID() {
}
void SBBreakpoint::SetThreadIndex(uint32_t index) {
- LLDB_RECORD_METHOD(void, SBBreakpoint, SetThreadIndex, (uint32_t), index);
+ LLDB_INSTRUMENT_VA(this, index);
BreakpointSP bkpt_sp = GetSP();
if (bkpt_sp) {
@@ -389,7 +380,7 @@ void SBBreakpoint::SetThreadIndex(uint32_t index) {
}
uint32_t SBBreakpoint::GetThreadIndex() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBBreakpoint, GetThreadIndex);
+ LLDB_INSTRUMENT_VA(this);
uint32_t thread_idx = UINT32_MAX;
BreakpointSP bkpt_sp = GetSP();
@@ -406,8 +397,7 @@ uint32_t SBBreakpoint::GetThreadIndex() const {
}
void SBBreakpoint::SetThreadName(const char *thread_name) {
- LLDB_RECORD_METHOD(void, SBBreakpoint, SetThreadName, (const char *),
- thread_name);
+ LLDB_INSTRUMENT_VA(this, thread_name);
BreakpointSP bkpt_sp = GetSP();
@@ -419,7 +409,7 @@ void SBBreakpoint::SetThreadName(const char *thread_name) {
}
const char *SBBreakpoint::GetThreadName() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBBreakpoint, GetThreadName);
+ LLDB_INSTRUMENT_VA(this);
const char *name = nullptr;
BreakpointSP bkpt_sp = GetSP();
@@ -436,8 +426,7 @@ const char *SBBreakpoint::GetThreadName() const {
}
void SBBreakpoint::SetQueueName(const char *queue_name) {
- LLDB_RECORD_METHOD(void, SBBreakpoint, SetQueueName, (const char *),
- queue_name);
+ LLDB_INSTRUMENT_VA(this, queue_name);
BreakpointSP bkpt_sp = GetSP();
if (bkpt_sp) {
@@ -448,7 +437,7 @@ void SBBreakpoint::SetQueueName(const char *queue_name) {
}
const char *SBBreakpoint::GetQueueName() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBBreakpoint, GetQueueName);
+ LLDB_INSTRUMENT_VA(this);
const char *name = nullptr;
BreakpointSP bkpt_sp = GetSP();
@@ -465,8 +454,7 @@ const char *SBBreakpoint::GetQueueName() const {
}
size_t SBBreakpoint::GetNumResolvedLocations() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(size_t, SBBreakpoint,
- GetNumResolvedLocations);
+ LLDB_INSTRUMENT_VA(this);
size_t num_resolved = 0;
BreakpointSP bkpt_sp = GetSP();
@@ -479,7 +467,7 @@ size_t SBBreakpoint::GetNumResolvedLocations() const {
}
size_t SBBreakpoint::GetNumLocations() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(size_t, SBBreakpoint, GetNumLocations);
+ LLDB_INSTRUMENT_VA(this);
BreakpointSP bkpt_sp = GetSP();
size_t num_locs = 0;
@@ -492,8 +480,7 @@ size_t SBBreakpoint::GetNumLocations() const {
}
void SBBreakpoint::SetCommandLineCommands(SBStringList &commands) {
- LLDB_RECORD_METHOD(void, SBBreakpoint, SetCommandLineCommands,
- (lldb::SBStringList &), commands);
+ LLDB_INSTRUMENT_VA(this, commands);
BreakpointSP bkpt_sp = GetSP();
if (!bkpt_sp)
@@ -510,8 +497,7 @@ void SBBreakpoint::SetCommandLineCommands(SBStringList &commands) {
}
bool SBBreakpoint::GetCommandLineCommands(SBStringList &commands) {
- LLDB_RECORD_METHOD(bool, SBBreakpoint, GetCommandLineCommands,
- (lldb::SBStringList &), commands);
+ LLDB_INSTRUMENT_VA(this, commands);
BreakpointSP bkpt_sp = GetSP();
if (!bkpt_sp)
@@ -525,14 +511,13 @@ bool SBBreakpoint::GetCommandLineCommands(SBStringList &commands) {
}
bool SBBreakpoint::GetDescription(SBStream &s) {
- LLDB_RECORD_METHOD(bool, SBBreakpoint, GetDescription, (lldb::SBStream &), s);
+ LLDB_INSTRUMENT_VA(this, s);
return GetDescription(s, true);
}
bool SBBreakpoint::GetDescription(SBStream &s, bool include_locations) {
- LLDB_RECORD_METHOD(bool, SBBreakpoint, GetDescription,
- (lldb::SBStream &, bool), s, include_locations);
+ LLDB_INSTRUMENT_VA(this, s, include_locations);
BreakpointSP bkpt_sp = GetSP();
if (bkpt_sp) {
@@ -552,25 +537,24 @@ bool SBBreakpoint::GetDescription(SBStream &s, bool include_locations) {
}
SBError SBBreakpoint::AddLocation(SBAddress &address) {
- LLDB_RECORD_METHOD(lldb::SBError, SBBreakpoint, AddLocation,
- (lldb::SBAddress &), address);
+ LLDB_INSTRUMENT_VA(this, address);
BreakpointSP bkpt_sp = GetSP();
SBError error;
if (!address.IsValid()) {
error.SetErrorString("Can't add an invalid address.");
- return LLDB_RECORD_RESULT(error);
+ return error;
}
if (!bkpt_sp) {
error.SetErrorString("No breakpoint to add a location to.");
- return LLDB_RECORD_RESULT(error);
+ return error;
}
if (!llvm::isa<BreakpointResolverScripted>(bkpt_sp->GetResolver().get())) {
error.SetErrorString("Only a scripted resolver can add locations.");
- return LLDB_RECORD_RESULT(error);
+ return error;
}
if (bkpt_sp->GetSearchFilter()->AddressPasses(address.ref()))
@@ -582,27 +566,25 @@ SBError SBBreakpoint::AddLocation(SBAddress &address) {
error.SetErrorStringWithFormat("Address: %s didn't pass the filter.",
s.GetData());
}
- return LLDB_RECORD_RESULT(error);
+ return error;
}
SBStructuredData SBBreakpoint::SerializeToStructuredData() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBStructuredData, SBBreakpoint,
- SerializeToStructuredData);
+ LLDB_INSTRUMENT_VA(this);
SBStructuredData data;
BreakpointSP bkpt_sp = GetSP();
if (!bkpt_sp)
- return LLDB_RECORD_RESULT(data);
+ return data;
StructuredData::ObjectSP bkpt_dict = bkpt_sp->SerializeToStructuredData();
data.m_impl_up->SetObjectSP(bkpt_dict);
- return LLDB_RECORD_RESULT(data);
+ return data;
}
void SBBreakpoint::SetCallback(SBBreakpointHitCallback callback, void *baton) {
- LLDB_RECORD_DUMMY(void, SBBreakpoint, SetCallback,
- (lldb::SBBreakpointHitCallback, void *), callback, baton);
+ LLDB_INSTRUMENT_VA(this, callback, baton);
BreakpointSP bkpt_sp = GetSP();
@@ -618,8 +600,7 @@ void SBBreakpoint::SetCallback(SBBreakpointHitCallback callback, void *baton) {
void SBBreakpoint::SetScriptCallbackFunction(
const char *callback_function_name) {
-LLDB_RECORD_METHOD(void, SBBreakpoint, SetScriptCallbackFunction,
- (const char *), callback_function_name);
+ LLDB_INSTRUMENT_VA(this, callback_function_name);
SBStructuredData empty_args;
SetScriptCallbackFunction(callback_function_name, empty_args);
}
@@ -627,8 +608,7 @@ LLDB_RECORD_METHOD(void, SBBreakpoint, SetScriptCallbackFunction,
SBError SBBreakpoint::SetScriptCallbackFunction(
const char *callback_function_name,
SBStructuredData &extra_args) {
- LLDB_RECORD_METHOD(SBError, SBBreakpoint, SetScriptCallbackFunction,
- (const char *, SBStructuredData &), callback_function_name, extra_args);
+ LLDB_INSTRUMENT_VA(this, callback_function_name, extra_args);
SBError sb_error;
BreakpointSP bkpt_sp = GetSP();
@@ -647,13 +627,12 @@ SBError SBBreakpoint::SetScriptCallbackFunction(
sb_error.SetError(error);
} else
sb_error.SetErrorString("invalid breakpoint");
-
- return LLDB_RECORD_RESULT(sb_error);
+
+ return sb_error;
}
SBError SBBreakpoint::SetScriptCallbackBody(const char *callback_body_text) {
- LLDB_RECORD_METHOD(lldb::SBError, SBBreakpoint, SetScriptCallbackBody,
- (const char *), callback_body_text);
+ LLDB_INSTRUMENT_VA(this, callback_body_text);
BreakpointSP bkpt_sp = GetSP();
@@ -671,19 +650,18 @@ SBError SBBreakpoint::SetScriptCallbackBody(const char *callback_body_text) {
} else
sb_error.SetErrorString("invalid breakpoint");
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
bool SBBreakpoint::AddName(const char *new_name) {
- LLDB_RECORD_METHOD(bool, SBBreakpoint, AddName, (const char *), new_name);
+ LLDB_INSTRUMENT_VA(this, new_name);
SBError status = AddNameWithErrorHandling(new_name);
return status.Success();
}
SBError SBBreakpoint::AddNameWithErrorHandling(const char *new_name) {
- LLDB_RECORD_METHOD(SBError, SBBreakpoint, AddNameWithErrorHandling,
- (const char *), new_name);
+ LLDB_INSTRUMENT_VA(this, new_name);
BreakpointSP bkpt_sp = GetSP();
@@ -698,12 +676,11 @@ SBError SBBreakpoint::AddNameWithErrorHandling(const char *new_name) {
status.SetErrorString("invalid breakpoint");
}
- return LLDB_RECORD_RESULT(status);
+ return status;
}
void SBBreakpoint::RemoveName(const char *name_to_remove) {
- LLDB_RECORD_METHOD(void, SBBreakpoint, RemoveName, (const char *),
- name_to_remove);
+ LLDB_INSTRUMENT_VA(this, name_to_remove);
BreakpointSP bkpt_sp = GetSP();
@@ -716,7 +693,7 @@ void SBBreakpoint::RemoveName(const char *name_to_remove) {
}
bool SBBreakpoint::MatchesName(const char *name) {
- LLDB_RECORD_METHOD(bool, SBBreakpoint, MatchesName, (const char *), name);
+ LLDB_INSTRUMENT_VA(this, name);
BreakpointSP bkpt_sp = GetSP();
@@ -730,8 +707,7 @@ bool SBBreakpoint::MatchesName(const char *name) {
}
void SBBreakpoint::GetNames(SBStringList &names) {
- LLDB_RECORD_METHOD(void, SBBreakpoint, GetNames, (lldb::SBStringList &),
- names);
+ LLDB_INSTRUMENT_VA(this, names);
BreakpointSP bkpt_sp = GetSP();
@@ -747,8 +723,7 @@ void SBBreakpoint::GetNames(SBStringList &names) {
}
bool SBBreakpoint::EventIsBreakpointEvent(const lldb::SBEvent &event) {
- LLDB_RECORD_STATIC_METHOD(bool, SBBreakpoint, EventIsBreakpointEvent,
- (const lldb::SBEvent &), event);
+ LLDB_INSTRUMENT_VA(event);
return Breakpoint::BreakpointEventData::GetEventDataFromEvent(event.get()) !=
nullptr;
@@ -756,9 +731,7 @@ bool SBBreakpoint::EventIsBreakpointEvent(const lldb::SBEvent &event) {
BreakpointEventType
SBBreakpoint::GetBreakpointEventTypeFromEvent(const SBEvent &event) {
- LLDB_RECORD_STATIC_METHOD(lldb::BreakpointEventType, SBBreakpoint,
- GetBreakpointEventTypeFromEvent,
- (const lldb::SBEvent &), event);
+ LLDB_INSTRUMENT_VA(event);
if (event.IsValid())
return Breakpoint::BreakpointEventData::GetBreakpointEventTypeFromEvent(
@@ -767,37 +740,30 @@ SBBreakpoint::GetBreakpointEventTypeFromEvent(const SBEvent &event) {
}
SBBreakpoint SBBreakpoint::GetBreakpointFromEvent(const lldb::SBEvent &event) {
- LLDB_RECORD_STATIC_METHOD(lldb::SBBreakpoint, SBBreakpoint,
- GetBreakpointFromEvent, (const lldb::SBEvent &),
- event);
+ LLDB_INSTRUMENT_VA(event);
if (event.IsValid())
- return LLDB_RECORD_RESULT(
- SBBreakpoint(Breakpoint::BreakpointEventData::GetBreakpointFromEvent(
- event.GetSP())));
- return LLDB_RECORD_RESULT(SBBreakpoint());
+ return SBBreakpoint(
+ Breakpoint::BreakpointEventData::GetBreakpointFromEvent(event.GetSP()));
+ return SBBreakpoint();
}
SBBreakpointLocation
SBBreakpoint::GetBreakpointLocationAtIndexFromEvent(const lldb::SBEvent &event,
uint32_t loc_idx) {
- LLDB_RECORD_STATIC_METHOD(lldb::SBBreakpointLocation, SBBreakpoint,
- GetBreakpointLocationAtIndexFromEvent,
- (const lldb::SBEvent &, uint32_t), event, loc_idx);
+ LLDB_INSTRUMENT_VA(event, loc_idx);
SBBreakpointLocation sb_breakpoint_loc;
if (event.IsValid())
sb_breakpoint_loc.SetLocation(
Breakpoint::BreakpointEventData::GetBreakpointLocationAtIndexFromEvent(
event.GetSP(), loc_idx));
- return LLDB_RECORD_RESULT(sb_breakpoint_loc);
+ return sb_breakpoint_loc;
}
uint32_t
SBBreakpoint::GetNumBreakpointLocationsFromEvent(const lldb::SBEvent &event) {
- LLDB_RECORD_STATIC_METHOD(uint32_t, SBBreakpoint,
- GetNumBreakpointLocationsFromEvent,
- (const lldb::SBEvent &), event);
+ LLDB_INSTRUMENT_VA(event);
uint32_t num_locations = 0;
if (event.IsValid())
@@ -808,7 +774,7 @@ SBBreakpoint::GetNumBreakpointLocationsFromEvent(const lldb::SBEvent &event) {
}
bool SBBreakpoint::IsHardware() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBBreakpoint, IsHardware);
+ LLDB_INSTRUMENT_VA(this);
BreakpointSP bkpt_sp = GetSP();
if (bkpt_sp)
@@ -821,7 +787,7 @@ BreakpointSP SBBreakpoint::GetSP() const { return m_opaque_wp.lock(); }
// This is simple collection of breakpoint id's and their target.
class SBBreakpointListImpl {
public:
- SBBreakpointListImpl(lldb::TargetSP target_sp) : m_target_wp() {
+ SBBreakpointListImpl(lldb::TargetSP target_sp) {
if (target_sp && target_sp->IsValid())
m_target_wp = target_sp;
}
@@ -904,13 +870,13 @@ private:
SBBreakpointList::SBBreakpointList(SBTarget &target)
: m_opaque_sp(new SBBreakpointListImpl(target.GetSP())) {
- LLDB_RECORD_CONSTRUCTOR(SBBreakpointList, (lldb::SBTarget &), target);
+ LLDB_INSTRUMENT_VA(this, target);
}
SBBreakpointList::~SBBreakpointList() = default;
size_t SBBreakpointList::GetSize() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(size_t, SBBreakpointList, GetSize);
+ LLDB_INSTRUMENT_VA(this);
if (!m_opaque_sp)
return 0;
@@ -919,29 +885,26 @@ size_t SBBreakpointList::GetSize() const {
}
SBBreakpoint SBBreakpointList::GetBreakpointAtIndex(size_t idx) {
- LLDB_RECORD_METHOD(lldb::SBBreakpoint, SBBreakpointList, GetBreakpointAtIndex,
- (size_t), idx);
+ LLDB_INSTRUMENT_VA(this, idx);
if (!m_opaque_sp)
- return LLDB_RECORD_RESULT(SBBreakpoint());
+ return SBBreakpoint();
BreakpointSP bkpt_sp = m_opaque_sp->GetBreakpointAtIndex(idx);
- return LLDB_RECORD_RESULT(SBBreakpoint(bkpt_sp));
+ return SBBreakpoint(bkpt_sp);
}
SBBreakpoint SBBreakpointList::FindBreakpointByID(lldb::break_id_t id) {
- LLDB_RECORD_METHOD(lldb::SBBreakpoint, SBBreakpointList, FindBreakpointByID,
- (lldb::break_id_t), id);
+ LLDB_INSTRUMENT_VA(this, id);
if (!m_opaque_sp)
- return LLDB_RECORD_RESULT(SBBreakpoint());
+ return SBBreakpoint();
BreakpointSP bkpt_sp = m_opaque_sp->FindBreakpointByID(id);
- return LLDB_RECORD_RESULT(SBBreakpoint(bkpt_sp));
+ return SBBreakpoint(bkpt_sp);
}
void SBBreakpointList::Append(const SBBreakpoint &sb_bkpt) {
- LLDB_RECORD_METHOD(void, SBBreakpointList, Append,
- (const lldb::SBBreakpoint &), sb_bkpt);
+ LLDB_INSTRUMENT_VA(this, sb_bkpt);
if (!sb_bkpt.IsValid())
return;
@@ -951,8 +914,7 @@ void SBBreakpointList::Append(const SBBreakpoint &sb_bkpt) {
}
void SBBreakpointList::AppendByID(lldb::break_id_t id) {
- LLDB_RECORD_METHOD(void, SBBreakpointList, AppendByID, (lldb::break_id_t),
- id);
+ LLDB_INSTRUMENT_VA(this, id);
if (!m_opaque_sp)
return;
@@ -960,8 +922,7 @@ void SBBreakpointList::AppendByID(lldb::break_id_t id) {
}
bool SBBreakpointList::AppendIfUnique(const SBBreakpoint &sb_bkpt) {
- LLDB_RECORD_METHOD(bool, SBBreakpointList, AppendIfUnique,
- (const lldb::SBBreakpoint &), sb_bkpt);
+ LLDB_INSTRUMENT_VA(this, sb_bkpt);
if (!sb_bkpt.IsValid())
return false;
@@ -971,7 +932,7 @@ bool SBBreakpointList::AppendIfUnique(const SBBreakpoint &sb_bkpt) {
}
void SBBreakpointList::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBBreakpointList, Clear);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_sp)
m_opaque_sp->Clear();
@@ -982,114 +943,3 @@ void SBBreakpointList::CopyToBreakpointIDList(
if (m_opaque_sp)
m_opaque_sp->CopyToBreakpointIDList(bp_id_list);
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBBreakpoint>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBBreakpoint, ());
- LLDB_REGISTER_CONSTRUCTOR(SBBreakpoint, (const lldb::SBBreakpoint &));
- LLDB_REGISTER_CONSTRUCTOR(SBBreakpoint, (const lldb::BreakpointSP &));
- LLDB_REGISTER_METHOD(const lldb::SBBreakpoint &,
- SBBreakpoint, operator=,(const lldb::SBBreakpoint &));
- LLDB_REGISTER_METHOD(bool,
- SBBreakpoint, operator==,(const lldb::SBBreakpoint &));
- LLDB_REGISTER_METHOD(bool,
- SBBreakpoint, operator!=,(const lldb::SBBreakpoint &));
- LLDB_REGISTER_METHOD_CONST(lldb::SBTarget, SBBreakpoint, GetTarget, ());
- LLDB_REGISTER_METHOD_CONST(lldb::break_id_t, SBBreakpoint, GetID, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBBreakpoint, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBBreakpoint, operator bool, ());
- LLDB_REGISTER_METHOD(void, SBBreakpoint, ClearAllBreakpointSites, ());
- LLDB_REGISTER_METHOD(lldb::SBBreakpointLocation, SBBreakpoint,
- FindLocationByAddress, (lldb::addr_t));
- LLDB_REGISTER_METHOD(lldb::break_id_t, SBBreakpoint,
- FindLocationIDByAddress, (lldb::addr_t));
- LLDB_REGISTER_METHOD(lldb::SBBreakpointLocation, SBBreakpoint,
- FindLocationByID, (lldb::break_id_t));
- LLDB_REGISTER_METHOD(lldb::SBBreakpointLocation, SBBreakpoint,
- GetLocationAtIndex, (uint32_t));
- LLDB_REGISTER_METHOD(void, SBBreakpoint, SetEnabled, (bool));
- LLDB_REGISTER_METHOD(bool, SBBreakpoint, IsEnabled, ());
- LLDB_REGISTER_METHOD(void, SBBreakpoint, SetOneShot, (bool));
- LLDB_REGISTER_METHOD_CONST(bool, SBBreakpoint, IsOneShot, ());
- LLDB_REGISTER_METHOD(bool, SBBreakpoint, IsInternal, ());
- LLDB_REGISTER_METHOD(void, SBBreakpoint, SetIgnoreCount, (uint32_t));
- LLDB_REGISTER_METHOD(void, SBBreakpoint, SetCondition, (const char *));
- LLDB_REGISTER_METHOD(const char *, SBBreakpoint, GetCondition, ());
- LLDB_REGISTER_METHOD(void, SBBreakpoint, SetAutoContinue, (bool));
- LLDB_REGISTER_METHOD(bool, SBBreakpoint, GetAutoContinue, ());
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBBreakpoint, GetHitCount, ());
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBBreakpoint, GetIgnoreCount, ());
- LLDB_REGISTER_METHOD(void, SBBreakpoint, SetThreadID, (lldb::tid_t));
- LLDB_REGISTER_METHOD(lldb::tid_t, SBBreakpoint, GetThreadID, ());
- LLDB_REGISTER_METHOD(void, SBBreakpoint, SetThreadIndex, (uint32_t));
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBBreakpoint, GetThreadIndex, ());
- LLDB_REGISTER_METHOD(void, SBBreakpoint, SetThreadName, (const char *));
- LLDB_REGISTER_METHOD_CONST(const char *, SBBreakpoint, GetThreadName, ());
- LLDB_REGISTER_METHOD(void, SBBreakpoint, SetQueueName, (const char *));
- LLDB_REGISTER_METHOD_CONST(const char *, SBBreakpoint, GetQueueName, ());
- LLDB_REGISTER_METHOD_CONST(size_t, SBBreakpoint, GetNumResolvedLocations,
- ());
- LLDB_REGISTER_METHOD_CONST(size_t, SBBreakpoint, GetNumLocations, ());
- LLDB_REGISTER_METHOD(void, SBBreakpoint, SetCommandLineCommands,
- (lldb::SBStringList &));
- LLDB_REGISTER_METHOD(bool, SBBreakpoint, GetCommandLineCommands,
- (lldb::SBStringList &));
- LLDB_REGISTER_METHOD(bool, SBBreakpoint, GetDescription,
- (lldb::SBStream &));
- LLDB_REGISTER_METHOD(bool, SBBreakpoint, GetDescription,
- (lldb::SBStream &, bool));
- LLDB_REGISTER_METHOD(lldb::SBError, SBBreakpoint, AddLocation,
- (lldb::SBAddress &));
- LLDB_REGISTER_METHOD(lldb::SBStructuredData, SBBreakpoint,
- SerializeToStructuredData, ());
- LLDB_REGISTER_METHOD(void, SBBreakpoint, SetScriptCallbackFunction,
- (const char *));
- LLDB_REGISTER_METHOD(lldb::SBError, SBBreakpoint, SetScriptCallbackFunction,
- (const char *, SBStructuredData &));
- LLDB_REGISTER_METHOD(lldb::SBError, SBBreakpoint, SetScriptCallbackBody,
- (const char *));
- LLDB_REGISTER_METHOD(bool, SBBreakpoint, AddName, (const char *));
- LLDB_REGISTER_METHOD(lldb::SBError, SBBreakpoint, AddNameWithErrorHandling,
- (const char *));
- LLDB_REGISTER_METHOD(void, SBBreakpoint, RemoveName, (const char *));
- LLDB_REGISTER_METHOD(bool, SBBreakpoint, MatchesName, (const char *));
- LLDB_REGISTER_METHOD(void, SBBreakpoint, GetNames, (lldb::SBStringList &));
- LLDB_REGISTER_STATIC_METHOD(bool, SBBreakpoint, EventIsBreakpointEvent,
- (const lldb::SBEvent &));
- LLDB_REGISTER_STATIC_METHOD(lldb::BreakpointEventType, SBBreakpoint,
- GetBreakpointEventTypeFromEvent,
- (const lldb::SBEvent &));
- LLDB_REGISTER_STATIC_METHOD(lldb::SBBreakpoint, SBBreakpoint,
- GetBreakpointFromEvent,
- (const lldb::SBEvent &));
- LLDB_REGISTER_STATIC_METHOD(lldb::SBBreakpointLocation, SBBreakpoint,
- GetBreakpointLocationAtIndexFromEvent,
- (const lldb::SBEvent &, uint32_t));
- LLDB_REGISTER_STATIC_METHOD(uint32_t, SBBreakpoint,
- GetNumBreakpointLocationsFromEvent,
- (const lldb::SBEvent &));
- LLDB_REGISTER_METHOD_CONST(bool, SBBreakpoint, IsHardware, ());
-}
-
-template <>
-void RegisterMethods<SBBreakpointList>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBBreakpointList, (lldb::SBTarget &));
- LLDB_REGISTER_METHOD_CONST(size_t, SBBreakpointList, GetSize, ());
- LLDB_REGISTER_METHOD(lldb::SBBreakpoint, SBBreakpointList,
- GetBreakpointAtIndex, (size_t));
- LLDB_REGISTER_METHOD(lldb::SBBreakpoint, SBBreakpointList,
- FindBreakpointByID, (lldb::break_id_t));
- LLDB_REGISTER_METHOD(void, SBBreakpointList, Append,
- (const lldb::SBBreakpoint &));
- LLDB_REGISTER_METHOD(void, SBBreakpointList, AppendByID,
- (lldb::break_id_t));
- LLDB_REGISTER_METHOD(bool, SBBreakpointList, AppendIfUnique,
- (const lldb::SBBreakpoint &));
- LLDB_REGISTER_METHOD(void, SBBreakpointList, Clear, ());
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBBreakpointLocation.cpp b/contrib/llvm-project/lldb/source/API/SBBreakpointLocation.cpp
index 175120429925..914317437723 100644
--- a/contrib/llvm-project/lldb/source/API/SBBreakpointLocation.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBBreakpointLocation.cpp
@@ -7,13 +7,13 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBBreakpointLocation.h"
-#include "SBReproducerPrivate.h"
#include "lldb/API/SBAddress.h"
#include "lldb/API/SBDebugger.h"
#include "lldb/API/SBDefines.h"
#include "lldb/API/SBStream.h"
-#include "lldb/API/SBStructuredData.h"
#include "lldb/API/SBStringList.h"
+#include "lldb/API/SBStructuredData.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Breakpoint/Breakpoint.h"
#include "lldb/Breakpoint/BreakpointLocation.h"
@@ -31,32 +31,25 @@
using namespace lldb;
using namespace lldb_private;
-SBBreakpointLocation::SBBreakpointLocation() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBBreakpointLocation);
-}
+SBBreakpointLocation::SBBreakpointLocation() { LLDB_INSTRUMENT_VA(this); }
SBBreakpointLocation::SBBreakpointLocation(
const lldb::BreakpointLocationSP &break_loc_sp)
: m_opaque_wp(break_loc_sp) {
- LLDB_RECORD_CONSTRUCTOR(SBBreakpointLocation,
- (const lldb::BreakpointLocationSP &), break_loc_sp);
+ LLDB_INSTRUMENT_VA(this, break_loc_sp);
}
SBBreakpointLocation::SBBreakpointLocation(const SBBreakpointLocation &rhs)
: m_opaque_wp(rhs.m_opaque_wp) {
- LLDB_RECORD_CONSTRUCTOR(SBBreakpointLocation,
- (const lldb::SBBreakpointLocation &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
const SBBreakpointLocation &SBBreakpointLocation::
operator=(const SBBreakpointLocation &rhs) {
- LLDB_RECORD_METHOD(
- const lldb::SBBreakpointLocation &,
- SBBreakpointLocation, operator=,(const lldb::SBBreakpointLocation &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_wp = rhs.m_opaque_wp;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBBreakpointLocation::~SBBreakpointLocation() = default;
@@ -66,29 +59,28 @@ BreakpointLocationSP SBBreakpointLocation::GetSP() const {
}
bool SBBreakpointLocation::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBBreakpointLocation, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBBreakpointLocation::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBBreakpointLocation, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return bool(GetSP());
}
SBAddress SBBreakpointLocation::GetAddress() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBAddress, SBBreakpointLocation, GetAddress);
+ LLDB_INSTRUMENT_VA(this);
BreakpointLocationSP loc_sp = GetSP();
if (loc_sp) {
- return LLDB_RECORD_RESULT(SBAddress(loc_sp->GetAddress()));
+ return SBAddress(loc_sp->GetAddress());
}
- return LLDB_RECORD_RESULT(SBAddress());
+ return SBAddress();
}
addr_t SBBreakpointLocation::GetLoadAddress() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::addr_t, SBBreakpointLocation,
- GetLoadAddress);
+ LLDB_INSTRUMENT_VA(this);
addr_t ret_addr = LLDB_INVALID_ADDRESS;
BreakpointLocationSP loc_sp = GetSP();
@@ -103,7 +95,7 @@ addr_t SBBreakpointLocation::GetLoadAddress() {
}
void SBBreakpointLocation::SetEnabled(bool enabled) {
- LLDB_RECORD_METHOD(void, SBBreakpointLocation, SetEnabled, (bool), enabled);
+ LLDB_INSTRUMENT_VA(this, enabled);
BreakpointLocationSP loc_sp = GetSP();
if (loc_sp) {
@@ -114,7 +106,7 @@ void SBBreakpointLocation::SetEnabled(bool enabled) {
}
bool SBBreakpointLocation::IsEnabled() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBBreakpointLocation, IsEnabled);
+ LLDB_INSTRUMENT_VA(this);
BreakpointLocationSP loc_sp = GetSP();
if (loc_sp) {
@@ -126,7 +118,7 @@ bool SBBreakpointLocation::IsEnabled() {
}
uint32_t SBBreakpointLocation::GetHitCount() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBBreakpointLocation, GetHitCount);
+ LLDB_INSTRUMENT_VA(this);
BreakpointLocationSP loc_sp = GetSP();
if (loc_sp) {
@@ -138,7 +130,7 @@ uint32_t SBBreakpointLocation::GetHitCount() {
}
uint32_t SBBreakpointLocation::GetIgnoreCount() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBBreakpointLocation, GetIgnoreCount);
+ LLDB_INSTRUMENT_VA(this);
BreakpointLocationSP loc_sp = GetSP();
if (loc_sp) {
@@ -150,7 +142,7 @@ uint32_t SBBreakpointLocation::GetIgnoreCount() {
}
void SBBreakpointLocation::SetIgnoreCount(uint32_t n) {
- LLDB_RECORD_METHOD(void, SBBreakpointLocation, SetIgnoreCount, (uint32_t), n);
+ LLDB_INSTRUMENT_VA(this, n);
BreakpointLocationSP loc_sp = GetSP();
if (loc_sp) {
@@ -161,8 +153,7 @@ void SBBreakpointLocation::SetIgnoreCount(uint32_t n) {
}
void SBBreakpointLocation::SetCondition(const char *condition) {
- LLDB_RECORD_METHOD(void, SBBreakpointLocation, SetCondition, (const char *),
- condition);
+ LLDB_INSTRUMENT_VA(this, condition);
BreakpointLocationSP loc_sp = GetSP();
if (loc_sp) {
@@ -173,7 +164,7 @@ void SBBreakpointLocation::SetCondition(const char *condition) {
}
const char *SBBreakpointLocation::GetCondition() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBBreakpointLocation, GetCondition);
+ LLDB_INSTRUMENT_VA(this);
BreakpointLocationSP loc_sp = GetSP();
if (loc_sp) {
@@ -185,8 +176,7 @@ const char *SBBreakpointLocation::GetCondition() {
}
void SBBreakpointLocation::SetAutoContinue(bool auto_continue) {
- LLDB_RECORD_METHOD(void, SBBreakpointLocation, SetAutoContinue, (bool),
- auto_continue);
+ LLDB_INSTRUMENT_VA(this, auto_continue);
BreakpointLocationSP loc_sp = GetSP();
if (loc_sp) {
@@ -197,7 +187,7 @@ void SBBreakpointLocation::SetAutoContinue(bool auto_continue) {
}
bool SBBreakpointLocation::GetAutoContinue() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBBreakpointLocation, GetAutoContinue);
+ LLDB_INSTRUMENT_VA(this);
BreakpointLocationSP loc_sp = GetSP();
if (loc_sp) {
@@ -210,16 +200,13 @@ bool SBBreakpointLocation::GetAutoContinue() {
void SBBreakpointLocation::SetScriptCallbackFunction(
const char *callback_function_name) {
-LLDB_RECORD_METHOD(void, SBBreakpointLocation, SetScriptCallbackFunction,
- (const char *), callback_function_name);
+ LLDB_INSTRUMENT_VA(this, callback_function_name);
}
SBError SBBreakpointLocation::SetScriptCallbackFunction(
const char *callback_function_name,
SBStructuredData &extra_args) {
- LLDB_RECORD_METHOD(SBError, SBBreakpointLocation, SetScriptCallbackFunction,
- (const char *, SBStructuredData &), callback_function_name,
- extra_args);
+ LLDB_INSTRUMENT_VA(this, callback_function_name, extra_args);
SBError sb_error;
BreakpointLocationSP loc_sp = GetSP();
@@ -240,13 +227,12 @@ SBError SBBreakpointLocation::SetScriptCallbackFunction(
} else
sb_error.SetErrorString("invalid breakpoint");
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
SBError
SBBreakpointLocation::SetScriptCallbackBody(const char *callback_body_text) {
- LLDB_RECORD_METHOD(lldb::SBError, SBBreakpointLocation, SetScriptCallbackBody,
- (const char *), callback_body_text);
+ LLDB_INSTRUMENT_VA(this, callback_body_text);
BreakpointLocationSP loc_sp = GetSP();
@@ -265,12 +251,11 @@ SBBreakpointLocation::SetScriptCallbackBody(const char *callback_body_text) {
} else
sb_error.SetErrorString("invalid breakpoint");
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
void SBBreakpointLocation::SetCommandLineCommands(SBStringList &commands) {
- LLDB_RECORD_METHOD(void, SBBreakpointLocation, SetCommandLineCommands,
- (lldb::SBStringList &), commands);
+ LLDB_INSTRUMENT_VA(this, commands);
BreakpointLocationSP loc_sp = GetSP();
if (!loc_sp)
@@ -287,8 +272,7 @@ void SBBreakpointLocation::SetCommandLineCommands(SBStringList &commands) {
}
bool SBBreakpointLocation::GetCommandLineCommands(SBStringList &commands) {
- LLDB_RECORD_METHOD(bool, SBBreakpointLocation, GetCommandLineCommands,
- (lldb::SBStringList &), commands);
+ LLDB_INSTRUMENT_VA(this, commands);
BreakpointLocationSP loc_sp = GetSP();
if (!loc_sp)
@@ -302,8 +286,7 @@ bool SBBreakpointLocation::GetCommandLineCommands(SBStringList &commands) {
}
void SBBreakpointLocation::SetThreadID(tid_t thread_id) {
- LLDB_RECORD_METHOD(void, SBBreakpointLocation, SetThreadID, (lldb::tid_t),
- thread_id);
+ LLDB_INSTRUMENT_VA(this, thread_id);
BreakpointLocationSP loc_sp = GetSP();
if (loc_sp) {
@@ -314,7 +297,7 @@ void SBBreakpointLocation::SetThreadID(tid_t thread_id) {
}
tid_t SBBreakpointLocation::GetThreadID() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::tid_t, SBBreakpointLocation, GetThreadID);
+ LLDB_INSTRUMENT_VA(this);
tid_t tid = LLDB_INVALID_THREAD_ID;
BreakpointLocationSP loc_sp = GetSP();
@@ -327,8 +310,7 @@ tid_t SBBreakpointLocation::GetThreadID() {
}
void SBBreakpointLocation::SetThreadIndex(uint32_t index) {
- LLDB_RECORD_METHOD(void, SBBreakpointLocation, SetThreadIndex, (uint32_t),
- index);
+ LLDB_INSTRUMENT_VA(this, index);
BreakpointLocationSP loc_sp = GetSP();
if (loc_sp) {
@@ -339,8 +321,7 @@ void SBBreakpointLocation::SetThreadIndex(uint32_t index) {
}
uint32_t SBBreakpointLocation::GetThreadIndex() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBBreakpointLocation,
- GetThreadIndex);
+ LLDB_INSTRUMENT_VA(this);
uint32_t thread_idx = UINT32_MAX;
BreakpointLocationSP loc_sp = GetSP();
@@ -353,8 +334,7 @@ uint32_t SBBreakpointLocation::GetThreadIndex() const {
}
void SBBreakpointLocation::SetThreadName(const char *thread_name) {
- LLDB_RECORD_METHOD(void, SBBreakpointLocation, SetThreadName, (const char *),
- thread_name);
+ LLDB_INSTRUMENT_VA(this, thread_name);
BreakpointLocationSP loc_sp = GetSP();
if (loc_sp) {
@@ -365,8 +345,7 @@ void SBBreakpointLocation::SetThreadName(const char *thread_name) {
}
const char *SBBreakpointLocation::GetThreadName() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBBreakpointLocation,
- GetThreadName);
+ LLDB_INSTRUMENT_VA(this);
BreakpointLocationSP loc_sp = GetSP();
if (loc_sp) {
@@ -378,8 +357,7 @@ const char *SBBreakpointLocation::GetThreadName() const {
}
void SBBreakpointLocation::SetQueueName(const char *queue_name) {
- LLDB_RECORD_METHOD(void, SBBreakpointLocation, SetQueueName, (const char *),
- queue_name);
+ LLDB_INSTRUMENT_VA(this, queue_name);
BreakpointLocationSP loc_sp = GetSP();
if (loc_sp) {
@@ -390,8 +368,7 @@ void SBBreakpointLocation::SetQueueName(const char *queue_name) {
}
const char *SBBreakpointLocation::GetQueueName() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBBreakpointLocation,
- GetQueueName);
+ LLDB_INSTRUMENT_VA(this);
BreakpointLocationSP loc_sp = GetSP();
if (loc_sp) {
@@ -403,7 +380,7 @@ const char *SBBreakpointLocation::GetQueueName() const {
}
bool SBBreakpointLocation::IsResolved() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBBreakpointLocation, IsResolved);
+ LLDB_INSTRUMENT_VA(this);
BreakpointLocationSP loc_sp = GetSP();
if (loc_sp) {
@@ -422,9 +399,7 @@ void SBBreakpointLocation::SetLocation(
bool SBBreakpointLocation::GetDescription(SBStream &description,
DescriptionLevel level) {
- LLDB_RECORD_METHOD(bool, SBBreakpointLocation, GetDescription,
- (lldb::SBStream &, lldb::DescriptionLevel), description,
- level);
+ LLDB_INSTRUMENT_VA(this, description, level);
Stream &strm = description.ref();
BreakpointLocationSP loc_sp = GetSP();
@@ -441,7 +416,7 @@ bool SBBreakpointLocation::GetDescription(SBStream &description,
}
break_id_t SBBreakpointLocation::GetID() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::break_id_t, SBBreakpointLocation, GetID);
+ LLDB_INSTRUMENT_VA(this);
BreakpointLocationSP loc_sp = GetSP();
if (loc_sp) {
@@ -453,8 +428,7 @@ break_id_t SBBreakpointLocation::GetID() {
}
SBBreakpoint SBBreakpointLocation::GetBreakpoint() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBBreakpoint, SBBreakpointLocation,
- GetBreakpoint);
+ LLDB_INSTRUMENT_VA(this);
BreakpointLocationSP loc_sp = GetSP();
@@ -465,70 +439,5 @@ SBBreakpoint SBBreakpointLocation::GetBreakpoint() {
sb_bp = loc_sp->GetBreakpoint().shared_from_this();
}
- return LLDB_RECORD_RESULT(sb_bp);
-}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBBreakpointLocation>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBBreakpointLocation, ());
- LLDB_REGISTER_CONSTRUCTOR(SBBreakpointLocation,
- (const lldb::BreakpointLocationSP &));
- LLDB_REGISTER_CONSTRUCTOR(SBBreakpointLocation,
- (const lldb::SBBreakpointLocation &));
- LLDB_REGISTER_METHOD(
- const lldb::SBBreakpointLocation &,
- SBBreakpointLocation, operator=,(const lldb::SBBreakpointLocation &));
- LLDB_REGISTER_METHOD_CONST(bool, SBBreakpointLocation, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBBreakpointLocation, operator bool, ());
- LLDB_REGISTER_METHOD(lldb::SBAddress, SBBreakpointLocation, GetAddress, ());
- LLDB_REGISTER_METHOD(lldb::addr_t, SBBreakpointLocation, GetLoadAddress,
- ());
- LLDB_REGISTER_METHOD(void, SBBreakpointLocation, SetEnabled, (bool));
- LLDB_REGISTER_METHOD(bool, SBBreakpointLocation, IsEnabled, ());
- LLDB_REGISTER_METHOD(uint32_t, SBBreakpointLocation, GetHitCount, ());
- LLDB_REGISTER_METHOD(uint32_t, SBBreakpointLocation, GetIgnoreCount, ());
- LLDB_REGISTER_METHOD(void, SBBreakpointLocation, SetIgnoreCount,
- (uint32_t));
- LLDB_REGISTER_METHOD(void, SBBreakpointLocation, SetCondition,
- (const char *));
- LLDB_REGISTER_METHOD(const char *, SBBreakpointLocation, GetCondition, ());
- LLDB_REGISTER_METHOD(void, SBBreakpointLocation, SetAutoContinue, (bool));
- LLDB_REGISTER_METHOD(bool, SBBreakpointLocation, GetAutoContinue, ());
- LLDB_REGISTER_METHOD(void, SBBreakpointLocation, SetScriptCallbackFunction,
- (const char *));
- LLDB_REGISTER_METHOD(SBError, SBBreakpointLocation, SetScriptCallbackFunction,
- (const char *, SBStructuredData &));
- LLDB_REGISTER_METHOD(lldb::SBError, SBBreakpointLocation,
- SetScriptCallbackBody, (const char *));
- LLDB_REGISTER_METHOD(void, SBBreakpointLocation, SetCommandLineCommands,
- (lldb::SBStringList &));
- LLDB_REGISTER_METHOD(bool, SBBreakpointLocation, GetCommandLineCommands,
- (lldb::SBStringList &));
- LLDB_REGISTER_METHOD(void, SBBreakpointLocation, SetThreadID,
- (lldb::tid_t));
- LLDB_REGISTER_METHOD(lldb::tid_t, SBBreakpointLocation, GetThreadID, ());
- LLDB_REGISTER_METHOD(void, SBBreakpointLocation, SetThreadIndex,
- (uint32_t));
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBBreakpointLocation, GetThreadIndex,
- ());
- LLDB_REGISTER_METHOD(void, SBBreakpointLocation, SetThreadName,
- (const char *));
- LLDB_REGISTER_METHOD_CONST(const char *, SBBreakpointLocation,
- GetThreadName, ());
- LLDB_REGISTER_METHOD(void, SBBreakpointLocation, SetQueueName,
- (const char *));
- LLDB_REGISTER_METHOD_CONST(const char *, SBBreakpointLocation, GetQueueName,
- ());
- LLDB_REGISTER_METHOD(bool, SBBreakpointLocation, IsResolved, ());
- LLDB_REGISTER_METHOD(bool, SBBreakpointLocation, GetDescription,
- (lldb::SBStream &, lldb::DescriptionLevel));
- LLDB_REGISTER_METHOD(lldb::break_id_t, SBBreakpointLocation, GetID, ());
- LLDB_REGISTER_METHOD(lldb::SBBreakpoint, SBBreakpointLocation,
- GetBreakpoint, ());
-}
-
-}
+ return sb_bp;
}
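The SBBreakpointLocation.cpp changes above show the pattern this merge applies across the lldb SB API: each LLDB_RECORD_* macro, which had to restate the method's return type, class, and argument list for the reproducer registry, becomes a single variadic LLDB_INSTRUMENT_VA(this, args...) call from lldb/Utility/Instrumentation.h, and LLDB_RECORD_RESULT wrappers around return values are dropped. A minimal before/after sketch of that shape, using a hypothetical SBExample::DoWork method rather than anything in this patch:

    // Before (reproducer macros): the signature is spelled out so the call can
    // be registered and replayed, and the result is wrapped for capture.
    SBError SBExample::DoWork(const char *text) {
      LLDB_RECORD_METHOD(lldb::SBError, SBExample, DoWork, (const char *), text);
      SBError sb_error;
      // ... do the work and populate sb_error ...
      return LLDB_RECORD_RESULT(sb_error);
    }

    // After (instrumentation): one macro logs the call and its arguments, and
    // the value is returned directly.
    SBError SBExample::DoWork(const char *text) {
      LLDB_INSTRUMENT_VA(this, text);
      SBError sb_error;
      // ... do the work and populate sb_error ...
      return sb_error;
    }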
diff --git a/contrib/llvm-project/lldb/source/API/SBBreakpointName.cpp b/contrib/llvm-project/lldb/source/API/SBBreakpointName.cpp
index b5c700c78bbb..796229d04ce4 100644
--- a/contrib/llvm-project/lldb/source/API/SBBreakpointName.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBBreakpointName.cpp
@@ -7,13 +7,13 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBBreakpointName.h"
-#include "SBReproducerPrivate.h"
#include "lldb/API/SBDebugger.h"
#include "lldb/API/SBError.h"
#include "lldb/API/SBStream.h"
#include "lldb/API/SBStringList.h"
#include "lldb/API/SBStructuredData.h"
#include "lldb/API/SBTarget.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Breakpoint/BreakpointName.h"
#include "lldb/Breakpoint/StoppointCallbackContext.h"
@@ -107,13 +107,10 @@ lldb_private::BreakpointName *SBBreakpointNameImpl::GetBreakpointName() const {
} // namespace lldb
-SBBreakpointName::SBBreakpointName() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBBreakpointName);
-}
+SBBreakpointName::SBBreakpointName() { LLDB_INSTRUMENT_VA(this); }
SBBreakpointName::SBBreakpointName(SBTarget &sb_target, const char *name) {
- LLDB_RECORD_CONSTRUCTOR(SBBreakpointName, (lldb::SBTarget &, const char *),
- sb_target, name);
+ LLDB_INSTRUMENT_VA(this, sb_target, name);
m_impl_up = std::make_unique<SBBreakpointNameImpl>(sb_target, name);
// Call FindBreakpointName here to make sure the name is valid, reset if not:
@@ -123,8 +120,7 @@ SBBreakpointName::SBBreakpointName(SBTarget &sb_target, const char *name) {
}
SBBreakpointName::SBBreakpointName(SBBreakpoint &sb_bkpt, const char *name) {
- LLDB_RECORD_CONSTRUCTOR(SBBreakpointName,
- (lldb::SBBreakpoint &, const char *), sb_bkpt, name);
+ LLDB_INSTRUMENT_VA(this, sb_bkpt, name);
if (!sb_bkpt.IsValid()) {
m_impl_up.reset();
@@ -149,8 +145,7 @@ SBBreakpointName::SBBreakpointName(SBBreakpoint &sb_bkpt, const char *name) {
}
SBBreakpointName::SBBreakpointName(const SBBreakpointName &rhs) {
- LLDB_RECORD_CONSTRUCTOR(SBBreakpointName, (const lldb::SBBreakpointName &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (!rhs.m_impl_up)
return;
@@ -163,40 +158,36 @@ SBBreakpointName::~SBBreakpointName() = default;
const SBBreakpointName &SBBreakpointName::
operator=(const SBBreakpointName &rhs) {
- LLDB_RECORD_METHOD(
- const lldb::SBBreakpointName &,
- SBBreakpointName, operator=,(const lldb::SBBreakpointName &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (!rhs.m_impl_up) {
m_impl_up.reset();
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
m_impl_up = std::make_unique<SBBreakpointNameImpl>(rhs.m_impl_up->GetTarget(),
rhs.m_impl_up->GetName());
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
bool SBBreakpointName::operator==(const lldb::SBBreakpointName &rhs) {
- LLDB_RECORD_METHOD(
- bool, SBBreakpointName, operator==,(const lldb::SBBreakpointName &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
return *m_impl_up == *rhs.m_impl_up;
}
bool SBBreakpointName::operator!=(const lldb::SBBreakpointName &rhs) {
- LLDB_RECORD_METHOD(
- bool, SBBreakpointName, operator!=,(const lldb::SBBreakpointName &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
return *m_impl_up != *rhs.m_impl_up;
}
bool SBBreakpointName::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBBreakpointName, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBBreakpointName::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBBreakpointName, operator bool);
+ LLDB_INSTRUMENT_VA(this);
if (!m_impl_up)
return false;
@@ -204,7 +195,7 @@ SBBreakpointName::operator bool() const {
}
const char *SBBreakpointName::GetName() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBBreakpointName, GetName);
+ LLDB_INSTRUMENT_VA(this);
if (!m_impl_up)
return "<Invalid Breakpoint Name Object>";
@@ -212,7 +203,7 @@ const char *SBBreakpointName::GetName() const {
}
void SBBreakpointName::SetEnabled(bool enable) {
- LLDB_RECORD_METHOD(void, SBBreakpointName, SetEnabled, (bool), enable);
+ LLDB_INSTRUMENT_VA(this, enable);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -236,7 +227,7 @@ void SBBreakpointName::UpdateName(BreakpointName &bp_name) {
}
bool SBBreakpointName::IsEnabled() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBBreakpointName, IsEnabled);
+ LLDB_INSTRUMENT_VA(this);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -249,7 +240,7 @@ bool SBBreakpointName::IsEnabled() {
}
void SBBreakpointName::SetOneShot(bool one_shot) {
- LLDB_RECORD_METHOD(void, SBBreakpointName, SetOneShot, (bool), one_shot);
+ LLDB_INSTRUMENT_VA(this, one_shot);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -263,7 +254,7 @@ void SBBreakpointName::SetOneShot(bool one_shot) {
}
bool SBBreakpointName::IsOneShot() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBBreakpointName, IsOneShot);
+ LLDB_INSTRUMENT_VA(this);
const BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -276,7 +267,7 @@ bool SBBreakpointName::IsOneShot() const {
}
void SBBreakpointName::SetIgnoreCount(uint32_t count) {
- LLDB_RECORD_METHOD(void, SBBreakpointName, SetIgnoreCount, (uint32_t), count);
+ LLDB_INSTRUMENT_VA(this, count);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -290,7 +281,7 @@ void SBBreakpointName::SetIgnoreCount(uint32_t count) {
}
uint32_t SBBreakpointName::GetIgnoreCount() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBBreakpointName, GetIgnoreCount);
+ LLDB_INSTRUMENT_VA(this);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -303,8 +294,7 @@ uint32_t SBBreakpointName::GetIgnoreCount() const {
}
void SBBreakpointName::SetCondition(const char *condition) {
- LLDB_RECORD_METHOD(void, SBBreakpointName, SetCondition, (const char *),
- condition);
+ LLDB_INSTRUMENT_VA(this, condition);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -318,7 +308,7 @@ void SBBreakpointName::SetCondition(const char *condition) {
}
const char *SBBreakpointName::GetCondition() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBBreakpointName, GetCondition);
+ LLDB_INSTRUMENT_VA(this);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -331,8 +321,7 @@ const char *SBBreakpointName::GetCondition() {
}
void SBBreakpointName::SetAutoContinue(bool auto_continue) {
- LLDB_RECORD_METHOD(void, SBBreakpointName, SetAutoContinue, (bool),
- auto_continue);
+ LLDB_INSTRUMENT_VA(this, auto_continue);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -346,7 +335,7 @@ void SBBreakpointName::SetAutoContinue(bool auto_continue) {
}
bool SBBreakpointName::GetAutoContinue() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBBreakpointName, GetAutoContinue);
+ LLDB_INSTRUMENT_VA(this);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -359,7 +348,7 @@ bool SBBreakpointName::GetAutoContinue() {
}
void SBBreakpointName::SetThreadID(tid_t tid) {
- LLDB_RECORD_METHOD(void, SBBreakpointName, SetThreadID, (lldb::tid_t), tid);
+ LLDB_INSTRUMENT_VA(this, tid);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -373,7 +362,7 @@ void SBBreakpointName::SetThreadID(tid_t tid) {
}
tid_t SBBreakpointName::GetThreadID() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::tid_t, SBBreakpointName, GetThreadID);
+ LLDB_INSTRUMENT_VA(this);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -386,7 +375,7 @@ tid_t SBBreakpointName::GetThreadID() {
}
void SBBreakpointName::SetThreadIndex(uint32_t index) {
- LLDB_RECORD_METHOD(void, SBBreakpointName, SetThreadIndex, (uint32_t), index);
+ LLDB_INSTRUMENT_VA(this, index);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -400,7 +389,7 @@ void SBBreakpointName::SetThreadIndex(uint32_t index) {
}
uint32_t SBBreakpointName::GetThreadIndex() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBBreakpointName, GetThreadIndex);
+ LLDB_INSTRUMENT_VA(this);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -413,8 +402,7 @@ uint32_t SBBreakpointName::GetThreadIndex() const {
}
void SBBreakpointName::SetThreadName(const char *thread_name) {
- LLDB_RECORD_METHOD(void, SBBreakpointName, SetThreadName, (const char *),
- thread_name);
+ LLDB_INSTRUMENT_VA(this, thread_name);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -428,8 +416,7 @@ void SBBreakpointName::SetThreadName(const char *thread_name) {
}
const char *SBBreakpointName::GetThreadName() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBBreakpointName,
- GetThreadName);
+ LLDB_INSTRUMENT_VA(this);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -442,8 +429,7 @@ const char *SBBreakpointName::GetThreadName() const {
}
void SBBreakpointName::SetQueueName(const char *queue_name) {
- LLDB_RECORD_METHOD(void, SBBreakpointName, SetQueueName, (const char *),
- queue_name);
+ LLDB_INSTRUMENT_VA(this, queue_name);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -457,8 +443,7 @@ void SBBreakpointName::SetQueueName(const char *queue_name) {
}
const char *SBBreakpointName::GetQueueName() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBBreakpointName,
- GetQueueName);
+ LLDB_INSTRUMENT_VA(this);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -471,8 +456,7 @@ const char *SBBreakpointName::GetQueueName() const {
}
void SBBreakpointName::SetCommandLineCommands(SBStringList &commands) {
- LLDB_RECORD_METHOD(void, SBBreakpointName, SetCommandLineCommands,
- (lldb::SBStringList &), commands);
+ LLDB_INSTRUMENT_VA(this, commands);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -491,8 +475,7 @@ void SBBreakpointName::SetCommandLineCommands(SBStringList &commands) {
}
bool SBBreakpointName::GetCommandLineCommands(SBStringList &commands) {
- LLDB_RECORD_METHOD(bool, SBBreakpointName, GetCommandLineCommands,
- (lldb::SBStringList &), commands);
+ LLDB_INSTRUMENT_VA(this, commands);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -507,8 +490,7 @@ bool SBBreakpointName::GetCommandLineCommands(SBStringList &commands) {
}
const char *SBBreakpointName::GetHelpString() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBBreakpointName,
- GetHelpString);
+ LLDB_INSTRUMENT_VA(this);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -518,8 +500,7 @@ const char *SBBreakpointName::GetHelpString() const {
}
void SBBreakpointName::SetHelpString(const char *help_string) {
- LLDB_RECORD_METHOD(void, SBBreakpointName, SetHelpString, (const char *),
- help_string);
+ LLDB_INSTRUMENT_VA(this, help_string);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -532,8 +513,7 @@ void SBBreakpointName::SetHelpString(const char *help_string) {
}
bool SBBreakpointName::GetDescription(SBStream &s) {
- LLDB_RECORD_METHOD(bool, SBBreakpointName, GetDescription, (lldb::SBStream &),
- s);
+ LLDB_INSTRUMENT_VA(this, s);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -550,8 +530,7 @@ bool SBBreakpointName::GetDescription(SBStream &s) {
void SBBreakpointName::SetCallback(SBBreakpointHitCallback callback,
void *baton) {
- LLDB_RECORD_DUMMY(void, SBBreakpointName, SetCallback,
- (lldb::SBBreakpointHitCallback, void *), callback, baton);
+ LLDB_INSTRUMENT_VA(this, callback, baton);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -569,8 +548,7 @@ void SBBreakpointName::SetCallback(SBBreakpointHitCallback callback,
void SBBreakpointName::SetScriptCallbackFunction(
const char *callback_function_name) {
-LLDB_RECORD_METHOD(void, SBBreakpointName, SetScriptCallbackFunction,
- (const char *), callback_function_name);
+ LLDB_INSTRUMENT_VA(this, callback_function_name);
SBStructuredData empty_args;
SetScriptCallbackFunction(callback_function_name, empty_args);
}
@@ -578,14 +556,12 @@ LLDB_RECORD_METHOD(void, SBBreakpointName, SetScriptCallbackFunction,
SBError SBBreakpointName::SetScriptCallbackFunction(
const char *callback_function_name,
SBStructuredData &extra_args) {
- LLDB_RECORD_METHOD(SBError, SBBreakpointName, SetScriptCallbackFunction,
- (const char *, SBStructuredData &),
- callback_function_name, extra_args);
+ LLDB_INSTRUMENT_VA(this, callback_function_name, extra_args);
SBError sb_error;
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name) {
sb_error.SetErrorString("unrecognized breakpoint name");
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
std::lock_guard<std::recursive_mutex> guard(
@@ -601,18 +577,17 @@ SBError SBBreakpointName::SetScriptCallbackFunction(
extra_args.m_impl_up->GetObjectSP());
sb_error.SetError(error);
UpdateName(*bp_name);
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
SBError
SBBreakpointName::SetScriptCallbackBody(const char *callback_body_text) {
- LLDB_RECORD_METHOD(lldb::SBError, SBBreakpointName, SetScriptCallbackBody,
- (const char *), callback_body_text);
+ LLDB_INSTRUMENT_VA(this, callback_body_text);
SBError sb_error;
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
std::lock_guard<std::recursive_mutex> guard(
m_impl_up->GetTarget()->GetAPIMutex());
@@ -627,11 +602,11 @@ SBBreakpointName::SetScriptCallbackBody(const char *callback_body_text) {
if (!sb_error.Fail())
UpdateName(*bp_name);
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
bool SBBreakpointName::GetAllowList() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBBreakpointName, GetAllowList);
+ LLDB_INSTRUMENT_VA(this);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -640,8 +615,7 @@ bool SBBreakpointName::GetAllowList() const {
}
void SBBreakpointName::SetAllowList(bool value) {
- LLDB_RECORD_METHOD(void, SBBreakpointName, SetAllowList, (bool), value);
-
+ LLDB_INSTRUMENT_VA(this, value);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -650,7 +624,7 @@ void SBBreakpointName::SetAllowList(bool value) {
}
bool SBBreakpointName::GetAllowDelete() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBBreakpointName, GetAllowDelete);
+ LLDB_INSTRUMENT_VA(this);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -659,8 +633,7 @@ bool SBBreakpointName::GetAllowDelete() {
}
void SBBreakpointName::SetAllowDelete(bool value) {
- LLDB_RECORD_METHOD(void, SBBreakpointName, SetAllowDelete, (bool), value);
-
+ LLDB_INSTRUMENT_VA(this, value);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -669,7 +642,7 @@ void SBBreakpointName::SetAllowDelete(bool value) {
}
bool SBBreakpointName::GetAllowDisable() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBBreakpointName, GetAllowDisable);
+ LLDB_INSTRUMENT_VA(this);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -678,7 +651,7 @@ bool SBBreakpointName::GetAllowDisable() {
}
void SBBreakpointName::SetAllowDisable(bool value) {
- LLDB_RECORD_METHOD(void, SBBreakpointName, SetAllowDisable, (bool), value);
+ LLDB_INSTRUMENT_VA(this, value);
BreakpointName *bp_name = GetBreakpointName();
if (!bp_name)
@@ -692,72 +665,3 @@ lldb_private::BreakpointName *SBBreakpointName::GetBreakpointName() const
return nullptr;
return m_impl_up->GetBreakpointName();
}
-
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBBreakpointName>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBBreakpointName, ());
- LLDB_REGISTER_CONSTRUCTOR(SBBreakpointName,
- (lldb::SBTarget &, const char *));
- LLDB_REGISTER_CONSTRUCTOR(SBBreakpointName,
- (lldb::SBBreakpoint &, const char *));
- LLDB_REGISTER_CONSTRUCTOR(SBBreakpointName,
- (const lldb::SBBreakpointName &));
- LLDB_REGISTER_METHOD(
- const lldb::SBBreakpointName &,
- SBBreakpointName, operator=,(const lldb::SBBreakpointName &));
- LLDB_REGISTER_METHOD(
- bool, SBBreakpointName, operator==,(const lldb::SBBreakpointName &));
- LLDB_REGISTER_METHOD(
- bool, SBBreakpointName, operator!=,(const lldb::SBBreakpointName &));
- LLDB_REGISTER_METHOD_CONST(bool, SBBreakpointName, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBBreakpointName, operator bool, ());
- LLDB_REGISTER_METHOD_CONST(const char *, SBBreakpointName, GetName, ());
- LLDB_REGISTER_METHOD(void, SBBreakpointName, SetEnabled, (bool));
- LLDB_REGISTER_METHOD(bool, SBBreakpointName, IsEnabled, ());
- LLDB_REGISTER_METHOD(void, SBBreakpointName, SetOneShot, (bool));
- LLDB_REGISTER_METHOD_CONST(bool, SBBreakpointName, IsOneShot, ());
- LLDB_REGISTER_METHOD(void, SBBreakpointName, SetIgnoreCount, (uint32_t));
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBBreakpointName, GetIgnoreCount, ());
- LLDB_REGISTER_METHOD(void, SBBreakpointName, SetCondition, (const char *));
- LLDB_REGISTER_METHOD(const char *, SBBreakpointName, GetCondition, ());
- LLDB_REGISTER_METHOD(void, SBBreakpointName, SetAutoContinue, (bool));
- LLDB_REGISTER_METHOD(bool, SBBreakpointName, GetAutoContinue, ());
- LLDB_REGISTER_METHOD(void, SBBreakpointName, SetThreadID, (lldb::tid_t));
- LLDB_REGISTER_METHOD(lldb::tid_t, SBBreakpointName, GetThreadID, ());
- LLDB_REGISTER_METHOD(void, SBBreakpointName, SetThreadIndex, (uint32_t));
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBBreakpointName, GetThreadIndex, ());
- LLDB_REGISTER_METHOD(void, SBBreakpointName, SetThreadName, (const char *));
- LLDB_REGISTER_METHOD_CONST(const char *, SBBreakpointName, GetThreadName,
- ());
- LLDB_REGISTER_METHOD(void, SBBreakpointName, SetQueueName, (const char *));
- LLDB_REGISTER_METHOD_CONST(const char *, SBBreakpointName, GetQueueName,
- ());
- LLDB_REGISTER_METHOD(void, SBBreakpointName, SetCommandLineCommands,
- (lldb::SBStringList &));
- LLDB_REGISTER_METHOD(bool, SBBreakpointName, GetCommandLineCommands,
- (lldb::SBStringList &));
- LLDB_REGISTER_METHOD_CONST(const char *, SBBreakpointName, GetHelpString,
- ());
- LLDB_REGISTER_METHOD(void, SBBreakpointName, SetHelpString, (const char *));
- LLDB_REGISTER_METHOD(bool, SBBreakpointName, GetDescription,
- (lldb::SBStream &));
- LLDB_REGISTER_METHOD(void, SBBreakpointName, SetScriptCallbackFunction,
- (const char *));
- LLDB_REGISTER_METHOD(SBError, SBBreakpointName, SetScriptCallbackFunction,
- (const char *, SBStructuredData &));
- LLDB_REGISTER_METHOD(lldb::SBError, SBBreakpointName, SetScriptCallbackBody,
- (const char *));
- LLDB_REGISTER_METHOD_CONST(bool, SBBreakpointName, GetAllowList, ());
- LLDB_REGISTER_METHOD(void, SBBreakpointName, SetAllowList, (bool));
- LLDB_REGISTER_METHOD(bool, SBBreakpointName, GetAllowDelete, ());
- LLDB_REGISTER_METHOD(void, SBBreakpointName, SetAllowDelete, (bool));
- LLDB_REGISTER_METHOD(bool, SBBreakpointName, GetAllowDisable, ());
- LLDB_REGISTER_METHOD(void, SBBreakpointName, SetAllowDisable, (bool));
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBBreakpointOptionCommon.cpp b/contrib/llvm-project/lldb/source/API/SBBreakpointOptionCommon.cpp
index 2ee47ff7795c..685ed172c820 100644
--- a/contrib/llvm-project/lldb/source/API/SBBreakpointOptionCommon.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBBreakpointOptionCommon.cpp
@@ -26,6 +26,7 @@
#include "lldb/Target/Target.h"
#include "lldb/Target/Thread.h"
#include "lldb/Target/ThreadSpec.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Utility/Log.h"
#include "lldb/Utility/Stream.h"
@@ -38,19 +39,18 @@
using namespace lldb;
using namespace lldb_private;
-SBBreakpointCallbackBaton::SBBreakpointCallbackBaton(SBBreakpointHitCallback
- callback,
- void *baton)
- : TypedBaton(std::make_unique<CallbackData>()) {
- getItem()->callback = callback;
- getItem()->callback_baton = baton;
- }
+SBBreakpointCallbackBaton::SBBreakpointCallbackBaton(
+ SBBreakpointHitCallback callback, void *baton)
+ : TypedBaton(std::make_unique<CallbackData>()) {
+ LLDB_INSTRUMENT_VA(this, callback, baton);
+ getItem()->callback = callback;
+ getItem()->callback_baton = baton;
+}
- bool SBBreakpointCallbackBaton::PrivateBreakpointHitCallback(void *baton,
- StoppointCallbackContext *ctx,
- lldb::user_id_t break_id,
- lldb::user_id_t break_loc_id)
-{
+bool SBBreakpointCallbackBaton::PrivateBreakpointHitCallback(
+ void *baton, StoppointCallbackContext *ctx, lldb::user_id_t break_id,
+ lldb::user_id_t break_loc_id) {
+ LLDB_INSTRUMENT_VA(baton, ctx, break_id, break_loc_id);
ExecutionContext exe_ctx(ctx->exe_ctx_ref);
BreakpointSP bp_sp(
exe_ctx.GetTargetRef().GetBreakpointList().FindBreakpointByID(break_id));
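The SBBreakpointOptionCommon.cpp hunk above also shows that the new macro is not limited to instance methods: the static PrivateBreakpointHitCallback passes only its arguments, with no this pointer. A short sketch of that variant, assuming a hypothetical class and callback name (only the macro and the two includes come from this patch):

    #include "lldb/Utility/Instrumentation.h"
    #include "lldb/lldb-types.h"

    // Static member or free-function callback: record the arguments only.
    static bool ExampleHitCallback(void *baton, lldb::user_id_t break_id) {
      LLDB_INSTRUMENT_VA(baton, break_id);
      return baton != nullptr; // hypothetical: stop only when a baton is set
    }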
diff --git a/contrib/llvm-project/lldb/source/API/SBBroadcaster.cpp b/contrib/llvm-project/lldb/source/API/SBBroadcaster.cpp
index 2e6d837f102b..f145bc6e99d8 100644
--- a/contrib/llvm-project/lldb/source/API/SBBroadcaster.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBBroadcaster.cpp
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#include "SBReproducerPrivate.h"
#include "lldb/Utility/Broadcaster.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/API/SBBroadcaster.h"
#include "lldb/API/SBEvent.h"
@@ -16,13 +16,11 @@
using namespace lldb;
using namespace lldb_private;
-SBBroadcaster::SBBroadcaster() : m_opaque_sp() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBBroadcaster);
-}
+SBBroadcaster::SBBroadcaster() { LLDB_INSTRUMENT_VA(this); }
SBBroadcaster::SBBroadcaster(const char *name)
: m_opaque_sp(new Broadcaster(nullptr, name)), m_opaque_ptr(nullptr) {
- LLDB_RECORD_CONSTRUCTOR(SBBroadcaster, (const char *), name);
+ LLDB_INSTRUMENT_VA(this, name);
m_opaque_ptr = m_opaque_sp.get();
}
@@ -32,26 +30,23 @@ SBBroadcaster::SBBroadcaster(lldb_private::Broadcaster *broadcaster, bool owns)
SBBroadcaster::SBBroadcaster(const SBBroadcaster &rhs)
: m_opaque_sp(rhs.m_opaque_sp), m_opaque_ptr(rhs.m_opaque_ptr) {
- LLDB_RECORD_CONSTRUCTOR(SBBroadcaster, (const lldb::SBBroadcaster &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
const SBBroadcaster &SBBroadcaster::operator=(const SBBroadcaster &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBBroadcaster &,
- SBBroadcaster, operator=,(const lldb::SBBroadcaster &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs) {
m_opaque_sp = rhs.m_opaque_sp;
m_opaque_ptr = rhs.m_opaque_ptr;
}
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBBroadcaster::~SBBroadcaster() { reset(nullptr, false); }
void SBBroadcaster::BroadcastEventByType(uint32_t event_type, bool unique) {
- LLDB_RECORD_METHOD(void, SBBroadcaster, BroadcastEventByType,
- (uint32_t, bool), event_type, unique);
+ LLDB_INSTRUMENT_VA(this, event_type, unique);
if (m_opaque_ptr == nullptr)
return;
@@ -63,8 +58,7 @@ void SBBroadcaster::BroadcastEventByType(uint32_t event_type, bool unique) {
}
void SBBroadcaster::BroadcastEvent(const SBEvent &event, bool unique) {
- LLDB_RECORD_METHOD(void, SBBroadcaster, BroadcastEvent,
- (const lldb::SBEvent &, bool), event, unique);
+ LLDB_INSTRUMENT_VA(this, event, unique);
if (m_opaque_ptr == nullptr)
return;
@@ -78,9 +72,7 @@ void SBBroadcaster::BroadcastEvent(const SBEvent &event, bool unique) {
void SBBroadcaster::AddInitialEventsToListener(const SBListener &listener,
uint32_t requested_events) {
- LLDB_RECORD_METHOD(void, SBBroadcaster, AddInitialEventsToListener,
- (const lldb::SBListener &, uint32_t), listener,
- requested_events);
+ LLDB_INSTRUMENT_VA(this, listener, requested_events);
if (m_opaque_ptr)
m_opaque_ptr->AddInitialEventsToListener(listener.m_opaque_sp,
@@ -89,9 +81,7 @@ void SBBroadcaster::AddInitialEventsToListener(const SBListener &listener,
uint32_t SBBroadcaster::AddListener(const SBListener &listener,
uint32_t event_mask) {
- LLDB_RECORD_METHOD(uint32_t, SBBroadcaster, AddListener,
- (const lldb::SBListener &, uint32_t), listener,
- event_mask);
+ LLDB_INSTRUMENT_VA(this, listener, event_mask);
if (m_opaque_ptr)
return m_opaque_ptr->AddListener(listener.m_opaque_sp, event_mask);
@@ -99,7 +89,7 @@ uint32_t SBBroadcaster::AddListener(const SBListener &listener,
}
const char *SBBroadcaster::GetName() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBBroadcaster, GetName);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_ptr)
return m_opaque_ptr->GetBroadcasterName().GetCString();
@@ -107,8 +97,7 @@ const char *SBBroadcaster::GetName() const {
}
bool SBBroadcaster::EventTypeHasListeners(uint32_t event_type) {
- LLDB_RECORD_METHOD(bool, SBBroadcaster, EventTypeHasListeners, (uint32_t),
- event_type);
+ LLDB_INSTRUMENT_VA(this, event_type);
if (m_opaque_ptr)
return m_opaque_ptr->EventTypeHasListeners(event_type);
@@ -117,9 +106,7 @@ bool SBBroadcaster::EventTypeHasListeners(uint32_t event_type) {
bool SBBroadcaster::RemoveListener(const SBListener &listener,
uint32_t event_mask) {
- LLDB_RECORD_METHOD(bool, SBBroadcaster, RemoveListener,
- (const lldb::SBListener &, uint32_t), listener,
- event_mask);
+ LLDB_INSTRUMENT_VA(this, listener, event_mask);
if (m_opaque_ptr)
return m_opaque_ptr->RemoveListener(listener.m_opaque_sp, event_mask);
@@ -137,77 +124,36 @@ void SBBroadcaster::reset(Broadcaster *broadcaster, bool owns) {
}
bool SBBroadcaster::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBBroadcaster, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBBroadcaster::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBBroadcaster, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_ptr != nullptr;
}
void SBBroadcaster::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBBroadcaster, Clear);
+ LLDB_INSTRUMENT_VA(this);
m_opaque_sp.reset();
m_opaque_ptr = nullptr;
}
bool SBBroadcaster::operator==(const SBBroadcaster &rhs) const {
- LLDB_RECORD_METHOD_CONST(
- bool, SBBroadcaster, operator==,(const lldb::SBBroadcaster &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
return m_opaque_ptr == rhs.m_opaque_ptr;
}
bool SBBroadcaster::operator!=(const SBBroadcaster &rhs) const {
- LLDB_RECORD_METHOD_CONST(
- bool, SBBroadcaster, operator!=,(const lldb::SBBroadcaster &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
return m_opaque_ptr != rhs.m_opaque_ptr;
}
bool SBBroadcaster::operator<(const SBBroadcaster &rhs) const {
- LLDB_RECORD_METHOD_CONST(
- bool, SBBroadcaster, operator<,(const lldb::SBBroadcaster &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
return m_opaque_ptr < rhs.m_opaque_ptr;
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBBroadcaster>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBBroadcaster, ());
- LLDB_REGISTER_CONSTRUCTOR(SBBroadcaster, (const char *));
- LLDB_REGISTER_CONSTRUCTOR(SBBroadcaster, (const lldb::SBBroadcaster &));
- LLDB_REGISTER_METHOD(
- const lldb::SBBroadcaster &,
- SBBroadcaster, operator=,(const lldb::SBBroadcaster &));
- LLDB_REGISTER_METHOD(void, SBBroadcaster, BroadcastEventByType,
- (uint32_t, bool));
- LLDB_REGISTER_METHOD(void, SBBroadcaster, BroadcastEvent,
- (const lldb::SBEvent &, bool));
- LLDB_REGISTER_METHOD(void, SBBroadcaster, AddInitialEventsToListener,
- (const lldb::SBListener &, uint32_t));
- LLDB_REGISTER_METHOD(uint32_t, SBBroadcaster, AddListener,
- (const lldb::SBListener &, uint32_t));
- LLDB_REGISTER_METHOD_CONST(const char *, SBBroadcaster, GetName, ());
- LLDB_REGISTER_METHOD(bool, SBBroadcaster, EventTypeHasListeners,
- (uint32_t));
- LLDB_REGISTER_METHOD(bool, SBBroadcaster, RemoveListener,
- (const lldb::SBListener &, uint32_t));
- LLDB_REGISTER_METHOD_CONST(bool, SBBroadcaster, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBBroadcaster, operator bool, ());
- LLDB_REGISTER_METHOD(void, SBBroadcaster, Clear, ());
- LLDB_REGISTER_METHOD_CONST(
- bool, SBBroadcaster, operator==,(const lldb::SBBroadcaster &));
- LLDB_REGISTER_METHOD_CONST(
- bool, SBBroadcaster, operator!=,(const lldb::SBBroadcaster &));
- LLDB_REGISTER_METHOD_CONST(
- bool, SBBroadcaster, operator<,(const lldb::SBBroadcaster &));
-}
-
-}
-}
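The block deleted above is the SBBroadcaster half of the same cleanup: with the SBReproducerPrivate.h include gone, the new instrumentation no longer uses a replay registry, so each file's RegisterMethods<> specialization in lldb_private::repro is removed outright rather than rewritten. For static methods that take no arguments the header also provides a zero-argument form, used by the SBCommandInterpreter changes that follow; a minimal sketch with a hypothetical SBExample class:

    #include "lldb/Utility/Instrumentation.h"

    // Static method with no arguments: the no-argument form records the call site.
    const char *SBExample::GetBroadcasterClass() {
      LLDB_INSTRUMENT();
      return "example.broadcaster";
    }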
diff --git a/contrib/llvm-project/lldb/source/API/SBCommandInterpreter.cpp b/contrib/llvm-project/lldb/source/API/SBCommandInterpreter.cpp
index 3830f6ed80ba..073c1a1b042c 100644
--- a/contrib/llvm-project/lldb/source/API/SBCommandInterpreter.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBCommandInterpreter.cpp
@@ -8,11 +8,11 @@
#include "lldb/lldb-types.h"
-#include "SBReproducerPrivate.h"
#include "lldb/Interpreter/CommandInterpreter.h"
#include "lldb/Interpreter/CommandObjectMultiword.h"
#include "lldb/Interpreter/CommandReturnObject.h"
#include "lldb/Target/Target.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Utility/Listener.h"
#include "lldb/API/SBBroadcaster.h"
@@ -78,71 +78,62 @@ protected:
SBCommandInterpreter::SBCommandInterpreter(CommandInterpreter *interpreter)
: m_opaque_ptr(interpreter) {
- LLDB_RECORD_CONSTRUCTOR(SBCommandInterpreter,
- (lldb_private::CommandInterpreter *), interpreter);
-
+ LLDB_INSTRUMENT_VA(this, interpreter);
}
SBCommandInterpreter::SBCommandInterpreter(const SBCommandInterpreter &rhs)
: m_opaque_ptr(rhs.m_opaque_ptr) {
- LLDB_RECORD_CONSTRUCTOR(SBCommandInterpreter,
- (const lldb::SBCommandInterpreter &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
SBCommandInterpreter::~SBCommandInterpreter() = default;
const SBCommandInterpreter &SBCommandInterpreter::
operator=(const SBCommandInterpreter &rhs) {
- LLDB_RECORD_METHOD(
- const lldb::SBCommandInterpreter &,
- SBCommandInterpreter, operator=,(const lldb::SBCommandInterpreter &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_ptr = rhs.m_opaque_ptr;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
bool SBCommandInterpreter::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBCommandInterpreter, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBCommandInterpreter::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBCommandInterpreter, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_ptr != nullptr;
}
bool SBCommandInterpreter::CommandExists(const char *cmd) {
- LLDB_RECORD_METHOD(bool, SBCommandInterpreter, CommandExists, (const char *),
- cmd);
+ LLDB_INSTRUMENT_VA(this, cmd);
return (((cmd != nullptr) && IsValid()) ? m_opaque_ptr->CommandExists(cmd)
: false);
}
bool SBCommandInterpreter::AliasExists(const char *cmd) {
- LLDB_RECORD_METHOD(bool, SBCommandInterpreter, AliasExists, (const char *),
- cmd);
+ LLDB_INSTRUMENT_VA(this, cmd);
return (((cmd != nullptr) && IsValid()) ? m_opaque_ptr->AliasExists(cmd)
: false);
}
bool SBCommandInterpreter::IsActive() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBCommandInterpreter, IsActive);
+ LLDB_INSTRUMENT_VA(this);
return (IsValid() ? m_opaque_ptr->IsActive() : false);
}
bool SBCommandInterpreter::WasInterrupted() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBCommandInterpreter, WasInterrupted);
+ LLDB_INSTRUMENT_VA(this);
return (IsValid() ? m_opaque_ptr->WasInterrupted() : false);
}
const char *SBCommandInterpreter::GetIOHandlerControlSequence(char ch) {
- LLDB_RECORD_METHOD(const char *, SBCommandInterpreter,
- GetIOHandlerControlSequence, (char), ch);
+ LLDB_INSTRUMENT_VA(this, ch);
return (IsValid()
? m_opaque_ptr->GetDebugger()
@@ -155,9 +146,7 @@ lldb::ReturnStatus
SBCommandInterpreter::HandleCommand(const char *command_line,
SBCommandReturnObject &result,
bool add_to_history) {
- LLDB_RECORD_METHOD(lldb::ReturnStatus, SBCommandInterpreter, HandleCommand,
- (const char *, lldb::SBCommandReturnObject &, bool),
- command_line, result, add_to_history);
+ LLDB_INSTRUMENT_VA(this, command_line, result, add_to_history);
SBExecutionContext sb_exe_ctx;
return HandleCommand(command_line, sb_exe_ctx, result, add_to_history);
@@ -166,10 +155,8 @@ SBCommandInterpreter::HandleCommand(const char *command_line,
lldb::ReturnStatus SBCommandInterpreter::HandleCommand(
const char *command_line, SBExecutionContext &override_context,
SBCommandReturnObject &result, bool add_to_history) {
- LLDB_RECORD_METHOD(lldb::ReturnStatus, SBCommandInterpreter, HandleCommand,
- (const char *, lldb::SBExecutionContext &,
- lldb::SBCommandReturnObject &, bool),
- command_line, override_context, result, add_to_history);
+ LLDB_INSTRUMENT_VA(this, command_line, override_context, result,
+                     add_to_history);
result.Clear();
if (command_line && IsValid()) {
@@ -194,11 +181,7 @@ void SBCommandInterpreter::HandleCommandsFromFile(
lldb::SBFileSpec &file, lldb::SBExecutionContext &override_context,
lldb::SBCommandInterpreterRunOptions &options,
lldb::SBCommandReturnObject result) {
- LLDB_RECORD_METHOD(void, SBCommandInterpreter, HandleCommandsFromFile,
- (lldb::SBFileSpec &, lldb::SBExecutionContext &,
- lldb::SBCommandInterpreterRunOptions &,
- lldb::SBCommandReturnObject),
- file, override_context, options, result);
+ LLDB_INSTRUMENT_VA(this, file, override_context, options, result);
if (!IsValid()) {
result->AppendError("SBCommandInterpreter is not valid.");
@@ -225,10 +208,7 @@ void SBCommandInterpreter::HandleCommandsFromFile(
int SBCommandInterpreter::HandleCompletion(
const char *current_line, const char *cursor, const char *last_char,
int match_start_point, int max_return_elements, SBStringList &matches) {
- LLDB_RECORD_METHOD(int, SBCommandInterpreter, HandleCompletion,
- (const char *, const char *, const char *, int, int,
- lldb::SBStringList &),
- current_line, cursor, last_char, match_start_point,
+ LLDB_INSTRUMENT_VA(this, current_line, cursor, last_char, match_start_point,
max_return_elements, matches);
SBStringList dummy_descriptions;
@@ -241,11 +221,7 @@ int SBCommandInterpreter::HandleCompletionWithDescriptions(
const char *current_line, const char *cursor, const char *last_char,
int match_start_point, int max_return_elements, SBStringList &matches,
SBStringList &descriptions) {
- LLDB_RECORD_METHOD(int, SBCommandInterpreter,
- HandleCompletionWithDescriptions,
- (const char *, const char *, const char *, int, int,
- lldb::SBStringList &, lldb::SBStringList &),
- current_line, cursor, last_char, match_start_point,
+ LLDB_INSTRUMENT_VA(this, current_line, cursor, last_char, match_start_point,
max_return_elements, matches, descriptions);
// Sanity check the arguments that are passed in: cursor & last_char have to
@@ -311,11 +287,7 @@ int SBCommandInterpreter::HandleCompletionWithDescriptions(
const char *current_line, uint32_t cursor_pos, int match_start_point,
int max_return_elements, SBStringList &matches,
SBStringList &descriptions) {
- LLDB_RECORD_METHOD(int, SBCommandInterpreter,
- HandleCompletionWithDescriptions,
- (const char *, uint32_t, int, int, lldb::SBStringList &,
- lldb::SBStringList &),
- current_line, cursor_pos, match_start_point,
+ LLDB_INSTRUMENT_VA(this, current_line, cursor_pos, match_start_point,
max_return_elements, matches, descriptions);
const char *cursor = current_line + cursor_pos;
@@ -330,9 +302,7 @@ int SBCommandInterpreter::HandleCompletion(const char *current_line,
int match_start_point,
int max_return_elements,
lldb::SBStringList &matches) {
- LLDB_RECORD_METHOD(int, SBCommandInterpreter, HandleCompletion,
- (const char *, uint32_t, int, int, lldb::SBStringList &),
- current_line, cursor_pos, match_start_point,
+ LLDB_INSTRUMENT_VA(this, current_line, cursor_pos, match_start_point,
max_return_elements, matches);
const char *cursor = current_line + cursor_pos;
@@ -342,25 +312,25 @@ int SBCommandInterpreter::HandleCompletion(const char *current_line,
}
bool SBCommandInterpreter::HasCommands() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBCommandInterpreter, HasCommands);
+ LLDB_INSTRUMENT_VA(this);
return (IsValid() ? m_opaque_ptr->HasCommands() : false);
}
bool SBCommandInterpreter::HasAliases() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBCommandInterpreter, HasAliases);
+ LLDB_INSTRUMENT_VA(this);
return (IsValid() ? m_opaque_ptr->HasAliases() : false);
}
bool SBCommandInterpreter::HasAliasOptions() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBCommandInterpreter, HasAliasOptions);
+ LLDB_INSTRUMENT_VA(this);
return (IsValid() ? m_opaque_ptr->HasAliasOptions() : false);
}
SBProcess SBCommandInterpreter::GetProcess() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBProcess, SBCommandInterpreter, GetProcess);
+ LLDB_INSTRUMENT_VA(this);
SBProcess sb_process;
ProcessSP process_sp;
@@ -373,43 +343,41 @@ SBProcess SBCommandInterpreter::GetProcess() {
}
}
- return LLDB_RECORD_RESULT(sb_process);
+ return sb_process;
}
SBDebugger SBCommandInterpreter::GetDebugger() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBDebugger, SBCommandInterpreter,
- GetDebugger);
+ LLDB_INSTRUMENT_VA(this);
SBDebugger sb_debugger;
if (IsValid())
sb_debugger.reset(m_opaque_ptr->GetDebugger().shared_from_this());
- return LLDB_RECORD_RESULT(sb_debugger);
+ return sb_debugger;
}
bool SBCommandInterpreter::GetPromptOnQuit() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBCommandInterpreter, GetPromptOnQuit);
+ LLDB_INSTRUMENT_VA(this);
return (IsValid() ? m_opaque_ptr->GetPromptOnQuit() : false);
}
void SBCommandInterpreter::SetPromptOnQuit(bool b) {
- LLDB_RECORD_METHOD(void, SBCommandInterpreter, SetPromptOnQuit, (bool), b);
+ LLDB_INSTRUMENT_VA(this, b);
if (IsValid())
m_opaque_ptr->SetPromptOnQuit(b);
}
void SBCommandInterpreter::AllowExitCodeOnQuit(bool allow) {
- LLDB_RECORD_METHOD(void, SBCommandInterpreter, AllowExitCodeOnQuit, (bool),
- allow);
+ LLDB_INSTRUMENT_VA(this, allow);
if (m_opaque_ptr)
m_opaque_ptr->AllowExitCodeOnQuit(allow);
}
bool SBCommandInterpreter::HasCustomQuitExitCode() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBCommandInterpreter, HasCustomQuitExitCode);
+ LLDB_INSTRUMENT_VA(this);
bool exited = false;
if (m_opaque_ptr)
@@ -418,7 +386,7 @@ bool SBCommandInterpreter::HasCustomQuitExitCode() {
}
int SBCommandInterpreter::GetQuitStatus() {
- LLDB_RECORD_METHOD_NO_ARGS(int, SBCommandInterpreter, GetQuitStatus);
+ LLDB_INSTRUMENT_VA(this);
bool exited = false;
return (m_opaque_ptr ? m_opaque_ptr->GetQuitExitCode(exited) : 0);
@@ -426,9 +394,7 @@ int SBCommandInterpreter::GetQuitStatus() {
void SBCommandInterpreter::ResolveCommand(const char *command_line,
SBCommandReturnObject &result) {
- LLDB_RECORD_METHOD(void, SBCommandInterpreter, ResolveCommand,
- (const char *, lldb::SBCommandReturnObject &),
- command_line, result);
+ LLDB_INSTRUMENT_VA(this, command_line, result);
result.Clear();
if (command_line && IsValid()) {
@@ -453,8 +419,7 @@ void SBCommandInterpreter::reset(
void SBCommandInterpreter::SourceInitFileInHomeDirectory(
SBCommandReturnObject &result) {
- LLDB_RECORD_METHOD(void, SBCommandInterpreter, SourceInitFileInHomeDirectory,
- (lldb::SBCommandReturnObject &), result);
+ LLDB_INSTRUMENT_VA(this, result);
result.Clear();
if (IsValid()) {
@@ -470,8 +435,7 @@ void SBCommandInterpreter::SourceInitFileInHomeDirectory(
void SBCommandInterpreter::SourceInitFileInHomeDirectory(
SBCommandReturnObject &result, bool is_repl) {
- LLDB_RECORD_METHOD(void, SBCommandInterpreter, SourceInitFileInHomeDirectory,
- (lldb::SBCommandReturnObject &, bool), result, is_repl);
+ LLDB_INSTRUMENT_VA(this, result, is_repl);
result.Clear();
if (IsValid()) {
@@ -487,9 +451,7 @@ void SBCommandInterpreter::SourceInitFileInHomeDirectory(
void SBCommandInterpreter::SourceInitFileInCurrentWorkingDirectory(
SBCommandReturnObject &result) {
- LLDB_RECORD_METHOD(void, SBCommandInterpreter,
- SourceInitFileInCurrentWorkingDirectory,
- (lldb::SBCommandReturnObject &), result);
+ LLDB_INSTRUMENT_VA(this, result);
result.Clear();
if (IsValid()) {
@@ -504,46 +466,36 @@ void SBCommandInterpreter::SourceInitFileInCurrentWorkingDirectory(
}
SBBroadcaster SBCommandInterpreter::GetBroadcaster() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBBroadcaster, SBCommandInterpreter,
- GetBroadcaster);
-
+ LLDB_INSTRUMENT_VA(this);
SBBroadcaster broadcaster(m_opaque_ptr, false);
-
- return LLDB_RECORD_RESULT(broadcaster);
+ return broadcaster;
}
const char *SBCommandInterpreter::GetBroadcasterClass() {
- LLDB_RECORD_STATIC_METHOD_NO_ARGS(const char *, SBCommandInterpreter,
- GetBroadcasterClass);
+ LLDB_INSTRUMENT();
return CommandInterpreter::GetStaticBroadcasterClass().AsCString();
}
const char *SBCommandInterpreter::GetArgumentTypeAsCString(
const lldb::CommandArgumentType arg_type) {
- LLDB_RECORD_STATIC_METHOD(const char *, SBCommandInterpreter,
- GetArgumentTypeAsCString,
- (const lldb::CommandArgumentType), arg_type);
+ LLDB_INSTRUMENT_VA(arg_type);
return CommandObject::GetArgumentTypeAsCString(arg_type);
}
const char *SBCommandInterpreter::GetArgumentDescriptionAsCString(
const lldb::CommandArgumentType arg_type) {
- LLDB_RECORD_STATIC_METHOD(const char *, SBCommandInterpreter,
- GetArgumentDescriptionAsCString,
- (const lldb::CommandArgumentType), arg_type);
+ LLDB_INSTRUMENT_VA(arg_type);
return CommandObject::GetArgumentDescriptionAsCString(arg_type);
}
bool SBCommandInterpreter::EventIsCommandInterpreterEvent(
const lldb::SBEvent &event) {
- LLDB_RECORD_STATIC_METHOD(bool, SBCommandInterpreter,
- EventIsCommandInterpreterEvent,
- (const lldb::SBEvent &), event);
+ LLDB_INSTRUMENT_VA(event);
return event.GetBroadcasterClass() ==
SBCommandInterpreter::GetBroadcasterClass();
@@ -552,9 +504,7 @@ bool SBCommandInterpreter::EventIsCommandInterpreterEvent(
bool SBCommandInterpreter::SetCommandOverrideCallback(
const char *command_name, lldb::CommandOverrideCallback callback,
void *baton) {
- LLDB_RECORD_DUMMY(bool, SBCommandInterpreter, SetCommandOverrideCallback,
- (const char *, lldb::CommandOverrideCallback, void *),
- command_name, callback, baton);
+ LLDB_INSTRUMENT_VA(this, command_name, callback, baton);
if (command_name && command_name[0] && IsValid()) {
llvm::StringRef command_name_str = command_name;
@@ -571,48 +521,37 @@ bool SBCommandInterpreter::SetCommandOverrideCallback(
lldb::SBCommand SBCommandInterpreter::AddMultiwordCommand(const char *name,
const char *help) {
- LLDB_RECORD_METHOD(lldb::SBCommand, SBCommandInterpreter, AddMultiwordCommand,
- (const char *, const char *), name, help);
+ LLDB_INSTRUMENT_VA(this, name, help);
lldb::CommandObjectSP new_command_sp(
new CommandObjectMultiword(*m_opaque_ptr, name, help));
new_command_sp->GetAsMultiwordCommand()->SetRemovable(true);
Status add_error = m_opaque_ptr->AddUserCommand(name, new_command_sp, true);
if (add_error.Success())
- return LLDB_RECORD_RESULT(lldb::SBCommand(new_command_sp));
- return LLDB_RECORD_RESULT(lldb::SBCommand());
+ return lldb::SBCommand(new_command_sp);
+ return lldb::SBCommand();
}
lldb::SBCommand SBCommandInterpreter::AddCommand(
const char *name, lldb::SBCommandPluginInterface *impl, const char *help) {
- LLDB_RECORD_METHOD(
- lldb::SBCommand, SBCommandInterpreter, AddCommand,
- (const char *, lldb::SBCommandPluginInterface *, const char *), name,
- impl, help);
+ LLDB_INSTRUMENT_VA(this, name, impl, help);
- return LLDB_RECORD_RESULT(AddCommand(name, impl, help, /*syntax=*/nullptr,
- /*auto_repeat_command=*/""))
+ return AddCommand(name, impl, help, /*syntax=*/nullptr,
+ /*auto_repeat_command=*/"");
}
lldb::SBCommand
SBCommandInterpreter::AddCommand(const char *name,
lldb::SBCommandPluginInterface *impl,
const char *help, const char *syntax) {
- LLDB_RECORD_METHOD(lldb::SBCommand, SBCommandInterpreter, AddCommand,
- (const char *, lldb::SBCommandPluginInterface *,
- const char *, const char *),
- name, impl, help, syntax);
- return LLDB_RECORD_RESULT(
- AddCommand(name, impl, help, syntax, /*auto_repeat_command=*/""))
+ LLDB_INSTRUMENT_VA(this, name, impl, help, syntax);
+ return AddCommand(name, impl, help, syntax, /*auto_repeat_command=*/"");
}
lldb::SBCommand SBCommandInterpreter::AddCommand(
const char *name, lldb::SBCommandPluginInterface *impl, const char *help,
const char *syntax, const char *auto_repeat_command) {
- LLDB_RECORD_METHOD(lldb::SBCommand, SBCommandInterpreter, AddCommand,
- (const char *, lldb::SBCommandPluginInterface *,
- const char *, const char *, const char *),
- name, impl, help, syntax, auto_repeat_command);
+ LLDB_INSTRUMENT_VA(this, name, impl, help, syntax, auto_repeat_command);
lldb::CommandObjectSP new_command_sp;
new_command_sp = std::make_shared<CommandPluginInterfaceImplementation>(
@@ -621,53 +560,53 @@ lldb::SBCommand SBCommandInterpreter::AddCommand(
Status add_error = m_opaque_ptr->AddUserCommand(name, new_command_sp, true);
if (add_error.Success())
- return LLDB_RECORD_RESULT(lldb::SBCommand(new_command_sp));
- return LLDB_RECORD_RESULT(lldb::SBCommand());
+ return lldb::SBCommand(new_command_sp);
+ return lldb::SBCommand();
}
-SBCommand::SBCommand() { LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBCommand); }
+SBCommand::SBCommand() { LLDB_INSTRUMENT_VA(this); }
SBCommand::SBCommand(lldb::CommandObjectSP cmd_sp) : m_opaque_sp(cmd_sp) {}
bool SBCommand::IsValid() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBCommand, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBCommand::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBCommand, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp.get() != nullptr;
}
const char *SBCommand::GetName() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBCommand, GetName);
+ LLDB_INSTRUMENT_VA(this);
return (IsValid() ? ConstString(m_opaque_sp->GetCommandName()).AsCString() : nullptr);
}
const char *SBCommand::GetHelp() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBCommand, GetHelp);
+ LLDB_INSTRUMENT_VA(this);
return (IsValid() ? ConstString(m_opaque_sp->GetHelp()).AsCString()
: nullptr);
}
const char *SBCommand::GetHelpLong() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBCommand, GetHelpLong);
+ LLDB_INSTRUMENT_VA(this);
return (IsValid() ? ConstString(m_opaque_sp->GetHelpLong()).AsCString()
: nullptr);
}
void SBCommand::SetHelp(const char *help) {
- LLDB_RECORD_METHOD(void, SBCommand, SetHelp, (const char *), help);
+ LLDB_INSTRUMENT_VA(this, help);
if (IsValid())
m_opaque_sp->SetHelp(help);
}
void SBCommand::SetHelpLong(const char *help) {
- LLDB_RECORD_METHOD(void, SBCommand, SetHelpLong, (const char *), help);
+ LLDB_INSTRUMENT_VA(this, help);
if (IsValid())
m_opaque_sp->SetHelpLong(help);
@@ -675,193 +614,64 @@ void SBCommand::SetHelpLong(const char *help) {
lldb::SBCommand SBCommand::AddMultiwordCommand(const char *name,
const char *help) {
- LLDB_RECORD_METHOD(lldb::SBCommand, SBCommand, AddMultiwordCommand,
- (const char *, const char *), name, help);
+ LLDB_INSTRUMENT_VA(this, name, help);
if (!IsValid())
- return LLDB_RECORD_RESULT(lldb::SBCommand());
+ return lldb::SBCommand();
if (!m_opaque_sp->IsMultiwordObject())
- return LLDB_RECORD_RESULT(lldb::SBCommand());
+ return lldb::SBCommand();
CommandObjectMultiword *new_command = new CommandObjectMultiword(
m_opaque_sp->GetCommandInterpreter(), name, help);
new_command->SetRemovable(true);
lldb::CommandObjectSP new_command_sp(new_command);
if (new_command_sp && m_opaque_sp->LoadSubCommand(name, new_command_sp))
- return LLDB_RECORD_RESULT(lldb::SBCommand(new_command_sp));
- return LLDB_RECORD_RESULT(lldb::SBCommand());
+ return lldb::SBCommand(new_command_sp);
+ return lldb::SBCommand();
}
lldb::SBCommand SBCommand::AddCommand(const char *name,
lldb::SBCommandPluginInterface *impl,
const char *help) {
- LLDB_RECORD_METHOD(
- lldb::SBCommand, SBCommand, AddCommand,
- (const char *, lldb::SBCommandPluginInterface *, const char *), name,
- impl, help);
- return LLDB_RECORD_RESULT(AddCommand(name, impl, help, /*syntax=*/nullptr,
- /*auto_repeat_command=*/""))
+ LLDB_INSTRUMENT_VA(this, name, impl, help);
+ return AddCommand(name, impl, help, /*syntax=*/nullptr,
+ /*auto_repeat_command=*/"");
}
lldb::SBCommand SBCommand::AddCommand(const char *name,
lldb::SBCommandPluginInterface *impl,
const char *help, const char *syntax) {
- LLDB_RECORD_METHOD(lldb::SBCommand, SBCommand, AddCommand,
- (const char *, lldb::SBCommandPluginInterface *,
- const char *, const char *),
- name, impl, help, syntax);
- return LLDB_RECORD_RESULT(
- AddCommand(name, impl, help, syntax, /*auto_repeat_command=*/""))
+ LLDB_INSTRUMENT_VA(this, name, impl, help, syntax);
+ return AddCommand(name, impl, help, syntax, /*auto_repeat_command=*/"");
}
lldb::SBCommand SBCommand::AddCommand(const char *name,
lldb::SBCommandPluginInterface *impl,
const char *help, const char *syntax,
const char *auto_repeat_command) {
- LLDB_RECORD_METHOD(lldb::SBCommand, SBCommand, AddCommand,
- (const char *, lldb::SBCommandPluginInterface *,
- const char *, const char *, const char *),
- name, impl, help, syntax, auto_repeat_command);
+ LLDB_INSTRUMENT_VA(this, name, impl, help, syntax, auto_repeat_command);
if (!IsValid())
- return LLDB_RECORD_RESULT(lldb::SBCommand());
+ return lldb::SBCommand();
if (!m_opaque_sp->IsMultiwordObject())
- return LLDB_RECORD_RESULT(lldb::SBCommand());
+ return lldb::SBCommand();
lldb::CommandObjectSP new_command_sp;
new_command_sp = std::make_shared<CommandPluginInterfaceImplementation>(
m_opaque_sp->GetCommandInterpreter(), name, impl, help, syntax,
/*flags=*/0, auto_repeat_command);
if (new_command_sp && m_opaque_sp->LoadSubCommand(name, new_command_sp))
- return LLDB_RECORD_RESULT(lldb::SBCommand(new_command_sp));
- return LLDB_RECORD_RESULT(lldb::SBCommand());
+ return lldb::SBCommand(new_command_sp);
+ return lldb::SBCommand();
}
uint32_t SBCommand::GetFlags() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBCommand, GetFlags);
+ LLDB_INSTRUMENT_VA(this);
return (IsValid() ? m_opaque_sp->GetFlags().Get() : 0);
}
void SBCommand::SetFlags(uint32_t flags) {
- LLDB_RECORD_METHOD(void, SBCommand, SetFlags, (uint32_t), flags);
+ LLDB_INSTRUMENT_VA(this, flags);
if (IsValid())
m_opaque_sp->GetFlags().Set(flags);
}
-
-namespace lldb_private {
-namespace repro {
-
-template <> void RegisterMethods<SBCommandInterpreter>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBCommandInterpreter,
- (lldb_private::CommandInterpreter *));
- LLDB_REGISTER_CONSTRUCTOR(SBCommandInterpreter,
- (const lldb::SBCommandInterpreter &));
- LLDB_REGISTER_METHOD(
- const lldb::SBCommandInterpreter &,
- SBCommandInterpreter, operator=,(const lldb::SBCommandInterpreter &));
- LLDB_REGISTER_METHOD_CONST(bool, SBCommandInterpreter, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBCommandInterpreter, operator bool, ());
- LLDB_REGISTER_METHOD(bool, SBCommandInterpreter, CommandExists,
- (const char *));
- LLDB_REGISTER_METHOD(bool, SBCommandInterpreter, AliasExists,
- (const char *));
- LLDB_REGISTER_METHOD(bool, SBCommandInterpreter, IsActive, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBCommandInterpreter, WasInterrupted, ());
- LLDB_REGISTER_METHOD(const char *, SBCommandInterpreter,
- GetIOHandlerControlSequence, (char));
- LLDB_REGISTER_METHOD(lldb::ReturnStatus, SBCommandInterpreter,
- HandleCommand,
- (const char *, lldb::SBCommandReturnObject &, bool));
- LLDB_REGISTER_METHOD(lldb::ReturnStatus, SBCommandInterpreter,
- HandleCommand,
- (const char *, lldb::SBExecutionContext &,
- lldb::SBCommandReturnObject &, bool));
- LLDB_REGISTER_METHOD(void, SBCommandInterpreter, HandleCommandsFromFile,
- (lldb::SBFileSpec &, lldb::SBExecutionContext &,
- lldb::SBCommandInterpreterRunOptions &,
- lldb::SBCommandReturnObject));
- LLDB_REGISTER_METHOD(int, SBCommandInterpreter, HandleCompletion,
- (const char *, const char *, const char *, int, int,
- lldb::SBStringList &));
- LLDB_REGISTER_METHOD(int, SBCommandInterpreter,
- HandleCompletionWithDescriptions,
- (const char *, const char *, const char *, int, int,
- lldb::SBStringList &, lldb::SBStringList &));
- LLDB_REGISTER_METHOD(int, SBCommandInterpreter,
- HandleCompletionWithDescriptions,
- (const char *, uint32_t, int, int,
- lldb::SBStringList &, lldb::SBStringList &));
- LLDB_REGISTER_METHOD(
- int, SBCommandInterpreter, HandleCompletion,
- (const char *, uint32_t, int, int, lldb::SBStringList &));
- LLDB_REGISTER_METHOD(bool, SBCommandInterpreter, HasCommands, ());
- LLDB_REGISTER_METHOD(bool, SBCommandInterpreter, HasAliases, ());
- LLDB_REGISTER_METHOD(bool, SBCommandInterpreter, HasAliasOptions, ());
- LLDB_REGISTER_METHOD(lldb::SBProcess, SBCommandInterpreter, GetProcess, ());
- LLDB_REGISTER_METHOD(lldb::SBDebugger, SBCommandInterpreter, GetDebugger,
- ());
- LLDB_REGISTER_METHOD(bool, SBCommandInterpreter, GetPromptOnQuit, ());
- LLDB_REGISTER_METHOD(void, SBCommandInterpreter, SetPromptOnQuit, (bool));
- LLDB_REGISTER_METHOD(void, SBCommandInterpreter, AllowExitCodeOnQuit,
- (bool));
- LLDB_REGISTER_METHOD(bool, SBCommandInterpreter, HasCustomQuitExitCode, ());
- LLDB_REGISTER_METHOD(int, SBCommandInterpreter, GetQuitStatus, ());
- LLDB_REGISTER_METHOD(void, SBCommandInterpreter, ResolveCommand,
- (const char *, lldb::SBCommandReturnObject &));
- LLDB_REGISTER_METHOD(void, SBCommandInterpreter,
- SourceInitFileInHomeDirectory,
- (lldb::SBCommandReturnObject &));
- LLDB_REGISTER_METHOD(void, SBCommandInterpreter,
- SourceInitFileInHomeDirectory,
- (lldb::SBCommandReturnObject &, bool));
- LLDB_REGISTER_METHOD(void, SBCommandInterpreter,
- SourceInitFileInCurrentWorkingDirectory,
- (lldb::SBCommandReturnObject &));
- LLDB_REGISTER_METHOD(lldb::SBBroadcaster, SBCommandInterpreter,
- GetBroadcaster, ());
- LLDB_REGISTER_STATIC_METHOD(const char *, SBCommandInterpreter,
- GetBroadcasterClass, ());
- LLDB_REGISTER_STATIC_METHOD(const char *, SBCommandInterpreter,
- GetArgumentTypeAsCString,
- (const lldb::CommandArgumentType));
- LLDB_REGISTER_STATIC_METHOD(const char *, SBCommandInterpreter,
- GetArgumentDescriptionAsCString,
- (const lldb::CommandArgumentType));
- LLDB_REGISTER_STATIC_METHOD(bool, SBCommandInterpreter,
- EventIsCommandInterpreterEvent,
- (const lldb::SBEvent &));
- LLDB_REGISTER_METHOD(lldb::SBCommand, SBCommandInterpreter,
- AddMultiwordCommand, (const char *, const char *));
- LLDB_REGISTER_METHOD(
- lldb::SBCommand, SBCommandInterpreter, AddCommand,
- (const char *, lldb::SBCommandPluginInterface *, const char *));
- LLDB_REGISTER_METHOD(lldb::SBCommand, SBCommandInterpreter, AddCommand,
- (const char *, lldb::SBCommandPluginInterface *,
- const char *, const char *));
- LLDB_REGISTER_METHOD(lldb::SBCommand, SBCommandInterpreter, AddCommand,
- (const char *, lldb::SBCommandPluginInterface *,
- const char *, const char *, const char *));
- LLDB_REGISTER_CONSTRUCTOR(SBCommand, ());
- LLDB_REGISTER_METHOD(bool, SBCommand, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBCommand, operator bool, ());
- LLDB_REGISTER_METHOD(const char *, SBCommand, GetName, ());
- LLDB_REGISTER_METHOD(const char *, SBCommand, GetHelp, ());
- LLDB_REGISTER_METHOD(const char *, SBCommand, GetHelpLong, ());
- LLDB_REGISTER_METHOD(void, SBCommand, SetHelp, (const char *));
- LLDB_REGISTER_METHOD(void, SBCommand, SetHelpLong, (const char *));
- LLDB_REGISTER_METHOD(lldb::SBCommand, SBCommand, AddMultiwordCommand,
- (const char *, const char *));
- LLDB_REGISTER_METHOD(
- lldb::SBCommand, SBCommand, AddCommand,
- (const char *, lldb::SBCommandPluginInterface *, const char *));
- LLDB_REGISTER_METHOD(lldb::SBCommand, SBCommand, AddCommand,
- (const char *, lldb::SBCommandPluginInterface *,
- const char *, const char *));
- LLDB_REGISTER_METHOD(lldb::SBCommand, SBCommand, AddCommand,
- (const char *, lldb::SBCommandPluginInterface *,
- const char *, const char *, const char *));
- LLDB_REGISTER_METHOD(uint32_t, SBCommand, GetFlags, ());
- LLDB_REGISTER_METHOD(void, SBCommand, SetFlags, (uint32_t));
-}
-}
-}
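
The hunks above all make the same substitution: the reproducer-era LLDB_RECORD_CONSTRUCTOR / LLDB_RECORD_METHOD / LLDB_RECORD_RESULT macros become a single LLDB_INSTRUMENT_VA(this, ...) call, and the hand-maintained RegisterMethods<> registry at the bottom of the file is deleted outright. The sketch below shows only that call-site shape; SBExample and the INSTRUMENT_VA stub are hypothetical stand-ins, not LLDB's real API or its Instrumentation.h macro.

#include <cstdio>
#include <string>

// Stand-in for LLDB_INSTRUMENT_VA: it only prints the enclosing function so
// the call-site shape can be shown; the real macro in
// lldb/Utility/Instrumentation.h also records the argument values.
#define INSTRUMENT_VA(...) std::printf("instrumented: %s\n", __PRETTY_FUNCTION__)

// Hypothetical SB-style wrapper (not an LLDB class).
class SBExample {
public:
  SBExample() { INSTRUMENT_VA(this); }

  // One macro call per entry point replaces the old LLDB_RECORD_METHOD
  // boilerplate that spelled out every parameter type, and plain returns
  // replace the LLDB_RECORD_RESULT(...) wrappers.
  std::string GetName() const {
    INSTRUMENT_VA(this);
    return m_name;                       // returned directly, no wrapper
  }

  void SetName(const char *name) {
    INSTRUMENT_VA(this, name);
    if (name)
      m_name = name;
  }

private:
  std::string m_name = "example";
};

int main() {
  SBExample ex;
  ex.SetName("demo");
  std::printf("%s\n", ex.GetName().c_str());
  return 0;
}

With the reproducer replay machinery gone there is nothing left to capture return values, which is why every LLDB_RECORD_RESULT(x) above collapses to a plain return of x.
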
diff --git a/contrib/llvm-project/lldb/source/API/SBCommandInterpreterRunOptions.cpp b/contrib/llvm-project/lldb/source/API/SBCommandInterpreterRunOptions.cpp
index 317ec6d37127..6c6b2aa15a79 100644
--- a/contrib/llvm-project/lldb/source/API/SBCommandInterpreterRunOptions.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBCommandInterpreterRunOptions.cpp
@@ -8,7 +8,7 @@
#include "lldb/lldb-types.h"
-#include "SBReproducerPrivate.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/API/SBCommandInterpreterRunOptions.h"
#include "lldb/Interpreter/CommandInterpreter.h"
@@ -19,16 +19,14 @@ using namespace lldb;
using namespace lldb_private;
SBCommandInterpreterRunOptions::SBCommandInterpreterRunOptions() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBCommandInterpreterRunOptions);
+ LLDB_INSTRUMENT_VA(this);
m_opaque_up = std::make_unique<CommandInterpreterRunOptions>();
}
SBCommandInterpreterRunOptions::SBCommandInterpreterRunOptions(
- const SBCommandInterpreterRunOptions &rhs)
- : m_opaque_up() {
- LLDB_RECORD_CONSTRUCTOR(SBCommandInterpreterRunOptions,
- (const lldb::SBCommandInterpreterRunOptions &), rhs);
+ const SBCommandInterpreterRunOptions &rhs) {
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_up = std::make_unique<CommandInterpreterRunOptions>(rhs.ref());
}
@@ -37,153 +35,131 @@ SBCommandInterpreterRunOptions::~SBCommandInterpreterRunOptions() = default;
SBCommandInterpreterRunOptions &SBCommandInterpreterRunOptions::operator=(
const SBCommandInterpreterRunOptions &rhs) {
- LLDB_RECORD_METHOD(lldb::SBCommandInterpreterRunOptions &,
- SBCommandInterpreterRunOptions, operator=,
- (const lldb::SBCommandInterpreterRunOptions &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this == &rhs)
- return LLDB_RECORD_RESULT(*this);
+ return *this;
*m_opaque_up = *rhs.m_opaque_up;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
bool SBCommandInterpreterRunOptions::GetStopOnContinue() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBCommandInterpreterRunOptions,
- GetStopOnContinue);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetStopOnContinue();
}
void SBCommandInterpreterRunOptions::SetStopOnContinue(bool stop_on_continue) {
- LLDB_RECORD_METHOD(void, SBCommandInterpreterRunOptions, SetStopOnContinue,
- (bool), stop_on_continue);
+ LLDB_INSTRUMENT_VA(this, stop_on_continue);
m_opaque_up->SetStopOnContinue(stop_on_continue);
}
bool SBCommandInterpreterRunOptions::GetStopOnError() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBCommandInterpreterRunOptions,
- GetStopOnError);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetStopOnError();
}
void SBCommandInterpreterRunOptions::SetStopOnError(bool stop_on_error) {
- LLDB_RECORD_METHOD(void, SBCommandInterpreterRunOptions, SetStopOnError,
- (bool), stop_on_error);
+ LLDB_INSTRUMENT_VA(this, stop_on_error);
m_opaque_up->SetStopOnError(stop_on_error);
}
bool SBCommandInterpreterRunOptions::GetStopOnCrash() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBCommandInterpreterRunOptions,
- GetStopOnCrash);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetStopOnCrash();
}
void SBCommandInterpreterRunOptions::SetStopOnCrash(bool stop_on_crash) {
- LLDB_RECORD_METHOD(void, SBCommandInterpreterRunOptions, SetStopOnCrash,
- (bool), stop_on_crash);
+ LLDB_INSTRUMENT_VA(this, stop_on_crash);
m_opaque_up->SetStopOnCrash(stop_on_crash);
}
bool SBCommandInterpreterRunOptions::GetEchoCommands() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBCommandInterpreterRunOptions,
- GetEchoCommands);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetEchoCommands();
}
void SBCommandInterpreterRunOptions::SetEchoCommands(bool echo_commands) {
- LLDB_RECORD_METHOD(void, SBCommandInterpreterRunOptions, SetEchoCommands,
- (bool), echo_commands);
+ LLDB_INSTRUMENT_VA(this, echo_commands);
m_opaque_up->SetEchoCommands(echo_commands);
}
bool SBCommandInterpreterRunOptions::GetEchoCommentCommands() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBCommandInterpreterRunOptions,
- GetEchoCommentCommands);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetEchoCommentCommands();
}
void SBCommandInterpreterRunOptions::SetEchoCommentCommands(bool echo) {
- LLDB_RECORD_METHOD(void, SBCommandInterpreterRunOptions,
- SetEchoCommentCommands, (bool), echo);
+ LLDB_INSTRUMENT_VA(this, echo);
m_opaque_up->SetEchoCommentCommands(echo);
}
bool SBCommandInterpreterRunOptions::GetPrintResults() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBCommandInterpreterRunOptions,
- GetPrintResults);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetPrintResults();
}
void SBCommandInterpreterRunOptions::SetPrintResults(bool print_results) {
- LLDB_RECORD_METHOD(void, SBCommandInterpreterRunOptions, SetPrintResults,
- (bool), print_results);
+ LLDB_INSTRUMENT_VA(this, print_results);
m_opaque_up->SetPrintResults(print_results);
}
bool SBCommandInterpreterRunOptions::GetPrintErrors() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBCommandInterpreterRunOptions,
- GetPrintErrors);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetPrintErrors();
}
void SBCommandInterpreterRunOptions::SetPrintErrors(bool print_errors) {
- LLDB_RECORD_METHOD(void, SBCommandInterpreterRunOptions, SetPrintErrors,
- (bool), print_errors);
+ LLDB_INSTRUMENT_VA(this, print_errors);
m_opaque_up->SetPrintErrors(print_errors);
}
bool SBCommandInterpreterRunOptions::GetAddToHistory() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBCommandInterpreterRunOptions,
- GetAddToHistory);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetAddToHistory();
}
void SBCommandInterpreterRunOptions::SetAddToHistory(bool add_to_history) {
- LLDB_RECORD_METHOD(void, SBCommandInterpreterRunOptions, SetAddToHistory,
- (bool), add_to_history);
+ LLDB_INSTRUMENT_VA(this, add_to_history);
m_opaque_up->SetAddToHistory(add_to_history);
}
bool SBCommandInterpreterRunOptions::GetAutoHandleEvents() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBCommandInterpreterRunOptions,
- GetAutoHandleEvents);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetAutoHandleEvents();
}
void SBCommandInterpreterRunOptions::SetAutoHandleEvents(
bool auto_handle_events) {
- LLDB_RECORD_METHOD(void, SBCommandInterpreterRunOptions, SetAutoHandleEvents,
- (bool), auto_handle_events);
+ LLDB_INSTRUMENT_VA(this, auto_handle_events);
m_opaque_up->SetAutoHandleEvents(auto_handle_events);
}
bool SBCommandInterpreterRunOptions::GetSpawnThread() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBCommandInterpreterRunOptions,
- GetSpawnThread);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetSpawnThread();
}
void SBCommandInterpreterRunOptions::SetSpawnThread(bool spawn_thread) {
- LLDB_RECORD_METHOD(void, SBCommandInterpreterRunOptions, SetSpawnThread,
- (bool), spawn_thread);
+ LLDB_INSTRUMENT_VA(this, spawn_thread);
m_opaque_up->SetSpawnThread(spawn_thread);
}
@@ -202,21 +178,19 @@ SBCommandInterpreterRunResult::SBCommandInterpreterRunResult()
: m_opaque_up(new CommandInterpreterRunResult())
{
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBCommandInterpreterRunResult);
+ LLDB_INSTRUMENT_VA(this);
}
SBCommandInterpreterRunResult::SBCommandInterpreterRunResult(
const SBCommandInterpreterRunResult &rhs)
: m_opaque_up(new CommandInterpreterRunResult()) {
- LLDB_RECORD_CONSTRUCTOR(SBCommandInterpreterRunResult,
- (const lldb::SBCommandInterpreterRunResult &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
*m_opaque_up = *rhs.m_opaque_up;
}
SBCommandInterpreterRunResult::SBCommandInterpreterRunResult(
- const CommandInterpreterRunResult &rhs)
- : m_opaque_up() {
+ const CommandInterpreterRunResult &rhs) {
m_opaque_up = std::make_unique<CommandInterpreterRunResult>(rhs);
}
@@ -224,92 +198,23 @@ SBCommandInterpreterRunResult::~SBCommandInterpreterRunResult() = default;
SBCommandInterpreterRunResult &SBCommandInterpreterRunResult::operator=(
const SBCommandInterpreterRunResult &rhs) {
- LLDB_RECORD_METHOD(lldb::SBCommandInterpreterRunResult &,
- SBCommandInterpreterRunResult, operator=,
- (const lldb::SBCommandInterpreterRunResult &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this == &rhs)
- return LLDB_RECORD_RESULT(*this);
+ return *this;
*m_opaque_up = *rhs.m_opaque_up;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
int SBCommandInterpreterRunResult::GetNumberOfErrors() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(int, SBCommandInterpreterRunResult,
- GetNumberOfErrors);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetNumErrors();
}
lldb::CommandInterpreterResult
SBCommandInterpreterRunResult::GetResult() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::CommandInterpreterResult,
- SBCommandInterpreterRunResult, GetResult);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetResult();
}
-
-namespace lldb_private {
-namespace repro {
-
-template <> void RegisterMethods<SBCommandInterpreterRunOptions>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBCommandInterpreterRunOptions, ());
- LLDB_REGISTER_CONSTRUCTOR(SBCommandInterpreterRunOptions,
- (const lldb::SBCommandInterpreterRunOptions &));
- LLDB_REGISTER_METHOD(lldb::SBCommandInterpreterRunOptions &,
- SBCommandInterpreterRunOptions, operator=,
- (const lldb::SBCommandInterpreterRunOptions &));
- LLDB_REGISTER_METHOD_CONST(bool, SBCommandInterpreterRunOptions,
- GetStopOnContinue, ());
- LLDB_REGISTER_METHOD(void, SBCommandInterpreterRunOptions, SetStopOnContinue,
- (bool));
- LLDB_REGISTER_METHOD_CONST(bool, SBCommandInterpreterRunOptions,
- GetStopOnError, ());
- LLDB_REGISTER_METHOD(void, SBCommandInterpreterRunOptions, SetStopOnError,
- (bool));
- LLDB_REGISTER_METHOD_CONST(bool, SBCommandInterpreterRunOptions,
- GetStopOnCrash, ());
- LLDB_REGISTER_METHOD(void, SBCommandInterpreterRunOptions, SetStopOnCrash,
- (bool));
- LLDB_REGISTER_METHOD_CONST(bool, SBCommandInterpreterRunOptions,
- GetEchoCommands, ());
- LLDB_REGISTER_METHOD(void, SBCommandInterpreterRunOptions, SetEchoCommands,
- (bool));
- LLDB_REGISTER_METHOD_CONST(bool, SBCommandInterpreterRunOptions,
- GetEchoCommentCommands, ());
- LLDB_REGISTER_METHOD(void, SBCommandInterpreterRunOptions,
- SetEchoCommentCommands, (bool));
- LLDB_REGISTER_METHOD_CONST(bool, SBCommandInterpreterRunOptions,
- GetPrintResults, ());
- LLDB_REGISTER_METHOD(void, SBCommandInterpreterRunOptions, SetPrintResults,
- (bool));
- LLDB_REGISTER_METHOD_CONST(bool, SBCommandInterpreterRunOptions,
- GetPrintErrors, ());
- LLDB_REGISTER_METHOD(void, SBCommandInterpreterRunOptions, SetPrintErrors,
- (bool));
- LLDB_REGISTER_METHOD_CONST(bool, SBCommandInterpreterRunOptions,
- GetAddToHistory, ());
- LLDB_REGISTER_METHOD(void, SBCommandInterpreterRunOptions, SetAddToHistory,
- (bool));
- LLDB_REGISTER_METHOD_CONST(bool, SBCommandInterpreterRunOptions,
- GetAutoHandleEvents, ());
- LLDB_REGISTER_METHOD(void, SBCommandInterpreterRunOptions,
- SetAutoHandleEvents, (bool));
- LLDB_REGISTER_METHOD_CONST(bool, SBCommandInterpreterRunOptions,
- GetSpawnThread, ());
- LLDB_REGISTER_METHOD(void, SBCommandInterpreterRunOptions, SetSpawnThread,
- (bool));
- LLDB_REGISTER_CONSTRUCTOR(SBCommandInterpreterRunResult, ());
- LLDB_REGISTER_CONSTRUCTOR(SBCommandInterpreterRunResult,
- (const lldb::SBCommandInterpreterRunResult &));
- LLDB_REGISTER_METHOD(lldb::SBCommandInterpreterRunResult &,
- SBCommandInterpreterRunResult, operator=,
- (const lldb::SBCommandInterpreterRunResult &));
- LLDB_REGISTER_METHOD_CONST(int, SBCommandInterpreterRunResult,
- GetNumberOfErrors, ());
- LLDB_REGISTER_METHOD_CONST(lldb::CommandInterpreterResult,
- SBCommandInterpreterRunResult, GetResult, ());
-}
-
-} // namespace repro
-} // namespace lldb_private
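
The SBCommandInterpreterRunOptions/RunResult hunks show two smaller cleanups riding along with the macro switch: copy constructors stop spelling out a redundant ": m_opaque_up()" initializer (a std::unique_ptr member is already null by default), and every accessor keeps its one-line forward to the opaque implementation. A self-contained sketch of that pimpl shape, with hypothetical names throughout:

#include <memory>

// Hypothetical options payload standing in for CommandInterpreterRunOptions.
class RunOptionsImpl {
public:
  bool stop_on_error = false;
};

class SBRunOptionsSketch {
public:
  // std::unique_ptr default-constructs to null on its own, so an explicit
  // ": m_opaque_up()" initializer (removed in the hunks above) adds nothing.
  SBRunOptionsSketch() : m_opaque_up(std::make_unique<RunOptionsImpl>()) {}

  SBRunOptionsSketch(const SBRunOptionsSketch &rhs)
      : m_opaque_up(std::make_unique<RunOptionsImpl>(*rhs.m_opaque_up)) {}

  SBRunOptionsSketch &operator=(const SBRunOptionsSketch &rhs) {
    if (this != &rhs)
      *m_opaque_up = *rhs.m_opaque_up;   // copy the payload, keep our pointer
    return *this;
  }

  bool GetStopOnError() const { return m_opaque_up->stop_on_error; }
  void SetStopOnError(bool stop) { m_opaque_up->stop_on_error = stop; }

private:
  std::unique_ptr<RunOptionsImpl> m_opaque_up;
};

int main() {
  SBRunOptionsSketch opts;
  opts.SetStopOnError(true);
  SBRunOptionsSketch copy(opts);         // deep copy via the impl copy ctor
  return copy.GetStopOnError() ? 0 : 1;
}
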
diff --git a/contrib/llvm-project/lldb/source/API/SBCommandReturnObject.cpp b/contrib/llvm-project/lldb/source/API/SBCommandReturnObject.cpp
index 00150d198fca..7d2c102b3d8c 100644
--- a/contrib/llvm-project/lldb/source/API/SBCommandReturnObject.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBCommandReturnObject.cpp
@@ -7,13 +7,13 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBCommandReturnObject.h"
-#include "SBReproducerPrivate.h"
#include "Utils.h"
#include "lldb/API/SBError.h"
#include "lldb/API/SBFile.h"
#include "lldb/API/SBStream.h"
#include "lldb/Interpreter/CommandReturnObject.h"
#include "lldb/Utility/ConstString.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Utility/Status.h"
using namespace lldb;
@@ -46,76 +46,70 @@ private:
SBCommandReturnObject::SBCommandReturnObject()
: m_opaque_up(new SBCommandReturnObjectImpl()) {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBCommandReturnObject);
+ LLDB_INSTRUMENT_VA(this);
}
SBCommandReturnObject::SBCommandReturnObject(CommandReturnObject &ref)
: m_opaque_up(new SBCommandReturnObjectImpl(ref)) {
- LLDB_RECORD_CONSTRUCTOR(SBCommandReturnObject,
- (lldb_private::CommandReturnObject &), ref);
+ LLDB_INSTRUMENT_VA(this, ref);
}
-SBCommandReturnObject::SBCommandReturnObject(const SBCommandReturnObject &rhs)
- : m_opaque_up() {
- LLDB_RECORD_CONSTRUCTOR(SBCommandReturnObject,
- (const lldb::SBCommandReturnObject &), rhs);
+SBCommandReturnObject::SBCommandReturnObject(const SBCommandReturnObject &rhs) {
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_up = clone(rhs.m_opaque_up);
}
SBCommandReturnObject &SBCommandReturnObject::
operator=(const SBCommandReturnObject &rhs) {
- LLDB_RECORD_METHOD(
- lldb::SBCommandReturnObject &,
- SBCommandReturnObject, operator=,(const lldb::SBCommandReturnObject &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_up = clone(rhs.m_opaque_up);
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBCommandReturnObject::~SBCommandReturnObject() = default;
bool SBCommandReturnObject::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBCommandReturnObject, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBCommandReturnObject::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBCommandReturnObject, operator bool);
+ LLDB_INSTRUMENT_VA(this);
// This method is not useful but it needs to stay to keep SB API stable.
return true;
}
const char *SBCommandReturnObject::GetOutput() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBCommandReturnObject, GetOutput);
+ LLDB_INSTRUMENT_VA(this);
ConstString output(ref().GetOutputData());
return output.AsCString(/*value_if_empty*/ "");
}
const char *SBCommandReturnObject::GetError() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBCommandReturnObject, GetError);
+ LLDB_INSTRUMENT_VA(this);
ConstString output(ref().GetErrorData());
return output.AsCString(/*value_if_empty*/ "");
}
size_t SBCommandReturnObject::GetOutputSize() {
- LLDB_RECORD_METHOD_NO_ARGS(size_t, SBCommandReturnObject, GetOutputSize);
+ LLDB_INSTRUMENT_VA(this);
return ref().GetOutputData().size();
}
size_t SBCommandReturnObject::GetErrorSize() {
- LLDB_RECORD_METHOD_NO_ARGS(size_t, SBCommandReturnObject, GetErrorSize);
+ LLDB_INSTRUMENT_VA(this);
return ref().GetErrorData().size();
}
size_t SBCommandReturnObject::PutOutput(FILE *fh) {
- LLDB_RECORD_DUMMY(size_t, SBCommandReturnObject, PutOutput, (FILE *), fh);
+ LLDB_INSTRUMENT_VA(this, fh);
if (fh) {
size_t num_bytes = GetOutputSize();
if (num_bytes)
@@ -125,22 +119,21 @@ size_t SBCommandReturnObject::PutOutput(FILE *fh) {
}
size_t SBCommandReturnObject::PutOutput(FileSP file_sp) {
- LLDB_RECORD_METHOD(size_t, SBCommandReturnObject, PutOutput, (FileSP),
- file_sp);
+ LLDB_INSTRUMENT_VA(this, file_sp);
if (!file_sp)
return 0;
return file_sp->Printf("%s", GetOutput());
}
size_t SBCommandReturnObject::PutOutput(SBFile file) {
- LLDB_RECORD_METHOD(size_t, SBCommandReturnObject, PutOutput, (SBFile), file);
+ LLDB_INSTRUMENT_VA(this, file);
if (!file.m_opaque_sp)
return 0;
return file.m_opaque_sp->Printf("%s", GetOutput());
}
size_t SBCommandReturnObject::PutError(FILE *fh) {
- LLDB_RECORD_DUMMY(size_t, SBCommandReturnObject, PutError, (FILE *), fh);
+ LLDB_INSTRUMENT_VA(this, fh);
if (fh) {
size_t num_bytes = GetErrorSize();
if (num_bytes)
@@ -150,62 +143,57 @@ size_t SBCommandReturnObject::PutError(FILE *fh) {
}
size_t SBCommandReturnObject::PutError(FileSP file_sp) {
- LLDB_RECORD_METHOD(size_t, SBCommandReturnObject, PutError, (FileSP),
- file_sp);
+ LLDB_INSTRUMENT_VA(this, file_sp);
if (!file_sp)
return 0;
return file_sp->Printf("%s", GetError());
}
size_t SBCommandReturnObject::PutError(SBFile file) {
- LLDB_RECORD_METHOD(size_t, SBCommandReturnObject, PutError, (SBFile), file);
+ LLDB_INSTRUMENT_VA(this, file);
if (!file.m_opaque_sp)
return 0;
return file.m_opaque_sp->Printf("%s", GetError());
}
void SBCommandReturnObject::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBCommandReturnObject, Clear);
+ LLDB_INSTRUMENT_VA(this);
ref().Clear();
}
lldb::ReturnStatus SBCommandReturnObject::GetStatus() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::ReturnStatus, SBCommandReturnObject,
- GetStatus);
+ LLDB_INSTRUMENT_VA(this);
return ref().GetStatus();
}
void SBCommandReturnObject::SetStatus(lldb::ReturnStatus status) {
- LLDB_RECORD_METHOD(void, SBCommandReturnObject, SetStatus,
- (lldb::ReturnStatus), status);
+ LLDB_INSTRUMENT_VA(this, status);
ref().SetStatus(status);
}
bool SBCommandReturnObject::Succeeded() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBCommandReturnObject, Succeeded);
+ LLDB_INSTRUMENT_VA(this);
return ref().Succeeded();
}
bool SBCommandReturnObject::HasResult() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBCommandReturnObject, HasResult);
+ LLDB_INSTRUMENT_VA(this);
return ref().HasResult();
}
void SBCommandReturnObject::AppendMessage(const char *message) {
- LLDB_RECORD_METHOD(void, SBCommandReturnObject, AppendMessage, (const char *),
- message);
+ LLDB_INSTRUMENT_VA(this, message);
ref().AppendMessage(message);
}
void SBCommandReturnObject::AppendWarning(const char *message) {
- LLDB_RECORD_METHOD(void, SBCommandReturnObject, AppendWarning, (const char *),
- message);
+ LLDB_INSTRUMENT_VA(this, message);
ref().AppendWarning(message);
}
@@ -227,8 +215,7 @@ CommandReturnObject &SBCommandReturnObject::ref() const {
}
bool SBCommandReturnObject::GetDescription(SBStream &description) {
- LLDB_RECORD_METHOD(bool, SBCommandReturnObject, GetDescription,
- (lldb::SBStream &), description);
+ LLDB_INSTRUMENT_VA(this, description);
Stream &strm = description.ref();
@@ -253,62 +240,53 @@ bool SBCommandReturnObject::GetDescription(SBStream &description) {
}
void SBCommandReturnObject::SetImmediateOutputFile(FILE *fh) {
- LLDB_RECORD_DUMMY(void, SBCommandReturnObject, SetImmediateOutputFile,
- (FILE *), fh);
+ LLDB_INSTRUMENT_VA(this, fh);
SetImmediateOutputFile(fh, false);
}
void SBCommandReturnObject::SetImmediateErrorFile(FILE *fh) {
- LLDB_RECORD_DUMMY(void, SBCommandReturnObject, SetImmediateErrorFile,
- (FILE *), fh);
+ LLDB_INSTRUMENT_VA(this, fh);
SetImmediateErrorFile(fh, false);
}
void SBCommandReturnObject::SetImmediateOutputFile(FILE *fh,
bool transfer_ownership) {
- LLDB_RECORD_DUMMY(void, SBCommandReturnObject, SetImmediateOutputFile,
- (FILE *, bool), fh, transfer_ownership);
+ LLDB_INSTRUMENT_VA(this, fh, transfer_ownership);
FileSP file = std::make_shared<NativeFile>(fh, transfer_ownership);
ref().SetImmediateOutputFile(file);
}
void SBCommandReturnObject::SetImmediateErrorFile(FILE *fh,
bool transfer_ownership) {
- LLDB_RECORD_DUMMY(void, SBCommandReturnObject, SetImmediateErrorFile,
- (FILE *, bool), fh, transfer_ownership);
+ LLDB_INSTRUMENT_VA(this, fh, transfer_ownership);
FileSP file = std::make_shared<NativeFile>(fh, transfer_ownership);
ref().SetImmediateErrorFile(file);
}
void SBCommandReturnObject::SetImmediateOutputFile(SBFile file) {
- LLDB_RECORD_METHOD(void, SBCommandReturnObject, SetImmediateOutputFile,
- (SBFile), file);
+ LLDB_INSTRUMENT_VA(this, file);
ref().SetImmediateOutputFile(file.m_opaque_sp);
}
void SBCommandReturnObject::SetImmediateErrorFile(SBFile file) {
- LLDB_RECORD_METHOD(void, SBCommandReturnObject, SetImmediateErrorFile,
- (SBFile), file);
+ LLDB_INSTRUMENT_VA(this, file);
ref().SetImmediateErrorFile(file.m_opaque_sp);
}
void SBCommandReturnObject::SetImmediateOutputFile(FileSP file_sp) {
- LLDB_RECORD_METHOD(void, SBCommandReturnObject, SetImmediateOutputFile,
- (FileSP), file_sp);
+ LLDB_INSTRUMENT_VA(this, file_sp);
SetImmediateOutputFile(SBFile(file_sp));
}
void SBCommandReturnObject::SetImmediateErrorFile(FileSP file_sp) {
- LLDB_RECORD_METHOD(void, SBCommandReturnObject, SetImmediateErrorFile,
- (FileSP), file_sp);
+ LLDB_INSTRUMENT_VA(this, file_sp);
SetImmediateErrorFile(SBFile(file_sp));
}
void SBCommandReturnObject::PutCString(const char *string, int len) {
- LLDB_RECORD_METHOD(void, SBCommandReturnObject, PutCString,
- (const char *, int), string, len);
+ LLDB_INSTRUMENT_VA(this, string, len);
if (len == 0 || string == nullptr || *string == 0) {
return;
@@ -320,8 +298,7 @@ void SBCommandReturnObject::PutCString(const char *string, int len) {
}
const char *SBCommandReturnObject::GetOutput(bool only_if_no_immediate) {
- LLDB_RECORD_METHOD(const char *, SBCommandReturnObject, GetOutput, (bool),
- only_if_no_immediate);
+ LLDB_INSTRUMENT_VA(this, only_if_no_immediate);
if (!only_if_no_immediate ||
ref().GetImmediateOutputStream().get() == nullptr)
@@ -330,8 +307,7 @@ const char *SBCommandReturnObject::GetOutput(bool only_if_no_immediate) {
}
const char *SBCommandReturnObject::GetError(bool only_if_no_immediate) {
- LLDB_RECORD_METHOD(const char *, SBCommandReturnObject, GetError, (bool),
- only_if_no_immediate);
+ LLDB_INSTRUMENT_VA(this, only_if_no_immediate);
if (!only_if_no_immediate || ref().GetImmediateErrorStream().get() == nullptr)
return GetError();
@@ -348,9 +324,7 @@ size_t SBCommandReturnObject::Printf(const char *format, ...) {
void SBCommandReturnObject::SetError(lldb::SBError &error,
const char *fallback_error_cstr) {
- LLDB_RECORD_METHOD(void, SBCommandReturnObject, SetError,
- (lldb::SBError &, const char *), error,
- fallback_error_cstr);
+ LLDB_INSTRUMENT_VA(this, error, fallback_error_cstr);
if (error.IsValid())
ref().SetError(error.ref(), fallback_error_cstr);
@@ -359,76 +333,8 @@ void SBCommandReturnObject::SetError(lldb::SBError &error,
}
void SBCommandReturnObject::SetError(const char *error_cstr) {
- LLDB_RECORD_METHOD(void, SBCommandReturnObject, SetError, (const char *),
- error_cstr);
+ LLDB_INSTRUMENT_VA(this, error_cstr);
if (error_cstr)
ref().AppendError(error_cstr);
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBCommandReturnObject>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBCommandReturnObject, ());
- LLDB_REGISTER_CONSTRUCTOR(SBCommandReturnObject,
- (lldb_private::CommandReturnObject &));
- LLDB_REGISTER_CONSTRUCTOR(SBCommandReturnObject,
- (const lldb::SBCommandReturnObject &));
- LLDB_REGISTER_METHOD(
- lldb::SBCommandReturnObject &,
- SBCommandReturnObject, operator=,(const lldb::SBCommandReturnObject &));
- LLDB_REGISTER_METHOD_CONST(bool, SBCommandReturnObject, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBCommandReturnObject, operator bool, ());
- LLDB_REGISTER_METHOD(const char *, SBCommandReturnObject, GetOutput, ());
- LLDB_REGISTER_METHOD(const char *, SBCommandReturnObject, GetError, ());
- LLDB_REGISTER_METHOD(size_t, SBCommandReturnObject, GetOutputSize, ());
- LLDB_REGISTER_METHOD(size_t, SBCommandReturnObject, GetErrorSize, ());
- LLDB_REGISTER_METHOD(size_t, SBCommandReturnObject, PutOutput, (FILE *));
- LLDB_REGISTER_METHOD(size_t, SBCommandReturnObject, PutError, (FILE *));
- LLDB_REGISTER_METHOD(size_t, SBCommandReturnObject, PutOutput, (SBFile));
- LLDB_REGISTER_METHOD(size_t, SBCommandReturnObject, PutError, (SBFile));
- LLDB_REGISTER_METHOD(size_t, SBCommandReturnObject, PutOutput, (FileSP));
- LLDB_REGISTER_METHOD(size_t, SBCommandReturnObject, PutError, (FileSP));
- LLDB_REGISTER_METHOD(void, SBCommandReturnObject, Clear, ());
- LLDB_REGISTER_METHOD(lldb::ReturnStatus, SBCommandReturnObject, GetStatus,
- ());
- LLDB_REGISTER_METHOD(void, SBCommandReturnObject, SetStatus,
- (lldb::ReturnStatus));
- LLDB_REGISTER_METHOD(bool, SBCommandReturnObject, Succeeded, ());
- LLDB_REGISTER_METHOD(bool, SBCommandReturnObject, HasResult, ());
- LLDB_REGISTER_METHOD(void, SBCommandReturnObject, AppendMessage,
- (const char *));
- LLDB_REGISTER_METHOD(void, SBCommandReturnObject, AppendWarning,
- (const char *));
- LLDB_REGISTER_METHOD(bool, SBCommandReturnObject, GetDescription,
- (lldb::SBStream &));
- LLDB_REGISTER_METHOD(void, SBCommandReturnObject, SetImmediateOutputFile,
- (FILE *));
- LLDB_REGISTER_METHOD(void, SBCommandReturnObject, SetImmediateErrorFile,
- (FILE *));
- LLDB_REGISTER_METHOD(void, SBCommandReturnObject, SetImmediateOutputFile,
- (SBFile));
- LLDB_REGISTER_METHOD(void, SBCommandReturnObject, SetImmediateErrorFile,
- (SBFile));
- LLDB_REGISTER_METHOD(void, SBCommandReturnObject, SetImmediateOutputFile,
- (FileSP));
- LLDB_REGISTER_METHOD(void, SBCommandReturnObject, SetImmediateErrorFile,
- (FileSP));
- LLDB_REGISTER_METHOD(void, SBCommandReturnObject, SetImmediateOutputFile,
- (FILE *, bool));
- LLDB_REGISTER_METHOD(void, SBCommandReturnObject, SetImmediateErrorFile,
- (FILE *, bool));
- LLDB_REGISTER_METHOD(void, SBCommandReturnObject, PutCString,
- (const char *, int));
- LLDB_REGISTER_METHOD(const char *, SBCommandReturnObject, GetOutput,
- (bool));
- LLDB_REGISTER_METHOD(const char *, SBCommandReturnObject, GetError, (bool));
- LLDB_REGISTER_METHOD(void, SBCommandReturnObject, SetError,
- (lldb::SBError &, const char *));
- LLDB_REGISTER_METHOD(void, SBCommandReturnObject, SetError, (const char *));
-}
-
-}
-}
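
SBCommandReturnObject keeps its deep-copy semantics through the clone() helper from Utils.h while dropping the recording macros; both the copy constructor and operator= funnel through it. A minimal sketch of that clone-on-copy idiom follows; the template below is an assumption about what such a helper does, not a copy of LLDB's Utils.h:

#include <memory>
#include <string>

// Assumed behavior of a clone() helper: deep-copy the pointee, propagate null.
template <typename T>
std::unique_ptr<T> clone(const std::unique_ptr<T> &src) {
  return src ? std::make_unique<T>(*src) : nullptr;
}

// Hypothetical payload standing in for CommandReturnObject.
struct ReturnPayload {
  std::string output;
  std::string error;
};

class SBReturnSketch {
public:
  SBReturnSketch() : m_opaque_up(std::make_unique<ReturnPayload>()) {}

  SBReturnSketch(const SBReturnSketch &rhs) { m_opaque_up = clone(rhs.m_opaque_up); }

  SBReturnSketch &operator=(const SBReturnSketch &rhs) {
    if (this != &rhs)
      m_opaque_up = clone(rhs.m_opaque_up);   // deep copy, as in the hunks above
    return *this;
  }

private:
  std::unique_ptr<ReturnPayload> m_opaque_up;
};

int main() {
  SBReturnSketch a;
  SBReturnSketch b(a);   // deep copy on construction
  b = a;                 // deep copy on assignment
  return 0;
}
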
diff --git a/contrib/llvm-project/lldb/source/API/SBCommunication.cpp b/contrib/llvm-project/lldb/source/API/SBCommunication.cpp
index 9a2ab89d5e4e..0a1dad1e2e8f 100644
--- a/contrib/llvm-project/lldb/source/API/SBCommunication.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBCommunication.cpp
@@ -7,22 +7,20 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBCommunication.h"
-#include "SBReproducerPrivate.h"
#include "lldb/API/SBBroadcaster.h"
#include "lldb/Core/Communication.h"
#include "lldb/Host/ConnectionFileDescriptor.h"
#include "lldb/Host/Host.h"
+#include "lldb/Utility/Instrumentation.h"
using namespace lldb;
using namespace lldb_private;
-SBCommunication::SBCommunication() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBCommunication);
-}
+SBCommunication::SBCommunication() { LLDB_INSTRUMENT_VA(this); }
SBCommunication::SBCommunication(const char *broadcaster_name)
: m_opaque(new Communication(broadcaster_name)), m_opaque_owned(true) {
- LLDB_RECORD_CONSTRUCTOR(SBCommunication, (const char *), broadcaster_name);
+ LLDB_INSTRUMENT_VA(this, broadcaster_name);
}
SBCommunication::~SBCommunication() {
@@ -33,17 +31,17 @@ SBCommunication::~SBCommunication() {
}
bool SBCommunication::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBCommunication, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBCommunication::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBCommunication, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque != nullptr;
}
bool SBCommunication::GetCloseOnEOF() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBCommunication, GetCloseOnEOF);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque)
return m_opaque->GetCloseOnEOF();
@@ -51,15 +49,14 @@ bool SBCommunication::GetCloseOnEOF() {
}
void SBCommunication::SetCloseOnEOF(bool b) {
- LLDB_RECORD_METHOD(void, SBCommunication, SetCloseOnEOF, (bool), b);
+ LLDB_INSTRUMENT_VA(this, b);
if (m_opaque)
m_opaque->SetCloseOnEOF(b);
}
ConnectionStatus SBCommunication::Connect(const char *url) {
- LLDB_RECORD_METHOD(lldb::ConnectionStatus, SBCommunication, Connect,
- (const char *), url);
+ LLDB_INSTRUMENT_VA(this, url);
if (m_opaque) {
if (!m_opaque->HasConnection())
@@ -70,8 +67,7 @@ ConnectionStatus SBCommunication::Connect(const char *url) {
}
ConnectionStatus SBCommunication::AdoptFileDesriptor(int fd, bool owns_fd) {
- LLDB_RECORD_METHOD(lldb::ConnectionStatus, SBCommunication,
- AdoptFileDesriptor, (int, bool), fd, owns_fd);
+ LLDB_INSTRUMENT_VA(this, fd, owns_fd);
ConnectionStatus status = eConnectionStatusNoConnection;
if (m_opaque) {
@@ -90,8 +86,7 @@ ConnectionStatus SBCommunication::AdoptFileDesriptor(int fd, bool owns_fd) {
}
ConnectionStatus SBCommunication::Disconnect() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::ConnectionStatus, SBCommunication,
- Disconnect);
+ LLDB_INSTRUMENT_VA(this);
ConnectionStatus status = eConnectionStatusNoConnection;
if (m_opaque)
@@ -100,16 +95,14 @@ ConnectionStatus SBCommunication::Disconnect() {
}
bool SBCommunication::IsConnected() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBCommunication, IsConnected);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque ? m_opaque->IsConnected() : false;
}
size_t SBCommunication::Read(void *dst, size_t dst_len, uint32_t timeout_usec,
ConnectionStatus &status) {
- LLDB_RECORD_DUMMY(size_t, SBCommunication, Read,
- (void *, size_t, uint32_t, lldb::ConnectionStatus &), dst,
- dst_len, timeout_usec, status);
+ LLDB_INSTRUMENT_VA(this, dst, dst_len, timeout_usec, status);
size_t bytes_read = 0;
Timeout<std::micro> timeout = timeout_usec == UINT32_MAX
@@ -125,9 +118,7 @@ size_t SBCommunication::Read(void *dst, size_t dst_len, uint32_t timeout_usec,
size_t SBCommunication::Write(const void *src, size_t src_len,
ConnectionStatus &status) {
- LLDB_RECORD_DUMMY(size_t, SBCommunication, Write,
- (const void *, size_t, lldb::ConnectionStatus &), src,
- src_len, status);
+ LLDB_INSTRUMENT_VA(this, src, src_len, status);
size_t bytes_written = 0;
if (m_opaque)
@@ -139,28 +130,26 @@ size_t SBCommunication::Write(const void *src, size_t src_len,
}
bool SBCommunication::ReadThreadStart() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBCommunication, ReadThreadStart);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque ? m_opaque->StartReadThread() : false;
}
bool SBCommunication::ReadThreadStop() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBCommunication, ReadThreadStop);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque ? m_opaque->StopReadThread() : false;
}
bool SBCommunication::ReadThreadIsRunning() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBCommunication, ReadThreadIsRunning);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque ? m_opaque->ReadThreadIsRunning() : false;
}
bool SBCommunication::SetReadThreadBytesReceivedCallback(
ReadThreadBytesReceived callback, void *callback_baton) {
- LLDB_RECORD_DUMMY(bool, SBCommunication, SetReadThreadBytesReceivedCallback,
- (lldb::SBCommunication::ReadThreadBytesReceived, void *),
- callback, callback_baton);
+ LLDB_INSTRUMENT_VA(this, callback, callback_baton);
bool result = false;
if (m_opaque) {
@@ -171,46 +160,14 @@ bool SBCommunication::SetReadThreadBytesReceivedCallback(
}
SBBroadcaster SBCommunication::GetBroadcaster() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBBroadcaster, SBCommunication,
- GetBroadcaster);
+ LLDB_INSTRUMENT_VA(this);
SBBroadcaster broadcaster(m_opaque, false);
- return LLDB_RECORD_RESULT(broadcaster);
+ return broadcaster;
}
const char *SBCommunication::GetBroadcasterClass() {
- LLDB_RECORD_STATIC_METHOD_NO_ARGS(const char *, SBCommunication,
- GetBroadcasterClass);
+ LLDB_INSTRUMENT();
return Communication::GetStaticBroadcasterClass().AsCString();
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBCommunication>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBCommunication, ());
- LLDB_REGISTER_CONSTRUCTOR(SBCommunication, (const char *));
- LLDB_REGISTER_METHOD_CONST(bool, SBCommunication, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBCommunication, operator bool, ());
- LLDB_REGISTER_METHOD(bool, SBCommunication, GetCloseOnEOF, ());
- LLDB_REGISTER_METHOD(void, SBCommunication, SetCloseOnEOF, (bool));
- LLDB_REGISTER_METHOD(lldb::ConnectionStatus, SBCommunication, Connect,
- (const char *));
- LLDB_REGISTER_METHOD(lldb::ConnectionStatus, SBCommunication,
- AdoptFileDesriptor, (int, bool));
- LLDB_REGISTER_METHOD(lldb::ConnectionStatus, SBCommunication, Disconnect,
- ());
- LLDB_REGISTER_METHOD_CONST(bool, SBCommunication, IsConnected, ());
- LLDB_REGISTER_METHOD(bool, SBCommunication, ReadThreadStart, ());
- LLDB_REGISTER_METHOD(bool, SBCommunication, ReadThreadStop, ());
- LLDB_REGISTER_METHOD(bool, SBCommunication, ReadThreadIsRunning, ());
- LLDB_REGISTER_METHOD(lldb::SBBroadcaster, SBCommunication, GetBroadcaster,
- ());
- LLDB_REGISTER_STATIC_METHOD(const char *, SBCommunication,
- GetBroadcasterClass, ());
-}
-
-}
-}
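
SBCommunication shows the two flavors of the new macro side by side: member functions pass this (and any arguments) to LLDB_INSTRUMENT_VA, while the static GetBroadcasterClass uses the argument-free LLDB_INSTRUMENT(). The guarded forwarding through the possibly-null m_opaque pointer is untouched. A stubbed sketch of both, with hypothetical names:

#include <cstdio>

// Stubs mirroring the two entry points above: LLDB_INSTRUMENT_VA(this, ...)
// for member functions, LLDB_INSTRUMENT() for statics. Hypothetical, not
// LLDB's Instrumentation.h.
#define INSTRUMENT_VA(...) std::printf("call: %s\n", __PRETTY_FUNCTION__)
#define INSTRUMENT()       std::printf("call: %s\n", __PRETTY_FUNCTION__)

struct ConnectionStub {
  bool connected = false;
};

class CommSketch {
public:
  bool IsConnected() const {
    INSTRUMENT_VA(this);
    // Null-guarded forwarding, as in "return m_opaque ? ... : false;".
    return m_opaque ? m_opaque->connected : false;
  }

  static const char *GetBroadcasterClass() {
    INSTRUMENT();                       // no 'this' to pass for a static
    return "comm-sketch";
  }

private:
  ConnectionStub *m_opaque = nullptr;   // stands in for the owned Communication*
};

int main() {
  CommSketch c;
  std::printf("%d %s\n", c.IsConnected(), CommSketch::GetBroadcasterClass());
  return 0;
}
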
diff --git a/contrib/llvm-project/lldb/source/API/SBCompileUnit.cpp b/contrib/llvm-project/lldb/source/API/SBCompileUnit.cpp
index a44d3b897110..46a319c6b7a3 100644
--- a/contrib/llvm-project/lldb/source/API/SBCompileUnit.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBCompileUnit.cpp
@@ -7,7 +7,6 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBCompileUnit.h"
-#include "SBReproducerPrivate.h"
#include "lldb/API/SBLineEntry.h"
#include "lldb/API/SBStream.h"
#include "lldb/Core/Module.h"
@@ -17,45 +16,41 @@
#include "lldb/Symbol/SymbolFile.h"
#include "lldb/Symbol/Type.h"
#include "lldb/Symbol/TypeList.h"
+#include "lldb/Utility/Instrumentation.h"
using namespace lldb;
using namespace lldb_private;
-SBCompileUnit::SBCompileUnit() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBCompileUnit);
-}
+SBCompileUnit::SBCompileUnit() { LLDB_INSTRUMENT_VA(this); }
SBCompileUnit::SBCompileUnit(lldb_private::CompileUnit *lldb_object_ptr)
: m_opaque_ptr(lldb_object_ptr) {}
SBCompileUnit::SBCompileUnit(const SBCompileUnit &rhs)
: m_opaque_ptr(rhs.m_opaque_ptr) {
- LLDB_RECORD_CONSTRUCTOR(SBCompileUnit, (const lldb::SBCompileUnit &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
const SBCompileUnit &SBCompileUnit::operator=(const SBCompileUnit &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBCompileUnit &,
- SBCompileUnit, operator=,(const lldb::SBCompileUnit &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_ptr = rhs.m_opaque_ptr;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBCompileUnit::~SBCompileUnit() { m_opaque_ptr = nullptr; }
SBFileSpec SBCompileUnit::GetFileSpec() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBFileSpec, SBCompileUnit,
- GetFileSpec);
+ LLDB_INSTRUMENT_VA(this);
SBFileSpec file_spec;
if (m_opaque_ptr)
file_spec.SetFileSpec(m_opaque_ptr->GetPrimaryFile());
- return LLDB_RECORD_RESULT(file_spec);
+ return file_spec;
}
uint32_t SBCompileUnit::GetNumLineEntries() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBCompileUnit, GetNumLineEntries);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_ptr) {
LineTable *line_table = m_opaque_ptr->GetLineTable();
@@ -67,8 +62,7 @@ uint32_t SBCompileUnit::GetNumLineEntries() const {
}
SBLineEntry SBCompileUnit::GetLineEntryAtIndex(uint32_t idx) const {
- LLDB_RECORD_METHOD_CONST(lldb::SBLineEntry, SBCompileUnit,
- GetLineEntryAtIndex, (uint32_t), idx);
+ LLDB_INSTRUMENT_VA(this, idx);
SBLineEntry sb_line_entry;
if (m_opaque_ptr) {
@@ -80,14 +74,12 @@ SBLineEntry SBCompileUnit::GetLineEntryAtIndex(uint32_t idx) const {
}
}
- return LLDB_RECORD_RESULT(sb_line_entry);
+ return sb_line_entry;
}
uint32_t SBCompileUnit::FindLineEntryIndex(uint32_t start_idx, uint32_t line,
SBFileSpec *inline_file_spec) const {
- LLDB_RECORD_METHOD_CONST(uint32_t, SBCompileUnit, FindLineEntryIndex,
- (uint32_t, uint32_t, lldb::SBFileSpec *), start_idx,
- line, inline_file_spec);
+ LLDB_INSTRUMENT_VA(this, start_idx, line, inline_file_spec);
const bool exact = true;
return FindLineEntryIndex(start_idx, line, inline_file_spec, exact);
@@ -96,9 +88,7 @@ uint32_t SBCompileUnit::FindLineEntryIndex(uint32_t start_idx, uint32_t line,
uint32_t SBCompileUnit::FindLineEntryIndex(uint32_t start_idx, uint32_t line,
SBFileSpec *inline_file_spec,
bool exact) const {
- LLDB_RECORD_METHOD_CONST(uint32_t, SBCompileUnit, FindLineEntryIndex,
- (uint32_t, uint32_t, lldb::SBFileSpec *, bool),
- start_idx, line, inline_file_spec, exact);
+ LLDB_INSTRUMENT_VA(this, start_idx, line, inline_file_spec, exact);
uint32_t index = UINT32_MAX;
if (m_opaque_ptr) {
@@ -118,7 +108,7 @@ uint32_t SBCompileUnit::FindLineEntryIndex(uint32_t start_idx, uint32_t line,
}
uint32_t SBCompileUnit::GetNumSupportFiles() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBCompileUnit, GetNumSupportFiles);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_ptr)
return m_opaque_ptr->GetSupportFiles().GetSize();
@@ -127,32 +117,30 @@ uint32_t SBCompileUnit::GetNumSupportFiles() const {
}
lldb::SBTypeList SBCompileUnit::GetTypes(uint32_t type_mask) {
- LLDB_RECORD_METHOD(lldb::SBTypeList, SBCompileUnit, GetTypes, (uint32_t),
- type_mask);
+ LLDB_INSTRUMENT_VA(this, type_mask);
SBTypeList sb_type_list;
if (!m_opaque_ptr)
- return LLDB_RECORD_RESULT(sb_type_list);
+ return sb_type_list;
ModuleSP module_sp(m_opaque_ptr->GetModule());
if (!module_sp)
- return LLDB_RECORD_RESULT(sb_type_list);
+ return sb_type_list;
SymbolFile *symfile = module_sp->GetSymbolFile();
if (!symfile)
- return LLDB_RECORD_RESULT(sb_type_list);
+ return sb_type_list;
TypeClass type_class = static_cast<TypeClass>(type_mask);
TypeList type_list;
symfile->GetTypes(m_opaque_ptr, type_class, type_list);
sb_type_list.m_opaque_up->Append(type_list);
- return LLDB_RECORD_RESULT(sb_type_list);
+ return sb_type_list;
}
SBFileSpec SBCompileUnit::GetSupportFileAtIndex(uint32_t idx) const {
- LLDB_RECORD_METHOD_CONST(lldb::SBFileSpec, SBCompileUnit,
- GetSupportFileAtIndex, (uint32_t), idx);
+ LLDB_INSTRUMENT_VA(this, idx);
SBFileSpec sb_file_spec;
if (m_opaque_ptr) {
@@ -160,16 +148,13 @@ SBFileSpec SBCompileUnit::GetSupportFileAtIndex(uint32_t idx) const {
sb_file_spec.SetFileSpec(spec);
}
-
- return LLDB_RECORD_RESULT(sb_file_spec);
+ return sb_file_spec;
}
uint32_t SBCompileUnit::FindSupportFileIndex(uint32_t start_idx,
const SBFileSpec &sb_file,
bool full) {
- LLDB_RECORD_METHOD(uint32_t, SBCompileUnit, FindSupportFileIndex,
- (uint32_t, const lldb::SBFileSpec &, bool), start_idx,
- sb_file, full);
+ LLDB_INSTRUMENT_VA(this, start_idx, sb_file, full);
if (m_opaque_ptr) {
const FileSpecList &support_files = m_opaque_ptr->GetSupportFiles();
@@ -179,7 +164,7 @@ uint32_t SBCompileUnit::FindSupportFileIndex(uint32_t start_idx,
}
lldb::LanguageType SBCompileUnit::GetLanguage() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::LanguageType, SBCompileUnit, GetLanguage);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_ptr)
return m_opaque_ptr->GetLanguage();
@@ -187,25 +172,23 @@ lldb::LanguageType SBCompileUnit::GetLanguage() {
}
bool SBCompileUnit::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBCompileUnit, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBCompileUnit::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBCompileUnit, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_ptr != nullptr;
}
bool SBCompileUnit::operator==(const SBCompileUnit &rhs) const {
- LLDB_RECORD_METHOD_CONST(
- bool, SBCompileUnit, operator==,(const lldb::SBCompileUnit &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
return m_opaque_ptr == rhs.m_opaque_ptr;
}
bool SBCompileUnit::operator!=(const SBCompileUnit &rhs) const {
- LLDB_RECORD_METHOD_CONST(
- bool, SBCompileUnit, operator!=,(const lldb::SBCompileUnit &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
return m_opaque_ptr != rhs.m_opaque_ptr;
}
@@ -225,8 +208,7 @@ void SBCompileUnit::reset(lldb_private::CompileUnit *lldb_object_ptr) {
}
bool SBCompileUnit::GetDescription(SBStream &description) {
- LLDB_RECORD_METHOD(bool, SBCompileUnit, GetDescription, (lldb::SBStream &),
- description);
+ LLDB_INSTRUMENT_VA(this, description);
Stream &strm = description.ref();
@@ -237,42 +219,3 @@ bool SBCompileUnit::GetDescription(SBStream &description) {
return true;
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBCompileUnit>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBCompileUnit, ());
- LLDB_REGISTER_CONSTRUCTOR(SBCompileUnit, (const lldb::SBCompileUnit &));
- LLDB_REGISTER_METHOD(
- const lldb::SBCompileUnit &,
- SBCompileUnit, operator=,(const lldb::SBCompileUnit &));
- LLDB_REGISTER_METHOD_CONST(lldb::SBFileSpec, SBCompileUnit, GetFileSpec,
- ());
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBCompileUnit, GetNumLineEntries, ());
- LLDB_REGISTER_METHOD_CONST(lldb::SBLineEntry, SBCompileUnit,
- GetLineEntryAtIndex, (uint32_t));
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBCompileUnit, FindLineEntryIndex,
- (uint32_t, uint32_t, lldb::SBFileSpec *));
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBCompileUnit, FindLineEntryIndex,
- (uint32_t, uint32_t, lldb::SBFileSpec *, bool));
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBCompileUnit, GetNumSupportFiles, ());
- LLDB_REGISTER_METHOD(lldb::SBTypeList, SBCompileUnit, GetTypes, (uint32_t));
- LLDB_REGISTER_METHOD_CONST(lldb::SBFileSpec, SBCompileUnit,
- GetSupportFileAtIndex, (uint32_t));
- LLDB_REGISTER_METHOD(uint32_t, SBCompileUnit, FindSupportFileIndex,
- (uint32_t, const lldb::SBFileSpec &, bool));
- LLDB_REGISTER_METHOD(lldb::LanguageType, SBCompileUnit, GetLanguage, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBCompileUnit, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBCompileUnit, operator bool, ());
- LLDB_REGISTER_METHOD_CONST(
- bool, SBCompileUnit, operator==,(const lldb::SBCompileUnit &));
- LLDB_REGISTER_METHOD_CONST(
- bool, SBCompileUnit, operator!=,(const lldb::SBCompileUnit &));
- LLDB_REGISTER_METHOD(bool, SBCompileUnit, GetDescription,
- (lldb::SBStream &));
-}
-
-}
-}
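
SBCompileUnit's getters follow the same build-fill-return rhythm once LLDB_RECORD_RESULT is gone: construct a default (invalid) SB value, fill it only when the opaque pointer is live, and return it directly. A hypothetical sketch of that shape; none of these names are LLDB's:

#include <cstdio>
#include <string>

struct FileSpecSketch {
  std::string path;                      // empty path plays the "invalid" role
};

class CompileUnitSketch {
public:
  explicit CompileUnitSketch(const std::string *primary_file = nullptr)
      : m_opaque_ptr(primary_file) {}

  FileSpecSketch GetFileSpec() const {
    FileSpecSketch file_spec;            // default-constructed, i.e. invalid
    if (m_opaque_ptr)
      file_spec.path = *m_opaque_ptr;    // fill only when backed by real data
    return file_spec;                    // returned directly, no result macro
  }

private:
  const std::string *m_opaque_ptr;       // stands in for CompileUnit*
};

int main() {
  std::string primary = "main.cpp";
  CompileUnitSketch cu(&primary);
  std::printf("%s\n", cu.GetFileSpec().path.c_str());
  return 0;
}
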
diff --git a/contrib/llvm-project/lldb/source/API/SBData.cpp b/contrib/llvm-project/lldb/source/API/SBData.cpp
index 9fc590578bce..5232bdde1ded 100644
--- a/contrib/llvm-project/lldb/source/API/SBData.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBData.cpp
@@ -7,9 +7,9 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBData.h"
-#include "SBReproducerPrivate.h"
#include "lldb/API/SBError.h"
#include "lldb/API/SBStream.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Core/DumpDataExtractor.h"
#include "lldb/Utility/DataBufferHeap.h"
@@ -23,22 +23,21 @@ using namespace lldb;
using namespace lldb_private;
SBData::SBData() : m_opaque_sp(new DataExtractor()) {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBData);
+ LLDB_INSTRUMENT_VA(this);
}
SBData::SBData(const lldb::DataExtractorSP &data_sp) : m_opaque_sp(data_sp) {}
SBData::SBData(const SBData &rhs) : m_opaque_sp(rhs.m_opaque_sp) {
- LLDB_RECORD_CONSTRUCTOR(SBData, (const lldb::SBData &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
const SBData &SBData::operator=(const SBData &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBData &,
- SBData, operator=,(const lldb::SBData &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_sp = rhs.m_opaque_sp;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBData::~SBData() = default;
@@ -58,17 +57,17 @@ lldb::DataExtractorSP &SBData::operator*() { return m_opaque_sp; }
const lldb::DataExtractorSP &SBData::operator*() const { return m_opaque_sp; }
bool SBData::IsValid() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBData, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBData::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBData, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp.get() != nullptr;
}
uint8_t SBData::GetAddressByteSize() {
- LLDB_RECORD_METHOD_NO_ARGS(uint8_t, SBData, GetAddressByteSize);
+ LLDB_INSTRUMENT_VA(this);
uint8_t value = 0;
if (m_opaque_sp.get())
@@ -77,22 +76,21 @@ uint8_t SBData::GetAddressByteSize() {
}
void SBData::SetAddressByteSize(uint8_t addr_byte_size) {
- LLDB_RECORD_METHOD(void, SBData, SetAddressByteSize, (uint8_t),
- addr_byte_size);
+ LLDB_INSTRUMENT_VA(this, addr_byte_size);
if (m_opaque_sp.get())
m_opaque_sp->SetAddressByteSize(addr_byte_size);
}
void SBData::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBData, Clear);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_sp.get())
m_opaque_sp->Clear();
}
size_t SBData::GetByteSize() {
- LLDB_RECORD_METHOD_NO_ARGS(size_t, SBData, GetByteSize);
+ LLDB_INSTRUMENT_VA(this);
size_t value = 0;
if (m_opaque_sp.get())
@@ -101,7 +99,7 @@ size_t SBData::GetByteSize() {
}
lldb::ByteOrder SBData::GetByteOrder() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::ByteOrder, SBData, GetByteOrder);
+ LLDB_INSTRUMENT_VA(this);
lldb::ByteOrder value = eByteOrderInvalid;
if (m_opaque_sp.get())
@@ -110,15 +108,14 @@ lldb::ByteOrder SBData::GetByteOrder() {
}
void SBData::SetByteOrder(lldb::ByteOrder endian) {
- LLDB_RECORD_METHOD(void, SBData, SetByteOrder, (lldb::ByteOrder), endian);
+ LLDB_INSTRUMENT_VA(this, endian);
if (m_opaque_sp.get())
m_opaque_sp->SetByteOrder(endian);
}
float SBData::GetFloat(lldb::SBError &error, lldb::offset_t offset) {
- LLDB_RECORD_METHOD(float, SBData, GetFloat, (lldb::SBError &, lldb::offset_t),
- error, offset);
+ LLDB_INSTRUMENT_VA(this, error, offset);
float value = 0;
if (!m_opaque_sp.get()) {
@@ -133,8 +130,7 @@ float SBData::GetFloat(lldb::SBError &error, lldb::offset_t offset) {
}
double SBData::GetDouble(lldb::SBError &error, lldb::offset_t offset) {
- LLDB_RECORD_METHOD(double, SBData, GetDouble,
- (lldb::SBError &, lldb::offset_t), error, offset);
+ LLDB_INSTRUMENT_VA(this, error, offset);
double value = 0;
if (!m_opaque_sp.get()) {
@@ -149,8 +145,7 @@ double SBData::GetDouble(lldb::SBError &error, lldb::offset_t offset) {
}
long double SBData::GetLongDouble(lldb::SBError &error, lldb::offset_t offset) {
- LLDB_RECORD_METHOD(long double, SBData, GetLongDouble,
- (lldb::SBError &, lldb::offset_t), error, offset);
+ LLDB_INSTRUMENT_VA(this, error, offset);
long double value = 0;
if (!m_opaque_sp.get()) {
@@ -165,8 +160,7 @@ long double SBData::GetLongDouble(lldb::SBError &error, lldb::offset_t offset) {
}
lldb::addr_t SBData::GetAddress(lldb::SBError &error, lldb::offset_t offset) {
- LLDB_RECORD_METHOD(lldb::addr_t, SBData, GetAddress,
- (lldb::SBError &, lldb::offset_t), error, offset);
+ LLDB_INSTRUMENT_VA(this, error, offset);
lldb::addr_t value = 0;
if (!m_opaque_sp.get()) {
@@ -181,8 +175,7 @@ lldb::addr_t SBData::GetAddress(lldb::SBError &error, lldb::offset_t offset) {
}
uint8_t SBData::GetUnsignedInt8(lldb::SBError &error, lldb::offset_t offset) {
- LLDB_RECORD_METHOD(uint8_t, SBData, GetUnsignedInt8,
- (lldb::SBError &, lldb::offset_t), error, offset);
+ LLDB_INSTRUMENT_VA(this, error, offset);
uint8_t value = 0;
if (!m_opaque_sp.get()) {
@@ -197,8 +190,7 @@ uint8_t SBData::GetUnsignedInt8(lldb::SBError &error, lldb::offset_t offset) {
}
uint16_t SBData::GetUnsignedInt16(lldb::SBError &error, lldb::offset_t offset) {
- LLDB_RECORD_METHOD(uint16_t, SBData, GetUnsignedInt16,
- (lldb::SBError &, lldb::offset_t), error, offset);
+ LLDB_INSTRUMENT_VA(this, error, offset);
uint16_t value = 0;
if (!m_opaque_sp.get()) {
@@ -213,8 +205,7 @@ uint16_t SBData::GetUnsignedInt16(lldb::SBError &error, lldb::offset_t offset) {
}
uint32_t SBData::GetUnsignedInt32(lldb::SBError &error, lldb::offset_t offset) {
- LLDB_RECORD_METHOD(uint32_t, SBData, GetUnsignedInt32,
- (lldb::SBError &, lldb::offset_t), error, offset);
+ LLDB_INSTRUMENT_VA(this, error, offset);
uint32_t value = 0;
if (!m_opaque_sp.get()) {
@@ -229,8 +220,7 @@ uint32_t SBData::GetUnsignedInt32(lldb::SBError &error, lldb::offset_t offset) {
}
uint64_t SBData::GetUnsignedInt64(lldb::SBError &error, lldb::offset_t offset) {
- LLDB_RECORD_METHOD(uint64_t, SBData, GetUnsignedInt64,
- (lldb::SBError &, lldb::offset_t), error, offset);
+ LLDB_INSTRUMENT_VA(this, error, offset);
uint64_t value = 0;
if (!m_opaque_sp.get()) {
@@ -245,8 +235,7 @@ uint64_t SBData::GetUnsignedInt64(lldb::SBError &error, lldb::offset_t offset) {
}
int8_t SBData::GetSignedInt8(lldb::SBError &error, lldb::offset_t offset) {
- LLDB_RECORD_METHOD(int8_t, SBData, GetSignedInt8,
- (lldb::SBError &, lldb::offset_t), error, offset);
+ LLDB_INSTRUMENT_VA(this, error, offset);
int8_t value = 0;
if (!m_opaque_sp.get()) {
@@ -261,8 +250,7 @@ int8_t SBData::GetSignedInt8(lldb::SBError &error, lldb::offset_t offset) {
}
int16_t SBData::GetSignedInt16(lldb::SBError &error, lldb::offset_t offset) {
- LLDB_RECORD_METHOD(int16_t, SBData, GetSignedInt16,
- (lldb::SBError &, lldb::offset_t), error, offset);
+ LLDB_INSTRUMENT_VA(this, error, offset);
int16_t value = 0;
if (!m_opaque_sp.get()) {
@@ -277,8 +265,7 @@ int16_t SBData::GetSignedInt16(lldb::SBError &error, lldb::offset_t offset) {
}
int32_t SBData::GetSignedInt32(lldb::SBError &error, lldb::offset_t offset) {
- LLDB_RECORD_METHOD(int32_t, SBData, GetSignedInt32,
- (lldb::SBError &, lldb::offset_t), error, offset);
+ LLDB_INSTRUMENT_VA(this, error, offset);
int32_t value = 0;
if (!m_opaque_sp.get()) {
@@ -293,8 +280,7 @@ int32_t SBData::GetSignedInt32(lldb::SBError &error, lldb::offset_t offset) {
}
int64_t SBData::GetSignedInt64(lldb::SBError &error, lldb::offset_t offset) {
- LLDB_RECORD_METHOD(int64_t, SBData, GetSignedInt64,
- (lldb::SBError &, lldb::offset_t), error, offset);
+ LLDB_INSTRUMENT_VA(this, error, offset);
int64_t value = 0;
if (!m_opaque_sp.get()) {
@@ -309,8 +295,7 @@ int64_t SBData::GetSignedInt64(lldb::SBError &error, lldb::offset_t offset) {
}
const char *SBData::GetString(lldb::SBError &error, lldb::offset_t offset) {
- LLDB_RECORD_METHOD(const char *, SBData, GetString,
- (lldb::SBError &, lldb::offset_t), error, offset);
+ LLDB_INSTRUMENT_VA(this, error, offset);
const char *value = nullptr;
if (!m_opaque_sp.get()) {
@@ -326,8 +311,7 @@ const char *SBData::GetString(lldb::SBError &error, lldb::offset_t offset) {
bool SBData::GetDescription(lldb::SBStream &description,
lldb::addr_t base_addr) {
- LLDB_RECORD_METHOD(bool, SBData, GetDescription,
- (lldb::SBStream &, lldb::addr_t), description, base_addr);
+ LLDB_INSTRUMENT_VA(this, description, base_addr);
Stream &strm = description.ref();
@@ -342,9 +326,7 @@ bool SBData::GetDescription(lldb::SBStream &description,
size_t SBData::ReadRawData(lldb::SBError &error, lldb::offset_t offset,
void *buf, size_t size) {
- LLDB_RECORD_DUMMY(size_t, SBData, ReadRawData,
- (lldb::SBError &, lldb::offset_t, void *, size_t), error,
- offset, buf, size);
+ LLDB_INSTRUMENT_VA(this, error, offset, buf, size);
void *ok = nullptr;
if (!m_opaque_sp.get()) {
@@ -360,10 +342,7 @@ size_t SBData::ReadRawData(lldb::SBError &error, lldb::offset_t offset,
void SBData::SetData(lldb::SBError &error, const void *buf, size_t size,
lldb::ByteOrder endian, uint8_t addr_size) {
- LLDB_RECORD_DUMMY(
- void, SBData, SetData,
- (lldb::SBError &, const void *, size_t, lldb::ByteOrder, uint8_t), error,
- buf, size, endian, addr_size);
+ LLDB_INSTRUMENT_VA(this, error, buf, size, endian, addr_size);
if (!m_opaque_sp.get())
m_opaque_sp = std::make_shared<DataExtractor>(buf, size, endian, addr_size);
@@ -377,10 +356,7 @@ void SBData::SetData(lldb::SBError &error, const void *buf, size_t size,
void SBData::SetDataWithOwnership(lldb::SBError &error, const void *buf,
size_t size, lldb::ByteOrder endian,
uint8_t addr_size) {
- LLDB_RECORD_DUMMY(
- void, SBData, SetData,
- (lldb::SBError &, const void *, size_t, lldb::ByteOrder, uint8_t, bool),
- error, buf, size, endian, addr_size, copy);
+ LLDB_INSTRUMENT_VA(this, error, buf, size, endian, addr_size);
lldb::DataBufferSP buffer_sp = std::make_shared<DataBufferHeap>(buf, size);
@@ -394,7 +370,7 @@ void SBData::SetDataWithOwnership(lldb::SBError &error, const void *buf,
}
bool SBData::Append(const SBData &rhs) {
- LLDB_RECORD_METHOD(bool, SBData, Append, (const lldb::SBData &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
bool value = false;
if (m_opaque_sp.get() && rhs.m_opaque_sp.get())
@@ -405,12 +381,10 @@ bool SBData::Append(const SBData &rhs) {
lldb::SBData SBData::CreateDataFromCString(lldb::ByteOrder endian,
uint32_t addr_byte_size,
const char *data) {
- LLDB_RECORD_STATIC_METHOD(lldb::SBData, SBData, CreateDataFromCString,
- (lldb::ByteOrder, uint32_t, const char *), endian,
- addr_byte_size, data);
+ LLDB_INSTRUMENT_VA(endian, addr_byte_size, data);
if (!data || !data[0])
- return LLDB_RECORD_RESULT(SBData());
+ return SBData();
uint32_t data_len = strlen(data);
@@ -420,19 +394,17 @@ lldb::SBData SBData::CreateDataFromCString(lldb::ByteOrder endian,
SBData ret(data_sp);
- return LLDB_RECORD_RESULT(ret);
+ return ret;
}
lldb::SBData SBData::CreateDataFromUInt64Array(lldb::ByteOrder endian,
uint32_t addr_byte_size,
uint64_t *array,
size_t array_len) {
- LLDB_RECORD_STATIC_METHOD(lldb::SBData, SBData, CreateDataFromUInt64Array,
- (lldb::ByteOrder, uint32_t, uint64_t *, size_t),
- endian, addr_byte_size, array, array_len);
+ LLDB_INSTRUMENT_VA(endian, addr_byte_size, array, array_len);
if (!array || array_len == 0)
- return LLDB_RECORD_RESULT(SBData());
+ return SBData();
size_t data_len = array_len * sizeof(uint64_t);
@@ -442,19 +414,17 @@ lldb::SBData SBData::CreateDataFromUInt64Array(lldb::ByteOrder endian,
SBData ret(data_sp);
- return LLDB_RECORD_RESULT(ret);
+ return ret;
}
lldb::SBData SBData::CreateDataFromUInt32Array(lldb::ByteOrder endian,
uint32_t addr_byte_size,
uint32_t *array,
size_t array_len) {
- LLDB_RECORD_STATIC_METHOD(lldb::SBData, SBData, CreateDataFromUInt32Array,
- (lldb::ByteOrder, uint32_t, uint32_t *, size_t),
- endian, addr_byte_size, array, array_len);
+ LLDB_INSTRUMENT_VA(endian, addr_byte_size, array, array_len);
if (!array || array_len == 0)
- return LLDB_RECORD_RESULT(SBData());
+ return SBData();
size_t data_len = array_len * sizeof(uint32_t);
@@ -464,19 +434,17 @@ lldb::SBData SBData::CreateDataFromUInt32Array(lldb::ByteOrder endian,
SBData ret(data_sp);
- return LLDB_RECORD_RESULT(ret);
+ return ret;
}
lldb::SBData SBData::CreateDataFromSInt64Array(lldb::ByteOrder endian,
uint32_t addr_byte_size,
int64_t *array,
size_t array_len) {
- LLDB_RECORD_STATIC_METHOD(lldb::SBData, SBData, CreateDataFromSInt64Array,
- (lldb::ByteOrder, uint32_t, int64_t *, size_t),
- endian, addr_byte_size, array, array_len);
+ LLDB_INSTRUMENT_VA(endian, addr_byte_size, array, array_len);
if (!array || array_len == 0)
- return LLDB_RECORD_RESULT(SBData());
+ return SBData();
size_t data_len = array_len * sizeof(int64_t);
@@ -486,19 +454,17 @@ lldb::SBData SBData::CreateDataFromSInt64Array(lldb::ByteOrder endian,
SBData ret(data_sp);
- return LLDB_RECORD_RESULT(ret);
+ return ret;
}
lldb::SBData SBData::CreateDataFromSInt32Array(lldb::ByteOrder endian,
uint32_t addr_byte_size,
int32_t *array,
size_t array_len) {
- LLDB_RECORD_STATIC_METHOD(lldb::SBData, SBData, CreateDataFromSInt32Array,
- (lldb::ByteOrder, uint32_t, int32_t *, size_t),
- endian, addr_byte_size, array, array_len);
+ LLDB_INSTRUMENT_VA(endian, addr_byte_size, array, array_len);
if (!array || array_len == 0)
- return LLDB_RECORD_RESULT(SBData());
+ return SBData();
size_t data_len = array_len * sizeof(int32_t);
@@ -508,19 +474,17 @@ lldb::SBData SBData::CreateDataFromSInt32Array(lldb::ByteOrder endian,
SBData ret(data_sp);
- return LLDB_RECORD_RESULT(ret);
+ return ret;
}
lldb::SBData SBData::CreateDataFromDoubleArray(lldb::ByteOrder endian,
uint32_t addr_byte_size,
double *array,
size_t array_len) {
- LLDB_RECORD_STATIC_METHOD(lldb::SBData, SBData, CreateDataFromDoubleArray,
- (lldb::ByteOrder, uint32_t, double *, size_t),
- endian, addr_byte_size, array, array_len);
+ LLDB_INSTRUMENT_VA(endian, addr_byte_size, array, array_len);
if (!array || array_len == 0)
- return LLDB_RECORD_RESULT(SBData());
+ return SBData();
size_t data_len = array_len * sizeof(double);
@@ -530,12 +494,11 @@ lldb::SBData SBData::CreateDataFromDoubleArray(lldb::ByteOrder endian,
SBData ret(data_sp);
- return LLDB_RECORD_RESULT(ret);
+ return ret;
}
bool SBData::SetDataFromCString(const char *data) {
- LLDB_RECORD_METHOD(bool, SBData, SetDataFromCString, (const char *), data);
-
+ LLDB_INSTRUMENT_VA(this, data);
if (!data) {
return false;
@@ -556,9 +519,7 @@ bool SBData::SetDataFromCString(const char *data) {
}
bool SBData::SetDataFromUInt64Array(uint64_t *array, size_t array_len) {
- LLDB_RECORD_METHOD(bool, SBData, SetDataFromUInt64Array, (uint64_t *, size_t),
- array, array_len);
-
+ LLDB_INSTRUMENT_VA(this, array, array_len);
if (!array || array_len == 0) {
return false;
@@ -579,9 +540,7 @@ bool SBData::SetDataFromUInt64Array(uint64_t *array, size_t array_len) {
}
bool SBData::SetDataFromUInt32Array(uint32_t *array, size_t array_len) {
- LLDB_RECORD_METHOD(bool, SBData, SetDataFromUInt32Array, (uint32_t *, size_t),
- array, array_len);
-
+ LLDB_INSTRUMENT_VA(this, array, array_len);
if (!array || array_len == 0) {
return false;
@@ -601,9 +560,7 @@ bool SBData::SetDataFromUInt32Array(uint32_t *array, size_t array_len) {
}
bool SBData::SetDataFromSInt64Array(int64_t *array, size_t array_len) {
- LLDB_RECORD_METHOD(bool, SBData, SetDataFromSInt64Array, (int64_t *, size_t),
- array, array_len);
-
+ LLDB_INSTRUMENT_VA(this, array, array_len);
if (!array || array_len == 0) {
return false;
@@ -623,9 +580,7 @@ bool SBData::SetDataFromSInt64Array(int64_t *array, size_t array_len) {
}
bool SBData::SetDataFromSInt32Array(int32_t *array, size_t array_len) {
- LLDB_RECORD_METHOD(bool, SBData, SetDataFromSInt32Array, (int32_t *, size_t),
- array, array_len);
-
+ LLDB_INSTRUMENT_VA(this, array, array_len);
if (!array || array_len == 0) {
return false;
@@ -645,9 +600,7 @@ bool SBData::SetDataFromSInt32Array(int32_t *array, size_t array_len) {
}
bool SBData::SetDataFromDoubleArray(double *array, size_t array_len) {
- LLDB_RECORD_METHOD(bool, SBData, SetDataFromDoubleArray, (double *, size_t),
- array, array_len);
-
+ LLDB_INSTRUMENT_VA(this, array, array_len);
if (!array || array_len == 0) {
return false;
@@ -665,79 +618,3 @@ bool SBData::SetDataFromDoubleArray(double *array, size_t array_len) {
return true;
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBData>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBData, ());
- LLDB_REGISTER_CONSTRUCTOR(SBData, (const lldb::SBData &));
- LLDB_REGISTER_METHOD(const lldb::SBData &,
- SBData, operator=,(const lldb::SBData &));
- LLDB_REGISTER_METHOD(bool, SBData, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBData, operator bool, ());
- LLDB_REGISTER_METHOD(uint8_t, SBData, GetAddressByteSize, ());
- LLDB_REGISTER_METHOD(void, SBData, SetAddressByteSize, (uint8_t));
- LLDB_REGISTER_METHOD(void, SBData, Clear, ());
- LLDB_REGISTER_METHOD(size_t, SBData, GetByteSize, ());
- LLDB_REGISTER_METHOD(lldb::ByteOrder, SBData, GetByteOrder, ());
- LLDB_REGISTER_METHOD(void, SBData, SetByteOrder, (lldb::ByteOrder));
- LLDB_REGISTER_METHOD(float, SBData, GetFloat,
- (lldb::SBError &, lldb::offset_t));
- LLDB_REGISTER_METHOD(double, SBData, GetDouble,
- (lldb::SBError &, lldb::offset_t));
- LLDB_REGISTER_METHOD(long double, SBData, GetLongDouble,
- (lldb::SBError &, lldb::offset_t));
- LLDB_REGISTER_METHOD(lldb::addr_t, SBData, GetAddress,
- (lldb::SBError &, lldb::offset_t));
- LLDB_REGISTER_METHOD(uint8_t, SBData, GetUnsignedInt8,
- (lldb::SBError &, lldb::offset_t));
- LLDB_REGISTER_METHOD(uint16_t, SBData, GetUnsignedInt16,
- (lldb::SBError &, lldb::offset_t));
- LLDB_REGISTER_METHOD(uint32_t, SBData, GetUnsignedInt32,
- (lldb::SBError &, lldb::offset_t));
- LLDB_REGISTER_METHOD(uint64_t, SBData, GetUnsignedInt64,
- (lldb::SBError &, lldb::offset_t));
- LLDB_REGISTER_METHOD(int8_t, SBData, GetSignedInt8,
- (lldb::SBError &, lldb::offset_t));
- LLDB_REGISTER_METHOD(int16_t, SBData, GetSignedInt16,
- (lldb::SBError &, lldb::offset_t));
- LLDB_REGISTER_METHOD(int32_t, SBData, GetSignedInt32,
- (lldb::SBError &, lldb::offset_t));
- LLDB_REGISTER_METHOD(int64_t, SBData, GetSignedInt64,
- (lldb::SBError &, lldb::offset_t));
- LLDB_REGISTER_METHOD(const char *, SBData, GetString,
- (lldb::SBError &, lldb::offset_t));
- LLDB_REGISTER_METHOD(bool, SBData, GetDescription,
- (lldb::SBStream &, lldb::addr_t));
- LLDB_REGISTER_METHOD(bool, SBData, Append, (const lldb::SBData &));
- LLDB_REGISTER_STATIC_METHOD(lldb::SBData, SBData, CreateDataFromCString,
- (lldb::ByteOrder, uint32_t, const char *));
- LLDB_REGISTER_STATIC_METHOD(
- lldb::SBData, SBData, CreateDataFromUInt64Array,
- (lldb::ByteOrder, uint32_t, uint64_t *, size_t));
- LLDB_REGISTER_STATIC_METHOD(
- lldb::SBData, SBData, CreateDataFromUInt32Array,
- (lldb::ByteOrder, uint32_t, uint32_t *, size_t));
- LLDB_REGISTER_STATIC_METHOD(lldb::SBData, SBData, CreateDataFromSInt64Array,
- (lldb::ByteOrder, uint32_t, int64_t *, size_t));
- LLDB_REGISTER_STATIC_METHOD(lldb::SBData, SBData, CreateDataFromSInt32Array,
- (lldb::ByteOrder, uint32_t, int32_t *, size_t));
- LLDB_REGISTER_STATIC_METHOD(lldb::SBData, SBData, CreateDataFromDoubleArray,
- (lldb::ByteOrder, uint32_t, double *, size_t));
- LLDB_REGISTER_METHOD(bool, SBData, SetDataFromCString, (const char *));
- LLDB_REGISTER_METHOD(bool, SBData, SetDataFromUInt64Array,
- (uint64_t *, size_t));
- LLDB_REGISTER_METHOD(bool, SBData, SetDataFromUInt32Array,
- (uint32_t *, size_t));
- LLDB_REGISTER_METHOD(bool, SBData, SetDataFromSInt64Array,
- (int64_t *, size_t));
- LLDB_REGISTER_METHOD(bool, SBData, SetDataFromSInt32Array,
- (int32_t *, size_t));
- LLDB_REGISTER_METHOD(bool, SBData, SetDataFromDoubleArray,
- (double *, size_t));
-}
-
-}
-}
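The SBData.cpp hunks above all apply the same mechanical rewrite: the reproducer-capture macros (LLDB_RECORD_METHOD, LLDB_RECORD_STATIC_METHOD, LLDB_RECORD_DUMMY, LLDB_RECORD_RESULT) and the per-class RegisterMethods<> registry are replaced by a single variadic LLDB_INSTRUMENT_VA call (or LLDB_INSTRUMENT for static methods taking no arguments) from lldb/Utility/Instrumentation.h, and return values are passed through unchanged. The C++ sketch below restates that before/after shape for a hypothetical accessor; SBExample, GetValue and ReadValue are illustrative names only and are not part of this patch or of LLDB.

// Before: the reproducer macros capture the full signature, the arguments and
// the result, and the method must also be listed in RegisterMethods<SBExample>.
int32_t SBExample::GetValue(lldb::SBError &error, lldb::offset_t offset) {
  LLDB_RECORD_METHOD(int32_t, SBExample, GetValue,
                     (lldb::SBError &, lldb::offset_t), error, offset);
  int32_t value = ReadValue(error, offset);
  return LLDB_RECORD_RESULT(value);
}

// After: one variadic macro records the call and its arguments; the result is
// returned directly and no registration table is needed.
int32_t SBExample::GetValue(lldb::SBError &error, lldb::offset_t offset) {
  LLDB_INSTRUMENT_VA(this, error, offset);
  return ReadValue(error, offset);
}

Static methods follow the same shape but drop the leading this argument (for example LLDB_INSTRUMENT_VA(endian, addr_byte_size, data)) or use LLDB_INSTRUMENT() when they take no arguments, as the SBDebugger.cpp hunks below show.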
diff --git a/contrib/llvm-project/lldb/source/API/SBDebugger.cpp b/contrib/llvm-project/lldb/source/API/SBDebugger.cpp
index fa5dcb57de7e..8b09d6a8e435 100644
--- a/contrib/llvm-project/lldb/source/API/SBDebugger.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBDebugger.cpp
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#include "SBReproducerPrivate.h"
#include "SystemInitializerFull.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/API/SBDebugger.h"
@@ -105,53 +105,43 @@ SBError SBInputReader::Initialize(
unsigned long),
void *a, lldb::InputReaderGranularity b, char const *c, char const *d,
bool e) {
- LLDB_RECORD_DUMMY(
- lldb::SBError, SBInputReader, Initialize,
- (lldb::SBDebugger &,
- unsigned long (*)(void *, lldb::SBInputReader *, lldb::InputReaderAction,
- const char *, unsigned long),
- void *, lldb::InputReaderGranularity, const char *, const char *, bool),
- sb_debugger, callback, a, b, c, d, e);
+ LLDB_INSTRUMENT_VA(this, sb_debugger, callback, a, b, c, d, e);
return SBError();
}
-void SBInputReader::SetIsDone(bool b) {
- LLDB_RECORD_METHOD(void, SBInputReader, SetIsDone, (bool), b);
-}
+void SBInputReader::SetIsDone(bool b) { LLDB_INSTRUMENT_VA(this, b); }
bool SBInputReader::IsActive() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBInputReader, IsActive);
+ LLDB_INSTRUMENT_VA(this);
return false;
}
-SBDebugger::SBDebugger() { LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBDebugger); }
+SBDebugger::SBDebugger() { LLDB_INSTRUMENT_VA(this); }
SBDebugger::SBDebugger(const lldb::DebuggerSP &debugger_sp)
: m_opaque_sp(debugger_sp) {
- LLDB_RECORD_CONSTRUCTOR(SBDebugger, (const lldb::DebuggerSP &), debugger_sp);
+ LLDB_INSTRUMENT_VA(this, debugger_sp);
}
SBDebugger::SBDebugger(const SBDebugger &rhs) : m_opaque_sp(rhs.m_opaque_sp) {
- LLDB_RECORD_CONSTRUCTOR(SBDebugger, (const lldb::SBDebugger &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
SBDebugger::~SBDebugger() = default;
SBDebugger &SBDebugger::operator=(const SBDebugger &rhs) {
- LLDB_RECORD_METHOD(lldb::SBDebugger &,
- SBDebugger, operator=,(const lldb::SBDebugger &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs) {
m_opaque_sp = rhs.m_opaque_sp;
}
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
const char *SBDebugger::GetBroadcasterClass() {
- LLDB_RECORD_STATIC_METHOD_NO_ARGS(const char *, SBDebugger,
- GetBroadcasterClass);
+ LLDB_INSTRUMENT();
return Debugger::GetStaticBroadcasterClass().AsCString();
}
@@ -161,6 +151,8 @@ const char *SBDebugger::GetProgressFromEvent(const lldb::SBEvent &event,
uint64_t &completed,
uint64_t &total,
bool &is_debugger_specific) {
+ LLDB_INSTRUMENT_VA(event, progress_id, completed, total,
+ is_debugger_specific);
const Debugger::ProgressEventData *progress_data =
Debugger::ProgressEventData::GetEventDataFromEvent(event.get());
if (progress_data == nullptr)
@@ -169,46 +161,39 @@ const char *SBDebugger::GetProgressFromEvent(const lldb::SBEvent &event,
completed = progress_data->GetCompleted();
total = progress_data->GetTotal();
is_debugger_specific = progress_data->IsDebuggerSpecific();
- // We must record the static method _after_ the out parameters have been
- // filled in.
- LLDB_RECORD_STATIC_METHOD(
- const char *, SBDebugger, GetProgressFromEvent,
- (const lldb::SBEvent &, uint64_t &, uint64_t &, uint64_t &, bool &),
- event, progress_id, completed, total, is_debugger_specific);
- return LLDB_RECORD_RESULT(progress_data->GetMessage().c_str())
+ return progress_data->GetMessage().c_str();
}
SBBroadcaster SBDebugger::GetBroadcaster() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBBroadcaster, SBDebugger, GetBroadcaster);
+ LLDB_INSTRUMENT_VA(this);
SBBroadcaster broadcaster(&m_opaque_sp->GetBroadcaster(), false);
- return LLDB_RECORD_RESULT(broadcaster);
+ return broadcaster;
}
void SBDebugger::Initialize() {
- LLDB_RECORD_STATIC_METHOD_NO_ARGS(void, SBDebugger, Initialize);
+ LLDB_INSTRUMENT();
SBError ignored = SBDebugger::InitializeWithErrorHandling();
}
lldb::SBError SBDebugger::InitializeWithErrorHandling() {
- LLDB_RECORD_STATIC_METHOD_NO_ARGS(lldb::SBError, SBDebugger,
- InitializeWithErrorHandling);
+ LLDB_INSTRUMENT();
SBError error;
if (auto e = g_debugger_lifetime->Initialize(
std::make_unique<SystemInitializerFull>(), LoadPlugin)) {
error.SetError(Status(std::move(e)));
}
- return LLDB_RECORD_RESULT(error);
+ return error;
}
void SBDebugger::Terminate() {
- LLDB_RECORD_STATIC_METHOD_NO_ARGS(void, SBDebugger, Terminate);
+ LLDB_INSTRUMENT();
g_debugger_lifetime->Terminate();
}
void SBDebugger::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBDebugger, Clear);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_sp)
m_opaque_sp->ClearIOHandlers();
@@ -217,26 +202,22 @@ void SBDebugger::Clear() {
}
SBDebugger SBDebugger::Create() {
- LLDB_RECORD_STATIC_METHOD_NO_ARGS(lldb::SBDebugger, SBDebugger, Create);
+ LLDB_INSTRUMENT();
- return LLDB_RECORD_RESULT(SBDebugger::Create(false, nullptr, nullptr));
+ return SBDebugger::Create(false, nullptr, nullptr);
}
SBDebugger SBDebugger::Create(bool source_init_files) {
- LLDB_RECORD_STATIC_METHOD(lldb::SBDebugger, SBDebugger, Create, (bool),
- source_init_files);
+ LLDB_INSTRUMENT_VA(source_init_files);
- return LLDB_RECORD_RESULT(
- SBDebugger::Create(source_init_files, nullptr, nullptr));
+ return SBDebugger::Create(source_init_files, nullptr, nullptr);
}
SBDebugger SBDebugger::Create(bool source_init_files,
lldb::LogOutputCallback callback, void *baton)
{
- LLDB_RECORD_DUMMY(lldb::SBDebugger, SBDebugger, Create,
- (bool, lldb::LogOutputCallback, void *), source_init_files,
- callback, baton);
+ LLDB_INSTRUMENT_VA(source_init_files, callback, baton);
SBDebugger debugger;
@@ -264,8 +245,7 @@ SBDebugger SBDebugger::Create(bool source_init_files,
}
void SBDebugger::Destroy(SBDebugger &debugger) {
- LLDB_RECORD_STATIC_METHOD(void, SBDebugger, Destroy, (lldb::SBDebugger &),
- debugger);
+ LLDB_INSTRUMENT_VA(debugger);
Debugger::Destroy(debugger.m_opaque_sp);
@@ -274,7 +254,7 @@ void SBDebugger::Destroy(SBDebugger &debugger) {
}
void SBDebugger::MemoryPressureDetected() {
- LLDB_RECORD_STATIC_METHOD_NO_ARGS(void, SBDebugger, MemoryPressureDetected);
+ LLDB_INSTRUMENT();
  // Since this function can be called asynchronously, we allow it to be non-
// mandatory. We have seen deadlocks with this function when called so we
@@ -287,226 +267,222 @@ void SBDebugger::MemoryPressureDetected() {
}
bool SBDebugger::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBDebugger, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBDebugger::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBDebugger, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp.get() != nullptr;
}
void SBDebugger::SetAsync(bool b) {
- LLDB_RECORD_METHOD(void, SBDebugger, SetAsync, (bool), b);
+ LLDB_INSTRUMENT_VA(this, b);
if (m_opaque_sp)
m_opaque_sp->SetAsyncExecution(b);
}
bool SBDebugger::GetAsync() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBDebugger, GetAsync);
+ LLDB_INSTRUMENT_VA(this);
return (m_opaque_sp ? m_opaque_sp->GetAsyncExecution() : false);
}
void SBDebugger::SkipLLDBInitFiles(bool b) {
- LLDB_RECORD_METHOD(void, SBDebugger, SkipLLDBInitFiles, (bool), b);
+ LLDB_INSTRUMENT_VA(this, b);
if (m_opaque_sp)
m_opaque_sp->GetCommandInterpreter().SkipLLDBInitFiles(b);
}
void SBDebugger::SkipAppInitFiles(bool b) {
- LLDB_RECORD_METHOD(void, SBDebugger, SkipAppInitFiles, (bool), b);
+ LLDB_INSTRUMENT_VA(this, b);
if (m_opaque_sp)
m_opaque_sp->GetCommandInterpreter().SkipAppInitFiles(b);
}
void SBDebugger::SetInputFileHandle(FILE *fh, bool transfer_ownership) {
- LLDB_RECORD_METHOD(void, SBDebugger, SetInputFileHandle, (FILE *, bool), fh,
- transfer_ownership);
+ LLDB_INSTRUMENT_VA(this, fh, transfer_ownership);
if (m_opaque_sp)
m_opaque_sp->SetInputFile(
(FileSP)std::make_shared<NativeFile>(fh, transfer_ownership));
}
SBError SBDebugger::SetInputString(const char *data) {
- LLDB_RECORD_METHOD(SBError, SBDebugger, SetInputString, (const char *), data);
+ LLDB_INSTRUMENT_VA(this, data);
SBError sb_error;
if (data == nullptr) {
sb_error.SetErrorString("String data is null");
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
size_t size = strlen(data);
if (size == 0) {
sb_error.SetErrorString("String data is empty");
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
if (!m_opaque_sp) {
sb_error.SetErrorString("invalid debugger");
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
sb_error.SetError(m_opaque_sp->SetInputString(data));
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
// Shouldn't really be settable after initialization as this could cause lots
// of problems; don't want users trying to switch modes in the middle of a
// debugging session.
SBError SBDebugger::SetInputFile(SBFile file) {
- LLDB_RECORD_METHOD(SBError, SBDebugger, SetInputFile, (SBFile), file);
+ LLDB_INSTRUMENT_VA(this, file);
SBError error;
if (!m_opaque_sp) {
error.ref().SetErrorString("invalid debugger");
- return LLDB_RECORD_RESULT(error);
+ return error;
}
error.SetError(m_opaque_sp->SetInputFile(file.m_opaque_sp));
- return LLDB_RECORD_RESULT(error);
+ return error;
}
SBError SBDebugger::SetInputFile(FileSP file_sp) {
- LLDB_RECORD_METHOD(SBError, SBDebugger, SetInputFile, (FileSP), file_sp);
- return LLDB_RECORD_RESULT(SetInputFile(SBFile(file_sp)));
+ LLDB_INSTRUMENT_VA(this, file_sp);
+ return SetInputFile(SBFile(file_sp));
}
SBError SBDebugger::SetOutputFile(FileSP file_sp) {
- LLDB_RECORD_METHOD(SBError, SBDebugger, SetOutputFile, (FileSP), file_sp);
- return LLDB_RECORD_RESULT(SetOutputFile(SBFile(file_sp)));
+ LLDB_INSTRUMENT_VA(this, file_sp);
+ return SetOutputFile(SBFile(file_sp));
}
void SBDebugger::SetOutputFileHandle(FILE *fh, bool transfer_ownership) {
- LLDB_RECORD_METHOD(void, SBDebugger, SetOutputFileHandle, (FILE *, bool), fh,
- transfer_ownership);
+ LLDB_INSTRUMENT_VA(this, fh, transfer_ownership);
SetOutputFile((FileSP)std::make_shared<NativeFile>(fh, transfer_ownership));
}
SBError SBDebugger::SetOutputFile(SBFile file) {
- LLDB_RECORD_METHOD(SBError, SBDebugger, SetOutputFile, (SBFile file), file);
+ LLDB_INSTRUMENT_VA(this, file);
SBError error;
if (!m_opaque_sp) {
error.ref().SetErrorString("invalid debugger");
- return LLDB_RECORD_RESULT(error);
+ return error;
}
if (!file) {
error.ref().SetErrorString("invalid file");
- return LLDB_RECORD_RESULT(error);
+ return error;
}
m_opaque_sp->SetOutputFile(file.m_opaque_sp);
- return LLDB_RECORD_RESULT(error);
+ return error;
}
void SBDebugger::SetErrorFileHandle(FILE *fh, bool transfer_ownership) {
- LLDB_RECORD_METHOD(void, SBDebugger, SetErrorFileHandle, (FILE *, bool), fh,
- transfer_ownership);
+ LLDB_INSTRUMENT_VA(this, fh, transfer_ownership);
SetErrorFile((FileSP)std::make_shared<NativeFile>(fh, transfer_ownership));
}
SBError SBDebugger::SetErrorFile(FileSP file_sp) {
- LLDB_RECORD_METHOD(SBError, SBDebugger, SetErrorFile, (FileSP), file_sp);
- return LLDB_RECORD_RESULT(SetErrorFile(SBFile(file_sp)));
+ LLDB_INSTRUMENT_VA(this, file_sp);
+ return SetErrorFile(SBFile(file_sp));
}
SBError SBDebugger::SetErrorFile(SBFile file) {
- LLDB_RECORD_METHOD(SBError, SBDebugger, SetErrorFile, (SBFile file), file);
+ LLDB_INSTRUMENT_VA(this, file);
SBError error;
if (!m_opaque_sp) {
error.ref().SetErrorString("invalid debugger");
- return LLDB_RECORD_RESULT(error);
+ return error;
}
if (!file) {
error.ref().SetErrorString("invalid file");
- return LLDB_RECORD_RESULT(error);
+ return error;
}
m_opaque_sp->SetErrorFile(file.m_opaque_sp);
- return LLDB_RECORD_RESULT(error);
+ return error;
}
FILE *SBDebugger::GetInputFileHandle() {
- LLDB_RECORD_METHOD_NO_ARGS(FILE *, SBDebugger, GetInputFileHandle);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_sp) {
File &file_sp = m_opaque_sp->GetInputFile();
- return LLDB_RECORD_RESULT(file_sp.GetStream());
+ return file_sp.GetStream();
}
- return LLDB_RECORD_RESULT(nullptr);
+ return nullptr;
}
SBFile SBDebugger::GetInputFile() {
- LLDB_RECORD_METHOD_NO_ARGS(SBFile, SBDebugger, GetInputFile);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_sp) {
- return LLDB_RECORD_RESULT(SBFile(m_opaque_sp->GetInputFileSP()));
+ return SBFile(m_opaque_sp->GetInputFileSP());
}
- return LLDB_RECORD_RESULT(SBFile());
+ return SBFile();
}
FILE *SBDebugger::GetOutputFileHandle() {
- LLDB_RECORD_METHOD_NO_ARGS(FILE *, SBDebugger, GetOutputFileHandle);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_sp) {
StreamFile &stream_file = m_opaque_sp->GetOutputStream();
- return LLDB_RECORD_RESULT(stream_file.GetFile().GetStream());
+ return stream_file.GetFile().GetStream();
}
- return LLDB_RECORD_RESULT(nullptr);
+ return nullptr;
}
SBFile SBDebugger::GetOutputFile() {
- LLDB_RECORD_METHOD_NO_ARGS(SBFile, SBDebugger, GetOutputFile);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_sp) {
SBFile file(m_opaque_sp->GetOutputStream().GetFileSP());
- return LLDB_RECORD_RESULT(file);
+ return file;
}
- return LLDB_RECORD_RESULT(SBFile());
+ return SBFile();
}
FILE *SBDebugger::GetErrorFileHandle() {
- LLDB_RECORD_METHOD_NO_ARGS(FILE *, SBDebugger, GetErrorFileHandle);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_sp) {
StreamFile &stream_file = m_opaque_sp->GetErrorStream();
- return LLDB_RECORD_RESULT(stream_file.GetFile().GetStream());
+ return stream_file.GetFile().GetStream();
}
- return LLDB_RECORD_RESULT(nullptr);
+ return nullptr;
}
SBFile SBDebugger::GetErrorFile() {
- LLDB_RECORD_METHOD_NO_ARGS(SBFile, SBDebugger, GetErrorFile);
+ LLDB_INSTRUMENT_VA(this);
SBFile file;
if (m_opaque_sp) {
SBFile file(m_opaque_sp->GetErrorStream().GetFileSP());
- return LLDB_RECORD_RESULT(file);
+ return file;
}
- return LLDB_RECORD_RESULT(SBFile());
+ return SBFile();
}
void SBDebugger::SaveInputTerminalState() {
- LLDB_RECORD_DUMMY_NO_ARGS(void, SBDebugger, SaveInputTerminalState);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_sp)
m_opaque_sp->SaveInputTerminalState();
}
void SBDebugger::RestoreInputTerminalState() {
- LLDB_RECORD_DUMMY_NO_ARGS(void, SBDebugger, RestoreInputTerminalState);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_sp)
m_opaque_sp->RestoreInputTerminalState();
}
SBCommandInterpreter SBDebugger::GetCommandInterpreter() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBCommandInterpreter, SBDebugger,
- GetCommandInterpreter);
+ LLDB_INSTRUMENT_VA(this);
SBCommandInterpreter sb_interpreter;
if (m_opaque_sp)
sb_interpreter.reset(&m_opaque_sp->GetCommandInterpreter());
- return LLDB_RECORD_RESULT(sb_interpreter);
+ return sb_interpreter;
}
void SBDebugger::HandleCommand(const char *command) {
- LLDB_RECORD_METHOD(void, SBDebugger, HandleCommand, (const char *), command);
+ LLDB_INSTRUMENT_VA(this, command);
if (m_opaque_sp) {
TargetSP target_sp(m_opaque_sp->GetSelectedTarget());
@@ -539,22 +515,19 @@ void SBDebugger::HandleCommand(const char *command) {
}
SBListener SBDebugger::GetListener() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBListener, SBDebugger, GetListener);
+ LLDB_INSTRUMENT_VA(this);
SBListener sb_listener;
if (m_opaque_sp)
sb_listener.reset(m_opaque_sp->GetListener());
- return LLDB_RECORD_RESULT(sb_listener);
+ return sb_listener;
}
void SBDebugger::HandleProcessEvent(const SBProcess &process,
const SBEvent &event, SBFile out,
SBFile err) {
- LLDB_RECORD_METHOD(
- void, SBDebugger, HandleProcessEvent,
- (const lldb::SBProcess &, const lldb::SBEvent &, SBFile, SBFile), process,
- event, out, err);
+ LLDB_INSTRUMENT_VA(this, process, event, out, err);
return HandleProcessEvent(process, event, out.m_opaque_sp, err.m_opaque_sp);
}
@@ -562,10 +535,7 @@ void SBDebugger::HandleProcessEvent(const SBProcess &process,
void SBDebugger::HandleProcessEvent(const SBProcess &process,
const SBEvent &event, FILE *out,
FILE *err) {
- LLDB_RECORD_METHOD(
- void, SBDebugger, HandleProcessEvent,
- (const lldb::SBProcess &, const lldb::SBEvent &, FILE *, FILE *), process,
- event, out, err);
+ LLDB_INSTRUMENT_VA(this, process, event, out, err);
FileSP outfile = std::make_shared<NativeFile>(out, false);
FileSP errfile = std::make_shared<NativeFile>(err, false);
@@ -576,10 +546,7 @@ void SBDebugger::HandleProcessEvent(const SBProcess &process,
const SBEvent &event, FileSP out_sp,
FileSP err_sp) {
- LLDB_RECORD_METHOD(
- void, SBDebugger, HandleProcessEvent,
- (const lldb::SBProcess &, const lldb::SBEvent &, FileSP, FileSP), process,
- event, out_sp, err_sp);
+ LLDB_INSTRUMENT_VA(this, process, event, out_sp, err_sp);
if (!process.IsValid())
return;
@@ -623,17 +590,14 @@ void SBDebugger::HandleProcessEvent(const SBProcess &process,
}
SBSourceManager SBDebugger::GetSourceManager() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBSourceManager, SBDebugger,
- GetSourceManager);
+ LLDB_INSTRUMENT_VA(this);
SBSourceManager sb_source_manager(*this);
- return LLDB_RECORD_RESULT(sb_source_manager);
+ return sb_source_manager;
}
bool SBDebugger::GetDefaultArchitecture(char *arch_name, size_t arch_name_len) {
- LLDB_RECORD_CHAR_PTR_STATIC_METHOD(bool, SBDebugger, GetDefaultArchitecture,
- (char *, size_t), arch_name, "",
- arch_name_len);
+ LLDB_INSTRUMENT_VA(arch_name, arch_name_len);
if (arch_name && arch_name_len) {
ArchSpec default_arch = Target::GetDefaultArchitecture();
@@ -654,8 +618,7 @@ bool SBDebugger::GetDefaultArchitecture(char *arch_name, size_t arch_name_len) {
}
bool SBDebugger::SetDefaultArchitecture(const char *arch_name) {
- LLDB_RECORD_STATIC_METHOD(bool, SBDebugger, SetDefaultArchitecture,
- (const char *), arch_name);
+ LLDB_INSTRUMENT_VA(arch_name);
if (arch_name) {
ArchSpec arch(arch_name);
@@ -669,8 +632,7 @@ bool SBDebugger::SetDefaultArchitecture(const char *arch_name) {
ScriptLanguage
SBDebugger::GetScriptingLanguage(const char *script_language_name) {
- LLDB_RECORD_METHOD(lldb::ScriptLanguage, SBDebugger, GetScriptingLanguage,
- (const char *), script_language_name);
+ LLDB_INSTRUMENT_VA(this, script_language_name);
if (!script_language_name)
return eScriptLanguageDefault;
@@ -680,8 +642,7 @@ SBDebugger::GetScriptingLanguage(const char *script_language_name) {
SBStructuredData
SBDebugger::GetScriptInterpreterInfo(lldb::ScriptLanguage language) {
- LLDB_RECORD_METHOD(SBStructuredData, SBDebugger, GetScriptInterpreterInfo,
- (lldb::ScriptLanguage), language);
+ LLDB_INSTRUMENT_VA(this, language);
SBStructuredData data;
if (m_opaque_sp) {
lldb_private::ScriptInterpreter *interp =
@@ -690,18 +651,17 @@ SBDebugger::GetScriptInterpreterInfo(lldb::ScriptLanguage language) {
data.m_impl_up->SetObjectSP(interp->GetInterpreterInfo());
}
}
- return LLDB_RECORD_RESULT(data);
+ return data;
}
const char *SBDebugger::GetVersionString() {
- LLDB_RECORD_STATIC_METHOD_NO_ARGS(const char *, SBDebugger, GetVersionString);
+ LLDB_INSTRUMENT();
return lldb_private::GetVersion();
}
const char *SBDebugger::StateAsCString(StateType state) {
- LLDB_RECORD_STATIC_METHOD(const char *, SBDebugger, StateAsCString,
- (lldb::StateType), state);
+ LLDB_INSTRUMENT_VA(state);
return lldb_private::StateAsCString(state);
}
@@ -727,8 +687,7 @@ static void AddLLVMTargets(StructuredData::Dictionary &dict) {
}
SBStructuredData SBDebugger::GetBuildConfiguration() {
- LLDB_RECORD_STATIC_METHOD_NO_ARGS(lldb::SBStructuredData, SBDebugger,
- GetBuildConfiguration);
+ LLDB_INSTRUMENT();
auto config_up = std::make_unique<StructuredData::Dictionary>();
AddBoolConfigEntry(
@@ -756,12 +715,11 @@ SBStructuredData SBDebugger::GetBuildConfiguration() {
SBStructuredData data;
data.m_impl_up->SetObjectSP(std::move(config_up));
- return LLDB_RECORD_RESULT(data);
+ return data;
}
bool SBDebugger::StateIsRunningState(StateType state) {
- LLDB_RECORD_STATIC_METHOD(bool, SBDebugger, StateIsRunningState,
- (lldb::StateType), state);
+ LLDB_INSTRUMENT_VA(state);
const bool result = lldb_private::StateIsRunningState(state);
@@ -769,8 +727,7 @@ bool SBDebugger::StateIsRunningState(StateType state) {
}
bool SBDebugger::StateIsStoppedState(StateType state) {
- LLDB_RECORD_STATIC_METHOD(bool, SBDebugger, StateIsStoppedState,
- (lldb::StateType), state);
+ LLDB_INSTRUMENT_VA(state);
const bool result = lldb_private::StateIsStoppedState(state, false);
@@ -782,10 +739,8 @@ lldb::SBTarget SBDebugger::CreateTarget(const char *filename,
const char *platform_name,
bool add_dependent_modules,
lldb::SBError &sb_error) {
- LLDB_RECORD_METHOD(
- lldb::SBTarget, SBDebugger, CreateTarget,
- (const char *, const char *, const char *, bool, lldb::SBError &),
- filename, target_triple, platform_name, add_dependent_modules, sb_error);
+ LLDB_INSTRUMENT_VA(this, filename, target_triple, platform_name,
+ add_dependent_modules, sb_error);
SBTarget sb_target;
TargetSP target_sp;
@@ -814,15 +769,13 @@ lldb::SBTarget SBDebugger::CreateTarget(const char *filename,
platform_name, add_dependent_modules, sb_error.GetCString(),
static_cast<void *>(target_sp.get()));
- return LLDB_RECORD_RESULT(sb_target);
+ return sb_target;
}
SBTarget
SBDebugger::CreateTargetWithFileAndTargetTriple(const char *filename,
const char *target_triple) {
- LLDB_RECORD_METHOD(lldb::SBTarget, SBDebugger,
- CreateTargetWithFileAndTargetTriple,
- (const char *, const char *), filename, target_triple);
+ LLDB_INSTRUMENT_VA(this, filename, target_triple);
SBTarget sb_target;
TargetSP target_sp;
@@ -842,13 +795,12 @@ SBDebugger::CreateTargetWithFileAndTargetTriple(const char *filename,
static_cast<void *>(m_opaque_sp.get()), filename, target_triple,
static_cast<void *>(target_sp.get()));
- return LLDB_RECORD_RESULT(sb_target);
+ return sb_target;
}
SBTarget SBDebugger::CreateTargetWithFileAndArch(const char *filename,
const char *arch_cstr) {
- LLDB_RECORD_METHOD(lldb::SBTarget, SBDebugger, CreateTargetWithFileAndArch,
- (const char *, const char *), filename, arch_cstr);
+ LLDB_INSTRUMENT_VA(this, filename, arch_cstr);
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_API));
@@ -860,16 +812,18 @@ SBTarget SBDebugger::CreateTargetWithFileAndArch(const char *filename,
// The version of CreateTarget that takes an ArchSpec won't accept an
// empty ArchSpec, so when the arch hasn't been specified, we need to
// call the target triple version.
- error = m_opaque_sp->GetTargetList().CreateTarget(*m_opaque_sp, filename,
- arch_cstr, eLoadDependentsYes, nullptr, target_sp);
+ error = m_opaque_sp->GetTargetList().CreateTarget(
+ *m_opaque_sp, filename, arch_cstr, eLoadDependentsYes, nullptr,
+ target_sp);
} else {
- PlatformSP platform_sp = m_opaque_sp->GetPlatformList()
- .GetSelectedPlatform();
- ArchSpec arch = Platform::GetAugmentedArchSpec(platform_sp.get(),
- arch_cstr);
+ PlatformSP platform_sp =
+ m_opaque_sp->GetPlatformList().GetSelectedPlatform();
+ ArchSpec arch =
+ Platform::GetAugmentedArchSpec(platform_sp.get(), arch_cstr);
if (arch.IsValid())
- error = m_opaque_sp->GetTargetList().CreateTarget(*m_opaque_sp, filename,
- arch, eLoadDependentsYes, platform_sp, target_sp);
+ error = m_opaque_sp->GetTargetList().CreateTarget(
+ *m_opaque_sp, filename, arch, eLoadDependentsYes, platform_sp,
+ target_sp);
else
error.SetErrorStringWithFormat("invalid arch_cstr: %s", arch_cstr);
}
@@ -885,12 +839,11 @@ SBTarget SBDebugger::CreateTargetWithFileAndArch(const char *filename,
arch_cstr ? arch_cstr : "<unspecified>",
static_cast<void *>(target_sp.get()));
- return LLDB_RECORD_RESULT(sb_target);
+ return sb_target;
}
SBTarget SBDebugger::CreateTarget(const char *filename) {
- LLDB_RECORD_METHOD(lldb::SBTarget, SBDebugger, CreateTarget, (const char *),
- filename);
+ LLDB_INSTRUMENT_VA(this, filename);
SBTarget sb_target;
TargetSP target_sp;
@@ -910,11 +863,11 @@ SBTarget SBDebugger::CreateTarget(const char *filename) {
"SBDebugger(%p)::CreateTarget (filename=\"%s\") => SBTarget(%p)",
static_cast<void *>(m_opaque_sp.get()), filename,
static_cast<void *>(target_sp.get()));
- return LLDB_RECORD_RESULT(sb_target);
+ return sb_target;
}
SBTarget SBDebugger::GetDummyTarget() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBTarget, SBDebugger, GetDummyTarget);
+ LLDB_INSTRUMENT_VA(this);
SBTarget sb_target;
if (m_opaque_sp) {
@@ -924,12 +877,11 @@ SBTarget SBDebugger::GetDummyTarget() {
LLDB_LOGF(log, "SBDebugger(%p)::GetDummyTarget() => SBTarget(%p)",
static_cast<void *>(m_opaque_sp.get()),
static_cast<void *>(sb_target.GetSP().get()));
- return LLDB_RECORD_RESULT(sb_target);
+ return sb_target;
}
bool SBDebugger::DeleteTarget(lldb::SBTarget &target) {
- LLDB_RECORD_METHOD(bool, SBDebugger, DeleteTarget, (lldb::SBTarget &),
- target);
+ LLDB_INSTRUMENT_VA(this, target);
bool result = false;
if (m_opaque_sp) {
@@ -951,20 +903,18 @@ bool SBDebugger::DeleteTarget(lldb::SBTarget &target) {
}
SBTarget SBDebugger::GetTargetAtIndex(uint32_t idx) {
- LLDB_RECORD_METHOD(lldb::SBTarget, SBDebugger, GetTargetAtIndex, (uint32_t),
- idx);
+ LLDB_INSTRUMENT_VA(this, idx);
SBTarget sb_target;
if (m_opaque_sp) {
// No need to lock, the target list is thread safe
sb_target.SetSP(m_opaque_sp->GetTargetList().GetTargetAtIndex(idx));
}
- return LLDB_RECORD_RESULT(sb_target);
+ return sb_target;
}
uint32_t SBDebugger::GetIndexOfTarget(lldb::SBTarget target) {
- LLDB_RECORD_METHOD(uint32_t, SBDebugger, GetIndexOfTarget, (lldb::SBTarget),
- target);
+ LLDB_INSTRUMENT_VA(this, target);
lldb::TargetSP target_sp = target.GetSP();
if (!target_sp)
@@ -977,21 +927,19 @@ uint32_t SBDebugger::GetIndexOfTarget(lldb::SBTarget target) {
}
SBTarget SBDebugger::FindTargetWithProcessID(lldb::pid_t pid) {
- LLDB_RECORD_METHOD(lldb::SBTarget, SBDebugger, FindTargetWithProcessID,
- (lldb::pid_t), pid);
+ LLDB_INSTRUMENT_VA(this, pid);
SBTarget sb_target;
if (m_opaque_sp) {
// No need to lock, the target list is thread safe
sb_target.SetSP(m_opaque_sp->GetTargetList().FindTargetWithProcessID(pid));
}
- return LLDB_RECORD_RESULT(sb_target);
+ return sb_target;
}
SBTarget SBDebugger::FindTargetWithFileAndArch(const char *filename,
const char *arch_name) {
- LLDB_RECORD_METHOD(lldb::SBTarget, SBDebugger, FindTargetWithFileAndArch,
- (const char *, const char *), filename, arch_name);
+ LLDB_INSTRUMENT_VA(this, filename, arch_name);
SBTarget sb_target;
if (m_opaque_sp && filename && filename[0]) {
@@ -1003,7 +951,7 @@ SBTarget SBDebugger::FindTargetWithFileAndArch(const char *filename,
FileSpec(filename), arch_name ? &arch : nullptr));
sb_target.SetSP(target_sp);
}
- return LLDB_RECORD_RESULT(sb_target);
+ return sb_target;
}
SBTarget SBDebugger::FindTargetWithLLDBProcess(const ProcessSP &process_sp) {
@@ -1017,7 +965,7 @@ SBTarget SBDebugger::FindTargetWithLLDBProcess(const ProcessSP &process_sp) {
}
uint32_t SBDebugger::GetNumTargets() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBDebugger, GetNumTargets);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_sp) {
// No need to lock, the target list is thread safe
@@ -1027,7 +975,7 @@ uint32_t SBDebugger::GetNumTargets() {
}
SBTarget SBDebugger::GetSelectedTarget() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBTarget, SBDebugger, GetSelectedTarget);
+ LLDB_INSTRUMENT_VA(this);
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_API));
@@ -1047,12 +995,11 @@ SBTarget SBDebugger::GetSelectedTarget() {
static_cast<void *>(target_sp.get()), sstr.GetData());
}
- return LLDB_RECORD_RESULT(sb_target);
+ return sb_target;
}
void SBDebugger::SetSelectedTarget(SBTarget &sb_target) {
- LLDB_RECORD_METHOD(void, SBDebugger, SetSelectedTarget, (lldb::SBTarget &),
- sb_target);
+ LLDB_INSTRUMENT_VA(this, sb_target);
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_API));
@@ -1070,7 +1017,7 @@ void SBDebugger::SetSelectedTarget(SBTarget &sb_target) {
}
SBPlatform SBDebugger::GetSelectedPlatform() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBPlatform, SBDebugger, GetSelectedPlatform);
+ LLDB_INSTRUMENT_VA(this);
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_API));
@@ -1083,12 +1030,11 @@ SBPlatform SBDebugger::GetSelectedPlatform() {
static_cast<void *>(m_opaque_sp.get()),
static_cast<void *>(sb_platform.GetSP().get()),
sb_platform.GetName());
- return LLDB_RECORD_RESULT(sb_platform);
+ return sb_platform;
}
void SBDebugger::SetSelectedPlatform(SBPlatform &sb_platform) {
- LLDB_RECORD_METHOD(void, SBDebugger, SetSelectedPlatform,
- (lldb::SBPlatform &), sb_platform);
+ LLDB_INSTRUMENT_VA(this, sb_platform);
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_API));
@@ -1104,7 +1050,7 @@ void SBDebugger::SetSelectedPlatform(SBPlatform &sb_platform) {
}
uint32_t SBDebugger::GetNumPlatforms() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBDebugger, GetNumPlatforms);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_sp) {
// No need to lock, the platform list is thread safe
@@ -1114,19 +1060,18 @@ uint32_t SBDebugger::GetNumPlatforms() {
}
SBPlatform SBDebugger::GetPlatformAtIndex(uint32_t idx) {
- LLDB_RECORD_METHOD(lldb::SBPlatform, SBDebugger, GetPlatformAtIndex,
- (uint32_t), idx);
+ LLDB_INSTRUMENT_VA(this, idx);
SBPlatform sb_platform;
if (m_opaque_sp) {
// No need to lock, the platform list is thread safe
sb_platform.SetSP(m_opaque_sp->GetPlatformList().GetAtIndex(idx));
}
- return LLDB_RECORD_RESULT(sb_platform);
+ return sb_platform;
}
uint32_t SBDebugger::GetNumAvailablePlatforms() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBDebugger, GetNumAvailablePlatforms);
+ LLDB_INSTRUMENT_VA(this);
uint32_t idx = 0;
while (true) {
@@ -1140,8 +1085,7 @@ uint32_t SBDebugger::GetNumAvailablePlatforms() {
}
SBStructuredData SBDebugger::GetAvailablePlatformInfoAtIndex(uint32_t idx) {
- LLDB_RECORD_METHOD(lldb::SBStructuredData, SBDebugger,
- GetAvailablePlatformInfoAtIndex, (uint32_t), idx);
+ LLDB_INSTRUMENT_VA(this, idx);
SBStructuredData data;
auto platform_dict = std::make_unique<StructuredData::Dictionary>();
@@ -1156,7 +1100,7 @@ SBStructuredData SBDebugger::GetAvailablePlatformInfoAtIndex(uint32_t idx) {
llvm::StringRef plugin_name =
PluginManager::GetPlatformPluginNameAtIndex(idx - 1);
if (plugin_name.empty()) {
- return LLDB_RECORD_RESULT(data);
+ return data;
}
platform_dict->AddStringItem(name_str, llvm::StringRef(plugin_name));
@@ -1167,19 +1111,17 @@ SBStructuredData SBDebugger::GetAvailablePlatformInfoAtIndex(uint32_t idx) {
data.m_impl_up->SetObjectSP(
StructuredData::ObjectSP(platform_dict.release()));
- return LLDB_RECORD_RESULT(data);
+ return data;
}
void SBDebugger::DispatchInput(void *baton, const void *data, size_t data_len) {
- LLDB_RECORD_DUMMY(void, SBDebugger, DispatchInput,
- (void *, const void *, size_t), baton, data, data_len);
+ LLDB_INSTRUMENT_VA(this, baton, data, data_len);
DispatchInput(data, data_len);
}
void SBDebugger::DispatchInput(const void *data, size_t data_len) {
- LLDB_RECORD_DUMMY(void, SBDebugger, DispatchInput, (const void *, size_t),
- data, data_len);
+ LLDB_INSTRUMENT_VA(this, data, data_len);
// Log *log(GetLogIfAllCategoriesSet (LIBLLDB_LOG_API));
//
@@ -1196,28 +1138,26 @@ void SBDebugger::DispatchInput(const void *data, size_t data_len) {
}
void SBDebugger::DispatchInputInterrupt() {
- LLDB_RECORD_DUMMY_NO_ARGS(void, SBDebugger, DispatchInputInterrupt);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_sp)
m_opaque_sp->DispatchInputInterrupt();
}
void SBDebugger::DispatchInputEndOfFile() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBDebugger, DispatchInputEndOfFile);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_sp)
m_opaque_sp->DispatchInputEndOfFile();
}
void SBDebugger::PushInputReader(SBInputReader &reader) {
- LLDB_RECORD_METHOD(void, SBDebugger, PushInputReader, (lldb::SBInputReader &),
- reader);
+ LLDB_INSTRUMENT_VA(this, reader);
}
void SBDebugger::RunCommandInterpreter(bool auto_handle_events,
bool spawn_thread) {
- LLDB_RECORD_METHOD(void, SBDebugger, RunCommandInterpreter, (bool, bool),
- auto_handle_events, spawn_thread);
+ LLDB_INSTRUMENT_VA(this, auto_handle_events, spawn_thread);
if (m_opaque_sp) {
CommandInterpreterRunOptions options;
@@ -1234,11 +1174,8 @@ void SBDebugger::RunCommandInterpreter(bool auto_handle_events,
bool &stopped_for_crash)
{
- LLDB_RECORD_METHOD(void, SBDebugger, RunCommandInterpreter,
- (bool, bool, lldb::SBCommandInterpreterRunOptions &, int &,
- bool &, bool &),
- auto_handle_events, spawn_thread, options, num_errors,
- quit_requested, stopped_for_crash);
+ LLDB_INSTRUMENT_VA(this, auto_handle_events, spawn_thread, options,
+ num_errors, quit_requested, stopped_for_crash);
if (m_opaque_sp) {
options.SetAutoHandleEvents(auto_handle_events);
@@ -1256,32 +1193,28 @@ void SBDebugger::RunCommandInterpreter(bool auto_handle_events,
SBCommandInterpreterRunResult SBDebugger::RunCommandInterpreter(
const SBCommandInterpreterRunOptions &options) {
- LLDB_RECORD_METHOD(lldb::SBCommandInterpreterRunResult, SBDebugger,
- RunCommandInterpreter,
- (const lldb::SBCommandInterpreterRunOptions &), options);
+ LLDB_INSTRUMENT_VA(this, options);
if (!m_opaque_sp)
- return LLDB_RECORD_RESULT(SBCommandInterpreterRunResult());
+ return SBCommandInterpreterRunResult();
CommandInterpreter &interp = m_opaque_sp->GetCommandInterpreter();
CommandInterpreterRunResult result =
interp.RunCommandInterpreter(options.ref());
- return LLDB_RECORD_RESULT(SBCommandInterpreterRunResult(result));
+ return SBCommandInterpreterRunResult(result);
}
SBError SBDebugger::RunREPL(lldb::LanguageType language,
const char *repl_options) {
- LLDB_RECORD_METHOD(lldb::SBError, SBDebugger, RunREPL,
- (lldb::LanguageType, const char *), language,
- repl_options);
+ LLDB_INSTRUMENT_VA(this, language, repl_options);
SBError error;
if (m_opaque_sp)
error.ref() = m_opaque_sp->RunREPL(language, repl_options);
else
error.SetErrorString("invalid debugger");
- return LLDB_RECORD_RESULT(error);
+ return error;
}
void SBDebugger::reset(const DebuggerSP &debugger_sp) {
@@ -1298,28 +1231,25 @@ Debugger &SBDebugger::ref() const {
const lldb::DebuggerSP &SBDebugger::get_sp() const { return m_opaque_sp; }
SBDebugger SBDebugger::FindDebuggerWithID(int id) {
- LLDB_RECORD_STATIC_METHOD(lldb::SBDebugger, SBDebugger, FindDebuggerWithID,
- (int), id);
+ LLDB_INSTRUMENT_VA(id);
// No need to lock, the debugger list is thread safe
SBDebugger sb_debugger;
DebuggerSP debugger_sp = Debugger::FindDebuggerWithID(id);
if (debugger_sp)
sb_debugger.reset(debugger_sp);
- return LLDB_RECORD_RESULT(sb_debugger);
+ return sb_debugger;
}
const char *SBDebugger::GetInstanceName() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBDebugger, GetInstanceName);
+ LLDB_INSTRUMENT_VA(this);
return (m_opaque_sp ? m_opaque_sp->GetInstanceName().AsCString() : nullptr);
}
SBError SBDebugger::SetInternalVariable(const char *var_name, const char *value,
const char *debugger_instance_name) {
- LLDB_RECORD_STATIC_METHOD(lldb::SBError, SBDebugger, SetInternalVariable,
- (const char *, const char *, const char *),
- var_name, value, debugger_instance_name);
+ LLDB_INSTRUMENT_VA(var_name, value, debugger_instance_name);
SBError sb_error;
DebuggerSP debugger_sp(Debugger::FindDebuggerWithInstanceName(
@@ -1336,15 +1266,13 @@ SBError SBDebugger::SetInternalVariable(const char *var_name, const char *value,
}
if (error.Fail())
sb_error.SetError(error);
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
SBStringList
SBDebugger::GetInternalVariableValue(const char *var_name,
const char *debugger_instance_name) {
- LLDB_RECORD_STATIC_METHOD(
- lldb::SBStringList, SBDebugger, GetInternalVariableValue,
- (const char *, const char *), var_name, debugger_instance_name);
+ LLDB_INSTRUMENT_VA(var_name, debugger_instance_name);
DebuggerSP debugger_sp(Debugger::FindDebuggerWithInstanceName(
ConstString(debugger_instance_name)));
@@ -1361,28 +1289,28 @@ SBDebugger::GetInternalVariableValue(const char *var_name,
if (!value_str.empty()) {
StringList string_list;
string_list.SplitIntoLines(value_str);
- return LLDB_RECORD_RESULT(SBStringList(&string_list));
+ return SBStringList(&string_list);
}
}
}
- return LLDB_RECORD_RESULT(SBStringList());
+ return SBStringList();
}
uint32_t SBDebugger::GetTerminalWidth() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBDebugger, GetTerminalWidth);
+ LLDB_INSTRUMENT_VA(this);
return (m_opaque_sp ? m_opaque_sp->GetTerminalWidth() : 0);
}
void SBDebugger::SetTerminalWidth(uint32_t term_width) {
- LLDB_RECORD_DUMMY(void, SBDebugger, SetTerminalWidth, (uint32_t), term_width);
+ LLDB_INSTRUMENT_VA(this, term_width);
if (m_opaque_sp)
m_opaque_sp->SetTerminalWidth(term_width);
}
const char *SBDebugger::GetPrompt() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBDebugger, GetPrompt);
+ LLDB_INSTRUMENT_VA(this);
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_API));
@@ -1395,14 +1323,14 @@ const char *SBDebugger::GetPrompt() const {
}
void SBDebugger::SetPrompt(const char *prompt) {
- LLDB_RECORD_METHOD(void, SBDebugger, SetPrompt, (const char *), prompt);
+ LLDB_INSTRUMENT_VA(this, prompt);
if (m_opaque_sp)
m_opaque_sp->SetPrompt(llvm::StringRef(prompt));
}
const char *SBDebugger::GetReproducerPath() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBDebugger, GetReproducerPath);
+ LLDB_INSTRUMENT_VA(this);
return (m_opaque_sp
? ConstString(m_opaque_sp->GetReproducerPath()).GetCString()
@@ -1410,60 +1338,71 @@ const char *SBDebugger::GetReproducerPath() const {
}
ScriptLanguage SBDebugger::GetScriptLanguage() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::ScriptLanguage, SBDebugger,
- GetScriptLanguage);
+ LLDB_INSTRUMENT_VA(this);
return (m_opaque_sp ? m_opaque_sp->GetScriptLanguage() : eScriptLanguageNone);
}
void SBDebugger::SetScriptLanguage(ScriptLanguage script_lang) {
- LLDB_RECORD_METHOD(void, SBDebugger, SetScriptLanguage,
- (lldb::ScriptLanguage), script_lang);
+ LLDB_INSTRUMENT_VA(this, script_lang);
if (m_opaque_sp) {
m_opaque_sp->SetScriptLanguage(script_lang);
}
}
+LanguageType SBDebugger::GetREPLLanguage() const {
+ LLDB_INSTRUMENT_VA(this);
+
+ return (m_opaque_sp ? m_opaque_sp->GetREPLLanguage() : eLanguageTypeUnknown);
+}
+
+void SBDebugger::SetREPLLanguage(LanguageType repl_lang) {
+ LLDB_INSTRUMENT_VA(this, repl_lang);
+
+ if (m_opaque_sp) {
+ m_opaque_sp->SetREPLLanguage(repl_lang);
+ }
+}
+
bool SBDebugger::SetUseExternalEditor(bool value) {
- LLDB_RECORD_METHOD(bool, SBDebugger, SetUseExternalEditor, (bool), value);
+ LLDB_INSTRUMENT_VA(this, value);
return (m_opaque_sp ? m_opaque_sp->SetUseExternalEditor(value) : false);
}
bool SBDebugger::GetUseExternalEditor() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBDebugger, GetUseExternalEditor);
+ LLDB_INSTRUMENT_VA(this);
return (m_opaque_sp ? m_opaque_sp->GetUseExternalEditor() : false);
}
bool SBDebugger::SetUseColor(bool value) {
- LLDB_RECORD_METHOD(bool, SBDebugger, SetUseColor, (bool), value);
+ LLDB_INSTRUMENT_VA(this, value);
return (m_opaque_sp ? m_opaque_sp->SetUseColor(value) : false);
}
bool SBDebugger::GetUseColor() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBDebugger, GetUseColor);
+ LLDB_INSTRUMENT_VA(this);
return (m_opaque_sp ? m_opaque_sp->GetUseColor() : false);
}
bool SBDebugger::SetUseSourceCache(bool value) {
- LLDB_RECORD_METHOD(bool, SBDebugger, SetUseSourceCache, (bool), value);
+ LLDB_INSTRUMENT_VA(this, value);
return (m_opaque_sp ? m_opaque_sp->SetUseSourceCache(value) : false);
}
bool SBDebugger::GetUseSourceCache() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBDebugger, GetUseSourceCache);
+ LLDB_INSTRUMENT_VA(this);
return (m_opaque_sp ? m_opaque_sp->GetUseSourceCache() : false);
}
bool SBDebugger::GetDescription(SBStream &description) {
- LLDB_RECORD_METHOD(bool, SBDebugger, GetDescription, (lldb::SBStream &),
- description);
+ LLDB_INSTRUMENT_VA(this, description);
Stream &strm = description.ref();
@@ -1478,14 +1417,13 @@ bool SBDebugger::GetDescription(SBStream &description) {
}
user_id_t SBDebugger::GetID() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::user_id_t, SBDebugger, GetID);
+ LLDB_INSTRUMENT_VA(this);
return (m_opaque_sp ? m_opaque_sp->GetID() : LLDB_INVALID_UID);
}
SBError SBDebugger::SetCurrentPlatform(const char *platform_name_cstr) {
- LLDB_RECORD_METHOD(lldb::SBError, SBDebugger, SetCurrentPlatform,
- (const char *), platform_name_cstr);
+ LLDB_INSTRUMENT_VA(this, platform_name_cstr);
SBError sb_error;
if (m_opaque_sp) {
@@ -1511,91 +1449,77 @@ SBError SBDebugger::SetCurrentPlatform(const char *platform_name_cstr) {
} else {
sb_error.ref().SetErrorString("invalid debugger");
}
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
bool SBDebugger::SetCurrentPlatformSDKRoot(const char *sysroot) {
- LLDB_RECORD_METHOD(bool, SBDebugger, SetCurrentPlatformSDKRoot,
- (const char *), sysroot);
+ LLDB_INSTRUMENT_VA(this, sysroot);
- Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_API));
- if (m_opaque_sp) {
- PlatformSP platform_sp(
- m_opaque_sp->GetPlatformList().GetSelectedPlatform());
-
- if (platform_sp) {
- if (log && sysroot)
- LLDB_LOGF(log, "SBDebugger::SetCurrentPlatformSDKRoot (\"%s\")",
- sysroot);
- platform_sp->SetSDKRootDirectory(ConstString(sysroot));
- return true;
- }
+ if (SBPlatform platform = GetSelectedPlatform()) {
+ platform.SetSDKRoot(sysroot);
+ return true;
}
return false;
}
bool SBDebugger::GetCloseInputOnEOF() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBDebugger, GetCloseInputOnEOF);
+ LLDB_INSTRUMENT_VA(this);
return (m_opaque_sp ? m_opaque_sp->GetCloseInputOnEOF() : false);
}
void SBDebugger::SetCloseInputOnEOF(bool b) {
- LLDB_RECORD_METHOD(void, SBDebugger, SetCloseInputOnEOF, (bool), b);
+ LLDB_INSTRUMENT_VA(this, b);
if (m_opaque_sp)
m_opaque_sp->SetCloseInputOnEOF(b);
}
SBTypeCategory SBDebugger::GetCategory(const char *category_name) {
- LLDB_RECORD_METHOD(lldb::SBTypeCategory, SBDebugger, GetCategory,
- (const char *), category_name);
+ LLDB_INSTRUMENT_VA(this, category_name);
if (!category_name || *category_name == 0)
- return LLDB_RECORD_RESULT(SBTypeCategory());
+ return SBTypeCategory();
TypeCategoryImplSP category_sp;
if (DataVisualization::Categories::GetCategory(ConstString(category_name),
category_sp, false)) {
- return LLDB_RECORD_RESULT(SBTypeCategory(category_sp));
+ return SBTypeCategory(category_sp);
} else {
- return LLDB_RECORD_RESULT(SBTypeCategory());
+ return SBTypeCategory();
}
}
SBTypeCategory SBDebugger::GetCategory(lldb::LanguageType lang_type) {
- LLDB_RECORD_METHOD(lldb::SBTypeCategory, SBDebugger, GetCategory,
- (lldb::LanguageType), lang_type);
+ LLDB_INSTRUMENT_VA(this, lang_type);
TypeCategoryImplSP category_sp;
if (DataVisualization::Categories::GetCategory(lang_type, category_sp)) {
- return LLDB_RECORD_RESULT(SBTypeCategory(category_sp));
+ return SBTypeCategory(category_sp);
} else {
- return LLDB_RECORD_RESULT(SBTypeCategory());
+ return SBTypeCategory();
}
}
SBTypeCategory SBDebugger::CreateCategory(const char *category_name) {
- LLDB_RECORD_METHOD(lldb::SBTypeCategory, SBDebugger, CreateCategory,
- (const char *), category_name);
+ LLDB_INSTRUMENT_VA(this, category_name);
if (!category_name || *category_name == 0)
- return LLDB_RECORD_RESULT(SBTypeCategory());
+ return SBTypeCategory();
TypeCategoryImplSP category_sp;
if (DataVisualization::Categories::GetCategory(ConstString(category_name),
category_sp, true)) {
- return LLDB_RECORD_RESULT(SBTypeCategory(category_sp));
+ return SBTypeCategory(category_sp);
} else {
- return LLDB_RECORD_RESULT(SBTypeCategory());
+ return SBTypeCategory();
}
}
bool SBDebugger::DeleteCategory(const char *category_name) {
- LLDB_RECORD_METHOD(bool, SBDebugger, DeleteCategory, (const char *),
- category_name);
+ LLDB_INSTRUMENT_VA(this, category_name);
if (!category_name || *category_name == 0)
return false;
@@ -1604,64 +1528,56 @@ bool SBDebugger::DeleteCategory(const char *category_name) {
}
uint32_t SBDebugger::GetNumCategories() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBDebugger, GetNumCategories);
+ LLDB_INSTRUMENT_VA(this);
return DataVisualization::Categories::GetCount();
}
SBTypeCategory SBDebugger::GetCategoryAtIndex(uint32_t index) {
- LLDB_RECORD_METHOD(lldb::SBTypeCategory, SBDebugger, GetCategoryAtIndex,
- (uint32_t), index);
+ LLDB_INSTRUMENT_VA(this, index);
- return LLDB_RECORD_RESULT(
- SBTypeCategory(DataVisualization::Categories::GetCategoryAtIndex(index)));
+ return SBTypeCategory(
+ DataVisualization::Categories::GetCategoryAtIndex(index));
}
SBTypeCategory SBDebugger::GetDefaultCategory() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBTypeCategory, SBDebugger,
- GetDefaultCategory);
+ LLDB_INSTRUMENT_VA(this);
- return LLDB_RECORD_RESULT(GetCategory("default"));
+ return GetCategory("default");
}
SBTypeFormat SBDebugger::GetFormatForType(SBTypeNameSpecifier type_name) {
- LLDB_RECORD_METHOD(lldb::SBTypeFormat, SBDebugger, GetFormatForType,
- (lldb::SBTypeNameSpecifier), type_name);
+ LLDB_INSTRUMENT_VA(this, type_name);
SBTypeCategory default_category_sb = GetDefaultCategory();
if (default_category_sb.GetEnabled())
- return LLDB_RECORD_RESULT(default_category_sb.GetFormatForType(type_name));
- return LLDB_RECORD_RESULT(SBTypeFormat());
+ return default_category_sb.GetFormatForType(type_name);
+ return SBTypeFormat();
}
SBTypeSummary SBDebugger::GetSummaryForType(SBTypeNameSpecifier type_name) {
- LLDB_RECORD_METHOD(lldb::SBTypeSummary, SBDebugger, GetSummaryForType,
- (lldb::SBTypeNameSpecifier), type_name);
+ LLDB_INSTRUMENT_VA(this, type_name);
if (!type_name.IsValid())
- return LLDB_RECORD_RESULT(SBTypeSummary());
- return LLDB_RECORD_RESULT(
- SBTypeSummary(DataVisualization::GetSummaryForType(type_name.GetSP())));
+ return SBTypeSummary();
+ return SBTypeSummary(DataVisualization::GetSummaryForType(type_name.GetSP()));
}
SBTypeFilter SBDebugger::GetFilterForType(SBTypeNameSpecifier type_name) {
- LLDB_RECORD_METHOD(lldb::SBTypeFilter, SBDebugger, GetFilterForType,
- (lldb::SBTypeNameSpecifier), type_name);
+ LLDB_INSTRUMENT_VA(this, type_name);
if (!type_name.IsValid())
- return LLDB_RECORD_RESULT(SBTypeFilter());
- return LLDB_RECORD_RESULT(
- SBTypeFilter(DataVisualization::GetFilterForType(type_name.GetSP())));
+ return SBTypeFilter();
+ return SBTypeFilter(DataVisualization::GetFilterForType(type_name.GetSP()));
}
SBTypeSynthetic SBDebugger::GetSyntheticForType(SBTypeNameSpecifier type_name) {
- LLDB_RECORD_METHOD(lldb::SBTypeSynthetic, SBDebugger, GetSyntheticForType,
- (lldb::SBTypeNameSpecifier), type_name);
+ LLDB_INSTRUMENT_VA(this, type_name);
if (!type_name.IsValid())
- return LLDB_RECORD_RESULT(SBTypeSynthetic());
- return LLDB_RECORD_RESULT(SBTypeSynthetic(
- DataVisualization::GetSyntheticForType(type_name.GetSP())));
+ return SBTypeSynthetic();
+ return SBTypeSynthetic(
+ DataVisualization::GetSyntheticForType(type_name.GetSP()));
}
static llvm::ArrayRef<const char *> GetCategoryArray(const char **categories) {
@@ -1674,8 +1590,7 @@ static llvm::ArrayRef<const char *> GetCategoryArray(const char **categories) {
}
bool SBDebugger::EnableLog(const char *channel, const char **categories) {
- LLDB_RECORD_METHOD(bool, SBDebugger, EnableLog, (const char *, const char **),
- channel, categories);
+ LLDB_INSTRUMENT_VA(this, channel, categories);
if (m_opaque_sp) {
uint32_t log_options =
@@ -1690,224 +1605,9 @@ bool SBDebugger::EnableLog(const char *channel, const char **categories) {
void SBDebugger::SetLoggingCallback(lldb::LogOutputCallback log_callback,
void *baton) {
- LLDB_RECORD_DUMMY(void, SBDebugger, SetLoggingCallback,
- (lldb::LogOutputCallback, void *), log_callback, baton);
+ LLDB_INSTRUMENT_VA(this, log_callback, baton);
if (m_opaque_sp) {
return m_opaque_sp->SetLoggingCallback(log_callback, baton);
}
}
-
-namespace lldb_private {
-namespace repro {
-
-template <> void RegisterMethods<SBInputReader>(Registry &R) {
- LLDB_REGISTER_METHOD(void, SBInputReader, SetIsDone, (bool));
- LLDB_REGISTER_METHOD_CONST(bool, SBInputReader, IsActive, ());
-}
-
-static void SetFileHandleRedirect(SBDebugger *, FILE *, bool) {
- // Do nothing.
-}
-
-static SBError SetFileRedirect(SBDebugger *, SBFile file) { return SBError(); }
-
-static SBError SetFileRedirect(SBDebugger *, FileSP file) { return SBError(); }
-
-template <> void RegisterMethods<SBDebugger>(Registry &R) {
- // Custom implementation.
- R.Register(&invoke<void (SBDebugger::*)(FILE *, bool)>::method<
- &SBDebugger::SetErrorFileHandle>::record,
- &SetFileHandleRedirect);
- R.Register(&invoke<void (SBDebugger::*)(FILE *, bool)>::method<
- &SBDebugger::SetOutputFileHandle>::record,
- &SetFileHandleRedirect);
-
- R.Register(&invoke<SBError (SBDebugger::*)(
- SBFile)>::method<&SBDebugger::SetInputFile>::record,
- &SetFileRedirect);
- R.Register(&invoke<SBError (SBDebugger::*)(
- SBFile)>::method<&SBDebugger::SetOutputFile>::record,
- &SetFileRedirect);
- R.Register(&invoke<SBError (SBDebugger::*)(
- SBFile)>::method<&SBDebugger::SetErrorFile>::record,
- &SetFileRedirect);
-
- R.Register(&invoke<SBError (SBDebugger::*)(
- FileSP)>::method<&SBDebugger::SetInputFile>::record,
- &SetFileRedirect);
- R.Register(&invoke<SBError (SBDebugger::*)(
- FileSP)>::method<&SBDebugger::SetOutputFile>::record,
- &SetFileRedirect);
- R.Register(&invoke<SBError (SBDebugger::*)(
- FileSP)>::method<&SBDebugger::SetErrorFile>::record,
- &SetFileRedirect);
-
- LLDB_REGISTER_CHAR_PTR_METHOD_STATIC(bool, SBDebugger,
- GetDefaultArchitecture);
-
- LLDB_REGISTER_CONSTRUCTOR(SBDebugger, ());
- LLDB_REGISTER_CONSTRUCTOR(SBDebugger, (const lldb::DebuggerSP &));
- LLDB_REGISTER_CONSTRUCTOR(SBDebugger, (const lldb::SBDebugger &));
- LLDB_REGISTER_METHOD(lldb::SBDebugger &,
- SBDebugger, operator=,(const lldb::SBDebugger &));
- LLDB_REGISTER_STATIC_METHOD(void, SBDebugger, Initialize, ());
- LLDB_REGISTER_STATIC_METHOD(lldb::SBError, SBDebugger,
- InitializeWithErrorHandling, ());
- LLDB_REGISTER_STATIC_METHOD(void, SBDebugger, Terminate, ());
- LLDB_REGISTER_METHOD(void, SBDebugger, Clear, ());
- LLDB_REGISTER_STATIC_METHOD(lldb::SBDebugger, SBDebugger, Create, ());
- LLDB_REGISTER_STATIC_METHOD(lldb::SBDebugger, SBDebugger, Create, (bool));
- LLDB_REGISTER_STATIC_METHOD(
- const char *, SBDebugger, GetProgressFromEvent,
- (const lldb::SBEvent &, uint64_t &, uint64_t &, uint64_t &, bool &));
- LLDB_REGISTER_STATIC_METHOD(const char *, SBDebugger, GetBroadcasterClass,
- ());
- LLDB_REGISTER_METHOD(SBBroadcaster, SBDebugger, GetBroadcaster, ());
- LLDB_REGISTER_STATIC_METHOD(void, SBDebugger, Destroy, (lldb::SBDebugger &));
- LLDB_REGISTER_STATIC_METHOD(void, SBDebugger, MemoryPressureDetected, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBDebugger, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBDebugger, operator bool,());
- LLDB_REGISTER_METHOD(void, SBDebugger, SetAsync, (bool));
- LLDB_REGISTER_METHOD(bool, SBDebugger, GetAsync, ());
- LLDB_REGISTER_METHOD(void, SBDebugger, SkipLLDBInitFiles, (bool));
- LLDB_REGISTER_METHOD(void, SBDebugger, SkipAppInitFiles, (bool));
- LLDB_REGISTER_METHOD(SBError, SBDebugger, SetInputString, (const char *));
- LLDB_REGISTER_METHOD(void, SBDebugger, SetInputFileHandle, (FILE *, bool));
- LLDB_REGISTER_METHOD(FILE *, SBDebugger, GetInputFileHandle, ());
- LLDB_REGISTER_METHOD(FILE *, SBDebugger, GetOutputFileHandle, ());
- LLDB_REGISTER_METHOD(FILE *, SBDebugger, GetErrorFileHandle, ());
- LLDB_REGISTER_METHOD(SBFile, SBDebugger, GetInputFile, ());
- LLDB_REGISTER_METHOD(SBFile, SBDebugger, GetOutputFile, ());
- LLDB_REGISTER_METHOD(SBFile, SBDebugger, GetErrorFile, ());
- LLDB_REGISTER_METHOD(void, SBDebugger, SaveInputTerminalState, ());
- LLDB_REGISTER_METHOD(void, SBDebugger, RestoreInputTerminalState, ());
- LLDB_REGISTER_METHOD(lldb::SBCommandInterpreter, SBDebugger,
- GetCommandInterpreter, ());
- LLDB_REGISTER_METHOD(void, SBDebugger, HandleCommand, (const char *));
- LLDB_REGISTER_METHOD(lldb::SBListener, SBDebugger, GetListener, ());
- LLDB_REGISTER_METHOD(
- void, SBDebugger, HandleProcessEvent,
- (const lldb::SBProcess &, const lldb::SBEvent &, FILE *, FILE *));
- LLDB_REGISTER_METHOD(
- void, SBDebugger, HandleProcessEvent,
- (const lldb::SBProcess &, const lldb::SBEvent &, SBFile, SBFile));
- LLDB_REGISTER_METHOD(
- void, SBDebugger, HandleProcessEvent,
- (const lldb::SBProcess &, const lldb::SBEvent &, FileSP, FileSP));
- LLDB_REGISTER_METHOD(lldb::SBSourceManager, SBDebugger, GetSourceManager, ());
- LLDB_REGISTER_STATIC_METHOD(bool, SBDebugger, SetDefaultArchitecture,
- (const char *));
- LLDB_REGISTER_METHOD(lldb::ScriptLanguage, SBDebugger, GetScriptingLanguage,
- (const char *));
- LLDB_REGISTER_METHOD(SBStructuredData, SBDebugger, GetScriptInterpreterInfo,
- (lldb::ScriptLanguage));
- LLDB_REGISTER_STATIC_METHOD(const char *, SBDebugger, GetVersionString, ());
- LLDB_REGISTER_STATIC_METHOD(const char *, SBDebugger, StateAsCString,
- (lldb::StateType));
- LLDB_REGISTER_STATIC_METHOD(lldb::SBStructuredData, SBDebugger,
- GetBuildConfiguration, ());
- LLDB_REGISTER_STATIC_METHOD(bool, SBDebugger, StateIsRunningState,
- (lldb::StateType));
- LLDB_REGISTER_STATIC_METHOD(bool, SBDebugger, StateIsStoppedState,
- (lldb::StateType));
- LLDB_REGISTER_METHOD(
- lldb::SBTarget, SBDebugger, CreateTarget,
- (const char *, const char *, const char *, bool, lldb::SBError &));
- LLDB_REGISTER_METHOD(lldb::SBTarget, SBDebugger,
- CreateTargetWithFileAndTargetTriple,
- (const char *, const char *));
- LLDB_REGISTER_METHOD(lldb::SBTarget, SBDebugger, CreateTargetWithFileAndArch,
- (const char *, const char *));
- LLDB_REGISTER_METHOD(lldb::SBTarget, SBDebugger, CreateTarget,
- (const char *));
- LLDB_REGISTER_METHOD(lldb::SBTarget, SBDebugger, GetDummyTarget, ());
- LLDB_REGISTER_METHOD(bool, SBDebugger, DeleteTarget, (lldb::SBTarget &));
- LLDB_REGISTER_METHOD(lldb::SBTarget, SBDebugger, GetTargetAtIndex,
- (uint32_t));
- LLDB_REGISTER_METHOD(uint32_t, SBDebugger, GetIndexOfTarget,
- (lldb::SBTarget));
- LLDB_REGISTER_METHOD(lldb::SBTarget, SBDebugger, FindTargetWithProcessID,
- (lldb::pid_t));
- LLDB_REGISTER_METHOD(lldb::SBTarget, SBDebugger, FindTargetWithFileAndArch,
- (const char *, const char *));
- LLDB_REGISTER_METHOD(uint32_t, SBDebugger, GetNumTargets, ());
- LLDB_REGISTER_METHOD(lldb::SBTarget, SBDebugger, GetSelectedTarget, ());
- LLDB_REGISTER_METHOD(void, SBDebugger, SetSelectedTarget, (lldb::SBTarget &));
- LLDB_REGISTER_METHOD(lldb::SBPlatform, SBDebugger, GetSelectedPlatform, ());
- LLDB_REGISTER_METHOD(void, SBDebugger, SetSelectedPlatform,
- (lldb::SBPlatform &));
- LLDB_REGISTER_METHOD(uint32_t, SBDebugger, GetNumPlatforms, ());
- LLDB_REGISTER_METHOD(lldb::SBPlatform, SBDebugger, GetPlatformAtIndex,
- (uint32_t));
- LLDB_REGISTER_METHOD(uint32_t, SBDebugger, GetNumAvailablePlatforms, ());
- LLDB_REGISTER_METHOD(lldb::SBStructuredData, SBDebugger,
- GetAvailablePlatformInfoAtIndex, (uint32_t));
- LLDB_REGISTER_METHOD(void, SBDebugger, DispatchInputInterrupt, ());
- LLDB_REGISTER_METHOD(void, SBDebugger, DispatchInputEndOfFile, ());
- LLDB_REGISTER_METHOD(void, SBDebugger, PushInputReader,
- (lldb::SBInputReader &));
- LLDB_REGISTER_METHOD(void, SBDebugger, RunCommandInterpreter, (bool, bool));
- LLDB_REGISTER_METHOD(void, SBDebugger, RunCommandInterpreter,
- (bool, bool, lldb::SBCommandInterpreterRunOptions &,
- int &, bool &, bool &));
- LLDB_REGISTER_METHOD(lldb::SBError, SBDebugger, RunREPL,
- (lldb::LanguageType, const char *));
- LLDB_REGISTER_STATIC_METHOD(lldb::SBDebugger, SBDebugger, FindDebuggerWithID,
- (int));
- LLDB_REGISTER_METHOD(const char *, SBDebugger, GetInstanceName, ());
- LLDB_REGISTER_STATIC_METHOD(lldb::SBError, SBDebugger, SetInternalVariable,
- (const char *, const char *, const char *));
- LLDB_REGISTER_STATIC_METHOD(lldb::SBStringList, SBDebugger,
- GetInternalVariableValue,
- (const char *, const char *));
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBDebugger, GetTerminalWidth, ());
- LLDB_REGISTER_METHOD(void, SBDebugger, SetTerminalWidth, (uint32_t));
- LLDB_REGISTER_METHOD_CONST(const char *, SBDebugger, GetPrompt, ());
- LLDB_REGISTER_METHOD(void, SBDebugger, SetPrompt, (const char *));
- LLDB_REGISTER_METHOD_CONST(const char *, SBDebugger, GetReproducerPath, ());
- LLDB_REGISTER_METHOD_CONST(lldb::ScriptLanguage, SBDebugger,
- GetScriptLanguage, ());
- LLDB_REGISTER_METHOD(void, SBDebugger, SetScriptLanguage,
- (lldb::ScriptLanguage));
- LLDB_REGISTER_METHOD(bool, SBDebugger, SetUseExternalEditor, (bool));
- LLDB_REGISTER_METHOD(bool, SBDebugger, GetUseExternalEditor, ());
- LLDB_REGISTER_METHOD(bool, SBDebugger, SetUseColor, (bool));
- LLDB_REGISTER_METHOD_CONST(bool, SBDebugger, GetUseColor, ());
- LLDB_REGISTER_METHOD(bool, SBDebugger, GetDescription, (lldb::SBStream &));
- LLDB_REGISTER_METHOD(lldb::user_id_t, SBDebugger, GetID, ());
- LLDB_REGISTER_METHOD(lldb::SBError, SBDebugger, SetCurrentPlatform,
- (const char *));
- LLDB_REGISTER_METHOD(bool, SBDebugger, SetCurrentPlatformSDKRoot,
- (const char *));
- LLDB_REGISTER_METHOD_CONST(bool, SBDebugger, GetCloseInputOnEOF, ());
- LLDB_REGISTER_METHOD(void, SBDebugger, SetCloseInputOnEOF, (bool));
- LLDB_REGISTER_METHOD(lldb::SBTypeCategory, SBDebugger, GetCategory,
- (const char *));
- LLDB_REGISTER_METHOD(lldb::SBTypeCategory, SBDebugger, GetCategory,
- (lldb::LanguageType));
- LLDB_REGISTER_METHOD(lldb::SBTypeCategory, SBDebugger, CreateCategory,
- (const char *));
- LLDB_REGISTER_METHOD(bool, SBDebugger, DeleteCategory, (const char *));
- LLDB_REGISTER_METHOD(uint32_t, SBDebugger, GetNumCategories, ());
- LLDB_REGISTER_METHOD(lldb::SBTypeCategory, SBDebugger, GetCategoryAtIndex,
- (uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBTypeCategory, SBDebugger, GetDefaultCategory,
- ());
- LLDB_REGISTER_METHOD(lldb::SBTypeFormat, SBDebugger, GetFormatForType,
- (lldb::SBTypeNameSpecifier));
- LLDB_REGISTER_METHOD(lldb::SBTypeSummary, SBDebugger, GetSummaryForType,
- (lldb::SBTypeNameSpecifier));
- LLDB_REGISTER_METHOD(lldb::SBTypeSynthetic, SBDebugger, GetSyntheticForType,
- (lldb::SBTypeNameSpecifier));
- LLDB_REGISTER_METHOD(lldb::SBTypeFilter, SBDebugger, GetFilterForType,
- (lldb::SBTypeNameSpecifier));
- LLDB_REGISTER_METHOD(bool, SBDebugger, EnableLog,
- (const char *, const char **));
- LLDB_REGISTER_METHOD(lldb::SBCommandInterpreterRunResult, SBDebugger,
- RunCommandInterpreter,
- (const lldb::SBCommandInterpreterRunOptions &));
-}
-
-} // namespace repro
-} // namespace lldb_private
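The hunks in this file and in the files that follow all apply the same mechanical change: every LLDB_RECORD_CONSTRUCTOR / LLDB_RECORD_METHOD* / LLDB_RECORD_STATIC_METHOD macro, which had to spell out the return type, class, method name, and full signature, becomes a single variadic LLDB_INSTRUMENT_VA(...) call; return values lose their LLDB_RECORD_RESULT wrapper; and the per-class repro::RegisterMethods<> specializations (the reproducer Registry bindings deleted above) go away entirely. A minimal sketch of the before/after shape, using a hypothetical SBThing class rather than any real lldb type and assuming an lldb source tree that provides lldb/Utility/Instrumentation.h:

#include "lldb/Utility/Instrumentation.h"

namespace lldb {
// Hypothetical class, for illustration only; not part of the patch.
class SBThing {
public:
  SBThing() { LLDB_INSTRUMENT_VA(this); }
  SBThing(const SBThing &rhs) { LLDB_INSTRUMENT_VA(this, rhs); }

  SBThing GetCopy() const {
    // Old form removed by this series:
    //   LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBThing, SBThing, GetCopy);
    //   return LLDB_RECORD_RESULT(SBThing(*this));
    LLDB_INSTRUMENT_VA(this); // records `this` (and any arguments)
    return SBThing(*this);    // results are returned directly
  }
};
} // namespace lldb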
diff --git a/contrib/llvm-project/lldb/source/API/SBDeclaration.cpp b/contrib/llvm-project/lldb/source/API/SBDeclaration.cpp
index 1496096e46d1..5b7def09b5cc 100644
--- a/contrib/llvm-project/lldb/source/API/SBDeclaration.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBDeclaration.cpp
@@ -7,11 +7,11 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBDeclaration.h"
-#include "SBReproducerPrivate.h"
#include "Utils.h"
#include "lldb/API/SBStream.h"
#include "lldb/Core/Declaration.h"
#include "lldb/Host/PosixApi.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Utility/Stream.h"
#include <climits>
@@ -19,30 +19,25 @@
using namespace lldb;
using namespace lldb_private;
-SBDeclaration::SBDeclaration() : m_opaque_up() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBDeclaration);
-}
+SBDeclaration::SBDeclaration() { LLDB_INSTRUMENT_VA(this); }
-SBDeclaration::SBDeclaration(const SBDeclaration &rhs) : m_opaque_up() {
- LLDB_RECORD_CONSTRUCTOR(SBDeclaration, (const lldb::SBDeclaration &), rhs);
+SBDeclaration::SBDeclaration(const SBDeclaration &rhs) {
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_up = clone(rhs.m_opaque_up);
}
-SBDeclaration::SBDeclaration(const lldb_private::Declaration *lldb_object_ptr)
- : m_opaque_up() {
+SBDeclaration::SBDeclaration(const lldb_private::Declaration *lldb_object_ptr) {
if (lldb_object_ptr)
m_opaque_up = std::make_unique<Declaration>(*lldb_object_ptr);
}
const SBDeclaration &SBDeclaration::operator=(const SBDeclaration &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBDeclaration &,
- SBDeclaration, operator=,(const lldb::SBDeclaration &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_up = clone(rhs.m_opaque_up);
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
void SBDeclaration::SetDeclaration(
@@ -53,31 +48,27 @@ void SBDeclaration::SetDeclaration(
SBDeclaration::~SBDeclaration() = default;
bool SBDeclaration::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBDeclaration, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBDeclaration::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBDeclaration, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up.get() && m_opaque_up->IsValid();
}
SBFileSpec SBDeclaration::GetFileSpec() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBFileSpec, SBDeclaration,
- GetFileSpec);
-
+ LLDB_INSTRUMENT_VA(this);
SBFileSpec sb_file_spec;
if (m_opaque_up.get() && m_opaque_up->GetFile())
sb_file_spec.SetFileSpec(m_opaque_up->GetFile());
-
- return LLDB_RECORD_RESULT(sb_file_spec);
+ return sb_file_spec;
}
uint32_t SBDeclaration::GetLine() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBDeclaration, GetLine);
-
+ LLDB_INSTRUMENT_VA(this);
uint32_t line = 0;
if (m_opaque_up)
@@ -88,7 +79,7 @@ uint32_t SBDeclaration::GetLine() const {
}
uint32_t SBDeclaration::GetColumn() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBDeclaration, GetColumn);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_up)
return m_opaque_up->GetColumn();
@@ -96,8 +87,7 @@ uint32_t SBDeclaration::GetColumn() const {
}
void SBDeclaration::SetFileSpec(lldb::SBFileSpec filespec) {
- LLDB_RECORD_METHOD(void, SBDeclaration, SetFileSpec, (lldb::SBFileSpec),
- filespec);
+ LLDB_INSTRUMENT_VA(this, filespec);
if (filespec.IsValid())
ref().SetFile(filespec.ref());
@@ -105,20 +95,19 @@ void SBDeclaration::SetFileSpec(lldb::SBFileSpec filespec) {
ref().SetFile(FileSpec());
}
void SBDeclaration::SetLine(uint32_t line) {
- LLDB_RECORD_METHOD(void, SBDeclaration, SetLine, (uint32_t), line);
+ LLDB_INSTRUMENT_VA(this, line);
ref().SetLine(line);
}
void SBDeclaration::SetColumn(uint32_t column) {
- LLDB_RECORD_METHOD(void, SBDeclaration, SetColumn, (uint32_t), column);
+ LLDB_INSTRUMENT_VA(this, column);
ref().SetColumn(column);
}
bool SBDeclaration::operator==(const SBDeclaration &rhs) const {
- LLDB_RECORD_METHOD_CONST(
- bool, SBDeclaration, operator==,(const lldb::SBDeclaration &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
lldb_private::Declaration *lhs_ptr = m_opaque_up.get();
lldb_private::Declaration *rhs_ptr = rhs.m_opaque_up.get();
@@ -130,8 +119,7 @@ bool SBDeclaration::operator==(const SBDeclaration &rhs) const {
}
bool SBDeclaration::operator!=(const SBDeclaration &rhs) const {
- LLDB_RECORD_METHOD_CONST(
- bool, SBDeclaration, operator!=,(const lldb::SBDeclaration &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
lldb_private::Declaration *lhs_ptr = m_opaque_up.get();
lldb_private::Declaration *rhs_ptr = rhs.m_opaque_up.get();
@@ -157,8 +145,7 @@ const lldb_private::Declaration &SBDeclaration::ref() const {
}
bool SBDeclaration::GetDescription(SBStream &description) {
- LLDB_RECORD_METHOD(bool, SBDeclaration, GetDescription, (lldb::SBStream &),
- description);
+ LLDB_INSTRUMENT_VA(this, description);
Stream &strm = description.ref();
@@ -175,33 +162,3 @@ bool SBDeclaration::GetDescription(SBStream &description) {
}
lldb_private::Declaration *SBDeclaration::get() { return m_opaque_up.get(); }
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBDeclaration>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBDeclaration, ());
- LLDB_REGISTER_CONSTRUCTOR(SBDeclaration, (const lldb::SBDeclaration &));
- LLDB_REGISTER_METHOD(
- const lldb::SBDeclaration &,
- SBDeclaration, operator=,(const lldb::SBDeclaration &));
- LLDB_REGISTER_METHOD_CONST(bool, SBDeclaration, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBDeclaration, operator bool, ());
- LLDB_REGISTER_METHOD_CONST(lldb::SBFileSpec, SBDeclaration, GetFileSpec,
- ());
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBDeclaration, GetLine, ());
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBDeclaration, GetColumn, ());
- LLDB_REGISTER_METHOD(void, SBDeclaration, SetFileSpec, (lldb::SBFileSpec));
- LLDB_REGISTER_METHOD(void, SBDeclaration, SetLine, (uint32_t));
- LLDB_REGISTER_METHOD(void, SBDeclaration, SetColumn, (uint32_t));
- LLDB_REGISTER_METHOD_CONST(
- bool, SBDeclaration, operator==,(const lldb::SBDeclaration &));
- LLDB_REGISTER_METHOD_CONST(
- bool, SBDeclaration, operator!=,(const lldb::SBDeclaration &));
- LLDB_REGISTER_METHOD(bool, SBDeclaration, GetDescription,
- (lldb::SBStream &));
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBEnvironment.cpp b/contrib/llvm-project/lldb/source/API/SBEnvironment.cpp
index d4de89c32567..5fafabe02e01 100644
--- a/contrib/llvm-project/lldb/source/API/SBEnvironment.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBEnvironment.cpp
@@ -7,22 +7,22 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBEnvironment.h"
-#include "SBReproducerPrivate.h"
#include "Utils.h"
#include "lldb/API/SBStringList.h"
#include "lldb/Utility/ConstString.h"
#include "lldb/Utility/Environment.h"
+#include "lldb/Utility/Instrumentation.h"
using namespace lldb;
using namespace lldb_private;
SBEnvironment::SBEnvironment() : m_opaque_up(new Environment()) {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBEnvironment);
+ LLDB_INSTRUMENT_VA(this);
}
SBEnvironment::SBEnvironment(const SBEnvironment &rhs)
: m_opaque_up(clone(rhs.m_opaque_up)) {
- LLDB_RECORD_CONSTRUCTOR(SBEnvironment, (const lldb::SBEnvironment &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
SBEnvironment::SBEnvironment(Environment rhs)
@@ -31,23 +31,21 @@ SBEnvironment::SBEnvironment(Environment rhs)
SBEnvironment::~SBEnvironment() = default;
const SBEnvironment &SBEnvironment::operator=(const SBEnvironment &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBEnvironment &,
- SBEnvironment, operator=,(const lldb::SBEnvironment &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_up = clone(rhs.m_opaque_up);
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
size_t SBEnvironment::GetNumValues() {
- LLDB_RECORD_METHOD_NO_ARGS(size_t, SBEnvironment, GetNumValues);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->size();
}
const char *SBEnvironment::Get(const char *name) {
- LLDB_RECORD_METHOD(const char *, SBEnvironment, Get, (const char *), name);
+ LLDB_INSTRUMENT_VA(this, name);
auto entry = m_opaque_up->find(name);
if (entry == m_opaque_up->end()) {
@@ -57,8 +55,7 @@ const char *SBEnvironment::Get(const char *name) {
}
const char *SBEnvironment::GetNameAtIndex(size_t index) {
- LLDB_RECORD_METHOD(const char *, SBEnvironment, GetNameAtIndex, (size_t),
- index);
+ LLDB_INSTRUMENT_VA(this, index);
if (index >= GetNumValues())
return nullptr;
@@ -67,8 +64,7 @@ const char *SBEnvironment::GetNameAtIndex(size_t index) {
}
const char *SBEnvironment::GetValueAtIndex(size_t index) {
- LLDB_RECORD_METHOD(const char *, SBEnvironment, GetValueAtIndex, (size_t),
- index);
+ LLDB_INSTRUMENT_VA(this, index);
if (index >= GetNumValues())
return nullptr;
@@ -77,9 +73,7 @@ const char *SBEnvironment::GetValueAtIndex(size_t index) {
}
bool SBEnvironment::Set(const char *name, const char *value, bool overwrite) {
- LLDB_RECORD_METHOD(bool, SBEnvironment, Set,
- (const char *, const char *, bool), name, value,
- overwrite);
+ LLDB_INSTRUMENT_VA(this, name, value, overwrite);
if (overwrite) {
m_opaque_up->insert_or_assign(name, std::string(value));
@@ -89,32 +83,30 @@ bool SBEnvironment::Set(const char *name, const char *value, bool overwrite) {
}
bool SBEnvironment::Unset(const char *name) {
- LLDB_RECORD_METHOD(bool, SBEnvironment, Unset, (const char *), name);
+ LLDB_INSTRUMENT_VA(this, name);
return m_opaque_up->erase(name);
}
SBStringList SBEnvironment::GetEntries() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBStringList, SBEnvironment, GetEntries);
+ LLDB_INSTRUMENT_VA(this);
SBStringList entries;
for (const auto &KV : *m_opaque_up) {
entries.AppendString(Environment::compose(KV).c_str());
}
- return LLDB_RECORD_RESULT(entries);
+ return entries;
}
void SBEnvironment::PutEntry(const char *name_and_value) {
- LLDB_RECORD_METHOD(void, SBEnvironment, PutEntry, (const char *),
- name_and_value);
+ LLDB_INSTRUMENT_VA(this, name_and_value);
auto split = llvm::StringRef(name_and_value).split('=');
m_opaque_up->insert_or_assign(split.first.str(), split.second.str());
}
void SBEnvironment::SetEntries(const SBStringList &entries, bool append) {
- LLDB_RECORD_METHOD(void, SBEnvironment, SetEntries,
- (const lldb::SBStringList &, bool), entries, append);
+ LLDB_INSTRUMENT_VA(this, entries, append);
if (!append)
m_opaque_up->clear();
@@ -124,32 +116,9 @@ void SBEnvironment::SetEntries(const SBStringList &entries, bool append) {
}
void SBEnvironment::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBEnvironment, Clear);
+ LLDB_INSTRUMENT_VA(this);
m_opaque_up->clear();
}
Environment &SBEnvironment::ref() const { return *m_opaque_up; }
-
-namespace lldb_private {
-namespace repro {
-template <> void RegisterMethods<SBEnvironment>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBEnvironment, ());
- LLDB_REGISTER_CONSTRUCTOR(SBEnvironment, (const lldb::SBEnvironment &));
- LLDB_REGISTER_METHOD(const lldb::SBEnvironment &,
- SBEnvironment, operator=,(const lldb::SBEnvironment &));
- LLDB_REGISTER_METHOD(size_t, SBEnvironment, GetNumValues, ());
- LLDB_REGISTER_METHOD(const char *, SBEnvironment, Get, (const char *));
- LLDB_REGISTER_METHOD(const char *, SBEnvironment, GetNameAtIndex, (size_t));
- LLDB_REGISTER_METHOD(const char *, SBEnvironment, GetValueAtIndex, (size_t));
- LLDB_REGISTER_METHOD(bool, SBEnvironment, Set,
- (const char *, const char *, bool));
- LLDB_REGISTER_METHOD(bool, SBEnvironment, Unset, (const char *));
- LLDB_REGISTER_METHOD(lldb::SBStringList, SBEnvironment, GetEntries, ());
- LLDB_REGISTER_METHOD(void, SBEnvironment, PutEntry, (const char *));
- LLDB_REGISTER_METHOD(void, SBEnvironment, SetEntries,
- (const lldb::SBStringList &, bool));
- LLDB_REGISTER_METHOD(void, SBEnvironment, Clear, ());
-}
-} // namespace repro
-} // namespace lldb_private
diff --git a/contrib/llvm-project/lldb/source/API/SBError.cpp b/contrib/llvm-project/lldb/source/API/SBError.cpp
index 89b5f26fd80c..ef4f7266f083 100644
--- a/contrib/llvm-project/lldb/source/API/SBError.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBError.cpp
@@ -7,9 +7,9 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBError.h"
-#include "SBReproducerPrivate.h"
#include "Utils.h"
#include "lldb/API/SBStream.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Utility/Status.h"
#include <cstdarg>
@@ -17,10 +17,10 @@
using namespace lldb;
using namespace lldb_private;
-SBError::SBError() : m_opaque_up() { LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBError); }
+SBError::SBError() { LLDB_INSTRUMENT_VA(this); }
-SBError::SBError(const SBError &rhs) : m_opaque_up() {
- LLDB_RECORD_CONSTRUCTOR(SBError, (const lldb::SBError &), rhs);
+SBError::SBError(const SBError &rhs) {
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_up = clone(rhs.m_opaque_up);
}
@@ -28,16 +28,15 @@ SBError::SBError(const SBError &rhs) : m_opaque_up() {
SBError::~SBError() = default;
const SBError &SBError::operator=(const SBError &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBError &,
- SBError, operator=,(const lldb::SBError &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_up = clone(rhs.m_opaque_up);
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
const char *SBError::GetCString() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBError, GetCString);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_up)
return m_opaque_up->AsCString();
@@ -45,14 +44,14 @@ const char *SBError::GetCString() const {
}
void SBError::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBError, Clear);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_up)
m_opaque_up->Clear();
}
bool SBError::Fail() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBError, Fail);
+ LLDB_INSTRUMENT_VA(this);
bool ret_value = false;
if (m_opaque_up)
@@ -63,7 +62,7 @@ bool SBError::Fail() const {
}
bool SBError::Success() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBError, Success);
+ LLDB_INSTRUMENT_VA(this);
bool ret_value = true;
if (m_opaque_up)
@@ -73,8 +72,7 @@ bool SBError::Success() const {
}
uint32_t SBError::GetError() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBError, GetError);
-
+ LLDB_INSTRUMENT_VA(this);
uint32_t err = 0;
if (m_opaque_up)
@@ -85,7 +83,7 @@ uint32_t SBError::GetError() const {
}
ErrorType SBError::GetType() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::ErrorType, SBError, GetType);
+ LLDB_INSTRUMENT_VA(this);
ErrorType err_type = eErrorTypeInvalid;
if (m_opaque_up)
@@ -95,8 +93,7 @@ ErrorType SBError::GetType() const {
}
void SBError::SetError(uint32_t err, ErrorType type) {
- LLDB_RECORD_METHOD(void, SBError, SetError, (uint32_t, lldb::ErrorType), err,
- type);
+ LLDB_INSTRUMENT_VA(this, err, type);
CreateIfNeeded();
m_opaque_up->SetError(err, type);
@@ -108,21 +105,21 @@ void SBError::SetError(const Status &lldb_error) {
}
void SBError::SetErrorToErrno() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBError, SetErrorToErrno);
+ LLDB_INSTRUMENT_VA(this);
CreateIfNeeded();
m_opaque_up->SetErrorToErrno();
}
void SBError::SetErrorToGenericError() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBError, SetErrorToGenericError);
+ LLDB_INSTRUMENT_VA(this);
CreateIfNeeded();
m_opaque_up->SetErrorToGenericError();
}
void SBError::SetErrorString(const char *err_str) {
- LLDB_RECORD_METHOD(void, SBError, SetErrorString, (const char *), err_str);
+ LLDB_INSTRUMENT_VA(this, err_str);
CreateIfNeeded();
m_opaque_up->SetErrorString(err_str);
@@ -138,11 +135,11 @@ int SBError::SetErrorStringWithFormat(const char *format, ...) {
}
bool SBError::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBError, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBError::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBError, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up != nullptr;
}
@@ -167,8 +164,7 @@ const lldb_private::Status &SBError::operator*() const {
}
bool SBError::GetDescription(SBStream &description) {
- LLDB_RECORD_METHOD(bool, SBError, GetDescription, (lldb::SBStream &),
- description);
+ LLDB_INSTRUMENT_VA(this, description);
if (m_opaque_up) {
if (m_opaque_up->Success())
@@ -183,30 +179,3 @@ bool SBError::GetDescription(SBStream &description) {
return true;
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBError>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBError, ());
- LLDB_REGISTER_CONSTRUCTOR(SBError, (const lldb::SBError &));
- LLDB_REGISTER_METHOD(const lldb::SBError &,
- SBError, operator=,(const lldb::SBError &));
- LLDB_REGISTER_METHOD_CONST(const char *, SBError, GetCString, ());
- LLDB_REGISTER_METHOD(void, SBError, Clear, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBError, Fail, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBError, Success, ());
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBError, GetError, ());
- LLDB_REGISTER_METHOD_CONST(lldb::ErrorType, SBError, GetType, ());
- LLDB_REGISTER_METHOD(void, SBError, SetError, (uint32_t, lldb::ErrorType));
- LLDB_REGISTER_METHOD(void, SBError, SetErrorToErrno, ());
- LLDB_REGISTER_METHOD(void, SBError, SetErrorToGenericError, ());
- LLDB_REGISTER_METHOD(void, SBError, SetErrorString, (const char *));
- LLDB_REGISTER_METHOD_CONST(bool, SBError, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBError, operator bool, ());
- LLDB_REGISTER_METHOD(bool, SBError, GetDescription, (lldb::SBStream &));
-}
-
-}
-}
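A secondary cleanup visible in SBDeclaration and SBError above: default and copy constructors drop the now-redundant `: m_opaque_up()` member initializer, since a std::unique_ptr member is already null when default-constructed. A standalone sketch of that guarantee, with a made-up Wrapper type standing in for the SB classes:

#include <cassert>
#include <memory>

struct Opaque {
  int value = 0;
};

// Stand-in for an SB class: no explicit member initializer is needed because
// std::unique_ptr default-constructs to nullptr.
class Wrapper {
public:
  Wrapper() = default;
  bool IsValid() const { return static_cast<bool>(m_opaque_up); }

private:
  std::unique_ptr<Opaque> m_opaque_up;
};

int main() {
  Wrapper w;
  assert(!w.IsValid()); // the opaque pointer starts out null
  return 0;
}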
diff --git a/contrib/llvm-project/lldb/source/API/SBEvent.cpp b/contrib/llvm-project/lldb/source/API/SBEvent.cpp
index a0b606e3812e..536680bd1c5e 100644
--- a/contrib/llvm-project/lldb/source/API/SBEvent.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBEvent.cpp
@@ -7,9 +7,9 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBEvent.h"
-#include "SBReproducerPrivate.h"
#include "lldb/API/SBBroadcaster.h"
#include "lldb/API/SBStream.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Breakpoint/Breakpoint.h"
#include "lldb/Core/StreamFile.h"
@@ -22,44 +22,42 @@
using namespace lldb;
using namespace lldb_private;
-SBEvent::SBEvent() : m_event_sp() { LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBEvent); }
+SBEvent::SBEvent() { LLDB_INSTRUMENT_VA(this); }
SBEvent::SBEvent(uint32_t event_type, const char *cstr, uint32_t cstr_len)
: m_event_sp(new Event(event_type, new EventDataBytes(cstr, cstr_len))),
m_opaque_ptr(m_event_sp.get()) {
- LLDB_RECORD_CONSTRUCTOR(SBEvent, (uint32_t, const char *, uint32_t),
- event_type, cstr, cstr_len);
+ LLDB_INSTRUMENT_VA(this, event_type, cstr, cstr_len);
}
SBEvent::SBEvent(EventSP &event_sp)
: m_event_sp(event_sp), m_opaque_ptr(event_sp.get()) {
- LLDB_RECORD_CONSTRUCTOR(SBEvent, (lldb::EventSP &), event_sp);
+ LLDB_INSTRUMENT_VA(this, event_sp);
}
-SBEvent::SBEvent(Event *event_ptr) : m_event_sp(), m_opaque_ptr(event_ptr) {
- LLDB_RECORD_CONSTRUCTOR(SBEvent, (lldb_private::Event *), event_ptr);
+SBEvent::SBEvent(Event *event_ptr) : m_opaque_ptr(event_ptr) {
+ LLDB_INSTRUMENT_VA(this, event_ptr);
}
SBEvent::SBEvent(const SBEvent &rhs)
: m_event_sp(rhs.m_event_sp), m_opaque_ptr(rhs.m_opaque_ptr) {
- LLDB_RECORD_CONSTRUCTOR(SBEvent, (const lldb::SBEvent &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
const SBEvent &SBEvent::operator=(const SBEvent &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBEvent &,
- SBEvent, operator=,(const lldb::SBEvent &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs) {
m_event_sp = rhs.m_event_sp;
m_opaque_ptr = rhs.m_opaque_ptr;
}
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBEvent::~SBEvent() = default;
const char *SBEvent::GetDataFlavor() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBEvent, GetDataFlavor);
+ LLDB_INSTRUMENT_VA(this);
Event *lldb_event = get();
if (lldb_event) {
@@ -71,8 +69,7 @@ const char *SBEvent::GetDataFlavor() {
}
uint32_t SBEvent::GetType() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBEvent, GetType);
-
+ LLDB_INSTRUMENT_VA(this);
const Event *lldb_event = get();
uint32_t event_type = 0;
@@ -84,18 +81,17 @@ uint32_t SBEvent::GetType() const {
}
SBBroadcaster SBEvent::GetBroadcaster() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBBroadcaster, SBEvent,
- GetBroadcaster);
+ LLDB_INSTRUMENT_VA(this);
SBBroadcaster broadcaster;
const Event *lldb_event = get();
if (lldb_event)
broadcaster.reset(lldb_event->GetBroadcaster(), false);
- return LLDB_RECORD_RESULT(broadcaster);
+ return broadcaster;
}
const char *SBEvent::GetBroadcasterClass() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBEvent, GetBroadcasterClass);
+ LLDB_INSTRUMENT_VA(this);
const Event *lldb_event = get();
if (lldb_event)
@@ -105,8 +101,7 @@ const char *SBEvent::GetBroadcasterClass() const {
}
bool SBEvent::BroadcasterMatchesPtr(const SBBroadcaster *broadcaster) {
- LLDB_RECORD_METHOD(bool, SBEvent, BroadcasterMatchesPtr,
- (const lldb::SBBroadcaster *), broadcaster);
+ LLDB_INSTRUMENT_VA(this, broadcaster);
if (broadcaster)
return BroadcasterMatchesRef(*broadcaster);
@@ -114,8 +109,7 @@ bool SBEvent::BroadcasterMatchesPtr(const SBBroadcaster *broadcaster) {
}
bool SBEvent::BroadcasterMatchesRef(const SBBroadcaster &broadcaster) {
- LLDB_RECORD_METHOD(bool, SBEvent, BroadcasterMatchesRef,
- (const lldb::SBBroadcaster &), broadcaster);
+ LLDB_INSTRUMENT_VA(this, broadcaster);
Event *lldb_event = get();
bool success = false;
@@ -127,7 +121,7 @@ bool SBEvent::BroadcasterMatchesRef(const SBBroadcaster &broadcaster) {
}
void SBEvent::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBEvent, Clear);
+ LLDB_INSTRUMENT_VA(this);
Event *lldb_event = get();
if (lldb_event)
@@ -158,11 +152,11 @@ void SBEvent::reset(Event *event_ptr) {
}
bool SBEvent::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBEvent, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBEvent::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBEvent, operator bool);
+ LLDB_INSTRUMENT_VA(this);
// Do NOT use m_opaque_ptr directly!!! Must use the SBEvent::get() accessor.
// See comments in SBEvent::get()....
@@ -170,16 +164,14 @@ SBEvent::operator bool() const {
}
const char *SBEvent::GetCStringFromEvent(const SBEvent &event) {
- LLDB_RECORD_STATIC_METHOD(const char *, SBEvent, GetCStringFromEvent,
- (const lldb::SBEvent &), event);
+ LLDB_INSTRUMENT_VA(event);
return static_cast<const char *>(
EventDataBytes::GetBytesFromEvent(event.get()));
}
bool SBEvent::GetDescription(SBStream &description) {
- LLDB_RECORD_METHOD(bool, SBEvent, GetDescription, (lldb::SBStream &),
- description);
+ LLDB_INSTRUMENT_VA(this, description);
Stream &strm = description.ref();
@@ -192,8 +184,7 @@ bool SBEvent::GetDescription(SBStream &description) {
}
bool SBEvent::GetDescription(SBStream &description) const {
- LLDB_RECORD_METHOD_CONST(bool, SBEvent, GetDescription, (lldb::SBStream &),
- description);
+ LLDB_INSTRUMENT_VA(this, description);
Stream &strm = description.ref();
@@ -204,37 +195,3 @@ bool SBEvent::GetDescription(SBStream &description) const {
return true;
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBEvent>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBEvent, ());
- LLDB_REGISTER_CONSTRUCTOR(SBEvent, (uint32_t, const char *, uint32_t));
- LLDB_REGISTER_CONSTRUCTOR(SBEvent, (lldb::EventSP &));
- LLDB_REGISTER_CONSTRUCTOR(SBEvent, (lldb_private::Event *));
- LLDB_REGISTER_CONSTRUCTOR(SBEvent, (const lldb::SBEvent &));
- LLDB_REGISTER_METHOD(const lldb::SBEvent &,
- SBEvent, operator=,(const lldb::SBEvent &));
- LLDB_REGISTER_METHOD(const char *, SBEvent, GetDataFlavor, ());
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBEvent, GetType, ());
- LLDB_REGISTER_METHOD_CONST(lldb::SBBroadcaster, SBEvent, GetBroadcaster,
- ());
- LLDB_REGISTER_METHOD_CONST(const char *, SBEvent, GetBroadcasterClass, ());
- LLDB_REGISTER_METHOD(bool, SBEvent, BroadcasterMatchesPtr,
- (const lldb::SBBroadcaster *));
- LLDB_REGISTER_METHOD(bool, SBEvent, BroadcasterMatchesRef,
- (const lldb::SBBroadcaster &));
- LLDB_REGISTER_METHOD(void, SBEvent, Clear, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBEvent, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBEvent, operator bool, ());
- LLDB_REGISTER_STATIC_METHOD(const char *, SBEvent, GetCStringFromEvent,
- (const lldb::SBEvent &));
- LLDB_REGISTER_METHOD(bool, SBEvent, GetDescription, (lldb::SBStream &));
- LLDB_REGISTER_METHOD_CONST(bool, SBEvent, GetDescription,
- (lldb::SBStream &));
-}
-
-}
-}
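Static API entry points get the same treatment but, as in SBEvent::GetCStringFromEvent above, pass only their arguments to LLDB_INSTRUMENT_VA since there is no `this`. A sketch with the same hypothetical SBThing class and the same header assumption as before:

#include "lldb/Utility/Instrumentation.h"

namespace lldb {
// Hypothetical class, for illustration only.
class SBThing {
public:
  // Instance-less entry points record just their arguments.
  static const char *GetStaticDescription(const SBThing &thing) {
    LLDB_INSTRUMENT_VA(thing);
    return "SBThing";
  }
};
} // namespace lldb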
diff --git a/contrib/llvm-project/lldb/source/API/SBExecutionContext.cpp b/contrib/llvm-project/lldb/source/API/SBExecutionContext.cpp
index caf02b4164ea..a0b68e6efe38 100644
--- a/contrib/llvm-project/lldb/source/API/SBExecutionContext.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBExecutionContext.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBExecutionContext.h"
-#include "SBReproducerPrivate.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/API/SBFrame.h"
#include "lldb/API/SBProcess.h"
@@ -19,48 +19,43 @@
using namespace lldb;
using namespace lldb_private;
-SBExecutionContext::SBExecutionContext() : m_exe_ctx_sp() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBExecutionContext);
-}
+SBExecutionContext::SBExecutionContext() { LLDB_INSTRUMENT_VA(this); }
SBExecutionContext::SBExecutionContext(const lldb::SBExecutionContext &rhs)
: m_exe_ctx_sp(rhs.m_exe_ctx_sp) {
- LLDB_RECORD_CONSTRUCTOR(SBExecutionContext,
- (const lldb::SBExecutionContext &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
SBExecutionContext::SBExecutionContext(
lldb::ExecutionContextRefSP exe_ctx_ref_sp)
: m_exe_ctx_sp(exe_ctx_ref_sp) {
- LLDB_RECORD_CONSTRUCTOR(SBExecutionContext, (lldb::ExecutionContextRefSP),
- exe_ctx_ref_sp);
+ LLDB_INSTRUMENT_VA(this, exe_ctx_ref_sp);
}
SBExecutionContext::SBExecutionContext(const lldb::SBTarget &target)
: m_exe_ctx_sp(new ExecutionContextRef()) {
- LLDB_RECORD_CONSTRUCTOR(SBExecutionContext, (const lldb::SBTarget &), target);
+ LLDB_INSTRUMENT_VA(this, target);
m_exe_ctx_sp->SetTargetSP(target.GetSP());
}
SBExecutionContext::SBExecutionContext(const lldb::SBProcess &process)
: m_exe_ctx_sp(new ExecutionContextRef()) {
- LLDB_RECORD_CONSTRUCTOR(SBExecutionContext, (const lldb::SBProcess &),
- process);
+ LLDB_INSTRUMENT_VA(this, process);
m_exe_ctx_sp->SetProcessSP(process.GetSP());
}
SBExecutionContext::SBExecutionContext(lldb::SBThread thread)
: m_exe_ctx_sp(new ExecutionContextRef()) {
- LLDB_RECORD_CONSTRUCTOR(SBExecutionContext, (lldb::SBThread), thread);
+ LLDB_INSTRUMENT_VA(this, thread);
m_exe_ctx_sp->SetThreadPtr(thread.get());
}
SBExecutionContext::SBExecutionContext(const lldb::SBFrame &frame)
: m_exe_ctx_sp(new ExecutionContextRef()) {
- LLDB_RECORD_CONSTRUCTOR(SBExecutionContext, (const lldb::SBFrame &), frame);
+ LLDB_INSTRUMENT_VA(this, frame);
m_exe_ctx_sp->SetFrameSP(frame.GetFrameSP());
}
@@ -69,12 +64,10 @@ SBExecutionContext::~SBExecutionContext() = default;
const SBExecutionContext &SBExecutionContext::
operator=(const lldb::SBExecutionContext &rhs) {
- LLDB_RECORD_METHOD(
- const lldb::SBExecutionContext &,
- SBExecutionContext, operator=,(const lldb::SBExecutionContext &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
m_exe_ctx_sp = rhs.m_exe_ctx_sp;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
ExecutionContextRef *SBExecutionContext::get() const {
@@ -82,8 +75,7 @@ ExecutionContextRef *SBExecutionContext::get() const {
}
SBTarget SBExecutionContext::GetTarget() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBTarget, SBExecutionContext,
- GetTarget);
+ LLDB_INSTRUMENT_VA(this);
SBTarget sb_target;
if (m_exe_ctx_sp) {
@@ -91,12 +83,11 @@ SBTarget SBExecutionContext::GetTarget() const {
if (target_sp)
sb_target.SetSP(target_sp);
}
- return LLDB_RECORD_RESULT(sb_target);
+ return sb_target;
}
SBProcess SBExecutionContext::GetProcess() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBProcess, SBExecutionContext,
- GetProcess);
+ LLDB_INSTRUMENT_VA(this);
SBProcess sb_process;
if (m_exe_ctx_sp) {
@@ -104,12 +95,11 @@ SBProcess SBExecutionContext::GetProcess() const {
if (process_sp)
sb_process.SetSP(process_sp);
}
- return LLDB_RECORD_RESULT(sb_process);
+ return sb_process;
}
SBThread SBExecutionContext::GetThread() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBThread, SBExecutionContext,
- GetThread);
+ LLDB_INSTRUMENT_VA(this);
SBThread sb_thread;
if (m_exe_ctx_sp) {
@@ -117,11 +107,11 @@ SBThread SBExecutionContext::GetThread() const {
if (thread_sp)
sb_thread.SetThread(thread_sp);
}
- return LLDB_RECORD_RESULT(sb_thread);
+ return sb_thread;
}
SBFrame SBExecutionContext::GetFrame() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBFrame, SBExecutionContext, GetFrame);
+ LLDB_INSTRUMENT_VA(this);
SBFrame sb_frame;
if (m_exe_ctx_sp) {
@@ -129,34 +119,5 @@ SBFrame SBExecutionContext::GetFrame() const {
if (frame_sp)
sb_frame.SetFrameSP(frame_sp);
}
- return LLDB_RECORD_RESULT(sb_frame);
-}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBExecutionContext>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBExecutionContext, ());
- LLDB_REGISTER_CONSTRUCTOR(SBExecutionContext,
- (const lldb::SBExecutionContext &));
- LLDB_REGISTER_CONSTRUCTOR(SBExecutionContext,
- (lldb::ExecutionContextRefSP));
- LLDB_REGISTER_CONSTRUCTOR(SBExecutionContext, (const lldb::SBTarget &));
- LLDB_REGISTER_CONSTRUCTOR(SBExecutionContext, (const lldb::SBProcess &));
- LLDB_REGISTER_CONSTRUCTOR(SBExecutionContext, (lldb::SBThread));
- LLDB_REGISTER_CONSTRUCTOR(SBExecutionContext, (const lldb::SBFrame &));
- LLDB_REGISTER_METHOD(
- const lldb::SBExecutionContext &,
- SBExecutionContext, operator=,(const lldb::SBExecutionContext &));
- LLDB_REGISTER_METHOD_CONST(lldb::SBTarget, SBExecutionContext, GetTarget,
- ());
- LLDB_REGISTER_METHOD_CONST(lldb::SBProcess, SBExecutionContext, GetProcess,
- ());
- LLDB_REGISTER_METHOD_CONST(lldb::SBThread, SBExecutionContext, GetThread,
- ());
- LLDB_REGISTER_METHOD_CONST(lldb::SBFrame, SBExecutionContext, GetFrame, ());
-}
-
-}
+ return sb_frame;
}
diff --git a/contrib/llvm-project/lldb/source/API/SBExpressionOptions.cpp b/contrib/llvm-project/lldb/source/API/SBExpressionOptions.cpp
index 217e8ad5c21b..191e38fe5cfc 100644
--- a/contrib/llvm-project/lldb/source/API/SBExpressionOptions.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBExpressionOptions.cpp
@@ -7,113 +7,99 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBExpressionOptions.h"
-#include "SBReproducerPrivate.h"
#include "Utils.h"
#include "lldb/API/SBStream.h"
#include "lldb/Target/Target.h"
+#include "lldb/Utility/Instrumentation.h"
using namespace lldb;
using namespace lldb_private;
SBExpressionOptions::SBExpressionOptions()
: m_opaque_up(new EvaluateExpressionOptions()) {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBExpressionOptions);
+ LLDB_INSTRUMENT_VA(this);
}
-SBExpressionOptions::SBExpressionOptions(const SBExpressionOptions &rhs)
- : m_opaque_up() {
- LLDB_RECORD_CONSTRUCTOR(SBExpressionOptions,
- (const lldb::SBExpressionOptions &), rhs);
+SBExpressionOptions::SBExpressionOptions(const SBExpressionOptions &rhs) {
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_up = clone(rhs.m_opaque_up);
}
const SBExpressionOptions &SBExpressionOptions::
operator=(const SBExpressionOptions &rhs) {
- LLDB_RECORD_METHOD(
- const lldb::SBExpressionOptions &,
- SBExpressionOptions, operator=,(const lldb::SBExpressionOptions &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_up = clone(rhs.m_opaque_up);
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBExpressionOptions::~SBExpressionOptions() = default;
bool SBExpressionOptions::GetCoerceResultToId() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBExpressionOptions,
- GetCoerceResultToId);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->DoesCoerceToId();
}
void SBExpressionOptions::SetCoerceResultToId(bool coerce) {
- LLDB_RECORD_METHOD(void, SBExpressionOptions, SetCoerceResultToId, (bool),
- coerce);
+ LLDB_INSTRUMENT_VA(this, coerce);
m_opaque_up->SetCoerceToId(coerce);
}
bool SBExpressionOptions::GetUnwindOnError() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBExpressionOptions, GetUnwindOnError);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->DoesUnwindOnError();
}
void SBExpressionOptions::SetUnwindOnError(bool unwind) {
- LLDB_RECORD_METHOD(void, SBExpressionOptions, SetUnwindOnError, (bool),
- unwind);
+ LLDB_INSTRUMENT_VA(this, unwind);
m_opaque_up->SetUnwindOnError(unwind);
}
bool SBExpressionOptions::GetIgnoreBreakpoints() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBExpressionOptions,
- GetIgnoreBreakpoints);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->DoesIgnoreBreakpoints();
}
void SBExpressionOptions::SetIgnoreBreakpoints(bool ignore) {
- LLDB_RECORD_METHOD(void, SBExpressionOptions, SetIgnoreBreakpoints, (bool),
- ignore);
+ LLDB_INSTRUMENT_VA(this, ignore);
m_opaque_up->SetIgnoreBreakpoints(ignore);
}
lldb::DynamicValueType SBExpressionOptions::GetFetchDynamicValue() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::DynamicValueType, SBExpressionOptions,
- GetFetchDynamicValue);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetUseDynamic();
}
void SBExpressionOptions::SetFetchDynamicValue(lldb::DynamicValueType dynamic) {
- LLDB_RECORD_METHOD(void, SBExpressionOptions, SetFetchDynamicValue,
- (lldb::DynamicValueType), dynamic);
+ LLDB_INSTRUMENT_VA(this, dynamic);
m_opaque_up->SetUseDynamic(dynamic);
}
uint32_t SBExpressionOptions::GetTimeoutInMicroSeconds() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBExpressionOptions,
- GetTimeoutInMicroSeconds);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetTimeout() ? m_opaque_up->GetTimeout()->count() : 0;
}
void SBExpressionOptions::SetTimeoutInMicroSeconds(uint32_t timeout) {
- LLDB_RECORD_METHOD(void, SBExpressionOptions, SetTimeoutInMicroSeconds,
- (uint32_t), timeout);
+ LLDB_INSTRUMENT_VA(this, timeout);
m_opaque_up->SetTimeout(timeout == 0 ? Timeout<std::micro>(llvm::None)
: std::chrono::microseconds(timeout));
}
uint32_t SBExpressionOptions::GetOneThreadTimeoutInMicroSeconds() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBExpressionOptions,
- GetOneThreadTimeoutInMicroSeconds);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetOneThreadTimeout()
? m_opaque_up->GetOneThreadTimeout()->count()
@@ -121,8 +107,7 @@ uint32_t SBExpressionOptions::GetOneThreadTimeoutInMicroSeconds() const {
}
void SBExpressionOptions::SetOneThreadTimeoutInMicroSeconds(uint32_t timeout) {
- LLDB_RECORD_METHOD(void, SBExpressionOptions,
- SetOneThreadTimeoutInMicroSeconds, (uint32_t), timeout);
+ LLDB_INSTRUMENT_VA(this, timeout);
m_opaque_up->SetOneThreadTimeout(timeout == 0
? Timeout<std::micro>(llvm::None)
@@ -130,148 +115,135 @@ void SBExpressionOptions::SetOneThreadTimeoutInMicroSeconds(uint32_t timeout) {
}
bool SBExpressionOptions::GetTryAllThreads() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBExpressionOptions, GetTryAllThreads);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetTryAllThreads();
}
void SBExpressionOptions::SetTryAllThreads(bool run_others) {
- LLDB_RECORD_METHOD(void, SBExpressionOptions, SetTryAllThreads, (bool),
- run_others);
+ LLDB_INSTRUMENT_VA(this, run_others);
m_opaque_up->SetTryAllThreads(run_others);
}
bool SBExpressionOptions::GetStopOthers() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBExpressionOptions, GetStopOthers);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetStopOthers();
}
void SBExpressionOptions::SetStopOthers(bool run_others) {
- LLDB_RECORD_METHOD(void, SBExpressionOptions, SetStopOthers, (bool),
- run_others);
+ LLDB_INSTRUMENT_VA(this, run_others);
m_opaque_up->SetStopOthers(run_others);
}
bool SBExpressionOptions::GetTrapExceptions() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBExpressionOptions,
- GetTrapExceptions);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetTrapExceptions();
}
void SBExpressionOptions::SetTrapExceptions(bool trap_exceptions) {
- LLDB_RECORD_METHOD(void, SBExpressionOptions, SetTrapExceptions, (bool),
- trap_exceptions);
+ LLDB_INSTRUMENT_VA(this, trap_exceptions);
m_opaque_up->SetTrapExceptions(trap_exceptions);
}
void SBExpressionOptions::SetLanguage(lldb::LanguageType language) {
- LLDB_RECORD_METHOD(void, SBExpressionOptions, SetLanguage,
- (lldb::LanguageType), language);
+ LLDB_INSTRUMENT_VA(this, language);
m_opaque_up->SetLanguage(language);
}
void SBExpressionOptions::SetCancelCallback(
lldb::ExpressionCancelCallback callback, void *baton) {
- LLDB_RECORD_DUMMY(void, SBExpressionOptions, SetCancelCallback,
- (lldb::ExpressionCancelCallback, void *), callback, baton);
+ LLDB_INSTRUMENT_VA(this, callback, baton);
m_opaque_up->SetCancelCallback(callback, baton);
}
bool SBExpressionOptions::GetGenerateDebugInfo() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBExpressionOptions, GetGenerateDebugInfo);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetGenerateDebugInfo();
}
void SBExpressionOptions::SetGenerateDebugInfo(bool b) {
- LLDB_RECORD_METHOD(void, SBExpressionOptions, SetGenerateDebugInfo, (bool),
- b);
+ LLDB_INSTRUMENT_VA(this, b);
return m_opaque_up->SetGenerateDebugInfo(b);
}
bool SBExpressionOptions::GetSuppressPersistentResult() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBExpressionOptions,
- GetSuppressPersistentResult);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetResultIsInternal();
}
void SBExpressionOptions::SetSuppressPersistentResult(bool b) {
- LLDB_RECORD_METHOD(void, SBExpressionOptions, SetSuppressPersistentResult,
- (bool), b);
+ LLDB_INSTRUMENT_VA(this, b);
return m_opaque_up->SetResultIsInternal(b);
}
const char *SBExpressionOptions::GetPrefix() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBExpressionOptions,
- GetPrefix);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetPrefix();
}
void SBExpressionOptions::SetPrefix(const char *prefix) {
- LLDB_RECORD_METHOD(void, SBExpressionOptions, SetPrefix, (const char *),
- prefix);
+ LLDB_INSTRUMENT_VA(this, prefix);
return m_opaque_up->SetPrefix(prefix);
}
bool SBExpressionOptions::GetAutoApplyFixIts() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBExpressionOptions, GetAutoApplyFixIts);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetAutoApplyFixIts();
}
void SBExpressionOptions::SetAutoApplyFixIts(bool b) {
- LLDB_RECORD_METHOD(void, SBExpressionOptions, SetAutoApplyFixIts, (bool), b);
+ LLDB_INSTRUMENT_VA(this, b);
return m_opaque_up->SetAutoApplyFixIts(b);
}
uint64_t SBExpressionOptions::GetRetriesWithFixIts() {
- LLDB_RECORD_METHOD_NO_ARGS(uint64_t, SBExpressionOptions,
- GetRetriesWithFixIts);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetRetriesWithFixIts();
}
void SBExpressionOptions::SetRetriesWithFixIts(uint64_t retries) {
- LLDB_RECORD_METHOD(void, SBExpressionOptions, SetRetriesWithFixIts,
- (uint64_t), retries);
+ LLDB_INSTRUMENT_VA(this, retries);
return m_opaque_up->SetRetriesWithFixIts(retries);
}
bool SBExpressionOptions::GetTopLevel() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBExpressionOptions, GetTopLevel);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetExecutionPolicy() == eExecutionPolicyTopLevel;
}
void SBExpressionOptions::SetTopLevel(bool b) {
- LLDB_RECORD_METHOD(void, SBExpressionOptions, SetTopLevel, (bool), b);
+ LLDB_INSTRUMENT_VA(this, b);
m_opaque_up->SetExecutionPolicy(b ? eExecutionPolicyTopLevel
: m_opaque_up->default_execution_policy);
}
bool SBExpressionOptions::GetAllowJIT() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBExpressionOptions, GetAllowJIT);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetExecutionPolicy() != eExecutionPolicyNever;
}
void SBExpressionOptions::SetAllowJIT(bool allow) {
- LLDB_RECORD_METHOD(void, SBExpressionOptions, SetAllowJIT, (bool), allow);
+ LLDB_INSTRUMENT_VA(this, allow);
m_opaque_up->SetExecutionPolicy(allow ? m_opaque_up->default_execution_policy
: eExecutionPolicyNever);
@@ -284,69 +256,3 @@ EvaluateExpressionOptions *SBExpressionOptions::get() const {
EvaluateExpressionOptions &SBExpressionOptions::ref() const {
return *(m_opaque_up.get());
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBExpressionOptions>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBExpressionOptions, ());
- LLDB_REGISTER_CONSTRUCTOR(SBExpressionOptions,
- (const lldb::SBExpressionOptions &));
- LLDB_REGISTER_METHOD(
- const lldb::SBExpressionOptions &,
- SBExpressionOptions, operator=,(const lldb::SBExpressionOptions &));
- LLDB_REGISTER_METHOD_CONST(bool, SBExpressionOptions, GetCoerceResultToId,
- ());
- LLDB_REGISTER_METHOD(void, SBExpressionOptions, SetCoerceResultToId,
- (bool));
- LLDB_REGISTER_METHOD_CONST(bool, SBExpressionOptions, GetUnwindOnError, ());
- LLDB_REGISTER_METHOD(void, SBExpressionOptions, SetUnwindOnError, (bool));
- LLDB_REGISTER_METHOD_CONST(bool, SBExpressionOptions, GetIgnoreBreakpoints,
- ());
- LLDB_REGISTER_METHOD(void, SBExpressionOptions, SetIgnoreBreakpoints,
- (bool));
- LLDB_REGISTER_METHOD_CONST(lldb::DynamicValueType, SBExpressionOptions,
- GetFetchDynamicValue, ());
- LLDB_REGISTER_METHOD(void, SBExpressionOptions, SetFetchDynamicValue,
- (lldb::DynamicValueType));
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBExpressionOptions,
- GetTimeoutInMicroSeconds, ());
- LLDB_REGISTER_METHOD(void, SBExpressionOptions, SetTimeoutInMicroSeconds,
- (uint32_t));
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBExpressionOptions,
- GetOneThreadTimeoutInMicroSeconds, ());
- LLDB_REGISTER_METHOD(void, SBExpressionOptions,
- SetOneThreadTimeoutInMicroSeconds, (uint32_t));
- LLDB_REGISTER_METHOD_CONST(bool, SBExpressionOptions, GetTryAllThreads, ());
- LLDB_REGISTER_METHOD(void, SBExpressionOptions, SetTryAllThreads, (bool));
- LLDB_REGISTER_METHOD_CONST(bool, SBExpressionOptions, GetStopOthers, ());
- LLDB_REGISTER_METHOD(void, SBExpressionOptions, SetStopOthers, (bool));
- LLDB_REGISTER_METHOD_CONST(bool, SBExpressionOptions, GetTrapExceptions,
- ());
- LLDB_REGISTER_METHOD(void, SBExpressionOptions, SetTrapExceptions, (bool));
- LLDB_REGISTER_METHOD(void, SBExpressionOptions, SetLanguage,
- (lldb::LanguageType));
- LLDB_REGISTER_METHOD(bool, SBExpressionOptions, GetGenerateDebugInfo, ());
- LLDB_REGISTER_METHOD(void, SBExpressionOptions, SetGenerateDebugInfo,
- (bool));
- LLDB_REGISTER_METHOD(bool, SBExpressionOptions, GetSuppressPersistentResult,
- ());
- LLDB_REGISTER_METHOD(void, SBExpressionOptions, SetSuppressPersistentResult,
- (bool));
- LLDB_REGISTER_METHOD_CONST(const char *, SBExpressionOptions, GetPrefix,
- ());
- LLDB_REGISTER_METHOD(void, SBExpressionOptions, SetPrefix, (const char *));
- LLDB_REGISTER_METHOD(bool, SBExpressionOptions, GetAutoApplyFixIts, ());
- LLDB_REGISTER_METHOD(void, SBExpressionOptions, SetAutoApplyFixIts, (bool));
- LLDB_REGISTER_METHOD(bool, SBExpressionOptions, GetTopLevel, ());
- LLDB_REGISTER_METHOD(void, SBExpressionOptions, SetTopLevel, (bool));
- LLDB_REGISTER_METHOD(bool, SBExpressionOptions, GetAllowJIT, ());
- LLDB_REGISTER_METHOD(void, SBExpressionOptions, SetAllowJIT, (bool));
- LLDB_REGISTER_METHOD(uint64_t, SBExpressionOptions, GetRetriesWithFixIts, ());
- LLDB_REGISTER_METHOD(void, SBExpressionOptions, SetRetriesWithFixIts,
- (uint64_t));
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBFile.cpp b/contrib/llvm-project/lldb/source/API/SBFile.cpp
index 41ccdbe76b91..0db859c3b746 100644
--- a/contrib/llvm-project/lldb/source/API/SBFile.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBFile.cpp
@@ -7,9 +7,9 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBFile.h"
-#include "SBReproducerPrivate.h"
#include "lldb/API/SBError.h"
#include "lldb/Host/File.h"
+#include "lldb/Utility/Instrumentation.h"
using namespace lldb;
using namespace lldb_private;
@@ -19,33 +19,31 @@ SBFile::~SBFile() = default;
SBFile::SBFile(FileSP file_sp) : m_opaque_sp(file_sp) {
// We have no way to capture the incoming FileSP as the class isn't
// instrumented, so pretend that it's always null.
- LLDB_RECORD_CONSTRUCTOR(SBFile, (lldb::FileSP), nullptr);
+ LLDB_INSTRUMENT_VA(this, file_sp);
}
SBFile::SBFile(const SBFile &rhs) : m_opaque_sp(rhs.m_opaque_sp) {
- LLDB_RECORD_CONSTRUCTOR(SBFile, (const lldb::SBFile&), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
SBFile &SBFile ::operator=(const SBFile &rhs) {
- LLDB_RECORD_METHOD(lldb::SBFile &,
- SBFile, operator=,(const lldb::SBFile &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_sp = rhs.m_opaque_sp;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
-SBFile::SBFile() { LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBFile); }
+SBFile::SBFile() { LLDB_INSTRUMENT_VA(this); }
SBFile::SBFile(FILE *file, bool transfer_ownership) {
- LLDB_RECORD_CONSTRUCTOR(SBFile, (FILE *, bool), file, transfer_ownership);
+ LLDB_INSTRUMENT_VA(this, file, transfer_ownership);
m_opaque_sp = std::make_shared<NativeFile>(file, transfer_ownership);
}
SBFile::SBFile(int fd, const char *mode, bool transfer_owndership) {
- LLDB_RECORD_CONSTRUCTOR(SBFile, (int, const char *, bool), fd, mode,
- transfer_owndership);
+ LLDB_INSTRUMENT_VA(this, fd, mode, transfer_owndership);
auto options = File::GetOptionsFromMode(mode);
if (!options) {
@@ -57,8 +55,7 @@ SBFile::SBFile(int fd, const char *mode, bool transfer_owndership) {
}
SBError SBFile::Read(uint8_t *buf, size_t num_bytes, size_t *bytes_read) {
- LLDB_RECORD_METHOD(lldb::SBError, SBFile, Read, (uint8_t *, size_t, size_t *),
- buf, num_bytes, bytes_read);
+ LLDB_INSTRUMENT_VA(this, buf, num_bytes, bytes_read);
SBError error;
if (!m_opaque_sp) {
@@ -69,14 +66,12 @@ SBError SBFile::Read(uint8_t *buf, size_t num_bytes, size_t *bytes_read) {
error.SetError(status);
*bytes_read = num_bytes;
}
- return LLDB_RECORD_RESULT(error);
+ return error;
}
SBError SBFile::Write(const uint8_t *buf, size_t num_bytes,
size_t *bytes_written) {
- LLDB_RECORD_METHOD(lldb::SBError, SBFile, Write,
- (const uint8_t *, size_t, size_t *), buf, num_bytes,
- bytes_written);
+ LLDB_INSTRUMENT_VA(this, buf, num_bytes, bytes_written);
SBError error;
if (!m_opaque_sp) {
@@ -87,11 +82,11 @@ SBError SBFile::Write(const uint8_t *buf, size_t num_bytes,
error.SetError(status);
*bytes_written = num_bytes;
}
- return LLDB_RECORD_RESULT(error);
+ return error;
}
SBError SBFile::Flush() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBError, SBFile, Flush);
+ LLDB_INSTRUMENT_VA(this);
SBError error;
if (!m_opaque_sp) {
@@ -100,59 +95,35 @@ SBError SBFile::Flush() {
Status status = m_opaque_sp->Flush();
error.SetError(status);
}
- return LLDB_RECORD_RESULT(error);
+ return error;
}
bool SBFile::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBFile, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp && m_opaque_sp->IsValid();
}
SBError SBFile::Close() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBError, SBFile, Close);
+ LLDB_INSTRUMENT_VA(this);
SBError error;
if (m_opaque_sp) {
Status status = m_opaque_sp->Close();
error.SetError(status);
}
- return LLDB_RECORD_RESULT(error);
+ return error;
}
SBFile::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBFile, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return IsValid();
}
bool SBFile::operator!() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBFile, operator!);
+ LLDB_INSTRUMENT_VA(this);
return !IsValid();
}
FileSP SBFile::GetFile() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(FileSP, SBFile, GetFile);
- return LLDB_RECORD_RESULT(m_opaque_sp);
+ LLDB_INSTRUMENT_VA(this);
+ return m_opaque_sp;
}
-
-namespace lldb_private {
-namespace repro {
-
-template <> void RegisterMethods<SBFile>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBFile, ());
- LLDB_REGISTER_CONSTRUCTOR(SBFile, (FileSP));
- LLDB_REGISTER_CONSTRUCTOR(SBFile, (const SBFile&));
- LLDB_REGISTER_CONSTRUCTOR(SBFile, (FILE *, bool));
- LLDB_REGISTER_CONSTRUCTOR(SBFile, (int, const char *, bool));
- LLDB_REGISTER_METHOD(SBFile&, SBFile, operator=,(const SBFile&));
- LLDB_REGISTER_METHOD(lldb::SBError, SBFile, Flush, ());
- LLDB_REGISTER_METHOD(lldb::SBError, SBFile, Read,
- (uint8_t *, size_t, size_t *));
- LLDB_REGISTER_METHOD(lldb::SBError, SBFile, Write,
- (const uint8_t *, size_t, size_t *));
- LLDB_REGISTER_METHOD_CONST(bool, SBFile, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBFile, operator bool,());
- LLDB_REGISTER_METHOD_CONST(bool, SBFile, operator!,());
- LLDB_REGISTER_METHOD_CONST(FileSP, SBFile, GetFile, ());
- LLDB_REGISTER_METHOD(lldb::SBError, SBFile, Close, ());
-}
-} // namespace repro
-} // namespace lldb_private
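The SBFile hunks above establish the pattern that repeats through every file in this part of the commit: each SB API entry point swaps the typed LLDB_RECORD_* and LLDB_RECORD_RESULT reproducer macros for a single LLDB_INSTRUMENT_VA call and returns its result directly, and the per-class RegisterMethods<> specialization at the bottom of the file is deleted. The sketch below illustrates that before/after shape on a made-up class; the toy macro body only prints the enclosing function name so the example stands alone, and it is not the real lldb/Utility/Instrumentation.h implementation.

#include <cstdio>
#include <memory>

// Stand-in for the real macro: just report the enclosing function.
#define LLDB_INSTRUMENT_VA(...) \
  std::fprintf(stderr, "[instrumented] %s\n", __func__)

namespace demo {
class SBThing {
public:
  SBThing() { LLDB_INSTRUMENT_VA(this); }

  SBThing(const SBThing &rhs) : m_opaque_sp(rhs.m_opaque_sp) {
    LLDB_INSTRUMENT_VA(this, rhs);
  }

  SBThing &operator=(const SBThing &rhs) {
    // Before: LLDB_RECORD_METHOD(SBThing &, SBThing, operator=,
    //                            (const SBThing &), rhs);
    //         ... return LLDB_RECORD_RESULT(*this);
    // After: one macro up front, plain return at the end.
    LLDB_INSTRUMENT_VA(this, rhs);
    if (this != &rhs)
      m_opaque_sp = rhs.m_opaque_sp;
    return *this;
  }

  bool IsValid() const {
    LLDB_INSTRUMENT_VA(this);
    return static_cast<bool>(m_opaque_sp);
  }

private:
  std::shared_ptr<int> m_opaque_sp;
};
} // namespace demo

int main() {
  demo::SBThing a, b;
  a = b;
  std::fprintf(stderr, "valid: %d\n", a.IsValid());
  return 0;
}

Because the new macro is variadic, one spelling covers constructors, operators, and const methods alike, which is why the many distinct LLDB_RECORD_* variants above collapse into the same call in every file that follows.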
diff --git a/contrib/llvm-project/lldb/source/API/SBFileSpec.cpp b/contrib/llvm-project/lldb/source/API/SBFileSpec.cpp
index 0a6b63bb460c..2bec9a7a1e77 100644
--- a/contrib/llvm-project/lldb/source/API/SBFileSpec.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBFileSpec.cpp
@@ -7,12 +7,12 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBFileSpec.h"
-#include "SBReproducerPrivate.h"
#include "Utils.h"
#include "lldb/API/SBStream.h"
#include "lldb/Host/FileSystem.h"
#include "lldb/Host/PosixApi.h"
#include "lldb/Utility/FileSpec.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Utility/Stream.h"
#include "llvm/ADT/SmallString.h"
@@ -24,11 +24,11 @@ using namespace lldb;
using namespace lldb_private;
SBFileSpec::SBFileSpec() : m_opaque_up(new lldb_private::FileSpec()) {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBFileSpec);
+ LLDB_INSTRUMENT_VA(this);
}
-SBFileSpec::SBFileSpec(const SBFileSpec &rhs) : m_opaque_up() {
- LLDB_RECORD_CONSTRUCTOR(SBFileSpec, (const lldb::SBFileSpec &), rhs);
+SBFileSpec::SBFileSpec(const SBFileSpec &rhs) {
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_up = clone(rhs.m_opaque_up);
}
@@ -38,14 +38,14 @@ SBFileSpec::SBFileSpec(const lldb_private::FileSpec &fspec)
// Deprecated!!!
SBFileSpec::SBFileSpec(const char *path) : m_opaque_up(new FileSpec(path)) {
- LLDB_RECORD_CONSTRUCTOR(SBFileSpec, (const char *), path);
+ LLDB_INSTRUMENT_VA(this, path);
FileSystem::Instance().Resolve(*m_opaque_up);
}
SBFileSpec::SBFileSpec(const char *path, bool resolve)
: m_opaque_up(new FileSpec(path)) {
- LLDB_RECORD_CONSTRUCTOR(SBFileSpec, (const char *, bool), path, resolve);
+ LLDB_INSTRUMENT_VA(this, path, resolve);
if (resolve)
FileSystem::Instance().Resolve(*m_opaque_up);
@@ -54,55 +54,50 @@ SBFileSpec::SBFileSpec(const char *path, bool resolve)
SBFileSpec::~SBFileSpec() = default;
const SBFileSpec &SBFileSpec::operator=(const SBFileSpec &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBFileSpec &,
- SBFileSpec, operator=,(const lldb::SBFileSpec &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_up = clone(rhs.m_opaque_up);
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
bool SBFileSpec::operator==(const SBFileSpec &rhs) const {
- LLDB_RECORD_METHOD_CONST(bool, SBFileSpec, operator==,(const SBFileSpec &rhs),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
return ref() == rhs.ref();
}
bool SBFileSpec::operator!=(const SBFileSpec &rhs) const {
- LLDB_RECORD_METHOD_CONST(bool, SBFileSpec, operator!=,(const SBFileSpec &rhs),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
return !(*this == rhs);
}
bool SBFileSpec::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBFileSpec, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBFileSpec::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBFileSpec, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->operator bool();
}
bool SBFileSpec::Exists() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBFileSpec, Exists);
+ LLDB_INSTRUMENT_VA(this);
return FileSystem::Instance().Exists(*m_opaque_up);
}
bool SBFileSpec::ResolveExecutableLocation() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBFileSpec, ResolveExecutableLocation);
+ LLDB_INSTRUMENT_VA(this);
return FileSystem::Instance().ResolveExecutableLocation(*m_opaque_up);
}
int SBFileSpec::ResolvePath(const char *src_path, char *dst_path,
size_t dst_len) {
- LLDB_RECORD_STATIC_METHOD(int, SBFileSpec, ResolvePath,
- (const char *, char *, size_t), src_path, dst_path,
- dst_len);
+ LLDB_INSTRUMENT_VA(src_path, dst_path, dst_len);
llvm::SmallString<64> result(src_path);
FileSystem::Instance().Resolve(result);
@@ -111,13 +106,13 @@ int SBFileSpec::ResolvePath(const char *src_path, char *dst_path,
}
const char *SBFileSpec::GetFilename() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBFileSpec, GetFilename);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetFilename().AsCString();
}
const char *SBFileSpec::GetDirectory() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBFileSpec, GetDirectory);
+ LLDB_INSTRUMENT_VA(this);
FileSpec directory{*m_opaque_up};
directory.GetFilename().Clear();
@@ -125,7 +120,7 @@ const char *SBFileSpec::GetDirectory() const {
}
void SBFileSpec::SetFilename(const char *filename) {
- LLDB_RECORD_METHOD(void, SBFileSpec, SetFilename, (const char *), filename);
+ LLDB_INSTRUMENT_VA(this, filename);
if (filename && filename[0])
m_opaque_up->GetFilename().SetCString(filename);
@@ -134,7 +129,7 @@ void SBFileSpec::SetFilename(const char *filename) {
}
void SBFileSpec::SetDirectory(const char *directory) {
- LLDB_RECORD_METHOD(void, SBFileSpec, SetDirectory, (const char *), directory);
+ LLDB_INSTRUMENT_VA(this, directory);
if (directory && directory[0])
m_opaque_up->GetDirectory().SetCString(directory);
@@ -143,8 +138,7 @@ void SBFileSpec::SetDirectory(const char *directory) {
}
uint32_t SBFileSpec::GetPath(char *dst_path, size_t dst_len) const {
- LLDB_RECORD_CHAR_PTR_METHOD_CONST(uint32_t, SBFileSpec, GetPath,
- (char *, size_t), dst_path, "", dst_len);
+ LLDB_INSTRUMENT_VA(this, dst_path, dst_len);
uint32_t result = m_opaque_up->GetPath(dst_path, dst_len);
@@ -172,8 +166,7 @@ void SBFileSpec::SetFileSpec(const lldb_private::FileSpec &fs) {
}
bool SBFileSpec::GetDescription(SBStream &description) const {
- LLDB_RECORD_METHOD_CONST(bool, SBFileSpec, GetDescription, (lldb::SBStream &),
- description);
+ LLDB_INSTRUMENT_VA(this, description);
Stream &strm = description.ref();
char path[PATH_MAX];
@@ -183,41 +176,7 @@ bool SBFileSpec::GetDescription(SBStream &description) const {
}
void SBFileSpec::AppendPathComponent(const char *fn) {
- LLDB_RECORD_METHOD(void, SBFileSpec, AppendPathComponent, (const char *), fn);
+ LLDB_INSTRUMENT_VA(this, fn);
m_opaque_up->AppendPathComponent(fn);
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBFileSpec>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBFileSpec, ());
- LLDB_REGISTER_CONSTRUCTOR(SBFileSpec, (const lldb::SBFileSpec &));
- LLDB_REGISTER_CONSTRUCTOR(SBFileSpec, (const char *));
- LLDB_REGISTER_CONSTRUCTOR(SBFileSpec, (const char *, bool));
- LLDB_REGISTER_METHOD(const lldb::SBFileSpec &,
- SBFileSpec, operator=,(const lldb::SBFileSpec &));
- LLDB_REGISTER_METHOD_CONST(bool,
- SBFileSpec, operator==,(const lldb::SBFileSpec &));
- LLDB_REGISTER_METHOD_CONST(bool,
- SBFileSpec, operator!=,(const lldb::SBFileSpec &));
- LLDB_REGISTER_METHOD_CONST(bool, SBFileSpec, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBFileSpec, operator bool, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBFileSpec, Exists, ());
- LLDB_REGISTER_METHOD(bool, SBFileSpec, ResolveExecutableLocation, ());
- LLDB_REGISTER_STATIC_METHOD(int, SBFileSpec, ResolvePath,
- (const char *, char *, size_t));
- LLDB_REGISTER_METHOD_CONST(const char *, SBFileSpec, GetFilename, ());
- LLDB_REGISTER_METHOD_CONST(const char *, SBFileSpec, GetDirectory, ());
- LLDB_REGISTER_METHOD(void, SBFileSpec, SetFilename, (const char *));
- LLDB_REGISTER_METHOD(void, SBFileSpec, SetDirectory, (const char *));
- LLDB_REGISTER_METHOD_CONST(bool, SBFileSpec, GetDescription,
- (lldb::SBStream &));
- LLDB_REGISTER_METHOD(void, SBFileSpec, AppendPathComponent, (const char *));
- LLDB_REGISTER_CHAR_PTR_METHOD_CONST(uint32_t, SBFileSpec, GetPath);
-}
-
-}
-}
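One detail worth noting from the SBFileSpec::GetPath hunk above: the old scheme needed a dedicated LLDB_RECORD_CHAR_PTR_METHOD_CONST form with a placeholder value for the destination buffer, whereas LLDB_INSTRUMENT_VA forwards the pointer and length like any other arguments. Below is a minimal sketch of that out-buffer shape, using an assumed stand-in macro rather than the real header.

#include <cstddef>
#include <cstdio>
#include <cstring>
#include <string>

#define LLDB_INSTRUMENT_VA(...) std::fprintf(stderr, "[api] %s\n", __func__)

class PathDemo {
public:
  explicit PathDemo(std::string path) : m_path(std::move(path)) {}

  // Same shape as SBFileSpec::GetPath: fill a caller-provided buffer and
  // report how many characters were written.
  std::size_t GetPath(char *dst_path, std::size_t dst_len) const {
    LLDB_INSTRUMENT_VA(this, dst_path, dst_len);
    if (!dst_path || dst_len == 0)
      return 0;
    std::strncpy(dst_path, m_path.c_str(), dst_len - 1);
    dst_path[dst_len - 1] = '\0';
    return std::strlen(dst_path);
  }

private:
  std::string m_path;
};

int main() {
  char buf[64];
  PathDemo demo("/tmp/example/file.txt");
  std::size_t n = demo.GetPath(buf, sizeof(buf));
  std::fprintf(stderr, "wrote %zu bytes: %s\n", n, buf);
  return 0;
}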
diff --git a/contrib/llvm-project/lldb/source/API/SBFileSpecList.cpp b/contrib/llvm-project/lldb/source/API/SBFileSpecList.cpp
index 768ff0affd15..cf81c4234087 100644
--- a/contrib/llvm-project/lldb/source/API/SBFileSpecList.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBFileSpecList.cpp
@@ -7,13 +7,13 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBFileSpecList.h"
-#include "SBReproducerPrivate.h"
#include "Utils.h"
#include "lldb/API/SBFileSpec.h"
#include "lldb/API/SBStream.h"
#include "lldb/Core/FileSpecList.h"
#include "lldb/Host/PosixApi.h"
#include "lldb/Utility/FileSpec.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Utility/Stream.h"
#include <climits>
@@ -22,12 +22,11 @@ using namespace lldb;
using namespace lldb_private;
SBFileSpecList::SBFileSpecList() : m_opaque_up(new FileSpecList()) {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBFileSpecList);
+ LLDB_INSTRUMENT_VA(this);
}
-SBFileSpecList::SBFileSpecList(const SBFileSpecList &rhs) : m_opaque_up() {
- LLDB_RECORD_CONSTRUCTOR(SBFileSpecList, (const lldb::SBFileSpecList &), rhs);
-
+SBFileSpecList::SBFileSpecList(const SBFileSpecList &rhs) {
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_up = clone(rhs.m_opaque_up);
}
@@ -35,57 +34,50 @@ SBFileSpecList::SBFileSpecList(const SBFileSpecList &rhs) : m_opaque_up() {
SBFileSpecList::~SBFileSpecList() = default;
const SBFileSpecList &SBFileSpecList::operator=(const SBFileSpecList &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBFileSpecList &,
- SBFileSpecList, operator=,(const lldb::SBFileSpecList &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_up = clone(rhs.m_opaque_up);
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
uint32_t SBFileSpecList::GetSize() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBFileSpecList, GetSize);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetSize();
}
void SBFileSpecList::Append(const SBFileSpec &sb_file) {
- LLDB_RECORD_METHOD(void, SBFileSpecList, Append, (const lldb::SBFileSpec &),
- sb_file);
+ LLDB_INSTRUMENT_VA(this, sb_file);
m_opaque_up->Append(sb_file.ref());
}
bool SBFileSpecList::AppendIfUnique(const SBFileSpec &sb_file) {
- LLDB_RECORD_METHOD(bool, SBFileSpecList, AppendIfUnique,
- (const lldb::SBFileSpec &), sb_file);
+ LLDB_INSTRUMENT_VA(this, sb_file);
return m_opaque_up->AppendIfUnique(sb_file.ref());
}
void SBFileSpecList::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBFileSpecList, Clear);
+ LLDB_INSTRUMENT_VA(this);
m_opaque_up->Clear();
}
uint32_t SBFileSpecList::FindFileIndex(uint32_t idx, const SBFileSpec &sb_file,
bool full) {
- LLDB_RECORD_METHOD(uint32_t, SBFileSpecList, FindFileIndex,
- (uint32_t, const lldb::SBFileSpec &, bool), idx, sb_file,
- full);
+ LLDB_INSTRUMENT_VA(this, idx, sb_file, full);
return m_opaque_up->FindFileIndex(idx, sb_file.ref(), full);
}
const SBFileSpec SBFileSpecList::GetFileSpecAtIndex(uint32_t idx) const {
- LLDB_RECORD_METHOD_CONST(const lldb::SBFileSpec, SBFileSpecList,
- GetFileSpecAtIndex, (uint32_t), idx);
+ LLDB_INSTRUMENT_VA(this, idx);
SBFileSpec new_spec;
new_spec.SetFileSpec(m_opaque_up->GetFileSpecAtIndex(idx));
- return LLDB_RECORD_RESULT(new_spec);
+ return new_spec;
}
const lldb_private::FileSpecList *SBFileSpecList::operator->() const {
@@ -105,8 +97,7 @@ const lldb_private::FileSpecList &SBFileSpecList::ref() const {
}
bool SBFileSpecList::GetDescription(SBStream &description) const {
- LLDB_RECORD_METHOD_CONST(bool, SBFileSpecList, GetDescription,
- (lldb::SBStream &), description);
+ LLDB_INSTRUMENT_VA(this, description);
Stream &strm = description.ref();
@@ -123,30 +114,3 @@ bool SBFileSpecList::GetDescription(SBStream &description) const {
return true;
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBFileSpecList>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBFileSpecList, ());
- LLDB_REGISTER_CONSTRUCTOR(SBFileSpecList, (const lldb::SBFileSpecList &));
- LLDB_REGISTER_METHOD(
- const lldb::SBFileSpecList &,
- SBFileSpecList, operator=,(const lldb::SBFileSpecList &));
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBFileSpecList, GetSize, ());
- LLDB_REGISTER_METHOD(void, SBFileSpecList, Append,
- (const lldb::SBFileSpec &));
- LLDB_REGISTER_METHOD(bool, SBFileSpecList, AppendIfUnique,
- (const lldb::SBFileSpec &));
- LLDB_REGISTER_METHOD(void, SBFileSpecList, Clear, ());
- LLDB_REGISTER_METHOD(uint32_t, SBFileSpecList, FindFileIndex,
- (uint32_t, const lldb::SBFileSpec &, bool));
- LLDB_REGISTER_METHOD_CONST(const lldb::SBFileSpec, SBFileSpecList,
- GetFileSpecAtIndex, (uint32_t));
- LLDB_REGISTER_METHOD_CONST(bool, SBFileSpecList, GetDescription,
- (lldb::SBStream &));
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBFrame.cpp b/contrib/llvm-project/lldb/source/API/SBFrame.cpp
index c6bc3288c4b2..ffbbed00f8e2 100644
--- a/contrib/llvm-project/lldb/source/API/SBFrame.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBFrame.cpp
@@ -14,7 +14,6 @@
#include "lldb/lldb-types.h"
-#include "SBReproducerPrivate.h"
#include "Utils.h"
#include "lldb/Core/Address.h"
#include "lldb/Core/StreamFile.h"
@@ -38,6 +37,7 @@
#include "lldb/Target/Target.h"
#include "lldb/Target/Thread.h"
#include "lldb/Utility/ConstString.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Utility/Stream.h"
#include "lldb/API/SBAddress.h"
@@ -55,17 +55,16 @@ using namespace lldb;
using namespace lldb_private;
SBFrame::SBFrame() : m_opaque_sp(new ExecutionContextRef()) {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBFrame);
+ LLDB_INSTRUMENT_VA(this);
}
SBFrame::SBFrame(const StackFrameSP &lldb_object_sp)
: m_opaque_sp(new ExecutionContextRef(lldb_object_sp)) {
- LLDB_RECORD_CONSTRUCTOR(SBFrame, (const lldb::StackFrameSP &),
- lldb_object_sp);
+ LLDB_INSTRUMENT_VA(this, lldb_object_sp);
}
-SBFrame::SBFrame(const SBFrame &rhs) : m_opaque_sp() {
- LLDB_RECORD_CONSTRUCTOR(SBFrame, (const lldb::SBFrame &), rhs);
+SBFrame::SBFrame(const SBFrame &rhs) {
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_sp = clone(rhs.m_opaque_sp);
}
@@ -73,12 +72,11 @@ SBFrame::SBFrame(const SBFrame &rhs) : m_opaque_sp() {
SBFrame::~SBFrame() = default;
const SBFrame &SBFrame::operator=(const SBFrame &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBFrame &,
- SBFrame, operator=,(const lldb::SBFrame &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_sp = clone(rhs.m_opaque_sp);
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
StackFrameSP SBFrame::GetFrameSP() const {
@@ -90,11 +88,11 @@ void SBFrame::SetFrameSP(const StackFrameSP &lldb_object_sp) {
}
bool SBFrame::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBFrame, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBFrame::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBFrame, operator bool);
+ LLDB_INSTRUMENT_VA(this);
std::unique_lock<std::recursive_mutex> lock;
ExecutionContext exe_ctx(m_opaque_sp.get(), lock);
@@ -112,8 +110,7 @@ SBFrame::operator bool() const {
}
SBSymbolContext SBFrame::GetSymbolContext(uint32_t resolve_scope) const {
- LLDB_RECORD_METHOD_CONST(lldb::SBSymbolContext, SBFrame, GetSymbolContext,
- (uint32_t), resolve_scope);
+ LLDB_INSTRUMENT_VA(this, resolve_scope);
SBSymbolContext sb_sym_ctx;
std::unique_lock<std::recursive_mutex> lock;
@@ -129,11 +126,11 @@ SBSymbolContext SBFrame::GetSymbolContext(uint32_t resolve_scope) const {
}
}
- return LLDB_RECORD_RESULT(sb_sym_ctx);
+ return sb_sym_ctx;
}
SBModule SBFrame::GetModule() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBModule, SBFrame, GetModule);
+ LLDB_INSTRUMENT_VA(this);
SBModule sb_module;
ModuleSP module_sp;
@@ -154,12 +151,11 @@ SBModule SBFrame::GetModule() const {
}
}
- return LLDB_RECORD_RESULT(sb_module);
+ return sb_module;
}
SBCompileUnit SBFrame::GetCompileUnit() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBCompileUnit, SBFrame,
- GetCompileUnit);
+ LLDB_INSTRUMENT_VA(this);
SBCompileUnit sb_comp_unit;
std::unique_lock<std::recursive_mutex> lock;
@@ -179,11 +175,11 @@ SBCompileUnit SBFrame::GetCompileUnit() const {
}
}
- return LLDB_RECORD_RESULT(sb_comp_unit);
+ return sb_comp_unit;
}
SBFunction SBFrame::GetFunction() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBFunction, SBFrame, GetFunction);
+ LLDB_INSTRUMENT_VA(this);
SBFunction sb_function;
std::unique_lock<std::recursive_mutex> lock;
@@ -203,11 +199,11 @@ SBFunction SBFrame::GetFunction() const {
}
}
- return LLDB_RECORD_RESULT(sb_function);
+ return sb_function;
}
SBSymbol SBFrame::GetSymbol() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBSymbol, SBFrame, GetSymbol);
+ LLDB_INSTRUMENT_VA(this);
SBSymbol sb_symbol;
std::unique_lock<std::recursive_mutex> lock;
@@ -226,11 +222,11 @@ SBSymbol SBFrame::GetSymbol() const {
}
}
- return LLDB_RECORD_RESULT(sb_symbol);
+ return sb_symbol;
}
SBBlock SBFrame::GetBlock() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBBlock, SBFrame, GetBlock);
+ LLDB_INSTRUMENT_VA(this);
SBBlock sb_block;
std::unique_lock<std::recursive_mutex> lock;
@@ -247,11 +243,11 @@ SBBlock SBFrame::GetBlock() const {
sb_block.SetPtr(frame->GetSymbolContext(eSymbolContextBlock).block);
}
}
- return LLDB_RECORD_RESULT(sb_block);
+ return sb_block;
}
SBBlock SBFrame::GetFrameBlock() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBBlock, SBFrame, GetFrameBlock);
+ LLDB_INSTRUMENT_VA(this);
SBBlock sb_block;
std::unique_lock<std::recursive_mutex> lock;
@@ -268,11 +264,11 @@ SBBlock SBFrame::GetFrameBlock() const {
sb_block.SetPtr(frame->GetFrameBlock());
}
}
- return LLDB_RECORD_RESULT(sb_block);
+ return sb_block;
}
SBLineEntry SBFrame::GetLineEntry() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBLineEntry, SBFrame, GetLineEntry);
+ LLDB_INSTRUMENT_VA(this);
SBLineEntry sb_line_entry;
std::unique_lock<std::recursive_mutex> lock;
@@ -291,11 +287,11 @@ SBLineEntry SBFrame::GetLineEntry() const {
}
}
}
- return LLDB_RECORD_RESULT(sb_line_entry);
+ return sb_line_entry;
}
uint32_t SBFrame::GetFrameID() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBFrame, GetFrameID);
+ LLDB_INSTRUMENT_VA(this);
uint32_t frame_idx = UINT32_MAX;
@@ -310,7 +306,7 @@ uint32_t SBFrame::GetFrameID() const {
}
lldb::addr_t SBFrame::GetCFA() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::addr_t, SBFrame, GetCFA);
+ LLDB_INSTRUMENT_VA(this);
std::unique_lock<std::recursive_mutex> lock;
ExecutionContext exe_ctx(m_opaque_sp.get(), lock);
@@ -322,7 +318,7 @@ lldb::addr_t SBFrame::GetCFA() const {
}
addr_t SBFrame::GetPC() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::addr_t, SBFrame, GetPC);
+ LLDB_INSTRUMENT_VA(this);
addr_t addr = LLDB_INVALID_ADDRESS;
std::unique_lock<std::recursive_mutex> lock;
@@ -346,7 +342,7 @@ addr_t SBFrame::GetPC() const {
}
bool SBFrame::SetPC(addr_t new_pc) {
- LLDB_RECORD_METHOD(bool, SBFrame, SetPC, (lldb::addr_t), new_pc);
+ LLDB_INSTRUMENT_VA(this, new_pc);
bool ret_val = false;
std::unique_lock<std::recursive_mutex> lock;
@@ -369,7 +365,7 @@ bool SBFrame::SetPC(addr_t new_pc) {
}
addr_t SBFrame::GetSP() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::addr_t, SBFrame, GetSP);
+ LLDB_INSTRUMENT_VA(this);
addr_t addr = LLDB_INVALID_ADDRESS;
std::unique_lock<std::recursive_mutex> lock;
@@ -392,7 +388,7 @@ addr_t SBFrame::GetSP() const {
}
addr_t SBFrame::GetFP() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::addr_t, SBFrame, GetFP);
+ LLDB_INSTRUMENT_VA(this);
addr_t addr = LLDB_INVALID_ADDRESS;
std::unique_lock<std::recursive_mutex> lock;
@@ -415,7 +411,7 @@ addr_t SBFrame::GetFP() const {
}
SBAddress SBFrame::GetPCAddress() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBAddress, SBFrame, GetPCAddress);
+ LLDB_INSTRUMENT_VA(this);
SBAddress sb_addr;
std::unique_lock<std::recursive_mutex> lock;
@@ -432,18 +428,17 @@ SBAddress SBFrame::GetPCAddress() const {
sb_addr.SetAddress(frame->GetFrameCodeAddress());
}
}
- return LLDB_RECORD_RESULT(sb_addr);
+ return sb_addr;
}
void SBFrame::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBFrame, Clear);
+ LLDB_INSTRUMENT_VA(this);
m_opaque_sp->Clear();
}
lldb::SBValue SBFrame::GetValueForVariablePath(const char *var_path) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBFrame, GetValueForVariablePath,
- (const char *), var_path);
+ LLDB_INSTRUMENT_VA(this, var_path);
SBValue sb_value;
std::unique_lock<std::recursive_mutex> lock;
@@ -456,18 +451,16 @@ lldb::SBValue SBFrame::GetValueForVariablePath(const char *var_path) {
frame->CalculateTarget()->GetPreferDynamicValue();
sb_value = GetValueForVariablePath(var_path, use_dynamic);
}
- return LLDB_RECORD_RESULT(sb_value);
+ return sb_value;
}
lldb::SBValue SBFrame::GetValueForVariablePath(const char *var_path,
DynamicValueType use_dynamic) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBFrame, GetValueForVariablePath,
- (const char *, lldb::DynamicValueType), var_path,
- use_dynamic);
+ LLDB_INSTRUMENT_VA(this, var_path, use_dynamic);
SBValue sb_value;
if (var_path == nullptr || var_path[0] == '\0') {
- return LLDB_RECORD_RESULT(sb_value);
+ return sb_value;
}
std::unique_lock<std::recursive_mutex> lock;
@@ -492,12 +485,11 @@ lldb::SBValue SBFrame::GetValueForVariablePath(const char *var_path,
}
}
}
- return LLDB_RECORD_RESULT(sb_value);
+ return sb_value;
}
SBValue SBFrame::FindVariable(const char *name) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBFrame, FindVariable, (const char *),
- name);
+ LLDB_INSTRUMENT_VA(this, name);
SBValue value;
std::unique_lock<std::recursive_mutex> lock;
@@ -510,19 +502,18 @@ SBValue SBFrame::FindVariable(const char *name) {
frame->CalculateTarget()->GetPreferDynamicValue();
value = FindVariable(name, use_dynamic);
}
- return LLDB_RECORD_RESULT(value);
+ return value;
}
SBValue SBFrame::FindVariable(const char *name,
lldb::DynamicValueType use_dynamic) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBFrame, FindVariable,
- (const char *, lldb::DynamicValueType), name, use_dynamic);
+ LLDB_INSTRUMENT_VA(this, name, use_dynamic);
VariableSP var_sp;
SBValue sb_value;
if (name == nullptr || name[0] == '\0') {
- return LLDB_RECORD_RESULT(sb_value);
+ return sb_value;
}
ValueObjectSP value_sp;
@@ -545,12 +536,11 @@ SBValue SBFrame::FindVariable(const char *name,
}
}
- return LLDB_RECORD_RESULT(sb_value);
+ return sb_value;
}
SBValue SBFrame::FindValue(const char *name, ValueType value_type) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBFrame, FindValue,
- (const char *, lldb::ValueType), name, value_type);
+ LLDB_INSTRUMENT_VA(this, name, value_type);
SBValue value;
std::unique_lock<std::recursive_mutex> lock;
@@ -563,19 +553,17 @@ SBValue SBFrame::FindValue(const char *name, ValueType value_type) {
frame->CalculateTarget()->GetPreferDynamicValue();
value = FindValue(name, value_type, use_dynamic);
}
- return LLDB_RECORD_RESULT(value);
+ return value;
}
SBValue SBFrame::FindValue(const char *name, ValueType value_type,
lldb::DynamicValueType use_dynamic) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBFrame, FindValue,
- (const char *, lldb::ValueType, lldb::DynamicValueType),
- name, value_type, use_dynamic);
+ LLDB_INSTRUMENT_VA(this, name, value_type, use_dynamic);
SBValue sb_value;
if (name == nullptr || name[0] == '\0') {
- return LLDB_RECORD_RESULT(sb_value);
+ return sb_value;
}
ValueObjectSP value_sp;
@@ -678,12 +666,11 @@ SBValue SBFrame::FindValue(const char *name, ValueType value_type,
}
}
- return LLDB_RECORD_RESULT(sb_value);
+ return sb_value;
}
bool SBFrame::IsEqual(const SBFrame &that) const {
- LLDB_RECORD_METHOD_CONST(bool, SBFrame, IsEqual, (const lldb::SBFrame &),
- that);
+ LLDB_INSTRUMENT_VA(this, that);
lldb::StackFrameSP this_sp = GetFrameSP();
lldb::StackFrameSP that_sp = that.GetFrameSP();
@@ -691,21 +678,19 @@ bool SBFrame::IsEqual(const SBFrame &that) const {
}
bool SBFrame::operator==(const SBFrame &rhs) const {
- LLDB_RECORD_METHOD_CONST(bool, SBFrame, operator==,(const lldb::SBFrame &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
return IsEqual(rhs);
}
bool SBFrame::operator!=(const SBFrame &rhs) const {
- LLDB_RECORD_METHOD_CONST(bool, SBFrame, operator!=,(const lldb::SBFrame &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
return !IsEqual(rhs);
}
SBThread SBFrame::GetThread() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBThread, SBFrame, GetThread);
+ LLDB_INSTRUMENT_VA(this);
std::unique_lock<std::recursive_mutex> lock;
ExecutionContext exe_ctx(m_opaque_sp.get(), lock);
@@ -713,11 +698,11 @@ SBThread SBFrame::GetThread() const {
ThreadSP thread_sp(exe_ctx.GetThreadSP());
SBThread sb_thread(thread_sp);
- return LLDB_RECORD_RESULT(sb_thread);
+ return sb_thread;
}
const char *SBFrame::Disassemble() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBFrame, Disassemble);
+ LLDB_INSTRUMENT_VA(this);
const char *disassembly = nullptr;
std::unique_lock<std::recursive_mutex> lock;
@@ -741,9 +726,7 @@ const char *SBFrame::Disassemble() const {
SBValueList SBFrame::GetVariables(bool arguments, bool locals, bool statics,
bool in_scope_only) {
- LLDB_RECORD_METHOD(lldb::SBValueList, SBFrame, GetVariables,
- (bool, bool, bool, bool), arguments, locals, statics,
- in_scope_only);
+ LLDB_INSTRUMENT_VA(this, arguments, locals, statics, in_scope_only);
SBValueList value_list;
std::unique_lock<std::recursive_mutex> lock;
@@ -767,15 +750,14 @@ SBValueList SBFrame::GetVariables(bool arguments, bool locals, bool statics,
value_list = GetVariables(options);
}
- return LLDB_RECORD_RESULT(value_list);
+ return value_list;
}
lldb::SBValueList SBFrame::GetVariables(bool arguments, bool locals,
bool statics, bool in_scope_only,
lldb::DynamicValueType use_dynamic) {
- LLDB_RECORD_METHOD(lldb::SBValueList, SBFrame, GetVariables,
- (bool, bool, bool, bool, lldb::DynamicValueType),
- arguments, locals, statics, in_scope_only, use_dynamic);
+ LLDB_INSTRUMENT_VA(this, arguments, locals, statics, in_scope_only,
+ use_dynamic);
std::unique_lock<std::recursive_mutex> lock;
ExecutionContext exe_ctx(m_opaque_sp.get(), lock);
@@ -790,12 +772,11 @@ lldb::SBValueList SBFrame::GetVariables(bool arguments, bool locals,
options.SetInScopeOnly(in_scope_only);
options.SetIncludeRuntimeSupportValues(include_runtime_support_values);
options.SetUseDynamic(use_dynamic);
- return LLDB_RECORD_RESULT(GetVariables(options));
+ return GetVariables(options);
}
SBValueList SBFrame::GetVariables(const lldb::SBVariablesOptions &options) {
- LLDB_RECORD_METHOD(lldb::SBValueList, SBFrame, GetVariables,
- (const lldb::SBVariablesOptions &), options);
+ LLDB_INSTRUMENT_VA(this, options);
SBValueList value_list;
std::unique_lock<std::recursive_mutex> lock;
@@ -891,11 +872,11 @@ SBValueList SBFrame::GetVariables(const lldb::SBVariablesOptions &options) {
}
}
- return LLDB_RECORD_RESULT(value_list);
+ return value_list;
}
SBValueList SBFrame::GetRegisters() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBValueList, SBFrame, GetRegisters);
+ LLDB_INSTRUMENT_VA(this);
SBValueList value_list;
std::unique_lock<std::recursive_mutex> lock;
@@ -921,12 +902,11 @@ SBValueList SBFrame::GetRegisters() {
}
}
- return LLDB_RECORD_RESULT(value_list);
+ return value_list;
}
SBValue SBFrame::FindRegister(const char *name) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBFrame, FindRegister, (const char *),
- name);
+ LLDB_INSTRUMENT_VA(this, name);
SBValue result;
ValueObjectSP value_sp;
@@ -953,12 +933,11 @@ SBValue SBFrame::FindRegister(const char *name) {
}
}
- return LLDB_RECORD_RESULT(result);
+ return result;
}
bool SBFrame::GetDescription(SBStream &description) {
- LLDB_RECORD_METHOD(bool, SBFrame, GetDescription, (lldb::SBStream &),
- description);
+ LLDB_INSTRUMENT_VA(this, description);
Stream &strm = description.ref();
@@ -984,8 +963,7 @@ bool SBFrame::GetDescription(SBStream &description) {
}
SBValue SBFrame::EvaluateExpression(const char *expr) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBFrame, EvaluateExpression, (const char *),
- expr);
+ LLDB_INSTRUMENT_VA(this, expr);
SBValue result;
std::unique_lock<std::recursive_mutex> lock;
@@ -1004,17 +982,15 @@ SBValue SBFrame::EvaluateExpression(const char *expr) {
options.SetLanguage(target->GetLanguage());
else
options.SetLanguage(frame->GetLanguage());
- return LLDB_RECORD_RESULT(EvaluateExpression(expr, options));
+ return EvaluateExpression(expr, options);
}
- return LLDB_RECORD_RESULT(result);
+ return result;
}
SBValue
SBFrame::EvaluateExpression(const char *expr,
lldb::DynamicValueType fetch_dynamic_value) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBFrame, EvaluateExpression,
- (const char *, lldb::DynamicValueType), expr,
- fetch_dynamic_value);
+ LLDB_INSTRUMENT_VA(this, expr, fetch_dynamic_value);
SBExpressionOptions options;
options.SetFetchDynamicValue(fetch_dynamic_value);
@@ -1029,15 +1005,13 @@ SBFrame::EvaluateExpression(const char *expr,
options.SetLanguage(target->GetLanguage());
else if (frame)
options.SetLanguage(frame->GetLanguage());
- return LLDB_RECORD_RESULT(EvaluateExpression(expr, options));
+ return EvaluateExpression(expr, options);
}
SBValue SBFrame::EvaluateExpression(const char *expr,
lldb::DynamicValueType fetch_dynamic_value,
bool unwind_on_error) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBFrame, EvaluateExpression,
- (const char *, lldb::DynamicValueType, bool), expr,
- fetch_dynamic_value, unwind_on_error);
+ LLDB_INSTRUMENT_VA(this, expr, fetch_dynamic_value, unwind_on_error);
SBExpressionOptions options;
std::unique_lock<std::recursive_mutex> lock;
@@ -1052,21 +1026,19 @@ SBValue SBFrame::EvaluateExpression(const char *expr,
options.SetLanguage(target->GetLanguage());
else if (frame)
options.SetLanguage(frame->GetLanguage());
- return LLDB_RECORD_RESULT(EvaluateExpression(expr, options));
+ return EvaluateExpression(expr, options);
}
lldb::SBValue SBFrame::EvaluateExpression(const char *expr,
const SBExpressionOptions &options) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBFrame, EvaluateExpression,
- (const char *, const lldb::SBExpressionOptions &), expr,
- options);
+ LLDB_INSTRUMENT_VA(this, expr, options);
Log *expr_log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_EXPRESSIONS));
SBValue expr_result;
if (expr == nullptr || expr[0] == '\0') {
- return LLDB_RECORD_RESULT(expr_result);
+ return expr_result;
}
ValueObjectSP expr_value_sp;
@@ -1106,17 +1078,17 @@ lldb::SBValue SBFrame::EvaluateExpression(const char *expr,
"%s, summary %s **",
expr_result.GetValue(), expr_result.GetSummary());
- return LLDB_RECORD_RESULT(expr_result);
+ return expr_result;
}
bool SBFrame::IsInlined() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBFrame, IsInlined);
+ LLDB_INSTRUMENT_VA(this);
return static_cast<const SBFrame *>(this)->IsInlined();
}
bool SBFrame::IsInlined() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBFrame, IsInlined);
+ LLDB_INSTRUMENT_VA(this);
std::unique_lock<std::recursive_mutex> lock;
ExecutionContext exe_ctx(m_opaque_sp.get(), lock);
@@ -1140,13 +1112,13 @@ bool SBFrame::IsInlined() const {
}
bool SBFrame::IsArtificial() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBFrame, IsArtificial);
+ LLDB_INSTRUMENT_VA(this);
return static_cast<const SBFrame *>(this)->IsArtificial();
}
bool SBFrame::IsArtificial() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBFrame, IsArtificial);
+ LLDB_INSTRUMENT_VA(this);
std::unique_lock<std::recursive_mutex> lock;
ExecutionContext exe_ctx(m_opaque_sp.get(), lock);
@@ -1159,13 +1131,13 @@ bool SBFrame::IsArtificial() const {
}
const char *SBFrame::GetFunctionName() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBFrame, GetFunctionName);
+ LLDB_INSTRUMENT_VA(this);
return static_cast<const SBFrame *>(this)->GetFunctionName();
}
lldb::LanguageType SBFrame::GuessLanguage() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::LanguageType, SBFrame, GuessLanguage);
+ LLDB_INSTRUMENT_VA(this);
std::unique_lock<std::recursive_mutex> lock;
ExecutionContext exe_ctx(m_opaque_sp.get(), lock);
@@ -1186,7 +1158,7 @@ lldb::LanguageType SBFrame::GuessLanguage() const {
}
const char *SBFrame::GetFunctionName() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBFrame, GetFunctionName);
+ LLDB_INSTRUMENT_VA(this);
const char *name = nullptr;
std::unique_lock<std::recursive_mutex> lock;
@@ -1228,7 +1200,7 @@ const char *SBFrame::GetFunctionName() const {
}
const char *SBFrame::GetDisplayFunctionName() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBFrame, GetDisplayFunctionName);
+ LLDB_INSTRUMENT_VA(this);
const char *name = nullptr;
@@ -1269,82 +1241,3 @@ const char *SBFrame::GetDisplayFunctionName() {
}
return name;
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBFrame>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBFrame, ());
- LLDB_REGISTER_CONSTRUCTOR(SBFrame, (const lldb::StackFrameSP &));
- LLDB_REGISTER_CONSTRUCTOR(SBFrame, (const lldb::SBFrame &));
- LLDB_REGISTER_METHOD(const lldb::SBFrame &,
- SBFrame, operator=,(const lldb::SBFrame &));
- LLDB_REGISTER_METHOD_CONST(bool, SBFrame, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBFrame, operator bool, ());
- LLDB_REGISTER_METHOD_CONST(lldb::SBSymbolContext, SBFrame, GetSymbolContext,
- (uint32_t));
- LLDB_REGISTER_METHOD_CONST(lldb::SBModule, SBFrame, GetModule, ());
- LLDB_REGISTER_METHOD_CONST(lldb::SBCompileUnit, SBFrame, GetCompileUnit,
- ());
- LLDB_REGISTER_METHOD_CONST(lldb::SBFunction, SBFrame, GetFunction, ());
- LLDB_REGISTER_METHOD_CONST(lldb::SBSymbol, SBFrame, GetSymbol, ());
- LLDB_REGISTER_METHOD_CONST(lldb::SBBlock, SBFrame, GetBlock, ());
- LLDB_REGISTER_METHOD_CONST(lldb::SBBlock, SBFrame, GetFrameBlock, ());
- LLDB_REGISTER_METHOD_CONST(lldb::SBLineEntry, SBFrame, GetLineEntry, ());
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBFrame, GetFrameID, ());
- LLDB_REGISTER_METHOD_CONST(lldb::addr_t, SBFrame, GetCFA, ());
- LLDB_REGISTER_METHOD_CONST(lldb::addr_t, SBFrame, GetPC, ());
- LLDB_REGISTER_METHOD(bool, SBFrame, SetPC, (lldb::addr_t));
- LLDB_REGISTER_METHOD_CONST(lldb::addr_t, SBFrame, GetSP, ());
- LLDB_REGISTER_METHOD_CONST(lldb::addr_t, SBFrame, GetFP, ());
- LLDB_REGISTER_METHOD_CONST(lldb::SBAddress, SBFrame, GetPCAddress, ());
- LLDB_REGISTER_METHOD(void, SBFrame, Clear, ());
- LLDB_REGISTER_METHOD(lldb::SBValue, SBFrame, GetValueForVariablePath,
- (const char *));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBFrame, GetValueForVariablePath,
- (const char *, lldb::DynamicValueType));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBFrame, FindVariable, (const char *));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBFrame, FindVariable,
- (const char *, lldb::DynamicValueType));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBFrame, FindValue,
- (const char *, lldb::ValueType));
- LLDB_REGISTER_METHOD(
- lldb::SBValue, SBFrame, FindValue,
- (const char *, lldb::ValueType, lldb::DynamicValueType));
- LLDB_REGISTER_METHOD_CONST(bool, SBFrame, IsEqual, (const lldb::SBFrame &));
- LLDB_REGISTER_METHOD_CONST(bool,
- SBFrame, operator==,(const lldb::SBFrame &));
- LLDB_REGISTER_METHOD_CONST(bool,
- SBFrame, operator!=,(const lldb::SBFrame &));
- LLDB_REGISTER_METHOD_CONST(lldb::SBThread, SBFrame, GetThread, ());
- LLDB_REGISTER_METHOD_CONST(const char *, SBFrame, Disassemble, ());
- LLDB_REGISTER_METHOD(lldb::SBValueList, SBFrame, GetVariables,
- (bool, bool, bool, bool));
- LLDB_REGISTER_METHOD(lldb::SBValueList, SBFrame, GetVariables,
- (bool, bool, bool, bool, lldb::DynamicValueType));
- LLDB_REGISTER_METHOD(lldb::SBValueList, SBFrame, GetVariables,
- (const lldb::SBVariablesOptions &));
- LLDB_REGISTER_METHOD(lldb::SBValueList, SBFrame, GetRegisters, ());
- LLDB_REGISTER_METHOD(lldb::SBValue, SBFrame, FindRegister, (const char *));
- LLDB_REGISTER_METHOD(bool, SBFrame, GetDescription, (lldb::SBStream &));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBFrame, EvaluateExpression,
- (const char *));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBFrame, EvaluateExpression,
- (const char *, lldb::DynamicValueType));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBFrame, EvaluateExpression,
- (const char *, lldb::DynamicValueType, bool));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBFrame, EvaluateExpression,
- (const char *, const lldb::SBExpressionOptions &));
- LLDB_REGISTER_METHOD(bool, SBFrame, IsInlined, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBFrame, IsInlined, ());
- LLDB_REGISTER_METHOD(bool, SBFrame, IsArtificial, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBFrame, IsArtificial, ());
- LLDB_REGISTER_METHOD(const char *, SBFrame, GetFunctionName, ());
- LLDB_REGISTER_METHOD_CONST(lldb::LanguageType, SBFrame, GuessLanguage, ());
- LLDB_REGISTER_METHOD_CONST(const char *, SBFrame, GetFunctionName, ());
- LLDB_REGISTER_METHOD(const char *, SBFrame, GetDisplayFunctionName, ());
-}
-
-}
-}
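The roughly eighty deleted lines above are SBFrame's RegisterMethods<> specialization, which had to enumerate every constructor and method signature for the reproducer registry and be kept in sync by hand. Nothing replaces it: a call-site macro lets each entry point describe itself when it runs. The contrast below is a rough sketch under that assumption, with toy types rather than LLDB's actual registry code.

#include <cstdio>
#include <string>
#include <vector>

// Old shape: a central registry that must be told every API signature.
struct Registry {
  std::vector<std::string> signatures;
  void Register(std::string sig) { signatures.push_back(std::move(sig)); }
};

static void RegisterMethodsOldStyle(Registry &r) {
  r.Register("bool SBFrame::IsValid() const");
  r.Register("lldb::SBThread SBFrame::GetThread() const");
  r.Register("lldb::SBValueList SBFrame::GetRegisters()");
  // ...one entry per method, maintained by hand.
}

// New shape: the entry point announces itself when it runs; no table.
#define INSTRUMENT() std::fprintf(stderr, "[api] %s\n", __func__)

static bool IsValidNewStyle() {
  INSTRUMENT();
  return true;
}

int main() {
  Registry r;
  RegisterMethodsOldStyle(r);
  std::fprintf(stderr, "old-style registry lists %zu signatures\n",
               r.signatures.size());
  return IsValidNewStyle() ? 0 : 1;
}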
diff --git a/contrib/llvm-project/lldb/source/API/SBFunction.cpp b/contrib/llvm-project/lldb/source/API/SBFunction.cpp
index 2d0cb239de75..562cae4e8906 100644
--- a/contrib/llvm-project/lldb/source/API/SBFunction.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBFunction.cpp
@@ -7,7 +7,6 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBFunction.h"
-#include "SBReproducerPrivate.h"
#include "lldb/API/SBProcess.h"
#include "lldb/API/SBStream.h"
#include "lldb/Core/Disassembler.h"
@@ -18,42 +17,42 @@
#include "lldb/Symbol/VariableList.h"
#include "lldb/Target/ExecutionContext.h"
#include "lldb/Target/Target.h"
+#include "lldb/Utility/Instrumentation.h"
using namespace lldb;
using namespace lldb_private;
-SBFunction::SBFunction() { LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBFunction); }
+SBFunction::SBFunction() { LLDB_INSTRUMENT_VA(this); }
SBFunction::SBFunction(lldb_private::Function *lldb_object_ptr)
: m_opaque_ptr(lldb_object_ptr) {}
SBFunction::SBFunction(const lldb::SBFunction &rhs)
: m_opaque_ptr(rhs.m_opaque_ptr) {
- LLDB_RECORD_CONSTRUCTOR(SBFunction, (const lldb::SBFunction &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
const SBFunction &SBFunction::operator=(const SBFunction &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBFunction &,
- SBFunction, operator=,(const lldb::SBFunction &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_ptr = rhs.m_opaque_ptr;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBFunction::~SBFunction() { m_opaque_ptr = nullptr; }
bool SBFunction::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBFunction, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBFunction::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBFunction, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_ptr != nullptr;
}
const char *SBFunction::GetName() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBFunction, GetName);
+ LLDB_INSTRUMENT_VA(this);
const char *cstr = nullptr;
if (m_opaque_ptr)
@@ -63,7 +62,7 @@ const char *SBFunction::GetName() const {
}
const char *SBFunction::GetDisplayName() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBFunction, GetDisplayName);
+ LLDB_INSTRUMENT_VA(this);
const char *cstr = nullptr;
if (m_opaque_ptr)
@@ -73,7 +72,7 @@ const char *SBFunction::GetDisplayName() const {
}
const char *SBFunction::GetMangledName() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBFunction, GetMangledName);
+ LLDB_INSTRUMENT_VA(this);
const char *cstr = nullptr;
if (m_opaque_ptr)
@@ -82,21 +81,19 @@ const char *SBFunction::GetMangledName() const {
}
bool SBFunction::operator==(const SBFunction &rhs) const {
- LLDB_RECORD_METHOD_CONST(
- bool, SBFunction, operator==,(const lldb::SBFunction &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
return m_opaque_ptr == rhs.m_opaque_ptr;
}
bool SBFunction::operator!=(const SBFunction &rhs) const {
- LLDB_RECORD_METHOD_CONST(
- bool, SBFunction, operator!=,(const lldb::SBFunction &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
return m_opaque_ptr != rhs.m_opaque_ptr;
}
bool SBFunction::GetDescription(SBStream &s) {
- LLDB_RECORD_METHOD(bool, SBFunction, GetDescription, (lldb::SBStream &), s);
+ LLDB_INSTRUMENT_VA(this, s);
if (m_opaque_ptr) {
s.Printf("SBFunction: id = 0x%8.8" PRIx64 ", name = %s",
@@ -111,16 +108,14 @@ bool SBFunction::GetDescription(SBStream &s) {
}
SBInstructionList SBFunction::GetInstructions(SBTarget target) {
- LLDB_RECORD_METHOD(lldb::SBInstructionList, SBFunction, GetInstructions,
- (lldb::SBTarget), target);
+ LLDB_INSTRUMENT_VA(this, target);
- return LLDB_RECORD_RESULT(GetInstructions(target, nullptr));
+ return GetInstructions(target, nullptr);
}
SBInstructionList SBFunction::GetInstructions(SBTarget target,
const char *flavor) {
- LLDB_RECORD_METHOD(lldb::SBInstructionList, SBFunction, GetInstructions,
- (lldb::SBTarget, const char *), target, flavor);
+ LLDB_INSTRUMENT_VA(this, target, flavor);
SBInstructionList sb_instructions;
if (m_opaque_ptr) {
@@ -136,7 +131,7 @@ SBInstructionList SBFunction::GetInstructions(SBTarget target,
m_opaque_ptr->GetAddressRange(), force_live_memory));
}
}
- return LLDB_RECORD_RESULT(sb_instructions);
+ return sb_instructions;
}
lldb_private::Function *SBFunction::get() { return m_opaque_ptr; }
@@ -146,16 +141,16 @@ void SBFunction::reset(lldb_private::Function *lldb_object_ptr) {
}
SBAddress SBFunction::GetStartAddress() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBAddress, SBFunction, GetStartAddress);
+ LLDB_INSTRUMENT_VA(this);
SBAddress addr;
if (m_opaque_ptr)
addr.SetAddress(m_opaque_ptr->GetAddressRange().GetBaseAddress());
- return LLDB_RECORD_RESULT(addr);
+ return addr;
}
SBAddress SBFunction::GetEndAddress() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBAddress, SBFunction, GetEndAddress);
+ LLDB_INSTRUMENT_VA(this);
SBAddress addr;
if (m_opaque_ptr) {
@@ -165,12 +160,11 @@ SBAddress SBFunction::GetEndAddress() {
addr->Slide(byte_size);
}
}
- return LLDB_RECORD_RESULT(addr);
+ return addr;
}
const char *SBFunction::GetArgumentName(uint32_t arg_idx) {
- LLDB_RECORD_METHOD(const char *, SBFunction, GetArgumentName, (uint32_t),
- arg_idx);
+ LLDB_INSTRUMENT_VA(this, arg_idx);
if (m_opaque_ptr) {
Block &block = m_opaque_ptr->GetBlock(true);
@@ -188,7 +182,7 @@ const char *SBFunction::GetArgumentName(uint32_t arg_idx) {
}
uint32_t SBFunction::GetPrologueByteSize() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBFunction, GetPrologueByteSize);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_ptr)
return m_opaque_ptr->GetPrologueByteSize();
@@ -196,7 +190,7 @@ uint32_t SBFunction::GetPrologueByteSize() {
}
SBType SBFunction::GetType() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBType, SBFunction, GetType);
+ LLDB_INSTRUMENT_VA(this);
SBType sb_type;
if (m_opaque_ptr) {
@@ -204,20 +198,20 @@ SBType SBFunction::GetType() {
if (function_type)
sb_type.ref().SetType(function_type->shared_from_this());
}
- return LLDB_RECORD_RESULT(sb_type);
+ return sb_type;
}
SBBlock SBFunction::GetBlock() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBBlock, SBFunction, GetBlock);
+ LLDB_INSTRUMENT_VA(this);
SBBlock sb_block;
if (m_opaque_ptr)
sb_block.SetPtr(&m_opaque_ptr->GetBlock(true));
- return LLDB_RECORD_RESULT(sb_block);
+ return sb_block;
}
lldb::LanguageType SBFunction::GetLanguage() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::LanguageType, SBFunction, GetLanguage);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_ptr) {
if (m_opaque_ptr->GetCompileUnit())
@@ -227,7 +221,7 @@ lldb::LanguageType SBFunction::GetLanguage() {
}
bool SBFunction::GetIsOptimized() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBFunction, GetIsOptimized);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_ptr) {
if (m_opaque_ptr->GetCompileUnit())
@@ -235,39 +229,3 @@ bool SBFunction::GetIsOptimized() {
}
return false;
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBFunction>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBFunction, ());
- LLDB_REGISTER_CONSTRUCTOR(SBFunction, (const lldb::SBFunction &));
- LLDB_REGISTER_METHOD(const lldb::SBFunction &,
- SBFunction, operator=,(const lldb::SBFunction &));
- LLDB_REGISTER_METHOD_CONST(bool, SBFunction, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBFunction, operator bool, ());
- LLDB_REGISTER_METHOD_CONST(const char *, SBFunction, GetName, ());
- LLDB_REGISTER_METHOD_CONST(const char *, SBFunction, GetDisplayName, ());
- LLDB_REGISTER_METHOD_CONST(const char *, SBFunction, GetMangledName, ());
- LLDB_REGISTER_METHOD_CONST(
- bool, SBFunction, operator==,(const lldb::SBFunction &));
- LLDB_REGISTER_METHOD_CONST(
- bool, SBFunction, operator!=,(const lldb::SBFunction &));
- LLDB_REGISTER_METHOD(bool, SBFunction, GetDescription, (lldb::SBStream &));
- LLDB_REGISTER_METHOD(lldb::SBInstructionList, SBFunction, GetInstructions,
- (lldb::SBTarget));
- LLDB_REGISTER_METHOD(lldb::SBInstructionList, SBFunction, GetInstructions,
- (lldb::SBTarget, const char *));
- LLDB_REGISTER_METHOD(lldb::SBAddress, SBFunction, GetStartAddress, ());
- LLDB_REGISTER_METHOD(lldb::SBAddress, SBFunction, GetEndAddress, ());
- LLDB_REGISTER_METHOD(const char *, SBFunction, GetArgumentName, (uint32_t));
- LLDB_REGISTER_METHOD(uint32_t, SBFunction, GetPrologueByteSize, ());
- LLDB_REGISTER_METHOD(lldb::SBType, SBFunction, GetType, ());
- LLDB_REGISTER_METHOD(lldb::SBBlock, SBFunction, GetBlock, ());
- LLDB_REGISTER_METHOD(lldb::LanguageType, SBFunction, GetLanguage, ());
- LLDB_REGISTER_METHOD(bool, SBFunction, GetIsOptimized, ());
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBHostOS.cpp b/contrib/llvm-project/lldb/source/API/SBHostOS.cpp
index deca4ac81a1a..06cf654031a1 100644
--- a/contrib/llvm-project/lldb/source/API/SBHostOS.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBHostOS.cpp
@@ -7,7 +7,6 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBHostOS.h"
-#include "SBReproducerPrivate.h"
#include "lldb/API/SBError.h"
#include "lldb/Host/Config.h"
#include "lldb/Host/FileSystem.h"
@@ -17,6 +16,7 @@
#include "lldb/Host/HostThread.h"
#include "lldb/Host/ThreadLauncher.h"
#include "lldb/Utility/FileSpec.h"
+#include "lldb/Utility/Instrumentation.h"
#include "Plugins/ExpressionParser/Clang/ClangHost.h"
#if LLDB_ENABLE_PYTHON
@@ -30,24 +30,21 @@ using namespace lldb;
using namespace lldb_private;
SBFileSpec SBHostOS::GetProgramFileSpec() {
- LLDB_RECORD_STATIC_METHOD_NO_ARGS(lldb::SBFileSpec, SBHostOS,
- GetProgramFileSpec);
+ LLDB_INSTRUMENT();
SBFileSpec sb_filespec;
sb_filespec.SetFileSpec(HostInfo::GetProgramFileSpec());
- return LLDB_RECORD_RESULT(sb_filespec);
+ return sb_filespec;
}
SBFileSpec SBHostOS::GetLLDBPythonPath() {
- LLDB_RECORD_STATIC_METHOD_NO_ARGS(lldb::SBFileSpec, SBHostOS,
- GetLLDBPythonPath);
+ LLDB_INSTRUMENT();
- return LLDB_RECORD_RESULT(GetLLDBPath(ePathTypePythonDir));
+ return GetLLDBPath(ePathTypePythonDir);
}
SBFileSpec SBHostOS::GetLLDBPath(lldb::PathType path_type) {
- LLDB_RECORD_STATIC_METHOD(lldb::SBFileSpec, SBHostOS, GetLLDBPath,
- (lldb::PathType), path_type);
+ LLDB_INSTRUMENT_VA(path_type);
FileSpec fspec;
switch (path_type) {
@@ -84,12 +81,11 @@ SBFileSpec SBHostOS::GetLLDBPath(lldb::PathType path_type) {
SBFileSpec sb_fspec;
sb_fspec.SetFileSpec(fspec);
- return LLDB_RECORD_RESULT(sb_fspec);
+ return sb_fspec;
}
SBFileSpec SBHostOS::GetUserHomeDirectory() {
- LLDB_RECORD_STATIC_METHOD_NO_ARGS(lldb::SBFileSpec, SBHostOS,
- GetUserHomeDirectory);
+ LLDB_INSTRUMENT();
FileSpec homedir;
FileSystem::Instance().GetHomeDirectory(homedir);
@@ -98,15 +94,13 @@ SBFileSpec SBHostOS::GetUserHomeDirectory() {
SBFileSpec sb_fspec;
sb_fspec.SetFileSpec(homedir);
- return LLDB_RECORD_RESULT(sb_fspec);
+ return sb_fspec;
}
lldb::thread_t SBHostOS::ThreadCreate(const char *name,
lldb::thread_func_t thread_function,
void *thread_arg, SBError *error_ptr) {
- LLDB_RECORD_DUMMY(lldb::thread_t, SBHostOS, ThreadCreate,
- (lldb::thread_func_t, void *, SBError *), name,
- thread_function, thread_arg, error_ptr);
+ LLDB_INSTRUMENT_VA(name, thread_function, thread_arg, error_ptr);
llvm::Expected<HostThread> thread =
ThreadLauncher::LaunchThread(name, thread_function, thread_arg);
if (!thread) {
@@ -120,15 +114,10 @@ lldb::thread_t SBHostOS::ThreadCreate(const char *name,
return thread->Release();
}
-void SBHostOS::ThreadCreated(const char *name) {
- LLDB_RECORD_STATIC_METHOD(void, SBHostOS, ThreadCreated, (const char *),
- name);
-}
+void SBHostOS::ThreadCreated(const char *name) { LLDB_INSTRUMENT_VA(name); }
bool SBHostOS::ThreadCancel(lldb::thread_t thread, SBError *error_ptr) {
- LLDB_RECORD_DUMMY(bool, SBHostOS, ThreadCancel,
- (lldb::thread_t, lldb::SBError *), thread,
- error_ptr);
+ LLDB_INSTRUMENT_VA(thread, error_ptr);
Status error;
HostThread host_thread(thread);
@@ -140,9 +129,7 @@ bool SBHostOS::ThreadCancel(lldb::thread_t thread, SBError *error_ptr) {
}
bool SBHostOS::ThreadDetach(lldb::thread_t thread, SBError *error_ptr) {
- LLDB_RECORD_DUMMY(bool, SBHostOS, ThreadDetach,
- (lldb::thread_t, lldb::SBError *), thread,
- error_ptr);
+ LLDB_INSTRUMENT_VA(thread, error_ptr);
Status error;
#if defined(_WIN32)
@@ -160,10 +147,7 @@ bool SBHostOS::ThreadDetach(lldb::thread_t thread, SBError *error_ptr) {
bool SBHostOS::ThreadJoin(lldb::thread_t thread, lldb::thread_result_t *result,
SBError *error_ptr) {
- LLDB_RECORD_DUMMY(
- bool, SBHostOS, ThreadJoin,
- (lldb::thread_t, lldb::thread_result_t *, lldb::SBError *), thread,
- result, error_ptr);
+ LLDB_INSTRUMENT_VA(thread, result, error_ptr);
Status error;
HostThread host_thread(thread);
@@ -173,22 +157,3 @@ bool SBHostOS::ThreadJoin(lldb::thread_t thread, lldb::thread_result_t *result,
host_thread.Release();
return error.Success();
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBHostOS>(Registry &R) {
- LLDB_REGISTER_STATIC_METHOD(lldb::SBFileSpec, SBHostOS, GetProgramFileSpec,
- ());
- LLDB_REGISTER_STATIC_METHOD(lldb::SBFileSpec, SBHostOS, GetLLDBPythonPath,
- ());
- LLDB_REGISTER_STATIC_METHOD(lldb::SBFileSpec, SBHostOS, GetLLDBPath,
- (lldb::PathType));
- LLDB_REGISTER_STATIC_METHOD(lldb::SBFileSpec, SBHostOS,
- GetUserHomeDirectory, ());
- LLDB_REGISTER_STATIC_METHOD(void, SBHostOS, ThreadCreated, (const char *));
-}
-
-}
-}
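The SBHostOS hunks above show the two remaining spellings: LLDB_INSTRUMENT() for static methods with no arguments (GetProgramFileSpec, GetLLDBPythonPath, GetUserHomeDirectory) and LLDB_INSTRUMENT_VA(...) without this for static methods that do take arguments, which also absorbs the old LLDB_RECORD_DUMMY cases such as ThreadCreate. A small sketch of both forms, again with stand-in macro bodies rather than the real Instrumentation.h:

#include <cstddef>
#include <cstdio>

#define LLDB_INSTRUMENT() std::fprintf(stderr, "[api] %s()\n", __func__)
#define LLDB_INSTRUMENT_VA(...) std::fprintf(stderr, "[api] %s(...)\n", __func__)

struct HostDemo {
  // Static, no arguments: the argument-less form.
  static const char *GetProgramName() {
    LLDB_INSTRUMENT();
    return "lldb";
  }

  // Static with arguments: same variadic form as instance methods, minus this.
  static int ResolvePath(const char *src, char *dst, std::size_t len) {
    LLDB_INSTRUMENT_VA(src, dst, len);
    return std::snprintf(dst, len, "%s", src);
  }
};

int main() {
  char buf[64];
  std::fprintf(stderr, "%s\n", HostDemo::GetProgramName());
  HostDemo::ResolvePath("/tmp/demo", buf, sizeof(buf));
  std::fprintf(stderr, "%s\n", buf);
  return 0;
}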
diff --git a/contrib/llvm-project/lldb/source/API/SBInstruction.cpp b/contrib/llvm-project/lldb/source/API/SBInstruction.cpp
index 579ddf84cf45..6cb9e5dbc1af 100644
--- a/contrib/llvm-project/lldb/source/API/SBInstruction.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBInstruction.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBInstruction.h"
-#include "SBReproducerPrivate.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/API/SBAddress.h"
#include "lldb/API/SBFrame.h"
@@ -66,9 +66,7 @@ protected:
using namespace lldb;
using namespace lldb_private;
-SBInstruction::SBInstruction() : m_opaque_sp() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBInstruction);
-}
+SBInstruction::SBInstruction() { LLDB_INSTRUMENT_VA(this); }
SBInstruction::SBInstruction(const lldb::DisassemblerSP &disasm_sp,
const lldb::InstructionSP &inst_sp)
@@ -76,44 +74,41 @@ SBInstruction::SBInstruction(const lldb::DisassemblerSP &disasm_sp,
SBInstruction::SBInstruction(const SBInstruction &rhs)
: m_opaque_sp(rhs.m_opaque_sp) {
- LLDB_RECORD_CONSTRUCTOR(SBInstruction, (const lldb::SBInstruction &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
const SBInstruction &SBInstruction::operator=(const SBInstruction &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBInstruction &,
- SBInstruction, operator=,(const lldb::SBInstruction &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_sp = rhs.m_opaque_sp;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBInstruction::~SBInstruction() = default;
bool SBInstruction::IsValid() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBInstruction, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBInstruction::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBInstruction, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp && m_opaque_sp->IsValid();
}
SBAddress SBInstruction::GetAddress() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBAddress, SBInstruction, GetAddress);
+ LLDB_INSTRUMENT_VA(this);
SBAddress sb_addr;
lldb::InstructionSP inst_sp(GetOpaque());
if (inst_sp && inst_sp->GetAddress().IsValid())
sb_addr.SetAddress(inst_sp->GetAddress());
- return LLDB_RECORD_RESULT(sb_addr);
+ return sb_addr;
}
const char *SBInstruction::GetMnemonic(SBTarget target) {
- LLDB_RECORD_METHOD(const char *, SBInstruction, GetMnemonic, (lldb::SBTarget),
- target);
+ LLDB_INSTRUMENT_VA(this, target);
lldb::InstructionSP inst_sp(GetOpaque());
if (inst_sp) {
@@ -132,8 +127,7 @@ const char *SBInstruction::GetMnemonic(SBTarget target) {
}
const char *SBInstruction::GetOperands(SBTarget target) {
- LLDB_RECORD_METHOD(const char *, SBInstruction, GetOperands, (lldb::SBTarget),
- target);
+ LLDB_INSTRUMENT_VA(this, target);
lldb::InstructionSP inst_sp(GetOpaque());
if (inst_sp) {
@@ -152,8 +146,7 @@ const char *SBInstruction::GetOperands(SBTarget target) {
}
const char *SBInstruction::GetComment(SBTarget target) {
- LLDB_RECORD_METHOD(const char *, SBInstruction, GetComment, (lldb::SBTarget),
- target);
+ LLDB_INSTRUMENT_VA(this, target);
lldb::InstructionSP inst_sp(GetOpaque());
if (inst_sp) {
@@ -172,7 +165,7 @@ const char *SBInstruction::GetComment(SBTarget target) {
}
size_t SBInstruction::GetByteSize() {
- LLDB_RECORD_METHOD_NO_ARGS(size_t, SBInstruction, GetByteSize);
+ LLDB_INSTRUMENT_VA(this);
lldb::InstructionSP inst_sp(GetOpaque());
if (inst_sp)
@@ -181,8 +174,7 @@ size_t SBInstruction::GetByteSize() {
}
SBData SBInstruction::GetData(SBTarget target) {
- LLDB_RECORD_METHOD(lldb::SBData, SBInstruction, GetData, (lldb::SBTarget),
- target);
+ LLDB_INSTRUMENT_VA(this, target);
lldb::SBData sb_data;
lldb::InstructionSP inst_sp(GetOpaque());
@@ -192,11 +184,11 @@ SBData SBInstruction::GetData(SBTarget target) {
sb_data.SetOpaque(data_extractor_sp);
}
}
- return LLDB_RECORD_RESULT(sb_data);
+ return sb_data;
}
bool SBInstruction::DoesBranch() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBInstruction, DoesBranch);
+ LLDB_INSTRUMENT_VA(this);
lldb::InstructionSP inst_sp(GetOpaque());
if (inst_sp)
@@ -205,7 +197,7 @@ bool SBInstruction::DoesBranch() {
}
bool SBInstruction::HasDelaySlot() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBInstruction, HasDelaySlot);
+ LLDB_INSTRUMENT_VA(this);
lldb::InstructionSP inst_sp(GetOpaque());
if (inst_sp)
@@ -214,7 +206,7 @@ bool SBInstruction::HasDelaySlot() {
}
bool SBInstruction::CanSetBreakpoint() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBInstruction, CanSetBreakpoint);
+ LLDB_INSTRUMENT_VA(this);
lldb::InstructionSP inst_sp(GetOpaque());
if (inst_sp)
@@ -235,8 +227,7 @@ void SBInstruction::SetOpaque(const lldb::DisassemblerSP &disasm_sp,
}
bool SBInstruction::GetDescription(lldb::SBStream &s) {
- LLDB_RECORD_METHOD(bool, SBInstruction, GetDescription, (lldb::SBStream &),
- s);
+ LLDB_INSTRUMENT_VA(this, s);
lldb::InstructionSP inst_sp(GetOpaque());
if (inst_sp) {
@@ -257,18 +248,18 @@ bool SBInstruction::GetDescription(lldb::SBStream &s) {
}
void SBInstruction::Print(FILE *outp) {
- LLDB_RECORD_METHOD(void, SBInstruction, Print, (FILE *), outp);
+ LLDB_INSTRUMENT_VA(this, outp);
FileSP out = std::make_shared<NativeFile>(outp, /*take_ownership=*/false);
Print(out);
}
void SBInstruction::Print(SBFile out) {
- LLDB_RECORD_METHOD(void, SBInstruction, Print, (SBFile), out);
+ LLDB_INSTRUMENT_VA(this, out);
Print(out.m_opaque_sp);
}
void SBInstruction::Print(FileSP out_sp) {
- LLDB_RECORD_METHOD(void, SBInstruction, Print, (FileSP), out_sp);
+ LLDB_INSTRUMENT_VA(this, out_sp);
if (!out_sp || !out_sp->IsValid())
return;
@@ -291,8 +282,7 @@ void SBInstruction::Print(FileSP out_sp) {
bool SBInstruction::EmulateWithFrame(lldb::SBFrame &frame,
uint32_t evaluate_options) {
- LLDB_RECORD_METHOD(bool, SBInstruction, EmulateWithFrame,
- (lldb::SBFrame &, uint32_t), frame, evaluate_options);
+ LLDB_INSTRUMENT_VA(this, frame, evaluate_options);
lldb::InstructionSP inst_sp(GetOpaque());
if (inst_sp) {
@@ -316,8 +306,7 @@ bool SBInstruction::EmulateWithFrame(lldb::SBFrame &frame,
}
bool SBInstruction::DumpEmulation(const char *triple) {
- LLDB_RECORD_METHOD(bool, SBInstruction, DumpEmulation, (const char *),
- triple);
+ LLDB_INSTRUMENT_VA(this, triple);
lldb::InstructionSP inst_sp(GetOpaque());
if (inst_sp && triple) {
@@ -328,9 +317,7 @@ bool SBInstruction::DumpEmulation(const char *triple) {
bool SBInstruction::TestEmulation(lldb::SBStream &output_stream,
const char *test_file) {
- LLDB_RECORD_METHOD(bool, SBInstruction, TestEmulation,
- (lldb::SBStream &, const char *), output_stream,
- test_file);
+ LLDB_INSTRUMENT_VA(this, output_stream, test_file);
if (!m_opaque_sp)
SetOpaque(lldb::DisassemblerSP(),
@@ -341,43 +328,3 @@ bool SBInstruction::TestEmulation(lldb::SBStream &output_stream,
return inst_sp->TestEmulation(output_stream.get(), test_file);
return false;
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBInstruction>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBInstruction, ());
- LLDB_REGISTER_CONSTRUCTOR(SBInstruction, (const lldb::SBInstruction &));
- LLDB_REGISTER_METHOD(
- const lldb::SBInstruction &,
- SBInstruction, operator=,(const lldb::SBInstruction &));
- LLDB_REGISTER_METHOD(bool, SBInstruction, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBInstruction, operator bool, ());
- LLDB_REGISTER_METHOD(lldb::SBAddress, SBInstruction, GetAddress, ());
- LLDB_REGISTER_METHOD(const char *, SBInstruction, GetMnemonic,
- (lldb::SBTarget));
- LLDB_REGISTER_METHOD(const char *, SBInstruction, GetOperands,
- (lldb::SBTarget));
- LLDB_REGISTER_METHOD(const char *, SBInstruction, GetComment,
- (lldb::SBTarget));
- LLDB_REGISTER_METHOD(size_t, SBInstruction, GetByteSize, ());
- LLDB_REGISTER_METHOD(lldb::SBData, SBInstruction, GetData,
- (lldb::SBTarget));
- LLDB_REGISTER_METHOD(bool, SBInstruction, DoesBranch, ());
- LLDB_REGISTER_METHOD(bool, SBInstruction, HasDelaySlot, ());
- LLDB_REGISTER_METHOD(bool, SBInstruction, CanSetBreakpoint, ());
- LLDB_REGISTER_METHOD(bool, SBInstruction, GetDescription,
- (lldb::SBStream &));
- LLDB_REGISTER_METHOD(void, SBInstruction, Print, (FILE *));
- LLDB_REGISTER_METHOD(void, SBInstruction, Print, (SBFile));
- LLDB_REGISTER_METHOD(void, SBInstruction, Print, (FileSP));
- LLDB_REGISTER_METHOD(bool, SBInstruction, EmulateWithFrame,
- (lldb::SBFrame &, uint32_t));
- LLDB_REGISTER_METHOD(bool, SBInstruction, DumpEmulation, (const char *));
- LLDB_REGISTER_METHOD(bool, SBInstruction, TestEmulation,
- (lldb::SBStream &, const char *));
-}
-
-}
-}
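Every hunk in this file follows the same mechanical rewrite: the old reproducer macros (LLDB_RECORD_CONSTRUCTOR, LLDB_RECORD_METHOD, LLDB_RECORD_METHOD_NO_ARGS, LLDB_RECORD_RESULT) collapse into a single variadic LLDB_INSTRUMENT_VA call that lists `this` plus the arguments, and return values are no longer wrapped. A minimal before/after sketch of the pattern, using a hypothetical SBExample class and DoThing method rather than any symbol from this diff:

  // Before: the macro repeats the full signature and the result is wrapped.
  bool SBExample::DoThing(int value) {
    LLDB_RECORD_METHOD(bool, SBExample, DoThing, (int), value);
    return LLDB_RECORD_RESULT(m_impl->DoThing(value));
  }

  // After: one call-site macro records the receiver and arguments; the
  // return statement is left untouched.
  bool SBExample::DoThing(int value) {
    LLDB_INSTRUMENT_VA(this, value);
    return m_impl->DoThing(value);
  }

m_impl is likewise hypothetical; the real SB classes forward to their m_opaque_sp or m_opaque_up members exactly as shown in the hunks above.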
diff --git a/contrib/llvm-project/lldb/source/API/SBInstructionList.cpp b/contrib/llvm-project/lldb/source/API/SBInstructionList.cpp
index a0c6fbe7e338..e289e8e9343d 100644
--- a/contrib/llvm-project/lldb/source/API/SBInstructionList.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBInstructionList.cpp
@@ -7,55 +7,50 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBInstructionList.h"
-#include "SBReproducerPrivate.h"
#include "lldb/API/SBAddress.h"
+#include "lldb/API/SBFile.h"
#include "lldb/API/SBInstruction.h"
#include "lldb/API/SBStream.h"
-#include "lldb/API/SBFile.h"
#include "lldb/Core/Disassembler.h"
#include "lldb/Core/Module.h"
#include "lldb/Core/StreamFile.h"
#include "lldb/Symbol/SymbolContext.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Utility/Stream.h"
using namespace lldb;
using namespace lldb_private;
-SBInstructionList::SBInstructionList() : m_opaque_sp() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBInstructionList);
-}
+SBInstructionList::SBInstructionList() { LLDB_INSTRUMENT_VA(this); }
SBInstructionList::SBInstructionList(const SBInstructionList &rhs)
: m_opaque_sp(rhs.m_opaque_sp) {
- LLDB_RECORD_CONSTRUCTOR(SBInstructionList, (const lldb::SBInstructionList &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
const SBInstructionList &SBInstructionList::
operator=(const SBInstructionList &rhs) {
- LLDB_RECORD_METHOD(
- const lldb::SBInstructionList &,
- SBInstructionList, operator=,(const lldb::SBInstructionList &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_sp = rhs.m_opaque_sp;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBInstructionList::~SBInstructionList() = default;
bool SBInstructionList::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBInstructionList, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBInstructionList::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBInstructionList, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp.get() != nullptr;
}
size_t SBInstructionList::GetSize() {
- LLDB_RECORD_METHOD_NO_ARGS(size_t, SBInstructionList, GetSize);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_sp)
return m_opaque_sp->GetInstructionList().GetSize();
@@ -63,23 +58,20 @@ size_t SBInstructionList::GetSize() {
}
SBInstruction SBInstructionList::GetInstructionAtIndex(uint32_t idx) {
- LLDB_RECORD_METHOD(lldb::SBInstruction, SBInstructionList,
- GetInstructionAtIndex, (uint32_t), idx);
+ LLDB_INSTRUMENT_VA(this, idx);
SBInstruction inst;
if (m_opaque_sp && idx < m_opaque_sp->GetInstructionList().GetSize())
inst.SetOpaque(
m_opaque_sp,
m_opaque_sp->GetInstructionList().GetInstructionAtIndex(idx));
- return LLDB_RECORD_RESULT(inst);
+ return inst;
}
size_t SBInstructionList::GetInstructionsCount(const SBAddress &start,
const SBAddress &end,
bool canSetBreakpoint) {
- LLDB_RECORD_METHOD(size_t, SBInstructionList, GetInstructionsCount,
- (const lldb::SBAddress &, const lldb::SBAddress &, bool),
- start, end, canSetBreakpoint);
+ LLDB_INSTRUMENT_VA(this, start, end, canSetBreakpoint);
size_t num_instructions = GetSize();
size_t i = 0;
@@ -104,14 +96,13 @@ size_t SBInstructionList::GetInstructionsCount(const SBAddress &start,
}
void SBInstructionList::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBInstructionList, Clear);
+ LLDB_INSTRUMENT_VA(this);
m_opaque_sp.reset();
}
void SBInstructionList::AppendInstruction(SBInstruction insn) {
- LLDB_RECORD_METHOD(void, SBInstructionList, AppendInstruction,
- (lldb::SBInstruction), insn);
+ LLDB_INSTRUMENT_VA(this, insn);
}
void SBInstructionList::SetDisassembler(const lldb::DisassemblerSP &opaque_sp) {
@@ -119,7 +110,7 @@ void SBInstructionList::SetDisassembler(const lldb::DisassemblerSP &opaque_sp) {
}
void SBInstructionList::Print(FILE *out) {
- LLDB_RECORD_METHOD(void, SBInstructionList, Print, (FILE *), out);
+ LLDB_INSTRUMENT_VA(this, out);
if (out == nullptr)
return;
StreamFile stream(out, false);
@@ -127,7 +118,7 @@ void SBInstructionList::Print(FILE *out) {
}
void SBInstructionList::Print(SBFile out) {
- LLDB_RECORD_METHOD(void, SBInstructionList, Print, (SBFile), out);
+ LLDB_INSTRUMENT_VA(this, out);
if (!out.IsValid())
return;
StreamFile stream(out.m_opaque_sp);
@@ -135,7 +126,7 @@ void SBInstructionList::Print(SBFile out) {
}
void SBInstructionList::Print(FileSP out_sp) {
- LLDB_RECORD_METHOD(void, SBInstructionList, Print, (FileSP), out_sp);
+ LLDB_INSTRUMENT_VA(this, out_sp);
if (!out_sp || !out_sp->IsValid())
return;
StreamFile stream(out_sp);
@@ -143,8 +134,7 @@ void SBInstructionList::Print(FileSP out_sp) {
}
bool SBInstructionList::GetDescription(lldb::SBStream &stream) {
- LLDB_RECORD_METHOD(bool, SBInstructionList, GetDescription,
- (lldb::SBStream &), stream);
+ LLDB_INSTRUMENT_VA(this, stream);
return GetDescription(stream.ref());
}
@@ -186,8 +176,7 @@ bool SBInstructionList::GetDescription(Stream &sref) {
}
bool SBInstructionList::DumpEmulationForAllInstructions(const char *triple) {
- LLDB_RECORD_METHOD(bool, SBInstructionList, DumpEmulationForAllInstructions,
- (const char *), triple);
+ LLDB_INSTRUMENT_VA(this, triple);
if (m_opaque_sp) {
size_t len = GetSize();
@@ -198,37 +187,3 @@ bool SBInstructionList::DumpEmulationForAllInstructions(const char *triple) {
}
return true;
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBInstructionList>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBInstructionList, ());
- LLDB_REGISTER_CONSTRUCTOR(SBInstructionList,
- (const lldb::SBInstructionList &));
- LLDB_REGISTER_METHOD(
- const lldb::SBInstructionList &,
- SBInstructionList, operator=,(const lldb::SBInstructionList &));
- LLDB_REGISTER_METHOD_CONST(bool, SBInstructionList, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBInstructionList, operator bool, ());
- LLDB_REGISTER_METHOD(size_t, SBInstructionList, GetSize, ());
- LLDB_REGISTER_METHOD(lldb::SBInstruction, SBInstructionList,
- GetInstructionAtIndex, (uint32_t));
- LLDB_REGISTER_METHOD(
- size_t, SBInstructionList, GetInstructionsCount,
- (const lldb::SBAddress &, const lldb::SBAddress &, bool));
- LLDB_REGISTER_METHOD(void, SBInstructionList, Clear, ());
- LLDB_REGISTER_METHOD(void, SBInstructionList, AppendInstruction,
- (lldb::SBInstruction));
- LLDB_REGISTER_METHOD(void, SBInstructionList, Print, (FILE *));
- LLDB_REGISTER_METHOD(void, SBInstructionList, Print, (SBFile));
- LLDB_REGISTER_METHOD(void, SBInstructionList, Print, (FileSP));
- LLDB_REGISTER_METHOD(bool, SBInstructionList, GetDescription,
- (lldb::SBStream &));
- LLDB_REGISTER_METHOD(bool, SBInstructionList,
- DumpEmulationForAllInstructions, (const char *));
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBLanguageRuntime.cpp b/contrib/llvm-project/lldb/source/API/SBLanguageRuntime.cpp
index e65b58270517..d571f282fce0 100644
--- a/contrib/llvm-project/lldb/source/API/SBLanguageRuntime.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBLanguageRuntime.cpp
@@ -7,39 +7,22 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBLanguageRuntime.h"
-#include "SBReproducerPrivate.h"
#include "lldb/Target/Language.h"
+#include "lldb/Utility/Instrumentation.h"
using namespace lldb;
using namespace lldb_private;
lldb::LanguageType
SBLanguageRuntime::GetLanguageTypeFromString(const char *string) {
- LLDB_RECORD_STATIC_METHOD(lldb::LanguageType, SBLanguageRuntime,
- GetLanguageTypeFromString, (const char *), string);
+ LLDB_INSTRUMENT_VA(string);
return Language::GetLanguageTypeFromString(llvm::StringRef(string));
}
const char *
SBLanguageRuntime::GetNameForLanguageType(lldb::LanguageType language) {
- LLDB_RECORD_STATIC_METHOD(const char *, SBLanguageRuntime,
- GetNameForLanguageType, (lldb::LanguageType),
- language);
+ LLDB_INSTRUMENT_VA(language);
return Language::GetNameForLanguageType(language);
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBLanguageRuntime>(Registry &R) {
- LLDB_REGISTER_STATIC_METHOD(lldb::LanguageType, SBLanguageRuntime,
- GetLanguageTypeFromString, (const char *));
- LLDB_REGISTER_STATIC_METHOD(const char *, SBLanguageRuntime,
- GetNameForLanguageType, (lldb::LanguageType));
-}
-
-}
-}
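SBLanguageRuntime shows the static-method variant of the same change: LLDB_RECORD_STATIC_METHOD is replaced by LLDB_INSTRUMENT_VA with only the arguments, since there is no `this` to record. A sketch under the same hypothetical-naming caveat (SBExampleRuntime and Backend are not real LLDB symbols):

  // Static entry point: only the arguments are listed.
  const char *SBExampleRuntime::NameForType(lldb::LanguageType lang) {
    LLDB_INSTRUMENT_VA(lang);        // was LLDB_RECORD_STATIC_METHOD(...)
    return Backend::NameForType(lang);
  }

Presumably a zero-argument LLDB_INSTRUMENT() form exists as well, but only the VA form appears in these hunks.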
diff --git a/contrib/llvm-project/lldb/source/API/SBLaunchInfo.cpp b/contrib/llvm-project/lldb/source/API/SBLaunchInfo.cpp
index 0735e62a16cf..5149feba5e0b 100644
--- a/contrib/llvm-project/lldb/source/API/SBLaunchInfo.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBLaunchInfo.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBLaunchInfo.h"
-#include "SBReproducerPrivate.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/API/SBEnvironment.h"
#include "lldb/API/SBError.h"
@@ -23,8 +23,7 @@ using namespace lldb_private;
class lldb_private::SBLaunchInfoImpl : public ProcessLaunchInfo {
public:
- SBLaunchInfoImpl()
- : ProcessLaunchInfo(), m_envp(GetEnvironment().getEnvp()) {}
+ SBLaunchInfoImpl() : m_envp(GetEnvironment().getEnvp()) {}
const char *const *GetEnvp() const { return m_envp; }
void RegenerateEnvp() { m_envp = GetEnvironment().getEnvp(); }
@@ -41,7 +40,7 @@ private:
SBLaunchInfo::SBLaunchInfo(const char **argv)
: m_opaque_sp(new SBLaunchInfoImpl()) {
- LLDB_RECORD_CONSTRUCTOR(SBLaunchInfo, (const char **), argv);
+ LLDB_INSTRUMENT_VA(this, argv);
m_opaque_sp->GetFlags().Reset(eLaunchFlagDebug | eLaunchFlagDisableASLR);
if (argv && argv[0])
@@ -49,17 +48,16 @@ SBLaunchInfo::SBLaunchInfo(const char **argv)
}
SBLaunchInfo::SBLaunchInfo(const SBLaunchInfo &rhs) {
- LLDB_RECORD_CONSTRUCTOR(SBLaunchInfo, (const lldb::SBLaunchInfo &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_sp = rhs.m_opaque_sp;
}
SBLaunchInfo &SBLaunchInfo::operator=(const SBLaunchInfo &rhs) {
- LLDB_RECORD_METHOD(SBLaunchInfo &,
- SBLaunchInfo, operator=,(const lldb::SBLaunchInfo &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_sp = rhs.m_opaque_sp;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBLaunchInfo::~SBLaunchInfo() = default;
@@ -73,90 +71,86 @@ void SBLaunchInfo::set_ref(const ProcessLaunchInfo &info) {
}
lldb::pid_t SBLaunchInfo::GetProcessID() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::pid_t, SBLaunchInfo, GetProcessID);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GetProcessID();
}
uint32_t SBLaunchInfo::GetUserID() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBLaunchInfo, GetUserID);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GetUserID();
}
uint32_t SBLaunchInfo::GetGroupID() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBLaunchInfo, GetGroupID);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GetGroupID();
}
bool SBLaunchInfo::UserIDIsValid() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBLaunchInfo, UserIDIsValid);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->UserIDIsValid();
}
bool SBLaunchInfo::GroupIDIsValid() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBLaunchInfo, GroupIDIsValid);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GroupIDIsValid();
}
void SBLaunchInfo::SetUserID(uint32_t uid) {
- LLDB_RECORD_METHOD(void, SBLaunchInfo, SetUserID, (uint32_t), uid);
+ LLDB_INSTRUMENT_VA(this, uid);
m_opaque_sp->SetUserID(uid);
}
void SBLaunchInfo::SetGroupID(uint32_t gid) {
- LLDB_RECORD_METHOD(void, SBLaunchInfo, SetGroupID, (uint32_t), gid);
+ LLDB_INSTRUMENT_VA(this, gid);
m_opaque_sp->SetGroupID(gid);
}
SBFileSpec SBLaunchInfo::GetExecutableFile() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBFileSpec, SBLaunchInfo, GetExecutableFile);
+ LLDB_INSTRUMENT_VA(this);
- return LLDB_RECORD_RESULT(SBFileSpec(m_opaque_sp->GetExecutableFile()));
+ return SBFileSpec(m_opaque_sp->GetExecutableFile());
}
void SBLaunchInfo::SetExecutableFile(SBFileSpec exe_file,
bool add_as_first_arg) {
- LLDB_RECORD_METHOD(void, SBLaunchInfo, SetExecutableFile,
- (lldb::SBFileSpec, bool), exe_file, add_as_first_arg);
+ LLDB_INSTRUMENT_VA(this, exe_file, add_as_first_arg);
m_opaque_sp->SetExecutableFile(exe_file.ref(), add_as_first_arg);
}
SBListener SBLaunchInfo::GetListener() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBListener, SBLaunchInfo, GetListener);
+ LLDB_INSTRUMENT_VA(this);
- return LLDB_RECORD_RESULT(SBListener(m_opaque_sp->GetListener()));
+ return SBListener(m_opaque_sp->GetListener());
}
void SBLaunchInfo::SetListener(SBListener &listener) {
- LLDB_RECORD_METHOD(void, SBLaunchInfo, SetListener, (lldb::SBListener &),
- listener);
+ LLDB_INSTRUMENT_VA(this, listener);
m_opaque_sp->SetListener(listener.GetSP());
}
uint32_t SBLaunchInfo::GetNumArguments() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBLaunchInfo, GetNumArguments);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GetArguments().GetArgumentCount();
}
const char *SBLaunchInfo::GetArgumentAtIndex(uint32_t idx) {
- LLDB_RECORD_METHOD(const char *, SBLaunchInfo, GetArgumentAtIndex, (uint32_t),
- idx);
+ LLDB_INSTRUMENT_VA(this, idx);
return m_opaque_sp->GetArguments().GetArgumentAtIndex(idx);
}
void SBLaunchInfo::SetArguments(const char **argv, bool append) {
- LLDB_RECORD_METHOD(void, SBLaunchInfo, SetArguments, (const char **, bool),
- argv, append);
+ LLDB_INSTRUMENT_VA(this, argv, append);
if (append) {
if (argv)
@@ -170,14 +164,13 @@ void SBLaunchInfo::SetArguments(const char **argv, bool append) {
}
uint32_t SBLaunchInfo::GetNumEnvironmentEntries() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBLaunchInfo, GetNumEnvironmentEntries);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GetEnvironment().size();
}
const char *SBLaunchInfo::GetEnvironmentEntryAtIndex(uint32_t idx) {
- LLDB_RECORD_METHOD(const char *, SBLaunchInfo, GetEnvironmentEntryAtIndex,
- (uint32_t), idx);
+ LLDB_INSTRUMENT_VA(this, idx);
if (idx > GetNumEnvironmentEntries())
return nullptr;
@@ -185,14 +178,12 @@ const char *SBLaunchInfo::GetEnvironmentEntryAtIndex(uint32_t idx) {
}
void SBLaunchInfo::SetEnvironmentEntries(const char **envp, bool append) {
- LLDB_RECORD_METHOD(void, SBLaunchInfo, SetEnvironmentEntries,
- (const char **, bool), envp, append);
+ LLDB_INSTRUMENT_VA(this, envp, append);
SetEnvironment(SBEnvironment(Environment(envp)), append);
}
void SBLaunchInfo::SetEnvironment(const SBEnvironment &env, bool append) {
- LLDB_RECORD_METHOD(void, SBLaunchInfo, SetEnvironment,
- (const lldb::SBEnvironment &, bool), env, append);
+ LLDB_INSTRUMENT_VA(this, env, append);
Environment &refEnv = env.ref();
if (append) {
for (auto &KV : refEnv)
@@ -203,58 +194,54 @@ void SBLaunchInfo::SetEnvironment(const SBEnvironment &env, bool append) {
}
SBEnvironment SBLaunchInfo::GetEnvironment() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBEnvironment, SBLaunchInfo, GetEnvironment);
- return LLDB_RECORD_RESULT(
- SBEnvironment(Environment(m_opaque_sp->GetEnvironment())));
+ LLDB_INSTRUMENT_VA(this);
+ return SBEnvironment(Environment(m_opaque_sp->GetEnvironment()));
}
void SBLaunchInfo::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBLaunchInfo, Clear);
+ LLDB_INSTRUMENT_VA(this);
m_opaque_sp->Clear();
}
const char *SBLaunchInfo::GetWorkingDirectory() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBLaunchInfo,
- GetWorkingDirectory);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GetWorkingDirectory().GetCString();
}
void SBLaunchInfo::SetWorkingDirectory(const char *working_dir) {
- LLDB_RECORD_METHOD(void, SBLaunchInfo, SetWorkingDirectory, (const char *),
- working_dir);
+ LLDB_INSTRUMENT_VA(this, working_dir);
m_opaque_sp->SetWorkingDirectory(FileSpec(working_dir));
}
uint32_t SBLaunchInfo::GetLaunchFlags() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBLaunchInfo, GetLaunchFlags);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GetFlags().Get();
}
void SBLaunchInfo::SetLaunchFlags(uint32_t flags) {
- LLDB_RECORD_METHOD(void, SBLaunchInfo, SetLaunchFlags, (uint32_t), flags);
+ LLDB_INSTRUMENT_VA(this, flags);
m_opaque_sp->GetFlags().Reset(flags);
}
const char *SBLaunchInfo::GetProcessPluginName() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBLaunchInfo, GetProcessPluginName);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GetProcessPluginName();
}
void SBLaunchInfo::SetProcessPluginName(const char *plugin_name) {
- LLDB_RECORD_METHOD(void, SBLaunchInfo, SetProcessPluginName, (const char *),
- plugin_name);
+ LLDB_INSTRUMENT_VA(this, plugin_name);
return m_opaque_sp->SetProcessPluginName(plugin_name);
}
const char *SBLaunchInfo::GetShell() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBLaunchInfo, GetShell);
+ LLDB_INSTRUMENT_VA(this);
// Constify this string so that it is saved in the string pool. Otherwise it
// would be freed when this function goes out of scope.
@@ -263,93 +250,86 @@ const char *SBLaunchInfo::GetShell() {
}
void SBLaunchInfo::SetShell(const char *path) {
- LLDB_RECORD_METHOD(void, SBLaunchInfo, SetShell, (const char *), path);
+ LLDB_INSTRUMENT_VA(this, path);
m_opaque_sp->SetShell(FileSpec(path));
}
bool SBLaunchInfo::GetShellExpandArguments() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBLaunchInfo, GetShellExpandArguments);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GetShellExpandArguments();
}
void SBLaunchInfo::SetShellExpandArguments(bool expand) {
- LLDB_RECORD_METHOD(void, SBLaunchInfo, SetShellExpandArguments, (bool),
- expand);
+ LLDB_INSTRUMENT_VA(this, expand);
m_opaque_sp->SetShellExpandArguments(expand);
}
uint32_t SBLaunchInfo::GetResumeCount() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBLaunchInfo, GetResumeCount);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GetResumeCount();
}
void SBLaunchInfo::SetResumeCount(uint32_t c) {
- LLDB_RECORD_METHOD(void, SBLaunchInfo, SetResumeCount, (uint32_t), c);
+ LLDB_INSTRUMENT_VA(this, c);
m_opaque_sp->SetResumeCount(c);
}
bool SBLaunchInfo::AddCloseFileAction(int fd) {
- LLDB_RECORD_METHOD(bool, SBLaunchInfo, AddCloseFileAction, (int), fd);
+ LLDB_INSTRUMENT_VA(this, fd);
return m_opaque_sp->AppendCloseFileAction(fd);
}
bool SBLaunchInfo::AddDuplicateFileAction(int fd, int dup_fd) {
- LLDB_RECORD_METHOD(bool, SBLaunchInfo, AddDuplicateFileAction, (int, int), fd,
- dup_fd);
+ LLDB_INSTRUMENT_VA(this, fd, dup_fd);
return m_opaque_sp->AppendDuplicateFileAction(fd, dup_fd);
}
bool SBLaunchInfo::AddOpenFileAction(int fd, const char *path, bool read,
bool write) {
- LLDB_RECORD_METHOD(bool, SBLaunchInfo, AddOpenFileAction,
- (int, const char *, bool, bool), fd, path, read, write);
+ LLDB_INSTRUMENT_VA(this, fd, path, read, write);
return m_opaque_sp->AppendOpenFileAction(fd, FileSpec(path), read, write);
}
bool SBLaunchInfo::AddSuppressFileAction(int fd, bool read, bool write) {
- LLDB_RECORD_METHOD(bool, SBLaunchInfo, AddSuppressFileAction,
- (int, bool, bool), fd, read, write);
+ LLDB_INSTRUMENT_VA(this, fd, read, write);
return m_opaque_sp->AppendSuppressFileAction(fd, read, write);
}
void SBLaunchInfo::SetLaunchEventData(const char *data) {
- LLDB_RECORD_METHOD(void, SBLaunchInfo, SetLaunchEventData, (const char *),
- data);
+ LLDB_INSTRUMENT_VA(this, data);
m_opaque_sp->SetLaunchEventData(data);
}
const char *SBLaunchInfo::GetLaunchEventData() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBLaunchInfo,
- GetLaunchEventData);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GetLaunchEventData();
}
void SBLaunchInfo::SetDetachOnError(bool enable) {
- LLDB_RECORD_METHOD(void, SBLaunchInfo, SetDetachOnError, (bool), enable);
+ LLDB_INSTRUMENT_VA(this, enable);
m_opaque_sp->SetDetachOnError(enable);
}
bool SBLaunchInfo::GetDetachOnError() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBLaunchInfo, GetDetachOnError);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GetDetachOnError();
}
const char *SBLaunchInfo::GetScriptedProcessClassName() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBLaunchInfo,
- GetScriptedProcessClassName);
+ LLDB_INSTRUMENT_VA(this);
// Constify this string so that it is saved in the string pool. Otherwise it
// would be freed when this function goes out of scope.
@@ -358,15 +338,13 @@ const char *SBLaunchInfo::GetScriptedProcessClassName() const {
}
void SBLaunchInfo::SetScriptedProcessClassName(const char *class_name) {
- LLDB_RECORD_METHOD(void, SBLaunchInfo, SetScriptedProcessClassName,
- (const char *), class_name);
+ LLDB_INSTRUMENT_VA(this, class_name);
m_opaque_sp->SetScriptedProcessClassName(class_name);
}
lldb::SBStructuredData SBLaunchInfo::GetScriptedProcessDictionary() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBStructuredData, SBLaunchInfo,
- GetScriptedProcessDictionary);
+ LLDB_INSTRUMENT_VA(this);
lldb_private::StructuredData::DictionarySP dict_sp =
m_opaque_sp->GetScriptedProcessDictionarySP();
@@ -374,12 +352,11 @@ lldb::SBStructuredData SBLaunchInfo::GetScriptedProcessDictionary() const {
SBStructuredData data;
data.m_impl_up->SetObjectSP(dict_sp);
- return LLDB_RECORD_RESULT(data);
+ return data;
}
void SBLaunchInfo::SetScriptedProcessDictionary(lldb::SBStructuredData dict) {
- LLDB_RECORD_METHOD(void, SBLaunchInfo, SetScriptedProcessDictionary,
- (lldb::SBStructuredData), dict);
+ LLDB_INSTRUMENT_VA(this, dict);
if (!dict.IsValid() || !dict.m_impl_up)
return;
@@ -395,79 +372,3 @@ void SBLaunchInfo::SetScriptedProcessDictionary(lldb::SBStructuredData dict) {
m_opaque_sp->SetScriptedProcessDictionarySP(dict_sp);
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBLaunchInfo>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBLaunchInfo, (const char **));
- LLDB_REGISTER_CONSTRUCTOR(SBLaunchInfo, (const lldb::SBLaunchInfo &));
- LLDB_REGISTER_METHOD(SBLaunchInfo &,
- SBLaunchInfo, operator=,(const lldb::SBLaunchInfo &));
- LLDB_REGISTER_METHOD(lldb::pid_t, SBLaunchInfo, GetProcessID, ());
- LLDB_REGISTER_METHOD(uint32_t, SBLaunchInfo, GetUserID, ());
- LLDB_REGISTER_METHOD(uint32_t, SBLaunchInfo, GetGroupID, ());
- LLDB_REGISTER_METHOD(bool, SBLaunchInfo, UserIDIsValid, ());
- LLDB_REGISTER_METHOD(bool, SBLaunchInfo, GroupIDIsValid, ());
- LLDB_REGISTER_METHOD(void, SBLaunchInfo, SetUserID, (uint32_t));
- LLDB_REGISTER_METHOD(void, SBLaunchInfo, SetGroupID, (uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBFileSpec, SBLaunchInfo, GetExecutableFile, ());
- LLDB_REGISTER_METHOD(void, SBLaunchInfo, SetExecutableFile,
- (lldb::SBFileSpec, bool));
- LLDB_REGISTER_METHOD(lldb::SBListener, SBLaunchInfo, GetListener, ());
- LLDB_REGISTER_METHOD(void, SBLaunchInfo, SetListener, (lldb::SBListener &));
- LLDB_REGISTER_METHOD(uint32_t, SBLaunchInfo, GetNumArguments, ());
- LLDB_REGISTER_METHOD(const char *, SBLaunchInfo, GetArgumentAtIndex,
- (uint32_t));
- LLDB_REGISTER_METHOD(void, SBLaunchInfo, SetArguments,
- (const char **, bool));
- LLDB_REGISTER_METHOD(uint32_t, SBLaunchInfo, GetNumEnvironmentEntries, ());
- LLDB_REGISTER_METHOD(const char *, SBLaunchInfo, GetEnvironmentEntryAtIndex,
- (uint32_t));
- LLDB_REGISTER_METHOD(void, SBLaunchInfo, SetEnvironmentEntries,
- (const char **, bool));
- LLDB_REGISTER_METHOD(void, SBLaunchInfo, Clear, ());
- LLDB_REGISTER_METHOD_CONST(const char *, SBLaunchInfo, GetWorkingDirectory,
- ());
- LLDB_REGISTER_METHOD(void, SBLaunchInfo, SetWorkingDirectory,
- (const char *));
- LLDB_REGISTER_METHOD(uint32_t, SBLaunchInfo, GetLaunchFlags, ());
- LLDB_REGISTER_METHOD(void, SBLaunchInfo, SetLaunchFlags, (uint32_t));
- LLDB_REGISTER_METHOD(const char *, SBLaunchInfo, GetProcessPluginName, ());
- LLDB_REGISTER_METHOD(void, SBLaunchInfo, SetProcessPluginName,
- (const char *));
- LLDB_REGISTER_METHOD(const char *, SBLaunchInfo, GetShell, ());
- LLDB_REGISTER_METHOD(void, SBLaunchInfo, SetShell, (const char *));
- LLDB_REGISTER_METHOD(bool, SBLaunchInfo, GetShellExpandArguments, ());
- LLDB_REGISTER_METHOD(void, SBLaunchInfo, SetShellExpandArguments, (bool));
- LLDB_REGISTER_METHOD(uint32_t, SBLaunchInfo, GetResumeCount, ());
- LLDB_REGISTER_METHOD(void, SBLaunchInfo, SetResumeCount, (uint32_t));
- LLDB_REGISTER_METHOD(bool, SBLaunchInfo, AddCloseFileAction, (int));
- LLDB_REGISTER_METHOD(bool, SBLaunchInfo, AddDuplicateFileAction,
- (int, int));
- LLDB_REGISTER_METHOD(bool, SBLaunchInfo, AddOpenFileAction,
- (int, const char *, bool, bool));
- LLDB_REGISTER_METHOD(bool, SBLaunchInfo, AddSuppressFileAction,
- (int, bool, bool));
- LLDB_REGISTER_METHOD(void, SBLaunchInfo, SetLaunchEventData,
- (const char *));
- LLDB_REGISTER_METHOD_CONST(const char *, SBLaunchInfo, GetLaunchEventData,
- ());
- LLDB_REGISTER_METHOD(void, SBLaunchInfo, SetDetachOnError, (bool));
- LLDB_REGISTER_METHOD_CONST(bool, SBLaunchInfo, GetDetachOnError, ());
- LLDB_REGISTER_METHOD_CONST(const char *, SBLaunchInfo,
- GetScriptedProcessClassName, ());
- LLDB_REGISTER_METHOD(void, SBLaunchInfo, SetScriptedProcessClassName,
- (const char *));
- LLDB_REGISTER_METHOD_CONST(lldb::SBStructuredData, SBLaunchInfo,
- GetScriptedProcessDictionary, ());
- LLDB_REGISTER_METHOD(void, SBLaunchInfo, SetScriptedProcessDictionary,
- (lldb::SBStructuredData));
- LLDB_REGISTER_METHOD(void, SBLaunchInfo, SetEnvironment,
- (const lldb::SBEnvironment &, bool));
- LLDB_REGISTER_METHOD(lldb::SBEnvironment, SBLaunchInfo, GetEnvironment, ());
-}
-
-}
-}
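Besides the macro swap, the SBLaunchInfo hunks also drop redundant member initializers such as ": ProcessLaunchInfo(), ..." and ": m_opaque_sp()", since the base class and smart-pointer members are default-constructed anyway. A tiny illustration with a made-up wrapper class, not taken from this diff:

  #include <memory>
  // LLDB_INSTRUMENT_VA comes from lldb/Utility/Instrumentation.h (see the
  // +#include lines above); SBExampleInfo is a hypothetical stand-in.
  class SBExampleInfo {
    std::shared_ptr<int> m_opaque_sp;  // default-constructs to null on its own
  public:
    SBExampleInfo() { LLDB_INSTRUMENT_VA(this); }  // ": m_opaque_sp()" dropped
  };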
diff --git a/contrib/llvm-project/lldb/source/API/SBLineEntry.cpp b/contrib/llvm-project/lldb/source/API/SBLineEntry.cpp
index 29ffda9b0471..28d12e65fdaf 100644
--- a/contrib/llvm-project/lldb/source/API/SBLineEntry.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBLineEntry.cpp
@@ -7,11 +7,11 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBLineEntry.h"
-#include "SBReproducerPrivate.h"
#include "Utils.h"
#include "lldb/API/SBStream.h"
#include "lldb/Host/PosixApi.h"
#include "lldb/Symbol/LineEntry.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Utility/StreamString.h"
#include <climits>
@@ -19,29 +19,25 @@
using namespace lldb;
using namespace lldb_private;
-SBLineEntry::SBLineEntry() : m_opaque_up() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBLineEntry);
-}
+SBLineEntry::SBLineEntry() { LLDB_INSTRUMENT_VA(this); }
-SBLineEntry::SBLineEntry(const SBLineEntry &rhs) : m_opaque_up() {
- LLDB_RECORD_CONSTRUCTOR(SBLineEntry, (const lldb::SBLineEntry &), rhs);
+SBLineEntry::SBLineEntry(const SBLineEntry &rhs) {
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_up = clone(rhs.m_opaque_up);
}
-SBLineEntry::SBLineEntry(const lldb_private::LineEntry *lldb_object_ptr)
- : m_opaque_up() {
+SBLineEntry::SBLineEntry(const lldb_private::LineEntry *lldb_object_ptr) {
if (lldb_object_ptr)
m_opaque_up = std::make_unique<LineEntry>(*lldb_object_ptr);
}
const SBLineEntry &SBLineEntry::operator=(const SBLineEntry &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBLineEntry &,
- SBLineEntry, operator=,(const lldb::SBLineEntry &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_up = clone(rhs.m_opaque_up);
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
void SBLineEntry::SetLineEntry(const lldb_private::LineEntry &lldb_object_ref) {
@@ -51,49 +47,48 @@ void SBLineEntry::SetLineEntry(const lldb_private::LineEntry &lldb_object_ref) {
SBLineEntry::~SBLineEntry() = default;
SBAddress SBLineEntry::GetStartAddress() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBAddress, SBLineEntry,
- GetStartAddress);
+ LLDB_INSTRUMENT_VA(this);
SBAddress sb_address;
if (m_opaque_up)
sb_address.SetAddress(m_opaque_up->range.GetBaseAddress());
- return LLDB_RECORD_RESULT(sb_address);
+ return sb_address;
}
SBAddress SBLineEntry::GetEndAddress() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBAddress, SBLineEntry, GetEndAddress);
+ LLDB_INSTRUMENT_VA(this);
SBAddress sb_address;
if (m_opaque_up) {
sb_address.SetAddress(m_opaque_up->range.GetBaseAddress());
sb_address.OffsetAddress(m_opaque_up->range.GetByteSize());
}
- return LLDB_RECORD_RESULT(sb_address);
+ return sb_address;
}
bool SBLineEntry::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBLineEntry, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBLineEntry::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBLineEntry, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up.get() && m_opaque_up->IsValid();
}
SBFileSpec SBLineEntry::GetFileSpec() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBFileSpec, SBLineEntry, GetFileSpec);
+ LLDB_INSTRUMENT_VA(this);
SBFileSpec sb_file_spec;
if (m_opaque_up.get() && m_opaque_up->file)
sb_file_spec.SetFileSpec(m_opaque_up->file);
- return LLDB_RECORD_RESULT(sb_file_spec);
+ return sb_file_spec;
}
uint32_t SBLineEntry::GetLine() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBLineEntry, GetLine);
+ LLDB_INSTRUMENT_VA(this);
uint32_t line = 0;
if (m_opaque_up)
@@ -103,7 +98,7 @@ uint32_t SBLineEntry::GetLine() const {
}
uint32_t SBLineEntry::GetColumn() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBLineEntry, GetColumn);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_up)
return m_opaque_up->column;
@@ -111,8 +106,7 @@ uint32_t SBLineEntry::GetColumn() const {
}
void SBLineEntry::SetFileSpec(lldb::SBFileSpec filespec) {
- LLDB_RECORD_METHOD(void, SBLineEntry, SetFileSpec, (lldb::SBFileSpec),
- filespec);
+ LLDB_INSTRUMENT_VA(this, filespec);
if (filespec.IsValid())
ref().file = filespec.ref();
@@ -120,20 +114,19 @@ void SBLineEntry::SetFileSpec(lldb::SBFileSpec filespec) {
ref().file.Clear();
}
void SBLineEntry::SetLine(uint32_t line) {
- LLDB_RECORD_METHOD(void, SBLineEntry, SetLine, (uint32_t), line);
+ LLDB_INSTRUMENT_VA(this, line);
ref().line = line;
}
void SBLineEntry::SetColumn(uint32_t column) {
- LLDB_RECORD_METHOD(void, SBLineEntry, SetColumn, (uint32_t), column);
+ LLDB_INSTRUMENT_VA(this, column);
ref().line = column;
}
bool SBLineEntry::operator==(const SBLineEntry &rhs) const {
- LLDB_RECORD_METHOD_CONST(
- bool, SBLineEntry, operator==,(const lldb::SBLineEntry &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
lldb_private::LineEntry *lhs_ptr = m_opaque_up.get();
lldb_private::LineEntry *rhs_ptr = rhs.m_opaque_up.get();
@@ -145,8 +138,7 @@ bool SBLineEntry::operator==(const SBLineEntry &rhs) const {
}
bool SBLineEntry::operator!=(const SBLineEntry &rhs) const {
- LLDB_RECORD_METHOD_CONST(
- bool, SBLineEntry, operator!=,(const lldb::SBLineEntry &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
lldb_private::LineEntry *lhs_ptr = m_opaque_up.get();
lldb_private::LineEntry *rhs_ptr = rhs.m_opaque_up.get();
@@ -170,8 +162,7 @@ lldb_private::LineEntry &SBLineEntry::ref() {
const lldb_private::LineEntry &SBLineEntry::ref() const { return *m_opaque_up; }
bool SBLineEntry::GetDescription(SBStream &description) {
- LLDB_RECORD_METHOD(bool, SBLineEntry, GetDescription, (lldb::SBStream &),
- description);
+ LLDB_INSTRUMENT_VA(this, description);
Stream &strm = description.ref();
@@ -188,33 +179,3 @@ bool SBLineEntry::GetDescription(SBStream &description) {
}
lldb_private::LineEntry *SBLineEntry::get() { return m_opaque_up.get(); }
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBLineEntry>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBLineEntry, ());
- LLDB_REGISTER_CONSTRUCTOR(SBLineEntry, (const lldb::SBLineEntry &));
- LLDB_REGISTER_METHOD(const lldb::SBLineEntry &,
- SBLineEntry, operator=,(const lldb::SBLineEntry &));
- LLDB_REGISTER_METHOD_CONST(lldb::SBAddress, SBLineEntry, GetStartAddress,
- ());
- LLDB_REGISTER_METHOD_CONST(lldb::SBAddress, SBLineEntry, GetEndAddress, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBLineEntry, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBLineEntry, operator bool, ());
- LLDB_REGISTER_METHOD_CONST(lldb::SBFileSpec, SBLineEntry, GetFileSpec, ());
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBLineEntry, GetLine, ());
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBLineEntry, GetColumn, ());
- LLDB_REGISTER_METHOD(void, SBLineEntry, SetFileSpec, (lldb::SBFileSpec));
- LLDB_REGISTER_METHOD(void, SBLineEntry, SetLine, (uint32_t));
- LLDB_REGISTER_METHOD(void, SBLineEntry, SetColumn, (uint32_t));
- LLDB_REGISTER_METHOD_CONST(
- bool, SBLineEntry, operator==,(const lldb::SBLineEntry &));
- LLDB_REGISTER_METHOD_CONST(
- bool, SBLineEntry, operator!=,(const lldb::SBLineEntry &));
- LLDB_REGISTER_METHOD(bool, SBLineEntry, GetDescription, (lldb::SBStream &));
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBListener.cpp b/contrib/llvm-project/lldb/source/API/SBListener.cpp
index 6e5e15de7b3d..2ce17a5f521d 100644
--- a/contrib/llvm-project/lldb/source/API/SBListener.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBListener.cpp
@@ -7,42 +7,39 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBListener.h"
-#include "SBReproducerPrivate.h"
#include "lldb/API/SBBroadcaster.h"
#include "lldb/API/SBDebugger.h"
#include "lldb/API/SBEvent.h"
#include "lldb/API/SBStream.h"
#include "lldb/Core/Debugger.h"
#include "lldb/Utility/Broadcaster.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Utility/Listener.h"
#include "lldb/Utility/StreamString.h"
using namespace lldb;
using namespace lldb_private;
-SBListener::SBListener() : m_opaque_sp() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBListener);
-}
+SBListener::SBListener() { LLDB_INSTRUMENT_VA(this); }
SBListener::SBListener(const char *name)
: m_opaque_sp(Listener::MakeListener(name)), m_unused_ptr(nullptr) {
- LLDB_RECORD_CONSTRUCTOR(SBListener, (const char *), name);
+ LLDB_INSTRUMENT_VA(this, name);
}
SBListener::SBListener(const SBListener &rhs)
: m_opaque_sp(rhs.m_opaque_sp), m_unused_ptr(nullptr) {
- LLDB_RECORD_CONSTRUCTOR(SBListener, (const lldb::SBListener &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
const lldb::SBListener &SBListener::operator=(const lldb::SBListener &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBListener &,
- SBListener, operator=,(const lldb::SBListener &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs) {
m_opaque_sp = rhs.m_opaque_sp;
m_unused_ptr = nullptr;
}
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBListener::SBListener(const lldb::ListenerSP &listener_sp)
@@ -51,18 +48,17 @@ SBListener::SBListener(const lldb::ListenerSP &listener_sp)
SBListener::~SBListener() = default;
bool SBListener::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBListener, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBListener::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBListener, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp != nullptr;
}
void SBListener::AddEvent(const SBEvent &event) {
- LLDB_RECORD_METHOD(void, SBListener, AddEvent, (const lldb::SBEvent &),
- event);
+ LLDB_INSTRUMENT_VA(this, event);
EventSP &event_sp = event.GetSP();
if (event_sp)
@@ -70,7 +66,7 @@ void SBListener::AddEvent(const SBEvent &event) {
}
void SBListener::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBListener, Clear);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_sp)
m_opaque_sp->Clear();
@@ -79,9 +75,7 @@ void SBListener::Clear() {
uint32_t SBListener::StartListeningForEventClass(SBDebugger &debugger,
const char *broadcaster_class,
uint32_t event_mask) {
- LLDB_RECORD_METHOD(uint32_t, SBListener, StartListeningForEventClass,
- (lldb::SBDebugger &, const char *, uint32_t), debugger,
- broadcaster_class, event_mask);
+ LLDB_INSTRUMENT_VA(this, debugger, broadcaster_class, event_mask);
if (m_opaque_sp) {
Debugger *lldb_debugger = debugger.get();
@@ -97,9 +91,7 @@ uint32_t SBListener::StartListeningForEventClass(SBDebugger &debugger,
bool SBListener::StopListeningForEventClass(SBDebugger &debugger,
const char *broadcaster_class,
uint32_t event_mask) {
- LLDB_RECORD_METHOD(bool, SBListener, StopListeningForEventClass,
- (lldb::SBDebugger &, const char *, uint32_t), debugger,
- broadcaster_class, event_mask);
+ LLDB_INSTRUMENT_VA(this, debugger, broadcaster_class, event_mask);
if (m_opaque_sp) {
Debugger *lldb_debugger = debugger.get();
@@ -114,9 +106,7 @@ bool SBListener::StopListeningForEventClass(SBDebugger &debugger,
uint32_t SBListener::StartListeningForEvents(const SBBroadcaster &broadcaster,
uint32_t event_mask) {
- LLDB_RECORD_METHOD(uint32_t, SBListener, StartListeningForEvents,
- (const lldb::SBBroadcaster &, uint32_t), broadcaster,
- event_mask);
+ LLDB_INSTRUMENT_VA(this, broadcaster, event_mask);
uint32_t acquired_event_mask = 0;
if (m_opaque_sp && broadcaster.IsValid()) {
@@ -129,9 +119,7 @@ uint32_t SBListener::StartListeningForEvents(const SBBroadcaster &broadcaster,
bool SBListener::StopListeningForEvents(const SBBroadcaster &broadcaster,
uint32_t event_mask) {
- LLDB_RECORD_METHOD(bool, SBListener, StopListeningForEvents,
- (const lldb::SBBroadcaster &, uint32_t), broadcaster,
- event_mask);
+ LLDB_INSTRUMENT_VA(this, broadcaster, event_mask);
if (m_opaque_sp && broadcaster.IsValid()) {
return m_opaque_sp->StopListeningForEvents(broadcaster.get(), event_mask);
@@ -140,8 +128,7 @@ bool SBListener::StopListeningForEvents(const SBBroadcaster &broadcaster,
}
bool SBListener::WaitForEvent(uint32_t timeout_secs, SBEvent &event) {
- LLDB_RECORD_METHOD(bool, SBListener, WaitForEvent,
- (uint32_t, lldb::SBEvent &), timeout_secs, event);
+ LLDB_INSTRUMENT_VA(this, timeout_secs, event);
bool success = false;
@@ -167,9 +154,7 @@ bool SBListener::WaitForEvent(uint32_t timeout_secs, SBEvent &event) {
bool SBListener::WaitForEventForBroadcaster(uint32_t num_seconds,
const SBBroadcaster &broadcaster,
SBEvent &event) {
- LLDB_RECORD_METHOD(bool, SBListener, WaitForEventForBroadcaster,
- (uint32_t, const lldb::SBBroadcaster &, lldb::SBEvent &),
- num_seconds, broadcaster, event);
+ LLDB_INSTRUMENT_VA(this, num_seconds, broadcaster, event);
if (m_opaque_sp && broadcaster.IsValid()) {
Timeout<std::micro> timeout(llvm::None);
@@ -189,10 +174,7 @@ bool SBListener::WaitForEventForBroadcaster(uint32_t num_seconds,
bool SBListener::WaitForEventForBroadcasterWithType(
uint32_t num_seconds, const SBBroadcaster &broadcaster,
uint32_t event_type_mask, SBEvent &event) {
- LLDB_RECORD_METHOD(
- bool, SBListener, WaitForEventForBroadcasterWithType,
- (uint32_t, const lldb::SBBroadcaster &, uint32_t, lldb::SBEvent &),
- num_seconds, broadcaster, event_type_mask, event);
+ LLDB_INSTRUMENT_VA(this, num_seconds, broadcaster, event_type_mask, event);
if (m_opaque_sp && broadcaster.IsValid()) {
Timeout<std::micro> timeout(llvm::None);
@@ -210,8 +192,7 @@ bool SBListener::WaitForEventForBroadcasterWithType(
}
bool SBListener::PeekAtNextEvent(SBEvent &event) {
- LLDB_RECORD_METHOD(bool, SBListener, PeekAtNextEvent, (lldb::SBEvent &),
- event);
+ LLDB_INSTRUMENT_VA(this, event);
if (m_opaque_sp) {
event.reset(m_opaque_sp->PeekAtNextEvent());
@@ -223,9 +204,7 @@ bool SBListener::PeekAtNextEvent(SBEvent &event) {
bool SBListener::PeekAtNextEventForBroadcaster(const SBBroadcaster &broadcaster,
SBEvent &event) {
- LLDB_RECORD_METHOD(bool, SBListener, PeekAtNextEventForBroadcaster,
- (const lldb::SBBroadcaster &, lldb::SBEvent &),
- broadcaster, event);
+ LLDB_INSTRUMENT_VA(this, broadcaster, event);
if (m_opaque_sp && broadcaster.IsValid()) {
event.reset(m_opaque_sp->PeekAtNextEventForBroadcaster(broadcaster.get()));
@@ -238,9 +217,7 @@ bool SBListener::PeekAtNextEventForBroadcaster(const SBBroadcaster &broadcaster,
bool SBListener::PeekAtNextEventForBroadcasterWithType(
const SBBroadcaster &broadcaster, uint32_t event_type_mask,
SBEvent &event) {
- LLDB_RECORD_METHOD(bool, SBListener, PeekAtNextEventForBroadcasterWithType,
- (const lldb::SBBroadcaster &, uint32_t, lldb::SBEvent &),
- broadcaster, event_type_mask, event);
+ LLDB_INSTRUMENT_VA(this, broadcaster, event_type_mask, event);
if (m_opaque_sp && broadcaster.IsValid()) {
event.reset(m_opaque_sp->PeekAtNextEventForBroadcasterWithType(
@@ -252,7 +229,7 @@ bool SBListener::PeekAtNextEventForBroadcasterWithType(
}
bool SBListener::GetNextEvent(SBEvent &event) {
- LLDB_RECORD_METHOD(bool, SBListener, GetNextEvent, (lldb::SBEvent &), event);
+ LLDB_INSTRUMENT_VA(this, event);
if (m_opaque_sp) {
EventSP event_sp;
@@ -267,9 +244,7 @@ bool SBListener::GetNextEvent(SBEvent &event) {
bool SBListener::GetNextEventForBroadcaster(const SBBroadcaster &broadcaster,
SBEvent &event) {
- LLDB_RECORD_METHOD(bool, SBListener, GetNextEventForBroadcaster,
- (const lldb::SBBroadcaster &, lldb::SBEvent &),
- broadcaster, event);
+ LLDB_INSTRUMENT_VA(this, broadcaster, event);
if (m_opaque_sp && broadcaster.IsValid()) {
EventSP event_sp;
@@ -286,9 +261,7 @@ bool SBListener::GetNextEventForBroadcaster(const SBBroadcaster &broadcaster,
bool SBListener::GetNextEventForBroadcasterWithType(
const SBBroadcaster &broadcaster, uint32_t event_type_mask,
SBEvent &event) {
- LLDB_RECORD_METHOD(bool, SBListener, GetNextEventForBroadcasterWithType,
- (const lldb::SBBroadcaster &, uint32_t, lldb::SBEvent &),
- broadcaster, event_type_mask, event);
+ LLDB_INSTRUMENT_VA(this, broadcaster, event_type_mask, event);
if (m_opaque_sp && broadcaster.IsValid()) {
EventSP event_sp;
@@ -304,8 +277,7 @@ bool SBListener::GetNextEventForBroadcasterWithType(
}
bool SBListener::HandleBroadcastEvent(const SBEvent &event) {
- LLDB_RECORD_METHOD(bool, SBListener, HandleBroadcastEvent,
- (const lldb::SBEvent &), event);
+ LLDB_INSTRUMENT_VA(this, event);
if (m_opaque_sp)
return m_opaque_sp->HandleBroadcastEvent(event.GetSP());
@@ -322,52 +294,3 @@ void SBListener::reset(ListenerSP listener_sp) {
m_opaque_sp = listener_sp;
m_unused_ptr = nullptr;
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBListener>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBListener, ());
- LLDB_REGISTER_CONSTRUCTOR(SBListener, (const char *));
- LLDB_REGISTER_CONSTRUCTOR(SBListener, (const lldb::SBListener &));
- LLDB_REGISTER_METHOD(const lldb::SBListener &,
- SBListener, operator=,(const lldb::SBListener &));
- LLDB_REGISTER_METHOD_CONST(bool, SBListener, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBListener, operator bool, ());
- LLDB_REGISTER_METHOD(void, SBListener, AddEvent, (const lldb::SBEvent &));
- LLDB_REGISTER_METHOD(void, SBListener, Clear, ());
- LLDB_REGISTER_METHOD(uint32_t, SBListener, StartListeningForEventClass,
- (lldb::SBDebugger &, const char *, uint32_t));
- LLDB_REGISTER_METHOD(bool, SBListener, StopListeningForEventClass,
- (lldb::SBDebugger &, const char *, uint32_t));
- LLDB_REGISTER_METHOD(uint32_t, SBListener, StartListeningForEvents,
- (const lldb::SBBroadcaster &, uint32_t));
- LLDB_REGISTER_METHOD(bool, SBListener, StopListeningForEvents,
- (const lldb::SBBroadcaster &, uint32_t));
- LLDB_REGISTER_METHOD(bool, SBListener, WaitForEvent,
- (uint32_t, lldb::SBEvent &));
- LLDB_REGISTER_METHOD(
- bool, SBListener, WaitForEventForBroadcaster,
- (uint32_t, const lldb::SBBroadcaster &, lldb::SBEvent &));
- LLDB_REGISTER_METHOD(
- bool, SBListener, WaitForEventForBroadcasterWithType,
- (uint32_t, const lldb::SBBroadcaster &, uint32_t, lldb::SBEvent &));
- LLDB_REGISTER_METHOD(bool, SBListener, PeekAtNextEvent, (lldb::SBEvent &));
- LLDB_REGISTER_METHOD(bool, SBListener, PeekAtNextEventForBroadcaster,
- (const lldb::SBBroadcaster &, lldb::SBEvent &));
- LLDB_REGISTER_METHOD(
- bool, SBListener, PeekAtNextEventForBroadcasterWithType,
- (const lldb::SBBroadcaster &, uint32_t, lldb::SBEvent &));
- LLDB_REGISTER_METHOD(bool, SBListener, GetNextEvent, (lldb::SBEvent &));
- LLDB_REGISTER_METHOD(bool, SBListener, GetNextEventForBroadcaster,
- (const lldb::SBBroadcaster &, lldb::SBEvent &));
- LLDB_REGISTER_METHOD(
- bool, SBListener, GetNextEventForBroadcasterWithType,
- (const lldb::SBBroadcaster &, uint32_t, lldb::SBEvent &));
- LLDB_REGISTER_METHOD(bool, SBListener, HandleBroadcastEvent,
- (const lldb::SBEvent &));
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBMemoryRegionInfo.cpp b/contrib/llvm-project/lldb/source/API/SBMemoryRegionInfo.cpp
index 9cf7874b54a3..7d9db478dde1 100644
--- a/contrib/llvm-project/lldb/source/API/SBMemoryRegionInfo.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBMemoryRegionInfo.cpp
@@ -7,29 +7,26 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBMemoryRegionInfo.h"
-#include "SBReproducerPrivate.h"
#include "Utils.h"
#include "lldb/API/SBDefines.h"
#include "lldb/API/SBError.h"
#include "lldb/API/SBStream.h"
#include "lldb/Target/MemoryRegionInfo.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Utility/StreamString.h"
using namespace lldb;
using namespace lldb_private;
SBMemoryRegionInfo::SBMemoryRegionInfo() : m_opaque_up(new MemoryRegionInfo()) {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBMemoryRegionInfo);
+ LLDB_INSTRUMENT_VA(this);
}
SBMemoryRegionInfo::SBMemoryRegionInfo(const char *name, lldb::addr_t begin,
lldb::addr_t end, uint32_t permissions,
bool mapped, bool stack_memory)
: SBMemoryRegionInfo() {
- LLDB_RECORD_CONSTRUCTOR(
- SBMemoryRegionInfo,
- (const char *, lldb::addr_t, lldb::addr_t, uint32_t, bool, bool), name,
- begin, end, permissions, mapped, stack_memory);
+ LLDB_INSTRUMENT_VA(this, name, begin, end, permissions, mapped, stack_memory);
m_opaque_up->SetName(name);
m_opaque_up->GetRange().SetRangeBase(begin);
m_opaque_up->GetRange().SetRangeEnd(end);
@@ -46,44 +43,36 @@ SBMemoryRegionInfo::SBMemoryRegionInfo(const MemoryRegionInfo *lldb_object_ptr)
ref() = *lldb_object_ptr;
}
-SBMemoryRegionInfo::SBMemoryRegionInfo(const SBMemoryRegionInfo &rhs)
- : m_opaque_up() {
- LLDB_RECORD_CONSTRUCTOR(SBMemoryRegionInfo,
- (const lldb::SBMemoryRegionInfo &), rhs);
+SBMemoryRegionInfo::SBMemoryRegionInfo(const SBMemoryRegionInfo &rhs) {
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_up = clone(rhs.m_opaque_up);
}
const SBMemoryRegionInfo &SBMemoryRegionInfo::
operator=(const SBMemoryRegionInfo &rhs) {
- LLDB_RECORD_METHOD(
- const lldb::SBMemoryRegionInfo &,
- SBMemoryRegionInfo, operator=,(const lldb::SBMemoryRegionInfo &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_up = clone(rhs.m_opaque_up);
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBMemoryRegionInfo::~SBMemoryRegionInfo() = default;
void SBMemoryRegionInfo::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBMemoryRegionInfo, Clear);
+ LLDB_INSTRUMENT_VA(this);
m_opaque_up->Clear();
}
bool SBMemoryRegionInfo::operator==(const SBMemoryRegionInfo &rhs) const {
- LLDB_RECORD_METHOD_CONST(
- bool, SBMemoryRegionInfo, operator==,(const lldb::SBMemoryRegionInfo &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
return ref() == rhs.ref();
}
bool SBMemoryRegionInfo::operator!=(const SBMemoryRegionInfo &rhs) const {
- LLDB_RECORD_METHOD_CONST(
- bool, SBMemoryRegionInfo, operator!=,(const lldb::SBMemoryRegionInfo &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
return ref() != rhs.ref();
}
@@ -93,55 +82,55 @@ MemoryRegionInfo &SBMemoryRegionInfo::ref() { return *m_opaque_up; }
const MemoryRegionInfo &SBMemoryRegionInfo::ref() const { return *m_opaque_up; }
lldb::addr_t SBMemoryRegionInfo::GetRegionBase() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::addr_t, SBMemoryRegionInfo, GetRegionBase);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetRange().GetRangeBase();
}
lldb::addr_t SBMemoryRegionInfo::GetRegionEnd() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::addr_t, SBMemoryRegionInfo, GetRegionEnd);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetRange().GetRangeEnd();
}
bool SBMemoryRegionInfo::IsReadable() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBMemoryRegionInfo, IsReadable);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetReadable() == MemoryRegionInfo::eYes;
}
bool SBMemoryRegionInfo::IsWritable() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBMemoryRegionInfo, IsWritable);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetWritable() == MemoryRegionInfo::eYes;
}
bool SBMemoryRegionInfo::IsExecutable() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBMemoryRegionInfo, IsExecutable);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetExecutable() == MemoryRegionInfo::eYes;
}
bool SBMemoryRegionInfo::IsMapped() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBMemoryRegionInfo, IsMapped);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetMapped() == MemoryRegionInfo::eYes;
}
const char *SBMemoryRegionInfo::GetName() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBMemoryRegionInfo, GetName);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetName().AsCString();
}
bool SBMemoryRegionInfo::HasDirtyMemoryPageList() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBMemoryRegionInfo, HasDirtyMemoryPageList);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetDirtyPageList().hasValue();
}
uint32_t SBMemoryRegionInfo::GetNumDirtyPages() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBMemoryRegionInfo, GetNumDirtyPages);
+ LLDB_INSTRUMENT_VA(this);
uint32_t num_dirty_pages = 0;
llvm::Optional<std::vector<addr_t>> dirty_page_list =
@@ -153,8 +142,7 @@ uint32_t SBMemoryRegionInfo::GetNumDirtyPages() {
}
addr_t SBMemoryRegionInfo::GetDirtyPageAddressAtIndex(uint32_t idx) {
- LLDB_RECORD_METHOD(lldb::addr_t, SBMemoryRegionInfo,
- GetDirtyPageAddressAtIndex, (uint32_t), idx);
+ LLDB_INSTRUMENT_VA(this, idx);
addr_t dirty_page_addr = LLDB_INVALID_ADDRESS;
const llvm::Optional<std::vector<addr_t>> &dirty_page_list =
@@ -166,14 +154,13 @@ addr_t SBMemoryRegionInfo::GetDirtyPageAddressAtIndex(uint32_t idx) {
}
int SBMemoryRegionInfo::GetPageSize() {
- LLDB_RECORD_METHOD_NO_ARGS(int, SBMemoryRegionInfo, GetPageSize);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetPageSize();
}
bool SBMemoryRegionInfo::GetDescription(SBStream &description) {
- LLDB_RECORD_METHOD(bool, SBMemoryRegionInfo, GetDescription,
- (lldb::SBStream &), description);
+ LLDB_INSTRUMENT_VA(this, description);
Stream &strm = description.ref();
const addr_t load_addr = m_opaque_up->GetRange().base;
@@ -187,42 +174,3 @@ bool SBMemoryRegionInfo::GetDescription(SBStream &description) {
return true;
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBMemoryRegionInfo>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBMemoryRegionInfo, ());
- LLDB_REGISTER_CONSTRUCTOR(SBMemoryRegionInfo,
- (const lldb::SBMemoryRegionInfo &));
- LLDB_REGISTER_CONSTRUCTOR(
- SBMemoryRegionInfo,
- (const char *, lldb::addr_t, lldb::addr_t, uint32_t, bool, bool));
- LLDB_REGISTER_METHOD(
- const lldb::SBMemoryRegionInfo &,
- SBMemoryRegionInfo, operator=,(const lldb::SBMemoryRegionInfo &));
- LLDB_REGISTER_METHOD(void, SBMemoryRegionInfo, Clear, ());
- LLDB_REGISTER_METHOD_CONST(
- bool,
- SBMemoryRegionInfo, operator==,(const lldb::SBMemoryRegionInfo &));
- LLDB_REGISTER_METHOD_CONST(
- bool,
- SBMemoryRegionInfo, operator!=,(const lldb::SBMemoryRegionInfo &));
- LLDB_REGISTER_METHOD(lldb::addr_t, SBMemoryRegionInfo, GetRegionBase, ());
- LLDB_REGISTER_METHOD(lldb::addr_t, SBMemoryRegionInfo, GetRegionEnd, ());
- LLDB_REGISTER_METHOD(bool, SBMemoryRegionInfo, IsReadable, ());
- LLDB_REGISTER_METHOD(bool, SBMemoryRegionInfo, IsWritable, ());
- LLDB_REGISTER_METHOD(bool, SBMemoryRegionInfo, IsExecutable, ());
- LLDB_REGISTER_METHOD(bool, SBMemoryRegionInfo, IsMapped, ());
- LLDB_REGISTER_METHOD(const char *, SBMemoryRegionInfo, GetName, ());
- LLDB_REGISTER_METHOD(bool, SBMemoryRegionInfo, GetDescription,
- (lldb::SBStream &));
- LLDB_REGISTER_METHOD(bool, SBMemoryRegionInfo, HasDirtyMemoryPageList, ());
- LLDB_REGISTER_METHOD(uint32_t, SBMemoryRegionInfo, GetNumDirtyPages, ());
- LLDB_REGISTER_METHOD(lldb::addr_t, SBMemoryRegionInfo, GetDirtyPageAddressAtIndex, (uint32_t));
- LLDB_REGISTER_METHOD(int, SBMemoryRegionInfo, GetPageSize, ());
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBMemoryRegionInfoList.cpp b/contrib/llvm-project/lldb/source/API/SBMemoryRegionInfoList.cpp
index cd8fc00ffce0..39dee86dc300 100644
--- a/contrib/llvm-project/lldb/source/API/SBMemoryRegionInfoList.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBMemoryRegionInfoList.cpp
@@ -7,10 +7,10 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBMemoryRegionInfoList.h"
-#include "SBReproducerPrivate.h"
#include "lldb/API/SBMemoryRegionInfo.h"
#include "lldb/API/SBStream.h"
#include "lldb/Target/MemoryRegionInfo.h"
+#include "lldb/Utility/Instrumentation.h"
#include <vector>
@@ -83,70 +83,61 @@ const MemoryRegionInfos &SBMemoryRegionInfoList::ref() const {
SBMemoryRegionInfoList::SBMemoryRegionInfoList()
: m_opaque_up(new MemoryRegionInfoListImpl()) {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBMemoryRegionInfoList);
+ LLDB_INSTRUMENT_VA(this);
}
SBMemoryRegionInfoList::SBMemoryRegionInfoList(
const SBMemoryRegionInfoList &rhs)
: m_opaque_up(new MemoryRegionInfoListImpl(*rhs.m_opaque_up)) {
- LLDB_RECORD_CONSTRUCTOR(SBMemoryRegionInfoList,
- (const lldb::SBMemoryRegionInfoList &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
SBMemoryRegionInfoList::~SBMemoryRegionInfoList() = default;
const SBMemoryRegionInfoList &SBMemoryRegionInfoList::
operator=(const SBMemoryRegionInfoList &rhs) {
- LLDB_RECORD_METHOD(
- const lldb::SBMemoryRegionInfoList &,
- SBMemoryRegionInfoList, operator=,(const lldb::SBMemoryRegionInfoList &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs) {
*m_opaque_up = *rhs.m_opaque_up;
}
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
uint32_t SBMemoryRegionInfoList::GetSize() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBMemoryRegionInfoList, GetSize);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetSize();
}
bool SBMemoryRegionInfoList::GetMemoryRegionContainingAddress(
lldb::addr_t addr, SBMemoryRegionInfo &region_info) {
- LLDB_RECORD_METHOD(
- bool, SBMemoryRegionInfoList, GetMemoryRegionContainingAddress,
- (lldb::addr_t, lldb::SBMemoryRegionInfo &), addr, region_info);
+ LLDB_INSTRUMENT_VA(this, addr, region_info);
return m_opaque_up->GetMemoryRegionContainingAddress(addr, region_info.ref());
}
bool SBMemoryRegionInfoList::GetMemoryRegionAtIndex(
uint32_t idx, SBMemoryRegionInfo &region_info) {
- LLDB_RECORD_METHOD(bool, SBMemoryRegionInfoList, GetMemoryRegionAtIndex,
- (uint32_t, lldb::SBMemoryRegionInfo &), idx, region_info);
+ LLDB_INSTRUMENT_VA(this, idx, region_info);
return m_opaque_up->GetMemoryRegionInfoAtIndex(idx, region_info.ref());
}
void SBMemoryRegionInfoList::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBMemoryRegionInfoList, Clear);
+ LLDB_INSTRUMENT_VA(this);
m_opaque_up->Clear();
}
void SBMemoryRegionInfoList::Append(SBMemoryRegionInfo &sb_region) {
- LLDB_RECORD_METHOD(void, SBMemoryRegionInfoList, Append,
- (lldb::SBMemoryRegionInfo &), sb_region);
+ LLDB_INSTRUMENT_VA(this, sb_region);
m_opaque_up->Append(sb_region.ref());
}
void SBMemoryRegionInfoList::Append(SBMemoryRegionInfoList &sb_region_list) {
- LLDB_RECORD_METHOD(void, SBMemoryRegionInfoList, Append,
- (lldb::SBMemoryRegionInfoList &), sb_region_list);
+ LLDB_INSTRUMENT_VA(this, sb_region_list);
m_opaque_up->Append(*sb_region_list);
}
@@ -159,31 +150,3 @@ const MemoryRegionInfoListImpl &SBMemoryRegionInfoList::operator*() const {
assert(m_opaque_up.get());
return *m_opaque_up;
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBMemoryRegionInfoList>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBMemoryRegionInfoList, ());
- LLDB_REGISTER_CONSTRUCTOR(SBMemoryRegionInfoList,
- (const lldb::SBMemoryRegionInfoList &));
- LLDB_REGISTER_METHOD(
- const lldb::SBMemoryRegionInfoList &,
- SBMemoryRegionInfoList, operator=,(
- const lldb::SBMemoryRegionInfoList &));
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBMemoryRegionInfoList, GetSize, ());
- LLDB_REGISTER_METHOD(bool, SBMemoryRegionInfoList,
- GetMemoryRegionContainingAddress,
- (lldb::addr_t, lldb::SBMemoryRegionInfo &));
- LLDB_REGISTER_METHOD(bool, SBMemoryRegionInfoList, GetMemoryRegionAtIndex,
- (uint32_t, lldb::SBMemoryRegionInfo &));
- LLDB_REGISTER_METHOD(void, SBMemoryRegionInfoList, Clear, ());
- LLDB_REGISTER_METHOD(void, SBMemoryRegionInfoList, Append,
- (lldb::SBMemoryRegionInfo &));
- LLDB_REGISTER_METHOD(void, SBMemoryRegionInfoList, Append,
- (lldb::SBMemoryRegionInfoList &));
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBModule.cpp b/contrib/llvm-project/lldb/source/API/SBModule.cpp
index 710ee8551bd6..1454012d3eb9 100644
--- a/contrib/llvm-project/lldb/source/API/SBModule.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBModule.cpp
@@ -7,7 +7,6 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBModule.h"
-#include "SBReproducerPrivate.h"
#include "lldb/API/SBAddress.h"
#include "lldb/API/SBFileSpec.h"
#include "lldb/API/SBModuleSpec.h"
@@ -24,19 +23,18 @@
#include "lldb/Symbol/TypeSystem.h"
#include "lldb/Symbol/VariableList.h"
#include "lldb/Target/Target.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Utility/StreamString.h"
using namespace lldb;
using namespace lldb_private;
-SBModule::SBModule() : m_opaque_sp() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBModule);
-}
+SBModule::SBModule() { LLDB_INSTRUMENT_VA(this); }
SBModule::SBModule(const lldb::ModuleSP &module_sp) : m_opaque_sp(module_sp) {}
-SBModule::SBModule(const SBModuleSpec &module_spec) : m_opaque_sp() {
- LLDB_RECORD_CONSTRUCTOR(SBModule, (const lldb::SBModuleSpec &), module_spec);
+SBModule::SBModule(const SBModuleSpec &module_spec) {
+ LLDB_INSTRUMENT_VA(this, module_spec);
ModuleSP module_sp;
Status error = ModuleList::GetSharedModule(
@@ -46,13 +44,11 @@ SBModule::SBModule(const SBModuleSpec &module_spec) : m_opaque_sp() {
}
SBModule::SBModule(const SBModule &rhs) : m_opaque_sp(rhs.m_opaque_sp) {
- LLDB_RECORD_CONSTRUCTOR(SBModule, (const lldb::SBModule &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
-SBModule::SBModule(lldb::SBProcess &process, lldb::addr_t header_addr)
- : m_opaque_sp() {
- LLDB_RECORD_CONSTRUCTOR(SBModule, (lldb::SBProcess &, lldb::addr_t), process,
- header_addr);
+SBModule::SBModule(lldb::SBProcess &process, lldb::addr_t header_addr) {
+ LLDB_INSTRUMENT_VA(this, process, header_addr);
ProcessSP process_sp(process.GetSP());
if (process_sp) {
@@ -67,58 +63,69 @@ SBModule::SBModule(lldb::SBProcess &process, lldb::addr_t header_addr)
}
const SBModule &SBModule::operator=(const SBModule &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBModule &, SBModule, operator=,
- (const lldb::SBModule &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_sp = rhs.m_opaque_sp;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBModule::~SBModule() = default;
bool SBModule::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBModule, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBModule::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBModule, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp.get() != nullptr;
}
void SBModule::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBModule, Clear);
+ LLDB_INSTRUMENT_VA(this);
m_opaque_sp.reset();
}
+bool SBModule::IsFileBacked() const {
+ LLDB_INSTRUMENT_VA(this);
+
+ ModuleSP module_sp(GetSP());
+ if (!module_sp)
+ return false;
+
+ ObjectFile *obj_file = module_sp->GetObjectFile();
+ if (!obj_file)
+ return false;
+
+ return !obj_file->IsInMemory();
+}
+
SBFileSpec SBModule::GetFileSpec() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBFileSpec, SBModule, GetFileSpec);
+ LLDB_INSTRUMENT_VA(this);
SBFileSpec file_spec;
ModuleSP module_sp(GetSP());
if (module_sp)
file_spec.SetFileSpec(module_sp->GetFileSpec());
- return LLDB_RECORD_RESULT(file_spec);
+ return file_spec;
}
lldb::SBFileSpec SBModule::GetPlatformFileSpec() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBFileSpec, SBModule,
- GetPlatformFileSpec);
+ LLDB_INSTRUMENT_VA(this);
SBFileSpec file_spec;
ModuleSP module_sp(GetSP());
if (module_sp)
file_spec.SetFileSpec(module_sp->GetPlatformFileSpec());
- return LLDB_RECORD_RESULT(file_spec);
+ return file_spec;
}
bool SBModule::SetPlatformFileSpec(const lldb::SBFileSpec &platform_file) {
- LLDB_RECORD_METHOD(bool, SBModule, SetPlatformFileSpec,
- (const lldb::SBFileSpec &), platform_file);
+ LLDB_INSTRUMENT_VA(this, platform_file);
bool result = false;
@@ -132,19 +139,17 @@ bool SBModule::SetPlatformFileSpec(const lldb::SBFileSpec &platform_file) {
}
lldb::SBFileSpec SBModule::GetRemoteInstallFileSpec() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBFileSpec, SBModule,
- GetRemoteInstallFileSpec);
+ LLDB_INSTRUMENT_VA(this);
SBFileSpec sb_file_spec;
ModuleSP module_sp(GetSP());
if (module_sp)
sb_file_spec.SetFileSpec(module_sp->GetRemoteInstallFileSpec());
- return LLDB_RECORD_RESULT(sb_file_spec);
+ return sb_file_spec;
}
bool SBModule::SetRemoteInstallFileSpec(lldb::SBFileSpec &file) {
- LLDB_RECORD_METHOD(bool, SBModule, SetRemoteInstallFileSpec,
- (lldb::SBFileSpec &), file);
+ LLDB_INSTRUMENT_VA(this, file);
ModuleSP module_sp(GetSP());
if (module_sp) {
@@ -155,7 +160,7 @@ bool SBModule::SetRemoteInstallFileSpec(lldb::SBFileSpec &file) {
}
const uint8_t *SBModule::GetUUIDBytes() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const uint8_t *, SBModule, GetUUIDBytes);
+ LLDB_INSTRUMENT_VA(this);
const uint8_t *uuid_bytes = nullptr;
ModuleSP module_sp(GetSP());
@@ -166,7 +171,7 @@ const uint8_t *SBModule::GetUUIDBytes() const {
}
const char *SBModule::GetUUIDString() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBModule, GetUUIDString);
+ LLDB_INSTRUMENT_VA(this);
const char *uuid_cstr = nullptr;
ModuleSP module_sp(GetSP());
@@ -186,8 +191,7 @@ const char *SBModule::GetUUIDString() const {
}
bool SBModule::operator==(const SBModule &rhs) const {
- LLDB_RECORD_METHOD_CONST(bool, SBModule, operator==, (const lldb::SBModule &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (m_opaque_sp)
return m_opaque_sp.get() == rhs.m_opaque_sp.get();
@@ -195,8 +199,7 @@ bool SBModule::operator==(const SBModule &rhs) const {
}
bool SBModule::operator!=(const SBModule &rhs) const {
- LLDB_RECORD_METHOD_CONST(bool, SBModule, operator!=, (const lldb::SBModule &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (m_opaque_sp)
return m_opaque_sp.get() != rhs.m_opaque_sp.get();
@@ -208,8 +211,7 @@ ModuleSP SBModule::GetSP() const { return m_opaque_sp; }
void SBModule::SetSP(const ModuleSP &module_sp) { m_opaque_sp = module_sp; }
SBAddress SBModule::ResolveFileAddress(lldb::addr_t vm_addr) {
- LLDB_RECORD_METHOD(lldb::SBAddress, SBModule, ResolveFileAddress,
- (lldb::addr_t), vm_addr);
+ LLDB_INSTRUMENT_VA(this, vm_addr);
lldb::SBAddress sb_addr;
ModuleSP module_sp(GetSP());
@@ -218,27 +220,24 @@ SBAddress SBModule::ResolveFileAddress(lldb::addr_t vm_addr) {
if (module_sp->ResolveFileAddress(vm_addr, addr))
sb_addr.ref() = addr;
}
- return LLDB_RECORD_RESULT(sb_addr);
+ return sb_addr;
}
SBSymbolContext
SBModule::ResolveSymbolContextForAddress(const SBAddress &addr,
uint32_t resolve_scope) {
- LLDB_RECORD_METHOD(lldb::SBSymbolContext, SBModule,
- ResolveSymbolContextForAddress,
- (const lldb::SBAddress &, uint32_t), addr, resolve_scope);
+ LLDB_INSTRUMENT_VA(this, addr, resolve_scope);
SBSymbolContext sb_sc;
ModuleSP module_sp(GetSP());
SymbolContextItem scope = static_cast<SymbolContextItem>(resolve_scope);
if (module_sp && addr.IsValid())
module_sp->ResolveSymbolContextForAddress(addr.ref(), scope, *sb_sc);
- return LLDB_RECORD_RESULT(sb_sc);
+ return sb_sc;
}
bool SBModule::GetDescription(SBStream &description) {
- LLDB_RECORD_METHOD(bool, SBModule, GetDescription, (lldb::SBStream &),
- description);
+ LLDB_INSTRUMENT_VA(this, description);
Stream &strm = description.ref();
@@ -252,7 +251,7 @@ bool SBModule::GetDescription(SBStream &description) {
}
uint32_t SBModule::GetNumCompileUnits() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBModule, GetNumCompileUnits);
+ LLDB_INSTRUMENT_VA(this);
ModuleSP module_sp(GetSP());
if (module_sp) {
@@ -262,8 +261,7 @@ uint32_t SBModule::GetNumCompileUnits() {
}
SBCompileUnit SBModule::GetCompileUnitAtIndex(uint32_t index) {
- LLDB_RECORD_METHOD(lldb::SBCompileUnit, SBModule, GetCompileUnitAtIndex,
- (uint32_t), index);
+ LLDB_INSTRUMENT_VA(this, index);
SBCompileUnit sb_cu;
ModuleSP module_sp(GetSP());
@@ -271,19 +269,18 @@ SBCompileUnit SBModule::GetCompileUnitAtIndex(uint32_t index) {
CompUnitSP cu_sp = module_sp->GetCompileUnitAtIndex(index);
sb_cu.reset(cu_sp.get());
}
- return LLDB_RECORD_RESULT(sb_cu);
+ return sb_cu;
}
SBSymbolContextList SBModule::FindCompileUnits(const SBFileSpec &sb_file_spec) {
- LLDB_RECORD_METHOD(lldb::SBSymbolContextList, SBModule, FindCompileUnits,
- (const lldb::SBFileSpec &), sb_file_spec);
+ LLDB_INSTRUMENT_VA(this, sb_file_spec);
SBSymbolContextList sb_sc_list;
const ModuleSP module_sp(GetSP());
if (sb_file_spec.IsValid() && module_sp) {
module_sp->FindCompileUnits(*sb_file_spec, *sb_sc_list);
}
- return LLDB_RECORD_RESULT(sb_sc_list);
+ return sb_sc_list;
}
static Symtab *GetUnifiedSymbolTable(const lldb::ModuleSP &module_sp) {
@@ -293,7 +290,7 @@ static Symtab *GetUnifiedSymbolTable(const lldb::ModuleSP &module_sp) {
}
size_t SBModule::GetNumSymbols() {
- LLDB_RECORD_METHOD_NO_ARGS(size_t, SBModule, GetNumSymbols);
+ LLDB_INSTRUMENT_VA(this);
ModuleSP module_sp(GetSP());
if (Symtab *symtab = GetUnifiedSymbolTable(module_sp))
@@ -302,20 +299,19 @@ size_t SBModule::GetNumSymbols() {
}
SBSymbol SBModule::GetSymbolAtIndex(size_t idx) {
- LLDB_RECORD_METHOD(lldb::SBSymbol, SBModule, GetSymbolAtIndex, (size_t), idx);
+ LLDB_INSTRUMENT_VA(this, idx);
SBSymbol sb_symbol;
ModuleSP module_sp(GetSP());
Symtab *symtab = GetUnifiedSymbolTable(module_sp);
if (symtab)
sb_symbol.SetSymbol(symtab->SymbolAtIndex(idx));
- return LLDB_RECORD_RESULT(sb_symbol);
+ return sb_symbol;
}
lldb::SBSymbol SBModule::FindSymbol(const char *name,
lldb::SymbolType symbol_type) {
- LLDB_RECORD_METHOD(lldb::SBSymbol, SBModule, FindSymbol,
- (const char *, lldb::SymbolType), name, symbol_type);
+ LLDB_INSTRUMENT_VA(this, name, symbol_type);
SBSymbol sb_symbol;
if (name && name[0]) {
@@ -326,13 +322,12 @@ lldb::SBSymbol SBModule::FindSymbol(const char *name,
ConstString(name), symbol_type, Symtab::eDebugAny,
Symtab::eVisibilityAny));
}
- return LLDB_RECORD_RESULT(sb_symbol);
+ return sb_symbol;
}
lldb::SBSymbolContextList SBModule::FindSymbols(const char *name,
lldb::SymbolType symbol_type) {
- LLDB_RECORD_METHOD(lldb::SBSymbolContextList, SBModule, FindSymbols,
- (const char *, lldb::SymbolType), name, symbol_type);
+ LLDB_INSTRUMENT_VA(this, name, symbol_type);
SBSymbolContextList sb_sc_list;
if (name && name[0]) {
@@ -355,11 +350,11 @@ lldb::SBSymbolContextList SBModule::FindSymbols(const char *name,
}
}
}
- return LLDB_RECORD_RESULT(sb_sc_list);
+ return sb_sc_list;
}
size_t SBModule::GetNumSections() {
- LLDB_RECORD_METHOD_NO_ARGS(size_t, SBModule, GetNumSections);
+ LLDB_INSTRUMENT_VA(this);
ModuleSP module_sp(GetSP());
if (module_sp) {
@@ -373,8 +368,7 @@ size_t SBModule::GetNumSections() {
}
SBSection SBModule::GetSectionAtIndex(size_t idx) {
- LLDB_RECORD_METHOD(lldb::SBSection, SBModule, GetSectionAtIndex, (size_t),
- idx);
+ LLDB_INSTRUMENT_VA(this, idx);
SBSection sb_section;
ModuleSP module_sp(GetSP());
@@ -386,13 +380,12 @@ SBSection SBModule::GetSectionAtIndex(size_t idx) {
if (section_list)
sb_section.SetSP(section_list->GetSectionAtIndex(idx));
}
- return LLDB_RECORD_RESULT(sb_section);
+ return sb_section;
}
lldb::SBSymbolContextList SBModule::FindFunctions(const char *name,
uint32_t name_type_mask) {
- LLDB_RECORD_METHOD(lldb::SBSymbolContextList, SBModule, FindFunctions,
- (const char *, uint32_t), name, name_type_mask);
+ LLDB_INSTRUMENT_VA(this, name, name_type_mask);
lldb::SBSymbolContextList sb_sc_list;
ModuleSP module_sp(GetSP());
@@ -405,14 +398,12 @@ lldb::SBSymbolContextList SBModule::FindFunctions(const char *name,
module_sp->FindFunctions(ConstString(name), CompilerDeclContext(), type,
function_options, *sb_sc_list);
}
- return LLDB_RECORD_RESULT(sb_sc_list);
+ return sb_sc_list;
}
SBValueList SBModule::FindGlobalVariables(SBTarget &target, const char *name,
uint32_t max_matches) {
- LLDB_RECORD_METHOD(lldb::SBValueList, SBModule, FindGlobalVariables,
- (lldb::SBTarget &, const char *, uint32_t), target, name,
- max_matches);
+ LLDB_INSTRUMENT_VA(this, target, name, max_matches);
SBValueList sb_value_list;
ModuleSP module_sp(GetSP());
@@ -429,23 +420,21 @@ SBValueList SBModule::FindGlobalVariables(SBTarget &target, const char *name,
}
}
- return LLDB_RECORD_RESULT(sb_value_list);
+ return sb_value_list;
}
lldb::SBValue SBModule::FindFirstGlobalVariable(lldb::SBTarget &target,
const char *name) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBModule, FindFirstGlobalVariable,
- (lldb::SBTarget &, const char *), target, name);
+ LLDB_INSTRUMENT_VA(this, target, name);
SBValueList sb_value_list(FindGlobalVariables(target, name, 1));
if (sb_value_list.IsValid() && sb_value_list.GetSize() > 0)
- return LLDB_RECORD_RESULT(sb_value_list.GetValueAtIndex(0));
- return LLDB_RECORD_RESULT(SBValue());
+ return sb_value_list.GetValueAtIndex(0);
+ return SBValue();
}
lldb::SBType SBModule::FindFirstType(const char *name_cstr) {
- LLDB_RECORD_METHOD(lldb::SBType, SBModule, FindFirstType, (const char *),
- name_cstr);
+ LLDB_INSTRUMENT_VA(this, name_cstr);
SBType sb_type;
ModuleSP module_sp(GetSP());
@@ -461,17 +450,16 @@ lldb::SBType SBModule::FindFirstType(const char *name_cstr) {
module_sp->GetTypeSystemForLanguage(eLanguageTypeC);
if (auto err = type_system_or_err.takeError()) {
llvm::consumeError(std::move(err));
- return LLDB_RECORD_RESULT(SBType());
+ return SBType();
}
sb_type = SBType(type_system_or_err->GetBuiltinTypeByName(name));
}
}
- return LLDB_RECORD_RESULT(sb_type);
+ return sb_type;
}
lldb::SBType SBModule::GetBasicType(lldb::BasicType type) {
- LLDB_RECORD_METHOD(lldb::SBType, SBModule, GetBasicType, (lldb::BasicType),
- type);
+ LLDB_INSTRUMENT_VA(this, type);
ModuleSP module_sp(GetSP());
if (module_sp) {
@@ -480,16 +468,14 @@ lldb::SBType SBModule::GetBasicType(lldb::BasicType type) {
if (auto err = type_system_or_err.takeError()) {
llvm::consumeError(std::move(err));
} else {
- return LLDB_RECORD_RESULT(
- SBType(type_system_or_err->GetBasicTypeFromAST(type)));
+ return SBType(type_system_or_err->GetBasicTypeFromAST(type));
}
}
- return LLDB_RECORD_RESULT(SBType());
+ return SBType();
}
lldb::SBTypeList SBModule::FindTypes(const char *type) {
- LLDB_RECORD_METHOD(lldb::SBTypeList, SBModule, FindTypes, (const char *),
- type);
+ LLDB_INSTRUMENT_VA(this, type);
SBTypeList retval;
@@ -521,47 +507,44 @@ lldb::SBTypeList SBModule::FindTypes(const char *type) {
}
}
}
- return LLDB_RECORD_RESULT(retval);
+ return retval;
}
lldb::SBType SBModule::GetTypeByID(lldb::user_id_t uid) {
- LLDB_RECORD_METHOD(lldb::SBType, SBModule, GetTypeByID, (lldb::user_id_t),
- uid);
+ LLDB_INSTRUMENT_VA(this, uid);
ModuleSP module_sp(GetSP());
if (module_sp) {
if (SymbolFile *symfile = module_sp->GetSymbolFile()) {
Type *type_ptr = symfile->ResolveTypeUID(uid);
if (type_ptr)
- return LLDB_RECORD_RESULT(SBType(type_ptr->shared_from_this()));
+ return SBType(type_ptr->shared_from_this());
}
}
- return LLDB_RECORD_RESULT(SBType());
+ return SBType();
}
lldb::SBTypeList SBModule::GetTypes(uint32_t type_mask) {
- LLDB_RECORD_METHOD(lldb::SBTypeList, SBModule, GetTypes, (uint32_t),
- type_mask);
+ LLDB_INSTRUMENT_VA(this, type_mask);
SBTypeList sb_type_list;
ModuleSP module_sp(GetSP());
if (!module_sp)
- return LLDB_RECORD_RESULT(sb_type_list);
+ return sb_type_list;
SymbolFile *symfile = module_sp->GetSymbolFile();
if (!symfile)
- return LLDB_RECORD_RESULT(sb_type_list);
+ return sb_type_list;
TypeClass type_class = static_cast<TypeClass>(type_mask);
TypeList type_list;
symfile->GetTypes(nullptr, type_class, type_list);
sb_type_list.m_opaque_up->Append(type_list);
- return LLDB_RECORD_RESULT(sb_type_list);
+ return sb_type_list;
}
SBSection SBModule::FindSection(const char *sect_name) {
- LLDB_RECORD_METHOD(lldb::SBSection, SBModule, FindSection, (const char *),
- sect_name);
+ LLDB_INSTRUMENT_VA(this, sect_name);
SBSection sb_section;
@@ -578,11 +561,11 @@ SBSection SBModule::FindSection(const char *sect_name) {
}
}
}
- return LLDB_RECORD_RESULT(sb_section);
+ return sb_section;
}
lldb::ByteOrder SBModule::GetByteOrder() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::ByteOrder, SBModule, GetByteOrder);
+ LLDB_INSTRUMENT_VA(this);
ModuleSP module_sp(GetSP());
if (module_sp)
@@ -591,7 +574,7 @@ lldb::ByteOrder SBModule::GetByteOrder() {
}
const char *SBModule::GetTriple() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBModule, GetTriple);
+ LLDB_INSTRUMENT_VA(this);
ModuleSP module_sp(GetSP());
if (module_sp) {
@@ -606,7 +589,7 @@ const char *SBModule::GetTriple() {
}
uint32_t SBModule::GetAddressByteSize() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBModule, GetAddressByteSize);
+ LLDB_INSTRUMENT_VA(this);
ModuleSP module_sp(GetSP());
if (module_sp)
@@ -615,8 +598,7 @@ uint32_t SBModule::GetAddressByteSize() {
}
uint32_t SBModule::GetVersion(uint32_t *versions, uint32_t num_versions) {
- LLDB_RECORD_METHOD(uint32_t, SBModule, GetVersion, (uint32_t *, uint32_t),
- versions, num_versions);
+ LLDB_INSTRUMENT_VA(this, versions, num_versions);
llvm::VersionTuple version;
if (ModuleSP module_sp = GetSP())
@@ -644,8 +626,7 @@ uint32_t SBModule::GetVersion(uint32_t *versions, uint32_t num_versions) {
}
lldb::SBFileSpec SBModule::GetSymbolFileSpec() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBFileSpec, SBModule,
- GetSymbolFileSpec);
+ LLDB_INSTRUMENT_VA(this);
lldb::SBFileSpec sb_file_spec;
ModuleSP module_sp(GetSP());
@@ -653,12 +634,11 @@ lldb::SBFileSpec SBModule::GetSymbolFileSpec() const {
if (SymbolFile *symfile = module_sp->GetSymbolFile())
sb_file_spec.SetFileSpec(symfile->GetObjectFile()->GetFileSpec());
}
- return LLDB_RECORD_RESULT(sb_file_spec);
+ return sb_file_spec;
}
lldb::SBAddress SBModule::GetObjectFileHeaderAddress() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBAddress, SBModule,
- GetObjectFileHeaderAddress);
+ LLDB_INSTRUMENT_VA(this);
lldb::SBAddress sb_addr;
ModuleSP module_sp(GetSP());
@@ -667,12 +647,11 @@ lldb::SBAddress SBModule::GetObjectFileHeaderAddress() const {
if (objfile_ptr)
sb_addr.ref() = objfile_ptr->GetBaseAddress();
}
- return LLDB_RECORD_RESULT(sb_addr);
+ return sb_addr;
}
lldb::SBAddress SBModule::GetObjectFileEntryPointAddress() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBAddress, SBModule,
- GetObjectFileEntryPointAddress);
+ LLDB_INSTRUMENT_VA(this);
lldb::SBAddress sb_addr;
ModuleSP module_sp(GetSP());
@@ -681,96 +660,18 @@ lldb::SBAddress SBModule::GetObjectFileEntryPointAddress() const {
if (objfile_ptr)
sb_addr.ref() = objfile_ptr->GetEntryPointAddress();
}
- return LLDB_RECORD_RESULT(sb_addr);
+ return sb_addr;
}
uint32_t SBModule::GetNumberAllocatedModules() {
- LLDB_RECORD_STATIC_METHOD_NO_ARGS(uint32_t, SBModule,
- GetNumberAllocatedModules);
+ LLDB_INSTRUMENT();
return Module::GetNumberAllocatedModules();
}
void SBModule::GarbageCollectAllocatedModules() {
- LLDB_RECORD_STATIC_METHOD_NO_ARGS(void, SBModule,
- GarbageCollectAllocatedModules);
+ LLDB_INSTRUMENT();
const bool mandatory = false;
ModuleList::RemoveOrphanSharedModules(mandatory);
}
-
-namespace lldb_private {
-namespace repro {
-
-template <> void RegisterMethods<SBModule>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBModule, ());
- LLDB_REGISTER_CONSTRUCTOR(SBModule, (const lldb::SBModuleSpec &));
- LLDB_REGISTER_CONSTRUCTOR(SBModule, (const lldb::SBModule &));
- LLDB_REGISTER_CONSTRUCTOR(SBModule, (lldb::SBProcess &, lldb::addr_t));
- LLDB_REGISTER_METHOD(const lldb::SBModule &, SBModule, operator=,
- (const lldb::SBModule &));
- LLDB_REGISTER_METHOD_CONST(bool, SBModule, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBModule, operator bool, ());
- LLDB_REGISTER_METHOD(void, SBModule, Clear, ());
- LLDB_REGISTER_METHOD_CONST(lldb::SBFileSpec, SBModule, GetFileSpec, ());
- LLDB_REGISTER_METHOD_CONST(lldb::SBFileSpec, SBModule, GetPlatformFileSpec,
- ());
- LLDB_REGISTER_METHOD(bool, SBModule, SetPlatformFileSpec,
- (const lldb::SBFileSpec &));
- LLDB_REGISTER_METHOD(lldb::SBFileSpec, SBModule, GetRemoteInstallFileSpec,
- ());
- LLDB_REGISTER_METHOD(bool, SBModule, SetRemoteInstallFileSpec,
- (lldb::SBFileSpec &));
- LLDB_REGISTER_METHOD_CONST(const char *, SBModule, GetUUIDString, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBModule, operator==,
- (const lldb::SBModule &));
- LLDB_REGISTER_METHOD_CONST(bool, SBModule, operator!=,
- (const lldb::SBModule &));
- LLDB_REGISTER_METHOD(lldb::SBAddress, SBModule, ResolveFileAddress,
- (lldb::addr_t));
- LLDB_REGISTER_METHOD(lldb::SBSymbolContext, SBModule,
- ResolveSymbolContextForAddress,
- (const lldb::SBAddress &, uint32_t));
- LLDB_REGISTER_METHOD(bool, SBModule, GetDescription, (lldb::SBStream &));
- LLDB_REGISTER_METHOD(uint32_t, SBModule, GetNumCompileUnits, ());
- LLDB_REGISTER_METHOD(lldb::SBCompileUnit, SBModule, GetCompileUnitAtIndex,
- (uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBSymbolContextList, SBModule, FindCompileUnits,
- (const lldb::SBFileSpec &));
- LLDB_REGISTER_METHOD(size_t, SBModule, GetNumSymbols, ());
- LLDB_REGISTER_METHOD(lldb::SBSymbol, SBModule, GetSymbolAtIndex, (size_t));
- LLDB_REGISTER_METHOD(lldb::SBSymbol, SBModule, FindSymbol,
- (const char *, lldb::SymbolType));
- LLDB_REGISTER_METHOD(lldb::SBSymbolContextList, SBModule, FindSymbols,
- (const char *, lldb::SymbolType));
- LLDB_REGISTER_METHOD(size_t, SBModule, GetNumSections, ());
- LLDB_REGISTER_METHOD(lldb::SBSection, SBModule, GetSectionAtIndex, (size_t));
- LLDB_REGISTER_METHOD(lldb::SBSymbolContextList, SBModule, FindFunctions,
- (const char *, uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBValueList, SBModule, FindGlobalVariables,
- (lldb::SBTarget &, const char *, uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBModule, FindFirstGlobalVariable,
- (lldb::SBTarget &, const char *));
- LLDB_REGISTER_METHOD(lldb::SBType, SBModule, FindFirstType, (const char *));
- LLDB_REGISTER_METHOD(lldb::SBType, SBModule, GetBasicType, (lldb::BasicType));
- LLDB_REGISTER_METHOD(lldb::SBTypeList, SBModule, FindTypes, (const char *));
- LLDB_REGISTER_METHOD(lldb::SBType, SBModule, GetTypeByID, (lldb::user_id_t));
- LLDB_REGISTER_METHOD(lldb::SBTypeList, SBModule, GetTypes, (uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBSection, SBModule, FindSection, (const char *));
- LLDB_REGISTER_METHOD(lldb::ByteOrder, SBModule, GetByteOrder, ());
- LLDB_REGISTER_METHOD(const char *, SBModule, GetTriple, ());
- LLDB_REGISTER_METHOD(uint32_t, SBModule, GetAddressByteSize, ());
- LLDB_REGISTER_METHOD(uint32_t, SBModule, GetVersion, (uint32_t *, uint32_t));
- LLDB_REGISTER_METHOD_CONST(lldb::SBFileSpec, SBModule, GetSymbolFileSpec, ());
- LLDB_REGISTER_METHOD_CONST(lldb::SBAddress, SBModule,
- GetObjectFileHeaderAddress, ());
- LLDB_REGISTER_METHOD_CONST(lldb::SBAddress, SBModule,
- GetObjectFileEntryPointAddress, ());
- LLDB_REGISTER_STATIC_METHOD(uint32_t, SBModule, GetNumberAllocatedModules,
- ());
- LLDB_REGISTER_STATIC_METHOD(void, SBModule, GarbageCollectAllocatedModules,
- ());
-}
-
-} // namespace repro
-} // namespace lldb_private
diff --git a/contrib/llvm-project/lldb/source/API/SBModuleSpec.cpp b/contrib/llvm-project/lldb/source/API/SBModuleSpec.cpp
index 5d88272a399b..7deba8e971f8 100644
--- a/contrib/llvm-project/lldb/source/API/SBModuleSpec.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBModuleSpec.cpp
@@ -7,110 +7,105 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBModuleSpec.h"
-#include "SBReproducerPrivate.h"
#include "Utils.h"
#include "lldb/API/SBStream.h"
#include "lldb/Core/Module.h"
#include "lldb/Core/ModuleSpec.h"
#include "lldb/Host/Host.h"
#include "lldb/Symbol/ObjectFile.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Utility/Stream.h"
using namespace lldb;
using namespace lldb_private;
SBModuleSpec::SBModuleSpec() : m_opaque_up(new lldb_private::ModuleSpec()) {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBModuleSpec);
+ LLDB_INSTRUMENT_VA(this);
}
-SBModuleSpec::SBModuleSpec(const SBModuleSpec &rhs) : m_opaque_up() {
- LLDB_RECORD_CONSTRUCTOR(SBModuleSpec, (const lldb::SBModuleSpec &), rhs);
+SBModuleSpec::SBModuleSpec(const SBModuleSpec &rhs) {
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_up = clone(rhs.m_opaque_up);
}
const SBModuleSpec &SBModuleSpec::operator=(const SBModuleSpec &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBModuleSpec &,
- SBModuleSpec, operator=,(const lldb::SBModuleSpec &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_up = clone(rhs.m_opaque_up);
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBModuleSpec::~SBModuleSpec() = default;
bool SBModuleSpec::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBModuleSpec, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBModuleSpec::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBModuleSpec, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->operator bool();
}
void SBModuleSpec::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBModuleSpec, Clear);
+ LLDB_INSTRUMENT_VA(this);
m_opaque_up->Clear();
}
SBFileSpec SBModuleSpec::GetFileSpec() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBFileSpec, SBModuleSpec, GetFileSpec);
+ LLDB_INSTRUMENT_VA(this);
SBFileSpec sb_spec(m_opaque_up->GetFileSpec());
- return LLDB_RECORD_RESULT(sb_spec);
+ return sb_spec;
}
void SBModuleSpec::SetFileSpec(const lldb::SBFileSpec &sb_spec) {
- LLDB_RECORD_METHOD(void, SBModuleSpec, SetFileSpec,
- (const lldb::SBFileSpec &), sb_spec);
+ LLDB_INSTRUMENT_VA(this, sb_spec);
m_opaque_up->GetFileSpec() = *sb_spec;
}
lldb::SBFileSpec SBModuleSpec::GetPlatformFileSpec() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBFileSpec, SBModuleSpec,
- GetPlatformFileSpec);
+ LLDB_INSTRUMENT_VA(this);
- return LLDB_RECORD_RESULT(SBFileSpec(m_opaque_up->GetPlatformFileSpec()));
+ return SBFileSpec(m_opaque_up->GetPlatformFileSpec());
}
void SBModuleSpec::SetPlatformFileSpec(const lldb::SBFileSpec &sb_spec) {
- LLDB_RECORD_METHOD(void, SBModuleSpec, SetPlatformFileSpec,
- (const lldb::SBFileSpec &), sb_spec);
+ LLDB_INSTRUMENT_VA(this, sb_spec);
m_opaque_up->GetPlatformFileSpec() = *sb_spec;
}
lldb::SBFileSpec SBModuleSpec::GetSymbolFileSpec() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBFileSpec, SBModuleSpec, GetSymbolFileSpec);
+ LLDB_INSTRUMENT_VA(this);
- return LLDB_RECORD_RESULT(SBFileSpec(m_opaque_up->GetSymbolFileSpec()));
+ return SBFileSpec(m_opaque_up->GetSymbolFileSpec());
}
void SBModuleSpec::SetSymbolFileSpec(const lldb::SBFileSpec &sb_spec) {
- LLDB_RECORD_METHOD(void, SBModuleSpec, SetSymbolFileSpec,
- (const lldb::SBFileSpec &), sb_spec);
+ LLDB_INSTRUMENT_VA(this, sb_spec);
m_opaque_up->GetSymbolFileSpec() = *sb_spec;
}
const char *SBModuleSpec::GetObjectName() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBModuleSpec, GetObjectName);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetObjectName().GetCString();
}
void SBModuleSpec::SetObjectName(const char *name) {
- LLDB_RECORD_METHOD(void, SBModuleSpec, SetObjectName, (const char *), name);
+ LLDB_INSTRUMENT_VA(this, name);
m_opaque_up->GetObjectName().SetCString(name);
}
const char *SBModuleSpec::GetTriple() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBModuleSpec, GetTriple);
+ LLDB_INSTRUMENT_VA(this);
std::string triple(m_opaque_up->GetArchitecture().GetTriple().str());
// Unique the string so we don't run into ownership issues since the const
@@ -121,180 +116,114 @@ const char *SBModuleSpec::GetTriple() {
}
void SBModuleSpec::SetTriple(const char *triple) {
- LLDB_RECORD_METHOD(void, SBModuleSpec, SetTriple, (const char *), triple);
+ LLDB_INSTRUMENT_VA(this, triple);
m_opaque_up->GetArchitecture().SetTriple(triple);
}
const uint8_t *SBModuleSpec::GetUUIDBytes() {
+ LLDB_INSTRUMENT_VA(this)
return m_opaque_up->GetUUID().GetBytes().data();
}
size_t SBModuleSpec::GetUUIDLength() {
- LLDB_RECORD_METHOD_NO_ARGS(size_t, SBModuleSpec, GetUUIDLength);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetUUID().GetBytes().size();
}
bool SBModuleSpec::SetUUIDBytes(const uint8_t *uuid, size_t uuid_len) {
+ LLDB_INSTRUMENT_VA(this, uuid, uuid_len)
m_opaque_up->GetUUID() = UUID::fromOptionalData(uuid, uuid_len);
return m_opaque_up->GetUUID().IsValid();
}
bool SBModuleSpec::GetDescription(lldb::SBStream &description) {
- LLDB_RECORD_METHOD(bool, SBModuleSpec, GetDescription, (lldb::SBStream &),
- description);
+ LLDB_INSTRUMENT_VA(this, description);
m_opaque_up->Dump(description.ref());
return true;
}
SBModuleSpecList::SBModuleSpecList() : m_opaque_up(new ModuleSpecList()) {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBModuleSpecList);
+ LLDB_INSTRUMENT_VA(this);
}
SBModuleSpecList::SBModuleSpecList(const SBModuleSpecList &rhs)
: m_opaque_up(new ModuleSpecList(*rhs.m_opaque_up)) {
- LLDB_RECORD_CONSTRUCTOR(SBModuleSpecList, (const lldb::SBModuleSpecList &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
SBModuleSpecList &SBModuleSpecList::operator=(const SBModuleSpecList &rhs) {
- LLDB_RECORD_METHOD(
- lldb::SBModuleSpecList &,
- SBModuleSpecList, operator=,(const lldb::SBModuleSpecList &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
*m_opaque_up = *rhs.m_opaque_up;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBModuleSpecList::~SBModuleSpecList() = default;
SBModuleSpecList SBModuleSpecList::GetModuleSpecifications(const char *path) {
- LLDB_RECORD_STATIC_METHOD(lldb::SBModuleSpecList, SBModuleSpecList,
- GetModuleSpecifications, (const char *), path);
+ LLDB_INSTRUMENT_VA(path);
SBModuleSpecList specs;
FileSpec file_spec(path);
FileSystem::Instance().Resolve(file_spec);
Host::ResolveExecutableInBundle(file_spec);
ObjectFile::GetModuleSpecifications(file_spec, 0, 0, *specs.m_opaque_up);
- return LLDB_RECORD_RESULT(specs);
+ return specs;
}
void SBModuleSpecList::Append(const SBModuleSpec &spec) {
- LLDB_RECORD_METHOD(void, SBModuleSpecList, Append,
- (const lldb::SBModuleSpec &), spec);
+ LLDB_INSTRUMENT_VA(this, spec);
m_opaque_up->Append(*spec.m_opaque_up);
}
void SBModuleSpecList::Append(const SBModuleSpecList &spec_list) {
- LLDB_RECORD_METHOD(void, SBModuleSpecList, Append,
- (const lldb::SBModuleSpecList &), spec_list);
+ LLDB_INSTRUMENT_VA(this, spec_list);
m_opaque_up->Append(*spec_list.m_opaque_up);
}
size_t SBModuleSpecList::GetSize() {
- LLDB_RECORD_METHOD_NO_ARGS(size_t, SBModuleSpecList, GetSize);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetSize();
}
SBModuleSpec SBModuleSpecList::GetSpecAtIndex(size_t i) {
- LLDB_RECORD_METHOD(lldb::SBModuleSpec, SBModuleSpecList, GetSpecAtIndex,
- (size_t), i);
+ LLDB_INSTRUMENT_VA(this, i);
SBModuleSpec sb_module_spec;
m_opaque_up->GetModuleSpecAtIndex(i, *sb_module_spec.m_opaque_up);
- return LLDB_RECORD_RESULT(sb_module_spec);
+ return sb_module_spec;
}
SBModuleSpec
SBModuleSpecList::FindFirstMatchingSpec(const SBModuleSpec &match_spec) {
- LLDB_RECORD_METHOD(lldb::SBModuleSpec, SBModuleSpecList,
- FindFirstMatchingSpec, (const lldb::SBModuleSpec &),
- match_spec);
+ LLDB_INSTRUMENT_VA(this, match_spec);
SBModuleSpec sb_module_spec;
m_opaque_up->FindMatchingModuleSpec(*match_spec.m_opaque_up,
*sb_module_spec.m_opaque_up);
- return LLDB_RECORD_RESULT(sb_module_spec);
+ return sb_module_spec;
}
SBModuleSpecList
SBModuleSpecList::FindMatchingSpecs(const SBModuleSpec &match_spec) {
- LLDB_RECORD_METHOD(lldb::SBModuleSpecList, SBModuleSpecList,
- FindMatchingSpecs, (const lldb::SBModuleSpec &),
- match_spec);
+ LLDB_INSTRUMENT_VA(this, match_spec);
SBModuleSpecList specs;
m_opaque_up->FindMatchingModuleSpecs(*match_spec.m_opaque_up,
*specs.m_opaque_up);
- return LLDB_RECORD_RESULT(specs);
+ return specs;
}
bool SBModuleSpecList::GetDescription(lldb::SBStream &description) {
- LLDB_RECORD_METHOD(bool, SBModuleSpecList, GetDescription, (lldb::SBStream &),
- description);
+ LLDB_INSTRUMENT_VA(this, description);
m_opaque_up->Dump(description.ref());
return true;
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBModuleSpec>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBModuleSpec, ());
- LLDB_REGISTER_CONSTRUCTOR(SBModuleSpec, (const lldb::SBModuleSpec &));
- LLDB_REGISTER_METHOD(const lldb::SBModuleSpec &,
- SBModuleSpec, operator=,(const lldb::SBModuleSpec &));
- LLDB_REGISTER_METHOD_CONST(bool, SBModuleSpec, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBModuleSpec, operator bool, ());
- LLDB_REGISTER_METHOD(void, SBModuleSpec, Clear, ());
- LLDB_REGISTER_METHOD(lldb::SBFileSpec, SBModuleSpec, GetFileSpec, ());
- LLDB_REGISTER_METHOD(void, SBModuleSpec, SetFileSpec,
- (const lldb::SBFileSpec &));
- LLDB_REGISTER_METHOD(lldb::SBFileSpec, SBModuleSpec, GetPlatformFileSpec,
- ());
- LLDB_REGISTER_METHOD(void, SBModuleSpec, SetPlatformFileSpec,
- (const lldb::SBFileSpec &));
- LLDB_REGISTER_METHOD(lldb::SBFileSpec, SBModuleSpec, GetSymbolFileSpec, ());
- LLDB_REGISTER_METHOD(void, SBModuleSpec, SetSymbolFileSpec,
- (const lldb::SBFileSpec &));
- LLDB_REGISTER_METHOD(const char *, SBModuleSpec, GetObjectName, ());
- LLDB_REGISTER_METHOD(void, SBModuleSpec, SetObjectName, (const char *));
- LLDB_REGISTER_METHOD(const char *, SBModuleSpec, GetTriple, ());
- LLDB_REGISTER_METHOD(void, SBModuleSpec, SetTriple, (const char *));
- LLDB_REGISTER_METHOD(size_t, SBModuleSpec, GetUUIDLength, ());
- LLDB_REGISTER_METHOD(bool, SBModuleSpec, GetDescription,
- (lldb::SBStream &));
- LLDB_REGISTER_CONSTRUCTOR(SBModuleSpecList, ());
- LLDB_REGISTER_CONSTRUCTOR(SBModuleSpecList,
- (const lldb::SBModuleSpecList &));
- LLDB_REGISTER_METHOD(
- lldb::SBModuleSpecList &,
- SBModuleSpecList, operator=,(const lldb::SBModuleSpecList &));
- LLDB_REGISTER_STATIC_METHOD(lldb::SBModuleSpecList, SBModuleSpecList,
- GetModuleSpecifications, (const char *));
- LLDB_REGISTER_METHOD(void, SBModuleSpecList, Append,
- (const lldb::SBModuleSpec &));
- LLDB_REGISTER_METHOD(void, SBModuleSpecList, Append,
- (const lldb::SBModuleSpecList &));
- LLDB_REGISTER_METHOD(size_t, SBModuleSpecList, GetSize, ());
- LLDB_REGISTER_METHOD(lldb::SBModuleSpec, SBModuleSpecList, GetSpecAtIndex,
- (size_t));
- LLDB_REGISTER_METHOD(lldb::SBModuleSpec, SBModuleSpecList,
- FindFirstMatchingSpec, (const lldb::SBModuleSpec &));
- LLDB_REGISTER_METHOD(lldb::SBModuleSpecList, SBModuleSpecList,
- FindMatchingSpecs, (const lldb::SBModuleSpec &));
- LLDB_REGISTER_METHOD(bool, SBModuleSpecList, GetDescription,
- (lldb::SBStream &));
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBPlatform.cpp b/contrib/llvm-project/lldb/source/API/SBPlatform.cpp
index d7a86f0ad1dd..d521a38b30e8 100644
--- a/contrib/llvm-project/lldb/source/API/SBPlatform.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBPlatform.cpp
@@ -7,7 +7,6 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBPlatform.h"
-#include "SBReproducerPrivate.h"
#include "lldb/API/SBEnvironment.h"
#include "lldb/API/SBError.h"
#include "lldb/API/SBFileSpec.h"
@@ -19,6 +18,7 @@
#include "lldb/Target/Target.h"
#include "lldb/Utility/ArchSpec.h"
#include "lldb/Utility/Args.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Utility/Status.h"
#include "llvm/Support/FileSystem.h"
@@ -30,10 +30,7 @@ using namespace lldb_private;
// PlatformConnectOptions
struct PlatformConnectOptions {
- PlatformConnectOptions(const char *url = nullptr)
- : m_url(), m_rsync_options(), m_rsync_remote_path_prefix(),
-
- m_local_cache_directory() {
+ PlatformConnectOptions(const char *url = nullptr) {
if (url && url[0])
m_url = url;
}
@@ -52,7 +49,7 @@ struct PlatformConnectOptions {
struct PlatformShellCommand {
PlatformShellCommand(llvm::StringRef shell_interpreter,
llvm::StringRef shell_command)
- : m_command(), m_working_dir(), m_status(0), m_signo(0) {
+ : m_status(0), m_signo(0) {
if (!shell_interpreter.empty())
m_shell = shell_interpreter.str();
@@ -60,8 +57,7 @@ struct PlatformShellCommand {
m_command = shell_command.str();
}
- PlatformShellCommand(llvm::StringRef shell_command = llvm::StringRef())
- : m_shell(), m_command(), m_working_dir() {
+ PlatformShellCommand(llvm::StringRef shell_command = llvm::StringRef()) {
if (!shell_command.empty())
m_command = shell_command.str();
}
@@ -79,14 +75,13 @@ struct PlatformShellCommand {
// SBPlatformConnectOptions
SBPlatformConnectOptions::SBPlatformConnectOptions(const char *url)
: m_opaque_ptr(new PlatformConnectOptions(url)) {
- LLDB_RECORD_CONSTRUCTOR(SBPlatformConnectOptions, (const char *), url);
+ LLDB_INSTRUMENT_VA(this, url);
}
SBPlatformConnectOptions::SBPlatformConnectOptions(
const SBPlatformConnectOptions &rhs)
: m_opaque_ptr(new PlatformConnectOptions()) {
- LLDB_RECORD_CONSTRUCTOR(SBPlatformConnectOptions,
- (const lldb::SBPlatformConnectOptions &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
*m_opaque_ptr = *rhs.m_opaque_ptr;
}
@@ -95,18 +90,14 @@ SBPlatformConnectOptions::~SBPlatformConnectOptions() { delete m_opaque_ptr; }
SBPlatformConnectOptions &
SBPlatformConnectOptions::operator=(const SBPlatformConnectOptions &rhs) {
- LLDB_RECORD_METHOD(
- SBPlatformConnectOptions &,
- SBPlatformConnectOptions, operator=,(
- const lldb::SBPlatformConnectOptions &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
*m_opaque_ptr = *rhs.m_opaque_ptr;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
const char *SBPlatformConnectOptions::GetURL() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBPlatformConnectOptions, GetURL);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_ptr->m_url.empty())
return nullptr;
@@ -114,8 +105,7 @@ const char *SBPlatformConnectOptions::GetURL() {
}
void SBPlatformConnectOptions::SetURL(const char *url) {
- LLDB_RECORD_METHOD(void, SBPlatformConnectOptions, SetURL, (const char *),
- url);
+ LLDB_INSTRUMENT_VA(this, url);
if (url && url[0])
m_opaque_ptr->m_url = url;
@@ -124,7 +114,7 @@ void SBPlatformConnectOptions::SetURL(const char *url) {
}
bool SBPlatformConnectOptions::GetRsyncEnabled() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBPlatformConnectOptions, GetRsyncEnabled);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_ptr->m_rsync_enabled;
}
@@ -132,9 +122,8 @@ bool SBPlatformConnectOptions::GetRsyncEnabled() {
void SBPlatformConnectOptions::EnableRsync(
const char *options, const char *remote_path_prefix,
bool omit_hostname_from_remote_path) {
- LLDB_RECORD_METHOD(void, SBPlatformConnectOptions, EnableRsync,
- (const char *, const char *, bool), options,
- remote_path_prefix, omit_hostname_from_remote_path);
+ LLDB_INSTRUMENT_VA(this, options, remote_path_prefix,
+ omit_hostname_from_remote_path);
m_opaque_ptr->m_rsync_enabled = true;
m_opaque_ptr->m_rsync_omit_hostname_from_remote_path =
@@ -151,21 +140,19 @@ void SBPlatformConnectOptions::EnableRsync(
}
void SBPlatformConnectOptions::DisableRsync() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBPlatformConnectOptions, DisableRsync);
+ LLDB_INSTRUMENT_VA(this);
m_opaque_ptr->m_rsync_enabled = false;
}
const char *SBPlatformConnectOptions::GetLocalCacheDirectory() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBPlatformConnectOptions,
- GetLocalCacheDirectory);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_ptr->m_local_cache_directory.GetCString();
}
void SBPlatformConnectOptions::SetLocalCacheDirectory(const char *path) {
- LLDB_RECORD_METHOD(void, SBPlatformConnectOptions, SetLocalCacheDirectory,
- (const char *), path);
+ LLDB_INSTRUMENT_VA(this, path);
if (path && path[0])
m_opaque_ptr->m_local_cache_directory.SetCString(path);
@@ -177,21 +164,18 @@ void SBPlatformConnectOptions::SetLocalCacheDirectory(const char *path) {
SBPlatformShellCommand::SBPlatformShellCommand(const char *shell_interpreter,
const char *shell_command)
: m_opaque_ptr(new PlatformShellCommand(shell_interpreter, shell_command)) {
- LLDB_RECORD_CONSTRUCTOR(SBPlatformShellCommand, (const char *, const char *),
- shell_interpreter, shell_command);
+ LLDB_INSTRUMENT_VA(this, shell_interpreter, shell_command);
}
SBPlatformShellCommand::SBPlatformShellCommand(const char *shell_command)
: m_opaque_ptr(new PlatformShellCommand(shell_command)) {
- LLDB_RECORD_CONSTRUCTOR(SBPlatformShellCommand, (const char *),
- shell_command);
+ LLDB_INSTRUMENT_VA(this, shell_command);
}
SBPlatformShellCommand::SBPlatformShellCommand(
const SBPlatformShellCommand &rhs)
: m_opaque_ptr(new PlatformShellCommand()) {
- LLDB_RECORD_CONSTRUCTOR(SBPlatformShellCommand,
- (const lldb::SBPlatformShellCommand &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
*m_opaque_ptr = *rhs.m_opaque_ptr;
}
@@ -199,19 +183,16 @@ SBPlatformShellCommand::SBPlatformShellCommand(
SBPlatformShellCommand &
SBPlatformShellCommand::operator=(const SBPlatformShellCommand &rhs) {
- LLDB_RECORD_METHOD(
- SBPlatformShellCommand &,
- SBPlatformShellCommand, operator=,(const lldb::SBPlatformShellCommand &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
*m_opaque_ptr = *rhs.m_opaque_ptr;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBPlatformShellCommand::~SBPlatformShellCommand() { delete m_opaque_ptr; }
void SBPlatformShellCommand::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBPlatformShellCommand, Clear);
+ LLDB_INSTRUMENT_VA(this);
m_opaque_ptr->m_output = std::string();
m_opaque_ptr->m_status = 0;
@@ -219,7 +200,7 @@ void SBPlatformShellCommand::Clear() {
}
const char *SBPlatformShellCommand::GetShell() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBPlatformShellCommand, GetShell);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_ptr->m_shell.empty())
return nullptr;
@@ -227,8 +208,7 @@ const char *SBPlatformShellCommand::GetShell() {
}
void SBPlatformShellCommand::SetShell(const char *shell_interpreter) {
- LLDB_RECORD_METHOD(void, SBPlatformShellCommand, SetShell, (const char *),
- shell_interpreter);
+ LLDB_INSTRUMENT_VA(this, shell_interpreter);
if (shell_interpreter && shell_interpreter[0])
m_opaque_ptr->m_shell = shell_interpreter;
@@ -237,7 +217,7 @@ void SBPlatformShellCommand::SetShell(const char *shell_interpreter) {
}
const char *SBPlatformShellCommand::GetCommand() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBPlatformShellCommand, GetCommand);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_ptr->m_command.empty())
return nullptr;
@@ -245,8 +225,7 @@ const char *SBPlatformShellCommand::GetCommand() {
}
void SBPlatformShellCommand::SetCommand(const char *shell_command) {
- LLDB_RECORD_METHOD(void, SBPlatformShellCommand, SetCommand, (const char *),
- shell_command);
+ LLDB_INSTRUMENT_VA(this, shell_command);
if (shell_command && shell_command[0])
m_opaque_ptr->m_command = shell_command;
@@ -255,8 +234,7 @@ void SBPlatformShellCommand::SetCommand(const char *shell_command) {
}
const char *SBPlatformShellCommand::GetWorkingDirectory() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBPlatformShellCommand,
- GetWorkingDirectory);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_ptr->m_working_dir.empty())
return nullptr;
@@ -264,8 +242,7 @@ const char *SBPlatformShellCommand::GetWorkingDirectory() {
}
void SBPlatformShellCommand::SetWorkingDirectory(const char *path) {
- LLDB_RECORD_METHOD(void, SBPlatformShellCommand, SetWorkingDirectory,
- (const char *), path);
+ LLDB_INSTRUMENT_VA(this, path);
if (path && path[0])
m_opaque_ptr->m_working_dir = path;
@@ -274,8 +251,7 @@ void SBPlatformShellCommand::SetWorkingDirectory(const char *path) {
}
uint32_t SBPlatformShellCommand::GetTimeoutSeconds() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBPlatformShellCommand,
- GetTimeoutSeconds);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_ptr->m_timeout)
return m_opaque_ptr->m_timeout->count();
@@ -283,8 +259,7 @@ uint32_t SBPlatformShellCommand::GetTimeoutSeconds() {
}
void SBPlatformShellCommand::SetTimeoutSeconds(uint32_t sec) {
- LLDB_RECORD_METHOD(void, SBPlatformShellCommand, SetTimeoutSeconds,
- (uint32_t), sec);
+ LLDB_INSTRUMENT_VA(this, sec);
if (sec == UINT32_MAX)
m_opaque_ptr->m_timeout = llvm::None;
@@ -293,19 +268,19 @@ void SBPlatformShellCommand::SetTimeoutSeconds(uint32_t sec) {
}
int SBPlatformShellCommand::GetSignal() {
- LLDB_RECORD_METHOD_NO_ARGS(int, SBPlatformShellCommand, GetSignal);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_ptr->m_signo;
}
int SBPlatformShellCommand::GetStatus() {
- LLDB_RECORD_METHOD_NO_ARGS(int, SBPlatformShellCommand, GetStatus);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_ptr->m_status;
}
const char *SBPlatformShellCommand::GetOutput() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBPlatformShellCommand, GetOutput);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_ptr->m_output.empty())
return nullptr;
@@ -313,12 +288,10 @@ const char *SBPlatformShellCommand::GetOutput() {
}
// SBPlatform
-SBPlatform::SBPlatform() : m_opaque_sp() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBPlatform);
-}
+SBPlatform::SBPlatform() { LLDB_INSTRUMENT_VA(this); }
-SBPlatform::SBPlatform(const char *platform_name) : m_opaque_sp() {
- LLDB_RECORD_CONSTRUCTOR(SBPlatform, (const char *), platform_name);
+SBPlatform::SBPlatform(const char *platform_name) {
+ LLDB_INSTRUMENT_VA(this, platform_name);
Status error;
if (platform_name && platform_name[0])
@@ -326,48 +299,46 @@ SBPlatform::SBPlatform(const char *platform_name) : m_opaque_sp() {
}
SBPlatform::SBPlatform(const SBPlatform &rhs) {
- LLDB_RECORD_CONSTRUCTOR(SBPlatform, (const lldb::SBPlatform &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_sp = rhs.m_opaque_sp;
}
SBPlatform &SBPlatform::operator=(const SBPlatform &rhs) {
- LLDB_RECORD_METHOD(SBPlatform &,
- SBPlatform, operator=,(const lldb::SBPlatform &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_sp = rhs.m_opaque_sp;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBPlatform::~SBPlatform() = default;
SBPlatform SBPlatform::GetHostPlatform() {
- LLDB_RECORD_STATIC_METHOD_NO_ARGS(lldb::SBPlatform, SBPlatform,
- GetHostPlatform);
+ LLDB_INSTRUMENT();
SBPlatform host_platform;
host_platform.m_opaque_sp = Platform::GetHostPlatform();
- return LLDB_RECORD_RESULT(host_platform);
+ return host_platform;
}
bool SBPlatform::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBPlatform, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBPlatform::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBPlatform, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp.get() != nullptr;
}
void SBPlatform::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBPlatform, Clear);
+ LLDB_INSTRUMENT_VA(this);
m_opaque_sp.reset();
}
const char *SBPlatform::GetName() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBPlatform, GetName);
+ LLDB_INSTRUMENT_VA(this);
PlatformSP platform_sp(GetSP());
if (platform_sp)
@@ -382,7 +353,7 @@ void SBPlatform::SetSP(const lldb::PlatformSP &platform_sp) {
}
const char *SBPlatform::GetWorkingDirectory() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBPlatform, GetWorkingDirectory);
+ LLDB_INSTRUMENT_VA(this);
PlatformSP platform_sp(GetSP());
if (platform_sp)
@@ -391,8 +362,7 @@ const char *SBPlatform::GetWorkingDirectory() {
}
bool SBPlatform::SetWorkingDirectory(const char *path) {
- LLDB_RECORD_METHOD(bool, SBPlatform, SetWorkingDirectory, (const char *),
- path);
+ LLDB_INSTRUMENT_VA(this, path);
PlatformSP platform_sp(GetSP());
if (platform_sp) {
@@ -406,8 +376,7 @@ bool SBPlatform::SetWorkingDirectory(const char *path) {
}
SBError SBPlatform::ConnectRemote(SBPlatformConnectOptions &connect_options) {
- LLDB_RECORD_METHOD(lldb::SBError, SBPlatform, ConnectRemote,
- (lldb::SBPlatformConnectOptions &), connect_options);
+ LLDB_INSTRUMENT_VA(this, connect_options);
SBError sb_error;
PlatformSP platform_sp(GetSP());
@@ -418,11 +387,11 @@ SBError SBPlatform::ConnectRemote(SBPlatformConnectOptions &connect_options) {
} else {
sb_error.SetErrorString("invalid platform");
}
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
void SBPlatform::DisconnectRemote() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBPlatform, DisconnectRemote);
+ LLDB_INSTRUMENT_VA(this);
PlatformSP platform_sp(GetSP());
if (platform_sp)
@@ -430,7 +399,7 @@ void SBPlatform::DisconnectRemote() {
}
bool SBPlatform::IsConnected() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBPlatform, IsConnected);
+ LLDB_INSTRUMENT_VA(this);
PlatformSP platform_sp(GetSP());
if (platform_sp)
@@ -439,7 +408,7 @@ bool SBPlatform::IsConnected() {
}
const char *SBPlatform::GetTriple() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBPlatform, GetTriple);
+ LLDB_INSTRUMENT_VA(this);
PlatformSP platform_sp(GetSP());
if (platform_sp) {
@@ -454,7 +423,7 @@ const char *SBPlatform::GetTriple() {
}
const char *SBPlatform::GetOSBuild() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBPlatform, GetOSBuild);
+ LLDB_INSTRUMENT_VA(this);
PlatformSP platform_sp(GetSP());
if (platform_sp) {
@@ -469,7 +438,7 @@ const char *SBPlatform::GetOSBuild() {
}
const char *SBPlatform::GetOSDescription() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBPlatform, GetOSDescription);
+ LLDB_INSTRUMENT_VA(this);
PlatformSP platform_sp(GetSP());
if (platform_sp) {
@@ -484,7 +453,7 @@ const char *SBPlatform::GetOSDescription() {
}
const char *SBPlatform::GetHostname() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBPlatform, GetHostname);
+ LLDB_INSTRUMENT_VA(this);
PlatformSP platform_sp(GetSP());
if (platform_sp)
@@ -493,7 +462,7 @@ const char *SBPlatform::GetHostname() {
}
uint32_t SBPlatform::GetOSMajorVersion() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBPlatform, GetOSMajorVersion);
+ LLDB_INSTRUMENT_VA(this);
llvm::VersionTuple version;
if (PlatformSP platform_sp = GetSP())
@@ -502,7 +471,7 @@ uint32_t SBPlatform::GetOSMajorVersion() {
}
uint32_t SBPlatform::GetOSMinorVersion() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBPlatform, GetOSMinorVersion);
+ LLDB_INSTRUMENT_VA(this);
llvm::VersionTuple version;
if (PlatformSP platform_sp = GetSP())
@@ -511,7 +480,7 @@ uint32_t SBPlatform::GetOSMinorVersion() {
}
uint32_t SBPlatform::GetOSUpdateVersion() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBPlatform, GetOSUpdateVersion);
+ LLDB_INSTRUMENT_VA(this);
llvm::VersionTuple version;
if (PlatformSP platform_sp = GetSP())
@@ -519,9 +488,14 @@ uint32_t SBPlatform::GetOSUpdateVersion() {
return version.getSubminor().getValueOr(UINT32_MAX);
}
+void SBPlatform::SetSDKRoot(const char *sysroot) {
+ LLDB_INSTRUMENT_VA(this, sysroot);
+ if (PlatformSP platform_sp = GetSP())
+ platform_sp->SetSDKRootDirectory(ConstString(sysroot));
+}
+
SBError SBPlatform::Get(SBFileSpec &src, SBFileSpec &dst) {
- LLDB_RECORD_METHOD(lldb::SBError, SBPlatform, Get,
- (lldb::SBFileSpec &, lldb::SBFileSpec &), src, dst);
+ LLDB_INSTRUMENT_VA(this, src, dst);
SBError sb_error;
PlatformSP platform_sp(GetSP());
@@ -530,54 +504,48 @@ SBError SBPlatform::Get(SBFileSpec &src, SBFileSpec &dst) {
} else {
sb_error.SetErrorString("invalid platform");
}
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
SBError SBPlatform::Put(SBFileSpec &src, SBFileSpec &dst) {
- LLDB_RECORD_METHOD(lldb::SBError, SBPlatform, Put,
- (lldb::SBFileSpec &, lldb::SBFileSpec &), src, dst);
- return LLDB_RECORD_RESULT(
- ExecuteConnected([&](const lldb::PlatformSP &platform_sp) {
- if (src.Exists()) {
- uint32_t permissions =
- FileSystem::Instance().GetPermissions(src.ref());
- if (permissions == 0) {
- if (FileSystem::Instance().IsDirectory(src.ref()))
- permissions = eFilePermissionsDirectoryDefault;
- else
- permissions = eFilePermissionsFileDefault;
- }
-
- return platform_sp->PutFile(src.ref(), dst.ref(), permissions);
- }
+ LLDB_INSTRUMENT_VA(this, src, dst);
+ return ExecuteConnected([&](const lldb::PlatformSP &platform_sp) {
+ if (src.Exists()) {
+ uint32_t permissions = FileSystem::Instance().GetPermissions(src.ref());
+ if (permissions == 0) {
+ if (FileSystem::Instance().IsDirectory(src.ref()))
+ permissions = eFilePermissionsDirectoryDefault;
+ else
+ permissions = eFilePermissionsFileDefault;
+ }
+
+ return platform_sp->PutFile(src.ref(), dst.ref(), permissions);
+ }
- Status error;
- error.SetErrorStringWithFormat("'src' argument doesn't exist: '%s'",
- src.ref().GetPath().c_str());
- return error;
- }));
+ Status error;
+ error.SetErrorStringWithFormat("'src' argument doesn't exist: '%s'",
+ src.ref().GetPath().c_str());
+ return error;
+ });
}
SBError SBPlatform::Install(SBFileSpec &src, SBFileSpec &dst) {
- LLDB_RECORD_METHOD(lldb::SBError, SBPlatform, Install,
- (lldb::SBFileSpec &, lldb::SBFileSpec &), src, dst);
- return LLDB_RECORD_RESULT(
- ExecuteConnected([&](const lldb::PlatformSP &platform_sp) {
- if (src.Exists())
- return platform_sp->Install(src.ref(), dst.ref());
+ LLDB_INSTRUMENT_VA(this, src, dst);
+ return ExecuteConnected([&](const lldb::PlatformSP &platform_sp) {
+ if (src.Exists())
+ return platform_sp->Install(src.ref(), dst.ref());
- Status error;
- error.SetErrorStringWithFormat("'src' argument doesn't exist: '%s'",
- src.ref().GetPath().c_str());
- return error;
- }));
+ Status error;
+ error.SetErrorStringWithFormat("'src' argument doesn't exist: '%s'",
+ src.ref().GetPath().c_str());
+ return error;
+ });
}
SBError SBPlatform::Run(SBPlatformShellCommand &shell_command) {
- LLDB_RECORD_METHOD(lldb::SBError, SBPlatform, Run,
- (lldb::SBPlatformShellCommand &), shell_command);
- return LLDB_RECORD_RESULT(
- ExecuteConnected([&](const lldb::PlatformSP &platform_sp) {
+ LLDB_INSTRUMENT_VA(this, shell_command);
+ return ExecuteConnected(
+ [&](const lldb::PlatformSP &platform_sp) {
const char *command = shell_command.GetCommand();
if (!command)
return Status("invalid shell command (empty)");
@@ -594,27 +562,24 @@ SBError SBPlatform::Run(SBPlatformShellCommand &shell_command) {
&shell_command.m_opaque_ptr->m_signo,
&shell_command.m_opaque_ptr->m_output,
shell_command.m_opaque_ptr->m_timeout);
- }));
+ });
}
SBError SBPlatform::Launch(SBLaunchInfo &launch_info) {
- LLDB_RECORD_METHOD(lldb::SBError, SBPlatform, Launch, (lldb::SBLaunchInfo &),
- launch_info);
- return LLDB_RECORD_RESULT(
- ExecuteConnected([&](const lldb::PlatformSP &platform_sp) {
- ProcessLaunchInfo info = launch_info.ref();
- Status error = platform_sp->LaunchProcess(info);
- launch_info.set_ref(info);
- return error;
- }));
+ LLDB_INSTRUMENT_VA(this, launch_info);
+ return ExecuteConnected([&](const lldb::PlatformSP &platform_sp) {
+ ProcessLaunchInfo info = launch_info.ref();
+ Status error = platform_sp->LaunchProcess(info);
+ launch_info.set_ref(info);
+ return error;
+ });
}
SBError SBPlatform::Kill(const lldb::pid_t pid) {
- LLDB_RECORD_METHOD(lldb::SBError, SBPlatform, Kill, (const lldb::pid_t), pid);
- return LLDB_RECORD_RESULT(
- ExecuteConnected([&](const lldb::PlatformSP &platform_sp) {
- return platform_sp->KillProcess(pid);
- }));
+ LLDB_INSTRUMENT_VA(this, pid);
+ return ExecuteConnected([&](const lldb::PlatformSP &platform_sp) {
+ return platform_sp->KillProcess(pid);
+ });
}
SBError SBPlatform::ExecuteConnected(
@@ -633,8 +598,7 @@ SBError SBPlatform::ExecuteConnected(
}
SBError SBPlatform::MakeDirectory(const char *path, uint32_t file_permissions) {
- LLDB_RECORD_METHOD(lldb::SBError, SBPlatform, MakeDirectory,
- (const char *, uint32_t), path, file_permissions);
+ LLDB_INSTRUMENT_VA(this, path, file_permissions);
SBError sb_error;
PlatformSP platform_sp(GetSP());
@@ -644,12 +608,11 @@ SBError SBPlatform::MakeDirectory(const char *path, uint32_t file_permissions) {
} else {
sb_error.SetErrorString("invalid platform");
}
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
uint32_t SBPlatform::GetFilePermissions(const char *path) {
- LLDB_RECORD_METHOD(uint32_t, SBPlatform, GetFilePermissions, (const char *),
- path);
+ LLDB_INSTRUMENT_VA(this, path);
PlatformSP platform_sp(GetSP());
if (platform_sp) {
@@ -662,8 +625,7 @@ uint32_t SBPlatform::GetFilePermissions(const char *path) {
SBError SBPlatform::SetFilePermissions(const char *path,
uint32_t file_permissions) {
- LLDB_RECORD_METHOD(lldb::SBError, SBPlatform, SetFilePermissions,
- (const char *, uint32_t), path, file_permissions);
+ LLDB_INSTRUMENT_VA(this, path, file_permissions);
SBError sb_error;
PlatformSP platform_sp(GetSP());
@@ -673,126 +635,25 @@ SBError SBPlatform::SetFilePermissions(const char *path,
} else {
sb_error.SetErrorString("invalid platform");
}
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
SBUnixSignals SBPlatform::GetUnixSignals() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBUnixSignals, SBPlatform,
- GetUnixSignals);
+ LLDB_INSTRUMENT_VA(this);
if (auto platform_sp = GetSP())
- return LLDB_RECORD_RESULT(SBUnixSignals{platform_sp});
+ return SBUnixSignals{platform_sp};
- return LLDB_RECORD_RESULT(SBUnixSignals());
+ return SBUnixSignals();
}
SBEnvironment SBPlatform::GetEnvironment() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBEnvironment, SBPlatform, GetEnvironment);
+ LLDB_INSTRUMENT_VA(this);
PlatformSP platform_sp(GetSP());
if (platform_sp) {
- return LLDB_RECORD_RESULT(SBEnvironment(platform_sp->GetEnvironment()));
+ return SBEnvironment(platform_sp->GetEnvironment());
}
- return LLDB_RECORD_RESULT(SBEnvironment());
-}
-
-namespace lldb_private {
-namespace repro {
-
-template <> void RegisterMethods<SBPlatformConnectOptions>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBPlatformConnectOptions, (const char *));
- LLDB_REGISTER_CONSTRUCTOR(SBPlatformConnectOptions,
- (const lldb::SBPlatformConnectOptions &));
- LLDB_REGISTER_METHOD(
- SBPlatformConnectOptions &,
- SBPlatformConnectOptions, operator=,(
- const lldb::SBPlatformConnectOptions &));
- LLDB_REGISTER_METHOD(const char *, SBPlatformConnectOptions, GetURL, ());
- LLDB_REGISTER_METHOD(void, SBPlatformConnectOptions, SetURL, (const char *));
- LLDB_REGISTER_METHOD(bool, SBPlatformConnectOptions, GetRsyncEnabled, ());
- LLDB_REGISTER_METHOD(void, SBPlatformConnectOptions, EnableRsync,
- (const char *, const char *, bool));
- LLDB_REGISTER_METHOD(void, SBPlatformConnectOptions, DisableRsync, ());
- LLDB_REGISTER_METHOD(const char *, SBPlatformConnectOptions,
- GetLocalCacheDirectory, ());
- LLDB_REGISTER_METHOD(void, SBPlatformConnectOptions, SetLocalCacheDirectory,
- (const char *));
-}
-
-template <> void RegisterMethods<SBPlatformShellCommand>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBPlatformShellCommand, (const char *));
- LLDB_REGISTER_CONSTRUCTOR(SBPlatformShellCommand,
- (const lldb::SBPlatformShellCommand &));
- LLDB_REGISTER_METHOD(
- SBPlatformShellCommand &,
- SBPlatformShellCommand, operator=,(const lldb::SBPlatformShellCommand &));
- LLDB_REGISTER_METHOD(void, SBPlatformShellCommand, Clear, ());
- LLDB_REGISTER_METHOD(const char *, SBPlatformShellCommand, GetShell, ());
- LLDB_REGISTER_METHOD(void, SBPlatformShellCommand, SetShell, (const char *));
- LLDB_REGISTER_METHOD(const char *, SBPlatformShellCommand, GetCommand, ());
- LLDB_REGISTER_METHOD(void, SBPlatformShellCommand, SetCommand,
- (const char *));
- LLDB_REGISTER_METHOD(const char *, SBPlatformShellCommand,
- GetWorkingDirectory, ());
- LLDB_REGISTER_METHOD(void, SBPlatformShellCommand, SetWorkingDirectory,
- (const char *));
- LLDB_REGISTER_METHOD(uint32_t, SBPlatformShellCommand, GetTimeoutSeconds, ());
- LLDB_REGISTER_METHOD(void, SBPlatformShellCommand, SetTimeoutSeconds,
- (uint32_t));
- LLDB_REGISTER_METHOD(int, SBPlatformShellCommand, GetSignal, ());
- LLDB_REGISTER_METHOD(int, SBPlatformShellCommand, GetStatus, ());
- LLDB_REGISTER_METHOD(const char *, SBPlatformShellCommand, GetOutput, ());
-}
-
-template <> void RegisterMethods<SBPlatform>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBPlatform, ());
- LLDB_REGISTER_CONSTRUCTOR(SBPlatform, (const char *));
- LLDB_REGISTER_CONSTRUCTOR(SBPlatform, (const lldb::SBPlatform &));
- LLDB_REGISTER_CONSTRUCTOR(SBPlatformShellCommand,
- (const char *, const char *));
- LLDB_REGISTER_METHOD(SBPlatform &,
- SBPlatform, operator=,(const lldb::SBPlatform &));
- LLDB_REGISTER_METHOD_CONST(bool, SBPlatform, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBPlatform, operator bool,());
- LLDB_REGISTER_METHOD(void, SBPlatform, Clear, ());
- LLDB_REGISTER_METHOD(const char *, SBPlatform, GetName, ());
- LLDB_REGISTER_METHOD(const char *, SBPlatform, GetWorkingDirectory, ());
- LLDB_REGISTER_METHOD(bool, SBPlatform, SetWorkingDirectory, (const char *));
- LLDB_REGISTER_METHOD(lldb::SBError, SBPlatform, ConnectRemote,
- (lldb::SBPlatformConnectOptions &));
- LLDB_REGISTER_METHOD(void, SBPlatform, DisconnectRemote, ());
- LLDB_REGISTER_METHOD(bool, SBPlatform, IsConnected, ());
- LLDB_REGISTER_METHOD(const char *, SBPlatform, GetTriple, ());
- LLDB_REGISTER_METHOD(const char *, SBPlatform, GetOSBuild, ());
- LLDB_REGISTER_METHOD(const char *, SBPlatform, GetOSDescription, ());
- LLDB_REGISTER_METHOD(const char *, SBPlatform, GetHostname, ());
- LLDB_REGISTER_METHOD(uint32_t, SBPlatform, GetOSMajorVersion, ());
- LLDB_REGISTER_METHOD(uint32_t, SBPlatform, GetOSMinorVersion, ());
- LLDB_REGISTER_METHOD(uint32_t, SBPlatform, GetOSUpdateVersion, ());
- LLDB_REGISTER_METHOD(lldb::SBError, SBPlatform, Get,
- (lldb::SBFileSpec &, lldb::SBFileSpec &));
- LLDB_REGISTER_METHOD(lldb::SBError, SBPlatform, Put,
- (lldb::SBFileSpec &, lldb::SBFileSpec &));
- LLDB_REGISTER_METHOD(lldb::SBError, SBPlatform, Install,
- (lldb::SBFileSpec &, lldb::SBFileSpec &));
- LLDB_REGISTER_METHOD(lldb::SBError, SBPlatform, Run,
- (lldb::SBPlatformShellCommand &));
- LLDB_REGISTER_METHOD(lldb::SBError, SBPlatform, Launch,
- (lldb::SBLaunchInfo &));
- LLDB_REGISTER_METHOD(lldb::SBError, SBPlatform, Kill, (const lldb::pid_t));
- LLDB_REGISTER_METHOD(lldb::SBError, SBPlatform, MakeDirectory,
- (const char *, uint32_t));
- LLDB_REGISTER_METHOD(uint32_t, SBPlatform, GetFilePermissions,
- (const char *));
- LLDB_REGISTER_METHOD(lldb::SBError, SBPlatform, SetFilePermissions,
- (const char *, uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBEnvironment, SBPlatform, GetEnvironment, ());
- LLDB_REGISTER_METHOD_CONST(lldb::SBUnixSignals, SBPlatform, GetUnixSignals,
- ());
- LLDB_REGISTER_STATIC_METHOD(lldb::SBPlatform, SBPlatform, GetHostPlatform,
- ());
-}
-
-} // namespace repro
-} // namespace lldb_private
+ return SBEnvironment();
+}
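
The change above is mechanical across the whole file: every LLDB_RECORD_METHOD*/LLDB_RECORD_CONSTRUCTOR* macro, which had to spell out the full method signature for the reproducer registry, becomes a single LLDB_INSTRUMENT_VA(this, args...) call, and the LLDB_RECORD_RESULT wrappers around return values are dropped. The short sketch below is a hypothetical, standalone illustration of how such a variadic instrumentation macro can work; it is not LLDB's actual Instrumentation.h, and the SKETCH_INSTRUMENT_VA and LogCall names are invented for the example.

// Hypothetical sketch of a variadic instrumentation macro in the spirit of
// LLDB_INSTRUMENT_VA: one line per method, no signature spelled out, and the
// result is returned directly instead of being wrapped.
#include <cstdio>

namespace sketch {
// The real macro stringifies and logs the arguments; counting them keeps
// this example self-contained.
template <typename... Args> void LogCall(const char *func, Args &&...) {
  std::printf("instrumented call: %s (%zu argument(s))\n", func,
              sizeof...(Args));
}
} // namespace sketch

#define SKETCH_INSTRUMENT_VA(...) sketch::LogCall(__func__, __VA_ARGS__)

struct Platform {
  int MakeDirectory(const char *path, unsigned permissions) {
    SKETCH_INSTRUMENT_VA(this, path, permissions); // single line, as in the diff
    return 0; // plain return, no LLDB_RECORD_RESULT-style wrapper
  }
};

int main() {
  Platform p;
  return p.MakeDirectory("/tmp/example", 0755);
}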
diff --git a/contrib/llvm-project/lldb/source/API/SBProcess.cpp b/contrib/llvm-project/lldb/source/API/SBProcess.cpp
index 797e19462800..2538013412b6 100644
--- a/contrib/llvm-project/lldb/source/API/SBProcess.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBProcess.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBProcess.h"
-#include "SBReproducerPrivate.h"
+#include "lldb/Utility/Instrumentation.h"
#include <cinttypes>
@@ -49,42 +49,38 @@
using namespace lldb;
using namespace lldb_private;
-SBProcess::SBProcess() : m_opaque_wp() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBProcess);
-}
+SBProcess::SBProcess() { LLDB_INSTRUMENT_VA(this); }
// SBProcess constructor
SBProcess::SBProcess(const SBProcess &rhs) : m_opaque_wp(rhs.m_opaque_wp) {
- LLDB_RECORD_CONSTRUCTOR(SBProcess, (const lldb::SBProcess &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
SBProcess::SBProcess(const lldb::ProcessSP &process_sp)
: m_opaque_wp(process_sp) {
- LLDB_RECORD_CONSTRUCTOR(SBProcess, (const lldb::ProcessSP &), process_sp);
+ LLDB_INSTRUMENT_VA(this, process_sp);
}
const SBProcess &SBProcess::operator=(const SBProcess &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBProcess &,
- SBProcess, operator=,(const lldb::SBProcess &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_wp = rhs.m_opaque_wp;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
// Destructor
SBProcess::~SBProcess() = default;
const char *SBProcess::GetBroadcasterClassName() {
- LLDB_RECORD_STATIC_METHOD_NO_ARGS(const char *, SBProcess,
- GetBroadcasterClassName);
+ LLDB_INSTRUMENT();
return Process::GetStaticBroadcasterClass().AsCString();
}
const char *SBProcess::GetPluginName() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBProcess, GetPluginName);
+ LLDB_INSTRUMENT_VA(this);
ProcessSP process_sp(GetSP());
if (process_sp) {
@@ -94,7 +90,7 @@ const char *SBProcess::GetPluginName() {
}
const char *SBProcess::GetShortPluginName() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBProcess, GetShortPluginName);
+ LLDB_INSTRUMENT_VA(this);
ProcessSP process_sp(GetSP());
if (process_sp) {
@@ -108,17 +104,17 @@ lldb::ProcessSP SBProcess::GetSP() const { return m_opaque_wp.lock(); }
void SBProcess::SetSP(const ProcessSP &process_sp) { m_opaque_wp = process_sp; }
void SBProcess::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBProcess, Clear);
+ LLDB_INSTRUMENT_VA(this);
m_opaque_wp.reset();
}
bool SBProcess::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBProcess, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBProcess::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBProcess, operator bool);
+ LLDB_INSTRUMENT_VA(this);
ProcessSP process_sp(m_opaque_wp.lock());
return ((bool)process_sp && process_sp->IsValid());
@@ -130,11 +126,7 @@ bool SBProcess::RemoteLaunch(char const **argv, char const **envp,
const char *working_directory,
uint32_t launch_flags, bool stop_at_entry,
lldb::SBError &error) {
- LLDB_RECORD_METHOD(bool, SBProcess, RemoteLaunch,
- (const char **, const char **, const char *, const char *,
- const char *, const char *, uint32_t, bool,
- lldb::SBError &),
- argv, envp, stdin_path, stdout_path, stderr_path,
+ LLDB_INSTRUMENT_VA(this, argv, envp, stdin_path, stdout_path, stderr_path,
working_directory, launch_flags, stop_at_entry, error);
ProcessSP process_sp(GetSP());
@@ -167,8 +159,7 @@ bool SBProcess::RemoteLaunch(char const **argv, char const **envp,
bool SBProcess::RemoteAttachToProcessWithID(lldb::pid_t pid,
lldb::SBError &error) {
- LLDB_RECORD_METHOD(bool, SBProcess, RemoteAttachToProcessWithID,
- (lldb::pid_t, lldb::SBError &), pid, error);
+ LLDB_INSTRUMENT_VA(this, pid, error);
ProcessSP process_sp(GetSP());
if (process_sp) {
@@ -190,7 +181,7 @@ bool SBProcess::RemoteAttachToProcessWithID(lldb::pid_t pid,
}
uint32_t SBProcess::GetNumThreads() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBProcess, GetNumThreads);
+ LLDB_INSTRUMENT_VA(this);
uint32_t num_threads = 0;
ProcessSP process_sp(GetSP());
@@ -207,8 +198,7 @@ uint32_t SBProcess::GetNumThreads() {
}
SBThread SBProcess::GetSelectedThread() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBThread, SBProcess,
- GetSelectedThread);
+ LLDB_INSTRUMENT_VA(this);
SBThread sb_thread;
ThreadSP thread_sp;
@@ -220,13 +210,12 @@ SBThread SBProcess::GetSelectedThread() const {
sb_thread.SetThread(thread_sp);
}
- return LLDB_RECORD_RESULT(sb_thread);
+ return sb_thread;
}
SBThread SBProcess::CreateOSPluginThread(lldb::tid_t tid,
lldb::addr_t context) {
- LLDB_RECORD_METHOD(lldb::SBThread, SBProcess, CreateOSPluginThread,
- (lldb::tid_t, lldb::addr_t), tid, context);
+ LLDB_INSTRUMENT_VA(this, tid, context);
SBThread sb_thread;
ThreadSP thread_sp;
@@ -238,11 +227,11 @@ SBThread SBProcess::CreateOSPluginThread(lldb::tid_t tid,
sb_thread.SetThread(thread_sp);
}
- return LLDB_RECORD_RESULT(sb_thread);
+ return sb_thread;
}
SBTarget SBProcess::GetTarget() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBTarget, SBProcess, GetTarget);
+ LLDB_INSTRUMENT_VA(this);
SBTarget sb_target;
TargetSP target_sp;
@@ -252,12 +241,11 @@ SBTarget SBProcess::GetTarget() const {
sb_target.SetSP(target_sp);
}
- return LLDB_RECORD_RESULT(sb_target);
+ return sb_target;
}
size_t SBProcess::PutSTDIN(const char *src, size_t src_len) {
- LLDB_RECORD_METHOD(size_t, SBProcess, PutSTDIN, (const char *, size_t), src,
- src_len);
+ LLDB_INSTRUMENT_VA(this, src, src_len);
size_t ret_val = 0;
ProcessSP process_sp(GetSP());
@@ -270,8 +258,7 @@ size_t SBProcess::PutSTDIN(const char *src, size_t src_len) {
}
size_t SBProcess::GetSTDOUT(char *dst, size_t dst_len) const {
- LLDB_RECORD_CHAR_PTR_METHOD_CONST(size_t, SBProcess, GetSTDOUT,
- (char *, size_t), dst, "", dst_len);
+ LLDB_INSTRUMENT_VA(this, dst, dst_len);
size_t bytes_read = 0;
ProcessSP process_sp(GetSP());
@@ -284,8 +271,7 @@ size_t SBProcess::GetSTDOUT(char *dst, size_t dst_len) const {
}
size_t SBProcess::GetSTDERR(char *dst, size_t dst_len) const {
- LLDB_RECORD_CHAR_PTR_METHOD_CONST(size_t, SBProcess, GetSTDERR,
- (char *, size_t), dst, "", dst_len);
+ LLDB_INSTRUMENT_VA(this, dst, dst_len);
size_t bytes_read = 0;
ProcessSP process_sp(GetSP());
@@ -298,8 +284,7 @@ size_t SBProcess::GetSTDERR(char *dst, size_t dst_len) const {
}
size_t SBProcess::GetAsyncProfileData(char *dst, size_t dst_len) const {
- LLDB_RECORD_CHAR_PTR_METHOD_CONST(size_t, SBProcess, GetAsyncProfileData,
- (char *, size_t), dst, "", dst_len);
+ LLDB_INSTRUMENT_VA(this, dst, dst_len);
size_t bytes_read = 0;
ProcessSP process_sp(GetSP());
@@ -312,23 +297,20 @@ size_t SBProcess::GetAsyncProfileData(char *dst, size_t dst_len) const {
}
void SBProcess::ReportEventState(const SBEvent &event, SBFile out) const {
- LLDB_RECORD_METHOD_CONST(void, SBProcess, ReportEventState,
- (const SBEvent &, SBFile), event, out);
+ LLDB_INSTRUMENT_VA(this, event, out);
return ReportEventState(event, out.m_opaque_sp);
}
void SBProcess::ReportEventState(const SBEvent &event, FILE *out) const {
- LLDB_RECORD_METHOD_CONST(void, SBProcess, ReportEventState,
- (const lldb::SBEvent &, FILE *), event, out);
+ LLDB_INSTRUMENT_VA(this, event, out);
FileSP outfile = std::make_shared<NativeFile>(out, false);
return ReportEventState(event, outfile);
}
void SBProcess::ReportEventState(const SBEvent &event, FileSP out) const {
- LLDB_RECORD_METHOD_CONST(void, SBProcess, ReportEventState,
- (const SBEvent &, FileSP), event, out);
+ LLDB_INSTRUMENT_VA(this, event, out);
if (!out || !out->IsValid())
return;
@@ -344,9 +326,7 @@ void SBProcess::ReportEventState(const SBEvent &event, FileSP out) const {
void SBProcess::AppendEventStateReport(const SBEvent &event,
SBCommandReturnObject &result) {
- LLDB_RECORD_METHOD(void, SBProcess, AppendEventStateReport,
- (const lldb::SBEvent &, lldb::SBCommandReturnObject &),
- event, result);
+ LLDB_INSTRUMENT_VA(this, event, result);
ProcessSP process_sp(GetSP());
if (process_sp) {
@@ -360,8 +340,7 @@ void SBProcess::AppendEventStateReport(const SBEvent &event,
}
bool SBProcess::SetSelectedThread(const SBThread &thread) {
- LLDB_RECORD_METHOD(bool, SBProcess, SetSelectedThread,
- (const lldb::SBThread &), thread);
+ LLDB_INSTRUMENT_VA(this, thread);
ProcessSP process_sp(GetSP());
if (process_sp) {
@@ -374,9 +353,7 @@ bool SBProcess::SetSelectedThread(const SBThread &thread) {
}
bool SBProcess::SetSelectedThreadByID(lldb::tid_t tid) {
- LLDB_RECORD_METHOD(bool, SBProcess, SetSelectedThreadByID, (lldb::tid_t),
- tid);
-
+ LLDB_INSTRUMENT_VA(this, tid);
bool ret_val = false;
ProcessSP process_sp(GetSP());
@@ -390,8 +367,7 @@ bool SBProcess::SetSelectedThreadByID(lldb::tid_t tid) {
}
bool SBProcess::SetSelectedThreadByIndexID(uint32_t index_id) {
- LLDB_RECORD_METHOD(bool, SBProcess, SetSelectedThreadByIndexID, (uint32_t),
- index_id);
+ LLDB_INSTRUMENT_VA(this, index_id);
bool ret_val = false;
ProcessSP process_sp(GetSP());
@@ -406,8 +382,7 @@ bool SBProcess::SetSelectedThreadByIndexID(uint32_t index_id) {
}
SBThread SBProcess::GetThreadAtIndex(size_t index) {
- LLDB_RECORD_METHOD(lldb::SBThread, SBProcess, GetThreadAtIndex, (size_t),
- index);
+ LLDB_INSTRUMENT_VA(this, index);
SBThread sb_thread;
ThreadSP thread_sp;
@@ -421,11 +396,11 @@ SBThread SBProcess::GetThreadAtIndex(size_t index) {
sb_thread.SetThread(thread_sp);
}
- return LLDB_RECORD_RESULT(sb_thread);
+ return sb_thread;
}
uint32_t SBProcess::GetNumQueues() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBProcess, GetNumQueues);
+ LLDB_INSTRUMENT_VA(this);
uint32_t num_queues = 0;
ProcessSP process_sp(GetSP());
@@ -442,8 +417,7 @@ uint32_t SBProcess::GetNumQueues() {
}
SBQueue SBProcess::GetQueueAtIndex(size_t index) {
- LLDB_RECORD_METHOD(lldb::SBQueue, SBProcess, GetQueueAtIndex, (size_t),
- index);
+ LLDB_INSTRUMENT_VA(this, index);
SBQueue sb_queue;
QueueSP queue_sp;
@@ -458,12 +432,11 @@ SBQueue SBProcess::GetQueueAtIndex(size_t index) {
}
}
- return LLDB_RECORD_RESULT(sb_queue);
+ return sb_queue;
}
uint32_t SBProcess::GetStopID(bool include_expression_stops) {
- LLDB_RECORD_METHOD(uint32_t, SBProcess, GetStopID, (bool),
- include_expression_stops);
+ LLDB_INSTRUMENT_VA(this, include_expression_stops);
ProcessSP process_sp(GetSP());
if (process_sp) {
@@ -478,8 +451,7 @@ uint32_t SBProcess::GetStopID(bool include_expression_stops) {
}
SBEvent SBProcess::GetStopEventForStopID(uint32_t stop_id) {
- LLDB_RECORD_METHOD(lldb::SBEvent, SBProcess, GetStopEventForStopID,
- (uint32_t), stop_id);
+ LLDB_INSTRUMENT_VA(this, stop_id);
SBEvent sb_event;
EventSP event_sp;
@@ -491,11 +463,11 @@ SBEvent SBProcess::GetStopEventForStopID(uint32_t stop_id) {
sb_event.reset(event_sp);
}
- return LLDB_RECORD_RESULT(sb_event);
+ return sb_event;
}
StateType SBProcess::GetState() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::StateType, SBProcess, GetState);
+ LLDB_INSTRUMENT_VA(this);
StateType ret_val = eStateInvalid;
ProcessSP process_sp(GetSP());
@@ -509,7 +481,7 @@ StateType SBProcess::GetState() {
}
int SBProcess::GetExitStatus() {
- LLDB_RECORD_METHOD_NO_ARGS(int, SBProcess, GetExitStatus);
+ LLDB_INSTRUMENT_VA(this);
int exit_status = 0;
ProcessSP process_sp(GetSP());
@@ -523,7 +495,7 @@ int SBProcess::GetExitStatus() {
}
const char *SBProcess::GetExitDescription() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBProcess, GetExitDescription);
+ LLDB_INSTRUMENT_VA(this);
const char *exit_desc = nullptr;
ProcessSP process_sp(GetSP());
@@ -536,7 +508,7 @@ const char *SBProcess::GetExitDescription() {
}
lldb::pid_t SBProcess::GetProcessID() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::pid_t, SBProcess, GetProcessID);
+ LLDB_INSTRUMENT_VA(this);
lldb::pid_t ret_val = LLDB_INVALID_PROCESS_ID;
ProcessSP process_sp(GetSP());
@@ -547,7 +519,7 @@ lldb::pid_t SBProcess::GetProcessID() {
}
uint32_t SBProcess::GetUniqueID() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBProcess, GetUniqueID);
+ LLDB_INSTRUMENT_VA(this);
uint32_t ret_val = 0;
ProcessSP process_sp(GetSP());
@@ -557,7 +529,7 @@ uint32_t SBProcess::GetUniqueID() {
}
ByteOrder SBProcess::GetByteOrder() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::ByteOrder, SBProcess, GetByteOrder);
+ LLDB_INSTRUMENT_VA(this);
ByteOrder byteOrder = eByteOrderInvalid;
ProcessSP process_sp(GetSP());
@@ -569,7 +541,7 @@ ByteOrder SBProcess::GetByteOrder() const {
}
uint32_t SBProcess::GetAddressByteSize() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBProcess, GetAddressByteSize);
+ LLDB_INSTRUMENT_VA(this);
uint32_t size = 0;
ProcessSP process_sp(GetSP());
@@ -581,7 +553,7 @@ uint32_t SBProcess::GetAddressByteSize() const {
}
SBError SBProcess::Continue() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBError, SBProcess, Continue);
+ LLDB_INSTRUMENT_VA(this);
SBError sb_error;
ProcessSP process_sp(GetSP());
@@ -597,11 +569,11 @@ SBError SBProcess::Continue() {
} else
sb_error.SetErrorString("SBProcess is invalid");
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
SBError SBProcess::Destroy() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBError, SBProcess, Destroy);
+ LLDB_INSTRUMENT_VA(this);
SBError sb_error;
ProcessSP process_sp(GetSP());
@@ -612,11 +584,11 @@ SBError SBProcess::Destroy() {
} else
sb_error.SetErrorString("SBProcess is invalid");
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
SBError SBProcess::Stop() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBError, SBProcess, Stop);
+ LLDB_INSTRUMENT_VA(this);
SBError sb_error;
ProcessSP process_sp(GetSP());
@@ -627,11 +599,11 @@ SBError SBProcess::Stop() {
} else
sb_error.SetErrorString("SBProcess is invalid");
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
SBError SBProcess::Kill() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBError, SBProcess, Kill);
+ LLDB_INSTRUMENT_VA(this);
SBError sb_error;
ProcessSP process_sp(GetSP());
@@ -642,19 +614,19 @@ SBError SBProcess::Kill() {
} else
sb_error.SetErrorString("SBProcess is invalid");
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
SBError SBProcess::Detach() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBError, SBProcess, Detach);
+ LLDB_INSTRUMENT_VA(this);
// FIXME: This should come from a process default.
bool keep_stopped = false;
- return LLDB_RECORD_RESULT(Detach(keep_stopped));
+ return Detach(keep_stopped);
}
SBError SBProcess::Detach(bool keep_stopped) {
- LLDB_RECORD_METHOD(lldb::SBError, SBProcess, Detach, (bool), keep_stopped);
+ LLDB_INSTRUMENT_VA(this, keep_stopped);
SBError sb_error;
ProcessSP process_sp(GetSP());
@@ -665,11 +637,11 @@ SBError SBProcess::Detach(bool keep_stopped) {
} else
sb_error.SetErrorString("SBProcess is invalid");
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
SBError SBProcess::Signal(int signo) {
- LLDB_RECORD_METHOD(lldb::SBError, SBProcess, Signal, (int), signo);
+ LLDB_INSTRUMENT_VA(this, signo);
SBError sb_error;
ProcessSP process_sp(GetSP());
@@ -680,20 +652,20 @@ SBError SBProcess::Signal(int signo) {
} else
sb_error.SetErrorString("SBProcess is invalid");
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
SBUnixSignals SBProcess::GetUnixSignals() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBUnixSignals, SBProcess, GetUnixSignals);
+ LLDB_INSTRUMENT_VA(this);
if (auto process_sp = GetSP())
- return LLDB_RECORD_RESULT(SBUnixSignals{process_sp});
+ return SBUnixSignals{process_sp};
- return LLDB_RECORD_RESULT(SBUnixSignals{});
+ return SBUnixSignals{};
}
void SBProcess::SendAsyncInterrupt() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBProcess, SendAsyncInterrupt);
+ LLDB_INSTRUMENT_VA(this);
ProcessSP process_sp(GetSP());
if (process_sp) {
@@ -702,8 +674,7 @@ void SBProcess::SendAsyncInterrupt() {
}
SBThread SBProcess::GetThreadByID(tid_t tid) {
- LLDB_RECORD_METHOD(lldb::SBThread, SBProcess, GetThreadByID, (lldb::tid_t),
- tid);
+ LLDB_INSTRUMENT_VA(this, tid);
SBThread sb_thread;
ThreadSP thread_sp;
@@ -717,12 +688,11 @@ SBThread SBProcess::GetThreadByID(tid_t tid) {
sb_thread.SetThread(thread_sp);
}
- return LLDB_RECORD_RESULT(sb_thread);
+ return sb_thread;
}
SBThread SBProcess::GetThreadByIndexID(uint32_t index_id) {
- LLDB_RECORD_METHOD(lldb::SBThread, SBProcess, GetThreadByIndexID, (uint32_t),
- index_id);
+ LLDB_INSTRUMENT_VA(this, index_id);
SBThread sb_thread;
ThreadSP thread_sp;
@@ -737,12 +707,11 @@ SBThread SBProcess::GetThreadByIndexID(uint32_t index_id) {
sb_thread.SetThread(thread_sp);
}
- return LLDB_RECORD_RESULT(sb_thread);
+ return sb_thread;
}
StateType SBProcess::GetStateFromEvent(const SBEvent &event) {
- LLDB_RECORD_STATIC_METHOD(lldb::StateType, SBProcess, GetStateFromEvent,
- (const lldb::SBEvent &), event);
+ LLDB_INSTRUMENT_VA(event);
StateType ret_val = Process::ProcessEventData::GetStateFromEvent(event.get());
@@ -750,8 +719,7 @@ StateType SBProcess::GetStateFromEvent(const SBEvent &event) {
}
bool SBProcess::GetRestartedFromEvent(const SBEvent &event) {
- LLDB_RECORD_STATIC_METHOD(bool, SBProcess, GetRestartedFromEvent,
- (const lldb::SBEvent &), event);
+ LLDB_INSTRUMENT_VA(event);
bool ret_val = Process::ProcessEventData::GetRestartedFromEvent(event.get());
@@ -759,8 +727,7 @@ bool SBProcess::GetRestartedFromEvent(const SBEvent &event) {
}
size_t SBProcess::GetNumRestartedReasonsFromEvent(const lldb::SBEvent &event) {
- LLDB_RECORD_STATIC_METHOD(size_t, SBProcess, GetNumRestartedReasonsFromEvent,
- (const lldb::SBEvent &), event);
+ LLDB_INSTRUMENT_VA(event);
return Process::ProcessEventData::GetNumRestartedReasons(event.get());
}
@@ -768,16 +735,13 @@ size_t SBProcess::GetNumRestartedReasonsFromEvent(const lldb::SBEvent &event) {
const char *
SBProcess::GetRestartedReasonAtIndexFromEvent(const lldb::SBEvent &event,
size_t idx) {
- LLDB_RECORD_STATIC_METHOD(const char *, SBProcess,
- GetRestartedReasonAtIndexFromEvent,
- (const lldb::SBEvent &, size_t), event, idx);
+ LLDB_INSTRUMENT_VA(event, idx);
return Process::ProcessEventData::GetRestartedReasonAtIndex(event.get(), idx);
}
SBProcess SBProcess::GetProcessFromEvent(const SBEvent &event) {
- LLDB_RECORD_STATIC_METHOD(lldb::SBProcess, SBProcess, GetProcessFromEvent,
- (const lldb::SBEvent &), event);
+ LLDB_INSTRUMENT_VA(event);
ProcessSP process_sp =
Process::ProcessEventData::GetProcessFromEvent(event.get());
@@ -786,36 +750,31 @@ SBProcess SBProcess::GetProcessFromEvent(const SBEvent &event) {
process_sp = EventDataStructuredData::GetProcessFromEvent(event.get());
}
- return LLDB_RECORD_RESULT(SBProcess(process_sp));
+ return SBProcess(process_sp);
}
bool SBProcess::GetInterruptedFromEvent(const SBEvent &event) {
- LLDB_RECORD_STATIC_METHOD(bool, SBProcess, GetInterruptedFromEvent,
- (const lldb::SBEvent &), event);
+ LLDB_INSTRUMENT_VA(event);
return Process::ProcessEventData::GetInterruptedFromEvent(event.get());
}
lldb::SBStructuredData
SBProcess::GetStructuredDataFromEvent(const lldb::SBEvent &event) {
- LLDB_RECORD_STATIC_METHOD(lldb::SBStructuredData, SBProcess,
- GetStructuredDataFromEvent, (const lldb::SBEvent &),
- event);
+ LLDB_INSTRUMENT_VA(event);
- return LLDB_RECORD_RESULT(SBStructuredData(event.GetSP()));
+ return SBStructuredData(event.GetSP());
}
bool SBProcess::EventIsProcessEvent(const SBEvent &event) {
- LLDB_RECORD_STATIC_METHOD(bool, SBProcess, EventIsProcessEvent,
- (const lldb::SBEvent &), event);
+ LLDB_INSTRUMENT_VA(event);
return (event.GetBroadcasterClass() == SBProcess::GetBroadcasterClass()) &&
!EventIsStructuredDataEvent(event);
}
bool SBProcess::EventIsStructuredDataEvent(const lldb::SBEvent &event) {
- LLDB_RECORD_STATIC_METHOD(bool, SBProcess, EventIsStructuredDataEvent,
- (const lldb::SBEvent &), event);
+ LLDB_INSTRUMENT_VA(event);
EventSP event_sp = event.GetSP();
EventData *event_data = event_sp ? event_sp->GetData() : nullptr;
@@ -824,30 +783,24 @@ bool SBProcess::EventIsStructuredDataEvent(const lldb::SBEvent &event) {
}
SBBroadcaster SBProcess::GetBroadcaster() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBBroadcaster, SBProcess,
- GetBroadcaster);
-
+ LLDB_INSTRUMENT_VA(this);
ProcessSP process_sp(GetSP());
SBBroadcaster broadcaster(process_sp.get(), false);
-
- return LLDB_RECORD_RESULT(broadcaster);
+ return broadcaster;
}
const char *SBProcess::GetBroadcasterClass() {
- LLDB_RECORD_STATIC_METHOD_NO_ARGS(const char *, SBProcess,
- GetBroadcasterClass);
+ LLDB_INSTRUMENT();
return Process::GetStaticBroadcasterClass().AsCString();
}
size_t SBProcess::ReadMemory(addr_t addr, void *dst, size_t dst_len,
SBError &sb_error) {
- LLDB_RECORD_DUMMY(size_t, SBProcess, ReadMemory,
- (lldb::addr_t, void *, size_t, lldb::SBError &), addr, dst,
- dst_len, sb_error);
+ LLDB_INSTRUMENT_VA(this, addr, dst, dst_len, sb_error);
size_t bytes_read = 0;
@@ -872,9 +825,7 @@ size_t SBProcess::ReadMemory(addr_t addr, void *dst, size_t dst_len,
size_t SBProcess::ReadCStringFromMemory(addr_t addr, void *buf, size_t size,
lldb::SBError &sb_error) {
- LLDB_RECORD_DUMMY(size_t, SBProcess, ReadCStringFromMemory,
- (lldb::addr_t, void *, size_t, lldb::SBError &), addr, buf,
- size, sb_error);
+ LLDB_INSTRUMENT_VA(this, addr, buf, size, sb_error);
size_t bytes_read = 0;
ProcessSP process_sp(GetSP());
@@ -896,9 +847,7 @@ size_t SBProcess::ReadCStringFromMemory(addr_t addr, void *buf, size_t size,
uint64_t SBProcess::ReadUnsignedFromMemory(addr_t addr, uint32_t byte_size,
lldb::SBError &sb_error) {
- LLDB_RECORD_METHOD(uint64_t, SBProcess, ReadUnsignedFromMemory,
- (lldb::addr_t, uint32_t, lldb::SBError &), addr, byte_size,
- sb_error);
+ LLDB_INSTRUMENT_VA(this, addr, byte_size, sb_error);
uint64_t value = 0;
ProcessSP process_sp(GetSP());
@@ -920,8 +869,7 @@ uint64_t SBProcess::ReadUnsignedFromMemory(addr_t addr, uint32_t byte_size,
lldb::addr_t SBProcess::ReadPointerFromMemory(addr_t addr,
lldb::SBError &sb_error) {
- LLDB_RECORD_METHOD(lldb::addr_t, SBProcess, ReadPointerFromMemory,
- (lldb::addr_t, lldb::SBError &), addr, sb_error);
+ LLDB_INSTRUMENT_VA(this, addr, sb_error);
lldb::addr_t ptr = LLDB_INVALID_ADDRESS;
ProcessSP process_sp(GetSP());
@@ -942,9 +890,7 @@ lldb::addr_t SBProcess::ReadPointerFromMemory(addr_t addr,
size_t SBProcess::WriteMemory(addr_t addr, const void *src, size_t src_len,
SBError &sb_error) {
- LLDB_RECORD_DUMMY(size_t, SBProcess, WriteMemory,
- (lldb::addr_t, const void *, size_t, lldb::SBError &), addr,
- src, src_len, sb_error);
+ LLDB_INSTRUMENT_VA(this, addr, src, src_len, sb_error);
size_t bytes_written = 0;
@@ -966,8 +912,7 @@ size_t SBProcess::WriteMemory(addr_t addr, const void *src, size_t src_len,
}
bool SBProcess::GetDescription(SBStream &description) {
- LLDB_RECORD_METHOD(bool, SBProcess, GetDescription, (lldb::SBStream &),
- description);
+ LLDB_INSTRUMENT_VA(this, description);
Stream &strm = description.ref();
@@ -991,34 +936,31 @@ bool SBProcess::GetDescription(SBStream &description) {
}
SBStructuredData SBProcess::GetExtendedCrashInformation() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBStructuredData, SBProcess,
- GetExtendedCrashInformation);
+ LLDB_INSTRUMENT_VA(this);
SBStructuredData data;
ProcessSP process_sp(GetSP());
if (!process_sp)
- return LLDB_RECORD_RESULT(data);
+ return data;
PlatformSP platform_sp = process_sp->GetTarget().GetPlatform();
if (!platform_sp)
- return LLDB_RECORD_RESULT(data);
+ return data;
auto expected_data =
platform_sp->FetchExtendedCrashInformation(*process_sp.get());
if (!expected_data)
- return LLDB_RECORD_RESULT(data);
+ return data;
StructuredData::ObjectSP fetched_data = *expected_data;
data.m_impl_up->SetObjectSP(fetched_data);
- return LLDB_RECORD_RESULT(data);
+ return data;
}
uint32_t
SBProcess::GetNumSupportedHardwareWatchpoints(lldb::SBError &sb_error) const {
- LLDB_RECORD_METHOD_CONST(uint32_t, SBProcess,
- GetNumSupportedHardwareWatchpoints,
- (lldb::SBError &), sb_error);
+ LLDB_INSTRUMENT_VA(this, sb_error);
uint32_t num = 0;
ProcessSP process_sp(GetSP());
@@ -1034,9 +976,7 @@ SBProcess::GetNumSupportedHardwareWatchpoints(lldb::SBError &sb_error) const {
uint32_t SBProcess::LoadImage(lldb::SBFileSpec &sb_remote_image_spec,
lldb::SBError &sb_error) {
- LLDB_RECORD_METHOD(uint32_t, SBProcess, LoadImage,
- (lldb::SBFileSpec &, lldb::SBError &),
- sb_remote_image_spec, sb_error);
+ LLDB_INSTRUMENT_VA(this, sb_remote_image_spec, sb_error);
return LoadImage(SBFileSpec(), sb_remote_image_spec, sb_error);
}
@@ -1044,10 +984,7 @@ uint32_t SBProcess::LoadImage(lldb::SBFileSpec &sb_remote_image_spec,
uint32_t SBProcess::LoadImage(const lldb::SBFileSpec &sb_local_image_spec,
const lldb::SBFileSpec &sb_remote_image_spec,
lldb::SBError &sb_error) {
- LLDB_RECORD_METHOD(
- uint32_t, SBProcess, LoadImage,
- (const lldb::SBFileSpec &, const lldb::SBFileSpec &, lldb::SBError &),
- sb_local_image_spec, sb_remote_image_spec, sb_error);
+ LLDB_INSTRUMENT_VA(this, sb_local_image_spec, sb_remote_image_spec, sb_error);
ProcessSP process_sp(GetSP());
if (process_sp) {
@@ -1071,10 +1008,7 @@ uint32_t SBProcess::LoadImageUsingPaths(const lldb::SBFileSpec &image_spec,
SBStringList &paths,
lldb::SBFileSpec &loaded_path,
lldb::SBError &error) {
- LLDB_RECORD_METHOD(uint32_t, SBProcess, LoadImageUsingPaths,
- (const lldb::SBFileSpec &, lldb::SBStringList &,
- lldb::SBFileSpec &, lldb::SBError &),
- image_spec, paths, loaded_path, error);
+ LLDB_INSTRUMENT_VA(this, image_spec, paths, loaded_path, error);
ProcessSP process_sp(GetSP());
if (process_sp) {
@@ -1106,8 +1040,7 @@ uint32_t SBProcess::LoadImageUsingPaths(const lldb::SBFileSpec &image_spec,
}
lldb::SBError SBProcess::UnloadImage(uint32_t image_token) {
- LLDB_RECORD_METHOD(lldb::SBError, SBProcess, UnloadImage, (uint32_t),
- image_token);
+ LLDB_INSTRUMENT_VA(this, image_token);
lldb::SBError sb_error;
ProcessSP process_sp(GetSP());
@@ -1124,12 +1057,11 @@ lldb::SBError SBProcess::UnloadImage(uint32_t image_token) {
}
} else
sb_error.SetErrorString("invalid process");
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
lldb::SBError SBProcess::SendEventData(const char *event_data) {
- LLDB_RECORD_METHOD(lldb::SBError, SBProcess, SendEventData, (const char *),
- event_data);
+ LLDB_INSTRUMENT_VA(this, event_data);
lldb::SBError sb_error;
ProcessSP process_sp(GetSP());
@@ -1144,11 +1076,11 @@ lldb::SBError SBProcess::SendEventData(const char *event_data) {
}
} else
sb_error.SetErrorString("invalid process");
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
uint32_t SBProcess::GetNumExtendedBacktraceTypes() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBProcess, GetNumExtendedBacktraceTypes);
+ LLDB_INSTRUMENT_VA(this);
ProcessSP process_sp(GetSP());
if (process_sp && process_sp->GetSystemRuntime()) {
@@ -1159,8 +1091,7 @@ uint32_t SBProcess::GetNumExtendedBacktraceTypes() {
}
const char *SBProcess::GetExtendedBacktraceTypeAtIndex(uint32_t idx) {
- LLDB_RECORD_METHOD(const char *, SBProcess, GetExtendedBacktraceTypeAtIndex,
- (uint32_t), idx);
+ LLDB_INSTRUMENT_VA(this, idx);
ProcessSP process_sp(GetSP());
if (process_sp && process_sp->GetSystemRuntime()) {
@@ -1175,21 +1106,19 @@ const char *SBProcess::GetExtendedBacktraceTypeAtIndex(uint32_t idx) {
}
SBThreadCollection SBProcess::GetHistoryThreads(addr_t addr) {
- LLDB_RECORD_METHOD(lldb::SBThreadCollection, SBProcess, GetHistoryThreads,
- (lldb::addr_t), addr);
+ LLDB_INSTRUMENT_VA(this, addr);
ProcessSP process_sp(GetSP());
SBThreadCollection threads;
if (process_sp) {
threads = SBThreadCollection(process_sp->GetHistoryThreads(addr));
}
- return LLDB_RECORD_RESULT(threads);
+ return threads;
}
bool SBProcess::IsInstrumentationRuntimePresent(
InstrumentationRuntimeType type) {
- LLDB_RECORD_METHOD(bool, SBProcess, IsInstrumentationRuntimePresent,
- (lldb::InstrumentationRuntimeType), type);
+ LLDB_INSTRUMENT_VA(this, type);
ProcessSP process_sp(GetSP());
if (!process_sp)
@@ -1208,14 +1137,13 @@ bool SBProcess::IsInstrumentationRuntimePresent(
}
lldb::SBError SBProcess::SaveCore(const char *file_name) {
- LLDB_RECORD_METHOD(lldb::SBError, SBProcess, SaveCore, (const char *),
- file_name);
+ LLDB_INSTRUMENT_VA(this, file_name);
lldb::SBError error;
ProcessSP process_sp(GetSP());
if (!process_sp) {
error.SetErrorString("SBProcess is invalid");
- return LLDB_RECORD_RESULT(error);
+ return error;
}
std::lock_guard<std::recursive_mutex> guard(
@@ -1223,21 +1151,19 @@ lldb::SBError SBProcess::SaveCore(const char *file_name) {
if (process_sp->GetState() != eStateStopped) {
error.SetErrorString("the process is not stopped");
- return LLDB_RECORD_RESULT(error);
+ return error;
}
FileSpec core_file(file_name);
SaveCoreStyle core_style = SaveCoreStyle::eSaveCoreFull;
error.ref() = PluginManager::SaveCore(process_sp, core_file, core_style, "");
- return LLDB_RECORD_RESULT(error);
+ return error;
}
lldb::SBError
SBProcess::GetMemoryRegionInfo(lldb::addr_t load_addr,
SBMemoryRegionInfo &sb_region_info) {
- LLDB_RECORD_METHOD(lldb::SBError, SBProcess, GetMemoryRegionInfo,
- (lldb::addr_t, lldb::SBMemoryRegionInfo &), load_addr,
- sb_region_info);
+ LLDB_INSTRUMENT_VA(this, load_addr, sb_region_info);
lldb::SBError sb_error;
ProcessSP process_sp(GetSP());
@@ -1255,12 +1181,11 @@ SBProcess::GetMemoryRegionInfo(lldb::addr_t load_addr,
} else {
sb_error.SetErrorString("SBProcess is invalid");
}
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
lldb::SBMemoryRegionInfoList SBProcess::GetMemoryRegions() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBMemoryRegionInfoList, SBProcess,
- GetMemoryRegions);
+ LLDB_INSTRUMENT_VA(this);
lldb::SBMemoryRegionInfoList sb_region_list;
@@ -1273,11 +1198,11 @@ lldb::SBMemoryRegionInfoList SBProcess::GetMemoryRegions() {
process_sp->GetMemoryRegions(sb_region_list.ref());
}
- return LLDB_RECORD_RESULT(sb_region_list);
+ return sb_region_list;
}
lldb::SBProcessInfo SBProcess::GetProcessInfo() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBProcessInfo, SBProcess, GetProcessInfo);
+ LLDB_INSTRUMENT_VA(this);
lldb::SBProcessInfo sb_proc_info;
ProcessSP process_sp(GetSP());
@@ -1285,14 +1210,12 @@ lldb::SBProcessInfo SBProcess::GetProcessInfo() {
if (process_sp && process_sp->GetProcessInfo(proc_info)) {
sb_proc_info.SetProcessInfo(proc_info);
}
- return LLDB_RECORD_RESULT(sb_proc_info);
+ return sb_proc_info;
}
lldb::addr_t SBProcess::AllocateMemory(size_t size, uint32_t permissions,
lldb::SBError &sb_error) {
- LLDB_RECORD_METHOD(lldb::addr_t, SBProcess, AllocateMemory,
- (size_t, uint32_t, lldb::SBError &), size, permissions,
- sb_error);
+ LLDB_INSTRUMENT_VA(this, size, permissions, sb_error);
lldb::addr_t addr = LLDB_INVALID_ADDRESS;
ProcessSP process_sp(GetSP());
@@ -1312,8 +1235,7 @@ lldb::addr_t SBProcess::AllocateMemory(size_t size, uint32_t permissions,
}
lldb::SBError SBProcess::DeallocateMemory(lldb::addr_t ptr) {
- LLDB_RECORD_METHOD(lldb::SBError, SBProcess, DeallocateMemory, (lldb::addr_t),
- ptr);
+ LLDB_INSTRUMENT_VA(this, ptr);
lldb::SBError sb_error;
ProcessSP process_sp(GetSP());
@@ -1332,145 +1254,3 @@ lldb::SBError SBProcess::DeallocateMemory(lldb::addr_t ptr) {
}
return sb_error;
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBProcess>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBProcess, ());
- LLDB_REGISTER_CONSTRUCTOR(SBProcess, (const lldb::SBProcess &));
- LLDB_REGISTER_CONSTRUCTOR(SBProcess, (const lldb::ProcessSP &));
- LLDB_REGISTER_METHOD(const lldb::SBProcess &,
- SBProcess, operator=,(const lldb::SBProcess &));
- LLDB_REGISTER_STATIC_METHOD(const char *, SBProcess,
- GetBroadcasterClassName, ());
- LLDB_REGISTER_METHOD(const char *, SBProcess, GetPluginName, ());
- LLDB_REGISTER_METHOD(const char *, SBProcess, GetShortPluginName, ());
- LLDB_REGISTER_METHOD(void, SBProcess, Clear, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBProcess, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBProcess, operator bool, ());
- LLDB_REGISTER_METHOD(bool, SBProcess, RemoteLaunch,
- (const char **, const char **, const char *,
- const char *, const char *, const char *, uint32_t,
- bool, lldb::SBError &));
- LLDB_REGISTER_METHOD(bool, SBProcess, RemoteAttachToProcessWithID,
- (lldb::pid_t, lldb::SBError &));
- LLDB_REGISTER_METHOD(uint32_t, SBProcess, GetNumThreads, ());
- LLDB_REGISTER_METHOD_CONST(lldb::SBThread, SBProcess, GetSelectedThread,
- ());
- LLDB_REGISTER_METHOD(lldb::SBThread, SBProcess, CreateOSPluginThread,
- (lldb::tid_t, lldb::addr_t));
- LLDB_REGISTER_METHOD_CONST(lldb::SBTarget, SBProcess, GetTarget, ());
- LLDB_REGISTER_METHOD(size_t, SBProcess, PutSTDIN, (const char *, size_t));
- LLDB_REGISTER_METHOD_CONST(void, SBProcess, ReportEventState,
- (const lldb::SBEvent &, FILE *));
- LLDB_REGISTER_METHOD_CONST(void, SBProcess, ReportEventState,
- (const lldb::SBEvent &, FileSP));
- LLDB_REGISTER_METHOD_CONST(void, SBProcess, ReportEventState,
- (const lldb::SBEvent &, SBFile));
- LLDB_REGISTER_METHOD(
- void, SBProcess, AppendEventStateReport,
- (const lldb::SBEvent &, lldb::SBCommandReturnObject &));
- LLDB_REGISTER_METHOD(bool, SBProcess, SetSelectedThread,
- (const lldb::SBThread &));
- LLDB_REGISTER_METHOD(bool, SBProcess, SetSelectedThreadByID, (lldb::tid_t));
- LLDB_REGISTER_METHOD(bool, SBProcess, SetSelectedThreadByIndexID,
- (uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBThread, SBProcess, GetThreadAtIndex, (size_t));
- LLDB_REGISTER_METHOD(uint32_t, SBProcess, GetNumQueues, ());
- LLDB_REGISTER_METHOD(lldb::SBQueue, SBProcess, GetQueueAtIndex, (size_t));
- LLDB_REGISTER_METHOD(uint32_t, SBProcess, GetStopID, (bool));
- LLDB_REGISTER_METHOD(lldb::SBEvent, SBProcess, GetStopEventForStopID,
- (uint32_t));
- LLDB_REGISTER_METHOD(lldb::StateType, SBProcess, GetState, ());
- LLDB_REGISTER_METHOD(int, SBProcess, GetExitStatus, ());
- LLDB_REGISTER_METHOD(const char *, SBProcess, GetExitDescription, ());
- LLDB_REGISTER_METHOD(lldb::pid_t, SBProcess, GetProcessID, ());
- LLDB_REGISTER_METHOD(uint32_t, SBProcess, GetUniqueID, ());
- LLDB_REGISTER_METHOD_CONST(lldb::ByteOrder, SBProcess, GetByteOrder, ());
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBProcess, GetAddressByteSize, ());
- LLDB_REGISTER_METHOD(lldb::SBError, SBProcess, Continue, ());
- LLDB_REGISTER_METHOD(lldb::SBError, SBProcess, Destroy, ());
- LLDB_REGISTER_METHOD(lldb::SBError, SBProcess, Stop, ());
- LLDB_REGISTER_METHOD(lldb::SBError, SBProcess, Kill, ());
- LLDB_REGISTER_METHOD(lldb::SBError, SBProcess, Detach, ());
- LLDB_REGISTER_METHOD(lldb::SBError, SBProcess, Detach, (bool));
- LLDB_REGISTER_METHOD(lldb::SBError, SBProcess, Signal, (int));
- LLDB_REGISTER_METHOD(lldb::SBUnixSignals, SBProcess, GetUnixSignals, ());
- LLDB_REGISTER_METHOD(void, SBProcess, SendAsyncInterrupt, ());
- LLDB_REGISTER_METHOD(lldb::SBThread, SBProcess, GetThreadByID,
- (lldb::tid_t));
- LLDB_REGISTER_METHOD(lldb::SBThread, SBProcess, GetThreadByIndexID,
- (uint32_t));
- LLDB_REGISTER_STATIC_METHOD(lldb::StateType, SBProcess, GetStateFromEvent,
- (const lldb::SBEvent &));
- LLDB_REGISTER_STATIC_METHOD(bool, SBProcess, GetRestartedFromEvent,
- (const lldb::SBEvent &));
- LLDB_REGISTER_STATIC_METHOD(size_t, SBProcess,
- GetNumRestartedReasonsFromEvent,
- (const lldb::SBEvent &));
- LLDB_REGISTER_STATIC_METHOD(const char *, SBProcess,
- GetRestartedReasonAtIndexFromEvent,
- (const lldb::SBEvent &, size_t));
- LLDB_REGISTER_STATIC_METHOD(lldb::SBProcess, SBProcess, GetProcessFromEvent,
- (const lldb::SBEvent &));
- LLDB_REGISTER_STATIC_METHOD(bool, SBProcess, GetInterruptedFromEvent,
- (const lldb::SBEvent &));
- LLDB_REGISTER_STATIC_METHOD(lldb::SBStructuredData, SBProcess,
- GetStructuredDataFromEvent,
- (const lldb::SBEvent &));
- LLDB_REGISTER_STATIC_METHOD(bool, SBProcess, EventIsProcessEvent,
- (const lldb::SBEvent &));
- LLDB_REGISTER_STATIC_METHOD(bool, SBProcess, EventIsStructuredDataEvent,
- (const lldb::SBEvent &));
- LLDB_REGISTER_METHOD_CONST(lldb::SBBroadcaster, SBProcess, GetBroadcaster,
- ());
- LLDB_REGISTER_STATIC_METHOD(const char *, SBProcess, GetBroadcasterClass,
- ());
- LLDB_REGISTER_METHOD(uint64_t, SBProcess, ReadUnsignedFromMemory,
- (lldb::addr_t, uint32_t, lldb::SBError &));
- LLDB_REGISTER_METHOD(lldb::addr_t, SBProcess, ReadPointerFromMemory,
- (lldb::addr_t, lldb::SBError &));
- LLDB_REGISTER_METHOD(bool, SBProcess, GetDescription, (lldb::SBStream &));
- LLDB_REGISTER_METHOD(lldb::SBStructuredData, SBProcess,
- GetExtendedCrashInformation, ());
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBProcess,
- GetNumSupportedHardwareWatchpoints,
- (lldb::SBError &));
- LLDB_REGISTER_METHOD(uint32_t, SBProcess, LoadImage,
- (lldb::SBFileSpec &, lldb::SBError &));
- LLDB_REGISTER_METHOD(
- uint32_t, SBProcess, LoadImage,
- (const lldb::SBFileSpec &, const lldb::SBFileSpec &, lldb::SBError &));
- LLDB_REGISTER_METHOD(uint32_t, SBProcess, LoadImageUsingPaths,
- (const lldb::SBFileSpec &, lldb::SBStringList &,
- lldb::SBFileSpec &, lldb::SBError &));
- LLDB_REGISTER_METHOD(lldb::SBError, SBProcess, UnloadImage, (uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBError, SBProcess, SendEventData,
- (const char *));
- LLDB_REGISTER_METHOD(uint32_t, SBProcess, GetNumExtendedBacktraceTypes, ());
- LLDB_REGISTER_METHOD(const char *, SBProcess,
- GetExtendedBacktraceTypeAtIndex, (uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBThreadCollection, SBProcess, GetHistoryThreads,
- (lldb::addr_t));
- LLDB_REGISTER_METHOD(bool, SBProcess, IsInstrumentationRuntimePresent,
- (lldb::InstrumentationRuntimeType));
- LLDB_REGISTER_METHOD(lldb::SBError, SBProcess, SaveCore, (const char *));
- LLDB_REGISTER_METHOD(lldb::SBError, SBProcess, GetMemoryRegionInfo,
- (lldb::addr_t, lldb::SBMemoryRegionInfo &));
- LLDB_REGISTER_METHOD(lldb::SBMemoryRegionInfoList, SBProcess,
- GetMemoryRegions, ());
- LLDB_REGISTER_METHOD(lldb::SBProcessInfo, SBProcess, GetProcessInfo, ());
- LLDB_REGISTER_METHOD(lldb::addr_t, SBProcess, AllocateMemory,
- (size_t, uint32_t, lldb::SBError &));
- LLDB_REGISTER_METHOD(lldb::SBError, SBProcess, DeallocateMemory,
- (lldb::addr_t));
-
- LLDB_REGISTER_CHAR_PTR_METHOD_CONST(size_t, SBProcess, GetSTDOUT);
- LLDB_REGISTER_CHAR_PTR_METHOD_CONST(size_t, SBProcess, GetSTDERR);
- LLDB_REGISTER_CHAR_PTR_METHOD_CONST(size_t, SBProcess, GetAsyncProfileData);
-}
-
-}
-}
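
A second pattern is visible in the SBProcess context lines above: the SB object holds only a std::weak_ptr to the underlying Process and promotes it with GetSP() at the top of each method, so the wrapper never keeps the process alive by itself and every call degrades gracefully once the process is gone. The following is a minimal standalone sketch of that idiom with invented Process/ProcessWrapper names, not LLDB's actual classes.

// Minimal sketch of the weak_ptr-backed wrapper idiom: lock the weak
// reference for the duration of each call and fall back to a default
// result if the underlying object has been destroyed.
#include <iostream>
#include <memory>

struct Process {
  int exit_status = 42;
};

class ProcessWrapper {
public:
  explicit ProcessWrapper(const std::shared_ptr<Process> &sp) : m_wp(sp) {}

  int GetExitStatus() const {
    // Promote the weak reference; lock() returns nullptr if the Process died.
    if (std::shared_ptr<Process> sp = m_wp.lock())
      return sp->exit_status;
    return 0; // default when the wrapper is stale, mirroring the SB style
  }

private:
  std::weak_ptr<Process> m_wp; // the wrapper does not own the process
};

int main() {
  auto process = std::make_shared<Process>();
  ProcessWrapper wrapper(process);
  std::cout << wrapper.GetExitStatus() << '\n'; // 42 while the process lives
  process.reset();                              // underlying object goes away
  std::cout << wrapper.GetExitStatus() << '\n'; // 0 once the wrapper is stale
  return 0;
}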
diff --git a/contrib/llvm-project/lldb/source/API/SBProcessInfo.cpp b/contrib/llvm-project/lldb/source/API/SBProcessInfo.cpp
index cba3bdc179f3..da3db75ff47e 100644
--- a/contrib/llvm-project/lldb/source/API/SBProcessInfo.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBProcessInfo.cpp
@@ -7,20 +7,18 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBProcessInfo.h"
-#include "SBReproducerPrivate.h"
#include "Utils.h"
#include "lldb/API/SBFileSpec.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Utility/ProcessInfo.h"
using namespace lldb;
using namespace lldb_private;
-SBProcessInfo::SBProcessInfo() : m_opaque_up() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBProcessInfo);
-}
+SBProcessInfo::SBProcessInfo() { LLDB_INSTRUMENT_VA(this); }
-SBProcessInfo::SBProcessInfo(const SBProcessInfo &rhs) : m_opaque_up() {
- LLDB_RECORD_CONSTRUCTOR(SBProcessInfo, (const lldb::SBProcessInfo &), rhs);
+SBProcessInfo::SBProcessInfo(const SBProcessInfo &rhs) {
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_up = clone(rhs.m_opaque_up);
}
@@ -28,13 +26,11 @@ SBProcessInfo::SBProcessInfo(const SBProcessInfo &rhs) : m_opaque_up() {
SBProcessInfo::~SBProcessInfo() = default;
SBProcessInfo &SBProcessInfo::operator=(const SBProcessInfo &rhs) {
- LLDB_RECORD_METHOD(lldb::SBProcessInfo &,
- SBProcessInfo, operator=,(const lldb::SBProcessInfo &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_up = clone(rhs.m_opaque_up);
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
ProcessInstanceInfo &SBProcessInfo::ref() {
@@ -49,17 +45,17 @@ void SBProcessInfo::SetProcessInfo(const ProcessInstanceInfo &proc_info_ref) {
}
bool SBProcessInfo::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBProcessInfo, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBProcessInfo::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBProcessInfo, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up != nullptr;
}
const char *SBProcessInfo::GetName() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBProcessInfo, GetName);
+ LLDB_INSTRUMENT_VA(this);
const char *name = nullptr;
if (m_opaque_up) {
@@ -69,18 +65,17 @@ const char *SBProcessInfo::GetName() {
}
SBFileSpec SBProcessInfo::GetExecutableFile() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBFileSpec, SBProcessInfo,
- GetExecutableFile);
+ LLDB_INSTRUMENT_VA(this);
SBFileSpec file_spec;
if (m_opaque_up) {
file_spec.SetFileSpec(m_opaque_up->GetExecutableFile());
}
- return LLDB_RECORD_RESULT(file_spec);
+ return file_spec;
}
lldb::pid_t SBProcessInfo::GetProcessID() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::pid_t, SBProcessInfo, GetProcessID);
+ LLDB_INSTRUMENT_VA(this);
lldb::pid_t proc_id = LLDB_INVALID_PROCESS_ID;
if (m_opaque_up) {
@@ -90,7 +85,7 @@ lldb::pid_t SBProcessInfo::GetProcessID() {
}
uint32_t SBProcessInfo::GetUserID() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBProcessInfo, GetUserID);
+ LLDB_INSTRUMENT_VA(this);
uint32_t user_id = UINT32_MAX;
if (m_opaque_up) {
@@ -100,7 +95,7 @@ uint32_t SBProcessInfo::GetUserID() {
}
uint32_t SBProcessInfo::GetGroupID() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBProcessInfo, GetGroupID);
+ LLDB_INSTRUMENT_VA(this);
uint32_t group_id = UINT32_MAX;
if (m_opaque_up) {
@@ -110,7 +105,7 @@ uint32_t SBProcessInfo::GetGroupID() {
}
bool SBProcessInfo::UserIDIsValid() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBProcessInfo, UserIDIsValid);
+ LLDB_INSTRUMENT_VA(this);
bool is_valid = false;
if (m_opaque_up) {
@@ -120,7 +115,7 @@ bool SBProcessInfo::UserIDIsValid() {
}
bool SBProcessInfo::GroupIDIsValid() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBProcessInfo, GroupIDIsValid);
+ LLDB_INSTRUMENT_VA(this);
bool is_valid = false;
if (m_opaque_up) {
@@ -130,7 +125,7 @@ bool SBProcessInfo::GroupIDIsValid() {
}
uint32_t SBProcessInfo::GetEffectiveUserID() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBProcessInfo, GetEffectiveUserID);
+ LLDB_INSTRUMENT_VA(this);
uint32_t user_id = UINT32_MAX;
if (m_opaque_up) {
@@ -140,7 +135,7 @@ uint32_t SBProcessInfo::GetEffectiveUserID() {
}
uint32_t SBProcessInfo::GetEffectiveGroupID() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBProcessInfo, GetEffectiveGroupID);
+ LLDB_INSTRUMENT_VA(this);
uint32_t group_id = UINT32_MAX;
if (m_opaque_up) {
@@ -150,7 +145,7 @@ uint32_t SBProcessInfo::GetEffectiveGroupID() {
}
bool SBProcessInfo::EffectiveUserIDIsValid() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBProcessInfo, EffectiveUserIDIsValid);
+ LLDB_INSTRUMENT_VA(this);
bool is_valid = false;
if (m_opaque_up) {
@@ -160,7 +155,7 @@ bool SBProcessInfo::EffectiveUserIDIsValid() {
}
bool SBProcessInfo::EffectiveGroupIDIsValid() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBProcessInfo, EffectiveGroupIDIsValid);
+ LLDB_INSTRUMENT_VA(this);
bool is_valid = false;
if (m_opaque_up) {
@@ -170,7 +165,7 @@ bool SBProcessInfo::EffectiveGroupIDIsValid() {
}
lldb::pid_t SBProcessInfo::GetParentProcessID() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::pid_t, SBProcessInfo, GetParentProcessID);
+ LLDB_INSTRUMENT_VA(this);
lldb::pid_t proc_id = LLDB_INVALID_PROCESS_ID;
if (m_opaque_up) {
@@ -180,7 +175,7 @@ lldb::pid_t SBProcessInfo::GetParentProcessID() {
}
const char *SBProcessInfo::GetTriple() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBProcessInfo, GetTriple);
+ LLDB_INSTRUMENT_VA(this);
const char *triple = nullptr;
if (m_opaque_up) {
@@ -193,34 +188,3 @@ const char *SBProcessInfo::GetTriple() {
}
return triple;
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBProcessInfo>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBProcessInfo, ());
- LLDB_REGISTER_CONSTRUCTOR(SBProcessInfo, (const lldb::SBProcessInfo &));
- LLDB_REGISTER_METHOD(
- lldb::SBProcessInfo &,
- SBProcessInfo, operator=,(const lldb::SBProcessInfo &));
- LLDB_REGISTER_METHOD_CONST(bool, SBProcessInfo, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBProcessInfo, operator bool, ());
- LLDB_REGISTER_METHOD(const char *, SBProcessInfo, GetName, ());
- LLDB_REGISTER_METHOD(lldb::SBFileSpec, SBProcessInfo, GetExecutableFile,
- ());
- LLDB_REGISTER_METHOD(lldb::pid_t, SBProcessInfo, GetProcessID, ());
- LLDB_REGISTER_METHOD(uint32_t, SBProcessInfo, GetUserID, ());
- LLDB_REGISTER_METHOD(uint32_t, SBProcessInfo, GetGroupID, ());
- LLDB_REGISTER_METHOD(bool, SBProcessInfo, UserIDIsValid, ());
- LLDB_REGISTER_METHOD(bool, SBProcessInfo, GroupIDIsValid, ());
- LLDB_REGISTER_METHOD(uint32_t, SBProcessInfo, GetEffectiveUserID, ());
- LLDB_REGISTER_METHOD(uint32_t, SBProcessInfo, GetEffectiveGroupID, ());
- LLDB_REGISTER_METHOD(bool, SBProcessInfo, EffectiveUserIDIsValid, ());
- LLDB_REGISTER_METHOD(bool, SBProcessInfo, EffectiveGroupIDIsValid, ());
- LLDB_REGISTER_METHOD(lldb::pid_t, SBProcessInfo, GetParentProcessID, ());
- LLDB_REGISTER_METHOD(const char *, SBProcessInfo, GetTriple, ());
-}
-
-}
-}
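
The SBProcessInfo copy constructor and assignment operator above both go through a clone(...) helper so the unique_ptr-held ProcessInstanceInfo is deep-copied rather than shared. The sketch below shows that helper idiom in isolation; the Impl and Info names are illustrative, and the clone() in LLDB's Utils.h may be implemented differently.

// Sketch of the clone-a-unique_ptr idiom used by pimpl-style SB classes:
// copying the wrapper deep-copies the implementation object.
#include <memory>

template <typename T>
std::unique_ptr<T> clone(const std::unique_ptr<T> &src) {
  return src ? std::make_unique<T>(*src) : nullptr;
}

struct Impl {
  int value = 0;
};

class Info {
public:
  Info() = default;
  Info(const Info &rhs) { m_up = clone(rhs.m_up); }
  Info &operator=(const Info &rhs) {
    if (this != &rhs)
      m_up = clone(rhs.m_up);
    return *this;
  }

  void Set(int v) {
    if (!m_up)
      m_up = std::make_unique<Impl>();
    m_up->value = v;
  }
  int Get() const { return m_up ? m_up->value : 0; }

private:
  std::unique_ptr<Impl> m_up; // owned implementation, deep-copied on copy
};

int main() {
  Info a;
  a.Set(7);
  Info b = a; // deep copy: b keeps its own Impl
  a.Set(9);   // does not affect b
  return b.Get() == 7 ? 0 : 1;
}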
diff --git a/contrib/llvm-project/lldb/source/API/SBQueue.cpp b/contrib/llvm-project/lldb/source/API/SBQueue.cpp
index 746df9e79d61..b2c143f6357e 100644
--- a/contrib/llvm-project/lldb/source/API/SBQueue.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBQueue.cpp
@@ -8,8 +8,8 @@
#include <cinttypes>
-#include "SBReproducerPrivate.h"
#include "lldb/API/SBQueue.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/API/SBProcess.h"
#include "lldb/API/SBQueueItem.h"
@@ -27,11 +27,10 @@ namespace lldb_private {
class QueueImpl {
public:
- QueueImpl() : m_queue_wp(), m_threads(), m_pending_items() {}
+ QueueImpl() {}
QueueImpl(const lldb::QueueSP &queue_sp)
- : m_queue_wp(), m_threads(), m_thread_list_fetched(false),
- m_pending_items(), m_pending_items_fetched(false) {
+ : m_thread_list_fetched(false), m_pending_items_fetched(false) {
m_queue_wp = queue_sp;
}
@@ -216,17 +215,15 @@ private:
};
}
-SBQueue::SBQueue() : m_opaque_sp(new QueueImpl()) {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBQueue);
-}
+SBQueue::SBQueue() : m_opaque_sp(new QueueImpl()) { LLDB_INSTRUMENT_VA(this); }
SBQueue::SBQueue(const QueueSP &queue_sp)
: m_opaque_sp(new QueueImpl(queue_sp)) {
- LLDB_RECORD_CONSTRUCTOR(SBQueue, (const lldb::QueueSP &), queue_sp);
+ LLDB_INSTRUMENT_VA(this, queue_sp);
}
SBQueue::SBQueue(const SBQueue &rhs) {
- LLDB_RECORD_CONSTRUCTOR(SBQueue, (const lldb::SBQueue &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (&rhs == this)
return;
@@ -235,27 +232,26 @@ SBQueue::SBQueue(const SBQueue &rhs) {
}
const lldb::SBQueue &SBQueue::operator=(const lldb::SBQueue &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBQueue &,
- SBQueue, operator=,(const lldb::SBQueue &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_sp = rhs.m_opaque_sp;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBQueue::~SBQueue() = default;
bool SBQueue::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBQueue, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBQueue::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBQueue, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->IsValid();
}
void SBQueue::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBQueue, Clear);
+ LLDB_INSTRUMENT_VA(this);
m_opaque_sp->Clear();
}
@@ -265,94 +261,63 @@ void SBQueue::SetQueue(const QueueSP &queue_sp) {
}
lldb::queue_id_t SBQueue::GetQueueID() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::queue_id_t, SBQueue, GetQueueID);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GetQueueID();
}
uint32_t SBQueue::GetIndexID() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBQueue, GetIndexID);
+ LLDB_INSTRUMENT_VA(this);
uint32_t index_id = m_opaque_sp->GetIndexID();
return index_id;
}
const char *SBQueue::GetName() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBQueue, GetName);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GetName();
}
uint32_t SBQueue::GetNumThreads() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBQueue, GetNumThreads);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GetNumThreads();
}
SBThread SBQueue::GetThreadAtIndex(uint32_t idx) {
- LLDB_RECORD_METHOD(lldb::SBThread, SBQueue, GetThreadAtIndex, (uint32_t),
- idx);
+ LLDB_INSTRUMENT_VA(this, idx);
SBThread th = m_opaque_sp->GetThreadAtIndex(idx);
- return LLDB_RECORD_RESULT(th);
+ return th;
}
uint32_t SBQueue::GetNumPendingItems() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBQueue, GetNumPendingItems);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GetNumPendingItems();
}
SBQueueItem SBQueue::GetPendingItemAtIndex(uint32_t idx) {
- LLDB_RECORD_METHOD(lldb::SBQueueItem, SBQueue, GetPendingItemAtIndex,
- (uint32_t), idx);
+ LLDB_INSTRUMENT_VA(this, idx);
- return LLDB_RECORD_RESULT(m_opaque_sp->GetPendingItemAtIndex(idx));
+ return m_opaque_sp->GetPendingItemAtIndex(idx);
}
uint32_t SBQueue::GetNumRunningItems() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBQueue, GetNumRunningItems);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GetNumRunningItems();
}
SBProcess SBQueue::GetProcess() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBProcess, SBQueue, GetProcess);
+ LLDB_INSTRUMENT_VA(this);
- return LLDB_RECORD_RESULT(m_opaque_sp->GetProcess());
+ return m_opaque_sp->GetProcess();
}
lldb::QueueKind SBQueue::GetKind() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::QueueKind, SBQueue, GetKind);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp->GetKind();
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBQueue>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBQueue, ());
- LLDB_REGISTER_CONSTRUCTOR(SBQueue, (const lldb::QueueSP &));
- LLDB_REGISTER_CONSTRUCTOR(SBQueue, (const lldb::SBQueue &));
- LLDB_REGISTER_METHOD(const lldb::SBQueue &,
- SBQueue, operator=,(const lldb::SBQueue &));
- LLDB_REGISTER_METHOD_CONST(bool, SBQueue, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBQueue, operator bool, ());
- LLDB_REGISTER_METHOD(void, SBQueue, Clear, ());
- LLDB_REGISTER_METHOD_CONST(lldb::queue_id_t, SBQueue, GetQueueID, ());
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBQueue, GetIndexID, ());
- LLDB_REGISTER_METHOD_CONST(const char *, SBQueue, GetName, ());
- LLDB_REGISTER_METHOD(uint32_t, SBQueue, GetNumThreads, ());
- LLDB_REGISTER_METHOD(lldb::SBThread, SBQueue, GetThreadAtIndex, (uint32_t));
- LLDB_REGISTER_METHOD(uint32_t, SBQueue, GetNumPendingItems, ());
- LLDB_REGISTER_METHOD(lldb::SBQueueItem, SBQueue, GetPendingItemAtIndex,
- (uint32_t));
- LLDB_REGISTER_METHOD(uint32_t, SBQueue, GetNumRunningItems, ());
- LLDB_REGISTER_METHOD(lldb::SBProcess, SBQueue, GetProcess, ());
- LLDB_REGISTER_METHOD(lldb::QueueKind, SBQueue, GetKind, ());
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBQueueItem.cpp b/contrib/llvm-project/lldb/source/API/SBQueueItem.cpp
index 6cd9e4514caf..b2204452c0fa 100644
--- a/contrib/llvm-project/lldb/source/API/SBQueueItem.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBQueueItem.cpp
@@ -8,7 +8,6 @@
#include "lldb/lldb-forward.h"
-#include "SBReproducerPrivate.h"
#include "lldb/API/SBAddress.h"
#include "lldb/API/SBQueueItem.h"
#include "lldb/API/SBThread.h"
@@ -16,49 +15,46 @@
#include "lldb/Target/Process.h"
#include "lldb/Target/QueueItem.h"
#include "lldb/Target/Thread.h"
+#include "lldb/Utility/Instrumentation.h"
using namespace lldb;
using namespace lldb_private;
// Constructors
-SBQueueItem::SBQueueItem() : m_queue_item_sp() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBQueueItem);
-}
+SBQueueItem::SBQueueItem() { LLDB_INSTRUMENT_VA(this); }
SBQueueItem::SBQueueItem(const QueueItemSP &queue_item_sp)
: m_queue_item_sp(queue_item_sp) {
- LLDB_RECORD_CONSTRUCTOR(SBQueueItem, (const lldb::QueueItemSP &),
- queue_item_sp);
+ LLDB_INSTRUMENT_VA(this, queue_item_sp);
}
// Destructor
SBQueueItem::~SBQueueItem() { m_queue_item_sp.reset(); }
bool SBQueueItem::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBQueueItem, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBQueueItem::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBQueueItem, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_queue_item_sp.get() != nullptr;
}
void SBQueueItem::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBQueueItem, Clear);
+ LLDB_INSTRUMENT_VA(this);
m_queue_item_sp.reset();
}
void SBQueueItem::SetQueueItem(const QueueItemSP &queue_item_sp) {
- LLDB_RECORD_METHOD(void, SBQueueItem, SetQueueItem,
- (const lldb::QueueItemSP &), queue_item_sp);
+ LLDB_INSTRUMENT_VA(this, queue_item_sp);
m_queue_item_sp = queue_item_sp;
}
lldb::QueueItemKind SBQueueItem::GetKind() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::QueueItemKind, SBQueueItem, GetKind);
+ LLDB_INSTRUMENT_VA(this);
QueueItemKind result = eQueueItemKindUnknown;
if (m_queue_item_sp) {
@@ -68,7 +64,7 @@ lldb::QueueItemKind SBQueueItem::GetKind() const {
}
void SBQueueItem::SetKind(lldb::QueueItemKind kind) {
- LLDB_RECORD_METHOD(void, SBQueueItem, SetKind, (lldb::QueueItemKind), kind);
+ LLDB_INSTRUMENT_VA(this, kind);
if (m_queue_item_sp) {
m_queue_item_sp->SetKind(kind);
@@ -76,17 +72,17 @@ void SBQueueItem::SetKind(lldb::QueueItemKind kind) {
}
SBAddress SBQueueItem::GetAddress() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBAddress, SBQueueItem, GetAddress);
+ LLDB_INSTRUMENT_VA(this);
SBAddress result;
if (m_queue_item_sp) {
result.SetAddress(m_queue_item_sp->GetAddress());
}
- return LLDB_RECORD_RESULT(result);
+ return result;
}
void SBQueueItem::SetAddress(SBAddress addr) {
- LLDB_RECORD_METHOD(void, SBQueueItem, SetAddress, (lldb::SBAddress), addr);
+ LLDB_INSTRUMENT_VA(this, addr);
if (m_queue_item_sp) {
m_queue_item_sp->SetAddress(addr.ref());
@@ -94,8 +90,7 @@ void SBQueueItem::SetAddress(SBAddress addr) {
}
SBThread SBQueueItem::GetExtendedBacktraceThread(const char *type) {
- LLDB_RECORD_METHOD(lldb::SBThread, SBQueueItem, GetExtendedBacktraceThread,
- (const char *), type);
+ LLDB_INSTRUMENT_VA(this, type);
SBThread result;
if (m_queue_item_sp) {
@@ -113,28 +108,5 @@ SBThread SBQueueItem::GetExtendedBacktraceThread(const char *type) {
}
}
}
- return LLDB_RECORD_RESULT(result);
-}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBQueueItem>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBQueueItem, ());
- LLDB_REGISTER_CONSTRUCTOR(SBQueueItem, (const lldb::QueueItemSP &));
- LLDB_REGISTER_METHOD_CONST(bool, SBQueueItem, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBQueueItem, operator bool, ());
- LLDB_REGISTER_METHOD(void, SBQueueItem, Clear, ());
- LLDB_REGISTER_METHOD(void, SBQueueItem, SetQueueItem,
- (const lldb::QueueItemSP &));
- LLDB_REGISTER_METHOD_CONST(lldb::QueueItemKind, SBQueueItem, GetKind, ());
- LLDB_REGISTER_METHOD(void, SBQueueItem, SetKind, (lldb::QueueItemKind));
- LLDB_REGISTER_METHOD_CONST(lldb::SBAddress, SBQueueItem, GetAddress, ());
- LLDB_REGISTER_METHOD(void, SBQueueItem, SetAddress, (lldb::SBAddress));
- LLDB_REGISTER_METHOD(lldb::SBThread, SBQueueItem,
- GetExtendedBacktraceThread, (const char *));
-}
-
-}
+ return result;
}
diff --git a/contrib/llvm-project/lldb/source/API/SBReproducer.cpp b/contrib/llvm-project/lldb/source/API/SBReproducer.cpp
index c9c9a03c694a..d3d27cc57748 100644
--- a/contrib/llvm-project/lldb/source/API/SBReproducer.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBReproducer.cpp
@@ -6,8 +6,7 @@
//
//===----------------------------------------------------------------------===//
-#include "SBReproducerPrivate.h"
-
+#include "lldb/API/SBReproducer.h"
#include "lldb/API/LLDB.h"
#include "lldb/API/SBAddress.h"
#include "lldb/API/SBAttachInfo.h"
@@ -21,8 +20,10 @@
#include "lldb/API/SBError.h"
#include "lldb/API/SBFileSpec.h"
#include "lldb/API/SBHostOS.h"
-#include "lldb/API/SBReproducer.h"
#include "lldb/Host/FileSystem.h"
+#include "lldb/Utility/Instrumentation.h"
+#include "lldb/Utility/Reproducer.h"
+#include "lldb/Utility/ReproducerProvider.h"
#include "lldb/Version/Version.h"
using namespace lldb;
@@ -38,117 +39,44 @@ SBReplayOptions::SBReplayOptions(const SBReplayOptions &rhs)
SBReplayOptions::~SBReplayOptions() = default;
SBReplayOptions &SBReplayOptions::operator=(const SBReplayOptions &rhs) {
+ LLDB_INSTRUMENT_VA(this, rhs)
if (this == &rhs)
return *this;
*m_opaque_up = *rhs.m_opaque_up;
return *this;
}
-void SBReplayOptions::SetVerify(bool verify) { m_opaque_up->verify = verify; }
+void SBReplayOptions::SetVerify(bool verify) {
+ LLDB_INSTRUMENT_VA(this, verify) m_opaque_up->verify = verify;
+}
-bool SBReplayOptions::GetVerify() const { return m_opaque_up->verify; }
+bool SBReplayOptions::GetVerify() const {
+ LLDB_INSTRUMENT_VA(this) return m_opaque_up->verify;
+}
void SBReplayOptions::SetCheckVersion(bool check) {
+ LLDB_INSTRUMENT_VA(this, check)
m_opaque_up->check_version = check;
}
bool SBReplayOptions::GetCheckVersion() const {
+ LLDB_INSTRUMENT_VA(this)
return m_opaque_up->check_version;
}
-SBRegistry::SBRegistry() {
- Registry &R = *this;
-
- RegisterMethods<SBAddress>(R);
- RegisterMethods<SBAttachInfo>(R);
- RegisterMethods<SBBlock>(R);
- RegisterMethods<SBBreakpoint>(R);
- RegisterMethods<SBBreakpointList>(R);
- RegisterMethods<SBBreakpointLocation>(R);
- RegisterMethods<SBBreakpointName>(R);
- RegisterMethods<SBBroadcaster>(R);
- RegisterMethods<SBCommandInterpreter>(R);
- RegisterMethods<SBCommandInterpreterRunOptions>(R);
- RegisterMethods<SBCommandReturnObject>(R);
- RegisterMethods<SBCommunication>(R);
- RegisterMethods<SBCompileUnit>(R);
- RegisterMethods<SBData>(R);
- RegisterMethods<SBDebugger>(R);
- RegisterMethods<SBDeclaration>(R);
- RegisterMethods<SBEnvironment>(R);
- RegisterMethods<SBError>(R);
- RegisterMethods<SBEvent>(R);
- RegisterMethods<SBExecutionContext>(R);
- RegisterMethods<SBExpressionOptions>(R);
- RegisterMethods<SBFile>(R);
- RegisterMethods<SBFileSpec>(R);
- RegisterMethods<SBFileSpecList>(R);
- RegisterMethods<SBFrame>(R);
- RegisterMethods<SBFunction>(R);
- RegisterMethods<SBHostOS>(R);
- RegisterMethods<SBInputReader>(R);
- RegisterMethods<SBInstruction>(R);
- RegisterMethods<SBInstructionList>(R);
- RegisterMethods<SBLanguageRuntime>(R);
- RegisterMethods<SBLaunchInfo>(R);
- RegisterMethods<SBLineEntry>(R);
- RegisterMethods<SBListener>(R);
- RegisterMethods<SBMemoryRegionInfo>(R);
- RegisterMethods<SBMemoryRegionInfoList>(R);
- RegisterMethods<SBModule>(R);
- RegisterMethods<SBModuleSpec>(R);
- RegisterMethods<SBPlatform>(R);
- RegisterMethods<SBPlatformConnectOptions>(R);
- RegisterMethods<SBPlatformShellCommand>(R);
- RegisterMethods<SBProcess>(R);
- RegisterMethods<SBProcessInfo>(R);
- RegisterMethods<SBQueue>(R);
- RegisterMethods<SBQueueItem>(R);
- RegisterMethods<SBSection>(R);
- RegisterMethods<SBSourceManager>(R);
- RegisterMethods<SBStream>(R);
- RegisterMethods<SBStringList>(R);
- RegisterMethods<SBStructuredData>(R);
- RegisterMethods<SBSymbol>(R);
- RegisterMethods<SBSymbolContext>(R);
- RegisterMethods<SBSymbolContextList>(R);
- RegisterMethods<SBTarget>(R);
- RegisterMethods<SBThread>(R);
- RegisterMethods<SBThreadCollection>(R);
- RegisterMethods<SBThreadPlan>(R);
- RegisterMethods<SBTrace>(R);
- RegisterMethods<SBType>(R);
- RegisterMethods<SBTypeCategory>(R);
- RegisterMethods<SBTypeEnumMember>(R);
- RegisterMethods<SBTypeFilter>(R);
- RegisterMethods<SBTypeFormat>(R);
- RegisterMethods<SBTypeNameSpecifier>(R);
- RegisterMethods<SBTypeSummary>(R);
- RegisterMethods<SBTypeSummaryOptions>(R);
- RegisterMethods<SBTypeSynthetic>(R);
- RegisterMethods<SBUnixSignals>(R);
- RegisterMethods<SBValue>(R);
- RegisterMethods<SBValueList>(R);
- RegisterMethods<SBVariablesOptions>(R);
- RegisterMethods<SBWatchpoint>(R);
-}
-
const char *SBReproducer::Capture() {
+ LLDB_INSTRUMENT()
static std::string error;
if (auto e = Reproducer::Initialize(ReproducerMode::Capture, llvm::None)) {
error = llvm::toString(std::move(e));
return error.c_str();
}
- if (auto *g = lldb_private::repro::Reproducer::Instance().GetGenerator()) {
- auto &p = g->GetOrCreate<SBProvider>();
- InstrumentationData::Initialize(p.GetSerializer(), p.GetRegistry());
- }
-
return nullptr;
}
const char *SBReproducer::Capture(const char *path) {
+ LLDB_INSTRUMENT_VA(path)
static std::string error;
if (auto e =
Reproducer::Initialize(ReproducerMode::Capture, FileSpec(path))) {
@@ -156,32 +84,32 @@ const char *SBReproducer::Capture(const char *path) {
return error.c_str();
}
- if (auto *g = lldb_private::repro::Reproducer::Instance().GetGenerator()) {
- auto &p = g->GetOrCreate<SBProvider>();
- InstrumentationData::Initialize(p.GetSerializer(), p.GetRegistry());
- }
-
return nullptr;
}
const char *SBReproducer::PassiveReplay(const char *path) {
+ LLDB_INSTRUMENT_VA(path)
return "Reproducer replay has been removed";
}
const char *SBReproducer::Replay(const char *path) {
+ LLDB_INSTRUMENT_VA(path)
return "Reproducer replay has been removed";
}
const char *SBReproducer::Replay(const char *path, bool skip_version_check) {
+ LLDB_INSTRUMENT_VA(path, skip_version_check)
return Replay(path);
}
const char *SBReproducer::Replay(const char *path,
const SBReplayOptions &options) {
+ LLDB_INSTRUMENT_VA(path, options)
return Replay(path);
}
const char *SBReproducer::Finalize(const char *path) {
+ LLDB_INSTRUMENT_VA(path)
static std::string error;
repro::Loader *loader = repro::Reproducer::Instance().GetLoader();
@@ -199,6 +127,7 @@ const char *SBReproducer::Finalize(const char *path) {
}
bool SBReproducer::Generate() {
+ LLDB_INSTRUMENT()
auto &r = Reproducer::Instance();
if (auto generator = r.GetGenerator()) {
generator->Keep();
@@ -208,6 +137,7 @@ bool SBReproducer::Generate() {
}
bool SBReproducer::SetAutoGenerate(bool b) {
+ LLDB_INSTRUMENT_VA(b)
auto &r = Reproducer::Instance();
if (auto generator = r.GetGenerator()) {
generator->SetAutoGenerate(b);
@@ -217,6 +147,7 @@ bool SBReproducer::SetAutoGenerate(bool b) {
}
const char *SBReproducer::GetPath() {
+ LLDB_INSTRUMENT()
ConstString path;
auto &r = Reproducer::Instance();
if (FileSpec reproducer_path = Reproducer::Instance().GetReproducerPath())
@@ -225,6 +156,7 @@ const char *SBReproducer::GetPath() {
}
void SBReproducer::SetWorkingDirectory(const char *path) {
+ LLDB_INSTRUMENT_VA(path)
if (auto *g = lldb_private::repro::Reproducer::Instance().GetGenerator()) {
auto &wp = g->GetOrCreate<repro::WorkingDirectoryProvider>();
wp.SetDirectory(path);
@@ -232,7 +164,3 @@ void SBReproducer::SetWorkingDirectory(const char *path) {
fp.RecordInterestingDirectory(wp.GetDirectory());
}
}
-
-char lldb_private::repro::SBProvider::ID = 0;
-const char *SBProvider::Info::name = "sbapi";
-const char *SBProvider::Info::file = "sbapi.bin";
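
With the SBRegistry constructor and the SBProvider plumbing removed, SBReproducer::Capture() now only initializes the Reproducer in capture mode, and every Replay overload is reduced to a stub that reports removal. A hedged sketch of what calling the resulting API looks like after this patch; the directory path, helper name, and error handling are assumptions for illustration, not taken from the diff:

  #include <cstdio>
  #include "lldb/API/SBReproducer.h"

  static void StartCapture() {
    // Begin capturing into a hypothetical directory; a non-null return is an error string.
    if (const char *err = lldb::SBReproducer::Capture("/tmp/lldb-repro"))
      std::fprintf(stderr, "capture failed: %s\n", err);

    // Replay is now a stub: it always returns the removal notice seen in the hunk above.
    const char *msg = lldb::SBReproducer::Replay("/tmp/lldb-repro");
    (void)msg; // msg == "Reproducer replay has been removed"
  }
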
diff --git a/contrib/llvm-project/lldb/source/API/SBReproducerPrivate.h b/contrib/llvm-project/lldb/source/API/SBReproducerPrivate.h
deleted file mode 100644
index 02ac31c2ad89..000000000000
--- a/contrib/llvm-project/lldb/source/API/SBReproducerPrivate.h
+++ /dev/null
@@ -1,78 +0,0 @@
-//===-- SBReproducerPrivate.h -----------------------------------*- C++ -*-===//
-//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLDB_SOURCE_API_SBREPRODUCERPRIVATE_H
-#define LLDB_SOURCE_API_SBREPRODUCERPRIVATE_H
-
-#include "lldb/API/SBReproducer.h"
-
-#include "lldb/Utility/FileSpec.h"
-#include "lldb/Utility/Log.h"
-#include "lldb/Utility/Reproducer.h"
-#include "lldb/Utility/ReproducerInstrumentation.h"
-#include "lldb/Utility/ReproducerProvider.h"
-
-#include "llvm/ADT/DenseMap.h"
-
-#define LLDB_GET_INSTRUMENTATION_DATA() \
- lldb_private::repro::InstrumentationData::Instance()
-
-namespace lldb_private {
-namespace repro {
-
-class SBRegistry : public Registry {
-public:
- SBRegistry();
-};
-
-class SBProvider : public Provider<SBProvider> {
-public:
- struct Info {
- static const char *name;
- static const char *file;
- };
-
- SBProvider(const FileSpec &directory)
- : Provider(directory),
- m_stream(directory.CopyByAppendingPathComponent("sbapi.bin").GetPath(),
- m_ec, llvm::sys::fs::OpenFlags::OF_None),
- m_serializer(m_stream) {}
-
- Serializer &GetSerializer() { return m_serializer; }
- Registry &GetRegistry() { return m_registry; }
-
- static char ID;
-
-private:
- std::error_code m_ec;
- llvm::raw_fd_ostream m_stream;
- Serializer m_serializer;
- SBRegistry m_registry;
-};
-
-class ReplayData {
-public:
- ReplayData(std::unique_ptr<llvm::MemoryBuffer> memory_buffer)
- : m_memory_buffer(std::move(memory_buffer)), m_registry(),
- m_deserializer(m_memory_buffer->getBuffer()) {}
- Deserializer &GetDeserializer() { return m_deserializer; }
- Registry &GetRegistry() { return m_registry; }
-
-private:
- std::unique_ptr<llvm::MemoryBuffer> m_memory_buffer;
- SBRegistry m_registry;
- Deserializer m_deserializer;
-};
-
-template <typename T> void RegisterMethods(Registry &R);
-
-} // namespace repro
-} // namespace lldb_private
-
-#endif
diff --git a/contrib/llvm-project/lldb/source/API/SBSection.cpp b/contrib/llvm-project/lldb/source/API/SBSection.cpp
index bb56fa18d9ca..733e0db0b5ba 100644
--- a/contrib/llvm-project/lldb/source/API/SBSection.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBSection.cpp
@@ -7,7 +7,6 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBSection.h"
-#include "SBReproducerPrivate.h"
#include "lldb/API/SBStream.h"
#include "lldb/API/SBTarget.h"
#include "lldb/Core/Module.h"
@@ -15,50 +14,47 @@
#include "lldb/Symbol/ObjectFile.h"
#include "lldb/Utility/DataBuffer.h"
#include "lldb/Utility/DataExtractor.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Utility/StreamString.h"
using namespace lldb;
using namespace lldb_private;
-SBSection::SBSection() : m_opaque_wp() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBSection);
-}
+SBSection::SBSection() { LLDB_INSTRUMENT_VA(this); }
SBSection::SBSection(const SBSection &rhs) : m_opaque_wp(rhs.m_opaque_wp) {
- LLDB_RECORD_CONSTRUCTOR(SBSection, (const lldb::SBSection &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
-SBSection::SBSection(const lldb::SectionSP &section_sp)
- : m_opaque_wp() // Don't init with section_sp otherwise this will throw if
- // section_sp doesn't contain a valid Section *
-{
+SBSection::SBSection(const lldb::SectionSP &section_sp) {
+ // Don't init with section_sp otherwise this will throw if
+ // section_sp doesn't contain a valid Section *
if (section_sp)
m_opaque_wp = section_sp;
}
const SBSection &SBSection::operator=(const SBSection &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBSection &,
- SBSection, operator=,(const lldb::SBSection &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_wp = rhs.m_opaque_wp;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBSection::~SBSection() = default;
bool SBSection::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBSection, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBSection::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBSection, operator bool);
+ LLDB_INSTRUMENT_VA(this);
SectionSP section_sp(GetSP());
return section_sp && section_sp->GetModule().get() != nullptr;
}
const char *SBSection::GetName() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBSection, GetName);
+ LLDB_INSTRUMENT_VA(this);
SectionSP section_sp(GetSP());
if (section_sp)
@@ -67,7 +63,7 @@ const char *SBSection::GetName() {
}
lldb::SBSection SBSection::GetParent() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBSection, SBSection, GetParent);
+ LLDB_INSTRUMENT_VA(this);
lldb::SBSection sb_section;
SectionSP section_sp(GetSP());
@@ -76,12 +72,11 @@ lldb::SBSection SBSection::GetParent() {
if (parent_section_sp)
sb_section.SetSP(parent_section_sp);
}
- return LLDB_RECORD_RESULT(sb_section);
+ return sb_section;
}
lldb::SBSection SBSection::FindSubSection(const char *sect_name) {
- LLDB_RECORD_METHOD(lldb::SBSection, SBSection, FindSubSection, (const char *),
- sect_name);
+ LLDB_INSTRUMENT_VA(this, sect_name);
lldb::SBSection sb_section;
if (sect_name) {
@@ -92,11 +87,11 @@ lldb::SBSection SBSection::FindSubSection(const char *sect_name) {
section_sp->GetChildren().FindSectionByName(const_sect_name));
}
}
- return LLDB_RECORD_RESULT(sb_section);
+ return sb_section;
}
size_t SBSection::GetNumSubSections() {
- LLDB_RECORD_METHOD_NO_ARGS(size_t, SBSection, GetNumSubSections);
+ LLDB_INSTRUMENT_VA(this);
SectionSP section_sp(GetSP());
if (section_sp)
@@ -105,14 +100,13 @@ size_t SBSection::GetNumSubSections() {
}
lldb::SBSection SBSection::GetSubSectionAtIndex(size_t idx) {
- LLDB_RECORD_METHOD(lldb::SBSection, SBSection, GetSubSectionAtIndex, (size_t),
- idx);
+ LLDB_INSTRUMENT_VA(this, idx);
lldb::SBSection sb_section;
SectionSP section_sp(GetSP());
if (section_sp)
sb_section.SetSP(section_sp->GetChildren().GetSectionAtIndex(idx));
- return LLDB_RECORD_RESULT(sb_section);
+ return sb_section;
}
lldb::SectionSP SBSection::GetSP() const { return m_opaque_wp.lock(); }
@@ -122,7 +116,7 @@ void SBSection::SetSP(const lldb::SectionSP &section_sp) {
}
lldb::addr_t SBSection::GetFileAddress() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::addr_t, SBSection, GetFileAddress);
+ LLDB_INSTRUMENT_VA(this);
lldb::addr_t file_addr = LLDB_INVALID_ADDRESS;
SectionSP section_sp(GetSP());
@@ -132,8 +126,7 @@ lldb::addr_t SBSection::GetFileAddress() {
}
lldb::addr_t SBSection::GetLoadAddress(lldb::SBTarget &sb_target) {
- LLDB_RECORD_METHOD(lldb::addr_t, SBSection, GetLoadAddress,
- (lldb::SBTarget &), sb_target);
+ LLDB_INSTRUMENT_VA(this, sb_target);
TargetSP target_sp(sb_target.GetSP());
if (target_sp) {
@@ -145,7 +138,7 @@ lldb::addr_t SBSection::GetLoadAddress(lldb::SBTarget &sb_target) {
}
lldb::addr_t SBSection::GetByteSize() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::addr_t, SBSection, GetByteSize);
+ LLDB_INSTRUMENT_VA(this);
SectionSP section_sp(GetSP());
if (section_sp)
@@ -154,7 +147,7 @@ lldb::addr_t SBSection::GetByteSize() {
}
uint64_t SBSection::GetFileOffset() {
- LLDB_RECORD_METHOD_NO_ARGS(uint64_t, SBSection, GetFileOffset);
+ LLDB_INSTRUMENT_VA(this);
SectionSP section_sp(GetSP());
if (section_sp) {
@@ -169,7 +162,7 @@ uint64_t SBSection::GetFileOffset() {
}
uint64_t SBSection::GetFileByteSize() {
- LLDB_RECORD_METHOD_NO_ARGS(uint64_t, SBSection, GetFileByteSize);
+ LLDB_INSTRUMENT_VA(this);
SectionSP section_sp(GetSP());
if (section_sp)
@@ -178,14 +171,13 @@ uint64_t SBSection::GetFileByteSize() {
}
SBData SBSection::GetSectionData() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBData, SBSection, GetSectionData);
+ LLDB_INSTRUMENT_VA(this);
- return LLDB_RECORD_RESULT(GetSectionData(0, UINT64_MAX));
+ return GetSectionData(0, UINT64_MAX);
}
SBData SBSection::GetSectionData(uint64_t offset, uint64_t size) {
- LLDB_RECORD_METHOD(lldb::SBData, SBSection, GetSectionData,
- (uint64_t, uint64_t), offset, size);
+ LLDB_INSTRUMENT_VA(this, offset, size);
SBData sb_data;
SectionSP section_sp(GetSP());
@@ -220,11 +212,11 @@ SBData SBSection::GetSectionData(uint64_t offset, uint64_t size) {
}
}
}
- return LLDB_RECORD_RESULT(sb_data);
+ return sb_data;
}
SectionType SBSection::GetSectionType() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SectionType, SBSection, GetSectionType);
+ LLDB_INSTRUMENT_VA(this);
SectionSP section_sp(GetSP());
if (section_sp.get())
@@ -233,7 +225,7 @@ SectionType SBSection::GetSectionType() {
}
uint32_t SBSection::GetPermissions() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBSection, GetPermissions);
+ LLDB_INSTRUMENT_VA(this);
SectionSP section_sp(GetSP());
if (section_sp)
@@ -242,7 +234,7 @@ uint32_t SBSection::GetPermissions() const {
}
uint32_t SBSection::GetTargetByteSize() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBSection, GetTargetByteSize);
+ LLDB_INSTRUMENT_VA(this);
SectionSP section_sp(GetSP());
if (section_sp.get())
@@ -251,8 +243,7 @@ uint32_t SBSection::GetTargetByteSize() {
}
bool SBSection::operator==(const SBSection &rhs) {
- LLDB_RECORD_METHOD(bool, SBSection, operator==,(const lldb::SBSection &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
SectionSP lhs_section_sp(GetSP());
SectionSP rhs_section_sp(rhs.GetSP());
@@ -262,8 +253,7 @@ bool SBSection::operator==(const SBSection &rhs) {
}
bool SBSection::operator!=(const SBSection &rhs) {
- LLDB_RECORD_METHOD(bool, SBSection, operator!=,(const lldb::SBSection &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
SectionSP lhs_section_sp(GetSP());
SectionSP rhs_section_sp(rhs.GetSP());
@@ -271,8 +261,7 @@ bool SBSection::operator!=(const SBSection &rhs) {
}
bool SBSection::GetDescription(SBStream &description) {
- LLDB_RECORD_METHOD(bool, SBSection, GetDescription, (lldb::SBStream &),
- description);
+ LLDB_INSTRUMENT_VA(this, description);
Stream &strm = description.ref();
@@ -288,41 +277,3 @@ bool SBSection::GetDescription(SBStream &description) {
return true;
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBSection>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBSection, ());
- LLDB_REGISTER_CONSTRUCTOR(SBSection, (const lldb::SBSection &));
- LLDB_REGISTER_METHOD(const lldb::SBSection &,
- SBSection, operator=,(const lldb::SBSection &));
- LLDB_REGISTER_METHOD_CONST(bool, SBSection, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBSection, operator bool, ());
- LLDB_REGISTER_METHOD(const char *, SBSection, GetName, ());
- LLDB_REGISTER_METHOD(lldb::SBSection, SBSection, GetParent, ());
- LLDB_REGISTER_METHOD(lldb::SBSection, SBSection, FindSubSection,
- (const char *));
- LLDB_REGISTER_METHOD(size_t, SBSection, GetNumSubSections, ());
- LLDB_REGISTER_METHOD(lldb::SBSection, SBSection, GetSubSectionAtIndex,
- (size_t));
- LLDB_REGISTER_METHOD(lldb::addr_t, SBSection, GetFileAddress, ());
- LLDB_REGISTER_METHOD(lldb::addr_t, SBSection, GetLoadAddress,
- (lldb::SBTarget &));
- LLDB_REGISTER_METHOD(lldb::addr_t, SBSection, GetByteSize, ());
- LLDB_REGISTER_METHOD(uint64_t, SBSection, GetFileOffset, ());
- LLDB_REGISTER_METHOD(uint64_t, SBSection, GetFileByteSize, ());
- LLDB_REGISTER_METHOD(lldb::SBData, SBSection, GetSectionData, ());
- LLDB_REGISTER_METHOD(lldb::SBData, SBSection, GetSectionData,
- (uint64_t, uint64_t));
- LLDB_REGISTER_METHOD(lldb::SectionType, SBSection, GetSectionType, ());
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBSection, GetPermissions, ());
- LLDB_REGISTER_METHOD(uint32_t, SBSection, GetTargetByteSize, ());
- LLDB_REGISTER_METHOD(bool, SBSection, operator==,(const lldb::SBSection &));
- LLDB_REGISTER_METHOD(bool, SBSection, operator!=,(const lldb::SBSection &));
- LLDB_REGISTER_METHOD(bool, SBSection, GetDescription, (lldb::SBStream &));
-}
-
-}
-}
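
A second recurring cleanup is visible in the SBSection constructors above and in several files below: mem-initializers that only spelled out empty default construction, such as ": m_opaque_wp()" or ": m_opaque_up()", are dropped, since default construction of std::weak_ptr and std::unique_ptr members yields the same empty state. A minimal sketch of the equivalence, using an illustrative type name rather than LLDB's:

  #include <memory>

  struct Holder {
    Holder() {}                  // m_ptr is default-constructed (empty) either way
    // Holder() : m_ptr() {}     // the redundant spelling removed by this patch
    std::weak_ptr<int> m_ptr;    // lock() returns an empty shared_ptr in both cases
  };
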
diff --git a/contrib/llvm-project/lldb/source/API/SBSourceManager.cpp b/contrib/llvm-project/lldb/source/API/SBSourceManager.cpp
index 43c3443672f7..7729f5d9d69f 100644
--- a/contrib/llvm-project/lldb/source/API/SBSourceManager.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBSourceManager.cpp
@@ -7,10 +7,10 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBSourceManager.h"
-#include "SBReproducerPrivate.h"
#include "lldb/API/SBDebugger.h"
#include "lldb/API/SBStream.h"
#include "lldb/API/SBTarget.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/API/SBFileSpec.h"
#include "lldb/Core/Debugger.h"
@@ -24,10 +24,9 @@ namespace lldb_private {
class SourceManagerImpl {
public:
SourceManagerImpl(const lldb::DebuggerSP &debugger_sp)
- : m_debugger_wp(debugger_sp), m_target_wp() {}
+ : m_debugger_wp(debugger_sp) {}
- SourceManagerImpl(const lldb::TargetSP &target_sp)
- : m_debugger_wp(), m_target_wp(target_sp) {}
+ SourceManagerImpl(const lldb::TargetSP &target_sp) : m_target_wp(target_sp) {}
SourceManagerImpl(const SourceManagerImpl &rhs) {
if (&rhs == this)
@@ -72,21 +71,19 @@ using namespace lldb;
using namespace lldb_private;
SBSourceManager::SBSourceManager(const SBDebugger &debugger) {
- LLDB_RECORD_CONSTRUCTOR(SBSourceManager, (const lldb::SBDebugger &),
- debugger);
+ LLDB_INSTRUMENT_VA(this, debugger);
m_opaque_up = std::make_unique<SourceManagerImpl>(debugger.get_sp());
}
SBSourceManager::SBSourceManager(const SBTarget &target) {
- LLDB_RECORD_CONSTRUCTOR(SBSourceManager, (const lldb::SBTarget &), target);
+ LLDB_INSTRUMENT_VA(this, target);
m_opaque_up = std::make_unique<SourceManagerImpl>(target.GetSP());
}
SBSourceManager::SBSourceManager(const SBSourceManager &rhs) {
- LLDB_RECORD_CONSTRUCTOR(SBSourceManager, (const lldb::SBSourceManager &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (&rhs == this)
return;
@@ -96,12 +93,10 @@ SBSourceManager::SBSourceManager(const SBSourceManager &rhs) {
const lldb::SBSourceManager &SBSourceManager::
operator=(const lldb::SBSourceManager &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBSourceManager &,
- SBSourceManager, operator=,(const lldb::SBSourceManager &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_up = std::make_unique<SourceManagerImpl>(*(rhs.m_opaque_up.get()));
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBSourceManager::~SBSourceManager() = default;
@@ -109,10 +104,7 @@ SBSourceManager::~SBSourceManager() = default;
size_t SBSourceManager::DisplaySourceLinesWithLineNumbers(
const SBFileSpec &file, uint32_t line, uint32_t context_before,
uint32_t context_after, const char *current_line_cstr, SBStream &s) {
- LLDB_RECORD_METHOD(size_t, SBSourceManager, DisplaySourceLinesWithLineNumbers,
- (const lldb::SBFileSpec &, uint32_t, uint32_t, uint32_t,
- const char *, lldb::SBStream &),
- file, line, context_before, context_after,
+ LLDB_INSTRUMENT_VA(this, file, line, context_before, context_after,
current_line_cstr, s);
const uint32_t column = 0;
@@ -125,11 +117,8 @@ size_t SBSourceManager::DisplaySourceLinesWithLineNumbersAndColumn(
const SBFileSpec &file, uint32_t line, uint32_t column,
uint32_t context_before, uint32_t context_after,
const char *current_line_cstr, SBStream &s) {
- LLDB_RECORD_METHOD(
- size_t, SBSourceManager, DisplaySourceLinesWithLineNumbersAndColumn,
- (const lldb::SBFileSpec &, uint32_t, uint32_t, uint32_t, uint32_t,
- const char *, lldb::SBStream &),
- file, line, column, context_before, context_after, current_line_cstr, s);
+ LLDB_INSTRUMENT_VA(this, file, line, column, context_before, context_after,
+ current_line_cstr, s);
if (m_opaque_up == nullptr)
return 0;
@@ -138,27 +127,3 @@ size_t SBSourceManager::DisplaySourceLinesWithLineNumbersAndColumn(
file.ref(), line, column, context_before, context_after,
current_line_cstr, s.get());
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBSourceManager>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBSourceManager, (const lldb::SBDebugger &));
- LLDB_REGISTER_CONSTRUCTOR(SBSourceManager, (const lldb::SBTarget &));
- LLDB_REGISTER_CONSTRUCTOR(SBSourceManager, (const lldb::SBSourceManager &));
- LLDB_REGISTER_METHOD(
- const lldb::SBSourceManager &,
- SBSourceManager, operator=,(const lldb::SBSourceManager &));
- LLDB_REGISTER_METHOD(size_t, SBSourceManager,
- DisplaySourceLinesWithLineNumbers,
- (const lldb::SBFileSpec &, uint32_t, uint32_t,
- uint32_t, const char *, lldb::SBStream &));
- LLDB_REGISTER_METHOD(size_t, SBSourceManager,
- DisplaySourceLinesWithLineNumbersAndColumn,
- (const lldb::SBFileSpec &, uint32_t, uint32_t,
- uint32_t, uint32_t, const char *, lldb::SBStream &));
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBStream.cpp b/contrib/llvm-project/lldb/source/API/SBStream.cpp
index 190abd18df33..9ceef3466f93 100644
--- a/contrib/llvm-project/lldb/source/API/SBStream.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBStream.cpp
@@ -8,10 +8,10 @@
#include "lldb/API/SBStream.h"
-#include "SBReproducerPrivate.h"
#include "lldb/API/SBFile.h"
#include "lldb/Core/StreamFile.h"
#include "lldb/Host/FileSystem.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Utility/Status.h"
#include "lldb/Utility/Stream.h"
#include "lldb/Utility/StreamString.h"
@@ -20,7 +20,7 @@ using namespace lldb;
using namespace lldb_private;
SBStream::SBStream() : m_opaque_up(new StreamString()) {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBStream);
+ LLDB_INSTRUMENT_VA(this);
}
SBStream::SBStream(SBStream &&rhs)
@@ -29,11 +29,11 @@ SBStream::SBStream(SBStream &&rhs)
SBStream::~SBStream() = default;
bool SBStream::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBStream, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBStream::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBStream, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return (m_opaque_up != nullptr);
}
@@ -41,7 +41,7 @@ SBStream::operator bool() const {
// If this stream is not redirected to a file, it will maintain a local cache
// for the stream data which can be accessed using this accessor.
const char *SBStream::GetData() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBStream, GetData);
+ LLDB_INSTRUMENT_VA(this);
if (m_is_file || m_opaque_up == nullptr)
return nullptr;
@@ -52,7 +52,7 @@ const char *SBStream::GetData() {
// If this stream is not redirected to a file, it will maintain a local cache
// for the stream output whose length can be accessed using this accessor.
size_t SBStream::GetSize() {
- LLDB_RECORD_METHOD_NO_ARGS(size_t, SBStream, GetSize);
+ LLDB_INSTRUMENT_VA(this);
if (m_is_file || m_opaque_up == nullptr)
return 0;
@@ -61,7 +61,7 @@ size_t SBStream::GetSize() {
}
void SBStream::Print(const char *str) {
- LLDB_RECORD_METHOD(void, SBStream, Print, (const char *), str);
+ LLDB_INSTRUMENT_VA(this, str);
Printf("%s", str);
}
@@ -76,8 +76,7 @@ void SBStream::Printf(const char *format, ...) {
}
void SBStream::RedirectToFile(const char *path, bool append) {
- LLDB_RECORD_METHOD(void, SBStream, RedirectToFile, (const char *, bool), path,
- append);
+ LLDB_INSTRUMENT_VA(this, path, append);
if (path == nullptr)
return;
@@ -114,19 +113,18 @@ void SBStream::RedirectToFile(const char *path, bool append) {
}
void SBStream::RedirectToFileHandle(FILE *fh, bool transfer_fh_ownership) {
- LLDB_RECORD_METHOD(void, SBStream, RedirectToFileHandle, (FILE *, bool), fh,
- transfer_fh_ownership);
+ LLDB_INSTRUMENT_VA(this, fh, transfer_fh_ownership);
FileSP file = std::make_unique<NativeFile>(fh, transfer_fh_ownership);
return RedirectToFile(file);
}
void SBStream::RedirectToFile(SBFile file) {
- LLDB_RECORD_METHOD(void, SBStream, RedirectToFile, (SBFile), file)
+ LLDB_INSTRUMENT_VA(this, file)
RedirectToFile(file.GetFile());
}
void SBStream::RedirectToFile(FileSP file_sp) {
- LLDB_RECORD_METHOD(void, SBStream, RedirectToFile, (FileSP), file_sp);
+ LLDB_INSTRUMENT_VA(this, file_sp);
if (!file_sp || !file_sp->IsValid())
return;
@@ -150,8 +148,7 @@ void SBStream::RedirectToFile(FileSP file_sp) {
}
void SBStream::RedirectToFileDescriptor(int fd, bool transfer_fh_ownership) {
- LLDB_RECORD_METHOD(void, SBStream, RedirectToFileDescriptor, (int, bool), fd,
- transfer_fh_ownership);
+ LLDB_INSTRUMENT_VA(this, fd, transfer_fh_ownership);
std::string local_data;
if (m_opaque_up) {
@@ -182,7 +179,7 @@ lldb_private::Stream &SBStream::ref() {
}
void SBStream::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBStream, Clear);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_up) {
// See if we have any locally backed data. If so, copy it so we can then
@@ -193,25 +190,3 @@ void SBStream::Clear() {
static_cast<StreamString *>(m_opaque_up.get())->Clear();
}
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBStream>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBStream, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBStream, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBStream, operator bool, ());
- LLDB_REGISTER_METHOD(const char *, SBStream, GetData, ());
- LLDB_REGISTER_METHOD(size_t, SBStream, GetSize, ());
- LLDB_REGISTER_METHOD(void, SBStream, RedirectToFile, (const char *, bool));
- LLDB_REGISTER_METHOD(void, SBStream, RedirectToFile, (FileSP));
- LLDB_REGISTER_METHOD(void, SBStream, RedirectToFile, (SBFile));
- LLDB_REGISTER_METHOD(void, SBStream, RedirectToFileHandle, (FILE *, bool));
- LLDB_REGISTER_METHOD(void, SBStream, RedirectToFileDescriptor, (int, bool));
- LLDB_REGISTER_METHOD(void, SBStream, Clear, ());
- LLDB_REGISTER_METHOD(void, SBStream, Print, (const char *));
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBStringList.cpp b/contrib/llvm-project/lldb/source/API/SBStringList.cpp
index d9b03692ec0e..dfb77b1ab32f 100644
--- a/contrib/llvm-project/lldb/source/API/SBStringList.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBStringList.cpp
@@ -7,36 +7,32 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBStringList.h"
-#include "SBReproducerPrivate.h"
#include "Utils.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Utility/StringList.h"
using namespace lldb;
using namespace lldb_private;
-SBStringList::SBStringList() : m_opaque_up() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBStringList);
-}
+SBStringList::SBStringList() { LLDB_INSTRUMENT_VA(this); }
-SBStringList::SBStringList(const lldb_private::StringList *lldb_strings_ptr)
- : m_opaque_up() {
+SBStringList::SBStringList(const lldb_private::StringList *lldb_strings_ptr) {
if (lldb_strings_ptr)
m_opaque_up = std::make_unique<StringList>(*lldb_strings_ptr);
}
-SBStringList::SBStringList(const SBStringList &rhs) : m_opaque_up() {
- LLDB_RECORD_CONSTRUCTOR(SBStringList, (const lldb::SBStringList &), rhs);
+SBStringList::SBStringList(const SBStringList &rhs) {
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_up = clone(rhs.m_opaque_up);
}
const SBStringList &SBStringList::operator=(const SBStringList &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBStringList &,
- SBStringList, operator=,(const lldb::SBStringList &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_up = clone(rhs.m_opaque_up);
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBStringList::~SBStringList() = default;
@@ -50,17 +46,17 @@ const lldb_private::StringList &SBStringList::operator*() const {
}
bool SBStringList::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBStringList, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBStringList::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBStringList, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return (m_opaque_up != nullptr);
}
void SBStringList::AppendString(const char *str) {
- LLDB_RECORD_METHOD(void, SBStringList, AppendString, (const char *), str);
+ LLDB_INSTRUMENT_VA(this, str);
if (str != nullptr) {
if (IsValid())
@@ -71,8 +67,7 @@ void SBStringList::AppendString(const char *str) {
}
void SBStringList::AppendList(const char **strv, int strc) {
- LLDB_RECORD_METHOD(void, SBStringList, AppendList, (const char **, int), strv,
- strc);
+ LLDB_INSTRUMENT_VA(this, strv, strc);
if ((strv != nullptr) && (strc > 0)) {
if (IsValid())
@@ -83,8 +78,7 @@ void SBStringList::AppendList(const char **strv, int strc) {
}
void SBStringList::AppendList(const SBStringList &strings) {
- LLDB_RECORD_METHOD(void, SBStringList, AppendList,
- (const lldb::SBStringList &), strings);
+ LLDB_INSTRUMENT_VA(this, strings);
if (strings.IsValid()) {
if (!IsValid())
@@ -100,7 +94,7 @@ void SBStringList::AppendList(const StringList &strings) {
}
uint32_t SBStringList::GetSize() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBStringList, GetSize);
+ LLDB_INSTRUMENT_VA(this);
if (IsValid()) {
return m_opaque_up->GetSize();
@@ -109,8 +103,7 @@ uint32_t SBStringList::GetSize() const {
}
const char *SBStringList::GetStringAtIndex(size_t idx) {
- LLDB_RECORD_METHOD(const char *, SBStringList, GetStringAtIndex, (size_t),
- idx);
+ LLDB_INSTRUMENT_VA(this, idx);
if (IsValid()) {
return m_opaque_up->GetStringAtIndex(idx);
@@ -119,8 +112,7 @@ const char *SBStringList::GetStringAtIndex(size_t idx) {
}
const char *SBStringList::GetStringAtIndex(size_t idx) const {
- LLDB_RECORD_METHOD_CONST(const char *, SBStringList, GetStringAtIndex,
- (size_t), idx);
+ LLDB_INSTRUMENT_VA(this, idx);
if (IsValid()) {
return m_opaque_up->GetStringAtIndex(idx);
@@ -129,35 +121,9 @@ const char *SBStringList::GetStringAtIndex(size_t idx) const {
}
void SBStringList::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBStringList, Clear);
+ LLDB_INSTRUMENT_VA(this);
if (IsValid()) {
m_opaque_up->Clear();
}
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBStringList>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBStringList, ());
- LLDB_REGISTER_CONSTRUCTOR(SBStringList, (const lldb::SBStringList &));
- LLDB_REGISTER_METHOD(const lldb::SBStringList &,
- SBStringList, operator=,(const lldb::SBStringList &));
- LLDB_REGISTER_METHOD_CONST(bool, SBStringList, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBStringList, operator bool, ());
- LLDB_REGISTER_METHOD(void, SBStringList, AppendString, (const char *));
- LLDB_REGISTER_METHOD(void, SBStringList, AppendList, (const char **, int));
- LLDB_REGISTER_METHOD(void, SBStringList, AppendList,
- (const lldb::SBStringList &));
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBStringList, GetSize, ());
- LLDB_REGISTER_METHOD(const char *, SBStringList, GetStringAtIndex,
- (size_t));
- LLDB_REGISTER_METHOD_CONST(const char *, SBStringList, GetStringAtIndex,
- (size_t));
- LLDB_REGISTER_METHOD(void, SBStringList, Clear, ());
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBStructuredData.cpp b/contrib/llvm-project/lldb/source/API/SBStructuredData.cpp
index e99c6194c516..498bcdd39e44 100644
--- a/contrib/llvm-project/lldb/source/API/SBStructuredData.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBStructuredData.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBStructuredData.h"
-#include "SBReproducerPrivate.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/API/SBStream.h"
#include "lldb/API/SBStringList.h"
@@ -25,41 +25,36 @@ using namespace lldb_private;
#pragma mark SBStructuredData
SBStructuredData::SBStructuredData() : m_impl_up(new StructuredDataImpl()) {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBStructuredData);
+ LLDB_INSTRUMENT_VA(this);
}
SBStructuredData::SBStructuredData(const lldb::SBStructuredData &rhs)
: m_impl_up(new StructuredDataImpl(*rhs.m_impl_up)) {
- LLDB_RECORD_CONSTRUCTOR(SBStructuredData, (const lldb::SBStructuredData &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
SBStructuredData::SBStructuredData(const lldb::EventSP &event_sp)
: m_impl_up(new StructuredDataImpl(event_sp)) {
- LLDB_RECORD_CONSTRUCTOR(SBStructuredData, (const lldb::EventSP &), event_sp);
+ LLDB_INSTRUMENT_VA(this, event_sp);
}
SBStructuredData::SBStructuredData(const lldb_private::StructuredDataImpl &impl)
: m_impl_up(new StructuredDataImpl(impl)) {
- LLDB_RECORD_CONSTRUCTOR(SBStructuredData,
- (const lldb_private::StructuredDataImpl &), impl);
+ LLDB_INSTRUMENT_VA(this, impl);
}
SBStructuredData::~SBStructuredData() = default;
SBStructuredData &SBStructuredData::
operator=(const lldb::SBStructuredData &rhs) {
- LLDB_RECORD_METHOD(
- lldb::SBStructuredData &,
- SBStructuredData, operator=,(const lldb::SBStructuredData &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
*m_impl_up = *rhs.m_impl_up;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
lldb::SBError SBStructuredData::SetFromJSON(lldb::SBStream &stream) {
- LLDB_RECORD_METHOD(lldb::SBError, SBStructuredData, SetFromJSON,
- (lldb::SBStream &), stream);
+ LLDB_INSTRUMENT_VA(this, stream);
lldb::SBError error;
std::string json_str(stream.GetData());
@@ -69,69 +64,64 @@ lldb::SBError SBStructuredData::SetFromJSON(lldb::SBStream &stream) {
if (!json_obj || json_obj->GetType() != eStructuredDataTypeDictionary)
error.SetErrorString("Invalid Syntax");
- return LLDB_RECORD_RESULT(error);
+ return error;
}
lldb::SBError SBStructuredData::SetFromJSON(const char *json) {
- LLDB_RECORD_METHOD(lldb::SBError, SBStructuredData, SetFromJSON,
- (const char *), json);
+ LLDB_INSTRUMENT_VA(this, json);
lldb::SBStream s;
s.Print(json);
- return LLDB_RECORD_RESULT(SetFromJSON(s));
+ return SetFromJSON(s);
}
bool SBStructuredData::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBStructuredData, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBStructuredData::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBStructuredData, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_impl_up->IsValid();
}
void SBStructuredData::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBStructuredData, Clear);
+ LLDB_INSTRUMENT_VA(this);
m_impl_up->Clear();
}
SBError SBStructuredData::GetAsJSON(lldb::SBStream &stream) const {
- LLDB_RECORD_METHOD_CONST(lldb::SBError, SBStructuredData, GetAsJSON,
- (lldb::SBStream &), stream);
+ LLDB_INSTRUMENT_VA(this, stream);
SBError error;
error.SetError(m_impl_up->GetAsJSON(stream.ref()));
- return LLDB_RECORD_RESULT(error);
+ return error;
}
lldb::SBError SBStructuredData::GetDescription(lldb::SBStream &stream) const {
- LLDB_RECORD_METHOD_CONST(lldb::SBError, SBStructuredData, GetDescription,
- (lldb::SBStream &), stream);
+ LLDB_INSTRUMENT_VA(this, stream);
Status error = m_impl_up->GetDescription(stream.ref());
SBError sb_error;
sb_error.SetError(error);
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
StructuredDataType SBStructuredData::GetType() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::StructuredDataType, SBStructuredData,
- GetType);
+ LLDB_INSTRUMENT_VA(this);
return m_impl_up->GetType();
}
size_t SBStructuredData::GetSize() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(size_t, SBStructuredData, GetSize);
+ LLDB_INSTRUMENT_VA(this);
return m_impl_up->GetSize();
}
bool SBStructuredData::GetKeys(lldb::SBStringList &keys) const {
- LLDB_RECORD_METHOD_CONST(bool, SBStructuredData, GetKeys,
- (lldb::SBStringList &), keys);
+ LLDB_INSTRUMENT_VA(this, keys);
if (GetType() != eStructuredDataTypeDictionary)
return false;
@@ -157,89 +147,41 @@ bool SBStructuredData::GetKeys(lldb::SBStringList &keys) const {
}
lldb::SBStructuredData SBStructuredData::GetValueForKey(const char *key) const {
- LLDB_RECORD_METHOD_CONST(lldb::SBStructuredData, SBStructuredData,
- GetValueForKey, (const char *), key);
+ LLDB_INSTRUMENT_VA(this, key);
SBStructuredData result;
result.m_impl_up->SetObjectSP(m_impl_up->GetValueForKey(key));
- return LLDB_RECORD_RESULT(result);
+ return result;
}
lldb::SBStructuredData SBStructuredData::GetItemAtIndex(size_t idx) const {
- LLDB_RECORD_METHOD_CONST(lldb::SBStructuredData, SBStructuredData,
- GetItemAtIndex, (size_t), idx);
+ LLDB_INSTRUMENT_VA(this, idx);
SBStructuredData result;
result.m_impl_up->SetObjectSP(m_impl_up->GetItemAtIndex(idx));
- return LLDB_RECORD_RESULT(result);
+ return result;
}
uint64_t SBStructuredData::GetIntegerValue(uint64_t fail_value) const {
- LLDB_RECORD_METHOD_CONST(uint64_t, SBStructuredData, GetIntegerValue,
- (uint64_t), fail_value);
+ LLDB_INSTRUMENT_VA(this, fail_value);
return m_impl_up->GetIntegerValue(fail_value);
}
double SBStructuredData::GetFloatValue(double fail_value) const {
- LLDB_RECORD_METHOD_CONST(double, SBStructuredData, GetFloatValue, (double),
- fail_value);
+ LLDB_INSTRUMENT_VA(this, fail_value);
return m_impl_up->GetFloatValue(fail_value);
}
bool SBStructuredData::GetBooleanValue(bool fail_value) const {
- LLDB_RECORD_METHOD_CONST(bool, SBStructuredData, GetBooleanValue, (bool),
- fail_value);
+ LLDB_INSTRUMENT_VA(this, fail_value);
return m_impl_up->GetBooleanValue(fail_value);
}
size_t SBStructuredData::GetStringValue(char *dst, size_t dst_len) const {
- LLDB_RECORD_CHAR_PTR_METHOD_CONST(size_t, SBStructuredData, GetStringValue,
- (char *, size_t), dst, "", dst_len);
+ LLDB_INSTRUMENT_VA(this, dst, dst_len);
return m_impl_up->GetStringValue(dst, dst_len);
}
-
-namespace lldb_private {
-namespace repro {
-
-template <> void RegisterMethods<SBStructuredData>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBStructuredData, ());
- LLDB_REGISTER_CONSTRUCTOR(SBStructuredData, (const lldb::SBStructuredData &));
- LLDB_REGISTER_CONSTRUCTOR(SBStructuredData, (const lldb::EventSP &));
- LLDB_REGISTER_CONSTRUCTOR(SBStructuredData,
- (const lldb_private::StructuredDataImpl &));
- LLDB_REGISTER_METHOD(
- lldb::SBStructuredData &,
- SBStructuredData, operator=,(const lldb::SBStructuredData &));
- LLDB_REGISTER_METHOD(lldb::SBError, SBStructuredData, SetFromJSON,
- (lldb::SBStream &));
- LLDB_REGISTER_METHOD(lldb::SBError, SBStructuredData, SetFromJSON,
- (const char *));
- LLDB_REGISTER_METHOD_CONST(bool, SBStructuredData, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBStructuredData, operator bool, ());
- LLDB_REGISTER_METHOD(void, SBStructuredData, Clear, ());
- LLDB_REGISTER_METHOD_CONST(lldb::SBError, SBStructuredData, GetAsJSON,
- (lldb::SBStream &));
- LLDB_REGISTER_METHOD_CONST(lldb::SBError, SBStructuredData, GetDescription,
- (lldb::SBStream &));
- LLDB_REGISTER_METHOD_CONST(lldb::StructuredDataType, SBStructuredData,
- GetType, ());
- LLDB_REGISTER_METHOD_CONST(size_t, SBStructuredData, GetSize, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBStructuredData, GetKeys,
- (lldb::SBStringList &));
- LLDB_REGISTER_METHOD_CONST(lldb::SBStructuredData, SBStructuredData,
- GetValueForKey, (const char *));
- LLDB_REGISTER_METHOD_CONST(lldb::SBStructuredData, SBStructuredData,
- GetItemAtIndex, (size_t));
- LLDB_REGISTER_METHOD_CONST(uint64_t, SBStructuredData, GetIntegerValue,
- (uint64_t));
- LLDB_REGISTER_METHOD_CONST(double, SBStructuredData, GetFloatValue, (double));
- LLDB_REGISTER_METHOD_CONST(bool, SBStructuredData, GetBooleanValue, (bool));
- LLDB_REGISTER_CHAR_PTR_METHOD_CONST(size_t, SBStructuredData, GetStringValue);
-}
-
-} // namespace repro
-} // namespace lldb_private
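
SBStructuredData::GetStringValue was the one method here that used the dedicated LLDB_RECORD_CHAR_PTR_METHOD_CONST macro, which knew how to record a caller-provided output buffer; after this change it is instrumented like every other method and simply forwards to the impl. A hedged caller-side sketch, with the JSON payload, helper name, and buffer size chosen purely for illustration:

  #include "lldb/API/SBStructuredData.h"

  static size_t ReadName() {
    lldb::SBStructuredData data;
    data.SetFromJSON("{\"name\": \"queue0\"}");   // must parse to a dictionary, per SetFromJSON above

    lldb::SBStructuredData name = data.GetValueForKey("name");
    char buf[64];                                 // caller-owned output buffer
    return name.GetStringValue(buf, sizeof(buf));
  }
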
diff --git a/contrib/llvm-project/lldb/source/API/SBSymbol.cpp b/contrib/llvm-project/lldb/source/API/SBSymbol.cpp
index d3abc13675f5..b671f987dc99 100644
--- a/contrib/llvm-project/lldb/source/API/SBSymbol.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBSymbol.cpp
@@ -7,32 +7,31 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBSymbol.h"
-#include "SBReproducerPrivate.h"
#include "lldb/API/SBStream.h"
#include "lldb/Core/Disassembler.h"
#include "lldb/Core/Module.h"
#include "lldb/Symbol/Symbol.h"
#include "lldb/Target/ExecutionContext.h"
#include "lldb/Target/Target.h"
+#include "lldb/Utility/Instrumentation.h"
using namespace lldb;
using namespace lldb_private;
-SBSymbol::SBSymbol() { LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBSymbol); }
+SBSymbol::SBSymbol() { LLDB_INSTRUMENT_VA(this); }
SBSymbol::SBSymbol(lldb_private::Symbol *lldb_object_ptr)
: m_opaque_ptr(lldb_object_ptr) {}
SBSymbol::SBSymbol(const lldb::SBSymbol &rhs) : m_opaque_ptr(rhs.m_opaque_ptr) {
- LLDB_RECORD_CONSTRUCTOR(SBSymbol, (const lldb::SBSymbol &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
const SBSymbol &SBSymbol::operator=(const SBSymbol &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBSymbol &,
- SBSymbol, operator=,(const lldb::SBSymbol &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_ptr = rhs.m_opaque_ptr;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBSymbol::~SBSymbol() { m_opaque_ptr = nullptr; }
@@ -42,17 +41,17 @@ void SBSymbol::SetSymbol(lldb_private::Symbol *lldb_object_ptr) {
}
bool SBSymbol::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBSymbol, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBSymbol::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBSymbol, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_ptr != nullptr;
}
const char *SBSymbol::GetName() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBSymbol, GetName);
+ LLDB_INSTRUMENT_VA(this);
const char *name = nullptr;
if (m_opaque_ptr)
@@ -62,7 +61,7 @@ const char *SBSymbol::GetName() const {
}
const char *SBSymbol::GetDisplayName() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBSymbol, GetDisplayName);
+ LLDB_INSTRUMENT_VA(this);
const char *name = nullptr;
if (m_opaque_ptr)
@@ -72,7 +71,7 @@ const char *SBSymbol::GetDisplayName() const {
}
const char *SBSymbol::GetMangledName() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBSymbol, GetMangledName);
+ LLDB_INSTRUMENT_VA(this);
const char *name = nullptr;
if (m_opaque_ptr)
@@ -81,22 +80,19 @@ const char *SBSymbol::GetMangledName() const {
}
bool SBSymbol::operator==(const SBSymbol &rhs) const {
- LLDB_RECORD_METHOD_CONST(bool, SBSymbol, operator==,(const lldb::SBSymbol &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
return m_opaque_ptr == rhs.m_opaque_ptr;
}
bool SBSymbol::operator!=(const SBSymbol &rhs) const {
- LLDB_RECORD_METHOD_CONST(bool, SBSymbol, operator!=,(const lldb::SBSymbol &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
return m_opaque_ptr != rhs.m_opaque_ptr;
}
bool SBSymbol::GetDescription(SBStream &description) {
- LLDB_RECORD_METHOD(bool, SBSymbol, GetDescription, (lldb::SBStream &),
- description);
+ LLDB_INSTRUMENT_VA(this, description);
Stream &strm = description.ref();
@@ -109,16 +105,14 @@ bool SBSymbol::GetDescription(SBStream &description) {
}
SBInstructionList SBSymbol::GetInstructions(SBTarget target) {
- LLDB_RECORD_METHOD(lldb::SBInstructionList, SBSymbol, GetInstructions,
- (lldb::SBTarget), target);
+ LLDB_INSTRUMENT_VA(this, target);
- return LLDB_RECORD_RESULT(GetInstructions(target, nullptr));
+ return GetInstructions(target, nullptr);
}
SBInstructionList SBSymbol::GetInstructions(SBTarget target,
const char *flavor_string) {
- LLDB_RECORD_METHOD(lldb::SBInstructionList, SBSymbol, GetInstructions,
- (lldb::SBTarget, const char *), target, flavor_string);
+ LLDB_INSTRUMENT_VA(this, target, flavor_string);
SBInstructionList sb_instructions;
if (m_opaque_ptr) {
@@ -137,7 +131,7 @@ SBInstructionList SBSymbol::GetInstructions(SBTarget target,
}
}
}
- return LLDB_RECORD_RESULT(sb_instructions);
+ return sb_instructions;
}
lldb_private::Symbol *SBSymbol::get() { return m_opaque_ptr; }
@@ -145,17 +139,17 @@ lldb_private::Symbol *SBSymbol::get() { return m_opaque_ptr; }
void SBSymbol::reset(lldb_private::Symbol *symbol) { m_opaque_ptr = symbol; }
SBAddress SBSymbol::GetStartAddress() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBAddress, SBSymbol, GetStartAddress);
+ LLDB_INSTRUMENT_VA(this);
SBAddress addr;
if (m_opaque_ptr && m_opaque_ptr->ValueIsAddress()) {
addr.SetAddress(m_opaque_ptr->GetAddressRef());
}
- return LLDB_RECORD_RESULT(addr);
+ return addr;
}
SBAddress SBSymbol::GetEndAddress() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBAddress, SBSymbol, GetEndAddress);
+ LLDB_INSTRUMENT_VA(this);
SBAddress addr;
if (m_opaque_ptr && m_opaque_ptr->ValueIsAddress()) {
@@ -165,11 +159,11 @@ SBAddress SBSymbol::GetEndAddress() {
addr->Slide(m_opaque_ptr->GetByteSize());
}
}
- return LLDB_RECORD_RESULT(addr);
+ return addr;
}
uint32_t SBSymbol::GetPrologueByteSize() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBSymbol, GetPrologueByteSize);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_ptr)
return m_opaque_ptr->GetPrologueByteSize();
@@ -177,7 +171,7 @@ uint32_t SBSymbol::GetPrologueByteSize() {
}
SymbolType SBSymbol::GetType() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SymbolType, SBSymbol, GetType);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_ptr)
return m_opaque_ptr->GetType();
@@ -185,7 +179,7 @@ SymbolType SBSymbol::GetType() {
}
bool SBSymbol::IsExternal() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBSymbol, IsExternal);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_ptr)
return m_opaque_ptr->IsExternal();
@@ -193,43 +187,9 @@ bool SBSymbol::IsExternal() {
}
bool SBSymbol::IsSynthetic() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBSymbol, IsSynthetic);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_ptr)
return m_opaque_ptr->IsSynthetic();
return false;
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBSymbol>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBSymbol, ());
- LLDB_REGISTER_CONSTRUCTOR(SBSymbol, (const lldb::SBSymbol &));
- LLDB_REGISTER_METHOD(const lldb::SBSymbol &,
- SBSymbol, operator=,(const lldb::SBSymbol &));
- LLDB_REGISTER_METHOD_CONST(bool, SBSymbol, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBSymbol, operator bool, ());
- LLDB_REGISTER_METHOD_CONST(const char *, SBSymbol, GetName, ());
- LLDB_REGISTER_METHOD_CONST(const char *, SBSymbol, GetDisplayName, ());
- LLDB_REGISTER_METHOD_CONST(const char *, SBSymbol, GetMangledName, ());
- LLDB_REGISTER_METHOD_CONST(bool,
- SBSymbol, operator==,(const lldb::SBSymbol &));
- LLDB_REGISTER_METHOD_CONST(bool,
- SBSymbol, operator!=,(const lldb::SBSymbol &));
- LLDB_REGISTER_METHOD(bool, SBSymbol, GetDescription, (lldb::SBStream &));
- LLDB_REGISTER_METHOD(lldb::SBInstructionList, SBSymbol, GetInstructions,
- (lldb::SBTarget));
- LLDB_REGISTER_METHOD(lldb::SBInstructionList, SBSymbol, GetInstructions,
- (lldb::SBTarget, const char *));
- LLDB_REGISTER_METHOD(lldb::SBAddress, SBSymbol, GetStartAddress, ());
- LLDB_REGISTER_METHOD(lldb::SBAddress, SBSymbol, GetEndAddress, ());
- LLDB_REGISTER_METHOD(uint32_t, SBSymbol, GetPrologueByteSize, ());
- LLDB_REGISTER_METHOD(lldb::SymbolType, SBSymbol, GetType, ());
- LLDB_REGISTER_METHOD(bool, SBSymbol, IsExternal, ());
- LLDB_REGISTER_METHOD(bool, SBSymbol, IsSynthetic, ());
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBSymbolContext.cpp b/contrib/llvm-project/lldb/source/API/SBSymbolContext.cpp
index 89fe051658ff..484399c89590 100644
--- a/contrib/llvm-project/lldb/source/API/SBSymbolContext.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBSymbolContext.cpp
@@ -7,30 +7,26 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBSymbolContext.h"
-#include "SBReproducerPrivate.h"
#include "Utils.h"
#include "lldb/API/SBStream.h"
#include "lldb/Core/Module.h"
#include "lldb/Symbol/Function.h"
#include "lldb/Symbol/Symbol.h"
#include "lldb/Symbol/SymbolContext.h"
+#include "lldb/Utility/Instrumentation.h"
using namespace lldb;
using namespace lldb_private;
-SBSymbolContext::SBSymbolContext() : m_opaque_up() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBSymbolContext);
-}
+SBSymbolContext::SBSymbolContext() { LLDB_INSTRUMENT_VA(this); }
SBSymbolContext::SBSymbolContext(const SymbolContext &sc)
: m_opaque_up(std::make_unique<SymbolContext>(sc)) {
- LLDB_RECORD_CONSTRUCTOR(SBSymbolContext,
- (const lldb_private::SymbolContext &), sc);
+ LLDB_INSTRUMENT_VA(this, sc);
}
-SBSymbolContext::SBSymbolContext(const SBSymbolContext &rhs) : m_opaque_up() {
- LLDB_RECORD_CONSTRUCTOR(SBSymbolContext, (const lldb::SBSymbolContext &),
- rhs);
+SBSymbolContext::SBSymbolContext(const SBSymbolContext &rhs) {
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_up = clone(rhs.m_opaque_up);
}
@@ -38,27 +34,25 @@ SBSymbolContext::SBSymbolContext(const SBSymbolContext &rhs) : m_opaque_up() {
SBSymbolContext::~SBSymbolContext() = default;
const SBSymbolContext &SBSymbolContext::operator=(const SBSymbolContext &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBSymbolContext &,
- SBSymbolContext, operator=,(const lldb::SBSymbolContext &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_up = clone(rhs.m_opaque_up);
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
bool SBSymbolContext::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBSymbolContext, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBSymbolContext::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBSymbolContext, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up != nullptr;
}
SBModule SBSymbolContext::GetModule() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBModule, SBSymbolContext, GetModule);
+ LLDB_INSTRUMENT_VA(this);
SBModule sb_module;
ModuleSP module_sp;
@@ -67,19 +61,17 @@ SBModule SBSymbolContext::GetModule() {
sb_module.SetSP(module_sp);
}
- return LLDB_RECORD_RESULT(sb_module);
+ return sb_module;
}
SBCompileUnit SBSymbolContext::GetCompileUnit() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBCompileUnit, SBSymbolContext,
- GetCompileUnit);
+ LLDB_INSTRUMENT_VA(this);
- return LLDB_RECORD_RESULT(
- SBCompileUnit(m_opaque_up ? m_opaque_up->comp_unit : nullptr));
+ return SBCompileUnit(m_opaque_up ? m_opaque_up->comp_unit : nullptr);
}
SBFunction SBSymbolContext::GetFunction() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBFunction, SBSymbolContext, GetFunction);
+ LLDB_INSTRUMENT_VA(this);
Function *function = nullptr;
@@ -88,28 +80,27 @@ SBFunction SBSymbolContext::GetFunction() {
SBFunction sb_function(function);
- return LLDB_RECORD_RESULT(sb_function);
+ return sb_function;
}
SBBlock SBSymbolContext::GetBlock() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBBlock, SBSymbolContext, GetBlock);
+ LLDB_INSTRUMENT_VA(this);
- return LLDB_RECORD_RESULT(
- SBBlock(m_opaque_up ? m_opaque_up->block : nullptr));
+ return SBBlock(m_opaque_up ? m_opaque_up->block : nullptr);
}
SBLineEntry SBSymbolContext::GetLineEntry() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBLineEntry, SBSymbolContext, GetLineEntry);
+ LLDB_INSTRUMENT_VA(this);
SBLineEntry sb_line_entry;
if (m_opaque_up)
sb_line_entry.SetLineEntry(m_opaque_up->line_entry);
- return LLDB_RECORD_RESULT(sb_line_entry);
+ return sb_line_entry;
}
SBSymbol SBSymbolContext::GetSymbol() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBSymbol, SBSymbolContext, GetSymbol);
+ LLDB_INSTRUMENT_VA(this);
Symbol *symbol = nullptr;
@@ -118,39 +109,35 @@ SBSymbol SBSymbolContext::GetSymbol() {
SBSymbol sb_symbol(symbol);
- return LLDB_RECORD_RESULT(sb_symbol);
+ return sb_symbol;
}
void SBSymbolContext::SetModule(lldb::SBModule module) {
- LLDB_RECORD_METHOD(void, SBSymbolContext, SetModule, (lldb::SBModule),
- module);
+ LLDB_INSTRUMENT_VA(this, module);
ref().module_sp = module.GetSP();
}
void SBSymbolContext::SetCompileUnit(lldb::SBCompileUnit compile_unit) {
- LLDB_RECORD_METHOD(void, SBSymbolContext, SetCompileUnit,
- (lldb::SBCompileUnit), compile_unit);
+ LLDB_INSTRUMENT_VA(this, compile_unit);
ref().comp_unit = compile_unit.get();
}
void SBSymbolContext::SetFunction(lldb::SBFunction function) {
- LLDB_RECORD_METHOD(void, SBSymbolContext, SetFunction, (lldb::SBFunction),
- function);
+ LLDB_INSTRUMENT_VA(this, function);
ref().function = function.get();
}
void SBSymbolContext::SetBlock(lldb::SBBlock block) {
- LLDB_RECORD_METHOD(void, SBSymbolContext, SetBlock, (lldb::SBBlock), block);
+ LLDB_INSTRUMENT_VA(this, block);
ref().block = block.GetPtr();
}
void SBSymbolContext::SetLineEntry(lldb::SBLineEntry line_entry) {
- LLDB_RECORD_METHOD(void, SBSymbolContext, SetLineEntry, (lldb::SBLineEntry),
- line_entry);
+ LLDB_INSTRUMENT_VA(this, line_entry);
if (line_entry.IsValid())
ref().line_entry = line_entry.ref();
@@ -159,8 +146,7 @@ void SBSymbolContext::SetLineEntry(lldb::SBLineEntry line_entry) {
}
void SBSymbolContext::SetSymbol(lldb::SBSymbol symbol) {
- LLDB_RECORD_METHOD(void, SBSymbolContext, SetSymbol, (lldb::SBSymbol),
- symbol);
+ LLDB_INSTRUMENT_VA(this, symbol);
ref().symbol = symbol.get();
}
@@ -191,8 +177,7 @@ lldb_private::SymbolContext *SBSymbolContext::get() const {
}
bool SBSymbolContext::GetDescription(SBStream &description) {
- LLDB_RECORD_METHOD(bool, SBSymbolContext, GetDescription, (lldb::SBStream &),
- description);
+ LLDB_INSTRUMENT_VA(this, description);
Stream &strm = description.ref();
@@ -207,56 +192,13 @@ bool SBSymbolContext::GetDescription(SBStream &description) {
SBSymbolContext
SBSymbolContext::GetParentOfInlinedScope(const SBAddress &curr_frame_pc,
SBAddress &parent_frame_addr) const {
- LLDB_RECORD_METHOD_CONST(lldb::SBSymbolContext, SBSymbolContext,
- GetParentOfInlinedScope,
- (const lldb::SBAddress &, lldb::SBAddress &),
- curr_frame_pc, parent_frame_addr);
+ LLDB_INSTRUMENT_VA(this, curr_frame_pc, parent_frame_addr);
SBSymbolContext sb_sc;
if (m_opaque_up.get() && curr_frame_pc.IsValid()) {
if (m_opaque_up->GetParentOfInlinedScope(curr_frame_pc.ref(), sb_sc.ref(),
parent_frame_addr.ref()))
- return LLDB_RECORD_RESULT(sb_sc);
+ return sb_sc;
}
- return LLDB_RECORD_RESULT(SBSymbolContext());
-}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBSymbolContext>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBSymbolContext, ());
- LLDB_REGISTER_CONSTRUCTOR(SBSymbolContext,
- (const lldb_private::SymbolContext &));
- LLDB_REGISTER_CONSTRUCTOR(SBSymbolContext, (const lldb::SBSymbolContext &));
- LLDB_REGISTER_METHOD(
- const lldb::SBSymbolContext &,
- SBSymbolContext, operator=,(const lldb::SBSymbolContext &));
- LLDB_REGISTER_METHOD_CONST(bool, SBSymbolContext, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBSymbolContext, operator bool, ());
- LLDB_REGISTER_METHOD(lldb::SBModule, SBSymbolContext, GetModule, ());
- LLDB_REGISTER_METHOD(lldb::SBCompileUnit, SBSymbolContext, GetCompileUnit,
- ());
- LLDB_REGISTER_METHOD(lldb::SBFunction, SBSymbolContext, GetFunction, ());
- LLDB_REGISTER_METHOD(lldb::SBBlock, SBSymbolContext, GetBlock, ());
- LLDB_REGISTER_METHOD(lldb::SBLineEntry, SBSymbolContext, GetLineEntry, ());
- LLDB_REGISTER_METHOD(lldb::SBSymbol, SBSymbolContext, GetSymbol, ());
- LLDB_REGISTER_METHOD(void, SBSymbolContext, SetModule, (lldb::SBModule));
- LLDB_REGISTER_METHOD(void, SBSymbolContext, SetCompileUnit,
- (lldb::SBCompileUnit));
- LLDB_REGISTER_METHOD(void, SBSymbolContext, SetFunction,
- (lldb::SBFunction));
- LLDB_REGISTER_METHOD(void, SBSymbolContext, SetBlock, (lldb::SBBlock));
- LLDB_REGISTER_METHOD(void, SBSymbolContext, SetLineEntry,
- (lldb::SBLineEntry));
- LLDB_REGISTER_METHOD(void, SBSymbolContext, SetSymbol, (lldb::SBSymbol));
- LLDB_REGISTER_METHOD(bool, SBSymbolContext, GetDescription,
- (lldb::SBStream &));
- LLDB_REGISTER_METHOD_CONST(lldb::SBSymbolContext, SBSymbolContext,
- GetParentOfInlinedScope,
- (const lldb::SBAddress &, lldb::SBAddress &));
-}
-
-}
+ return SBSymbolContext();
}
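The hunks above show the shape of the whole change: each SB API entry point drops its signature-specific LLDB_RECORD_CONSTRUCTOR / LLDB_RECORD_METHOD macro and its LLDB_RECORD_RESULT return wrapper in favor of a single variadic LLDB_INSTRUMENT_VA(...) call, and the repro::RegisterMethods<> registry at the bottom of the file disappears entirely. The sketch below only illustrates that macro pattern under stated assumptions; SKETCH_INSTRUMENT_VA, sketch::Instrumenter, and the logging behavior are hypothetical and are not the implementation in lldb/Utility/Instrumentation.h.

// Hypothetical sketch only -- not the lldb/Utility/Instrumentation.h code.
// A single variadic macro logs the current function and its arguments,
// standing in for the per-signature LLDB_RECORD_* family removed above.
#include <iostream>
#include <sstream>
#include <string>

namespace sketch {

// Join every argument into "a, b, c"; pointers (including `this`) print as
// addresses via the usual ostream overloads.
template <typename... Ts> std::string FormatArgs(const Ts &...args) {
  std::ostringstream os;
  const char *sep = "";
  ((os << sep << args, sep = ", "), ...);
  return os.str();
}

// RAII recorder: emits one line when the instrumented function is entered.
// Nothing happens on return, which is why the LLDB_RECORD_RESULT() wrappers
// around return values could simply be deleted in the diff above.
struct Instrumenter {
  template <typename... Ts>
  Instrumenter(const char *func, const Ts &...args) {
    std::clog << func << "(" << FormatArgs(args...) << ")\n";
  }
};

} // namespace sketch

// One macro covers constructors, const and non-const methods alike.
#define SKETCH_INSTRUMENT_VA(...)                                              \
  sketch::Instrumenter _instrumenter(__func__, __VA_ARGS__)

struct SBThing {
  int Get(int idx) const {
    SKETCH_INSTRUMENT_VA(this, idx); // was: LLDB_RECORD_METHOD_CONST(...)
    return idx * 2;                  // was: return LLDB_RECORD_RESULT(...)
  }
};

int main() {
  SBThing t;
  return t.Get(21) == 42 ? 0 : 1;
}

Because one macro handles any argument list, a new SB method needs a single instrumentation line and no per-signature registration entry, which is what lets the RegisterMethods blocks be removed wholesale.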
diff --git a/contrib/llvm-project/lldb/source/API/SBSymbolContextList.cpp b/contrib/llvm-project/lldb/source/API/SBSymbolContextList.cpp
index 70a8bbe6694c..baa558caebbc 100644
--- a/contrib/llvm-project/lldb/source/API/SBSymbolContextList.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBSymbolContextList.cpp
@@ -7,23 +7,21 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBSymbolContextList.h"
-#include "SBReproducerPrivate.h"
#include "Utils.h"
#include "lldb/API/SBStream.h"
#include "lldb/Symbol/SymbolContext.h"
+#include "lldb/Utility/Instrumentation.h"
using namespace lldb;
using namespace lldb_private;
SBSymbolContextList::SBSymbolContextList()
: m_opaque_up(new SymbolContextList()) {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBSymbolContextList);
+ LLDB_INSTRUMENT_VA(this);
}
-SBSymbolContextList::SBSymbolContextList(const SBSymbolContextList &rhs)
- : m_opaque_up() {
- LLDB_RECORD_CONSTRUCTOR(SBSymbolContextList,
- (const lldb::SBSymbolContextList &), rhs);
+SBSymbolContextList::SBSymbolContextList(const SBSymbolContextList &rhs) {
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_up = clone(rhs.m_opaque_up);
}
@@ -32,17 +30,15 @@ SBSymbolContextList::~SBSymbolContextList() = default;
const SBSymbolContextList &SBSymbolContextList::
operator=(const SBSymbolContextList &rhs) {
- LLDB_RECORD_METHOD(
- const lldb::SBSymbolContextList &,
- SBSymbolContextList, operator=,(const lldb::SBSymbolContextList &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_up = clone(rhs.m_opaque_up);
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
uint32_t SBSymbolContextList::GetSize() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBSymbolContextList, GetSize);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_up)
return m_opaque_up->GetSize();
@@ -50,8 +46,7 @@ uint32_t SBSymbolContextList::GetSize() const {
}
SBSymbolContext SBSymbolContextList::GetContextAtIndex(uint32_t idx) {
- LLDB_RECORD_METHOD(lldb::SBSymbolContext, SBSymbolContextList,
- GetContextAtIndex, (uint32_t), idx);
+ LLDB_INSTRUMENT_VA(this, idx);
SBSymbolContext sb_sc;
if (m_opaque_up) {
@@ -59,38 +54,36 @@ SBSymbolContext SBSymbolContextList::GetContextAtIndex(uint32_t idx) {
if (m_opaque_up->GetContextAtIndex(idx, sc))
sb_sc = sc;
}
- return LLDB_RECORD_RESULT(sb_sc);
+ return sb_sc;
}
void SBSymbolContextList::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBSymbolContextList, Clear);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_up)
m_opaque_up->Clear();
}
void SBSymbolContextList::Append(SBSymbolContext &sc) {
- LLDB_RECORD_METHOD(void, SBSymbolContextList, Append,
- (lldb::SBSymbolContext &), sc);
+ LLDB_INSTRUMENT_VA(this, sc);
if (sc.IsValid() && m_opaque_up.get())
m_opaque_up->Append(*sc);
}
void SBSymbolContextList::Append(SBSymbolContextList &sc_list) {
- LLDB_RECORD_METHOD(void, SBSymbolContextList, Append,
- (lldb::SBSymbolContextList &), sc_list);
+ LLDB_INSTRUMENT_VA(this, sc_list);
if (sc_list.IsValid() && m_opaque_up.get())
m_opaque_up->Append(*sc_list);
}
bool SBSymbolContextList::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBSymbolContextList, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBSymbolContextList::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBSymbolContextList, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up != nullptr;
}
@@ -105,39 +98,10 @@ lldb_private::SymbolContextList &SBSymbolContextList::operator*() const {
}
bool SBSymbolContextList::GetDescription(lldb::SBStream &description) {
- LLDB_RECORD_METHOD(bool, SBSymbolContextList, GetDescription,
- (lldb::SBStream &), description);
+ LLDB_INSTRUMENT_VA(this, description);
Stream &strm = description.ref();
if (m_opaque_up)
m_opaque_up->GetDescription(&strm, lldb::eDescriptionLevelFull, nullptr);
return true;
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBSymbolContextList>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBSymbolContextList, ());
- LLDB_REGISTER_CONSTRUCTOR(SBSymbolContextList,
- (const lldb::SBSymbolContextList &));
- LLDB_REGISTER_METHOD(
- const lldb::SBSymbolContextList &,
- SBSymbolContextList, operator=,(const lldb::SBSymbolContextList &));
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBSymbolContextList, GetSize, ());
- LLDB_REGISTER_METHOD(lldb::SBSymbolContext, SBSymbolContextList,
- GetContextAtIndex, (uint32_t));
- LLDB_REGISTER_METHOD(void, SBSymbolContextList, Clear, ());
- LLDB_REGISTER_METHOD(void, SBSymbolContextList, Append,
- (lldb::SBSymbolContext &));
- LLDB_REGISTER_METHOD(void, SBSymbolContextList, Append,
- (lldb::SBSymbolContextList &));
- LLDB_REGISTER_METHOD_CONST(bool, SBSymbolContextList, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBSymbolContextList, operator bool, ());
- LLDB_REGISTER_METHOD(bool, SBSymbolContextList, GetDescription,
- (lldb::SBStream &));
-}
-
-}
-}
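A second recurring detail in these files is the copy path: the copy constructors and assignment operators go through clone(rhs.m_opaque_up) to deep-copy the unique_ptr-held opaque object. A minimal stand-in for that helper is sketched below; the real helper lives in the SB layer's Utils.h, and SBWrapper / SymbolContextLike are invented names used only for illustration.

// Hypothetical sketch of a clone() helper for unique_ptr-held opaque state.
#include <memory>

template <typename T>
std::unique_ptr<T> clone(const std::unique_ptr<T> &src) {
  // Copy the pointee when there is one; an empty source stays empty.
  return src ? std::make_unique<T>(*src) : nullptr;
}

struct SymbolContextLike { int value = 0; };

struct SBWrapper {
  SBWrapper() = default;
  SBWrapper(const SBWrapper &rhs) { m_opaque_up = clone(rhs.m_opaque_up); }
  SBWrapper &operator=(const SBWrapper &rhs) {
    if (this != &rhs)
      m_opaque_up = clone(rhs.m_opaque_up);
    return *this;
  }
  std::unique_ptr<SymbolContextLike> m_opaque_up;
};

int main() {
  SBWrapper a;
  a.m_opaque_up = std::make_unique<SymbolContextLike>();
  a.m_opaque_up->value = 7;
  SBWrapper b(a); // deep copy via clone(); b owns its own SymbolContextLike
  return (b.m_opaque_up->value == 7) ? 0 : 1;
}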
diff --git a/contrib/llvm-project/lldb/source/API/SBTarget.cpp b/contrib/llvm-project/lldb/source/API/SBTarget.cpp
index dc79c77fee9e..75534b2343d4 100644
--- a/contrib/llvm-project/lldb/source/API/SBTarget.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBTarget.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBTarget.h"
-#include "SBReproducerPrivate.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/lldb-public.h"
@@ -93,48 +93,41 @@ static Status AttachToProcess(ProcessAttachInfo &attach_info, Target &target) {
}
// SBTarget constructor
-SBTarget::SBTarget() : m_opaque_sp() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBTarget);
-}
+SBTarget::SBTarget() { LLDB_INSTRUMENT_VA(this); }
SBTarget::SBTarget(const SBTarget &rhs) : m_opaque_sp(rhs.m_opaque_sp) {
- LLDB_RECORD_CONSTRUCTOR(SBTarget, (const lldb::SBTarget &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
SBTarget::SBTarget(const TargetSP &target_sp) : m_opaque_sp(target_sp) {
- LLDB_RECORD_CONSTRUCTOR(SBTarget, (const lldb::TargetSP &), target_sp);
+ LLDB_INSTRUMENT_VA(this, target_sp);
}
const SBTarget &SBTarget::operator=(const SBTarget &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBTarget &,
- SBTarget, operator=,(const lldb::SBTarget &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_sp = rhs.m_opaque_sp;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
// Destructor
SBTarget::~SBTarget() = default;
bool SBTarget::EventIsTargetEvent(const SBEvent &event) {
- LLDB_RECORD_STATIC_METHOD(bool, SBTarget, EventIsTargetEvent,
- (const lldb::SBEvent &), event);
+ LLDB_INSTRUMENT_VA(event);
return Target::TargetEventData::GetEventDataFromEvent(event.get()) != nullptr;
}
SBTarget SBTarget::GetTargetFromEvent(const SBEvent &event) {
- LLDB_RECORD_STATIC_METHOD(lldb::SBTarget, SBTarget, GetTargetFromEvent,
- (const lldb::SBEvent &), event);
+ LLDB_INSTRUMENT_VA(event);
- return LLDB_RECORD_RESULT(
- Target::TargetEventData::GetTargetFromEvent(event.get()));
+ return Target::TargetEventData::GetTargetFromEvent(event.get());
}
uint32_t SBTarget::GetNumModulesFromEvent(const SBEvent &event) {
- LLDB_RECORD_STATIC_METHOD(uint32_t, SBTarget, GetNumModulesFromEvent,
- (const lldb::SBEvent &), event);
+ LLDB_INSTRUMENT_VA(event);
const ModuleList module_list =
Target::TargetEventData::GetModuleListFromEvent(event.get());
@@ -143,34 +136,31 @@ uint32_t SBTarget::GetNumModulesFromEvent(const SBEvent &event) {
SBModule SBTarget::GetModuleAtIndexFromEvent(const uint32_t idx,
const SBEvent &event) {
- LLDB_RECORD_STATIC_METHOD(lldb::SBModule, SBTarget, GetModuleAtIndexFromEvent,
- (const uint32_t, const lldb::SBEvent &), idx,
- event);
+ LLDB_INSTRUMENT_VA(idx, event);
const ModuleList module_list =
Target::TargetEventData::GetModuleListFromEvent(event.get());
- return LLDB_RECORD_RESULT(SBModule(module_list.GetModuleAtIndex(idx)));
+ return SBModule(module_list.GetModuleAtIndex(idx));
}
const char *SBTarget::GetBroadcasterClassName() {
- LLDB_RECORD_STATIC_METHOD_NO_ARGS(const char *, SBTarget,
- GetBroadcasterClassName);
+ LLDB_INSTRUMENT();
return Target::GetStaticBroadcasterClass().AsCString();
}
bool SBTarget::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBTarget, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBTarget::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBTarget, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp.get() != nullptr && m_opaque_sp->IsValid();
}
SBProcess SBTarget::GetProcess() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBProcess, SBTarget, GetProcess);
+ LLDB_INSTRUMENT_VA(this);
SBProcess sb_process;
ProcessSP process_sp;
@@ -180,49 +170,49 @@ SBProcess SBTarget::GetProcess() {
sb_process.SetSP(process_sp);
}
- return LLDB_RECORD_RESULT(sb_process);
+ return sb_process;
}
SBPlatform SBTarget::GetPlatform() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBPlatform, SBTarget, GetPlatform);
+ LLDB_INSTRUMENT_VA(this);
TargetSP target_sp(GetSP());
if (!target_sp)
- return LLDB_RECORD_RESULT(SBPlatform());
+ return SBPlatform();
SBPlatform platform;
platform.m_opaque_sp = target_sp->GetPlatform();
- return LLDB_RECORD_RESULT(platform);
+ return platform;
}
SBDebugger SBTarget::GetDebugger() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBDebugger, SBTarget, GetDebugger);
+ LLDB_INSTRUMENT_VA(this);
SBDebugger debugger;
TargetSP target_sp(GetSP());
if (target_sp)
debugger.reset(target_sp->GetDebugger().shared_from_this());
- return LLDB_RECORD_RESULT(debugger);
+ return debugger;
}
SBStructuredData SBTarget::GetStatistics() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBStructuredData, SBTarget, GetStatistics);
+ LLDB_INSTRUMENT_VA(this);
SBStructuredData data;
TargetSP target_sp(GetSP());
if (!target_sp)
- return LLDB_RECORD_RESULT(data);
+ return data;
std::string json_str =
llvm::formatv("{0:2}",
DebuggerStats::ReportStatistics(target_sp->GetDebugger(),
target_sp.get())).str();
data.m_impl_up->SetObjectSP(StructuredData::ParseJSON(json_str));
- return LLDB_RECORD_RESULT(data);
+ return data;
}
void SBTarget::SetCollectingStats(bool v) {
- LLDB_RECORD_METHOD(void, SBTarget, SetCollectingStats, (bool), v);
+ LLDB_INSTRUMENT_VA(this, v);
TargetSP target_sp(GetSP());
if (!target_sp)
@@ -231,7 +221,7 @@ void SBTarget::SetCollectingStats(bool v) {
}
bool SBTarget::GetCollectingStats() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBTarget, GetCollectingStats);
+ LLDB_INSTRUMENT_VA(this);
TargetSP target_sp(GetSP());
if (!target_sp)
@@ -240,16 +230,14 @@ bool SBTarget::GetCollectingStats() {
}
SBProcess SBTarget::LoadCore(const char *core_file) {
- LLDB_RECORD_METHOD(lldb::SBProcess, SBTarget, LoadCore, (const char *),
- core_file);
+ LLDB_INSTRUMENT_VA(this, core_file);
lldb::SBError error; // Ignored
- return LLDB_RECORD_RESULT(LoadCore(core_file, error));
+ return LoadCore(core_file, error);
}
SBProcess SBTarget::LoadCore(const char *core_file, lldb::SBError &error) {
- LLDB_RECORD_METHOD(lldb::SBProcess, SBTarget, LoadCore,
- (const char *, lldb::SBError &), core_file, error);
+ LLDB_INSTRUMENT_VA(this, core_file, error);
SBProcess sb_process;
TargetSP target_sp(GetSP());
@@ -268,18 +256,16 @@ SBProcess SBTarget::LoadCore(const char *core_file, lldb::SBError &error) {
} else {
error.SetErrorString("SBTarget is invalid");
}
- return LLDB_RECORD_RESULT(sb_process);
+ return sb_process;
}
SBProcess SBTarget::LaunchSimple(char const **argv, char const **envp,
const char *working_directory) {
- LLDB_RECORD_METHOD(lldb::SBProcess, SBTarget, LaunchSimple,
- (const char **, const char **, const char *), argv, envp,
- working_directory);
+ LLDB_INSTRUMENT_VA(this, argv, envp, working_directory);
TargetSP target_sp = GetSP();
if (!target_sp)
- return LLDB_RECORD_RESULT(SBProcess());
+ return SBProcess();
SBLaunchInfo launch_info = GetLaunchInfo();
@@ -294,11 +280,11 @@ SBProcess SBTarget::LaunchSimple(char const **argv, char const **envp,
launch_info.SetWorkingDirectory(working_directory);
SBError error;
- return LLDB_RECORD_RESULT(Launch(launch_info, error));
+ return Launch(launch_info, error);
}
SBError SBTarget::Install() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBError, SBTarget, Install);
+ LLDB_INSTRUMENT_VA(this);
SBError sb_error;
TargetSP target_sp(GetSP());
@@ -306,7 +292,7 @@ SBError SBTarget::Install() {
std::lock_guard<std::recursive_mutex> guard(target_sp->GetAPIMutex());
sb_error.ref() = target_sp->Install(nullptr);
}
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
SBProcess SBTarget::Launch(SBListener &listener, char const **argv,
@@ -315,12 +301,9 @@ SBProcess SBTarget::Launch(SBListener &listener, char const **argv,
const char *working_directory,
uint32_t launch_flags, // See LaunchFlags
bool stop_at_entry, lldb::SBError &error) {
- LLDB_RECORD_METHOD(lldb::SBProcess, SBTarget, Launch,
- (lldb::SBListener &, const char **, const char **,
- const char *, const char *, const char *, const char *,
- uint32_t, bool, lldb::SBError &),
- listener, argv, envp, stdin_path, stdout_path, stderr_path,
- working_directory, launch_flags, stop_at_entry, error);
+ LLDB_INSTRUMENT_VA(this, listener, argv, envp, stdin_path, stdout_path,
+ stderr_path, working_directory, launch_flags,
+ stop_at_entry, error);
SBProcess sb_process;
ProcessSP process_sp;
@@ -345,7 +328,7 @@ SBProcess SBTarget::Launch(SBListener &listener, char const **argv,
error.SetErrorString("process attach is in progress");
else
error.SetErrorString("a process is already being debugged");
- return LLDB_RECORD_RESULT(sb_process);
+ return sb_process;
}
}
@@ -356,7 +339,7 @@ SBProcess SBTarget::Launch(SBListener &listener, char const **argv,
if (listener.IsValid()) {
error.SetErrorString("process is connected and already has a listener, "
"pass empty listener");
- return LLDB_RECORD_RESULT(sb_process);
+ return sb_process;
}
}
@@ -394,14 +377,11 @@ SBProcess SBTarget::Launch(SBListener &listener, char const **argv,
error.SetErrorString("SBTarget is invalid");
}
- return LLDB_RECORD_RESULT(sb_process);
+ return sb_process;
}
SBProcess SBTarget::Launch(SBLaunchInfo &sb_launch_info, SBError &error) {
- LLDB_RECORD_METHOD(lldb::SBProcess, SBTarget, Launch,
- (lldb::SBLaunchInfo &, lldb::SBError &), sb_launch_info,
- error);
-
+ LLDB_INSTRUMENT_VA(this, sb_launch_info, error);
SBProcess sb_process;
TargetSP target_sp(GetSP());
@@ -419,7 +399,7 @@ SBProcess SBTarget::Launch(SBLaunchInfo &sb_launch_info, SBError &error) {
error.SetErrorString("process attach is in progress");
else
error.SetErrorString("a process is already being debugged");
- return LLDB_RECORD_RESULT(sb_process);
+ return sb_process;
}
}
}
@@ -443,13 +423,11 @@ SBProcess SBTarget::Launch(SBLaunchInfo &sb_launch_info, SBError &error) {
error.SetErrorString("SBTarget is invalid");
}
- return LLDB_RECORD_RESULT(sb_process);
+ return sb_process;
}
lldb::SBProcess SBTarget::Attach(SBAttachInfo &sb_attach_info, SBError &error) {
- LLDB_RECORD_METHOD(lldb::SBProcess, SBTarget, Attach,
- (lldb::SBAttachInfo &, lldb::SBError &), sb_attach_info,
- error);
+ LLDB_INSTRUMENT_VA(this, sb_attach_info, error);
SBProcess sb_process;
TargetSP target_sp(GetSP());
@@ -467,7 +445,7 @@ lldb::SBProcess SBTarget::Attach(SBAttachInfo &sb_attach_info, SBError &error) {
} else {
error.ref().SetErrorStringWithFormat(
"no process found with process ID %" PRIu64, attach_pid);
- return LLDB_RECORD_RESULT(sb_process);
+ return sb_process;
}
}
}
@@ -478,7 +456,7 @@ lldb::SBProcess SBTarget::Attach(SBAttachInfo &sb_attach_info, SBError &error) {
error.SetErrorString("SBTarget is invalid");
}
- return LLDB_RECORD_RESULT(sb_process);
+ return sb_process;
}
lldb::SBProcess SBTarget::AttachToProcessWithID(
@@ -486,9 +464,7 @@ lldb::SBProcess SBTarget::AttachToProcessWithID(
lldb::pid_t pid, // The process ID to attach to
SBError &error // An error explaining what went wrong if attach fails
) {
- LLDB_RECORD_METHOD(lldb::SBProcess, SBTarget, AttachToProcessWithID,
- (lldb::SBListener &, lldb::pid_t, lldb::SBError &),
- listener, pid, error);
+ LLDB_INSTRUMENT_VA(this, listener, pid, error);
SBProcess sb_process;
TargetSP target_sp(GetSP());
@@ -509,7 +485,7 @@ lldb::SBProcess SBTarget::AttachToProcessWithID(
} else
error.SetErrorString("SBTarget is invalid");
- return LLDB_RECORD_RESULT(sb_process);
+ return sb_process;
}
lldb::SBProcess SBTarget::AttachToProcessWithName(
@@ -518,9 +494,7 @@ lldb::SBProcess SBTarget::AttachToProcessWithName(
bool wait_for, // if true wait for a new instance of "name" to be launched
SBError &error // An error explaining what went wrong if attach fails
) {
- LLDB_RECORD_METHOD(lldb::SBProcess, SBTarget, AttachToProcessWithName,
- (lldb::SBListener &, const char *, bool, lldb::SBError &),
- listener, name, wait_for, error);
+ LLDB_INSTRUMENT_VA(this, listener, name, wait_for, error);
SBProcess sb_process;
TargetSP target_sp(GetSP());
@@ -538,16 +512,13 @@ lldb::SBProcess SBTarget::AttachToProcessWithName(
} else
error.SetErrorString("SBTarget is invalid");
- return LLDB_RECORD_RESULT(sb_process);
+ return sb_process;
}
lldb::SBProcess SBTarget::ConnectRemote(SBListener &listener, const char *url,
const char *plugin_name,
SBError &error) {
- LLDB_RECORD_METHOD(
- lldb::SBProcess, SBTarget, ConnectRemote,
- (lldb::SBListener &, const char *, const char *, lldb::SBError &),
- listener, url, plugin_name, error);
+ LLDB_INSTRUMENT_VA(this, listener, url, plugin_name, error);
SBProcess sb_process;
ProcessSP process_sp;
@@ -573,11 +544,11 @@ lldb::SBProcess SBTarget::ConnectRemote(SBListener &listener, const char *url,
error.SetErrorString("SBTarget is invalid");
}
- return LLDB_RECORD_RESULT(sb_process);
+ return sb_process;
}
SBFileSpec SBTarget::GetExecutable() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBFileSpec, SBTarget, GetExecutable);
+ LLDB_INSTRUMENT_VA(this);
SBFileSpec exe_file_spec;
TargetSP target_sp(GetSP());
@@ -587,19 +558,17 @@ SBFileSpec SBTarget::GetExecutable() {
exe_file_spec.SetFileSpec(exe_module->GetFileSpec());
}
- return LLDB_RECORD_RESULT(exe_file_spec);
+ return exe_file_spec;
}
bool SBTarget::operator==(const SBTarget &rhs) const {
- LLDB_RECORD_METHOD_CONST(bool, SBTarget, operator==,(const lldb::SBTarget &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
return m_opaque_sp.get() == rhs.m_opaque_sp.get();
}
bool SBTarget::operator!=(const SBTarget &rhs) const {
- LLDB_RECORD_METHOD_CONST(bool, SBTarget, operator!=,(const lldb::SBTarget &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
return m_opaque_sp.get() != rhs.m_opaque_sp.get();
}
@@ -611,8 +580,7 @@ void SBTarget::SetSP(const lldb::TargetSP &target_sp) {
}
lldb::SBAddress SBTarget::ResolveLoadAddress(lldb::addr_t vm_addr) {
- LLDB_RECORD_METHOD(lldb::SBAddress, SBTarget, ResolveLoadAddress,
- (lldb::addr_t), vm_addr);
+ LLDB_INSTRUMENT_VA(this, vm_addr);
lldb::SBAddress sb_addr;
Address &addr = sb_addr.ref();
@@ -620,18 +588,17 @@ lldb::SBAddress SBTarget::ResolveLoadAddress(lldb::addr_t vm_addr) {
if (target_sp) {
std::lock_guard<std::recursive_mutex> guard(target_sp->GetAPIMutex());
if (target_sp->ResolveLoadAddress(vm_addr, addr))
- return LLDB_RECORD_RESULT(sb_addr);
+ return sb_addr;
}
// We have a load address that isn't in a section, just return an address
// with the offset filled in (the address) and the section set to NULL
addr.SetRawAddress(vm_addr);
- return LLDB_RECORD_RESULT(sb_addr);
+ return sb_addr;
}
lldb::SBAddress SBTarget::ResolveFileAddress(lldb::addr_t file_addr) {
- LLDB_RECORD_METHOD(lldb::SBAddress, SBTarget, ResolveFileAddress,
- (lldb::addr_t), file_addr);
+ LLDB_INSTRUMENT_VA(this, file_addr);
lldb::SBAddress sb_addr;
Address &addr = sb_addr.ref();
@@ -639,17 +606,16 @@ lldb::SBAddress SBTarget::ResolveFileAddress(lldb::addr_t file_addr) {
if (target_sp) {
std::lock_guard<std::recursive_mutex> guard(target_sp->GetAPIMutex());
if (target_sp->ResolveFileAddress(file_addr, addr))
- return LLDB_RECORD_RESULT(sb_addr);
+ return sb_addr;
}
addr.SetRawAddress(file_addr);
- return LLDB_RECORD_RESULT(sb_addr);
+ return sb_addr;
}
lldb::SBAddress SBTarget::ResolvePastLoadAddress(uint32_t stop_id,
lldb::addr_t vm_addr) {
- LLDB_RECORD_METHOD(lldb::SBAddress, SBTarget, ResolvePastLoadAddress,
- (uint32_t, lldb::addr_t), stop_id, vm_addr);
+ LLDB_INSTRUMENT_VA(this, stop_id, vm_addr);
lldb::SBAddress sb_addr;
Address &addr = sb_addr.ref();
@@ -657,21 +623,19 @@ lldb::SBAddress SBTarget::ResolvePastLoadAddress(uint32_t stop_id,
if (target_sp) {
std::lock_guard<std::recursive_mutex> guard(target_sp->GetAPIMutex());
if (target_sp->ResolveLoadAddress(vm_addr, addr))
- return LLDB_RECORD_RESULT(sb_addr);
+ return sb_addr;
}
// We have a load address that isn't in a section, just return an address
// with the offset filled in (the address) and the section set to NULL
addr.SetRawAddress(vm_addr);
- return LLDB_RECORD_RESULT(sb_addr);
+ return sb_addr;
}
SBSymbolContext
SBTarget::ResolveSymbolContextForAddress(const SBAddress &addr,
uint32_t resolve_scope) {
- LLDB_RECORD_METHOD(lldb::SBSymbolContext, SBTarget,
- ResolveSymbolContextForAddress,
- (const lldb::SBAddress &, uint32_t), addr, resolve_scope);
+ LLDB_INSTRUMENT_VA(this, addr, resolve_scope);
SBSymbolContext sc;
SymbolContextItem scope = static_cast<SymbolContextItem>(resolve_scope);
@@ -681,14 +645,12 @@ SBTarget::ResolveSymbolContextForAddress(const SBAddress &addr,
target_sp->GetImages().ResolveSymbolContextForAddress(addr.ref(), scope,
sc.ref());
}
- return LLDB_RECORD_RESULT(sc);
+ return sc;
}
size_t SBTarget::ReadMemory(const SBAddress addr, void *buf, size_t size,
lldb::SBError &error) {
- LLDB_RECORD_METHOD(size_t, SBTarget, ReadMemory,
- (const lldb::SBAddress, void *, size_t, lldb::SBError &),
- addr, buf, size, error);
+ LLDB_INSTRUMENT_VA(this, addr, buf, size, error);
SBError sb_error;
size_t bytes_read = 0;
@@ -706,54 +668,43 @@ size_t SBTarget::ReadMemory(const SBAddress addr, void *buf, size_t size,
SBBreakpoint SBTarget::BreakpointCreateByLocation(const char *file,
uint32_t line) {
- LLDB_RECORD_METHOD(lldb::SBBreakpoint, SBTarget, BreakpointCreateByLocation,
- (const char *, uint32_t), file, line);
+ LLDB_INSTRUMENT_VA(this, file, line);
- return LLDB_RECORD_RESULT(
- SBBreakpoint(BreakpointCreateByLocation(SBFileSpec(file, false), line)));
+ return SBBreakpoint(
+ BreakpointCreateByLocation(SBFileSpec(file, false), line));
}
SBBreakpoint
SBTarget::BreakpointCreateByLocation(const SBFileSpec &sb_file_spec,
uint32_t line) {
- LLDB_RECORD_METHOD(lldb::SBBreakpoint, SBTarget, BreakpointCreateByLocation,
- (const lldb::SBFileSpec &, uint32_t), sb_file_spec, line);
+ LLDB_INSTRUMENT_VA(this, sb_file_spec, line);
- return LLDB_RECORD_RESULT(BreakpointCreateByLocation(sb_file_spec, line, 0));
+ return BreakpointCreateByLocation(sb_file_spec, line, 0);
}
SBBreakpoint
SBTarget::BreakpointCreateByLocation(const SBFileSpec &sb_file_spec,
uint32_t line, lldb::addr_t offset) {
- LLDB_RECORD_METHOD(lldb::SBBreakpoint, SBTarget, BreakpointCreateByLocation,
- (const lldb::SBFileSpec &, uint32_t, lldb::addr_t),
- sb_file_spec, line, offset);
+ LLDB_INSTRUMENT_VA(this, sb_file_spec, line, offset);
SBFileSpecList empty_list;
- return LLDB_RECORD_RESULT(
- BreakpointCreateByLocation(sb_file_spec, line, offset, empty_list));
+ return BreakpointCreateByLocation(sb_file_spec, line, offset, empty_list);
}
SBBreakpoint
SBTarget::BreakpointCreateByLocation(const SBFileSpec &sb_file_spec,
uint32_t line, lldb::addr_t offset,
SBFileSpecList &sb_module_list) {
- LLDB_RECORD_METHOD(lldb::SBBreakpoint, SBTarget, BreakpointCreateByLocation,
- (const lldb::SBFileSpec &, uint32_t, lldb::addr_t,
- lldb::SBFileSpecList &),
- sb_file_spec, line, offset, sb_module_list);
+ LLDB_INSTRUMENT_VA(this, sb_file_spec, line, offset, sb_module_list);
- return LLDB_RECORD_RESULT(BreakpointCreateByLocation(sb_file_spec, line, 0,
- offset, sb_module_list));
+ return BreakpointCreateByLocation(sb_file_spec, line, 0, offset,
+ sb_module_list);
}
SBBreakpoint SBTarget::BreakpointCreateByLocation(
const SBFileSpec &sb_file_spec, uint32_t line, uint32_t column,
lldb::addr_t offset, SBFileSpecList &sb_module_list) {
- LLDB_RECORD_METHOD(lldb::SBBreakpoint, SBTarget, BreakpointCreateByLocation,
- (const lldb::SBFileSpec &, uint32_t, uint32_t,
- lldb::addr_t, lldb::SBFileSpecList &),
- sb_file_spec, line, column, offset, sb_module_list);
+ LLDB_INSTRUMENT_VA(this, sb_file_spec, line, column, offset, sb_module_list);
SBBreakpoint sb_bp;
TargetSP target_sp(GetSP());
@@ -774,17 +725,14 @@ SBBreakpoint SBTarget::BreakpointCreateByLocation(
skip_prologue, internal, hardware, move_to_nearest_code);
}
- return LLDB_RECORD_RESULT(sb_bp);
+ return sb_bp;
}
SBBreakpoint SBTarget::BreakpointCreateByLocation(
const SBFileSpec &sb_file_spec, uint32_t line, uint32_t column,
lldb::addr_t offset, SBFileSpecList &sb_module_list,
bool move_to_nearest_code) {
- LLDB_RECORD_METHOD(lldb::SBBreakpoint, SBTarget, BreakpointCreateByLocation,
- (const lldb::SBFileSpec &, uint32_t, uint32_t,
- lldb::addr_t, lldb::SBFileSpecList &, bool),
- sb_file_spec, line, column, offset, sb_module_list,
+ LLDB_INSTRUMENT_VA(this, sb_file_spec, line, column, offset, sb_module_list,
move_to_nearest_code);
SBBreakpoint sb_bp;
@@ -806,13 +754,12 @@ SBBreakpoint SBTarget::BreakpointCreateByLocation(
move_to_nearest_code ? eLazyBoolYes : eLazyBoolNo);
}
- return LLDB_RECORD_RESULT(sb_bp);
+ return sb_bp;
}
SBBreakpoint SBTarget::BreakpointCreateByName(const char *symbol_name,
const char *module_name) {
- LLDB_RECORD_METHOD(lldb::SBBreakpoint, SBTarget, BreakpointCreateByName,
- (const char *, const char *), symbol_name, module_name);
+ LLDB_INSTRUMENT_VA(this, symbol_name, module_name);
SBBreakpoint sb_bp;
TargetSP target_sp(GetSP());
@@ -836,47 +783,38 @@ SBBreakpoint SBTarget::BreakpointCreateByName(const char *symbol_name,
}
}
- return LLDB_RECORD_RESULT(sb_bp);
+ return sb_bp;
}
lldb::SBBreakpoint
SBTarget::BreakpointCreateByName(const char *symbol_name,
const SBFileSpecList &module_list,
const SBFileSpecList &comp_unit_list) {
- LLDB_RECORD_METHOD(lldb::SBBreakpoint, SBTarget, BreakpointCreateByName,
- (const char *, const lldb::SBFileSpecList &,
- const lldb::SBFileSpecList &),
- symbol_name, module_list, comp_unit_list);
+ LLDB_INSTRUMENT_VA(this, symbol_name, module_list, comp_unit_list);
lldb::FunctionNameType name_type_mask = eFunctionNameTypeAuto;
- return LLDB_RECORD_RESULT(
- BreakpointCreateByName(symbol_name, name_type_mask, eLanguageTypeUnknown,
- module_list, comp_unit_list));
+ return BreakpointCreateByName(symbol_name, name_type_mask,
+ eLanguageTypeUnknown, module_list,
+ comp_unit_list);
}
lldb::SBBreakpoint SBTarget::BreakpointCreateByName(
const char *symbol_name, uint32_t name_type_mask,
const SBFileSpecList &module_list, const SBFileSpecList &comp_unit_list) {
- LLDB_RECORD_METHOD(lldb::SBBreakpoint, SBTarget, BreakpointCreateByName,
- (const char *, uint32_t, const lldb::SBFileSpecList &,
- const lldb::SBFileSpecList &),
- symbol_name, name_type_mask, module_list, comp_unit_list);
+ LLDB_INSTRUMENT_VA(this, symbol_name, name_type_mask, module_list,
+ comp_unit_list);
- return LLDB_RECORD_RESULT(
- BreakpointCreateByName(symbol_name, name_type_mask, eLanguageTypeUnknown,
- module_list, comp_unit_list));
+ return BreakpointCreateByName(symbol_name, name_type_mask,
+ eLanguageTypeUnknown, module_list,
+ comp_unit_list);
}
lldb::SBBreakpoint SBTarget::BreakpointCreateByName(
const char *symbol_name, uint32_t name_type_mask,
LanguageType symbol_language, const SBFileSpecList &module_list,
const SBFileSpecList &comp_unit_list) {
- LLDB_RECORD_METHOD(lldb::SBBreakpoint, SBTarget, BreakpointCreateByName,
- (const char *, uint32_t, lldb::LanguageType,
- const lldb::SBFileSpecList &,
- const lldb::SBFileSpecList &),
- symbol_name, name_type_mask, symbol_language, module_list,
- comp_unit_list);
+ LLDB_INSTRUMENT_VA(this, symbol_name, name_type_mask, symbol_language,
+ module_list, comp_unit_list);
SBBreakpoint sb_bp;
TargetSP target_sp(GetSP());
@@ -891,49 +829,38 @@ lldb::SBBreakpoint SBTarget::BreakpointCreateByName(
skip_prologue, internal, hardware);
}
- return LLDB_RECORD_RESULT(sb_bp);
+ return sb_bp;
}
lldb::SBBreakpoint SBTarget::BreakpointCreateByNames(
const char *symbol_names[], uint32_t num_names, uint32_t name_type_mask,
const SBFileSpecList &module_list, const SBFileSpecList &comp_unit_list) {
- LLDB_RECORD_METHOD(
- lldb::SBBreakpoint, SBTarget, BreakpointCreateByNames,
- (const char **, uint32_t, uint32_t, const lldb::SBFileSpecList &,
- const lldb::SBFileSpecList &),
- symbol_names, num_names, name_type_mask, module_list, comp_unit_list);
+ LLDB_INSTRUMENT_VA(this, symbol_names, num_names, name_type_mask, module_list,
+ comp_unit_list);
- return LLDB_RECORD_RESULT(BreakpointCreateByNames(
- symbol_names, num_names, name_type_mask, eLanguageTypeUnknown,
- module_list, comp_unit_list));
+ return BreakpointCreateByNames(symbol_names, num_names, name_type_mask,
+ eLanguageTypeUnknown, module_list,
+ comp_unit_list);
}
lldb::SBBreakpoint SBTarget::BreakpointCreateByNames(
const char *symbol_names[], uint32_t num_names, uint32_t name_type_mask,
LanguageType symbol_language, const SBFileSpecList &module_list,
const SBFileSpecList &comp_unit_list) {
- LLDB_RECORD_METHOD(lldb::SBBreakpoint, SBTarget, BreakpointCreateByNames,
- (const char **, uint32_t, uint32_t, lldb::LanguageType,
- const lldb::SBFileSpecList &,
- const lldb::SBFileSpecList &),
- symbol_names, num_names, name_type_mask, symbol_language,
- module_list, comp_unit_list);
+ LLDB_INSTRUMENT_VA(this, symbol_names, num_names, name_type_mask,
+ symbol_language, module_list, comp_unit_list);
- return LLDB_RECORD_RESULT(BreakpointCreateByNames(
- symbol_names, num_names, name_type_mask, eLanguageTypeUnknown, 0,
- module_list, comp_unit_list));
+ return BreakpointCreateByNames(symbol_names, num_names, name_type_mask,
+ eLanguageTypeUnknown, 0, module_list,
+ comp_unit_list);
}
lldb::SBBreakpoint SBTarget::BreakpointCreateByNames(
const char *symbol_names[], uint32_t num_names, uint32_t name_type_mask,
LanguageType symbol_language, lldb::addr_t offset,
const SBFileSpecList &module_list, const SBFileSpecList &comp_unit_list) {
- LLDB_RECORD_METHOD(lldb::SBBreakpoint, SBTarget, BreakpointCreateByNames,
- (const char **, uint32_t, uint32_t, lldb::LanguageType,
- lldb::addr_t, const lldb::SBFileSpecList &,
- const lldb::SBFileSpecList &),
- symbol_names, num_names, name_type_mask, symbol_language,
- offset, module_list, comp_unit_list);
+ LLDB_INSTRUMENT_VA(this, symbol_names, num_names, name_type_mask,
+ symbol_language, offset, module_list, comp_unit_list);
SBBreakpoint sb_bp;
TargetSP target_sp(GetSP());
@@ -948,47 +875,37 @@ lldb::SBBreakpoint SBTarget::BreakpointCreateByNames(
symbol_language, offset, skip_prologue, internal, hardware);
}
- return LLDB_RECORD_RESULT(sb_bp);
+ return sb_bp;
}
SBBreakpoint SBTarget::BreakpointCreateByRegex(const char *symbol_name_regex,
const char *module_name) {
- LLDB_RECORD_METHOD(lldb::SBBreakpoint, SBTarget, BreakpointCreateByRegex,
- (const char *, const char *), symbol_name_regex,
- module_name);
+ LLDB_INSTRUMENT_VA(this, symbol_name_regex, module_name);
SBFileSpecList module_spec_list;
SBFileSpecList comp_unit_list;
if (module_name && module_name[0]) {
module_spec_list.Append(FileSpec(module_name));
}
- return LLDB_RECORD_RESULT(
- BreakpointCreateByRegex(symbol_name_regex, eLanguageTypeUnknown,
- module_spec_list, comp_unit_list));
+ return BreakpointCreateByRegex(symbol_name_regex, eLanguageTypeUnknown,
+ module_spec_list, comp_unit_list);
}
lldb::SBBreakpoint
SBTarget::BreakpointCreateByRegex(const char *symbol_name_regex,
const SBFileSpecList &module_list,
const SBFileSpecList &comp_unit_list) {
- LLDB_RECORD_METHOD(lldb::SBBreakpoint, SBTarget, BreakpointCreateByRegex,
- (const char *, const lldb::SBFileSpecList &,
- const lldb::SBFileSpecList &),
- symbol_name_regex, module_list, comp_unit_list);
+ LLDB_INSTRUMENT_VA(this, symbol_name_regex, module_list, comp_unit_list);
- return LLDB_RECORD_RESULT(BreakpointCreateByRegex(
- symbol_name_regex, eLanguageTypeUnknown, module_list, comp_unit_list));
+ return BreakpointCreateByRegex(symbol_name_regex, eLanguageTypeUnknown,
+ module_list, comp_unit_list);
}
lldb::SBBreakpoint SBTarget::BreakpointCreateByRegex(
const char *symbol_name_regex, LanguageType symbol_language,
const SBFileSpecList &module_list, const SBFileSpecList &comp_unit_list) {
- LLDB_RECORD_METHOD(
- lldb::SBBreakpoint, SBTarget, BreakpointCreateByRegex,
- (const char *, lldb::LanguageType, const lldb::SBFileSpecList &,
- const lldb::SBFileSpecList &),
- symbol_name_regex, symbol_language, module_list, comp_unit_list);
-
+ LLDB_INSTRUMENT_VA(this, symbol_name_regex, symbol_language, module_list,
+ comp_unit_list);
SBBreakpoint sb_bp;
TargetSP target_sp(GetSP());
@@ -1004,12 +921,11 @@ lldb::SBBreakpoint SBTarget::BreakpointCreateByRegex(
symbol_language, skip_prologue, internal, hardware);
}
- return LLDB_RECORD_RESULT(sb_bp);
+ return sb_bp;
}
SBBreakpoint SBTarget::BreakpointCreateByAddress(addr_t address) {
- LLDB_RECORD_METHOD(lldb::SBBreakpoint, SBTarget, BreakpointCreateByAddress,
- (lldb::addr_t), address);
+ LLDB_INSTRUMENT_VA(this, address);
SBBreakpoint sb_bp;
TargetSP target_sp(GetSP());
@@ -1019,17 +935,16 @@ SBBreakpoint SBTarget::BreakpointCreateByAddress(addr_t address) {
sb_bp = target_sp->CreateBreakpoint(address, false, hardware);
}
- return LLDB_RECORD_RESULT(sb_bp);
+ return sb_bp;
}
SBBreakpoint SBTarget::BreakpointCreateBySBAddress(SBAddress &sb_address) {
- LLDB_RECORD_METHOD(lldb::SBBreakpoint, SBTarget, BreakpointCreateBySBAddress,
- (lldb::SBAddress &), sb_address);
+ LLDB_INSTRUMENT_VA(this, sb_address);
SBBreakpoint sb_bp;
TargetSP target_sp(GetSP());
if (!sb_address.IsValid()) {
- return LLDB_RECORD_RESULT(sb_bp);
+ return sb_bp;
}
if (target_sp) {
@@ -1038,17 +953,14 @@ SBBreakpoint SBTarget::BreakpointCreateBySBAddress(SBAddress &sb_address) {
sb_bp = target_sp->CreateBreakpoint(sb_address.ref(), false, hardware);
}
- return LLDB_RECORD_RESULT(sb_bp);
+ return sb_bp;
}
lldb::SBBreakpoint
SBTarget::BreakpointCreateBySourceRegex(const char *source_regex,
const lldb::SBFileSpec &source_file,
const char *module_name) {
- LLDB_RECORD_METHOD(lldb::SBBreakpoint, SBTarget,
- BreakpointCreateBySourceRegex,
- (const char *, const lldb::SBFileSpec &, const char *),
- source_regex, source_file, module_name);
+ LLDB_INSTRUMENT_VA(this, source_regex, source_file, module_name);
SBFileSpecList module_spec_list;
@@ -1061,32 +973,25 @@ SBTarget::BreakpointCreateBySourceRegex(const char *source_regex,
source_file_list.Append(source_file);
}
- return LLDB_RECORD_RESULT(BreakpointCreateBySourceRegex(
- source_regex, module_spec_list, source_file_list));
+ return BreakpointCreateBySourceRegex(source_regex, module_spec_list,
+ source_file_list);
}
lldb::SBBreakpoint SBTarget::BreakpointCreateBySourceRegex(
const char *source_regex, const SBFileSpecList &module_list,
const lldb::SBFileSpecList &source_file_list) {
- LLDB_RECORD_METHOD(lldb::SBBreakpoint, SBTarget,
- BreakpointCreateBySourceRegex,
- (const char *, const lldb::SBFileSpecList &,
- const lldb::SBFileSpecList &),
- source_regex, module_list, source_file_list);
+ LLDB_INSTRUMENT_VA(this, source_regex, module_list, source_file_list);
- return LLDB_RECORD_RESULT(BreakpointCreateBySourceRegex(
- source_regex, module_list, source_file_list, SBStringList()));
+ return BreakpointCreateBySourceRegex(source_regex, module_list,
+ source_file_list, SBStringList());
}
lldb::SBBreakpoint SBTarget::BreakpointCreateBySourceRegex(
const char *source_regex, const SBFileSpecList &module_list,
const lldb::SBFileSpecList &source_file_list,
const SBStringList &func_names) {
- LLDB_RECORD_METHOD(lldb::SBBreakpoint, SBTarget,
- BreakpointCreateBySourceRegex,
- (const char *, const lldb::SBFileSpecList &,
- const lldb::SBFileSpecList &, const lldb::SBStringList &),
- source_regex, module_list, source_file_list, func_names);
+ LLDB_INSTRUMENT_VA(this, source_regex, module_list, source_file_list,
+ func_names);
SBBreakpoint sb_bp;
TargetSP target_sp(GetSP());
@@ -1105,15 +1010,13 @@ lldb::SBBreakpoint SBTarget::BreakpointCreateBySourceRegex(
std::move(regexp), false, hardware, move_to_nearest_code);
}
- return LLDB_RECORD_RESULT(sb_bp);
+ return sb_bp;
}
lldb::SBBreakpoint
SBTarget::BreakpointCreateForException(lldb::LanguageType language,
bool catch_bp, bool throw_bp) {
- LLDB_RECORD_METHOD(lldb::SBBreakpoint, SBTarget, BreakpointCreateForException,
- (lldb::LanguageType, bool, bool), language, catch_bp,
- throw_bp);
+ LLDB_INSTRUMENT_VA(this, language, catch_bp, throw_bp);
SBBreakpoint sb_bp;
TargetSP target_sp(GetSP());
@@ -1124,18 +1027,15 @@ SBTarget::BreakpointCreateForException(lldb::LanguageType language,
hardware);
}
- return LLDB_RECORD_RESULT(sb_bp);
+ return sb_bp;
}
lldb::SBBreakpoint SBTarget::BreakpointCreateFromScript(
const char *class_name, SBStructuredData &extra_args,
const SBFileSpecList &module_list, const SBFileSpecList &file_list,
bool request_hardware) {
- LLDB_RECORD_METHOD(
- lldb::SBBreakpoint, SBTarget, BreakpointCreateFromScript,
- (const char *, lldb::SBStructuredData &, const lldb::SBFileSpecList &,
- const lldb::SBFileSpecList &, bool),
- class_name, extra_args, module_list, file_list, request_hardware);
+ LLDB_INSTRUMENT_VA(this, class_name, extra_args, module_list, file_list,
+ request_hardware);
SBBreakpoint sb_bp;
TargetSP target_sp(GetSP());
@@ -1154,11 +1054,11 @@ lldb::SBBreakpoint SBTarget::BreakpointCreateFromScript(
&error);
}
- return LLDB_RECORD_RESULT(sb_bp);
+ return sb_bp;
}
uint32_t SBTarget::GetNumBreakpoints() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBTarget, GetNumBreakpoints);
+ LLDB_INSTRUMENT_VA(this);
TargetSP target_sp(GetSP());
if (target_sp) {
@@ -1169,8 +1069,7 @@ uint32_t SBTarget::GetNumBreakpoints() const {
}
SBBreakpoint SBTarget::GetBreakpointAtIndex(uint32_t idx) const {
- LLDB_RECORD_METHOD_CONST(lldb::SBBreakpoint, SBTarget, GetBreakpointAtIndex,
- (uint32_t), idx);
+ LLDB_INSTRUMENT_VA(this, idx);
SBBreakpoint sb_breakpoint;
TargetSP target_sp(GetSP());
@@ -1178,12 +1077,11 @@ SBBreakpoint SBTarget::GetBreakpointAtIndex(uint32_t idx) const {
// The breakpoint list is thread safe, no need to lock
sb_breakpoint = target_sp->GetBreakpointList().GetBreakpointAtIndex(idx);
}
- return LLDB_RECORD_RESULT(sb_breakpoint);
+ return sb_breakpoint;
}
bool SBTarget::BreakpointDelete(break_id_t bp_id) {
- LLDB_RECORD_METHOD(bool, SBTarget, BreakpointDelete, (lldb::break_id_t),
- bp_id);
+ LLDB_INSTRUMENT_VA(this, bp_id);
bool result = false;
TargetSP target_sp(GetSP());
@@ -1196,8 +1094,7 @@ bool SBTarget::BreakpointDelete(break_id_t bp_id) {
}
SBBreakpoint SBTarget::FindBreakpointByID(break_id_t bp_id) {
- LLDB_RECORD_METHOD(lldb::SBBreakpoint, SBTarget, FindBreakpointByID,
- (lldb::break_id_t), bp_id);
+ LLDB_INSTRUMENT_VA(this, bp_id);
SBBreakpoint sb_breakpoint;
TargetSP target_sp(GetSP());
@@ -1206,13 +1103,12 @@ SBBreakpoint SBTarget::FindBreakpointByID(break_id_t bp_id) {
sb_breakpoint = target_sp->GetBreakpointByID(bp_id);
}
- return LLDB_RECORD_RESULT(sb_breakpoint);
+ return sb_breakpoint;
}
bool SBTarget::FindBreakpointsByName(const char *name,
SBBreakpointList &bkpts) {
- LLDB_RECORD_METHOD(bool, SBTarget, FindBreakpointsByName,
- (const char *, lldb::SBBreakpointList &), name, bkpts);
+ LLDB_INSTRUMENT_VA(this, name, bkpts);
TargetSP target_sp(GetSP());
if (target_sp) {
@@ -1233,8 +1129,7 @@ bool SBTarget::FindBreakpointsByName(const char *name,
}
void SBTarget::GetBreakpointNames(SBStringList &names) {
- LLDB_RECORD_METHOD(void, SBTarget, GetBreakpointNames, (lldb::SBStringList &),
- names);
+ LLDB_INSTRUMENT_VA(this, names);
names.Clear();
@@ -1250,8 +1145,7 @@ void SBTarget::GetBreakpointNames(SBStringList &names) {
}
void SBTarget::DeleteBreakpointName(const char *name) {
- LLDB_RECORD_METHOD(void, SBTarget, DeleteBreakpointName, (const char *),
- name);
+ LLDB_INSTRUMENT_VA(this, name);
TargetSP target_sp(GetSP());
if (target_sp) {
@@ -1261,7 +1155,7 @@ void SBTarget::DeleteBreakpointName(const char *name) {
}
bool SBTarget::EnableAllBreakpoints() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBTarget, EnableAllBreakpoints);
+ LLDB_INSTRUMENT_VA(this);
TargetSP target_sp(GetSP());
if (target_sp) {
@@ -1273,7 +1167,7 @@ bool SBTarget::EnableAllBreakpoints() {
}
bool SBTarget::DisableAllBreakpoints() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBTarget, DisableAllBreakpoints);
+ LLDB_INSTRUMENT_VA(this);
TargetSP target_sp(GetSP());
if (target_sp) {
@@ -1285,7 +1179,7 @@ bool SBTarget::DisableAllBreakpoints() {
}
bool SBTarget::DeleteAllBreakpoints() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBTarget, DeleteAllBreakpoints);
+ LLDB_INSTRUMENT_VA(this);
TargetSP target_sp(GetSP());
if (target_sp) {
@@ -1298,29 +1192,23 @@ bool SBTarget::DeleteAllBreakpoints() {
lldb::SBError SBTarget::BreakpointsCreateFromFile(SBFileSpec &source_file,
SBBreakpointList &new_bps) {
- LLDB_RECORD_METHOD(lldb::SBError, SBTarget, BreakpointsCreateFromFile,
- (lldb::SBFileSpec &, lldb::SBBreakpointList &),
- source_file, new_bps);
+ LLDB_INSTRUMENT_VA(this, source_file, new_bps);
SBStringList empty_name_list;
- return LLDB_RECORD_RESULT(
- BreakpointsCreateFromFile(source_file, empty_name_list, new_bps));
+ return BreakpointsCreateFromFile(source_file, empty_name_list, new_bps);
}
lldb::SBError SBTarget::BreakpointsCreateFromFile(SBFileSpec &source_file,
SBStringList &matching_names,
SBBreakpointList &new_bps) {
- LLDB_RECORD_METHOD(
- lldb::SBError, SBTarget, BreakpointsCreateFromFile,
- (lldb::SBFileSpec &, lldb::SBStringList &, lldb::SBBreakpointList &),
- source_file, matching_names, new_bps);
+ LLDB_INSTRUMENT_VA(this, source_file, matching_names, new_bps);
SBError sberr;
TargetSP target_sp(GetSP());
if (!target_sp) {
sberr.SetErrorString(
"BreakpointCreateFromFile called with invalid target.");
- return LLDB_RECORD_RESULT(sberr);
+ return sberr;
}
std::lock_guard<std::recursive_mutex> guard(target_sp->GetAPIMutex());
@@ -1334,42 +1222,39 @@ lldb::SBError SBTarget::BreakpointsCreateFromFile(SBFileSpec &source_file,
sberr.ref() = target_sp->CreateBreakpointsFromFile(source_file.ref(),
name_vector, bp_ids);
if (sberr.Fail())
- return LLDB_RECORD_RESULT(sberr);
+ return sberr;
size_t num_bkpts = bp_ids.GetSize();
for (size_t i = 0; i < num_bkpts; i++) {
BreakpointID bp_id = bp_ids.GetBreakpointIDAtIndex(i);
new_bps.AppendByID(bp_id.GetBreakpointID());
}
- return LLDB_RECORD_RESULT(sberr);
+ return sberr;
}
lldb::SBError SBTarget::BreakpointsWriteToFile(SBFileSpec &dest_file) {
- LLDB_RECORD_METHOD(lldb::SBError, SBTarget, BreakpointsWriteToFile,
- (lldb::SBFileSpec &), dest_file);
+ LLDB_INSTRUMENT_VA(this, dest_file);
SBError sberr;
TargetSP target_sp(GetSP());
if (!target_sp) {
sberr.SetErrorString("BreakpointWriteToFile called with invalid target.");
- return LLDB_RECORD_RESULT(sberr);
+ return sberr;
}
SBBreakpointList bkpt_list(*this);
- return LLDB_RECORD_RESULT(BreakpointsWriteToFile(dest_file, bkpt_list));
+ return BreakpointsWriteToFile(dest_file, bkpt_list);
}
lldb::SBError SBTarget::BreakpointsWriteToFile(SBFileSpec &dest_file,
SBBreakpointList &bkpt_list,
bool append) {
- LLDB_RECORD_METHOD(lldb::SBError, SBTarget, BreakpointsWriteToFile,
- (lldb::SBFileSpec &, lldb::SBBreakpointList &, bool),
- dest_file, bkpt_list, append);
+ LLDB_INSTRUMENT_VA(this, dest_file, bkpt_list, append);
SBError sberr;
TargetSP target_sp(GetSP());
if (!target_sp) {
sberr.SetErrorString("BreakpointWriteToFile called with invalid target.");
- return LLDB_RECORD_RESULT(sberr);
+ return sberr;
}
std::lock_guard<std::recursive_mutex> guard(target_sp->GetAPIMutex());
@@ -1377,11 +1262,11 @@ lldb::SBError SBTarget::BreakpointsWriteToFile(SBFileSpec &dest_file,
bkpt_list.CopyToBreakpointIDList(bp_id_list);
sberr.ref() = target_sp->SerializeBreakpointsToFile(dest_file.ref(),
bp_id_list, append);
- return LLDB_RECORD_RESULT(sberr);
+ return sberr;
}
uint32_t SBTarget::GetNumWatchpoints() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBTarget, GetNumWatchpoints);
+ LLDB_INSTRUMENT_VA(this);
TargetSP target_sp(GetSP());
if (target_sp) {
@@ -1392,8 +1277,7 @@ uint32_t SBTarget::GetNumWatchpoints() const {
}
SBWatchpoint SBTarget::GetWatchpointAtIndex(uint32_t idx) const {
- LLDB_RECORD_METHOD_CONST(lldb::SBWatchpoint, SBTarget, GetWatchpointAtIndex,
- (uint32_t), idx);
+ LLDB_INSTRUMENT_VA(this, idx);
SBWatchpoint sb_watchpoint;
TargetSP target_sp(GetSP());
@@ -1401,13 +1285,11 @@ SBWatchpoint SBTarget::GetWatchpointAtIndex(uint32_t idx) const {
// The watchpoint list is thread safe, no need to lock
sb_watchpoint.SetSP(target_sp->GetWatchpointList().GetByIndex(idx));
}
- return LLDB_RECORD_RESULT(sb_watchpoint);
+ return sb_watchpoint;
}
bool SBTarget::DeleteWatchpoint(watch_id_t wp_id) {
- LLDB_RECORD_METHOD(bool, SBTarget, DeleteWatchpoint, (lldb::watch_id_t),
- wp_id);
-
+ LLDB_INSTRUMENT_VA(this, wp_id);
bool result = false;
TargetSP target_sp(GetSP());
@@ -1422,9 +1304,7 @@ bool SBTarget::DeleteWatchpoint(watch_id_t wp_id) {
}
SBWatchpoint SBTarget::FindWatchpointByID(lldb::watch_id_t wp_id) {
- LLDB_RECORD_METHOD(lldb::SBWatchpoint, SBTarget, FindWatchpointByID,
- (lldb::watch_id_t), wp_id);
-
+ LLDB_INSTRUMENT_VA(this, wp_id);
SBWatchpoint sb_watchpoint;
lldb::WatchpointSP watchpoint_sp;
@@ -1437,15 +1317,13 @@ SBWatchpoint SBTarget::FindWatchpointByID(lldb::watch_id_t wp_id) {
sb_watchpoint.SetSP(watchpoint_sp);
}
- return LLDB_RECORD_RESULT(sb_watchpoint);
+ return sb_watchpoint;
}
lldb::SBWatchpoint SBTarget::WatchAddress(lldb::addr_t addr, size_t size,
bool read, bool write,
SBError &error) {
- LLDB_RECORD_METHOD(lldb::SBWatchpoint, SBTarget, WatchAddress,
- (lldb::addr_t, size_t, bool, bool, lldb::SBError &), addr,
- size, read, write, error);
+ LLDB_INSTRUMENT_VA(this, addr, size, read, write, error);
SBWatchpoint sb_watchpoint;
lldb::WatchpointSP watchpoint_sp;
@@ -1461,7 +1339,7 @@ lldb::SBWatchpoint SBTarget::WatchAddress(lldb::addr_t addr, size_t size,
if (watch_type == 0) {
error.SetErrorString(
"Can't create a watchpoint that is neither read nor write.");
- return LLDB_RECORD_RESULT(sb_watchpoint);
+ return sb_watchpoint;
}
// Target::CreateWatchpoint() is thread safe.
@@ -1474,11 +1352,11 @@ lldb::SBWatchpoint SBTarget::WatchAddress(lldb::addr_t addr, size_t size,
sb_watchpoint.SetSP(watchpoint_sp);
}
- return LLDB_RECORD_RESULT(sb_watchpoint);
+ return sb_watchpoint;
}
bool SBTarget::EnableAllWatchpoints() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBTarget, EnableAllWatchpoints);
+ LLDB_INSTRUMENT_VA(this);
TargetSP target_sp(GetSP());
if (target_sp) {
@@ -1492,7 +1370,7 @@ bool SBTarget::EnableAllWatchpoints() {
}
bool SBTarget::DisableAllWatchpoints() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBTarget, DisableAllWatchpoints);
+ LLDB_INSTRUMENT_VA(this);
TargetSP target_sp(GetSP());
if (target_sp) {
@@ -1507,9 +1385,7 @@ bool SBTarget::DisableAllWatchpoints() {
SBValue SBTarget::CreateValueFromAddress(const char *name, SBAddress addr,
SBType type) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBTarget, CreateValueFromAddress,
- (const char *, lldb::SBAddress, lldb::SBType), name, addr,
- type);
+ LLDB_INSTRUMENT_VA(this, name, addr, type);
SBValue sb_value;
lldb::ValueObjectSP new_value_sp;
@@ -1522,14 +1398,12 @@ SBValue SBTarget::CreateValueFromAddress(const char *name, SBAddress addr,
exe_ctx, ast_type);
}
sb_value.SetSP(new_value_sp);
- return LLDB_RECORD_RESULT(sb_value);
+ return sb_value;
}
lldb::SBValue SBTarget::CreateValueFromData(const char *name, lldb::SBData data,
lldb::SBType type) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBTarget, CreateValueFromData,
- (const char *, lldb::SBData, lldb::SBType), name, data,
- type);
+ LLDB_INSTRUMENT_VA(this, name, data, type);
SBValue sb_value;
lldb::ValueObjectSP new_value_sp;
@@ -1542,13 +1416,12 @@ lldb::SBValue SBTarget::CreateValueFromData(const char *name, lldb::SBData data,
exe_ctx, ast_type);
}
sb_value.SetSP(new_value_sp);
- return LLDB_RECORD_RESULT(sb_value);
+ return sb_value;
}
lldb::SBValue SBTarget::CreateValueFromExpression(const char *name,
const char *expr) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBTarget, CreateValueFromExpression,
- (const char *, const char *), name, expr);
+ LLDB_INSTRUMENT_VA(this, name, expr);
SBValue sb_value;
lldb::ValueObjectSP new_value_sp;
@@ -1559,11 +1432,11 @@ lldb::SBValue SBTarget::CreateValueFromExpression(const char *name,
ValueObject::CreateValueObjectFromExpression(name, expr, exe_ctx);
}
sb_value.SetSP(new_value_sp);
- return LLDB_RECORD_RESULT(sb_value);
+ return sb_value;
}
bool SBTarget::DeleteAllWatchpoints() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBTarget, DeleteAllWatchpoints);
+ LLDB_INSTRUMENT_VA(this);
TargetSP target_sp(GetSP());
if (target_sp) {
@@ -1578,9 +1451,7 @@ bool SBTarget::DeleteAllWatchpoints() {
void SBTarget::AppendImageSearchPath(const char *from, const char *to,
lldb::SBError &error) {
- LLDB_RECORD_METHOD(void, SBTarget, AppendImageSearchPath,
- (const char *, const char *, lldb::SBError &), from, to,
- error);
+ LLDB_INSTRUMENT_VA(this, from, to, error);
TargetSP target_sp(GetSP());
if (!target_sp)
@@ -1597,18 +1468,14 @@ void SBTarget::AppendImageSearchPath(const char *from, const char *to,
lldb::SBModule SBTarget::AddModule(const char *path, const char *triple,
const char *uuid_cstr) {
- LLDB_RECORD_METHOD(lldb::SBModule, SBTarget, AddModule,
- (const char *, const char *, const char *), path, triple,
- uuid_cstr);
+ LLDB_INSTRUMENT_VA(this, path, triple, uuid_cstr);
- return LLDB_RECORD_RESULT(AddModule(path, triple, uuid_cstr, nullptr));
+ return AddModule(path, triple, uuid_cstr, nullptr);
}
lldb::SBModule SBTarget::AddModule(const char *path, const char *triple,
const char *uuid_cstr, const char *symfile) {
- LLDB_RECORD_METHOD(lldb::SBModule, SBTarget, AddModule,
- (const char *, const char *, const char *, const char *),
- path, triple, uuid_cstr, symfile);
+ LLDB_INSTRUMENT_VA(this, path, triple, uuid_cstr, symfile);
lldb::SBModule sb_module;
TargetSP target_sp(GetSP());
@@ -1631,23 +1498,22 @@ lldb::SBModule SBTarget::AddModule(const char *path, const char *triple,
sb_module.SetSP(target_sp->GetOrCreateModule(module_spec, true /* notify */));
}
- return LLDB_RECORD_RESULT(sb_module);
+ return sb_module;
}
lldb::SBModule SBTarget::AddModule(const SBModuleSpec &module_spec) {
- LLDB_RECORD_METHOD(lldb::SBModule, SBTarget, AddModule,
- (const lldb::SBModuleSpec &), module_spec);
+ LLDB_INSTRUMENT_VA(this, module_spec);
lldb::SBModule sb_module;
TargetSP target_sp(GetSP());
if (target_sp)
sb_module.SetSP(target_sp->GetOrCreateModule(*module_spec.m_opaque_up,
true /* notify */));
- return LLDB_RECORD_RESULT(sb_module);
+ return sb_module;
}
bool SBTarget::AddModule(lldb::SBModule &module) {
- LLDB_RECORD_METHOD(bool, SBTarget, AddModule, (lldb::SBModule &), module);
+ LLDB_INSTRUMENT_VA(this, module);
TargetSP target_sp(GetSP());
if (target_sp) {
@@ -1658,7 +1524,7 @@ bool SBTarget::AddModule(lldb::SBModule &module) {
}
uint32_t SBTarget::GetNumModules() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBTarget, GetNumModules);
+ LLDB_INSTRUMENT_VA(this);
uint32_t num = 0;
TargetSP target_sp(GetSP());
@@ -1671,14 +1537,13 @@ uint32_t SBTarget::GetNumModules() const {
}
void SBTarget::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBTarget, Clear);
+ LLDB_INSTRUMENT_VA(this);
m_opaque_sp.reset();
}
SBModule SBTarget::FindModule(const SBFileSpec &sb_file_spec) {
- LLDB_RECORD_METHOD(lldb::SBModule, SBTarget, FindModule,
- (const lldb::SBFileSpec &), sb_file_spec);
+ LLDB_INSTRUMENT_VA(this, sb_file_spec);
SBModule sb_module;
TargetSP target_sp(GetSP());
@@ -1687,22 +1552,21 @@ SBModule SBTarget::FindModule(const SBFileSpec &sb_file_spec) {
// The module list is thread safe, no need to lock
sb_module.SetSP(target_sp->GetImages().FindFirstModule(module_spec));
}
- return LLDB_RECORD_RESULT(sb_module);
+ return sb_module;
}
SBSymbolContextList SBTarget::FindCompileUnits(const SBFileSpec &sb_file_spec) {
- LLDB_RECORD_METHOD(lldb::SBSymbolContextList, SBTarget, FindCompileUnits,
- (const lldb::SBFileSpec &), sb_file_spec);
+ LLDB_INSTRUMENT_VA(this, sb_file_spec);
SBSymbolContextList sb_sc_list;
const TargetSP target_sp(GetSP());
if (target_sp && sb_file_spec.IsValid())
target_sp->GetImages().FindCompileUnits(*sb_file_spec, *sb_sc_list);
- return LLDB_RECORD_RESULT(sb_sc_list);
+ return sb_sc_list;
}
lldb::ByteOrder SBTarget::GetByteOrder() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::ByteOrder, SBTarget, GetByteOrder);
+ LLDB_INSTRUMENT_VA(this);
TargetSP target_sp(GetSP());
if (target_sp)
@@ -1711,7 +1575,7 @@ lldb::ByteOrder SBTarget::GetByteOrder() {
}
const char *SBTarget::GetTriple() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBTarget, GetTriple);
+ LLDB_INSTRUMENT_VA(this);
TargetSP target_sp(GetSP());
if (target_sp) {
@@ -1726,7 +1590,7 @@ const char *SBTarget::GetTriple() {
}
uint32_t SBTarget::GetDataByteSize() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBTarget, GetDataByteSize);
+ LLDB_INSTRUMENT_VA(this);
TargetSP target_sp(GetSP());
if (target_sp) {
@@ -1736,7 +1600,7 @@ uint32_t SBTarget::GetDataByteSize() {
}
uint32_t SBTarget::GetCodeByteSize() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBTarget, GetCodeByteSize);
+ LLDB_INSTRUMENT_VA(this);
TargetSP target_sp(GetSP());
if (target_sp) {
@@ -1746,7 +1610,7 @@ uint32_t SBTarget::GetCodeByteSize() {
}
uint32_t SBTarget::GetMaximumNumberOfChildrenToDisplay() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBTarget, GetMaximumNumberOfChildrenToDisplay);
+ LLDB_INSTRUMENT_VA(this);
TargetSP target_sp(GetSP());
if(target_sp){
@@ -1756,7 +1620,7 @@ uint32_t SBTarget::GetMaximumNumberOfChildrenToDisplay() const {
}
uint32_t SBTarget::GetAddressByteSize() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBTarget, GetAddressByteSize);
+ LLDB_INSTRUMENT_VA(this);
TargetSP target_sp(GetSP());
if (target_sp)
@@ -1765,8 +1629,7 @@ uint32_t SBTarget::GetAddressByteSize() {
}
SBModule SBTarget::GetModuleAtIndex(uint32_t idx) {
- LLDB_RECORD_METHOD(lldb::SBModule, SBTarget, GetModuleAtIndex, (uint32_t),
- idx);
+ LLDB_INSTRUMENT_VA(this, idx);
SBModule sb_module;
ModuleSP module_sp;
@@ -1777,11 +1640,11 @@ SBModule SBTarget::GetModuleAtIndex(uint32_t idx) {
sb_module.SetSP(module_sp);
}
- return LLDB_RECORD_RESULT(sb_module);
+ return sb_module;
}
bool SBTarget::RemoveModule(lldb::SBModule module) {
- LLDB_RECORD_METHOD(bool, SBTarget, RemoveModule, (lldb::SBModule), module);
+ LLDB_INSTRUMENT_VA(this, module);
TargetSP target_sp(GetSP());
if (target_sp)
@@ -1790,22 +1653,17 @@ bool SBTarget::RemoveModule(lldb::SBModule module) {
}
SBBroadcaster SBTarget::GetBroadcaster() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBBroadcaster, SBTarget,
- GetBroadcaster);
-
+ LLDB_INSTRUMENT_VA(this);
TargetSP target_sp(GetSP());
SBBroadcaster broadcaster(target_sp.get(), false);
-
- return LLDB_RECORD_RESULT(broadcaster);
+ return broadcaster;
}
bool SBTarget::GetDescription(SBStream &description,
lldb::DescriptionLevel description_level) {
- LLDB_RECORD_METHOD(bool, SBTarget, GetDescription,
- (lldb::SBStream &, lldb::DescriptionLevel), description,
- description_level);
+ LLDB_INSTRUMENT_VA(this, description, description_level);
Stream &strm = description.ref();
@@ -1820,16 +1678,15 @@ bool SBTarget::GetDescription(SBStream &description,
lldb::SBSymbolContextList SBTarget::FindFunctions(const char *name,
uint32_t name_type_mask) {
- LLDB_RECORD_METHOD(lldb::SBSymbolContextList, SBTarget, FindFunctions,
- (const char *, uint32_t), name, name_type_mask);
+ LLDB_INSTRUMENT_VA(this, name, name_type_mask);
lldb::SBSymbolContextList sb_sc_list;
if (!name || !name[0])
- return LLDB_RECORD_RESULT(sb_sc_list);
+ return sb_sc_list;
TargetSP target_sp(GetSP());
if (!target_sp)
- return LLDB_RECORD_RESULT(sb_sc_list);
+ return sb_sc_list;
ModuleFunctionSearchOptions function_options;
function_options.include_symbols = true;
@@ -1838,15 +1695,13 @@ lldb::SBSymbolContextList SBTarget::FindFunctions(const char *name,
FunctionNameType mask = static_cast<FunctionNameType>(name_type_mask);
target_sp->GetImages().FindFunctions(ConstString(name), mask,
function_options, *sb_sc_list);
- return LLDB_RECORD_RESULT(sb_sc_list);
+ return sb_sc_list;
}
lldb::SBSymbolContextList SBTarget::FindGlobalFunctions(const char *name,
uint32_t max_matches,
MatchType matchtype) {
- LLDB_RECORD_METHOD(lldb::SBSymbolContextList, SBTarget, FindGlobalFunctions,
- (const char *, uint32_t, lldb::MatchType), name,
- max_matches, matchtype);
+ LLDB_INSTRUMENT_VA(this, name, max_matches, matchtype);
lldb::SBSymbolContextList sb_sc_list;
if (name && name[0]) {
@@ -1876,12 +1731,11 @@ lldb::SBSymbolContextList SBTarget::FindGlobalFunctions(const char *name,
}
}
}
- return LLDB_RECORD_RESULT(sb_sc_list);
+ return sb_sc_list;
}
lldb::SBType SBTarget::FindFirstType(const char *typename_cstr) {
- LLDB_RECORD_METHOD(lldb::SBType, SBTarget, FindFirstType, (const char *),
- typename_cstr);
+ LLDB_INSTRUMENT_VA(this, typename_cstr);
TargetSP target_sp(GetSP());
if (typename_cstr && typename_cstr[0] && target_sp) {
@@ -1897,7 +1751,7 @@ lldb::SBType SBTarget::FindFirstType(const char *typename_cstr) {
TypeSP type_sp(
module_sp->FindFirstType(sc, const_typename, exact_match));
if (type_sp)
- return LLDB_RECORD_RESULT(SBType(type_sp));
+ return SBType(type_sp);
}
}
@@ -1907,7 +1761,7 @@ lldb::SBType SBTarget::FindFirstType(const char *typename_cstr) {
if (auto vendor = runtime->GetDeclVendor()) {
auto types = vendor->FindTypes(const_typename, /*max_matches*/ 1);
if (!types.empty())
- return LLDB_RECORD_RESULT(SBType(types.front()));
+ return SBType(types.front());
}
}
}
@@ -1915,28 +1769,26 @@ lldb::SBType SBTarget::FindFirstType(const char *typename_cstr) {
// No matches, search for basic typename matches
for (auto *type_system : target_sp->GetScratchTypeSystems())
if (auto type = type_system->GetBuiltinTypeByName(const_typename))
- return LLDB_RECORD_RESULT(SBType(type));
+ return SBType(type);
}
- return LLDB_RECORD_RESULT(SBType());
+ return SBType();
}
SBType SBTarget::GetBasicType(lldb::BasicType type) {
- LLDB_RECORD_METHOD(lldb::SBType, SBTarget, GetBasicType, (lldb::BasicType),
- type);
+ LLDB_INSTRUMENT_VA(this, type);
TargetSP target_sp(GetSP());
if (target_sp) {
for (auto *type_system : target_sp->GetScratchTypeSystems())
if (auto compiler_type = type_system->GetBasicTypeFromAST(type))
- return LLDB_RECORD_RESULT(SBType(compiler_type));
+ return SBType(compiler_type);
}
- return LLDB_RECORD_RESULT(SBType());
+ return SBType();
}
lldb::SBTypeList SBTarget::FindTypes(const char *typename_cstr) {
- LLDB_RECORD_METHOD(lldb::SBTypeList, SBTarget, FindTypes, (const char *),
- typename_cstr);
+ LLDB_INSTRUMENT_VA(this, typename_cstr);
SBTypeList sb_type_list;
TargetSP target_sp(GetSP());
@@ -1975,13 +1827,12 @@ lldb::SBTypeList SBTarget::FindTypes(const char *typename_cstr) {
sb_type_list.Append(SBType(compiler_type));
}
}
- return LLDB_RECORD_RESULT(sb_type_list);
+ return sb_type_list;
}
SBValueList SBTarget::FindGlobalVariables(const char *name,
uint32_t max_matches) {
- LLDB_RECORD_METHOD(lldb::SBValueList, SBTarget, FindGlobalVariables,
- (const char *, uint32_t), name, max_matches);
+ LLDB_INSTRUMENT_VA(this, name, max_matches);
SBValueList sb_value_list;
@@ -2003,15 +1854,13 @@ SBValueList SBTarget::FindGlobalVariables(const char *name,
}
}
- return LLDB_RECORD_RESULT(sb_value_list);
+ return sb_value_list;
}
SBValueList SBTarget::FindGlobalVariables(const char *name,
uint32_t max_matches,
MatchType matchtype) {
- LLDB_RECORD_METHOD(lldb::SBValueList, SBTarget, FindGlobalVariables,
- (const char *, uint32_t, lldb::MatchType), name,
- max_matches, matchtype);
+ LLDB_INSTRUMENT_VA(this, name, max_matches, matchtype);
SBValueList sb_value_list;
@@ -2049,40 +1898,36 @@ SBValueList SBTarget::FindGlobalVariables(const char *name,
}
}
- return LLDB_RECORD_RESULT(sb_value_list);
+ return sb_value_list;
}
lldb::SBValue SBTarget::FindFirstGlobalVariable(const char *name) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBTarget, FindFirstGlobalVariable,
- (const char *), name);
+ LLDB_INSTRUMENT_VA(this, name);
SBValueList sb_value_list(FindGlobalVariables(name, 1));
if (sb_value_list.IsValid() && sb_value_list.GetSize() > 0)
- return LLDB_RECORD_RESULT(sb_value_list.GetValueAtIndex(0));
- return LLDB_RECORD_RESULT(SBValue());
+ return sb_value_list.GetValueAtIndex(0);
+ return SBValue();
}
SBSourceManager SBTarget::GetSourceManager() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBSourceManager, SBTarget, GetSourceManager);
+ LLDB_INSTRUMENT_VA(this);
SBSourceManager source_manager(*this);
- return LLDB_RECORD_RESULT(source_manager);
+ return source_manager;
}
lldb::SBInstructionList SBTarget::ReadInstructions(lldb::SBAddress base_addr,
uint32_t count) {
- LLDB_RECORD_METHOD(lldb::SBInstructionList, SBTarget, ReadInstructions,
- (lldb::SBAddress, uint32_t), base_addr, count);
+ LLDB_INSTRUMENT_VA(this, base_addr, count);
- return LLDB_RECORD_RESULT(ReadInstructions(base_addr, count, nullptr));
+ return ReadInstructions(base_addr, count, nullptr);
}
lldb::SBInstructionList SBTarget::ReadInstructions(lldb::SBAddress base_addr,
uint32_t count,
const char *flavor_string) {
- LLDB_RECORD_METHOD(lldb::SBInstructionList, SBTarget, ReadInstructions,
- (lldb::SBAddress, uint32_t, const char *), base_addr,
- count, flavor_string);
+ LLDB_INSTRUMENT_VA(this, base_addr, count, flavor_string);
SBInstructionList sb_instructions;
@@ -2106,28 +1951,22 @@ lldb::SBInstructionList SBTarget::ReadInstructions(lldb::SBAddress base_addr,
}
}
- return LLDB_RECORD_RESULT(sb_instructions);
+ return sb_instructions;
}
lldb::SBInstructionList SBTarget::GetInstructions(lldb::SBAddress base_addr,
const void *buf,
size_t size) {
- LLDB_RECORD_METHOD(lldb::SBInstructionList, SBTarget, GetInstructions,
- (lldb::SBAddress, const void *, size_t), base_addr, buf,
- size);
+ LLDB_INSTRUMENT_VA(this, base_addr, buf, size);
- return LLDB_RECORD_RESULT(
- GetInstructionsWithFlavor(base_addr, nullptr, buf, size));
+ return GetInstructionsWithFlavor(base_addr, nullptr, buf, size);
}
lldb::SBInstructionList
SBTarget::GetInstructionsWithFlavor(lldb::SBAddress base_addr,
const char *flavor_string, const void *buf,
size_t size) {
- LLDB_RECORD_METHOD(lldb::SBInstructionList, SBTarget,
- GetInstructionsWithFlavor,
- (lldb::SBAddress, const char *, const void *, size_t),
- base_addr, flavor_string, buf, size);
+ LLDB_INSTRUMENT_VA(this, base_addr, flavor_string, buf, size);
SBInstructionList sb_instructions;
@@ -2145,38 +1984,31 @@ SBTarget::GetInstructionsWithFlavor(lldb::SBAddress base_addr,
UINT32_MAX, data_from_file));
}
- return LLDB_RECORD_RESULT(sb_instructions);
+ return sb_instructions;
}
lldb::SBInstructionList SBTarget::GetInstructions(lldb::addr_t base_addr,
const void *buf,
size_t size) {
- LLDB_RECORD_METHOD(lldb::SBInstructionList, SBTarget, GetInstructions,
- (lldb::addr_t, const void *, size_t), base_addr, buf,
- size);
+ LLDB_INSTRUMENT_VA(this, base_addr, buf, size);
- return LLDB_RECORD_RESULT(GetInstructionsWithFlavor(
- ResolveLoadAddress(base_addr), nullptr, buf, size));
+ return GetInstructionsWithFlavor(ResolveLoadAddress(base_addr), nullptr, buf,
+ size);
}
lldb::SBInstructionList
SBTarget::GetInstructionsWithFlavor(lldb::addr_t base_addr,
const char *flavor_string, const void *buf,
size_t size) {
- LLDB_RECORD_METHOD(lldb::SBInstructionList, SBTarget,
- GetInstructionsWithFlavor,
- (lldb::addr_t, const char *, const void *, size_t),
- base_addr, flavor_string, buf, size);
+ LLDB_INSTRUMENT_VA(this, base_addr, flavor_string, buf, size);
- return LLDB_RECORD_RESULT(GetInstructionsWithFlavor(
- ResolveLoadAddress(base_addr), flavor_string, buf, size));
+ return GetInstructionsWithFlavor(ResolveLoadAddress(base_addr), flavor_string,
+ buf, size);
}
SBError SBTarget::SetSectionLoadAddress(lldb::SBSection section,
lldb::addr_t section_base_addr) {
- LLDB_RECORD_METHOD(lldb::SBError, SBTarget, SetSectionLoadAddress,
- (lldb::SBSection, lldb::addr_t), section,
- section_base_addr);
+ LLDB_INSTRUMENT_VA(this, section, section_base_addr);
SBError sb_error;
TargetSP target_sp(GetSP());
@@ -2208,12 +2040,11 @@ SBError SBTarget::SetSectionLoadAddress(lldb::SBSection section,
} else {
sb_error.SetErrorString("invalid target");
}
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
SBError SBTarget::ClearSectionLoadAddress(lldb::SBSection section) {
- LLDB_RECORD_METHOD(lldb::SBError, SBTarget, ClearSectionLoadAddress,
- (lldb::SBSection), section);
+ LLDB_INSTRUMENT_VA(this, section);
SBError sb_error;
@@ -2243,13 +2074,12 @@ SBError SBTarget::ClearSectionLoadAddress(lldb::SBSection section) {
} else {
sb_error.SetErrorStringWithFormat("invalid target");
}
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
SBError SBTarget::SetModuleLoadAddress(lldb::SBModule module,
int64_t slide_offset) {
- LLDB_RECORD_METHOD(lldb::SBError, SBTarget, SetModuleLoadAddress,
- (lldb::SBModule, int64_t), module, slide_offset);
+ LLDB_INSTRUMENT_VA(this, module, slide_offset);
SBError sb_error;
@@ -2278,12 +2108,11 @@ SBError SBTarget::SetModuleLoadAddress(lldb::SBModule module,
} else {
sb_error.SetErrorStringWithFormat("invalid target");
}
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
SBError SBTarget::ClearModuleLoadAddress(lldb::SBModule module) {
- LLDB_RECORD_METHOD(lldb::SBError, SBTarget, ClearModuleLoadAddress,
- (lldb::SBModule), module);
+ LLDB_INSTRUMENT_VA(this, module);
SBError sb_error;
@@ -2330,13 +2159,12 @@ SBError SBTarget::ClearModuleLoadAddress(lldb::SBModule module) {
} else {
sb_error.SetErrorStringWithFormat("invalid target");
}
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
lldb::SBSymbolContextList SBTarget::FindSymbols(const char *name,
lldb::SymbolType symbol_type) {
- LLDB_RECORD_METHOD(lldb::SBSymbolContextList, SBTarget, FindSymbols,
- (const char *, lldb::SymbolType), name, symbol_type);
+ LLDB_INSTRUMENT_VA(this, name, symbol_type);
SBSymbolContextList sb_sc_list;
if (name && name[0]) {
@@ -2345,30 +2173,27 @@ lldb::SBSymbolContextList SBTarget::FindSymbols(const char *name,
target_sp->GetImages().FindSymbolsWithNameAndType(
ConstString(name), symbol_type, *sb_sc_list);
}
- return LLDB_RECORD_RESULT(sb_sc_list);
+ return sb_sc_list;
}
lldb::SBValue SBTarget::EvaluateExpression(const char *expr) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBTarget, EvaluateExpression,
- (const char *), expr);
+ LLDB_INSTRUMENT_VA(this, expr);
TargetSP target_sp(GetSP());
if (!target_sp)
- return LLDB_RECORD_RESULT(SBValue());
+ return SBValue();
SBExpressionOptions options;
lldb::DynamicValueType fetch_dynamic_value =
target_sp->GetPreferDynamicValue();
options.SetFetchDynamicValue(fetch_dynamic_value);
options.SetUnwindOnError(true);
- return LLDB_RECORD_RESULT(EvaluateExpression(expr, options));
+ return EvaluateExpression(expr, options);
}
lldb::SBValue SBTarget::EvaluateExpression(const char *expr,
const SBExpressionOptions &options) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBTarget, EvaluateExpression,
- (const char *, const lldb::SBExpressionOptions &), expr,
- options);
+ LLDB_INSTRUMENT_VA(this, expr, options);
Log *expr_log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_EXPRESSIONS));
SBValue expr_result;
@@ -2377,7 +2202,7 @@ lldb::SBValue SBTarget::EvaluateExpression(const char *expr,
StackFrame *frame = nullptr;
if (target_sp) {
if (expr == nullptr || expr[0] == '\0') {
- return LLDB_RECORD_RESULT(expr_result);
+ return expr_result;
}
std::lock_guard<std::recursive_mutex> guard(target_sp->GetAPIMutex());
@@ -2397,11 +2222,11 @@ lldb::SBValue SBTarget::EvaluateExpression(const char *expr,
"** [SBTarget::EvaluateExpression] Expression result is "
"%s, summary %s **",
expr_result.GetValue(), expr_result.GetSummary());
- return LLDB_RECORD_RESULT(expr_result);
+ return expr_result;
}
lldb::addr_t SBTarget::GetStackRedZoneSize() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::addr_t, SBTarget, GetStackRedZoneSize);
+ LLDB_INSTRUMENT_VA(this);
TargetSP target_sp(GetSP());
if (target_sp) {
@@ -2418,8 +2243,7 @@ lldb::addr_t SBTarget::GetStackRedZoneSize() {
}
bool SBTarget::IsLoaded(const SBModule &module) const {
- LLDB_RECORD_METHOD_CONST(bool, SBTarget, IsLoaded, (const lldb::SBModule &),
- module);
+ LLDB_INSTRUMENT_VA(this, module);
TargetSP target_sp(GetSP());
if (!target_sp)
@@ -2433,18 +2257,17 @@ bool SBTarget::IsLoaded(const SBModule &module) const {
}
lldb::SBLaunchInfo SBTarget::GetLaunchInfo() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBLaunchInfo, SBTarget, GetLaunchInfo);
+ LLDB_INSTRUMENT_VA(this);
lldb::SBLaunchInfo launch_info(nullptr);
TargetSP target_sp(GetSP());
if (target_sp)
launch_info.set_ref(m_opaque_sp->GetProcessLaunchInfo());
- return LLDB_RECORD_RESULT(launch_info);
+ return launch_info;
}
void SBTarget::SetLaunchInfo(const lldb::SBLaunchInfo &launch_info) {
- LLDB_RECORD_METHOD(void, SBTarget, SetLaunchInfo,
- (const lldb::SBLaunchInfo &), launch_info);
+ LLDB_INSTRUMENT_VA(this, launch_info);
TargetSP target_sp(GetSP());
if (target_sp)
@@ -2452,309 +2275,39 @@ void SBTarget::SetLaunchInfo(const lldb::SBLaunchInfo &launch_info) {
}
SBEnvironment SBTarget::GetEnvironment() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBEnvironment, SBTarget, GetEnvironment);
+ LLDB_INSTRUMENT_VA(this);
TargetSP target_sp(GetSP());
if (target_sp) {
- return LLDB_RECORD_RESULT(SBEnvironment(target_sp->GetEnvironment()));
+ return SBEnvironment(target_sp->GetEnvironment());
}
- return LLDB_RECORD_RESULT(SBEnvironment());
+ return SBEnvironment();
}
lldb::SBTrace SBTarget::GetTrace() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBTrace, SBTarget, GetTrace);
+ LLDB_INSTRUMENT_VA(this);
TargetSP target_sp(GetSP());
if (target_sp)
- return LLDB_RECORD_RESULT(SBTrace(target_sp->GetTrace()));
+ return SBTrace(target_sp->GetTrace());
- return LLDB_RECORD_RESULT(SBTrace());
+ return SBTrace();
}
lldb::SBTrace SBTarget::CreateTrace(lldb::SBError &error) {
- LLDB_RECORD_METHOD(lldb::SBTrace, SBTarget, CreateTrace, (lldb::SBError &),
- error);
+ LLDB_INSTRUMENT_VA(this, error);
TargetSP target_sp(GetSP());
error.Clear();
if (target_sp) {
if (llvm::Expected<lldb::TraceSP> trace_sp = target_sp->CreateTrace()) {
- return LLDB_RECORD_RESULT(SBTrace(*trace_sp));
+ return SBTrace(*trace_sp);
} else {
error.SetErrorString(llvm::toString(trace_sp.takeError()).c_str());
}
} else {
error.SetErrorString("missing target");
}
- return LLDB_RECORD_RESULT(SBTrace());
-}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBTarget>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBTarget, ());
- LLDB_REGISTER_CONSTRUCTOR(SBTarget, (const lldb::SBTarget &));
- LLDB_REGISTER_CONSTRUCTOR(SBTarget, (const lldb::TargetSP &));
- LLDB_REGISTER_METHOD(const lldb::SBTarget &,
- SBTarget, operator=,(const lldb::SBTarget &));
- LLDB_REGISTER_STATIC_METHOD(bool, SBTarget, EventIsTargetEvent,
- (const lldb::SBEvent &));
- LLDB_REGISTER_STATIC_METHOD(lldb::SBTarget, SBTarget, GetTargetFromEvent,
- (const lldb::SBEvent &));
- LLDB_REGISTER_STATIC_METHOD(uint32_t, SBTarget, GetNumModulesFromEvent,
- (const lldb::SBEvent &));
- LLDB_REGISTER_STATIC_METHOD(lldb::SBModule, SBTarget,
- GetModuleAtIndexFromEvent,
- (const uint32_t, const lldb::SBEvent &));
- LLDB_REGISTER_STATIC_METHOD(const char *, SBTarget, GetBroadcasterClassName,
- ());
- LLDB_REGISTER_METHOD_CONST(bool, SBTarget, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBTarget, operator bool, ());
- LLDB_REGISTER_METHOD(lldb::SBProcess, SBTarget, GetProcess, ());
- LLDB_REGISTER_METHOD(lldb::SBPlatform, SBTarget, GetPlatform, ());
- LLDB_REGISTER_METHOD_CONST(lldb::SBDebugger, SBTarget, GetDebugger, ());
- LLDB_REGISTER_METHOD(lldb::SBStructuredData, SBTarget, GetStatistics, ());
- LLDB_REGISTER_METHOD(void, SBTarget, SetCollectingStats, (bool));
- LLDB_REGISTER_METHOD(bool, SBTarget, GetCollectingStats, ());
- LLDB_REGISTER_METHOD(lldb::SBProcess, SBTarget, LoadCore, (const char *));
- LLDB_REGISTER_METHOD(lldb::SBProcess, SBTarget, LoadCore,
- (const char *, lldb::SBError &));
- LLDB_REGISTER_METHOD(lldb::SBProcess, SBTarget, LaunchSimple,
- (const char **, const char **, const char *));
- LLDB_REGISTER_METHOD(lldb::SBError, SBTarget, Install, ());
- LLDB_REGISTER_METHOD(lldb::SBProcess, SBTarget, Launch,
- (lldb::SBListener &, const char **, const char **,
- const char *, const char *, const char *,
- const char *, uint32_t, bool, lldb::SBError &));
- LLDB_REGISTER_METHOD(lldb::SBProcess, SBTarget, Launch,
- (lldb::SBLaunchInfo &, lldb::SBError &));
- LLDB_REGISTER_METHOD(lldb::SBProcess, SBTarget, Attach,
- (lldb::SBAttachInfo &, lldb::SBError &));
- LLDB_REGISTER_METHOD(lldb::SBProcess, SBTarget, AttachToProcessWithID,
- (lldb::SBListener &, lldb::pid_t, lldb::SBError &));
- LLDB_REGISTER_METHOD(
- lldb::SBProcess, SBTarget, AttachToProcessWithName,
- (lldb::SBListener &, const char *, bool, lldb::SBError &));
- LLDB_REGISTER_METHOD(
- lldb::SBProcess, SBTarget, ConnectRemote,
- (lldb::SBListener &, const char *, const char *, lldb::SBError &));
- LLDB_REGISTER_METHOD(lldb::SBFileSpec, SBTarget, GetExecutable, ());
- LLDB_REGISTER_METHOD_CONST(bool,
- SBTarget, operator==,(const lldb::SBTarget &));
- LLDB_REGISTER_METHOD_CONST(bool,
- SBTarget, operator!=,(const lldb::SBTarget &));
- LLDB_REGISTER_METHOD(lldb::SBAddress, SBTarget, ResolveLoadAddress,
- (lldb::addr_t));
- LLDB_REGISTER_METHOD(lldb::SBAddress, SBTarget, ResolveFileAddress,
- (lldb::addr_t));
- LLDB_REGISTER_METHOD(lldb::SBAddress, SBTarget, ResolvePastLoadAddress,
- (uint32_t, lldb::addr_t));
- LLDB_REGISTER_METHOD(lldb::SBSymbolContext, SBTarget,
- ResolveSymbolContextForAddress,
- (const lldb::SBAddress &, uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBBreakpoint, SBTarget,
- BreakpointCreateByLocation, (const char *, uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBBreakpoint, SBTarget,
- BreakpointCreateByLocation,
- (const lldb::SBFileSpec &, uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBBreakpoint, SBTarget,
- BreakpointCreateByLocation,
- (const lldb::SBFileSpec &, uint32_t, lldb::addr_t));
- LLDB_REGISTER_METHOD(lldb::SBBreakpoint, SBTarget,
- BreakpointCreateByLocation,
- (const lldb::SBFileSpec &, uint32_t, lldb::addr_t,
- lldb::SBFileSpecList &));
- LLDB_REGISTER_METHOD(lldb::SBBreakpoint, SBTarget,
- BreakpointCreateByLocation,
- (const lldb::SBFileSpec &, uint32_t, uint32_t,
- lldb::addr_t, lldb::SBFileSpecList &));
- LLDB_REGISTER_METHOD(lldb::SBBreakpoint, SBTarget, BreakpointCreateByLocation,
- (const lldb::SBFileSpec &, uint32_t, uint32_t,
- lldb::addr_t, lldb::SBFileSpecList &, bool));
- LLDB_REGISTER_METHOD(lldb::SBBreakpoint, SBTarget, BreakpointCreateByName,
- (const char *, const char *));
- LLDB_REGISTER_METHOD(lldb::SBBreakpoint, SBTarget, BreakpointCreateByName,
- (const char *, const lldb::SBFileSpecList &,
- const lldb::SBFileSpecList &));
- LLDB_REGISTER_METHOD(lldb::SBBreakpoint, SBTarget, BreakpointCreateByName,
- (const char *, uint32_t, const lldb::SBFileSpecList &,
- const lldb::SBFileSpecList &));
- LLDB_REGISTER_METHOD(lldb::SBBreakpoint, SBTarget, BreakpointCreateByName,
- (const char *, uint32_t, lldb::LanguageType,
- const lldb::SBFileSpecList &,
- const lldb::SBFileSpecList &));
- LLDB_REGISTER_METHOD(lldb::SBBreakpoint, SBTarget, BreakpointCreateByNames,
- (const char **, uint32_t, uint32_t,
- const lldb::SBFileSpecList &,
- const lldb::SBFileSpecList &));
- LLDB_REGISTER_METHOD(lldb::SBBreakpoint, SBTarget, BreakpointCreateByNames,
- (const char **, uint32_t, uint32_t, lldb::LanguageType,
- const lldb::SBFileSpecList &,
- const lldb::SBFileSpecList &));
- LLDB_REGISTER_METHOD(lldb::SBBreakpoint, SBTarget, BreakpointCreateByNames,
- (const char **, uint32_t, uint32_t, lldb::LanguageType,
- lldb::addr_t, const lldb::SBFileSpecList &,
- const lldb::SBFileSpecList &));
- LLDB_REGISTER_METHOD(lldb::SBBreakpoint, SBTarget, BreakpointCreateByRegex,
- (const char *, const char *));
- LLDB_REGISTER_METHOD(lldb::SBBreakpoint, SBTarget, BreakpointCreateByRegex,
- (const char *, const lldb::SBFileSpecList &,
- const lldb::SBFileSpecList &));
- LLDB_REGISTER_METHOD(lldb::SBBreakpoint, SBTarget, BreakpointCreateByRegex,
- (const char *, lldb::LanguageType,
- const lldb::SBFileSpecList &,
- const lldb::SBFileSpecList &));
- LLDB_REGISTER_METHOD(lldb::SBBreakpoint, SBTarget,
- BreakpointCreateByAddress, (lldb::addr_t));
- LLDB_REGISTER_METHOD(lldb::SBBreakpoint, SBTarget,
- BreakpointCreateBySBAddress, (lldb::SBAddress &));
- LLDB_REGISTER_METHOD(
- lldb::SBBreakpoint, SBTarget, BreakpointCreateBySourceRegex,
- (const char *, const lldb::SBFileSpec &, const char *));
- LLDB_REGISTER_METHOD(lldb::SBBreakpoint, SBTarget,
- BreakpointCreateBySourceRegex,
- (const char *, const lldb::SBFileSpecList &,
- const lldb::SBFileSpecList &));
- LLDB_REGISTER_METHOD(
- lldb::SBBreakpoint, SBTarget, BreakpointCreateBySourceRegex,
- (const char *, const lldb::SBFileSpecList &,
- const lldb::SBFileSpecList &, const lldb::SBStringList &));
- LLDB_REGISTER_METHOD(lldb::SBBreakpoint, SBTarget,
- BreakpointCreateForException,
- (lldb::LanguageType, bool, bool));
- LLDB_REGISTER_METHOD(
- lldb::SBBreakpoint, SBTarget, BreakpointCreateFromScript,
- (const char *, lldb::SBStructuredData &, const lldb::SBFileSpecList &,
- const lldb::SBFileSpecList &, bool));
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBTarget, GetNumBreakpoints, ());
- LLDB_REGISTER_METHOD_CONST(lldb::SBBreakpoint, SBTarget,
- GetBreakpointAtIndex, (uint32_t));
- LLDB_REGISTER_METHOD(bool, SBTarget, BreakpointDelete, (lldb::break_id_t));
- LLDB_REGISTER_METHOD(lldb::SBBreakpoint, SBTarget, FindBreakpointByID,
- (lldb::break_id_t));
- LLDB_REGISTER_METHOD(bool, SBTarget, FindBreakpointsByName,
- (const char *, lldb::SBBreakpointList &));
- LLDB_REGISTER_METHOD(void, SBTarget, GetBreakpointNames,
- (lldb::SBStringList &));
- LLDB_REGISTER_METHOD(void, SBTarget, DeleteBreakpointName, (const char *));
- LLDB_REGISTER_METHOD(bool, SBTarget, EnableAllBreakpoints, ());
- LLDB_REGISTER_METHOD(bool, SBTarget, DisableAllBreakpoints, ());
- LLDB_REGISTER_METHOD(bool, SBTarget, DeleteAllBreakpoints, ());
- LLDB_REGISTER_METHOD(lldb::SBError, SBTarget, BreakpointsCreateFromFile,
- (lldb::SBFileSpec &, lldb::SBBreakpointList &));
- LLDB_REGISTER_METHOD(
- lldb::SBError, SBTarget, BreakpointsCreateFromFile,
- (lldb::SBFileSpec &, lldb::SBStringList &, lldb::SBBreakpointList &));
- LLDB_REGISTER_METHOD(lldb::SBError, SBTarget, BreakpointsWriteToFile,
- (lldb::SBFileSpec &));
- LLDB_REGISTER_METHOD(lldb::SBError, SBTarget, BreakpointsWriteToFile,
- (lldb::SBFileSpec &, lldb::SBBreakpointList &, bool));
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBTarget, GetNumWatchpoints, ());
- LLDB_REGISTER_METHOD_CONST(lldb::SBWatchpoint, SBTarget,
- GetWatchpointAtIndex, (uint32_t));
- LLDB_REGISTER_METHOD(bool, SBTarget, DeleteWatchpoint, (lldb::watch_id_t));
- LLDB_REGISTER_METHOD(lldb::SBWatchpoint, SBTarget, FindWatchpointByID,
- (lldb::watch_id_t));
- LLDB_REGISTER_METHOD(lldb::SBWatchpoint, SBTarget, WatchAddress,
- (lldb::addr_t, size_t, bool, bool, lldb::SBError &));
- LLDB_REGISTER_METHOD(bool, SBTarget, EnableAllWatchpoints, ());
- LLDB_REGISTER_METHOD(bool, SBTarget, DisableAllWatchpoints, ());
- LLDB_REGISTER_METHOD(lldb::SBValue, SBTarget, CreateValueFromAddress,
- (const char *, lldb::SBAddress, lldb::SBType));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBTarget, CreateValueFromData,
- (const char *, lldb::SBData, lldb::SBType));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBTarget, CreateValueFromExpression,
- (const char *, const char *));
- LLDB_REGISTER_METHOD(bool, SBTarget, DeleteAllWatchpoints, ());
- LLDB_REGISTER_METHOD(void, SBTarget, AppendImageSearchPath,
- (const char *, const char *, lldb::SBError &));
- LLDB_REGISTER_METHOD(lldb::SBModule, SBTarget, AddModule,
- (const char *, const char *, const char *));
- LLDB_REGISTER_METHOD(
- lldb::SBModule, SBTarget, AddModule,
- (const char *, const char *, const char *, const char *));
- LLDB_REGISTER_METHOD(lldb::SBModule, SBTarget, AddModule,
- (const lldb::SBModuleSpec &));
- LLDB_REGISTER_METHOD(bool, SBTarget, AddModule, (lldb::SBModule &));
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBTarget, GetNumModules, ());
- LLDB_REGISTER_METHOD(void, SBTarget, Clear, ());
- LLDB_REGISTER_METHOD(lldb::SBModule, SBTarget, FindModule,
- (const lldb::SBFileSpec &));
- LLDB_REGISTER_METHOD(lldb::SBSymbolContextList, SBTarget, FindCompileUnits,
- (const lldb::SBFileSpec &));
- LLDB_REGISTER_METHOD(lldb::ByteOrder, SBTarget, GetByteOrder, ());
- LLDB_REGISTER_METHOD(const char *, SBTarget, GetTriple, ());
- LLDB_REGISTER_METHOD(uint32_t, SBTarget, GetDataByteSize, ());
- LLDB_REGISTER_METHOD(uint32_t, SBTarget, GetCodeByteSize, ());
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBTarget, GetMaximumNumberOfChildrenToDisplay,());
- LLDB_REGISTER_METHOD(uint32_t, SBTarget, GetAddressByteSize, ());
- LLDB_REGISTER_METHOD(lldb::SBModule, SBTarget, GetModuleAtIndex,
- (uint32_t));
- LLDB_REGISTER_METHOD(bool, SBTarget, RemoveModule, (lldb::SBModule));
- LLDB_REGISTER_METHOD_CONST(lldb::SBBroadcaster, SBTarget, GetBroadcaster,
- ());
- LLDB_REGISTER_METHOD(bool, SBTarget, GetDescription,
- (lldb::SBStream &, lldb::DescriptionLevel));
- LLDB_REGISTER_METHOD(lldb::SBSymbolContextList, SBTarget, FindFunctions,
- (const char *, uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBSymbolContextList, SBTarget,
- FindGlobalFunctions,
- (const char *, uint32_t, lldb::MatchType));
- LLDB_REGISTER_METHOD(lldb::SBType, SBTarget, FindFirstType, (const char *));
- LLDB_REGISTER_METHOD(lldb::SBType, SBTarget, GetBasicType,
- (lldb::BasicType));
- LLDB_REGISTER_METHOD(lldb::SBTypeList, SBTarget, FindTypes, (const char *));
- LLDB_REGISTER_METHOD(lldb::SBValueList, SBTarget, FindGlobalVariables,
- (const char *, uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBValueList, SBTarget, FindGlobalVariables,
- (const char *, uint32_t, lldb::MatchType));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBTarget, FindFirstGlobalVariable,
- (const char *));
- LLDB_REGISTER_METHOD(lldb::SBSourceManager, SBTarget, GetSourceManager, ());
- LLDB_REGISTER_METHOD(lldb::SBInstructionList, SBTarget, ReadInstructions,
- (lldb::SBAddress, uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBInstructionList, SBTarget, ReadInstructions,
- (lldb::SBAddress, uint32_t, const char *));
- LLDB_REGISTER_METHOD(lldb::SBError, SBTarget, SetSectionLoadAddress,
- (lldb::SBSection, lldb::addr_t));
- LLDB_REGISTER_METHOD(lldb::SBError, SBTarget, ClearSectionLoadAddress,
- (lldb::SBSection));
- LLDB_REGISTER_METHOD(lldb::SBError, SBTarget, SetModuleLoadAddress,
- (lldb::SBModule, int64_t));
- LLDB_REGISTER_METHOD(lldb::SBError, SBTarget, ClearModuleLoadAddress,
- (lldb::SBModule));
- LLDB_REGISTER_METHOD(lldb::SBSymbolContextList, SBTarget, FindSymbols,
- (const char *, lldb::SymbolType));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBTarget, EvaluateExpression,
- (const char *));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBTarget, EvaluateExpression,
- (const char *, const lldb::SBExpressionOptions &));
- LLDB_REGISTER_METHOD(lldb::addr_t, SBTarget, GetStackRedZoneSize, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBTarget, IsLoaded,
- (const lldb::SBModule &));
- LLDB_REGISTER_METHOD_CONST(lldb::SBLaunchInfo, SBTarget, GetLaunchInfo, ());
- LLDB_REGISTER_METHOD(void, SBTarget, SetLaunchInfo,
- (const lldb::SBLaunchInfo &));
- LLDB_REGISTER_METHOD(
- size_t, SBTarget, ReadMemory,
- (const lldb::SBAddress, void *, size_t, lldb::SBError &));
- LLDB_REGISTER_METHOD(lldb::SBInstructionList, SBTarget, GetInstructions,
- (lldb::SBAddress, const void *, size_t));
- LLDB_REGISTER_METHOD(lldb::SBInstructionList, SBTarget,
- GetInstructionsWithFlavor,
- (lldb::SBAddress, const char *, const void *, size_t));
- LLDB_REGISTER_METHOD(lldb::SBInstructionList, SBTarget, GetInstructions,
- (lldb::addr_t, const void *, size_t));
- LLDB_REGISTER_METHOD(lldb::SBInstructionList, SBTarget,
- GetInstructionsWithFlavor,
- (lldb::addr_t, const char *, const void *, size_t));
- LLDB_REGISTER_METHOD(lldb::SBEnvironment, SBTarget, GetEnvironment, ());
- LLDB_REGISTER_METHOD(lldb::SBTrace, SBTarget, GetTrace, ());
- LLDB_REGISTER_METHOD(lldb::SBTrace, SBTarget, CreateTrace, (lldb::SBError &));
-}
-
-}
+ return SBTrace();
}
diff --git a/contrib/llvm-project/lldb/source/API/SBThread.cpp b/contrib/llvm-project/lldb/source/API/SBThread.cpp
index 8d5b6f2a5423..dcc2a6ed3d18 100644
--- a/contrib/llvm-project/lldb/source/API/SBThread.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBThread.cpp
@@ -7,7 +7,6 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBThread.h"
-#include "SBReproducerPrivate.h"
#include "Utils.h"
#include "lldb/API/SBAddress.h"
#include "lldb/API/SBDebugger.h"
@@ -40,6 +39,7 @@
#include "lldb/Target/ThreadPlanStepInstruction.h"
#include "lldb/Target/ThreadPlanStepOut.h"
#include "lldb/Target/ThreadPlanStepRange.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Utility/State.h"
#include "lldb/Utility/Stream.h"
#include "lldb/Utility/StructuredData.h"
@@ -51,24 +51,23 @@ using namespace lldb;
using namespace lldb_private;
const char *SBThread::GetBroadcasterClassName() {
- LLDB_RECORD_STATIC_METHOD_NO_ARGS(const char *, SBThread,
- GetBroadcasterClassName);
+ LLDB_INSTRUMENT();
return Thread::GetStaticBroadcasterClass().AsCString();
}
// Constructors
SBThread::SBThread() : m_opaque_sp(new ExecutionContextRef()) {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBThread);
+ LLDB_INSTRUMENT_VA(this);
}
SBThread::SBThread(const ThreadSP &lldb_object_sp)
: m_opaque_sp(new ExecutionContextRef(lldb_object_sp)) {
- LLDB_RECORD_CONSTRUCTOR(SBThread, (const lldb::ThreadSP &), lldb_object_sp);
+ LLDB_INSTRUMENT_VA(this, lldb_object_sp);
}
-SBThread::SBThread(const SBThread &rhs) : m_opaque_sp() {
- LLDB_RECORD_CONSTRUCTOR(SBThread, (const lldb::SBThread &), rhs);
+SBThread::SBThread(const SBThread &rhs) {
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_sp = clone(rhs.m_opaque_sp);
}
@@ -76,19 +75,18 @@ SBThread::SBThread(const SBThread &rhs) : m_opaque_sp() {
// Assignment operator
const lldb::SBThread &SBThread::operator=(const SBThread &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBThread &,
- SBThread, operator=,(const lldb::SBThread &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_sp = clone(rhs.m_opaque_sp);
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
// Destructor
SBThread::~SBThread() = default;
lldb::SBQueue SBThread::GetQueue() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBQueue, SBThread, GetQueue);
+ LLDB_INSTRUMENT_VA(this);
SBQueue sb_queue;
QueueSP queue_sp;
@@ -105,15 +103,15 @@ lldb::SBQueue SBThread::GetQueue() const {
}
}
- return LLDB_RECORD_RESULT(sb_queue);
+ return sb_queue;
}
bool SBThread::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBThread, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBThread::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBThread, operator bool);
+ LLDB_INSTRUMENT_VA(this);
std::unique_lock<std::recursive_mutex> lock;
ExecutionContext exe_ctx(m_opaque_sp.get(), lock);
@@ -130,13 +128,13 @@ SBThread::operator bool() const {
}
void SBThread::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBThread, Clear);
+ LLDB_INSTRUMENT_VA(this);
m_opaque_sp->Clear();
}
StopReason SBThread::GetStopReason() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::StopReason, SBThread, GetStopReason);
+ LLDB_INSTRUMENT_VA(this);
StopReason reason = eStopReasonInvalid;
std::unique_lock<std::recursive_mutex> lock;
@@ -153,7 +151,7 @@ StopReason SBThread::GetStopReason() {
}
size_t SBThread::GetStopReasonDataCount() {
- LLDB_RECORD_METHOD_NO_ARGS(size_t, SBThread, GetStopReasonDataCount);
+ LLDB_INSTRUMENT_VA(this);
std::unique_lock<std::recursive_mutex> lock;
ExecutionContext exe_ctx(m_opaque_sp.get(), lock);
@@ -210,8 +208,7 @@ size_t SBThread::GetStopReasonDataCount() {
}
uint64_t SBThread::GetStopReasonDataAtIndex(uint32_t idx) {
- LLDB_RECORD_METHOD(uint64_t, SBThread, GetStopReasonDataAtIndex, (uint32_t),
- idx);
+ LLDB_INSTRUMENT_VA(this, idx);
std::unique_lock<std::recursive_mutex> lock;
ExecutionContext exe_ctx(m_opaque_sp.get(), lock);
@@ -280,8 +277,7 @@ uint64_t SBThread::GetStopReasonDataAtIndex(uint32_t idx) {
}
bool SBThread::GetStopReasonExtendedInfoAsJSON(lldb::SBStream &stream) {
- LLDB_RECORD_METHOD(bool, SBThread, GetStopReasonExtendedInfoAsJSON,
- (lldb::SBStream &), stream);
+ LLDB_INSTRUMENT_VA(this, stream);
Stream &strm = stream.ref();
@@ -303,9 +299,7 @@ bool SBThread::GetStopReasonExtendedInfoAsJSON(lldb::SBStream &stream) {
SBThreadCollection
SBThread::GetStopReasonExtendedBacktraces(InstrumentationRuntimeType type) {
- LLDB_RECORD_METHOD(lldb::SBThreadCollection, SBThread,
- GetStopReasonExtendedBacktraces,
- (lldb::InstrumentationRuntimeType), type);
+ LLDB_INSTRUMENT_VA(this, type);
SBThreadCollection threads;
@@ -313,23 +307,22 @@ SBThread::GetStopReasonExtendedBacktraces(InstrumentationRuntimeType type) {
ExecutionContext exe_ctx(m_opaque_sp.get(), lock);
if (!exe_ctx.HasThreadScope())
- return LLDB_RECORD_RESULT(SBThreadCollection());
+ return SBThreadCollection();
ProcessSP process_sp = exe_ctx.GetProcessSP();
StopInfoSP stop_info = exe_ctx.GetThreadPtr()->GetStopInfo();
StructuredData::ObjectSP info = stop_info->GetExtendedInfo();
if (!info)
- return LLDB_RECORD_RESULT(threads);
+ return threads;
threads = process_sp->GetInstrumentationRuntime(type)
->GetBacktracesFromExtendedStopInfo(info);
- return LLDB_RECORD_RESULT(threads);
+ return threads;
}
size_t SBThread::GetStopDescription(char *dst, size_t dst_len) {
- LLDB_RECORD_CHAR_PTR_METHOD(size_t, SBThread, GetStopDescription,
- (char *, size_t), dst, "", dst_len);
+ LLDB_INSTRUMENT_VA(this, dst, dst_len);
std::unique_lock<std::recursive_mutex> lock;
ExecutionContext exe_ctx(m_opaque_sp.get(), lock);
@@ -357,7 +350,7 @@ size_t SBThread::GetStopDescription(char *dst, size_t dst_len) {
}
SBValue SBThread::GetStopReturnValue() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBValue, SBThread, GetStopReturnValue);
+ LLDB_INSTRUMENT_VA(this);
ValueObjectSP return_valobj_sp;
std::unique_lock<std::recursive_mutex> lock;
@@ -373,7 +366,7 @@ SBValue SBThread::GetStopReturnValue() {
}
}
- return LLDB_RECORD_RESULT(SBValue(return_valobj_sp));
+ return SBValue(return_valobj_sp);
}
void SBThread::SetThread(const ThreadSP &lldb_object_sp) {
@@ -381,7 +374,7 @@ void SBThread::SetThread(const ThreadSP &lldb_object_sp) {
}
lldb::tid_t SBThread::GetThreadID() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::tid_t, SBThread, GetThreadID);
+ LLDB_INSTRUMENT_VA(this);
ThreadSP thread_sp(m_opaque_sp->GetThreadSP());
if (thread_sp)
@@ -390,7 +383,7 @@ lldb::tid_t SBThread::GetThreadID() const {
}
uint32_t SBThread::GetIndexID() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBThread, GetIndexID);
+ LLDB_INSTRUMENT_VA(this);
ThreadSP thread_sp(m_opaque_sp->GetThreadSP());
if (thread_sp)
@@ -399,7 +392,7 @@ uint32_t SBThread::GetIndexID() const {
}
const char *SBThread::GetName() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBThread, GetName);
+ LLDB_INSTRUMENT_VA(this);
const char *name = nullptr;
std::unique_lock<std::recursive_mutex> lock;
@@ -416,7 +409,7 @@ const char *SBThread::GetName() const {
}
const char *SBThread::GetQueueName() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(const char *, SBThread, GetQueueName);
+ LLDB_INSTRUMENT_VA(this);
const char *name = nullptr;
std::unique_lock<std::recursive_mutex> lock;
@@ -433,7 +426,7 @@ const char *SBThread::GetQueueName() const {
}
lldb::queue_id_t SBThread::GetQueueID() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::queue_id_t, SBThread, GetQueueID);
+ LLDB_INSTRUMENT_VA(this);
queue_id_t id = LLDB_INVALID_QUEUE_ID;
std::unique_lock<std::recursive_mutex> lock;
@@ -450,8 +443,7 @@ lldb::queue_id_t SBThread::GetQueueID() const {
}
bool SBThread::GetInfoItemByPathAsString(const char *path, SBStream &strm) {
- LLDB_RECORD_METHOD(bool, SBThread, GetInfoItemByPathAsString,
- (const char *, lldb::SBStream &), path, strm);
+ LLDB_INSTRUMENT_VA(this, path, strm);
bool success = false;
std::unique_lock<std::recursive_mutex> lock;
@@ -532,16 +524,14 @@ SBError SBThread::ResumeNewPlan(ExecutionContext &exe_ctx,
}
void SBThread::StepOver(lldb::RunMode stop_other_threads) {
- LLDB_RECORD_METHOD(void, SBThread, StepOver, (lldb::RunMode),
- stop_other_threads);
+ LLDB_INSTRUMENT_VA(this, stop_other_threads);
SBError error; // Ignored
StepOver(stop_other_threads, error);
}
void SBThread::StepOver(lldb::RunMode stop_other_threads, SBError &error) {
- LLDB_RECORD_METHOD(void, SBThread, StepOver, (lldb::RunMode, lldb::SBError &),
- stop_other_threads, error);
+ LLDB_INSTRUMENT_VA(this, stop_other_threads, error);
std::unique_lock<std::recursive_mutex> lock;
ExecutionContext exe_ctx(m_opaque_sp.get(), lock);
@@ -573,16 +563,14 @@ void SBThread::StepOver(lldb::RunMode stop_other_threads, SBError &error) {
}
void SBThread::StepInto(lldb::RunMode stop_other_threads) {
- LLDB_RECORD_METHOD(void, SBThread, StepInto, (lldb::RunMode),
- stop_other_threads);
+ LLDB_INSTRUMENT_VA(this, stop_other_threads);
StepInto(nullptr, stop_other_threads);
}
void SBThread::StepInto(const char *target_name,
lldb::RunMode stop_other_threads) {
- LLDB_RECORD_METHOD(void, SBThread, StepInto, (const char *, lldb::RunMode),
- target_name, stop_other_threads);
+ LLDB_INSTRUMENT_VA(this, target_name, stop_other_threads);
SBError error; // Ignored
StepInto(target_name, LLDB_INVALID_LINE_NUMBER, error, stop_other_threads);
@@ -590,10 +578,7 @@ void SBThread::StepInto(const char *target_name,
void SBThread::StepInto(const char *target_name, uint32_t end_line,
SBError &error, lldb::RunMode stop_other_threads) {
- LLDB_RECORD_METHOD(void, SBThread, StepInto,
- (const char *, uint32_t, lldb::SBError &, lldb::RunMode),
- target_name, end_line, error, stop_other_threads);
-
+ LLDB_INSTRUMENT_VA(this, target_name, end_line, error, stop_other_threads);
std::unique_lock<std::recursive_mutex> lock;
ExecutionContext exe_ctx(m_opaque_sp.get(), lock);
@@ -640,14 +625,14 @@ void SBThread::StepInto(const char *target_name, uint32_t end_line,
}
void SBThread::StepOut() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBThread, StepOut);
+ LLDB_INSTRUMENT_VA(this);
SBError error; // Ignored
StepOut(error);
}
void SBThread::StepOut(SBError &error) {
- LLDB_RECORD_METHOD(void, SBThread, StepOut, (lldb::SBError &), error);
+ LLDB_INSTRUMENT_VA(this, error);
std::unique_lock<std::recursive_mutex> lock;
ExecutionContext exe_ctx(m_opaque_sp.get(), lock);
@@ -675,17 +660,14 @@ void SBThread::StepOut(SBError &error) {
}
void SBThread::StepOutOfFrame(SBFrame &sb_frame) {
- LLDB_RECORD_METHOD(void, SBThread, StepOutOfFrame, (lldb::SBFrame &),
- sb_frame);
+ LLDB_INSTRUMENT_VA(this, sb_frame);
SBError error; // Ignored
StepOutOfFrame(sb_frame, error);
}
void SBThread::StepOutOfFrame(SBFrame &sb_frame, SBError &error) {
- LLDB_RECORD_METHOD(void, SBThread, StepOutOfFrame,
- (lldb::SBFrame &, lldb::SBError &), sb_frame, error);
-
+ LLDB_INSTRUMENT_VA(this, sb_frame, error);
std::unique_lock<std::recursive_mutex> lock;
ExecutionContext exe_ctx(m_opaque_sp.get(), lock);
@@ -722,15 +704,14 @@ void SBThread::StepOutOfFrame(SBFrame &sb_frame, SBError &error) {
}
void SBThread::StepInstruction(bool step_over) {
- LLDB_RECORD_METHOD(void, SBThread, StepInstruction, (bool), step_over);
+ LLDB_INSTRUMENT_VA(this, step_over);
SBError error; // Ignored
StepInstruction(step_over, error);
}
void SBThread::StepInstruction(bool step_over, SBError &error) {
- LLDB_RECORD_METHOD(void, SBThread, StepInstruction, (bool, lldb::SBError &),
- step_over, error);
+ LLDB_INSTRUMENT_VA(this, step_over, error);
std::unique_lock<std::recursive_mutex> lock;
ExecutionContext exe_ctx(m_opaque_sp.get(), lock);
@@ -752,15 +733,14 @@ void SBThread::StepInstruction(bool step_over, SBError &error) {
}
void SBThread::RunToAddress(lldb::addr_t addr) {
- LLDB_RECORD_METHOD(void, SBThread, RunToAddress, (lldb::addr_t), addr);
+ LLDB_INSTRUMENT_VA(this, addr);
SBError error; // Ignored
RunToAddress(addr, error);
}
void SBThread::RunToAddress(lldb::addr_t addr, SBError &error) {
- LLDB_RECORD_METHOD(void, SBThread, RunToAddress,
- (lldb::addr_t, lldb::SBError &), addr, error);
+ LLDB_INSTRUMENT_VA(this, addr, error);
std::unique_lock<std::recursive_mutex> lock;
ExecutionContext exe_ctx(m_opaque_sp.get(), lock);
@@ -789,9 +769,7 @@ void SBThread::RunToAddress(lldb::addr_t addr, SBError &error) {
SBError SBThread::StepOverUntil(lldb::SBFrame &sb_frame,
lldb::SBFileSpec &sb_file_spec, uint32_t line) {
- LLDB_RECORD_METHOD(lldb::SBError, SBThread, StepOverUntil,
- (lldb::SBFrame &, lldb::SBFileSpec &, uint32_t), sb_frame,
- sb_file_spec, line);
+ LLDB_INSTRUMENT_VA(this, sb_frame, sb_file_spec, line);
SBError sb_error;
char path[PATH_MAX];
@@ -807,7 +785,7 @@ SBError SBThread::StepOverUntil(lldb::SBFrame &sb_frame,
if (line == 0) {
sb_error.SetErrorString("invalid line argument");
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
if (!frame_sp) {
@@ -819,7 +797,7 @@ SBError SBThread::StepOverUntil(lldb::SBFrame &sb_frame,
SymbolContext frame_sc;
if (!frame_sp) {
sb_error.SetErrorString("no valid frames in thread to step");
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
// If we have a frame, get its line
@@ -830,7 +808,7 @@ SBError SBThread::StepOverUntil(lldb::SBFrame &sb_frame,
if (frame_sc.comp_unit == nullptr) {
sb_error.SetErrorStringWithFormat(
"frame %u doesn't have debug information", frame_sp->GetFrameIndex());
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
FileSpec step_file_spec;
@@ -842,7 +820,7 @@ SBError SBThread::StepOverUntil(lldb::SBFrame &sb_frame,
step_file_spec = frame_sc.line_entry.file;
else {
sb_error.SetErrorString("invalid file argument or no file for frame");
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
}
@@ -904,34 +882,28 @@ SBError SBThread::StepOverUntil(lldb::SBFrame &sb_frame,
} else {
sb_error.SetErrorString("this SBThread object is invalid");
}
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
SBError SBThread::StepUsingScriptedThreadPlan(const char *script_class_name) {
- LLDB_RECORD_METHOD(lldb::SBError, SBThread, StepUsingScriptedThreadPlan,
- (const char *), script_class_name);
+ LLDB_INSTRUMENT_VA(this, script_class_name);
- return LLDB_RECORD_RESULT(
- StepUsingScriptedThreadPlan(script_class_name, true));
+ return StepUsingScriptedThreadPlan(script_class_name, true);
}
SBError SBThread::StepUsingScriptedThreadPlan(const char *script_class_name,
bool resume_immediately) {
- LLDB_RECORD_METHOD(lldb::SBError, SBThread, StepUsingScriptedThreadPlan,
- (const char *, bool), script_class_name,
- resume_immediately);
+ LLDB_INSTRUMENT_VA(this, script_class_name, resume_immediately);
lldb::SBStructuredData no_data;
- return LLDB_RECORD_RESULT(StepUsingScriptedThreadPlan(
- script_class_name, no_data, resume_immediately));
+ return StepUsingScriptedThreadPlan(script_class_name, no_data,
+ resume_immediately);
}
SBError SBThread::StepUsingScriptedThreadPlan(const char *script_class_name,
SBStructuredData &args_data,
bool resume_immediately) {
- LLDB_RECORD_METHOD(lldb::SBError, SBThread, StepUsingScriptedThreadPlan,
- (const char *, lldb::SBStructuredData &, bool),
- script_class_name, args_data, resume_immediately);
+ LLDB_INSTRUMENT_VA(this, script_class_name, args_data, resume_immediately);
SBError error;
@@ -940,7 +912,7 @@ SBError SBThread::StepUsingScriptedThreadPlan(const char *script_class_name,
if (!exe_ctx.HasThreadScope()) {
error.SetErrorString("this SBThread object is invalid");
- return LLDB_RECORD_RESULT(error);
+ return error;
}
Thread *thread = exe_ctx.GetThreadPtr();
@@ -952,23 +924,22 @@ SBError SBThread::StepUsingScriptedThreadPlan(const char *script_class_name,
if (new_plan_status.Fail()) {
error.SetErrorString(new_plan_status.AsCString());
- return LLDB_RECORD_RESULT(error);
+ return error;
}
if (!resume_immediately)
- return LLDB_RECORD_RESULT(error);
+ return error;
if (new_plan_status.Success())
error = ResumeNewPlan(exe_ctx, new_plan_sp.get());
else
error.SetErrorString(new_plan_status.AsCString());
- return LLDB_RECORD_RESULT(error);
+ return error;
}
SBError SBThread::JumpToLine(lldb::SBFileSpec &file_spec, uint32_t line) {
- LLDB_RECORD_METHOD(lldb::SBError, SBThread, JumpToLine,
- (lldb::SBFileSpec &, uint32_t), file_spec, line);
+ LLDB_INSTRUMENT_VA(this, file_spec, line);
SBError sb_error;
@@ -977,19 +948,18 @@ SBError SBThread::JumpToLine(lldb::SBFileSpec &file_spec, uint32_t line) {
if (!exe_ctx.HasThreadScope()) {
sb_error.SetErrorString("this SBThread object is invalid");
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
Thread *thread = exe_ctx.GetThreadPtr();
Status err = thread->JumpToLine(file_spec.ref(), line, true);
sb_error.SetError(err);
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
SBError SBThread::ReturnFromFrame(SBFrame &frame, SBValue &return_value) {
- LLDB_RECORD_METHOD(lldb::SBError, SBThread, ReturnFromFrame,
- (lldb::SBFrame &, lldb::SBValue &), frame, return_value);
+ LLDB_INSTRUMENT_VA(this, frame, return_value);
SBError sb_error;
@@ -1002,12 +972,11 @@ SBError SBThread::ReturnFromFrame(SBFrame &frame, SBValue &return_value) {
thread->ReturnFromFrame(frame.GetFrameSP(), return_value.GetSP()));
}
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
SBError SBThread::UnwindInnermostExpression() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBError, SBThread,
- UnwindInnermostExpression);
+ LLDB_INSTRUMENT_VA(this);
SBError sb_error;
@@ -1021,18 +990,18 @@ SBError SBThread::UnwindInnermostExpression() {
thread->SetSelectedFrameByIndex(0, false);
}
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
bool SBThread::Suspend() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBThread, Suspend);
+ LLDB_INSTRUMENT_VA(this);
SBError error; // Ignored
return Suspend(error);
}
bool SBThread::Suspend(SBError &error) {
- LLDB_RECORD_METHOD(bool, SBThread, Suspend, (lldb::SBError &), error);
+ LLDB_INSTRUMENT_VA(this, error);
std::unique_lock<std::recursive_mutex> lock;
ExecutionContext exe_ctx(m_opaque_sp.get(), lock);
@@ -1052,14 +1021,14 @@ bool SBThread::Suspend(SBError &error) {
}
bool SBThread::Resume() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBThread, Resume);
+ LLDB_INSTRUMENT_VA(this);
SBError error; // Ignored
return Resume(error);
}
bool SBThread::Resume(SBError &error) {
- LLDB_RECORD_METHOD(bool, SBThread, Resume, (lldb::SBError &), error);
+ LLDB_INSTRUMENT_VA(this, error);
std::unique_lock<std::recursive_mutex> lock;
ExecutionContext exe_ctx(m_opaque_sp.get(), lock);
@@ -1080,7 +1049,7 @@ bool SBThread::Resume(SBError &error) {
}
bool SBThread::IsSuspended() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBThread, IsSuspended);
+ LLDB_INSTRUMENT_VA(this);
std::unique_lock<std::recursive_mutex> lock;
ExecutionContext exe_ctx(m_opaque_sp.get(), lock);
@@ -1091,7 +1060,7 @@ bool SBThread::IsSuspended() {
}
bool SBThread::IsStopped() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBThread, IsStopped);
+ LLDB_INSTRUMENT_VA(this);
std::unique_lock<std::recursive_mutex> lock;
ExecutionContext exe_ctx(m_opaque_sp.get(), lock);
@@ -1102,7 +1071,7 @@ bool SBThread::IsStopped() {
}
SBProcess SBThread::GetProcess() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBProcess, SBThread, GetProcess);
+ LLDB_INSTRUMENT_VA(this);
SBProcess sb_process;
std::unique_lock<std::recursive_mutex> lock;
@@ -1114,11 +1083,11 @@ SBProcess SBThread::GetProcess() {
sb_process.SetSP(exe_ctx.GetProcessSP());
}
- return LLDB_RECORD_RESULT(sb_process);
+ return sb_process;
}
uint32_t SBThread::GetNumFrames() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBThread, GetNumFrames);
+ LLDB_INSTRUMENT_VA(this);
uint32_t num_frames = 0;
std::unique_lock<std::recursive_mutex> lock;
@@ -1135,7 +1104,7 @@ uint32_t SBThread::GetNumFrames() {
}
SBFrame SBThread::GetFrameAtIndex(uint32_t idx) {
- LLDB_RECORD_METHOD(lldb::SBFrame, SBThread, GetFrameAtIndex, (uint32_t), idx);
+ LLDB_INSTRUMENT_VA(this, idx);
SBFrame sb_frame;
StackFrameSP frame_sp;
@@ -1150,11 +1119,11 @@ SBFrame SBThread::GetFrameAtIndex(uint32_t idx) {
}
}
- return LLDB_RECORD_RESULT(sb_frame);
+ return sb_frame;
}
lldb::SBFrame SBThread::GetSelectedFrame() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBFrame, SBThread, GetSelectedFrame);
+ LLDB_INSTRUMENT_VA(this);
SBFrame sb_frame;
StackFrameSP frame_sp;
@@ -1169,12 +1138,11 @@ lldb::SBFrame SBThread::GetSelectedFrame() {
}
}
- return LLDB_RECORD_RESULT(sb_frame);
+ return sb_frame;
}
lldb::SBFrame SBThread::SetSelectedFrame(uint32_t idx) {
- LLDB_RECORD_METHOD(lldb::SBFrame, SBThread, SetSelectedFrame, (uint32_t),
- idx);
+ LLDB_INSTRUMENT_VA(this, idx);
SBFrame sb_frame;
StackFrameSP frame_sp;
@@ -1193,51 +1161,43 @@ lldb::SBFrame SBThread::SetSelectedFrame(uint32_t idx) {
}
}
- return LLDB_RECORD_RESULT(sb_frame);
+ return sb_frame;
}
bool SBThread::EventIsThreadEvent(const SBEvent &event) {
- LLDB_RECORD_STATIC_METHOD(bool, SBThread, EventIsThreadEvent,
- (const lldb::SBEvent &), event);
+ LLDB_INSTRUMENT_VA(event);
return Thread::ThreadEventData::GetEventDataFromEvent(event.get()) != nullptr;
}
SBFrame SBThread::GetStackFrameFromEvent(const SBEvent &event) {
- LLDB_RECORD_STATIC_METHOD(lldb::SBFrame, SBThread, GetStackFrameFromEvent,
- (const lldb::SBEvent &), event);
+ LLDB_INSTRUMENT_VA(event);
- return LLDB_RECORD_RESULT(
- Thread::ThreadEventData::GetStackFrameFromEvent(event.get()));
+ return Thread::ThreadEventData::GetStackFrameFromEvent(event.get());
}
SBThread SBThread::GetThreadFromEvent(const SBEvent &event) {
- LLDB_RECORD_STATIC_METHOD(lldb::SBThread, SBThread, GetThreadFromEvent,
- (const lldb::SBEvent &), event);
+ LLDB_INSTRUMENT_VA(event);
- return LLDB_RECORD_RESULT(
- Thread::ThreadEventData::GetThreadFromEvent(event.get()));
+ return Thread::ThreadEventData::GetThreadFromEvent(event.get());
}
bool SBThread::operator==(const SBThread &rhs) const {
- LLDB_RECORD_METHOD_CONST(bool, SBThread, operator==,(const lldb::SBThread &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
return m_opaque_sp->GetThreadSP().get() ==
rhs.m_opaque_sp->GetThreadSP().get();
}
bool SBThread::operator!=(const SBThread &rhs) const {
- LLDB_RECORD_METHOD_CONST(bool, SBThread, operator!=,(const lldb::SBThread &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
return m_opaque_sp->GetThreadSP().get() !=
rhs.m_opaque_sp->GetThreadSP().get();
}
bool SBThread::GetStatus(SBStream &status) const {
- LLDB_RECORD_METHOD_CONST(bool, SBThread, GetStatus, (lldb::SBStream &),
- status);
+ LLDB_INSTRUMENT_VA(this, status);
Stream &strm = status.ref();
@@ -1253,15 +1213,13 @@ bool SBThread::GetStatus(SBStream &status) const {
}
bool SBThread::GetDescription(SBStream &description) const {
- LLDB_RECORD_METHOD_CONST(bool, SBThread, GetDescription, (lldb::SBStream &),
- description);
+ LLDB_INSTRUMENT_VA(this, description);
return GetDescription(description, false);
}
bool SBThread::GetDescription(SBStream &description, bool stop_format) const {
- LLDB_RECORD_METHOD_CONST(bool, SBThread, GetDescription,
- (lldb::SBStream &, bool), description, stop_format);
+ LLDB_INSTRUMENT_VA(this, description, stop_format);
Stream &strm = description.ref();
@@ -1281,8 +1239,7 @@ bool SBThread::GetDescription(SBStream &description, bool stop_format) const {
}
SBThread SBThread::GetExtendedBacktraceThread(const char *type) {
- LLDB_RECORD_METHOD(lldb::SBThread, SBThread, GetExtendedBacktraceThread,
- (const char *), type);
+ LLDB_INSTRUMENT_VA(this, type);
std::unique_lock<std::recursive_mutex> lock;
ExecutionContext exe_ctx(m_opaque_sp.get(), lock);
@@ -1312,12 +1269,11 @@ SBThread SBThread::GetExtendedBacktraceThread(const char *type) {
}
}
- return LLDB_RECORD_RESULT(sb_origin_thread);
+ return sb_origin_thread;
}
uint32_t SBThread::GetExtendedBacktraceOriginatingIndexID() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBThread,
- GetExtendedBacktraceOriginatingIndexID);
+ LLDB_INSTRUMENT_VA(this);
ThreadSP thread_sp(m_opaque_sp->GetThreadSP());
if (thread_sp)
@@ -1326,29 +1282,27 @@ uint32_t SBThread::GetExtendedBacktraceOriginatingIndexID() {
}
SBValue SBThread::GetCurrentException() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBValue, SBThread, GetCurrentException);
+ LLDB_INSTRUMENT_VA(this);
ThreadSP thread_sp(m_opaque_sp->GetThreadSP());
if (!thread_sp)
- return LLDB_RECORD_RESULT(SBValue());
+ return SBValue();
- return LLDB_RECORD_RESULT(SBValue(thread_sp->GetCurrentException()));
+ return SBValue(thread_sp->GetCurrentException());
}
SBThread SBThread::GetCurrentExceptionBacktrace() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBThread, SBThread,
- GetCurrentExceptionBacktrace);
+ LLDB_INSTRUMENT_VA(this);
ThreadSP thread_sp(m_opaque_sp->GetThreadSP());
if (!thread_sp)
- return LLDB_RECORD_RESULT(SBThread());
+ return SBThread();
- return LLDB_RECORD_RESULT(
- SBThread(thread_sp->GetCurrentExceptionBacktrace()));
+ return SBThread(thread_sp->GetCurrentExceptionBacktrace());
}
bool SBThread::SafeToCallFunctions() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBThread, SafeToCallFunctions);
+ LLDB_INSTRUMENT_VA(this);
ThreadSP thread_sp(m_opaque_sp->GetThreadSP());
if (thread_sp)
@@ -1364,109 +1318,49 @@ lldb_private::Thread *SBThread::get() {
return m_opaque_sp->GetThreadSP().get();
}
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBThread>(Registry &R) {
- LLDB_REGISTER_STATIC_METHOD(const char *, SBThread, GetBroadcasterClassName,
- ());
- LLDB_REGISTER_CONSTRUCTOR(SBThread, ());
- LLDB_REGISTER_CONSTRUCTOR(SBThread, (const lldb::ThreadSP &));
- LLDB_REGISTER_CONSTRUCTOR(SBThread, (const lldb::SBThread &));
- LLDB_REGISTER_METHOD(const lldb::SBThread &,
- SBThread, operator=,(const lldb::SBThread &));
- LLDB_REGISTER_METHOD_CONST(lldb::SBQueue, SBThread, GetQueue, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBThread, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBThread, operator bool, ());
- LLDB_REGISTER_METHOD(void, SBThread, Clear, ());
- LLDB_REGISTER_METHOD(lldb::StopReason, SBThread, GetStopReason, ());
- LLDB_REGISTER_METHOD(size_t, SBThread, GetStopReasonDataCount, ());
- LLDB_REGISTER_METHOD(uint64_t, SBThread, GetStopReasonDataAtIndex,
- (uint32_t));
- LLDB_REGISTER_METHOD(bool, SBThread, GetStopReasonExtendedInfoAsJSON,
- (lldb::SBStream &));
- LLDB_REGISTER_METHOD(lldb::SBThreadCollection, SBThread,
- GetStopReasonExtendedBacktraces,
- (lldb::InstrumentationRuntimeType));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBThread, GetStopReturnValue, ());
- LLDB_REGISTER_METHOD_CONST(lldb::tid_t, SBThread, GetThreadID, ());
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBThread, GetIndexID, ());
- LLDB_REGISTER_METHOD_CONST(const char *, SBThread, GetName, ());
- LLDB_REGISTER_METHOD_CONST(const char *, SBThread, GetQueueName, ());
- LLDB_REGISTER_METHOD_CONST(lldb::queue_id_t, SBThread, GetQueueID, ());
- LLDB_REGISTER_METHOD(bool, SBThread, GetInfoItemByPathAsString,
- (const char *, lldb::SBStream &));
- LLDB_REGISTER_METHOD(void, SBThread, StepOver, (lldb::RunMode));
- LLDB_REGISTER_METHOD(void, SBThread, StepOver,
- (lldb::RunMode, lldb::SBError &));
- LLDB_REGISTER_METHOD(void, SBThread, StepInto, (lldb::RunMode));
- LLDB_REGISTER_METHOD(void, SBThread, StepInto,
- (const char *, lldb::RunMode));
- LLDB_REGISTER_METHOD(
- void, SBThread, StepInto,
- (const char *, uint32_t, lldb::SBError &, lldb::RunMode));
- LLDB_REGISTER_METHOD(void, SBThread, StepOut, ());
- LLDB_REGISTER_METHOD(void, SBThread, StepOut, (lldb::SBError &));
- LLDB_REGISTER_METHOD(void, SBThread, StepOutOfFrame, (lldb::SBFrame &));
- LLDB_REGISTER_METHOD(void, SBThread, StepOutOfFrame,
- (lldb::SBFrame &, lldb::SBError &));
- LLDB_REGISTER_METHOD(void, SBThread, StepInstruction, (bool));
- LLDB_REGISTER_METHOD(void, SBThread, StepInstruction,
- (bool, lldb::SBError &));
- LLDB_REGISTER_METHOD(void, SBThread, RunToAddress, (lldb::addr_t));
- LLDB_REGISTER_METHOD(void, SBThread, RunToAddress,
- (lldb::addr_t, lldb::SBError &));
- LLDB_REGISTER_METHOD(lldb::SBError, SBThread, StepOverUntil,
- (lldb::SBFrame &, lldb::SBFileSpec &, uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBError, SBThread, StepUsingScriptedThreadPlan,
- (const char *));
- LLDB_REGISTER_METHOD(lldb::SBError, SBThread, StepUsingScriptedThreadPlan,
- (const char *, bool));
- LLDB_REGISTER_METHOD(lldb::SBError, SBThread, StepUsingScriptedThreadPlan,
- (const char *, SBStructuredData &, bool));
- LLDB_REGISTER_METHOD(lldb::SBError, SBThread, JumpToLine,
- (lldb::SBFileSpec &, uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBError, SBThread, ReturnFromFrame,
- (lldb::SBFrame &, lldb::SBValue &));
- LLDB_REGISTER_METHOD(lldb::SBError, SBThread, UnwindInnermostExpression,
- ());
- LLDB_REGISTER_METHOD(bool, SBThread, Suspend, ());
- LLDB_REGISTER_METHOD(bool, SBThread, Suspend, (lldb::SBError &));
- LLDB_REGISTER_METHOD(bool, SBThread, Resume, ());
- LLDB_REGISTER_METHOD(bool, SBThread, Resume, (lldb::SBError &));
- LLDB_REGISTER_METHOD(bool, SBThread, IsSuspended, ());
- LLDB_REGISTER_METHOD(bool, SBThread, IsStopped, ());
- LLDB_REGISTER_METHOD(lldb::SBProcess, SBThread, GetProcess, ());
- LLDB_REGISTER_METHOD(uint32_t, SBThread, GetNumFrames, ());
- LLDB_REGISTER_METHOD(lldb::SBFrame, SBThread, GetFrameAtIndex, (uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBFrame, SBThread, GetSelectedFrame, ());
- LLDB_REGISTER_METHOD(lldb::SBFrame, SBThread, SetSelectedFrame, (uint32_t));
- LLDB_REGISTER_STATIC_METHOD(bool, SBThread, EventIsThreadEvent,
- (const lldb::SBEvent &));
- LLDB_REGISTER_STATIC_METHOD(lldb::SBFrame, SBThread, GetStackFrameFromEvent,
- (const lldb::SBEvent &));
- LLDB_REGISTER_STATIC_METHOD(lldb::SBThread, SBThread, GetThreadFromEvent,
- (const lldb::SBEvent &));
- LLDB_REGISTER_METHOD_CONST(bool,
- SBThread, operator==,(const lldb::SBThread &));
- LLDB_REGISTER_METHOD_CONST(bool,
- SBThread, operator!=,(const lldb::SBThread &));
- LLDB_REGISTER_METHOD_CONST(bool, SBThread, GetStatus, (lldb::SBStream &));
- LLDB_REGISTER_METHOD_CONST(bool, SBThread, GetDescription,
- (lldb::SBStream &));
- LLDB_REGISTER_METHOD_CONST(bool, SBThread, GetDescription,
- (lldb::SBStream &, bool));
- LLDB_REGISTER_METHOD(lldb::SBThread, SBThread, GetExtendedBacktraceThread,
- (const char *));
- LLDB_REGISTER_METHOD(uint32_t, SBThread,
- GetExtendedBacktraceOriginatingIndexID, ());
- LLDB_REGISTER_METHOD(lldb::SBValue, SBThread, GetCurrentException, ());
- LLDB_REGISTER_METHOD(lldb::SBThread, SBThread, GetCurrentExceptionBacktrace,
- ());
- LLDB_REGISTER_METHOD(bool, SBThread, SafeToCallFunctions, ());
- LLDB_REGISTER_CHAR_PTR_METHOD(size_t, SBThread, GetStopDescription);
-}
+SBValue SBThread::GetSiginfo(SBError &error) {
+ LLDB_INSTRUMENT_VA(this, error);
-}
+ SBValue value;
+ SBProcess process = GetProcess();
+ if (!process.IsValid()) {
+ error.SetErrorString("no process");
+ return value;
+ }
+ SBTarget target = process.GetTarget();
+ if (!target.IsValid()) {
+ error.SetErrorString("unable to get target");
+ return value;
+ }
+ SBPlatform platform = target.GetPlatform();
+ if (!platform.IsValid()) {
+ error.SetErrorString("unable to get platform");
+ return value;
+ }
+ CompilerType type = platform.GetSP()->GetSiginfoType(
+ target.GetSP()->GetArchitecture().GetTriple());
+ if (!type.IsValid()) {
+ error.SetErrorString("no siginfo_t for the platform");
+ return value;
+ }
+ llvm::Optional<uint64_t> type_size = type.GetByteSize(nullptr);
+ assert(type_size);
+ ThreadSP thread_sp = m_opaque_sp->GetThreadSP();
+ if (!thread_sp) {
+ error.SetErrorString("unable to get thread");
+ return value;
+ }
+ llvm::Expected<std::unique_ptr<llvm::MemoryBuffer>> data =
+ thread_sp->GetSiginfo(type_size.getValue());
+ if (!data) {
+ error.SetErrorString(llvm::toString(data.takeError()).c_str());
+ return value;
+ }
+ SBData sb_data;
+ sb_data.SetData(error, data.get()->getBufferStart(),
+ data.get()->getBufferSize(), process.GetByteOrder(), 0);
+ if (!sb_data.IsValid())
+ return value;
+
+ return target.CreateValueFromData("siginfo", sb_data, type);
}
diff --git a/contrib/llvm-project/lldb/source/API/SBThreadCollection.cpp b/contrib/llvm-project/lldb/source/API/SBThreadCollection.cpp
index bfca864d6bcd..9d688e012239 100644
--- a/contrib/llvm-project/lldb/source/API/SBThreadCollection.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBThreadCollection.cpp
@@ -7,32 +7,27 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBThreadCollection.h"
-#include "SBReproducerPrivate.h"
#include "lldb/API/SBThread.h"
#include "lldb/Target/ThreadList.h"
+#include "lldb/Utility/Instrumentation.h"
using namespace lldb;
using namespace lldb_private;
-SBThreadCollection::SBThreadCollection() : m_opaque_sp() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBThreadCollection);
-}
+SBThreadCollection::SBThreadCollection() { LLDB_INSTRUMENT_VA(this); }
SBThreadCollection::SBThreadCollection(const SBThreadCollection &rhs)
: m_opaque_sp(rhs.m_opaque_sp) {
- LLDB_RECORD_CONSTRUCTOR(SBThreadCollection,
- (const lldb::SBThreadCollection &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
const SBThreadCollection &SBThreadCollection::
operator=(const SBThreadCollection &rhs) {
- LLDB_RECORD_METHOD(
- const lldb::SBThreadCollection &,
- SBThreadCollection, operator=,(const lldb::SBThreadCollection &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_sp = rhs.m_opaque_sp;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBThreadCollection::SBThreadCollection(const ThreadCollectionSP &threads)
@@ -61,17 +56,17 @@ const lldb::ThreadCollectionSP &SBThreadCollection::operator*() const {
}
bool SBThreadCollection::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBThreadCollection, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBThreadCollection::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBThreadCollection, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp.get() != nullptr;
}
size_t SBThreadCollection::GetSize() {
- LLDB_RECORD_METHOD_NO_ARGS(size_t, SBThreadCollection, GetSize);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_sp)
return m_opaque_sp->GetSize();
@@ -79,32 +74,10 @@ size_t SBThreadCollection::GetSize() {
}
SBThread SBThreadCollection::GetThreadAtIndex(size_t idx) {
- LLDB_RECORD_METHOD(lldb::SBThread, SBThreadCollection, GetThreadAtIndex,
- (size_t), idx);
+ LLDB_INSTRUMENT_VA(this, idx);
SBThread thread;
if (m_opaque_sp && idx < m_opaque_sp->GetSize())
thread = m_opaque_sp->GetThreadAtIndex(idx);
- return LLDB_RECORD_RESULT(thread);
-}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBThreadCollection>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBThreadCollection, ());
- LLDB_REGISTER_CONSTRUCTOR(SBThreadCollection,
- (const lldb::SBThreadCollection &));
- LLDB_REGISTER_METHOD(
- const lldb::SBThreadCollection &,
- SBThreadCollection, operator=,(const lldb::SBThreadCollection &));
- LLDB_REGISTER_METHOD_CONST(bool, SBThreadCollection, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBThreadCollection, operator bool, ());
- LLDB_REGISTER_METHOD(size_t, SBThreadCollection, GetSize, ());
- LLDB_REGISTER_METHOD(lldb::SBThread, SBThreadCollection, GetThreadAtIndex,
- (size_t));
-}
-
-}
+ return thread;
}
diff --git a/contrib/llvm-project/lldb/source/API/SBThreadPlan.cpp b/contrib/llvm-project/lldb/source/API/SBThreadPlan.cpp
index 99ecb321595f..2e66ac120839 100644
--- a/contrib/llvm-project/lldb/source/API/SBThreadPlan.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBThreadPlan.cpp
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#include "SBReproducerPrivate.h"
#include "lldb/API/SBThread.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/API/SBFileSpec.h"
#include "lldb/API/SBStream.h"
@@ -50,22 +50,20 @@ using namespace lldb;
using namespace lldb_private;
// Constructors
-SBThreadPlan::SBThreadPlan() { LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBThreadPlan); }
+SBThreadPlan::SBThreadPlan() { LLDB_INSTRUMENT_VA(this); }
SBThreadPlan::SBThreadPlan(const ThreadPlanSP &lldb_object_sp)
: m_opaque_wp(lldb_object_sp) {
- LLDB_RECORD_CONSTRUCTOR(SBThreadPlan, (const lldb::ThreadPlanSP &),
- lldb_object_sp);
+ LLDB_INSTRUMENT_VA(this, lldb_object_sp);
}
SBThreadPlan::SBThreadPlan(const SBThreadPlan &rhs)
: m_opaque_wp(rhs.m_opaque_wp) {
- LLDB_RECORD_CONSTRUCTOR(SBThreadPlan, (const lldb::SBThreadPlan &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
SBThreadPlan::SBThreadPlan(lldb::SBThread &sb_thread, const char *class_name) {
- LLDB_RECORD_CONSTRUCTOR(SBThreadPlan, (lldb::SBThread &, const char *),
- sb_thread, class_name);
+ LLDB_INSTRUMENT_VA(this, sb_thread, class_name);
Thread *thread = sb_thread.get();
if (thread)
@@ -75,9 +73,7 @@ SBThreadPlan::SBThreadPlan(lldb::SBThread &sb_thread, const char *class_name) {
SBThreadPlan::SBThreadPlan(lldb::SBThread &sb_thread, const char *class_name,
lldb::SBStructuredData &args_data) {
- LLDB_RECORD_CONSTRUCTOR(SBThreadPlan, (lldb::SBThread &, const char *,
- SBStructuredData &),
- sb_thread, class_name, args_data);
+ LLDB_INSTRUMENT_VA(this, sb_thread, class_name, args_data);
Thread *thread = sb_thread.get();
if (thread)
@@ -88,65 +84,61 @@ SBThreadPlan::SBThreadPlan(lldb::SBThread &sb_thread, const char *class_name,
// Assignment operator
const lldb::SBThreadPlan &SBThreadPlan::operator=(const SBThreadPlan &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBThreadPlan &,
- SBThreadPlan, operator=,(const lldb::SBThreadPlan &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_wp = rhs.m_opaque_wp;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
// Destructor
SBThreadPlan::~SBThreadPlan() = default;
bool SBThreadPlan::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBThreadPlan, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBThreadPlan::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBThreadPlan, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return static_cast<bool>(GetSP());
}
void SBThreadPlan::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBThreadPlan, Clear);
+ LLDB_INSTRUMENT_VA(this);
m_opaque_wp.reset();
}
lldb::StopReason SBThreadPlan::GetStopReason() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::StopReason, SBThreadPlan, GetStopReason);
+ LLDB_INSTRUMENT_VA(this);
return eStopReasonNone;
}
size_t SBThreadPlan::GetStopReasonDataCount() {
- LLDB_RECORD_METHOD_NO_ARGS(size_t, SBThreadPlan, GetStopReasonDataCount);
+ LLDB_INSTRUMENT_VA(this);
return 0;
}
uint64_t SBThreadPlan::GetStopReasonDataAtIndex(uint32_t idx) {
- LLDB_RECORD_METHOD(uint64_t, SBThreadPlan, GetStopReasonDataAtIndex,
- (uint32_t), idx);
+ LLDB_INSTRUMENT_VA(this, idx);
return 0;
}
SBThread SBThreadPlan::GetThread() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::SBThread, SBThreadPlan, GetThread);
+ LLDB_INSTRUMENT_VA(this);
ThreadPlanSP thread_plan_sp(GetSP());
if (thread_plan_sp) {
- return LLDB_RECORD_RESULT(
- SBThread(thread_plan_sp->GetThread().shared_from_this()));
+ return SBThread(thread_plan_sp->GetThread().shared_from_this());
} else
- return LLDB_RECORD_RESULT(SBThread());
+ return SBThread();
}
bool SBThreadPlan::GetDescription(lldb::SBStream &description) const {
- LLDB_RECORD_METHOD_CONST(bool, SBThreadPlan, GetDescription,
- (lldb::SBStream &), description);
+ LLDB_INSTRUMENT_VA(this, description);
ThreadPlanSP thread_plan_sp(GetSP());
if (thread_plan_sp) {
@@ -162,7 +154,7 @@ void SBThreadPlan::SetThreadPlan(const ThreadPlanSP &lldb_object_wp) {
}
void SBThreadPlan::SetPlanComplete(bool success) {
- LLDB_RECORD_METHOD(void, SBThreadPlan, SetPlanComplete, (bool), success);
+ LLDB_INSTRUMENT_VA(this, success);
ThreadPlanSP thread_plan_sp(GetSP());
if (thread_plan_sp)
@@ -170,7 +162,7 @@ void SBThreadPlan::SetPlanComplete(bool success) {
}
bool SBThreadPlan::IsPlanComplete() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBThreadPlan, IsPlanComplete);
+ LLDB_INSTRUMENT_VA(this);
ThreadPlanSP thread_plan_sp(GetSP());
if (thread_plan_sp)
@@ -179,7 +171,7 @@ bool SBThreadPlan::IsPlanComplete() {
}
bool SBThreadPlan::IsPlanStale() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBThreadPlan, IsPlanStale);
+ LLDB_INSTRUMENT_VA(this);
ThreadPlanSP thread_plan_sp(GetSP());
if (thread_plan_sp)
@@ -188,7 +180,7 @@ bool SBThreadPlan::IsPlanStale() {
}
bool SBThreadPlan::IsValid() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBThreadPlan, IsValid);
+ LLDB_INSTRUMENT_VA(this);
ThreadPlanSP thread_plan_sp(GetSP());
if (thread_plan_sp)
@@ -197,7 +189,7 @@ bool SBThreadPlan::IsValid() {
}
bool SBThreadPlan::GetStopOthers() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBThreadPlan, GetStopOthers);
+ LLDB_INSTRUMENT_VA(this);
ThreadPlanSP thread_plan_sp(GetSP());
if (thread_plan_sp)
@@ -206,7 +198,7 @@ bool SBThreadPlan::GetStopOthers() {
}
void SBThreadPlan::SetStopOthers(bool stop_others) {
- LLDB_RECORD_METHOD(void, SBThreadPlan, SetStopOthers, (bool), stop_others);
+ LLDB_INSTRUMENT_VA(this, stop_others);
ThreadPlanSP thread_plan_sp(GetSP());
if (thread_plan_sp)
@@ -222,27 +214,21 @@ void SBThreadPlan::SetStopOthers(bool stop_others) {
SBThreadPlan
SBThreadPlan::QueueThreadPlanForStepOverRange(SBAddress &sb_start_address,
lldb::addr_t size) {
- LLDB_RECORD_METHOD(lldb::SBThreadPlan, SBThreadPlan,
- QueueThreadPlanForStepOverRange,
- (lldb::SBAddress &, lldb::addr_t), sb_start_address, size);
+ LLDB_INSTRUMENT_VA(this, sb_start_address, size);
SBError error;
- return LLDB_RECORD_RESULT(
- QueueThreadPlanForStepOverRange(sb_start_address, size, error));
+ return QueueThreadPlanForStepOverRange(sb_start_address, size, error);
}
SBThreadPlan SBThreadPlan::QueueThreadPlanForStepOverRange(
SBAddress &sb_start_address, lldb::addr_t size, SBError &error) {
- LLDB_RECORD_METHOD(lldb::SBThreadPlan, SBThreadPlan,
- QueueThreadPlanForStepOverRange,
- (lldb::SBAddress &, lldb::addr_t, lldb::SBError &),
- sb_start_address, size, error);
+ LLDB_INSTRUMENT_VA(this, sb_start_address, size, error);
ThreadPlanSP thread_plan_sp(GetSP());
if (thread_plan_sp) {
Address *start_address = sb_start_address.get();
if (!start_address) {
- return LLDB_RECORD_RESULT(SBThreadPlan());
+ return SBThreadPlan();
}
AddressRange range(*start_address, size);
@@ -259,36 +245,30 @@ SBThreadPlan SBThreadPlan::QueueThreadPlanForStepOverRange(
else
plan.GetSP()->SetPrivate(true);
- return LLDB_RECORD_RESULT(plan);
+ return plan;
}
- return LLDB_RECORD_RESULT(SBThreadPlan());
+ return SBThreadPlan();
}
SBThreadPlan
SBThreadPlan::QueueThreadPlanForStepInRange(SBAddress &sb_start_address,
lldb::addr_t size) {
- LLDB_RECORD_METHOD(lldb::SBThreadPlan, SBThreadPlan,
- QueueThreadPlanForStepInRange,
- (lldb::SBAddress &, lldb::addr_t), sb_start_address, size);
+ LLDB_INSTRUMENT_VA(this, sb_start_address, size);
SBError error;
- return LLDB_RECORD_RESULT(
- QueueThreadPlanForStepInRange(sb_start_address, size, error));
+ return QueueThreadPlanForStepInRange(sb_start_address, size, error);
}
SBThreadPlan
SBThreadPlan::QueueThreadPlanForStepInRange(SBAddress &sb_start_address,
lldb::addr_t size, SBError &error) {
- LLDB_RECORD_METHOD(lldb::SBThreadPlan, SBThreadPlan,
- QueueThreadPlanForStepInRange,
- (lldb::SBAddress &, lldb::addr_t, lldb::SBError &),
- sb_start_address, size, error);
+ LLDB_INSTRUMENT_VA(this, sb_start_address, size, error);
ThreadPlanSP thread_plan_sp(GetSP());
if (thread_plan_sp) {
Address *start_address = sb_start_address.get();
if (!start_address) {
- return LLDB_RECORD_RESULT(SBThreadPlan());
+ return SBThreadPlan();
}
AddressRange range(*start_address, size);
@@ -305,30 +285,24 @@ SBThreadPlan::QueueThreadPlanForStepInRange(SBAddress &sb_start_address,
else
plan.GetSP()->SetPrivate(true);
- return LLDB_RECORD_RESULT(plan);
+ return plan;
}
- return LLDB_RECORD_RESULT(SBThreadPlan());
+ return SBThreadPlan();
}
SBThreadPlan
SBThreadPlan::QueueThreadPlanForStepOut(uint32_t frame_idx_to_step_to,
bool first_insn) {
- LLDB_RECORD_METHOD(lldb::SBThreadPlan, SBThreadPlan,
- QueueThreadPlanForStepOut, (uint32_t, bool),
- frame_idx_to_step_to, first_insn);
+ LLDB_INSTRUMENT_VA(this, frame_idx_to_step_to, first_insn);
SBError error;
- return LLDB_RECORD_RESULT(
- QueueThreadPlanForStepOut(frame_idx_to_step_to, first_insn, error));
+ return QueueThreadPlanForStepOut(frame_idx_to_step_to, first_insn, error);
}
SBThreadPlan
SBThreadPlan::QueueThreadPlanForStepOut(uint32_t frame_idx_to_step_to,
bool first_insn, SBError &error) {
- LLDB_RECORD_METHOD(lldb::SBThreadPlan, SBThreadPlan,
- QueueThreadPlanForStepOut,
- (uint32_t, bool, lldb::SBError &), frame_idx_to_step_to,
- first_insn, error);
+ LLDB_INSTRUMENT_VA(this, frame_idx_to_step_to, first_insn, error);
ThreadPlanSP thread_plan_sp(GetSP());
if (thread_plan_sp) {
@@ -347,32 +321,28 @@ SBThreadPlan::QueueThreadPlanForStepOut(uint32_t frame_idx_to_step_to,
else
plan.GetSP()->SetPrivate(true);
- return LLDB_RECORD_RESULT(plan);
+ return plan;
}
- return LLDB_RECORD_RESULT(SBThreadPlan());
+ return SBThreadPlan();
}
SBThreadPlan
SBThreadPlan::QueueThreadPlanForRunToAddress(SBAddress sb_address) {
- LLDB_RECORD_METHOD(lldb::SBThreadPlan, SBThreadPlan,
- QueueThreadPlanForRunToAddress, (lldb::SBAddress),
- sb_address);
+ LLDB_INSTRUMENT_VA(this, sb_address);
SBError error;
- return LLDB_RECORD_RESULT(QueueThreadPlanForRunToAddress(sb_address, error));
+ return QueueThreadPlanForRunToAddress(sb_address, error);
}
SBThreadPlan SBThreadPlan::QueueThreadPlanForRunToAddress(SBAddress sb_address,
SBError &error) {
- LLDB_RECORD_METHOD(lldb::SBThreadPlan, SBThreadPlan,
- QueueThreadPlanForRunToAddress,
- (lldb::SBAddress, lldb::SBError &), sb_address, error);
+ LLDB_INSTRUMENT_VA(this, sb_address, error);
ThreadPlanSP thread_plan_sp(GetSP());
if (thread_plan_sp) {
Address *address = sb_address.get();
if (!address)
- return LLDB_RECORD_RESULT(SBThreadPlan());
+ return SBThreadPlan();
Status plan_status;
SBThreadPlan plan =
@@ -384,28 +354,23 @@ SBThreadPlan SBThreadPlan::QueueThreadPlanForRunToAddress(SBAddress sb_address,
else
plan.GetSP()->SetPrivate(true);
- return LLDB_RECORD_RESULT(plan);
+ return plan;
}
- return LLDB_RECORD_RESULT(SBThreadPlan());
+ return SBThreadPlan();
}
SBThreadPlan
SBThreadPlan::QueueThreadPlanForStepScripted(const char *script_class_name) {
- LLDB_RECORD_METHOD(lldb::SBThreadPlan, SBThreadPlan,
- QueueThreadPlanForStepScripted, (const char *),
- script_class_name);
+ LLDB_INSTRUMENT_VA(this, script_class_name);
SBError error;
- return LLDB_RECORD_RESULT(
- QueueThreadPlanForStepScripted(script_class_name, error));
+ return QueueThreadPlanForStepScripted(script_class_name, error);
}
SBThreadPlan
SBThreadPlan::QueueThreadPlanForStepScripted(const char *script_class_name,
SBError &error) {
- LLDB_RECORD_METHOD(lldb::SBThreadPlan, SBThreadPlan,
- QueueThreadPlanForStepScripted,
- (const char *, lldb::SBError &), script_class_name, error);
+ LLDB_INSTRUMENT_VA(this, script_class_name, error);
ThreadPlanSP thread_plan_sp(GetSP());
if (thread_plan_sp) {
@@ -420,19 +385,16 @@ SBThreadPlan::QueueThreadPlanForStepScripted(const char *script_class_name,
else
plan.GetSP()->SetPrivate(true);
- return LLDB_RECORD_RESULT(plan);
+ return plan;
}
- return LLDB_RECORD_RESULT(SBThreadPlan());
+ return SBThreadPlan();
}
SBThreadPlan
SBThreadPlan::QueueThreadPlanForStepScripted(const char *script_class_name,
lldb::SBStructuredData &args_data,
SBError &error) {
- LLDB_RECORD_METHOD(lldb::SBThreadPlan, SBThreadPlan,
- QueueThreadPlanForStepScripted,
- (const char *, lldb::SBStructuredData &, lldb::SBError &),
- script_class_name, args_data, error);
+ LLDB_INSTRUMENT_VA(this, script_class_name, args_data, error);
ThreadPlanSP thread_plan_sp(GetSP());
if (thread_plan_sp) {
@@ -447,73 +409,8 @@ SBThreadPlan::QueueThreadPlanForStepScripted(const char *script_class_name,
else
plan.GetSP()->SetPrivate(true);
- return LLDB_RECORD_RESULT(plan);
+ return plan;
} else {
- return LLDB_RECORD_RESULT(SBThreadPlan());
+ return SBThreadPlan();
}
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBThreadPlan>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBThreadPlan, ());
- LLDB_REGISTER_CONSTRUCTOR(SBThreadPlan, (const lldb::ThreadPlanSP &));
- LLDB_REGISTER_CONSTRUCTOR(SBThreadPlan, (const lldb::SBThreadPlan &));
- LLDB_REGISTER_CONSTRUCTOR(SBThreadPlan, (lldb::SBThread &, const char *));
- LLDB_REGISTER_CONSTRUCTOR(SBThreadPlan, (lldb::SBThread &, const char *,
- lldb::SBStructuredData &));
- LLDB_REGISTER_METHOD(const lldb::SBThreadPlan &,
- SBThreadPlan, operator=,(const lldb::SBThreadPlan &));
- LLDB_REGISTER_METHOD_CONST(bool, SBThreadPlan, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBThreadPlan, operator bool, ());
- LLDB_REGISTER_METHOD(void, SBThreadPlan, Clear, ());
- LLDB_REGISTER_METHOD(lldb::StopReason, SBThreadPlan, GetStopReason, ());
- LLDB_REGISTER_METHOD(size_t, SBThreadPlan, GetStopReasonDataCount, ());
- LLDB_REGISTER_METHOD(uint64_t, SBThreadPlan, GetStopReasonDataAtIndex,
- (uint32_t));
- LLDB_REGISTER_METHOD_CONST(lldb::SBThread, SBThreadPlan, GetThread, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBThreadPlan, GetDescription,
- (lldb::SBStream &));
- LLDB_REGISTER_METHOD(void, SBThreadPlan, SetPlanComplete, (bool));
- LLDB_REGISTER_METHOD(bool, SBThreadPlan, IsPlanComplete, ());
- LLDB_REGISTER_METHOD(bool, SBThreadPlan, IsPlanStale, ());
- LLDB_REGISTER_METHOD(bool, SBThreadPlan, IsValid, ());
- LLDB_REGISTER_METHOD(void, SBThreadPlan, SetStopOthers, (bool));
- LLDB_REGISTER_METHOD(bool, SBThreadPlan, GetStopOthers, ());
- LLDB_REGISTER_METHOD(lldb::SBThreadPlan, SBThreadPlan,
- QueueThreadPlanForStepOverRange,
- (lldb::SBAddress &, lldb::addr_t));
- LLDB_REGISTER_METHOD(lldb::SBThreadPlan, SBThreadPlan,
- QueueThreadPlanForStepOverRange,
- (lldb::SBAddress &, lldb::addr_t, lldb::SBError &));
- LLDB_REGISTER_METHOD(lldb::SBThreadPlan, SBThreadPlan,
- QueueThreadPlanForStepInRange,
- (lldb::SBAddress &, lldb::addr_t));
- LLDB_REGISTER_METHOD(lldb::SBThreadPlan, SBThreadPlan,
- QueueThreadPlanForStepInRange,
- (lldb::SBAddress &, lldb::addr_t, lldb::SBError &));
- LLDB_REGISTER_METHOD(lldb::SBThreadPlan, SBThreadPlan,
- QueueThreadPlanForStepOut, (uint32_t, bool));
- LLDB_REGISTER_METHOD(lldb::SBThreadPlan, SBThreadPlan,
- QueueThreadPlanForStepOut,
- (uint32_t, bool, lldb::SBError &));
- LLDB_REGISTER_METHOD(lldb::SBThreadPlan, SBThreadPlan,
- QueueThreadPlanForRunToAddress, (lldb::SBAddress));
- LLDB_REGISTER_METHOD(lldb::SBThreadPlan, SBThreadPlan,
- QueueThreadPlanForRunToAddress,
- (lldb::SBAddress, lldb::SBError &));
- LLDB_REGISTER_METHOD(lldb::SBThreadPlan, SBThreadPlan,
- QueueThreadPlanForStepScripted, (const char *));
- LLDB_REGISTER_METHOD(lldb::SBThreadPlan, SBThreadPlan,
- QueueThreadPlanForStepScripted,
- (const char *, lldb::SBError &));
- LLDB_REGISTER_METHOD(lldb::SBThreadPlan, SBThreadPlan,
- QueueThreadPlanForStepScripted,
- (const char *, lldb::SBStructuredData &,
- lldb::SBError &));
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBTrace.cpp b/contrib/llvm-project/lldb/source/API/SBTrace.cpp
index 079c33a562c0..64a675e2e16c 100644
--- a/contrib/llvm-project/lldb/source/API/SBTrace.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBTrace.cpp
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#include "SBReproducerPrivate.h"
#include "lldb/Target/Process.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/API/SBStructuredData.h"
#include "lldb/API/SBThread.h"
@@ -20,35 +20,31 @@
using namespace lldb;
using namespace lldb_private;
-SBTrace::SBTrace() { LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBTrace); }
+SBTrace::SBTrace() { LLDB_INSTRUMENT_VA(this); }
SBTrace::SBTrace(const lldb::TraceSP &trace_sp) : m_opaque_sp(trace_sp) {
- LLDB_RECORD_CONSTRUCTOR(SBTrace, (const lldb::TraceSP &), trace_sp);
+ LLDB_INSTRUMENT_VA(this, trace_sp);
}
const char *SBTrace::GetStartConfigurationHelp() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBTrace, GetStartConfigurationHelp);
- return LLDB_RECORD_RESULT(
- m_opaque_sp ? m_opaque_sp->GetStartConfigurationHelp() : nullptr);
+ LLDB_INSTRUMENT_VA(this);
+ return m_opaque_sp ? m_opaque_sp->GetStartConfigurationHelp() : nullptr;
}
SBError SBTrace::Start(const SBStructuredData &configuration) {
- LLDB_RECORD_METHOD(SBError, SBTrace, Start, (const SBStructuredData &),
- configuration);
+ LLDB_INSTRUMENT_VA(this, configuration);
SBError error;
if (!m_opaque_sp)
error.SetErrorString("error: invalid trace");
else if (llvm::Error err =
m_opaque_sp->Start(configuration.m_impl_up->GetObjectSP()))
error.SetErrorString(llvm::toString(std::move(err)).c_str());
- return LLDB_RECORD_RESULT(error);
+ return error;
}
SBError SBTrace::Start(const SBThread &thread,
const SBStructuredData &configuration) {
- LLDB_RECORD_METHOD(SBError, SBTrace, Start,
- (const SBThread &, const SBStructuredData &), thread,
- configuration);
+ LLDB_INSTRUMENT_VA(this, thread, configuration);
SBError error;
if (!m_opaque_sp)
@@ -60,55 +56,35 @@ SBError SBTrace::Start(const SBThread &thread,
error.SetErrorString(llvm::toString(std::move(err)).c_str());
}
- return LLDB_RECORD_RESULT(error);
+ return error;
}
SBError SBTrace::Stop() {
- LLDB_RECORD_METHOD_NO_ARGS(SBError, SBTrace, Stop);
+ LLDB_INSTRUMENT_VA(this);
SBError error;
if (!m_opaque_sp)
error.SetErrorString("error: invalid trace");
else if (llvm::Error err = m_opaque_sp->Stop())
error.SetErrorString(llvm::toString(std::move(err)).c_str());
- return LLDB_RECORD_RESULT(error);
+ return error;
}
SBError SBTrace::Stop(const SBThread &thread) {
- LLDB_RECORD_METHOD(SBError, SBTrace, Stop, (const SBThread &), thread);
+ LLDB_INSTRUMENT_VA(this, thread);
SBError error;
if (!m_opaque_sp)
error.SetErrorString("error: invalid trace");
else if (llvm::Error err = m_opaque_sp->Stop({thread.GetThreadID()}))
error.SetErrorString(llvm::toString(std::move(err)).c_str());
- return LLDB_RECORD_RESULT(error);
+ return error;
}
bool SBTrace::IsValid() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBTrace, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBTrace::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBTrace, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return (bool)m_opaque_sp;
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBTrace>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBTrace, ());
- LLDB_REGISTER_CONSTRUCTOR(SBTrace, (const lldb::TraceSP &));
- LLDB_REGISTER_METHOD(SBError, SBTrace, Start, (const SBStructuredData &));
- LLDB_REGISTER_METHOD(SBError, SBTrace, Start,
- (const SBThread &, const SBStructuredData &));
- LLDB_REGISTER_METHOD(SBError, SBTrace, Stop, (const SBThread &));
- LLDB_REGISTER_METHOD(SBError, SBTrace, Stop, ());
- LLDB_REGISTER_METHOD(bool, SBTrace, IsValid, ());
- LLDB_REGISTER_METHOD(const char *, SBTrace, GetStartConfigurationHelp, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBTrace, operator bool, ());
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBType.cpp b/contrib/llvm-project/lldb/source/API/SBType.cpp
index 550c4b065914..da9202bf9386 100644
--- a/contrib/llvm-project/lldb/source/API/SBType.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBType.cpp
@@ -7,7 +7,6 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBType.h"
-#include "SBReproducerPrivate.h"
#include "lldb/API/SBDefines.h"
#include "lldb/API/SBModule.h"
#include "lldb/API/SBStream.h"
@@ -17,6 +16,7 @@
#include "lldb/Symbol/Type.h"
#include "lldb/Symbol/TypeSystem.h"
#include "lldb/Utility/ConstString.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Utility/Stream.h"
#include "llvm/ADT/APSInt.h"
@@ -26,7 +26,7 @@
using namespace lldb;
using namespace lldb_private;
-SBType::SBType() : m_opaque_sp() { LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBType); }
+SBType::SBType() { LLDB_INSTRUMENT_VA(this); }
SBType::SBType(const CompilerType &type)
: m_opaque_sp(new TypeImpl(
@@ -38,8 +38,8 @@ SBType::SBType(const lldb::TypeSP &type_sp)
SBType::SBType(const lldb::TypeImplSP &type_impl_sp)
: m_opaque_sp(type_impl_sp) {}
-SBType::SBType(const SBType &rhs) : m_opaque_sp() {
- LLDB_RECORD_CONSTRUCTOR(SBType, (const lldb::SBType &), rhs);
+SBType::SBType(const SBType &rhs) {
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs) {
m_opaque_sp = rhs.m_opaque_sp;
@@ -51,7 +51,7 @@ SBType::SBType(const SBType &rhs) : m_opaque_sp() {
//{}
//
bool SBType::operator==(SBType &rhs) {
- LLDB_RECORD_METHOD(bool, SBType, operator==,(lldb::SBType &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (!IsValid())
return !rhs.IsValid();
@@ -63,7 +63,7 @@ bool SBType::operator==(SBType &rhs) {
}
bool SBType::operator!=(SBType &rhs) {
- LLDB_RECORD_METHOD(bool, SBType, operator!=,(lldb::SBType &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (!IsValid())
return rhs.IsValid();
@@ -81,13 +81,12 @@ void SBType::SetSP(const lldb::TypeImplSP &type_impl_sp) {
}
SBType &SBType::operator=(const SBType &rhs) {
- LLDB_RECORD_METHOD(lldb::SBType &, SBType, operator=,(const lldb::SBType &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs) {
m_opaque_sp = rhs.m_opaque_sp;
}
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBType::~SBType() = default;
@@ -107,11 +106,11 @@ const TypeImpl &SBType::ref() const {
}
bool SBType::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBType, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBType::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBType, operator bool);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_sp.get() == nullptr)
return false;
@@ -120,7 +119,7 @@ SBType::operator bool() const {
}
uint64_t SBType::GetByteSize() {
- LLDB_RECORD_METHOD_NO_ARGS(uint64_t, SBType, GetByteSize);
+ LLDB_INSTRUMENT_VA(this);
if (IsValid())
if (llvm::Optional<uint64_t> size =
@@ -130,7 +129,7 @@ uint64_t SBType::GetByteSize() {
}
bool SBType::IsPointerType() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBType, IsPointerType);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return false;
@@ -138,7 +137,7 @@ bool SBType::IsPointerType() {
}
bool SBType::IsArrayType() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBType, IsArrayType);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return false;
@@ -147,7 +146,7 @@ bool SBType::IsArrayType() {
}
bool SBType::IsVectorType() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBType, IsVectorType);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return false;
@@ -155,7 +154,7 @@ bool SBType::IsVectorType() {
}
bool SBType::IsReferenceType() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBType, IsReferenceType);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return false;
@@ -163,71 +162,66 @@ bool SBType::IsReferenceType() {
}
SBType SBType::GetPointerType() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBType, SBType, GetPointerType);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
- return LLDB_RECORD_RESULT(SBType());
+ return SBType();
- return LLDB_RECORD_RESULT(
- SBType(TypeImplSP(new TypeImpl(m_opaque_sp->GetPointerType()))));
+ return SBType(TypeImplSP(new TypeImpl(m_opaque_sp->GetPointerType())));
}
SBType SBType::GetPointeeType() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBType, SBType, GetPointeeType);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
- return LLDB_RECORD_RESULT(SBType());
- return LLDB_RECORD_RESULT(
- SBType(TypeImplSP(new TypeImpl(m_opaque_sp->GetPointeeType()))));
+ return SBType();
+ return SBType(TypeImplSP(new TypeImpl(m_opaque_sp->GetPointeeType())));
}
SBType SBType::GetReferenceType() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBType, SBType, GetReferenceType);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
- return LLDB_RECORD_RESULT(SBType());
- return LLDB_RECORD_RESULT(
- SBType(TypeImplSP(new TypeImpl(m_opaque_sp->GetReferenceType()))));
+ return SBType();
+ return SBType(TypeImplSP(new TypeImpl(m_opaque_sp->GetReferenceType())));
}
SBType SBType::GetTypedefedType() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBType, SBType, GetTypedefedType);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
- return LLDB_RECORD_RESULT(SBType());
- return LLDB_RECORD_RESULT(
- SBType(TypeImplSP(new TypeImpl(m_opaque_sp->GetTypedefedType()))));
+ return SBType();
+ return SBType(TypeImplSP(new TypeImpl(m_opaque_sp->GetTypedefedType())));
}
SBType SBType::GetDereferencedType() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBType, SBType, GetDereferencedType);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
- return LLDB_RECORD_RESULT(SBType());
- return LLDB_RECORD_RESULT(
- SBType(TypeImplSP(new TypeImpl(m_opaque_sp->GetDereferencedType()))));
+ return SBType();
+ return SBType(TypeImplSP(new TypeImpl(m_opaque_sp->GetDereferencedType())));
}
SBType SBType::GetArrayElementType() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBType, SBType, GetArrayElementType);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
- return LLDB_RECORD_RESULT(SBType());
- return LLDB_RECORD_RESULT(SBType(TypeImplSP(new TypeImpl(
- m_opaque_sp->GetCompilerType(true).GetArrayElementType(nullptr)))));
+ return SBType();
+ return SBType(TypeImplSP(new TypeImpl(
+ m_opaque_sp->GetCompilerType(true).GetArrayElementType(nullptr))));
}
SBType SBType::GetArrayType(uint64_t size) {
- LLDB_RECORD_METHOD(lldb::SBType, SBType, GetArrayType, (uint64_t), size);
+ LLDB_INSTRUMENT_VA(this, size);
if (!IsValid())
- return LLDB_RECORD_RESULT(SBType());
- return LLDB_RECORD_RESULT(SBType(TypeImplSP(
- new TypeImpl(m_opaque_sp->GetCompilerType(true).GetArrayType(size)))));
+ return SBType();
+ return SBType(TypeImplSP(
+ new TypeImpl(m_opaque_sp->GetCompilerType(true).GetArrayType(size))));
}
SBType SBType::GetVectorElementType() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBType, SBType, GetVectorElementType);
+ LLDB_INSTRUMENT_VA(this);
SBType type_sb;
if (IsValid()) {
@@ -236,11 +230,11 @@ SBType SBType::GetVectorElementType() {
nullptr))
type_sb.SetSP(TypeImplSP(new TypeImpl(vector_element_type)));
}
- return LLDB_RECORD_RESULT(type_sb);
+ return type_sb;
}
bool SBType::IsFunctionType() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBType, IsFunctionType);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return false;
@@ -248,7 +242,7 @@ bool SBType::IsFunctionType() {
}
bool SBType::IsPolymorphicClass() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBType, IsPolymorphicClass);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return false;
@@ -256,7 +250,7 @@ bool SBType::IsPolymorphicClass() {
}
bool SBType::IsTypedefType() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBType, IsTypedefType);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return false;
@@ -264,7 +258,7 @@ bool SBType::IsTypedefType() {
}
bool SBType::IsAnonymousType() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBType, IsAnonymousType);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return false;
@@ -272,7 +266,7 @@ bool SBType::IsAnonymousType() {
}
bool SBType::IsScopedEnumerationType() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBType, IsScopedEnumerationType);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return false;
@@ -280,20 +274,19 @@ bool SBType::IsScopedEnumerationType() {
}
lldb::SBType SBType::GetFunctionReturnType() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBType, SBType, GetFunctionReturnType);
+ LLDB_INSTRUMENT_VA(this);
if (IsValid()) {
CompilerType return_type(
m_opaque_sp->GetCompilerType(true).GetFunctionReturnType());
if (return_type.IsValid())
- return LLDB_RECORD_RESULT(SBType(return_type));
+ return SBType(return_type);
}
- return LLDB_RECORD_RESULT(lldb::SBType());
+ return lldb::SBType();
}
lldb::SBTypeList SBType::GetFunctionArgumentTypes() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBTypeList, SBType,
- GetFunctionArgumentTypes);
+ LLDB_INSTRUMENT_VA(this);
SBTypeList sb_type_list;
if (IsValid()) {
@@ -303,11 +296,11 @@ lldb::SBTypeList SBType::GetFunctionArgumentTypes() {
sb_type_list.Append(SBType(func_type.GetFunctionArgumentAtIndex(i)));
}
}
- return LLDB_RECORD_RESULT(sb_type_list);
+ return sb_type_list;
}
uint32_t SBType::GetNumberOfMemberFunctions() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBType, GetNumberOfMemberFunctions);
+ LLDB_INSTRUMENT_VA(this);
if (IsValid()) {
return m_opaque_sp->GetCompilerType(true).GetNumMemberFunctions();
@@ -316,46 +309,43 @@ uint32_t SBType::GetNumberOfMemberFunctions() {
}
lldb::SBTypeMemberFunction SBType::GetMemberFunctionAtIndex(uint32_t idx) {
- LLDB_RECORD_METHOD(lldb::SBTypeMemberFunction, SBType,
- GetMemberFunctionAtIndex, (uint32_t), idx);
+ LLDB_INSTRUMENT_VA(this, idx);
SBTypeMemberFunction sb_func_type;
if (IsValid())
sb_func_type.reset(new TypeMemberFunctionImpl(
m_opaque_sp->GetCompilerType(true).GetMemberFunctionAtIndex(idx)));
- return LLDB_RECORD_RESULT(sb_func_type);
+ return sb_func_type;
}
lldb::SBType SBType::GetUnqualifiedType() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBType, SBType, GetUnqualifiedType);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
- return LLDB_RECORD_RESULT(SBType());
- return LLDB_RECORD_RESULT(
- SBType(TypeImplSP(new TypeImpl(m_opaque_sp->GetUnqualifiedType()))));
+ return SBType();
+ return SBType(TypeImplSP(new TypeImpl(m_opaque_sp->GetUnqualifiedType())));
}
lldb::SBType SBType::GetCanonicalType() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBType, SBType, GetCanonicalType);
+ LLDB_INSTRUMENT_VA(this);
if (IsValid())
- return LLDB_RECORD_RESULT(
- SBType(TypeImplSP(new TypeImpl(m_opaque_sp->GetCanonicalType()))));
- return LLDB_RECORD_RESULT(SBType());
+ return SBType(TypeImplSP(new TypeImpl(m_opaque_sp->GetCanonicalType())));
+ return SBType();
}
SBType SBType::GetEnumerationIntegerType() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBType, SBType, GetEnumerationIntegerType);
+ LLDB_INSTRUMENT_VA(this);
if (IsValid()) {
- return LLDB_RECORD_RESULT(
- SBType(m_opaque_sp->GetCompilerType(true).GetEnumerationIntegerType()));
+ return SBType(
+ m_opaque_sp->GetCompilerType(true).GetEnumerationIntegerType());
}
- return LLDB_RECORD_RESULT(SBType());
+ return SBType();
}
lldb::BasicType SBType::GetBasicType() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::BasicType, SBType, GetBasicType);
+ LLDB_INSTRUMENT_VA(this);
if (IsValid())
return m_opaque_sp->GetCompilerType(false).GetBasicTypeEnumeration();
@@ -363,17 +353,16 @@ lldb::BasicType SBType::GetBasicType() {
}
SBType SBType::GetBasicType(lldb::BasicType basic_type) {
- LLDB_RECORD_METHOD(lldb::SBType, SBType, GetBasicType, (lldb::BasicType),
- basic_type);
+ LLDB_INSTRUMENT_VA(this, basic_type);
if (IsValid() && m_opaque_sp->IsValid())
- return LLDB_RECORD_RESULT(SBType(
- m_opaque_sp->GetTypeSystem(false)->GetBasicTypeFromAST(basic_type)));
- return LLDB_RECORD_RESULT(SBType());
+ return SBType(
+ m_opaque_sp->GetTypeSystem(false)->GetBasicTypeFromAST(basic_type));
+ return SBType();
}
uint32_t SBType::GetNumberOfDirectBaseClasses() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBType, GetNumberOfDirectBaseClasses);
+ LLDB_INSTRUMENT_VA(this);
if (IsValid())
return m_opaque_sp->GetCompilerType(true).GetNumDirectBaseClasses();
@@ -381,7 +370,7 @@ uint32_t SBType::GetNumberOfDirectBaseClasses() {
}
uint32_t SBType::GetNumberOfVirtualBaseClasses() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBType, GetNumberOfVirtualBaseClasses);
+ LLDB_INSTRUMENT_VA(this);
if (IsValid())
return m_opaque_sp->GetCompilerType(true).GetNumVirtualBaseClasses();
@@ -389,7 +378,7 @@ uint32_t SBType::GetNumberOfVirtualBaseClasses() {
}
uint32_t SBType::GetNumberOfFields() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBType, GetNumberOfFields);
+ LLDB_INSTRUMENT_VA(this);
if (IsValid())
return m_opaque_sp->GetCompilerType(true).GetNumFields();
@@ -398,9 +387,7 @@ uint32_t SBType::GetNumberOfFields() {
bool SBType::GetDescription(SBStream &description,
lldb::DescriptionLevel description_level) {
- LLDB_RECORD_METHOD(bool, SBType, GetDescription,
- (lldb::SBStream &, lldb::DescriptionLevel), description,
- description_level);
+ LLDB_INSTRUMENT_VA(this, description, description_level);
Stream &strm = description.ref();
@@ -413,8 +400,7 @@ bool SBType::GetDescription(SBStream &description,
}
SBTypeMember SBType::GetDirectBaseClassAtIndex(uint32_t idx) {
- LLDB_RECORD_METHOD(lldb::SBTypeMember, SBType, GetDirectBaseClassAtIndex,
- (uint32_t), idx);
+ LLDB_INSTRUMENT_VA(this, idx);
SBTypeMember sb_type_member;
if (IsValid()) {
@@ -426,12 +412,11 @@ SBTypeMember SBType::GetDirectBaseClassAtIndex(uint32_t idx) {
sb_type_member.reset(new TypeMemberImpl(
TypeImplSP(new TypeImpl(base_class_type)), bit_offset));
}
- return LLDB_RECORD_RESULT(sb_type_member);
+ return sb_type_member;
}
SBTypeMember SBType::GetVirtualBaseClassAtIndex(uint32_t idx) {
- LLDB_RECORD_METHOD(lldb::SBTypeMember, SBType, GetVirtualBaseClassAtIndex,
- (uint32_t), idx);
+ LLDB_INSTRUMENT_VA(this, idx);
SBTypeMember sb_type_member;
if (IsValid()) {
@@ -443,12 +428,11 @@ SBTypeMember SBType::GetVirtualBaseClassAtIndex(uint32_t idx) {
sb_type_member.reset(new TypeMemberImpl(
TypeImplSP(new TypeImpl(base_class_type)), bit_offset));
}
- return LLDB_RECORD_RESULT(sb_type_member);
+ return sb_type_member;
}
SBTypeEnumMemberList SBType::GetEnumMembers() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBTypeEnumMemberList, SBType,
- GetEnumMembers);
+ LLDB_INSTRUMENT_VA(this);
SBTypeEnumMemberList sb_enum_member_list;
if (IsValid()) {
@@ -466,12 +450,11 @@ SBTypeEnumMemberList SBType::GetEnumMembers() {
});
}
}
- return LLDB_RECORD_RESULT(sb_enum_member_list);
+ return sb_enum_member_list;
}
SBTypeMember SBType::GetFieldAtIndex(uint32_t idx) {
- LLDB_RECORD_METHOD(lldb::SBTypeMember, SBType, GetFieldAtIndex, (uint32_t),
- idx);
+ LLDB_INSTRUMENT_VA(this, idx);
SBTypeMember sb_type_member;
if (IsValid()) {
@@ -493,11 +476,11 @@ SBTypeMember SBType::GetFieldAtIndex(uint32_t idx) {
}
}
}
- return LLDB_RECORD_RESULT(sb_type_member);
+ return sb_type_member;
}
bool SBType::IsTypeComplete() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBType, IsTypeComplete);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return false;
@@ -505,7 +488,7 @@ bool SBType::IsTypeComplete() {
}
uint32_t SBType::GetTypeFlags() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBType, GetTypeFlags);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return 0;
@@ -513,18 +496,18 @@ uint32_t SBType::GetTypeFlags() {
}
lldb::SBModule SBType::GetModule() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBModule, SBType, GetModule);
+ LLDB_INSTRUMENT_VA(this);
lldb::SBModule sb_module;
if (!IsValid())
- return LLDB_RECORD_RESULT(sb_module);
+ return sb_module;
sb_module.SetSP(m_opaque_sp->GetModule());
- return LLDB_RECORD_RESULT(sb_module);
+ return sb_module;
}
const char *SBType::GetName() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBType, GetName);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return "";
@@ -532,7 +515,7 @@ const char *SBType::GetName() {
}
const char *SBType::GetDisplayTypeName() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBType, GetDisplayTypeName);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return "";
@@ -540,7 +523,7 @@ const char *SBType::GetDisplayTypeName() {
}
lldb::TypeClass SBType::GetTypeClass() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::TypeClass, SBType, GetTypeClass);
+ LLDB_INSTRUMENT_VA(this);
if (IsValid())
return m_opaque_sp->GetCompilerType(true).GetTypeClass();
@@ -548,7 +531,7 @@ lldb::TypeClass SBType::GetTypeClass() {
}
uint32_t SBType::GetNumberOfTemplateArguments() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBType, GetNumberOfTemplateArguments);
+ LLDB_INSTRUMENT_VA(this);
if (IsValid())
return m_opaque_sp->GetCompilerType(false).GetNumTemplateArguments();
@@ -556,11 +539,10 @@ uint32_t SBType::GetNumberOfTemplateArguments() {
}
lldb::SBType SBType::GetTemplateArgumentType(uint32_t idx) {
- LLDB_RECORD_METHOD(lldb::SBType, SBType, GetTemplateArgumentType, (uint32_t),
- idx);
+ LLDB_INSTRUMENT_VA(this, idx);
if (!IsValid())
- return LLDB_RECORD_RESULT(SBType());
+ return SBType();
CompilerType type;
switch(GetTemplateArgumentKind(idx)) {
@@ -576,13 +558,12 @@ lldb::SBType SBType::GetTemplateArgumentType(uint32_t idx) {
break;
}
if (type.IsValid())
- return LLDB_RECORD_RESULT(SBType(type));
- return LLDB_RECORD_RESULT(SBType());
+ return SBType(type);
+ return SBType();
}
lldb::TemplateArgumentKind SBType::GetTemplateArgumentKind(uint32_t idx) {
- LLDB_RECORD_METHOD(lldb::TemplateArgumentKind, SBType,
- GetTemplateArgumentKind, (uint32_t), idx);
+ LLDB_INSTRUMENT_VA(this, idx);
if (IsValid())
return m_opaque_sp->GetCompilerType(false).GetTemplateArgumentKind(idx);
@@ -590,12 +571,12 @@ lldb::TemplateArgumentKind SBType::GetTemplateArgumentKind(uint32_t idx) {
}
SBTypeList::SBTypeList() : m_opaque_up(new TypeListImpl()) {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBTypeList);
+ LLDB_INSTRUMENT_VA(this);
}
SBTypeList::SBTypeList(const SBTypeList &rhs)
: m_opaque_up(new TypeListImpl()) {
- LLDB_RECORD_CONSTRUCTOR(SBTypeList, (const lldb::SBTypeList &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
for (uint32_t i = 0, rhs_size = const_cast<SBTypeList &>(rhs).GetSize();
i < rhs_size; i++)
@@ -603,18 +584,17 @@ SBTypeList::SBTypeList(const SBTypeList &rhs)
}
bool SBTypeList::IsValid() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBTypeList, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBTypeList::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBTypeList, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return (m_opaque_up != nullptr);
}
SBTypeList &SBTypeList::operator=(const SBTypeList &rhs) {
- LLDB_RECORD_METHOD(lldb::SBTypeList &,
- SBTypeList, operator=,(const lldb::SBTypeList &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs) {
m_opaque_up = std::make_unique<TypeListImpl>();
@@ -622,41 +602,38 @@ SBTypeList &SBTypeList::operator=(const SBTypeList &rhs) {
i < rhs_size; i++)
Append(const_cast<SBTypeList &>(rhs).GetTypeAtIndex(i));
}
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
void SBTypeList::Append(SBType type) {
- LLDB_RECORD_METHOD(void, SBTypeList, Append, (lldb::SBType), type);
+ LLDB_INSTRUMENT_VA(this, type);
if (type.IsValid())
m_opaque_up->Append(type.m_opaque_sp);
}
SBType SBTypeList::GetTypeAtIndex(uint32_t index) {
- LLDB_RECORD_METHOD(lldb::SBType, SBTypeList, GetTypeAtIndex, (uint32_t),
- index);
+ LLDB_INSTRUMENT_VA(this, index);
if (m_opaque_up)
- return LLDB_RECORD_RESULT(SBType(m_opaque_up->GetTypeAtIndex(index)));
- return LLDB_RECORD_RESULT(SBType());
+ return SBType(m_opaque_up->GetTypeAtIndex(index));
+ return SBType();
}
uint32_t SBTypeList::GetSize() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBTypeList, GetSize);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetSize();
}
SBTypeList::~SBTypeList() = default;
-SBTypeMember::SBTypeMember() : m_opaque_up() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBTypeMember);
-}
+SBTypeMember::SBTypeMember() { LLDB_INSTRUMENT_VA(this); }
SBTypeMember::~SBTypeMember() = default;
-SBTypeMember::SBTypeMember(const SBTypeMember &rhs) : m_opaque_up() {
- LLDB_RECORD_CONSTRUCTOR(SBTypeMember, (const lldb::SBTypeMember &), rhs);
+SBTypeMember::SBTypeMember(const SBTypeMember &rhs) {
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs) {
if (rhs.IsValid())
@@ -665,28 +642,27 @@ SBTypeMember::SBTypeMember(const SBTypeMember &rhs) : m_opaque_up() {
}
lldb::SBTypeMember &SBTypeMember::operator=(const lldb::SBTypeMember &rhs) {
- LLDB_RECORD_METHOD(lldb::SBTypeMember &,
- SBTypeMember, operator=,(const lldb::SBTypeMember &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs) {
if (rhs.IsValid())
m_opaque_up = std::make_unique<TypeMemberImpl>(rhs.ref());
}
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
bool SBTypeMember::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBTypeMember, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBTypeMember::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBTypeMember, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up.get();
}
const char *SBTypeMember::GetName() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBTypeMember, GetName);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_up)
return m_opaque_up->GetName().GetCString();
@@ -694,17 +670,17 @@ const char *SBTypeMember::GetName() {
}
SBType SBTypeMember::GetType() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBType, SBTypeMember, GetType);
+ LLDB_INSTRUMENT_VA(this);
SBType sb_type;
if (m_opaque_up) {
sb_type.SetSP(m_opaque_up->GetTypeImpl());
}
- return LLDB_RECORD_RESULT(sb_type);
+ return sb_type;
}
uint64_t SBTypeMember::GetOffsetInBytes() {
- LLDB_RECORD_METHOD_NO_ARGS(uint64_t, SBTypeMember, GetOffsetInBytes);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_up)
return m_opaque_up->GetBitOffset() / 8u;
@@ -712,7 +688,7 @@ uint64_t SBTypeMember::GetOffsetInBytes() {
}
uint64_t SBTypeMember::GetOffsetInBits() {
- LLDB_RECORD_METHOD_NO_ARGS(uint64_t, SBTypeMember, GetOffsetInBits);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_up)
return m_opaque_up->GetBitOffset();
@@ -720,7 +696,7 @@ uint64_t SBTypeMember::GetOffsetInBits() {
}
bool SBTypeMember::IsBitfield() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBTypeMember, IsBitfield);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_up)
return m_opaque_up->GetIsBitfield();
@@ -728,7 +704,7 @@ bool SBTypeMember::IsBitfield() {
}
uint32_t SBTypeMember::GetBitfieldSizeInBits() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBTypeMember, GetBitfieldSizeInBits);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_up)
return m_opaque_up->GetBitfieldBitSize();
@@ -737,9 +713,7 @@ uint32_t SBTypeMember::GetBitfieldSizeInBits() {
bool SBTypeMember::GetDescription(lldb::SBStream &description,
lldb::DescriptionLevel description_level) {
- LLDB_RECORD_METHOD(bool, SBTypeMember, GetDescription,
- (lldb::SBStream &, lldb::DescriptionLevel), description,
- description_level);
+ LLDB_INSTRUMENT_VA(this, description, description_level);
Stream &strm = description.ref();
@@ -780,42 +754,36 @@ TypeMemberImpl &SBTypeMember::ref() {
const TypeMemberImpl &SBTypeMember::ref() const { return *m_opaque_up; }
-SBTypeMemberFunction::SBTypeMemberFunction() : m_opaque_sp() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBTypeMemberFunction);
-}
+SBTypeMemberFunction::SBTypeMemberFunction() { LLDB_INSTRUMENT_VA(this); }
SBTypeMemberFunction::~SBTypeMemberFunction() = default;
SBTypeMemberFunction::SBTypeMemberFunction(const SBTypeMemberFunction &rhs)
: m_opaque_sp(rhs.m_opaque_sp) {
- LLDB_RECORD_CONSTRUCTOR(SBTypeMemberFunction,
- (const lldb::SBTypeMemberFunction &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
lldb::SBTypeMemberFunction &SBTypeMemberFunction::
operator=(const lldb::SBTypeMemberFunction &rhs) {
- LLDB_RECORD_METHOD(
- lldb::SBTypeMemberFunction &,
- SBTypeMemberFunction, operator=,(const lldb::SBTypeMemberFunction &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_sp = rhs.m_opaque_sp;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
bool SBTypeMemberFunction::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBTypeMemberFunction, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBTypeMemberFunction::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBTypeMemberFunction, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp.get();
}
const char *SBTypeMemberFunction::GetName() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBTypeMemberFunction, GetName);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_sp)
return m_opaque_sp->GetName().GetCString();
@@ -823,8 +791,7 @@ const char *SBTypeMemberFunction::GetName() {
}
const char *SBTypeMemberFunction::GetDemangledName() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBTypeMemberFunction,
- GetDemangledName);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_sp) {
ConstString mangled_str = m_opaque_sp->GetMangledName();
@@ -837,8 +804,7 @@ const char *SBTypeMemberFunction::GetDemangledName() {
}
const char *SBTypeMemberFunction::GetMangledName() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBTypeMemberFunction,
- GetMangledName);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_sp)
return m_opaque_sp->GetMangledName().GetCString();
@@ -846,28 +812,27 @@ const char *SBTypeMemberFunction::GetMangledName() {
}
SBType SBTypeMemberFunction::GetType() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBType, SBTypeMemberFunction, GetType);
+ LLDB_INSTRUMENT_VA(this);
SBType sb_type;
if (m_opaque_sp) {
sb_type.SetSP(lldb::TypeImplSP(new TypeImpl(m_opaque_sp->GetType())));
}
- return LLDB_RECORD_RESULT(sb_type);
+ return sb_type;
}
lldb::SBType SBTypeMemberFunction::GetReturnType() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBType, SBTypeMemberFunction, GetReturnType);
+ LLDB_INSTRUMENT_VA(this);
SBType sb_type;
if (m_opaque_sp) {
sb_type.SetSP(lldb::TypeImplSP(new TypeImpl(m_opaque_sp->GetReturnType())));
}
- return LLDB_RECORD_RESULT(sb_type);
+ return sb_type;
}
uint32_t SBTypeMemberFunction::GetNumberOfArguments() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBTypeMemberFunction,
- GetNumberOfArguments);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_sp)
return m_opaque_sp->GetNumArguments();
@@ -875,20 +840,18 @@ uint32_t SBTypeMemberFunction::GetNumberOfArguments() {
}
lldb::SBType SBTypeMemberFunction::GetArgumentTypeAtIndex(uint32_t i) {
- LLDB_RECORD_METHOD(lldb::SBType, SBTypeMemberFunction, GetArgumentTypeAtIndex,
- (uint32_t), i);
+ LLDB_INSTRUMENT_VA(this, i);
SBType sb_type;
if (m_opaque_sp) {
sb_type.SetSP(
lldb::TypeImplSP(new TypeImpl(m_opaque_sp->GetArgumentAtIndex(i))));
}
- return LLDB_RECORD_RESULT(sb_type);
+ return sb_type;
}
lldb::MemberFunctionKind SBTypeMemberFunction::GetKind() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::MemberFunctionKind, SBTypeMemberFunction,
- GetKind);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_sp)
return m_opaque_sp->GetKind();
@@ -897,9 +860,7 @@ lldb::MemberFunctionKind SBTypeMemberFunction::GetKind() {
bool SBTypeMemberFunction::GetDescription(
lldb::SBStream &description, lldb::DescriptionLevel description_level) {
- LLDB_RECORD_METHOD(bool, SBTypeMemberFunction, GetDescription,
- (lldb::SBStream &, lldb::DescriptionLevel), description,
- description_level);
+ LLDB_INSTRUMENT_VA(this, description, description_level);
Stream &strm = description.ref();
@@ -922,120 +883,3 @@ TypeMemberFunctionImpl &SBTypeMemberFunction::ref() {
const TypeMemberFunctionImpl &SBTypeMemberFunction::ref() const {
return *m_opaque_sp.get();
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBType>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBType, ());
- LLDB_REGISTER_CONSTRUCTOR(SBType, (const lldb::SBType &));
- LLDB_REGISTER_METHOD(bool, SBType, operator==,(lldb::SBType &));
- LLDB_REGISTER_METHOD(bool, SBType, operator!=,(lldb::SBType &));
- LLDB_REGISTER_METHOD(lldb::SBType &,
- SBType, operator=,(const lldb::SBType &));
- LLDB_REGISTER_METHOD_CONST(bool, SBType, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBType, operator bool, ());
- LLDB_REGISTER_METHOD(uint64_t, SBType, GetByteSize, ());
- LLDB_REGISTER_METHOD(bool, SBType, IsPointerType, ());
- LLDB_REGISTER_METHOD(bool, SBType, IsArrayType, ());
- LLDB_REGISTER_METHOD(bool, SBType, IsVectorType, ());
- LLDB_REGISTER_METHOD(bool, SBType, IsReferenceType, ());
- LLDB_REGISTER_METHOD(lldb::SBType, SBType, GetPointerType, ());
- LLDB_REGISTER_METHOD(lldb::SBType, SBType, GetPointeeType, ());
- LLDB_REGISTER_METHOD(lldb::SBType, SBType, GetReferenceType, ());
- LLDB_REGISTER_METHOD(lldb::SBType, SBType, GetTypedefedType, ());
- LLDB_REGISTER_METHOD(lldb::SBType, SBType, GetDereferencedType, ());
- LLDB_REGISTER_METHOD(lldb::SBType, SBType, GetArrayElementType, ());
- LLDB_REGISTER_METHOD(lldb::SBType, SBType, GetArrayType, (uint64_t));
- LLDB_REGISTER_METHOD(lldb::SBType, SBType, GetVectorElementType, ());
- LLDB_REGISTER_METHOD(bool, SBType, IsFunctionType, ());
- LLDB_REGISTER_METHOD(bool, SBType, IsPolymorphicClass, ());
- LLDB_REGISTER_METHOD(bool, SBType, IsTypedefType, ());
- LLDB_REGISTER_METHOD(bool, SBType, IsAnonymousType, ());
- LLDB_REGISTER_METHOD(bool, SBType, IsScopedEnumerationType, ());
- LLDB_REGISTER_METHOD(lldb::SBType, SBType, GetFunctionReturnType, ());
- LLDB_REGISTER_METHOD(lldb::SBTypeList, SBType, GetFunctionArgumentTypes,
- ());
- LLDB_REGISTER_METHOD(uint32_t, SBType, GetNumberOfMemberFunctions, ());
- LLDB_REGISTER_METHOD(lldb::SBTypeMemberFunction, SBType,
- GetMemberFunctionAtIndex, (uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBType, SBType, GetUnqualifiedType, ());
- LLDB_REGISTER_METHOD(lldb::SBType, SBType, GetCanonicalType, ());
- LLDB_REGISTER_METHOD(lldb::SBType, SBType, GetEnumerationIntegerType, ());
- LLDB_REGISTER_METHOD(lldb::BasicType, SBType, GetBasicType, ());
- LLDB_REGISTER_METHOD(lldb::SBType, SBType, GetBasicType, (lldb::BasicType));
- LLDB_REGISTER_METHOD(uint32_t, SBType, GetNumberOfDirectBaseClasses, ());
- LLDB_REGISTER_METHOD(uint32_t, SBType, GetNumberOfVirtualBaseClasses, ());
- LLDB_REGISTER_METHOD(uint32_t, SBType, GetNumberOfFields, ());
- LLDB_REGISTER_METHOD(bool, SBType, GetDescription,
- (lldb::SBStream &, lldb::DescriptionLevel));
- LLDB_REGISTER_METHOD(lldb::SBTypeMember, SBType, GetDirectBaseClassAtIndex,
- (uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBTypeMember, SBType, GetVirtualBaseClassAtIndex,
- (uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBTypeEnumMemberList, SBType, GetEnumMembers,
- ());
- LLDB_REGISTER_METHOD(lldb::SBTypeMember, SBType, GetFieldAtIndex,
- (uint32_t));
- LLDB_REGISTER_METHOD(bool, SBType, IsTypeComplete, ());
- LLDB_REGISTER_METHOD(uint32_t, SBType, GetTypeFlags, ());
- LLDB_REGISTER_METHOD(lldb::SBModule, SBType, GetModule, ());
- LLDB_REGISTER_METHOD(const char *, SBType, GetName, ());
- LLDB_REGISTER_METHOD(const char *, SBType, GetDisplayTypeName, ());
- LLDB_REGISTER_METHOD(lldb::TypeClass, SBType, GetTypeClass, ());
- LLDB_REGISTER_METHOD(uint32_t, SBType, GetNumberOfTemplateArguments, ());
- LLDB_REGISTER_METHOD(lldb::SBType, SBType, GetTemplateArgumentType,
- (uint32_t));
- LLDB_REGISTER_METHOD(lldb::TemplateArgumentKind, SBType,
- GetTemplateArgumentKind, (uint32_t));
- LLDB_REGISTER_CONSTRUCTOR(SBTypeList, ());
- LLDB_REGISTER_CONSTRUCTOR(SBTypeList, (const lldb::SBTypeList &));
- LLDB_REGISTER_METHOD(bool, SBTypeList, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBTypeList, operator bool, ());
- LLDB_REGISTER_METHOD(lldb::SBTypeList &,
- SBTypeList, operator=,(const lldb::SBTypeList &));
- LLDB_REGISTER_METHOD(void, SBTypeList, Append, (lldb::SBType));
- LLDB_REGISTER_METHOD(lldb::SBType, SBTypeList, GetTypeAtIndex, (uint32_t));
- LLDB_REGISTER_METHOD(uint32_t, SBTypeList, GetSize, ());
- LLDB_REGISTER_CONSTRUCTOR(SBTypeMember, ());
- LLDB_REGISTER_CONSTRUCTOR(SBTypeMember, (const lldb::SBTypeMember &));
- LLDB_REGISTER_METHOD(lldb::SBTypeMember &,
- SBTypeMember, operator=,(const lldb::SBTypeMember &));
- LLDB_REGISTER_METHOD_CONST(bool, SBTypeMember, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBTypeMember, operator bool, ());
- LLDB_REGISTER_METHOD(const char *, SBTypeMember, GetName, ());
- LLDB_REGISTER_METHOD(lldb::SBType, SBTypeMember, GetType, ());
- LLDB_REGISTER_METHOD(uint64_t, SBTypeMember, GetOffsetInBytes, ());
- LLDB_REGISTER_METHOD(uint64_t, SBTypeMember, GetOffsetInBits, ());
- LLDB_REGISTER_METHOD(bool, SBTypeMember, IsBitfield, ());
- LLDB_REGISTER_METHOD(uint32_t, SBTypeMember, GetBitfieldSizeInBits, ());
- LLDB_REGISTER_METHOD(bool, SBTypeMember, GetDescription,
- (lldb::SBStream &, lldb::DescriptionLevel));
- LLDB_REGISTER_CONSTRUCTOR(SBTypeMemberFunction, ());
- LLDB_REGISTER_CONSTRUCTOR(SBTypeMemberFunction,
- (const lldb::SBTypeMemberFunction &));
- LLDB_REGISTER_METHOD(
- lldb::SBTypeMemberFunction &,
- SBTypeMemberFunction, operator=,(const lldb::SBTypeMemberFunction &));
- LLDB_REGISTER_METHOD_CONST(bool, SBTypeMemberFunction, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBTypeMemberFunction, operator bool, ());
- LLDB_REGISTER_METHOD(const char *, SBTypeMemberFunction, GetName, ());
- LLDB_REGISTER_METHOD(const char *, SBTypeMemberFunction, GetDemangledName,
- ());
- LLDB_REGISTER_METHOD(const char *, SBTypeMemberFunction, GetMangledName,
- ());
- LLDB_REGISTER_METHOD(lldb::SBType, SBTypeMemberFunction, GetType, ());
- LLDB_REGISTER_METHOD(lldb::SBType, SBTypeMemberFunction, GetReturnType, ());
- LLDB_REGISTER_METHOD(uint32_t, SBTypeMemberFunction, GetNumberOfArguments,
- ());
- LLDB_REGISTER_METHOD(lldb::SBType, SBTypeMemberFunction,
- GetArgumentTypeAtIndex, (uint32_t));
- LLDB_REGISTER_METHOD(lldb::MemberFunctionKind, SBTypeMemberFunction,
- GetKind, ());
- LLDB_REGISTER_METHOD(bool, SBTypeMemberFunction, GetDescription,
- (lldb::SBStream &, lldb::DescriptionLevel));
-}
-
-}
-}
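
The hunks above follow one mechanical pattern that repeats through the rest of this diff: each signature-specific recorder macro (LLDB_RECORD_CONSTRUCTOR*, LLDB_RECORD_METHOD*, and their CONST/NO_ARGS variants) collapses into a single variadic LLDB_INSTRUMENT_VA call that receives only the call-site values (`this` plus the arguments), the LLDB_RECORD_RESULT wrapper around return values is dropped, and the per-class RegisterMethods<T>(Registry &R) blocks in lldb_private::repro are deleted outright since nothing needs to be registered anymore. The sketch below only illustrates that call-site shape; the macro body, the SBExample class, and the logging behavior are assumptions, not LLDB's actual lldb/Utility/Instrumentation.h.

    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-in for lldb/Utility/Instrumentation.h: the real macro
    // presumably records the enclosing function and its argument values; this
    // sketch only prints the function name and discards the arguments.
    #define LLDB_INSTRUMENT_VA(...) std::printf("instrumented: %s\n", __func__)

    struct SBExample {             // made-up SB-style wrapper, not an LLDB class
      bool IsValid() const {
        LLDB_INSTRUMENT_VA(this);  // was: LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBExample, IsValid)
        return m_valid;
      }
      bool GetFlag(uint32_t idx) const {
        LLDB_INSTRUMENT_VA(this, idx);  // arguments passed by value, no type list needed
        return m_valid && idx == 0;     // returned directly, no LLDB_RECORD_RESULT wrapper
      }
      bool m_valid = true;
    };

    int main() {
      SBExample e;
      return e.IsValid() && e.GetFlag(0) ? 0 : 1;
    }
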
diff --git a/contrib/llvm-project/lldb/source/API/SBTypeCategory.cpp b/contrib/llvm-project/lldb/source/API/SBTypeCategory.cpp
index e7432959b260..7d929fe49795 100644
--- a/contrib/llvm-project/lldb/source/API/SBTypeCategory.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBTypeCategory.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBTypeCategory.h"
-#include "SBReproducerPrivate.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/API/SBStream.h"
#include "lldb/API/SBTypeFilter.h"
@@ -26,33 +26,31 @@ using namespace lldb_private;
typedef std::pair<lldb::TypeCategoryImplSP, user_id_t> ImplType;
-SBTypeCategory::SBTypeCategory() : m_opaque_sp() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBTypeCategory);
-}
+SBTypeCategory::SBTypeCategory() { LLDB_INSTRUMENT_VA(this); }
-SBTypeCategory::SBTypeCategory(const char *name) : m_opaque_sp() {
+SBTypeCategory::SBTypeCategory(const char *name) {
DataVisualization::Categories::GetCategory(ConstString(name), m_opaque_sp);
}
SBTypeCategory::SBTypeCategory(const lldb::SBTypeCategory &rhs)
: m_opaque_sp(rhs.m_opaque_sp) {
- LLDB_RECORD_CONSTRUCTOR(SBTypeCategory, (const lldb::SBTypeCategory &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
SBTypeCategory::~SBTypeCategory() = default;
bool SBTypeCategory::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBTypeCategory, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBTypeCategory::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBTypeCategory, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return (m_opaque_sp.get() != nullptr);
}
bool SBTypeCategory::GetEnabled() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBTypeCategory, GetEnabled);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return false;
@@ -60,7 +58,7 @@ bool SBTypeCategory::GetEnabled() {
}
void SBTypeCategory::SetEnabled(bool enabled) {
- LLDB_RECORD_METHOD(void, SBTypeCategory, SetEnabled, (bool), enabled);
+ LLDB_INSTRUMENT_VA(this, enabled);
if (!IsValid())
return;
@@ -71,7 +69,7 @@ void SBTypeCategory::SetEnabled(bool enabled) {
}
const char *SBTypeCategory::GetName() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBTypeCategory, GetName);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return nullptr;
@@ -79,8 +77,7 @@ const char *SBTypeCategory::GetName() {
}
lldb::LanguageType SBTypeCategory::GetLanguageAtIndex(uint32_t idx) {
- LLDB_RECORD_METHOD(lldb::LanguageType, SBTypeCategory, GetLanguageAtIndex,
- (uint32_t), idx);
+ LLDB_INSTRUMENT_VA(this, idx);
if (IsValid())
return m_opaque_sp->GetLanguageAtIndex(idx);
@@ -88,7 +85,7 @@ lldb::LanguageType SBTypeCategory::GetLanguageAtIndex(uint32_t idx) {
}
uint32_t SBTypeCategory::GetNumLanguages() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBTypeCategory, GetNumLanguages);
+ LLDB_INSTRUMENT_VA(this);
if (IsValid())
return m_opaque_sp->GetNumLanguages();
@@ -96,15 +93,14 @@ uint32_t SBTypeCategory::GetNumLanguages() {
}
void SBTypeCategory::AddLanguage(lldb::LanguageType language) {
- LLDB_RECORD_METHOD(void, SBTypeCategory, AddLanguage, (lldb::LanguageType),
- language);
+ LLDB_INSTRUMENT_VA(this, language);
if (IsValid())
m_opaque_sp->AddLanguage(language);
}
uint32_t SBTypeCategory::GetNumFormats() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBTypeCategory, GetNumFormats);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return 0;
@@ -114,7 +110,7 @@ uint32_t SBTypeCategory::GetNumFormats() {
}
uint32_t SBTypeCategory::GetNumSummaries() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBTypeCategory, GetNumSummaries);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return 0;
@@ -123,7 +119,7 @@ uint32_t SBTypeCategory::GetNumSummaries() {
}
uint32_t SBTypeCategory::GetNumFilters() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBTypeCategory, GetNumFilters);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return 0;
@@ -132,7 +128,7 @@ uint32_t SBTypeCategory::GetNumFilters() {
}
uint32_t SBTypeCategory::GetNumSynthetics() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBTypeCategory, GetNumSynthetics);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return 0;
@@ -142,58 +138,52 @@ uint32_t SBTypeCategory::GetNumSynthetics() {
lldb::SBTypeNameSpecifier
SBTypeCategory::GetTypeNameSpecifierForFilterAtIndex(uint32_t index) {
- LLDB_RECORD_METHOD(lldb::SBTypeNameSpecifier, SBTypeCategory,
- GetTypeNameSpecifierForFilterAtIndex, (uint32_t), index);
+ LLDB_INSTRUMENT_VA(this, index);
if (!IsValid())
- return LLDB_RECORD_RESULT(SBTypeNameSpecifier());
- return LLDB_RECORD_RESULT(SBTypeNameSpecifier(
- m_opaque_sp->GetTypeNameSpecifierForFilterAtIndex(index)));
+ return SBTypeNameSpecifier();
+ return SBTypeNameSpecifier(
+ m_opaque_sp->GetTypeNameSpecifierForFilterAtIndex(index));
}
lldb::SBTypeNameSpecifier
SBTypeCategory::GetTypeNameSpecifierForFormatAtIndex(uint32_t index) {
- LLDB_RECORD_METHOD(lldb::SBTypeNameSpecifier, SBTypeCategory,
- GetTypeNameSpecifierForFormatAtIndex, (uint32_t), index);
+ LLDB_INSTRUMENT_VA(this, index);
if (!IsValid())
- return LLDB_RECORD_RESULT(SBTypeNameSpecifier());
- return LLDB_RECORD_RESULT(SBTypeNameSpecifier(
- m_opaque_sp->GetTypeNameSpecifierForFormatAtIndex(index)));
+ return SBTypeNameSpecifier();
+ return SBTypeNameSpecifier(
+ m_opaque_sp->GetTypeNameSpecifierForFormatAtIndex(index));
}
lldb::SBTypeNameSpecifier
SBTypeCategory::GetTypeNameSpecifierForSummaryAtIndex(uint32_t index) {
- LLDB_RECORD_METHOD(lldb::SBTypeNameSpecifier, SBTypeCategory,
- GetTypeNameSpecifierForSummaryAtIndex, (uint32_t), index);
+ LLDB_INSTRUMENT_VA(this, index);
if (!IsValid())
- return LLDB_RECORD_RESULT(SBTypeNameSpecifier());
- return LLDB_RECORD_RESULT(SBTypeNameSpecifier(
- m_opaque_sp->GetTypeNameSpecifierForSummaryAtIndex(index)));
+ return SBTypeNameSpecifier();
+ return SBTypeNameSpecifier(
+ m_opaque_sp->GetTypeNameSpecifierForSummaryAtIndex(index));
}
lldb::SBTypeNameSpecifier
SBTypeCategory::GetTypeNameSpecifierForSyntheticAtIndex(uint32_t index) {
- LLDB_RECORD_METHOD(lldb::SBTypeNameSpecifier, SBTypeCategory,
- GetTypeNameSpecifierForSyntheticAtIndex, (uint32_t),
- index);
+ LLDB_INSTRUMENT_VA(this, index);
if (!IsValid())
- return LLDB_RECORD_RESULT(SBTypeNameSpecifier());
- return LLDB_RECORD_RESULT(SBTypeNameSpecifier(
- m_opaque_sp->GetTypeNameSpecifierForSyntheticAtIndex(index)));
+ return SBTypeNameSpecifier();
+ return SBTypeNameSpecifier(
+ m_opaque_sp->GetTypeNameSpecifierForSyntheticAtIndex(index));
}
SBTypeFilter SBTypeCategory::GetFilterForType(SBTypeNameSpecifier spec) {
- LLDB_RECORD_METHOD(lldb::SBTypeFilter, SBTypeCategory, GetFilterForType,
- (lldb::SBTypeNameSpecifier), spec);
+ LLDB_INSTRUMENT_VA(this, spec);
if (!IsValid())
- return LLDB_RECORD_RESULT(SBTypeFilter());
+ return SBTypeFilter();
if (!spec.IsValid())
- return LLDB_RECORD_RESULT(SBTypeFilter());
+ return SBTypeFilter();
lldb::TypeFilterImplSP children_sp;
@@ -205,22 +195,21 @@ SBTypeFilter SBTypeCategory::GetFilterForType(SBTypeNameSpecifier spec) {
ConstString(spec.GetName()), children_sp);
if (!children_sp)
- return LLDB_RECORD_RESULT(lldb::SBTypeFilter());
+ return lldb::SBTypeFilter();
TypeFilterImplSP filter_sp =
std::static_pointer_cast<TypeFilterImpl>(children_sp);
- return LLDB_RECORD_RESULT(lldb::SBTypeFilter(filter_sp));
+ return lldb::SBTypeFilter(filter_sp);
}
SBTypeFormat SBTypeCategory::GetFormatForType(SBTypeNameSpecifier spec) {
- LLDB_RECORD_METHOD(lldb::SBTypeFormat, SBTypeCategory, GetFormatForType,
- (lldb::SBTypeNameSpecifier), spec);
+ LLDB_INSTRUMENT_VA(this, spec);
if (!IsValid())
- return LLDB_RECORD_RESULT(SBTypeFormat());
+ return SBTypeFormat();
if (!spec.IsValid())
- return LLDB_RECORD_RESULT(SBTypeFormat());
+ return SBTypeFormat();
lldb::TypeFormatImplSP format_sp;
@@ -232,20 +221,19 @@ SBTypeFormat SBTypeCategory::GetFormatForType(SBTypeNameSpecifier spec) {
ConstString(spec.GetName()), format_sp);
if (!format_sp)
- return LLDB_RECORD_RESULT(lldb::SBTypeFormat());
+ return lldb::SBTypeFormat();
- return LLDB_RECORD_RESULT(lldb::SBTypeFormat(format_sp));
+ return lldb::SBTypeFormat(format_sp);
}
SBTypeSummary SBTypeCategory::GetSummaryForType(SBTypeNameSpecifier spec) {
- LLDB_RECORD_METHOD(lldb::SBTypeSummary, SBTypeCategory, GetSummaryForType,
- (lldb::SBTypeNameSpecifier), spec);
+ LLDB_INSTRUMENT_VA(this, spec);
if (!IsValid())
- return LLDB_RECORD_RESULT(SBTypeSummary());
+ return SBTypeSummary();
if (!spec.IsValid())
- return LLDB_RECORD_RESULT(SBTypeSummary());
+ return SBTypeSummary();
lldb::TypeSummaryImplSP summary_sp;
@@ -257,20 +245,19 @@ SBTypeSummary SBTypeCategory::GetSummaryForType(SBTypeNameSpecifier spec) {
ConstString(spec.GetName()), summary_sp);
if (!summary_sp)
- return LLDB_RECORD_RESULT(lldb::SBTypeSummary());
+ return lldb::SBTypeSummary();
- return LLDB_RECORD_RESULT(lldb::SBTypeSummary(summary_sp));
+ return lldb::SBTypeSummary(summary_sp);
}
SBTypeSynthetic SBTypeCategory::GetSyntheticForType(SBTypeNameSpecifier spec) {
- LLDB_RECORD_METHOD(lldb::SBTypeSynthetic, SBTypeCategory, GetSyntheticForType,
- (lldb::SBTypeNameSpecifier), spec);
+ LLDB_INSTRUMENT_VA(this, spec);
if (!IsValid())
- return LLDB_RECORD_RESULT(SBTypeSynthetic());
+ return SBTypeSynthetic();
if (!spec.IsValid())
- return LLDB_RECORD_RESULT(SBTypeSynthetic());
+ return SBTypeSynthetic();
lldb::SyntheticChildrenSP children_sp;
@@ -282,75 +269,67 @@ SBTypeSynthetic SBTypeCategory::GetSyntheticForType(SBTypeNameSpecifier spec) {
ConstString(spec.GetName()), children_sp);
if (!children_sp)
- return LLDB_RECORD_RESULT(lldb::SBTypeSynthetic());
+ return lldb::SBTypeSynthetic();
ScriptedSyntheticChildrenSP synth_sp =
std::static_pointer_cast<ScriptedSyntheticChildren>(children_sp);
- return LLDB_RECORD_RESULT(lldb::SBTypeSynthetic(synth_sp));
+ return lldb::SBTypeSynthetic(synth_sp);
}
SBTypeFilter SBTypeCategory::GetFilterAtIndex(uint32_t index) {
- LLDB_RECORD_METHOD(lldb::SBTypeFilter, SBTypeCategory, GetFilterAtIndex,
- (uint32_t), index);
+ LLDB_INSTRUMENT_VA(this, index);
if (!IsValid())
- return LLDB_RECORD_RESULT(SBTypeFilter());
+ return SBTypeFilter();
lldb::SyntheticChildrenSP children_sp =
m_opaque_sp->GetSyntheticAtIndex((index));
if (!children_sp.get())
- return LLDB_RECORD_RESULT(lldb::SBTypeFilter());
+ return lldb::SBTypeFilter();
TypeFilterImplSP filter_sp =
std::static_pointer_cast<TypeFilterImpl>(children_sp);
- return LLDB_RECORD_RESULT(lldb::SBTypeFilter(filter_sp));
+ return lldb::SBTypeFilter(filter_sp);
}
SBTypeFormat SBTypeCategory::GetFormatAtIndex(uint32_t index) {
- LLDB_RECORD_METHOD(lldb::SBTypeFormat, SBTypeCategory, GetFormatAtIndex,
- (uint32_t), index);
+ LLDB_INSTRUMENT_VA(this, index);
if (!IsValid())
- return LLDB_RECORD_RESULT(SBTypeFormat());
- return LLDB_RECORD_RESULT(
- SBTypeFormat(m_opaque_sp->GetFormatAtIndex((index))));
+ return SBTypeFormat();
+ return SBTypeFormat(m_opaque_sp->GetFormatAtIndex((index)));
}
SBTypeSummary SBTypeCategory::GetSummaryAtIndex(uint32_t index) {
- LLDB_RECORD_METHOD(lldb::SBTypeSummary, SBTypeCategory, GetSummaryAtIndex,
- (uint32_t), index);
+ LLDB_INSTRUMENT_VA(this, index);
if (!IsValid())
- return LLDB_RECORD_RESULT(SBTypeSummary());
- return LLDB_RECORD_RESULT(
- SBTypeSummary(m_opaque_sp->GetSummaryAtIndex((index))));
+ return SBTypeSummary();
+ return SBTypeSummary(m_opaque_sp->GetSummaryAtIndex((index)));
}
SBTypeSynthetic SBTypeCategory::GetSyntheticAtIndex(uint32_t index) {
- LLDB_RECORD_METHOD(lldb::SBTypeSynthetic, SBTypeCategory, GetSyntheticAtIndex,
- (uint32_t), index);
+ LLDB_INSTRUMENT_VA(this, index);
if (!IsValid())
- return LLDB_RECORD_RESULT(SBTypeSynthetic());
+ return SBTypeSynthetic();
lldb::SyntheticChildrenSP children_sp =
m_opaque_sp->GetSyntheticAtIndex((index));
if (!children_sp.get())
- return LLDB_RECORD_RESULT(lldb::SBTypeSynthetic());
+ return lldb::SBTypeSynthetic();
ScriptedSyntheticChildrenSP synth_sp =
std::static_pointer_cast<ScriptedSyntheticChildren>(children_sp);
- return LLDB_RECORD_RESULT(lldb::SBTypeSynthetic(synth_sp));
+ return lldb::SBTypeSynthetic(synth_sp);
}
bool SBTypeCategory::AddTypeFormat(SBTypeNameSpecifier type_name,
SBTypeFormat format) {
- LLDB_RECORD_METHOD(bool, SBTypeCategory, AddTypeFormat,
- (lldb::SBTypeNameSpecifier, lldb::SBTypeFormat), type_name,
- format);
+ LLDB_INSTRUMENT_VA(this, type_name, format);
if (!IsValid())
return false;
@@ -372,8 +351,7 @@ bool SBTypeCategory::AddTypeFormat(SBTypeNameSpecifier type_name,
}
bool SBTypeCategory::DeleteTypeFormat(SBTypeNameSpecifier type_name) {
- LLDB_RECORD_METHOD(bool, SBTypeCategory, DeleteTypeFormat,
- (lldb::SBTypeNameSpecifier), type_name);
+ LLDB_INSTRUMENT_VA(this, type_name);
if (!IsValid())
return false;
@@ -391,9 +369,7 @@ bool SBTypeCategory::DeleteTypeFormat(SBTypeNameSpecifier type_name) {
bool SBTypeCategory::AddTypeSummary(SBTypeNameSpecifier type_name,
SBTypeSummary summary) {
- LLDB_RECORD_METHOD(bool, SBTypeCategory, AddTypeSummary,
- (lldb::SBTypeNameSpecifier, lldb::SBTypeSummary),
- type_name, summary);
+ LLDB_INSTRUMENT_VA(this, type_name, summary);
if (!IsValid())
return false;
@@ -449,8 +425,7 @@ bool SBTypeCategory::AddTypeSummary(SBTypeNameSpecifier type_name,
}
bool SBTypeCategory::DeleteTypeSummary(SBTypeNameSpecifier type_name) {
- LLDB_RECORD_METHOD(bool, SBTypeCategory, DeleteTypeSummary,
- (lldb::SBTypeNameSpecifier), type_name);
+ LLDB_INSTRUMENT_VA(this, type_name);
if (!IsValid())
return false;
@@ -468,9 +443,7 @@ bool SBTypeCategory::DeleteTypeSummary(SBTypeNameSpecifier type_name) {
bool SBTypeCategory::AddTypeFilter(SBTypeNameSpecifier type_name,
SBTypeFilter filter) {
- LLDB_RECORD_METHOD(bool, SBTypeCategory, AddTypeFilter,
- (lldb::SBTypeNameSpecifier, lldb::SBTypeFilter), type_name,
- filter);
+ LLDB_INSTRUMENT_VA(this, type_name, filter);
if (!IsValid())
return false;
@@ -492,8 +465,7 @@ bool SBTypeCategory::AddTypeFilter(SBTypeNameSpecifier type_name,
}
bool SBTypeCategory::DeleteTypeFilter(SBTypeNameSpecifier type_name) {
- LLDB_RECORD_METHOD(bool, SBTypeCategory, DeleteTypeFilter,
- (lldb::SBTypeNameSpecifier), type_name);
+ LLDB_INSTRUMENT_VA(this, type_name);
if (!IsValid())
return false;
@@ -511,9 +483,7 @@ bool SBTypeCategory::DeleteTypeFilter(SBTypeNameSpecifier type_name) {
bool SBTypeCategory::AddTypeSynthetic(SBTypeNameSpecifier type_name,
SBTypeSynthetic synth) {
- LLDB_RECORD_METHOD(bool, SBTypeCategory, AddTypeSynthetic,
- (lldb::SBTypeNameSpecifier, lldb::SBTypeSynthetic),
- type_name, synth);
+ LLDB_INSTRUMENT_VA(this, type_name, synth);
if (!IsValid())
return false;
@@ -569,8 +539,7 @@ bool SBTypeCategory::AddTypeSynthetic(SBTypeNameSpecifier type_name,
}
bool SBTypeCategory::DeleteTypeSynthetic(SBTypeNameSpecifier type_name) {
- LLDB_RECORD_METHOD(bool, SBTypeCategory, DeleteTypeSynthetic,
- (lldb::SBTypeNameSpecifier), type_name);
+ LLDB_INSTRUMENT_VA(this, type_name);
if (!IsValid())
return false;
@@ -588,9 +557,7 @@ bool SBTypeCategory::DeleteTypeSynthetic(SBTypeNameSpecifier type_name) {
bool SBTypeCategory::GetDescription(lldb::SBStream &description,
lldb::DescriptionLevel description_level) {
- LLDB_RECORD_METHOD(bool, SBTypeCategory, GetDescription,
- (lldb::SBStream &, lldb::DescriptionLevel), description,
- description_level);
+ LLDB_INSTRUMENT_VA(this, description, description_level);
if (!IsValid())
return false;
@@ -600,19 +567,16 @@ bool SBTypeCategory::GetDescription(lldb::SBStream &description,
lldb::SBTypeCategory &SBTypeCategory::
operator=(const lldb::SBTypeCategory &rhs) {
- LLDB_RECORD_METHOD(lldb::SBTypeCategory &,
- SBTypeCategory, operator=,(const lldb::SBTypeCategory &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs) {
m_opaque_sp = rhs.m_opaque_sp;
}
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
bool SBTypeCategory::operator==(lldb::SBTypeCategory &rhs) {
- LLDB_RECORD_METHOD(bool, SBTypeCategory, operator==,(lldb::SBTypeCategory &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (!IsValid())
return !rhs.IsValid();
@@ -621,8 +585,7 @@ bool SBTypeCategory::operator==(lldb::SBTypeCategory &rhs) {
}
bool SBTypeCategory::operator!=(lldb::SBTypeCategory &rhs) {
- LLDB_RECORD_METHOD(bool, SBTypeCategory, operator!=,(lldb::SBTypeCategory &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (!IsValid())
return rhs.IsValid();
@@ -651,78 +614,3 @@ bool SBTypeCategory::IsDefaultCategory() {
return (strcmp(m_opaque_sp->GetName(), "default") == 0);
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBTypeCategory>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBTypeCategory, ());
- LLDB_REGISTER_CONSTRUCTOR(SBTypeCategory, (const lldb::SBTypeCategory &));
- LLDB_REGISTER_METHOD_CONST(bool, SBTypeCategory, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBTypeCategory, operator bool, ());
- LLDB_REGISTER_METHOD(bool, SBTypeCategory, GetEnabled, ());
- LLDB_REGISTER_METHOD(void, SBTypeCategory, SetEnabled, (bool));
- LLDB_REGISTER_METHOD(const char *, SBTypeCategory, GetName, ());
- LLDB_REGISTER_METHOD(lldb::LanguageType, SBTypeCategory, GetLanguageAtIndex,
- (uint32_t));
- LLDB_REGISTER_METHOD(uint32_t, SBTypeCategory, GetNumLanguages, ());
- LLDB_REGISTER_METHOD(void, SBTypeCategory, AddLanguage,
- (lldb::LanguageType));
- LLDB_REGISTER_METHOD(uint32_t, SBTypeCategory, GetNumFormats, ());
- LLDB_REGISTER_METHOD(uint32_t, SBTypeCategory, GetNumSummaries, ());
- LLDB_REGISTER_METHOD(uint32_t, SBTypeCategory, GetNumFilters, ());
- LLDB_REGISTER_METHOD(uint32_t, SBTypeCategory, GetNumSynthetics, ());
- LLDB_REGISTER_METHOD(lldb::SBTypeNameSpecifier, SBTypeCategory,
- GetTypeNameSpecifierForSyntheticAtIndex, (uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBTypeSummary, SBTypeCategory, GetSummaryForType,
- (lldb::SBTypeNameSpecifier));
- LLDB_REGISTER_METHOD(lldb::SBTypeSynthetic, SBTypeCategory,
- GetSyntheticForType, (lldb::SBTypeNameSpecifier));
- LLDB_REGISTER_METHOD(lldb::SBTypeFilter, SBTypeCategory, GetFilterAtIndex,
- (uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBTypeSummary, SBTypeCategory, GetSummaryAtIndex,
- (uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBTypeSynthetic, SBTypeCategory,
- GetSyntheticAtIndex, (uint32_t));
- LLDB_REGISTER_METHOD(bool, SBTypeCategory, AddTypeSummary,
- (lldb::SBTypeNameSpecifier, lldb::SBTypeSummary));
- LLDB_REGISTER_METHOD(bool, SBTypeCategory, AddTypeSynthetic,
- (lldb::SBTypeNameSpecifier, lldb::SBTypeSynthetic));
- LLDB_REGISTER_METHOD(bool, SBTypeCategory, DeleteTypeSynthetic,
- (lldb::SBTypeNameSpecifier));
- LLDB_REGISTER_METHOD(lldb::SBTypeNameSpecifier, SBTypeCategory,
- GetTypeNameSpecifierForFilterAtIndex, (uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBTypeNameSpecifier, SBTypeCategory,
- GetTypeNameSpecifierForFormatAtIndex, (uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBTypeNameSpecifier, SBTypeCategory,
- GetTypeNameSpecifierForSummaryAtIndex, (uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBTypeFilter, SBTypeCategory, GetFilterForType,
- (lldb::SBTypeNameSpecifier));
- LLDB_REGISTER_METHOD(lldb::SBTypeFormat, SBTypeCategory, GetFormatForType,
- (lldb::SBTypeNameSpecifier));
- LLDB_REGISTER_METHOD(lldb::SBTypeFormat, SBTypeCategory, GetFormatAtIndex,
- (uint32_t));
- LLDB_REGISTER_METHOD(bool, SBTypeCategory, AddTypeFormat,
- (lldb::SBTypeNameSpecifier, lldb::SBTypeFormat));
- LLDB_REGISTER_METHOD(bool, SBTypeCategory, DeleteTypeFormat,
- (lldb::SBTypeNameSpecifier));
- LLDB_REGISTER_METHOD(bool, SBTypeCategory, DeleteTypeSummary,
- (lldb::SBTypeNameSpecifier));
- LLDB_REGISTER_METHOD(bool, SBTypeCategory, AddTypeFilter,
- (lldb::SBTypeNameSpecifier, lldb::SBTypeFilter));
- LLDB_REGISTER_METHOD(bool, SBTypeCategory, DeleteTypeFilter,
- (lldb::SBTypeNameSpecifier));
- LLDB_REGISTER_METHOD(bool, SBTypeCategory, GetDescription,
- (lldb::SBStream &, lldb::DescriptionLevel));
- LLDB_REGISTER_METHOD(
- lldb::SBTypeCategory &,
- SBTypeCategory, operator=,(const lldb::SBTypeCategory &));
- LLDB_REGISTER_METHOD(bool,
- SBTypeCategory, operator==,(lldb::SBTypeCategory &));
- LLDB_REGISTER_METHOD(bool,
- SBTypeCategory, operator!=,(lldb::SBTypeCategory &));
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBTypeEnumMember.cpp b/contrib/llvm-project/lldb/source/API/SBTypeEnumMember.cpp
index 43a4891b54b1..a3d99bd57e31 100644
--- a/contrib/llvm-project/lldb/source/API/SBTypeEnumMember.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBTypeEnumMember.cpp
@@ -7,13 +7,13 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBTypeEnumMember.h"
-#include "SBReproducerPrivate.h"
#include "Utils.h"
#include "lldb/API/SBDefines.h"
#include "lldb/API/SBStream.h"
#include "lldb/API/SBType.h"
#include "lldb/Symbol/CompilerType.h"
#include "lldb/Symbol/Type.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Utility/Stream.h"
#include <memory>
@@ -21,9 +21,7 @@
using namespace lldb;
using namespace lldb_private;
-SBTypeEnumMember::SBTypeEnumMember() : m_opaque_sp() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBTypeEnumMember);
-}
+SBTypeEnumMember::SBTypeEnumMember() { LLDB_INSTRUMENT_VA(this); }
SBTypeEnumMember::~SBTypeEnumMember() = default;
@@ -31,36 +29,32 @@ SBTypeEnumMember::SBTypeEnumMember(
const lldb::TypeEnumMemberImplSP &enum_member_sp)
: m_opaque_sp(enum_member_sp) {}
-SBTypeEnumMember::SBTypeEnumMember(const SBTypeEnumMember &rhs)
- : m_opaque_sp() {
- LLDB_RECORD_CONSTRUCTOR(SBTypeEnumMember, (const lldb::SBTypeEnumMember &),
- rhs);
+SBTypeEnumMember::SBTypeEnumMember(const SBTypeEnumMember &rhs) {
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_sp = clone(rhs.m_opaque_sp);
}
SBTypeEnumMember &SBTypeEnumMember::operator=(const SBTypeEnumMember &rhs) {
- LLDB_RECORD_METHOD(
- SBTypeEnumMember &,
- SBTypeEnumMember, operator=,(const lldb::SBTypeEnumMember &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_sp = clone(rhs.m_opaque_sp);
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
bool SBTypeEnumMember::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBTypeEnumMember, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBTypeEnumMember::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBTypeEnumMember, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp.get();
}
const char *SBTypeEnumMember::GetName() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBTypeEnumMember, GetName);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_sp.get())
return m_opaque_sp->GetName().GetCString();
@@ -68,7 +62,7 @@ const char *SBTypeEnumMember::GetName() {
}
int64_t SBTypeEnumMember::GetValueAsSigned() {
- LLDB_RECORD_METHOD_NO_ARGS(int64_t, SBTypeEnumMember, GetValueAsSigned);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_sp.get())
return m_opaque_sp->GetValueAsSigned();
@@ -76,7 +70,7 @@ int64_t SBTypeEnumMember::GetValueAsSigned() {
}
uint64_t SBTypeEnumMember::GetValueAsUnsigned() {
- LLDB_RECORD_METHOD_NO_ARGS(uint64_t, SBTypeEnumMember, GetValueAsUnsigned);
+ LLDB_INSTRUMENT_VA(this);
if (m_opaque_sp.get())
return m_opaque_sp->GetValueAsUnsigned();
@@ -84,13 +78,13 @@ uint64_t SBTypeEnumMember::GetValueAsUnsigned() {
}
SBType SBTypeEnumMember::GetType() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBType, SBTypeEnumMember, GetType);
+ LLDB_INSTRUMENT_VA(this);
SBType sb_type;
if (m_opaque_sp.get()) {
sb_type.SetSP(m_opaque_sp->GetIntegerType());
}
- return LLDB_RECORD_RESULT(sb_type);
+ return sb_type;
}
void SBTypeEnumMember::reset(TypeEnumMemberImpl *type_member_impl) {
@@ -109,13 +103,12 @@ const TypeEnumMemberImpl &SBTypeEnumMember::ref() const {
SBTypeEnumMemberList::SBTypeEnumMemberList()
: m_opaque_up(new TypeEnumMemberListImpl()) {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBTypeEnumMemberList);
+ LLDB_INSTRUMENT_VA(this);
}
SBTypeEnumMemberList::SBTypeEnumMemberList(const SBTypeEnumMemberList &rhs)
: m_opaque_up(new TypeEnumMemberListImpl()) {
- LLDB_RECORD_CONSTRUCTOR(SBTypeEnumMemberList,
- (const lldb::SBTypeEnumMemberList &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
for (uint32_t i = 0,
rhs_size = const_cast<SBTypeEnumMemberList &>(rhs).GetSize();
@@ -124,21 +117,18 @@ SBTypeEnumMemberList::SBTypeEnumMemberList(const SBTypeEnumMemberList &rhs)
}
bool SBTypeEnumMemberList::IsValid() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBTypeEnumMemberList, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBTypeEnumMemberList::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBTypeEnumMemberList, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return (m_opaque_up != nullptr);
}
SBTypeEnumMemberList &SBTypeEnumMemberList::
operator=(const SBTypeEnumMemberList &rhs) {
- LLDB_RECORD_METHOD(
- lldb::SBTypeEnumMemberList &,
- SBTypeEnumMemberList, operator=,(const lldb::SBTypeEnumMemberList &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs) {
m_opaque_up = std::make_unique<TypeEnumMemberListImpl>();
@@ -148,12 +138,11 @@ operator=(const SBTypeEnumMemberList &rhs) {
Append(
const_cast<SBTypeEnumMemberList &>(rhs).GetTypeEnumMemberAtIndex(i));
}
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
void SBTypeEnumMemberList::Append(SBTypeEnumMember enum_member) {
- LLDB_RECORD_METHOD(void, SBTypeEnumMemberList, Append,
- (lldb::SBTypeEnumMember), enum_member);
+ LLDB_INSTRUMENT_VA(this, enum_member);
if (enum_member.IsValid())
m_opaque_up->Append(enum_member.m_opaque_sp);
@@ -161,17 +150,15 @@ void SBTypeEnumMemberList::Append(SBTypeEnumMember enum_member) {
SBTypeEnumMember
SBTypeEnumMemberList::GetTypeEnumMemberAtIndex(uint32_t index) {
- LLDB_RECORD_METHOD(lldb::SBTypeEnumMember, SBTypeEnumMemberList,
- GetTypeEnumMemberAtIndex, (uint32_t), index);
+ LLDB_INSTRUMENT_VA(this, index);
if (m_opaque_up)
- return LLDB_RECORD_RESULT(
- SBTypeEnumMember(m_opaque_up->GetTypeEnumMemberAtIndex(index)));
- return LLDB_RECORD_RESULT(SBTypeEnumMember());
+ return SBTypeEnumMember(m_opaque_up->GetTypeEnumMemberAtIndex(index));
+ return SBTypeEnumMember();
}
uint32_t SBTypeEnumMemberList::GetSize() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBTypeEnumMemberList, GetSize);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetSize();
}
@@ -180,9 +167,7 @@ SBTypeEnumMemberList::~SBTypeEnumMemberList() = default;
bool SBTypeEnumMember::GetDescription(
lldb::SBStream &description, lldb::DescriptionLevel description_level) {
- LLDB_RECORD_METHOD(bool, SBTypeEnumMember, GetDescription,
- (lldb::SBStream &, lldb::DescriptionLevel), description,
- description_level);
+ LLDB_INSTRUMENT_VA(this, description, description_level);
Stream &strm = description.ref();
@@ -196,40 +181,3 @@ bool SBTypeEnumMember::GetDescription(
}
return true;
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBTypeEnumMember>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBTypeEnumMember, ());
- LLDB_REGISTER_CONSTRUCTOR(SBTypeEnumMember,
- (const lldb::SBTypeEnumMember &));
- LLDB_REGISTER_METHOD(
- lldb::SBTypeEnumMember &,
- SBTypeEnumMember, operator=,(const lldb::SBTypeEnumMember &));
- LLDB_REGISTER_METHOD_CONST(bool, SBTypeEnumMember, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBTypeEnumMember, operator bool, ());
- LLDB_REGISTER_METHOD(const char *, SBTypeEnumMember, GetName, ());
- LLDB_REGISTER_METHOD(int64_t, SBTypeEnumMember, GetValueAsSigned, ());
- LLDB_REGISTER_METHOD(uint64_t, SBTypeEnumMember, GetValueAsUnsigned, ());
- LLDB_REGISTER_METHOD(lldb::SBType, SBTypeEnumMember, GetType, ());
- LLDB_REGISTER_CONSTRUCTOR(SBTypeEnumMemberList, ());
- LLDB_REGISTER_CONSTRUCTOR(SBTypeEnumMemberList,
- (const lldb::SBTypeEnumMemberList &));
- LLDB_REGISTER_METHOD(bool, SBTypeEnumMemberList, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBTypeEnumMemberList, operator bool, ());
- LLDB_REGISTER_METHOD(
- lldb::SBTypeEnumMemberList &,
- SBTypeEnumMemberList, operator=,(const lldb::SBTypeEnumMemberList &));
- LLDB_REGISTER_METHOD(void, SBTypeEnumMemberList, Append,
- (lldb::SBTypeEnumMember));
- LLDB_REGISTER_METHOD(lldb::SBTypeEnumMember, SBTypeEnumMemberList,
- GetTypeEnumMemberAtIndex, (uint32_t));
- LLDB_REGISTER_METHOD(uint32_t, SBTypeEnumMemberList, GetSize, ());
- LLDB_REGISTER_METHOD(bool, SBTypeEnumMember, GetDescription,
- (lldb::SBStream &, lldb::DescriptionLevel));
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBTypeFilter.cpp b/contrib/llvm-project/lldb/source/API/SBTypeFilter.cpp
index 5f91a194f16b..94f222b254b2 100644
--- a/contrib/llvm-project/lldb/source/API/SBTypeFilter.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBTypeFilter.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBTypeFilter.h"
-#include "SBReproducerPrivate.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/API/SBStream.h"
@@ -16,34 +16,32 @@
using namespace lldb;
using namespace lldb_private;
-SBTypeFilter::SBTypeFilter() : m_opaque_sp() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBTypeFilter);
-}
+SBTypeFilter::SBTypeFilter() { LLDB_INSTRUMENT_VA(this); }
SBTypeFilter::SBTypeFilter(uint32_t options)
: m_opaque_sp(TypeFilterImplSP(new TypeFilterImpl(options))) {
- LLDB_RECORD_CONSTRUCTOR(SBTypeFilter, (uint32_t), options);
+ LLDB_INSTRUMENT_VA(this, options);
}
SBTypeFilter::SBTypeFilter(const lldb::SBTypeFilter &rhs)
: m_opaque_sp(rhs.m_opaque_sp) {
- LLDB_RECORD_CONSTRUCTOR(SBTypeFilter, (const lldb::SBTypeFilter &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
SBTypeFilter::~SBTypeFilter() = default;
bool SBTypeFilter::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBTypeFilter, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBTypeFilter::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBTypeFilter, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp.get() != nullptr;
}
uint32_t SBTypeFilter::GetOptions() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBTypeFilter, GetOptions);
+ LLDB_INSTRUMENT_VA(this);
if (IsValid())
return m_opaque_sp->GetOptions();
@@ -51,7 +49,7 @@ uint32_t SBTypeFilter::GetOptions() {
}
void SBTypeFilter::SetOptions(uint32_t value) {
- LLDB_RECORD_METHOD(void, SBTypeFilter, SetOptions, (uint32_t), value);
+ LLDB_INSTRUMENT_VA(this, value);
if (CopyOnWrite_Impl())
m_opaque_sp->SetOptions(value);
@@ -59,9 +57,7 @@ void SBTypeFilter::SetOptions(uint32_t value) {
bool SBTypeFilter::GetDescription(lldb::SBStream &description,
lldb::DescriptionLevel description_level) {
- LLDB_RECORD_METHOD(bool, SBTypeFilter, GetDescription,
- (lldb::SBStream &, lldb::DescriptionLevel), description,
- description_level);
+ LLDB_INSTRUMENT_VA(this, description, description_level);
if (!IsValid())
return false;
@@ -72,15 +68,14 @@ bool SBTypeFilter::GetDescription(lldb::SBStream &description,
}
void SBTypeFilter::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBTypeFilter, Clear);
+ LLDB_INSTRUMENT_VA(this);
if (CopyOnWrite_Impl())
m_opaque_sp->Clear();
}
uint32_t SBTypeFilter::GetNumberOfExpressionPaths() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBTypeFilter,
- GetNumberOfExpressionPaths);
+ LLDB_INSTRUMENT_VA(this);
if (IsValid())
return m_opaque_sp->GetCount();
@@ -88,8 +83,7 @@ uint32_t SBTypeFilter::GetNumberOfExpressionPaths() {
}
const char *SBTypeFilter::GetExpressionPathAtIndex(uint32_t i) {
- LLDB_RECORD_METHOD(const char *, SBTypeFilter, GetExpressionPathAtIndex,
- (uint32_t), i);
+ LLDB_INSTRUMENT_VA(this, i);
if (IsValid()) {
const char *item = m_opaque_sp->GetExpressionPathAtIndex(i);
@@ -101,8 +95,7 @@ const char *SBTypeFilter::GetExpressionPathAtIndex(uint32_t i) {
}
bool SBTypeFilter::ReplaceExpressionPathAtIndex(uint32_t i, const char *item) {
- LLDB_RECORD_METHOD(bool, SBTypeFilter, ReplaceExpressionPathAtIndex,
- (uint32_t, const char *), i, item);
+ LLDB_INSTRUMENT_VA(this, i, item);
if (CopyOnWrite_Impl())
return m_opaque_sp->SetExpressionPathAtIndex(i, item);
@@ -111,26 +104,23 @@ bool SBTypeFilter::ReplaceExpressionPathAtIndex(uint32_t i, const char *item) {
}
void SBTypeFilter::AppendExpressionPath(const char *item) {
- LLDB_RECORD_METHOD(void, SBTypeFilter, AppendExpressionPath, (const char *),
- item);
+ LLDB_INSTRUMENT_VA(this, item);
if (CopyOnWrite_Impl())
m_opaque_sp->AddExpressionPath(item);
}
lldb::SBTypeFilter &SBTypeFilter::operator=(const lldb::SBTypeFilter &rhs) {
- LLDB_RECORD_METHOD(lldb::SBTypeFilter &,
- SBTypeFilter, operator=,(const lldb::SBTypeFilter &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs) {
m_opaque_sp = rhs.m_opaque_sp;
}
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
bool SBTypeFilter::operator==(lldb::SBTypeFilter &rhs) {
- LLDB_RECORD_METHOD(bool, SBTypeFilter, operator==,(lldb::SBTypeFilter &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (!IsValid())
return !rhs.IsValid();
@@ -139,8 +129,7 @@ bool SBTypeFilter::operator==(lldb::SBTypeFilter &rhs) {
}
bool SBTypeFilter::IsEqualTo(lldb::SBTypeFilter &rhs) {
- LLDB_RECORD_METHOD(bool, SBTypeFilter, IsEqualTo, (lldb::SBTypeFilter &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (!IsValid())
return !rhs.IsValid();
@@ -157,8 +146,7 @@ bool SBTypeFilter::IsEqualTo(lldb::SBTypeFilter &rhs) {
}
bool SBTypeFilter::operator!=(lldb::SBTypeFilter &rhs) {
- LLDB_RECORD_METHOD(bool, SBTypeFilter, operator!=,(lldb::SBTypeFilter &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (!IsValid())
return !rhs.IsValid();
@@ -190,36 +178,3 @@ bool SBTypeFilter::CopyOnWrite_Impl() {
return true;
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBTypeFilter>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBTypeFilter, ());
- LLDB_REGISTER_CONSTRUCTOR(SBTypeFilter, (uint32_t));
- LLDB_REGISTER_CONSTRUCTOR(SBTypeFilter, (const lldb::SBTypeFilter &));
- LLDB_REGISTER_METHOD_CONST(bool, SBTypeFilter, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBTypeFilter, operator bool, ());
- LLDB_REGISTER_METHOD(uint32_t, SBTypeFilter, GetOptions, ());
- LLDB_REGISTER_METHOD(void, SBTypeFilter, SetOptions, (uint32_t));
- LLDB_REGISTER_METHOD(bool, SBTypeFilter, GetDescription,
- (lldb::SBStream &, lldb::DescriptionLevel));
- LLDB_REGISTER_METHOD(void, SBTypeFilter, Clear, ());
- LLDB_REGISTER_METHOD(uint32_t, SBTypeFilter, GetNumberOfExpressionPaths,
- ());
- LLDB_REGISTER_METHOD(const char *, SBTypeFilter, GetExpressionPathAtIndex,
- (uint32_t));
- LLDB_REGISTER_METHOD(bool, SBTypeFilter, ReplaceExpressionPathAtIndex,
- (uint32_t, const char *));
- LLDB_REGISTER_METHOD(void, SBTypeFilter, AppendExpressionPath,
- (const char *));
- LLDB_REGISTER_METHOD(lldb::SBTypeFilter &,
- SBTypeFilter, operator=,(const lldb::SBTypeFilter &));
- LLDB_REGISTER_METHOD(bool, SBTypeFilter, operator==,(lldb::SBTypeFilter &));
- LLDB_REGISTER_METHOD(bool, SBTypeFilter, IsEqualTo, (lldb::SBTypeFilter &));
- LLDB_REGISTER_METHOD(bool, SBTypeFilter, operator!=,(lldb::SBTypeFilter &));
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBTypeFormat.cpp b/contrib/llvm-project/lldb/source/API/SBTypeFormat.cpp
index 70289bef8db5..86e11e8b8fde 100644
--- a/contrib/llvm-project/lldb/source/API/SBTypeFormat.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBTypeFormat.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBTypeFormat.h"
-#include "SBReproducerPrivate.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/API/SBStream.h"
@@ -16,43 +16,39 @@
using namespace lldb;
using namespace lldb_private;
-SBTypeFormat::SBTypeFormat() : m_opaque_sp() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBTypeFormat);
-}
+SBTypeFormat::SBTypeFormat() { LLDB_INSTRUMENT_VA(this); }
SBTypeFormat::SBTypeFormat(lldb::Format format, uint32_t options)
: m_opaque_sp(
TypeFormatImplSP(new TypeFormatImpl_Format(format, options))) {
- LLDB_RECORD_CONSTRUCTOR(SBTypeFormat, (lldb::Format, uint32_t), format,
- options);
+ LLDB_INSTRUMENT_VA(this, format, options);
}
SBTypeFormat::SBTypeFormat(const char *type, uint32_t options)
: m_opaque_sp(TypeFormatImplSP(new TypeFormatImpl_EnumType(
ConstString(type ? type : ""), options))) {
- LLDB_RECORD_CONSTRUCTOR(SBTypeFormat, (const char *, uint32_t), type,
- options);
+ LLDB_INSTRUMENT_VA(this, type, options);
}
SBTypeFormat::SBTypeFormat(const lldb::SBTypeFormat &rhs)
: m_opaque_sp(rhs.m_opaque_sp) {
- LLDB_RECORD_CONSTRUCTOR(SBTypeFormat, (const lldb::SBTypeFormat &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
SBTypeFormat::~SBTypeFormat() = default;
bool SBTypeFormat::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBTypeFormat, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBTypeFormat::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBTypeFormat, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp.get() != nullptr;
}
lldb::Format SBTypeFormat::GetFormat() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::Format, SBTypeFormat, GetFormat);
+ LLDB_INSTRUMENT_VA(this);
if (IsValid() && m_opaque_sp->GetType() == TypeFormatImpl::Type::eTypeFormat)
return ((TypeFormatImpl_Format *)m_opaque_sp.get())->GetFormat();
@@ -60,7 +56,7 @@ lldb::Format SBTypeFormat::GetFormat() {
}
const char *SBTypeFormat::GetTypeName() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBTypeFormat, GetTypeName);
+ LLDB_INSTRUMENT_VA(this);
if (IsValid() && m_opaque_sp->GetType() == TypeFormatImpl::Type::eTypeEnum)
return ((TypeFormatImpl_EnumType *)m_opaque_sp.get())
@@ -70,7 +66,7 @@ const char *SBTypeFormat::GetTypeName() {
}
uint32_t SBTypeFormat::GetOptions() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBTypeFormat, GetOptions);
+ LLDB_INSTRUMENT_VA(this);
if (IsValid())
return m_opaque_sp->GetOptions();
@@ -78,14 +74,14 @@ uint32_t SBTypeFormat::GetOptions() {
}
void SBTypeFormat::SetFormat(lldb::Format fmt) {
- LLDB_RECORD_METHOD(void, SBTypeFormat, SetFormat, (lldb::Format), fmt);
+ LLDB_INSTRUMENT_VA(this, fmt);
if (CopyOnWrite_Impl(Type::eTypeFormat))
((TypeFormatImpl_Format *)m_opaque_sp.get())->SetFormat(fmt);
}
void SBTypeFormat::SetTypeName(const char *type) {
- LLDB_RECORD_METHOD(void, SBTypeFormat, SetTypeName, (const char *), type);
+ LLDB_INSTRUMENT_VA(this, type);
if (CopyOnWrite_Impl(Type::eTypeEnum))
((TypeFormatImpl_EnumType *)m_opaque_sp.get())
@@ -93,7 +89,7 @@ void SBTypeFormat::SetTypeName(const char *type) {
}
void SBTypeFormat::SetOptions(uint32_t value) {
- LLDB_RECORD_METHOD(void, SBTypeFormat, SetOptions, (uint32_t), value);
+ LLDB_INSTRUMENT_VA(this, value);
if (CopyOnWrite_Impl(Type::eTypeKeepSame))
m_opaque_sp->SetOptions(value);
@@ -101,9 +97,7 @@ void SBTypeFormat::SetOptions(uint32_t value) {
bool SBTypeFormat::GetDescription(lldb::SBStream &description,
lldb::DescriptionLevel description_level) {
- LLDB_RECORD_METHOD(bool, SBTypeFormat, GetDescription,
- (lldb::SBStream &, lldb::DescriptionLevel), description,
- description_level);
+ LLDB_INSTRUMENT_VA(this, description, description_level);
if (!IsValid())
return false;
@@ -114,18 +108,16 @@ bool SBTypeFormat::GetDescription(lldb::SBStream &description,
}
lldb::SBTypeFormat &SBTypeFormat::operator=(const lldb::SBTypeFormat &rhs) {
- LLDB_RECORD_METHOD(lldb::SBTypeFormat &,
- SBTypeFormat, operator=,(const lldb::SBTypeFormat &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs) {
m_opaque_sp = rhs.m_opaque_sp;
}
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
bool SBTypeFormat::operator==(lldb::SBTypeFormat &rhs) {
- LLDB_RECORD_METHOD(bool, SBTypeFormat, operator==,(lldb::SBTypeFormat &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (!IsValid())
return !rhs.IsValid();
@@ -133,8 +125,7 @@ bool SBTypeFormat::operator==(lldb::SBTypeFormat &rhs) {
}
bool SBTypeFormat::IsEqualTo(lldb::SBTypeFormat &rhs) {
- LLDB_RECORD_METHOD(bool, SBTypeFormat, IsEqualTo, (lldb::SBTypeFormat &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (!IsValid())
return !rhs.IsValid();
@@ -146,8 +137,7 @@ bool SBTypeFormat::IsEqualTo(lldb::SBTypeFormat &rhs) {
}
bool SBTypeFormat::operator!=(lldb::SBTypeFormat &rhs) {
- LLDB_RECORD_METHOD(bool, SBTypeFormat, operator!=,(lldb::SBTypeFormat &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (!IsValid())
return !rhs.IsValid();
@@ -191,32 +181,3 @@ bool SBTypeFormat::CopyOnWrite_Impl(Type type) {
return true;
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBTypeFormat>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBTypeFormat, ());
- LLDB_REGISTER_CONSTRUCTOR(SBTypeFormat, (lldb::Format, uint32_t));
- LLDB_REGISTER_CONSTRUCTOR(SBTypeFormat, (const char *, uint32_t));
- LLDB_REGISTER_CONSTRUCTOR(SBTypeFormat, (const lldb::SBTypeFormat &));
- LLDB_REGISTER_METHOD_CONST(bool, SBTypeFormat, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBTypeFormat, operator bool, ());
- LLDB_REGISTER_METHOD(lldb::Format, SBTypeFormat, GetFormat, ());
- LLDB_REGISTER_METHOD(const char *, SBTypeFormat, GetTypeName, ());
- LLDB_REGISTER_METHOD(uint32_t, SBTypeFormat, GetOptions, ());
- LLDB_REGISTER_METHOD(void, SBTypeFormat, SetFormat, (lldb::Format));
- LLDB_REGISTER_METHOD(void, SBTypeFormat, SetTypeName, (const char *));
- LLDB_REGISTER_METHOD(void, SBTypeFormat, SetOptions, (uint32_t));
- LLDB_REGISTER_METHOD(bool, SBTypeFormat, GetDescription,
- (lldb::SBStream &, lldb::DescriptionLevel));
- LLDB_REGISTER_METHOD(lldb::SBTypeFormat &,
- SBTypeFormat, operator=,(const lldb::SBTypeFormat &));
- LLDB_REGISTER_METHOD(bool, SBTypeFormat, operator==,(lldb::SBTypeFormat &));
- LLDB_REGISTER_METHOD(bool, SBTypeFormat, IsEqualTo, (lldb::SBTypeFormat &));
- LLDB_REGISTER_METHOD(bool, SBTypeFormat, operator!=,(lldb::SBTypeFormat &));
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBTypeNameSpecifier.cpp b/contrib/llvm-project/lldb/source/API/SBTypeNameSpecifier.cpp
index 3673a5024530..bc83a1d664d0 100644
--- a/contrib/llvm-project/lldb/source/API/SBTypeNameSpecifier.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBTypeNameSpecifier.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBTypeNameSpecifier.h"
-#include "SBReproducerPrivate.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/API/SBStream.h"
#include "lldb/API/SBType.h"
@@ -17,21 +17,18 @@
using namespace lldb;
using namespace lldb_private;
-SBTypeNameSpecifier::SBTypeNameSpecifier() : m_opaque_sp() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBTypeNameSpecifier);
-}
+SBTypeNameSpecifier::SBTypeNameSpecifier() { LLDB_INSTRUMENT_VA(this); }
SBTypeNameSpecifier::SBTypeNameSpecifier(const char *name, bool is_regex)
: m_opaque_sp(new TypeNameSpecifierImpl(name, is_regex)) {
- LLDB_RECORD_CONSTRUCTOR(SBTypeNameSpecifier, (const char *, bool), name,
- is_regex);
+ LLDB_INSTRUMENT_VA(this, name, is_regex);
if (name == nullptr || (*name) == 0)
m_opaque_sp.reset();
}
-SBTypeNameSpecifier::SBTypeNameSpecifier(SBType type) : m_opaque_sp() {
- LLDB_RECORD_CONSTRUCTOR(SBTypeNameSpecifier, (lldb::SBType), type);
+SBTypeNameSpecifier::SBTypeNameSpecifier(SBType type) {
+ LLDB_INSTRUMENT_VA(this, type);
if (type.IsValid())
m_opaque_sp = TypeNameSpecifierImplSP(
@@ -40,24 +37,23 @@ SBTypeNameSpecifier::SBTypeNameSpecifier(SBType type) : m_opaque_sp() {
SBTypeNameSpecifier::SBTypeNameSpecifier(const lldb::SBTypeNameSpecifier &rhs)
: m_opaque_sp(rhs.m_opaque_sp) {
- LLDB_RECORD_CONSTRUCTOR(SBTypeNameSpecifier,
- (const lldb::SBTypeNameSpecifier &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
SBTypeNameSpecifier::~SBTypeNameSpecifier() = default;
bool SBTypeNameSpecifier::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBTypeNameSpecifier, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBTypeNameSpecifier::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBTypeNameSpecifier, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp.get() != nullptr;
}
const char *SBTypeNameSpecifier::GetName() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBTypeNameSpecifier, GetName);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return nullptr;
@@ -66,18 +62,18 @@ const char *SBTypeNameSpecifier::GetName() {
}
SBType SBTypeNameSpecifier::GetType() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBType, SBTypeNameSpecifier, GetType);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
- return LLDB_RECORD_RESULT(SBType());
+ return SBType();
lldb_private::CompilerType c_type = m_opaque_sp->GetCompilerType();
if (c_type.IsValid())
- return LLDB_RECORD_RESULT(SBType(c_type));
- return LLDB_RECORD_RESULT(SBType());
+ return SBType(c_type);
+ return SBType();
}
bool SBTypeNameSpecifier::IsRegex() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBTypeNameSpecifier, IsRegex);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return false;
@@ -87,9 +83,7 @@ bool SBTypeNameSpecifier::IsRegex() {
bool SBTypeNameSpecifier::GetDescription(
lldb::SBStream &description, lldb::DescriptionLevel description_level) {
- LLDB_RECORD_METHOD(bool, SBTypeNameSpecifier, GetDescription,
- (lldb::SBStream &, lldb::DescriptionLevel), description,
- description_level);
+ LLDB_INSTRUMENT_VA(this, description, description_level);
if (!IsValid())
return false;
@@ -100,19 +94,16 @@ bool SBTypeNameSpecifier::GetDescription(
lldb::SBTypeNameSpecifier &SBTypeNameSpecifier::
operator=(const lldb::SBTypeNameSpecifier &rhs) {
- LLDB_RECORD_METHOD(
- lldb::SBTypeNameSpecifier &,
- SBTypeNameSpecifier, operator=,(const lldb::SBTypeNameSpecifier &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs) {
m_opaque_sp = rhs.m_opaque_sp;
}
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
bool SBTypeNameSpecifier::operator==(lldb::SBTypeNameSpecifier &rhs) {
- LLDB_RECORD_METHOD(
- bool, SBTypeNameSpecifier, operator==,(lldb::SBTypeNameSpecifier &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (!IsValid())
return !rhs.IsValid();
@@ -120,8 +111,7 @@ bool SBTypeNameSpecifier::operator==(lldb::SBTypeNameSpecifier &rhs) {
}
bool SBTypeNameSpecifier::IsEqualTo(lldb::SBTypeNameSpecifier &rhs) {
- LLDB_RECORD_METHOD(bool, SBTypeNameSpecifier, IsEqualTo,
- (lldb::SBTypeNameSpecifier &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (!IsValid())
return !rhs.IsValid();
@@ -135,8 +125,7 @@ bool SBTypeNameSpecifier::IsEqualTo(lldb::SBTypeNameSpecifier &rhs) {
}
bool SBTypeNameSpecifier::operator!=(lldb::SBTypeNameSpecifier &rhs) {
- LLDB_RECORD_METHOD(
- bool, SBTypeNameSpecifier, operator!=,(lldb::SBTypeNameSpecifier &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (!IsValid())
return !rhs.IsValid();
@@ -155,34 +144,3 @@ void SBTypeNameSpecifier::SetSP(
SBTypeNameSpecifier::SBTypeNameSpecifier(
const lldb::TypeNameSpecifierImplSP &type_namespec_sp)
: m_opaque_sp(type_namespec_sp) {}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBTypeNameSpecifier>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBTypeNameSpecifier, ());
- LLDB_REGISTER_CONSTRUCTOR(SBTypeNameSpecifier, (const char *, bool));
- LLDB_REGISTER_CONSTRUCTOR(SBTypeNameSpecifier, (lldb::SBType));
- LLDB_REGISTER_CONSTRUCTOR(SBTypeNameSpecifier,
- (const lldb::SBTypeNameSpecifier &));
- LLDB_REGISTER_METHOD_CONST(bool, SBTypeNameSpecifier, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBTypeNameSpecifier, operator bool, ());
- LLDB_REGISTER_METHOD(const char *, SBTypeNameSpecifier, GetName, ());
- LLDB_REGISTER_METHOD(lldb::SBType, SBTypeNameSpecifier, GetType, ());
- LLDB_REGISTER_METHOD(bool, SBTypeNameSpecifier, IsRegex, ());
- LLDB_REGISTER_METHOD(bool, SBTypeNameSpecifier, GetDescription,
- (lldb::SBStream &, lldb::DescriptionLevel));
- LLDB_REGISTER_METHOD(
- lldb::SBTypeNameSpecifier &,
- SBTypeNameSpecifier, operator=,(const lldb::SBTypeNameSpecifier &));
- LLDB_REGISTER_METHOD(
- bool, SBTypeNameSpecifier, operator==,(lldb::SBTypeNameSpecifier &));
- LLDB_REGISTER_METHOD(bool, SBTypeNameSpecifier, IsEqualTo,
- (lldb::SBTypeNameSpecifier &));
- LLDB_REGISTER_METHOD(
- bool, SBTypeNameSpecifier, operator!=,(lldb::SBTypeNameSpecifier &));
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBTypeSummary.cpp b/contrib/llvm-project/lldb/source/API/SBTypeSummary.cpp
index 2d7f8ef340c9..a65dfc987ad2 100644
--- a/contrib/llvm-project/lldb/source/API/SBTypeSummary.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBTypeSummary.cpp
@@ -7,11 +7,11 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBTypeSummary.h"
-#include "SBReproducerPrivate.h"
#include "Utils.h"
#include "lldb/API/SBStream.h"
#include "lldb/API/SBValue.h"
#include "lldb/DataFormatters/DataVisualization.h"
+#include "lldb/Utility/Instrumentation.h"
#include "llvm/Support/Casting.h"
@@ -19,15 +19,14 @@ using namespace lldb;
using namespace lldb_private;
SBTypeSummaryOptions::SBTypeSummaryOptions() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBTypeSummaryOptions);
+ LLDB_INSTRUMENT_VA(this);
m_opaque_up = std::make_unique<TypeSummaryOptions>();
}
SBTypeSummaryOptions::SBTypeSummaryOptions(
const lldb::SBTypeSummaryOptions &rhs) {
- LLDB_RECORD_CONSTRUCTOR(SBTypeSummaryOptions,
- (const lldb::SBTypeSummaryOptions &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_up = clone(rhs.m_opaque_up);
}
@@ -35,18 +34,17 @@ SBTypeSummaryOptions::SBTypeSummaryOptions(
SBTypeSummaryOptions::~SBTypeSummaryOptions() = default;
bool SBTypeSummaryOptions::IsValid() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBTypeSummaryOptions, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBTypeSummaryOptions::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBTypeSummaryOptions, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up.get();
}
lldb::LanguageType SBTypeSummaryOptions::GetLanguage() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::LanguageType, SBTypeSummaryOptions,
- GetLanguage);
+ LLDB_INSTRUMENT_VA(this);
if (IsValid())
return m_opaque_up->GetLanguage();
@@ -54,8 +52,7 @@ lldb::LanguageType SBTypeSummaryOptions::GetLanguage() {
}
lldb::TypeSummaryCapping SBTypeSummaryOptions::GetCapping() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::TypeSummaryCapping, SBTypeSummaryOptions,
- GetCapping);
+ LLDB_INSTRUMENT_VA(this);
if (IsValid())
return m_opaque_up->GetCapping();
@@ -63,16 +60,14 @@ lldb::TypeSummaryCapping SBTypeSummaryOptions::GetCapping() {
}
void SBTypeSummaryOptions::SetLanguage(lldb::LanguageType l) {
- LLDB_RECORD_METHOD(void, SBTypeSummaryOptions, SetLanguage,
- (lldb::LanguageType), l);
+ LLDB_INSTRUMENT_VA(this, l);
if (IsValid())
m_opaque_up->SetLanguage(l);
}
void SBTypeSummaryOptions::SetCapping(lldb::TypeSummaryCapping c) {
- LLDB_RECORD_METHOD(void, SBTypeSummaryOptions, SetCapping,
- (lldb::TypeSummaryCapping), c);
+ LLDB_INSTRUMENT_VA(this, c);
if (IsValid())
m_opaque_up->SetCapping(c);
@@ -102,61 +97,48 @@ const lldb_private::TypeSummaryOptions &SBTypeSummaryOptions::ref() const {
SBTypeSummaryOptions::SBTypeSummaryOptions(
const lldb_private::TypeSummaryOptions &lldb_object)
: m_opaque_up(std::make_unique<TypeSummaryOptions>(lldb_object)) {
- LLDB_RECORD_CONSTRUCTOR(SBTypeSummaryOptions,
- (const lldb_private::TypeSummaryOptions &),
- lldb_object);
+ LLDB_INSTRUMENT_VA(this, lldb_object);
}
-SBTypeSummary::SBTypeSummary() : m_opaque_sp() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBTypeSummary);
-}
+SBTypeSummary::SBTypeSummary() { LLDB_INSTRUMENT_VA(this); }
SBTypeSummary SBTypeSummary::CreateWithSummaryString(const char *data,
uint32_t options) {
- LLDB_RECORD_STATIC_METHOD(lldb::SBTypeSummary, SBTypeSummary,
- CreateWithSummaryString, (const char *, uint32_t),
- data, options);
+ LLDB_INSTRUMENT_VA(data, options);
if (!data || data[0] == 0)
- return LLDB_RECORD_RESULT(SBTypeSummary());
+ return SBTypeSummary();
- return LLDB_RECORD_RESULT(
- SBTypeSummary(TypeSummaryImplSP(new StringSummaryFormat(options, data))));
+ return SBTypeSummary(
+ TypeSummaryImplSP(new StringSummaryFormat(options, data)));
}
SBTypeSummary SBTypeSummary::CreateWithFunctionName(const char *data,
uint32_t options) {
- LLDB_RECORD_STATIC_METHOD(lldb::SBTypeSummary, SBTypeSummary,
- CreateWithFunctionName, (const char *, uint32_t),
- data, options);
+ LLDB_INSTRUMENT_VA(data, options);
if (!data || data[0] == 0)
- return LLDB_RECORD_RESULT(SBTypeSummary());
+ return SBTypeSummary();
- return LLDB_RECORD_RESULT(
- SBTypeSummary(TypeSummaryImplSP(new ScriptSummaryFormat(options, data))));
+ return SBTypeSummary(
+ TypeSummaryImplSP(new ScriptSummaryFormat(options, data)));
}
SBTypeSummary SBTypeSummary::CreateWithScriptCode(const char *data,
uint32_t options) {
- LLDB_RECORD_STATIC_METHOD(lldb::SBTypeSummary, SBTypeSummary,
- CreateWithScriptCode, (const char *, uint32_t),
- data, options);
+ LLDB_INSTRUMENT_VA(data, options);
if (!data || data[0] == 0)
- return LLDB_RECORD_RESULT(SBTypeSummary());
+ return SBTypeSummary();
- return LLDB_RECORD_RESULT(SBTypeSummary(
- TypeSummaryImplSP(new ScriptSummaryFormat(options, "", data))));
+ return SBTypeSummary(
+ TypeSummaryImplSP(new ScriptSummaryFormat(options, "", data)));
}
SBTypeSummary SBTypeSummary::CreateWithCallback(FormatCallback cb,
uint32_t options,
const char *description) {
- LLDB_RECORD_DUMMY(
- lldb::SBTypeSummary, SBTypeSummary, CreateWithCallback,
- (lldb::SBTypeSummary::FormatCallback, uint32_t, const char *), cb,
- options, description);
+ LLDB_INSTRUMENT_VA(cb, options, description);
SBTypeSummary retval;
if (cb) {
@@ -180,23 +162,23 @@ SBTypeSummary SBTypeSummary::CreateWithCallback(FormatCallback cb,
SBTypeSummary::SBTypeSummary(const lldb::SBTypeSummary &rhs)
: m_opaque_sp(rhs.m_opaque_sp) {
- LLDB_RECORD_CONSTRUCTOR(SBTypeSummary, (const lldb::SBTypeSummary &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
SBTypeSummary::~SBTypeSummary() = default;
bool SBTypeSummary::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBTypeSummary, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBTypeSummary::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBTypeSummary, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp.get() != nullptr;
}
bool SBTypeSummary::IsFunctionCode() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBTypeSummary, IsFunctionCode);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return false;
@@ -209,7 +191,7 @@ bool SBTypeSummary::IsFunctionCode() {
}
bool SBTypeSummary::IsFunctionName() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBTypeSummary, IsFunctionName);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return false;
@@ -222,7 +204,7 @@ bool SBTypeSummary::IsFunctionName() {
}
bool SBTypeSummary::IsSummaryString() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBTypeSummary, IsSummaryString);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return false;
@@ -231,7 +213,7 @@ bool SBTypeSummary::IsSummaryString() {
}
const char *SBTypeSummary::GetData() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBTypeSummary, GetData);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return nullptr;
@@ -249,7 +231,7 @@ const char *SBTypeSummary::GetData() {
}
uint32_t SBTypeSummary::GetOptions() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBTypeSummary, GetOptions);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return lldb::eTypeOptionNone;
@@ -257,7 +239,7 @@ uint32_t SBTypeSummary::GetOptions() {
}
void SBTypeSummary::SetOptions(uint32_t value) {
- LLDB_RECORD_METHOD(void, SBTypeSummary, SetOptions, (uint32_t), value);
+ LLDB_INSTRUMENT_VA(this, value);
if (!CopyOnWrite_Impl())
return;
@@ -265,8 +247,7 @@ void SBTypeSummary::SetOptions(uint32_t value) {
}
void SBTypeSummary::SetSummaryString(const char *data) {
- LLDB_RECORD_METHOD(void, SBTypeSummary, SetSummaryString, (const char *),
- data);
+ LLDB_INSTRUMENT_VA(this, data);
if (!IsValid())
return;
@@ -278,8 +259,7 @@ void SBTypeSummary::SetSummaryString(const char *data) {
}
void SBTypeSummary::SetFunctionName(const char *data) {
- LLDB_RECORD_METHOD(void, SBTypeSummary, SetFunctionName, (const char *),
- data);
+ LLDB_INSTRUMENT_VA(this, data);
if (!IsValid())
return;
@@ -291,8 +271,7 @@ void SBTypeSummary::SetFunctionName(const char *data) {
}
void SBTypeSummary::SetFunctionCode(const char *data) {
- LLDB_RECORD_METHOD(void, SBTypeSummary, SetFunctionCode, (const char *),
- data);
+ LLDB_INSTRUMENT_VA(this, data);
if (!IsValid())
return;
@@ -305,9 +284,7 @@ void SBTypeSummary::SetFunctionCode(const char *data) {
bool SBTypeSummary::GetDescription(lldb::SBStream &description,
lldb::DescriptionLevel description_level) {
- LLDB_RECORD_METHOD(bool, SBTypeSummary, GetDescription,
- (lldb::SBStream &, lldb::DescriptionLevel), description,
- description_level);
+ LLDB_INSTRUMENT_VA(this, description, description_level);
if (!CopyOnWrite_Impl())
return false;
@@ -318,8 +295,7 @@ bool SBTypeSummary::GetDescription(lldb::SBStream &description,
}
bool SBTypeSummary::DoesPrintValue(lldb::SBValue value) {
- LLDB_RECORD_METHOD(bool, SBTypeSummary, DoesPrintValue, (lldb::SBValue),
- value);
+ LLDB_INSTRUMENT_VA(this, value);
if (!IsValid())
return false;
@@ -328,19 +304,16 @@ bool SBTypeSummary::DoesPrintValue(lldb::SBValue value) {
}
lldb::SBTypeSummary &SBTypeSummary::operator=(const lldb::SBTypeSummary &rhs) {
- LLDB_RECORD_METHOD(lldb::SBTypeSummary &,
- SBTypeSummary, operator=,(const lldb::SBTypeSummary &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs) {
m_opaque_sp = rhs.m_opaque_sp;
}
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
bool SBTypeSummary::operator==(lldb::SBTypeSummary &rhs) {
- LLDB_RECORD_METHOD(bool, SBTypeSummary, operator==,(lldb::SBTypeSummary &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (!IsValid())
return !rhs.IsValid();
@@ -348,8 +321,7 @@ bool SBTypeSummary::operator==(lldb::SBTypeSummary &rhs) {
}
bool SBTypeSummary::IsEqualTo(lldb::SBTypeSummary &rhs) {
- LLDB_RECORD_METHOD(bool, SBTypeSummary, IsEqualTo, (lldb::SBTypeSummary &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (IsValid()) {
// valid and invalid are different
@@ -389,8 +361,7 @@ bool SBTypeSummary::IsEqualTo(lldb::SBTypeSummary &rhs) {
}
bool SBTypeSummary::operator!=(lldb::SBTypeSummary &rhs) {
- LLDB_RECORD_METHOD(bool, SBTypeSummary, operator!=,(lldb::SBTypeSummary &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (!IsValid())
return !rhs.IsValid();
@@ -463,65 +434,3 @@ bool SBTypeSummary::ChangeSummaryType(bool want_script) {
return true;
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBTypeSummaryOptions>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBTypeSummaryOptions, ());
- LLDB_REGISTER_CONSTRUCTOR(SBTypeSummaryOptions,
- (const lldb::SBTypeSummaryOptions &));
- LLDB_REGISTER_METHOD(bool, SBTypeSummaryOptions, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBTypeSummaryOptions, operator bool, ());
- LLDB_REGISTER_METHOD(lldb::LanguageType, SBTypeSummaryOptions, GetLanguage,
- ());
- LLDB_REGISTER_METHOD(lldb::TypeSummaryCapping, SBTypeSummaryOptions,
- GetCapping, ());
- LLDB_REGISTER_METHOD(void, SBTypeSummaryOptions, SetLanguage,
- (lldb::LanguageType));
- LLDB_REGISTER_METHOD(void, SBTypeSummaryOptions, SetCapping,
- (lldb::TypeSummaryCapping));
- LLDB_REGISTER_CONSTRUCTOR(SBTypeSummaryOptions,
- (const lldb_private::TypeSummaryOptions &));
-}
-
-template <>
-void RegisterMethods<SBTypeSummary>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBTypeSummary, ());
- LLDB_REGISTER_STATIC_METHOD(lldb::SBTypeSummary, SBTypeSummary,
- CreateWithSummaryString,
- (const char *, uint32_t));
- LLDB_REGISTER_STATIC_METHOD(lldb::SBTypeSummary, SBTypeSummary,
- CreateWithFunctionName,
- (const char *, uint32_t));
- LLDB_REGISTER_STATIC_METHOD(lldb::SBTypeSummary, SBTypeSummary,
- CreateWithScriptCode, (const char *, uint32_t));
- LLDB_REGISTER_CONSTRUCTOR(SBTypeSummary, (const lldb::SBTypeSummary &));
- LLDB_REGISTER_METHOD_CONST(bool, SBTypeSummary, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBTypeSummary, operator bool, ());
- LLDB_REGISTER_METHOD(bool, SBTypeSummary, IsFunctionCode, ());
- LLDB_REGISTER_METHOD(bool, SBTypeSummary, IsFunctionName, ());
- LLDB_REGISTER_METHOD(bool, SBTypeSummary, IsSummaryString, ());
- LLDB_REGISTER_METHOD(const char *, SBTypeSummary, GetData, ());
- LLDB_REGISTER_METHOD(uint32_t, SBTypeSummary, GetOptions, ());
- LLDB_REGISTER_METHOD(void, SBTypeSummary, SetOptions, (uint32_t));
- LLDB_REGISTER_METHOD(void, SBTypeSummary, SetSummaryString, (const char *));
- LLDB_REGISTER_METHOD(void, SBTypeSummary, SetFunctionName, (const char *));
- LLDB_REGISTER_METHOD(void, SBTypeSummary, SetFunctionCode, (const char *));
- LLDB_REGISTER_METHOD(bool, SBTypeSummary, GetDescription,
- (lldb::SBStream &, lldb::DescriptionLevel));
- LLDB_REGISTER_METHOD(bool, SBTypeSummary, DoesPrintValue, (lldb::SBValue));
- LLDB_REGISTER_METHOD(
- lldb::SBTypeSummary &,
- SBTypeSummary, operator=,(const lldb::SBTypeSummary &));
- LLDB_REGISTER_METHOD(bool,
- SBTypeSummary, operator==,(lldb::SBTypeSummary &));
- LLDB_REGISTER_METHOD(bool, SBTypeSummary, IsEqualTo,
- (lldb::SBTypeSummary &));
- LLDB_REGISTER_METHOD(bool,
- SBTypeSummary, operator!=,(lldb::SBTypeSummary &));
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBTypeSynthetic.cpp b/contrib/llvm-project/lldb/source/API/SBTypeSynthetic.cpp
index af5e167b9c24..7258ff04745d 100644
--- a/contrib/llvm-project/lldb/source/API/SBTypeSynthetic.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBTypeSynthetic.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBTypeSynthetic.h"
-#include "SBReproducerPrivate.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/API/SBStream.h"
@@ -16,54 +16,47 @@
using namespace lldb;
using namespace lldb_private;
-SBTypeSynthetic::SBTypeSynthetic() : m_opaque_sp() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBTypeSynthetic);
-}
+SBTypeSynthetic::SBTypeSynthetic() { LLDB_INSTRUMENT_VA(this); }
SBTypeSynthetic SBTypeSynthetic::CreateWithClassName(const char *data,
uint32_t options) {
- LLDB_RECORD_STATIC_METHOD(lldb::SBTypeSynthetic, SBTypeSynthetic,
- CreateWithClassName, (const char *, uint32_t), data,
- options);
+ LLDB_INSTRUMENT_VA(data, options);
if (!data || data[0] == 0)
- return LLDB_RECORD_RESULT(SBTypeSynthetic());
- return LLDB_RECORD_RESULT(SBTypeSynthetic(ScriptedSyntheticChildrenSP(
- new ScriptedSyntheticChildren(options, data, ""))));
+ return SBTypeSynthetic();
+ return SBTypeSynthetic(ScriptedSyntheticChildrenSP(
+ new ScriptedSyntheticChildren(options, data, "")));
}
SBTypeSynthetic SBTypeSynthetic::CreateWithScriptCode(const char *data,
uint32_t options) {
- LLDB_RECORD_STATIC_METHOD(lldb::SBTypeSynthetic, SBTypeSynthetic,
- CreateWithScriptCode, (const char *, uint32_t),
- data, options);
+ LLDB_INSTRUMENT_VA(data, options);
if (!data || data[0] == 0)
- return LLDB_RECORD_RESULT(SBTypeSynthetic());
- return LLDB_RECORD_RESULT(SBTypeSynthetic(ScriptedSyntheticChildrenSP(
- new ScriptedSyntheticChildren(options, "", data))));
+ return SBTypeSynthetic();
+ return SBTypeSynthetic(ScriptedSyntheticChildrenSP(
+ new ScriptedSyntheticChildren(options, "", data)));
}
SBTypeSynthetic::SBTypeSynthetic(const lldb::SBTypeSynthetic &rhs)
: m_opaque_sp(rhs.m_opaque_sp) {
- LLDB_RECORD_CONSTRUCTOR(SBTypeSynthetic, (const lldb::SBTypeSynthetic &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
SBTypeSynthetic::~SBTypeSynthetic() = default;
bool SBTypeSynthetic::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBTypeSynthetic, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBTypeSynthetic::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBTypeSynthetic, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_sp.get() != nullptr;
}
bool SBTypeSynthetic::IsClassCode() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBTypeSynthetic, IsClassCode);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return false;
@@ -72,7 +65,7 @@ bool SBTypeSynthetic::IsClassCode() {
}
bool SBTypeSynthetic::IsClassName() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBTypeSynthetic, IsClassName);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return false;
@@ -80,7 +73,7 @@ bool SBTypeSynthetic::IsClassName() {
}
const char *SBTypeSynthetic::GetData() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBTypeSynthetic, GetData);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return nullptr;
@@ -91,21 +84,21 @@ const char *SBTypeSynthetic::GetData() {
}
void SBTypeSynthetic::SetClassName(const char *data) {
- LLDB_RECORD_METHOD(void, SBTypeSynthetic, SetClassName, (const char *), data);
+ LLDB_INSTRUMENT_VA(this, data);
if (IsValid() && data && *data)
m_opaque_sp->SetPythonClassName(data);
}
void SBTypeSynthetic::SetClassCode(const char *data) {
- LLDB_RECORD_METHOD(void, SBTypeSynthetic, SetClassCode, (const char *), data);
+ LLDB_INSTRUMENT_VA(this, data);
if (IsValid() && data && *data)
m_opaque_sp->SetPythonCode(data);
}
uint32_t SBTypeSynthetic::GetOptions() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBTypeSynthetic, GetOptions);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return lldb::eTypeOptionNone;
@@ -113,7 +106,7 @@ uint32_t SBTypeSynthetic::GetOptions() {
}
void SBTypeSynthetic::SetOptions(uint32_t value) {
- LLDB_RECORD_METHOD(void, SBTypeSynthetic, SetOptions, (uint32_t), value);
+ LLDB_INSTRUMENT_VA(this, value);
if (!CopyOnWrite_Impl())
return;
@@ -122,9 +115,7 @@ void SBTypeSynthetic::SetOptions(uint32_t value) {
bool SBTypeSynthetic::GetDescription(lldb::SBStream &description,
lldb::DescriptionLevel description_level) {
- LLDB_RECORD_METHOD(bool, SBTypeSynthetic, GetDescription,
- (lldb::SBStream &, lldb::DescriptionLevel), description,
- description_level);
+ LLDB_INSTRUMENT_VA(this, description, description_level);
if (m_opaque_sp) {
description.Printf("%s\n", m_opaque_sp->GetDescription().c_str());
@@ -135,19 +126,16 @@ bool SBTypeSynthetic::GetDescription(lldb::SBStream &description,
lldb::SBTypeSynthetic &SBTypeSynthetic::
operator=(const lldb::SBTypeSynthetic &rhs) {
- LLDB_RECORD_METHOD(lldb::SBTypeSynthetic &,
- SBTypeSynthetic, operator=,(const lldb::SBTypeSynthetic &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs) {
m_opaque_sp = rhs.m_opaque_sp;
}
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
bool SBTypeSynthetic::operator==(lldb::SBTypeSynthetic &rhs) {
- LLDB_RECORD_METHOD(
- bool, SBTypeSynthetic, operator==,(lldb::SBTypeSynthetic &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (!IsValid())
return !rhs.IsValid();
@@ -155,8 +143,7 @@ bool SBTypeSynthetic::operator==(lldb::SBTypeSynthetic &rhs) {
}
bool SBTypeSynthetic::IsEqualTo(lldb::SBTypeSynthetic &rhs) {
- LLDB_RECORD_METHOD(bool, SBTypeSynthetic, IsEqualTo,
- (lldb::SBTypeSynthetic &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (!IsValid())
return !rhs.IsValid();
@@ -174,8 +161,7 @@ bool SBTypeSynthetic::IsEqualTo(lldb::SBTypeSynthetic &rhs) {
}
bool SBTypeSynthetic::operator!=(lldb::SBTypeSynthetic &rhs) {
- LLDB_RECORD_METHOD(
- bool, SBTypeSynthetic, operator!=,(lldb::SBTypeSynthetic &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (!IsValid())
return !rhs.IsValid();
@@ -209,39 +195,3 @@ bool SBTypeSynthetic::CopyOnWrite_Impl() {
return true;
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBTypeSynthetic>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBTypeSynthetic, ());
- LLDB_REGISTER_STATIC_METHOD(lldb::SBTypeSynthetic, SBTypeSynthetic,
- CreateWithClassName, (const char *, uint32_t));
- LLDB_REGISTER_STATIC_METHOD(lldb::SBTypeSynthetic, SBTypeSynthetic,
- CreateWithScriptCode, (const char *, uint32_t));
- LLDB_REGISTER_CONSTRUCTOR(SBTypeSynthetic, (const lldb::SBTypeSynthetic &));
- LLDB_REGISTER_METHOD_CONST(bool, SBTypeSynthetic, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBTypeSynthetic, operator bool, ());
- LLDB_REGISTER_METHOD(bool, SBTypeSynthetic, IsClassCode, ());
- LLDB_REGISTER_METHOD(bool, SBTypeSynthetic, IsClassName, ());
- LLDB_REGISTER_METHOD(const char *, SBTypeSynthetic, GetData, ());
- LLDB_REGISTER_METHOD(void, SBTypeSynthetic, SetClassName, (const char *));
- LLDB_REGISTER_METHOD(void, SBTypeSynthetic, SetClassCode, (const char *));
- LLDB_REGISTER_METHOD(uint32_t, SBTypeSynthetic, GetOptions, ());
- LLDB_REGISTER_METHOD(void, SBTypeSynthetic, SetOptions, (uint32_t));
- LLDB_REGISTER_METHOD(bool, SBTypeSynthetic, GetDescription,
- (lldb::SBStream &, lldb::DescriptionLevel));
- LLDB_REGISTER_METHOD(
- lldb::SBTypeSynthetic &,
- SBTypeSynthetic, operator=,(const lldb::SBTypeSynthetic &));
- LLDB_REGISTER_METHOD(bool,
- SBTypeSynthetic, operator==,(lldb::SBTypeSynthetic &));
- LLDB_REGISTER_METHOD(bool, SBTypeSynthetic, IsEqualTo,
- (lldb::SBTypeSynthetic &));
- LLDB_REGISTER_METHOD(bool,
- SBTypeSynthetic, operator!=,(lldb::SBTypeSynthetic &));
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBUnixSignals.cpp b/contrib/llvm-project/lldb/source/API/SBUnixSignals.cpp
index a933f6d22333..dc7a68255d13 100644
--- a/contrib/llvm-project/lldb/source/API/SBUnixSignals.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBUnixSignals.cpp
@@ -6,10 +6,10 @@
//
//===----------------------------------------------------------------------===//
-#include "SBReproducerPrivate.h"
#include "lldb/Target/Platform.h"
#include "lldb/Target/Process.h"
#include "lldb/Target/UnixSignals.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/lldb-defines.h"
#include "lldb/API/SBUnixSignals.h"
@@ -17,13 +17,11 @@
using namespace lldb;
using namespace lldb_private;
-SBUnixSignals::SBUnixSignals() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBUnixSignals);
-}
+SBUnixSignals::SBUnixSignals() { LLDB_INSTRUMENT_VA(this); }
SBUnixSignals::SBUnixSignals(const SBUnixSignals &rhs)
: m_opaque_wp(rhs.m_opaque_wp) {
- LLDB_RECORD_CONSTRUCTOR(SBUnixSignals, (const lldb::SBUnixSignals &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
SBUnixSignals::SBUnixSignals(ProcessSP &process_sp)
@@ -33,13 +31,11 @@ SBUnixSignals::SBUnixSignals(PlatformSP &platform_sp)
: m_opaque_wp(platform_sp ? platform_sp->GetUnixSignals() : nullptr) {}
const SBUnixSignals &SBUnixSignals::operator=(const SBUnixSignals &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBUnixSignals &,
- SBUnixSignals, operator=,(const lldb::SBUnixSignals &),
- rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs)
m_opaque_wp = rhs.m_opaque_wp;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBUnixSignals::~SBUnixSignals() = default;
@@ -51,24 +47,23 @@ void SBUnixSignals::SetSP(const UnixSignalsSP &signals_sp) {
}
void SBUnixSignals::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBUnixSignals, Clear);
+ LLDB_INSTRUMENT_VA(this);
m_opaque_wp.reset();
}
bool SBUnixSignals::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBUnixSignals, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBUnixSignals::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBUnixSignals, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return static_cast<bool>(GetSP());
}
const char *SBUnixSignals::GetSignalAsCString(int32_t signo) const {
- LLDB_RECORD_METHOD_CONST(const char *, SBUnixSignals, GetSignalAsCString,
- (int32_t), signo);
+ LLDB_INSTRUMENT_VA(this, signo);
if (auto signals_sp = GetSP())
return signals_sp->GetSignalAsCString(signo);
@@ -77,8 +72,7 @@ const char *SBUnixSignals::GetSignalAsCString(int32_t signo) const {
}
int32_t SBUnixSignals::GetSignalNumberFromName(const char *name) const {
- LLDB_RECORD_METHOD_CONST(int32_t, SBUnixSignals, GetSignalNumberFromName,
- (const char *), name);
+ LLDB_INSTRUMENT_VA(this, name);
if (auto signals_sp = GetSP())
return signals_sp->GetSignalNumberFromName(name);
@@ -87,8 +81,7 @@ int32_t SBUnixSignals::GetSignalNumberFromName(const char *name) const {
}
bool SBUnixSignals::GetShouldSuppress(int32_t signo) const {
- LLDB_RECORD_METHOD_CONST(bool, SBUnixSignals, GetShouldSuppress, (int32_t),
- signo);
+ LLDB_INSTRUMENT_VA(this, signo);
if (auto signals_sp = GetSP())
return signals_sp->GetShouldSuppress(signo);
@@ -97,8 +90,7 @@ bool SBUnixSignals::GetShouldSuppress(int32_t signo) const {
}
bool SBUnixSignals::SetShouldSuppress(int32_t signo, bool value) {
- LLDB_RECORD_METHOD(bool, SBUnixSignals, SetShouldSuppress, (int32_t, bool),
- signo, value);
+ LLDB_INSTRUMENT_VA(this, signo, value);
auto signals_sp = GetSP();
@@ -109,8 +101,7 @@ bool SBUnixSignals::SetShouldSuppress(int32_t signo, bool value) {
}
bool SBUnixSignals::GetShouldStop(int32_t signo) const {
- LLDB_RECORD_METHOD_CONST(bool, SBUnixSignals, GetShouldStop, (int32_t),
- signo);
+ LLDB_INSTRUMENT_VA(this, signo);
if (auto signals_sp = GetSP())
return signals_sp->GetShouldStop(signo);
@@ -119,8 +110,7 @@ bool SBUnixSignals::GetShouldStop(int32_t signo) const {
}
bool SBUnixSignals::SetShouldStop(int32_t signo, bool value) {
- LLDB_RECORD_METHOD(bool, SBUnixSignals, SetShouldStop, (int32_t, bool), signo,
- value);
+ LLDB_INSTRUMENT_VA(this, signo, value);
auto signals_sp = GetSP();
@@ -131,8 +121,7 @@ bool SBUnixSignals::SetShouldStop(int32_t signo, bool value) {
}
bool SBUnixSignals::GetShouldNotify(int32_t signo) const {
- LLDB_RECORD_METHOD_CONST(bool, SBUnixSignals, GetShouldNotify, (int32_t),
- signo);
+ LLDB_INSTRUMENT_VA(this, signo);
if (auto signals_sp = GetSP())
return signals_sp->GetShouldNotify(signo);
@@ -141,8 +130,7 @@ bool SBUnixSignals::GetShouldNotify(int32_t signo) const {
}
bool SBUnixSignals::SetShouldNotify(int32_t signo, bool value) {
- LLDB_RECORD_METHOD(bool, SBUnixSignals, SetShouldNotify, (int32_t, bool),
- signo, value);
+ LLDB_INSTRUMENT_VA(this, signo, value);
auto signals_sp = GetSP();
@@ -153,7 +141,7 @@ bool SBUnixSignals::SetShouldNotify(int32_t signo, bool value) {
}
int32_t SBUnixSignals::GetNumSignals() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(int32_t, SBUnixSignals, GetNumSignals);
+ LLDB_INSTRUMENT_VA(this);
if (auto signals_sp = GetSP())
return signals_sp->GetNumSignals();
@@ -162,44 +150,10 @@ int32_t SBUnixSignals::GetNumSignals() const {
}
int32_t SBUnixSignals::GetSignalAtIndex(int32_t index) const {
- LLDB_RECORD_METHOD_CONST(int32_t, SBUnixSignals, GetSignalAtIndex, (int32_t),
- index);
+ LLDB_INSTRUMENT_VA(this, index);
if (auto signals_sp = GetSP())
return signals_sp->GetSignalAtIndex(index);
return LLDB_INVALID_SIGNAL_NUMBER;
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBUnixSignals>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBUnixSignals, ());
- LLDB_REGISTER_CONSTRUCTOR(SBUnixSignals, (const lldb::SBUnixSignals &));
- LLDB_REGISTER_METHOD(
- const lldb::SBUnixSignals &,
- SBUnixSignals, operator=,(const lldb::SBUnixSignals &));
- LLDB_REGISTER_METHOD(void, SBUnixSignals, Clear, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBUnixSignals, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBUnixSignals, operator bool, ());
- LLDB_REGISTER_METHOD_CONST(const char *, SBUnixSignals, GetSignalAsCString,
- (int32_t));
- LLDB_REGISTER_METHOD_CONST(int32_t, SBUnixSignals, GetSignalNumberFromName,
- (const char *));
- LLDB_REGISTER_METHOD_CONST(bool, SBUnixSignals, GetShouldSuppress,
- (int32_t));
- LLDB_REGISTER_METHOD(bool, SBUnixSignals, SetShouldSuppress,
- (int32_t, bool));
- LLDB_REGISTER_METHOD_CONST(bool, SBUnixSignals, GetShouldStop, (int32_t));
- LLDB_REGISTER_METHOD(bool, SBUnixSignals, SetShouldStop, (int32_t, bool));
- LLDB_REGISTER_METHOD_CONST(bool, SBUnixSignals, GetShouldNotify, (int32_t));
- LLDB_REGISTER_METHOD(bool, SBUnixSignals, SetShouldNotify, (int32_t, bool));
- LLDB_REGISTER_METHOD_CONST(int32_t, SBUnixSignals, GetNumSignals, ());
- LLDB_REGISTER_METHOD_CONST(int32_t, SBUnixSignals, GetSignalAtIndex,
- (int32_t));
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBValue.cpp b/contrib/llvm-project/lldb/source/API/SBValue.cpp
index e3325b8d36fa..20581cfabdd6 100644
--- a/contrib/llvm-project/lldb/source/API/SBValue.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBValue.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBValue.h"
-#include "SBReproducerPrivate.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/API/SBDeclaration.h"
#include "lldb/API/SBStream.h"
@@ -58,8 +58,8 @@ public:
ValueImpl(lldb::ValueObjectSP in_valobj_sp,
lldb::DynamicValueType use_dynamic, bool use_synthetic,
const char *name = nullptr)
- : m_valobj_sp(), m_use_dynamic(use_dynamic),
- m_use_synthetic(use_synthetic), m_name(name) {
+ : m_use_dynamic(use_dynamic), m_use_synthetic(use_synthetic),
+ m_name(name) {
if (in_valobj_sp) {
if ((m_valobj_sp = in_valobj_sp->GetQualifiedRepresentationIfAvailable(
lldb::eNoDynamicValues, false))) {
@@ -215,38 +215,37 @@ private:
Status m_lock_error;
};
-SBValue::SBValue() : m_opaque_sp() { LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBValue); }
+SBValue::SBValue() { LLDB_INSTRUMENT_VA(this); }
SBValue::SBValue(const lldb::ValueObjectSP &value_sp) {
- LLDB_RECORD_CONSTRUCTOR(SBValue, (const lldb::ValueObjectSP &), value_sp);
+ LLDB_INSTRUMENT_VA(this, value_sp);
SetSP(value_sp);
}
SBValue::SBValue(const SBValue &rhs) {
- LLDB_RECORD_CONSTRUCTOR(SBValue, (const lldb::SBValue &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
SetSP(rhs.m_opaque_sp);
}
SBValue &SBValue::operator=(const SBValue &rhs) {
- LLDB_RECORD_METHOD(lldb::SBValue &,
- SBValue, operator=,(const lldb::SBValue &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs) {
SetSP(rhs.m_opaque_sp);
}
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBValue::~SBValue() = default;
bool SBValue::IsValid() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBValue, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBValue::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBValue, operator bool);
+ LLDB_INSTRUMENT_VA(this);
// If this function ever changes to anything that does more than just check
// if the opaque shared pointer is non NULL, then we need to update all "if
@@ -256,13 +255,13 @@ SBValue::operator bool() const {
}
void SBValue::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBValue, Clear);
+ LLDB_INSTRUMENT_VA(this);
m_opaque_sp.reset();
}
SBError SBValue::GetError() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBError, SBValue, GetError);
+ LLDB_INSTRUMENT_VA(this);
SBError sb_error;
@@ -274,11 +273,11 @@ SBError SBValue::GetError() {
sb_error.SetErrorStringWithFormat("error: %s",
locker.GetError().AsCString());
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
user_id_t SBValue::GetID() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::user_id_t, SBValue, GetID);
+ LLDB_INSTRUMENT_VA(this);
ValueLocker locker;
lldb::ValueObjectSP value_sp(GetSP(locker));
@@ -288,7 +287,7 @@ user_id_t SBValue::GetID() {
}
const char *SBValue::GetName() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBValue, GetName);
+ LLDB_INSTRUMENT_VA(this);
const char *name = nullptr;
ValueLocker locker;
@@ -300,7 +299,7 @@ const char *SBValue::GetName() {
}
const char *SBValue::GetTypeName() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBValue, GetTypeName);
+ LLDB_INSTRUMENT_VA(this);
const char *name = nullptr;
ValueLocker locker;
@@ -313,7 +312,7 @@ const char *SBValue::GetTypeName() {
}
const char *SBValue::GetDisplayTypeName() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBValue, GetDisplayTypeName);
+ LLDB_INSTRUMENT_VA(this);
const char *name = nullptr;
ValueLocker locker;
@@ -326,7 +325,7 @@ const char *SBValue::GetDisplayTypeName() {
}
size_t SBValue::GetByteSize() {
- LLDB_RECORD_METHOD_NO_ARGS(size_t, SBValue, GetByteSize);
+ LLDB_INSTRUMENT_VA(this);
size_t result = 0;
@@ -340,7 +339,7 @@ size_t SBValue::GetByteSize() {
}
bool SBValue::IsInScope() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBValue, IsInScope);
+ LLDB_INSTRUMENT_VA(this);
bool result = false;
@@ -354,7 +353,7 @@ bool SBValue::IsInScope() {
}
const char *SBValue::GetValue() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBValue, GetValue);
+ LLDB_INSTRUMENT_VA(this);
const char *cstr = nullptr;
ValueLocker locker;
@@ -367,7 +366,7 @@ const char *SBValue::GetValue() {
}
ValueType SBValue::GetValueType() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::ValueType, SBValue, GetValueType);
+ LLDB_INSTRUMENT_VA(this);
ValueType result = eValueTypeInvalid;
ValueLocker locker;
@@ -379,7 +378,7 @@ ValueType SBValue::GetValueType() {
}
const char *SBValue::GetObjectDescription() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBValue, GetObjectDescription);
+ LLDB_INSTRUMENT_VA(this);
const char *cstr = nullptr;
ValueLocker locker;
@@ -392,7 +391,7 @@ const char *SBValue::GetObjectDescription() {
}
SBType SBValue::GetType() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBType, SBValue, GetType);
+ LLDB_INSTRUMENT_VA(this);
SBType sb_type;
ValueLocker locker;
@@ -403,11 +402,11 @@ SBType SBValue::GetType() {
sb_type.SetSP(type_sp);
}
- return LLDB_RECORD_RESULT(sb_type);
+ return sb_type;
}
bool SBValue::GetValueDidChange() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBValue, GetValueDidChange);
+ LLDB_INSTRUMENT_VA(this);
bool result = false;
ValueLocker locker;
@@ -421,7 +420,7 @@ bool SBValue::GetValueDidChange() {
}
const char *SBValue::GetSummary() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBValue, GetSummary);
+ LLDB_INSTRUMENT_VA(this);
const char *cstr = nullptr;
ValueLocker locker;
@@ -435,9 +434,7 @@ const char *SBValue::GetSummary() {
const char *SBValue::GetSummary(lldb::SBStream &stream,
lldb::SBTypeSummaryOptions &options) {
- LLDB_RECORD_METHOD(const char *, SBValue, GetSummary,
- (lldb::SBStream &, lldb::SBTypeSummaryOptions &), stream,
- options);
+ LLDB_INSTRUMENT_VA(this, stream, options);
ValueLocker locker;
lldb::ValueObjectSP value_sp(GetSP(locker));
@@ -451,7 +448,7 @@ const char *SBValue::GetSummary(lldb::SBStream &stream,
}
const char *SBValue::GetLocation() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBValue, GetLocation);
+ LLDB_INSTRUMENT_VA(this);
const char *cstr = nullptr;
ValueLocker locker;
@@ -464,16 +461,14 @@ const char *SBValue::GetLocation() {
// Deprecated - use the one that takes an lldb::SBError
bool SBValue::SetValueFromCString(const char *value_str) {
- LLDB_RECORD_METHOD(bool, SBValue, SetValueFromCString, (const char *),
- value_str);
+ LLDB_INSTRUMENT_VA(this, value_str);
lldb::SBError dummy;
return SetValueFromCString(value_str, dummy);
}
bool SBValue::SetValueFromCString(const char *value_str, lldb::SBError &error) {
- LLDB_RECORD_METHOD(bool, SBValue, SetValueFromCString,
- (const char *, lldb::SBError &), value_str, error);
+ LLDB_INSTRUMENT_VA(this, value_str, error);
bool success = false;
ValueLocker locker;
@@ -488,7 +483,7 @@ bool SBValue::SetValueFromCString(const char *value_str, lldb::SBError &error) {
}
lldb::SBTypeFormat SBValue::GetTypeFormat() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBTypeFormat, SBValue, GetTypeFormat);
+ LLDB_INSTRUMENT_VA(this);
lldb::SBTypeFormat format;
ValueLocker locker;
@@ -500,11 +495,11 @@ lldb::SBTypeFormat SBValue::GetTypeFormat() {
format.SetSP(format_sp);
}
}
- return LLDB_RECORD_RESULT(format);
+ return format;
}
lldb::SBTypeSummary SBValue::GetTypeSummary() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBTypeSummary, SBValue, GetTypeSummary);
+ LLDB_INSTRUMENT_VA(this);
lldb::SBTypeSummary summary;
ValueLocker locker;
@@ -516,11 +511,11 @@ lldb::SBTypeSummary SBValue::GetTypeSummary() {
summary.SetSP(summary_sp);
}
}
- return LLDB_RECORD_RESULT(summary);
+ return summary;
}
lldb::SBTypeFilter SBValue::GetTypeFilter() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBTypeFilter, SBValue, GetTypeFilter);
+ LLDB_INSTRUMENT_VA(this);
lldb::SBTypeFilter filter;
ValueLocker locker;
@@ -536,11 +531,11 @@ lldb::SBTypeFilter SBValue::GetTypeFilter() {
}
}
}
- return LLDB_RECORD_RESULT(filter);
+ return filter;
}
lldb::SBTypeSynthetic SBValue::GetTypeSynthetic() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBTypeSynthetic, SBValue, GetTypeSynthetic);
+ LLDB_INSTRUMENT_VA(this);
lldb::SBTypeSynthetic synthetic;
ValueLocker locker;
@@ -556,14 +551,12 @@ lldb::SBTypeSynthetic SBValue::GetTypeSynthetic() {
}
}
}
- return LLDB_RECORD_RESULT(synthetic);
+ return synthetic;
}
lldb::SBValue SBValue::CreateChildAtOffset(const char *name, uint32_t offset,
SBType type) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBValue, CreateChildAtOffset,
- (const char *, uint32_t, lldb::SBType), name, offset,
- type);
+ LLDB_INSTRUMENT_VA(this, name, offset, type);
lldb::SBValue sb_value;
ValueLocker locker;
@@ -577,11 +570,11 @@ lldb::SBValue SBValue::CreateChildAtOffset(const char *name, uint32_t offset,
GetPreferDynamicValue(), GetPreferSyntheticValue(), name);
}
}
- return LLDB_RECORD_RESULT(sb_value);
+ return sb_value;
}
lldb::SBValue SBValue::Cast(SBType type) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBValue, Cast, (lldb::SBType), type);
+ LLDB_INSTRUMENT_VA(this, type);
lldb::SBValue sb_value;
ValueLocker locker;
@@ -590,26 +583,22 @@ lldb::SBValue SBValue::Cast(SBType type) {
if (value_sp && type_sp)
sb_value.SetSP(value_sp->Cast(type_sp->GetCompilerType(false)),
GetPreferDynamicValue(), GetPreferSyntheticValue());
- return LLDB_RECORD_RESULT(sb_value);
+ return sb_value;
}
lldb::SBValue SBValue::CreateValueFromExpression(const char *name,
const char *expression) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBValue, CreateValueFromExpression,
- (const char *, const char *), name, expression);
+ LLDB_INSTRUMENT_VA(this, name, expression);
SBExpressionOptions options;
options.ref().SetKeepInMemory(true);
- return LLDB_RECORD_RESULT(
- CreateValueFromExpression(name, expression, options));
+ return CreateValueFromExpression(name, expression, options);
}
lldb::SBValue SBValue::CreateValueFromExpression(const char *name,
const char *expression,
SBExpressionOptions &options) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBValue, CreateValueFromExpression,
- (const char *, const char *, lldb::SBExpressionOptions &),
- name, expression, options);
+ LLDB_INSTRUMENT_VA(this, name, expression, options);
lldb::SBValue sb_value;
ValueLocker locker;
@@ -623,15 +612,13 @@ lldb::SBValue SBValue::CreateValueFromExpression(const char *name,
new_value_sp->SetName(ConstString(name));
}
sb_value.SetSP(new_value_sp);
- return LLDB_RECORD_RESULT(sb_value);
+ return sb_value;
}
lldb::SBValue SBValue::CreateValueFromAddress(const char *name,
lldb::addr_t address,
SBType sb_type) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBValue, CreateValueFromAddress,
- (const char *, lldb::addr_t, lldb::SBType), name, address,
- sb_type);
+ LLDB_INSTRUMENT_VA(this, name, address, sb_type);
lldb::SBValue sb_value;
ValueLocker locker;
@@ -645,14 +632,12 @@ lldb::SBValue SBValue::CreateValueFromAddress(const char *name,
exe_ctx, ast_type);
}
sb_value.SetSP(new_value_sp);
- return LLDB_RECORD_RESULT(sb_value);
+ return sb_value;
}
lldb::SBValue SBValue::CreateValueFromData(const char *name, SBData data,
SBType sb_type) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBValue, CreateValueFromData,
- (const char *, lldb::SBData, lldb::SBType), name, data,
- sb_type);
+ LLDB_INSTRUMENT_VA(this, name, data, sb_type);
lldb::SBValue sb_value;
lldb::ValueObjectSP new_value_sp;
@@ -666,11 +651,11 @@ lldb::SBValue SBValue::CreateValueFromData(const char *name, SBData data,
new_value_sp->SetAddressTypeOfChildren(eAddressTypeLoad);
}
sb_value.SetSP(new_value_sp);
- return LLDB_RECORD_RESULT(sb_value);
+ return sb_value;
}
SBValue SBValue::GetChildAtIndex(uint32_t idx) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBValue, GetChildAtIndex, (uint32_t), idx);
+ LLDB_INSTRUMENT_VA(this, idx);
const bool can_create_synthetic = false;
lldb::DynamicValueType use_dynamic = eNoDynamicValues;
@@ -681,16 +666,13 @@ SBValue SBValue::GetChildAtIndex(uint32_t idx) {
if (target_sp)
use_dynamic = target_sp->GetPreferDynamicValue();
- return LLDB_RECORD_RESULT(
- GetChildAtIndex(idx, use_dynamic, can_create_synthetic));
+ return GetChildAtIndex(idx, use_dynamic, can_create_synthetic);
}
SBValue SBValue::GetChildAtIndex(uint32_t idx,
lldb::DynamicValueType use_dynamic,
bool can_create_synthetic) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBValue, GetChildAtIndex,
- (uint32_t, lldb::DynamicValueType, bool), idx, use_dynamic,
- can_create_synthetic);
+ LLDB_INSTRUMENT_VA(this, idx, use_dynamic, can_create_synthetic);
lldb::ValueObjectSP child_sp;
@@ -707,12 +689,11 @@ SBValue SBValue::GetChildAtIndex(uint32_t idx,
SBValue sb_value;
sb_value.SetSP(child_sp, use_dynamic, GetPreferSyntheticValue());
- return LLDB_RECORD_RESULT(sb_value);
+ return sb_value;
}
uint32_t SBValue::GetIndexOfChildWithName(const char *name) {
- LLDB_RECORD_METHOD(uint32_t, SBValue, GetIndexOfChildWithName, (const char *),
- name);
+ LLDB_INSTRUMENT_VA(this, name);
uint32_t idx = UINT32_MAX;
ValueLocker locker;
@@ -724,8 +705,7 @@ uint32_t SBValue::GetIndexOfChildWithName(const char *name) {
}
SBValue SBValue::GetChildMemberWithName(const char *name) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBValue, GetChildMemberWithName,
- (const char *), name);
+ LLDB_INSTRUMENT_VA(this, name);
lldb::DynamicValueType use_dynamic_value = eNoDynamicValues;
TargetSP target_sp;
@@ -734,15 +714,13 @@ SBValue SBValue::GetChildMemberWithName(const char *name) {
if (target_sp)
use_dynamic_value = target_sp->GetPreferDynamicValue();
- return LLDB_RECORD_RESULT(GetChildMemberWithName(name, use_dynamic_value));
+ return GetChildMemberWithName(name, use_dynamic_value);
}
SBValue
SBValue::GetChildMemberWithName(const char *name,
lldb::DynamicValueType use_dynamic_value) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBValue, GetChildMemberWithName,
- (const char *, lldb::DynamicValueType), name,
- use_dynamic_value);
+ LLDB_INSTRUMENT_VA(this, name, use_dynamic_value);
lldb::ValueObjectSP child_sp;
const ConstString str_name(name);
@@ -756,12 +734,11 @@ SBValue::GetChildMemberWithName(const char *name,
SBValue sb_value;
sb_value.SetSP(child_sp, use_dynamic_value, GetPreferSyntheticValue());
- return LLDB_RECORD_RESULT(sb_value);
+ return sb_value;
}
lldb::SBValue SBValue::GetDynamicValue(lldb::DynamicValueType use_dynamic) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBValue, GetDynamicValue,
- (lldb::DynamicValueType), use_dynamic);
+ LLDB_INSTRUMENT_VA(this, use_dynamic);
SBValue value_sb;
if (IsValid()) {
@@ -769,11 +746,11 @@ lldb::SBValue SBValue::GetDynamicValue(lldb::DynamicValueType use_dynamic) {
m_opaque_sp->GetUseSynthetic()));
value_sb.SetSP(proxy_sp);
}
- return LLDB_RECORD_RESULT(value_sb);
+ return value_sb;
}
lldb::SBValue SBValue::GetStaticValue() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBValue, SBValue, GetStaticValue);
+ LLDB_INSTRUMENT_VA(this);
SBValue value_sb;
if (IsValid()) {
@@ -782,11 +759,11 @@ lldb::SBValue SBValue::GetStaticValue() {
m_opaque_sp->GetUseSynthetic()));
value_sb.SetSP(proxy_sp);
}
- return LLDB_RECORD_RESULT(value_sb);
+ return value_sb;
}
lldb::SBValue SBValue::GetNonSyntheticValue() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBValue, SBValue, GetNonSyntheticValue);
+ LLDB_INSTRUMENT_VA(this);
SBValue value_sb;
if (IsValid()) {
@@ -794,12 +771,11 @@ lldb::SBValue SBValue::GetNonSyntheticValue() {
m_opaque_sp->GetUseDynamic(), false));
value_sb.SetSP(proxy_sp);
}
- return LLDB_RECORD_RESULT(value_sb);
+ return value_sb;
}
lldb::DynamicValueType SBValue::GetPreferDynamicValue() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::DynamicValueType, SBValue,
- GetPreferDynamicValue);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return eNoDynamicValues;
@@ -807,15 +783,14 @@ lldb::DynamicValueType SBValue::GetPreferDynamicValue() {
}
void SBValue::SetPreferDynamicValue(lldb::DynamicValueType use_dynamic) {
- LLDB_RECORD_METHOD(void, SBValue, SetPreferDynamicValue,
- (lldb::DynamicValueType), use_dynamic);
+ LLDB_INSTRUMENT_VA(this, use_dynamic);
if (IsValid())
return m_opaque_sp->SetUseDynamic(use_dynamic);
}
bool SBValue::GetPreferSyntheticValue() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBValue, GetPreferSyntheticValue);
+ LLDB_INSTRUMENT_VA(this);
if (!IsValid())
return false;
@@ -823,15 +798,14 @@ bool SBValue::GetPreferSyntheticValue() {
}
void SBValue::SetPreferSyntheticValue(bool use_synthetic) {
- LLDB_RECORD_METHOD(void, SBValue, SetPreferSyntheticValue, (bool),
- use_synthetic);
+ LLDB_INSTRUMENT_VA(this, use_synthetic);
if (IsValid())
return m_opaque_sp->SetUseSynthetic(use_synthetic);
}
bool SBValue::IsDynamic() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBValue, IsDynamic);
+ LLDB_INSTRUMENT_VA(this);
ValueLocker locker;
lldb::ValueObjectSP value_sp(GetSP(locker));
@@ -841,7 +815,7 @@ bool SBValue::IsDynamic() {
}
bool SBValue::IsSynthetic() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBValue, IsSynthetic);
+ LLDB_INSTRUMENT_VA(this);
ValueLocker locker;
lldb::ValueObjectSP value_sp(GetSP(locker));
@@ -851,7 +825,7 @@ bool SBValue::IsSynthetic() {
}
bool SBValue::IsSyntheticChildrenGenerated() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBValue, IsSyntheticChildrenGenerated);
+ LLDB_INSTRUMENT_VA(this);
ValueLocker locker;
lldb::ValueObjectSP value_sp(GetSP(locker));
@@ -861,7 +835,7 @@ bool SBValue::IsSyntheticChildrenGenerated() {
}
void SBValue::SetSyntheticChildrenGenerated(bool is) {
- LLDB_RECORD_METHOD(void, SBValue, SetSyntheticChildrenGenerated, (bool), is);
+ LLDB_INSTRUMENT_VA(this, is);
ValueLocker locker;
lldb::ValueObjectSP value_sp(GetSP(locker));
@@ -870,8 +844,7 @@ void SBValue::SetSyntheticChildrenGenerated(bool is) {
}
lldb::SBValue SBValue::GetValueForExpressionPath(const char *expr_path) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBValue, GetValueForExpressionPath,
- (const char *), expr_path);
+ LLDB_INSTRUMENT_VA(this, expr_path);
lldb::ValueObjectSP child_sp;
ValueLocker locker;
@@ -884,12 +857,11 @@ lldb::SBValue SBValue::GetValueForExpressionPath(const char *expr_path) {
SBValue sb_value;
sb_value.SetSP(child_sp, GetPreferDynamicValue(), GetPreferSyntheticValue());
- return LLDB_RECORD_RESULT(sb_value);
+ return sb_value;
}
int64_t SBValue::GetValueAsSigned(SBError &error, int64_t fail_value) {
- LLDB_RECORD_METHOD(int64_t, SBValue, GetValueAsSigned,
- (lldb::SBError &, int64_t), error, fail_value);
+ LLDB_INSTRUMENT_VA(this, error, fail_value);
error.Clear();
ValueLocker locker;
@@ -909,8 +881,7 @@ int64_t SBValue::GetValueAsSigned(SBError &error, int64_t fail_value) {
}
uint64_t SBValue::GetValueAsUnsigned(SBError &error, uint64_t fail_value) {
- LLDB_RECORD_METHOD(uint64_t, SBValue, GetValueAsUnsigned,
- (lldb::SBError &, uint64_t), error, fail_value);
+ LLDB_INSTRUMENT_VA(this, error, fail_value);
error.Clear();
ValueLocker locker;
@@ -930,7 +901,7 @@ uint64_t SBValue::GetValueAsUnsigned(SBError &error, uint64_t fail_value) {
}
int64_t SBValue::GetValueAsSigned(int64_t fail_value) {
- LLDB_RECORD_METHOD(int64_t, SBValue, GetValueAsSigned, (int64_t), fail_value);
+ LLDB_INSTRUMENT_VA(this, fail_value);
ValueLocker locker;
lldb::ValueObjectSP value_sp(GetSP(locker));
@@ -941,8 +912,7 @@ int64_t SBValue::GetValueAsSigned(int64_t fail_value) {
}
uint64_t SBValue::GetValueAsUnsigned(uint64_t fail_value) {
- LLDB_RECORD_METHOD(uint64_t, SBValue, GetValueAsUnsigned, (uint64_t),
- fail_value);
+ LLDB_INSTRUMENT_VA(this, fail_value);
ValueLocker locker;
lldb::ValueObjectSP value_sp(GetSP(locker));
@@ -953,7 +923,7 @@ uint64_t SBValue::GetValueAsUnsigned(uint64_t fail_value) {
}
bool SBValue::MightHaveChildren() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBValue, MightHaveChildren);
+ LLDB_INSTRUMENT_VA(this);
bool has_children = false;
ValueLocker locker;
@@ -965,7 +935,7 @@ bool SBValue::MightHaveChildren() {
}
bool SBValue::IsRuntimeSupportValue() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBValue, IsRuntimeSupportValue);
+ LLDB_INSTRUMENT_VA(this);
bool is_support = false;
ValueLocker locker;
@@ -977,13 +947,13 @@ bool SBValue::IsRuntimeSupportValue() {
}
uint32_t SBValue::GetNumChildren() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBValue, GetNumChildren);
+ LLDB_INSTRUMENT_VA(this);
return GetNumChildren(UINT32_MAX);
}
uint32_t SBValue::GetNumChildren(uint32_t max) {
- LLDB_RECORD_METHOD(uint32_t, SBValue, GetNumChildren, (uint32_t), max);
+ LLDB_INSTRUMENT_VA(this, max);
uint32_t num_children = 0;
@@ -996,7 +966,7 @@ uint32_t SBValue::GetNumChildren(uint32_t max) {
}
SBValue SBValue::Dereference() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBValue, SBValue, Dereference);
+ LLDB_INSTRUMENT_VA(this);
SBValue sb_value;
ValueLocker locker;
@@ -1006,18 +976,18 @@ SBValue SBValue::Dereference() {
sb_value = value_sp->Dereference(error);
}
- return LLDB_RECORD_RESULT(sb_value);
+ return sb_value;
}
// Deprecated - please use GetType().IsPointerType() instead.
bool SBValue::TypeIsPointerType() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBValue, TypeIsPointerType);
+ LLDB_INSTRUMENT_VA(this);
return GetType().IsPointerType();
}
void *SBValue::GetOpaqueType() {
- LLDB_RECORD_METHOD_NO_ARGS(void *, SBValue, GetOpaqueType);
+ LLDB_INSTRUMENT_VA(this);
ValueLocker locker;
lldb::ValueObjectSP value_sp(GetSP(locker));
@@ -1027,7 +997,7 @@ void *SBValue::GetOpaqueType() {
}
lldb::SBTarget SBValue::GetTarget() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBTarget, SBValue, GetTarget);
+ LLDB_INSTRUMENT_VA(this);
SBTarget sb_target;
TargetSP target_sp;
@@ -1036,11 +1006,11 @@ lldb::SBTarget SBValue::GetTarget() {
sb_target.SetSP(target_sp);
}
- return LLDB_RECORD_RESULT(sb_target);
+ return sb_target;
}
lldb::SBProcess SBValue::GetProcess() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBProcess, SBValue, GetProcess);
+ LLDB_INSTRUMENT_VA(this);
SBProcess sb_process;
ProcessSP process_sp;
@@ -1049,11 +1019,11 @@ lldb::SBProcess SBValue::GetProcess() {
sb_process.SetSP(process_sp);
}
- return LLDB_RECORD_RESULT(sb_process);
+ return sb_process;
}
lldb::SBThread SBValue::GetThread() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBThread, SBValue, GetThread);
+ LLDB_INSTRUMENT_VA(this);
SBThread sb_thread;
ThreadSP thread_sp;
@@ -1062,11 +1032,11 @@ lldb::SBThread SBValue::GetThread() {
sb_thread.SetThread(thread_sp);
}
- return LLDB_RECORD_RESULT(sb_thread);
+ return sb_thread;
}
lldb::SBFrame SBValue::GetFrame() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBFrame, SBValue, GetFrame);
+ LLDB_INSTRUMENT_VA(this);
SBFrame sb_frame;
StackFrameSP frame_sp;
@@ -1075,7 +1045,7 @@ lldb::SBFrame SBValue::GetFrame() {
sb_frame.SetFrameSP(frame_sp);
}
- return LLDB_RECORD_RESULT(sb_frame);
+ return sb_frame;
}
lldb::ValueObjectSP SBValue::GetSP(ValueLocker &locker) const {
@@ -1087,10 +1057,10 @@ lldb::ValueObjectSP SBValue::GetSP(ValueLocker &locker) const {
}
lldb::ValueObjectSP SBValue::GetSP() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::ValueObjectSP, SBValue, GetSP);
+ LLDB_INSTRUMENT_VA(this);
ValueLocker locker;
- return LLDB_RECORD_RESULT(GetSP(locker));
+ return GetSP(locker);
}
void SBValue::SetSP(ValueImplSP impl_sp) { m_opaque_sp = impl_sp; }
@@ -1148,8 +1118,7 @@ void SBValue::SetSP(const lldb::ValueObjectSP &sp,
}
bool SBValue::GetExpressionPath(SBStream &description) {
- LLDB_RECORD_METHOD(bool, SBValue, GetExpressionPath, (lldb::SBStream &),
- description);
+ LLDB_INSTRUMENT_VA(this, description);
ValueLocker locker;
lldb::ValueObjectSP value_sp(GetSP(locker));
@@ -1162,8 +1131,7 @@ bool SBValue::GetExpressionPath(SBStream &description) {
bool SBValue::GetExpressionPath(SBStream &description,
bool qualify_cxx_base_classes) {
- LLDB_RECORD_METHOD(bool, SBValue, GetExpressionPath, (lldb::SBStream &, bool),
- description, qualify_cxx_base_classes);
+ LLDB_INSTRUMENT_VA(this, description, qualify_cxx_base_classes);
ValueLocker locker;
lldb::ValueObjectSP value_sp(GetSP(locker));
@@ -1175,59 +1143,52 @@ bool SBValue::GetExpressionPath(SBStream &description,
}
lldb::SBValue SBValue::EvaluateExpression(const char *expr) const {
- LLDB_RECORD_METHOD_CONST(lldb::SBValue, SBValue, EvaluateExpression,
- (const char *), expr);
+ LLDB_INSTRUMENT_VA(this, expr);
ValueLocker locker;
lldb::ValueObjectSP value_sp(GetSP(locker));
if (!value_sp)
- return LLDB_RECORD_RESULT(SBValue());
+ return SBValue();
lldb::TargetSP target_sp = value_sp->GetTargetSP();
if (!target_sp)
- return LLDB_RECORD_RESULT(SBValue());
+ return SBValue();
lldb::SBExpressionOptions options;
options.SetFetchDynamicValue(target_sp->GetPreferDynamicValue());
options.SetUnwindOnError(true);
options.SetIgnoreBreakpoints(true);
- return LLDB_RECORD_RESULT(EvaluateExpression(expr, options, nullptr));
+ return EvaluateExpression(expr, options, nullptr);
}
lldb::SBValue
SBValue::EvaluateExpression(const char *expr,
const SBExpressionOptions &options) const {
- LLDB_RECORD_METHOD_CONST(lldb::SBValue, SBValue, EvaluateExpression,
- (const char *, const lldb::SBExpressionOptions &),
- expr, options);
+ LLDB_INSTRUMENT_VA(this, expr, options);
- return LLDB_RECORD_RESULT(EvaluateExpression(expr, options, nullptr));
+ return EvaluateExpression(expr, options, nullptr);
}
lldb::SBValue SBValue::EvaluateExpression(const char *expr,
const SBExpressionOptions &options,
const char *name) const {
- LLDB_RECORD_METHOD_CONST(
- lldb::SBValue, SBValue, EvaluateExpression,
- (const char *, const lldb::SBExpressionOptions &, const char *), expr,
- options, name);
-
+ LLDB_INSTRUMENT_VA(this, expr, options, name);
if (!expr || expr[0] == '\0') {
- return LLDB_RECORD_RESULT(SBValue());
+ return SBValue();
}
ValueLocker locker;
lldb::ValueObjectSP value_sp(GetSP(locker));
if (!value_sp) {
- return LLDB_RECORD_RESULT(SBValue());
+ return SBValue();
}
lldb::TargetSP target_sp = value_sp->GetTargetSP();
if (!target_sp) {
- return LLDB_RECORD_RESULT(SBValue());
+ return SBValue();
}
std::lock_guard<std::recursive_mutex> guard(target_sp->GetAPIMutex());
@@ -1235,7 +1196,7 @@ lldb::SBValue SBValue::EvaluateExpression(const char *expr,
StackFrame *frame = exe_ctx.GetFramePtr();
if (!frame) {
- return LLDB_RECORD_RESULT(SBValue());
+ return SBValue();
}
ValueObjectSP res_val_sp;
@@ -1247,12 +1208,11 @@ lldb::SBValue SBValue::EvaluateExpression(const char *expr,
SBValue result;
result.SetSP(res_val_sp, options.GetFetchDynamicValue());
- return LLDB_RECORD_RESULT(result);
+ return result;
}
bool SBValue::GetDescription(SBStream &description) {
- LLDB_RECORD_METHOD(bool, SBValue, GetDescription, (lldb::SBStream &),
- description);
+ LLDB_INSTRUMENT_VA(this, description);
Stream &strm = description.ref();
@@ -1267,7 +1227,7 @@ bool SBValue::GetDescription(SBStream &description) {
}
lldb::Format SBValue::GetFormat() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::Format, SBValue, GetFormat);
+ LLDB_INSTRUMENT_VA(this);
ValueLocker locker;
lldb::ValueObjectSP value_sp(GetSP(locker));
@@ -1277,7 +1237,7 @@ lldb::Format SBValue::GetFormat() {
}
void SBValue::SetFormat(lldb::Format format) {
- LLDB_RECORD_METHOD(void, SBValue, SetFormat, (lldb::Format), format);
+ LLDB_INSTRUMENT_VA(this, format);
ValueLocker locker;
lldb::ValueObjectSP value_sp(GetSP(locker));
@@ -1286,7 +1246,7 @@ void SBValue::SetFormat(lldb::Format format) {
}
lldb::SBValue SBValue::AddressOf() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBValue, SBValue, AddressOf);
+ LLDB_INSTRUMENT_VA(this);
SBValue sb_value;
ValueLocker locker;
@@ -1297,11 +1257,11 @@ lldb::SBValue SBValue::AddressOf() {
GetPreferSyntheticValue());
}
- return LLDB_RECORD_RESULT(sb_value);
+ return sb_value;
}
lldb::addr_t SBValue::GetLoadAddress() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::addr_t, SBValue, GetLoadAddress);
+ LLDB_INSTRUMENT_VA(this);
lldb::addr_t value = LLDB_INVALID_ADDRESS;
ValueLocker locker;
@@ -1331,7 +1291,7 @@ lldb::addr_t SBValue::GetLoadAddress() {
}
lldb::SBAddress SBValue::GetAddress() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBAddress, SBValue, GetAddress);
+ LLDB_INSTRUMENT_VA(this);
Address addr;
ValueLocker locker;
@@ -1356,12 +1316,11 @@ lldb::SBAddress SBValue::GetAddress() {
}
}
- return LLDB_RECORD_RESULT(SBAddress(addr));
+ return SBAddress(addr);
}
lldb::SBData SBValue::GetPointeeData(uint32_t item_idx, uint32_t item_count) {
- LLDB_RECORD_METHOD(lldb::SBData, SBValue, GetPointeeData,
- (uint32_t, uint32_t), item_idx, item_count);
+ LLDB_INSTRUMENT_VA(this, item_idx, item_count);
lldb::SBData sb_data;
ValueLocker locker;
@@ -1376,11 +1335,11 @@ lldb::SBData SBValue::GetPointeeData(uint32_t item_idx, uint32_t item_count) {
}
}
- return LLDB_RECORD_RESULT(sb_data);
+ return sb_data;
}
lldb::SBData SBValue::GetData() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBData, SBValue, GetData);
+ LLDB_INSTRUMENT_VA(this);
lldb::SBData sb_data;
ValueLocker locker;
@@ -1393,12 +1352,11 @@ lldb::SBData SBValue::GetData() {
*sb_data = data_sp;
}
- return LLDB_RECORD_RESULT(sb_data);
+ return sb_data;
}
bool SBValue::SetData(lldb::SBData &data, SBError &error) {
- LLDB_RECORD_METHOD(bool, SBValue, SetData, (lldb::SBData &, lldb::SBError &),
- data, error);
+ LLDB_INSTRUMENT_VA(this, data, error);
ValueLocker locker;
lldb::ValueObjectSP value_sp(GetSP(locker));
@@ -1432,7 +1390,7 @@ bool SBValue::SetData(lldb::SBData &data, SBError &error) {
}
lldb::SBValue SBValue::Clone(const char *new_name) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBValue, Clone, (const char *), new_name);
+ LLDB_INSTRUMENT_VA(this, new_name);
ValueLocker locker;
lldb::ValueObjectSP value_sp(GetSP(locker));
@@ -1444,7 +1402,7 @@ lldb::SBValue SBValue::Clone(const char *new_name) {
}
lldb::SBDeclaration SBValue::GetDeclaration() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBDeclaration, SBValue, GetDeclaration);
+ LLDB_INSTRUMENT_VA(this);
ValueLocker locker;
lldb::ValueObjectSP value_sp(GetSP(locker));
@@ -1454,14 +1412,12 @@ lldb::SBDeclaration SBValue::GetDeclaration() {
if (value_sp->GetDeclaration(decl))
decl_sb.SetDeclaration(decl);
}
- return LLDB_RECORD_RESULT(decl_sb);
+ return decl_sb;
}
lldb::SBWatchpoint SBValue::Watch(bool resolve_location, bool read, bool write,
SBError &error) {
- LLDB_RECORD_METHOD(lldb::SBWatchpoint, SBValue, Watch,
- (bool, bool, bool, lldb::SBError &), resolve_location,
- read, write, error);
+ LLDB_INSTRUMENT_VA(this, resolve_location, read, write, error);
SBWatchpoint sb_watchpoint;
@@ -1472,18 +1428,18 @@ lldb::SBWatchpoint SBValue::Watch(bool resolve_location, bool read, bool write,
if (value_sp && target_sp) {
// Read and Write cannot both be false.
if (!read && !write)
- return LLDB_RECORD_RESULT(sb_watchpoint);
+ return sb_watchpoint;
// If the value is not in scope, don't try and watch and invalid value
if (!IsInScope())
- return LLDB_RECORD_RESULT(sb_watchpoint);
+ return sb_watchpoint;
addr_t addr = GetLoadAddress();
if (addr == LLDB_INVALID_ADDRESS)
- return LLDB_RECORD_RESULT(sb_watchpoint);
+ return sb_watchpoint;
size_t byte_size = GetByteSize();
if (byte_size == 0)
- return LLDB_RECORD_RESULT(sb_watchpoint);
+ return sb_watchpoint;
uint32_t watch_type = 0;
if (read)
@@ -1516,7 +1472,7 @@ lldb::SBWatchpoint SBValue::Watch(bool resolve_location, bool read, bool write,
error.SetErrorString("could not set watchpoint, a target is required");
}
- return LLDB_RECORD_RESULT(sb_watchpoint);
+ return sb_watchpoint;
}
// FIXME: Remove this method impl (as well as the decl in .h) once it is no
@@ -1524,27 +1480,24 @@ lldb::SBWatchpoint SBValue::Watch(bool resolve_location, bool read, bool write,
// Backward compatibility fix in the interim.
lldb::SBWatchpoint SBValue::Watch(bool resolve_location, bool read,
bool write) {
- LLDB_RECORD_METHOD(lldb::SBWatchpoint, SBValue, Watch, (bool, bool, bool),
- resolve_location, read, write);
+ LLDB_INSTRUMENT_VA(this, resolve_location, read, write);
SBError error;
- return LLDB_RECORD_RESULT(Watch(resolve_location, read, write, error));
+ return Watch(resolve_location, read, write, error);
}
lldb::SBWatchpoint SBValue::WatchPointee(bool resolve_location, bool read,
bool write, SBError &error) {
- LLDB_RECORD_METHOD(lldb::SBWatchpoint, SBValue, WatchPointee,
- (bool, bool, bool, lldb::SBError &), resolve_location,
- read, write, error);
+ LLDB_INSTRUMENT_VA(this, resolve_location, read, write, error);
SBWatchpoint sb_watchpoint;
if (IsInScope() && GetType().IsPointerType())
sb_watchpoint = Dereference().Watch(resolve_location, read, write, error);
- return LLDB_RECORD_RESULT(sb_watchpoint);
+ return sb_watchpoint;
}
lldb::SBValue SBValue::Persist() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBValue, SBValue, Persist);
+ LLDB_INSTRUMENT_VA(this);
ValueLocker locker;
lldb::ValueObjectSP value_sp(GetSP(locker));
@@ -1552,132 +1505,5 @@ lldb::SBValue SBValue::Persist() {
if (value_sp) {
persisted_sb.SetSP(value_sp->Persist());
}
- return LLDB_RECORD_RESULT(persisted_sb);
-}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBValue>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBValue, ());
- LLDB_REGISTER_CONSTRUCTOR(SBValue, (const lldb::ValueObjectSP &));
- LLDB_REGISTER_CONSTRUCTOR(SBValue, (const lldb::SBValue &));
- LLDB_REGISTER_METHOD(lldb::SBValue &,
- SBValue, operator=,(const lldb::SBValue &));
- LLDB_REGISTER_METHOD(bool, SBValue, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBValue, operator bool, ());
- LLDB_REGISTER_METHOD(void, SBValue, Clear, ());
- LLDB_REGISTER_METHOD(lldb::SBError, SBValue, GetError, ());
- LLDB_REGISTER_METHOD(lldb::user_id_t, SBValue, GetID, ());
- LLDB_REGISTER_METHOD(const char *, SBValue, GetName, ());
- LLDB_REGISTER_METHOD(const char *, SBValue, GetTypeName, ());
- LLDB_REGISTER_METHOD(const char *, SBValue, GetDisplayTypeName, ());
- LLDB_REGISTER_METHOD(size_t, SBValue, GetByteSize, ());
- LLDB_REGISTER_METHOD(bool, SBValue, IsInScope, ());
- LLDB_REGISTER_METHOD(const char *, SBValue, GetValue, ());
- LLDB_REGISTER_METHOD(lldb::ValueType, SBValue, GetValueType, ());
- LLDB_REGISTER_METHOD(const char *, SBValue, GetObjectDescription, ());
- LLDB_REGISTER_METHOD(lldb::SBType, SBValue, GetType, ());
- LLDB_REGISTER_METHOD(bool, SBValue, GetValueDidChange, ());
- LLDB_REGISTER_METHOD(const char *, SBValue, GetSummary, ());
- LLDB_REGISTER_METHOD(const char *, SBValue, GetSummary,
- (lldb::SBStream &, lldb::SBTypeSummaryOptions &));
- LLDB_REGISTER_METHOD(const char *, SBValue, GetLocation, ());
- LLDB_REGISTER_METHOD(bool, SBValue, SetValueFromCString, (const char *));
- LLDB_REGISTER_METHOD(bool, SBValue, SetValueFromCString,
- (const char *, lldb::SBError &));
- LLDB_REGISTER_METHOD(lldb::SBTypeFormat, SBValue, GetTypeFormat, ());
- LLDB_REGISTER_METHOD(lldb::SBTypeSummary, SBValue, GetTypeSummary, ());
- LLDB_REGISTER_METHOD(lldb::SBTypeFilter, SBValue, GetTypeFilter, ());
- LLDB_REGISTER_METHOD(lldb::SBTypeSynthetic, SBValue, GetTypeSynthetic, ());
- LLDB_REGISTER_METHOD(lldb::SBValue, SBValue, CreateChildAtOffset,
- (const char *, uint32_t, lldb::SBType));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBValue, Cast, (lldb::SBType));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBValue, CreateValueFromExpression,
- (const char *, const char *));
- LLDB_REGISTER_METHOD(
- lldb::SBValue, SBValue, CreateValueFromExpression,
- (const char *, const char *, lldb::SBExpressionOptions &));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBValue, CreateValueFromAddress,
- (const char *, lldb::addr_t, lldb::SBType));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBValue, CreateValueFromData,
- (const char *, lldb::SBData, lldb::SBType));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBValue, GetChildAtIndex, (uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBValue, GetChildAtIndex,
- (uint32_t, lldb::DynamicValueType, bool));
- LLDB_REGISTER_METHOD(uint32_t, SBValue, GetIndexOfChildWithName,
- (const char *));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBValue, GetChildMemberWithName,
- (const char *));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBValue, GetChildMemberWithName,
- (const char *, lldb::DynamicValueType));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBValue, GetDynamicValue,
- (lldb::DynamicValueType));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBValue, GetStaticValue, ());
- LLDB_REGISTER_METHOD(lldb::SBValue, SBValue, GetNonSyntheticValue, ());
- LLDB_REGISTER_METHOD(lldb::DynamicValueType, SBValue, GetPreferDynamicValue,
- ());
- LLDB_REGISTER_METHOD(void, SBValue, SetPreferDynamicValue,
- (lldb::DynamicValueType));
- LLDB_REGISTER_METHOD(bool, SBValue, GetPreferSyntheticValue, ());
- LLDB_REGISTER_METHOD(void, SBValue, SetPreferSyntheticValue, (bool));
- LLDB_REGISTER_METHOD(bool, SBValue, IsDynamic, ());
- LLDB_REGISTER_METHOD(bool, SBValue, IsSynthetic, ());
- LLDB_REGISTER_METHOD(bool, SBValue, IsSyntheticChildrenGenerated, ());
- LLDB_REGISTER_METHOD(void, SBValue, SetSyntheticChildrenGenerated, (bool));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBValue, GetValueForExpressionPath,
- (const char *));
- LLDB_REGISTER_METHOD(int64_t, SBValue, GetValueAsSigned,
- (lldb::SBError &, int64_t));
- LLDB_REGISTER_METHOD(uint64_t, SBValue, GetValueAsUnsigned,
- (lldb::SBError &, uint64_t));
- LLDB_REGISTER_METHOD(int64_t, SBValue, GetValueAsSigned, (int64_t));
- LLDB_REGISTER_METHOD(uint64_t, SBValue, GetValueAsUnsigned, (uint64_t));
- LLDB_REGISTER_METHOD(bool, SBValue, MightHaveChildren, ());
- LLDB_REGISTER_METHOD(bool, SBValue, IsRuntimeSupportValue, ());
- LLDB_REGISTER_METHOD(uint32_t, SBValue, GetNumChildren, ());
- LLDB_REGISTER_METHOD(uint32_t, SBValue, GetNumChildren, (uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBValue, Dereference, ());
- LLDB_REGISTER_METHOD(bool, SBValue, TypeIsPointerType, ());
- LLDB_REGISTER_METHOD(void *, SBValue, GetOpaqueType, ());
- LLDB_REGISTER_METHOD(lldb::SBTarget, SBValue, GetTarget, ());
- LLDB_REGISTER_METHOD(lldb::SBProcess, SBValue, GetProcess, ());
- LLDB_REGISTER_METHOD(lldb::SBThread, SBValue, GetThread, ());
- LLDB_REGISTER_METHOD(lldb::SBFrame, SBValue, GetFrame, ());
- LLDB_REGISTER_METHOD_CONST(lldb::ValueObjectSP, SBValue, GetSP, ());
- LLDB_REGISTER_METHOD(bool, SBValue, GetExpressionPath, (lldb::SBStream &));
- LLDB_REGISTER_METHOD(bool, SBValue, GetExpressionPath,
- (lldb::SBStream &, bool));
- LLDB_REGISTER_METHOD_CONST(lldb::SBValue, SBValue, EvaluateExpression,
- (const char *));
- LLDB_REGISTER_METHOD_CONST(
- lldb::SBValue, SBValue, EvaluateExpression,
- (const char *, const lldb::SBExpressionOptions &));
- LLDB_REGISTER_METHOD_CONST(
- lldb::SBValue, SBValue, EvaluateExpression,
- (const char *, const lldb::SBExpressionOptions &, const char *));
- LLDB_REGISTER_METHOD(bool, SBValue, GetDescription, (lldb::SBStream &));
- LLDB_REGISTER_METHOD(lldb::Format, SBValue, GetFormat, ());
- LLDB_REGISTER_METHOD(void, SBValue, SetFormat, (lldb::Format));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBValue, AddressOf, ());
- LLDB_REGISTER_METHOD(lldb::addr_t, SBValue, GetLoadAddress, ());
- LLDB_REGISTER_METHOD(lldb::SBAddress, SBValue, GetAddress, ());
- LLDB_REGISTER_METHOD(lldb::SBData, SBValue, GetPointeeData,
- (uint32_t, uint32_t));
- LLDB_REGISTER_METHOD(lldb::SBData, SBValue, GetData, ());
- LLDB_REGISTER_METHOD(bool, SBValue, SetData,
- (lldb::SBData &, lldb::SBError &));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBValue, Clone, (const char *));
- LLDB_REGISTER_METHOD(lldb::SBDeclaration, SBValue, GetDeclaration, ());
- LLDB_REGISTER_METHOD(lldb::SBWatchpoint, SBValue, Watch,
- (bool, bool, bool, lldb::SBError &));
- LLDB_REGISTER_METHOD(lldb::SBWatchpoint, SBValue, Watch,
- (bool, bool, bool));
- LLDB_REGISTER_METHOD(lldb::SBWatchpoint, SBValue, WatchPointee,
- (bool, bool, bool, lldb::SBError &));
- LLDB_REGISTER_METHOD(lldb::SBValue, SBValue, Persist, ());
-}
-
-}
+ return persisted_sb;
}
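
The change repeated across these SB API files is mechanical: the reproducer-capture macros (LLDB_RECORD_CONSTRUCTOR*, LLDB_RECORD_METHOD*, LLDB_RECORD_STATIC_METHOD, LLDB_RECORD_RESULT) and the per-class RegisterMethods<> tables in lldb_private::repro are replaced by a single LLDB_INSTRUMENT_VA(...) call at the top of each entry point, and results are returned directly. A minimal before/after sketch of the pattern; the class and method below are hypothetical stand-ins, not code from this diff:

    // Before: reproducer macros wrap both the method and its result.
    lldb::SBError SBExample::GetStatus() {
      LLDB_RECORD_METHOD_NO_ARGS(lldb::SBError, SBExample, GetStatus);
      SBError sb_error;
      return LLDB_RECORD_RESULT(sb_error);
    }

    // After: one variadic instrumentation macro, plain return.
    lldb::SBError SBExample::GetStatus() {
      LLDB_INSTRUMENT_VA(this);
      SBError sb_error;
      return sb_error;
    }

Instance methods pass this plus their arguments to LLDB_INSTRUMENT_VA; static methods, as in the SBWatchpoint event helpers further down, pass only the arguments. The include accordingly switches from the private SBReproducerPrivate.h to lldb/Utility/Instrumentation.h.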
diff --git a/contrib/llvm-project/lldb/source/API/SBValueList.cpp b/contrib/llvm-project/lldb/source/API/SBValueList.cpp
index 0fd2a591c321..a67030c506f4 100644
--- a/contrib/llvm-project/lldb/source/API/SBValueList.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBValueList.cpp
@@ -7,10 +7,10 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBValueList.h"
-#include "SBReproducerPrivate.h"
#include "lldb/API/SBStream.h"
#include "lldb/API/SBValue.h"
#include "lldb/Core/ValueObjectList.h"
+#include "lldb/Utility/Instrumentation.h"
#include <vector>
@@ -19,7 +19,7 @@ using namespace lldb_private;
class ValueListImpl {
public:
- ValueListImpl() : m_values() {}
+ ValueListImpl() {}
ValueListImpl(const ValueListImpl &rhs) : m_values(rhs.m_values) {}
@@ -67,18 +67,16 @@ private:
std::vector<lldb::SBValue> m_values;
};
-SBValueList::SBValueList() : m_opaque_up() {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBValueList);
-}
+SBValueList::SBValueList() { LLDB_INSTRUMENT_VA(this); }
-SBValueList::SBValueList(const SBValueList &rhs) : m_opaque_up() {
- LLDB_RECORD_CONSTRUCTOR(SBValueList, (const lldb::SBValueList &), rhs);
+SBValueList::SBValueList(const SBValueList &rhs) {
+ LLDB_INSTRUMENT_VA(this, rhs);
if (rhs.IsValid())
m_opaque_up = std::make_unique<ValueListImpl>(*rhs);
}
-SBValueList::SBValueList(const ValueListImpl *lldb_object_ptr) : m_opaque_up() {
+SBValueList::SBValueList(const ValueListImpl *lldb_object_ptr) {
if (lldb_object_ptr)
m_opaque_up = std::make_unique<ValueListImpl>(*lldb_object_ptr);
}
@@ -86,24 +84,23 @@ SBValueList::SBValueList(const ValueListImpl *lldb_object_ptr) : m_opaque_up() {
SBValueList::~SBValueList() = default;
bool SBValueList::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBValueList, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBValueList::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBValueList, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return (m_opaque_up != nullptr);
}
void SBValueList::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBValueList, Clear);
+ LLDB_INSTRUMENT_VA(this);
m_opaque_up.reset();
}
const SBValueList &SBValueList::operator=(const SBValueList &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBValueList &,
- SBValueList, operator=,(const lldb::SBValueList &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
if (this != &rhs) {
if (rhs.IsValid())
@@ -111,7 +108,7 @@ const SBValueList &SBValueList::operator=(const SBValueList &rhs) {
else
m_opaque_up.reset();
}
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
ValueListImpl *SBValueList::operator->() { return m_opaque_up.get(); }
@@ -125,8 +122,7 @@ const ValueListImpl *SBValueList::operator->() const {
const ValueListImpl &SBValueList::operator*() const { return *m_opaque_up; }
void SBValueList::Append(const SBValue &val_obj) {
- LLDB_RECORD_METHOD(void, SBValueList, Append, (const lldb::SBValue &),
- val_obj);
+ LLDB_INSTRUMENT_VA(this, val_obj);
CreateIfNeeded();
m_opaque_up->Append(val_obj);
@@ -140,8 +136,7 @@ void SBValueList::Append(lldb::ValueObjectSP &val_obj_sp) {
}
void SBValueList::Append(const lldb::SBValueList &value_list) {
- LLDB_RECORD_METHOD(void, SBValueList, Append, (const lldb::SBValueList &),
- value_list);
+ LLDB_INSTRUMENT_VA(this, value_list);
if (value_list.IsValid()) {
CreateIfNeeded();
@@ -150,19 +145,17 @@ void SBValueList::Append(const lldb::SBValueList &value_list) {
}
SBValue SBValueList::GetValueAtIndex(uint32_t idx) const {
- LLDB_RECORD_METHOD_CONST(lldb::SBValue, SBValueList, GetValueAtIndex,
- (uint32_t), idx);
-
+ LLDB_INSTRUMENT_VA(this, idx);
SBValue sb_value;
if (m_opaque_up)
sb_value = m_opaque_up->GetValueAtIndex(idx);
- return LLDB_RECORD_RESULT(sb_value);
+ return sb_value;
}
uint32_t SBValueList::GetSize() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(uint32_t, SBValueList, GetSize);
+ LLDB_INSTRUMENT_VA(this);
uint32_t size = 0;
if (m_opaque_up)
@@ -177,23 +170,21 @@ void SBValueList::CreateIfNeeded() {
}
SBValue SBValueList::FindValueObjectByUID(lldb::user_id_t uid) {
- LLDB_RECORD_METHOD(lldb::SBValue, SBValueList, FindValueObjectByUID,
- (lldb::user_id_t), uid);
+ LLDB_INSTRUMENT_VA(this, uid);
SBValue sb_value;
if (m_opaque_up)
sb_value = m_opaque_up->FindValueByUID(uid);
- return LLDB_RECORD_RESULT(sb_value);
+ return sb_value;
}
SBValue SBValueList::GetFirstValueByName(const char *name) const {
- LLDB_RECORD_METHOD_CONST(lldb::SBValue, SBValueList, GetFirstValueByName,
- (const char *), name);
+ LLDB_INSTRUMENT_VA(this, name);
SBValue sb_value;
if (m_opaque_up)
sb_value = m_opaque_up->GetFirstValueByName(name);
- return LLDB_RECORD_RESULT(sb_value);
+ return sb_value;
}
void *SBValueList::opaque_ptr() { return m_opaque_up.get(); }
@@ -202,30 +193,3 @@ ValueListImpl &SBValueList::ref() {
CreateIfNeeded();
return *m_opaque_up;
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBValueList>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBValueList, ());
- LLDB_REGISTER_CONSTRUCTOR(SBValueList, (const lldb::SBValueList &));
- LLDB_REGISTER_METHOD_CONST(bool, SBValueList, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBValueList, operator bool, ());
- LLDB_REGISTER_METHOD(void, SBValueList, Clear, ());
- LLDB_REGISTER_METHOD(const lldb::SBValueList &,
- SBValueList, operator=,(const lldb::SBValueList &));
- LLDB_REGISTER_METHOD(void, SBValueList, Append, (const lldb::SBValue &));
- LLDB_REGISTER_METHOD(void, SBValueList, Append,
- (const lldb::SBValueList &));
- LLDB_REGISTER_METHOD_CONST(lldb::SBValue, SBValueList, GetValueAtIndex,
- (uint32_t));
- LLDB_REGISTER_METHOD_CONST(uint32_t, SBValueList, GetSize, ());
- LLDB_REGISTER_METHOD(lldb::SBValue, SBValueList, FindValueObjectByUID,
- (lldb::user_id_t));
- LLDB_REGISTER_METHOD_CONST(lldb::SBValue, SBValueList, GetFirstValueByName,
- (const char *));
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBVariablesOptions.cpp b/contrib/llvm-project/lldb/source/API/SBVariablesOptions.cpp
index 1af582a0c3d7..989d159139cc 100644
--- a/contrib/llvm-project/lldb/source/API/SBVariablesOptions.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBVariablesOptions.cpp
@@ -7,9 +7,9 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBVariablesOptions.h"
-#include "SBReproducerPrivate.h"
#include "lldb/API/SBTarget.h"
#include "lldb/Target/Target.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/lldb-private.h"
@@ -80,132 +80,116 @@ private:
SBVariablesOptions::SBVariablesOptions()
: m_opaque_up(new VariablesOptionsImpl()) {
- LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBVariablesOptions);
+ LLDB_INSTRUMENT_VA(this);
}
SBVariablesOptions::SBVariablesOptions(const SBVariablesOptions &options)
: m_opaque_up(new VariablesOptionsImpl(options.ref())) {
- LLDB_RECORD_CONSTRUCTOR(SBVariablesOptions,
- (const lldb::SBVariablesOptions &), options);
+ LLDB_INSTRUMENT_VA(this, options);
}
SBVariablesOptions &SBVariablesOptions::
operator=(const SBVariablesOptions &options) {
- LLDB_RECORD_METHOD(
- lldb::SBVariablesOptions &,
- SBVariablesOptions, operator=,(const lldb::SBVariablesOptions &),
- options);
+ LLDB_INSTRUMENT_VA(this, options);
m_opaque_up = std::make_unique<VariablesOptionsImpl>(options.ref());
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBVariablesOptions::~SBVariablesOptions() = default;
bool SBVariablesOptions::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBVariablesOptions, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBVariablesOptions::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBVariablesOptions, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up != nullptr;
}
bool SBVariablesOptions::GetIncludeArguments() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBVariablesOptions,
- GetIncludeArguments);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetIncludeArguments();
}
void SBVariablesOptions::SetIncludeArguments(bool arguments) {
- LLDB_RECORD_METHOD(void, SBVariablesOptions, SetIncludeArguments, (bool),
- arguments);
+ LLDB_INSTRUMENT_VA(this, arguments);
m_opaque_up->SetIncludeArguments(arguments);
}
bool SBVariablesOptions::GetIncludeRecognizedArguments(
const lldb::SBTarget &target) const {
- LLDB_RECORD_METHOD_CONST(bool, SBVariablesOptions,
- GetIncludeRecognizedArguments,
- (const lldb::SBTarget &), target);
+ LLDB_INSTRUMENT_VA(this, target);
return m_opaque_up->GetIncludeRecognizedArguments(target.GetSP());
}
void SBVariablesOptions::SetIncludeRecognizedArguments(bool arguments) {
- LLDB_RECORD_METHOD(void, SBVariablesOptions, SetIncludeRecognizedArguments,
- (bool), arguments);
+ LLDB_INSTRUMENT_VA(this, arguments);
m_opaque_up->SetIncludeRecognizedArguments(arguments);
}
bool SBVariablesOptions::GetIncludeLocals() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBVariablesOptions, GetIncludeLocals);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetIncludeLocals();
}
void SBVariablesOptions::SetIncludeLocals(bool locals) {
- LLDB_RECORD_METHOD(void, SBVariablesOptions, SetIncludeLocals, (bool),
- locals);
+ LLDB_INSTRUMENT_VA(this, locals);
m_opaque_up->SetIncludeLocals(locals);
}
bool SBVariablesOptions::GetIncludeStatics() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBVariablesOptions, GetIncludeStatics);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetIncludeStatics();
}
void SBVariablesOptions::SetIncludeStatics(bool statics) {
- LLDB_RECORD_METHOD(void, SBVariablesOptions, SetIncludeStatics, (bool),
- statics);
+ LLDB_INSTRUMENT_VA(this, statics);
m_opaque_up->SetIncludeStatics(statics);
}
bool SBVariablesOptions::GetInScopeOnly() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBVariablesOptions, GetInScopeOnly);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetInScopeOnly();
}
void SBVariablesOptions::SetInScopeOnly(bool in_scope_only) {
- LLDB_RECORD_METHOD(void, SBVariablesOptions, SetInScopeOnly, (bool),
- in_scope_only);
+ LLDB_INSTRUMENT_VA(this, in_scope_only);
m_opaque_up->SetInScopeOnly(in_scope_only);
}
bool SBVariablesOptions::GetIncludeRuntimeSupportValues() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBVariablesOptions,
- GetIncludeRuntimeSupportValues);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetIncludeRuntimeSupportValues();
}
void SBVariablesOptions::SetIncludeRuntimeSupportValues(
bool runtime_support_values) {
- LLDB_RECORD_METHOD(void, SBVariablesOptions, SetIncludeRuntimeSupportValues,
- (bool), runtime_support_values);
+ LLDB_INSTRUMENT_VA(this, runtime_support_values);
m_opaque_up->SetIncludeRuntimeSupportValues(runtime_support_values);
}
lldb::DynamicValueType SBVariablesOptions::GetUseDynamic() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::DynamicValueType, SBVariablesOptions,
- GetUseDynamic);
+ LLDB_INSTRUMENT_VA(this);
return m_opaque_up->GetUseDynamic();
}
void SBVariablesOptions::SetUseDynamic(lldb::DynamicValueType dynamic) {
- LLDB_RECORD_METHOD(void, SBVariablesOptions, SetUseDynamic,
- (lldb::DynamicValueType), dynamic);
+ LLDB_INSTRUMENT_VA(this, dynamic);
m_opaque_up->SetUseDynamic(dynamic);
}
@@ -232,43 +216,3 @@ SBVariablesOptions::SBVariablesOptions(VariablesOptionsImpl *lldb_object_ptr)
void SBVariablesOptions::SetOptions(VariablesOptionsImpl *lldb_object_ptr) {
m_opaque_up.reset(std::move(lldb_object_ptr));
}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBVariablesOptions>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBVariablesOptions, ());
- LLDB_REGISTER_CONSTRUCTOR(SBVariablesOptions,
- (const lldb::SBVariablesOptions &));
- LLDB_REGISTER_METHOD(
- lldb::SBVariablesOptions &,
- SBVariablesOptions, operator=,(const lldb::SBVariablesOptions &));
- LLDB_REGISTER_METHOD_CONST(bool, SBVariablesOptions, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBVariablesOptions, operator bool, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBVariablesOptions, GetIncludeArguments,
- ());
- LLDB_REGISTER_METHOD(void, SBVariablesOptions, SetIncludeArguments, (bool));
- LLDB_REGISTER_METHOD_CONST(bool, SBVariablesOptions,
- GetIncludeRecognizedArguments,
- (const lldb::SBTarget &));
- LLDB_REGISTER_METHOD(void, SBVariablesOptions,
- SetIncludeRecognizedArguments, (bool));
- LLDB_REGISTER_METHOD_CONST(bool, SBVariablesOptions, GetIncludeLocals, ());
- LLDB_REGISTER_METHOD(void, SBVariablesOptions, SetIncludeLocals, (bool));
- LLDB_REGISTER_METHOD_CONST(bool, SBVariablesOptions, GetIncludeStatics, ());
- LLDB_REGISTER_METHOD(void, SBVariablesOptions, SetIncludeStatics, (bool));
- LLDB_REGISTER_METHOD_CONST(bool, SBVariablesOptions, GetInScopeOnly, ());
- LLDB_REGISTER_METHOD(void, SBVariablesOptions, SetInScopeOnly, (bool));
- LLDB_REGISTER_METHOD_CONST(bool, SBVariablesOptions,
- GetIncludeRuntimeSupportValues, ());
- LLDB_REGISTER_METHOD(void, SBVariablesOptions,
- SetIncludeRuntimeSupportValues, (bool));
- LLDB_REGISTER_METHOD_CONST(lldb::DynamicValueType, SBVariablesOptions,
- GetUseDynamic, ());
- LLDB_REGISTER_METHOD(void, SBVariablesOptions, SetUseDynamic,
- (lldb::DynamicValueType));
-}
-
-}
-}
diff --git a/contrib/llvm-project/lldb/source/API/SBWatchpoint.cpp b/contrib/llvm-project/lldb/source/API/SBWatchpoint.cpp
index eba75dea8f8d..f5bd9cd1a946 100644
--- a/contrib/llvm-project/lldb/source/API/SBWatchpoint.cpp
+++ b/contrib/llvm-project/lldb/source/API/SBWatchpoint.cpp
@@ -7,12 +7,12 @@
//===----------------------------------------------------------------------===//
#include "lldb/API/SBWatchpoint.h"
-#include "SBReproducerPrivate.h"
#include "lldb/API/SBAddress.h"
#include "lldb/API/SBDebugger.h"
#include "lldb/API/SBDefines.h"
#include "lldb/API/SBEvent.h"
#include "lldb/API/SBStream.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Breakpoint/Watchpoint.h"
#include "lldb/Breakpoint/WatchpointList.h"
@@ -26,31 +26,29 @@
using namespace lldb;
using namespace lldb_private;
-SBWatchpoint::SBWatchpoint() { LLDB_RECORD_CONSTRUCTOR_NO_ARGS(SBWatchpoint); }
+SBWatchpoint::SBWatchpoint() { LLDB_INSTRUMENT_VA(this); }
SBWatchpoint::SBWatchpoint(const lldb::WatchpointSP &wp_sp)
: m_opaque_wp(wp_sp) {
- LLDB_RECORD_CONSTRUCTOR(SBWatchpoint, (const lldb::WatchpointSP &), wp_sp);
+ LLDB_INSTRUMENT_VA(this, wp_sp);
}
SBWatchpoint::SBWatchpoint(const SBWatchpoint &rhs)
: m_opaque_wp(rhs.m_opaque_wp) {
- LLDB_RECORD_CONSTRUCTOR(SBWatchpoint, (const lldb::SBWatchpoint &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
}
const SBWatchpoint &SBWatchpoint::operator=(const SBWatchpoint &rhs) {
- LLDB_RECORD_METHOD(const lldb::SBWatchpoint &,
- SBWatchpoint, operator=,(const lldb::SBWatchpoint &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
m_opaque_wp = rhs.m_opaque_wp;
- return LLDB_RECORD_RESULT(*this);
+ return *this;
}
SBWatchpoint::~SBWatchpoint() = default;
watch_id_t SBWatchpoint::GetID() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::watch_id_t, SBWatchpoint, GetID);
-
+ LLDB_INSTRUMENT_VA(this);
watch_id_t watch_id = LLDB_INVALID_WATCH_ID;
lldb::WatchpointSP watchpoint_sp(GetSP());
@@ -61,42 +59,40 @@ watch_id_t SBWatchpoint::GetID() {
}
bool SBWatchpoint::IsValid() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBWatchpoint, IsValid);
+ LLDB_INSTRUMENT_VA(this);
return this->operator bool();
}
SBWatchpoint::operator bool() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(bool, SBWatchpoint, operator bool);
+ LLDB_INSTRUMENT_VA(this);
return bool(m_opaque_wp.lock());
}
bool SBWatchpoint::operator==(const SBWatchpoint &rhs) const {
- LLDB_RECORD_METHOD_CONST(
- bool, SBWatchpoint, operator==,(const SBWatchpoint &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
return GetSP() == rhs.GetSP();
}
bool SBWatchpoint::operator!=(const SBWatchpoint &rhs) const {
- LLDB_RECORD_METHOD_CONST(
- bool, SBWatchpoint, operator!=,(const SBWatchpoint &), rhs);
+ LLDB_INSTRUMENT_VA(this, rhs);
return !(*this == rhs);
}
SBError SBWatchpoint::GetError() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::SBError, SBWatchpoint, GetError);
+ LLDB_INSTRUMENT_VA(this);
SBError sb_error;
lldb::WatchpointSP watchpoint_sp(GetSP());
if (watchpoint_sp) {
sb_error.SetError(watchpoint_sp->GetError());
}
- return LLDB_RECORD_RESULT(sb_error);
+ return sb_error;
}
int32_t SBWatchpoint::GetHardwareIndex() {
- LLDB_RECORD_METHOD_NO_ARGS(int32_t, SBWatchpoint, GetHardwareIndex);
+ LLDB_INSTRUMENT_VA(this);
int32_t hw_index = -1;
@@ -111,7 +107,7 @@ int32_t SBWatchpoint::GetHardwareIndex() {
}
addr_t SBWatchpoint::GetWatchAddress() {
- LLDB_RECORD_METHOD_NO_ARGS(lldb::addr_t, SBWatchpoint, GetWatchAddress);
+ LLDB_INSTRUMENT_VA(this);
addr_t ret_addr = LLDB_INVALID_ADDRESS;
@@ -126,7 +122,7 @@ addr_t SBWatchpoint::GetWatchAddress() {
}
size_t SBWatchpoint::GetWatchSize() {
- LLDB_RECORD_METHOD_NO_ARGS(size_t, SBWatchpoint, GetWatchSize);
+ LLDB_INSTRUMENT_VA(this);
size_t watch_size = 0;
@@ -141,7 +137,7 @@ size_t SBWatchpoint::GetWatchSize() {
}
void SBWatchpoint::SetEnabled(bool enabled) {
- LLDB_RECORD_METHOD(void, SBWatchpoint, SetEnabled, (bool), enabled);
+ LLDB_INSTRUMENT_VA(this, enabled);
lldb::WatchpointSP watchpoint_sp(GetSP());
if (watchpoint_sp) {
@@ -161,7 +157,7 @@ void SBWatchpoint::SetEnabled(bool enabled) {
}
bool SBWatchpoint::IsEnabled() {
- LLDB_RECORD_METHOD_NO_ARGS(bool, SBWatchpoint, IsEnabled);
+ LLDB_INSTRUMENT_VA(this);
lldb::WatchpointSP watchpoint_sp(GetSP());
if (watchpoint_sp) {
@@ -173,7 +169,7 @@ bool SBWatchpoint::IsEnabled() {
}
uint32_t SBWatchpoint::GetHitCount() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBWatchpoint, GetHitCount);
+ LLDB_INSTRUMENT_VA(this);
uint32_t count = 0;
lldb::WatchpointSP watchpoint_sp(GetSP());
@@ -187,7 +183,7 @@ uint32_t SBWatchpoint::GetHitCount() {
}
uint32_t SBWatchpoint::GetIgnoreCount() {
- LLDB_RECORD_METHOD_NO_ARGS(uint32_t, SBWatchpoint, GetIgnoreCount);
+ LLDB_INSTRUMENT_VA(this);
lldb::WatchpointSP watchpoint_sp(GetSP());
if (watchpoint_sp) {
@@ -199,7 +195,7 @@ uint32_t SBWatchpoint::GetIgnoreCount() {
}
void SBWatchpoint::SetIgnoreCount(uint32_t n) {
- LLDB_RECORD_METHOD(void, SBWatchpoint, SetIgnoreCount, (uint32_t), n);
+ LLDB_INSTRUMENT_VA(this, n);
lldb::WatchpointSP watchpoint_sp(GetSP());
if (watchpoint_sp) {
@@ -210,7 +206,7 @@ void SBWatchpoint::SetIgnoreCount(uint32_t n) {
}
const char *SBWatchpoint::GetCondition() {
- LLDB_RECORD_METHOD_NO_ARGS(const char *, SBWatchpoint, GetCondition);
+ LLDB_INSTRUMENT_VA(this);
lldb::WatchpointSP watchpoint_sp(GetSP());
if (watchpoint_sp) {
@@ -222,8 +218,7 @@ const char *SBWatchpoint::GetCondition() {
}
void SBWatchpoint::SetCondition(const char *condition) {
- LLDB_RECORD_METHOD(void, SBWatchpoint, SetCondition, (const char *),
- condition);
+ LLDB_INSTRUMENT_VA(this, condition);
lldb::WatchpointSP watchpoint_sp(GetSP());
if (watchpoint_sp) {
@@ -235,9 +230,7 @@ void SBWatchpoint::SetCondition(const char *condition) {
bool SBWatchpoint::GetDescription(SBStream &description,
DescriptionLevel level) {
- LLDB_RECORD_METHOD(bool, SBWatchpoint, GetDescription,
- (lldb::SBStream &, lldb::DescriptionLevel), description,
- level);
+ LLDB_INSTRUMENT_VA(this, description, level);
Stream &strm = description.ref();
@@ -254,27 +247,25 @@ bool SBWatchpoint::GetDescription(SBStream &description,
}
void SBWatchpoint::Clear() {
- LLDB_RECORD_METHOD_NO_ARGS(void, SBWatchpoint, Clear);
+ LLDB_INSTRUMENT_VA(this);
m_opaque_wp.reset();
}
lldb::WatchpointSP SBWatchpoint::GetSP() const {
- LLDB_RECORD_METHOD_CONST_NO_ARGS(lldb::WatchpointSP, SBWatchpoint, GetSP);
+ LLDB_INSTRUMENT_VA(this);
- return LLDB_RECORD_RESULT(m_opaque_wp.lock());
+ return m_opaque_wp.lock();
}
void SBWatchpoint::SetSP(const lldb::WatchpointSP &sp) {
- LLDB_RECORD_METHOD(void, SBWatchpoint, SetSP, (const lldb::WatchpointSP &),
- sp);
+ LLDB_INSTRUMENT_VA(this, sp);
m_opaque_wp = sp;
}
bool SBWatchpoint::EventIsWatchpointEvent(const lldb::SBEvent &event) {
- LLDB_RECORD_STATIC_METHOD(bool, SBWatchpoint, EventIsWatchpointEvent,
- (const lldb::SBEvent &), event);
+ LLDB_INSTRUMENT_VA(event);
return Watchpoint::WatchpointEventData::GetEventDataFromEvent(event.get()) !=
nullptr;
@@ -282,9 +273,7 @@ bool SBWatchpoint::EventIsWatchpointEvent(const lldb::SBEvent &event) {
WatchpointEventType
SBWatchpoint::GetWatchpointEventTypeFromEvent(const SBEvent &event) {
- LLDB_RECORD_STATIC_METHOD(lldb::WatchpointEventType, SBWatchpoint,
- GetWatchpointEventTypeFromEvent,
- (const lldb::SBEvent &), event);
+ LLDB_INSTRUMENT_VA(event);
if (event.IsValid())
return Watchpoint::WatchpointEventData::GetWatchpointEventTypeFromEvent(
@@ -293,60 +282,11 @@ SBWatchpoint::GetWatchpointEventTypeFromEvent(const SBEvent &event) {
}
SBWatchpoint SBWatchpoint::GetWatchpointFromEvent(const lldb::SBEvent &event) {
- LLDB_RECORD_STATIC_METHOD(lldb::SBWatchpoint, SBWatchpoint,
- GetWatchpointFromEvent, (const lldb::SBEvent &),
- event);
+ LLDB_INSTRUMENT_VA(event);
SBWatchpoint sb_watchpoint;
if (event.IsValid())
sb_watchpoint =
Watchpoint::WatchpointEventData::GetWatchpointFromEvent(event.GetSP());
- return LLDB_RECORD_RESULT(sb_watchpoint);
-}
-
-namespace lldb_private {
-namespace repro {
-
-template <>
-void RegisterMethods<SBWatchpoint>(Registry &R) {
- LLDB_REGISTER_CONSTRUCTOR(SBWatchpoint, ());
- LLDB_REGISTER_CONSTRUCTOR(SBWatchpoint, (const lldb::WatchpointSP &));
- LLDB_REGISTER_CONSTRUCTOR(SBWatchpoint, (const lldb::SBWatchpoint &));
- LLDB_REGISTER_METHOD(const lldb::SBWatchpoint &,
- SBWatchpoint, operator=,(const lldb::SBWatchpoint &));
- LLDB_REGISTER_METHOD(lldb::watch_id_t, SBWatchpoint, GetID, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBWatchpoint, IsValid, ());
- LLDB_REGISTER_METHOD_CONST(bool, SBWatchpoint, operator bool, ());
- LLDB_REGISTER_METHOD_CONST(
- bool, SBWatchpoint, operator==,(const lldb::SBWatchpoint &));
- LLDB_REGISTER_METHOD_CONST(
- bool, SBWatchpoint, operator!=,(const lldb::SBWatchpoint &));
- LLDB_REGISTER_METHOD(lldb::SBError, SBWatchpoint, GetError, ());
- LLDB_REGISTER_METHOD(int32_t, SBWatchpoint, GetHardwareIndex, ());
- LLDB_REGISTER_METHOD(lldb::addr_t, SBWatchpoint, GetWatchAddress, ());
- LLDB_REGISTER_METHOD(size_t, SBWatchpoint, GetWatchSize, ());
- LLDB_REGISTER_METHOD(void, SBWatchpoint, SetEnabled, (bool));
- LLDB_REGISTER_METHOD(bool, SBWatchpoint, IsEnabled, ());
- LLDB_REGISTER_METHOD(uint32_t, SBWatchpoint, GetHitCount, ());
- LLDB_REGISTER_METHOD(uint32_t, SBWatchpoint, GetIgnoreCount, ());
- LLDB_REGISTER_METHOD(void, SBWatchpoint, SetIgnoreCount, (uint32_t));
- LLDB_REGISTER_METHOD(const char *, SBWatchpoint, GetCondition, ());
- LLDB_REGISTER_METHOD(void, SBWatchpoint, SetCondition, (const char *));
- LLDB_REGISTER_METHOD(bool, SBWatchpoint, GetDescription,
- (lldb::SBStream &, lldb::DescriptionLevel));
- LLDB_REGISTER_METHOD(void, SBWatchpoint, Clear, ());
- LLDB_REGISTER_METHOD_CONST(lldb::WatchpointSP, SBWatchpoint, GetSP, ());
- LLDB_REGISTER_METHOD(void, SBWatchpoint, SetSP,
- (const lldb::WatchpointSP &));
- LLDB_REGISTER_STATIC_METHOD(bool, SBWatchpoint, EventIsWatchpointEvent,
- (const lldb::SBEvent &));
- LLDB_REGISTER_STATIC_METHOD(lldb::WatchpointEventType, SBWatchpoint,
- GetWatchpointEventTypeFromEvent,
- (const lldb::SBEvent &));
- LLDB_REGISTER_STATIC_METHOD(lldb::SBWatchpoint, SBWatchpoint,
- GetWatchpointFromEvent,
- (const lldb::SBEvent &));
-}
-
-}
+ return sb_watchpoint;
}
diff --git a/contrib/llvm-project/lldb/source/API/SystemInitializerFull.cpp b/contrib/llvm-project/lldb/source/API/SystemInitializerFull.cpp
index b01cb2ff545b..d662d370f813 100644
--- a/contrib/llvm-project/lldb/source/API/SystemInitializerFull.cpp
+++ b/contrib/llvm-project/lldb/source/API/SystemInitializerFull.cpp
@@ -39,7 +39,7 @@ constexpr lldb_private::HostInfo::SharedLibraryDirectoryHelper
#else
constexpr lldb_private::HostInfo::SharedLibraryDirectoryHelper
- *g_shlib_dir_helper = 0;
+ *g_shlib_dir_helper = nullptr;
#endif
using namespace lldb_private;
diff --git a/contrib/llvm-project/lldb/source/Breakpoint/Breakpoint.cpp b/contrib/llvm-project/lldb/source/Breakpoint/Breakpoint.cpp
index d6acf659e852..eeb8eac33567 100644
--- a/contrib/llvm-project/lldb/source/Breakpoint/Breakpoint.cpp
+++ b/contrib/llvm-project/lldb/source/Breakpoint/Breakpoint.cpp
@@ -1094,7 +1094,7 @@ Breakpoint::BreakpointEventData::GetBreakpointLocationAtIndexFromEvent(
json::Value Breakpoint::GetStatistics() {
json::Object bp;
bp.try_emplace("id", GetID());
- bp.try_emplace("resolveTime", m_resolve_time.count());
+ bp.try_emplace("resolveTime", m_resolve_time.get().count());
bp.try_emplace("numLocations", (int64_t)GetNumLocations());
bp.try_emplace("numResolvedLocations", (int64_t)GetNumResolvedLocations());
bp.try_emplace("internal", IsInternal());
diff --git a/contrib/llvm-project/lldb/source/Breakpoint/BreakpointResolverFileRegex.cpp b/contrib/llvm-project/lldb/source/Breakpoint/BreakpointResolverFileRegex.cpp
index 435983769c05..4a11d82ecc6e 100644
--- a/contrib/llvm-project/lldb/source/Breakpoint/BreakpointResolverFileRegex.cpp
+++ b/contrib/llvm-project/lldb/source/Breakpoint/BreakpointResolverFileRegex.cpp
@@ -110,7 +110,7 @@ Searcher::CallbackReturn BreakpointResolverFileRegex::SearchCallback(
// TODO: Handle SourceLocationSpec column information
SourceLocationSpec location_spec(cu_file_spec, line_matches[i],
/*column=*/llvm::None,
- /*search_inlines=*/false, m_exact_match);
+ /*check_inlines=*/false, m_exact_match);
cu->ResolveSymbolContext(location_spec, eSymbolContextEverything, sc_list);
// Find all the function names:
if (!m_function_names.empty()) {
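
The BreakpointResolverFileRegex edit is purely cosmetic: the inline argument comment is renamed from /*search_inlines=*/ to /*check_inlines=*/, presumably to match the declared parameter name, which is the convention enforced by clang-tidy's bugprone-argument-comment check. Illustrated with a hypothetical declaration:

    // Hypothetical declaration, for illustration only.
    void ResolveLine(bool check_inlines, bool exact_match);

    // The comment must spell the parameter name exactly as declared:
    ResolveLine(/*check_inlines=*/false, /*exact_match=*/true);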
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandCompletions.cpp b/contrib/llvm-project/lldb/source/Commands/CommandCompletions.cpp
index 42b0bac717bd..ae1ee1fdd30b 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandCompletions.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandCompletions.cpp
@@ -129,7 +129,7 @@ class SourceFileCompleter : public Completer {
public:
SourceFileCompleter(CommandInterpreter &interpreter,
CompletionRequest &request)
- : Completer(interpreter, request), m_matching_files() {
+ : Completer(interpreter, request) {
FileSpec partial_spec(m_request.GetCursorArgumentPrefix());
m_file_name = partial_spec.GetFilename().GetCString();
m_dir_name = partial_spec.GetDirectory().GetCString();
@@ -600,7 +600,7 @@ void CommandCompletions::VariablePath(CommandInterpreter &interpreter,
void CommandCompletions::Registers(CommandInterpreter &interpreter,
CompletionRequest &request,
SearchFilter *searcher) {
- std::string reg_prefix = "";
+ std::string reg_prefix;
if (request.GetCursorArgumentPrefix().startswith("$"))
reg_prefix = "$";
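
Both edits in CommandCompletions.cpp drop initializers that only restate the default: m_matching_files() in the member-init list, and = "" on a std::string, which a default-constructed std::string already is (the readability-redundant-string-init style of cleanup). A two-line illustration:

    #include <string>

    std::string implicit_empty;        // default-constructed, already ""
    std::string explicit_empty = "";   // same result, written redundantly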
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectBreakpoint.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectBreakpoint.cpp
index 3f88a2fa6378..c4e55fdb3b9c 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectBreakpoint.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectBreakpoint.cpp
@@ -49,7 +49,7 @@ static void AddBreakpointDescription(Stream *s, Breakpoint *bp,
class lldb_private::BreakpointOptionGroup : public OptionGroup {
public:
- BreakpointOptionGroup() : OptionGroup(), m_bp_opts(false) {}
+ BreakpointOptionGroup() : m_bp_opts(false) {}
~BreakpointOptionGroup() override = default;
@@ -179,7 +179,7 @@ public:
class BreakpointDummyOptionGroup : public OptionGroup {
public:
- BreakpointDummyOptionGroup() : OptionGroup() {}
+ BreakpointDummyOptionGroup() {}
~BreakpointDummyOptionGroup() override = default;
@@ -234,8 +234,7 @@ public:
interpreter, "breakpoint set",
"Sets a breakpoint or set of breakpoints in the executable.",
"breakpoint set <cmd-options>"),
- m_bp_opts(), m_python_class_options("scripted breakpoint", true, 'P'),
- m_options() {
+ m_python_class_options("scripted breakpoint", true, 'P') {
// We're picking up all the normal options, commands and disable.
m_all_options.Append(&m_python_class_options,
LLDB_OPT_SET_1 | LLDB_OPT_SET_2, LLDB_OPT_SET_11);
@@ -253,9 +252,7 @@ public:
class CommandOptions : public OptionGroup {
public:
- CommandOptions()
- : OptionGroup(), m_condition(), m_filenames(), m_func_names(),
- m_func_regexp(), m_source_text_regexp(), m_modules() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -809,8 +806,7 @@ public:
"created breakpoint. "
"With the exception of -e, -d and -i, passing an "
"empty argument clears the modification.",
- nullptr),
- m_options() {
+ nullptr) {
CommandArgumentEntry arg;
CommandObject::AddIDsArgumentData(arg, eArgTypeBreakpointID,
eArgTypeBreakpointIDRange);
@@ -1100,8 +1096,7 @@ public:
: CommandObjectParsed(
interpreter, "breakpoint list",
"List some or all breakpoints at configurable levels of detail.",
- nullptr),
- m_options() {
+ nullptr) {
CommandArgumentEntry arg;
CommandArgumentData bp_id_arg;
@@ -1123,7 +1118,7 @@ public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -1246,8 +1241,7 @@ public:
: CommandObjectParsed(interpreter, "breakpoint clear",
"Delete or disable breakpoints matching the "
"specified source file and line.",
- "breakpoint clear <cmd-options>"),
- m_options() {}
+ "breakpoint clear <cmd-options>") {}
~CommandObjectBreakpointClear() override = default;
@@ -1255,7 +1249,7 @@ public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options(), m_filename() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -1384,8 +1378,7 @@ public:
: CommandObjectParsed(interpreter, "breakpoint delete",
"Delete the specified breakpoint(s). If no "
"breakpoints are specified, delete them all.",
- nullptr),
- m_options() {
+ nullptr) {
CommandArgumentEntry arg;
CommandObject::AddIDsArgumentData(arg, eArgTypeBreakpointID,
eArgTypeBreakpointIDRange);
@@ -1408,7 +1401,7 @@ public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -1565,8 +1558,7 @@ private:
class BreakpointNameOptionGroup : public OptionGroup {
public:
BreakpointNameOptionGroup()
- : OptionGroup(), m_breakpoint(LLDB_INVALID_BREAK_ID), m_use_dummy(false) {
- }
+ : m_breakpoint(LLDB_INVALID_BREAK_ID), m_use_dummy(false) {}
~BreakpointNameOptionGroup() override = default;
@@ -1626,7 +1618,7 @@ public:
class BreakpointAccessOptionGroup : public OptionGroup {
public:
- BreakpointAccessOptionGroup() : OptionGroup() {}
+ BreakpointAccessOptionGroup() {}
~BreakpointAccessOptionGroup() override = default;
@@ -1696,8 +1688,7 @@ public:
"the breakpoint, otherwise only the options specified will be set "
"on the name.",
"breakpoint name configure <command-options> "
- "<breakpoint-name-list>"),
- m_bp_opts(), m_option_group() {
+ "<breakpoint-name-list>") {
// Create the first variant for the first (and only) argument for this
// command.
CommandArgumentEntry arg1;
@@ -1787,8 +1778,7 @@ public:
CommandObjectBreakpointNameAdd(CommandInterpreter &interpreter)
: CommandObjectParsed(
interpreter, "add", "Add a name to the breakpoints provided.",
- "breakpoint name add <command-options> <breakpoint-id-list>"),
- m_name_options(), m_option_group() {
+ "breakpoint name add <command-options> <breakpoint-id-list>") {
// Create the first variant for the first (and only) argument for this
// command.
CommandArgumentEntry arg1;
@@ -1872,8 +1862,7 @@ public:
: CommandObjectParsed(
interpreter, "delete",
"Delete a name from the breakpoints provided.",
- "breakpoint name delete <command-options> <breakpoint-id-list>"),
- m_name_options(), m_option_group() {
+ "breakpoint name delete <command-options> <breakpoint-id-list>") {
// Create the first variant for the first (and only) argument for this
// command.
CommandArgumentEntry arg1;
@@ -1956,8 +1945,7 @@ public:
"List either the names for a breakpoint or info "
"about a given name. With no arguments, lists all "
"names",
- "breakpoint name list <command-options>"),
- m_name_options(), m_option_group() {
+ "breakpoint name list <command-options>") {
m_option_group.Append(&m_name_options, LLDB_OPT_SET_3, LLDB_OPT_SET_ALL);
m_option_group.Finalize();
}
@@ -2063,8 +2051,7 @@ public:
: CommandObjectParsed(interpreter, "breakpoint read",
"Read and set the breakpoints previously saved to "
"a file with \"breakpoint write\". ",
- nullptr),
- m_options() {}
+ nullptr) {}
~CommandObjectBreakpointRead() override = default;
@@ -2072,7 +2059,7 @@ public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -2245,8 +2232,7 @@ public:
"Write the breakpoints listed to a file that can "
"be read in with \"breakpoint read\". "
"If given no arguments, writes all breakpoints.",
- nullptr),
- m_options() {
+ nullptr) {
CommandArgumentEntry arg;
CommandObject::AddIDsArgumentData(arg, eArgTypeBreakpointID,
eArgTypeBreakpointIDRange);
@@ -2269,7 +2255,7 @@ public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
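
Nearly every hunk in CommandObjectBreakpoint.cpp, and in the command files that follow, is the same cleanup: constructor initializer lists stop naming base classes and members that are default-constructed anyway, such as : Options(), m_options(), or m_filenames(). A schematic sketch with hypothetical members; only non-default initializers remain worth spelling out:

    #include <string>
    #include <utility>
    #include <vector>

    class Options { public: virtual ~Options() = default; };

    class CommandOptions : public Options {
    public:
      // Before: CommandOptions() : Options(), m_condition(), m_filenames() {}
      // After: the defaults are implicit.
      CommandOptions() {}

      // Members needing a non-default value still get an explicit initializer.
      explicit CommandOptions(std::string condition)
          : m_condition(std::move(condition)) {}

    private:
      std::string m_condition;                 // hypothetical stand-ins
      std::vector<std::string> m_filenames;
    };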
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectBreakpointCommand.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectBreakpointCommand.cpp
index 26d35c82f57d..637e8b8bd578 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectBreakpointCommand.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectBreakpointCommand.cpp
@@ -69,7 +69,7 @@ public:
nullptr),
IOHandlerDelegateMultiline("DONE",
IOHandlerDelegate::Completion::LLDBCommand),
- m_options(), m_func_options("breakpoint command", false, 'F') {
+ m_func_options("breakpoint command", false, 'F') {
SetHelpLong(
R"(
General information about entering breakpoint commands
@@ -281,7 +281,7 @@ are no syntax errors may indicate that a function was declared but never called.
class CommandOptions : public OptionGroup {
public:
- CommandOptions() : OptionGroup(), m_one_liner() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -479,8 +479,7 @@ public:
CommandObjectBreakpointCommandDelete(CommandInterpreter &interpreter)
: CommandObjectParsed(interpreter, "delete",
"Delete the set of commands from a breakpoint.",
- nullptr),
- m_options() {
+ nullptr) {
CommandArgumentEntry arg;
CommandArgumentData bp_id_arg;
@@ -502,7 +501,7 @@ public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectCommands.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectCommands.cpp
index 1ec54cf7eded..4b4932dd367b 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectCommands.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectCommands.cpp
@@ -38,8 +38,7 @@ public:
: CommandObjectParsed(
interpreter, "command source",
"Read and execute LLDB commands from the file <filename>.",
- nullptr),
- m_options() {
+ nullptr) {
CommandArgumentEntry arg;
CommandArgumentData file_arg;
@@ -76,8 +75,8 @@ protected:
class CommandOptions : public Options {
public:
CommandOptions()
- : Options(), m_stop_on_error(true), m_silent_run(false),
- m_stop_on_continue(true), m_cmd_relative_to_command_file(false) {}
+ : m_stop_on_error(true), m_silent_run(false), m_stop_on_continue(true),
+ m_cmd_relative_to_command_file(false) {}
~CommandOptions() override = default;
@@ -207,7 +206,7 @@ class CommandObjectCommandsAlias : public CommandObjectRaw {
protected:
class CommandOptions : public OptionGroup {
public:
- CommandOptions() : OptionGroup(), m_help(), m_long_help() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -258,8 +257,7 @@ public:
CommandObjectCommandsAlias(CommandInterpreter &interpreter)
: CommandObjectRaw(
interpreter, "command alias",
- "Define a custom command in terms of an existing command."),
- m_option_group(), m_command_options() {
+ "Define a custom command in terms of an existing command.") {
m_option_group.Append(&m_command_options);
m_option_group.Finalize();
@@ -485,8 +483,9 @@ protected:
OptionArgVectorSP option_arg_vector_sp =
OptionArgVectorSP(new OptionArgVector);
- if (CommandObjectSP cmd_obj_sp =
- m_interpreter.GetCommandSPExact(cmd_obj.GetCommandName())) {
+ const bool include_aliases = true;
+ if (CommandObjectSP cmd_obj_sp = m_interpreter.GetCommandSPExact(
+ cmd_obj.GetCommandName(), include_aliases)) {
if (m_interpreter.AliasExists(alias_command) ||
m_interpreter.UserCommandExists(alias_command)) {
result.AppendWarningWithFormat(
@@ -792,8 +791,7 @@ public:
"regular expressions.",
"command regex <cmd-name> [s/<regex>/<subst>/ ...]"),
IOHandlerDelegateMultiline("",
- IOHandlerDelegate::Completion::LLDBCommand),
- m_options() {
+ IOHandlerDelegate::Completion::LLDBCommand) {
SetHelpLong(
R"(
)"
@@ -1024,7 +1022,7 @@ private:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -1238,8 +1236,7 @@ class CommandObjectCommandsScriptImport : public CommandObjectParsed {
public:
CommandObjectCommandsScriptImport(CommandInterpreter &interpreter)
: CommandObjectParsed(interpreter, "command script import",
- "Import a scripting module in LLDB.", nullptr),
- m_options() {
+ "Import a scripting module in LLDB.", nullptr) {
CommandArgumentEntry arg1;
CommandArgumentData cmd_arg;
@@ -1270,7 +1267,7 @@ public:
protected:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -1394,7 +1391,7 @@ public:
"must be a path to a user-added container "
"command, and the last element will be the new "
"command name."),
- IOHandlerDelegateMultiline("DONE"), m_options() {
+ IOHandlerDelegateMultiline("DONE") {
CommandArgumentEntry arg1;
CommandArgumentData cmd_arg;
@@ -1425,8 +1422,7 @@ public:
protected:
class CommandOptions : public Options {
public:
- CommandOptions()
- : Options(), m_class_name(), m_funct_name(), m_short_help() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -1883,7 +1879,7 @@ public:
protected:
class CommandOptions : public Options {
public:
- CommandOptions() : Options(), m_short_help(), m_long_help() {}
+ CommandOptions() {}
~CommandOptions() override = default;
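
One behavioral tweak hides among the constructor cleanups in CommandObjectCommands.cpp: the alias path now passes an explicit second argument to GetCommandSPExact, and the flag is hoisted into a named constant so the call site documents itself. A sketch of the named-boolean idiom; the call below is hypothetical, modeled on the hunk:

    // A bare 'true' at the call site says nothing; a named constant does.
    const bool include_aliases = true;
    CommandObjectSP cmd_sp =
        interpreter.GetCommandSPExact(command_name, include_aliases);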
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectDisassemble.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectDisassemble.cpp
index 02a16622c76b..e3c40ed73cf6 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectDisassemble.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectDisassemble.cpp
@@ -30,8 +30,7 @@ using namespace lldb_private;
#define LLDB_OPTIONS_disassemble
#include "CommandOptions.inc"
-CommandObjectDisassemble::CommandOptions::CommandOptions()
- : Options(), func_name(), plugin_name(), flavor_string(), arch() {
+CommandObjectDisassemble::CommandOptions::CommandOptions() {
OptionParsingStarting(nullptr);
}
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectExpression.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectExpression.cpp
index 9d13ccab6d3e..e1a289b219c3 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectExpression.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectExpression.cpp
@@ -24,7 +24,7 @@
using namespace lldb;
using namespace lldb_private;
-CommandObjectExpression::CommandOptions::CommandOptions() : OptionGroup() {}
+CommandObjectExpression::CommandOptions::CommandOptions() {}
CommandObjectExpression::CommandOptions::~CommandOptions() = default;
@@ -200,10 +200,10 @@ CommandObjectExpression::CommandObjectExpression(
"",
eCommandProcessMustBePaused | eCommandTryTargetAPILock),
IOHandlerDelegate(IOHandlerDelegate::Completion::Expression),
- m_option_group(), m_format_options(eFormatDefault),
+ m_format_options(eFormatDefault),
m_repl_option(LLDB_OPT_SET_1, false, "repl", 'r', "Drop into REPL", false,
true),
- m_command_options(), m_expr_line_count(0), m_expr_lines() {
+ m_command_options(), m_expr_line_count(0) {
SetHelpLong(
R"(
Single and multi-line expressions:
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectFrame.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectFrame.cpp
index 2b9f5316409f..70881f2d0061 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectFrame.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectFrame.cpp
@@ -49,7 +49,7 @@ class CommandObjectFrameDiagnose : public CommandObjectParsed {
public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() { OptionParsingStarting(nullptr); }
+ CommandOptions() { OptionParsingStarting(nullptr); }
~CommandOptions() override = default;
@@ -110,8 +110,7 @@ public:
nullptr,
eCommandRequiresThread | eCommandTryTargetAPILock |
eCommandProcessMustBeLaunched |
- eCommandProcessMustBePaused),
- m_options() {
+ eCommandProcessMustBePaused) {
CommandArgumentEntry arg;
CommandArgumentData index_arg;
@@ -222,7 +221,7 @@ class CommandObjectFrameSelect : public CommandObjectParsed {
public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() { OptionParsingStarting(nullptr); }
+ CommandOptions() { OptionParsingStarting(nullptr); }
~CommandOptions() override = default;
@@ -267,8 +266,7 @@ public:
nullptr,
eCommandRequiresThread | eCommandTryTargetAPILock |
eCommandProcessMustBeLaunched |
- eCommandProcessMustBePaused),
- m_options() {
+ eCommandProcessMustBePaused) {
CommandArgumentEntry arg;
CommandArgumentData index_arg;
@@ -394,27 +392,26 @@ public:
interpreter, "frame variable",
"Show variables for the current stack frame. Defaults to all "
"arguments and local variables in scope. Names of argument, "
- "local, file static and file global variables can be specified. "
- "Children of aggregate variables can be specified such as "
- "'var->child.x'. The -> and [] operators in 'frame variable' do "
- "not invoke operator overloads if they exist, but directly access "
- "the specified element. If you want to trigger operator overloads "
- "use the expression command to print the variable instead."
- "\nIt is worth noting that except for overloaded "
- "operators, when printing local variables 'expr local_var' and "
- "'frame var local_var' produce the same "
- "results. However, 'frame variable' is more efficient, since it "
- "uses debug information and memory reads directly, rather than "
- "parsing and evaluating an expression, which may even involve "
- "JITing and running code in the target program.",
+ "local, file static and file global variables can be specified.",
nullptr,
eCommandRequiresFrame | eCommandTryTargetAPILock |
eCommandProcessMustBeLaunched | eCommandProcessMustBePaused |
eCommandRequiresProcess),
- m_option_group(),
m_option_variable(
true), // Include the frame specific options by passing "true"
- m_option_format(eFormatDefault), m_varobj_options() {
+ m_option_format(eFormatDefault) {
+ SetHelpLong(R"(
+Children of aggregate variables can be specified such as 'var->child.x'. In
+'frame variable', the operators -> and [] do not invoke operator overloads if
+they exist, but directly access the specified element. If you want to trigger
+operator overloads use the expression command to print the variable instead.
+
+It is worth noting that except for overloaded operators, when printing local
+variables 'expr local_var' and 'frame var local_var' produce the same results.
+However, 'frame variable' is more efficient, since it uses debug information and
+memory reads directly, rather than parsing and evaluating an expression, which
+may even involve JITing and running code in the target program.)");
+
CommandArgumentEntry arg;
CommandArgumentData var_name_arg;
@@ -558,18 +555,16 @@ protected:
}
}
} else if (num_matches == 0) {
- result.GetErrorStream().Printf("error: no variables matched "
- "the regular expression '%s'.\n",
- entry.c_str());
+ result.AppendErrorWithFormat(
+ "no variables matched the regular expression '%s'.",
+ entry.c_str());
}
} else {
if (llvm::Error err = regex.GetError())
- result.GetErrorStream().Printf(
- "error: %s\n", llvm::toString(std::move(err)).c_str());
+ result.AppendError(llvm::toString(std::move(err)));
else
- result.GetErrorStream().Printf(
- "error: unknown regex error when compiling '%s'\n",
- entry.c_str());
+ result.AppendErrorWithFormat(
+ "unknown regex error when compiling '%s'", entry.c_str());
}
} else // No regex, either exact variable names or variable
// expressions.
@@ -605,14 +600,13 @@ protected:
valobj_sp->GetParent() ? entry.c_str() : nullptr);
valobj_sp->Dump(output_stream, options);
} else {
- const char *error_cstr = error.AsCString(nullptr);
- if (error_cstr)
- result.GetErrorStream().Printf("error: %s\n", error_cstr);
+ if (auto error_cstr = error.AsCString(nullptr))
+ result.AppendError(error_cstr);
else
- result.GetErrorStream().Printf("error: unable to find any "
- "variable expression path that "
- "matches '%s'.\n",
- entry.c_str());
+ result.AppendErrorWithFormat(
+ "unable to find any variable expression path that matches "
+ "'%s'.",
+ entry.c_str());
}
}
}
@@ -680,7 +674,8 @@ protected:
}
}
}
- result.SetStatus(eReturnStatusSuccessFinishResult);
+ if (result.GetStatus() != eReturnStatusFailed)
+ result.SetStatus(eReturnStatusSuccessFinishResult);
}
if (m_option_variable.show_recognized_args) {
@@ -731,7 +726,7 @@ class CommandObjectFrameRecognizerAdd : public CommandObjectParsed {
private:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
Status SetOptionValue(uint32_t option_idx, llvm::StringRef option_arg,
@@ -800,8 +795,7 @@ protected:
public:
CommandObjectFrameRecognizerAdd(CommandInterpreter &interpreter)
: CommandObjectParsed(interpreter, "frame recognizer add",
- "Add a new frame recognizer.", nullptr),
- m_options() {
+ "Add a new frame recognizer.", nullptr) {
SetHelpLong(R"(
Frame recognizers allow for retrieving information about special frames based on
ABI, arguments or other special properties of that frame, even without source
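
Two things change in CommandObjectFrame.cpp beyond the initializer cleanup: the long 'frame variable' explanation moves out of the one-line description into SetHelpLong, and error reporting goes through the CommandReturnObject helpers (AppendError, AppendErrorWithFormat) instead of hand-printing "error: ..." to the error stream, with success only claimed when no failure was recorded. A hedged sketch of that reporting shape, using the names visible in the hunks:

    // Failures are reported through the result object rather than raw Printf.
    static void ReportNoMatch(CommandReturnObject &result, const char *pattern) {
      result.AppendErrorWithFormat(
          "no variables matched the regular expression '%s'.", pattern);
    }

    // Success is only set if nothing above marked the result as failed.
    static void FinishIfNoError(CommandReturnObject &result) {
      if (result.GetStatus() != eReturnStatusFailed)
        result.SetStatus(eReturnStatusSuccessFinishResult);
    }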
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectHelp.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectHelp.cpp
index 8c24efaa08ee..a2f682049ae0 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectHelp.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectHelp.cpp
@@ -46,8 +46,7 @@ CommandObjectHelp::CommandObjectHelp(CommandInterpreter &interpreter)
"Show a list of all debugger "
"commands, or give details "
"about a specific command.",
- "help [<cmd-name>]"),
- m_options() {
+ "help [<cmd-name>]") {
CommandArgumentEntry arg;
CommandArgumentData command_arg;
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectHelp.h b/contrib/llvm-project/lldb/source/Commands/CommandObjectHelp.h
index c924dda7c6d4..71799ebb3121 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectHelp.h
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectHelp.h
@@ -32,7 +32,7 @@ public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectLog.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectLog.cpp
index 05ffba27e65f..d432ab244805 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectLog.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectLog.cpp
@@ -45,8 +45,7 @@ public:
CommandObjectLogEnable(CommandInterpreter &interpreter)
: CommandObjectParsed(interpreter, "log enable",
"Enable logging for a single log channel.",
- nullptr),
- m_options() {
+ nullptr) {
CommandArgumentEntry arg1;
CommandArgumentEntry arg2;
CommandArgumentData channel_arg;
@@ -76,7 +75,7 @@ public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options(), log_file() {}
+ CommandOptions() {}
~CommandOptions() override = default;
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectMemory.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectMemory.cpp
index 9df42f36fafd..1b44a1bd709a 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectMemory.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectMemory.cpp
@@ -23,6 +23,7 @@
#include "lldb/Interpreter/Options.h"
#include "lldb/Symbol/SymbolFile.h"
#include "lldb/Symbol/TypeList.h"
+#include "lldb/Target/ABI.h"
#include "lldb/Target/Language.h"
#include "lldb/Target/MemoryHistory.h"
#include "lldb/Target/MemoryRegionInfo.h"
@@ -47,7 +48,7 @@ using namespace lldb_private;
class OptionGroupReadMemory : public OptionGroup {
public:
OptionGroupReadMemory()
- : m_num_per_line(1, 1), m_view_as_type(), m_offset(0, 0),
+ : m_num_per_line(1, 1), m_offset(0, 0),
m_language_for_type(eLanguageTypeUnknown) {}
~OptionGroupReadMemory() override = default;
@@ -90,6 +91,10 @@ public:
error = m_offset.SetValueFromString(option_value);
break;
+ case '\x01':
+ m_show_tags = true;
+ break;
+
default:
llvm_unreachable("Unimplemented option");
}
@@ -103,6 +108,7 @@ public:
m_force = false;
m_offset.Clear();
m_language_for_type.Clear();
+ m_show_tags = false;
}
Status FinalizeSettings(Target *target, OptionGroupFormat &format_options) {
@@ -276,6 +282,7 @@ public:
bool m_force;
OptionValueUInt64 m_offset;
OptionValueLanguage m_language_for_type;
+ bool m_show_tags = false;
};
// Read memory from the inferior process
@@ -286,12 +293,10 @@ public:
interpreter, "memory read",
"Read from the memory of the current target process.", nullptr,
eCommandRequiresTarget | eCommandProcessMustBePaused),
- m_option_group(), m_format_options(eFormatBytesWithASCII, 1, 8),
- m_memory_options(), m_outfile_options(), m_varobj_options(),
+ m_format_options(eFormatBytesWithASCII, 1, 8),
+
m_next_addr(LLDB_INVALID_ADDRESS), m_prev_byte_size(0),
- m_prev_format_options(eFormatBytesWithASCII, 1, 8),
- m_prev_memory_options(), m_prev_outfile_options(),
- m_prev_varobj_options() {
+ m_prev_format_options(eFormatBytesWithASCII, 1, 8) {
CommandArgumentEntry arg1;
CommandArgumentEntry arg2;
CommandArgumentData start_addr_arg;
@@ -590,9 +595,16 @@ protected:
return false;
}
+ ABISP abi = m_exe_ctx.GetProcessPtr()->GetABI();
+ if (abi)
+ addr = abi->FixDataAddress(addr);
+
if (argc == 2) {
lldb::addr_t end_addr = OptionArgParser::ToAddress(
&m_exe_ctx, command[1].ref(), LLDB_INVALID_ADDRESS, nullptr);
+ if (end_addr != LLDB_INVALID_ADDRESS && abi)
+ end_addr = abi->FixDataAddress(end_addr);
+
if (end_addr == LLDB_INVALID_ADDRESS) {
result.AppendError("invalid end address expression.");
result.AppendError(error.AsCString());
@@ -854,7 +866,7 @@ protected:
size_t bytes_dumped = DumpDataExtractor(
data, output_stream_p, 0, format, item_byte_size, item_count,
num_per_line / target->GetArchitecture().GetDataByteSize(), addr, 0, 0,
- exe_scope);
+ exe_scope, m_memory_options.m_show_tags);
m_next_addr = addr + bytes_dumped;
output_stream_p->EOL();
return true;
@@ -882,7 +894,7 @@ class CommandObjectMemoryFind : public CommandObjectParsed {
public:
class OptionGroupFindMemory : public OptionGroup {
public:
- OptionGroupFindMemory() : OptionGroup(), m_count(1), m_offset(0) {}
+ OptionGroupFindMemory() : m_count(1), m_offset(0) {}
~OptionGroupFindMemory() override = default;
@@ -936,8 +948,7 @@ public:
: CommandObjectParsed(
interpreter, "memory find",
"Find a value in the memory of the current target process.",
- nullptr, eCommandRequiresProcess | eCommandProcessMustBeLaunched),
- m_option_group(), m_memory_options() {
+ nullptr, eCommandRequiresProcess | eCommandProcessMustBeLaunched) {
CommandArgumentEntry arg1;
CommandArgumentEntry arg2;
CommandArgumentData addr_arg;
@@ -1027,6 +1038,12 @@ protected:
return false;
}
+ ABISP abi = m_exe_ctx.GetProcessPtr()->GetABI();
+ if (abi) {
+ low_addr = abi->FixDataAddress(low_addr);
+ high_addr = abi->FixDataAddress(high_addr);
+ }
+
if (high_addr <= low_addr) {
result.AppendError(
"starting address must be smaller than ending address");
@@ -1170,7 +1187,7 @@ class CommandObjectMemoryWrite : public CommandObjectParsed {
public:
class OptionGroupWriteMemory : public OptionGroup {
public:
- OptionGroupWriteMemory() : OptionGroup() {}
+ OptionGroupWriteMemory() {}
~OptionGroupWriteMemory() override = default;
@@ -1222,16 +1239,14 @@ public:
interpreter, "memory write",
"Write to the memory of the current target process.", nullptr,
eCommandRequiresProcess | eCommandProcessMustBeLaunched),
- m_option_group(),
m_format_options(
eFormatBytes, 1, UINT64_MAX,
{std::make_tuple(
eArgTypeFormat,
"The format to use for each of the value to be written."),
- std::make_tuple(
- eArgTypeByteSize,
- "The size in bytes to write from input file or each value.")}),
- m_memory_options() {
+ std::make_tuple(eArgTypeByteSize,
+ "The size in bytes to write from input file or "
+ "each value.")}) {
CommandArgumentEntry arg1;
CommandArgumentEntry arg2;
CommandArgumentData addr_arg;
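Besides the initializer cleanup, the memory-command hunks above route user-supplied addresses through abi->FixDataAddress(...) before they are compared or read, because on targets such as AArch64 the upper bits of a data pointer can carry non-address metadata (top-byte tags, MTE logical tags) that the memory system ignores. The standalone sketch below shows the general idea under a simple top-byte-ignore assumption; the helper name and mask are illustrative only, not LLDB's ABI plugin.

#include <cinttypes>
#include <cstdint>
#include <cstdio>

// Hypothetical helper: clear the ignored top byte of a 64-bit data address.
static uint64_t FixDataAddressTBI(uint64_t addr) {
  const uint64_t kNonAddressBits = UINT64_C(0xff) << 56; // bits 63:56
  return addr & ~kNonAddressBits;
}

int main() {
  uint64_t tagged = UINT64_C(0x0a00ffff12345678); // 0x0a in the ignored top byte
  std::printf("0x%" PRIx64 " -> 0x%" PRIx64 "\n", tagged,
              FixDataAddressTBI(tagged));
  // prints: 0xa00ffff12345678 -> 0xffff12345678
  return 0;
}

Stripping these bits up front is what lets the later range checks (for example "starting address must be smaller than ending address") operate on plain virtual addresses.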
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectMemoryTag.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectMemoryTag.cpp
index 840f81719d7d..d108cf58b18c 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectMemoryTag.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectMemoryTag.cpp
@@ -12,6 +12,7 @@
#include "lldb/Interpreter/OptionArgParser.h"
#include "lldb/Interpreter/OptionGroupFormat.h"
#include "lldb/Interpreter/OptionValueString.h"
+#include "lldb/Target/ABI.h"
#include "lldb/Target/Process.h"
using namespace lldb;
@@ -85,6 +86,17 @@ protected:
// If this fails the list of regions is cleared, so we don't need to read
// the return status here.
process->GetMemoryRegions(memory_regions);
+
+ lldb::addr_t logical_tag = tag_manager->GetLogicalTag(start_addr);
+
+ // The tag manager only removes tag bits. These addresses may include other
+ // non-address bits that must also be ignored.
+ ABISP abi = process->GetABI();
+ if (abi) {
+ start_addr = abi->FixDataAddress(start_addr);
+ end_addr = abi->FixDataAddress(end_addr);
+ }
+
llvm::Expected<MemoryTagManager::TagRange> tagged_range =
tag_manager->MakeTaggedRange(start_addr, end_addr, memory_regions);
@@ -101,7 +113,6 @@ protected:
return false;
}
- lldb::addr_t logical_tag = tag_manager->GetLogicalTag(start_addr);
result.AppendMessageWithFormatv("Logical tag: {0:x}", logical_tag);
result.AppendMessage("Allocation tags:");
@@ -127,7 +138,7 @@ class CommandObjectMemoryTagWrite : public CommandObjectParsed {
public:
class OptionGroupTagWrite : public OptionGroup {
public:
- OptionGroupTagWrite() : OptionGroup(), m_end_addr(LLDB_INVALID_ADDRESS) {}
+ OptionGroupTagWrite() : m_end_addr(LLDB_INVALID_ADDRESS) {}
~OptionGroupTagWrite() override = default;
@@ -166,8 +177,7 @@ public:
"contains the given address.",
nullptr,
eCommandRequiresTarget | eCommandRequiresProcess |
- eCommandProcessMustBePaused),
- m_option_group(), m_tag_write_options() {
+ eCommandProcessMustBePaused) {
// Address
m_arguments.push_back(
CommandArgumentEntry{CommandArgumentData(eArgTypeAddressOrExpression)});
@@ -231,6 +241,12 @@ protected:
// the return status here.
process->GetMemoryRegions(memory_regions);
+ // The tag manager only removes tag bits. These addresses may include other
+ // non-address bits that must also be ignored.
+ ABISP abi = process->GetABI();
+ if (abi)
+ start_addr = abi->FixDataAddress(start_addr);
+
// We have to assume start_addr is not granule aligned.
// So if we simply made a range:
// (start_addr, start_addr + (N * granule_size))
@@ -254,6 +270,10 @@ protected:
end_addr =
aligned_start_addr + (tags.size() * tag_manager->GetGranuleSize());
+ // Remove non-address bits that aren't memory tags
+ if (abi)
+ end_addr = abi->FixDataAddress(end_addr);
+
// Now we've aligned the start address so if we ask for another range
// using the number of tags N, we'll get back a range that is also N
// granules in size.
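The memory tag write hunk above also aligns the start address down to a granule boundary so that N tags always cover N whole granules. A standalone sketch of that alignment arithmetic, assuming the 16-byte AArch64 MTE granule size; the constant and helper names are illustrative, not the MemoryTagManager API.

#include <cinttypes>
#include <cstdint>
#include <cstdio>

// Assumption for illustration: the 16-byte granule used by AArch64 MTE.
static const uint64_t kGranuleSize = 16;

static uint64_t AlignDownToGranule(uint64_t addr) {
  return addr & ~(kGranuleSize - 1);
}

int main() {
  uint64_t start_addr = UINT64_C(0x1009); // not granule aligned
  uint64_t num_tags = 3;

  uint64_t aligned_start = AlignDownToGranule(start_addr);
  uint64_t end_addr = aligned_start + num_tags * kGranuleSize;

  // 0x1009 aligns down to 0x1000, so 3 tags cover [0x1000, 0x1030).
  std::printf("[0x%" PRIx64 ", 0x%" PRIx64 ")\n", aligned_start, end_addr);
  return 0;
}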
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectMultiword.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectMultiword.cpp
index e800bcc12bd3..0629342748aa 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectMultiword.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectMultiword.cpp
@@ -302,31 +302,6 @@ const char *CommandObjectMultiword::GetRepeatCommand(Args &current_command_args,
return sub_command_object->GetRepeatCommand(current_command_args, index);
}
-void CommandObjectMultiword::AproposAllSubCommands(llvm::StringRef prefix,
- llvm::StringRef search_word,
- StringList &commands_found,
- StringList &commands_help) {
- CommandObject::CommandMap::const_iterator pos;
-
- for (pos = m_subcommand_dict.begin(); pos != m_subcommand_dict.end(); ++pos) {
- const char *command_name = pos->first.c_str();
- CommandObject *sub_cmd_obj = pos->second.get();
- StreamString complete_command_name;
-
- complete_command_name << prefix << " " << command_name;
-
- if (sub_cmd_obj->HelpTextContainsWord(search_word)) {
- commands_found.AppendString(complete_command_name.GetString());
- commands_help.AppendString(sub_cmd_obj->GetHelp());
- }
-
- if (sub_cmd_obj->IsMultiwordObject())
- sub_cmd_obj->AproposAllSubCommands(complete_command_name.GetString(),
- search_word, commands_found,
- commands_help);
- }
-}
-
CommandObjectProxy::CommandObjectProxy(CommandInterpreter &interpreter,
const char *name, const char *help,
const char *syntax, uint32_t flags)
@@ -409,16 +384,6 @@ CommandObject *CommandObjectProxy::GetSubcommandObject(llvm::StringRef sub_cmd,
return nullptr;
}
-void CommandObjectProxy::AproposAllSubCommands(llvm::StringRef prefix,
- llvm::StringRef search_word,
- StringList &commands_found,
- StringList &commands_help) {
- CommandObject *proxy_command = GetProxyCommandObject();
- if (proxy_command)
- return proxy_command->AproposAllSubCommands(prefix, search_word,
- commands_found, commands_help);
-}
-
bool CommandObjectProxy::LoadSubCommand(
llvm::StringRef cmd_name, const lldb::CommandObjectSP &command_sp) {
CommandObject *proxy_command = GetProxyCommandObject();
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectPlatform.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectPlatform.cpp
index 10dd87824911..4c18465c868a 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectPlatform.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectPlatform.cpp
@@ -145,7 +145,6 @@ public:
"Create a platform if needed and select it as the "
"current platform.",
"platform select <platform-name>", 0),
- m_option_group(),
m_platform_options(
false) // Don't include the "--platform" option by passing false
{
@@ -377,7 +376,6 @@ public:
"Set settings for the current target's platform, "
"or for a platform by name.",
"platform settings", 0),
- m_options(),
m_option_working_dir(LLDB_OPT_SET_1, false, "working-dir", 'w',
CommandCompletions::eRemoteDiskDirectoryCompletion,
eArgTypePath,
@@ -417,8 +415,7 @@ public:
CommandObjectPlatformMkDir(CommandInterpreter &interpreter)
: CommandObjectParsed(interpreter, "platform mkdir",
"Make a new directory on the remote end.", nullptr,
- 0),
- m_options() {}
+ 0) {}
~CommandObjectPlatformMkDir() override = default;
@@ -464,8 +461,7 @@ class CommandObjectPlatformFOpen : public CommandObjectParsed {
public:
CommandObjectPlatformFOpen(CommandInterpreter &interpreter)
: CommandObjectParsed(interpreter, "platform file open",
- "Open a file on the remote end.", nullptr, 0),
- m_options() {}
+ "Open a file on the remote end.", nullptr, 0) {}
~CommandObjectPlatformFOpen() override = default;
@@ -566,8 +562,7 @@ public:
CommandObjectPlatformFRead(CommandInterpreter &interpreter)
: CommandObjectParsed(interpreter, "platform file read",
"Read data from a file on the remote end.", nullptr,
- 0),
- m_options() {}
+ 0) {}
~CommandObjectPlatformFRead() override = default;
@@ -605,7 +600,7 @@ public:
protected:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -660,8 +655,7 @@ public:
CommandObjectPlatformFWrite(CommandInterpreter &interpreter)
: CommandObjectParsed(interpreter, "platform file write",
"Write data to a file on the remote end.", nullptr,
- 0),
- m_options() {}
+ 0) {}
~CommandObjectPlatformFWrite() override = default;
@@ -698,7 +692,7 @@ public:
protected:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -1124,8 +1118,7 @@ public:
: CommandObjectParsed(interpreter, "platform process launch",
"Launch a new process on a remote platform.",
"platform process launch program",
- eCommandRequiresTarget | eCommandTryTargetAPILock),
- m_options(), m_all_options() {
+ eCommandRequiresTarget | eCommandTryTargetAPILock) {
m_all_options.Append(&m_options);
m_all_options.Finalize();
}
@@ -1217,8 +1210,7 @@ public:
: CommandObjectParsed(interpreter, "platform process list",
"List processes on a remote platform by name, pid, "
"or many other matching attributes.",
- "platform process list", 0),
- m_options() {}
+ "platform process list", 0) {}
~CommandObjectPlatformProcessList() override = default;
@@ -1324,7 +1316,7 @@ protected:
class CommandOptions : public Options {
public:
- CommandOptions() : Options(), match_info() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -1560,7 +1552,7 @@ class CommandObjectPlatformProcessAttach : public CommandObjectParsed {
public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {
+ CommandOptions() {
// Keep default values of all options in one place: OptionParsingStarting
// ()
OptionParsingStarting(nullptr);
@@ -1622,8 +1614,7 @@ public:
CommandObjectPlatformProcessAttach(CommandInterpreter &interpreter)
: CommandObjectParsed(interpreter, "platform process attach",
"Attach to a process.",
- "platform process attach <cmd-options>"),
- m_options() {}
+ "platform process attach <cmd-options>") {}
~CommandObjectPlatformProcessAttach() override = default;
@@ -1689,7 +1680,7 @@ class CommandObjectPlatformShell : public CommandObjectRaw {
public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -1747,8 +1738,7 @@ public:
CommandObjectPlatformShell(CommandInterpreter &interpreter)
: CommandObjectRaw(interpreter, "platform shell",
"Run a shell command on the current platform.",
- "platform shell <shell-command>", 0),
- m_options() {}
+ "platform shell <shell-command>", 0) {}
~CommandObjectPlatformShell() override = default;
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectProcess.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectProcess.cpp
index 5fd1718e8484..c73f0df0aaf2 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectProcess.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectProcess.cpp
@@ -110,9 +110,8 @@ public:
interpreter, "process launch",
"Launch the executable in the debugger.", nullptr,
eCommandRequiresTarget, "restart"),
- m_options(),
- m_class_options("scripted process", true, 'C', 'k', 'v', 0),
- m_all_options() {
+
+ m_class_options("scripted process", true, 'C', 'k', 'v', 0) {
m_all_options.Append(&m_options);
m_all_options.Append(&m_class_options, LLDB_OPT_SET_1 | LLDB_OPT_SET_2,
LLDB_OPT_SET_ALL);
@@ -300,7 +299,7 @@ class CommandObjectProcessAttach : public CommandObjectProcessLaunchOrAttach {
public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {
+ CommandOptions() {
// Keep default values of all options in one place: OptionParsingStarting
// ()
OptionParsingStarting(nullptr);
@@ -364,8 +363,7 @@ public:
CommandObjectProcessAttach(CommandInterpreter &interpreter)
: CommandObjectProcessLaunchOrAttach(
interpreter, "process attach", "Attach to a process.",
- "process attach <cmd-options>", 0, "attach"),
- m_options() {}
+ "process attach <cmd-options>", 0, "attach") {}
~CommandObjectProcessAttach() override = default;
@@ -502,15 +500,14 @@ public:
"Continue execution of all threads in the current process.",
"process continue",
eCommandRequiresProcess | eCommandTryTargetAPILock |
- eCommandProcessMustBeLaunched | eCommandProcessMustBePaused),
- m_options() {}
+ eCommandProcessMustBeLaunched | eCommandProcessMustBePaused) {}
~CommandObjectProcessContinue() override = default;
protected:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {
+ CommandOptions() {
// Keep default values of all options in one place: OptionParsingStarting
// ()
OptionParsingStarting(nullptr);
@@ -651,7 +648,7 @@ class CommandObjectProcessDetach : public CommandObjectParsed {
public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() { OptionParsingStarting(nullptr); }
+ CommandOptions() { OptionParsingStarting(nullptr); }
~CommandOptions() override = default;
@@ -698,8 +695,7 @@ public:
"Detach from the current target process.",
"process detach",
eCommandRequiresProcess | eCommandTryTargetAPILock |
- eCommandProcessMustBeLaunched),
- m_options() {}
+ eCommandProcessMustBeLaunched) {}
~CommandObjectProcessDetach() override = default;
@@ -741,7 +737,7 @@ class CommandObjectProcessConnect : public CommandObjectParsed {
public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {
+ CommandOptions() {
// Keep default values of all options in one place: OptionParsingStarting
// ()
OptionParsingStarting(nullptr);
@@ -781,8 +777,7 @@ public:
CommandObjectProcessConnect(CommandInterpreter &interpreter)
: CommandObjectParsed(interpreter, "process connect",
"Connect to a remote debug service.",
- "process connect <remote-url>", 0),
- m_options() {}
+ "process connect <remote-url>", 0) {}
~CommandObjectProcessConnect() override = default;
@@ -863,7 +858,7 @@ class CommandObjectProcessLoad : public CommandObjectParsed {
public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {
+ CommandOptions() {
// Keep default values of all options in one place: OptionParsingStarting
// ()
OptionParsingStarting(nullptr);
@@ -907,8 +902,7 @@ public:
"process load <filename> [<filename> ...]",
eCommandRequiresProcess | eCommandTryTargetAPILock |
eCommandProcessMustBeLaunched |
- eCommandProcessMustBePaused),
- m_options() {}
+ eCommandProcessMustBePaused) {}
~CommandObjectProcessLoad() override = default;
@@ -1220,8 +1214,7 @@ public:
class CommandOptions : public Options {
public:
- CommandOptions()
- : Options(), m_requested_save_core_style(eSaveCoreUnspecified) {}
+ CommandOptions() : m_requested_save_core_style(eSaveCoreUnspecified) {}
~CommandOptions() override = default;
@@ -1316,8 +1309,7 @@ public:
interpreter, "process status",
"Show status and stop location for the current target process.",
"process status",
- eCommandRequiresProcess | eCommandTryTargetAPILock),
- m_options() {}
+ eCommandRequiresProcess | eCommandTryTargetAPILock) {}
~CommandObjectProcessStatus() override = default;
@@ -1325,7 +1317,7 @@ public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -1430,7 +1422,7 @@ class CommandObjectProcessHandle : public CommandObjectParsed {
public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() { OptionParsingStarting(nullptr); }
+ CommandOptions() { OptionParsingStarting(nullptr); }
~CommandOptions() override = default;
@@ -1477,8 +1469,7 @@ public:
"Manage LLDB handling of OS signals for the "
"current target process. Defaults to showing "
"current policy.",
- nullptr, eCommandRequiresTarget),
- m_options() {
+ nullptr, eCommandRequiresTarget) {
SetHelpLong("\nIf no signals are specified, update them all. If no update "
"option is specified, list the current values.");
CommandArgumentEntry arg;
@@ -1687,7 +1678,7 @@ class CommandObjectProcessTraceSave : public CommandObjectParsed {
public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() { OptionParsingStarting(nullptr); }
+ CommandOptions() { OptionParsingStarting(nullptr); }
Status SetOptionValue(uint32_t option_idx, llvm::StringRef option_arg,
ExecutionContext *execution_context) override {
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectRegexCommand.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectRegexCommand.cpp
index 46295421834a..7ddc5c0c7e08 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectRegexCommand.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectRegexCommand.cpp
@@ -20,7 +20,7 @@ CommandObjectRegexCommand::CommandObjectRegexCommand(
bool is_removable)
: CommandObjectRaw(interpreter, name, help, syntax),
m_max_matches(max_matches), m_completion_type_mask(completion_type_mask),
- m_entries(), m_is_removable(is_removable) {}
+ m_is_removable(is_removable) {}
// Destructor
CommandObjectRegexCommand::~CommandObjectRegexCommand() = default;
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectRegister.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectRegister.cpp
index 6fd71c90c327..933c243dedd5 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectRegister.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectRegister.cpp
@@ -43,8 +43,7 @@ public:
nullptr,
eCommandRequiresFrame | eCommandRequiresRegContext |
eCommandProcessMustBeLaunched | eCommandProcessMustBePaused),
- m_option_group(), m_format_options(eFormatDefault),
- m_command_options() {
+ m_format_options(eFormatDefault) {
CommandArgumentEntry arg;
CommandArgumentData register_arg;
@@ -232,8 +231,7 @@ protected:
class CommandOptions : public OptionGroup {
public:
CommandOptions()
- : OptionGroup(),
- set_indexes(OptionValue::ConvertTypeToMask(OptionValue::eTypeUInt64)),
+ : set_indexes(OptionValue::ConvertTypeToMask(OptionValue::eTypeUInt64)),
dump_all_sets(false, false), // Initial and default values are false
alternate_name(false, false) {}
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectReproducer.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectReproducer.cpp
index 4db3e070df3c..7e0ea65e148e 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectReproducer.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectReproducer.cpp
@@ -227,7 +227,7 @@ public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -355,7 +355,7 @@ public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options(), file() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -602,7 +602,7 @@ public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options(), file() {}
+ CommandOptions() {}
~CommandOptions() override = default;
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectScript.h b/contrib/llvm-project/lldb/source/Commands/CommandObjectScript.h
index b9fee7124818..97fc05421bd0 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectScript.h
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectScript.h
@@ -21,7 +21,7 @@ public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
Status SetOptionValue(uint32_t option_idx, llvm::StringRef option_arg,
ExecutionContext *execution_context) override;
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectSession.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectSession.cpp
index c2cdfa29a3f6..c11839a48de0 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectSession.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectSession.cpp
@@ -62,8 +62,7 @@ public:
"using \"!<INDEX>\". \"!-<OFFSET>\" will re-run "
"the command that is <OFFSET> commands from the end"
" of the list (counting the current command).",
- nullptr),
- m_options() {}
+ nullptr) {}
~CommandObjectSessionHistory() override = default;
@@ -73,8 +72,7 @@ protected:
class CommandOptions : public Options {
public:
CommandOptions()
- : Options(), m_start_idx(0), m_stop_idx(0), m_count(0), m_clear(false) {
- }
+ : m_start_idx(0), m_stop_idx(0), m_count(0), m_clear(false) {}
~CommandOptions() override = default;
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectSettings.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectSettings.cpp
index 13ff27c78dea..391e728d9d8a 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectSettings.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectSettings.cpp
@@ -27,8 +27,7 @@ class CommandObjectSettingsSet : public CommandObjectRaw {
public:
CommandObjectSettingsSet(CommandInterpreter &interpreter)
: CommandObjectRaw(interpreter, "settings set",
- "Set the value of the specified debugger setting."),
- m_options() {
+ "Set the value of the specified debugger setting.") {
CommandArgumentEntry arg1;
CommandArgumentEntry arg2;
CommandArgumentData var_name_arg;
@@ -87,7 +86,7 @@ insert-before or insert-after.");
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -304,8 +303,7 @@ public:
"Write matching debugger settings and their "
"current values to a file that can be read in with "
"\"settings read\". Defaults to writing all settings.",
- nullptr),
- m_options() {
+ nullptr) {
CommandArgumentEntry arg1;
CommandArgumentData var_name_arg;
@@ -327,7 +325,7 @@ public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -417,8 +415,7 @@ public:
: CommandObjectParsed(
interpreter, "settings read",
"Read settings previously saved to a file with \"settings write\".",
- nullptr),
- m_options() {}
+ nullptr) {}
~CommandObjectSettingsRead() override = default;
@@ -426,7 +423,7 @@ public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectSource.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectSource.cpp
index fb33f41b8ef9..6c6706f4a98b 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectSource.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectSource.cpp
@@ -36,7 +36,7 @@ using namespace lldb_private;
class CommandObjectSourceInfo : public CommandObjectParsed {
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -118,8 +118,7 @@ public:
"Display source line information for the current target "
"process. Defaults to instruction pointer in current stack "
"frame.",
- nullptr, eCommandRequiresTarget),
- m_options() {}
+ nullptr, eCommandRequiresTarget) {}
~CommandObjectSourceInfo() override = default;
@@ -624,7 +623,7 @@ protected:
class CommandObjectSourceList : public CommandObjectParsed {
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -723,8 +722,7 @@ public:
: CommandObjectParsed(interpreter, "source list",
"Display source code for the current target "
"process as specified by options.",
- nullptr, eCommandRequiresTarget),
- m_options() {}
+ nullptr, eCommandRequiresTarget) {}
~CommandObjectSourceList() override = default;
@@ -757,7 +755,7 @@ protected:
SourceInfo(ConstString name, const LineEntry &line_entry)
: function(name), line_entry(line_entry) {}
- SourceInfo() : function(), line_entry() {}
+ SourceInfo() {}
bool IsValid() const { return (bool)function && line_entry.IsValid(); }
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectStats.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectStats.cpp
index f32d559ca039..63aa36b39f4d 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectStats.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectStats.cpp
@@ -65,7 +65,7 @@ protected:
class CommandObjectStatsDump : public CommandObjectParsed {
class CommandOptions : public Options {
public:
- CommandOptions() : Options() { OptionParsingStarting(nullptr); }
+ CommandOptions() { OptionParsingStarting(nullptr); }
Status SetOptionValue(uint32_t option_idx, llvm::StringRef option_arg,
ExecutionContext *execution_context) override {
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectTarget.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectTarget.cpp
index 2a42eb22938d..157065bde10e 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectTarget.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectTarget.cpp
@@ -216,7 +216,6 @@ public:
interpreter, "target create",
"Create a target using the argument as the main executable.",
nullptr),
- m_option_group(), m_arch_option(),
m_platform_options(true), // Include the --platform option.
m_core_file(LLDB_OPT_SET_1, false, "core", 'c', 0, eArgTypeFilename,
"Fullpath to a core file to use for this target."),
@@ -227,8 +226,7 @@ public:
"are not in the executable."),
m_remote_file(
LLDB_OPT_SET_1, false, "remote-file", 'r', 0, eArgTypeFilename,
- "Fullpath to the file on the remote host if debugging remotely."),
- m_add_dependents() {
+ "Fullpath to the file on the remote host if debugging remotely.") {
CommandArgumentEntry arg;
CommandArgumentData file_arg;
@@ -534,8 +532,8 @@ public:
: CommandObjectParsed(interpreter, "target delete",
"Delete one or more targets by target index.",
nullptr),
- m_option_group(), m_all_option(LLDB_OPT_SET_1, false, "all", 'a',
- "Delete all targets.", false, true),
+ m_all_option(LLDB_OPT_SET_1, false, "all", 'a', "Delete all targets.",
+ false, true),
m_cleanup_option(
LLDB_OPT_SET_1, false, "clean", 'c',
"Perform extra cleanup to minimize memory consumption after "
@@ -678,7 +676,6 @@ public:
"Read global variables for the current target, "
"before or while running a process.",
nullptr, eCommandRequiresTarget),
- m_option_group(),
m_option_variable(false), // Don't include frame options
m_option_format(eFormatDefault),
m_option_compile_units(LLDB_OPT_SET_1, false, "file", SHORT_OPTION_FILE,
@@ -691,8 +688,7 @@ public:
eArgTypeFilename,
"A basename or fullpath to a shared library to use in the search "
"for global "
- "variables. This option can be specified multiple times."),
- m_varobj_options() {
+ "variables. This option can be specified multiple times.") {
CommandArgumentEntry arg;
CommandArgumentData var_name_arg;
@@ -1605,7 +1601,6 @@ static size_t LookupTypeInModule(Target *target,
TypeList type_list;
if (module && name_cstr && name_cstr[0]) {
const uint32_t max_num_matches = UINT32_MAX;
- size_t num_matches = 0;
bool name_is_fully_qualified = false;
ConstString name(name_cstr);
@@ -1616,8 +1611,10 @@ static size_t LookupTypeInModule(Target *target,
if (type_list.Empty())
return 0;
+ const uint64_t num_matches = type_list.GetSize();
+
strm.Indent();
- strm.Printf("%" PRIu64 " match%s found in ", (uint64_t)num_matches,
+ strm.Printf("%" PRIu64 " match%s found in ", num_matches,
num_matches > 1 ? "es" : "");
DumpFullpath(strm, &module->GetFileSpec(), 0);
strm.PutCString(":\n");
@@ -1927,8 +1924,7 @@ public:
: CommandObjectTargetModulesModuleAutoComplete(
interpreter, "target modules dump symtab",
"Dump the symbol table from one or more target modules.", nullptr,
- eCommandRequiresTarget),
- m_options() {}
+ eCommandRequiresTarget) {}
~CommandObjectTargetModulesDumpSymtab() override = default;
@@ -1936,7 +1932,7 @@ public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -2353,7 +2349,7 @@ protected:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() { OptionParsingStarting(nullptr); }
+ CommandOptions() { OptionParsingStarting(nullptr); }
Status SetOptionValue(uint32_t option_idx, llvm::StringRef option_arg,
ExecutionContext *execution_context) override {
@@ -2422,11 +2418,11 @@ public:
"Add a new module to the current target's modules.",
"target modules add [<module>]",
eCommandRequiresTarget),
- m_option_group(), m_symbol_file(LLDB_OPT_SET_1, false, "symfile", 's',
- 0, eArgTypeFilename,
- "Fullpath to a stand alone debug "
- "symbols file for when debug symbols "
- "are not in the executable.") {
+ m_symbol_file(LLDB_OPT_SET_1, false, "symfile", 's', 0,
+ eArgTypeFilename,
+ "Fullpath to a stand alone debug "
+ "symbols file for when debug symbols "
+ "are not in the executable.") {
m_option_group.Append(&m_uuid_option_group, LLDB_OPT_SET_ALL,
LLDB_OPT_SET_1);
m_option_group.Append(&m_symbol_file, LLDB_OPT_SET_ALL, LLDB_OPT_SET_1);
@@ -2574,7 +2570,6 @@ public:
"target modules load [--file <module> --uuid <uuid>] <sect-name> "
"<address> [<sect-name> <address> ....]",
eCommandRequiresTarget),
- m_option_group(),
m_file_option(LLDB_OPT_SET_1, false, "file", 'f', 0, eArgTypeName,
"Fullpath or basename for module to load.", ""),
m_load_option(LLDB_OPT_SET_1, false, "load", 'l',
@@ -2842,7 +2837,7 @@ class CommandObjectTargetModulesList : public CommandObjectParsed {
public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options(), m_format_array() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -2885,8 +2880,7 @@ public:
: CommandObjectParsed(
interpreter, "target modules list",
"List current executable and dependent shared library images.",
- "target modules list [<cmd-options>]"),
- m_options() {}
+ "target modules list [<cmd-options>]") {}
~CommandObjectTargetModulesList() override = default;
@@ -3185,7 +3179,7 @@ public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options(), m_str() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -3242,8 +3236,7 @@ public:
interpreter, "target modules show-unwind",
"Show synthesized unwind instructions for a function.", nullptr,
eCommandRequiresTarget | eCommandRequiresProcess |
- eCommandProcessMustBeLaunched | eCommandProcessMustBePaused),
- m_options() {}
+ eCommandProcessMustBeLaunched | eCommandProcessMustBePaused) {}
~CommandObjectTargetModulesShowUnwind() override = default;
@@ -3532,7 +3525,7 @@ public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() { OptionParsingStarting(nullptr); }
+ CommandOptions() { OptionParsingStarting(nullptr); }
~CommandOptions() override = default;
@@ -3647,8 +3640,7 @@ public:
: CommandObjectParsed(interpreter, "target modules lookup",
"Look up information within executable and "
"dependent shared library images.",
- nullptr, eCommandRequiresTarget),
- m_options() {
+ nullptr, eCommandRequiresTarget) {
CommandArgumentEntry arg;
CommandArgumentData file_arg;
@@ -3954,7 +3946,6 @@ public:
"to specify a module.",
"target symbols add <cmd-options> [<symfile>]",
eCommandRequiresTarget),
- m_option_group(),
m_file_option(
LLDB_OPT_SET_1, false, "shlib", 's',
CommandCompletions::eModuleCompletion, eArgTypeShlibName,
@@ -4441,7 +4432,7 @@ class CommandObjectTargetStopHookAdd : public CommandObjectParsed,
public:
class CommandOptions : public OptionGroup {
public:
- CommandOptions() : OptionGroup(), m_line_end(UINT_MAX), m_one_liner() {}
+ CommandOptions() : m_line_end(UINT_MAX) {}
~CommandOptions() override = default;
@@ -4598,7 +4589,7 @@ public:
"target stop-hook add"),
IOHandlerDelegateMultiline("DONE",
IOHandlerDelegate::Completion::LLDBCommand),
- m_options(), m_python_class_options("scripted stop-hook", true, 'P') {
+ m_python_class_options("scripted stop-hook", true, 'P') {
SetHelpLong(
R"(
Command Based stop-hooks:
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectThread.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectThread.cpp
index 71e67f6ba208..137aaa81c61a 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectThread.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectThread.cpp
@@ -47,7 +47,7 @@ class CommandObjectThreadBacktrace : public CommandObjectIterateOverThreads {
public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {
+ CommandOptions() {
// Keep default values of all options in one place: OptionParsingStarting
// ()
OptionParsingStarting(nullptr);
@@ -119,8 +119,7 @@ public:
nullptr,
eCommandRequiresProcess | eCommandRequiresThread |
eCommandTryTargetAPILock | eCommandProcessMustBeLaunched |
- eCommandProcessMustBePaused),
- m_options() {}
+ eCommandProcessMustBePaused) {}
~CommandObjectThreadBacktrace() override = default;
@@ -203,7 +202,7 @@ static constexpr OptionEnumValues TriRunningModes() {
class ThreadStepScopeOptionGroup : public OptionGroup {
public:
- ThreadStepScopeOptionGroup() : OptionGroup() {
+ ThreadStepScopeOptionGroup() {
// Keep default values of all options in one place: OptionParsingStarting
// ()
OptionParsingStarting(nullptr);
@@ -327,7 +326,7 @@ public:
eCommandTryTargetAPILock |
eCommandProcessMustBeLaunched |
eCommandProcessMustBePaused),
- m_step_type(step_type), m_step_scope(step_scope), m_options(),
+ m_step_type(step_type), m_step_scope(step_scope),
m_class_options("scripted step") {
CommandArgumentEntry arg;
CommandArgumentData thread_id_arg;
@@ -780,7 +779,7 @@ public:
uint32_t m_thread_idx = LLDB_INVALID_THREAD_ID;
uint32_t m_frame_idx = LLDB_INVALID_FRAME_ID;
- CommandOptions() : Options() {
+ CommandOptions() {
// Keep default values of all options in one place: OptionParsingStarting
// ()
OptionParsingStarting(nullptr);
@@ -861,8 +860,7 @@ public:
" is provided, stepping will stop when the first one is hit.",
nullptr,
eCommandRequiresThread | eCommandTryTargetAPILock |
- eCommandProcessMustBeLaunched | eCommandProcessMustBePaused),
- m_options() {
+ eCommandProcessMustBeLaunched | eCommandProcessMustBePaused) {
CommandArgumentEntry arg;
CommandArgumentData line_num_arg;
@@ -1186,7 +1184,7 @@ class CommandObjectThreadInfo : public CommandObjectIterateOverThreads {
public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() { OptionParsingStarting(nullptr); }
+ CommandOptions() { OptionParsingStarting(nullptr); }
~CommandOptions() override = default;
@@ -1231,8 +1229,7 @@ public:
"current thread.",
"thread info",
eCommandRequiresProcess | eCommandTryTargetAPILock |
- eCommandProcessMustBeLaunched | eCommandProcessMustBePaused),
- m_options() {
+ eCommandProcessMustBeLaunched | eCommandProcessMustBePaused) {
m_add_return = false;
}
@@ -1331,7 +1328,7 @@ class CommandObjectThreadReturn : public CommandObjectRaw {
public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {
+ CommandOptions() {
// Keep default values of all options in one place: OptionParsingStarting
// ()
OptionParsingStarting(nullptr);
@@ -1386,8 +1383,7 @@ public:
"thread return",
eCommandRequiresFrame | eCommandTryTargetAPILock |
eCommandProcessMustBeLaunched |
- eCommandProcessMustBePaused),
- m_options() {
+ eCommandProcessMustBePaused) {
CommandArgumentEntry arg;
CommandArgumentData expression_arg;
@@ -1496,7 +1492,7 @@ class CommandObjectThreadJump : public CommandObjectParsed {
public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() { OptionParsingStarting(nullptr); }
+ CommandOptions() { OptionParsingStarting(nullptr); }
~CommandOptions() override = default;
@@ -1556,8 +1552,7 @@ public:
interpreter, "thread jump",
"Sets the program counter to a new address.", "thread jump",
eCommandRequiresFrame | eCommandTryTargetAPILock |
- eCommandProcessMustBeLaunched | eCommandProcessMustBePaused),
- m_options() {}
+ eCommandProcessMustBeLaunched | eCommandProcessMustBePaused) {}
~CommandObjectThreadJump() override = default;
@@ -1633,7 +1628,7 @@ class CommandObjectThreadPlanList : public CommandObjectIterateOverThreads {
public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {
+ CommandOptions() {
// Keep default values of all options in one place: OptionParsingStarting
// ()
OptionParsingStarting(nullptr);
@@ -1695,8 +1690,7 @@ public:
nullptr,
eCommandRequiresProcess | eCommandRequiresThread |
eCommandTryTargetAPILock | eCommandProcessMustBeLaunched |
- eCommandProcessMustBePaused),
- m_options() {}
+ eCommandProcessMustBePaused) {}
~CommandObjectThreadPlanList() override = default;
@@ -2004,7 +1998,7 @@ class CommandObjectTraceDumpInstructions
public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() { OptionParsingStarting(nullptr); }
+ CommandOptions() { OptionParsingStarting(nullptr); }
~CommandOptions() override = default;
@@ -2085,7 +2079,7 @@ public:
eCommandRequiresProcess | eCommandTryTargetAPILock |
eCommandProcessMustBeLaunched | eCommandProcessMustBePaused |
eCommandProcessMustBeTraced),
- m_options(), m_create_repeat_command_just_invoked(false) {}
+ m_create_repeat_command_just_invoked(false) {}
~CommandObjectTraceDumpInstructions() override = default;
@@ -2165,7 +2159,7 @@ class CommandObjectTraceDumpInfo : public CommandObjectIterateOverThreads {
public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() { OptionParsingStarting(nullptr); }
+ CommandOptions() { OptionParsingStarting(nullptr); }
~CommandOptions() override = default;
@@ -2213,8 +2207,7 @@ public:
nullptr,
eCommandRequiresProcess | eCommandTryTargetAPILock |
eCommandProcessMustBeLaunched | eCommandProcessMustBePaused |
- eCommandProcessMustBeTraced),
- m_options() {}
+ eCommandProcessMustBeTraced) {}
~CommandObjectTraceDumpInfo() override = default;
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectTrace.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectTrace.cpp
index 62ee48ca0546..53f1b0a32e60 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectTrace.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectTrace.cpp
@@ -40,7 +40,7 @@ class CommandObjectTraceLoad : public CommandObjectParsed {
public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() { OptionParsingStarting(nullptr); }
+ CommandOptions() { OptionParsingStarting(nullptr); }
~CommandOptions() override = default;
@@ -74,8 +74,7 @@ public:
CommandObjectTraceLoad(CommandInterpreter &interpreter)
: CommandObjectParsed(interpreter, "trace load",
"Load a processor trace session from a JSON file.",
- "trace load"),
- m_options() {}
+ "trace load") {}
~CommandObjectTraceLoad() override = default;
@@ -139,7 +138,7 @@ class CommandObjectTraceDump : public CommandObjectParsed {
public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() { OptionParsingStarting(nullptr); }
+ CommandOptions() { OptionParsingStarting(nullptr); }
~CommandOptions() override = default;
@@ -173,8 +172,7 @@ public:
CommandObjectTraceDump(CommandInterpreter &interpreter)
: CommandObjectParsed(interpreter, "trace dump",
"Dump the loaded processor trace data.",
- "trace dump"),
- m_options() {}
+ "trace dump") {}
~CommandObjectTraceDump() override = default;
@@ -205,7 +203,7 @@ class CommandObjectTraceSchema : public CommandObjectParsed {
public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() { OptionParsingStarting(nullptr); }
+ CommandOptions() { OptionParsingStarting(nullptr); }
~CommandOptions() override = default;
@@ -240,8 +238,7 @@ public:
: CommandObjectParsed(interpreter, "trace schema",
"Show the schema of the given trace plugin.",
"trace schema <plug-in>. Use the plug-in name "
- "\"all\" to see all schemas.\n"),
- m_options() {}
+ "\"all\" to see all schemas.\n") {}
~CommandObjectTraceSchema() override = default;
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectType.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectType.cpp
index 0562b6be3cb5..f9e1d0f91fb7 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectType.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectType.cpp
@@ -69,7 +69,7 @@ public:
SynthAddOptions(bool sptr, bool sref, bool casc, bool regx, std::string catg)
: m_skip_pointers(sptr), m_skip_references(sref), m_cascade(casc),
- m_regex(regx), m_target_types(), m_category(catg) {}
+ m_regex(regx), m_category(catg) {}
typedef std::shared_ptr<SynthAddOptions> SharedPointer;
};
@@ -103,7 +103,7 @@ class CommandObjectTypeSummaryAdd : public CommandObjectParsed,
private:
class CommandOptions : public Options {
public:
- CommandOptions(CommandInterpreter &interpreter) : Options() {}
+ CommandOptions(CommandInterpreter &interpreter) {}
~CommandOptions() override = default;
@@ -286,7 +286,7 @@ class CommandObjectTypeSynthAdd : public CommandObjectParsed,
private:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -498,7 +498,7 @@ class CommandObjectTypeFormatAdd : public CommandObjectParsed {
private:
class CommandOptions : public OptionGroup {
public:
- CommandOptions() : OptionGroup() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -571,8 +571,7 @@ public:
CommandObjectTypeFormatAdd(CommandInterpreter &interpreter)
: CommandObjectParsed(interpreter, "type format add",
"Add a new formatting style for a type.", nullptr),
- m_option_group(), m_format_options(eFormatInvalid),
- m_command_options() {
+ m_format_options(eFormatInvalid) {
CommandArgumentEntry type_arg;
CommandArgumentData type_style_arg;
@@ -708,7 +707,7 @@ class CommandObjectTypeFormatterDelete : public CommandObjectParsed {
protected:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -760,7 +759,7 @@ public:
CommandObjectTypeFormatterDelete(CommandInterpreter &interpreter,
uint32_t formatter_kind_mask,
const char *name, const char *help)
- : CommandObjectParsed(interpreter, name, help, nullptr), m_options(),
+ : CommandObjectParsed(interpreter, name, help, nullptr),
m_formatter_kind_mask(formatter_kind_mask) {
CommandArgumentEntry type_arg;
CommandArgumentData type_style_arg;
@@ -873,7 +872,7 @@ class CommandObjectTypeFormatterClear : public CommandObjectParsed {
private:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -914,7 +913,7 @@ public:
CommandObjectTypeFormatterClear(CommandInterpreter &interpreter,
uint32_t formatter_kind_mask,
const char *name, const char *help)
- : CommandObjectParsed(interpreter, name, help, nullptr), m_options(),
+ : CommandObjectParsed(interpreter, name, help, nullptr),
m_formatter_kind_mask(formatter_kind_mask) {}
~CommandObjectTypeFormatterClear() override = default;
@@ -1713,7 +1712,7 @@ class CommandObjectTypeCategoryDefine : public CommandObjectParsed {
class CommandOptions : public Options {
public:
CommandOptions()
- : Options(), m_define_enabled(false, false),
+ : m_define_enabled(false, false),
m_cate_language(eLanguageTypeUnknown, eLanguageTypeUnknown) {}
~CommandOptions() override = default;
@@ -1760,8 +1759,7 @@ public:
CommandObjectTypeCategoryDefine(CommandInterpreter &interpreter)
: CommandObjectParsed(interpreter, "type category define",
"Define a new category as a source of formatters.",
- nullptr),
- m_options() {
+ nullptr) {
CommandArgumentEntry type_arg;
CommandArgumentData type_style_arg;
@@ -1817,7 +1815,7 @@ protected:
class CommandObjectTypeCategoryEnable : public CommandObjectParsed {
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -1863,8 +1861,7 @@ public:
CommandObjectTypeCategoryEnable(CommandInterpreter &interpreter)
: CommandObjectParsed(interpreter, "type category enable",
"Enable a category as a source of formatters.",
- nullptr),
- m_options() {
+ nullptr) {
CommandArgumentEntry type_arg;
CommandArgumentData type_style_arg;
@@ -1995,7 +1992,7 @@ protected:
class CommandObjectTypeCategoryDisable : public CommandObjectParsed {
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -2041,8 +2038,7 @@ public:
CommandObjectTypeCategoryDisable(CommandInterpreter &interpreter)
: CommandObjectParsed(interpreter, "type category disable",
"Disable a category as a source of formatters.",
- nullptr),
- m_options() {
+ nullptr) {
CommandArgumentEntry type_arg;
CommandArgumentData type_style_arg;
@@ -2409,7 +2405,7 @@ private:
typedef std::vector<std::string> option_vector;
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -2528,8 +2524,7 @@ private:
public:
CommandObjectTypeFilterAdd(CommandInterpreter &interpreter)
: CommandObjectParsed(interpreter, "type filter add",
- "Add a new filter for a type.", nullptr),
- m_options() {
+ "Add a new filter for a type.", nullptr) {
CommandArgumentEntry type_arg;
CommandArgumentData type_style_arg;
@@ -2666,7 +2661,7 @@ protected:
class CommandOptions : public OptionGroup {
public:
- CommandOptions() : OptionGroup() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -2716,8 +2711,7 @@ public:
"Lookup types and declarations in the current target, "
"following language-specific naming conventions.",
"type lookup <type-specifier>",
- eCommandRequiresTarget),
- m_option_group(), m_command_options() {
+ eCommandRequiresTarget) {
m_option_group.Append(&m_command_options);
m_option_group.Finalize();
}
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectWatchpoint.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectWatchpoint.cpp
index 9fbf036a19d1..9701553bdda9 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectWatchpoint.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectWatchpoint.cpp
@@ -149,8 +149,7 @@ public:
: CommandObjectParsed(
interpreter, "watchpoint list",
"List all watchpoints at configurable levels of detail.", nullptr,
- eCommandRequiresTarget),
- m_options() {
+ eCommandRequiresTarget) {
CommandArgumentEntry arg;
CommandObject::AddIDsArgumentData(arg, eArgTypeWatchpointID,
eArgTypeWatchpointIDRange);
@@ -165,7 +164,7 @@ public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -432,8 +431,7 @@ public:
: CommandObjectParsed(interpreter, "watchpoint delete",
"Delete the specified watchpoint(s). If no "
"watchpoints are specified, delete them all.",
- nullptr, eCommandRequiresTarget),
- m_options() {
+ nullptr, eCommandRequiresTarget) {
CommandArgumentEntry arg;
CommandObject::AddIDsArgumentData(arg, eArgTypeWatchpointID,
eArgTypeWatchpointIDRange);
@@ -456,7 +454,7 @@ public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -556,8 +554,7 @@ public:
: CommandObjectParsed(interpreter, "watchpoint ignore",
"Set ignore count on the specified watchpoint(s). "
"If no watchpoints are specified, set them all.",
- nullptr, eCommandRequiresTarget),
- m_options() {
+ nullptr, eCommandRequiresTarget) {
CommandArgumentEntry arg;
CommandObject::AddIDsArgumentData(arg, eArgTypeWatchpointID,
eArgTypeWatchpointIDRange);
@@ -580,7 +577,7 @@ public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -682,8 +679,7 @@ public:
"If no watchpoint is specified, act on the last created "
"watchpoint. "
"Passing an empty argument clears the modification.",
- nullptr, eCommandRequiresTarget),
- m_options() {
+ nullptr, eCommandRequiresTarget) {
CommandArgumentEntry arg;
CommandObject::AddIDsArgumentData(arg, eArgTypeWatchpointID,
eArgTypeWatchpointIDRange);
@@ -706,7 +702,7 @@ public:
class CommandOptions : public Options {
public:
- CommandOptions() : Options(), m_condition() {}
+ CommandOptions() {}
~CommandOptions() override = default;
@@ -813,8 +809,7 @@ public:
"to free up resources.",
nullptr,
eCommandRequiresFrame | eCommandTryTargetAPILock |
- eCommandProcessMustBeLaunched | eCommandProcessMustBePaused),
- m_option_group(), m_option_watchpoint() {
+ eCommandProcessMustBeLaunched | eCommandProcessMustBePaused) {
SetHelpLong(
R"(
Examples:
@@ -1006,8 +1001,7 @@ public:
"to free up resources.",
"",
eCommandRequiresFrame | eCommandTryTargetAPILock |
- eCommandProcessMustBeLaunched | eCommandProcessMustBePaused),
- m_option_group(), m_option_watchpoint() {
+ eCommandProcessMustBeLaunched | eCommandProcessMustBePaused) {
SetHelpLong(
R"(
Examples:
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectWatchpointCommand.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectWatchpointCommand.cpp
index 1f4e95366385..a429e568c61a 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectWatchpointCommand.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectWatchpointCommand.cpp
@@ -66,8 +66,7 @@ public:
"commands previously added to it.",
nullptr, eCommandRequiresTarget),
IOHandlerDelegateMultiline("DONE",
- IOHandlerDelegate::Completion::LLDBCommand),
- m_options() {
+ IOHandlerDelegate::Completion::LLDBCommand) {
SetHelpLong(
R"(
General information about entering watchpoint commands
@@ -314,7 +313,7 @@ are no syntax errors may indicate that a function was declared but never called.
class CommandOptions : public Options {
public:
- CommandOptions() : Options(), m_one_liner(), m_function_name() {}
+ CommandOptions() {}
~CommandOptions() override = default;
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandOptionsProcessLaunch.h b/contrib/llvm-project/lldb/source/Commands/CommandOptionsProcessLaunch.h
index d18a23245080..7ab7fabe1050 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandOptionsProcessLaunch.h
+++ b/contrib/llvm-project/lldb/source/Commands/CommandOptionsProcessLaunch.h
@@ -18,7 +18,7 @@ namespace lldb_private {
class CommandOptionsProcessLaunch : public lldb_private::OptionGroup {
public:
- CommandOptionsProcessLaunch() : lldb_private::OptionGroup() {
+ CommandOptionsProcessLaunch() {
// Keep default values of all options in one place: OptionParsingStarting
// ()
OptionParsingStarting(nullptr);
diff --git a/contrib/llvm-project/lldb/source/Commands/Options.td b/contrib/llvm-project/lldb/source/Commands/Options.td
index 3e89eb0f6bda..269c937296d5 100644
--- a/contrib/llvm-project/lldb/source/Commands/Options.td
+++ b/contrib/llvm-project/lldb/source/Commands/Options.td
@@ -18,8 +18,7 @@ let Command = "help" in {
}
let Command = "settings set" in {
- def setset_global : Option<"global", "g">, Arg<"Filename">,
- Completion<"DiskFile">,
+ def setset_global : Option<"global", "g">,
Desc<"Apply the new value to the global default value.">;
def setset_force : Option<"force", "f">,
Desc<"Force an empty value to be accepted as the default.">;
@@ -489,6 +488,8 @@ let Command = "memory read" in {
"display data.">;
def memory_read_force : Option<"force", "r">, Groups<[1,2,3]>,
Desc<"Necessary if reading over target.max-memory-read-size bytes.">;
+ def memory_read_show_tags : Option<"show-tags", "\\x01">, Group<1>,
+ Desc<"Include memory tags in output (does not apply to binary output).">;
}
let Command = "memory find" in {
diff --git a/contrib/llvm-project/lldb/source/Core/CoreProperties.td b/contrib/llvm-project/lldb/source/Core/CoreProperties.td
index 5713bbb74bdc..c965b51a1d6d 100644
--- a/contrib/llvm-project/lldb/source/Core/CoreProperties.td
+++ b/contrib/llvm-project/lldb/source/Core/CoreProperties.td
@@ -62,6 +62,10 @@ let Definition = "debugger" in {
DefaultEnumValue<"eScriptLanguageLua">,
EnumValues<"OptionEnumValues(g_language_enumerators)">,
Desc<"The script language to be used for evaluating user-written scripts.">;
+ def REPLLanguage: Property<"repl-lang", "Language">,
+ Global,
+ DefaultEnumValue<"eLanguageTypeUnknown">,
+ Desc<"The language to use for the REPL.">;
def StopDisassemblyCount: Property<"stop-disassembly-count", "SInt64">,
Global,
DefaultUnsignedValue<4>,
diff --git a/contrib/llvm-project/lldb/source/Core/Debugger.cpp b/contrib/llvm-project/lldb/source/Core/Debugger.cpp
index ae454fae3322..49cc21b65951 100644
--- a/contrib/llvm-project/lldb/source/Core/Debugger.cpp
+++ b/contrib/llvm-project/lldb/source/Core/Debugger.cpp
@@ -25,6 +25,7 @@
#include "lldb/Interpreter/CommandInterpreter.h"
#include "lldb/Interpreter/CommandReturnObject.h"
#include "lldb/Interpreter/OptionValue.h"
+#include "lldb/Interpreter/OptionValueLanguage.h"
#include "lldb/Interpreter/OptionValueProperties.h"
#include "lldb/Interpreter/OptionValueSInt64.h"
#include "lldb/Interpreter/OptionValueString.h"
@@ -324,6 +325,20 @@ bool Debugger::SetScriptLanguage(lldb::ScriptLanguage script_lang) {
script_lang);
}
+lldb::LanguageType Debugger::GetREPLLanguage() const {
+ const uint32_t idx = ePropertyREPLLanguage;
+ OptionValueLanguage *value =
+ m_collection_sp->GetPropertyAtIndexAsOptionValueLanguage(nullptr, idx);
+ if (value)
+ return value->GetCurrentValue();
+ return LanguageType();
+}
+
+bool Debugger::SetREPLLanguage(lldb::LanguageType repl_lang) {
+ const uint32_t idx = ePropertyREPLLanguage;
+ return m_collection_sp->SetPropertyAtIndexAsLanguage(nullptr, idx, repl_lang);
+}
+
uint32_t Debugger::GetTerminalWidth() const {
const uint32_t idx = ePropertyTerminalWidth;
return m_collection_sp->GetPropertyAtIndexAsSInt64(
@@ -1753,17 +1768,20 @@ Status Debugger::RunREPL(LanguageType language, const char *repl_options) {
Status err;
FileSpec repl_executable;
+ if (language == eLanguageTypeUnknown)
+ language = GetREPLLanguage();
+
if (language == eLanguageTypeUnknown) {
LanguageSet repl_languages = Language::GetLanguagesSupportingREPLs();
if (auto single_lang = repl_languages.GetSingularLanguage()) {
language = *single_lang;
} else if (repl_languages.Empty()) {
- err.SetErrorStringWithFormat(
+ err.SetErrorString(
"LLDB isn't configured with REPL support for any languages.");
return err;
} else {
- err.SetErrorStringWithFormat(
+ err.SetErrorString(
"Multiple possible REPL languages. Please specify a language.");
return err;
}
diff --git a/contrib/llvm-project/lldb/source/Core/DumpDataExtractor.cpp b/contrib/llvm-project/lldb/source/Core/DumpDataExtractor.cpp
index 175ffef04a81..211e16a2e033 100644
--- a/contrib/llvm-project/lldb/source/Core/DumpDataExtractor.cpp
+++ b/contrib/llvm-project/lldb/source/Core/DumpDataExtractor.cpp
@@ -17,6 +17,9 @@
#include "lldb/Target/ABI.h"
#include "lldb/Target/ExecutionContext.h"
#include "lldb/Target/ExecutionContextScope.h"
+#include "lldb/Target/MemoryRegionInfo.h"
+#include "lldb/Target/MemoryTagManager.h"
+#include "lldb/Target/MemoryTagMap.h"
#include "lldb/Target/Process.h"
#include "lldb/Target/SectionLoadList.h"
#include "lldb/Target/Target.h"
@@ -253,6 +256,85 @@ void DumpFloatingPoint(std::ostringstream &ss, FloatT f) {
ss << f;
}
+static llvm::Optional<MemoryTagMap>
+GetMemoryTags(lldb::addr_t addr, size_t length,
+ ExecutionContextScope *exe_scope) {
+ assert(addr != LLDB_INVALID_ADDRESS);
+
+ if (!exe_scope)
+ return llvm::None;
+
+ TargetSP target_sp = exe_scope->CalculateTarget();
+ if (!target_sp)
+ return llvm::None;
+
+ ProcessSP process_sp = target_sp->CalculateProcess();
+ if (!process_sp)
+ return llvm::None;
+
+ llvm::Expected<const MemoryTagManager *> tag_manager_or_err =
+ process_sp->GetMemoryTagManager();
+ if (!tag_manager_or_err) {
+ llvm::consumeError(tag_manager_or_err.takeError());
+ return llvm::None;
+ }
+
+ MemoryRegionInfos memory_regions;
+ // Don't check the return status; the list will just be empty if an error happened.
+ process_sp->GetMemoryRegions(memory_regions);
+
+ llvm::Expected<std::vector<MemoryTagManager::TagRange>> tagged_ranges_or_err =
+ (*tag_manager_or_err)
+ ->MakeTaggedRanges(addr, addr + length, memory_regions);
+ // Here we know that our range will not be inverted but we must still check
+ // for an error.
+ if (!tagged_ranges_or_err) {
+ llvm::consumeError(tagged_ranges_or_err.takeError());
+ return llvm::None;
+ }
+ if (tagged_ranges_or_err->empty())
+ return llvm::None;
+
+ MemoryTagMap memory_tag_map(*tag_manager_or_err);
+ for (const MemoryTagManager::TagRange &range : *tagged_ranges_or_err) {
+ llvm::Expected<std::vector<lldb::addr_t>> tags_or_err =
+ process_sp->ReadMemoryTags(range.GetRangeBase(), range.GetByteSize());
+
+ if (tags_or_err)
+ memory_tag_map.InsertTags(range.GetRangeBase(), *tags_or_err);
+ else
+ llvm::consumeError(tags_or_err.takeError());
+ }
+
+ if (memory_tag_map.Empty())
+ return llvm::None;
+
+ return memory_tag_map;
+}
+
+static void
+printMemoryTags(const DataExtractor &DE, Stream *s, lldb::addr_t addr,
+ size_t len,
+ const llvm::Optional<MemoryTagMap> &memory_tag_map) {
+ std::vector<llvm::Optional<lldb::addr_t>> tags =
+ memory_tag_map->GetTags(addr, len);
+
+ // Only print if there is at least one tag for this line
+ if (tags.empty())
+ return;
+
+ s->Printf(" (tag%s:", tags.size() > 1 ? "s" : "");
+ // Some granules may not be tagged, but print something for them anyway
+ // so that the ordering remains intact.
+ for (auto tag : tags) {
+ if (tag)
+ s->Printf(" 0x%" PRIx64, *tag);
+ else
+ s->PutCString(" <no tag>");
+ }
+ s->PutCString(")");
+}
+
lldb::offset_t lldb_private::DumpDataExtractor(
const DataExtractor &DE, Stream *s, offset_t start_offset,
lldb::Format item_format, size_t item_byte_size, size_t item_count,
@@ -261,7 +343,7 @@ lldb::offset_t lldb_private::DumpDataExtractor(
// non-zero, the value is a bitfield
uint32_t item_bit_offset, // If "item_bit_size" is non-zero, this is the
// shift amount to apply to a bitfield
- ExecutionContextScope *exe_scope) {
+ ExecutionContextScope *exe_scope, bool show_memory_tags) {
if (s == nullptr)
return start_offset;
@@ -272,6 +354,11 @@ lldb::offset_t lldb_private::DumpDataExtractor(
offset_t offset = start_offset;
+ llvm::Optional<MemoryTagMap> memory_tag_map = llvm::None;
+ if (show_memory_tags && base_addr != LLDB_INVALID_ADDRESS)
+ memory_tag_map =
+ GetMemoryTags(base_addr, DE.GetByteSize() - offset, exe_scope);
+
if (item_format == eFormatInstruction)
return DumpInstructions(DE, s, exe_scope, start_offset, base_addr,
item_count);
@@ -283,7 +370,10 @@ lldb::offset_t lldb_private::DumpDataExtractor(
lldb::offset_t line_start_offset = start_offset;
for (uint32_t count = 0; DE.ValidOffset(offset) && count < item_count;
++count) {
+ // If we are at the beginning or end of a line
+ // Note that the last line is handled outside this for loop.
if ((count % num_per_line) == 0) {
+ // If we are at the end of a line
if (count > 0) {
if (item_format == eFormatBytesWithASCII &&
offset > line_start_offset) {
@@ -295,6 +385,15 @@ lldb::offset_t lldb_private::DumpDataExtractor(
offset - line_start_offset, SIZE_MAX,
LLDB_INVALID_ADDRESS, 0, 0);
}
+
+ if (base_addr != LLDB_INVALID_ADDRESS && memory_tag_map) {
+ size_t line_len = offset - line_start_offset;
+ lldb::addr_t line_base =
+ base_addr +
+ (offset - start_offset - line_len) / DE.getTargetByteSize();
+ printMemoryTags(DE, s, line_base, line_len, memory_tag_map);
+ }
+
s->EOL();
}
if (base_addr != LLDB_INVALID_ADDRESS)
@@ -796,14 +895,28 @@ lldb::offset_t lldb_private::DumpDataExtractor(
}
}
- if (item_format == eFormatBytesWithASCII && offset > line_start_offset) {
- s->Printf("%*s", static_cast<int>(
- (num_per_line - (offset - line_start_offset)) * 3 + 2),
- "");
- DumpDataExtractor(DE, s, line_start_offset, eFormatCharPrintable, 1,
- offset - line_start_offset, SIZE_MAX,
- LLDB_INVALID_ADDRESS, 0, 0);
+ // If anything was printed, catch the end of the last line here, since we
+ // will exit the for loop above before we get a chance to append to it
+ // normally.
+ if (offset > line_start_offset) {
+ if (item_format == eFormatBytesWithASCII) {
+ s->Printf("%*s",
+ static_cast<int>(
+ (num_per_line - (offset - line_start_offset)) * 3 + 2),
+ "");
+ DumpDataExtractor(DE, s, line_start_offset, eFormatCharPrintable, 1,
+ offset - line_start_offset, SIZE_MAX,
+ LLDB_INVALID_ADDRESS, 0, 0);
+ }
+
+ if (base_addr != LLDB_INVALID_ADDRESS && memory_tag_map) {
+ size_t line_len = offset - line_start_offset;
+ lldb::addr_t line_base = base_addr + (offset - start_offset - line_len) /
+ DE.getTargetByteSize();
+ printMemoryTags(DE, s, line_base, line_len, memory_tag_map);
+ }
}
+
return offset; // Return the offset at which we ended up
}
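
The helpers above build a MemoryTagMap for the dumped range and append a per-line tag annotation. Below is a self-contained sketch of just the formatting step, assuming 16-byte MTE granules; PrintLineTags is a stand-in for the static printMemoryTags helper, not LLDB code:

    #include <cinttypes>
    #include <cstdio>
    #include <optional>
    #include <vector>

    // One entry per granule covered by the dumped line; untagged granules
    // still get a placeholder so the ordering stays readable.
    static void PrintLineTags(const std::vector<std::optional<uint64_t>> &tags) {
      if (tags.empty())
        return;
      std::printf(" (tag%s:", tags.size() > 1 ? "s" : "");
      for (const auto &tag : tags) {
        if (tag)
          std::printf(" 0x%" PRIx64, *tag);
        else
          std::printf(" <no tag>");
      }
      std::printf(")");
    }

    int main() {
      // e.g. a 32-byte line covering two 16-byte granules, the second untagged
      PrintLineTags({uint64_t{0xd}, std::nullopt});
      std::printf("\n"); // -> " (tags: 0xd <no tag>)"
    }
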
diff --git a/contrib/llvm-project/lldb/source/Core/IOHandlerCursesGUI.cpp b/contrib/llvm-project/lldb/source/Core/IOHandlerCursesGUI.cpp
index 60207f75b7df..b37e84a13c5e 100644
--- a/contrib/llvm-project/lldb/source/Core/IOHandlerCursesGUI.cpp
+++ b/contrib/llvm-project/lldb/source/Core/IOHandlerCursesGUI.cpp
@@ -78,8 +78,6 @@
using namespace lldb;
using namespace lldb_private;
-using llvm::None;
-using llvm::Optional;
using llvm::StringRef;
// we may want curses to be disabled for some builds for instance, windows
diff --git a/contrib/llvm-project/lldb/source/Core/Mangled.cpp b/contrib/llvm-project/lldb/source/Core/Mangled.cpp
index c8aacdefefa2..4e10324401dc 100644
--- a/contrib/llvm-project/lldb/source/Core/Mangled.cpp
+++ b/contrib/llvm-project/lldb/source/Core/Mangled.cpp
@@ -70,23 +70,13 @@ Mangled::Mangled(llvm::StringRef name) {
SetValue(ConstString(name));
}
-// Convert to pointer operator. This allows code to check any Mangled objects
+// Convert to bool operator. This allows code to check any Mangled objects
// to see if they contain anything valid using code such as:
//
// Mangled mangled(...);
// if (mangled)
// { ...
-Mangled::operator void *() const {
- return (m_mangled) ? const_cast<Mangled *>(this) : nullptr;
-}
-
-// Logical NOT operator. This allows code to check any Mangled objects to see
-// if they are invalid using code such as:
-//
-// Mangled mangled(...);
-// if (!file_spec)
-// { ...
-bool Mangled::operator!() const { return !m_mangled; }
+Mangled::operator bool() const { return m_mangled || m_demangled; }
// Clear the mangled and demangled values.
void Mangled::Clear() {
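
For reference, a standalone sketch of the idiom after this change; Name is an illustrative stand-in, not the real class. Note that the new operator also treats an object with only a demangled name as valid (m_mangled || m_demangled), which the removed operator void *, keyed on m_mangled alone, did not:

    #include <iostream>
    #include <string>

    // The explicit operator bool keeps the "if (mangled)" idiom working and
    // accepts a demangled-only name as valid.
    struct Name {
      std::string mangled;
      std::string demangled;
      explicit operator bool() const {
        return !mangled.empty() || !demangled.empty();
      }
    };

    int main() {
      Name n{"", "std::string"};
      if (n) // valid even though only the demangled form is set
        std::cout << "valid\n";
    }
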
diff --git a/contrib/llvm-project/lldb/source/DataFormatters/FormatManager.cpp b/contrib/llvm-project/lldb/source/DataFormatters/FormatManager.cpp
index 0ef5f0adc832..f07bb9a7136a 100644
--- a/contrib/llvm-project/lldb/source/DataFormatters/FormatManager.cpp
+++ b/contrib/llvm-project/lldb/source/DataFormatters/FormatManager.cpp
@@ -730,7 +730,7 @@ void FormatManager::LoadSystemFormatters() {
GetCategory(m_system_category_name);
sys_category_sp->GetRegexTypeSummariesContainer()->Add(
- RegularExpression(R"(^((un)?signed )?char ?(\*|\[\])$)"), string_format);
+ RegularExpression(R"(^(unsigned )?char ?(\*|\[\])$)"), string_format);
sys_category_sp->GetRegexTypeSummariesContainer()->Add(
std::move(any_size_char_arr), string_array_format);
diff --git a/contrib/llvm-project/lldb/source/Expression/DWARFExpression.cpp b/contrib/llvm-project/lldb/source/Expression/DWARFExpression.cpp
index 890d8b5d3107..ccd41fb4a94e 100644
--- a/contrib/llvm-project/lldb/source/Expression/DWARFExpression.cpp
+++ b/contrib/llvm-project/lldb/source/Expression/DWARFExpression.cpp
@@ -825,7 +825,7 @@ static bool Evaluate_DW_OP_entry_value(std::vector<Value> &stack,
const DWARFExpression &param_expr = matched_param->LocationInCaller;
if (!param_expr.Evaluate(&parent_exe_ctx,
parent_frame->GetRegisterContext().get(),
- /*loclist_base_addr=*/LLDB_INVALID_ADDRESS,
+ /*loclist_base_load_addr=*/LLDB_INVALID_ADDRESS,
/*initial_value_ptr=*/nullptr,
/*object_address_ptr=*/nullptr, result, error_ptr)) {
LLDB_LOG(log,
diff --git a/contrib/llvm-project/lldb/source/Expression/Materializer.cpp b/contrib/llvm-project/lldb/source/Expression/Materializer.cpp
index 3945f3a70f75..d40365d60fb3 100644
--- a/contrib/llvm-project/lldb/source/Expression/Materializer.cpp
+++ b/contrib/llvm-project/lldb/source/Expression/Materializer.cpp
@@ -700,7 +700,7 @@ public:
DumpHexBytes(&dump_stream, data.GetBytes(), data.GetByteSize(), 16,
load_addr);
- lldb::offset_t offset;
+ lldb::offset_t offset = 0;
ptr = extractor.GetAddress(&offset);
@@ -978,7 +978,7 @@ public:
DumpHexBytes(&dump_stream, data.GetBytes(), data.GetByteSize(), 16,
load_addr);
- lldb::offset_t offset;
+ lldb::offset_t offset = 0;
ptr = extractor.GetAddress(&offset);
diff --git a/contrib/llvm-project/lldb/source/Host/common/Host.cpp b/contrib/llvm-project/lldb/source/Host/common/Host.cpp
index d14ebe99fd15..53a096c617df 100644
--- a/contrib/llvm-project/lldb/source/Host/common/Host.cpp
+++ b/contrib/llvm-project/lldb/source/Host/common/Host.cpp
@@ -191,7 +191,7 @@ static thread_result_t MonitorChildProcessThreadFunction(void *arg) {
::sigaction(SIGUSR1, &sigUsr1Action, nullptr);
#endif // __linux__
- while (1) {
+ while (true) {
log = lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS);
LLDB_LOGF(log, "%s ::waitpid (pid = %" PRIi32 ", &status, options = %i)...",
function, pid, options);
diff --git a/contrib/llvm-project/lldb/source/Host/common/HostNativeThreadBase.cpp b/contrib/llvm-project/lldb/source/Host/common/HostNativeThreadBase.cpp
index b15160b143ca..b8223e3ec42a 100644
--- a/contrib/llvm-project/lldb/source/Host/common/HostNativeThreadBase.cpp
+++ b/contrib/llvm-project/lldb/source/Host/common/HostNativeThreadBase.cpp
@@ -18,7 +18,7 @@ using namespace lldb;
using namespace lldb_private;
HostNativeThreadBase::HostNativeThreadBase(thread_t thread)
- : m_thread(thread), m_result(0) {}
+ : m_thread(thread), m_result(0) {} // NOLINT(modernize-use-nullptr)
lldb::thread_t HostNativeThreadBase::GetSystemHandle() const {
return m_thread;
@@ -34,7 +34,7 @@ bool HostNativeThreadBase::IsJoinable() const {
void HostNativeThreadBase::Reset() {
m_thread = LLDB_INVALID_HOST_THREAD;
- m_result = 0;
+ m_result = 0; // NOLINT(modernize-use-nullptr)
}
bool HostNativeThreadBase::EqualsThread(lldb::thread_t thread) const {
@@ -44,7 +44,7 @@ bool HostNativeThreadBase::EqualsThread(lldb::thread_t thread) const {
lldb::thread_t HostNativeThreadBase::Release() {
lldb::thread_t result = m_thread;
m_thread = LLDB_INVALID_HOST_THREAD;
- m_result = 0;
+ m_result = 0; // NOLINT(modernize-use-nullptr)
return result;
}
diff --git a/contrib/llvm-project/lldb/source/Host/common/Socket.cpp b/contrib/llvm-project/lldb/source/Host/common/Socket.cpp
index cc0659797530..d8b8f54a6468 100644
--- a/contrib/llvm-project/lldb/source/Host/common/Socket.cpp
+++ b/contrib/llvm-project/lldb/source/Host/common/Socket.cpp
@@ -18,6 +18,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Errno.h"
#include "llvm/Support/Error.h"
+#include "llvm/Support/Regex.h"
#include "llvm/Support/WindowsError.h"
#if LLDB_ENABLE_POSIX
@@ -281,9 +282,9 @@ Status Socket::Close() {
static_cast<void *>(this), static_cast<uint64_t>(m_socket));
#if defined(_WIN32)
- bool success = !!closesocket(m_socket);
+ bool success = closesocket(m_socket) == 0;
#else
- bool success = !!::close(m_socket);
+ bool success = ::close(m_socket) == 0;
#endif
// A reference to a FD was passed in, set it to an invalid value
m_socket = kInvalidSocketValue;
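
The old `!!closesocket(...)` / `!!::close(...)` expressions reported success exactly backwards, since both calls return 0 on success. A tiny sketch of why the `== 0` comparison is the right test:

    #include <cassert>

    // POSIX close() and Windows closesocket() return 0 on success and a
    // nonzero error indicator on failure, so "!!ret" was true exactly when
    // the call failed; comparing against 0 is the correct success test.
    static bool CloseSucceeded(int ret) { return ret == 0; }

    int main() {
      assert(CloseSucceeded(0));   // success
      assert(!CloseSucceeded(-1)); // failure
      assert(!!(-1) == true);      // the old expression mislabeled failure
    }
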
diff --git a/contrib/llvm-project/lldb/source/Host/common/Terminal.cpp b/contrib/llvm-project/lldb/source/Host/common/Terminal.cpp
index 2a1c12e667bc..831e9dff4eb1 100644
--- a/contrib/llvm-project/lldb/source/Host/common/Terminal.cpp
+++ b/contrib/llvm-project/lldb/source/Host/common/Terminal.cpp
@@ -417,8 +417,8 @@ bool TerminalState::Save(Terminal term, bool save_process_group) {
Clear();
m_tty = term;
if (m_tty.IsATerminal()) {
- int fd = m_tty.GetFileDescriptor();
#if LLDB_ENABLE_POSIX
+ int fd = m_tty.GetFileDescriptor();
m_tflags = ::fcntl(fd, F_GETFL, 0);
#if LLDB_ENABLE_TERMIOS
std::unique_ptr<Terminal::Data> new_data{new Terminal::Data()};
diff --git a/contrib/llvm-project/lldb/source/Host/common/XML.cpp b/contrib/llvm-project/lldb/source/Host/common/XML.cpp
index 79128b98dc38..2d48175a8234 100644
--- a/contrib/llvm-project/lldb/source/Host/common/XML.cpp
+++ b/contrib/llvm-project/lldb/source/Host/common/XML.cpp
@@ -130,22 +130,25 @@ XMLNode XMLNode::GetChild() const {
#endif
}
-llvm::StringRef XMLNode::GetAttributeValue(const char *name,
- const char *fail_value) const {
- const char *attr_value = nullptr;
+std::string XMLNode::GetAttributeValue(const char *name,
+ const char *fail_value) const {
+ std::string attr_value;
#if LLDB_ENABLE_LIBXML2
-
- if (IsValid())
- attr_value = (const char *)xmlGetProp(m_node, (const xmlChar *)name);
- else
- attr_value = fail_value;
+ if (IsValid()) {
+ xmlChar *value = xmlGetProp(m_node, (const xmlChar *)name);
+ if (value) {
+ attr_value = (const char *)value;
+ xmlFree(value);
+ }
+ } else {
+ if (fail_value)
+ attr_value = fail_value;
+ }
#else
- attr_value = fail_value;
+ if (fail_value)
+ attr_value = fail_value;
#endif
- if (attr_value)
- return llvm::StringRef(attr_value);
- else
- return llvm::StringRef();
+ return attr_value;
}
bool XMLNode::GetAttributeValueAsUnsigned(const char *name, uint64_t &value,
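
The rewritten accessor copies the attribute into an owning std::string and releases the buffer xmlGetProp() allocated, instead of handing out a StringRef that aliased (and leaked) that buffer. A minimal sketch of the ownership pattern, with fake_get_prop standing in for xmlGetProp and free() for xmlFree:

    #include <cstdlib>
    #include <cstring>
    #include <iostream>
    #include <string>

    // Stand-in for xmlGetProp(): a heap-allocated copy that the caller must
    // release (xmlFree() in real libxml2 code, free() here).
    static char *fake_get_prop(const char *value) {
      return value ? strdup(value) : nullptr;
    }

    int main() {
      std::string attr_value;
      if (char *value = fake_get_prop("yes")) {
        attr_value = value; // copy into owning storage...
        free(value);        // ...then release the library buffer
      }
      std::cout << attr_value << "\n"; // safe to use after the buffer is gone
    }
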
diff --git a/contrib/llvm-project/lldb/source/Host/posix/ProcessLauncherPosixFork.cpp b/contrib/llvm-project/lldb/source/Host/posix/ProcessLauncherPosixFork.cpp
index 2f08b9fa8857..635dbb14a027 100644
--- a/contrib/llvm-project/lldb/source/Host/posix/ProcessLauncherPosixFork.cpp
+++ b/contrib/llvm-project/lldb/source/Host/posix/ProcessLauncherPosixFork.cpp
@@ -38,43 +38,40 @@
using namespace lldb;
using namespace lldb_private;
-static void FixupEnvironment(Environment &env) {
-#ifdef __ANDROID__
- // If there is no PATH variable specified inside the environment then set the
- // path to /system/bin. It is required because the default path used by
- // execve() is wrong on android.
- env.try_emplace("PATH", "/system/bin");
-#endif
+// Begin code running in the child process
+// NB: This code needs to be async-signal safe, since we're invoking fork from
+// multithreaded contexts.
+
+static void write_string(int error_fd, const char *str) {
+ int r = write(error_fd, str, strlen(str));
+ (void)r;
}
[[noreturn]] static void ExitWithError(int error_fd,
const char *operation) {
int err = errno;
- llvm::raw_fd_ostream os(error_fd, true);
- os << operation << " failed: " << llvm::sys::StrError(err);
- os.flush();
+ write_string(error_fd, operation);
+ write_string(error_fd, " failed: ");
+ // strerror is not guaranteed to be async-signal safe, but it usually is.
+ write_string(error_fd, strerror(err));
_exit(1);
}
-static void DisableASLRIfRequested(int error_fd, const ProcessLaunchInfo &info) {
+static void DisableASLR(int error_fd) {
#if defined(__linux__)
- if (info.GetFlags().Test(lldb::eLaunchFlagDisableASLR)) {
- const unsigned long personality_get_current = 0xffffffff;
- int value = personality(personality_get_current);
- if (value == -1)
- ExitWithError(error_fd, "personality get");
-
- value = personality(ADDR_NO_RANDOMIZE | value);
- if (value == -1)
- ExitWithError(error_fd, "personality set");
- }
+ const unsigned long personality_get_current = 0xffffffff;
+ int value = personality(personality_get_current);
+ if (value == -1)
+ ExitWithError(error_fd, "personality get");
+
+ value = personality(ADDR_NO_RANDOMIZE | value);
+ if (value == -1)
+ ExitWithError(error_fd, "personality set");
#endif
}
-static void DupDescriptor(int error_fd, const FileSpec &file_spec, int fd,
- int flags) {
- int target_fd = llvm::sys::RetryAfterSignal(-1, ::open,
- file_spec.GetCString(), flags, 0666);
+static void DupDescriptor(int error_fd, const char *file, int fd, int flags) {
+ int target_fd = llvm::sys::RetryAfterSignal(-1, ::open, file, flags, 0666);
if (target_fd == -1)
ExitWithError(error_fd, "DupDescriptor-open");
@@ -88,44 +85,67 @@ static void DupDescriptor(int error_fd, const FileSpec &file_spec, int fd,
::close(target_fd);
}
-[[noreturn]] static void ChildFunc(int error_fd,
- const ProcessLaunchInfo &info) {
- if (info.GetFlags().Test(eLaunchFlagLaunchInSeparateProcessGroup)) {
+namespace {
+struct ForkFileAction {
+ ForkFileAction(const FileAction &act);
+
+ FileAction::Action action;
+ int fd;
+ std::string path;
+ int arg;
+};
+
+struct ForkLaunchInfo {
+ ForkLaunchInfo(const ProcessLaunchInfo &info);
+
+ bool separate_process_group;
+ bool debug;
+ bool disable_aslr;
+ std::string wd;
+ const char **argv;
+ Environment::Envp envp;
+ std::vector<ForkFileAction> actions;
+
+ bool has_action(int fd) const {
+ for (const ForkFileAction &action : actions) {
+ if (action.fd == fd)
+ return true;
+ }
+ return false;
+ }
+};
+} // namespace
+
+[[noreturn]] static void ChildFunc(int error_fd, const ForkLaunchInfo &info) {
+ if (info.separate_process_group) {
if (setpgid(0, 0) != 0)
ExitWithError(error_fd, "setpgid");
}
- for (size_t i = 0; i < info.GetNumFileActions(); ++i) {
- const FileAction &action = *info.GetFileActionAtIndex(i);
- switch (action.GetAction()) {
+ for (const ForkFileAction &action : info.actions) {
+ switch (action.action) {
case FileAction::eFileActionClose:
- if (close(action.GetFD()) != 0)
+ if (close(action.fd) != 0)
ExitWithError(error_fd, "close");
break;
case FileAction::eFileActionDuplicate:
- if (dup2(action.GetFD(), action.GetActionArgument()) == -1)
+ if (dup2(action.fd, action.arg) == -1)
ExitWithError(error_fd, "dup2");
break;
case FileAction::eFileActionOpen:
- DupDescriptor(error_fd, action.GetFileSpec(), action.GetFD(),
- action.GetActionArgument());
+ DupDescriptor(error_fd, action.path.c_str(), action.fd, action.arg);
break;
case FileAction::eFileActionNone:
break;
}
}
- const char **argv = info.GetArguments().GetConstArgumentVector();
-
// Change working directory
- if (info.GetWorkingDirectory() &&
- 0 != ::chdir(info.GetWorkingDirectory().GetCString()))
+ if (!info.wd.empty() && 0 != ::chdir(info.wd.c_str()))
ExitWithError(error_fd, "chdir");
- DisableASLRIfRequested(error_fd, info);
- Environment env = info.GetEnvironment();
- FixupEnvironment(env);
- Environment::Envp envp = env.getEnvp();
+ if (info.disable_aslr)
+ DisableASLR(error_fd);
// Clear the signal mask to prevent the child from being affected by any
// masking done by the parent.
@@ -134,7 +154,7 @@ static void DupDescriptor(int error_fd, const FileSpec &file_spec, int fd,
pthread_sigmask(SIG_SETMASK, &set, nullptr) != 0)
ExitWithError(error_fd, "pthread_sigmask");
- if (info.GetFlags().Test(eLaunchFlagDebug)) {
+ if (info.debug) {
// Do not inherit setgid powers.
if (setgid(getgid()) != 0)
ExitWithError(error_fd, "setgid");
@@ -143,6 +163,8 @@ static void DupDescriptor(int error_fd, const FileSpec &file_spec, int fd,
// Close everything besides stdin, stdout, and stderr that has no file
// action to avoid leaking. Only do this when debugging, as elsewhere we
// actually rely on passing open descriptors to child processes.
+ // NB: This code is not async-signal safe, but we currently do not launch
+ // processes for debugging from within multithreaded contexts.
const llvm::StringRef proc_fd_path = "/proc/self/fd";
std::error_code ec;
@@ -157,7 +179,7 @@ static void DupDescriptor(int error_fd, const FileSpec &file_spec, int fd,
// Don't close first three entries since they are stdin, stdout and
// stderr.
- if (fd > 2 && !info.GetFileActionForFD(fd) && fd != error_fd)
+ if (fd > 2 && !info.has_action(fd) && fd != error_fd)
files_to_close.push_back(fd);
}
for (int file_to_close : files_to_close)
@@ -166,7 +188,7 @@ static void DupDescriptor(int error_fd, const FileSpec &file_spec, int fd,
// Since /proc/self/fd didn't work, trying the slow way instead.
int max_fd = sysconf(_SC_OPEN_MAX);
for (int fd = 3; fd < max_fd; ++fd)
- if (!info.GetFileActionForFD(fd) && fd != error_fd)
+ if (!info.has_action(fd) && fd != error_fd)
close(fd);
}
@@ -176,7 +198,7 @@ static void DupDescriptor(int error_fd, const FileSpec &file_spec, int fd,
}
// Execute. We should never return...
- execve(argv[0], const_cast<char *const *>(argv), envp);
+ execve(info.argv[0], const_cast<char *const *>(info.argv), info.envp);
#if defined(__linux__)
if (errno == ETXTBSY) {
@@ -189,7 +211,7 @@ static void DupDescriptor(int error_fd, const FileSpec &file_spec, int fd,
// Since this state should clear up quickly, wait a while and then give it
// one more go.
usleep(50000);
- execve(argv[0], const_cast<char *const *>(argv), envp);
+ execve(info.argv[0], const_cast<char *const *>(info.argv), info.envp);
}
#endif
@@ -198,12 +220,43 @@ static void DupDescriptor(int error_fd, const FileSpec &file_spec, int fd,
ExitWithError(error_fd, "execve");
}
+// End of code running in the child process.
+
+ForkFileAction::ForkFileAction(const FileAction &act)
+ : action(act.GetAction()), fd(act.GetFD()), path(act.GetPath().str()),
+ arg(act.GetActionArgument()) {}
+
+static std::vector<ForkFileAction>
+MakeForkActions(const ProcessLaunchInfo &info) {
+ std::vector<ForkFileAction> result;
+ for (size_t i = 0; i < info.GetNumFileActions(); ++i)
+ result.emplace_back(*info.GetFileActionAtIndex(i));
+ return result;
+}
+
+static Environment::Envp FixupEnvironment(Environment env) {
+#ifdef __ANDROID__
+ // If there is no PATH variable specified inside the environment then set the
+ // path to /system/bin. It is required because the default path used by
+ // execve() is wrong on android.
+ env.try_emplace("PATH", "/system/bin");
+#endif
+ return env.getEnvp();
+}
+
+ForkLaunchInfo::ForkLaunchInfo(const ProcessLaunchInfo &info)
+ : separate_process_group(
+ info.GetFlags().Test(eLaunchFlagLaunchInSeparateProcessGroup)),
+ debug(info.GetFlags().Test(eLaunchFlagDebug)),
+ disable_aslr(info.GetFlags().Test(eLaunchFlagDisableASLR)),
+ wd(info.GetWorkingDirectory().GetPath()),
+ argv(info.GetArguments().GetConstArgumentVector()),
+ envp(FixupEnvironment(info.GetEnvironment())),
+ actions(MakeForkActions(info)) {}
+
HostProcess
ProcessLauncherPosixFork::LaunchProcess(const ProcessLaunchInfo &launch_info,
Status &error) {
- char exe_path[PATH_MAX];
- launch_info.GetExecutableFile().GetPath(exe_path, sizeof(exe_path));
-
// A pipe used by the child process to report errors.
PipePosix pipe;
const bool child_processes_inherit = false;
@@ -211,6 +264,8 @@ ProcessLauncherPosixFork::LaunchProcess(const ProcessLaunchInfo &launch_info,
if (error.Fail())
return HostProcess();
+ const ForkLaunchInfo fork_launch_info(launch_info);
+
::pid_t pid = ::fork();
if (pid == -1) {
// Fork failed
@@ -221,7 +276,7 @@ ProcessLauncherPosixFork::LaunchProcess(const ProcessLaunchInfo &launch_info,
if (pid == 0) {
// child process
pipe.CloseReadFileDescriptor();
- ChildFunc(pipe.ReleaseWriteFileDescriptor(), launch_info);
+ ChildFunc(pipe.ReleaseWriteFileDescriptor(), fork_launch_info);
}
// parent process
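
The launcher now flattens everything the child needs into a plain ForkLaunchInfo before fork() and keeps the child-side code async-signal safe. A minimal POSIX sketch of that pattern under the same constraints; /bin/true and the helper names are illustrative, not taken from LLDB:

    #include <cerrno>
    #include <cstring>
    #include <string>
    #include <unistd.h>

    // Report a child-side failure using only async-signal-safe calls
    // (write/_exit), mirroring the rewritten ExitWithError().
    [[noreturn]] static void ExitWithError(int error_fd, const char *operation) {
      int err = errno;
      (void)write(error_fd, operation, strlen(operation));
      (void)write(error_fd, " failed: ", 9);
      const char *msg = strerror(err); // usually, though not formally, safe here
      (void)write(error_fd, msg, strlen(msg));
      _exit(1);
    }

    int main() {
      // Flatten everything the child needs into plain data *before* fork(),
      // as ForkLaunchInfo does, so the child never allocates or takes locks.
      std::string exe = "/bin/true"; // illustrative target
      char *const argv[] = {const_cast<char *>(exe.c_str()), nullptr};
      char *const envp[] = {nullptr};

      int pipefd[2];
      if (pipe(pipefd) != 0)
        return 1;

      pid_t pid = fork();
      if (pid == 0) { // child: async-signal-safe territory only
        close(pipefd[0]);
        execve(argv[0], argv, envp);
        ExitWithError(pipefd[1], "execve"); // only reached if execve failed
      }
      close(pipefd[1]); // parent
      return pid > 0 ? 0 : 1;
    }
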
diff --git a/contrib/llvm-project/lldb/source/Interpreter/CommandInterpreter.cpp b/contrib/llvm-project/lldb/source/Interpreter/CommandInterpreter.cpp
index bd03f18b47c0..59c23716bf89 100644
--- a/contrib/llvm-project/lldb/source/Interpreter/CommandInterpreter.cpp
+++ b/contrib/llvm-project/lldb/source/Interpreter/CommandInterpreter.cpp
@@ -2259,13 +2259,15 @@ static void GetHomeInitFile(llvm::SmallVectorImpl<char> &init_file,
FileSystem::Instance().Resolve(init_file);
}
-static void GetHomeREPLInitFile(llvm::SmallVectorImpl<char> &init_file) {
- LanguageSet repl_languages = Language::GetLanguagesSupportingREPLs();
- LanguageType language = eLanguageTypeUnknown;
- if (auto main_repl_language = repl_languages.GetSingularLanguage())
- language = *main_repl_language;
- else
- return;
+static void GetHomeREPLInitFile(llvm::SmallVectorImpl<char> &init_file,
+ LanguageType language) {
+ if (language == eLanguageTypeUnknown) {
+ LanguageSet repl_languages = Language::GetLanguagesSupportingREPLs();
+ if (auto main_repl_language = repl_languages.GetSingularLanguage())
+ language = *main_repl_language;
+ else
+ return;
+ }
std::string init_file_name =
(llvm::Twine(".lldbinit-") +
@@ -2355,7 +2357,7 @@ void CommandInterpreter::SourceInitFileHome(CommandReturnObject &result,
llvm::SmallString<128> init_file;
if (is_repl)
- GetHomeREPLInitFile(init_file);
+ GetHomeREPLInitFile(init_file, GetDebugger().GetREPLLanguage());
if (init_file.empty())
GetHomeInitFile(init_file);
@@ -2839,12 +2841,10 @@ void CommandInterpreter::OutputHelpText(Stream &strm, llvm::StringRef word_text,
void CommandInterpreter::FindCommandsForApropos(
llvm::StringRef search_word, StringList &commands_found,
- StringList &commands_help, CommandObject::CommandMap &command_map) {
- CommandObject::CommandMap::const_iterator pos;
-
- for (pos = command_map.begin(); pos != command_map.end(); ++pos) {
- llvm::StringRef command_name = pos->first;
- CommandObject *cmd_obj = pos->second.get();
+ StringList &commands_help, const CommandObject::CommandMap &command_map) {
+ for (const auto &pair : command_map) {
+ llvm::StringRef command_name = pair.first;
+ CommandObject *cmd_obj = pair.second.get();
const bool search_short_help = true;
const bool search_long_help = false;
@@ -2854,14 +2854,19 @@ void CommandInterpreter::FindCommandsForApropos(
cmd_obj->HelpTextContainsWord(search_word, search_short_help,
search_long_help, search_syntax,
search_options)) {
- commands_found.AppendString(cmd_obj->GetCommandName());
+ commands_found.AppendString(command_name);
commands_help.AppendString(cmd_obj->GetHelp());
}
- if (cmd_obj->IsMultiwordObject()) {
- CommandObjectMultiword *cmd_multiword = cmd_obj->GetAsMultiwordCommand();
- FindCommandsForApropos(search_word, commands_found, commands_help,
- cmd_multiword->GetSubcommandDictionary());
+ if (auto *multiword_cmd = cmd_obj->GetAsMultiwordCommand()) {
+ StringList subcommands_found;
+ FindCommandsForApropos(search_word, subcommands_found, commands_help,
+ multiword_cmd->GetSubcommandDictionary());
+ for (const auto &subcommand_name : subcommands_found) {
+ std::string qualified_name =
+ (command_name + " " + subcommand_name).str();
+ commands_found.AppendString(qualified_name);
+ }
}
}
}
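
The recursion now qualifies subcommand hits with the parent command's name, so apropos output reads "breakpoint set" rather than a bare "set". A small standalone sketch of that qualification step; Cmd and FindMatches are illustrative stand-ins for the CommandObject machinery:

    #include <iostream>
    #include <string>
    #include <vector>

    struct Cmd {
      std::string name;
      std::string help;
      std::vector<Cmd> subcommands;
    };

    static void FindMatches(const std::string &word, const std::vector<Cmd> &cmds,
                            std::vector<std::string> &found) {
      for (const Cmd &cmd : cmds) {
        if (cmd.help.find(word) != std::string::npos)
          found.push_back(cmd.name);
        std::vector<std::string> sub_found;
        FindMatches(word, cmd.subcommands, sub_found);
        for (const std::string &sub : sub_found)
          found.push_back(cmd.name + " " + sub); // qualify with the parent name
      }
    }

    int main() {
      std::vector<Cmd> commands = {
          {"breakpoint", "commands for breakpoints",
           {{"set", "set a breakpoint", {}}}}};
      std::vector<std::string> found;
      FindMatches("breakpoint", commands, found);
      for (const std::string &name : found)
        std::cout << name << "\n"; // breakpoint, then breakpoint set
    }
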
diff --git a/contrib/llvm-project/lldb/source/Interpreter/OptionValueProperties.cpp b/contrib/llvm-project/lldb/source/Interpreter/OptionValueProperties.cpp
index 1a8f2f0ab180..6e6580574edf 100644
--- a/contrib/llvm-project/lldb/source/Interpreter/OptionValueProperties.cpp
+++ b/contrib/llvm-project/lldb/source/Interpreter/OptionValueProperties.cpp
@@ -226,6 +226,17 @@ OptionValueProperties::GetPropertyAtIndexAsOptionValueLanguage(
return nullptr;
}
+bool OptionValueProperties::SetPropertyAtIndexAsLanguage(
+ const ExecutionContext *exe_ctx, uint32_t idx, const LanguageType lang) {
+ const Property *property = GetPropertyAtIndex(exe_ctx, true, idx);
+ if (property) {
+ OptionValue *value = property->GetValue().get();
+ if (value)
+ return value->SetLanguageValue(lang);
+ }
+ return false;
+}
+
bool OptionValueProperties::GetPropertyAtIndexAsArgs(
const ExecutionContext *exe_ctx, uint32_t idx, Args &args) const {
const Property *property = GetPropertyAtIndex(exe_ctx, false, idx);
diff --git a/contrib/llvm-project/lldb/source/Interpreter/Options.cpp b/contrib/llvm-project/lldb/source/Interpreter/Options.cpp
index 4aa298f0382b..feebe338bc9a 100644
--- a/contrib/llvm-project/lldb/source/Interpreter/Options.cpp
+++ b/contrib/llvm-project/lldb/source/Interpreter/Options.cpp
@@ -403,7 +403,12 @@ void Options::GenerateOptionUsage(Stream &strm, CommandObject *cmd,
} else
name = "";
- strm.PutCString("\nCommand Options Usage:\n");
+ const uint32_t num_options = NumCommandOptions();
+ if (num_options == 0)
+ return;
+
+ if (!only_print_args)
+ strm.PutCString("\nCommand Options Usage:\n");
strm.IndentMore(2);
@@ -413,10 +418,6 @@ void Options::GenerateOptionUsage(Stream &strm, CommandObject *cmd,
// [options-for-level-1]
// etc.
- const uint32_t num_options = NumCommandOptions();
- if (num_options == 0)
- return;
-
uint32_t num_option_sets = GetRequiredOptions().size();
uint32_t i;
@@ -531,9 +532,9 @@ void Options::GenerateOptionUsage(Stream &strm, CommandObject *cmd,
strm << " " << arguments_str.GetString();
}
- strm.Printf("\n\n");
-
if (!only_print_args) {
+ strm.Printf("\n\n");
+
// Now print out all the detailed information about the various options:
// long form, short form and help text:
// -short <argument> ( --long_name <argument> )
diff --git a/contrib/llvm-project/lldb/source/Plugins/Disassembler/LLVMC/DisassemblerLLVMC.cpp b/contrib/llvm-project/lldb/source/Plugins/Disassembler/LLVMC/DisassemblerLLVMC.cpp
index 2cf32bdd3800..8c54219f0a14 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Disassembler/LLVMC/DisassemblerLLVMC.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Disassembler/LLVMC/DisassemblerLLVMC.cpp
@@ -1111,7 +1111,7 @@ DisassemblerLLVMC::DisassemblerLLVMC(const ArchSpec &arch,
triple.getSubArch() == llvm::Triple::NoSubArch)
triple.setArchName("armv8.7a");
- std::string features_str = "";
+ std::string features_str;
const char *triple_str = triple.getTriple().c_str();
// ARM Cortex M0-M7 devices only execute thumb instructions
diff --git a/contrib/llvm-project/lldb/source/Plugins/DynamicLoader/POSIX-DYLD/DynamicLoaderPOSIXDYLD.cpp b/contrib/llvm-project/lldb/source/Plugins/DynamicLoader/POSIX-DYLD/DynamicLoaderPOSIXDYLD.cpp
index d9cbcce22c52..188b6da7a712 100644
--- a/contrib/llvm-project/lldb/source/Plugins/DynamicLoader/POSIX-DYLD/DynamicLoaderPOSIXDYLD.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/DynamicLoader/POSIX-DYLD/DynamicLoaderPOSIXDYLD.cpp
@@ -338,7 +338,7 @@ bool DynamicLoaderPOSIXDYLD::SetRendezvousBreakpoint() {
dyld_break = target.CreateBreakpoint(
&containingModules, /*containingSourceFiles=*/nullptr,
DebugStateCandidates, eFunctionNameTypeFull, eLanguageTypeC,
- /*offset=*/0,
+ /*m_offset=*/0,
/*skip_prologue=*/eLazyBoolNo,
/*internal=*/true,
/*request_hardware=*/false);
@@ -348,7 +348,7 @@ bool DynamicLoaderPOSIXDYLD::SetRendezvousBreakpoint() {
dyld_break = target.CreateBreakpoint(
&containingModules, /*containingSourceFiles=*/nullptr,
DebugStateCandidates, eFunctionNameTypeFull, eLanguageTypeC,
- /*offset=*/0,
+ /*m_offset=*/0,
/*skip_prologue=*/eLazyBoolNo,
/*internal=*/true,
/*request_hardware=*/false);
diff --git a/contrib/llvm-project/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp b/contrib/llvm-project/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp
index a0cff3cc9bf8..51f34369c383 100644
--- a/contrib/llvm-project/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp
@@ -995,7 +995,8 @@ public:
void ProcessOverloadCandidates(Sema &S, unsigned CurrentArg,
OverloadCandidate *Candidates,
unsigned NumCandidates,
- SourceLocation OpenParLoc) override {
+ SourceLocation OpenParLoc,
+ bool Braced) override {
// At the moment we don't filter out any overloaded candidates.
}
diff --git a/contrib/llvm-project/lldb/source/Plugins/ExpressionParser/Clang/CxxModuleHandler.cpp b/contrib/llvm-project/lldb/source/Plugins/ExpressionParser/Clang/CxxModuleHandler.cpp
index 74dd04600b4b..fecffd1183f8 100644
--- a/contrib/llvm-project/lldb/source/Plugins/ExpressionParser/Clang/CxxModuleHandler.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/ExpressionParser/Clang/CxxModuleHandler.cpp
@@ -138,7 +138,7 @@ getEqualLocalDeclContext(Sema &sema, DeclContext *foreign_ctxt) {
// We currently only support building namespaces.
if (foreign_ctxt->isNamespace()) {
- NamedDecl *ns = llvm::dyn_cast<NamedDecl>(foreign_ctxt);
+ NamedDecl *ns = llvm::cast<NamedDecl>(foreign_ctxt);
llvm::StringRef ns_name = ns->getName();
auto lookup_result = emulateLookupInCtxt(sema, ns_name, *parent);
diff --git a/contrib/llvm-project/lldb/source/Plugins/Instruction/MIPS/EmulateInstructionMIPS.cpp b/contrib/llvm-project/lldb/source/Plugins/Instruction/MIPS/EmulateInstructionMIPS.cpp
index ea9c95c55cbb..4ef0a034b6dd 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Instruction/MIPS/EmulateInstructionMIPS.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Instruction/MIPS/EmulateInstructionMIPS.cpp
@@ -137,7 +137,7 @@ EmulateInstructionMIPS::EmulateInstructionMIPS(
break;
}
- std::string features = "";
+ std::string features;
uint32_t arch_flags = arch.GetFlags();
if (arch_flags & ArchSpec::eMIPSAse_msa)
features += "+msa,";
diff --git a/contrib/llvm-project/lldb/source/Plugins/Instruction/MIPS64/EmulateInstructionMIPS64.cpp b/contrib/llvm-project/lldb/source/Plugins/Instruction/MIPS64/EmulateInstructionMIPS64.cpp
index e5732a50f3f2..26736f4c58ba 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Instruction/MIPS64/EmulateInstructionMIPS64.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Instruction/MIPS64/EmulateInstructionMIPS64.cpp
@@ -137,7 +137,7 @@ EmulateInstructionMIPS64::EmulateInstructionMIPS64(
break;
}
- std::string features = "";
+ std::string features;
uint32_t arch_flags = arch.GetFlags();
if (arch_flags & ArchSpec::eMIPSAse_msa)
features += "+msa,";
diff --git a/contrib/llvm-project/lldb/source/Plugins/InstrumentationRuntime/MainThreadChecker/InstrumentationRuntimeMainThreadChecker.cpp b/contrib/llvm-project/lldb/source/Plugins/InstrumentationRuntime/MainThreadChecker/InstrumentationRuntimeMainThreadChecker.cpp
index dc8c7c96aa11..a5c23615309d 100644
--- a/contrib/llvm-project/lldb/source/Plugins/InstrumentationRuntime/MainThreadChecker/InstrumentationRuntimeMainThreadChecker.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/InstrumentationRuntime/MainThreadChecker/InstrumentationRuntimeMainThreadChecker.cpp
@@ -100,14 +100,14 @@ InstrumentationRuntimeMainThreadChecker::RetrieveReportData(
if (!apiname_ptr)
return StructuredData::ObjectSP();
- std::string apiName = "";
+ std::string apiName;
Status read_error;
target.ReadCStringFromMemory(apiname_ptr, apiName, read_error);
if (read_error.Fail())
return StructuredData::ObjectSP();
- std::string className = "";
- std::string selector = "";
+ std::string className;
+ std::string selector;
if (apiName.substr(0, 2) == "-[") {
size_t spacePos = apiName.find(' ');
if (spacePos != std::string::npos) {
diff --git a/contrib/llvm-project/lldb/source/Plugins/InstrumentationRuntime/TSan/InstrumentationRuntimeTSan.cpp b/contrib/llvm-project/lldb/source/Plugins/InstrumentationRuntime/TSan/InstrumentationRuntimeTSan.cpp
index aef10bb2a778..977d8e4dbe07 100644
--- a/contrib/llvm-project/lldb/source/Plugins/InstrumentationRuntime/TSan/InstrumentationRuntimeTSan.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/InstrumentationRuntime/TSan/InstrumentationRuntimeTSan.cpp
@@ -711,7 +711,7 @@ addr_t InstrumentationRuntimeTSan::GetMainRacyAddress(
std::string InstrumentationRuntimeTSan::GetLocationDescription(
StructuredData::ObjectSP report, addr_t &global_addr,
std::string &global_name, std::string &filename, uint32_t &line) {
- std::string result = "";
+ std::string result;
ProcessSP process_sp = GetProcessSP();
@@ -820,8 +820,8 @@ bool InstrumentationRuntimeTSan::NotifyBreakpointHit(
report->GetAsDictionary()->AddIntegerItem("memory_address", main_address);
addr_t global_addr = 0;
- std::string global_name = "";
- std::string location_filename = "";
+ std::string global_name;
+ std::string location_filename;
uint32_t location_line = 0;
std::string location_description = instance->GetLocationDescription(
report, global_addr, global_name, location_filename, location_line);
diff --git a/contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.cpp b/contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.cpp
index df61cc3853eb..0fb65f5a317d 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.cpp
@@ -579,6 +579,51 @@ static void LoadLibCxxFormatters(lldb::TypeCategoryImplSP cpp_category_sp) {
"std::__[[:alnum:]]+::allocator<wchar_t> >$"),
stl_summary_flags, true);
+ AddCXXSummary(cpp_category_sp,
+ lldb_private::formatters::LibcxxStringViewSummaryProviderASCII,
+ "std::string_view summary provider",
+ ConstString("^std::__[[:alnum:]]+::string_view$"),
+ stl_summary_flags, true);
+ AddCXXSummary(cpp_category_sp,
+ lldb_private::formatters::LibcxxStringViewSummaryProviderASCII,
+ "std::string_view summary provider",
+ ConstString("^std::__[[:alnum:]]+::basic_string_view<char, "
+ "std::__[[:alnum:]]+::char_traits<char> >$"),
+ stl_summary_flags, true);
+ AddCXXSummary(
+ cpp_category_sp,
+ lldb_private::formatters::LibcxxStringViewSummaryProviderASCII,
+ "std::string_view summary provider",
+ ConstString("^std::__[[:alnum:]]+::basic_string_view<unsigned char, "
+ "std::__[[:alnum:]]+::char_traits<unsigned char> >$"),
+ stl_summary_flags, true);
+
+ AddCXXSummary(cpp_category_sp,
+ lldb_private::formatters::LibcxxStringViewSummaryProviderUTF16,
+ "std::u16string_view summary provider",
+ ConstString("^std::__[[:alnum:]]+::basic_string_view<char16_t, "
+ "std::__[[:alnum:]]+::char_traits<char16_t> >$"),
+ stl_summary_flags, true);
+
+ AddCXXSummary(cpp_category_sp,
+ lldb_private::formatters::LibcxxStringViewSummaryProviderUTF32,
+ "std::u32string_view summary provider",
+ ConstString("^std::__[[:alnum:]]+::basic_string_view<char32_t, "
+ "std::__[[:alnum:]]+::char_traits<char32_t> >$"),
+ stl_summary_flags, true);
+
+ AddCXXSummary(cpp_category_sp,
+ lldb_private::formatters::LibcxxWStringViewSummaryProvider,
+ "std::wstring_view summary provider",
+ ConstString("^std::__[[:alnum:]]+::wstring_view$"),
+ stl_summary_flags, true);
+ AddCXXSummary(cpp_category_sp,
+ lldb_private::formatters::LibcxxWStringViewSummaryProvider,
+ "std::wstring_view summary provider",
+ ConstString("^std::__[[:alnum:]]+::basic_string_view<wchar_t, "
+ "std::__[[:alnum:]]+::char_traits<wchar_t> >$"),
+ stl_summary_flags, true);
+
SyntheticChildren::Flags stl_synth_flags;
stl_synth_flags.SetCascades(true).SetSkipPointers(false).SetSkipReferences(
false);
diff --git a/contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/GenericBitset.cpp b/contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/GenericBitset.cpp
index c8063915b178..d1d844bb4ca4 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/GenericBitset.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/GenericBitset.cpp
@@ -72,6 +72,7 @@ ConstString GenericBitsetFrontEnd::GetDataContainerMemberName() {
case StdLib::LibStdcpp:
return ConstString("_M_w");
}
+ llvm_unreachable("Unknown StdLib enum");
}
bool GenericBitsetFrontEnd::Update() {
diff --git a/contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/LibCxx.cpp b/contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/LibCxx.cpp
index b9aef0ae7d9e..21196393371e 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/LibCxx.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/LibCxx.cpp
@@ -19,6 +19,7 @@
#include "lldb/Target/ProcessStructReader.h"
#include "lldb/Target/SectionLoadList.h"
#include "lldb/Target/Target.h"
+#include "lldb/Utility/ConstString.h"
#include "lldb/Utility/DataBufferHeap.h"
#include "lldb/Utility/Endian.h"
#include "lldb/Utility/Status.h"
@@ -26,6 +27,7 @@
#include "Plugins/LanguageRuntime/CPlusPlus/CPPLanguageRuntime.h"
#include "Plugins/TypeSystem/Clang/TypeSystemClang.h"
+#include <tuple>
using namespace lldb;
using namespace lldb_private;
@@ -560,7 +562,7 @@ ExtractLibcxxStringInfo(ValueObject &valobj) {
return {};
ValueObjectSP layout_decider(
- D->GetChildAtIndexPath(llvm::ArrayRef<size_t>({0, 0})));
+ D->GetChildAtIndexPath(llvm::ArrayRef<size_t>({0, 0})));
// this child should exist
if (!layout_decider)
@@ -643,16 +645,10 @@ ExtractLibcxxStringInfo(ValueObject &valobj) {
return std::make_pair(size, location_sp);
}
-bool lldb_private::formatters::LibcxxWStringSummaryProvider(
- ValueObject &valobj, Stream &stream,
- const TypeSummaryOptions &summary_options) {
- auto string_info = ExtractLibcxxStringInfo(valobj);
- if (!string_info)
- return false;
- uint64_t size;
- ValueObjectSP location_sp;
- std::tie(size, location_sp) = *string_info;
-
+static bool
+LibcxxWStringSummaryProvider(ValueObject &valobj, Stream &stream,
+ const TypeSummaryOptions &summary_options,
+ ValueObjectSP location_sp, size_t size) {
if (size == 0) {
stream.Printf("L\"\"");
return true;
@@ -660,7 +656,6 @@ bool lldb_private::formatters::LibcxxWStringSummaryProvider(
if (!location_sp)
return false;
-
StringPrinter::ReadBufferAndDumpToStreamOptions options(valobj);
if (summary_options.GetCapping() == TypeSummaryCapping::eTypeSummaryCapped) {
const auto max_size = valobj.GetTargetSP()->GetMaximumSizeOfStringSummary();
@@ -714,10 +709,9 @@ bool lldb_private::formatters::LibcxxWStringSummaryProvider(
return false;
}
-template <StringPrinter::StringElementType element_type>
-bool LibcxxStringSummaryProvider(ValueObject &valobj, Stream &stream,
- const TypeSummaryOptions &summary_options,
- std::string prefix_token) {
+bool lldb_private::formatters::LibcxxWStringSummaryProvider(
+ ValueObject &valobj, Stream &stream,
+ const TypeSummaryOptions &summary_options) {
auto string_info = ExtractLibcxxStringInfo(valobj);
if (!string_info)
return false;
@@ -725,6 +719,17 @@ bool LibcxxStringSummaryProvider(ValueObject &valobj, Stream &stream,
ValueObjectSP location_sp;
std::tie(size, location_sp) = *string_info;
+ return ::LibcxxWStringSummaryProvider(valobj, stream, summary_options,
+ location_sp, size);
+}
+
+template <StringPrinter::StringElementType element_type>
+static bool
+LibcxxStringSummaryProvider(ValueObject &valobj, Stream &stream,
+ const TypeSummaryOptions &summary_options,
+ std::string prefix_token, ValueObjectSP location_sp,
+ uint64_t size) {
+
if (size == 0) {
stream.Printf("\"\"");
return true;
@@ -763,6 +768,21 @@ bool LibcxxStringSummaryProvider(ValueObject &valobj, Stream &stream,
}
template <StringPrinter::StringElementType element_type>
+static bool
+LibcxxStringSummaryProvider(ValueObject &valobj, Stream &stream,
+ const TypeSummaryOptions &summary_options,
+ std::string prefix_token) {
+ auto string_info = ExtractLibcxxStringInfo(valobj);
+ if (!string_info)
+ return false;
+ uint64_t size;
+ ValueObjectSP location_sp;
+ std::tie(size, location_sp) = *string_info;
+
+ return LibcxxStringSummaryProvider<element_type>(
+ valobj, stream, summary_options, prefix_token, location_sp, size);
+}
+template <StringPrinter::StringElementType element_type>
static bool formatStringImpl(ValueObject &valobj, Stream &stream,
const TypeSummaryOptions &summary_options,
std::string prefix_token) {
@@ -796,3 +816,83 @@ bool lldb_private::formatters::LibcxxStringSummaryProviderUTF32(
return formatStringImpl<StringPrinter::StringElementType::UTF32>(
valobj, stream, summary_options, "U");
}
+
+static std::tuple<bool, ValueObjectSP, size_t>
+LibcxxExtractStringViewData(ValueObject& valobj) {
+ ConstString g_data_name("__data");
+ ConstString g_size_name("__size");
+ auto dataobj = valobj.GetChildMemberWithName(g_data_name, true);
+ auto sizeobj = valobj.GetChildMemberWithName(g_size_name, true);
+
+ if (!dataobj || !sizeobj)
+ return std::make_tuple<bool,ValueObjectSP,size_t>(false, {}, {});
+
+ if (!dataobj->GetError().Success() || !sizeobj->GetError().Success())
+ return std::make_tuple<bool,ValueObjectSP,size_t>(false, {}, {});
+
+ bool success{false};
+ uint64_t size = sizeobj->GetValueAsUnsigned(0, &success);
+ if (!success)
+ return std::make_tuple<bool,ValueObjectSP,size_t>(false, {}, {});
+
+ return std::make_tuple(true,dataobj,size);
+}
+
+template <StringPrinter::StringElementType element_type>
+static bool formatStringViewImpl(ValueObject &valobj, Stream &stream,
+ const TypeSummaryOptions &summary_options,
+ std::string prefix_token) {
+
+ bool success;
+ ValueObjectSP dataobj;
+ size_t size;
+ std::tie(success, dataobj, size) = LibcxxExtractStringViewData(valobj);
+
+ if (!success) {
+ stream << "Summary Unavailable";
+ return true;
+ }
+
+ return LibcxxStringSummaryProvider<element_type>(
+ valobj, stream, summary_options, prefix_token, dataobj, size);
+}
+
+bool lldb_private::formatters::LibcxxStringViewSummaryProviderASCII(
+ ValueObject &valobj, Stream &stream,
+ const TypeSummaryOptions &summary_options) {
+ return formatStringViewImpl<StringPrinter::StringElementType::ASCII>(
+ valobj, stream, summary_options, "");
+}
+
+bool lldb_private::formatters::LibcxxStringViewSummaryProviderUTF16(
+ ValueObject &valobj, Stream &stream,
+ const TypeSummaryOptions &summary_options) {
+ return formatStringViewImpl<StringPrinter::StringElementType::UTF16>(
+ valobj, stream, summary_options, "u");
+}
+
+bool lldb_private::formatters::LibcxxStringViewSummaryProviderUTF32(
+ ValueObject &valobj, Stream &stream,
+ const TypeSummaryOptions &summary_options) {
+ return formatStringViewImpl<StringPrinter::StringElementType::UTF32>(
+ valobj, stream, summary_options, "U");
+}
+
+bool lldb_private::formatters::LibcxxWStringViewSummaryProvider(
+ ValueObject &valobj, Stream &stream,
+ const TypeSummaryOptions &summary_options) {
+
+ bool success;
+ ValueObjectSP dataobj;
+ size_t size;
+ std::tie(success, dataobj, size) = LibcxxExtractStringViewData(valobj);
+
+ if (!success) {
+ stream << "Summary Unavailable";
+ return true;
+ }
+
+
+ return ::LibcxxWStringSummaryProvider(valobj, stream, summary_options,
+ dataobj, size);
+}
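
The new providers locate the view's __data and __size members by name and feed them to the existing string printers. A rough standalone sketch of the member layout they assume; this is a layout illustration only, since the formatter looks the members up with GetChildMemberWithName rather than by offset:

    #include <cstddef>
    #include <iostream>
    #include <string_view>

    // Rough shape of libc++'s basic_string_view: a data pointer named __data
    // and an element count named __size.
    struct LibcxxStringViewLayout {
      const char *__data;
      std::size_t __size;
    };

    int main() {
      std::string_view sv = "hello";
      // The summary is the prefix token plus the quoted contents, so a char
      // view prints as "hello", char16_t as u"...", char32_t as U"...", and
      // wchar_t as L"...".
      std::cout << '"' << sv << '"' << "\n";
    }
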
diff --git a/contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/LibCxx.h b/contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/LibCxx.h
index 80dc71787ceb..0f166ae24912 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/LibCxx.h
+++ b/contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/LibCxx.h
@@ -34,6 +34,22 @@ bool LibcxxWStringSummaryProvider(
ValueObject &valobj, Stream &stream,
const TypeSummaryOptions &options); // libc++ std::wstring
+bool LibcxxStringViewSummaryProviderASCII(
+ ValueObject &valueObj, Stream &stream,
+ const TypeSummaryOptions &summary_options); // libc++ std::string_view
+
+bool LibcxxStringViewSummaryProviderUTF16(
+ ValueObject &valobj, Stream &stream,
+ const TypeSummaryOptions &summary_options); // libc++ std::u16string_view
+
+bool LibcxxStringViewSummaryProviderUTF32(
+ ValueObject &valobj, Stream &stream,
+ const TypeSummaryOptions &summary_options); // libc++ std::u32string_view
+
+bool LibcxxWStringViewSummaryProvider(
+ ValueObject &valobj, Stream &stream,
+ const TypeSummaryOptions &options); // libc++ std::wstring_view
+
bool LibcxxOptionalSummaryProvider(
ValueObject &valobj, Stream &stream,
const TypeSummaryOptions &options); // libc++ std::optional<>
diff --git a/contrib/llvm-project/lldb/source/Plugins/Language/ObjC/NSString.cpp b/contrib/llvm-project/lldb/source/Plugins/Language/ObjC/NSString.cpp
index 2b5161e781f2..61705c866778 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Language/ObjC/NSString.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Language/ObjC/NSString.cpp
@@ -8,14 +8,13 @@
#include "NSString.h"
-#include "Plugins/TypeSystem/Clang/TypeSystemClang.h"
#include "lldb/Core/ValueObject.h"
#include "lldb/Core/ValueObjectConstResult.h"
#include "lldb/DataFormatters/FormattersHelpers.h"
#include "lldb/DataFormatters/StringPrinter.h"
#include "lldb/Target/Language.h"
-#include "lldb/Target/ProcessStructReader.h"
#include "lldb/Target/Target.h"
+#include "lldb/Utility/ConstString.h"
#include "lldb/Utility/DataBufferHeap.h"
#include "lldb/Utility/Endian.h"
#include "lldb/Utility/Status.h"
@@ -31,24 +30,6 @@ NSString_Additionals::GetAdditionalSummaries() {
return g_map;
}
-static CompilerType GetNSPathStore2Type(Target &target) {
- static ConstString g_type_name("__lldb_autogen_nspathstore2");
-
- TypeSystemClang *ast_ctx = ScratchTypeSystemClang::GetForTarget(target);
-
- if (!ast_ctx)
- return CompilerType();
-
- CompilerType voidstar =
- ast_ctx->GetBasicType(lldb::eBasicTypeVoid).GetPointerType();
- CompilerType uint32 =
- ast_ctx->GetBuiltinTypeForEncodingAndBitSize(eEncodingUint, 32);
-
- return ast_ctx->GetOrCreateStructForIdentifier(
- g_type_name,
- {{"isa", voidstar}, {"lengthAndRef", uint32}, {"buffer", voidstar}});
-}
-
bool lldb_private::formatters::NSStringSummaryProvider(
ValueObject &valobj, Stream &stream,
const TypeSummaryOptions &summary_options) {
@@ -229,11 +210,17 @@ bool lldb_private::formatters::NSStringSummaryProvider(
return StringPrinter::ReadStringAndDumpToStream<
StringPrinter::StringElementType::UTF16>(options);
} else if (is_path_store) {
- ProcessStructReader reader(valobj.GetProcessSP().get(),
- valobj.GetValueAsUnsigned(0),
- GetNSPathStore2Type(*valobj.GetTargetSP()));
- explicit_length =
- reader.GetField<uint32_t>(ConstString("lengthAndRef")) >> 20;
+ // _lengthAndRefCount is the first ivar of NSPathStore2 (after the isa).
+ uint64_t length_ivar_offset = 1 * ptr_size;
+ CompilerType length_type = valobj.GetCompilerType().GetBasicTypeFromAST(
+ lldb::eBasicTypeUnsignedInt);
+ ValueObjectSP length_valobj_sp =
+ valobj.GetSyntheticChildAtOffset(length_ivar_offset, length_type, true,
+ ConstString("_lengthAndRefCount"));
+ if (!length_valobj_sp)
+ return false;
+ // Get the length out of _lengthAndRefCount.
+ explicit_length = length_valobj_sp->GetValueAsUnsigned(0) >> 20;
lldb::addr_t location = valobj.GetValueAsUnsigned(0) + ptr_size + 4;
options.SetLocation(location);
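
A small sketch of the bit extraction the new code performs, assuming the length lives in the bits above the low 20 (which, per the shift above, presumably hold the reference count):

    #include <cinttypes>
    #include <cstdio>

    int main() {
      // The string length sits in the bits above the low 20 of
      // _lengthAndRefCount, so shifting right by 20 recovers it.
      uint32_t length_and_ref_count = (uint32_t{7} << 20) | 0x3;
      uint32_t length = length_and_ref_count >> 20;
      std::printf("%" PRIu32 "\n", length); // 7
    }
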
diff --git a/contrib/llvm-project/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.cpp b/contrib/llvm-project/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.cpp
index bd6b6335ca8c..f2cf25f93eb2 100644
--- a/contrib/llvm-project/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.cpp
@@ -671,7 +671,7 @@ AppleObjCRuntimeV2::AppleObjCRuntimeV2(Process *process,
static const ConstString g_objc_copyRealizedClassList(
"_ZL33objc_copyRealizedClassList_nolockPj");
m_has_objc_copyRealizedClassList = HasSymbol(g_objc_copyRealizedClassList);
-
+ WarnIfNoExpandedSharedCache();
RegisterObjCExceptionRecognizer(process);
}
@@ -2355,6 +2355,32 @@ void AppleObjCRuntimeV2::WarnIfNoClassesCached(
}
}
+void AppleObjCRuntimeV2::WarnIfNoExpandedSharedCache() {
+ if (!m_objc_module_sp)
+ return;
+
+ ObjectFile *object_file = m_objc_module_sp->GetObjectFile();
+ if (!object_file)
+ return;
+
+ if (!object_file->IsInMemory())
+ return;
+
+ Target &target = GetProcess()->GetTarget();
+ Debugger &debugger = target.GetDebugger();
+ if (auto stream = debugger.GetAsyncOutputStream()) {
+ const char *msg = "read from the shared cache";
+ if (PlatformSP platform_sp = target.GetPlatform())
+ msg = platform_sp->IsHost()
+ ? "read from the host's in-memory shared cache"
+ : "find the on-disk shared cache for this device";
+ stream->Printf("warning: libobjc.A.dylib is being read from process "
+ "memory. This indicates that LLDB could not %s. This will "
+ "likely reduce debugging performance.\n",
+ msg);
+ }
+}
+
DeclVendor *AppleObjCRuntimeV2::GetDeclVendor() {
if (!m_decl_vendor_up)
m_decl_vendor_up = std::make_unique<AppleObjCDeclVendor>(*this);
diff --git a/contrib/llvm-project/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.h b/contrib/llvm-project/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.h
index 6266634e64c5..e1a6b7cde48a 100644
--- a/contrib/llvm-project/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.h
+++ b/contrib/llvm-project/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.h
@@ -399,6 +399,7 @@ private:
};
void WarnIfNoClassesCached(SharedCacheWarningReason reason);
+ void WarnIfNoExpandedSharedCache();
lldb::addr_t GetSharedCacheReadOnlyAddress();
lldb::addr_t GetSharedCacheBaseAddress();
diff --git a/contrib/llvm-project/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCTrampolineHandler.cpp b/contrib/llvm-project/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCTrampolineHandler.cpp
index aa6306bef8b9..ff41f187ba9d 100644
--- a/contrib/llvm-project/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCTrampolineHandler.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCTrampolineHandler.cpp
@@ -889,8 +889,8 @@ AppleObjCTrampolineHandler::GetStepThroughDispatchPlan(Thread &thread,
ThreadPlanSP ret_plan_sp;
lldb::addr_t curr_pc = thread.GetRegisterContext()->GetPC();
- DispatchFunction vtable_dispatch
- = {"vtable", 0, false, false, DispatchFunction::eFixUpFixed};
+ DispatchFunction vtable_dispatch = {"vtable", false, false, false,
+ DispatchFunction::eFixUpFixed};
// First step is to look and see if we are in one of the known ObjC
// dispatch functions. We've already compiled a table of same, so
diff --git a/contrib/llvm-project/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCTypeEncodingParser.cpp b/contrib/llvm-project/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCTypeEncodingParser.cpp
index c6cb2be981a7..40a0ea3e97a4 100644
--- a/contrib/llvm-project/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCTypeEncodingParser.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCTypeEncodingParser.cpp
@@ -59,7 +59,7 @@ uint32_t AppleObjCTypeEncodingParser::ReadNumber(StringLexer &type) {
// "{CGRect=\"origin\"{CGPoint=\"x\"d\"y\"d}\"size\"{CGSize=\"width\"d\"height\"d}}"
AppleObjCTypeEncodingParser::StructElement::StructElement()
- : name(""), type(clang::QualType()) {}
+ : type(clang::QualType()) {}
AppleObjCTypeEncodingParser::StructElement
AppleObjCTypeEncodingParser::ReadStructElement(TypeSystemClang &ast_ctx,
diff --git a/contrib/llvm-project/lldb/source/Plugins/ObjectFile/Minidump/MinidumpFileBuilder.cpp b/contrib/llvm-project/lldb/source/Plugins/ObjectFile/Minidump/MinidumpFileBuilder.cpp
index a70e6a079f76..516bcb21b019 100644
--- a/contrib/llvm-project/lldb/source/Plugins/ObjectFile/Minidump/MinidumpFileBuilder.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/ObjectFile/Minidump/MinidumpFileBuilder.cpp
@@ -115,7 +115,7 @@ Status MinidumpFileBuilder::AddSystemInfo(const llvm::Triple &target_triple) {
sys_info.PlatformId = platform_id;
m_data.AppendData(&sys_info, sizeof(llvm::minidump::SystemInfo));
- std::string csd_string = "";
+ std::string csd_string;
error = WriteString(csd_string, &m_data);
if (error.Fail()) {
@@ -272,7 +272,8 @@ Status MinidumpFileBuilder::AddModuleList(Target &target) {
mod->GetObjectFile()->GetBaseAddress().GetLoadAddress(&target));
m.SizeOfImage = static_cast<llvm::support::ulittle32_t>(mod_size);
m.Checksum = static_cast<llvm::support::ulittle32_t>(0);
- m.TimeDateStamp = static_cast<llvm::support::ulittle32_t>(std::time(0));
+ m.TimeDateStamp =
+ static_cast<llvm::support::ulittle32_t>(std::time(nullptr));
m.ModuleNameRVA = static_cast<llvm::support::ulittle32_t>(
size_before + module_stream_size + helper_data.GetByteSize());
m.VersionInfo = info;
@@ -719,7 +720,7 @@ Status MinidumpFileBuilder::Dump(lldb::FileUP &core_file) const {
header.Checksum = static_cast<llvm::support::ulittle32_t>(
0u), // not used in most of the writers
header.TimeDateStamp =
- static_cast<llvm::support::ulittle32_t>(std::time(0));
+ static_cast<llvm::support::ulittle32_t>(std::time(nullptr));
header.Flags =
static_cast<llvm::support::ulittle64_t>(0u); // minidump normal flag
diff --git a/contrib/llvm-project/lldb/source/Plugins/ObjectFile/PDB/ObjectFilePDB.h b/contrib/llvm-project/lldb/source/Plugins/ObjectFile/PDB/ObjectFilePDB.h
index da999d2b55a7..a1a3eeb656dd 100644
--- a/contrib/llvm-project/lldb/source/Plugins/ObjectFile/PDB/ObjectFilePDB.h
+++ b/contrib/llvm-project/lldb/source/Plugins/ObjectFile/PDB/ObjectFilePDB.h
@@ -102,4 +102,4 @@ private:
};
} // namespace lldb_private
-#endif // LLDB_PLUGINS_OBJECTFILE_PDB_OBJECTFILEPDB_H
+#endif // LLDB_SOURCE_PLUGINS_OBJECTFILE_PDB_OBJECTFILEPDB_H
diff --git a/contrib/llvm-project/lldb/source/Plugins/Platform/FreeBSD/PlatformFreeBSD.cpp b/contrib/llvm-project/lldb/source/Plugins/Platform/FreeBSD/PlatformFreeBSD.cpp
index 754d06de7cb9..2bf0a44e4a3e 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Platform/FreeBSD/PlatformFreeBSD.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Platform/FreeBSD/PlatformFreeBSD.cpp
@@ -183,3 +183,97 @@ MmapArgList PlatformFreeBSD::GetMmapArgumentList(const ArchSpec &arch,
args.push_back(0);
return args;
}
+
+CompilerType PlatformFreeBSD::GetSiginfoType(const llvm::Triple &triple) {
+ if (!m_type_system_up)
+ m_type_system_up.reset(new TypeSystemClang("siginfo", triple));
+ TypeSystemClang *ast = m_type_system_up.get();
+
+ // generic types
+ CompilerType int_type = ast->GetBasicType(eBasicTypeInt);
+ CompilerType uint_type = ast->GetBasicType(eBasicTypeUnsignedInt);
+ CompilerType long_type = ast->GetBasicType(eBasicTypeLong);
+ CompilerType voidp_type = ast->GetBasicType(eBasicTypeVoid).GetPointerType();
+
+ // platform-specific types
+ CompilerType &pid_type = int_type;
+ CompilerType &uid_type = uint_type;
+
+ CompilerType sigval_type = ast->CreateRecordType(
+ nullptr, OptionalClangModuleID(), lldb::eAccessPublic, "__lldb_sigval_t",
+ clang::TTK_Union, lldb::eLanguageTypeC);
+ ast->StartTagDeclarationDefinition(sigval_type);
+ ast->AddFieldToRecordType(sigval_type, "sival_int", int_type,
+ lldb::eAccessPublic, 0);
+ ast->AddFieldToRecordType(sigval_type, "sival_ptr", voidp_type,
+ lldb::eAccessPublic, 0);
+ ast->CompleteTagDeclarationDefinition(sigval_type);
+
+ // siginfo_t
+ CompilerType siginfo_type = ast->CreateRecordType(
+ nullptr, OptionalClangModuleID(), lldb::eAccessPublic, "__lldb_siginfo_t",
+ clang::TTK_Struct, lldb::eLanguageTypeC);
+ ast->StartTagDeclarationDefinition(siginfo_type);
+ ast->AddFieldToRecordType(siginfo_type, "si_signo", int_type,
+ lldb::eAccessPublic, 0);
+ ast->AddFieldToRecordType(siginfo_type, "si_errno", int_type,
+ lldb::eAccessPublic, 0);
+ ast->AddFieldToRecordType(siginfo_type, "si_code", int_type,
+ lldb::eAccessPublic, 0);
+ ast->AddFieldToRecordType(siginfo_type, "si_pid", pid_type,
+ lldb::eAccessPublic, 0);
+ ast->AddFieldToRecordType(siginfo_type, "si_uid", uid_type,
+ lldb::eAccessPublic, 0);
+ ast->AddFieldToRecordType(siginfo_type, "si_status", int_type,
+ lldb::eAccessPublic, 0);
+ ast->AddFieldToRecordType(siginfo_type, "si_addr", voidp_type,
+ lldb::eAccessPublic, 0);
+ ast->AddFieldToRecordType(siginfo_type, "si_value", sigval_type,
+ lldb::eAccessPublic, 0);
+
+ // union used to hold the signal data
+ CompilerType union_type = ast->CreateRecordType(
+ nullptr, OptionalClangModuleID(), lldb::eAccessPublic, "",
+ clang::TTK_Union, lldb::eLanguageTypeC);
+ ast->StartTagDeclarationDefinition(union_type);
+
+ ast->AddFieldToRecordType(
+ union_type, "_fault",
+ ast->CreateStructForIdentifier(ConstString(),
+ {
+ {"_trapno", int_type},
+ }),
+ lldb::eAccessPublic, 0);
+
+ ast->AddFieldToRecordType(
+ union_type, "_timer",
+ ast->CreateStructForIdentifier(ConstString(),
+ {
+ {"_timerid", int_type},
+ {"_overrun", int_type},
+ }),
+ lldb::eAccessPublic, 0);
+
+ ast->AddFieldToRecordType(
+ union_type, "_mesgq",
+ ast->CreateStructForIdentifier(ConstString(),
+ {
+ {"_mqd", int_type},
+ }),
+ lldb::eAccessPublic, 0);
+
+ ast->AddFieldToRecordType(
+ union_type, "_poll",
+ ast->CreateStructForIdentifier(ConstString(),
+ {
+ {"_band", long_type},
+ }),
+ lldb::eAccessPublic, 0);
+
+ ast->CompleteTagDeclarationDefinition(union_type);
+ ast->AddFieldToRecordType(siginfo_type, "_reason", union_type,
+ lldb::eAccessPublic, 0);
+
+ ast->CompleteTagDeclarationDefinition(siginfo_type);
+ return siginfo_type;
+}
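
GetSiginfoType above assembles, through TypeSystemClang, a debugger-side record equivalent to FreeBSD's siginfo_t so that signal data can be decoded without target headers. As a rough orientation aid only (field list taken from the hunk above; the real sys/_siginfo.h layout has additional members), the constructed type corresponds to something like:

// Hypothetical C-style mirror of the record built above; not the actual FreeBSD header.
union __lldb_sigval_t {
  int   sival_int;
  void *sival_ptr;
};

struct __lldb_siginfo_t {
  int      si_signo;
  int      si_errno;
  int      si_code;
  int      si_pid;    // pid_t modeled as int in the patch
  unsigned si_uid;    // uid_t modeled as unsigned int
  int      si_status;
  void    *si_addr;
  __lldb_sigval_t si_value;
  union {
    struct { int _trapno; } _fault;
    struct { int _timerid; int _overrun; } _timer;
    struct { int _mqd; } _mesgq;
    struct { long _band; } _poll;
  } _reason;
};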
diff --git a/contrib/llvm-project/lldb/source/Plugins/Platform/FreeBSD/PlatformFreeBSD.h b/contrib/llvm-project/lldb/source/Plugins/Platform/FreeBSD/PlatformFreeBSD.h
index fd37b13de017..7f9dfd87a59a 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Platform/FreeBSD/PlatformFreeBSD.h
+++ b/contrib/llvm-project/lldb/source/Plugins/Platform/FreeBSD/PlatformFreeBSD.h
@@ -10,6 +10,7 @@
#define LLDB_SOURCE_PLUGINS_PLATFORM_FREEBSD_PLATFORMFREEBSD_H
#include "Plugins/Platform/POSIX/PlatformPOSIX.h"
+#include "Plugins/TypeSystem/Clang/TypeSystemClang.h"
namespace lldb_private {
namespace platform_freebsd {
@@ -53,7 +54,12 @@ public:
unsigned flags, lldb::addr_t fd,
lldb::addr_t offset) override;
+ CompilerType GetSiginfoType(const llvm::Triple &triple) override;
+
std::vector<ArchSpec> m_supported_architectures;
+
+private:
+ std::unique_ptr<TypeSystemClang> m_type_system_up;
};
} // namespace platform_freebsd
diff --git a/contrib/llvm-project/lldb/source/Plugins/Platform/NetBSD/PlatformNetBSD.cpp b/contrib/llvm-project/lldb/source/Plugins/Platform/NetBSD/PlatformNetBSD.cpp
index 552b3890615c..ddba64bc5d11 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Platform/NetBSD/PlatformNetBSD.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Platform/NetBSD/PlatformNetBSD.cpp
@@ -202,3 +202,144 @@ MmapArgList PlatformNetBSD::GetMmapArgumentList(const ArchSpec &arch,
MmapArgList args({addr, length, prot, flags_platform, fd, offset});
return args;
}
+
+CompilerType PlatformNetBSD::GetSiginfoType(const llvm::Triple &triple) {
+ if (!m_type_system_up)
+ m_type_system_up.reset(new TypeSystemClang("siginfo", triple));
+ TypeSystemClang *ast = m_type_system_up.get();
+
+ // generic types
+ CompilerType int_type = ast->GetBasicType(eBasicTypeInt);
+ CompilerType uint_type = ast->GetBasicType(eBasicTypeUnsignedInt);
+ CompilerType long_type = ast->GetBasicType(eBasicTypeLong);
+ CompilerType long_long_type = ast->GetBasicType(eBasicTypeLongLong);
+ CompilerType voidp_type = ast->GetBasicType(eBasicTypeVoid).GetPointerType();
+
+ // platform-specific types
+ CompilerType &pid_type = int_type;
+ CompilerType &uid_type = uint_type;
+ CompilerType &clock_type = uint_type;
+ CompilerType &lwpid_type = int_type;
+
+ CompilerType sigval_type = ast->CreateRecordType(
+ nullptr, OptionalClangModuleID(), lldb::eAccessPublic, "__lldb_sigval_t",
+ clang::TTK_Union, lldb::eLanguageTypeC);
+ ast->StartTagDeclarationDefinition(sigval_type);
+ ast->AddFieldToRecordType(sigval_type, "sival_int", int_type,
+ lldb::eAccessPublic, 0);
+ ast->AddFieldToRecordType(sigval_type, "sival_ptr", voidp_type,
+ lldb::eAccessPublic, 0);
+ ast->CompleteTagDeclarationDefinition(sigval_type);
+
+ CompilerType ptrace_option_type = ast->CreateRecordType(
+ nullptr, OptionalClangModuleID(), lldb::eAccessPublic, "",
+ clang::TTK_Union, lldb::eLanguageTypeC);
+ ast->StartTagDeclarationDefinition(ptrace_option_type);
+ ast->AddFieldToRecordType(ptrace_option_type, "_pe_other_pid", pid_type,
+ lldb::eAccessPublic, 0);
+ ast->AddFieldToRecordType(ptrace_option_type, "_pe_lwp", lwpid_type,
+ lldb::eAccessPublic, 0);
+ ast->CompleteTagDeclarationDefinition(ptrace_option_type);
+
+ // siginfo_t
+ CompilerType siginfo_type = ast->CreateRecordType(
+ nullptr, OptionalClangModuleID(), lldb::eAccessPublic, "__lldb_siginfo_t",
+ clang::TTK_Union, lldb::eLanguageTypeC);
+ ast->StartTagDeclarationDefinition(siginfo_type);
+
+ // struct _ksiginfo
+ CompilerType ksiginfo_type = ast->CreateRecordType(
+ nullptr, OptionalClangModuleID(), lldb::eAccessPublic, "",
+ clang::TTK_Struct, lldb::eLanguageTypeC);
+ ast->StartTagDeclarationDefinition(ksiginfo_type);
+ ast->AddFieldToRecordType(ksiginfo_type, "_signo", int_type,
+ lldb::eAccessPublic, 0);
+ ast->AddFieldToRecordType(ksiginfo_type, "_code", int_type,
+ lldb::eAccessPublic, 0);
+ ast->AddFieldToRecordType(ksiginfo_type, "_errno", int_type,
+ lldb::eAccessPublic, 0);
+
+ // the structure is padded on 64-bit arches to fix alignment
+ if (triple.isArch64Bit())
+ ast->AddFieldToRecordType(ksiginfo_type, "__pad0", int_type,
+ lldb::eAccessPublic, 0);
+
+ // union used to hold the signal data
+ CompilerType union_type = ast->CreateRecordType(
+ nullptr, OptionalClangModuleID(), lldb::eAccessPublic, "",
+ clang::TTK_Union, lldb::eLanguageTypeC);
+ ast->StartTagDeclarationDefinition(union_type);
+
+ ast->AddFieldToRecordType(
+ union_type, "_rt",
+ ast->CreateStructForIdentifier(ConstString(),
+ {
+ {"_pid", pid_type},
+ {"_uid", uid_type},
+ {"_value", sigval_type},
+ }),
+ lldb::eAccessPublic, 0);
+
+ ast->AddFieldToRecordType(
+ union_type, "_child",
+ ast->CreateStructForIdentifier(ConstString(),
+ {
+ {"_pid", pid_type},
+ {"_uid", uid_type},
+ {"_status", int_type},
+ {"_utime", clock_type},
+ {"_stime", clock_type},
+ }),
+ lldb::eAccessPublic, 0);
+
+ ast->AddFieldToRecordType(
+ union_type, "_fault",
+ ast->CreateStructForIdentifier(ConstString(),
+ {
+ {"_addr", voidp_type},
+ {"_trap", int_type},
+ {"_trap2", int_type},
+ {"_trap3", int_type},
+ }),
+ lldb::eAccessPublic, 0);
+
+ ast->AddFieldToRecordType(
+ union_type, "_poll",
+ ast->CreateStructForIdentifier(ConstString(),
+ {
+ {"_band", long_type},
+ {"_fd", int_type},
+ }),
+ lldb::eAccessPublic, 0);
+
+ ast->AddFieldToRecordType(union_type, "_syscall",
+ ast->CreateStructForIdentifier(
+ ConstString(),
+ {
+ {"_sysnum", int_type},
+ {"_retval", int_type.GetArrayType(2)},
+ {"_error", int_type},
+ {"_args", long_long_type.GetArrayType(8)},
+ }),
+ lldb::eAccessPublic, 0);
+
+ ast->AddFieldToRecordType(
+ union_type, "_ptrace_state",
+ ast->CreateStructForIdentifier(ConstString(),
+ {
+ {"_pe_report_event", int_type},
+ {"_option", ptrace_option_type},
+ }),
+ lldb::eAccessPublic, 0);
+
+ ast->CompleteTagDeclarationDefinition(union_type);
+ ast->AddFieldToRecordType(ksiginfo_type, "_reason", union_type,
+ lldb::eAccessPublic, 0);
+
+ ast->CompleteTagDeclarationDefinition(ksiginfo_type);
+ ast->AddFieldToRecordType(siginfo_type, "_info", ksiginfo_type,
+ lldb::eAccessPublic, 0);
+
+ ast->CompleteTagDeclarationDefinition(siginfo_type);
+ return siginfo_type;
+}
diff --git a/contrib/llvm-project/lldb/source/Plugins/Platform/NetBSD/PlatformNetBSD.h b/contrib/llvm-project/lldb/source/Plugins/Platform/NetBSD/PlatformNetBSD.h
index 7158fbd26efb..433cf6653126 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Platform/NetBSD/PlatformNetBSD.h
+++ b/contrib/llvm-project/lldb/source/Plugins/Platform/NetBSD/PlatformNetBSD.h
@@ -10,6 +10,7 @@
#define LLDB_SOURCE_PLUGINS_PLATFORM_NETBSD_PLATFORMNETBSD_H
#include "Plugins/Platform/POSIX/PlatformPOSIX.h"
+#include "Plugins/TypeSystem/Clang/TypeSystemClang.h"
namespace lldb_private {
namespace platform_netbsd {
@@ -55,7 +56,12 @@ public:
unsigned flags, lldb::addr_t fd,
lldb::addr_t offset) override;
+ CompilerType GetSiginfoType(const llvm::Triple &triple) override;
+
std::vector<ArchSpec> m_supported_architectures;
+
+private:
+ std::unique_ptr<TypeSystemClang> m_type_system_up;
};
} // namespace platform_netbsd
diff --git a/contrib/llvm-project/lldb/source/Plugins/Platform/QemuUser/PlatformQemuUser.cpp b/contrib/llvm-project/lldb/source/Plugins/Platform/QemuUser/PlatformQemuUser.cpp
index 67c9484680a4..dd7546d8fa15 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Platform/QemuUser/PlatformQemuUser.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Platform/QemuUser/PlatformQemuUser.cpp
@@ -162,7 +162,10 @@ lldb::ProcessSP PlatformQemuUser::DebugProcess(ProcessLaunchInfo &launch_info,
Target &target, Status &error) {
Log *log = GetLogIfAnyCategoriesSet(LIBLLDB_LOG_PLATFORM);
- std::string qemu = GetGlobalProperties().GetEmulatorPath().GetPath();
+ FileSpec qemu = GetGlobalProperties().GetEmulatorPath();
+ if (!qemu)
+ qemu.SetPath(("qemu-" + GetGlobalProperties().GetArchitecture()).str());
+ FileSystem::Instance().ResolveExecutableLocation(qemu);
llvm::SmallString<0> socket_model, socket_path;
HostInfo::GetProcessTempDir().GetPath(socket_model);
@@ -171,7 +174,11 @@ lldb::ProcessSP PlatformQemuUser::DebugProcess(ProcessLaunchInfo &launch_info,
llvm::sys::fs::createUniquePath(socket_model, socket_path, false);
} while (FileSystem::Instance().Exists(socket_path));
- Args args({qemu, "-g", socket_path});
+ Args args({qemu.GetPath(), "-g", socket_path});
+ if (!launch_info.GetArg0().empty()) {
+ args.AppendArgument("-0");
+ args.AppendArgument(launch_info.GetArg0());
+ }
args.AppendArguments(GetGlobalProperties().GetEmulatorArgs());
args.AppendArgument("--");
args.AppendArgument(launch_info.GetExecutableFile().GetPath());
@@ -184,6 +191,8 @@ lldb::ProcessSP PlatformQemuUser::DebugProcess(ProcessLaunchInfo &launch_info,
launch_info.SetArguments(args, true);
Environment emulator_env = Host::GetEnvironment();
+ if (ConstString sysroot = GetSDKRootDirectory())
+ emulator_env["QEMU_LD_PREFIX"] = sysroot.GetStringRef().str();
for (const auto &KV : GetGlobalProperties().GetEmulatorEnvVars())
emulator_env[KV.first()] = KV.second;
launch_info.GetEnvironment() = ComputeLaunchEnvironment(
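
The PlatformQemuUser change makes the emulator path optional: when the property is unset, the binary name is derived as "qemu-" plus the configured architecture and resolved through PATH, argv[0] is forwarded via "-0", and QEMU_LD_PREFIX is exported from the SDK sysroot. A standalone sketch of just the name-derivation and PATH lookup (helper name and logic are illustrative, not the LLDB FileSystem API):

#include <cstdlib>
#include <filesystem>
#include <optional>
#include <sstream>
#include <string>
#include <system_error>

namespace fs = std::filesystem;

// Resolve an emulator binary: use the configured path if given, otherwise
// fall back to "qemu-<arch>" and search the PATH environment variable.
std::optional<fs::path> ResolveEmulator(const std::string &configured,
                                        const std::string &arch) {
  std::string name = configured.empty() ? "qemu-" + arch : configured;
  if (name.find('/') != std::string::npos)
    return fs::path(name); // explicit path, use as-is
  const char *path_env = std::getenv("PATH");
  if (!path_env)
    return std::nullopt;
  std::stringstream ss(path_env);
  std::string dir;
  while (std::getline(ss, dir, ':')) {
    fs::path candidate = fs::path(dir) / name;
    std::error_code ec;
    if (fs::exists(candidate, ec))
      return candidate;
  }
  return std::nullopt;
}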
diff --git a/contrib/llvm-project/lldb/source/Plugins/Platform/QemuUser/PlatformQemuUser.h b/contrib/llvm-project/lldb/source/Plugins/Platform/QemuUser/PlatformQemuUser.h
index 71df1b7b7811..c5439e126db1 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Platform/QemuUser/PlatformQemuUser.h
+++ b/contrib/llvm-project/lldb/source/Plugins/Platform/QemuUser/PlatformQemuUser.h
@@ -47,6 +47,14 @@ public:
Environment GetEnvironment() override;
+ MmapArgList GetMmapArgumentList(const ArchSpec &arch, lldb::addr_t addr,
+ lldb::addr_t length, unsigned prot,
+ unsigned flags, lldb::addr_t fd,
+ lldb::addr_t offset) override {
+ return Platform::GetHostPlatform()->GetMmapArgumentList(
+ arch, addr, length, prot, flags, fd, offset);
+ }
+
private:
static lldb::PlatformSP CreateInstance(bool force, const ArchSpec *arch);
static void DebuggerInitialize(Debugger &debugger);
diff --git a/contrib/llvm-project/lldb/source/Plugins/Platform/QemuUser/PlatformQemuUserProperties.td b/contrib/llvm-project/lldb/source/Plugins/Platform/QemuUser/PlatformQemuUserProperties.td
index 4e8fbcfd6760..c7ec4bbc6e78 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Platform/QemuUser/PlatformQemuUserProperties.td
+++ b/contrib/llvm-project/lldb/source/Plugins/Platform/QemuUser/PlatformQemuUserProperties.td
@@ -8,7 +8,7 @@ let Definition = "platformqemuuser" in {
def EmulatorPath: Property<"emulator-path", "FileSpec">,
Global,
DefaultStringValue<"">,
- Desc<"Path to the emulator binary.">;
+ Desc<"Path to the emulator binary. If the path does not contain a directory separator, the filename is looked up in the PATH environment variable. If empty, the filename is derived from the architecture setting.">;
def EmulatorArgs: Property<"emulator-args", "Args">,
Global,
DefaultStringValue<"">,
diff --git a/contrib/llvm-project/lldb/source/Plugins/Platform/gdb-server/PlatformRemoteGDBServer.cpp b/contrib/llvm-project/lldb/source/Plugins/Platform/gdb-server/PlatformRemoteGDBServer.cpp
index 3535a5ad739d..0929a060d0b8 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Platform/gdb-server/PlatformRemoteGDBServer.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Platform/gdb-server/PlatformRemoteGDBServer.cpp
@@ -96,7 +96,8 @@ bool PlatformRemoteGDBServer::GetModuleSpec(const FileSpec &module_file_spec,
const auto module_path = module_file_spec.GetPath(false);
- if (!m_gdb_client.GetModuleInfo(module_file_spec, arch, module_spec)) {
+ if (!m_gdb_client_up ||
+ !m_gdb_client_up->GetModuleInfo(module_file_spec, arch, module_spec)) {
LLDB_LOGF(
log,
"PlatformRemoteGDBServer::%s - failed to get module info for %s:%s",
@@ -127,11 +128,7 @@ Status PlatformRemoteGDBServer::GetFileWithUUID(const FileSpec &platform_file,
/// Default Constructor
PlatformRemoteGDBServer::PlatformRemoteGDBServer()
- : Platform(false), // This is a remote platform
- m_gdb_client() {
- m_gdb_client.SetPacketTimeout(
- process_gdb_remote::ProcessGDBRemote::GetPacketTimeout());
-}
+ : Platform(/*is_host=*/false) {}
/// Destructor.
///
@@ -147,29 +144,36 @@ size_t PlatformRemoteGDBServer::GetSoftwareBreakpointTrapOpcode(
}
bool PlatformRemoteGDBServer::GetRemoteOSVersion() {
- m_os_version = m_gdb_client.GetOSVersion();
+ if (m_gdb_client_up)
+ m_os_version = m_gdb_client_up->GetOSVersion();
return !m_os_version.empty();
}
llvm::Optional<std::string> PlatformRemoteGDBServer::GetRemoteOSBuildString() {
- return m_gdb_client.GetOSBuildString();
+ if (!m_gdb_client_up)
+ return llvm::None;
+ return m_gdb_client_up->GetOSBuildString();
}
llvm::Optional<std::string>
PlatformRemoteGDBServer::GetRemoteOSKernelDescription() {
- return m_gdb_client.GetOSKernelDescription();
+ if (!m_gdb_client_up)
+ return llvm::None;
+ return m_gdb_client_up->GetOSKernelDescription();
}
// Remote Platform subclasses need to override this function
ArchSpec PlatformRemoteGDBServer::GetRemoteSystemArchitecture() {
- return m_gdb_client.GetSystemArchitecture();
+ if (!m_gdb_client_up)
+ return ArchSpec();
+ return m_gdb_client_up->GetSystemArchitecture();
}
FileSpec PlatformRemoteGDBServer::GetRemoteWorkingDirectory() {
if (IsConnected()) {
Log *log = GetLogIfAnyCategoriesSet(LIBLLDB_LOG_PLATFORM);
FileSpec working_dir;
- if (m_gdb_client.GetWorkingDir(working_dir) && log)
+ if (m_gdb_client_up->GetWorkingDir(working_dir) && log)
LLDB_LOGF(log,
"PlatformRemoteGDBServer::GetRemoteWorkingDirectory() -> '%s'",
working_dir.GetCString());
@@ -187,13 +191,17 @@ bool PlatformRemoteGDBServer::SetRemoteWorkingDirectory(
Log *log = GetLogIfAnyCategoriesSet(LIBLLDB_LOG_PLATFORM);
LLDB_LOGF(log, "PlatformRemoteGDBServer::SetRemoteWorkingDirectory('%s')",
working_dir.GetCString());
- return m_gdb_client.SetWorkingDir(working_dir) == 0;
+ return m_gdb_client_up->SetWorkingDir(working_dir) == 0;
} else
return Platform::SetRemoteWorkingDirectory(working_dir);
}
bool PlatformRemoteGDBServer::IsConnected() const {
- return m_gdb_client.IsConnected();
+ if (m_gdb_client_up) {
+ assert(m_gdb_client_up->IsConnected());
+ return true;
+ }
+ return false;
}
Status PlatformRemoteGDBServer::ConnectRemote(Args &args) {
@@ -224,26 +232,31 @@ Status PlatformRemoteGDBServer::ConnectRemote(Args &args) {
m_platform_scheme = parsed_url->scheme.str();
m_platform_hostname = parsed_url->hostname.str();
- m_gdb_client.SetConnection(std::make_unique<ConnectionFileDescriptor>());
+ auto client_up =
+ std::make_unique<process_gdb_remote::GDBRemoteCommunicationClient>();
+ client_up->SetPacketTimeout(
+ process_gdb_remote::ProcessGDBRemote::GetPacketTimeout());
+ client_up->SetConnection(std::make_unique<ConnectionFileDescriptor>());
if (repro::Generator *g = repro::Reproducer::Instance().GetGenerator()) {
repro::GDBRemoteProvider &provider =
g->GetOrCreate<repro::GDBRemoteProvider>();
- m_gdb_client.SetPacketRecorder(provider.GetNewPacketRecorder());
+ client_up->SetPacketRecorder(provider.GetNewPacketRecorder());
}
- m_gdb_client.Connect(url, &error);
+ client_up->Connect(url, &error);
if (error.Fail())
return error;
- if (m_gdb_client.HandshakeWithServer(&error)) {
- m_gdb_client.GetHostInfo();
+ if (client_up->HandshakeWithServer(&error)) {
+ m_gdb_client_up = std::move(client_up);
+ m_gdb_client_up->GetHostInfo();
// If a working directory was set prior to connecting, send it down
// now.
if (m_working_dir)
- m_gdb_client.SetWorkingDir(m_working_dir);
+ m_gdb_client_up->SetWorkingDir(m_working_dir);
m_supported_architectures.clear();
- ArchSpec remote_arch = m_gdb_client.GetSystemArchitecture();
+ ArchSpec remote_arch = m_gdb_client_up->GetSystemArchitecture();
if (remote_arch) {
m_supported_architectures.push_back(remote_arch);
if (remote_arch.GetTriple().isArch64Bit())
@@ -251,7 +264,7 @@ Status PlatformRemoteGDBServer::ConnectRemote(Args &args) {
ArchSpec(remote_arch.GetTriple().get32BitArchVariant()));
}
} else {
- m_gdb_client.Disconnect();
+ client_up->Disconnect();
if (error.Success())
error.SetErrorString("handshake failed");
}
@@ -260,13 +273,14 @@ Status PlatformRemoteGDBServer::ConnectRemote(Args &args) {
Status PlatformRemoteGDBServer::DisconnectRemote() {
Status error;
- m_gdb_client.Disconnect(&error);
+ m_gdb_client_up.reset();
m_remote_signals_sp.reset();
return error;
}
const char *PlatformRemoteGDBServer::GetHostname() {
- m_gdb_client.GetHostname(m_name);
+ if (m_gdb_client_up)
+ m_gdb_client_up->GetHostname(m_name);
if (m_name.empty())
return nullptr;
return m_name.c_str();
@@ -275,7 +289,7 @@ const char *PlatformRemoteGDBServer::GetHostname() {
llvm::Optional<std::string>
PlatformRemoteGDBServer::DoGetUserName(UserIDResolver::id_t uid) {
std::string name;
- if (m_gdb_client.GetUserName(uid, name))
+ if (m_gdb_client_up && m_gdb_client_up->GetUserName(uid, name))
return std::move(name);
return llvm::None;
}
@@ -283,7 +297,7 @@ PlatformRemoteGDBServer::DoGetUserName(UserIDResolver::id_t uid) {
llvm::Optional<std::string>
PlatformRemoteGDBServer::DoGetGroupName(UserIDResolver::id_t gid) {
std::string name;
- if (m_gdb_client.GetGroupName(gid, name))
+ if (m_gdb_client_up && m_gdb_client_up->GetGroupName(gid, name))
return std::move(name);
return llvm::None;
}
@@ -291,12 +305,16 @@ PlatformRemoteGDBServer::DoGetGroupName(UserIDResolver::id_t gid) {
uint32_t PlatformRemoteGDBServer::FindProcesses(
const ProcessInstanceInfoMatch &match_info,
ProcessInstanceInfoList &process_infos) {
- return m_gdb_client.FindProcesses(match_info, process_infos);
+ if (m_gdb_client_up)
+ return m_gdb_client_up->FindProcesses(match_info, process_infos);
+ return 0;
}
bool PlatformRemoteGDBServer::GetProcessInfo(
lldb::pid_t pid, ProcessInstanceInfo &process_info) {
- return m_gdb_client.GetProcessInfo(pid, process_info);
+ if (m_gdb_client_up)
+ return m_gdb_client_up->GetProcessInfo(pid, process_info);
+ return false;
}
Status PlatformRemoteGDBServer::LaunchProcess(ProcessLaunchInfo &launch_info) {
@@ -305,6 +323,8 @@ Status PlatformRemoteGDBServer::LaunchProcess(ProcessLaunchInfo &launch_info) {
LLDB_LOGF(log, "PlatformRemoteGDBServer::%s() called", __FUNCTION__);
+ if (!IsConnected())
+ return Status("Not connected.");
auto num_file_actions = launch_info.GetNumFileActions();
for (decltype(num_file_actions) i = 0; i < num_file_actions; ++i) {
const auto file_action = launch_info.GetFileActionAtIndex(i);
@@ -312,34 +332,34 @@ Status PlatformRemoteGDBServer::LaunchProcess(ProcessLaunchInfo &launch_info) {
continue;
switch (file_action->GetFD()) {
case STDIN_FILENO:
- m_gdb_client.SetSTDIN(file_action->GetFileSpec());
+ m_gdb_client_up->SetSTDIN(file_action->GetFileSpec());
break;
case STDOUT_FILENO:
- m_gdb_client.SetSTDOUT(file_action->GetFileSpec());
+ m_gdb_client_up->SetSTDOUT(file_action->GetFileSpec());
break;
case STDERR_FILENO:
- m_gdb_client.SetSTDERR(file_action->GetFileSpec());
+ m_gdb_client_up->SetSTDERR(file_action->GetFileSpec());
break;
}
}
- m_gdb_client.SetDisableASLR(
+ m_gdb_client_up->SetDisableASLR(
launch_info.GetFlags().Test(eLaunchFlagDisableASLR));
- m_gdb_client.SetDetachOnError(
+ m_gdb_client_up->SetDetachOnError(
launch_info.GetFlags().Test(eLaunchFlagDetachOnError));
FileSpec working_dir = launch_info.GetWorkingDirectory();
if (working_dir) {
- m_gdb_client.SetWorkingDir(working_dir);
+ m_gdb_client_up->SetWorkingDir(working_dir);
}
// Send the environment and the program + arguments after we connect
- m_gdb_client.SendEnvironment(launch_info.GetEnvironment());
+ m_gdb_client_up->SendEnvironment(launch_info.GetEnvironment());
ArchSpec arch_spec = launch_info.GetArchitecture();
const char *arch_triple = arch_spec.GetTriple().str().c_str();
- m_gdb_client.SendLaunchArchPacket(arch_triple);
+ m_gdb_client_up->SendLaunchArchPacket(arch_triple);
LLDB_LOGF(
log,
"PlatformRemoteGDBServer::%s() set launch architecture triple to '%s'",
@@ -349,14 +369,14 @@ Status PlatformRemoteGDBServer::LaunchProcess(ProcessLaunchInfo &launch_info) {
{
// Scope for the scoped timeout object
process_gdb_remote::GDBRemoteCommunication::ScopedTimeout timeout(
- m_gdb_client, std::chrono::seconds(5));
- arg_packet_err = m_gdb_client.SendArgumentsPacket(launch_info);
+ *m_gdb_client_up, std::chrono::seconds(5));
+ arg_packet_err = m_gdb_client_up->SendArgumentsPacket(launch_info);
}
if (arg_packet_err == 0) {
std::string error_str;
- if (m_gdb_client.GetLaunchSuccess(error_str)) {
- const auto pid = m_gdb_client.GetCurrentProcessID(false);
+ if (m_gdb_client_up->GetLaunchSuccess(error_str)) {
+ const auto pid = m_gdb_client_up->GetCurrentProcessID(false);
if (pid != LLDB_INVALID_PROCESS_ID) {
launch_info.SetProcessID(pid);
LLDB_LOGF(log,
@@ -428,6 +448,8 @@ PlatformRemoteGDBServer::DebugProcess(ProcessLaunchInfo &launch_info,
bool PlatformRemoteGDBServer::LaunchGDBServer(lldb::pid_t &pid,
std::string &connect_url) {
+ assert(IsConnected());
+
ArchSpec remote_arch = GetRemoteSystemArchitecture();
llvm::Triple &remote_triple = remote_arch.GetTriple();
@@ -440,11 +462,11 @@ bool PlatformRemoteGDBServer::LaunchGDBServer(lldb::pid_t &pid,
// localhost, so we will need the remote debugserver to accept connections
// only from localhost, no matter what our current hostname is
launch_result =
- m_gdb_client.LaunchGDBServer("127.0.0.1", pid, port, socket_name);
+ m_gdb_client_up->LaunchGDBServer("127.0.0.1", pid, port, socket_name);
} else {
// All other hosts should use their actual hostname
launch_result =
- m_gdb_client.LaunchGDBServer(nullptr, pid, port, socket_name);
+ m_gdb_client_up->LaunchGDBServer(nullptr, pid, port, socket_name);
}
if (!launch_result)
@@ -457,7 +479,8 @@ bool PlatformRemoteGDBServer::LaunchGDBServer(lldb::pid_t &pid,
}
bool PlatformRemoteGDBServer::KillSpawnedProcess(lldb::pid_t pid) {
- return m_gdb_client.KillSpawnedProcess(pid);
+ assert(IsConnected());
+ return m_gdb_client_up->KillSpawnedProcess(pid);
}
lldb::ProcessSP PlatformRemoteGDBServer::Attach(
@@ -513,7 +536,9 @@ lldb::ProcessSP PlatformRemoteGDBServer::Attach(
Status PlatformRemoteGDBServer::MakeDirectory(const FileSpec &file_spec,
uint32_t mode) {
- Status error = m_gdb_client.MakeDirectory(file_spec, mode);
+ if (!IsConnected())
+ return Status("Not connected.");
+ Status error = m_gdb_client_up->MakeDirectory(file_spec, mode);
Log *log = GetLogIfAnyCategoriesSet(LIBLLDB_LOG_PLATFORM);
LLDB_LOGF(log,
"PlatformRemoteGDBServer::MakeDirectory(path='%s', mode=%o) "
@@ -524,7 +549,10 @@ Status PlatformRemoteGDBServer::MakeDirectory(const FileSpec &file_spec,
Status PlatformRemoteGDBServer::GetFilePermissions(const FileSpec &file_spec,
uint32_t &file_permissions) {
- Status error = m_gdb_client.GetFilePermissions(file_spec, file_permissions);
+ if (!IsConnected())
+ return Status("Not connected.");
+ Status error =
+ m_gdb_client_up->GetFilePermissions(file_spec, file_permissions);
Log *log = GetLogIfAnyCategoriesSet(LIBLLDB_LOG_PLATFORM);
LLDB_LOGF(log,
"PlatformRemoteGDBServer::GetFilePermissions(path='%s', "
@@ -536,7 +564,10 @@ Status PlatformRemoteGDBServer::GetFilePermissions(const FileSpec &file_spec,
Status PlatformRemoteGDBServer::SetFilePermissions(const FileSpec &file_spec,
uint32_t file_permissions) {
- Status error = m_gdb_client.SetFilePermissions(file_spec, file_permissions);
+ if (!IsConnected())
+ return Status("Not connected.");
+ Status error =
+ m_gdb_client_up->SetFilePermissions(file_spec, file_permissions);
Log *log = GetLogIfAnyCategoriesSet(LIBLLDB_LOG_PLATFORM);
LLDB_LOGF(log,
"PlatformRemoteGDBServer::SetFilePermissions(path='%s', "
@@ -550,33 +581,47 @@ lldb::user_id_t PlatformRemoteGDBServer::OpenFile(const FileSpec &file_spec,
File::OpenOptions flags,
uint32_t mode,
Status &error) {
- return m_gdb_client.OpenFile(file_spec, flags, mode, error);
+ if (IsConnected())
+ return m_gdb_client_up->OpenFile(file_spec, flags, mode, error);
+ return LLDB_INVALID_UID;
}
bool PlatformRemoteGDBServer::CloseFile(lldb::user_id_t fd, Status &error) {
- return m_gdb_client.CloseFile(fd, error);
+ if (IsConnected())
+ return m_gdb_client_up->CloseFile(fd, error);
+ error = Status("Not connected.");
+ return false;
}
lldb::user_id_t
PlatformRemoteGDBServer::GetFileSize(const FileSpec &file_spec) {
- return m_gdb_client.GetFileSize(file_spec);
+ if (IsConnected())
+ return m_gdb_client_up->GetFileSize(file_spec);
+ return LLDB_INVALID_UID;
}
void PlatformRemoteGDBServer::AutoCompleteDiskFileOrDirectory(
CompletionRequest &request, bool only_dir) {
- m_gdb_client.AutoCompleteDiskFileOrDirectory(request, only_dir);
+ if (IsConnected())
+ m_gdb_client_up->AutoCompleteDiskFileOrDirectory(request, only_dir);
}
uint64_t PlatformRemoteGDBServer::ReadFile(lldb::user_id_t fd, uint64_t offset,
void *dst, uint64_t dst_len,
Status &error) {
- return m_gdb_client.ReadFile(fd, offset, dst, dst_len, error);
+ if (IsConnected())
+ return m_gdb_client_up->ReadFile(fd, offset, dst, dst_len, error);
+ error = Status("Not connected.");
+ return 0;
}
uint64_t PlatformRemoteGDBServer::WriteFile(lldb::user_id_t fd, uint64_t offset,
const void *src, uint64_t src_len,
Status &error) {
- return m_gdb_client.WriteFile(fd, offset, src, src_len, error);
+ if (IsConnected())
+ return m_gdb_client_up->WriteFile(fd, offset, src, src_len, error);
+ error = Status("Not connected.");
+ return 0;
}
Status PlatformRemoteGDBServer::PutFile(const FileSpec &source,
@@ -589,7 +634,9 @@ Status PlatformRemoteGDBServer::CreateSymlink(
const FileSpec &src, // The name of the link is in src
const FileSpec &dst) // The symlink points to dst
{
- Status error = m_gdb_client.CreateSymlink(src, dst);
+ if (!IsConnected())
+ return Status("Not connected.");
+ Status error = m_gdb_client_up->CreateSymlink(src, dst);
Log *log = GetLogIfAnyCategoriesSet(LIBLLDB_LOG_PLATFORM);
LLDB_LOGF(log,
"PlatformRemoteGDBServer::CreateSymlink(src='%s', dst='%s') "
@@ -600,7 +647,9 @@ Status PlatformRemoteGDBServer::CreateSymlink(
}
Status PlatformRemoteGDBServer::Unlink(const FileSpec &file_spec) {
- Status error = m_gdb_client.Unlink(file_spec);
+ if (!IsConnected())
+ return Status("Not connected.");
+ Status error = m_gdb_client_up->Unlink(file_spec);
Log *log = GetLogIfAnyCategoriesSet(LIBLLDB_LOG_PLATFORM);
LLDB_LOGF(log, "PlatformRemoteGDBServer::Unlink(path='%s') error = %u (%s)",
file_spec.GetCString(), error.GetError(), error.AsCString());
@@ -608,7 +657,9 @@ Status PlatformRemoteGDBServer::Unlink(const FileSpec &file_spec) {
}
bool PlatformRemoteGDBServer::GetFileExists(const FileSpec &file_spec) {
- return m_gdb_client.GetFileExists(file_spec);
+ if (IsConnected())
+ return m_gdb_client_up->GetFileExists(file_spec);
+ return false;
}
Status PlatformRemoteGDBServer::RunShellCommand(
@@ -621,8 +672,10 @@ Status PlatformRemoteGDBServer::RunShellCommand(
std::string
*command_output, // Pass NULL if you don't want the command output
const Timeout<std::micro> &timeout) {
- return m_gdb_client.RunShellCommand(command, working_dir, status_ptr,
- signo_ptr, command_output, timeout);
+ if (!IsConnected())
+ return Status("Not connected.");
+ return m_gdb_client_up->RunShellCommand(command, working_dir, status_ptr,
+ signo_ptr, command_output, timeout);
}
void PlatformRemoteGDBServer::CalculateTrapHandlerSymbolNames() {
@@ -642,7 +695,7 @@ const UnixSignalsSP &PlatformRemoteGDBServer::GetRemoteUnixSignals() {
StringExtractorGDBRemote response;
auto result =
- m_gdb_client.SendPacketAndWaitForResponse("jSignalsInfo", response);
+ m_gdb_client_up->SendPacketAndWaitForResponse("jSignalsInfo", response);
if (result != decltype(result)::Success ||
response.GetResponseType() != response.eResponse)
@@ -693,7 +746,7 @@ const UnixSignalsSP &PlatformRemoteGDBServer::GetRemoteUnixSignals() {
if (object_sp && object_sp->IsValid())
notify = object_sp->GetBooleanValue();
- std::string description{""};
+ std::string description;
object_sp = dict->GetValueForKey("description");
if (object_sp && object_sp->IsValid())
description = std::string(object_sp->GetStringValue());
@@ -754,7 +807,9 @@ size_t PlatformRemoteGDBServer::ConnectToWaitingProcesses(Debugger &debugger,
size_t PlatformRemoteGDBServer::GetPendingGdbServerList(
std::vector<std::string> &connection_urls) {
std::vector<std::pair<uint16_t, std::string>> remote_servers;
- m_gdb_client.QueryGDBServer(remote_servers);
+ if (!IsConnected())
+ return 0;
+ m_gdb_client_up->QueryGDBServer(remote_servers);
for (const auto &gdbserver : remote_servers) {
const char *socket_name_cstr =
gdbserver.second.empty() ? nullptr : gdbserver.second.c_str();
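
The PlatformRemoteGDBServer rework replaces an always-present GDBRemoteCommunicationClient member with a unique_ptr that only exists after a successful handshake, so every entry point now checks the pointer (or IsConnected()) and degrades to a neutral value or a "Not connected." status. A compact sketch of that pattern in isolation (the types are placeholders, not the LLDB classes):

#include <memory>
#include <optional>
#include <string>

// Placeholder for a remote client that is only valid once connected.
struct Client {
  std::string HostnameQuery() { return "remote-host"; }
};

class RemotePlatform {
  std::unique_ptr<Client> m_client_up; // null until the handshake succeeds

public:
  bool Connect() {
    auto up = std::make_unique<Client>();
    // ... perform handshake; only commit the client on success ...
    m_client_up = std::move(up);
    return true;
  }

  void Disconnect() { m_client_up.reset(); }

  bool IsConnected() const { return static_cast<bool>(m_client_up); }

  // Every query degrades gracefully when there is no client.
  std::optional<std::string> GetHostname() {
    if (!m_client_up)
      return std::nullopt;
    return m_client_up->HostnameQuery();
  }
};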
diff --git a/contrib/llvm-project/lldb/source/Plugins/Platform/gdb-server/PlatformRemoteGDBServer.h b/contrib/llvm-project/lldb/source/Plugins/Platform/gdb-server/PlatformRemoteGDBServer.h
index f594f43b3f13..263516f520d5 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Platform/gdb-server/PlatformRemoteGDBServer.h
+++ b/contrib/llvm-project/lldb/source/Plugins/Platform/gdb-server/PlatformRemoteGDBServer.h
@@ -14,7 +14,6 @@
#include "Plugins/Process/Utility/GDBRemoteSignals.h"
#include "Plugins/Process/gdb-remote/GDBRemoteCommunicationClient.h"
-#include "Plugins/Process/gdb-remote/GDBRemoteCommunicationReplayServer.h"
#include "lldb/Target/Platform.h"
namespace lldb_private {
@@ -154,8 +153,8 @@ public:
GetPendingGdbServerList(std::vector<std::string> &connection_urls);
protected:
- process_gdb_remote::GDBRemoteCommunicationClient m_gdb_client;
- process_gdb_remote::GDBRemoteCommunicationReplayServer m_gdb_replay_server;
+ std::unique_ptr<process_gdb_remote::GDBRemoteCommunicationClient>
+ m_gdb_client_up;
std::string m_platform_description; // After we connect we can get a more
// complete description of what we are
// connected to
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeProcessFreeBSD.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeProcessFreeBSD.cpp
index a62d3c1ba052..21c9ead0eca4 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeProcessFreeBSD.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeProcessFreeBSD.cpp
@@ -54,7 +54,7 @@ llvm::Expected<std::unique_ptr<NativeProcessProtocol>>
NativeProcessFreeBSD::Factory::Launch(ProcessLaunchInfo &launch_info,
NativeDelegate &native_delegate,
MainLoop &mainloop) const {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_PROCESS));
+ Log *log = GetLog(POSIXLog::Process);
Status status;
::pid_t pid = ProcessLauncherPosixFork()
@@ -108,7 +108,7 @@ llvm::Expected<std::unique_ptr<NativeProcessProtocol>>
NativeProcessFreeBSD::Factory::Attach(
lldb::pid_t pid, NativeProcessProtocol::NativeDelegate &native_delegate,
MainLoop &mainloop) const {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_PROCESS));
+ Log *log = GetLog(POSIXLog::Process);
LLDB_LOG(log, "pid = {0:x}", pid);
// Retrieve the architecture for the running process.
@@ -135,7 +135,8 @@ NativeProcessFreeBSD::Factory::GetSupportedExtensions() const {
Extension::savecore |
#endif
Extension::multiprocess | Extension::fork | Extension::vfork |
- Extension::pass_signals | Extension::auxv | Extension::libraries_svr4;
+ Extension::pass_signals | Extension::auxv | Extension::libraries_svr4 |
+ Extension::siginfo_read;
}
// Public Instance Methods
@@ -170,7 +171,7 @@ void NativeProcessFreeBSD::MonitorCallback(lldb::pid_t pid, int signal) {
}
void NativeProcessFreeBSD::MonitorExited(lldb::pid_t pid, WaitStatus status) {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_PROCESS));
+ Log *log = GetLog(POSIXLog::Process);
LLDB_LOG(log, "got exit signal({0}) , pid = {1}", status, pid);
@@ -193,7 +194,7 @@ void NativeProcessFreeBSD::MonitorSIGSTOP(lldb::pid_t pid) {
}
void NativeProcessFreeBSD::MonitorSIGTRAP(lldb::pid_t pid) {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_PROCESS));
+ Log *log = GetLog(POSIXLog::Process);
struct ptrace_lwpinfo info;
const auto siginfo_err = PtraceWrapper(PT_LWPINFO, pid, &info, sizeof(info));
@@ -254,6 +255,7 @@ void NativeProcessFreeBSD::MonitorSIGTRAP(lldb::pid_t pid) {
for (const auto &thread : m_threads)
static_cast<NativeThreadFreeBSD &>(*thread).SetStoppedByExec();
+ SetCurrentThreadID(m_threads.front()->GetID());
SetState(StateType::eStateStopped, true);
return;
}
@@ -312,6 +314,7 @@ void NativeProcessFreeBSD::MonitorSIGTRAP(lldb::pid_t pid) {
} else
thread->SetStoppedByBreakpoint();
FixupBreakpointPCAsNeeded(*thread);
+ SetCurrentThreadID(thread->GetID());
}
SetState(StateType::eStateStopped, true);
return;
@@ -333,11 +336,13 @@ void NativeProcessFreeBSD::MonitorSIGTRAP(lldb::pid_t pid) {
if (wp_index != LLDB_INVALID_INDEX32) {
regctx.ClearWatchpointHit(wp_index);
thread->SetStoppedByWatchpoint(wp_index);
+ SetCurrentThreadID(thread->GetID());
SetState(StateType::eStateStopped, true);
break;
}
thread->SetStoppedByTrace();
+ SetCurrentThreadID(thread->GetID());
}
SetState(StateType::eStateStopped, true);
@@ -352,7 +357,7 @@ void NativeProcessFreeBSD::MonitorSIGTRAP(lldb::pid_t pid) {
}
void NativeProcessFreeBSD::MonitorSignal(lldb::pid_t pid, int signal) {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_PROCESS));
+ Log *log = GetLog(POSIXLog::Process);
struct ptrace_lwpinfo info;
const auto siginfo_err = PtraceWrapper(PT_LWPINFO, pid, &info, sizeof(info));
@@ -370,9 +375,10 @@ void NativeProcessFreeBSD::MonitorSignal(lldb::pid_t pid, int signal) {
static_cast<NativeThreadFreeBSD &>(*abs_thread);
assert(info.pl_lwpid >= 0);
if (info.pl_lwpid == 0 ||
- static_cast<lldb::tid_t>(info.pl_lwpid) == thread.GetID())
+ static_cast<lldb::tid_t>(info.pl_lwpid) == thread.GetID()) {
thread.SetStoppedBySignal(info.pl_siginfo.si_signo, &info.pl_siginfo);
- else
+ SetCurrentThreadID(thread.GetID());
+ } else
thread.SetStoppedWithNoReason();
}
SetState(StateType::eStateStopped, true);
@@ -380,7 +386,7 @@ void NativeProcessFreeBSD::MonitorSignal(lldb::pid_t pid, int signal) {
Status NativeProcessFreeBSD::PtraceWrapper(int req, lldb::pid_t pid, void *addr,
int data, int *result) {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_PTRACE));
+ Log *log = GetLog(POSIXLog::Ptrace);
Status error;
int ret;
@@ -424,7 +430,7 @@ NativeProcessFreeBSD::GetSoftwareBreakpointTrapOpcode(size_t size_hint) {
}
Status NativeProcessFreeBSD::Resume(const ResumeActionList &resume_actions) {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_PROCESS));
+ Log *log = GetLog(POSIXLog::Process);
LLDB_LOG(log, "pid {0}", GetID());
Status ret;
@@ -521,7 +527,7 @@ Status NativeProcessFreeBSD::Signal(int signo) {
Status NativeProcessFreeBSD::Interrupt() { return Halt(); }
Status NativeProcessFreeBSD::Kill() {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_PROCESS));
+ Log *log = GetLog(POSIXLog::Process);
LLDB_LOG(log, "pid {0}", GetID());
Status error;
@@ -608,7 +614,7 @@ Status NativeProcessFreeBSD::GetMemoryRegionInfo(lldb::addr_t load_addr,
}
Status NativeProcessFreeBSD::PopulateMemoryRegionCache() {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_PROCESS));
+ Log *log = GetLog(POSIXLog::Process);
// If our cache is empty, pull the latest. There should always be at least
// one memory region if memory region handling is supported.
if (!m_mem_region_cache.empty()) {
@@ -735,7 +741,7 @@ NativeProcessFreeBSD::GetFileLoadAddress(const llvm::StringRef &file_name,
}
void NativeProcessFreeBSD::SigchldHandler() {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_PROCESS));
+ Log *log = GetLog(POSIXLog::Process);
int status;
::pid_t wait_pid =
llvm::sys::RetryAfterSignal(-1, waitpid, GetID(), &status, WNOHANG);
@@ -780,7 +786,7 @@ bool NativeProcessFreeBSD::HasThreadNoLock(lldb::tid_t thread_id) {
}
NativeThreadFreeBSD &NativeProcessFreeBSD::AddThread(lldb::tid_t thread_id) {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_THREAD));
+ Log *log = GetLog(POSIXLog::Thread);
LLDB_LOG(log, "pid {0} adding thread with tid {1}", GetID(), thread_id);
assert(thread_id > 0);
@@ -796,7 +802,7 @@ NativeThreadFreeBSD &NativeProcessFreeBSD::AddThread(lldb::tid_t thread_id) {
}
void NativeProcessFreeBSD::RemoveThread(lldb::tid_t thread_id) {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_THREAD));
+ Log *log = GetLog(POSIXLog::Thread);
LLDB_LOG(log, "pid {0} removing thread with tid {1}", GetID(), thread_id);
assert(thread_id > 0);
@@ -809,6 +815,9 @@ void NativeProcessFreeBSD::RemoveThread(lldb::tid_t thread_id) {
break;
}
}
+
+ if (GetCurrentThreadID() == thread_id)
+ SetCurrentThreadID(m_threads.front()->GetID());
}
Status NativeProcessFreeBSD::Attach() {
@@ -845,7 +854,7 @@ Status NativeProcessFreeBSD::ReadMemory(lldb::addr_t addr, void *buf,
unsigned char *dst = static_cast<unsigned char *>(buf);
struct ptrace_io_desc io;
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_MEMORY));
+ Log *log = GetLog(POSIXLog::Memory);
LLDB_LOG(log, "addr = {0}, buf = {1}, size = {2}", addr, buf, size);
bytes_read = 0;
@@ -873,7 +882,7 @@ Status NativeProcessFreeBSD::WriteMemory(lldb::addr_t addr, const void *buf,
Status error;
struct ptrace_io_desc io;
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_MEMORY));
+ Log *log = GetLog(POSIXLog::Memory);
LLDB_LOG(log, "addr = {0}, buf = {1}, size = {2}", addr, buf, size);
bytes_written = 0;
@@ -953,7 +962,7 @@ bool NativeProcessFreeBSD::SupportHardwareSingleStepping() const {
void NativeProcessFreeBSD::MonitorClone(::pid_t child_pid, bool is_vfork,
NativeThreadFreeBSD &parent_thread) {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_PROCESS));
+ Log *log = GetLog(POSIXLog::Process);
LLDB_LOG(log, "fork, child_pid={0}", child_pid);
int status;
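
Throughout the native-process plugins the old ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_*) calls become GetLog(POSIXLog::Process) and friends, i.e. a typed enum channel lookup instead of a bit-mask macro. A toy version of that shape, purely to illustrate the call pattern (this is not the LLDB logging implementation):

#include <cstdio>
#include <cstdint>

// Toy log-category enum and registry, illustrating the GetLog(Enum) shape only.
enum class POSIXLog : uint8_t { Process, Thread, Memory, Ptrace, Registers };

struct Log {
  void Printf(const char *msg) { std::fprintf(stderr, "%s\n", msg); }
};

// Returns a Log* when the category is enabled, nullptr otherwise.
Log *GetLog(POSIXLog category) {
  static Log logs[5];
  static bool enabled[5] = {true, true, false, false, false};
  unsigned idx = static_cast<unsigned>(category);
  return enabled[idx] ? &logs[idx] : nullptr;
}

int main() {
  if (Log *log = GetLog(POSIXLog::Process))
    log->Printf("pid 1234 adding thread");
  return 0;
}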
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeRegisterContextFreeBSD_arm64.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeRegisterContextFreeBSD_arm64.cpp
index 4578138a89b3..143d94069bc6 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeRegisterContextFreeBSD_arm64.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeRegisterContextFreeBSD_arm64.cpp
@@ -227,7 +227,7 @@ llvm::Error NativeRegisterContextFreeBSD_arm64::CopyHardwareWatchpointsFrom(
llvm::Error NativeRegisterContextFreeBSD_arm64::ReadHardwareDebugInfo() {
#ifdef LLDB_HAS_FREEBSD_WATCHPOINT
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_REGISTERS));
+ Log *log = GetLog(POSIXLog::Registers);
// we're fully stateful, so no need to reread control registers ever
if (m_read_dbreg)
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeThreadFreeBSD.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeThreadFreeBSD.cpp
index 80b3527aebce..a75668f3b5c7 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeThreadFreeBSD.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeThreadFreeBSD.cpp
@@ -75,7 +75,7 @@ Status NativeThreadFreeBSD::Suspend() {
void NativeThreadFreeBSD::SetStoppedBySignal(uint32_t signo,
const siginfo_t *info) {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_THREAD));
+ Log *log = GetLog(POSIXLog::Thread);
LLDB_LOG(log, "tid = {0} in called with signal {1}", GetID(), signo);
SetStopped();
@@ -178,7 +178,7 @@ void NativeThreadFreeBSD::SetStepping() {
}
std::string NativeThreadFreeBSD::GetName() {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_THREAD));
+ Log *log = GetLog(POSIXLog::Thread);
std::vector<struct kinfo_proc> kp;
int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID | KERN_PROC_INC_THREAD,
@@ -213,7 +213,7 @@ lldb::StateType NativeThreadFreeBSD::GetState() { return m_state; }
bool NativeThreadFreeBSD::GetStopReason(ThreadStopInfo &stop_info,
std::string &description) {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_THREAD));
+ Log *log = GetLog(POSIXLog::Thread);
description.clear();
switch (m_state) {
@@ -313,3 +313,27 @@ NativeThreadFreeBSD::CopyWatchpointsFrom(NativeThreadFreeBSD &source) {
}
return s;
}
+
+llvm::Expected<std::unique_ptr<llvm::MemoryBuffer>>
+NativeThreadFreeBSD::GetSiginfo() const {
+ Log *log = GetLog(POSIXLog::Process);
+
+ struct ptrace_lwpinfo info;
+ const auto siginfo_err = NativeProcessFreeBSD::PtraceWrapper(
+ PT_LWPINFO, GetID(), &info, sizeof(info));
+ if (siginfo_err.Fail()) {
+ LLDB_LOG(log, "PT_LWPINFO failed {0}", siginfo_err);
+ return siginfo_err.ToError();
+ }
+
+ if (info.pl_event != PL_EVENT_SIGNAL)
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "Thread not signaled");
+ if (!(info.pl_flags & PL_FLAG_SI))
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "No siginfo for thread");
+
+ return llvm::MemoryBuffer::getMemBufferCopy(
+ llvm::StringRef(reinterpret_cast<const char *>(&info.pl_siginfo),
+ sizeof(info.pl_siginfo)));
+}
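
NativeThreadFreeBSD::GetSiginfo above returns the raw siginfo bytes from PT_LWPINFO wrapped in an llvm::MemoryBuffer. A caller-side sketch of decoding such a blob on a POSIX system (the helper name is illustrative and assumes the buffer really holds a full siginfo_t):

#include <csignal>
#include <cstddef>
#include <cstring>
#include <optional>

// Decode a raw byte blob into a siginfo_t, as a GetSiginfo() consumer might.
// Returns nothing if the blob is too small to contain one.
std::optional<siginfo_t> DecodeSiginfo(const char *data, std::size_t size) {
  if (!data || size < sizeof(siginfo_t))
    return std::nullopt;
  siginfo_t si;
  std::memcpy(&si, data, sizeof(si)); // copy byte-wise to avoid aliasing issues
  return si;
}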
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeThreadFreeBSD.h b/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeThreadFreeBSD.h
index 3ec6daa409e4..6294a7a70963 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeThreadFreeBSD.h
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeThreadFreeBSD.h
@@ -47,6 +47,9 @@ public:
Status RemoveHardwareBreakpoint(lldb::addr_t addr) override;
+ llvm::Expected<std::unique_ptr<llvm::MemoryBuffer>>
+ GetSiginfo() const override;
+
private:
// Interface for friend classes
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSDKernel/ProcessFreeBSDKernel.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSDKernel/ProcessFreeBSDKernel.cpp
index 339d33d25110..e3707365a9c3 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSDKernel/ProcessFreeBSDKernel.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSDKernel/ProcessFreeBSDKernel.cpp
@@ -137,12 +137,115 @@ bool ProcessFreeBSDKernel::DoUpdateThreadList(ThreadList &old_thread_list,
return false;
}
- const Symbol *pcb_sym =
- GetTarget().GetExecutableModule()->FindFirstSymbolWithNameAndType(
- ConstString("dumppcb"));
- ThreadSP thread_sp(new ThreadFreeBSDKernel(
- *this, 1, pcb_sym ? pcb_sym->GetFileAddress() : LLDB_INVALID_ADDRESS));
- new_thread_list.AddThread(thread_sp);
+ Status error;
+
+ // struct field offsets are written as symbols so that we don't have
+ // to figure them out ourselves
+ int32_t offset_p_list = ReadSignedIntegerFromMemory(
+ FindSymbol("proc_off_p_list"), 4, -1, error);
+ int32_t offset_p_pid =
+ ReadSignedIntegerFromMemory(FindSymbol("proc_off_p_pid"), 4, -1, error);
+ int32_t offset_p_threads = ReadSignedIntegerFromMemory(
+ FindSymbol("proc_off_p_threads"), 4, -1, error);
+ int32_t offset_p_comm = ReadSignedIntegerFromMemory(
+ FindSymbol("proc_off_p_comm"), 4, -1, error);
+
+ int32_t offset_td_tid = ReadSignedIntegerFromMemory(
+ FindSymbol("thread_off_td_tid"), 4, -1, error);
+ int32_t offset_td_plist = ReadSignedIntegerFromMemory(
+ FindSymbol("thread_off_td_plist"), 4, -1, error);
+ int32_t offset_td_pcb = ReadSignedIntegerFromMemory(
+ FindSymbol("thread_off_td_pcb"), 4, -1, error);
+ int32_t offset_td_oncpu = ReadSignedIntegerFromMemory(
+ FindSymbol("thread_off_td_oncpu"), 4, -1, error);
+ int32_t offset_td_name = ReadSignedIntegerFromMemory(
+ FindSymbol("thread_off_td_name"), 4, -1, error);
+
+ // fail if we were not able to read any of the offsets
+ if (offset_p_list == -1 || offset_p_pid == -1 || offset_p_threads == -1 ||
+ offset_p_comm == -1 || offset_td_tid == -1 || offset_td_plist == -1 ||
+ offset_td_pcb == -1 || offset_td_oncpu == -1 || offset_td_name == -1)
+ return false;
+
+ // dumptid contains the thread-id of the crashing thread
+ // dumppcb contains its PCB
+ int32_t dumptid =
+ ReadSignedIntegerFromMemory(FindSymbol("dumptid"), 4, -1, error);
+ lldb::addr_t dumppcb = FindSymbol("dumppcb");
+
+ // stoppcbs is an array of PCBs on all CPUs
+ // each element is of size pcb_size
+ int32_t pcbsize =
+ ReadSignedIntegerFromMemory(FindSymbol("pcb_size"), 4, -1, error);
+ lldb::addr_t stoppcbs = FindSymbol("stoppcbs");
+
+ // from FreeBSD sys/param.h
+ constexpr size_t fbsd_maxcomlen = 19;
+
+ // iterate through a linked list of all processes
+ // allproc is a pointer to the first list element, p_list field
+ // (found at offset_p_list) specifies the next element
+ for (lldb::addr_t proc =
+ ReadPointerFromMemory(FindSymbol("allproc"), error);
+ proc != 0 && proc != LLDB_INVALID_ADDRESS;
+ proc = ReadPointerFromMemory(proc + offset_p_list, error)) {
+ int32_t pid =
+ ReadSignedIntegerFromMemory(proc + offset_p_pid, 4, -1, error);
+ // process' command-line string
+ char comm[fbsd_maxcomlen + 1];
+ ReadCStringFromMemory(proc + offset_p_comm, comm, sizeof(comm), error);
+
+ // iterate through a linked list of all process' threads
+ // the initial thread is found in process' p_threads, subsequent
+ // elements are linked via td_plist field
+ for (lldb::addr_t td =
+ ReadPointerFromMemory(proc + offset_p_threads, error);
+ td != 0; td = ReadPointerFromMemory(td + offset_td_plist, error)) {
+ int32_t tid =
+ ReadSignedIntegerFromMemory(td + offset_td_tid, 4, -1, error);
+ lldb::addr_t pcb_addr =
+ ReadPointerFromMemory(td + offset_td_pcb, error);
+ // whether process was on CPU (-1 if not, otherwise CPU number)
+ int32_t oncpu =
+ ReadSignedIntegerFromMemory(td + offset_td_oncpu, 4, -2, error);
+ // thread name
+ char thread_name[fbsd_maxcomlen + 1];
+ ReadCStringFromMemory(td + offset_td_name, thread_name,
+ sizeof(thread_name), error);
+
+ // if we failed to read TID, ignore this thread
+ if (tid == -1)
+ continue;
+
+ std::string thread_desc = llvm::formatv("(pid {0}) {1}", pid, comm);
+ if (*thread_name && strcmp(thread_name, comm)) {
+ thread_desc += '/';
+ thread_desc += thread_name;
+ }
+
+ // roughly:
+ // 1. if the thread crashed, its PCB is going to be at "dumppcb"
+ // 2. if the thread was on CPU, its PCB is going to be on the CPU
+ // 3. otherwise, its PCB is in the thread struct
+ if (tid == dumptid) {
+ // NB: dumppcb can be LLDB_INVALID_ADDRESS if reading it failed
+ pcb_addr = dumppcb;
+ thread_desc += " (crashed)";
+ } else if (oncpu != -1) {
+ // if we managed to read stoppcbs and pcb_size, use them to find
+ // the correct PCB
+ if (stoppcbs != LLDB_INVALID_ADDRESS && pcbsize > 0)
+ pcb_addr = stoppcbs + oncpu * pcbsize;
+ else
+ pcb_addr = LLDB_INVALID_ADDRESS;
+ thread_desc += llvm::formatv(" (on CPU {0})", oncpu);
+ }
+
+ ThreadSP thread_sp{
+ new ThreadFreeBSDKernel(*this, tid, pcb_addr, thread_desc)};
+ new_thread_list.AddThread(thread_sp);
+ }
+ }
} else {
const uint32_t num_threads = old_thread_list.GetSize(false);
for (uint32_t i = 0; i < num_threads; ++i)
@@ -163,6 +266,12 @@ DynamicLoader *ProcessFreeBSDKernel::GetDynamicLoader() {
return m_dyld_up.get();
}
+lldb::addr_t ProcessFreeBSDKernel::FindSymbol(const char *name) {
+ ModuleSP mod_sp = GetTarget().GetExecutableModule();
+ const Symbol *sym = mod_sp->FindFirstSymbolWithNameAndType(ConstString(name));
+ return sym ? sym->GetLoadAddress(&GetTarget()) : LLDB_INVALID_ADDRESS;
+}
+
#if LLDB_ENABLE_FBSDVMCORE
ProcessFreeBSDKernelFVC::ProcessFreeBSDKernelFVC(lldb::TargetSP target_sp,
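
The ProcessFreeBSDKernel change enumerates kernel threads by reading well-known symbols (allproc, proc_off_*, thread_off_*) and chasing struct pointers plus field offsets through target memory, instead of synthesizing a single thread from dumppcb. The essential traversal, abstracted away from LLDB (the memory reader is a stand-in; offsets and addresses are illustrative):

#include <cstdint>
#include <functional>
#include <vector>

using addr_t = uint64_t;
constexpr addr_t kInvalidAddr = ~0ULL;

// Reads a pointer-sized value from target memory; returns kInvalidAddr on failure.
using ReadPointerFn = std::function<addr_t(addr_t)>;

// Walk a kernel-style intrusive linked list: 'head' is the address of a symbol
// holding the first element, and each element stores the next pointer at
// 'link_offset' bytes in (e.g. p_list for processes, td_plist for threads).
std::vector<addr_t> WalkList(addr_t head, int32_t link_offset,
                             const ReadPointerFn &read_ptr) {
  std::vector<addr_t> elements;
  for (addr_t elem = read_ptr(head);
       elem != 0 && elem != kInvalidAddr;
       elem = read_ptr(elem + link_offset))
    elements.push_back(elem);
  return elements;
}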
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSDKernel/ProcessFreeBSDKernel.h b/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSDKernel/ProcessFreeBSDKernel.h
index 558eec5403db..5bd463126307 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSDKernel/ProcessFreeBSDKernel.h
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSDKernel/ProcessFreeBSDKernel.h
@@ -46,6 +46,8 @@ public:
protected:
bool DoUpdateThreadList(lldb_private::ThreadList &old_thread_list,
lldb_private::ThreadList &new_thread_list) override;
+
+ lldb::addr_t FindSymbol(const char* name);
};
#endif // LLDB_SOURCE_PLUGINS_PROCESS_FREEBSDKERNEL_PROCESSFREEBSDKERNEL_H
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSDKernel/ThreadFreeBSDKernel.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSDKernel/ThreadFreeBSDKernel.cpp
index 124c65d587ff..8d304086a163 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSDKernel/ThreadFreeBSDKernel.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSDKernel/ThreadFreeBSDKernel.cpp
@@ -24,8 +24,10 @@ using namespace lldb;
using namespace lldb_private;
ThreadFreeBSDKernel::ThreadFreeBSDKernel(Process &process, lldb::tid_t tid,
- lldb::addr_t pcb_addr)
- : Thread(process, tid), m_pcb_addr(pcb_addr) {}
+ lldb::addr_t pcb_addr,
+ std::string thread_name)
+ : Thread(process, tid), m_thread_name(std::move(thread_name)),
+ m_pcb_addr(pcb_addr) {}
ThreadFreeBSDKernel::~ThreadFreeBSDKernel() {}
@@ -61,9 +63,8 @@ ThreadFreeBSDKernel::CreateRegisterContextForFrame(StackFrame *frame) {
m_pcb_addr);
break;
case llvm::Triple::x86:
- m_thread_reg_ctx_sp =
- std::make_shared<RegisterContextFreeBSDKernel_i386>(
- *this, new RegisterContextFreeBSD_i386(arch), m_pcb_addr);
+ m_thread_reg_ctx_sp = std::make_shared<RegisterContextFreeBSDKernel_i386>(
+ *this, new RegisterContextFreeBSD_i386(arch), m_pcb_addr);
break;
case llvm::Triple::x86_64:
m_thread_reg_ctx_sp =
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSDKernel/ThreadFreeBSDKernel.h b/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSDKernel/ThreadFreeBSDKernel.h
index 2842eba64e56..3bc019b63e68 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSDKernel/ThreadFreeBSDKernel.h
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSDKernel/ThreadFreeBSDKernel.h
@@ -14,7 +14,7 @@
class ThreadFreeBSDKernel : public lldb_private::Thread {
public:
ThreadFreeBSDKernel(lldb_private::Process &process, lldb::tid_t tid,
- lldb::addr_t pcb_addr);
+ lldb::addr_t pcb_addr, std::string thread_name);
~ThreadFreeBSDKernel() override;
@@ -25,10 +25,24 @@ public:
lldb::RegisterContextSP
CreateRegisterContextForFrame(lldb_private::StackFrame *frame) override;
+ const char *GetName() override {
+ if (m_thread_name.empty())
+ return nullptr;
+ return m_thread_name.c_str();
+ }
+
+ void SetName(const char *name) override {
+ if (name && name[0])
+ m_thread_name.assign(name);
+ else
+ m_thread_name.clear();
+ }
+
protected:
bool CalculateStopInfo() override;
private:
+ std::string m_thread_name;
lldb::RegisterContextSP m_thread_reg_ctx_sp;
lldb::addr_t m_pcb_addr;
};
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/NetBSD/NativeProcessNetBSD.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/NetBSD/NativeProcessNetBSD.cpp
index 0420d00e39d6..182eefb7bee7 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/NetBSD/NativeProcessNetBSD.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/NetBSD/NativeProcessNetBSD.cpp
@@ -59,7 +59,7 @@ llvm::Expected<std::unique_ptr<NativeProcessProtocol>>
NativeProcessNetBSD::Factory::Launch(ProcessLaunchInfo &launch_info,
NativeDelegate &native_delegate,
MainLoop &mainloop) const {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_PROCESS));
+ Log *log = GetLog(POSIXLog::Process);
Status status;
::pid_t pid = ProcessLauncherPosixFork()
@@ -113,7 +113,7 @@ llvm::Expected<std::unique_ptr<NativeProcessProtocol>>
NativeProcessNetBSD::Factory::Attach(
lldb::pid_t pid, NativeProcessProtocol::NativeDelegate &native_delegate,
MainLoop &mainloop) const {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_PROCESS));
+ Log *log = GetLog(POSIXLog::Process);
LLDB_LOG(log, "pid = {0:x}", pid);
// Retrieve the architecture for the running process.
@@ -172,7 +172,7 @@ void NativeProcessNetBSD::MonitorCallback(lldb::pid_t pid, int signal) {
}
void NativeProcessNetBSD::MonitorExited(lldb::pid_t pid, WaitStatus status) {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_PROCESS));
+ Log *log = GetLog(POSIXLog::Process);
LLDB_LOG(log, "got exit signal({0}) , pid = {1}", status, pid);
@@ -207,7 +207,7 @@ void NativeProcessNetBSD::MonitorSIGSTOP(lldb::pid_t pid) {
}
void NativeProcessNetBSD::MonitorSIGTRAP(lldb::pid_t pid) {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_PROCESS));
+ Log *log = GetLog(POSIXLog::Process);
ptrace_siginfo_t info;
const auto siginfo_err =
@@ -359,7 +359,7 @@ void NativeProcessNetBSD::MonitorSIGTRAP(lldb::pid_t pid) {
}
void NativeProcessNetBSD::MonitorSignal(lldb::pid_t pid, int signal) {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_PROCESS));
+ Log *log = GetLog(POSIXLog::Process);
ptrace_siginfo_t info;
const auto siginfo_err =
@@ -383,7 +383,7 @@ void NativeProcessNetBSD::MonitorSignal(lldb::pid_t pid, int signal) {
Status NativeProcessNetBSD::PtraceWrapper(int req, lldb::pid_t pid, void *addr,
int data, int *result) {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_PTRACE));
+ Log *log = GetLog(POSIXLog::Ptrace);
Status error;
int ret;
@@ -459,7 +459,7 @@ static llvm::Expected<ptrace_siginfo_t> ComputeSignalInfo(
}
Status NativeProcessNetBSD::Resume(const ResumeActionList &resume_actions) {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_PROCESS));
+ Log *log = GetLog(POSIXLog::Process);
LLDB_LOG(log, "pid {0}", GetID());
Status ret;
@@ -562,7 +562,7 @@ Status NativeProcessNetBSD::Interrupt() {
}
Status NativeProcessNetBSD::Kill() {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_PROCESS));
+ Log *log = GetLog(POSIXLog::Process);
LLDB_LOG(log, "pid {0}", GetID());
Status error;
@@ -654,7 +654,7 @@ Status NativeProcessNetBSD::GetMemoryRegionInfo(lldb::addr_t load_addr,
}
Status NativeProcessNetBSD::PopulateMemoryRegionCache() {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_PROCESS));
+ Log *log = GetLog(POSIXLog::Process);
// If our cache is empty, pull the latest. There should always be at least
// one memory region if memory region handling is supported.
if (!m_mem_region_cache.empty()) {
@@ -772,7 +772,7 @@ Status NativeProcessNetBSD::GetFileLoadAddress(const llvm::StringRef &file_name,
}
void NativeProcessNetBSD::SigchldHandler() {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_PROCESS));
+ Log *log = GetLog(POSIXLog::Process);
int status;
::pid_t wait_pid = llvm::sys::RetryAfterSignal(-1, waitpid, GetID(), &status,
WALLSIG | WNOHANG);
@@ -817,7 +817,7 @@ bool NativeProcessNetBSD::HasThreadNoLock(lldb::tid_t thread_id) {
}
NativeThreadNetBSD &NativeProcessNetBSD::AddThread(lldb::tid_t thread_id) {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_THREAD));
+ Log *log = GetLog(POSIXLog::Thread);
LLDB_LOG(log, "pid {0} adding thread with tid {1}", GetID(), thread_id);
assert(thread_id > 0);
@@ -833,7 +833,7 @@ NativeThreadNetBSD &NativeProcessNetBSD::AddThread(lldb::tid_t thread_id) {
}
void NativeProcessNetBSD::RemoveThread(lldb::tid_t thread_id) {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_THREAD));
+ Log *log = GetLog(POSIXLog::Thread);
LLDB_LOG(log, "pid {0} removing thread with tid {1}", GetID(), thread_id);
assert(thread_id > 0);
@@ -882,7 +882,7 @@ Status NativeProcessNetBSD::ReadMemory(lldb::addr_t addr, void *buf,
unsigned char *dst = static_cast<unsigned char *>(buf);
struct ptrace_io_desc io;
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_MEMORY));
+ Log *log = GetLog(POSIXLog::Memory);
LLDB_LOG(log, "addr = {0}, buf = {1}, size = {2}", addr, buf, size);
bytes_read = 0;
@@ -910,7 +910,7 @@ Status NativeProcessNetBSD::WriteMemory(lldb::addr_t addr, const void *buf,
Status error;
struct ptrace_io_desc io;
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_MEMORY));
+ Log *log = GetLog(POSIXLog::Memory);
LLDB_LOG(log, "addr = {0}, buf = {1}, size = {2}", addr, buf, size);
bytes_written = 0;
@@ -1013,7 +1013,7 @@ Status NativeProcessNetBSD::ReinitializeThreads() {
void NativeProcessNetBSD::MonitorClone(::pid_t child_pid, bool is_vfork,
NativeThreadNetBSD &parent_thread) {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_PROCESS));
+ Log *log = GetLog(POSIXLog::Process);
LLDB_LOG(log, "clone, child_pid={0}", child_pid);
int status;
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/NetBSD/NativeThreadNetBSD.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/NetBSD/NativeThreadNetBSD.cpp
index 400b89a5fddf..3e8cf9fd9f23 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/NetBSD/NativeThreadNetBSD.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/NetBSD/NativeThreadNetBSD.cpp
@@ -75,7 +75,7 @@ Status NativeThreadNetBSD::Suspend() {
void NativeThreadNetBSD::SetStoppedBySignal(uint32_t signo,
const siginfo_t *info) {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_THREAD));
+ Log *log = GetLog(POSIXLog::Thread);
LLDB_LOG(log, "tid = {0} in called with signal {1}", GetID(), signo);
SetStopped();
@@ -178,7 +178,7 @@ void NativeThreadNetBSD::SetStepping() {
}
std::string NativeThreadNetBSD::GetName() {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_THREAD));
+ Log *log = GetLog(POSIXLog::Thread);
#ifdef PT_LWPSTATUS
struct ptrace_lwpstatus info = {};
@@ -225,7 +225,7 @@ lldb::StateType NativeThreadNetBSD::GetState() { return m_state; }
bool NativeThreadNetBSD::GetStopReason(ThreadStopInfo &stop_info,
std::string &description) {
- Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_THREAD));
+ Log *log = GetLog(POSIXLog::Thread);
description.clear();
switch (m_state) {
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/POSIX/ProcessPOSIXLog.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/POSIX/ProcessPOSIXLog.cpp
index f4d0803b264a..7ad88aabc2c0 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/POSIX/ProcessPOSIXLog.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/POSIX/ProcessPOSIXLog.cpp
@@ -13,16 +13,20 @@
using namespace lldb_private;
static constexpr Log::Category g_categories[] = {
- {{"break"}, {"log breakpoints"}, POSIX_LOG_BREAKPOINTS},
- {{"memory"}, {"log memory reads and writes"}, POSIX_LOG_MEMORY},
- {{"process"}, {"log process events and activities"}, POSIX_LOG_PROCESS},
- {{"ptrace"}, {"log all calls to ptrace"}, POSIX_LOG_PTRACE},
- {{"registers"}, {"log register read/writes"}, POSIX_LOG_REGISTERS},
- {{"thread"}, {"log thread events and activities"}, POSIX_LOG_THREAD},
- {{"watch"}, {"log watchpoint related activities"}, POSIX_LOG_WATCHPOINTS},
+ {{"break"}, {"log breakpoints"}, POSIXLog::Breakpoints},
+ {{"memory"}, {"log memory reads and writes"}, POSIXLog::Memory},
+ {{"process"}, {"log process events and activities"}, POSIXLog::Process},
+ {{"ptrace"}, {"log all calls to ptrace"}, POSIXLog::Ptrace},
+ {{"registers"}, {"log register read/writes"}, POSIXLog::Registers},
+ {{"thread"}, {"log thread events and activities"}, POSIXLog::Thread},
+ {{"watch"}, {"log watchpoint related activities"}, POSIXLog::Watchpoints},
};
-Log::Channel ProcessPOSIXLog::g_channel(g_categories, POSIX_LOG_DEFAULT);
+static Log::Channel g_channel(g_categories, POSIXLog::Process);
+
+template <> Log::Channel &lldb_private::LogChannelFor<POSIXLog>() {
+ return g_channel;
+}
void ProcessPOSIXLog::Initialize() {
static llvm::once_flag g_once_flag;
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/POSIX/ProcessPOSIXLog.h b/contrib/llvm-project/lldb/source/Plugins/Process/POSIX/ProcessPOSIXLog.h
index c0147c43410f..7b8b6cdbf255 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/POSIX/ProcessPOSIXLog.h
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/POSIX/ProcessPOSIXLog.h
@@ -13,27 +13,25 @@
#include "lldb/Utility/Log.h"
-#define POSIX_LOG_PROCESS (1u << 1)
-#define POSIX_LOG_THREAD (1u << 2)
-#define POSIX_LOG_MEMORY (1u << 4) // Log memory reads/writes calls
-#define POSIX_LOG_PTRACE (1u << 5)
-#define POSIX_LOG_REGISTERS (1u << 6)
-#define POSIX_LOG_BREAKPOINTS (1u << 7)
-#define POSIX_LOG_WATCHPOINTS (1u << 8)
-#define POSIX_LOG_ALL (UINT32_MAX)
-#define POSIX_LOG_DEFAULT POSIX_LOG_PROCESS
-
namespace lldb_private {
-class ProcessPOSIXLog {
- static Log::Channel g_channel;
+enum class POSIXLog : Log::MaskType {
+ Breakpoints = Log::ChannelFlag<0>,
+ Memory = Log::ChannelFlag<1>,
+ Process = Log::ChannelFlag<2>,
+ Ptrace = Log::ChannelFlag<3>,
+ Registers = Log::ChannelFlag<4>,
+ Thread = Log::ChannelFlag<5>,
+ Watchpoints = Log::ChannelFlag<6>,
+ LLVM_MARK_AS_BITMASK_ENUM(Watchpoints)
+};
+
+class ProcessPOSIXLog {
public:
static void Initialize();
-
- static Log *GetLogIfAllCategoriesSet(uint32_t mask) {
- return g_channel.GetLogIfAll(mask);
- }
};
-}
+
+template <> Log::Channel &LogChannelFor<POSIXLog>();
+} // namespace lldb_private
#endif // liblldb_ProcessPOSIXLog_h_
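
The header above replaces the POSIX_LOG_* bit masks with a typed POSIXLog enum and a LogChannelFor<> specialization, which is what lets the call sites earlier in this diff shrink to GetLog(POSIXLog::Process) and friends. A minimal sketch of the resulting call pattern; the function name and messages are illustrative, not part of the change:

  // Sketch only: how POSIX plugin code obtains a logger after this refactor.
  #include "ProcessPOSIXLog.h"
  #include <cstdint>

  using namespace lldb_private;

  static void LogThreadCreation(uint64_t tid) {
    // Single category; GetLog() yields nullptr when "thread" logging is off,
    // and LLDB_LOG quietly does nothing in that case.
    Log *log = GetLog(POSIXLog::Thread);
    LLDB_LOG(log, "created thread, tid = {0}", tid);

    // The categories form a bitmask enum (LLVM_MARK_AS_BITMASK_ENUM above),
    // so several of them can be combined in one query.
    Log *both = GetLog(POSIXLog::Process | POSIXLog::Thread);
    LLDB_LOG(both, "process/thread event, tid = {0}", tid);
  }
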
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/Utility/MemoryTagManagerAArch64MTE.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/Utility/MemoryTagManagerAArch64MTE.cpp
index d74b66b58afc..b71de4cadb18 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/Utility/MemoryTagManagerAArch64MTE.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/Utility/MemoryTagManagerAArch64MTE.cpp
@@ -7,6 +7,8 @@
//===----------------------------------------------------------------------===//
#include "MemoryTagManagerAArch64MTE.h"
+#include "llvm/Support/Error.h"
+#include <assert.h>
using namespace lldb_private;
@@ -20,7 +22,7 @@ MemoryTagManagerAArch64MTE::GetLogicalTag(lldb::addr_t addr) const {
}
lldb::addr_t
-MemoryTagManagerAArch64MTE::RemoveNonAddressBits(lldb::addr_t addr) const {
+MemoryTagManagerAArch64MTE::RemoveTagBits(lldb::addr_t addr) const {
// Here we're ignoring the whole top byte. If you've got MTE
// you must also have TBI (top byte ignore).
// The other 4 bits could contain other extension bits or
@@ -30,7 +32,7 @@ MemoryTagManagerAArch64MTE::RemoveNonAddressBits(lldb::addr_t addr) const {
ptrdiff_t MemoryTagManagerAArch64MTE::AddressDiff(lldb::addr_t addr1,
lldb::addr_t addr2) const {
- return RemoveNonAddressBits(addr1) - RemoveNonAddressBits(addr2);
+ return RemoveTagBits(addr1) - RemoveTagBits(addr2);
}
lldb::addr_t MemoryTagManagerAArch64MTE::GetGranuleSize() const {
@@ -66,6 +68,15 @@ MemoryTagManagerAArch64MTE::ExpandToGranule(TagRange range) const {
return TagRange(new_start, new_len);
}
+static llvm::Error MakeInvalidRangeErr(lldb::addr_t addr,
+ lldb::addr_t end_addr) {
+ return llvm::createStringError(
+ llvm::inconvertibleErrorCode(),
+ "End address (0x%" PRIx64
+ ") must be greater than the start address (0x%" PRIx64 ")",
+ end_addr, addr);
+}
+
llvm::Expected<MemoryTagManager::TagRange>
MemoryTagManagerAArch64MTE::MakeTaggedRange(
lldb::addr_t addr, lldb::addr_t end_addr,
@@ -74,17 +85,12 @@ MemoryTagManagerAArch64MTE::MakeTaggedRange(
// We must remove tags here otherwise an address with a higher
// tag value will always be > the other.
ptrdiff_t len = AddressDiff(end_addr, addr);
- if (len <= 0) {
- return llvm::createStringError(
- llvm::inconvertibleErrorCode(),
- "End address (0x%" PRIx64
- ") must be greater than the start address (0x%" PRIx64 ")",
- end_addr, addr);
- }
+ if (len <= 0)
+ return MakeInvalidRangeErr(addr, end_addr);
// Region addresses will not have memory tags. So when searching
// we must use an untagged address.
- MemoryRegionInfo::RangeType tag_range(RemoveNonAddressBits(addr), len);
+ MemoryRegionInfo::RangeType tag_range(RemoveTagBits(addr), len);
tag_range = ExpandToGranule(tag_range);
// Make a copy so we can use the original for errors and the final return.
@@ -123,6 +129,91 @@ MemoryTagManagerAArch64MTE::MakeTaggedRange(
return tag_range;
}
+llvm::Expected<std::vector<MemoryTagManager::TagRange>>
+MemoryTagManagerAArch64MTE::MakeTaggedRanges(
+ lldb::addr_t addr, lldb::addr_t end_addr,
+ const lldb_private::MemoryRegionInfos &memory_regions) const {
+ // First check that the range is not inverted.
+ // We must remove tags here otherwise an address with a higher
+ // tag value will always be > the other.
+ ptrdiff_t len = AddressDiff(end_addr, addr);
+ if (len <= 0)
+ return MakeInvalidRangeErr(addr, end_addr);
+
+ std::vector<MemoryTagManager::TagRange> tagged_ranges;
+ // No memory regions means no tagged memory at all
+ if (memory_regions.empty())
+ return tagged_ranges;
+
+ // For the logic to work regions must be in ascending order
+ // which is what you'd have if you used GetMemoryRegions.
+ assert(std::is_sorted(
+ memory_regions.begin(), memory_regions.end(),
+ [](const MemoryRegionInfo &lhs, const MemoryRegionInfo &rhs) {
+ return lhs.GetRange().GetRangeBase() < rhs.GetRange().GetRangeBase();
+ }));
+
+ // If we're debugging userspace in an OS like Linux that uses an MMU,
+ // the only reason we'd get overlapping regions is incorrect data.
+ // It is possible that this won't hold for embedded targets with memory
+ // protection units (MPUs) that allow overlaps.
+ //
+ // For now we're going to assume the former, as there is no good way
+ // to handle overlaps. For example:
+ // < requested range >
+ // [-- region 1 --]
+ // [-- region 2--]
+ // Where the first region will reduce the requested range to nothing
+ // and exit early before it sees the overlap.
+ MemoryRegionInfos::const_iterator overlap = std::adjacent_find(
+ memory_regions.begin(), memory_regions.end(),
+ [](const MemoryRegionInfo &lhs, const MemoryRegionInfo &rhs) {
+ return rhs.GetRange().DoesIntersect(lhs.GetRange());
+ });
+ UNUSED_IF_ASSERT_DISABLED(overlap);
+ assert(overlap == memory_regions.end());
+
+ // Region addresses will not have memory tags so when searching
+ // we must use an untagged address.
+ MemoryRegionInfo::RangeType range(RemoveTagBits(addr), len);
+ range = ExpandToGranule(range);
+
+ // While there are regions to check and the range has non-zero length
+ for (const MemoryRegionInfo &region : memory_regions) {
+ // If range we're checking has been reduced to zero length, exit early
+ if (!range.IsValid())
+ break;
+
+ // If the region doesn't overlap the range at all, ignore it.
+ if (!region.GetRange().DoesIntersect(range))
+ continue;
+
+ // If it's tagged, record this sub-range.
+ // (assuming that it's already granule aligned)
+ if (region.GetMemoryTagged()) {
+ // The region found may extend outside the requested range.
+ // For example the first region might start before the range.
+ // We must only add what covers the requested range.
+ lldb::addr_t start =
+ std::max(range.GetRangeBase(), region.GetRange().GetRangeBase());
+ lldb::addr_t end =
+ std::min(range.GetRangeEnd(), region.GetRange().GetRangeEnd());
+ tagged_ranges.push_back(MemoryTagManager::TagRange(start, end - start));
+ }
+
+ // Move the range up to start at the end of the region.
+ lldb::addr_t old_end = range.GetRangeEnd();
+ // This "slides" the range so it moves the end as well.
+ range.SetRangeBase(region.GetRange().GetRangeEnd());
+ // So we set the end back to the original end address after sliding it up.
+ range.SetRangeEnd(old_end);
+ // (if the above were to try to set end < begin the range will just be set
+ // to 0 size)
+ }
+
+ return tagged_ranges;
+}
+
llvm::Expected<std::vector<lldb::addr_t>>
MemoryTagManagerAArch64MTE::UnpackTagsData(const std::vector<uint8_t> &tags,
size_t granules /*=0*/) const {
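
The comments inside MakeTaggedRanges above describe the core loop: clamp each tagged region against what is left of the requested range, record the intersection, then slide the start of the remaining range up to the end of the region just consumed while keeping the end fixed. A self-contained sketch of that loop with plain structs; the names and the driver are illustrative and not lldb API:

  #include <algorithm>
  #include <cstdint>
  #include <cstdio>
  #include <vector>

  struct Range {
    uint64_t base;
    uint64_t end; // one past the last address
    bool tagged;
  };

  // Regions must be sorted and non-overlapping, mirroring the asserts in
  // MakeTaggedRanges above.
  static std::vector<Range> TaggedSubRanges(uint64_t start, uint64_t end,
                                            const std::vector<Range> &regions) {
    std::vector<Range> result;
    for (const Range &r : regions) {
      if (start >= end)
        break; // requested range fully consumed
      if (r.end <= start || r.base >= end)
        continue; // no overlap with what is left of the request
      if (r.tagged) {
        // Clamp the region to the requested range before recording it.
        uint64_t sub_start = std::max(start, r.base);
        uint64_t sub_end = std::min(end, r.end);
        result.push_back({sub_start, sub_end, true});
      }
      // Slide the remaining request past this region; the end stays put.
      start = std::max(start, r.end);
    }
    return result;
  }

  int main() {
    // Request 0x1000-0x3000 against one untagged and one tagged region.
    std::vector<Range> regions = {{0x0, 0x1800, false}, {0x1800, 0x4000, true}};
    for (const Range &r : TaggedSubRanges(0x1000, 0x3000, regions))
      std::printf("tagged sub-range: 0x%llx-0x%llx\n",
                  (unsigned long long)r.base, (unsigned long long)r.end);
    return 0;
  }
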
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/Utility/MemoryTagManagerAArch64MTE.h b/contrib/llvm-project/lldb/source/Plugins/Process/Utility/MemoryTagManagerAArch64MTE.h
index d4e8249da93f..7cda728b140f 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/Utility/MemoryTagManagerAArch64MTE.h
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/Utility/MemoryTagManagerAArch64MTE.h
@@ -27,7 +27,7 @@ public:
size_t GetTagSizeInBytes() const override;
lldb::addr_t GetLogicalTag(lldb::addr_t addr) const override;
- lldb::addr_t RemoveNonAddressBits(lldb::addr_t addr) const override;
+ lldb::addr_t RemoveTagBits(lldb::addr_t addr) const override;
ptrdiff_t AddressDiff(lldb::addr_t addr1, lldb::addr_t addr2) const override;
TagRange ExpandToGranule(TagRange range) const override;
@@ -36,6 +36,10 @@ public:
lldb::addr_t addr, lldb::addr_t end_addr,
const lldb_private::MemoryRegionInfos &memory_regions) const override;
+ llvm::Expected<std::vector<TagRange>> MakeTaggedRanges(
+ lldb::addr_t addr, lldb::addr_t end_addr,
+ const lldb_private::MemoryRegionInfos &memory_regions) const override;
+
llvm::Expected<std::vector<lldb::addr_t>>
UnpackTagsData(const std::vector<uint8_t> &tags,
size_t granules = 0) const override;
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/Utility/RegisterInfoPOSIX_arm64.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/Utility/RegisterInfoPOSIX_arm64.cpp
index 6c130be7b741..d6c4a8687ec5 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/Utility/RegisterInfoPOSIX_arm64.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/Utility/RegisterInfoPOSIX_arm64.cpp
@@ -178,10 +178,10 @@ static const lldb_private::RegisterSet g_reg_sets_arm64[k_num_register_sets] = {
g_sve_regnums_arm64}};
static const lldb_private::RegisterSet g_reg_set_pauth_arm64 = {
- "Pointer Authentication Registers", "pauth", k_num_pauth_register, NULL};
+ "Pointer Authentication Registers", "pauth", k_num_pauth_register, nullptr};
static const lldb_private::RegisterSet g_reg_set_mte_arm64 = {
- "MTE Control Register", "mte", k_num_mte_register, NULL};
+ "MTE Control Register", "mte", k_num_mte_register, nullptr};
RegisterInfoPOSIX_arm64::RegisterInfoPOSIX_arm64(
const lldb_private::ArchSpec &target_arch, lldb_private::Flags opt_regsets)
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationClient.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationClient.cpp
index b5b105351de5..f6526d03863b 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationClient.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationClient.cpp
@@ -173,6 +173,13 @@ bool GDBRemoteCommunicationClient::GetQXferMemoryMapReadSupported() {
return m_supports_qXfer_memory_map_read == eLazyBoolYes;
}
+bool GDBRemoteCommunicationClient::GetQXferSigInfoReadSupported() {
+ if (m_supports_qXfer_siginfo_read == eLazyBoolCalculate) {
+ GetRemoteQSupported();
+ }
+ return m_supports_qXfer_siginfo_read == eLazyBoolYes;
+}
+
uint64_t GDBRemoteCommunicationClient::GetRemoteMaxPacketSize() {
if (m_max_packet_size == 0) {
GetRemoteQSupported();
@@ -273,6 +280,7 @@ void GDBRemoteCommunicationClient::ResetDiscoverableSettings(bool did_exec) {
m_supports_qXfer_libraries_svr4_read = eLazyBoolCalculate;
m_supports_qXfer_features_read = eLazyBoolCalculate;
m_supports_qXfer_memory_map_read = eLazyBoolCalculate;
+ m_supports_qXfer_siginfo_read = eLazyBoolCalculate;
m_supports_augmented_libraries_svr4_read = eLazyBoolCalculate;
m_uses_native_signals = eLazyBoolCalculate;
m_supports_qProcessInfoPID = true;
@@ -320,6 +328,7 @@ void GDBRemoteCommunicationClient::GetRemoteQSupported() {
m_supports_augmented_libraries_svr4_read = eLazyBoolNo;
m_supports_qXfer_features_read = eLazyBoolNo;
m_supports_qXfer_memory_map_read = eLazyBoolNo;
+ m_supports_qXfer_siginfo_read = eLazyBoolNo;
m_supports_multiprocess = eLazyBoolNo;
m_supports_qEcho = eLazyBoolNo;
m_supports_QPassSignals = eLazyBoolNo;
@@ -362,6 +371,8 @@ void GDBRemoteCommunicationClient::GetRemoteQSupported() {
m_supports_qXfer_features_read = eLazyBoolYes;
else if (x == "qXfer:memory-map:read+")
m_supports_qXfer_memory_map_read = eLazyBoolYes;
+ else if (x == "qXfer:siginfo:read+")
+ m_supports_qXfer_siginfo_read = eLazyBoolYes;
else if (x == "qEcho")
m_supports_qEcho = eLazyBoolYes;
else if (x == "QPassSignals+")
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationClient.h b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationClient.h
index c69c33bb1c15..58ed22187747 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationClient.h
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationClient.h
@@ -337,6 +337,8 @@ public:
bool GetQXferMemoryMapReadSupported();
+ bool GetQXferSigInfoReadSupported();
+
LazyBool SupportsAllocDeallocMemory() // const
{
// Uncomment this to have lldb pretend the debug server doesn't respond to
@@ -551,6 +553,7 @@ protected:
LazyBool m_supports_qXfer_libraries_svr4_read = eLazyBoolCalculate;
LazyBool m_supports_qXfer_features_read = eLazyBoolCalculate;
LazyBool m_supports_qXfer_memory_map_read = eLazyBoolCalculate;
+ LazyBool m_supports_qXfer_siginfo_read = eLazyBoolCalculate;
LazyBool m_supports_augmented_libraries_svr4_read = eLazyBoolCalculate;
LazyBool m_supports_jThreadExtendedInfo = eLazyBoolCalculate;
LazyBool m_supports_jLoadedDynamicLibrariesInfos = eLazyBoolCalculate;
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationReplayServer.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationReplayServer.cpp
deleted file mode 100644
index c91d7cb5ac30..000000000000
--- a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationReplayServer.cpp
+++ /dev/null
@@ -1,314 +0,0 @@
-//===-- GDBRemoteCommunicationReplayServer.cpp ----------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include <cerrno>
-
-#include "lldb/Host/Config.h"
-#include "llvm/ADT/ScopeExit.h"
-
-#include "GDBRemoteCommunicationReplayServer.h"
-#include "ProcessGDBRemoteLog.h"
-
-// C Includes
-// C++ Includes
-#include <cstring>
-
-// Project includes
-#include "lldb/Host/ThreadLauncher.h"
-#include "lldb/Utility/ConstString.h"
-#include "lldb/Utility/Event.h"
-#include "lldb/Utility/FileSpec.h"
-#include "lldb/Utility/StreamString.h"
-#include "lldb/Utility/StringExtractorGDBRemote.h"
-
-using namespace llvm;
-using namespace lldb;
-using namespace lldb_private;
-using namespace lldb_private::process_gdb_remote;
-
-/// Check if the given expected packet matches the actual packet.
-static bool unexpected(llvm::StringRef expected, llvm::StringRef actual) {
- // The 'expected' string contains the raw data, including the leading $ and
- // trailing checksum. The 'actual' string contains only the packet's content.
- if (expected.contains(actual))
- return false;
- // Contains a PID which might be different.
- if (expected.contains("vAttach"))
- return false;
- // Contains an ascii-hex-path.
- if (expected.contains("QSetSTD"))
- return false;
- // Contains environment values.
- if (expected.contains("QEnvironment"))
- return false;
-
- return true;
-}
-
-/// Check if we should reply to the given packet.
-static bool skip(llvm::StringRef data) {
- assert(!data.empty() && "Empty packet?");
-
- // We've already acknowledged the '+' packet so we're done here.
- if (data == "+")
- return true;
-
- /// Don't reply to ^C. We need this because of stop reply packets, which
- /// are only returned when the target halts. Reproducers synchronize these
- /// 'asynchronous' replies, by recording them as regular replies to the
- /// previous packet (e.g. vCont). As a result, we should ignore real
- /// asynchronous requests.
- if (data.data()[0] == 0x03)
- return true;
-
- return false;
-}
-
-GDBRemoteCommunicationReplayServer::GDBRemoteCommunicationReplayServer()
- : GDBRemoteCommunication("gdb-replay", "gdb-replay.rx_packet"),
- m_async_broadcaster(nullptr, "lldb.gdb-replay.async-broadcaster"),
- m_async_listener_sp(
- Listener::MakeListener("lldb.gdb-replay.async-listener")),
- m_async_thread_state_mutex() {
- m_async_broadcaster.SetEventName(eBroadcastBitAsyncContinue,
- "async thread continue");
- m_async_broadcaster.SetEventName(eBroadcastBitAsyncThreadShouldExit,
- "async thread should exit");
-
- const uint32_t async_event_mask =
- eBroadcastBitAsyncContinue | eBroadcastBitAsyncThreadShouldExit;
- m_async_listener_sp->StartListeningForEvents(&m_async_broadcaster,
- async_event_mask);
-}
-
-GDBRemoteCommunicationReplayServer::~GDBRemoteCommunicationReplayServer() {
- StopAsyncThread();
-}
-
-GDBRemoteCommunication::PacketResult
-GDBRemoteCommunicationReplayServer::GetPacketAndSendResponse(
- Timeout<std::micro> timeout, Status &error, bool &interrupt, bool &quit) {
- std::lock_guard<std::recursive_mutex> guard(m_async_thread_state_mutex);
-
- StringExtractorGDBRemote packet;
- PacketResult packet_result = WaitForPacketNoLock(packet, timeout, false);
-
- if (packet_result != PacketResult::Success) {
- if (!IsConnected()) {
- error.SetErrorString("lost connection");
- quit = true;
- } else {
- error.SetErrorString("timeout");
- }
- return packet_result;
- }
-
- m_async_broadcaster.BroadcastEvent(eBroadcastBitAsyncContinue);
-
- // Check if we should reply to this packet.
- if (skip(packet.GetStringRef()))
- return PacketResult::Success;
-
- // This completes the handshake. Since m_send_acks was true, we can unset it
- // already.
- if (packet.GetStringRef() == "QStartNoAckMode")
- m_send_acks = false;
-
- // A QEnvironment packet is sent for every environment variable. If the
- // number of environment variables is different during replay, the replies
- // become out of sync.
- if (packet.GetStringRef().find("QEnvironment") == 0)
- return SendRawPacketNoLock("$OK#9a");
-
- Log *log(ProcessGDBRemoteLog::GetLogIfAllCategoriesSet(GDBR_LOG_PROCESS));
- while (!m_packet_history.empty()) {
- // Pop last packet from the history.
- GDBRemotePacket entry = m_packet_history.back();
- m_packet_history.pop_back();
-
- // Decode run-length encoding.
- const std::string expanded_data =
- GDBRemoteCommunication::ExpandRLE(entry.packet.data);
-
- // We've handled the handshake implicitly before. Skip the packet and move
- // on.
- if (entry.packet.data == "+")
- continue;
-
- if (entry.type == GDBRemotePacket::ePacketTypeSend) {
- if (unexpected(expanded_data, packet.GetStringRef())) {
- LLDB_LOG(log,
- "GDBRemoteCommunicationReplayServer expected packet: '{0}'",
- expanded_data);
- LLDB_LOG(log, "GDBRemoteCommunicationReplayServer actual packet: '{0}'",
- packet.GetStringRef());
-#ifndef NDEBUG
- // This behaves like a regular assert, but prints the expected and
- // received packet before aborting.
- printf("Reproducer expected packet: '%s'\n", expanded_data.c_str());
- printf("Reproducer received packet: '%s'\n",
- packet.GetStringRef().data());
- llvm::report_fatal_error("Encountered unexpected packet during replay");
-#endif
- return PacketResult::ErrorSendFailed;
- }
-
- // Ignore QEnvironment packets as they're handled earlier.
- if (expanded_data.find("QEnvironment") == 1) {
- assert(m_packet_history.back().type ==
- GDBRemotePacket::ePacketTypeRecv);
- m_packet_history.pop_back();
- }
-
- continue;
- }
-
- if (entry.type == GDBRemotePacket::ePacketTypeInvalid) {
- LLDB_LOG(
- log,
- "GDBRemoteCommunicationReplayServer skipped invalid packet: '{0}'",
- packet.GetStringRef());
- continue;
- }
-
- LLDB_LOG(log,
- "GDBRemoteCommunicationReplayServer replied to '{0}' with '{1}'",
- packet.GetStringRef(), entry.packet.data);
- return SendRawPacketNoLock(entry.packet.data);
- }
-
- quit = true;
-
- return packet_result;
-}
-
-llvm::Error
-GDBRemoteCommunicationReplayServer::LoadReplayHistory(const FileSpec &path) {
- auto error_or_file = MemoryBuffer::getFile(path.GetPath());
- if (auto err = error_or_file.getError())
- return errorCodeToError(err);
-
- yaml::Input yin((*error_or_file)->getBuffer());
- yin >> m_packet_history;
-
- if (auto err = yin.error())
- return errorCodeToError(err);
-
- // We want to manipulate the vector like a stack so we need to reverse the
- // order of the packets to have the oldest one at the back.
- std::reverse(m_packet_history.begin(), m_packet_history.end());
-
- return Error::success();
-}
-
-bool GDBRemoteCommunicationReplayServer::StartAsyncThread() {
- std::lock_guard<std::recursive_mutex> guard(m_async_thread_state_mutex);
- if (!m_async_thread.IsJoinable()) {
- // Create a thread that watches our internal state and controls which
- // events make it to clients (into the DCProcess event queue).
- llvm::Expected<HostThread> async_thread = ThreadLauncher::LaunchThread(
- "<lldb.gdb-replay.async>",
- GDBRemoteCommunicationReplayServer::AsyncThread, this);
- if (!async_thread) {
- LLDB_LOG_ERROR(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_HOST),
- async_thread.takeError(),
- "failed to launch host thread: {}");
- return false;
- }
- m_async_thread = *async_thread;
- }
-
- // Wait for handshake.
- m_async_broadcaster.BroadcastEvent(eBroadcastBitAsyncContinue);
-
- return m_async_thread.IsJoinable();
-}
-
-void GDBRemoteCommunicationReplayServer::StopAsyncThread() {
- std::lock_guard<std::recursive_mutex> guard(m_async_thread_state_mutex);
-
- if (!m_async_thread.IsJoinable())
- return;
-
- // Request thread to stop.
- m_async_broadcaster.BroadcastEvent(eBroadcastBitAsyncThreadShouldExit);
-
- // Disconnect client.
- Disconnect();
-
- // Stop the thread.
- m_async_thread.Join(nullptr);
- m_async_thread.Reset();
-}
-
-void GDBRemoteCommunicationReplayServer::ReceivePacket(
- GDBRemoteCommunicationReplayServer &server, bool &done) {
- Status error;
- bool interrupt;
- auto packet_result = server.GetPacketAndSendResponse(std::chrono::seconds(1),
- error, interrupt, done);
- if (packet_result != GDBRemoteCommunication::PacketResult::Success &&
- packet_result !=
- GDBRemoteCommunication::PacketResult::ErrorReplyTimeout) {
- done = true;
- } else {
- server.m_async_broadcaster.BroadcastEvent(eBroadcastBitAsyncContinue);
- }
-}
-
-thread_result_t GDBRemoteCommunicationReplayServer::AsyncThread(void *arg) {
- GDBRemoteCommunicationReplayServer *server =
- (GDBRemoteCommunicationReplayServer *)arg;
- auto D = make_scope_exit([&]() { server->Disconnect(); });
- EventSP event_sp;
- bool done = false;
- while (!done) {
- if (server->m_async_listener_sp->GetEvent(event_sp, llvm::None)) {
- const uint32_t event_type = event_sp->GetType();
- if (event_sp->BroadcasterIs(&server->m_async_broadcaster)) {
- switch (event_type) {
- case eBroadcastBitAsyncContinue:
- ReceivePacket(*server, done);
- if (done)
- return {};
- break;
- case eBroadcastBitAsyncThreadShouldExit:
- default:
- return {};
- }
- }
- }
- }
-
- return {};
-}
-
-Status GDBRemoteCommunicationReplayServer::Connect(
- process_gdb_remote::GDBRemoteCommunicationClient &client) {
- repro::Loader *loader = repro::Reproducer::Instance().GetLoader();
- if (!loader)
- return Status("No loader provided.");
-
- static std::unique_ptr<repro::MultiLoader<repro::GDBRemoteProvider>>
- multi_loader = repro::MultiLoader<repro::GDBRemoteProvider>::Create(
- repro::Reproducer::Instance().GetLoader());
- if (!multi_loader)
- return Status("No gdb remote provider found.");
-
- llvm::Optional<std::string> history_file = multi_loader->GetNextFile();
- if (!history_file)
- return Status("No gdb remote packet log found.");
-
- if (auto error = LoadReplayHistory(FileSpec(*history_file)))
- return Status("Unable to load replay history");
-
- if (auto error = GDBRemoteCommunication::ConnectLocally(client, *this))
- return Status("Unable to connect to replay server");
-
- return {};
-}
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationReplayServer.h b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationReplayServer.h
deleted file mode 100644
index 2f8770d0accf..000000000000
--- a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationReplayServer.h
+++ /dev/null
@@ -1,88 +0,0 @@
-//===-- GDBRemoteCommunicationReplayServer.h --------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLDB_SOURCE_PLUGINS_PROCESS_GDB_REMOTE_GDBREMOTECOMMUNICATIONREPLAYSERVER_H
-#define LLDB_SOURCE_PLUGINS_PROCESS_GDB_REMOTE_GDBREMOTECOMMUNICATIONREPLAYSERVER_H
-
-// Other libraries and framework includes
-#include "GDBRemoteCommunication.h"
-#include "GDBRemoteCommunicationClient.h"
-#include "GDBRemoteCommunicationHistory.h"
-
-// Project includes
-#include "lldb/Host/HostThread.h"
-#include "lldb/Utility/Broadcaster.h"
-#include "lldb/lldb-private-forward.h"
-#include "llvm/Support/Error.h"
-
-// C Includes
-// C++ Includes
-#include <functional>
-#include <map>
-#include <thread>
-
-class StringExtractorGDBRemote;
-
-namespace lldb_private {
-namespace process_gdb_remote {
-
-class ProcessGDBRemote;
-
-/// Dummy GDB server that replays packets from the GDB Remote Communication
-/// history. This is used to replay GDB packets.
-class GDBRemoteCommunicationReplayServer : public GDBRemoteCommunication {
-public:
- GDBRemoteCommunicationReplayServer();
-
- ~GDBRemoteCommunicationReplayServer() override;
-
- PacketResult GetPacketAndSendResponse(Timeout<std::micro> timeout,
- Status &error, bool &interrupt,
- bool &quit);
-
- bool HandshakeWithClient() { return GetAck() == PacketResult::Success; }
-
- llvm::Error LoadReplayHistory(const FileSpec &path);
-
- bool StartAsyncThread();
- void StopAsyncThread();
-
- Status Connect(process_gdb_remote::GDBRemoteCommunicationClient &client);
-
-protected:
- enum {
- eBroadcastBitAsyncContinue = (1 << 0),
- eBroadcastBitAsyncThreadShouldExit = (1 << 1),
- };
-
- static void ReceivePacket(GDBRemoteCommunicationReplayServer &server,
- bool &done);
- static lldb::thread_result_t AsyncThread(void *arg);
-
- /// Replay history with the oldest packet at the end.
- std::vector<GDBRemotePacket> m_packet_history;
-
- /// Server thread.
- Broadcaster m_async_broadcaster;
- lldb::ListenerSP m_async_listener_sp;
- HostThread m_async_thread;
- std::recursive_mutex m_async_thread_state_mutex;
-
- bool m_skip_acks = false;
-
-private:
- GDBRemoteCommunicationReplayServer(
- const GDBRemoteCommunicationReplayServer &) = delete;
- const GDBRemoteCommunicationReplayServer &
- operator=(const GDBRemoteCommunicationReplayServer &) = delete;
-};
-
-} // namespace process_gdb_remote
-} // namespace lldb_private
-
-#endif // LLDB_SOURCE_PLUGINS_PROCESS_GDB_REMOTE_GDBREMOTECOMMUNICATIONREPLAYSERVER_H
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerLLGS.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerLLGS.cpp
index 30f14a52dfb5..123a8198a89b 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerLLGS.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerLLGS.cpp
@@ -1086,7 +1086,7 @@ void GDBRemoteCommunicationServerLLGS::NewSubprocess(
}
void GDBRemoteCommunicationServerLLGS::DataAvailableCallback() {
- Log *log(GetLogIfAnyCategoriesSet(GDBR_LOG_COMM));
+ Log *log = GetLog(GDBRLog::Comm);
bool interrupt = false;
bool done = false;
@@ -2920,6 +2920,18 @@ GDBRemoteCommunicationServerLLGS::ReadXferObject(llvm::StringRef object,
return std::move(*buffer_or_error);
}
+ if (object == "siginfo") {
+ NativeThreadProtocol *thread = m_current_process->GetCurrentThread();
+ if (!thread)
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "no current thread");
+
+ auto buffer_or_error = thread->GetSiginfo();
+ if (!buffer_or_error)
+ return buffer_or_error.takeError();
+ return std::move(*buffer_or_error);
+ }
+
if (object == "libraries-svr4") {
auto library_list = m_current_process->GetLoadedSVR4Libraries();
if (!library_list)
@@ -3838,6 +3850,8 @@ std::vector<std::string> GDBRemoteCommunicationServerLLGS::HandleFeatures(
ret.push_back("qXfer:auxv:read+");
if (bool(plugin_features & Extension::libraries_svr4))
ret.push_back("qXfer:libraries-svr4:read+");
+ if (bool(plugin_features & Extension::siginfo_read))
+ ret.push_back("qXfer:siginfo:read+");
if (bool(plugin_features & Extension::memory_tagging))
ret.push_back("memory-tagging+");
if (bool(plugin_features & Extension::savecore))
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterFallback.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterFallback.cpp
new file mode 100644
index 000000000000..b391edced695
--- /dev/null
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterFallback.cpp
@@ -0,0 +1,86 @@
+//===-- GDBRemoteRegisterFallback.cpp -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "GDBRemoteRegisterFallback.h"
+
+namespace lldb_private {
+namespace process_gdb_remote {
+
+#define REG(name, size) \
+ DynamicRegisterInfo::Register { \
+ ConstString(#name), empty_alt_name, reg_set, size, LLDB_INVALID_INDEX32, \
+ lldb::eEncodingUint, lldb::eFormatHex, LLDB_INVALID_REGNUM, \
+ LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, {}, {} \
+ }
+#define R64(name) REG(name, 8)
+#define R32(name) REG(name, 4)
+
+static std::vector<DynamicRegisterInfo::Register> GetRegisters_aarch64() {
+ ConstString empty_alt_name;
+ ConstString reg_set{"general purpose registers"};
+
+ std::vector<DynamicRegisterInfo::Register> registers{
+ R64(x0), R64(x1), R64(x2), R64(x3), R64(x4), R64(x5), R64(x6),
+ R64(x7), R64(x8), R64(x9), R64(x10), R64(x11), R64(x12), R64(x13),
+ R64(x14), R64(x15), R64(x16), R64(x17), R64(x18), R64(x19), R64(x20),
+ R64(x21), R64(x22), R64(x23), R64(x24), R64(x25), R64(x26), R64(x27),
+ R64(x28), R64(x29), R64(x30), R64(sp), R64(pc), R32(cpsr),
+ };
+
+ return registers;
+}
+
+static std::vector<DynamicRegisterInfo::Register> GetRegisters_x86() {
+ ConstString empty_alt_name;
+ ConstString reg_set{"general purpose registers"};
+
+ std::vector<DynamicRegisterInfo::Register> registers{
+ R32(eax), R32(ecx), R32(edx), R32(ebx), R32(esp), R32(ebp),
+ R32(esi), R32(edi), R32(eip), R32(eflags), R32(cs), R32(ss),
+ R32(ds), R32(es), R32(fs), R32(gs),
+ };
+
+ return registers;
+}
+
+static std::vector<DynamicRegisterInfo::Register> GetRegisters_x86_64() {
+ ConstString empty_alt_name;
+ ConstString reg_set{"general purpose registers"};
+
+ std::vector<DynamicRegisterInfo::Register> registers{
+ R64(rax), R64(rbx), R64(rcx), R64(rdx), R64(rsi), R64(rdi),
+ R64(rbp), R64(rsp), R64(r8), R64(r9), R64(r10), R64(r11),
+ R64(r12), R64(r13), R64(r14), R64(r15), R64(rip), R32(eflags),
+ R32(cs), R32(ss), R32(ds), R32(es), R32(fs), R32(gs),
+ };
+
+ return registers;
+}
+
+#undef R32
+#undef R64
+#undef REG
+
+std::vector<DynamicRegisterInfo::Register>
+GetFallbackRegisters(const ArchSpec &arch_to_use) {
+ switch (arch_to_use.GetMachine()) {
+ case llvm::Triple::aarch64:
+ return GetRegisters_aarch64();
+ case llvm::Triple::x86:
+ return GetRegisters_x86();
+ case llvm::Triple::x86_64:
+ return GetRegisters_x86_64();
+ default:
+ break;
+ }
+
+ return {};
+}
+
+} // namespace process_gdb_remote
+} // namespace lldb_private
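
The new translation unit above supplies hardcoded general-purpose register layouts for aarch64, x86 and x86_64; ProcessGDBRemote.cpp later in this diff falls back to it when neither a target definition file, target.xml, nor qRegisterInfo produced any registers. A hedged usage sketch; the function name, triple string and printing are illustrative:

  #include "GDBRemoteRegisterFallback.h"
  #include "lldb/Utility/ArchSpec.h"
  #include <cstdio>
  #include <vector>

  using namespace lldb_private;
  using namespace lldb_private::process_gdb_remote;

  static void DumpFallbackRegisterCount(const char *triple) {
    ArchSpec arch(triple); // e.g. "x86_64-pc-linux"
    std::vector<DynamicRegisterInfo::Register> regs = GetFallbackRegisters(arch);
    // An empty vector means no hardcoded fallback exists for this machine type.
    std::printf("%s: %zu fallback registers\n", triple, regs.size());
  }
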
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterFallback.h b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterFallback.h
new file mode 100644
index 000000000000..82e03c6b9b1c
--- /dev/null
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterFallback.h
@@ -0,0 +1,26 @@
+//===-- GDBRemoteRegisterFallback.h -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLDB_SOURCE_PLUGINS_PROCESS_GDB_REMOTE_GDBREMOTEREGISTERFALLBACK_H
+#define LLDB_SOURCE_PLUGINS_PROCESS_GDB_REMOTE_GDBREMOTEREGISTERFALLBACK_H
+
+#include <vector>
+
+#include "lldb/Target/DynamicRegisterInfo.h"
+#include "lldb/Utility/ArchSpec.h"
+
+namespace lldb_private {
+namespace process_gdb_remote {
+
+std::vector<DynamicRegisterInfo::Register>
+GetFallbackRegisters(const ArchSpec &arch_to_use);
+
+} // namespace process_gdb_remote
+} // namespace lldb_private
+
+#endif // LLDB_SOURCE_PLUGINS_PROCESS_GDB_REMOTE_GDBREMOTEREGISTERFALLBACK_H
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp
index af95ef0718a5..5f3c1ad9c1e1 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp
@@ -23,13 +23,6 @@
#include <ctime>
#include <sys/types.h>
-#include <algorithm>
-#include <csignal>
-#include <map>
-#include <memory>
-#include <mutex>
-#include <sstream>
-
#include "lldb/Breakpoint/Watchpoint.h"
#include "lldb/Core/Debugger.h"
#include "lldb/Core/Module.h"
@@ -70,8 +63,16 @@
#include "lldb/Utility/State.h"
#include "lldb/Utility/StreamString.h"
#include "lldb/Utility/Timer.h"
+#include <algorithm>
+#include <csignal>
+#include <map>
+#include <memory>
+#include <mutex>
+#include <sstream>
+#include <thread>
#include "GDBRemoteRegisterContext.h"
+#include "GDBRemoteRegisterFallback.h"
#ifdef LLDB_ENABLE_ALL
#include "Plugins/Platform/MacOSX/PlatformRemoteiOS.h"
#endif // LLDB_ENABLE_ALL
@@ -255,9 +256,8 @@ ProcessGDBRemote::ProcessGDBRemote(lldb::TargetSP target_sp,
m_addr_to_mmap_size(), m_thread_create_bp_sp(),
m_waiting_for_attach(false), m_destroy_tried_resuming(false),
m_command_sp(), m_breakpoint_pc_offset(0),
- m_initial_tid(LLDB_INVALID_THREAD_ID), m_replay_mode(false),
- m_allow_flash_writes(false), m_erased_flash_ranges(),
- m_vfork_in_progress(false) {
+ m_initial_tid(LLDB_INVALID_THREAD_ID), m_allow_flash_writes(false),
+ m_erased_flash_ranges(), m_vfork_in_progress(false) {
m_async_broadcaster.SetEventName(eBroadcastBitAsyncThreadShouldExit,
"async thread should exit");
m_async_broadcaster.SetEventName(eBroadcastBitAsyncContinue,
@@ -397,6 +397,7 @@ void ProcessGDBRemote::BuildDynamicRegisterInfo(bool force) {
// 2 - If the target definition doesn't have any of the info from the
// target.xml (registers) then proceed to read the target.xml.
// 3 - Fall back on the qRegisterInfo packets.
+ // 4 - Use hardcoded defaults if available.
FileSpec target_definition_fspec =
GetGlobalPluginProperties().GetTargetDefinitionFile();
@@ -510,6 +511,9 @@ void ProcessGDBRemote::BuildDynamicRegisterInfo(bool force) {
}
}
+ if (registers.empty())
+ registers = GetFallbackRegisters(arch_to_use);
+
AddRemoteRegisters(registers, arch_to_use);
}
@@ -527,7 +531,7 @@ Status ProcessGDBRemote::WillAttachToProcessWithName(const char *process_name,
}
Status ProcessGDBRemote::DoConnectRemote(llvm::StringRef remote_url) {
- Log *log(ProcessGDBRemoteLog::GetLogIfAllCategoriesSet(GDBR_LOG_PROCESS));
+ Log *log = GetLog(GDBRLog::Process);
Status error(WillLaunchOrAttach());
if (error.Fail())
@@ -604,8 +608,7 @@ Status ProcessGDBRemote::DoConnectRemote(llvm::StringRef remote_url) {
ReadModuleFromMemory(FileSpec(namebuf), standalone_value);
}
- Log *log(ProcessGDBRemoteLog::GetLogIfAllCategoriesSet(
- LIBLLDB_LOG_DYNAMIC_LOADER));
+ Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_DYNAMIC_LOADER));
if (module_sp.get()) {
target.GetImages().AppendIfNeeded(module_sp, false);
@@ -3320,24 +3323,6 @@ Status ProcessGDBRemote::DoSignal(int signo) {
return error;
}
-Status ProcessGDBRemote::ConnectToReplayServer() {
- Status status = m_gdb_replay_server.Connect(m_gdb_comm);
- if (status.Fail())
- return status;
-
- // Enable replay mode.
- m_replay_mode = true;
-
- // Start server thread.
- m_gdb_replay_server.StartAsyncThread();
-
- // Start client thread.
- StartAsyncThread();
-
- // Do the usual setup.
- return ConnectToDebugserver("");
-}
-
Status
ProcessGDBRemote::EstablishConnectionIfNeeded(const ProcessInfo &process_info) {
// Make sure we aren't already connected?
@@ -4374,9 +4359,9 @@ bool ProcessGDBRemote::GetGDBServerRegisterInfoXMLAndProcess(
} else if (name == "osabi") {
node.GetElementText(target_info.osabi);
} else if (name == "xi:include" || name == "include") {
- llvm::StringRef href = node.GetAttributeValue("href");
+ std::string href = node.GetAttributeValue("href");
if (!href.empty())
- target_info.includes.push_back(href.str());
+ target_info.includes.push_back(href);
} else if (name == "feature") {
feature_nodes.push_back(node);
} else if (name == "groups") {
@@ -4415,9 +4400,9 @@ bool ProcessGDBRemote::GetGDBServerRegisterInfoXMLAndProcess(
const XMLNode &node) -> bool {
llvm::StringRef name = node.GetName();
if (name == "xi:include" || name == "include") {
- llvm::StringRef href = node.GetAttributeValue("href");
+ std::string href = node.GetAttributeValue("href");
if (!href.empty())
- target_info.includes.push_back(href.str());
+ target_info.includes.push_back(href);
}
return true;
});
@@ -4553,7 +4538,7 @@ llvm::Expected<LoadedModuleInfoList> ProcessGDBRemote::GetLoadedModuleList() {
"Error finding library-list-svr4 xml element");
// main link map structure
- llvm::StringRef main_lm = root_element.GetAttributeValue("main-lm");
+ std::string main_lm = root_element.GetAttributeValue("main-lm");
// FIXME: we're silently ignoring invalid data here
if (!main_lm.empty())
llvm::to_integer(main_lm, list.m_link_map);
@@ -4641,15 +4626,15 @@ llvm::Expected<LoadedModuleInfoList> ProcessGDBRemote::GetLoadedModuleList() {
"library", [log, &list](const XMLNode &library) -> bool {
LoadedModuleInfoList::LoadedModuleInfo module;
- llvm::StringRef name = library.GetAttributeValue("name");
- module.set_name(name.str());
+ std::string name = library.GetAttributeValue("name");
+ module.set_name(name);
// The base address of a given library will be the address of its
// first section. Most remotes send only one section for Windows
// targets for example.
const XMLNode &section =
library.FindFirstChildElementWithName("section");
- llvm::StringRef address = section.GetAttributeValue("address");
+ std::string address = section.GetAttributeValue("address");
uint64_t address_value = LLDB_INVALID_ADDRESS;
llvm::to_integer(address, address_value);
module.set_base(address_value);
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.h b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.h
index 488336b8c1b8..bdf130e3ec11 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.h
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.h
@@ -34,7 +34,6 @@
#include "lldb/lldb-private-forward.h"
#include "GDBRemoteCommunicationClient.h"
-#include "GDBRemoteCommunicationReplayServer.h"
#include "GDBRemoteRegisterContext.h"
#include "llvm/ADT/DenseMap.h"
@@ -251,7 +250,6 @@ protected:
};
GDBRemoteCommunicationClient m_gdb_comm;
- GDBRemoteCommunicationReplayServer m_gdb_replay_server;
std::atomic<lldb::pid_t> m_debugserver_pid;
llvm::Optional<StringExtractorGDBRemote> m_last_stop_packet;
@@ -292,7 +290,6 @@ protected:
lldb::tid_t m_initial_tid; // The initial thread ID, given by stub on attach
bool m_use_g_packet_for_reading;
- bool m_replay_mode;
bool m_allow_flash_writes;
using FlashRangeVector = lldb_private::RangeVector<lldb::addr_t, size_t>;
using FlashRange = FlashRangeVector::Entry;
@@ -320,8 +317,6 @@ protected:
bool DoUpdateThreadList(ThreadList &old_thread_list,
ThreadList &new_thread_list) override;
- Status ConnectToReplayServer();
-
Status EstablishConnectionIfNeeded(const ProcessInfo &process_info);
Status LaunchAndConnectToDebugserver(const ProcessInfo &process_info);
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemoteLog.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemoteLog.cpp
index 40990ef66494..3322f6b8048a 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemoteLog.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemoteLog.cpp
@@ -15,25 +15,29 @@ using namespace lldb_private;
using namespace lldb_private::process_gdb_remote;
static constexpr Log::Category g_categories[] = {
- {{"async"}, {"log asynchronous activity"}, GDBR_LOG_ASYNC},
- {{"break"}, {"log breakpoints"}, GDBR_LOG_BREAKPOINTS},
- {{"comm"}, {"log communication activity"}, GDBR_LOG_COMM},
- {{"packets"}, {"log gdb remote packets"}, GDBR_LOG_PACKETS},
- {{"memory"}, {"log memory reads and writes"}, GDBR_LOG_MEMORY},
+ {{"async"}, {"log asynchronous activity"}, GDBRLog::Async},
+ {{"break"}, {"log breakpoints"}, GDBRLog::Breakpoints},
+ {{"comm"}, {"log communication activity"}, GDBRLog::Comm},
+ {{"packets"}, {"log gdb remote packets"}, GDBRLog::Packets},
+ {{"memory"}, {"log memory reads and writes"}, GDBRLog::Memory},
{{"data-short"},
{"log memory bytes for memory reads and writes for short transactions "
"only"},
- GDBR_LOG_MEMORY_DATA_SHORT},
+ GDBRLog::MemoryDataShort},
{{"data-long"},
{"log memory bytes for memory reads and writes for all transactions"},
- GDBR_LOG_MEMORY_DATA_LONG},
- {{"process"}, {"log process events and activities"}, GDBR_LOG_PROCESS},
- {{"step"}, {"log step related activities"}, GDBR_LOG_STEP},
- {{"thread"}, {"log thread events and activities"}, GDBR_LOG_THREAD},
- {{"watch"}, {"log watchpoint related activities"}, GDBR_LOG_WATCHPOINTS},
+ GDBRLog::MemoryDataLong},
+ {{"process"}, {"log process events and activities"}, GDBRLog::Process},
+ {{"step"}, {"log step related activities"}, GDBRLog::Step},
+ {{"thread"}, {"log thread events and activities"}, GDBRLog::Thread},
+ {{"watch"}, {"log watchpoint related activities"}, GDBRLog::Watchpoints},
};
-Log::Channel ProcessGDBRemoteLog::g_channel(g_categories, GDBR_LOG_DEFAULT);
+static Log::Channel g_channel(g_categories, GDBRLog::Packets);
+
+template <> Log::Channel &lldb_private::LogChannelFor<GDBRLog>() {
+ return g_channel;
+}
void ProcessGDBRemoteLog::Initialize() {
static llvm::once_flag g_once_flag;
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemoteLog.h b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemoteLog.h
index bd3e993cf72a..44e390ec8cad 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemoteLog.h
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemoteLog.h
@@ -11,35 +11,52 @@
#include "lldb/Utility/Log.h"
-#define GDBR_LOG_PROCESS (1u << 1)
-#define GDBR_LOG_THREAD (1u << 2)
-#define GDBR_LOG_PACKETS (1u << 3)
-#define GDBR_LOG_MEMORY (1u << 4) // Log memory reads/writes calls
-#define GDBR_LOG_MEMORY_DATA_SHORT \
- (1u << 5) // Log short memory reads/writes bytes
-#define GDBR_LOG_MEMORY_DATA_LONG (1u << 6) // Log all memory reads/writes bytes
-#define GDBR_LOG_BREAKPOINTS (1u << 7)
-#define GDBR_LOG_WATCHPOINTS (1u << 8)
-#define GDBR_LOG_STEP (1u << 9)
-#define GDBR_LOG_COMM (1u << 10)
-#define GDBR_LOG_ASYNC (1u << 11)
-#define GDBR_LOG_ALL (UINT32_MAX)
-#define GDBR_LOG_DEFAULT GDBR_LOG_PACKETS
-
namespace lldb_private {
namespace process_gdb_remote {
-class ProcessGDBRemoteLog {
- static Log::Channel g_channel;
+enum class GDBRLog : Log::MaskType {
+ Async = Log::ChannelFlag<0>,
+ Breakpoints = Log::ChannelFlag<1>,
+ Comm = Log::ChannelFlag<2>,
+ Memory = Log::ChannelFlag<3>, // Log memory reads/writes calls
+ MemoryDataLong = Log::ChannelFlag<4>, // Log all memory reads/writes bytes
+ MemoryDataShort = Log::ChannelFlag<5>, // Log short memory reads/writes bytes
+ Packets = Log::ChannelFlag<6>,
+ Process = Log::ChannelFlag<7>,
+ Step = Log::ChannelFlag<8>,
+ Thread = Log::ChannelFlag<9>,
+ Watchpoints = Log::ChannelFlag<10>,
+ LLVM_MARK_AS_BITMASK_ENUM(Watchpoints)
+};
+#define GDBR_LOG_PROCESS ::lldb_private::process_gdb_remote::GDBRLog::Process
+#define GDBR_LOG_THREAD ::lldb_private::process_gdb_remote::GDBRLog::Thread
+#define GDBR_LOG_PACKETS ::lldb_private::process_gdb_remote::GDBRLog::Packets
+#define GDBR_LOG_MEMORY ::lldb_private::process_gdb_remote::GDBRLog::Memory
+#define GDBR_LOG_MEMORY_DATA_SHORT \
+ ::lldb_private::process_gdb_remote::GDBRLog::MemoryDataShort
+#define GDBR_LOG_MEMORY_DATA_LONG \
+ ::lldb_private::process_gdb_remote::GDBRLog::MemoryDataLong
+#define GDBR_LOG_BREAKPOINTS \
+ ::lldb_private::process_gdb_remote::GDBRLog::Breakpoints
+#define GDBR_LOG_WATCHPOINTS \
+ ::lldb_private::process_gdb_remote::GDBRLog::Watchpoints
+#define GDBR_LOG_STEP ::lldb_private::process_gdb_remote::GDBRLog::Step
+#define GDBR_LOG_COMM ::lldb_private::process_gdb_remote::GDBRLog::Comm
+#define GDBR_LOG_ASYNC ::lldb_private::process_gdb_remote::GDBRLog::Async
+
+class ProcessGDBRemoteLog {
public:
static void Initialize();
- static Log *GetLogIfAllCategoriesSet(uint32_t mask) { return g_channel.GetLogIfAll(mask); }
- static Log *GetLogIfAnyCategoryIsSet(uint32_t mask) { return g_channel.GetLogIfAny(mask); }
+ static Log *GetLogIfAllCategoriesSet(GDBRLog mask) { return GetLog(mask); }
+ static Log *GetLogIfAnyCategoryIsSet(GDBRLog mask) { return GetLog(mask); }
};
} // namespace process_gdb_remote
+
+template <> Log::Channel &LogChannelFor<process_gdb_remote::GDBRLog>();
+
} // namespace lldb_private
#endif // LLDB_SOURCE_PLUGINS_PROCESS_GDB_REMOTE_PROCESSGDBREMOTELOG_H
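
As with the POSIX channel earlier in this diff, the raw GDBR_LOG_* bit masks become a typed GDBRLog enum, and the remaining GDBR_LOG_* defines are kept as aliases for the enumerators so call sites that still spell the old macro names keep compiling. A brief sketch; the function name and messages are illustrative:

  #include "ProcessGDBRemoteLog.h"

  using namespace lldb_private;
  using namespace lldb_private::process_gdb_remote;

  static void LogPacketActivity() {
    // New style: typed category resolved through LogChannelFor<GDBRLog>().
    Log *log = GetLog(GDBRLog::Packets);
    LLDB_LOG(log, "sending packet");

    // Old-style spelling still compiles because the macro now names the
    // enumerator rather than a raw bit.
    Log *thread_log = GetLog(GDBR_LOG_THREAD);
    LLDB_LOG(thread_log, "thread event");
  }
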
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ThreadGDBRemote.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ThreadGDBRemote.cpp
index 2a9896e41085..3d23c074c1be 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ThreadGDBRemote.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ThreadGDBRemote.cpp
@@ -39,7 +39,7 @@ ThreadGDBRemote::ThreadGDBRemote(Process &process, lldb::tid_t tid)
m_dispatch_queue_t(LLDB_INVALID_ADDRESS), m_queue_kind(eQueueKindUnknown),
m_queue_serial_number(LLDB_INVALID_QUEUE_ID),
m_associated_with_libdispatch_queue(eLazyBoolCalculate) {
- Log *log(GetLogIfAnyCategoriesSet(GDBR_LOG_THREAD));
+ Log *log = GetLog(GDBRLog::Thread);
LLDB_LOG(log, "this = {0}, pid = {1}, tid = {2}", this, process.GetID(),
GetID());
// At this point we can clone reg_info for architectures supporting
@@ -54,7 +54,7 @@ ThreadGDBRemote::ThreadGDBRemote(Process &process, lldb::tid_t tid)
ThreadGDBRemote::~ThreadGDBRemote() {
ProcessSP process_sp(GetProcess());
- Log *log(GetLogIfAnyCategoriesSet(GDBR_LOG_THREAD));
+ Log *log = GetLog(GDBRLog::Thread);
LLDB_LOG(log, "this = {0}, pid = {1}, tid = {2}", this,
process_sp ? process_sp->GetID() : LLDB_INVALID_PROCESS_ID, GetID());
DestroyThread();
@@ -222,7 +222,7 @@ void ThreadGDBRemote::SetAssociatedWithLibdispatchQueue(
StructuredData::ObjectSP ThreadGDBRemote::FetchThreadExtendedInfo() {
StructuredData::ObjectSP object_sp;
const lldb::user_id_t tid = GetProtocolID();
- Log *log(GetLogIfAnyCategoriesSet(GDBR_LOG_THREAD));
+ Log *log = GetLog(GDBRLog::Thread);
LLDB_LOGF(log, "Fetching extended information for thread %4.4" PRIx64, tid);
ProcessSP process_sp(GetProcess());
if (process_sp) {
@@ -236,7 +236,7 @@ StructuredData::ObjectSP ThreadGDBRemote::FetchThreadExtendedInfo() {
void ThreadGDBRemote::WillResume(StateType resume_state) {
int signo = GetResumeSignal();
const lldb::user_id_t tid = GetProtocolID();
- Log *log(GetLogIfAnyCategoriesSet(GDBR_LOG_THREAD));
+ Log *log = GetLog(GDBRLog::Thread);
LLDB_LOGF(log, "Resuming thread: %4.4" PRIx64 " with state: %s.", tid,
StateAsCString(resume_state));
@@ -346,3 +346,23 @@ bool ThreadGDBRemote::CalculateStopInfo() {
->CalculateThreadStopInfo(this);
return false;
}
+
+llvm::Expected<std::unique_ptr<llvm::MemoryBuffer>>
+ThreadGDBRemote::GetSiginfo(size_t max_size) const {
+ ProcessSP process_sp(GetProcess());
+ if (!process_sp)
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "no process");
+ ProcessGDBRemote *gdb_process =
+ static_cast<ProcessGDBRemote *>(process_sp.get());
+ if (!gdb_process->m_gdb_comm.GetQXferSigInfoReadSupported())
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "qXfer:siginfo:read not supported");
+
+ llvm::Expected<std::string> response =
+ gdb_process->m_gdb_comm.ReadExtFeature("siginfo", "");
+ if (!response)
+ return response.takeError();
+
+ return llvm::MemoryBuffer::getMemBufferCopy(response.get());
+}
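
GetSiginfo() above exposes the stopped thread's raw siginfo bytes through qXfer:siginfo:read. A hedged sketch of a caller; it assumes the base Thread class declares the virtual that this override implements (implied by the header change below) and that the host can interpret the target's siginfo layout directly, which real callers must verify or translate:

  #include "lldb/Target/Thread.h"
  #include "llvm/Support/Error.h"
  #include "llvm/Support/MemoryBuffer.h"
  #include <algorithm>
  #include <csignal>
  #include <cstring>

  static llvm::Expected<int> GetStopSignalNumber(lldb_private::Thread &thread) {
    auto buf_or_err = thread.GetSiginfo(sizeof(siginfo_t));
    if (!buf_or_err)
      return buf_or_err.takeError();

    // Copy the raw bytes into a local siginfo_t and pull out the signal number.
    siginfo_t info{};
    size_t n = std::min(sizeof(info), (*buf_or_err)->getBufferSize());
    std::memcpy(&info, (*buf_or_err)->getBufferStart(), n);
    return info.si_signo;
  }
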
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ThreadGDBRemote.h b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ThreadGDBRemote.h
index b7d75021c062..fb83c74fd2c5 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ThreadGDBRemote.h
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ThreadGDBRemote.h
@@ -90,6 +90,9 @@ public:
StructuredData::ObjectSP FetchThreadExtendedInfo() override;
+ llvm::Expected<std::unique_ptr<llvm::MemoryBuffer>>
+ GetSiginfo(size_t max_size) const override;
+
protected:
friend class ProcessGDBRemote;
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/scripted/ScriptedProcess.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/scripted/ScriptedProcess.cpp
index cb21a3e7e65f..5eb7cb0e6a5c 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/scripted/ScriptedProcess.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/scripted/ScriptedProcess.cpp
@@ -164,9 +164,6 @@ Status ScriptedProcess::DoLaunch(Module *exe_module,
SetPrivateState(eStateStopped);
- UpdateThreadListIfNeeded();
- GetThreadList();
-
return {};
}
@@ -225,8 +222,8 @@ bool ScriptedProcess::IsAlive() {
size_t ScriptedProcess::DoReadMemory(lldb::addr_t addr, void *buf, size_t size,
Status &error) {
if (!m_interpreter)
- return GetInterface().ErrorWithMessage<size_t>(LLVM_PRETTY_FUNCTION,
- "No interpreter.", error);
+ return ScriptedInterface::ErrorWithMessage<size_t>(
+ LLVM_PRETTY_FUNCTION, "No interpreter.", error);
lldb::DataExtractorSP data_extractor_sp =
GetInterface().ReadMemoryAtAddress(addr, size, error);
@@ -238,7 +235,7 @@ size_t ScriptedProcess::DoReadMemory(lldb::addr_t addr, void *buf, size_t size,
0, data_extractor_sp->GetByteSize(), buf, size, GetByteOrder());
if (!bytes_copied || bytes_copied == LLDB_INVALID_OFFSET)
- return GetInterface().ErrorWithMessage<size_t>(
+ return ScriptedInterface::ErrorWithMessage<size_t>(
LLVM_PRETTY_FUNCTION, "Failed to copy read memory to buffer.", error);
return size;
@@ -296,7 +293,7 @@ bool ScriptedProcess::DoUpdateThreadList(ThreadList &old_thread_list,
ScriptLanguage language = m_interpreter->GetLanguage();
if (language != eScriptLanguagePython)
- return GetInterface().ErrorWithMessage<bool>(
+ return ScriptedInterface::ErrorWithMessage<bool>(
LLVM_PRETTY_FUNCTION,
llvm::Twine("ScriptInterpreter language (" +
llvm::Twine(m_interpreter->LanguageToString(language)) +
@@ -304,19 +301,57 @@ bool ScriptedProcess::DoUpdateThreadList(ThreadList &old_thread_list,
.str(),
error);
- lldb::ThreadSP thread_sp;
- thread_sp = std::make_shared<ScriptedThread>(*this, error);
+ StructuredData::DictionarySP thread_info_sp = GetInterface().GetThreadsInfo();
+
+ if (!thread_info_sp)
+ return ScriptedInterface::ErrorWithMessage<bool>(
+ LLVM_PRETTY_FUNCTION,
+ "Couldn't fetch thread list from Scripted Process.", error);
+
+ auto create_scripted_thread =
+ [this, &old_thread_list, &error,
+ &new_thread_list](ConstString key, StructuredData::Object *val) -> bool {
+ if (!val)
+ return ScriptedInterface::ErrorWithMessage<bool>(
+ LLVM_PRETTY_FUNCTION, "Invalid thread info object", error);
+
+ lldb::tid_t tid = LLDB_INVALID_THREAD_ID;
+ if (!llvm::to_integer(key.AsCString(), tid))
+ return ScriptedInterface::ErrorWithMessage<bool>(
+ LLVM_PRETTY_FUNCTION, "Invalid thread id", error);
+
+ if (ThreadSP thread_sp =
+ old_thread_list.FindThreadByID(tid, false /*=can_update*/)) {
+ // If the thread was already in the old_thread_list,
+ // just add it back to the new_thread_list.
+ new_thread_list.AddThread(thread_sp);
+ return true;
+ }
+
+ auto thread_or_error = ScriptedThread::Create(*this, val->GetAsGeneric());
+
+ if (!thread_or_error)
+ return ScriptedInterface::ErrorWithMessage<bool>(
+ LLVM_PRETTY_FUNCTION, toString(thread_or_error.takeError()), error);
+
+ ThreadSP thread_sp = thread_or_error.get();
+ lldbassert(thread_sp && "Couldn't initialize scripted thread.");
+
+ RegisterContextSP reg_ctx_sp = thread_sp->GetRegisterContext();
+ if (!reg_ctx_sp)
+ return ScriptedInterface::ErrorWithMessage<bool>(
+ LLVM_PRETTY_FUNCTION,
+ llvm::Twine("Invalid Register Context for thread " +
+ llvm::Twine(key.AsCString()))
+ .str(),
+ error);
- if (!thread_sp || error.Fail())
- return GetInterface().ErrorWithMessage<bool>(LLVM_PRETTY_FUNCTION,
- error.AsCString(), error);
+ new_thread_list.AddThread(thread_sp);
- RegisterContextSP reg_ctx_sp = thread_sp->GetRegisterContext();
- if (!reg_ctx_sp)
- return GetInterface().ErrorWithMessage<bool>(
- LLVM_PRETTY_FUNCTION, "Invalid Register Context", error);
+ return true;
+ };
- new_thread_list.AddThread(thread_sp);
+ thread_info_sp->ForEach(create_scripted_thread);
return new_thread_list.GetSize(false) > 0;
}
@@ -324,7 +359,6 @@ bool ScriptedProcess::DoUpdateThreadList(ThreadList &old_thread_list,
void ScriptedProcess::RefreshStateAfterStop() {
// Let all threads recover from stopping and do any clean up based on the
// previous thread state (if any).
- m_thread_list.RefreshStateAfterStop();
}
bool ScriptedProcess::GetProcessInfo(ProcessInstanceInfo &info) {
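
DoUpdateThreadList() now builds threads from a dictionary keyed by the thread id rendered as a string, so the create_scripted_thread lambda above has to parse each key back into a tid with llvm::to_integer before it can look the thread up. A standalone sketch of just that parsing step, assuming only llvm::StringRef and llvm::to_integer; the ParseTid() helper is illustrative:

#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include <cstdint>
#include <cstdio>

// Mirrors the "Invalid thread id" check in the lambda: to_integer() returns
// false for keys that are not numeric.
static bool ParseTid(llvm::StringRef key, uint64_t &tid) {
  return llvm::to_integer(key, tid);
}

int main() {
  uint64_t tid = 0;
  std::printf("\"42\"   -> %d (tid=%llu)\n", ParseTid("42", tid),
              (unsigned long long)tid);
  std::printf("\"oops\" -> %d\n", ParseTid("oops", tid));
  return 0;
}
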
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/scripted/ScriptedThread.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/scripted/ScriptedThread.cpp
index 959b8c581885..b6cbb62fd6e6 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/scripted/ScriptedThread.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/scripted/ScriptedThread.cpp
@@ -28,44 +28,60 @@ void ScriptedThread::CheckInterpreterAndScriptObject() const {
lldbassert(GetInterface() && "Invalid Scripted Thread Interface.");
}
-ScriptedThread::ScriptedThread(ScriptedProcess &process, Status &error)
- : Thread(process, LLDB_INVALID_THREAD_ID), m_scripted_process(process) {
- if (!process.IsValid()) {
- error.SetErrorString("Invalid scripted process");
- return;
- }
+llvm::Expected<std::shared_ptr<ScriptedThread>>
+ScriptedThread::Create(ScriptedProcess &process,
+ StructuredData::Generic *script_object) {
+ if (!process.IsValid())
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "Invalid scripted process.");
process.CheckInterpreterAndScriptObject();
- auto scripted_thread_interface = GetInterface();
- if (!scripted_thread_interface) {
- error.SetErrorString("Failed to get scripted thread interface.");
- return;
- }
-
- llvm::Optional<std::string> class_name =
- process.GetInterface().GetScriptedThreadPluginName();
- if (!class_name || class_name->empty()) {
- error.SetErrorString("Failed to get scripted thread class name.");
- return;
+ auto scripted_thread_interface =
+ process.GetInterface().CreateScriptedThreadInterface();
+ if (!scripted_thread_interface)
+ return llvm::createStringError(
+ llvm::inconvertibleErrorCode(),
+ "Failed to create scripted thread interface.");
+
+ llvm::StringRef thread_class_name;
+ if (!script_object) {
+ llvm::Optional<std::string> class_name =
+ process.GetInterface().GetScriptedThreadPluginName();
+ if (!class_name || class_name->empty())
+ return llvm::createStringError(
+ llvm::inconvertibleErrorCode(),
+ "Failed to get scripted thread class name.");
+ thread_class_name = *class_name;
}
ExecutionContext exe_ctx(process);
-
- StructuredData::GenericSP object_sp =
+ StructuredData::GenericSP owned_script_object_sp =
scripted_thread_interface->CreatePluginObject(
- class_name->c_str(), exe_ctx,
- process.m_scripted_process_info.GetArgsSP());
- if (!object_sp || !object_sp->IsValid()) {
- error.SetErrorString("Failed to create valid script object");
- return;
- }
+ thread_class_name, exe_ctx,
+ process.m_scripted_process_info.GetArgsSP(), script_object);
- m_script_object_sp = object_sp;
+ if (!owned_script_object_sp)
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "Failed to create script object.");
+ if (!owned_script_object_sp->IsValid())
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "Created script object is invalid.");
- SetID(scripted_thread_interface->GetThreadID());
+ lldb::tid_t tid = scripted_thread_interface->GetThreadID();
+
+ return std::make_shared<ScriptedThread>(process, scripted_thread_interface,
+ tid, owned_script_object_sp);
}
+ScriptedThread::ScriptedThread(ScriptedProcess &process,
+ ScriptedThreadInterfaceSP interface_sp,
+ lldb::tid_t tid,
+ StructuredData::GenericSP script_object_sp)
+ : Thread(process, tid), m_scripted_process(process),
+ m_scripted_thread_interface_sp(interface_sp),
+ m_script_object_sp(script_object_sp) {}
+
ScriptedThread::~ScriptedThread() { DestroyThread(); }
const char *ScriptedThread::GetName() {
@@ -137,6 +153,11 @@ bool ScriptedThread::CalculateStopInfo() {
StructuredData::DictionarySP dict_sp = GetInterface()->GetStopReason();
Status error;
+ if (!dict_sp)
+ return GetInterface()->ErrorWithMessage<bool>(
+ LLVM_PRETTY_FUNCTION, "Failed to get scripted thread stop info.", error,
+ LIBLLDB_LOG_THREAD);
+
lldb::StopInfoSP stop_info_sp;
lldb::StopReason stop_reason_type;
@@ -150,12 +171,12 @@ bool ScriptedThread::CalculateStopInfo() {
if (!dict_sp->GetValueForKeyAsDictionary("data", data_dict))
return GetInterface()->ErrorWithMessage<bool>(
LLVM_PRETTY_FUNCTION,
- "Couldn't find value for key 'type' in stop reason dictionary.", error,
+ "Couldn't find value for key 'data' in stop reason dictionary.", error,
LIBLLDB_LOG_THREAD);
switch (stop_reason_type) {
case lldb::eStopReasonNone:
- break;
+ return true;
case lldb::eStopReasonBreakpoint: {
lldb::break_id_t break_id;
data_dict->GetValueForKeyAsInteger("break_id", break_id,
@@ -172,6 +193,13 @@ bool ScriptedThread::CalculateStopInfo() {
stop_info_sp =
StopInfo::CreateStopReasonWithSignal(*this, signal, description.data());
} break;
+ case lldb::eStopReasonException: {
+ llvm::StringRef description;
+ data_dict->GetValueForKeyAsString("desc", description);
+
+ stop_info_sp =
+ StopInfo::CreateStopReasonWithException(*this, description.data());
+ } break;
default:
return GetInterface()->ErrorWithMessage<bool>(
LLVM_PRETTY_FUNCTION,
@@ -181,6 +209,9 @@ bool ScriptedThread::CalculateStopInfo() {
error, LIBLLDB_LOG_THREAD);
}
+ if (!stop_info_sp)
+ return false;
+
SetStopInfo(stop_info_sp);
return true;
}
@@ -190,7 +221,7 @@ void ScriptedThread::RefreshStateAfterStop() {
}
lldb::ScriptedThreadInterfaceSP ScriptedThread::GetInterface() const {
- return m_scripted_process.GetInterface().GetScriptedThreadInterface();
+ return m_scripted_thread_interface_sp;
}
std::shared_ptr<DynamicRegisterInfo> ScriptedThread::GetDynamicRegisterInfo() {
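
ScriptedThread moves from a constructor that reports failure through a Status out-parameter to a static Create() factory returning llvm::Expected, so validation failures never produce a half-constructed Thread. The general shape of that pattern, as a sketch using only llvm::Expected; the Widget class is a stand-in, not LLDB code:

#include "llvm/Support/Error.h"
#include <memory>
#include <string>
#include <utility>

class Widget {
public:
  // All validation happens here; the constructor only stores checked state.
  static llvm::Expected<std::shared_ptr<Widget>> Create(std::string name) {
    if (name.empty())
      return llvm::createStringError(llvm::inconvertibleErrorCode(),
                                     "empty widget name");
    return std::make_shared<Widget>(std::move(name));
  }

  explicit Widget(std::string name) : m_name(std::move(name)) {}

private:
  std::string m_name;
};
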
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/scripted/ScriptedThread.h b/contrib/llvm-project/lldb/source/Plugins/Process/scripted/ScriptedThread.h
index cdcd543702a4..8d8a7c2a3df9 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/scripted/ScriptedThread.h
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/scripted/ScriptedThread.h
@@ -25,11 +25,18 @@ class ScriptedProcess;
namespace lldb_private {
class ScriptedThread : public lldb_private::Thread {
+
public:
- ScriptedThread(ScriptedProcess &process, Status &error);
+ ScriptedThread(ScriptedProcess &process,
+ lldb::ScriptedThreadInterfaceSP interface_sp, lldb::tid_t tid,
+ StructuredData::GenericSP script_object_sp = nullptr);
~ScriptedThread() override;
+ static llvm::Expected<std::shared_ptr<ScriptedThread>>
+ Create(ScriptedProcess &process,
+ StructuredData::Generic *script_object = nullptr);
+
lldb::RegisterContextSP GetRegisterContext() override;
lldb::RegisterContextSP
@@ -59,8 +66,9 @@ private:
std::shared_ptr<DynamicRegisterInfo> GetDynamicRegisterInfo();
const ScriptedProcess &m_scripted_process;
+ lldb::ScriptedThreadInterfaceSP m_scripted_thread_interface_sp = nullptr;
+ lldb_private::StructuredData::GenericSP m_script_object_sp = nullptr;
std::shared_ptr<DynamicRegisterInfo> m_register_info_sp = nullptr;
- lldb_private::StructuredData::ObjectSP m_script_object_sp = nullptr;
};
} // namespace lldb_private
diff --git a/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.cpp b/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.cpp
index 7c71c9329e57..68f4e90d70f6 100644
--- a/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.cpp
@@ -69,6 +69,30 @@ Expected<std::string> python::As<std::string>(Expected<PythonObject> &&obj) {
return std::string(utf8.get());
}
+static bool python_is_finalizing() {
+#if PY_MAJOR_VERSION == 2
+ return false;
+#elif PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION < 7
+ return _Py_Finalizing != nullptr;
+#else
+ return _Py_IsFinalizing();
+#endif
+}
+
+void PythonObject::Reset() {
+ if (m_py_obj && Py_IsInitialized()) {
+ if (python_is_finalizing()) {
+ // Leak m_py_obj rather than crashing the process.
+ // https://docs.python.org/3/c-api/init.html#c.PyGILState_Ensure
+ } else {
+ PyGILState_STATE state = PyGILState_Ensure();
+ Py_DECREF(m_py_obj);
+ PyGILState_Release(state);
+ }
+ }
+ m_py_obj = nullptr;
+}
+
Expected<long long> PythonObject::AsLongLong() const {
if (!m_py_obj)
return nullDeref();
@@ -257,6 +281,9 @@ PythonObject PythonObject::GetAttributeValue(llvm::StringRef attr) const {
}
StructuredData::ObjectSP PythonObject::CreateStructuredObject() const {
+#if PY_MAJOR_VERSION >= 3
+ assert(PyGILState_Check());
+#endif
switch (GetObjectType()) {
case PyObjectType::Dictionary:
return PythonDictionary(PyRefType::Borrowed, m_py_obj)
@@ -279,7 +306,8 @@ StructuredData::ObjectSP PythonObject::CreateStructuredObject() const {
case PyObjectType::None:
return StructuredData::ObjectSP();
default:
- return StructuredData::ObjectSP(new StructuredPythonObject(m_py_obj));
+ return StructuredData::ObjectSP(new StructuredPythonObject(
+ PythonObject(PyRefType::Borrowed, m_py_obj)));
}
}
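
PythonObject::Reset() now takes the GIL before dropping its reference and skips the decrement entirely while the interpreter is finalizing. The acquire/release pair is the standard PyGILState idiom; a small RAII sketch of it, assuming only the documented CPython C API:

#include <Python.h>

// Scoped GIL ownership: PyGILState_Ensure() in the constructor,
// PyGILState_Release() in the destructor.
class ScopedGIL {
public:
  ScopedGIL() : m_state(PyGILState_Ensure()) {}
  ~ScopedGIL() { PyGILState_Release(m_state); }
  ScopedGIL(const ScopedGIL &) = delete;
  ScopedGIL &operator=(const ScopedGIL &) = delete;

private:
  PyGILState_STATE m_state;
};

// Usage, mirroring the non-finalizing branch of Reset():
//   if (obj && Py_IsInitialized()) {
//     ScopedGIL gil;
//     Py_DECREF(obj);
//   }
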
diff --git a/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.h b/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.h
index 56bc55d239d1..2094f0b3afd2 100644
--- a/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.h
+++ b/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.h
@@ -83,30 +83,6 @@ protected:
PyGILState_STATE m_state;
};
-class StructuredPythonObject : public StructuredData::Generic {
-public:
- StructuredPythonObject() : StructuredData::Generic() {}
-
- StructuredPythonObject(void *obj) : StructuredData::Generic(obj) {
- Py_XINCREF(GetValue());
- }
-
- ~StructuredPythonObject() override {
- if (Py_IsInitialized())
- Py_XDECREF(GetValue());
- SetValue(nullptr);
- }
-
- bool IsValid() const override { return GetValue() && GetValue() != Py_None; }
-
- void Serialize(llvm::json::OStream &s) const override;
-
-private:
- StructuredPythonObject(const StructuredPythonObject &) = delete;
- const StructuredPythonObject &
- operator=(const StructuredPythonObject &) = delete;
-};
-
enum class PyObjectType {
Unknown,
None,
@@ -263,11 +239,7 @@ public:
~PythonObject() { Reset(); }
- void Reset() {
- if (m_py_obj && Py_IsInitialized())
- Py_DECREF(m_py_obj);
- m_py_obj = nullptr;
- }
+ void Reset();
void Dump() const {
if (m_py_obj)
@@ -767,6 +739,30 @@ public:
}
};
+class StructuredPythonObject : public StructuredData::Generic {
+public:
+ StructuredPythonObject() : StructuredData::Generic() {}
+
+ // Take ownership of the object we received.
+ StructuredPythonObject(PythonObject obj)
+ : StructuredData::Generic(obj.release()) {}
+
+ ~StructuredPythonObject() override {
+ // Hand ownership back to a (temporary) PythonObject instance and let it
+ // take care of releasing it.
+ PythonObject(PyRefType::Owned, static_cast<PyObject *>(GetValue()));
+ }
+
+ bool IsValid() const override { return GetValue() && GetValue() != Py_None; }
+
+ void Serialize(llvm::json::OStream &s) const override;
+
+private:
+ StructuredPythonObject(const StructuredPythonObject &) = delete;
+ const StructuredPythonObject &
+ operator=(const StructuredPythonObject &) = delete;
+};
+
} // namespace python
} // namespace lldb_private
diff --git a/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/SWIGPythonBridge.h b/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/SWIGPythonBridge.h
index 2bb69dc47731..4df235356737 100644
--- a/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/SWIGPythonBridge.h
+++ b/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/SWIGPythonBridge.h
@@ -18,6 +18,7 @@
// LLDB Python header must be included first
#include "lldb-python.h"
+#include "Plugins/ScriptInterpreter/Python/PythonDataObjects.h"
#include "lldb/lldb-forward.h"
#include "lldb/lldb-types.h"
#include "llvm/Support/Error.h"
@@ -54,17 +55,15 @@ void *LLDBSWIGPython_CastPyObjectToSBMemoryRegionInfo(PyObject *data);
// Although these are scripting-language specific, their definition depends on
// the public API.
-void *LLDBSwigPythonCreateScriptedProcess(const char *python_class_name,
- const char *session_dictionary_name,
- const lldb::TargetSP &target_sp,
- const StructuredDataImpl &args_impl,
- std::string &error_string);
+python::PythonObject LLDBSwigPythonCreateScriptedProcess(
+ const char *python_class_name, const char *session_dictionary_name,
+ const lldb::TargetSP &target_sp, const StructuredDataImpl &args_impl,
+ std::string &error_string);
-void *LLDBSwigPythonCreateScriptedThread(const char *python_class_name,
- const char *session_dictionary_name,
- const lldb::ProcessSP &process_sp,
- const StructuredDataImpl &args_impl,
- std::string &error_string);
+python::PythonObject LLDBSwigPythonCreateScriptedThread(
+ const char *python_class_name, const char *session_dictionary_name,
+ const lldb::ProcessSP &process_sp, const StructuredDataImpl &args_impl,
+ std::string &error_string);
llvm::Expected<bool> LLDBSwigPythonBreakpointCallbackFunction(
const char *python_function_name, const char *session_dictionary_name,
@@ -83,16 +82,17 @@ bool LLDBSwigPythonCallTypeScript(const char *python_function_name,
const lldb::TypeSummaryOptionsSP &options_sp,
std::string &retval);
-void *
+python::PythonObject
LLDBSwigPythonCreateSyntheticProvider(const char *python_class_name,
const char *session_dictionary_name,
const lldb::ValueObjectSP &valobj_sp);
-void *LLDBSwigPythonCreateCommandObject(const char *python_class_name,
- const char *session_dictionary_name,
- lldb::DebuggerSP debugger_sp);
+python::PythonObject
+LLDBSwigPythonCreateCommandObject(const char *python_class_name,
+ const char *session_dictionary_name,
+ lldb::DebuggerSP debugger_sp);
-void *LLDBSwigPythonCreateScriptedThreadPlan(
+python::PythonObject LLDBSwigPythonCreateScriptedThreadPlan(
const char *python_class_name, const char *session_dictionary_name,
const StructuredDataImpl &args_data, std::string &error_string,
const lldb::ThreadPlanSP &thread_plan_sp);
@@ -101,7 +101,7 @@ bool LLDBSWIGPythonCallThreadPlan(void *implementor, const char *method_name,
lldb_private::Event *event_sp,
bool &got_error);
-void *LLDBSwigPythonCreateScriptedBreakpointResolver(
+python::PythonObject LLDBSwigPythonCreateScriptedBreakpointResolver(
const char *python_class_name, const char *session_dictionary_name,
const StructuredDataImpl &args, const lldb::BreakpointSP &bkpt_sp);
@@ -109,11 +109,10 @@ unsigned int
LLDBSwigPythonCallBreakpointResolver(void *implementor, const char *method_name,
lldb_private::SymbolContext *sym_ctx);
-void *LLDBSwigPythonCreateScriptedStopHook(lldb::TargetSP target_sp,
- const char *python_class_name,
- const char *session_dictionary_name,
- const StructuredDataImpl &args,
- lldb_private::Status &error);
+python::PythonObject LLDBSwigPythonCreateScriptedStopHook(
+ lldb::TargetSP target_sp, const char *python_class_name,
+ const char *session_dictionary_name, const StructuredDataImpl &args,
+ lldb_private::Status &error);
bool LLDBSwigPythonStopHookCallHandleStop(void *implementor,
lldb::ExecutionContextRefSP exc_ctx,
@@ -150,12 +149,14 @@ bool LLDBSwigPythonCallModuleInit(const char *python_module_name,
const char *session_dictionary_name,
lldb::DebuggerSP debugger);
-void *LLDBSWIGPythonCreateOSPlugin(const char *python_class_name,
- const char *session_dictionary_name,
- const lldb::ProcessSP &process_sp);
+python::PythonObject
+LLDBSWIGPythonCreateOSPlugin(const char *python_class_name,
+ const char *session_dictionary_name,
+ const lldb::ProcessSP &process_sp);
-void *LLDBSWIGPython_CreateFrameRecognizer(const char *python_class_name,
- const char *session_dictionary_name);
+python::PythonObject
+LLDBSWIGPython_CreateFrameRecognizer(const char *python_class_name,
+ const char *session_dictionary_name);
PyObject *
LLDBSwigPython_GetRecognizedArguments(PyObject *implementor,
diff --git a/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp b/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp
index 6afa4742698b..1bf647e4acfc 100644
--- a/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp
@@ -37,7 +37,7 @@
#include "lldb/Interpreter/CommandReturnObject.h"
#include "lldb/Target/Thread.h"
#include "lldb/Target/ThreadPlan.h"
-#include "lldb/Utility/ReproducerInstrumentation.h"
+#include "lldb/Utility/Instrumentation.h"
#include "lldb/Utility/Timer.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
@@ -70,6 +70,14 @@ extern "C" void init_lldb(void);
#define LLDBSwigPyInit init_lldb
#endif
+#if defined(_WIN32)
+// Don't mess with the signal handlers on Windows.
+#define LLDB_USE_PYTHON_SET_INTERRUPT 0
+#else
+// PyErr_SetInterrupt was introduced in 3.2.
+#define LLDB_USE_PYTHON_SET_INTERRUPT \
+ (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 2) || (PY_MAJOR_VERSION > 3)
+#endif
static ScriptInterpreterPythonImpl *GetPythonInterpreter(Debugger &debugger) {
ScriptInterpreter *script_interpreter =
@@ -77,8 +85,6 @@ static ScriptInterpreterPythonImpl *GetPythonInterpreter(Debugger &debugger) {
return static_cast<ScriptInterpreterPythonImpl *>(script_interpreter);
}
-static bool g_initialized = false;
-
namespace {
// Initializing Python is not a straightforward process. We cannot control
@@ -211,6 +217,28 @@ private:
PyGILState_STATE m_gil_state = PyGILState_UNLOCKED;
bool m_was_already_initialized = false;
};
+
+#if LLDB_USE_PYTHON_SET_INTERRUPT
+/// Saves the current signal handler for the specified signal and restores
+/// it at the end of the current scope.
+struct RestoreSignalHandlerScope {
+ /// The signal handler.
+ struct sigaction m_prev_handler;
+ int m_signal_code;
+ RestoreSignalHandlerScope(int signal_code) : m_signal_code(signal_code) {
+    // Initialize the sigaction struct to its default state.
+ std::memset(&m_prev_handler, 0, sizeof(m_prev_handler));
+ // Don't install a new handler, just read back the old one.
+ struct sigaction *new_handler = nullptr;
+ int signal_err = ::sigaction(m_signal_code, new_handler, &m_prev_handler);
+ lldbassert(signal_err == 0 && "sigaction failed to read handler");
+ }
+ ~RestoreSignalHandlerScope() {
+ int signal_err = ::sigaction(m_signal_code, &m_prev_handler, nullptr);
+ lldbassert(signal_err == 0 && "sigaction failed to restore old handler");
+ }
+};
+#endif
} // namespace
void ScriptInterpreterPython::ComputePythonDirForApple(
@@ -325,12 +353,12 @@ llvm::StringRef ScriptInterpreterPython::GetPluginDescriptionStatic() {
void ScriptInterpreterPython::Initialize() {
static llvm::once_flag g_once_flag;
-
llvm::call_once(g_once_flag, []() {
PluginManager::RegisterPlugin(GetPluginNameStatic(),
GetPluginDescriptionStatic(),
lldb::eScriptLanguagePython,
ScriptInterpreterPythonImpl::CreateInstance);
+ ScriptInterpreterPythonImpl::Initialize();
});
}
@@ -342,7 +370,6 @@ ScriptInterpreterPythonImpl::Locker::Locker(
: ScriptInterpreterLocker(),
m_teardown_session((on_leave & TearDownSession) == TearDownSession),
m_python_interpreter(py_interpreter) {
- repro::Recorder::PrivateThread();
DoAcquireLock();
if ((on_entry & InitSession) == InitSession) {
if (!DoInitSession(on_entry, in, out, err)) {
@@ -408,8 +435,6 @@ ScriptInterpreterPythonImpl::ScriptInterpreterPythonImpl(Debugger &debugger)
m_active_io_handler(eIOHandlerNone), m_session_is_active(false),
m_pty_secondary_is_open(false), m_valid_session(true), m_lock_count(0),
m_command_thread_state(nullptr) {
- InitializePrivate();
-
m_scripted_process_interface_up =
std::make_unique<ScriptedProcessPythonInterface>(*this);
@@ -921,6 +946,22 @@ void ScriptInterpreterPythonImpl::ExecuteInterpreterLoop() {
}
bool ScriptInterpreterPythonImpl::Interrupt() {
+#if LLDB_USE_PYTHON_SET_INTERRUPT
+ // If the interpreter isn't evaluating any Python at the moment then return
+ // false to signal that this function didn't handle the interrupt and the
+ // next component should try handling it.
+ if (!IsExecutingPython())
+ return false;
+
+ // Tell Python that it should pretend to have received a SIGINT.
+ PyErr_SetInterrupt();
+  // PyErr_SetInterrupt has no way to return an error, so we can only pretend
+  // the signal was handled successfully and return true.
+  // Python 3.10 introduces PyErr_SetInterruptEx, which can return an error,
+  // but its error handling only validates the arguments (here just our
+  // hard-coded SIGINT), so it is not useful to us.
+ return true;
+#else
Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_SCRIPT));
if (IsExecutingPython()) {
@@ -942,6 +983,7 @@ bool ScriptInterpreterPythonImpl::Interrupt() {
"ScriptInterpreterPythonImpl::Interrupt() python code not running, "
"can't interrupt");
return false;
+#endif
}
bool ScriptInterpreterPythonImpl::ExecuteOneLineWithReturn(
@@ -1414,16 +1456,12 @@ ScriptInterpreterPythonImpl::CreateFrameRecognizer(const char *class_name) {
if (class_name == nullptr || class_name[0] == '\0')
return StructuredData::GenericSP();
- void *ret_val;
-
- {
- Locker py_lock(this, Locker::AcquireLock | Locker::NoSTDIN,
- Locker::FreeLock);
- ret_val = LLDBSWIGPython_CreateFrameRecognizer(class_name,
- m_dictionary_name.c_str());
- }
+ Locker py_lock(this, Locker::AcquireLock | Locker::NoSTDIN, Locker::FreeLock);
+ PythonObject ret_val = LLDBSWIGPython_CreateFrameRecognizer(
+ class_name, m_dictionary_name.c_str());
- return StructuredData::GenericSP(new StructuredPythonObject(ret_val));
+ return StructuredData::GenericSP(
+ new StructuredPythonObject(std::move(ret_val)));
}
lldb::ValueObjectListSP ScriptInterpreterPythonImpl::GetRecognizedArguments(
@@ -1478,16 +1516,12 @@ ScriptInterpreterPythonImpl::OSPlugin_CreatePluginObject(
if (!process_sp)
return StructuredData::GenericSP();
- void *ret_val;
-
- {
- Locker py_lock(this, Locker::AcquireLock | Locker::NoSTDIN,
- Locker::FreeLock);
- ret_val = LLDBSWIGPythonCreateOSPlugin(
- class_name, m_dictionary_name.c_str(), process_sp);
- }
+ Locker py_lock(this, Locker::AcquireLock | Locker::NoSTDIN, Locker::FreeLock);
+ PythonObject ret_val = LLDBSWIGPythonCreateOSPlugin(
+ class_name, m_dictionary_name.c_str(), process_sp);
- return StructuredData::GenericSP(new StructuredPythonObject(ret_val));
+ return StructuredData::GenericSP(
+ new StructuredPythonObject(std::move(ret_val)));
}
StructuredData::DictionarySP ScriptInterpreterPythonImpl::OSPlugin_RegisterInfo(
@@ -1733,19 +1767,16 @@ StructuredData::ObjectSP ScriptInterpreterPythonImpl::CreateScriptedThreadPlan(
if (!python_interpreter)
return {};
- void *ret_val;
-
- {
- Locker py_lock(this,
- Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
- ret_val = LLDBSwigPythonCreateScriptedThreadPlan(
- class_name, python_interpreter->m_dictionary_name.c_str(),
- args_data, error_str, thread_plan_sp);
- if (!ret_val)
- return {};
- }
+ Locker py_lock(this,
+ Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
+ PythonObject ret_val = LLDBSwigPythonCreateScriptedThreadPlan(
+ class_name, python_interpreter->m_dictionary_name.c_str(), args_data,
+ error_str, thread_plan_sp);
+ if (!ret_val)
+ return {};
- return StructuredData::ObjectSP(new StructuredPythonObject(ret_val));
+ return StructuredData::ObjectSP(
+ new StructuredPythonObject(std::move(ret_val)));
}
bool ScriptInterpreterPythonImpl::ScriptedThreadPlanExplainsStop(
@@ -1836,18 +1867,15 @@ ScriptInterpreterPythonImpl::CreateScriptedBreakpointResolver(
if (!python_interpreter)
return StructuredData::GenericSP();
- void *ret_val;
-
- {
- Locker py_lock(this,
- Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
+ Locker py_lock(this,
+ Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
- ret_val = LLDBSwigPythonCreateScriptedBreakpointResolver(
- class_name, python_interpreter->m_dictionary_name.c_str(), args_data,
- bkpt_sp);
- }
+ PythonObject ret_val = LLDBSwigPythonCreateScriptedBreakpointResolver(
+ class_name, python_interpreter->m_dictionary_name.c_str(), args_data,
+ bkpt_sp);
- return StructuredData::GenericSP(new StructuredPythonObject(ret_val));
+ return StructuredData::GenericSP(
+ new StructuredPythonObject(std::move(ret_val)));
}
bool ScriptInterpreterPythonImpl::ScriptedBreakpointResolverSearchCallback(
@@ -1911,18 +1939,15 @@ StructuredData::GenericSP ScriptInterpreterPythonImpl::CreateScriptedStopHook(
return StructuredData::GenericSP();
}
- void *ret_val;
-
- {
- Locker py_lock(this,
- Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
+ Locker py_lock(this,
+ Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
- ret_val = LLDBSwigPythonCreateScriptedStopHook(
- target_sp, class_name, python_interpreter->m_dictionary_name.c_str(),
- args_data, error);
- }
+ PythonObject ret_val = LLDBSwigPythonCreateScriptedStopHook(
+ target_sp, class_name, python_interpreter->m_dictionary_name.c_str(),
+ args_data, error);
- return StructuredData::GenericSP(new StructuredPythonObject(ret_val));
+ return StructuredData::GenericSP(
+ new StructuredPythonObject(std::move(ret_val)));
}
bool ScriptInterpreterPythonImpl::ScriptedStopHookHandleStop(
@@ -2011,16 +2036,13 @@ ScriptInterpreterPythonImpl::CreateSyntheticScriptedProvider(
if (!python_interpreter)
return StructuredData::ObjectSP();
- void *ret_val = nullptr;
-
- {
- Locker py_lock(this,
- Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
- ret_val = LLDBSwigPythonCreateSyntheticProvider(
- class_name, python_interpreter->m_dictionary_name.c_str(), valobj);
- }
+ Locker py_lock(this,
+ Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
+ PythonObject ret_val = LLDBSwigPythonCreateSyntheticProvider(
+ class_name, python_interpreter->m_dictionary_name.c_str(), valobj);
- return StructuredData::ObjectSP(new StructuredPythonObject(ret_val));
+ return StructuredData::ObjectSP(
+ new StructuredPythonObject(std::move(ret_val)));
}
StructuredData::GenericSP
@@ -2033,16 +2055,13 @@ ScriptInterpreterPythonImpl::CreateScriptCommandObject(const char *class_name) {
if (!debugger_sp.get())
return StructuredData::GenericSP();
- void *ret_val;
-
- {
- Locker py_lock(this,
- Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
- ret_val = LLDBSwigPythonCreateCommandObject(
- class_name, m_dictionary_name.c_str(), debugger_sp);
- }
+ Locker py_lock(this,
+ Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
+ PythonObject ret_val = LLDBSwigPythonCreateCommandObject(
+ class_name, m_dictionary_name.c_str(), debugger_sp);
- return StructuredData::GenericSP(new StructuredPythonObject(ret_val));
+ return StructuredData::GenericSP(
+ new StructuredPythonObject(std::move(ret_val)));
}
bool ScriptInterpreterPythonImpl::GenerateTypeScriptFunction(
@@ -2152,8 +2171,12 @@ bool ScriptInterpreterPythonImpl::GetScriptedSummary(
return false;
}
- if (new_callee && old_callee != new_callee)
- callee_wrapper_sp = std::make_shared<StructuredPythonObject>(new_callee);
+ if (new_callee && old_callee != new_callee) {
+ Locker py_lock(this,
+ Locker::AcquireLock | Locker::InitSession | Locker::NoSTDIN);
+ callee_wrapper_sp = std::make_shared<StructuredPythonObject>(
+ PythonObject(PyRefType::Borrowed, static_cast<PyObject *>(new_callee)));
+ }
return ret_val;
}
@@ -2805,7 +2828,8 @@ bool ScriptInterpreterPythonImpl::LoadScriptingModule(
ScriptInterpreter::eScriptReturnTypeOpaqueObject, &module_pyobj,
exc_options) &&
module_pyobj)
- *module_sp = std::make_shared<StructuredPythonObject>(module_pyobj);
+ *module_sp = std::make_shared<StructuredPythonObject>(PythonObject(
+ PyRefType::Owned, static_cast<PyObject *>(module_pyobj)));
}
return true;
@@ -3145,12 +3169,7 @@ ScriptInterpreterPythonImpl::AcquireInterpreterLock() {
return py_lock;
}
-void ScriptInterpreterPythonImpl::InitializePrivate() {
- if (g_initialized)
- return;
-
- g_initialized = true;
-
+void ScriptInterpreterPythonImpl::Initialize() {
LLDB_SCOPED_TIMER();
// RAII-based initialization which correctly handles multiple-initialization,
@@ -3180,6 +3199,25 @@ void ScriptInterpreterPythonImpl::InitializePrivate() {
"lldb.embedded_interpreter; from "
"lldb.embedded_interpreter import run_python_interpreter; "
"from lldb.embedded_interpreter import run_one_line");
+
+#if LLDB_USE_PYTHON_SET_INTERRUPT
+  // Python will overwrite not just its internal SIGINT handler but also the
+  // one installed by the process. Back up the current SIGINT handler so that
+  // Python cannot delete it.
+ RestoreSignalHandlerScope save_sigint(SIGINT);
+
+  // Set up a default SIGINT signal handler that works the same way as the
+  // normal Python REPL signal handler, which raises a KeyboardInterrupt.
+  // Also make sure not to pollute the user's REPL with the signal module or
+  // our utility function.
+ PyRun_SimpleString("def lldb_setup_sigint_handler():\n"
+ " import signal;\n"
+ " def signal_handler(sig, frame):\n"
+ " raise KeyboardInterrupt()\n"
+ " signal.signal(signal.SIGINT, signal_handler);\n"
+ "lldb_setup_sigint_handler();\n"
+ "del lldb_setup_sigint_handler\n");
+#endif
}
void ScriptInterpreterPythonImpl::AddToSysPath(AddLocation location,
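
RestoreSignalHandlerScope exists because Python's initialization installs its own SIGINT handler: reading the current handler before initialization and writing it back afterwards keeps LLDB's handler intact. The underlying sigaction save/restore idiom, shown standalone and POSIX-only (nothing LLDB-specific is assumed):

#include <cstring>
#include <signal.h>

int main() {
  struct sigaction saved;
  std::memset(&saved, 0, sizeof(saved));
  // A null new-handler argument means "only read back the current handler".
  ::sigaction(SIGINT, nullptr, &saved);

  // ... something that may replace the SIGINT handler runs here ...

  // Restore the handler that was installed before.
  ::sigaction(SIGINT, &saved, nullptr);
  return 0;
}
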
diff --git a/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPythonImpl.h b/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPythonImpl.h
index defc2acffcfa..3b80c67d201a 100644
--- a/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPythonImpl.h
+++ b/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPythonImpl.h
@@ -341,7 +341,7 @@ public:
static bool WatchpointCallbackFunction(void *baton,
StoppointCallbackContext *context,
lldb::user_id_t watch_id);
- static void InitializePrivate();
+ static void Initialize();
class SynchronicityHandler {
private:
diff --git a/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptedProcessPythonInterface.cpp b/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptedProcessPythonInterface.cpp
index e3c1931a565a..e39f8be73e49 100644
--- a/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptedProcessPythonInterface.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptedProcessPythonInterface.cpp
@@ -32,7 +32,7 @@ ScriptedProcessPythonInterface::ScriptedProcessPythonInterface(
StructuredData::GenericSP ScriptedProcessPythonInterface::CreatePluginObject(
llvm::StringRef class_name, ExecutionContext &exe_ctx,
- StructuredData::DictionarySP args_sp) {
+ StructuredData::DictionarySP args_sp, StructuredData::Generic *script_obj) {
if (class_name.empty())
return {};
@@ -43,15 +43,12 @@ StructuredData::GenericSP ScriptedProcessPythonInterface::CreatePluginObject(
Locker py_lock(&m_interpreter, Locker::AcquireLock | Locker::NoSTDIN,
Locker::FreeLock);
- void *ret_val = LLDBSwigPythonCreateScriptedProcess(
+ PythonObject ret_val = LLDBSwigPythonCreateScriptedProcess(
class_name.str().c_str(), m_interpreter.GetDictionaryName(), target_sp,
args_impl, error_string);
- if (!ret_val)
- return {};
-
m_object_instance_sp =
- StructuredData::GenericSP(new StructuredPythonObject(ret_val));
+ StructuredData::GenericSP(new StructuredPythonObject(std::move(ret_val)));
return m_object_instance_sp;
}
@@ -92,6 +89,17 @@ ScriptedProcessPythonInterface::GetMemoryRegionContainingAddress(
return mem_region;
}
+StructuredData::DictionarySP ScriptedProcessPythonInterface::GetThreadsInfo() {
+ Status error;
+ StructuredData::DictionarySP dict =
+ Dispatch<StructuredData::DictionarySP>("get_threads_info", error);
+
+ if (!CheckStructuredDataObject(LLVM_PRETTY_FUNCTION, dict, error))
+ return {};
+
+ return dict;
+}
+
StructuredData::DictionarySP
ScriptedProcessPythonInterface::GetThreadWithID(lldb::tid_t tid) {
Status error;
@@ -154,12 +162,8 @@ ScriptedProcessPythonInterface::GetScriptedThreadPluginName() {
}
lldb::ScriptedThreadInterfaceSP
-ScriptedProcessPythonInterface::GetScriptedThreadInterface() {
- if (!m_scripted_thread_interface_sp)
- m_scripted_thread_interface_sp =
- std::make_shared<ScriptedThreadPythonInterface>(m_interpreter);
-
- return m_scripted_thread_interface_sp;
+ScriptedProcessPythonInterface::CreateScriptedThreadInterface() {
+ return std::make_shared<ScriptedThreadPythonInterface>(m_interpreter);
}
#endif
diff --git a/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptedProcessPythonInterface.h b/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptedProcessPythonInterface.h
index 421bdd59887c..e34a181849eb 100644
--- a/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptedProcessPythonInterface.h
+++ b/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptedProcessPythonInterface.h
@@ -25,7 +25,8 @@ public:
StructuredData::GenericSP
CreatePluginObject(const llvm::StringRef class_name,
ExecutionContext &exe_ctx,
- StructuredData::DictionarySP args_sp) override;
+ StructuredData::DictionarySP args_sp,
+ StructuredData::Generic *script_obj = nullptr) override;
Status Launch() override;
@@ -39,6 +40,8 @@ public:
GetMemoryRegionContainingAddress(lldb::addr_t address,
Status &error) override;
+ StructuredData::DictionarySP GetThreadsInfo() override;
+
StructuredData::DictionarySP GetThreadWithID(lldb::tid_t tid) override;
StructuredData::DictionarySP GetRegistersForThread(lldb::tid_t tid) override;
@@ -55,7 +58,7 @@ public:
llvm::Optional<std::string> GetScriptedThreadPluginName() override;
private:
- lldb::ScriptedThreadInterfaceSP GetScriptedThreadInterface() override;
+ lldb::ScriptedThreadInterfaceSP CreateScriptedThreadInterface() override;
};
} // namespace lldb_private
diff --git a/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptedThreadPythonInterface.cpp b/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptedThreadPythonInterface.cpp
index 6a881bfe625c..d471b2c5f7e3 100644
--- a/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptedThreadPythonInterface.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptedThreadPythonInterface.cpp
@@ -31,9 +31,8 @@ ScriptedThreadPythonInterface::ScriptedThreadPythonInterface(
StructuredData::GenericSP ScriptedThreadPythonInterface::CreatePluginObject(
const llvm::StringRef class_name, ExecutionContext &exe_ctx,
- StructuredData::DictionarySP args_sp) {
-
- if (class_name.empty())
+ StructuredData::DictionarySP args_sp, StructuredData::Generic *script_obj) {
+ if (class_name.empty() && !script_obj)
return {};
ProcessSP process_sp = exe_ctx.GetProcessSP();
@@ -43,15 +42,21 @@ StructuredData::GenericSP ScriptedThreadPythonInterface::CreatePluginObject(
Locker py_lock(&m_interpreter, Locker::AcquireLock | Locker::NoSTDIN,
Locker::FreeLock);
- void *ret_val = LLDBSwigPythonCreateScriptedThread(
- class_name.str().c_str(), m_interpreter.GetDictionaryName(), process_sp,
- args_impl, error_string);
+ PythonObject ret_val;
+
+ if (!script_obj)
+ ret_val = LLDBSwigPythonCreateScriptedThread(
+ class_name.str().c_str(), m_interpreter.GetDictionaryName(), process_sp,
+ args_impl, error_string);
+ else
+ ret_val = PythonObject(PyRefType::Borrowed,
+ static_cast<PyObject *>(script_obj->GetValue()));
if (!ret_val)
return {};
m_object_instance_sp =
- StructuredData::GenericSP(new StructuredPythonObject(ret_val));
+ StructuredData::GenericSP(new StructuredPythonObject(std::move(ret_val)));
return m_object_instance_sp;
}
diff --git a/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptedThreadPythonInterface.h b/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptedThreadPythonInterface.h
index 996b8d43136b..59bb182ae3f3 100644
--- a/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptedThreadPythonInterface.h
+++ b/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/ScriptedThreadPythonInterface.h
@@ -24,7 +24,8 @@ public:
StructuredData::GenericSP
CreatePluginObject(llvm::StringRef class_name, ExecutionContext &exe_ctx,
- StructuredData::DictionarySP args_sp) override;
+ StructuredData::DictionarySP args_sp,
+ StructuredData::Generic *script_obj = nullptr) override;
lldb::tid_t GetThreadID() override;
@@ -45,4 +46,4 @@ public:
} // namespace lldb_private
#endif // LLDB_ENABLE_PYTHON
-#endif // LLDB_PLUGINS_SCRIPTINTERPRETER_PYTHON_SCRIPTEDPROCESSTHREADINTERFACE_H
+#endif // LLDB_PLUGINS_SCRIPTINTERPRETER_PYTHON_SCRIPTEDTHREADPYTHONINTERFACE_H
diff --git a/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/lldb-python.h b/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/lldb-python.h
index 48f27b09b95c..c99372fa110c 100644
--- a/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/lldb-python.h
+++ b/contrib/llvm-project/lldb/source/Plugins/ScriptInterpreter/Python/lldb-python.h
@@ -9,6 +9,13 @@
#ifndef LLDB_PLUGINS_SCRIPTINTERPRETER_PYTHON_LLDB_PYTHON_H
#define LLDB_PLUGINS_SCRIPTINTERPRETER_PYTHON_LLDB_PYTHON_H
+// BEGIN FIXME
+// This declaration works around a clang module build failure.
+// It should be deleted ASAP.
+#include "llvm/Support/Error.h"
+static llvm::Expected<bool> *g_fcxx_modules_workaround;
+// END
+
#include "lldb/Host/Config.h"
// Python.h needs to be included before any system headers in order to avoid
diff --git a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/DIERef.cpp b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/DIERef.cpp
index 7a8ab9c9bcfd..25cb368763c1 100644
--- a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/DIERef.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/DIERef.cpp
@@ -7,8 +7,13 @@
//===----------------------------------------------------------------------===//
#include "DIERef.h"
+#include "lldb/Utility/DataEncoder.h"
+#include "lldb/Utility/DataExtractor.h"
#include "llvm/Support/Format.h"
+using namespace lldb;
+using namespace lldb_private;
+
void llvm::format_provider<DIERef>::format(const DIERef &ref, raw_ostream &OS,
StringRef Style) {
if (ref.dwo_num())
@@ -16,3 +21,35 @@ void llvm::format_provider<DIERef>::format(const DIERef &ref, raw_ostream &OS,
OS << (ref.section() == DIERef::DebugInfo ? "INFO" : "TYPE");
OS << "/" << format_hex_no_prefix(ref.die_offset(), 8);
}
+
+constexpr uint32_t k_dwo_num_mask = 0x3FFFFFFF;
+constexpr uint32_t k_dwo_num_valid_bitmask = (1u << 30);
+constexpr uint32_t k_section_bitmask = (1u << 31);
+
+llvm::Optional<DIERef> DIERef::Decode(const DataExtractor &data,
+ lldb::offset_t *offset_ptr) {
+ const uint32_t bitfield_storage = data.GetU32(offset_ptr);
+ uint32_t dwo_num = bitfield_storage & k_dwo_num_mask;
+ bool dwo_num_valid = (bitfield_storage & (k_dwo_num_valid_bitmask)) != 0;
+ Section section = (Section)((bitfield_storage & (k_section_bitmask)) != 0);
+  // DIE offsets can't be zero, and if we fail to decode something from data,
+  // GetU32() returns 0.
+ dw_offset_t die_offset = data.GetU32(offset_ptr);
+ if (die_offset == 0)
+ return llvm::None;
+ if (dwo_num_valid)
+ return DIERef(dwo_num, section, die_offset);
+ else
+ return DIERef(llvm::None, section, die_offset);
+}
+
+void DIERef::Encode(DataEncoder &encoder) const {
+ uint32_t bitfield_storage = m_dwo_num;
+ if (m_dwo_num_valid)
+ bitfield_storage |= k_dwo_num_valid_bitmask;
+ if (m_section)
+ bitfield_storage |= k_section_bitmask;
+ encoder.AppendU32(bitfield_storage);
+ static_assert(sizeof(m_die_offset) == 4, "m_die_offset must be 4 bytes");
+ encoder.AppendU32(m_die_offset);
+}
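
DIERef::Encode()/Decode() pack the optional 30-bit dwo_num, its validity bit, and the section selector into a single uint32_t, followed by the 32-bit DIE offset. A standalone round trip of that packing, using the same masks as the code above; the field values are arbitrary examples:

#include <cassert>
#include <cstdint>

constexpr uint32_t k_dwo_num_mask = 0x3FFFFFFF;        // low 30 bits
constexpr uint32_t k_dwo_num_valid_bitmask = 1u << 30; // dwo_num is present
constexpr uint32_t k_section_bitmask = 1u << 31;       // DebugInfo vs DebugTypes

int main() {
  const uint32_t dwo_num = 0x1234;
  const bool dwo_num_valid = true;
  const bool is_debug_types = false;

  uint32_t packed = dwo_num & k_dwo_num_mask;
  if (dwo_num_valid)
    packed |= k_dwo_num_valid_bitmask;
  if (is_debug_types)
    packed |= k_section_bitmask;

  // Unpack and verify every field survives the round trip.
  assert((packed & k_dwo_num_mask) == dwo_num);
  assert(((packed & k_dwo_num_valid_bitmask) != 0) == dwo_num_valid);
  assert(((packed & k_section_bitmask) != 0) == is_debug_types);
  return 0;
}
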
diff --git a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/DIERef.h b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/DIERef.h
index f7e09ee17283..23e1eec26ec3 100644
--- a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/DIERef.h
+++ b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/DIERef.h
@@ -54,6 +54,37 @@ public:
return m_die_offset < other.m_die_offset;
}
+ bool operator==(const DIERef &rhs) const {
+ return dwo_num() == rhs.dwo_num() && m_section == rhs.m_section &&
+ m_die_offset == rhs.m_die_offset;
+ }
+
+ bool operator!=(const DIERef &rhs) const { return !(*this == rhs); }
+
+ /// Decode a serialized version of this object from data.
+ ///
+ /// \param data
+ /// The decoder object that references the serialized data.
+ ///
+ /// \param offset_ptr
+  ///   A pointer to the offset from which the data will be decoded; it gets
+  ///   updated as data is decoded.
+ ///
+ /// \return
+  ///   Returns a valid DIERef if decoding succeeded, or llvm::None if the
+  ///   decoded values were insufficient or invalid.
+ static llvm::Optional<DIERef> Decode(const lldb_private::DataExtractor &data,
+ lldb::offset_t *offset_ptr);
+
+ /// Encode this object into a data encoder object.
+ ///
+ /// This allows this object to be serialized to disk.
+ ///
+ /// \param encoder
+ /// A data encoder object that serialized bytes will be encoded into.
+ ///
+ void Encode(lldb_private::DataEncoder &encoder) const;
+
private:
uint32_t m_dwo_num : 30;
uint32_t m_dwo_num_valid : 1;
diff --git a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp
index b90f104c4d21..be555c130bfe 100644
--- a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp
@@ -933,7 +933,7 @@ TypeSP DWARFASTParserClang::ParseSubroutine(const DWARFDIE &die,
DW_TAG_value_to_name(tag), type_name_cstr);
CompilerType return_clang_type;
- Type *func_type = NULL;
+ Type *func_type = nullptr;
if (attrs.type.IsValid())
func_type = dwarf->ResolveTypeUID(attrs.type.Reference(), true);
@@ -1027,7 +1027,7 @@ TypeSP DWARFASTParserClang::ParseSubroutine(const DWARFDIE &die,
class_opaque_type, attrs.name.GetCString(), clang_type,
attrs.accessibility, attrs.is_artificial, is_variadic,
attrs.is_objc_direct_call);
- type_handled = objc_method_decl != NULL;
+ type_handled = objc_method_decl != nullptr;
if (type_handled) {
LinkDeclContextToDIE(objc_method_decl, die);
m_ast.SetMetadataAsUserID(objc_method_decl, die.GetID());
@@ -1178,7 +1178,7 @@ TypeSP DWARFASTParserClang::ParseSubroutine(const DWARFDIE &die,
is_static, attrs.is_inline, attrs.is_explicit,
is_attr_used, attrs.is_artificial);
- type_handled = cxx_method_decl != NULL;
+ type_handled = cxx_method_decl != nullptr;
// Artificial methods are always handled even when we
// don't create a new declaration for them.
type_handled |= attrs.is_artificial;
@@ -2036,7 +2036,7 @@ bool DWARFASTParserClang::ParseTemplateDIE(
if (name && name[0])
template_param_infos.names.push_back(name);
else
- template_param_infos.names.push_back(NULL);
+ template_param_infos.names.push_back(nullptr);
// Get the signed value for any integer or enumeration if available
clang_type.IsIntegerOrEnumerationType(is_signed);
@@ -3336,7 +3336,8 @@ DWARFASTParserClang::GetOwningClangModule(const DWARFDIE &die) {
auto it = m_die_to_module.find(module_die.GetDIE());
if (it != m_die_to_module.end())
return it->second;
- const char *name = module_die.GetAttributeValueAsString(DW_AT_name, 0);
+ const char *name =
+ module_die.GetAttributeValueAsString(DW_AT_name, nullptr);
if (!name)
return {};
diff --git a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/DWARFIndex.h b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/DWARFIndex.h
index 1d3d70dfef01..c4995e721554 100644
--- a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/DWARFIndex.h
+++ b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/DWARFIndex.h
@@ -64,11 +64,11 @@ public:
virtual void Dump(Stream &s) = 0;
- StatsDuration GetIndexTime() { return m_index_time; }
+ StatsDuration::Duration GetIndexTime() { return m_index_time; }
protected:
Module &m_module;
- StatsDuration m_index_time{0.0};
+ StatsDuration m_index_time;
/// Helper function implementing common logic for processing function dies. If
/// the function given by "ref" matches search criteria given by
diff --git a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/DWARFUnit.h b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/DWARFUnit.h
index 71d4c1e6c52f..2457e8276e20 100644
--- a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/DWARFUnit.h
+++ b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/DWARFUnit.h
@@ -269,7 +269,7 @@ protected:
ExtractUnitDIENoDwoIfNeeded();
// m_first_die_mutex is not required as m_first_die is never cleared.
if (!m_first_die)
- return NULL;
+ return nullptr;
return &m_first_die;
}
@@ -277,7 +277,7 @@ protected:
const DWARFDebugInfoEntry *DIEPtr() {
ExtractDIEsIfNeeded();
if (m_die_array.empty())
- return NULL;
+ return nullptr;
return &m_die_array[0];
}
diff --git a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/LogChannelDWARF.cpp b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/LogChannelDWARF.cpp
index 3f1d6677bacf..d2b8fe19db53 100644
--- a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/LogChannelDWARF.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/LogChannelDWARF.cpp
@@ -13,18 +13,20 @@ using namespace lldb_private;
static constexpr Log::Category g_categories[] = {
{{"comp"},
{"log insertions of object files into DWARF debug maps"},
- DWARF_LOG_TYPE_COMPLETION},
- {{"info"}, {"log the parsing of .debug_info"}, DWARF_LOG_DEBUG_INFO},
- {{"line"}, {"log the parsing of .debug_line"}, DWARF_LOG_DEBUG_LINE},
+ DWARFLog::TypeCompletion},
+ {{"info"}, {"log the parsing of .debug_info"}, DWARFLog::DebugInfo},
+ {{"line"}, {"log the parsing of .debug_line"}, DWARFLog::DebugLine},
{{"lookups"},
{"log any lookups that happen by name, regex, or address"},
- DWARF_LOG_LOOKUPS},
- {{"map"},
- {"log struct/unions/class type completions"},
- DWARF_LOG_DEBUG_MAP},
+ DWARFLog::Lookups},
+ {{"map"}, {"log struct/unions/class type completions"}, DWARFLog::DebugMap},
};
-Log::Channel LogChannelDWARF::g_channel(g_categories, DWARF_LOG_DEFAULT);
+static Log::Channel g_channel(g_categories, DWARFLog::DebugInfo);
+
+template <> Log::Channel &lldb_private::LogChannelFor<DWARFLog>() {
+ return g_channel;
+}
void LogChannelDWARF::Initialize() {
Log::Register("dwarf", g_channel);
diff --git a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/LogChannelDWARF.h b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/LogChannelDWARF.h
index 2fc23563ef93..8076c719e9c4 100644
--- a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/LogChannelDWARF.h
+++ b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/LogChannelDWARF.h
@@ -11,25 +11,32 @@
#include "lldb/Utility/Log.h"
-#define DWARF_LOG_DEBUG_INFO (1u << 1)
-#define DWARF_LOG_DEBUG_LINE (1u << 2)
-#define DWARF_LOG_LOOKUPS (1u << 3)
-#define DWARF_LOG_TYPE_COMPLETION (1u << 4)
-#define DWARF_LOG_DEBUG_MAP (1u << 5)
-#define DWARF_LOG_ALL (UINT32_MAX)
-#define DWARF_LOG_DEFAULT (DWARF_LOG_DEBUG_INFO)
-
namespace lldb_private {
-class LogChannelDWARF {
- static Log::Channel g_channel;
+enum class DWARFLog : Log::MaskType {
+ DebugInfo = Log::ChannelFlag<0>,
+ DebugLine = Log::ChannelFlag<1>,
+ DebugMap = Log::ChannelFlag<2>,
+ Lookups = Log::ChannelFlag<3>,
+ TypeCompletion = Log::ChannelFlag<4>,
+ LLVM_MARK_AS_BITMASK_ENUM(TypeCompletion)
+};
+#define DWARF_LOG_DEBUG_INFO ::lldb_private::DWARFLog::DebugInfo
+#define DWARF_LOG_DEBUG_LINE ::lldb_private::DWARFLog::DebugLine
+#define DWARF_LOG_LOOKUPS ::lldb_private::DWARFLog::Lookups
+#define DWARF_LOG_TYPE_COMPLETION ::lldb_private::DWARFLog::TypeCompletion
+#define DWARF_LOG_DEBUG_MAP ::lldb_private::DWARFLog::DebugMap
+
+class LogChannelDWARF {
public:
static void Initialize();
static void Terminate();
- static Log *GetLogIfAll(uint32_t mask) { return g_channel.GetLogIfAll(mask); }
- static Log *GetLogIfAny(uint32_t mask) { return g_channel.GetLogIfAny(mask); }
+ static Log *GetLogIfAll(DWARFLog mask) { return GetLog(mask); }
+ static Log *GetLogIfAny(DWARFLog mask) { return GetLog(mask); }
};
-}
+
+template <> Log::Channel &LogChannelFor<DWARFLog>();
+} // namespace lldb_private
#endif // LLDB_SOURCE_PLUGINS_SYMBOLFILE_DWARF_LOGCHANNELDWARF_H
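
The switch from DWARF_LOG_* macros to the scoped DWARFLog enum means a call site can no longer pass a mask that belongs to an unrelated log channel. A plain-C++ sketch of what scoped-enum flags buy here, independent of the LLDB Log machinery; DemoLog and its categories are illustrative:

#include <cstdint>

enum class DemoLog : uint32_t {
  DebugInfo = 1u << 0,
  DebugLine = 1u << 1,
  Lookups = 1u << 2,
};

// Combine categories without losing the type.
constexpr DemoLog operator|(DemoLog a, DemoLog b) {
  return static_cast<DemoLog>(static_cast<uint32_t>(a) |
                              static_cast<uint32_t>(b));
}

// Test whether any of the requested category bits are set in a mask.
constexpr bool AnySet(DemoLog mask, DemoLog bits) {
  return (static_cast<uint32_t>(mask) & static_cast<uint32_t>(bits)) != 0;
}

static_assert(AnySet(DemoLog::DebugInfo | DemoLog::Lookups, DemoLog::Lookups),
              "a scoped-enum mask keeps its category bits");
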
diff --git a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/ManualDWARFIndex.cpp b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/ManualDWARFIndex.cpp
index ab10e9ca98f9..e15a22affcb2 100644
--- a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/ManualDWARFIndex.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/ManualDWARFIndex.cpp
@@ -12,9 +12,12 @@
#include "Plugins/SymbolFile/DWARF/DWARFDeclContext.h"
#include "Plugins/SymbolFile/DWARF/LogChannelDWARF.h"
#include "Plugins/SymbolFile/DWARF/SymbolFileDWARFDwo.h"
+#include "lldb/Core/DataFileCache.h"
#include "lldb/Core/Module.h"
#include "lldb/Core/Progress.h"
#include "lldb/Symbol/ObjectFile.h"
+#include "lldb/Utility/DataEncoder.h"
+#include "lldb/Utility/DataExtractor.h"
#include "lldb/Utility/Stream.h"
#include "lldb/Utility/Timer.h"
#include "llvm/Support/FormatVariadic.h"
@@ -24,17 +27,19 @@ using namespace lldb_private;
using namespace lldb;
void ManualDWARFIndex::Index() {
- if (!m_dwarf)
+ if (m_indexed)
return;
-
- SymbolFileDWARF &main_dwarf = *m_dwarf;
- m_dwarf = nullptr;
+ m_indexed = true;
ElapsedTime elapsed(m_index_time);
- LLDB_SCOPED_TIMERF("%p", static_cast<void *>(&main_dwarf));
+ LLDB_SCOPED_TIMERF("%p", static_cast<void *>(m_dwarf));
+ if (LoadFromCache()) {
+ m_dwarf->SetDebugInfoIndexWasLoadedFromCache();
+ return;
+ }
- DWARFDebugInfo &main_info = main_dwarf.DebugInfo();
- SymbolFileDWARFDwo *dwp_dwarf = main_dwarf.GetDwpSymbolFile().get();
+ DWARFDebugInfo &main_info = m_dwarf->DebugInfo();
+ SymbolFileDWARFDwo *dwp_dwarf = m_dwarf->GetDwpSymbolFile().get();
DWARFDebugInfo *dwp_info = dwp_dwarf ? &dwp_dwarf->DebugInfo() : nullptr;
std::vector<DWARFUnit *> units_to_index;
@@ -125,6 +130,8 @@ void ManualDWARFIndex::Index() {
pool.async(finalize_fn, &IndexSet::types);
pool.async(finalize_fn, &IndexSet::namespaces);
pool.wait();
+
+ SaveToCache();
}
void ManualDWARFIndex::IndexUnit(DWARFUnit &unit, SymbolFileDWARFDwo *dwp,
@@ -480,3 +487,214 @@ void ManualDWARFIndex::Dump(Stream &s) {
s.Printf("\nNamespaces:\n");
m_set.namespaces.Dump(&s);
}
+
+constexpr llvm::StringLiteral kIdentifierManualDWARFIndex("DIDX");
+// Define IDs for the different tables when encoding and decoding the
+// ManualDWARFIndex NameToDIE objects so we can avoid saving any empty maps.
+enum DataID {
+ kDataIDFunctionBasenames = 1u,
+ kDataIDFunctionFullnames,
+ kDataIDFunctionMethods,
+ kDataIDFunctionSelectors,
+ kDataIDFunctionObjcClassSelectors,
+ kDataIDGlobals,
+ kDataIDTypes,
+ kDataIDNamespaces,
+ kDataIDEnd = 255u,
+
+};
+constexpr uint32_t CURRENT_CACHE_VERSION = 1;
+
+bool ManualDWARFIndex::IndexSet::Decode(const DataExtractor &data,
+ lldb::offset_t *offset_ptr) {
+ StringTableReader strtab;
+ // We now decode the string table for all strings in the data cache file.
+ if (!strtab.Decode(data, offset_ptr))
+ return false;
+
+ llvm::StringRef identifier((const char *)data.GetData(offset_ptr, 4), 4);
+ if (identifier != kIdentifierManualDWARFIndex)
+ return false;
+ const uint32_t version = data.GetU32(offset_ptr);
+ if (version != CURRENT_CACHE_VERSION)
+ return false;
+
+ bool done = false;
+ while (!done) {
+ switch (data.GetU8(offset_ptr)) {
+ default:
+      // Getting here is unexpected: the data ID should match one of the
+      // values from the DataID enumeration.
+ return false;
+ case kDataIDFunctionBasenames:
+ if (!function_basenames.Decode(data, offset_ptr, strtab))
+ return false;
+ break;
+ case kDataIDFunctionFullnames:
+ if (!function_fullnames.Decode(data, offset_ptr, strtab))
+ return false;
+ break;
+ case kDataIDFunctionMethods:
+ if (!function_methods.Decode(data, offset_ptr, strtab))
+ return false;
+ break;
+ case kDataIDFunctionSelectors:
+ if (!function_selectors.Decode(data, offset_ptr, strtab))
+ return false;
+ break;
+ case kDataIDFunctionObjcClassSelectors:
+ if (!objc_class_selectors.Decode(data, offset_ptr, strtab))
+ return false;
+ break;
+ case kDataIDGlobals:
+ if (!globals.Decode(data, offset_ptr, strtab))
+ return false;
+ break;
+ case kDataIDTypes:
+ if (!types.Decode(data, offset_ptr, strtab))
+ return false;
+ break;
+ case kDataIDNamespaces:
+ if (!namespaces.Decode(data, offset_ptr, strtab))
+ return false;
+ break;
+ case kDataIDEnd:
+ // We got to the end of our NameToDIE encodings.
+ done = true;
+ break;
+ }
+ }
+ // Success!
+ return true;
+}
+
+void ManualDWARFIndex::IndexSet::Encode(DataEncoder &encoder) const {
+ ConstStringTable strtab;
+
+  // Encode the DWARF index into a separate encoder first. This allows us to
+  // gather all of the strings we will need in "strtab", since the string
+  // table must be written out before the symbol table.
+ DataEncoder index_encoder(encoder.GetByteOrder(),
+ encoder.GetAddressByteSize());
+
+ index_encoder.AppendData(kIdentifierManualDWARFIndex);
+ // Encode the data version.
+ index_encoder.AppendU32(CURRENT_CACHE_VERSION);
+
+ if (!function_basenames.IsEmpty()) {
+ index_encoder.AppendU8(kDataIDFunctionBasenames);
+ function_basenames.Encode(index_encoder, strtab);
+ }
+ if (!function_fullnames.IsEmpty()) {
+ index_encoder.AppendU8(kDataIDFunctionFullnames);
+ function_fullnames.Encode(index_encoder, strtab);
+ }
+ if (!function_methods.IsEmpty()) {
+ index_encoder.AppendU8(kDataIDFunctionMethods);
+ function_methods.Encode(index_encoder, strtab);
+ }
+ if (!function_selectors.IsEmpty()) {
+ index_encoder.AppendU8(kDataIDFunctionSelectors);
+ function_selectors.Encode(index_encoder, strtab);
+ }
+ if (!objc_class_selectors.IsEmpty()) {
+ index_encoder.AppendU8(kDataIDFunctionObjcClassSelectors);
+ objc_class_selectors.Encode(index_encoder, strtab);
+ }
+ if (!globals.IsEmpty()) {
+ index_encoder.AppendU8(kDataIDGlobals);
+ globals.Encode(index_encoder, strtab);
+ }
+ if (!types.IsEmpty()) {
+ index_encoder.AppendU8(kDataIDTypes);
+ types.Encode(index_encoder, strtab);
+ }
+ if (!namespaces.IsEmpty()) {
+ index_encoder.AppendU8(kDataIDNamespaces);
+ namespaces.Encode(index_encoder, strtab);
+ }
+ index_encoder.AppendU8(kDataIDEnd);
+
+ // Now that all strings have been gathered, we will emit the string table.
+ strtab.Encode(encoder);
+ // Followed by the index data.
+ encoder.AppendData(index_encoder.GetData());
+}
+
+bool ManualDWARFIndex::Decode(const DataExtractor &data,
+ lldb::offset_t *offset_ptr,
+ bool &signature_mismatch) {
+ signature_mismatch = false;
+ CacheSignature signature;
+ if (!signature.Decode(data, offset_ptr))
+ return false;
+ if (CacheSignature(m_dwarf->GetObjectFile()) != signature) {
+ signature_mismatch = true;
+ return false;
+ }
+ IndexSet set;
+ if (!set.Decode(data, offset_ptr))
+ return false;
+ m_set = std::move(set);
+ return true;
+}
+
+bool ManualDWARFIndex::Encode(DataEncoder &encoder) const {
+ CacheSignature signature(m_dwarf->GetObjectFile());
+ if (!signature.Encode(encoder))
+ return false;
+ m_set.Encode(encoder);
+ return true;
+}
+
+std::string ManualDWARFIndex::GetCacheKey() {
+ std::string key;
+ llvm::raw_string_ostream strm(key);
+ // The DWARF index can come from different object files for the same module. A
+ // module can have one object file as the main executable and might have
+ // another object file in a separate symbol file, or we might have a .dwo file
+ // that claims its module is the main executable.
+ ObjectFile *objfile = m_dwarf->GetObjectFile();
+ strm << objfile->GetModule()->GetCacheKey() << "-dwarf-index-"
+ << llvm::format_hex(objfile->GetCacheHash(), 10);
+ return strm.str();
+}
+
+bool ManualDWARFIndex::LoadFromCache() {
+ DataFileCache *cache = Module::GetIndexCache();
+ if (!cache)
+ return false;
+ ObjectFile *objfile = m_dwarf->GetObjectFile();
+ if (!objfile)
+ return false;
+ std::unique_ptr<llvm::MemoryBuffer> mem_buffer_up =
+ cache->GetCachedData(GetCacheKey());
+ if (!mem_buffer_up)
+ return false;
+ DataExtractor data(mem_buffer_up->getBufferStart(),
+ mem_buffer_up->getBufferSize(),
+ endian::InlHostByteOrder(),
+ objfile->GetAddressByteSize());
+ bool signature_mismatch = false;
+ lldb::offset_t offset = 0;
+ const bool result = Decode(data, &offset, signature_mismatch);
+ if (signature_mismatch)
+ cache->RemoveCacheFile(GetCacheKey());
+ return result;
+}
+
+void ManualDWARFIndex::SaveToCache() {
+ DataFileCache *cache = Module::GetIndexCache();
+ if (!cache)
+ return; // Caching is not enabled.
+ ObjectFile *objfile = m_dwarf->GetObjectFile();
+ if (!objfile)
+ return;
+ DataEncoder file(endian::InlHostByteOrder(), objfile->GetAddressByteSize());
+ // Encode will return false if the object file doesn't have anything to make
+ // a signature from.
+ if (Encode(file)) {
+ if (cache->SetCachedData(GetCacheKey(), file.GetData()))
+ m_dwarf->SetDebugInfoIndexWasSavedToCache();
+ }
+}
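
Taken together, the routines above define a small, self-describing cache payload. The annotated layout below is inferred from the code in this hunk and is a reader's aid, not an authoritative file-format specification:

// Cache payload written by ManualDWARFIndex::Encode:
//
//   CacheSignature            object file UUID / mod-time signature
//   StringTable               pool of all ConstStrings referenced by the tables
//   "DIDX"                    4-byte identifier (kIdentifierManualDWARFIndex)
//   uint32_t version          CURRENT_CACHE_VERSION (currently 1)
//   repeated {
//     uint8_t data_id         kDataIDFunctionBasenames .. kDataIDNamespaces;
//                             empty tables are skipped entirely
//     NameToDIE payload       "N2DI", uint32_t count, then count entries of
//                             (uint32_t string table offset, encoded DIERef)
//   }
//   uint8_t kDataIDEnd (255)  terminator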
diff --git a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/ManualDWARFIndex.h b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/ManualDWARFIndex.h
index 36f371402b90..5c5e43de9ca6 100644
--- a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/ManualDWARFIndex.h
+++ b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/ManualDWARFIndex.h
@@ -55,7 +55,7 @@ public:
void Dump(Stream &s) override;
-private:
+ // Make IndexSet public so we can unit test the encoding and decoding logic.
struct IndexSet {
NameToDIE function_basenames;
NameToDIE function_fullnames;
@@ -65,21 +65,113 @@ private:
NameToDIE globals;
NameToDIE types;
NameToDIE namespaces;
+ bool Decode(const DataExtractor &data, lldb::offset_t *offset_ptr);
+ void Encode(DataEncoder &encoder) const;
+ bool operator==(const IndexSet &rhs) const {
+ return function_basenames == rhs.function_basenames &&
+ function_fullnames == rhs.function_fullnames &&
+ function_methods == rhs.function_methods &&
+ function_selectors == rhs.function_selectors &&
+ objc_class_selectors == rhs.objc_class_selectors &&
+ globals == rhs.globals && types == rhs.types &&
+ namespaces == rhs.namespaces;
+ }
};
+
+private:
void Index();
+
+ /// Decode a serialized version of this object from data.
+ ///
+ /// \param data
+ /// The decoder object that references the serialized data.
+ ///
+ /// \param offset_ptr
+ /// A pointer to the offset within the data from which decoding starts; it
+ /// is updated as data is decoded.
+ ///
+ /// \param signature_mismatch
+ /// Set to true if the cached data's signature does not match this symbol
+ /// file's object file, which tells the caller that the stale cache file
+ /// should be removed.
+ bool Decode(const DataExtractor &data, lldb::offset_t *offset_ptr,
+ bool &signature_mismatch);
+
+ /// Encode this object into a data encoder object.
+ ///
+ /// This allows this object to be serialized to disk.
+ ///
+ /// \param encoder
+ /// A data encoder object that serialized bytes will be encoded into.
+ ///
+ /// \return
+ /// True if this index's object file can generate a valid signature and all
+ /// of the index data was encoded, false otherwise.
+ bool Encode(DataEncoder &encoder) const;
+
+ /// Get the cache key string for this DWARF index.
+ ///
+ /// The cache key must start with the module's cache key, followed by
+ /// information indicating that this key is for caching the DWARF index
+ /// contents, and should also include the hash of the object file. A module
+ /// can be represented by an ObjectFile object for the main executable, but
+ /// can also have a symbol file that is from the same or a different object
+ /// file. This means we might have two DWARF indexes cached in the index
+ /// cache, one for the main executable and one for the symbol file.
+ ///
+ /// \return
+ /// The unique cache key used to save and retrieve data from the index
+ /// cache.
+ std::string GetCacheKey();
+
+ /// Save the DWARF index data out into a cache.
+ ///
+ /// The DWARF index will only be saved to a cache file if caching is enabled.
+ ///
+ /// We cache the contents of the manual DWARF index because building it
+ /// requires scanning the debug information for every DWARF unit in the
+ /// module, which can take a noticeable amount of time when a debug session
+ /// starts. Saving the finalized index lets later sessions skip that work.
+ void SaveToCache();
+
+ /// Load the DWARF index from the index cache.
+ ///
+ /// Quickly load the finalized DWARF index from the index cache. This saves
+ /// time when the debugger starts up. The index cache file for the DWARF
+ /// index has the modification time set to the same time as the main module.
+ /// If the cache file exists and the modification times match, we will load
+ /// the DWARF index from the serialized cache file.
+ ///
+ /// \return
+ /// True if the DWARF index was successfully loaded from the index cache,
+ /// false if the index wasn't cached or was out of date.
+ bool LoadFromCache();
+
void IndexUnit(DWARFUnit &unit, SymbolFileDWARFDwo *dwp, IndexSet &set);
static void IndexUnitImpl(DWARFUnit &unit,
const lldb::LanguageType cu_language,
IndexSet &set);
- /// The DWARF file which we are indexing. Set to nullptr after the index is
- /// built.
+ /// The DWARF file which we are indexing.
SymbolFileDWARF *m_dwarf;
/// Which dwarf units should we skip while building the index.
llvm::DenseSet<dw_offset_t> m_units_to_avoid;
IndexSet m_set;
+ bool m_indexed = false;
};
} // namespace lldb_private
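
For orientation, the sketch below shows a plausible shape for how these new members cooperate inside Index(); only the SaveToCache() call at the end of Index() is visible in this diff, so the m_indexed guard and the LoadFromCache() early return are assumptions based on the declarations above, not a quote of the real body.

// Sketch only: the actual Index() body is largely outside this hunk.
void ManualDWARFIndex::Index() {
  if (m_indexed)
    return;
  m_indexed = true;

  if (LoadFromCache()) // Cache hit: skip the expensive per-unit DWARF scan.
    return;

  // ... index all DWARF units in parallel into m_set and finalize it ...

  SaveToCache(); // Persist the finalized index when caching is enabled.
}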
diff --git a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/NameToDIE.cpp b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/NameToDIE.cpp
index 493d1b4a2702..413920f33619 100644
--- a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/NameToDIE.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/NameToDIE.cpp
@@ -8,8 +8,11 @@
#include "NameToDIE.h"
#include "DWARFUnit.h"
+#include "lldb/Core/DataFileCache.h"
#include "lldb/Symbol/ObjectFile.h"
#include "lldb/Utility/ConstString.h"
+#include "lldb/Utility/DataEncoder.h"
+#include "lldb/Utility/DataExtractor.h"
#include "lldb/Utility/RegularExpression.h"
#include "lldb/Utility/Stream.h"
#include "lldb/Utility/StreamString.h"
@@ -18,7 +21,7 @@ using namespace lldb;
using namespace lldb_private;
void NameToDIE::Finalize() {
- m_map.Sort();
+ m_map.Sort(std::less<DIERef>());
m_map.SizeToFit();
}
@@ -87,3 +90,50 @@ void NameToDIE::Append(const NameToDIE &other) {
other.m_map.GetValueAtIndexUnchecked(i));
}
}
+
+constexpr llvm::StringLiteral kIdentifierNameToDIE("N2DI");
+
+bool NameToDIE::Decode(const DataExtractor &data, lldb::offset_t *offset_ptr,
+ const StringTableReader &strtab) {
+ m_map.Clear();
+ llvm::StringRef identifier((const char *)data.GetData(offset_ptr, 4), 4);
+ if (identifier != kIdentifierNameToDIE)
+ return false;
+ const uint32_t count = data.GetU32(offset_ptr);
+ for (uint32_t i = 0; i < count; ++i) {
+ llvm::StringRef str(strtab.Get(data.GetU32(offset_ptr)));
+ // No empty strings allowed in the name to DIE maps.
+ if (str.empty())
+ return false;
+ if (llvm::Optional<DIERef> die_ref = DIERef::Decode(data, offset_ptr))
+ m_map.Append(ConstString(str), die_ref.getValue());
+ else
+ return false;
+ }
+ return true;
+}
+
+void NameToDIE::Encode(DataEncoder &encoder, ConstStringTable &strtab) const {
+ encoder.AppendData(kIdentifierNameToDIE);
+ encoder.AppendU32(m_map.GetSize());
+ for (const auto &entry : m_map) {
+ // Make sure there are no empty strings.
+ assert((bool)entry.cstring);
+ encoder.AppendU32(strtab.Add(entry.cstring));
+ entry.value.Encode(encoder);
+ }
+}
+
+bool NameToDIE::operator==(const NameToDIE &rhs) const {
+ const size_t size = m_map.GetSize();
+ if (size != rhs.m_map.GetSize())
+ return false;
+ for (size_t i = 0; i < size; ++i) {
+ if (m_map.GetCStringAtIndex(i) != rhs.m_map.GetCStringAtIndex(i))
+ return false;
+ if (m_map.GetValueRefAtIndexUnchecked(i) !=
+ rhs.m_map.GetValueRefAtIndexUnchecked(i))
+ return false;
+ }
+ return true;
+}
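
A minimal round-trip sketch of the new NameToDIE serialization, following the string-table-first pattern used by IndexSet::Encode earlier in this patch. The 8-byte address size and host byte order are placeholder choices, the includes assume LLDB's internal headers, and a real unit test would also populate the table.

#include "NameToDIE.h"
#include "lldb/Core/DataFileCache.h" // ConstStringTable, StringTableReader
#include "lldb/Utility/DataEncoder.h"
#include "lldb/Utility/DataExtractor.h"
#include "lldb/Utility/Endian.h"

using namespace lldb_private;

// Encode `names` into a byte buffer and decode it back, as a unit test might.
static bool RoundTrip(const NameToDIE &names) {
  // Pass 1: encode the table into a scratch encoder, collecting its strings.
  ConstStringTable strtab;
  DataEncoder table_bytes(endian::InlHostByteOrder(), /*addr_size=*/8);
  names.Encode(table_bytes, strtab); // writes "N2DI", count, entries

  // Pass 2: the string table must precede the table data in the final buffer.
  DataEncoder file(endian::InlHostByteOrder(), /*addr_size=*/8);
  strtab.Encode(file);
  file.AppendData(table_bytes.GetData());

  // Decoding follows the same order: string table first, then the table.
  llvm::ArrayRef<uint8_t> bytes = file.GetData();
  DataExtractor data(bytes.data(), bytes.size(), endian::InlHostByteOrder(), 8);
  lldb::offset_t offset = 0;
  StringTableReader strings;
  if (!strings.Decode(data, &offset))
    return false;
  NameToDIE decoded;
  if (!decoded.Decode(data, &offset, strings))
    return false;
  return decoded == names; // operator== exists precisely for such tests
}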
diff --git a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/NameToDIE.h b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/NameToDIE.h
index 994af07189f8..61df1a628ab5 100644
--- a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/NameToDIE.h
+++ b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/NameToDIE.h
@@ -48,6 +48,44 @@ public:
const DIERef &die_ref)> const
&callback) const;
+ /// Decode a serialized version of this object from data.
+ ///
+ /// \param data
+ /// The decoder object that references the serialized data.
+ ///
+ /// \param offset_ptr
+ /// A pointer to the offset within the data from which decoding starts; it
+ /// is updated as data is decoded.
+ ///
+ /// \param strtab
+ /// All strings in cache files are put into string tables for efficiency
+ /// and cache file size reduction. Strings are stored as uint32_t string
+ /// table offsets in the cache data.
+ bool Decode(const lldb_private::DataExtractor &data,
+ lldb::offset_t *offset_ptr,
+ const lldb_private::StringTableReader &strtab);
+
+ /// Encode this object into a data encoder object.
+ ///
+ /// This allows this object to be serialized to disk.
+ ///
+ /// \param encoder
+ /// A data encoder object that serialized bytes will be encoded into.
+ ///
+ /// \param strtab
+ /// All strings in cache files are put into string tables for efficiency
+ /// and cache file size reduction. Strings are stored as uint32_t string
+ /// table offsets in the cache data.
+ void Encode(lldb_private::DataEncoder &encoder,
+ lldb_private::ConstStringTable &strtab) const;
+
+ /// Used for unit testing the encoding and decoding.
+ bool operator==(const NameToDIE &rhs) const;
+
+ bool IsEmpty() const { return m_map.IsEmpty(); }
+
+ void Clear() { m_map.Clear(); }
+
protected:
lldb_private::UniqueCStringMap<DIERef> m_map;
};
diff --git a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp
index 8c995ef2eb2a..02d1a6a4a8be 100644
--- a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp
@@ -2140,7 +2140,8 @@ void SymbolFileDWARF::FindGlobalVariables(
llvm::StringRef basename;
llvm::StringRef context;
- bool name_is_mangled = (bool)Mangled(name);
+ bool name_is_mangled = Mangled::GetManglingScheme(name.GetStringRef()) !=
+ Mangled::eManglingSchemeNone;
if (!CPlusPlusLanguage::ExtractContextAndIdentifier(name.GetCString(),
context, basename))
@@ -4085,8 +4086,8 @@ LanguageType SymbolFileDWARF::GetLanguageFamily(DWARFUnit &unit) {
return LanguageTypeFromDWARF(lang);
}
-StatsDuration SymbolFileDWARF::GetDebugInfoIndexTime() {
+StatsDuration::Duration SymbolFileDWARF::GetDebugInfoIndexTime() {
if (m_index)
return m_index->GetIndexTime();
- return StatsDuration(0.0);
+ return {};
}
diff --git a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.h b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.h
index e81ce28cb86e..f84a78620e17 100644
--- a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.h
+++ b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.h
@@ -319,10 +319,10 @@ public:
/// Same as GetLanguage() but reports all C++ versions as C++ (no version).
static lldb::LanguageType GetLanguageFamily(DWARFUnit &unit);
- lldb_private::StatsDuration GetDebugInfoParseTime() override {
+ lldb_private::StatsDuration::Duration GetDebugInfoParseTime() override {
return m_parse_time;
}
- lldb_private::StatsDuration GetDebugInfoIndexTime() override;
+ lldb_private::StatsDuration::Duration GetDebugInfoIndexTime() override;
lldb_private::StatsDuration &GetDebugInfoParseTimeRef() {
return m_parse_time;
@@ -559,7 +559,7 @@ protected:
/// Try to filter out this debug info by comparing it to the lowest code
/// address in the module.
lldb::addr_t m_first_code_address = LLDB_INVALID_ADDRESS;
- lldb_private::StatsDuration m_parse_time{0.0};
+ lldb_private::StatsDuration m_parse_time;
};
#endif // LLDB_SOURCE_PLUGINS_SYMBOLFILE_DWARF_SYMBOLFILEDWARF_H
diff --git a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARFDebugMap.cpp b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARFDebugMap.cpp
index 2491f6af8c19..6ee189e04250 100644
--- a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARFDebugMap.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARFDebugMap.cpp
@@ -1447,8 +1447,8 @@ uint64_t SymbolFileDWARFDebugMap::GetDebugInfoSize() {
return debug_info_size;
}
-StatsDuration SymbolFileDWARFDebugMap::GetDebugInfoParseTime() {
- StatsDuration elapsed(0.0);
+StatsDuration::Duration SymbolFileDWARFDebugMap::GetDebugInfoParseTime() {
+ StatsDuration::Duration elapsed(0.0);
ForEachSymbolFile([&](SymbolFileDWARF *oso_dwarf) -> bool {
ObjectFile *oso_objfile = oso_dwarf->GetObjectFile();
if (oso_objfile) {
@@ -1464,8 +1464,8 @@ StatsDuration SymbolFileDWARFDebugMap::GetDebugInfoParseTime() {
return elapsed;
}
-StatsDuration SymbolFileDWARFDebugMap::GetDebugInfoIndexTime() {
- StatsDuration elapsed(0.0);
+StatsDuration::Duration SymbolFileDWARFDebugMap::GetDebugInfoIndexTime() {
+ StatsDuration::Duration elapsed(0.0);
ForEachSymbolFile([&](SymbolFileDWARF *oso_dwarf) -> bool {
ObjectFile *oso_objfile = oso_dwarf->GetObjectFile();
if (oso_objfile) {
diff --git a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARFDebugMap.h b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARFDebugMap.h
index 74f32442de2f..2a6232a501b4 100644
--- a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARFDebugMap.h
+++ b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARFDebugMap.h
@@ -143,8 +143,8 @@ public:
llvm::StringRef GetPluginName() override { return GetPluginNameStatic(); }
uint64_t GetDebugInfoSize() override;
- lldb_private::StatsDuration GetDebugInfoParseTime() override;
- lldb_private::StatsDuration GetDebugInfoIndexTime() override;
+ lldb_private::StatsDuration::Duration GetDebugInfoParseTime() override;
+ lldb_private::StatsDuration::Duration GetDebugInfoIndexTime() override;
protected:
enum { kHaveInitializedOSOs = (1 << 0), kNumFlags };
diff --git a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/CompileUnitIndex.cpp b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/CompileUnitIndex.cpp
index 9f09c0accc87..d8f737612c25 100644
--- a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/CompileUnitIndex.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/CompileUnitIndex.cpp
@@ -106,6 +106,24 @@ static void ParseExtendedInfo(PdbIndex &index, CompilandIndexItem &item) {
}
}
+static void ParseInlineeLineTableForCompileUnit(CompilandIndexItem &item) {
+ for (const auto &ss : item.m_debug_stream.getSubsectionsArray()) {
+ if (ss.kind() != DebugSubsectionKind::InlineeLines)
+ continue;
+
+ DebugInlineeLinesSubsectionRef inlinee_lines;
+ llvm::BinaryStreamReader reader(ss.getRecordData());
+ if (llvm::Error error = inlinee_lines.initialize(reader)) {
+ consumeError(std::move(error));
+ continue;
+ }
+
+ for (const InlineeSourceLine &Line : inlinee_lines) {
+ item.m_inline_map[Line.Header->Inlinee] = Line;
+ }
+ }
+}
+
CompilandIndexItem::CompilandIndexItem(
PdbCompilandId id, llvm::pdb::ModuleDebugStreamRef debug_stream,
llvm::pdb::DbiModuleDescriptor descriptor)
@@ -142,6 +160,7 @@ CompilandIndexItem &CompileUnitIndex::GetOrCreateCompiland(uint16_t modi) {
cci = std::make_unique<CompilandIndexItem>(
PdbCompilandId{modi}, std::move(debug_stream), std::move(descriptor));
ParseExtendedInfo(m_index, *cci);
+ ParseInlineeLineTableForCompileUnit(*cci);
cci->m_strings.initialize(debug_stream.getSubsectionsArray());
PDBStringTable &strings = cantFail(m_index.pdb().getStringTable());
diff --git a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/CompileUnitIndex.h b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/CompileUnitIndex.h
index 088de970cbf3..4ad27de98695 100644
--- a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/CompileUnitIndex.h
+++ b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/CompileUnitIndex.h
@@ -9,10 +9,12 @@
#ifndef LLDB_SOURCE_PLUGINS_SYMBOLFILE_NATIVEPDB_COMPILEUNITINDEX_H
#define LLDB_SOURCE_PLUGINS_SYMBOLFILE_NATIVEPDB_COMPILEUNITINDEX_H
+#include "lldb/Utility/RangeMap.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/IntervalMap.h"
#include "llvm/ADT/Optional.h"
+#include "llvm/DebugInfo/CodeView/DebugInlineeLinesSubsection.h"
#include "llvm/DebugInfo/CodeView/StringsAndChecksums.h"
#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
@@ -69,6 +71,17 @@ struct CompilandIndexItem {
// command line, etc. This usually contains exactly 5 items which
// are references to other strings.
llvm::SmallVector<llvm::codeview::TypeIndex, 5> m_build_info;
+
+ // Inlinee lines table in this compile unit.
+ std::map<llvm::codeview::TypeIndex, llvm::codeview::InlineeSourceLine>
+ m_inline_map;
+
+ // The line table parsed from DEBUG_S_LINES sections, mapping file address
+ // ranges to a file index and source line number.
+ using GlobalLineTable =
+ lldb_private::RangeDataVector<lldb::addr_t, uint32_t,
+ std::pair<uint32_t, uint32_t>>;
+ GlobalLineTable m_global_line_table;
};
/// Indexes information about all compile units. This is really just a map of
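
Because m_global_line_table is what later code leans on when resolving inline call sites, here is a minimal usage sketch of the append/sort/lookup flow; the addresses, file index, and line number are made up, and the member functions mirror those used in SymbolFileNativePDB.cpp later in this patch.

// Sketch of the intended GlobalLineTable usage.
static void GlobalLineTableSketch() {
  CompilandIndexItem::GlobalLineTable table;
  CompilandIndexItem::GlobalLineTable::Entry entry(LLDB_INVALID_ADDRESS, 0);
  entry.SetRangeBase(0x1000);             // address of a DEBUG_S_LINES record
  entry.data = {/*file index*/ 0, /*line*/ 42};
  entry.SetRangeEnd(0x1010);              // closed by the next record's address
  table.Append(entry);
  table.Sort();                           // required before any lookups

  // Later, ParseInlineSite maps a call-site address back to (file, line).
  if (const auto *hit = table.FindEntryThatContains(0x1004)) {
    uint32_t file_idx = hit->data.first;
    uint32_t line = hit->data.second;
    (void)file_idx;
    (void)line;
  }
}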
diff --git a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/PdbAstBuilder.cpp b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/PdbAstBuilder.cpp
index 9473befa6cc3..dc0969a0ce7c 100644
--- a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/PdbAstBuilder.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/PdbAstBuilder.cpp
@@ -1081,7 +1081,7 @@ PdbAstBuilder::GetOrCreateFunctionDecl(PdbCompilandSymId func_id) {
clang::FunctionDecl *function_decl = nullptr;
if (parent->isRecord()) {
- clang::QualType parent_qt = llvm::dyn_cast<clang::TypeDecl>(parent)
+ clang::QualType parent_qt = llvm::cast<clang::TypeDecl>(parent)
->getTypeForDecl()
->getCanonicalTypeInternal();
lldb::opaque_compiler_type_t parent_opaque_ty =
@@ -1318,7 +1318,7 @@ void PdbAstBuilder::ParseAllNamespacesPlusChildrenOf(
if (!context->isNamespace())
continue;
- clang::NamespaceDecl *ns = llvm::dyn_cast<clang::NamespaceDecl>(context);
+ clang::NamespaceDecl *ns = llvm::cast<clang::NamespaceDecl>(context);
std::string actual_ns = ns->getQualifiedNameAsString();
if (llvm::StringRef(actual_ns).startswith(*parent)) {
clang::QualType qt = GetOrCreateType(tid);
diff --git a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/PdbIndex.h b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/PdbIndex.h
index 1b382e5263c1..edbdd9ee290b 100644
--- a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/PdbIndex.h
+++ b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/PdbIndex.h
@@ -25,7 +25,6 @@ namespace llvm {
namespace pdb {
class DbiStream;
class TpiStream;
-class TpiStream;
class InfoStream;
class PublicsStream;
class GlobalsStream;
diff --git a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/SymbolFileNativePDB.cpp b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/SymbolFileNativePDB.cpp
index e859b1d5a86c..a035a791f868 100644
--- a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/SymbolFileNativePDB.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/SymbolFileNativePDB.cpp
@@ -330,31 +330,65 @@ uint32_t SymbolFileNativePDB::CalculateNumCompileUnits() {
Block &SymbolFileNativePDB::CreateBlock(PdbCompilandSymId block_id) {
CompilandIndexItem *cii = m_index->compilands().GetCompiland(block_id.modi);
CVSymbol sym = cii->m_debug_stream.readSymbolAtOffset(block_id.offset);
-
- if (sym.kind() == S_GPROC32 || sym.kind() == S_LPROC32) {
- // This is a function. It must be global. Creating the Function entry for
- // it automatically creates a block for it.
- CompUnitSP comp_unit = GetOrCreateCompileUnit(*cii);
- return GetOrCreateFunction(block_id, *comp_unit)->GetBlock(false);
- }
-
- lldbassert(sym.kind() == S_BLOCK32);
-
- // This is a block. Its parent is either a function or another block. In
- // either case, its parent can be viewed as a block (e.g. a function contains
- // 1 big block. So just get the parent block and add this block to it.
- BlockSym block(static_cast<SymbolRecordKind>(sym.kind()));
- cantFail(SymbolDeserializer::deserializeAs<BlockSym>(sym, block));
- lldbassert(block.Parent != 0);
- PdbCompilandSymId parent_id(block_id.modi, block.Parent);
- Block &parent_block = GetOrCreateBlock(parent_id);
+ CompUnitSP comp_unit = GetOrCreateCompileUnit(*cii);
lldb::user_id_t opaque_block_uid = toOpaqueUid(block_id);
BlockSP child_block = std::make_shared<Block>(opaque_block_uid);
- parent_block.AddChild(child_block);
- m_ast->GetOrCreateBlockDecl(block_id);
+ switch (sym.kind()) {
+ case S_GPROC32:
+ case S_LPROC32: {
+ // This is a function. It must be global. Creating the Function entry
+ // for it automatically creates a block for it.
+ FunctionSP func = GetOrCreateFunction(block_id, *comp_unit);
+ Block &block = func->GetBlock(false);
+ if (block.GetNumRanges() == 0)
+ block.AddRange(Block::Range(0, func->GetAddressRange().GetByteSize()));
+ return block;
+ }
+ case S_BLOCK32: {
+ // This is a block. Its parent is either a function or another block. In
+ // either case, its parent can be viewed as a block (e.g. a function
+ // contains one big block), so just get the parent block and add this block
+ // to it.
+ BlockSym block(static_cast<SymbolRecordKind>(sym.kind()));
+ cantFail(SymbolDeserializer::deserializeAs<BlockSym>(sym, block));
+ lldbassert(block.Parent != 0);
+ PdbCompilandSymId parent_id(block_id.modi, block.Parent);
+ Block &parent_block = GetOrCreateBlock(parent_id);
+ parent_block.AddChild(child_block);
+ m_ast->GetOrCreateBlockDecl(block_id);
+ m_blocks.insert({opaque_block_uid, child_block});
+ break;
+ }
+ case S_INLINESITE: {
+ // This ensures the line table is parsed first so we have inline site info.
+ comp_unit->GetLineTable();
+
+ std::shared_ptr<InlineSite> inline_site = m_inline_sites[opaque_block_uid];
+ Block &parent_block = GetOrCreateBlock(inline_site->parent_id);
+ parent_block.AddChild(child_block);
+
+ // Copy ranges from InlineSite to Block.
+ for (size_t i = 0; i < inline_site->ranges.GetSize(); ++i) {
+ auto *entry = inline_site->ranges.GetEntryAtIndex(i);
+ child_block->AddRange(
+ Block::Range(entry->GetRangeBase(), entry->GetByteSize()));
+ }
+ child_block->FinalizeRanges();
+
+ // Get the inlined function callsite info.
+ Declaration &decl = inline_site->inline_function_info->GetDeclaration();
+ Declaration &callsite = inline_site->inline_function_info->GetCallSite();
+ child_block->SetInlinedFunctionInfo(
+ inline_site->inline_function_info->GetName().GetCString(), nullptr,
+ &decl, &callsite);
+ m_blocks.insert({opaque_block_uid, child_block});
+ break;
+ }
+ default:
+ lldbassert(false && "Symbol is not a block!");
+ }
- m_blocks.insert({opaque_block_uid, child_block});
return *child_block;
}
@@ -971,16 +1005,22 @@ uint32_t SymbolFileNativePDB::ResolveSymbolContext(
continue;
if (type == PDB_SymType::Function) {
sc.function = GetOrCreateFunction(csid, *sc.comp_unit).get();
- sc.block = sc.GetFunctionBlock();
+ Block &block = sc.function->GetBlock(true);
+ addr_t func_base =
+ sc.function->GetAddressRange().GetBaseAddress().GetFileAddress();
+ addr_t offset = file_addr - func_base;
+ sc.block = block.FindInnermostBlockByOffset(offset);
}
if (type == PDB_SymType::Block) {
sc.block = &GetOrCreateBlock(csid);
sc.function = sc.block->CalculateSymbolContextFunction();
}
- resolved_flags |= eSymbolContextFunction;
- resolved_flags |= eSymbolContextBlock;
- break;
+ if (sc.function)
+ resolved_flags |= eSymbolContextFunction;
+ if (sc.block)
+ resolved_flags |= eSymbolContextBlock;
+ break;
}
}
@@ -998,43 +1038,24 @@ uint32_t SymbolFileNativePDB::ResolveSymbolContext(
uint32_t SymbolFileNativePDB::ResolveSymbolContext(
const SourceLocationSpec &src_location_spec,
lldb::SymbolContextItem resolve_scope, SymbolContextList &sc_list) {
- return 0;
-}
-
-static void AppendLineEntryToSequence(LineTable &table, LineSequence &sequence,
- const CompilandIndexItem &cci,
- lldb::addr_t base_addr,
- uint32_t file_number,
- const LineFragmentHeader &block,
- const LineNumberEntry &cur) {
- LineInfo cur_info(cur.Flags);
-
- if (cur_info.isAlwaysStepInto() || cur_info.isNeverStepInto())
- return;
-
- uint64_t addr = base_addr + cur.Offset;
-
- bool is_statement = cur_info.isStatement();
- bool is_prologue = IsFunctionPrologue(cci, addr);
- bool is_epilogue = IsFunctionEpilogue(cci, addr);
-
- uint32_t lno = cur_info.getStartLine();
-
- table.AppendLineEntryToSequence(&sequence, addr, lno, 0, file_number,
- is_statement, false, is_prologue, is_epilogue,
- false);
-}
+ std::lock_guard<std::recursive_mutex> guard(GetModuleMutex());
+ const uint32_t prev_size = sc_list.GetSize();
+ if (resolve_scope & eSymbolContextCompUnit) {
+ for (uint32_t cu_idx = 0, num_cus = GetNumCompileUnits(); cu_idx < num_cus;
+ ++cu_idx) {
+ CompileUnit *cu = ParseCompileUnitAtIndex(cu_idx).get();
+ if (!cu)
+ continue;
-static void TerminateLineSequence(LineTable &table,
- const LineFragmentHeader &block,
- lldb::addr_t base_addr, uint32_t file_number,
- uint32_t last_line,
- std::unique_ptr<LineSequence> seq) {
- // The end is always a terminal entry, so insert it regardless.
- table.AppendLineEntryToSequence(seq.get(), base_addr + block.CodeSize,
- last_line, 0, file_number, false, false,
- false, false, true);
- table.InsertSequence(seq.get());
+ bool file_spec_matches_cu_file_spec = FileSpec::Match(
+ src_location_spec.GetFileSpec(), cu->GetPrimaryFile());
+ if (file_spec_matches_cu_file_spec) {
+ cu->ResolveSymbolContext(src_location_spec, resolve_scope, sc_list);
+ break;
+ }
+ }
+ }
+ return sc_list.GetSize() - prev_size;
}
bool SymbolFileNativePDB::ParseLineTable(CompileUnit &comp_unit) {
@@ -1045,16 +1066,21 @@ bool SymbolFileNativePDB::ParseLineTable(CompileUnit &comp_unit) {
std::lock_guard<std::recursive_mutex> guard(GetModuleMutex());
PdbSymUid cu_id(comp_unit.GetID());
lldbassert(cu_id.kind() == PdbSymUidKind::Compiland);
- CompilandIndexItem *cci =
- m_index->compilands().GetCompiland(cu_id.asCompiland().modi);
- lldbassert(cci);
- auto line_table = std::make_unique<LineTable>(&comp_unit);
+ uint16_t modi = cu_id.asCompiland().modi;
+ CompilandIndexItem *cii = m_index->compilands().GetCompiland(modi);
+ lldbassert(cii);
+
+ // Parse DEBUG_S_LINES subsections first, then parse all S_INLINESITE records
+ // in this CU. Add line entries into the set first so that if there are line
+ // entries with the same address, the latter is always more accurate than
+ // the former.
+ std::set<LineTable::Entry, LineTableEntryComparator> line_set;
// This is basically a copy of the .debug$S subsections from all original COFF
// object files merged together with address relocations applied. We are
// looking for all DEBUG_S_LINES subsections.
for (const DebugSubsectionRecord &dssr :
- cci->m_debug_stream.getSubsectionsArray()) {
+ cii->m_debug_stream.getSubsectionsArray()) {
if (dssr.kind() != DebugSubsectionKind::Lines)
continue;
@@ -1069,42 +1095,111 @@ bool SymbolFileNativePDB::ParseLineTable(CompileUnit &comp_unit) {
uint64_t virtual_addr =
m_index->MakeVirtualAddress(lfh->RelocSegment, lfh->RelocOffset);
- const auto &checksums = cci->m_strings.checksums().getArray();
- const auto &strings = cci->m_strings.strings();
for (const LineColumnEntry &group : lines) {
- // Indices in this structure are actually offsets of records in the
- // DEBUG_S_FILECHECKSUMS subsection. Those entries then have an index
- // into the global PDB string table.
- auto iter = checksums.at(group.NameIndex);
- if (iter == checksums.end())
+ llvm::Expected<uint32_t> file_index_or_err =
+ GetFileIndex(*cii, group.NameIndex);
+ if (!file_index_or_err)
continue;
+ uint32_t file_index = file_index_or_err.get();
+ lldbassert(!group.LineNumbers.empty());
+ CompilandIndexItem::GlobalLineTable::Entry line_entry(
+ LLDB_INVALID_ADDRESS, 0);
+ for (const LineNumberEntry &entry : group.LineNumbers) {
+ LineInfo cur_info(entry.Flags);
- llvm::Expected<llvm::StringRef> efn =
- strings.getString(iter->FileNameOffset);
- if (!efn) {
- llvm::consumeError(efn.takeError());
- continue;
- }
+ if (cur_info.isAlwaysStepInto() || cur_info.isNeverStepInto())
+ continue;
- // LLDB wants the index of the file in the list of support files.
- auto fn_iter = llvm::find(cci->m_file_list, *efn);
- lldbassert(fn_iter != cci->m_file_list.end());
- uint32_t file_index = std::distance(cci->m_file_list.begin(), fn_iter);
+ uint64_t addr = virtual_addr + entry.Offset;
- std::unique_ptr<LineSequence> sequence(
- line_table->CreateLineSequenceContainer());
- lldbassert(!group.LineNumbers.empty());
+ bool is_statement = cur_info.isStatement();
+ bool is_prologue = IsFunctionPrologue(*cii, addr);
+ bool is_epilogue = IsFunctionEpilogue(*cii, addr);
- for (const LineNumberEntry &entry : group.LineNumbers) {
- AppendLineEntryToSequence(*line_table, *sequence, *cci, virtual_addr,
- file_index, *lfh, entry);
+ uint32_t lno = cur_info.getStartLine();
+
+ line_set.emplace(addr, lno, 0, file_index, is_statement, false,
+ is_prologue, is_epilogue, false);
+
+ if (line_entry.GetRangeBase() != LLDB_INVALID_ADDRESS) {
+ line_entry.SetRangeEnd(addr);
+ cii->m_global_line_table.Append(line_entry);
+ }
+ line_entry.SetRangeBase(addr);
+ line_entry.data = {file_index, lno};
}
LineInfo last_line(group.LineNumbers.back().Flags);
- TerminateLineSequence(*line_table, *lfh, virtual_addr, file_index,
- last_line.getEndLine(), std::move(sequence));
+ line_set.emplace(virtual_addr + lfh->CodeSize, last_line.getEndLine(), 0,
+ file_index, false, false, false, false, true);
+
+ if (line_entry.GetRangeBase() != LLDB_INVALID_ADDRESS) {
+ line_entry.SetRangeEnd(virtual_addr + lfh->CodeSize);
+ cii->m_global_line_table.Append(line_entry);
+ }
}
}
+ cii->m_global_line_table.Sort();
+
+ // Parse all S_INLINESITE in this CU.
+ const CVSymbolArray &syms = cii->m_debug_stream.getSymbolArray();
+ for (auto iter = syms.begin(); iter != syms.end();) {
+ if (iter->kind() != S_LPROC32 && iter->kind() != S_GPROC32) {
+ ++iter;
+ continue;
+ }
+
+ uint32_t record_offset = iter.offset();
+ CVSymbol func_record =
+ cii->m_debug_stream.readSymbolAtOffset(record_offset);
+ SegmentOffsetLength sol = GetSegmentOffsetAndLength(func_record);
+ addr_t file_vm_addr = m_index->MakeVirtualAddress(sol.so);
+ AddressRange func_range(file_vm_addr, sol.length,
+ comp_unit.GetModule()->GetSectionList());
+ Address func_base = func_range.GetBaseAddress();
+ PdbCompilandSymId func_id{modi, record_offset};
+
+ // Iterate all S_INLINESITEs in the function.
+ auto parse_inline_sites = [&](SymbolKind kind, PdbCompilandSymId id) {
+ if (kind != S_INLINESITE)
+ return false;
+
+ ParseInlineSite(id, func_base);
+
+ for (const auto &line_entry :
+ m_inline_sites[toOpaqueUid(id)]->line_entries) {
+ // If line_entry is not a terminal entry, remove the previous line entry
+ // at the same address and insert the new one. A terminal entry inside an
+ // inline site might not be a terminal entry for its parent.
+ if (!line_entry.is_terminal_entry)
+ line_set.erase(line_entry);
+ line_set.insert(line_entry);
+ }
+ // No longer useful after adding to line_set.
+ m_inline_sites[toOpaqueUid(id)]->line_entries.clear();
+ return true;
+ };
+ ParseSymbolArrayInScope(func_id, parse_inline_sites);
+ // Jump to the end of the function record.
+ iter = syms.at(getScopeEndOffset(func_record));
+ }
+
+ cii->m_global_line_table.Clear();
+
+ // Add line entries in line_set to line_table.
+ auto line_table = std::make_unique<LineTable>(&comp_unit);
+ std::unique_ptr<LineSequence> sequence(
+ line_table->CreateLineSequenceContainer());
+ for (const auto &line_entry : line_set) {
+ line_table->AppendLineEntryToSequence(
+ sequence.get(), line_entry.file_addr, line_entry.line,
+ line_entry.column, line_entry.file_idx,
+ line_entry.is_start_of_statement, line_entry.is_start_of_basic_block,
+ line_entry.is_prologue_end, line_entry.is_epilogue_begin,
+ line_entry.is_terminal_entry);
+ }
+ line_table->InsertSequence(sequence.get());
+
if (line_table->GetSize() == 0)
return false;
@@ -1117,6 +1212,33 @@ bool SymbolFileNativePDB::ParseDebugMacros(CompileUnit &comp_unit) {
return false;
}
+llvm::Expected<uint32_t>
+SymbolFileNativePDB::GetFileIndex(const CompilandIndexItem &cii,
+ uint32_t file_id) {
+ auto index_iter = m_file_indexes.find(file_id);
+ if (index_iter != m_file_indexes.end())
+ return index_iter->getSecond();
+ const auto &checksums = cii.m_strings.checksums().getArray();
+ const auto &strings = cii.m_strings.strings();
+ // Indices in this structure are actually offsets of records in the
+ // DEBUG_S_FILECHECKSUMS subsection. Those entries then have an index
+ // into the global PDB string table.
+ auto iter = checksums.at(file_id);
+ if (iter == checksums.end())
+ return llvm::make_error<RawError>(raw_error_code::no_entry);
+
+ llvm::Expected<llvm::StringRef> efn = strings.getString(iter->FileNameOffset);
+ if (!efn) {
+ return efn.takeError();
+ }
+
+ // LLDB wants the index of the file in the list of support files.
+ auto fn_iter = llvm::find(cii.m_file_list, *efn);
+ lldbassert(fn_iter != cii.m_file_list.end());
+ m_file_indexes[file_id] = std::distance(cii.m_file_list.begin(), fn_iter);
+ return m_file_indexes[file_id];
+}
+
bool SymbolFileNativePDB::ParseSupportFiles(CompileUnit &comp_unit,
FileSpecList &support_files) {
std::lock_guard<std::recursive_mutex> guard(GetModuleMutex());
@@ -1141,11 +1263,223 @@ bool SymbolFileNativePDB::ParseImportedModules(
return false;
}
+void SymbolFileNativePDB::ParseInlineSite(PdbCompilandSymId id,
+ Address func_addr) {
+ lldb::user_id_t opaque_uid = toOpaqueUid(id);
+ if (m_inline_sites.find(opaque_uid) != m_inline_sites.end())
+ return;
+
+ addr_t func_base = func_addr.GetFileAddress();
+ CompilandIndexItem *cii = m_index->compilands().GetCompiland(id.modi);
+ CVSymbol sym = cii->m_debug_stream.readSymbolAtOffset(id.offset);
+ CompUnitSP comp_unit = GetOrCreateCompileUnit(*cii);
+
+ InlineSiteSym inline_site(static_cast<SymbolRecordKind>(sym.kind()));
+ cantFail(SymbolDeserializer::deserializeAs<InlineSiteSym>(sym, inline_site));
+ PdbCompilandSymId parent_id(id.modi, inline_site.Parent);
+
+ std::shared_ptr<InlineSite> inline_site_sp =
+ std::make_shared<InlineSite>(parent_id);
+
+ // Get the inlined function declaration info.
+ auto iter = cii->m_inline_map.find(inline_site.Inlinee);
+ if (iter == cii->m_inline_map.end())
+ return;
+ InlineeSourceLine inlinee_line = iter->second;
+
+ const FileSpecList &files = comp_unit->GetSupportFiles();
+ FileSpec decl_file;
+ llvm::Expected<uint32_t> file_index_or_err =
+ GetFileIndex(*cii, inlinee_line.Header->FileID);
+ if (!file_index_or_err)
+ return;
+ uint32_t decl_file_idx = file_index_or_err.get();
+ decl_file = files.GetFileSpecAtIndex(decl_file_idx);
+ uint32_t decl_line = inlinee_line.Header->SourceLineNum;
+ std::unique_ptr<Declaration> decl_up =
+ std::make_unique<Declaration>(decl_file, decl_line);
+
+ // Parse range and line info.
+ uint32_t code_offset = 0;
+ int32_t line_offset = 0;
+ bool has_base = false;
+ bool is_new_line_offset = false;
+
+ bool is_start_of_statement = false;
+ // The first instruction is the prologue end.
+ bool is_prologue_end = true;
+
+ auto change_code_offset = [&](uint32_t code_delta) {
+ if (has_base) {
+ inline_site_sp->ranges.Append(RangeSourceLineVector::Entry(
+ code_offset, code_delta, decl_line + line_offset));
+ is_prologue_end = false;
+ is_start_of_statement = false;
+ } else {
+ is_start_of_statement = true;
+ }
+ has_base = true;
+ code_offset += code_delta;
+
+ if (is_new_line_offset) {
+ LineTable::Entry line_entry(func_base + code_offset,
+ decl_line + line_offset, 0, decl_file_idx,
+ true, false, is_prologue_end, false, false);
+ inline_site_sp->line_entries.push_back(line_entry);
+ is_new_line_offset = false;
+ }
+ };
+ auto change_code_length = [&](uint32_t length) {
+ inline_site_sp->ranges.Append(RangeSourceLineVector::Entry(
+ code_offset, length, decl_line + line_offset));
+ has_base = false;
+
+ LineTable::Entry end_line_entry(func_base + code_offset + length,
+ decl_line + line_offset, 0, decl_file_idx,
+ false, false, false, false, true);
+ inline_site_sp->line_entries.push_back(end_line_entry);
+ };
+ auto change_line_offset = [&](int32_t line_delta) {
+ line_offset += line_delta;
+ if (has_base) {
+ LineTable::Entry line_entry(
+ func_base + code_offset, decl_line + line_offset, 0, decl_file_idx,
+ is_start_of_statement, false, is_prologue_end, false, false);
+ inline_site_sp->line_entries.push_back(line_entry);
+ } else {
+ // Add line entry in next call to change_code_offset.
+ is_new_line_offset = true;
+ }
+ };
+
+ for (auto &annot : inline_site.annotations()) {
+ switch (annot.OpCode) {
+ case BinaryAnnotationsOpCode::CodeOffset:
+ case BinaryAnnotationsOpCode::ChangeCodeOffset:
+ case BinaryAnnotationsOpCode::ChangeCodeOffsetBase:
+ change_code_offset(annot.U1);
+ break;
+ case BinaryAnnotationsOpCode::ChangeLineOffset:
+ change_line_offset(annot.S1);
+ break;
+ case BinaryAnnotationsOpCode::ChangeCodeLength:
+ change_code_length(annot.U1);
+ code_offset += annot.U1;
+ break;
+ case BinaryAnnotationsOpCode::ChangeCodeOffsetAndLineOffset:
+ change_code_offset(annot.U1);
+ change_line_offset(annot.S1);
+ break;
+ case BinaryAnnotationsOpCode::ChangeCodeLengthAndCodeOffset:
+ change_code_offset(annot.U2);
+ change_code_length(annot.U1);
+ break;
+ default:
+ break;
+ }
+ }
+
+ inline_site_sp->ranges.Sort();
+ inline_site_sp->ranges.CombineConsecutiveEntriesWithEqualData();
+
+ // Get the inlined function callsite info.
+ std::unique_ptr<Declaration> callsite_up;
+ if (!inline_site_sp->ranges.IsEmpty()) {
+ auto *entry = inline_site_sp->ranges.GetEntryAtIndex(0);
+ addr_t base_offset = entry->GetRangeBase();
+ if (cii->m_debug_stream.readSymbolAtOffset(parent_id.offset).kind() ==
+ S_INLINESITE) {
+ // Its parent is another inline site; look up the parent site's range
+ // vector for the callsite line.
+ ParseInlineSite(parent_id, func_base);
+ std::shared_ptr<InlineSite> parent_site =
+ m_inline_sites[toOpaqueUid(parent_id)];
+ FileSpec &parent_decl_file =
+ parent_site->inline_function_info->GetDeclaration().GetFile();
+ if (auto *parent_entry =
+ parent_site->ranges.FindEntryThatContains(base_offset)) {
+ callsite_up =
+ std::make_unique<Declaration>(parent_decl_file, parent_entry->data);
+ }
+ } else {
+ // Its parent is a function; look up the global line table for the callsite.
+ if (auto *entry = cii->m_global_line_table.FindEntryThatContains(
+ func_base + base_offset)) {
+ const FileSpec &callsite_file =
+ files.GetFileSpecAtIndex(entry->data.first);
+ callsite_up =
+ std::make_unique<Declaration>(callsite_file, entry->data.second);
+ }
+ }
+ }
+
+ // Get the inlined function name.
+ CVType inlinee_cvt = m_index->ipi().getType(inline_site.Inlinee);
+ std::string inlinee_name;
+ if (inlinee_cvt.kind() == LF_MFUNC_ID) {
+ MemberFuncIdRecord mfr;
+ cantFail(
+ TypeDeserializer::deserializeAs<MemberFuncIdRecord>(inlinee_cvt, mfr));
+ LazyRandomTypeCollection &types = m_index->tpi().typeCollection();
+ inlinee_name.append(std::string(types.getTypeName(mfr.ClassType)));
+ inlinee_name.append("::");
+ inlinee_name.append(mfr.getName().str());
+ } else if (inlinee_cvt.kind() == LF_FUNC_ID) {
+ FuncIdRecord fir;
+ cantFail(TypeDeserializer::deserializeAs<FuncIdRecord>(inlinee_cvt, fir));
+ TypeIndex parent_idx = fir.getParentScope();
+ if (!parent_idx.isNoneType()) {
+ LazyRandomTypeCollection &ids = m_index->ipi().typeCollection();
+ inlinee_name.append(std::string(ids.getTypeName(parent_idx)));
+ inlinee_name.append("::");
+ }
+ inlinee_name.append(fir.getName().str());
+ }
+ inline_site_sp->inline_function_info = std::make_shared<InlineFunctionInfo>(
+ inlinee_name.c_str(), llvm::StringRef(), decl_up.get(),
+ callsite_up.get());
+
+ m_inline_sites[opaque_uid] = inline_site_sp;
+}
+
size_t SymbolFileNativePDB::ParseBlocksRecursive(Function &func) {
std::lock_guard<std::recursive_mutex> guard(GetModuleMutex());
- GetOrCreateBlock(PdbSymUid(func.GetID()).asCompilandSym());
- // FIXME: Parse child blocks
- return 1;
+ PdbCompilandSymId func_id = PdbSymUid(func.GetID()).asCompilandSym();
+ // After we iterate through the inline sites inside the function, we have
+ // all the info we need, so remove them from the map to save memory.
+ std::set<uint64_t> remove_uids;
+ auto parse_blocks = [&](SymbolKind kind, PdbCompilandSymId id) {
+ if (kind == S_GPROC32 || kind == S_LPROC32 || kind == S_BLOCK32 ||
+ kind == S_INLINESITE) {
+ GetOrCreateBlock(id);
+ if (kind == S_INLINESITE)
+ remove_uids.insert(toOpaqueUid(id));
+ return true;
+ }
+ return false;
+ };
+ size_t count = ParseSymbolArrayInScope(func_id, parse_blocks);
+ for (uint64_t uid : remove_uids) {
+ m_inline_sites.erase(uid);
+ }
+ return count;
+}
+
+size_t SymbolFileNativePDB::ParseSymbolArrayInScope(
+ PdbCompilandSymId parent_id,
+ llvm::function_ref<bool(SymbolKind, PdbCompilandSymId)> fn) {
+ CompilandIndexItem *cii = m_index->compilands().GetCompiland(parent_id.modi);
+ CVSymbolArray syms =
+ cii->m_debug_stream.getSymbolArrayForScope(parent_id.offset);
+
+ size_t count = 1;
+ for (auto iter = syms.begin(); iter != syms.end(); ++iter) {
+ PdbCompilandSymId child_id(parent_id.modi, iter.offset());
+ if (fn(iter->kind(), child_id))
+ ++count;
+ }
+
+ return count;
}
void SymbolFileNativePDB::DumpClangAST(Stream &s) { m_ast->Dump(s); }
@@ -1396,6 +1730,9 @@ size_t SymbolFileNativePDB::ParseVariablesForBlock(PdbCompilandSymId block_id) {
}
case S_BLOCK32:
break;
+ case S_INLINESITE:
+ // TODO: Handle inline site case.
+ return 0;
default:
lldbassert(false && "Symbol is not a block!");
return 0;
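
The binary-annotation lambdas in ParseInlineSite above are the least obvious part of this change, so here is a worked trace with hypothetical values showing how two annotations become one address range and two line entries.

// Hypothetical S_INLINESITE with decl_line = 100 inside a parent function at
// file address 0x401000:
//
//   ChangeCodeOffsetAndLineOffset  U1 = 0x10, S1 = +2
//     -> change_code_offset(0x10): no base yet, so no range is appended;
//        code_offset = 0x10, is_start_of_statement = true
//     -> change_line_offset(+2): line_offset = 2; a line entry is pushed at
//        0x401010 for line 102 (is_prologue_end is still true)
//
//   ChangeCodeLength  U1 = 0x8
//     -> change_code_length(0x8): range [0x10, 0x18) is appended for line 102
//        and a terminal line entry is pushed at 0x401018; code_offset then
//        advances to 0x18
//
// After Sort() and CombineConsecutiveEntriesWithEqualData(), the inline site
// owns a single range, which CreateBlock() later copies into the S_INLINESITE
// block, and the line entries are merged into the CU's line table.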
diff --git a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/SymbolFileNativePDB.h b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/SymbolFileNativePDB.h
index 56a5ec0a464d..f1b6e34ca346 100644
--- a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/SymbolFileNativePDB.h
+++ b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/SymbolFileNativePDB.h
@@ -9,6 +9,7 @@
#ifndef LLDB_SOURCE_PLUGINS_SYMBOLFILE_NATIVEPDB_SYMBOLFILENATIVEPDB_H
#define LLDB_SOURCE_PLUGINS_SYMBOLFILE_NATIVEPDB_SYMBOLFILENATIVEPDB_H
+#include "lldb/Symbol/LineTable.h"
#include "lldb/Symbol/SymbolFile.h"
#include "llvm/ADT/DenseMap.h"
@@ -161,6 +162,25 @@ public:
void DumpClangAST(Stream &s) override;
private:
+ struct LineTableEntryComparator {
+ bool operator()(const lldb_private::LineTable::Entry &lhs,
+ const lldb_private::LineTable::Entry &rhs) const {
+ return lhs.file_addr < rhs.file_addr;
+ }
+ };
+
+ // Maps address ranges (relative to the function base) to source line numbers.
+ using RangeSourceLineVector =
+ lldb_private::RangeDataVector<uint32_t, uint32_t, int32_t>;
+ // InlineSite contains information in a S_INLINESITE record.
+ struct InlineSite {
+ PdbCompilandSymId parent_id;
+ std::shared_ptr<InlineFunctionInfo> inline_function_info;
+ RangeSourceLineVector ranges;
+ std::vector<lldb_private::LineTable::Entry> line_entries;
+ InlineSite(PdbCompilandSymId parent_id) : parent_id(parent_id) {}
+ };
+
uint32_t CalculateNumCompileUnits() override;
lldb::CompUnitSP ParseCompileUnitAtIndex(uint32_t index) override;
@@ -225,6 +245,16 @@ private:
VariableList &variables);
size_t ParseVariablesForBlock(PdbCompilandSymId block_id);
+ llvm::Expected<uint32_t> GetFileIndex(const CompilandIndexItem &cii,
+ uint32_t file_id);
+
+ size_t ParseSymbolArrayInScope(
+ PdbCompilandSymId parent,
+ llvm::function_ref<bool(llvm::codeview::SymbolKind, PdbCompilandSymId)>
+ fn);
+
+ void ParseInlineSite(PdbCompilandSymId inline_site_id, Address func_addr);
+
llvm::BumpPtrAllocator m_allocator;
lldb::addr_t m_obj_load_address = 0;
@@ -241,6 +271,9 @@ private:
llvm::DenseMap<lldb::user_id_t, lldb::FunctionSP> m_functions;
llvm::DenseMap<lldb::user_id_t, lldb::CompUnitSP> m_compilands;
llvm::DenseMap<lldb::user_id_t, lldb::TypeSP> m_types;
+ llvm::DenseMap<lldb::user_id_t, std::shared_ptr<InlineSite>> m_inline_sites;
+ // A map from file id in records to file index in support files.
+ llvm::DenseMap<uint32_t, uint32_t> m_file_indexes;
};
} // namespace npdb
diff --git a/contrib/llvm-project/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp b/contrib/llvm-project/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
index 57e763176ba0..c71a0120efde 100644
--- a/contrib/llvm-project/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
@@ -1345,7 +1345,30 @@ namespace {
bool IsValueParam(const clang::TemplateArgument &argument) {
return argument.getKind() == TemplateArgument::Integral;
}
+
+void AddAccessSpecifierDecl(clang::CXXRecordDecl *cxx_record_decl,
+ ASTContext &ct,
+ clang::AccessSpecifier previous_access,
+ clang::AccessSpecifier access_specifier) {
+ if (!cxx_record_decl->isClass() && !cxx_record_decl->isStruct())
+ return;
+ if (previous_access != access_specifier) {
+ // For struct, don't add AS_public if it's the first AccessSpecDecl.
+ // For class, don't add AS_private if it's the first AccessSpecDecl.
+ if ((cxx_record_decl->isStruct() &&
+ previous_access == clang::AccessSpecifier::AS_none &&
+ access_specifier == clang::AccessSpecifier::AS_public) ||
+ (cxx_record_decl->isClass() &&
+ previous_access == clang::AccessSpecifier::AS_none &&
+ access_specifier == clang::AccessSpecifier::AS_private)) {
+ return;
+ }
+ cxx_record_decl->addDecl(
+ AccessSpecDecl::Create(ct, access_specifier, cxx_record_decl,
+ SourceLocation(), SourceLocation()));
+ }
}
+} // namespace
static TemplateParameterList *CreateTemplateParameterList(
ASTContext &ast,
@@ -1453,7 +1476,7 @@ void TypeSystemClang::CreateFunctionTemplateSpecializationInfo(
/// as `int I = 3`.
static bool TemplateParameterAllowsValue(NamedDecl *param,
const TemplateArgument &value) {
- if (auto *type_param = llvm::dyn_cast<TemplateTypeParmDecl>(param)) {
+ if (llvm::isa<TemplateTypeParmDecl>(param)) {
// Compare the argument kind, i.e. ensure that <typename> != <int>.
if (value.getKind() != TemplateArgument::Type)
return false;
@@ -2552,6 +2575,22 @@ ClangASTMetadata *TypeSystemClang::GetMetadata(const clang::Type *object) {
return nullptr;
}
+void TypeSystemClang::SetCXXRecordDeclAccess(const clang::CXXRecordDecl *object,
+ clang::AccessSpecifier access) {
+ if (access == clang::AccessSpecifier::AS_none)
+ m_cxx_record_decl_access.erase(object);
+ else
+ m_cxx_record_decl_access[object] = access;
+}
+
+clang::AccessSpecifier
+TypeSystemClang::GetCXXRecordDeclAccess(const clang::CXXRecordDecl *object) {
+ auto It = m_cxx_record_decl_access.find(object);
+ if (It != m_cxx_record_decl_access.end())
+ return It->second;
+ return clang::AccessSpecifier::AS_none;
+}
+
clang::DeclContext *
TypeSystemClang::GetDeclContextForType(const CompilerType &type) {
return GetDeclContextForType(ClangUtil::GetQualType(type));
@@ -7276,9 +7315,17 @@ clang::FieldDecl *TypeSystemClang::AddFieldToRecordType(
}
if (field) {
- field->setAccess(
- TypeSystemClang::ConvertAccessTypeToAccessSpecifier(access));
-
+ clang::AccessSpecifier access_specifier =
+ TypeSystemClang::ConvertAccessTypeToAccessSpecifier(access);
+ field->setAccess(access_specifier);
+
+ if (clang::CXXRecordDecl *cxx_record_decl =
+ llvm::dyn_cast<CXXRecordDecl>(record_decl)) {
+ AddAccessSpecifierDecl(cxx_record_decl, ast->getASTContext(),
+ ast->GetCXXRecordDeclAccess(cxx_record_decl),
+ access_specifier);
+ ast->SetCXXRecordDeclAccess(cxx_record_decl, access_specifier);
+ }
record_decl->addDecl(field);
VerifyDecl(field);
@@ -7657,6 +7704,11 @@ clang::CXXMethodDecl *TypeSystemClang::AddMethodToCXXRecordType(
cxx_method_decl->setParams(llvm::ArrayRef<clang::ParmVarDecl *>(params));
+ AddAccessSpecifierDecl(cxx_record_decl, getASTContext(),
+ GetCXXRecordDeclAccess(cxx_record_decl),
+ access_specifier);
+ SetCXXRecordDeclAccess(cxx_record_decl, access_specifier);
+
cxx_record_decl->addDecl(cxx_method_decl);
// Sometimes the debug info will mention a constructor (default/copy/move),
@@ -8190,6 +8242,11 @@ bool TypeSystemClang::CompleteTagDeclarationDefinition(
if (qual_type.isNull())
return false;
+ TypeSystemClang *lldb_ast =
+ llvm::dyn_cast<TypeSystemClang>(type.GetTypeSystem());
+ if (lldb_ast == nullptr)
+ return false;
+
// Make sure we use the same methodology as
// TypeSystemClang::StartTagDeclarationDefinition() as to how we start/end
// the definition.
@@ -8220,6 +8277,8 @@ bool TypeSystemClang::CompleteTagDeclarationDefinition(
cxx_record_decl->setHasLoadedFieldsFromExternalStorage(true);
cxx_record_decl->setHasExternalLexicalStorage(false);
cxx_record_decl->setHasExternalVisibleStorage(false);
+ lldb_ast->SetCXXRecordDeclAccess(cxx_record_decl,
+ clang::AccessSpecifier::AS_none);
return true;
}
}
@@ -8233,10 +8292,6 @@ bool TypeSystemClang::CompleteTagDeclarationDefinition(
if (enum_decl->isCompleteDefinition())
return true;
- TypeSystemClang *lldb_ast =
- llvm::dyn_cast<TypeSystemClang>(type.GetTypeSystem());
- if (lldb_ast == nullptr)
- return false;
clang::ASTContext &ast = lldb_ast->getASTContext();
/// TODO This really needs to be fixed.
diff --git a/contrib/llvm-project/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h b/contrib/llvm-project/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h
index 5e75a8aba9e8..c59ecdb31790 100644
--- a/contrib/llvm-project/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h
+++ b/contrib/llvm-project/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h
@@ -196,6 +196,11 @@ public:
ClangASTMetadata *GetMetadata(const clang::Decl *object);
ClangASTMetadata *GetMetadata(const clang::Type *object);
+ void SetCXXRecordDeclAccess(const clang::CXXRecordDecl *object,
+ clang::AccessSpecifier access);
+ clang::AccessSpecifier
+ GetCXXRecordDeclAccess(const clang::CXXRecordDecl *object);
+
// Basic Types
CompilerType GetBuiltinTypeForEncodingAndBitSize(lldb::Encoding encoding,
size_t bit_size) override;
@@ -1084,6 +1089,12 @@ private:
/// Maps Types to their associated ClangASTMetadata.
TypeMetadataMap m_type_metadata;
+ typedef llvm::DenseMap<const clang::CXXRecordDecl *, clang::AccessSpecifier>
+ CXXRecordDeclAccessMap;
+ /// Maps each CXXRecordDecl to the AccessSpecifier of its most recently
+ /// added method/field.
+ CXXRecordDeclAccessMap m_cxx_record_decl_access;
+
/// The sema associated that is currently used to build this ASTContext.
/// May be null if we are already done parsing this ASTContext or the
/// ASTContext wasn't created by parsing source code.
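
To make the intent of m_cxx_record_decl_access concrete: tracking the previously added access lets AddAccessSpecifierDecl emit an explicit access-specifier declaration only on transitions and skip the one matching the record's implicit default. An illustrative reconstructed class (not taken from this patch) ends up shaped like this:

class Reconstructed {
  int m_hidden;   // AS_private is the class default: no leading "private:"

public:           // AccessSpecDecl inserted: access changed private -> public
  void Visible();

private:          // inserted again on the change back to private
  void Helper();
};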
diff --git a/contrib/llvm-project/lldb/source/Symbol/Function.cpp b/contrib/llvm-project/lldb/source/Symbol/Function.cpp
index 37479651fb45..dda9ec232715 100644
--- a/contrib/llvm-project/lldb/source/Symbol/Function.cpp
+++ b/contrib/llvm-project/lldb/source/Symbol/Function.cpp
@@ -195,7 +195,7 @@ Function *IndirectCallEdge::GetCallee(ModuleList &images,
Status error;
Value callee_addr_val;
if (!call_target.Evaluate(&exe_ctx, exe_ctx.GetRegisterContext(),
- /*loclist_base_addr=*/LLDB_INVALID_ADDRESS,
+ /*loclist_base_load_addr=*/LLDB_INVALID_ADDRESS,
/*initial_value_ptr=*/nullptr,
/*object_address_ptr=*/nullptr, callee_addr_val,
&error)) {
diff --git a/contrib/llvm-project/lldb/source/Symbol/Symbol.cpp b/contrib/llvm-project/lldb/source/Symbol/Symbol.cpp
index fa7226dfd046..5ee5b0fe2223 100644
--- a/contrib/llvm-project/lldb/source/Symbol/Symbol.cpp
+++ b/contrib/llvm-project/lldb/source/Symbol/Symbol.cpp
@@ -680,7 +680,7 @@ void Symbol::Encode(DataEncoder &file, ConstStringTable &strtab) const {
// symbol's base address doesn't have a section, then it is a constant value.
// If it does have a section, we will encode the file address and re-resolve
// the address when we decode it.
- bool is_addr = m_addr_range.GetBaseAddress().GetSection().get() != NULL;
+ bool is_addr = m_addr_range.GetBaseAddress().GetSection().get() != nullptr;
file.AppendU8(is_addr);
file.AppendU64(m_addr_range.GetBaseAddress().GetFileAddress());
file.AppendU64(m_addr_range.GetByteSize());
diff --git a/contrib/llvm-project/lldb/source/Symbol/Symtab.cpp b/contrib/llvm-project/lldb/source/Symbol/Symtab.cpp
index 75450a156c28..97dc31bc9766 100644
--- a/contrib/llvm-project/lldb/source/Symbol/Symtab.cpp
+++ b/contrib/llvm-project/lldb/source/Symbol/Symtab.cpp
@@ -34,7 +34,8 @@ using namespace lldb_private;
Symtab::Symtab(ObjectFile *objfile)
: m_objfile(objfile), m_symbols(), m_file_addr_to_index(*this),
m_name_to_symbol_indices(), m_mutex(),
- m_file_addr_to_index_computed(false), m_name_indexes_computed(false) {
+ m_file_addr_to_index_computed(false), m_name_indexes_computed(false),
+ m_loaded_from_cache(false), m_saved_to_cache(false) {
m_name_to_symbol_indices.emplace(std::make_pair(
lldb::eFunctionNameTypeNone, UniqueCStringMap<uint32_t>()));
m_name_to_symbol_indices.emplace(std::make_pair(
@@ -1179,7 +1180,8 @@ void Symtab::SaveToCache() {
// Encode will return false if the symbol table's object file doesn't have
// anything to make a signature from.
if (Encode(file))
- cache->SetCachedData(GetCacheKey(), file.GetData());
+ if (cache->SetCachedData(GetCacheKey(), file.GetData()))
+ SetWasSavedToCache();
}
constexpr llvm::StringLiteral kIdentifierCStrMap("CMAP");
@@ -1343,5 +1345,7 @@ bool Symtab::LoadFromCache() {
const bool result = Decode(data, &offset, signature_mismatch);
if (signature_mismatch)
cache->RemoveCacheFile(GetCacheKey());
+ if (result)
+ SetWasLoadedFromCache();
return result;
}
diff --git a/contrib/llvm-project/lldb/source/Symbol/TypeSystem.cpp b/contrib/llvm-project/lldb/source/Symbol/TypeSystem.cpp
index 0b3f7e4f3bd4..3092dc0bf0a4 100644
--- a/contrib/llvm-project/lldb/source/Symbol/TypeSystem.cpp
+++ b/contrib/llvm-project/lldb/source/Symbol/TypeSystem.cpp
@@ -22,7 +22,7 @@ using namespace lldb;
static const size_t g_num_small_bitvector_bits = 64 - 8;
static_assert(eNumLanguageTypes < g_num_small_bitvector_bits,
"Languages bit vector is no longer small on 64 bit systems");
-LanguageSet::LanguageSet() : bitvector(eNumLanguageTypes, 0) {}
+LanguageSet::LanguageSet() : bitvector(eNumLanguageTypes, false) {}
llvm::Optional<LanguageType> LanguageSet::GetSingularLanguage() {
if (bitvector.count() == 1)
diff --git a/contrib/llvm-project/lldb/source/Target/MemoryTagMap.cpp b/contrib/llvm-project/lldb/source/Target/MemoryTagMap.cpp
new file mode 100644
index 000000000000..846eef9209da
--- /dev/null
+++ b/contrib/llvm-project/lldb/source/Target/MemoryTagMap.cpp
@@ -0,0 +1,64 @@
+//===-- MemoryTagMap.cpp --------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "lldb/Target/MemoryTagMap.h"
+
+using namespace lldb_private;
+
+MemoryTagMap::MemoryTagMap(const MemoryTagManager *manager)
+ : m_manager(manager) {
+ assert(m_manager && "valid tag manager required to construct a MemoryTagMap");
+}
+
+void MemoryTagMap::InsertTags(lldb::addr_t addr,
+ const std::vector<lldb::addr_t> tags) {
+ // We assume that addr has no non-address bits and is granule-aligned.
+ size_t granule_size = m_manager->GetGranuleSize();
+ for (auto tag : tags) {
+ m_addr_to_tag[addr] = tag;
+ addr += granule_size;
+ }
+}
+
+bool MemoryTagMap::Empty() const { return m_addr_to_tag.empty(); }
+
+std::vector<llvm::Optional<lldb::addr_t>>
+MemoryTagMap::GetTags(lldb::addr_t addr, size_t len) const {
+ // Addr and len might be unaligned
+ addr = m_manager->RemoveTagBits(addr);
+ MemoryTagManager::TagRange range(addr, len);
+ range = m_manager->ExpandToGranule(range);
+
+ std::vector<llvm::Optional<lldb::addr_t>> tags;
+ lldb::addr_t end_addr = range.GetRangeEnd();
+ addr = range.GetRangeBase();
+ bool got_valid_tags = false;
+ size_t granule_size = m_manager->GetGranuleSize();
+
+ for (; addr < end_addr; addr += granule_size) {
+ llvm::Optional<lldb::addr_t> tag = GetTag(addr);
+ tags.push_back(tag);
+ if (tag)
+ got_valid_tags = true;
+ }
+
+ // To save the caller from checking whether every item is llvm::None,
+ // we return an empty vector if we found no tags at all.
+ if (got_valid_tags)
+ return tags;
+ return {};
+}
+
+llvm::Optional<lldb::addr_t> MemoryTagMap::GetTag(lldb::addr_t addr) const {
+ // Here we assume that addr is granule-aligned, just like when the tags
+ // were inserted.
+ auto found = m_addr_to_tag.find(addr);
+ if (found == m_addr_to_tag.end())
+ return llvm::None;
+ return found->second;
+}
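
As a rough sketch of how the new MemoryTagMap API is meant to be driven (the
AArch64 MTE manager and its 16-byte granule are assumptions for illustration,
not part of this change):

    // Assumed: an MTE-style tag manager with a 16-byte granule.
    MemoryTagManagerAArch64MTE manager;
    MemoryTagMap tag_map(&manager);

    // Tag two consecutive granules starting at a granule-aligned address.
    tag_map.InsertTags(0x1000, {0x1, 0x2});

    // Ask for three granules' worth of tags; the third granule was never
    // tagged, so its slot comes back as llvm::None.
    std::vector<llvm::Optional<lldb::addr_t>> tags =
        tag_map.GetTags(0x1000, 48);
    // tags == {0x1, 0x2, llvm::None}
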
diff --git a/contrib/llvm-project/lldb/source/Target/Platform.cpp b/contrib/llvm-project/lldb/source/Target/Platform.cpp
index af5ca0225169..3c331c8760df 100644
--- a/contrib/llvm-project/lldb/source/Target/Platform.cpp
+++ b/contrib/llvm-project/lldb/source/Target/Platform.cpp
@@ -428,6 +428,9 @@ void Platform::GetStatus(Stream &strm) {
strm.Printf(" Connected: %s\n", is_connected ? "yes" : "no");
}
+ if (GetSDKRootDirectory()) {
+ strm.Format(" Sysroot: {0}\n", GetSDKRootDirectory());
+ }
if (GetWorkingDirectory()) {
strm.Printf("WorkingDir: %s\n", GetWorkingDirectory().GetCString());
}
@@ -2000,3 +2003,7 @@ size_t Platform::GetSoftwareBreakpointTrapOpcode(Target &target,
return 0;
}
+
+CompilerType Platform::GetSiginfoType(const llvm::Triple& triple) {
+ return CompilerType();
+}
diff --git a/contrib/llvm-project/lldb/source/Target/RegisterContextUnwind.cpp b/contrib/llvm-project/lldb/source/Target/RegisterContextUnwind.cpp
index 96b69640a3a3..315ccea65d1f 100644
--- a/contrib/llvm-project/lldb/source/Target/RegisterContextUnwind.cpp
+++ b/contrib/llvm-project/lldb/source/Target/RegisterContextUnwind.cpp
@@ -1509,7 +1509,7 @@ RegisterContextUnwind::SavedLocationForRegister(
regnum.GetName(), regnum.GetAsKind(eRegisterKindLLDB));
return UnwindLLDB::RegisterSearchResult::eRegisterFound;
} else {
- std::string unwindplan_name("");
+ std::string unwindplan_name;
if (m_full_unwind_plan_sp) {
unwindplan_name += "via '";
unwindplan_name += m_full_unwind_plan_sp->GetSourceName().AsCString();
diff --git a/contrib/llvm-project/lldb/source/Target/StackFrame.cpp b/contrib/llvm-project/lldb/source/Target/StackFrame.cpp
index 7b4295158425..58de26b23b65 100644
--- a/contrib/llvm-project/lldb/source/Target/StackFrame.cpp
+++ b/contrib/llvm-project/lldb/source/Target/StackFrame.cpp
@@ -1433,13 +1433,13 @@ ValueObjectSP GetValueForDereferincingOffset(StackFrame &frame,
/// Attempt to reconstruct the ValueObject for the address contained in a
/// given register plus an offset.
///
-/// \params [in] frame
+/// \param [in] frame
/// The current stack frame.
///
-/// \params [in] reg
+/// \param [in] reg
/// The register.
///
-/// \params [in] offset
+/// \param [in] offset
/// The offset from the register.
///
/// \param [in] disassembler
diff --git a/contrib/llvm-project/lldb/source/Target/Statistics.cpp b/contrib/llvm-project/lldb/source/Target/Statistics.cpp
index 1b205c533519..ebddad837d14 100644
--- a/contrib/llvm-project/lldb/source/Target/Statistics.cpp
+++ b/contrib/llvm-project/lldb/source/Target/Statistics.cpp
@@ -34,7 +34,8 @@ json::Value StatsSuccessFail::ToJSON() const {
}
static double elapsed(const StatsTimepoint &start, const StatsTimepoint &end) {
- StatsDuration elapsed = end.time_since_epoch() - start.time_since_epoch();
+ StatsDuration::Duration elapsed =
+ end.time_since_epoch() - start.time_since_epoch();
return elapsed.count();
}
@@ -52,12 +53,26 @@ json::Value ModuleStats::ToJSON() const {
module.try_emplace("identifier", identifier);
module.try_emplace("symbolTableParseTime", symtab_parse_time);
module.try_emplace("symbolTableIndexTime", symtab_index_time);
+ module.try_emplace("symbolTableLoadedFromCache", symtab_loaded_from_cache);
+ module.try_emplace("symbolTableSavedToCache", symtab_saved_to_cache);
module.try_emplace("debugInfoParseTime", debug_parse_time);
module.try_emplace("debugInfoIndexTime", debug_index_time);
module.try_emplace("debugInfoByteSize", (int64_t)debug_info_size);
+ module.try_emplace("debugInfoIndexLoadedFromCache",
+ debug_info_index_loaded_from_cache);
+ module.try_emplace("debugInfoIndexSavedToCache",
+ debug_info_index_saved_to_cache);
return module;
}
+llvm::json::Value ConstStringStats::ToJSON() const {
+ json::Object obj;
+ obj.try_emplace<int64_t>("bytesTotal", stats.GetBytesTotal());
+ obj.try_emplace<int64_t>("bytesUsed", stats.GetBytesUsed());
+ obj.try_emplace<int64_t>("bytesUnused", stats.GetBytesUnused());
+ return obj;
+}
+
json::Value TargetStats::ToJSON(Target &target) {
CollectStats(target);
@@ -80,7 +95,8 @@ json::Value TargetStats::ToJSON(Target &target) {
elapsed(*m_launch_or_attach_time, *m_first_public_stop_time);
target_metrics_json.try_emplace("firstStopTime", elapsed_time);
}
- target_metrics_json.try_emplace("targetCreateTime", m_create_time.count());
+ target_metrics_json.try_emplace("targetCreateTime",
+ m_create_time.get().count());
json::Array breakpoints_array;
double totalBreakpointResolveTime = 0.0;
@@ -144,6 +160,10 @@ llvm::json::Value DebuggerStats::ReportStatistics(Debugger &debugger,
double symtab_index_time = 0.0;
double debug_parse_time = 0.0;
double debug_index_time = 0.0;
+ uint32_t symtabs_loaded = 0;
+ uint32_t symtabs_saved = 0;
+ uint32_t debug_index_loaded = 0;
+ uint32_t debug_index_saved = 0;
uint64_t debug_info_size = 0;
if (target) {
json_targets.emplace_back(target->ReportStatistics());
@@ -167,13 +187,30 @@ llvm::json::Value DebuggerStats::ReportStatistics(Debugger &debugger,
}
module_stat.uuid = module->GetUUID().GetAsString();
module_stat.triple = module->GetArchitecture().GetTriple().str();
- module_stat.symtab_parse_time = module->GetSymtabParseTime().count();
- module_stat.symtab_index_time = module->GetSymtabIndexTime().count();
+ module_stat.symtab_parse_time = module->GetSymtabParseTime().get().count();
+ module_stat.symtab_index_time = module->GetSymtabIndexTime().get().count();
+ Symtab *symtab = module->GetSymtab();
+ if (symtab) {
+ module_stat.symtab_loaded_from_cache = symtab->GetWasLoadedFromCache();
+ if (module_stat.symtab_loaded_from_cache)
+ ++symtabs_loaded;
+ module_stat.symtab_saved_to_cache = symtab->GetWasSavedToCache();
+ if (module_stat.symtab_saved_to_cache)
+ ++symtabs_saved;
+ }
SymbolFile *sym_file = module->GetSymbolFile();
if (sym_file) {
module_stat.debug_index_time = sym_file->GetDebugInfoIndexTime().count();
module_stat.debug_parse_time = sym_file->GetDebugInfoParseTime().count();
module_stat.debug_info_size = sym_file->GetDebugInfoSize();
+ module_stat.debug_info_index_loaded_from_cache =
+ sym_file->GetDebugInfoIndexWasLoadedFromCache();
+ if (module_stat.debug_info_index_loaded_from_cache)
+ ++debug_index_loaded;
+ module_stat.debug_info_index_saved_to_cache =
+ sym_file->GetDebugInfoIndexWasSavedToCache();
+ if (module_stat.debug_info_index_saved_to_cache)
+ ++debug_index_saved;
}
symtab_parse_time += module_stat.symtab_parse_time;
symtab_index_time += module_stat.symtab_index_time;
@@ -183,13 +220,23 @@ llvm::json::Value DebuggerStats::ReportStatistics(Debugger &debugger,
json_modules.emplace_back(module_stat.ToJSON());
}
+ ConstStringStats const_string_stats;
+ json::Object json_memory{
+ {"strings", const_string_stats.ToJSON()},
+ };
+
json::Object global_stats{
{"targets", std::move(json_targets)},
{"modules", std::move(json_modules)},
+ {"memory", std::move(json_memory)},
{"totalSymbolTableParseTime", symtab_parse_time},
{"totalSymbolTableIndexTime", symtab_index_time},
+ {"totalSymbolTablesLoadedFromCache", symtabs_loaded},
+ {"totalSymbolTablesSavedToCache", symtabs_saved},
{"totalDebugInfoParseTime", debug_parse_time},
{"totalDebugInfoIndexTime", debug_index_time},
+ {"totalDebugInfoIndexLoadedFromCache", debug_index_loaded},
+ {"totalDebugInfoIndexSavedToCache", debug_index_saved},
{"totalDebugInfoByteSize", debug_info_size},
};
return std::move(global_stats);
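
The visible effect of these additions is that the statistics dump output gains
per-module and global cache counters plus a top-level "memory" section for the
string pool; the figures below are illustrative only:

    "memory": {
      "strings": { "bytesTotal": 4194304, "bytesUsed": 3871232, "bytesUnused": 323072 }
    },
    "totalSymbolTablesLoadedFromCache": 12,
    "totalSymbolTablesSavedToCache": 3,
    "totalDebugInfoIndexLoadedFromCache": 12,
    "totalDebugInfoIndexSavedToCache": 3
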
diff --git a/contrib/llvm-project/lldb/source/Target/Target.cpp b/contrib/llvm-project/lldb/source/Target/Target.cpp
index fa860399aca7..01e51c0577aa 100644
--- a/contrib/llvm-project/lldb/source/Target/Target.cpp
+++ b/contrib/llvm-project/lldb/source/Target/Target.cpp
@@ -212,17 +212,20 @@ const lldb::ProcessSP &Target::GetProcessSP() const { return m_process_sp; }
lldb::REPLSP Target::GetREPL(Status &err, lldb::LanguageType language,
const char *repl_options, bool can_create) {
+ if (language == eLanguageTypeUnknown)
+ language = m_debugger.GetREPLLanguage();
+
if (language == eLanguageTypeUnknown) {
LanguageSet repl_languages = Language::GetLanguagesSupportingREPLs();
if (auto single_lang = repl_languages.GetSingularLanguage()) {
language = *single_lang;
} else if (repl_languages.Empty()) {
- err.SetErrorStringWithFormat(
+ err.SetErrorString(
"LLDB isn't configured with REPL support for any languages.");
return REPLSP();
} else {
- err.SetErrorStringWithFormat(
+ err.SetErrorString(
"Multiple possible REPL languages. Please specify a language.");
return REPLSP();
}
diff --git a/contrib/llvm-project/lldb/source/Target/Thread.cpp b/contrib/llvm-project/lldb/source/Target/Thread.cpp
index 481a39a576e9..c5f16b4e6c1d 100644
--- a/contrib/llvm-project/lldb/source/Target/Thread.cpp
+++ b/contrib/llvm-project/lldb/source/Target/Thread.cpp
@@ -471,9 +471,7 @@ void Thread::SetStopInfoToNothing() {
StopInfo::CreateStopReasonWithSignal(*this, LLDB_INVALID_SIGNAL_NUMBER));
}
-bool Thread::ThreadStoppedForAReason(void) {
- return (bool)GetPrivateStopInfo();
-}
+bool Thread::ThreadStoppedForAReason() { return (bool)GetPrivateStopInfo(); }
bool Thread::CheckpointThreadState(ThreadStateCheckpoint &saved_state) {
saved_state.register_backup_sp.reset();
diff --git a/contrib/llvm-project/lldb/source/Target/TraceInstructionDumper.cpp b/contrib/llvm-project/lldb/source/Target/TraceInstructionDumper.cpp
index dc1e86481c36..d58d2dff7383 100644
--- a/contrib/llvm-project/lldb/source/Target/TraceInstructionDumper.cpp
+++ b/contrib/llvm-project/lldb/source/Target/TraceInstructionDumper.cpp
@@ -137,7 +137,7 @@ DumpInstructionSymbolContext(Stream &s,
insn.sc.module_sp->GetFileSpec().GetFilename().AsCString());
else
insn.sc.DumpStopContext(&s, insn.exe_ctx.GetTargetPtr(), insn.address,
- /*show_fullpath=*/false,
+ /*show_fullpaths=*/false,
/*show_module=*/true, /*show_inlined_frames=*/false,
/*show_function_arguments=*/true,
/*show_function_name=*/true);
@@ -148,8 +148,8 @@ static void DumpInstructionDisassembly(Stream &s, InstructionSymbolInfo &insn) {
if (!insn.instruction)
return;
s.Printf(" ");
- insn.instruction->Dump(&s, /*show_address=*/false, /*show_bytes=*/false,
- /*max_opcode_byte_size=*/0, &insn.exe_ctx, &insn.sc,
+ insn.instruction->Dump(&s, /*max_opcode_byte_size=*/0, /*show_address=*/false,
+ /*show_bytes=*/false, &insn.exe_ctx, &insn.sc,
/*prev_sym_ctx=*/nullptr,
/*disassembly_addr_format=*/nullptr,
/*max_address_text_size=*/0);
diff --git a/contrib/llvm-project/lldb/source/Utility/ConstString.cpp b/contrib/llvm-project/lldb/source/Utility/ConstString.cpp
index e5e1b2387e64..142c335ddbbe 100644
--- a/contrib/llvm-project/lldb/source/Utility/ConstString.cpp
+++ b/contrib/llvm-project/lldb/source/Utility/ConstString.cpp
@@ -159,16 +159,15 @@ public:
return nullptr;
}
- // Return the size in bytes that this object and any items in its collection
- // of uniqued strings + data count values takes in memory.
- size_t MemorySize() const {
- size_t mem_size = sizeof(Pool);
+ ConstString::MemoryStats GetMemoryStats() const {
+ ConstString::MemoryStats stats;
for (const auto &pool : m_string_pools) {
llvm::sys::SmartScopedReader<false> rlock(pool.m_mutex);
- for (const auto &entry : pool.m_string_map)
- mem_size += sizeof(StringPoolEntryType) + entry.getKey().size();
+ const Allocator &alloc = pool.m_string_map.getAllocator();
+ stats.bytes_total += alloc.getTotalMemory();
+ stats.bytes_used += alloc.getBytesAllocated();
}
- return mem_size;
+ return stats;
}
protected:
@@ -327,9 +326,8 @@ void ConstString::SetTrimmedCStringWithLength(const char *cstr,
m_string = StringPool().GetConstTrimmedCStringWithLength(cstr, cstr_len);
}
-size_t ConstString::StaticMemorySize() {
- // Get the size of the static string pool
- return StringPool().MemorySize();
+ConstString::MemoryStats ConstString::GetMemoryStats() {
+ return StringPool().GetMemoryStats();
}
void llvm::format_provider<ConstString>::format(const ConstString &CS,
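
A minimal sketch of the replacement API (the exact counts depend on what has
been interned; the unused figure is the reserved memory not yet occupied):

    // Instead of a single aggregate size, callers now see how much the string
    // pools' allocators reserved versus how much the uniqued strings occupy.
    ConstString::MemoryStats stats = ConstString::GetMemoryStats();
    size_t reserved = stats.GetBytesTotal();
    size_t slack = stats.GetBytesUnused(); // reserved but not yet used
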
diff --git a/contrib/llvm-project/lldb/source/Utility/Instrumentation.cpp b/contrib/llvm-project/lldb/source/Utility/Instrumentation.cpp
new file mode 100644
index 000000000000..861789810e1a
--- /dev/null
+++ b/contrib/llvm-project/lldb/source/Utility/Instrumentation.cpp
@@ -0,0 +1,43 @@
+//===-- Instrumentation.cpp -----------------------------------------------===//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "lldb/Utility/Instrumentation.h"
+#include "llvm/Support/Signposts.h"
+
+#include <cstdio>
+#include <cstdlib>
+#include <limits>
+#include <thread>
+
+using namespace lldb_private;
+using namespace lldb_private::instrumentation;
+
+// Whether we're currently across the API boundary.
+static thread_local bool g_global_boundary = false;
+
+// Instrument SB API calls with signposts when supported.
+static llvm::ManagedStatic<llvm::SignpostEmitter> g_api_signposts;
+
+Instrumenter::Instrumenter(llvm::StringRef pretty_func,
+ std::string &&pretty_args)
+ : m_pretty_func(pretty_func), m_local_boundary(false) {
+ if (!g_global_boundary) {
+ g_global_boundary = true;
+ m_local_boundary = true;
+ g_api_signposts->startInterval(this, m_pretty_func);
+ }
+ LLDB_LOG(GetLogIfAllCategoriesSet(LIBLLDB_LOG_API), "[{0}] {1} ({2})",
+ m_local_boundary ? "external" : "internal", m_pretty_func,
+ pretty_args);
+}
+
+Instrumenter::~Instrumenter() {
+ if (m_local_boundary) {
+ g_global_boundary = false;
+ g_api_signposts->endInterval(this, m_pretty_func);
+ }
+}
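
The Instrumenter is not meant to be constructed by hand; the
LLDB_INSTRUMENT/LLDB_INSTRUMENT_VA macros that lldb-instr now inserts (see the
tools/lldb-instr change further down) create one on the stack at the top of
each SB method. A hypothetical SB method would look like:

    // SBFoo and m_opaque_sp are placeholders, not real LLDB classes.
    bool SBFoo::IsValid() const {
      LLDB_INSTRUMENT_VA(this); // logs the call on the api channel and records
                                // whether it crossed the public API boundary
      return m_opaque_sp != nullptr;
    }
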
diff --git a/contrib/llvm-project/lldb/source/Utility/Log.cpp b/contrib/llvm-project/lldb/source/Utility/Log.cpp
index ff654ec93e78..d229538073d1 100644
--- a/contrib/llvm-project/lldb/source/Utility/Log.cpp
+++ b/contrib/llvm-project/lldb/source/Utility/Log.cpp
@@ -30,7 +30,6 @@
#include <process.h>
#else
#include <unistd.h>
-#include <pthread.h>
#endif
using namespace lldb_private;
@@ -89,7 +88,7 @@ void Log::Enable(const std::shared_ptr<llvm::raw_ostream> &stream_sp,
uint32_t options, uint32_t flags) {
llvm::sys::ScopedWriter lock(m_mutex);
- uint32_t mask = m_mask.fetch_or(flags, std::memory_order_relaxed);
+ MaskType mask = m_mask.fetch_or(flags, std::memory_order_relaxed);
if (mask | flags) {
m_options.store(options, std::memory_order_relaxed);
m_stream_sp = stream_sp;
@@ -100,7 +99,7 @@ void Log::Enable(const std::shared_ptr<llvm::raw_ostream> &stream_sp,
void Log::Disable(uint32_t flags) {
llvm::sys::ScopedWriter lock(m_mutex);
- uint32_t mask = m_mask.fetch_and(~flags, std::memory_order_relaxed);
+ MaskType mask = m_mask.fetch_and(~flags, std::memory_order_relaxed);
if (!(mask & ~flags)) {
m_stream_sp.reset();
m_channel.log_ptr.store(nullptr, std::memory_order_relaxed);
@@ -180,9 +179,6 @@ void Log::Warning(const char *format, ...) {
}
void Log::Initialize() {
-#ifdef LLVM_ON_UNIX
- pthread_atfork(nullptr, nullptr, &Log::DisableLoggingChild);
-#endif
InitializeLldbChannel();
}
@@ -346,11 +342,3 @@ void Log::Format(llvm::StringRef file, llvm::StringRef function,
message << payload << "\n";
WriteMessage(message.str());
}
-
-void Log::DisableLoggingChild() {
- // Disable logging by clearing out the atomic variable after forking -- if we
- // forked while another thread held the channel mutex, we would deadlock when
- // trying to write to the log.
- for (auto &c: *g_channel_map)
- c.second.m_channel.log_ptr.store(nullptr, std::memory_order_relaxed);
-}
diff --git a/contrib/llvm-project/lldb/source/Utility/Logging.cpp b/contrib/llvm-project/lldb/source/Utility/Logging.cpp
index 4648bec502c5..67d5d3af2640 100644
--- a/contrib/llvm-project/lldb/source/Utility/Logging.cpp
+++ b/contrib/llvm-project/lldb/source/Utility/Logging.cpp
@@ -16,49 +16,74 @@
using namespace lldb_private;
static constexpr Log::Category g_categories[] = {
- {{"api"}, {"log API calls and return values"}, LIBLLDB_LOG_API},
- {{"ast"}, {"log AST"}, LIBLLDB_LOG_AST},
- {{"break"}, {"log breakpoints"}, LIBLLDB_LOG_BREAKPOINTS},
- {{"commands"}, {"log command argument parsing"}, LIBLLDB_LOG_COMMANDS},
- {{"comm"}, {"log communication activities"}, LIBLLDB_LOG_COMMUNICATION},
- {{"conn"}, {"log connection details"}, LIBLLDB_LOG_CONNECTION},
- {{"demangle"}, {"log mangled names to catch demangler crashes"}, LIBLLDB_LOG_DEMANGLE},
- {{"dyld"}, {"log shared library related activities"}, LIBLLDB_LOG_DYNAMIC_LOADER},
- {{"event"}, {"log broadcaster, listener and event queue activities"}, LIBLLDB_LOG_EVENTS},
- {{"expr"}, {"log expressions"}, LIBLLDB_LOG_EXPRESSIONS},
- {{"formatters"}, {"log data formatters related activities"}, LIBLLDB_LOG_DATAFORMATTERS},
- {{"host"}, {"log host activities"}, LIBLLDB_LOG_HOST},
- {{"jit"}, {"log JIT events in the target"}, LIBLLDB_LOG_JIT_LOADER},
- {{"language"}, {"log language runtime events"}, LIBLLDB_LOG_LANGUAGE},
- {{"mmap"}, {"log mmap related activities"}, LIBLLDB_LOG_MMAP},
- {{"module"}, {"log module activities such as when modules are created, destroyed, replaced, and more"}, LIBLLDB_LOG_MODULES},
- {{"object"}, {"log object construction/destruction for important objects"}, LIBLLDB_LOG_OBJECT},
- {{"os"}, {"log OperatingSystem plugin related activities"}, LIBLLDB_LOG_OS},
- {{"platform"}, {"log platform events and activities"}, LIBLLDB_LOG_PLATFORM},
- {{"process"}, {"log process events and activities"}, LIBLLDB_LOG_PROCESS},
- {{"script"}, {"log events about the script interpreter"}, LIBLLDB_LOG_SCRIPT},
- {{"state"}, {"log private and public process state changes"}, LIBLLDB_LOG_STATE},
- {{"step"}, {"log step related activities"}, LIBLLDB_LOG_STEP},
- {{"symbol"}, {"log symbol related issues and warnings"}, LIBLLDB_LOG_SYMBOLS},
- {{"system-runtime"}, {"log system runtime events"}, LIBLLDB_LOG_SYSTEM_RUNTIME},
- {{"target"}, {"log target events and activities"}, LIBLLDB_LOG_TARGET},
- {{"temp"}, {"log internal temporary debug messages"}, LIBLLDB_LOG_TEMPORARY},
- {{"thread"}, {"log thread events and activities"}, LIBLLDB_LOG_THREAD},
- {{"types"}, {"log type system related activities"}, LIBLLDB_LOG_TYPES},
- {{"unwind"}, {"log stack unwind activities"}, LIBLLDB_LOG_UNWIND},
- {{"watch"}, {"log watchpoint related activities"}, LIBLLDB_LOG_WATCHPOINTS},
+ {{"api"}, {"log API calls and return values"}, LLDBLog::API},
+ {{"ast"}, {"log AST"}, LLDBLog::AST},
+ {{"break"}, {"log breakpoints"}, LLDBLog::Breakpoints},
+ {{"commands"}, {"log command argument parsing"}, LLDBLog::Commands},
+ {{"comm"}, {"log communication activities"}, LLDBLog::Communication},
+ {{"conn"}, {"log connection details"}, LLDBLog::Connection},
+ {{"demangle"},
+ {"log mangled names to catch demangler crashes"},
+ LLDBLog::Demangle},
+ {{"dyld"},
+ {"log shared library related activities"},
+ LLDBLog::DynamicLoader},
+ {{"event"},
+ {"log broadcaster, listener and event queue activities"},
+ LLDBLog::Events},
+ {{"expr"}, {"log expressions"}, LLDBLog::Expressions},
+ {{"formatters"},
+ {"log data formatters related activities"},
+ LLDBLog::DataFormatters},
+ {{"host"}, {"log host activities"}, LLDBLog::Host},
+ {{"jit"}, {"log JIT events in the target"}, LLDBLog::JITLoader},
+ {{"language"}, {"log language runtime events"}, LLDBLog::Language},
+ {{"mmap"}, {"log mmap related activities"}, LLDBLog::MMap},
+ {{"module"},
+ {"log module activities such as when modules are created, destroyed, "
+ "replaced, and more"},
+ LLDBLog::Modules},
+ {{"object"},
+ {"log object construction/destruction for important objects"},
+ LLDBLog::Object},
+ {{"os"}, {"log OperatingSystem plugin related activities"}, LLDBLog::OS},
+ {{"platform"}, {"log platform events and activities"}, LLDBLog::Platform},
+ {{"process"}, {"log process events and activities"}, LLDBLog::Process},
+ {{"script"}, {"log events about the script interpreter"}, LLDBLog::Script},
+ {{"state"},
+ {"log private and public process state changes"},
+ LLDBLog::State},
+ {{"step"}, {"log step related activities"}, LLDBLog::Step},
+ {{"symbol"}, {"log symbol related issues and warnings"}, LLDBLog::Symbols},
+ {{"system-runtime"}, {"log system runtime events"}, LLDBLog::SystemRuntime},
+ {{"target"}, {"log target events and activities"}, LLDBLog::Target},
+ {{"temp"}, {"log internal temporary debug messages"}, LLDBLog::Temporary},
+ {{"thread"}, {"log thread events and activities"}, LLDBLog::Thread},
+ {{"types"}, {"log type system related activities"}, LLDBLog::Types},
+ {{"unwind"}, {"log stack unwind activities"}, LLDBLog::Unwind},
+ {{"watch"}, {"log watchpoint related activities"}, LLDBLog::Watchpoints},
};
-static Log::Channel g_log_channel(g_categories, LIBLLDB_LOG_DEFAULT);
+static Log::Channel g_log_channel(g_categories,
+ LLDBLog::Process | LLDBLog::Thread |
+ LLDBLog::DynamicLoader |
+ LLDBLog::Breakpoints |
+ LLDBLog::Watchpoints | LLDBLog::Step |
+ LLDBLog::State | LLDBLog::Symbols |
+ LLDBLog::Target | LLDBLog::Commands);
+
+template <> Log::Channel &lldb_private::LogChannelFor<LLDBLog>() {
+ return g_log_channel;
+}
void lldb_private::InitializeLldbChannel() {
Log::Register("lldb", g_log_channel);
}
-Log *lldb_private::GetLogIfAllCategoriesSet(uint32_t mask) {
- return g_log_channel.GetLogIfAll(mask);
+Log *lldb_private::GetLogIfAllCategoriesSet(LLDBLog mask) {
+ return GetLog(mask);
}
-Log *lldb_private::GetLogIfAnyCategoriesSet(uint32_t mask) {
- return g_log_channel.GetLogIfAny(mask);
+Log *lldb_private::GetLogIfAnyCategoriesSet(LLDBLog mask) {
+ return GetLog(mask);
}
diff --git a/contrib/llvm-project/lldb/source/Utility/Reproducer.cpp b/contrib/llvm-project/lldb/source/Utility/Reproducer.cpp
index a306d6c1ef25..1e71dba472ed 100644
--- a/contrib/llvm-project/lldb/source/Utility/Reproducer.cpp
+++ b/contrib/llvm-project/lldb/source/Utility/Reproducer.cpp
@@ -362,7 +362,7 @@ llvm::Error repro::Finalize(Loader *loader) {
FileSpec mapping =
reproducer_root.CopyByAppendingPathComponent(FileProvider::Info::file);
- if (auto ec = collector.copyFiles(/*stop_on_error=*/false))
+ if (auto ec = collector.copyFiles(/*StopOnError=*/false))
return errorCodeToError(ec);
collector.writeMapping(mapping.GetPath());
diff --git a/contrib/llvm-project/lldb/source/Utility/ReproducerInstrumentation.cpp b/contrib/llvm-project/lldb/source/Utility/ReproducerInstrumentation.cpp
deleted file mode 100644
index b3285f4b3776..000000000000
--- a/contrib/llvm-project/lldb/source/Utility/ReproducerInstrumentation.cpp
+++ /dev/null
@@ -1,262 +0,0 @@
-//===-- ReproducerInstrumentation.cpp -------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "lldb/Utility/ReproducerInstrumentation.h"
-#include "lldb/Utility/Reproducer.h"
-#include <cstdio>
-#include <cstdlib>
-#include <limits>
-#include <thread>
-
-using namespace lldb_private;
-using namespace lldb_private::repro;
-
-// Whether we're currently across the API boundary.
-static thread_local bool g_global_boundary = false;
-
-void *IndexToObject::GetObjectForIndexImpl(unsigned idx) {
- return m_mapping.lookup(idx);
-}
-
-void IndexToObject::AddObjectForIndexImpl(unsigned idx, void *object) {
- assert(idx != 0 && "Cannot add object for sentinel");
- m_mapping[idx] = object;
-}
-
-std::vector<void *> IndexToObject::GetAllObjects() const {
- std::vector<std::pair<unsigned, void *>> pairs;
- for (auto &e : m_mapping) {
- pairs.emplace_back(e.first, e.second);
- }
-
- // Sort based on index.
- std::sort(pairs.begin(), pairs.end(),
- [](auto &lhs, auto &rhs) { return lhs.first < rhs.first; });
-
- std::vector<void *> objects;
- objects.reserve(pairs.size());
- for (auto &p : pairs) {
- objects.push_back(p.second);
- }
-
- return objects;
-}
-
-template <> const uint8_t *Deserializer::Deserialize<const uint8_t *>() {
- return Deserialize<uint8_t *>();
-}
-
-template <> void *Deserializer::Deserialize<void *>() {
- return const_cast<void *>(Deserialize<const void *>());
-}
-
-template <> const void *Deserializer::Deserialize<const void *>() {
- return nullptr;
-}
-
-template <> char *Deserializer::Deserialize<char *>() {
- return const_cast<char *>(Deserialize<const char *>());
-}
-
-template <> const char *Deserializer::Deserialize<const char *>() {
- const size_t size = Deserialize<size_t>();
- if (size == std::numeric_limits<size_t>::max())
- return nullptr;
- assert(HasData(size + 1));
- const char *str = m_buffer.data();
- m_buffer = m_buffer.drop_front(size + 1);
-#ifdef LLDB_REPRO_INSTR_TRACE
- llvm::errs() << "Deserializing with " << LLVM_PRETTY_FUNCTION << " -> \""
- << str << "\"\n";
-#endif
- return str;
-}
-
-template <> const char **Deserializer::Deserialize<const char **>() {
- const size_t size = Deserialize<size_t>();
- if (size == 0)
- return nullptr;
- const char **r =
- reinterpret_cast<const char **>(calloc(size + 1, sizeof(char *)));
- for (size_t i = 0; i < size; ++i)
- r[i] = Deserialize<const char *>();
- return r;
-}
-
-void Deserializer::CheckSequence(unsigned sequence) {
- if (m_expected_sequence && *m_expected_sequence != sequence)
- llvm::report_fatal_error(
- "The result does not match the preceding "
- "function. This is probably the result of concurrent "
- "use of the SB API during capture, which is currently not "
- "supported.");
- m_expected_sequence.reset();
-}
-
-bool Registry::Replay(const FileSpec &file) {
- auto error_or_file = llvm::MemoryBuffer::getFile(file.GetPath());
- if (auto err = error_or_file.getError())
- return false;
-
- return Replay((*error_or_file)->getBuffer());
-}
-
-bool Registry::Replay(llvm::StringRef buffer) {
- Deserializer deserializer(buffer);
- return Replay(deserializer);
-}
-
-bool Registry::Replay(Deserializer &deserializer) {
-#ifndef LLDB_REPRO_INSTR_TRACE
- Log *log = GetLogIfAllCategoriesSet(LIBLLDB_LOG_API);
-#endif
-
- // Disable buffering stdout so that we approximate the way things get flushed
- // during an interactive session.
- setvbuf(stdout, nullptr, _IONBF, 0);
-
- while (deserializer.HasData(1)) {
- unsigned sequence = deserializer.Deserialize<unsigned>();
- unsigned id = deserializer.Deserialize<unsigned>();
-
-#ifndef LLDB_REPRO_INSTR_TRACE
- LLDB_LOG(log, "Replaying {0}: {1}", id, GetSignature(id));
-#else
- llvm::errs() << "Replaying " << id << ": " << GetSignature(id) << "\n";
-#endif
-
- deserializer.SetExpectedSequence(sequence);
- GetReplayer(id)->operator()(deserializer);
- }
-
- // Add a small artificial delay to ensure that all asynchronous events have
- // completed before we exit.
- std::this_thread::sleep_for(std::chrono::milliseconds(100));
-
- return true;
-}
-
-void Registry::DoRegister(uintptr_t RunID, std::unique_ptr<Replayer> replayer,
- SignatureStr signature) {
- const unsigned id = m_replayers.size() + 1;
- assert(m_replayers.find(RunID) == m_replayers.end());
- m_replayers[RunID] = std::make_pair(std::move(replayer), id);
- m_ids[id] =
- std::make_pair(m_replayers[RunID].first.get(), std::move(signature));
-}
-
-unsigned Registry::GetID(uintptr_t addr) {
- unsigned id = m_replayers[addr].second;
- assert(id != 0 && "Forgot to add function to registry?");
- return id;
-}
-
-std::string Registry::GetSignature(unsigned id) {
- assert(m_ids.count(id) != 0 && "ID not in registry");
- return m_ids[id].second.ToString();
-}
-
-void Registry::CheckID(unsigned expected, unsigned actual) {
- if (expected != actual) {
- llvm::errs() << "Reproducer expected signature " << expected << ": '"
- << GetSignature(expected) << "'\n";
- llvm::errs() << "Reproducer actual signature " << actual << ": '"
- << GetSignature(actual) << "'\n";
- llvm::report_fatal_error(
- "Detected reproducer replay divergence. Refusing to continue.");
- }
-
-#ifdef LLDB_REPRO_INSTR_TRACE
- llvm::errs() << "Replaying " << actual << ": " << GetSignature(actual)
- << "\n";
-#endif
-}
-
-Replayer *Registry::GetReplayer(unsigned id) {
- assert(m_ids.count(id) != 0 && "ID not in registry");
- return m_ids[id].first;
-}
-
-std::string Registry::SignatureStr::ToString() const {
- return (result + (result.empty() ? "" : " ") + scope + "::" + name + args)
- .str();
-}
-
-unsigned ObjectToIndex::GetIndexForObjectImpl(const void *object) {
- unsigned index = m_mapping.size() + 1;
- auto it = m_mapping.find(object);
- if (it == m_mapping.end())
- m_mapping[object] = index;
- return m_mapping[object];
-}
-
-Recorder::Recorder()
- : m_pretty_func(), m_pretty_args(),
-
- m_sequence(std::numeric_limits<unsigned>::max()) {
- if (!g_global_boundary) {
- g_global_boundary = true;
- m_local_boundary = true;
- m_sequence = GetNextSequenceNumber();
- }
-}
-
-Recorder::Recorder(llvm::StringRef pretty_func, std::string &&pretty_args)
- : m_serializer(nullptr), m_pretty_func(pretty_func),
- m_pretty_args(pretty_args), m_local_boundary(false),
- m_result_recorded(true),
- m_sequence(std::numeric_limits<unsigned>::max()) {
- if (!g_global_boundary) {
- g_global_boundary = true;
- m_local_boundary = true;
- m_sequence = GetNextSequenceNumber();
- LLDB_LOG(GetLogIfAllCategoriesSet(LIBLLDB_LOG_API), "{0} ({1})",
- m_pretty_func, m_pretty_args);
- }
-}
-
-Recorder::~Recorder() {
- assert(m_result_recorded && "Did you forget LLDB_RECORD_RESULT?");
- UpdateBoundary();
-}
-
-unsigned Recorder::GetSequenceNumber() const {
- assert(m_sequence != std::numeric_limits<unsigned>::max());
- return m_sequence;
-}
-
-void Recorder::PrivateThread() { g_global_boundary = true; }
-
-void Recorder::UpdateBoundary() {
- if (m_local_boundary)
- g_global_boundary = false;
-}
-
-void InstrumentationData::Initialize(Serializer &serializer,
- Registry &registry) {
- InstanceImpl().emplace(serializer, registry);
-}
-
-void InstrumentationData::Initialize(Deserializer &deserializer,
- Registry &registry) {
- InstanceImpl().emplace(deserializer, registry);
-}
-
-InstrumentationData &InstrumentationData::Instance() {
- if (!InstanceImpl())
- InstanceImpl().emplace();
- return *InstanceImpl();
-}
-
-llvm::Optional<InstrumentationData> &InstrumentationData::InstanceImpl() {
- static llvm::Optional<InstrumentationData> g_instrumentation_data;
- return g_instrumentation_data;
-}
-
-std::atomic<unsigned> lldb_private::repro::Recorder::g_sequence;
-std::mutex lldb_private::repro::Recorder::g_mutex;
diff --git a/contrib/llvm-project/lldb/source/Utility/StringList.cpp b/contrib/llvm-project/lldb/source/Utility/StringList.cpp
index baff34ae3a5e..ee1f157f16f1 100644
--- a/contrib/llvm-project/lldb/source/Utility/StringList.cpp
+++ b/contrib/llvm-project/lldb/source/Utility/StringList.cpp
@@ -42,7 +42,9 @@ void StringList::AppendString(const char *str) {
void StringList::AppendString(const std::string &s) { m_strings.push_back(s); }
-void StringList::AppendString(std::string &&s) { m_strings.push_back(s); }
+void StringList::AppendString(std::string &&s) {
+ m_strings.push_back(std::move(s));
+}
void StringList::AppendString(const char *str, size_t str_len) {
if (str)
@@ -53,6 +55,10 @@ void StringList::AppendString(llvm::StringRef str) {
m_strings.push_back(str.str());
}
+void StringList::AppendString(const llvm::Twine &str) {
+ m_strings.push_back(str.str());
+}
+
void StringList::AppendList(const char **strv, int strc) {
for (int i = 0; i < strc; ++i) {
if (strv[i])
@@ -133,9 +139,9 @@ void StringList::InsertStringAtIndex(size_t idx, const std::string &str) {
void StringList::InsertStringAtIndex(size_t idx, std::string &&str) {
if (idx < m_strings.size())
- m_strings.insert(m_strings.begin() + idx, str);
+ m_strings.insert(m_strings.begin() + idx, std::move(str));
else
- m_strings.push_back(str);
+ m_strings.push_back(std::move(str));
}
void StringList::DeleteStringAtIndex(size_t idx) {
diff --git a/contrib/llvm-project/lldb/source/Utility/Timer.cpp b/contrib/llvm-project/lldb/source/Utility/Timer.cpp
index 2f3afe4c8703..b190f35007d5 100644
--- a/contrib/llvm-project/lldb/source/Utility/Timer.cpp
+++ b/contrib/llvm-project/lldb/source/Utility/Timer.cpp
@@ -63,7 +63,7 @@ Timer::Timer(Timer::Category &category, const char *format, ...)
TimerStack &stack = GetTimerStackForCurrentThread();
stack.push_back(this);
- if (g_quiet && stack.size() <= g_display_depth) {
+ if (!g_quiet && stack.size() <= g_display_depth) {
std::lock_guard<std::mutex> lock(GetFileMutex());
// Indent
@@ -89,7 +89,7 @@ Timer::~Timer() {
Signposts->endInterval(this, m_category.GetName());
TimerStack &stack = GetTimerStackForCurrentThread();
- if (g_quiet && stack.size() <= g_display_depth) {
+ if (!g_quiet && stack.size() <= g_display_depth) {
std::lock_guard<std::mutex> lock(GetFileMutex());
::fprintf(stdout, "%*s%.9f sec (%.9f sec)\n",
int(stack.size() - 1) * TIMER_INDENT_AMOUNT, "",
diff --git a/contrib/llvm-project/lldb/tools/driver/Driver.cpp b/contrib/llvm-project/lldb/tools/driver/Driver.cpp
index 2ed9958e51da..233e0dd977d3 100644
--- a/contrib/llvm-project/lldb/tools/driver/Driver.cpp
+++ b/contrib/llvm-project/lldb/tools/driver/Driver.cpp
@@ -296,6 +296,7 @@ SBError Driver::ProcessArgs(const opt::InputArgList &args, bool &exiting) {
arg_value);
return error;
}
+ m_debugger.SetREPLLanguage(m_option_data.m_repl_lang);
}
if (args.hasArg(OPT_repl)) {
diff --git a/contrib/llvm-project/lldb/tools/lldb-instr/Instrument.cpp b/contrib/llvm-project/lldb/tools/lldb-instr/Instrument.cpp
index 8fbc3e64833c..4b8725396a61 100644
--- a/contrib/llvm-project/lldb/tools/lldb-instr/Instrument.cpp
+++ b/contrib/llvm-project/lldb/tools/lldb-instr/Instrument.cpp
@@ -22,143 +22,6 @@ using namespace clang::tooling;
static llvm::cl::OptionCategory InstrCategory("LLDB Instrumentation Generator");
-/// Get the macro name for recording method calls.
-///
-/// LLDB_RECORD_METHOD
-/// LLDB_RECORD_METHOD_CONST
-/// LLDB_RECORD_METHOD_NO_ARGS
-/// LLDB_RECORD_METHOD_CONST_NO_ARGS
-/// LLDB_RECORD_STATIC_METHOD
-/// LLDB_RECORD_STATIC_METHOD_NO_ARGS
-static std::string GetRecordMethodMacroName(bool Static, bool Const,
- bool NoArgs) {
- std::string Macro;
- llvm::raw_string_ostream OS(Macro);
-
- OS << "LLDB_RECORD";
- if (Static)
- OS << "_STATIC";
- OS << "_METHOD";
- if (Const)
- OS << "_CONST";
- if (NoArgs)
- OS << "_NO_ARGS";
-
- return OS.str();
-}
-
-/// Get the macro name for register methods.
-///
-/// LLDB_REGISTER_CONSTRUCTOR
-/// LLDB_REGISTER_METHOD
-/// LLDB_REGISTER_METHOD_CONST
-/// LLDB_REGISTER_STATIC_METHOD
-static std::string GetRegisterMethodMacroName(bool Static, bool Const) {
- std::string Macro;
- llvm::raw_string_ostream OS(Macro);
-
- OS << "LLDB_REGISTER";
- if (Static)
- OS << "_STATIC";
- OS << "_METHOD";
- if (Const)
- OS << "_CONST";
-
- return OS.str();
-}
-
-static std::string GetRecordMethodMacro(StringRef Result, StringRef Class,
- StringRef Method, StringRef Signature,
- StringRef Values, bool Static,
- bool Const) {
- std::string Macro;
- llvm::raw_string_ostream OS(Macro);
-
- OS << GetRecordMethodMacroName(Static, Const, Values.empty());
- OS << "(" << Result << ", " << Class << ", " << Method;
-
- if (!Values.empty()) {
- OS << ", (" << Signature << "), " << Values << ");\n\n";
- } else {
- OS << ");\n\n";
- }
-
- return OS.str();
-}
-
-static std::string GetRecordConstructorMacro(StringRef Class,
- StringRef Signature,
- StringRef Values) {
- std::string Macro;
- llvm::raw_string_ostream OS(Macro);
- if (!Values.empty()) {
- OS << "LLDB_RECORD_CONSTRUCTOR(" << Class << ", (" << Signature << "), "
- << Values << ");\n\n";
- } else {
- OS << "LLDB_RECORD_CONSTRUCTOR_NO_ARGS(" << Class << ");\n\n";
- }
- return OS.str();
-}
-
-static std::string GetRecordDummyMacro(StringRef Result, StringRef Class,
- StringRef Method, StringRef Signature,
- StringRef Values) {
- assert(!Values.empty());
- std::string Macro;
- llvm::raw_string_ostream OS(Macro);
-
- OS << "LLDB_RECORD_DUMMY(" << Result << ", " << Class << ", " << Method;
- OS << ", (" << Signature << "), " << Values << ");\n\n";
-
- return OS.str();
-}
-
-static std::string GetRegisterConstructorMacro(StringRef Class,
- StringRef Signature) {
- std::string Macro;
- llvm::raw_string_ostream OS(Macro);
- OS << "LLDB_REGISTER_CONSTRUCTOR(" << Class << ", (" << Signature << "));\n";
- return OS.str();
-}
-
-static std::string GetRegisterMethodMacro(StringRef Result, StringRef Class,
- StringRef Method, StringRef Signature,
- bool Static, bool Const) {
- std::string Macro;
- llvm::raw_string_ostream OS(Macro);
- OS << GetRegisterMethodMacroName(Static, Const);
- OS << "(" << Result << ", " << Class << ", " << Method << ", (" << Signature
- << "));\n";
- return OS.str();
-}
-
-class SBReturnVisitor : public RecursiveASTVisitor<SBReturnVisitor> {
-public:
- SBReturnVisitor(Rewriter &R) : MyRewriter(R) {}
-
- bool VisitReturnStmt(ReturnStmt *Stmt) {
- Expr *E = Stmt->getRetValue();
-
- if (E->getBeginLoc().isMacroID())
- return false;
-
- SourceRange R(E->getBeginLoc(), E->getEndLoc());
-
- StringRef WrittenExpr = Lexer::getSourceText(
- CharSourceRange::getTokenRange(R), MyRewriter.getSourceMgr(),
- MyRewriter.getLangOpts());
-
- std::string ReplacementText =
- "LLDB_RECORD_RESULT(" + WrittenExpr.str() + ")";
- MyRewriter.ReplaceText(R, ReplacementText);
-
- return true;
- }
-
-private:
- Rewriter &MyRewriter;
-};
-
class SBVisitor : public RecursiveASTVisitor<SBVisitor> {
public:
SBVisitor(Rewriter &R, ASTContext &Context)
@@ -170,85 +33,40 @@ public:
if (ShouldSkip(Decl))
return false;
- // Skip CXXMethodDecls that already starts with a macro. This should make
- // it easier to rerun the tool to find missing macros.
- Stmt *Body = Decl->getBody();
- for (auto &C : Body->children()) {
- if (C->getBeginLoc().isMacroID())
- return false;
- break;
- }
-
// Print 'bool' instead of '_Bool'.
PrintingPolicy Policy(Context.getLangOpts());
Policy.Bool = true;
- // Unsupported signatures get a dummy macro.
- bool ShouldInsertDummy = false;
-
// Collect the functions parameter types and names.
- std::vector<std::string> ParamTypes;
std::vector<std::string> ParamNames;
- for (auto *P : Decl->parameters()) {
- QualType T = P->getType();
- ParamTypes.push_back(T.getAsString(Policy));
+ if (!Decl->isStatic())
+ ParamNames.push_back("this");
+ for (auto *P : Decl->parameters())
ParamNames.push_back(P->getNameAsString());
- // Currently we don't support functions that have function pointers as an
- // argument, in which case we insert a dummy macro.
- ShouldInsertDummy |= T->isFunctionPointerType();
- }
-
- // Convert the two lists to string for the macros.
- std::string ParamTypesStr = llvm::join(ParamTypes, ", ");
- std::string ParamNamesStr = llvm::join(ParamNames, ", ");
-
- CXXRecordDecl *Record = Decl->getParent();
- QualType ReturnType = Decl->getReturnType();
-
// Construct the macros.
- std::string Macro;
- if (ShouldInsertDummy) {
- // Don't insert a register call for dummy macros.
- Macro = GetRecordDummyMacro(
- ReturnType.getAsString(Policy), Record->getNameAsString(),
- Decl->getNameAsString(), ParamTypesStr, ParamNamesStr);
-
- } else if (isa<CXXConstructorDecl>(Decl)) {
- llvm::outs() << GetRegisterConstructorMacro(Record->getNameAsString(),
- ParamTypesStr);
-
- Macro = GetRecordConstructorMacro(Record->getNameAsString(),
- ParamTypesStr, ParamNamesStr);
+ std::string Buffer;
+ llvm::raw_string_ostream Macro(Buffer);
+ if (ParamNames.empty()) {
+ Macro << "LLDB_INSTRUMENT()";
} else {
- llvm::outs() << GetRegisterMethodMacro(
- ReturnType.getAsString(Policy), Record->getNameAsString(),
- Decl->getNameAsString(), ParamTypesStr, Decl->isStatic(),
- Decl->isConst());
-
- Macro = GetRecordMethodMacro(
- ReturnType.getAsString(Policy), Record->getNameAsString(),
- Decl->getNameAsString(), ParamTypesStr, ParamNamesStr,
- Decl->isStatic(), Decl->isConst());
+ Macro << "LLDB_INSTRUMENT_VA(" << llvm::join(ParamNames, ", ") << ")";
}
- // Insert the macro at the beginning of the function. We don't attempt to
- // fix the formatting and instead rely on clang-format to fix it after the
- // tool has run. This is also the reason that the macros end with two
- // newlines, counting on clang-format to normalize this in case the macro
- // got inserted before an existing newline.
- SourceLocation InsertLoc = Lexer::getLocForEndOfToken(
- Body->getBeginLoc(), 0, MyRewriter.getSourceMgr(),
- MyRewriter.getLangOpts());
- MyRewriter.InsertTextAfter(InsertLoc, Macro);
-
- // If the function returns a class or struct, we need to wrap its return
- // statement(s).
- bool ShouldRecordResult = ReturnType->isStructureOrClassType() ||
- ReturnType->getPointeeCXXRecordDecl();
- if (!ShouldInsertDummy && ShouldRecordResult) {
- SBReturnVisitor Visitor(MyRewriter);
- Visitor.TraverseDecl(Decl);
+ Stmt *Body = Decl->getBody();
+ for (auto &C : Body->children()) {
+ if (C->getBeginLoc().isMacroID()) {
+ CharSourceRange Range =
+ MyRewriter.getSourceMgr().getExpansionRange(C->getSourceRange());
+ MyRewriter.ReplaceText(Range, Macro.str());
+ } else {
+ Macro << ";";
+ SourceLocation InsertLoc = Lexer::getLocForEndOfToken(
+ Body->getBeginLoc(), 0, MyRewriter.getSourceMgr(),
+ MyRewriter.getLangOpts());
+ MyRewriter.InsertTextAfter(InsertLoc, Macro.str());
+ }
+ break;
}
return true;
@@ -321,15 +139,9 @@ class SBAction : public ASTFrontendAction {
public:
SBAction() = default;
- bool BeginSourceFileAction(CompilerInstance &CI) override {
- llvm::outs() << "{\n";
- return true;
- }
+ bool BeginSourceFileAction(CompilerInstance &CI) override { return true; }
- void EndSourceFileAction() override {
- llvm::outs() << "}\n";
- MyRewriter.overwriteChangedFiles();
- }
+ void EndSourceFileAction() override { MyRewriter.overwriteChangedFiles(); }
std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
StringRef File) override {
diff --git a/contrib/llvm-project/lldb/tools/lldb-server/lldb-gdbserver.cpp b/contrib/llvm-project/lldb/tools/lldb-server/lldb-gdbserver.cpp
index 906ae4c378b6..7648a0bb668d 100644
--- a/contrib/llvm-project/lldb/tools/lldb-server/lldb-gdbserver.cpp
+++ b/contrib/llvm-project/lldb/tools/lldb-server/lldb-gdbserver.cpp
@@ -27,6 +27,7 @@
#include "lldb/Host/Socket.h"
#include "lldb/Host/common/NativeProcessProtocol.h"
#include "lldb/Target/Process.h"
+#include "lldb/Utility/Logging.h"
#include "lldb/Utility/Status.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Option/ArgList.h"
diff --git a/contrib/llvm-project/llvm/include/llvm-c/Core.h b/contrib/llvm-project/llvm/include/llvm-c/Core.h
index ae2bcb8444b4..ca3ca24487a5 100644
--- a/contrib/llvm-project/llvm/include/llvm-c/Core.h
+++ b/contrib/llvm-project/llvm/include/llvm-c/Core.h
@@ -4020,8 +4020,13 @@ LLVMValueRef LLVMBuildIsNull(LLVMBuilderRef, LLVMValueRef Val,
const char *Name);
LLVMValueRef LLVMBuildIsNotNull(LLVMBuilderRef, LLVMValueRef Val,
const char *Name);
-LLVMValueRef LLVMBuildPtrDiff(LLVMBuilderRef, LLVMValueRef LHS,
- LLVMValueRef RHS, const char *Name);
+LLVM_ATTRIBUTE_C_DEPRECATED(
+ LLVMValueRef LLVMBuildPtrDiff(LLVMBuilderRef, LLVMValueRef LHS,
+ LLVMValueRef RHS, const char *Name),
+ "Use LLVMBuildPtrDiff2 instead to support opaque pointers");
+LLVMValueRef LLVMBuildPtrDiff2(LLVMBuilderRef, LLVMTypeRef ElemTy,
+ LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
LLVMValueRef LLVMBuildFence(LLVMBuilderRef B, LLVMAtomicOrdering ordering,
LLVMBool singleThread, const char *Name);
LLVMValueRef LLVMBuildAtomicRMW(LLVMBuilderRef B, LLVMAtomicRMWBinOp op,
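
The deprecation follows the opaque-pointer migration: the element type can no
longer be recovered from the pointer operands, so bindings have to pass it
explicitly. A hedged sketch, where builder, lhs and rhs stand for values the
caller already has:

    /* Before (now deprecated): element type inferred from the operands. */
    LLVMValueRef d0 = LLVMBuildPtrDiff(builder, lhs, rhs, "diff");

    /* After: spell out the element type the pointers are indexed by. */
    LLVMValueRef d1 = LLVMBuildPtrDiff2(builder, LLVMInt32Type(), lhs, rhs, "diff");
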
diff --git a/contrib/llvm-project/llvm/include/llvm-c/DebugInfo.h b/contrib/llvm-project/llvm/include/llvm-c/DebugInfo.h
index d7fb898b60d2..a515533f38e2 100644
--- a/contrib/llvm-project/llvm/include/llvm-c/DebugInfo.h
+++ b/contrib/llvm-project/llvm/include/llvm-c/DebugInfo.h
@@ -1102,7 +1102,7 @@ LLVMMetadataRef LLVMDIBuilderGetOrCreateArray(LLVMDIBuilderRef Builder,
* \param Length Length of the address operation array.
*/
LLVMMetadataRef LLVMDIBuilderCreateExpression(LLVMDIBuilderRef Builder,
- int64_t *Addr, size_t Length);
+ uint64_t *Addr, size_t Length);
/**
* Create a new descriptor for the specified variable that does not have an
@@ -1112,7 +1112,7 @@ LLVMMetadataRef LLVMDIBuilderCreateExpression(LLVMDIBuilderRef Builder,
*/
LLVMMetadataRef
LLVMDIBuilderCreateConstantValueExpression(LLVMDIBuilderRef Builder,
- int64_t Value);
+ uint64_t Value);
/**
* Create a new descriptor for the specified variable.
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/APInt.h b/contrib/llvm-project/llvm/include/llvm/ADT/APInt.h
index c2660502a419..b1fc85d3c09d 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/APInt.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/APInt.h
@@ -417,7 +417,7 @@ public:
bool isIntN(unsigned N) const { return getActiveBits() <= N; }
/// Check if this APInt has an N-bits signed integer value.
- bool isSignedIntN(unsigned N) const { return getMinSignedBits() <= N; }
+ bool isSignedIntN(unsigned N) const { return getSignificantBits() <= N; }
/// Check if this APInt's value is a power of two greater than zero.
///
@@ -1069,8 +1069,9 @@ public:
///
/// \returns true if *this < RHS when considered signed.
bool slt(int64_t RHS) const {
- return (!isSingleWord() && getMinSignedBits() > 64) ? isNegative()
- : getSExtValue() < RHS;
+ return (!isSingleWord() && getSignificantBits() > 64)
+ ? isNegative()
+ : getSExtValue() < RHS;
}
/// Unsigned less or equal comparison
@@ -1139,8 +1140,9 @@ public:
///
/// \returns true if *this > RHS when considered signed.
bool sgt(int64_t RHS) const {
- return (!isSingleWord() && getMinSignedBits() > 64) ? !isNegative()
- : getSExtValue() > RHS;
+ return (!isSingleWord() && getSignificantBits() > 64)
+ ? !isNegative()
+ : getSExtValue() > RHS;
}
/// Unsigned greater or equal comparison
@@ -1450,7 +1452,12 @@ public:
/// returns the smallest bit width that will retain the negative value. For
/// example, -1 can be written as 0b1 or 0xFFFFFFFFFF. 0b1 is shorter and so
/// for -1, this function will always return 1.
- unsigned getMinSignedBits() const { return BitWidth - getNumSignBits() + 1; }
+ unsigned getSignificantBits() const {
+ return BitWidth - getNumSignBits() + 1;
+ }
+
+ /// NOTE: This is soft-deprecated. Please use `getSignificantBits()` instead.
+ unsigned getMinSignedBits() const { return getSignificantBits(); }
/// Get zero extended value
///
@@ -1472,7 +1479,7 @@ public:
int64_t getSExtValue() const {
if (isSingleWord())
return SignExtend64(U.VAL, BitWidth);
- assert(getMinSignedBits() <= 64 && "Too many bits for int64_t");
+ assert(getSignificantBits() <= 64 && "Too many bits for int64_t");
return int64_t(U.pVal[0]);
}
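
The rename is mechanical: getMinSignedBits is kept as a forwarder, so existing
callers keep compiling while new code uses the clearer name. For example:

    #include "llvm/ADT/APInt.h"

    llvm::APInt V(/*numBits=*/32, /*val=*/-1, /*isSigned=*/true);
    unsigned Bits = V.getSignificantBits(); // 1: a lone sign bit represents -1
    // V.getMinSignedBits() returns the same value; it now just forwards.
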
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/AllocatorList.h b/contrib/llvm-project/llvm/include/llvm/ADT/AllocatorList.h
index 404a657f27de..04d0afc9d076 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/AllocatorList.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/AllocatorList.h
@@ -13,7 +13,6 @@
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/simple_ilist.h"
#include "llvm/Support/Allocator.h"
-#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/Any.h b/contrib/llvm-project/llvm/include/llvm/ADT/Any.h
index e513586845a1..1b4f2c2fa985 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/Any.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/Any.h
@@ -15,7 +15,8 @@
#ifndef LLVM_ADT_ANY_H
#define LLVM_ADT_ANY_H
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/STLForwardCompat.h"
+#include "llvm/Support/Compiler.h"
#include <cassert>
#include <memory>
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/ArrayRef.h b/contrib/llvm-project/llvm/include/llvm/ADT/ArrayRef.h
index 61f85cfc812b..b6896395dae8 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/ArrayRef.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/ArrayRef.h
@@ -141,7 +141,7 @@ namespace llvm {
template <typename U, typename A>
ArrayRef(const std::vector<U *, A> &Vec,
std::enable_if_t<std::is_convertible<U *const *, T const *>::value>
- * = 0)
+ * = nullptr)
: Data(Vec.data()), Length(Vec.size()) {}
/// @}
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/BitVector.h b/contrib/llvm-project/llvm/include/llvm/ADT/BitVector.h
index cd1964cbdd98..fff4a8f578d2 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/BitVector.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/BitVector.h
@@ -444,6 +444,12 @@ public:
return (Bits[Idx / BITWORD_SIZE] & Mask) != 0;
}
+ /// Return the last element in the vector.
+ bool back() const {
+ assert(!empty() && "Getting last element of empty vector.");
+ return (*this)[size() - 1];
+ }
+
bool test(unsigned Idx) const {
return (*this)[Idx];
}
@@ -465,6 +471,12 @@ public:
set(OldSize);
}
+ /// Pop one bit from the end of the vector.
+ void pop_back() {
+ assert(!empty() && "Empty vector has no element to pop.");
+ resize(size() - 1);
+ }
+
/// Test if any common bits are set.
bool anyCommon(const BitVector &RHS) const {
unsigned ThisWords = Bits.size();
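
Together with the existing push_back, the new back/pop_back round out the
vector-like interface. For example:

    #include "llvm/ADT/BitVector.h"

    llvm::BitVector BV;
    BV.push_back(true);
    BV.push_back(false);
    bool Last = BV.back(); // false
    BV.pop_back();         // drops that bit; BV.size() is now 1
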
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/CoalescingBitVector.h b/contrib/llvm-project/llvm/include/llvm/ADT/CoalescingBitVector.h
index 18803ecf209f..6935c255a099 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/CoalescingBitVector.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/CoalescingBitVector.h
@@ -15,12 +15,12 @@
#define LLVM_ADT_COALESCINGBITVECTOR_H
#include "llvm/ADT/IntervalMap.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include <algorithm>
#include <initializer_list>
namespace llvm {
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/CombinationGenerator.h b/contrib/llvm-project/llvm/include/llvm/ADT/CombinationGenerator.h
index ab6afd555726..a0bec68eaad6 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/CombinationGenerator.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/CombinationGenerator.h
@@ -28,7 +28,7 @@
#define LLVM_ADT_COMBINATIONGENERATOR_H
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/SmallVector.h"
#include <cassert>
#include <cstring>
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/DenseSet.h b/contrib/llvm-project/llvm/include/llvm/ADT/DenseSet.h
index edce7c43773c..e767211a0900 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/DenseSet.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/DenseSet.h
@@ -17,7 +17,6 @@
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/type_traits.h"
-#include <algorithm>
#include <cstddef>
#include <initializer_list>
#include <iterator>
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/DepthFirstIterator.h b/contrib/llvm-project/llvm/include/llvm/ADT/DepthFirstIterator.h
index d4f173ca7caa..42ac61d7cf52 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/DepthFirstIterator.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/DepthFirstIterator.h
@@ -38,7 +38,6 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/iterator_range.h"
#include <iterator>
-#include <set>
#include <utility>
#include <vector>
@@ -231,7 +230,7 @@ iterator_range<df_iterator<T>> depth_first(const T& G) {
}
// Provide global definitions of external depth first iterators...
-template <class T, class SetTy = std::set<typename GraphTraits<T>::NodeRef>>
+template <class T, class SetTy = df_iterator_default_set<typename GraphTraits<T>::NodeRef>>
struct df_ext_iterator : public df_iterator<T, SetTy, true> {
df_ext_iterator(const df_iterator<T, SetTy, true> &V)
: df_iterator<T, SetTy, true>(V) {}
@@ -280,7 +279,7 @@ iterator_range<idf_iterator<T>> inverse_depth_first(const T& G) {
}
// Provide global definitions of external inverse depth first iterators...
-template <class T, class SetTy = std::set<typename GraphTraits<T>::NodeRef>>
+template <class T, class SetTy = df_iterator_default_set<typename GraphTraits<T>::NodeRef>>
struct idf_ext_iterator : public idf_iterator<T, SetTy, true> {
idf_ext_iterator(const idf_iterator<T, SetTy, true> &V)
: idf_iterator<T, SetTy, true>(V) {}
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/GenericCycleInfo.h b/contrib/llvm-project/llvm/include/llvm/ADT/GenericCycleInfo.h
index aad704301e43..7768253e121d 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/GenericCycleInfo.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/GenericCycleInfo.h
@@ -41,8 +41,8 @@
namespace llvm {
-template <typename ContexT> class GenericCycleInfo;
-template <typename ContexT> class GenericCycleInfoCompute;
+template <typename ContextT> class GenericCycleInfo;
+template <typename ContextT> class GenericCycleInfoCompute;
/// A possibly irreducible generalization of a \ref Loop.
template <typename ContextT> class GenericCycle {
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/ImmutableMap.h b/contrib/llvm-project/llvm/include/llvm/ADT/ImmutableMap.h
index 81b21a7319a7..f0e898cafaf9 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/ImmutableMap.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/ImmutableMap.h
@@ -140,44 +140,7 @@ public:
bool isEmpty() const { return !Root; }
- //===--------------------------------------------------===//
- // Foreach - A limited form of map iteration.
- //===--------------------------------------------------===//
-
-private:
- template <typename Callback>
- struct CBWrapper {
- Callback C;
-
- void operator()(value_type_ref V) { C(V.first,V.second); }
- };
-
- template <typename Callback>
- struct CBWrapperRef {
- Callback &C;
-
- CBWrapperRef(Callback& c) : C(c) {}
-
- void operator()(value_type_ref V) { C(V.first,V.second); }
- };
-
public:
- template <typename Callback>
- void foreach(Callback& C) {
- if (Root) {
- CBWrapperRef<Callback> CB(C);
- Root->foreach(CB);
- }
- }
-
- template <typename Callback>
- void foreach() {
- if (Root) {
- CBWrapper<Callback> CB;
- Root->foreach(CB);
- }
- }
-
//===--------------------------------------------------===//
// For testing.
//===--------------------------------------------------===//
@@ -264,7 +227,7 @@ public:
: Root(X.getRootWithoutRetain()), Factory(F.getTreeFactory()) {}
static inline ImmutableMapRef getEmptyMap(FactoryTy *F) {
- return ImmutableMapRef(0, F);
+ return ImmutableMapRef(nullptr, F);
}
void manualRetain() {
@@ -345,7 +308,7 @@ public:
/// which key is the highest in the ordering of keys in the map. This
/// method returns NULL if the map is empty.
value_type* getMaxElement() const {
- return Root ? &(Root->getMaxElement()->getValue()) : 0;
+ return Root ? &(Root->getMaxElement()->getValue()) : nullptr;
}
//===--------------------------------------------------===//
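Editorial note, not part of the diff: with the limited foreach helpers removed above, plain iteration is the remaining way to walk an ImmutableMap. A minimal sketch, assuming the Factory and iterator API the header already provides:

  #include "llvm/ADT/ImmutableMap.h"

  void walkImmutableMap() {
    llvm::ImmutableMap<int, int>::Factory F;
    llvm::ImmutableMap<int, int> M = F.getEmptyMap();
    M = F.add(M, /*Key=*/1, /*Data=*/10);
    M = F.add(M, /*Key=*/2, /*Data=*/20);
    int Sum = 0;
    for (const auto &Entry : M) // Entry is a (key, data) pair
      Sum += Entry.second;      // Sum == 30
  }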
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/ImmutableSet.h b/contrib/llvm-project/llvm/include/llvm/ADT/ImmutableSet.h
index 48b253d3b75e..8cef5acbafaa 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/ImmutableSet.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/ImmutableSet.h
@@ -169,20 +169,6 @@ public:
/// is logarithmic in the size of the tree.
bool contains(key_type_ref K) { return (bool) find(K); }
- /// foreach - A member template the accepts invokes operator() on a functor
- /// object (specified by Callback) for every node/subtree in the tree.
- /// Nodes are visited using an inorder traversal.
- template <typename Callback>
- void foreach(Callback& C) {
- if (ImutAVLTree* L = getLeft())
- L->foreach(C);
-
- C(value);
-
- if (ImutAVLTree* R = getRight())
- R->foreach(C);
- }
-
/// validateTree - A utility method that checks that the balancing and
/// ordering invariants of the tree are satisfied. It is a recursive
/// method that returns the height of the tree, which is then consumed
@@ -1063,12 +1049,6 @@ public:
/// This method runs in constant time.
bool isSingleton() const { return getHeight() == 1; }
- template <typename Callback>
- void foreach(Callback& C) { if (Root) Root->foreach(C); }
-
- template <typename Callback>
- void foreach() { if (Root) { Callback C; Root->foreach(C); } }
-
//===--------------------------------------------------===//
// Iterators.
//===--------------------------------------------------===//
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/MapVector.h b/contrib/llvm-project/llvm/include/llvm/ADT/MapVector.h
index f9540999381a..d281166b3e19 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/MapVector.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/MapVector.h
@@ -18,7 +18,6 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
-#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/Optional.h b/contrib/llvm-project/llvm/include/llvm/ADT/Optional.h
index cdbf2e353241..7d6b3e92f6b2 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/Optional.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/Optional.h
@@ -21,7 +21,6 @@
#include "llvm/Support/Compiler.h"
#include "llvm/Support/type_traits.h"
#include <cassert>
-#include <memory>
#include <new>
#include <utility>
@@ -34,12 +33,12 @@ namespace optional_detail {
/// Storage for any type.
//
// The specialization condition intentionally uses
-// llvm::is_trivially_copy_constructible instead of
-// std::is_trivially_copy_constructible. GCC versions prior to 7.4 may
-// instantiate the copy constructor of `T` when
-// std::is_trivially_copy_constructible is instantiated. This causes
-// compilation to fail if we query the trivially copy constructible property of
-// a class which is not copy constructible.
+// llvm::is_trivially_{copy/move}_constructible instead of
+// std::is_trivially_{copy/move}_constructible. GCC versions prior to 7.4 may
+// instantiate the copy/move constructor of `T` when
+// std::is_trivially_{copy/move}_constructible is instantiated. This causes
+// compilation to fail if we query the trivially copy/move constructible
+// property of a class which is not copy/move constructible.
//
// The current implementation of OptionalStorage insists that in order to use
// the trivial specialization, the value_type must be trivially copy
@@ -50,12 +49,13 @@ namespace optional_detail {
//
// The move constructible / assignable conditions emulate the remaining behavior
// of std::is_trivially_copyable.
-template <typename T, bool = (llvm::is_trivially_copy_constructible<T>::value &&
- std::is_trivially_copy_assignable<T>::value &&
- (std::is_trivially_move_constructible<T>::value ||
- !std::is_move_constructible<T>::value) &&
- (std::is_trivially_move_assignable<T>::value ||
- !std::is_move_assignable<T>::value))>
+template <typename T,
+ bool = (llvm::is_trivially_copy_constructible<T>::value &&
+ std::is_trivially_copy_assignable<T>::value &&
+ (llvm::is_trivially_move_constructible<T>::value ||
+ !std::is_move_constructible<T>::value) &&
+ (std::is_trivially_move_assignable<T>::value ||
+ !std::is_move_assignable<T>::value))>
class OptionalStorage {
union {
char empty;
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/PriorityWorklist.h b/contrib/llvm-project/llvm/include/llvm/ADT/PriorityWorklist.h
index 01dd59a2e71a..e9fbf296973d 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/PriorityWorklist.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/PriorityWorklist.h
@@ -19,7 +19,6 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
-#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/STLExtras.h b/contrib/llvm-project/llvm/include/llvm/ADT/STLExtras.h
index 2d38e153c79e..c3200c926518 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/STLExtras.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/STLExtras.h
@@ -16,8 +16,10 @@
#ifndef LLVM_ADT_STLEXTRAS_H
#define LLVM_ADT_STLEXTRAS_H
+#include "llvm/ADT/identity.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLForwardCompat.h"
+#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Config/abi-breaking.h"
@@ -200,65 +202,6 @@ template <size_t I, typename... Ts>
using TypeAtIndex = std::tuple_element_t<I, std::tuple<Ts...>>;
//===----------------------------------------------------------------------===//
-// Extra additions to <functional>
-//===----------------------------------------------------------------------===//
-
-template <class Ty> struct identity {
- using argument_type = Ty;
-
- Ty &operator()(Ty &self) const {
- return self;
- }
- const Ty &operator()(const Ty &self) const {
- return self;
- }
-};
-
-/// An efficient, type-erasing, non-owning reference to a callable. This is
-/// intended for use as the type of a function parameter that is not used
-/// after the function in question returns.
-///
-/// This class does not own the callable, so it is not in general safe to store
-/// a function_ref.
-template<typename Fn> class function_ref;
-
-template<typename Ret, typename ...Params>
-class function_ref<Ret(Params...)> {
- Ret (*callback)(intptr_t callable, Params ...params) = nullptr;
- intptr_t callable;
-
- template<typename Callable>
- static Ret callback_fn(intptr_t callable, Params ...params) {
- return (*reinterpret_cast<Callable*>(callable))(
- std::forward<Params>(params)...);
- }
-
-public:
- function_ref() = default;
- function_ref(std::nullptr_t) {}
-
- template <typename Callable>
- function_ref(
- Callable &&callable,
- // This is not the copy-constructor.
- std::enable_if_t<!std::is_same<remove_cvref_t<Callable>,
- function_ref>::value> * = nullptr,
- // Functor must be callable and return a suitable type.
- std::enable_if_t<std::is_void<Ret>::value ||
- std::is_convertible<decltype(std::declval<Callable>()(
- std::declval<Params>()...)),
- Ret>::value> * = nullptr)
- : callback(callback_fn<typename std::remove_reference<Callable>::type>),
- callable(reinterpret_cast<intptr_t>(&callable)) {}
-
- Ret operator()(Params ...params) const {
- return callback(callable, std::forward<Params>(params)...);
- }
-
- explicit operator bool() const { return callback; }
-};
-
-//===----------------------------------------------------------------------===//
// Extra additions to <iterator>
//===----------------------------------------------------------------------===//
@@ -416,20 +359,14 @@ auto reverse(ContainerTy &&C,
return make_range(C.rbegin(), C.rend());
}
-// Returns a std::reverse_iterator wrapped around the given iterator.
-template <typename IteratorTy>
-std::reverse_iterator<IteratorTy> make_reverse_iterator(IteratorTy It) {
- return std::reverse_iterator<IteratorTy>(It);
-}
-
// Returns an iterator_range over the given container which iterates in reverse.
// Note that the container must have begin()/end() methods which return
// bidirectional iterators for this to work.
template <typename ContainerTy>
auto reverse(ContainerTy &&C,
std::enable_if_t<!has_rbegin<ContainerTy>::value> * = nullptr) {
- return make_range(llvm::make_reverse_iterator(std::end(C)),
- llvm::make_reverse_iterator(std::begin(C)));
+ return make_range(std::make_reverse_iterator(std::end(C)),
+ std::make_reverse_iterator(std::begin(C)));
}
/// An iterator adaptor that filters the elements of given inner iterators.
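Editorial note, not part of the diff: the reverse() overload above now defers to std::make_reverse_iterator for containers without rbegin()/rend(). A minimal sketch of that path, for illustration only:

  #include "llvm/ADT/STLExtras.h"
  #include <vector>

  void reverseWithoutRbegin() {
    std::vector<int> V = {1, 2, 3};
    // iterator_range has no rbegin()/rend(), so reverse() takes the
    // std::make_reverse_iterator-based overload changed above.
    for (int X : llvm::reverse(llvm::make_range(V.begin(), V.end())))
      (void)X; // visits 3, 2, 1
  }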
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/STLFunctionalExtras.h b/contrib/llvm-project/llvm/include/llvm/ADT/STLFunctionalExtras.h
new file mode 100644
index 000000000000..ebe1b1521a5d
--- /dev/null
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/STLFunctionalExtras.h
@@ -0,0 +1,76 @@
+//===- llvm/ADT/STLFunctionalExtras.h - Extras for <functional> -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains some extensions to <functional>.
+//
+// No library is required when using these functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STLFUNCTIONALEXTRAS_H
+#define LLVM_ADT_STLFUNCTIONALEXTRAS_H
+
+#include "llvm/ADT/STLForwardCompat.h"
+
+#include <type_traits>
+#include <utility>
+#include <cstdint>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+// Extra additions to <functional>
+//===----------------------------------------------------------------------===//
+
+/// An efficient, type-erasing, non-owning reference to a callable. This is
+/// intended for use as the type of a function parameter that is not used
+/// after the function in question returns.
+///
+/// This class does not own the callable, so it is not in general safe to store
+/// a function_ref.
+template<typename Fn> class function_ref;
+
+template<typename Ret, typename ...Params>
+class function_ref<Ret(Params...)> {
+ Ret (*callback)(intptr_t callable, Params ...params) = nullptr;
+ intptr_t callable;
+
+ template<typename Callable>
+ static Ret callback_fn(intptr_t callable, Params ...params) {
+ return (*reinterpret_cast<Callable*>(callable))(
+ std::forward<Params>(params)...);
+ }
+
+public:
+ function_ref() = default;
+ function_ref(std::nullptr_t) {}
+
+ template <typename Callable>
+ function_ref(
+ Callable &&callable,
+ // This is not the copy-constructor.
+ std::enable_if_t<!std::is_same<remove_cvref_t<Callable>,
+ function_ref>::value> * = nullptr,
+ // Functor must be callable and return a suitable type.
+ std::enable_if_t<std::is_void<Ret>::value ||
+ std::is_convertible<decltype(std::declval<Callable>()(
+ std::declval<Params>()...)),
+ Ret>::value> * = nullptr)
+ : callback(callback_fn<typename std::remove_reference<Callable>::type>),
+ callable(reinterpret_cast<intptr_t>(&callable)) {}
+
+ Ret operator()(Params ...params) const {
+ return callback(callable, std::forward<Params>(params)...);
+ }
+
+ explicit operator bool() const { return callback; }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_STLFUNCTIONALEXTRAS_H
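Editorial note, not part of the diff: the relocated function_ref is typically used as a parameter type for callbacks that do not outlive the call. A minimal sketch with a hypothetical helper (applyTwice is not an LLVM API):

  #include "llvm/ADT/STLFunctionalExtras.h"

  // Takes a non-owning reference to any callable of matching signature.
  static int applyTwice(llvm::function_ref<int(int)> Fn, int X) {
    return Fn(Fn(X));
  }

  int useApplyTwice() {
    int Base = 10;
    return applyTwice([Base](int V) { return V + Base; }, 1); // 21
  }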
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/ScopedHashTable.h b/contrib/llvm-project/llvm/include/llvm/ADT/ScopedHashTable.h
index a5e57c6a16c2..48544961d095 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/ScopedHashTable.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/ScopedHashTable.h
@@ -197,7 +197,7 @@ public:
using iterator = ScopedHashTableIterator<K, V, KInfo>;
- iterator end() { return iterator(0); }
+ iterator end() { return iterator(nullptr); }
iterator begin(const K &Key) {
typename DenseMap<K, ValTy*, KInfo>::iterator I =
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/SetVector.h b/contrib/llvm-project/llvm/include/llvm/ADT/SetVector.h
index 32bcd50966cc..82d5e98afb5d 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/SetVector.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/SetVector.h
@@ -23,7 +23,6 @@
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Compiler.h"
-#include <algorithm>
#include <cassert>
#include <iterator>
#include <vector>
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/SmallBitVector.h b/contrib/llvm-project/llvm/include/llvm/ADT/SmallBitVector.h
index 51ee5dbbce05..17be317a10d7 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/SmallBitVector.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/SmallBitVector.h
@@ -462,6 +462,12 @@ public:
return getPointer()->operator[](Idx);
}
+ /// Return the last element in the vector.
+ bool back() const {
+ assert(!empty() && "Getting last element of empty vector.");
+ return (*this)[size() - 1];
+ }
+
bool test(unsigned Idx) const {
return (*this)[Idx];
}
@@ -471,6 +477,12 @@ public:
resize(size() + 1, Val);
}
+ /// Pop one bit from the end of the vector.
+ void pop_back() {
+ assert(!empty() && "Empty vector has no element to pop.");
+ resize(size() - 1);
+ }
+
/// Test if any common bits are set.
bool anyCommon(const SmallBitVector &RHS) const {
if (isSmall() && RHS.isSmall())
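Editorial note, not part of the diff: the new back()/pop_back() pair lets SmallBitVector serve as a small stack of bits. A minimal sketch:

  #include "llvm/ADT/SmallBitVector.h"

  void bitStack() {
    llvm::SmallBitVector BV;
    BV.push_back(true);
    BV.push_back(false);
    bool Last = BV.back(); // false
    BV.pop_back();         // one bit remains
    (void)Last;
  }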
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/SmallSet.h b/contrib/llvm-project/llvm/include/llvm/ADT/SmallSet.h
index 0600e528ee69..fe4f74eac85d 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/SmallSet.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/SmallSet.h
@@ -16,6 +16,7 @@
#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/type_traits.h"
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/SmallString.h b/contrib/llvm-project/llvm/include/llvm/ADT/SmallString.h
index 56b0639b98cc..81243af1f97d 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/SmallString.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/SmallString.h
@@ -70,16 +70,16 @@ public:
/// Append from a list of StringRefs.
void append(std::initializer_list<StringRef> Refs) {
- size_t SizeNeeded = this->size();
+ size_t CurrentSize = this->size();
+ size_t SizeNeeded = CurrentSize;
for (const StringRef &Ref : Refs)
SizeNeeded += Ref.size();
- this->reserve(SizeNeeded);
- auto CurEnd = this->end();
+ this->resize_for_overwrite(SizeNeeded);
for (const StringRef &Ref : Refs) {
- this->uninitialized_copy(Ref.begin(), Ref.end(), CurEnd);
- CurEnd += Ref.size();
+ std::copy(Ref.begin(), Ref.end(), this->begin() + CurrentSize);
+ CurrentSize += Ref.size();
}
- this->set_size(SizeNeeded);
+ assert(CurrentSize == this->size());
}
/// @}
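Editorial note, not part of the diff: the rewritten append() sizes the buffer once with resize_for_overwrite and then copies each piece into place. A minimal usage sketch:

  #include "llvm/ADT/SmallString.h"

  void buildPath() {
    llvm::SmallString<32> Path("usr");
    Path.append({"/", "local", "/", "bin"});
    // Path now holds "usr/local/bin".
  }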
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/SmallVector.h b/contrib/llvm-project/llvm/include/llvm/ADT/SmallVector.h
index 804567ebe3f1..466acb83d466 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/SmallVector.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/SmallVector.h
@@ -13,10 +13,7 @@
#ifndef LLVM_ADT_SMALLVECTOR_H
#define LLVM_ADT_SMALLVECTOR_H
-#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Compiler.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MemAlloc.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
@@ -34,6 +31,8 @@
namespace llvm {
+template <typename IteratorT> class iterator_range;
+
/// This is all the stuff common to all SmallVectors.
///
/// The template parameter specifies the type which should be used to hold the
@@ -72,15 +71,11 @@ public:
LLVM_NODISCARD bool empty() const { return !Size; }
+protected:
/// Set the array size to \p N, which the current array must have enough
/// capacity for.
///
/// This does not construct or destroy any elements in the vector.
- ///
- /// Clients can use this in conjunction with capacity() to write past the end
- /// of the buffer when they know that more elements are available, and only
- /// update the size later. This avoids the cost of value initializing elements
- /// which will only be overwritten.
void set_size(size_t N) {
assert(N <= capacity());
Size = N;
@@ -588,6 +583,9 @@ public:
}
private:
+ // Make set_size() private to avoid misuse in subclasses.
+ using SuperClass::set_size;
+
template <bool ForOverwrite> void resizeImpl(size_type N) {
if (N == this->size())
return;
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/SparseMultiSet.h b/contrib/llvm-project/llvm/include/llvm/ADT/SparseMultiSet.h
index fec0a70a0bef..f63cef936433 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/SparseMultiSet.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/SparseMultiSet.h
@@ -20,7 +20,7 @@
#ifndef LLVM_ADT_SPARSEMULTISET_H
#define LLVM_ADT_SPARSEMULTISET_H
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/identity.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SparseSet.h"
#include <cassert>
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/SparseSet.h b/contrib/llvm-project/llvm/include/llvm/ADT/SparseSet.h
index d8acf1ee2f3a..e66d76ad88e1 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/SparseSet.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/SparseSet.h
@@ -19,7 +19,7 @@
#ifndef LLVM_ADT_SPARSESET_H
#define LLVM_ADT_SPARSESET_H
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/identity.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/AllocatorBase.h"
#include <cassert>
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/StringExtras.h b/contrib/llvm-project/llvm/include/llvm/ADT/StringExtras.h
index 2ca672e7855b..81a0954226d6 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/StringExtras.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/StringExtras.h
@@ -29,14 +29,15 @@
namespace llvm {
-template<typename T> class SmallVectorImpl;
class raw_ostream;
/// hexdigit - Return the hexadecimal character for the
/// given number \p X (which should be less than 16).
inline char hexdigit(unsigned X, bool LowerCase = false) {
- const char HexChar = LowerCase ? 'a' : 'A';
- return X < 10 ? '0' + X : HexChar + X - 10;
+ assert(X < 16);
+ static const char LUT[] = "0123456789ABCDEF";
+ const uint8_t Offset = LowerCase ? 32 : 0;
+ return LUT[X] | Offset;
}
/// Given an array of c-style strings terminated by a null pointer, construct
@@ -164,23 +165,26 @@ inline std::string utohexstr(uint64_t X, bool LowerCase = false) {
/// Convert buffer \p Input to its hexadecimal representation.
/// The returned string is double the size of \p Input.
-inline std::string toHex(StringRef Input, bool LowerCase = false) {
- static const char *const LUT = "0123456789ABCDEF";
- const uint8_t Offset = LowerCase ? 32 : 0;
- size_t Length = Input.size();
-
- std::string Output;
- Output.reserve(2 * Length);
- for (size_t i = 0; i < Length; ++i) {
- const unsigned char c = Input[i];
- Output.push_back(LUT[c >> 4] | Offset);
- Output.push_back(LUT[c & 15] | Offset);
+inline void toHex(ArrayRef<uint8_t> Input, bool LowerCase,
+ SmallVectorImpl<char> &Output) {
+ const size_t Length = Input.size();
+ Output.resize_for_overwrite(Length * 2);
+
+ for (size_t i = 0; i < Length; i++) {
+ const uint8_t c = Input[i];
+ Output[i * 2 ] = hexdigit(c >> 4, LowerCase);
+ Output[i * 2 + 1] = hexdigit(c & 15, LowerCase);
}
- return Output;
}
inline std::string toHex(ArrayRef<uint8_t> Input, bool LowerCase = false) {
- return toHex(toStringRef(Input), LowerCase);
+ SmallString<16> Output;
+ toHex(Input, LowerCase, Output);
+ return std::string(Output);
+}
+
+inline std::string toHex(StringRef Input, bool LowerCase = false) {
+ return toHex(arrayRefFromStringRef(Input), LowerCase);
}
/// Store the binary representation of the two provided values, \p MSB and
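Editorial note, not part of the diff: after this change hexdigit() does the per-nibble work and the string-returning toHex() overloads are thin wrappers over the SmallVector-based one. A minimal sketch:

  #include "llvm/ADT/StringExtras.h"
  #include <string>

  void hexDemo() {
    char D = llvm::hexdigit(12, /*LowerCase=*/true);    // 'c'
    std::string H = llvm::toHex(llvm::StringRef("AB")); // "4142"
    (void)D; (void)H;
  }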
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/StringMap.h b/contrib/llvm-project/llvm/include/llvm/ADT/StringMap.h
index 669956d41e0c..562a2ff1a192 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/StringMap.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/StringMap.h
@@ -14,6 +14,7 @@
#define LLVM_ADT_STRINGMAP_H
#include "llvm/ADT/StringMapEntry.h"
+#include "llvm/ADT/iterator.h"
#include "llvm/Support/AllocatorBase.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include <initializer_list>
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/StringMapEntry.h b/contrib/llvm-project/llvm/include/llvm/ADT/StringMapEntry.h
index 8bfad5540230..120d4f3ca4bc 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/StringMapEntry.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/StringMapEntry.h
@@ -15,7 +15,9 @@
#ifndef LLVM_ADT_STRINGMAPENTRY_H
#define LLVM_ADT_STRINGMAPENTRY_H
+#include "llvm/ADT/None.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/STLFunctionalExtras.h"
namespace llvm {
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/StringRef.h b/contrib/llvm-project/llvm/include/llvm/ADT/StringRef.h
index 3950910f0635..118def2f43e1 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/StringRef.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/StringRef.h
@@ -9,7 +9,8 @@
#ifndef LLVM_ADT_STRINGREF_H
#define LLVM_ADT_STRINGREF_H
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Compiler.h"
#include <algorithm>
@@ -877,6 +878,25 @@ namespace llvm {
return ltrim(Chars).rtrim(Chars);
}
+ /// Detect the line ending style of the string.
+ ///
+ /// If the string contains a line ending, return the line ending character
+ /// sequence that is detected. Otherwise return '\n' for unix line endings.
+ ///
+ /// \return - The line ending character sequence.
+ LLVM_NODISCARD
+ StringRef detectEOL() const {
+ size_t Pos = find('\r');
+ if (Pos == npos) {
+ // If there is no carriage return, assume unix
+ return "\n";
+ }
+ if (Pos + 1 < Length && Data[Pos + 1] == '\n')
+ return "\r\n"; // Windows
+ if (Pos > 0 && Data[Pos - 1] == '\n')
+ return "\n\r"; // You monster!
+ return "\r"; // Classic Mac
+ }
/// @}
};
@@ -959,13 +979,7 @@ namespace llvm {
reinterpret_cast<const char *>(~static_cast<uintptr_t>(1)), 0);
}
- static unsigned getHashValue(StringRef Val) {
- assert(Val.data() != getEmptyKey().data() &&
- "Cannot hash the empty key!");
- assert(Val.data() != getTombstoneKey().data() &&
- "Cannot hash the tombstone key!");
- return (unsigned)(hash_value(Val));
- }
+ static unsigned getHashValue(StringRef Val);
static bool isEqual(StringRef LHS, StringRef RHS) {
if (RHS.data() == getEmptyKey().data())
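Editorial note, not part of the diff: detectEOL() scans for the first '\r' and falls back to "\n" when none is present. A minimal sketch:

  #include "llvm/ADT/StringRef.h"

  void eolDemo() {
    llvm::StringRef Dos("first\r\nsecond\r\n");
    llvm::StringRef Unix("first\nsecond\n");
    llvm::StringRef A = Dos.detectEOL();  // "\r\n"
    llvm::StringRef B = Unix.detectEOL(); // "\n" (no carriage return found)
    (void)A; (void)B;
  }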
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/StringSwitch.h b/contrib/llvm-project/llvm/include/llvm/ADT/StringSwitch.h
index 726d67c8f24a..4b7882d7ca10 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/StringSwitch.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/StringSwitch.h
@@ -12,6 +12,7 @@
#ifndef LLVM_ADT_STRINGSWITCH_H
#define LLVM_ADT_STRINGSWITCH_H
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Compiler.h"
#include <cassert>
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/Triple.h b/contrib/llvm-project/llvm/include/llvm/ADT/Triple.h
index 5dbd4f16bfd5..0f0a7b08b5d3 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/Triple.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/Triple.h
@@ -107,9 +107,11 @@ public:
enum SubArchType {
NoSubArch,
+ ARMSubArch_v9_3a,
ARMSubArch_v9_2a,
ARMSubArch_v9_1a,
ARMSubArch_v9,
+ ARMSubArch_v8_8a,
ARMSubArch_v8_7a,
ARMSubArch_v8_6a,
ARMSubArch_v8_5a,
@@ -270,9 +272,7 @@ public:
/// Default constructor is the same as an empty string and leaves all
/// triple fields unknown.
- Triple()
- : Data(), Arch(), SubArch(), Vendor(), OS(), Environment(),
- ObjectFormat() {}
+ Triple() : Arch(), SubArch(), Vendor(), OS(), Environment(), ObjectFormat() {}
explicit Triple(const Twine &Str);
Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr);
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/identity.h b/contrib/llvm-project/llvm/include/llvm/ADT/identity.h
new file mode 100644
index 000000000000..f07bb6fb39f6
--- /dev/null
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/identity.h
@@ -0,0 +1,34 @@
+//===- llvm/ADT/Identity.h - Provide std::identity from C++20 ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides an implementation of std::identity from C++20.
+//
+// No library is required when using these functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_IDENTITY_H
+#define LLVM_ADT_IDENTITY_H
+
+
+namespace llvm {
+
+template <class Ty> struct identity {
+ using argument_type = Ty;
+
+ Ty &operator()(Ty &self) const {
+ return self;
+ }
+ const Ty &operator()(const Ty &self) const {
+ return self;
+ }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_IDENTITY_H
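Editorial note, not part of the diff: llvm::identity mirrors C++20 std::identity and simply returns its argument, which makes it useful as a default key or projection functor. A minimal sketch:

  #include "llvm/ADT/identity.h"

  void identityDemo() {
    int X = 7;
    llvm::identity<int> Id;
    int &Ref = Id(X); // returns its argument unchanged
    Ref = 8;          // X is now 8
  }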
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/ilist.h b/contrib/llvm-project/llvm/include/llvm/ADT/ilist.h
index d5a1f286b177..b3aa26f2454d 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/ilist.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/ilist.h
@@ -103,7 +103,7 @@ template <class TraitsT, class NodeT> struct HasGetNext {
template <size_t N> struct SFINAE {};
template <class U>
- static Yes &test(U *I, decltype(I->getNext(&make<NodeT>())) * = 0);
+ static Yes &test(U *I, decltype(I->getNext(&make<NodeT>())) * = nullptr);
template <class> static No &test(...);
public:
@@ -117,7 +117,7 @@ template <class TraitsT> struct HasCreateSentinel {
typedef char No[2];
template <class U>
- static Yes &test(U *I, decltype(I->createSentinel()) * = 0);
+ static Yes &test(U *I, decltype(I->createSentinel()) * = nullptr);
template <class> static No &test(...);
public:
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/AliasAnalysis.h b/contrib/llvm-project/llvm/include/llvm/Analysis/AliasAnalysis.h
index 2770a1a9b277..d4febe6c1db9 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/AliasAnalysis.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/AliasAnalysis.h
@@ -60,7 +60,6 @@ class CatchReturnInst;
class DominatorTree;
class FenceInst;
class Function;
-class InvokeInst;
class LoopInfo;
class PreservedAnalyses;
class TargetLibraryInfo;
@@ -679,7 +678,7 @@ public:
/// Checks if functions with the specified behavior are known to only write
/// memory (or not access memory at all).
- static bool doesNotReadMemory(FunctionModRefBehavior MRB) {
+ static bool onlyWritesMemory(FunctionModRefBehavior MRB) {
return !isRefSet(createModRefInfo(MRB));
}
@@ -1268,6 +1267,14 @@ bool isIdentifiedObject(const Value *V);
/// IdentifiedObjects.
bool isIdentifiedFunctionLocal(const Value *V);
+/// Return true if Object memory is not visible after an unwind, in the sense
+/// that program semantics cannot depend on Object containing any particular
+/// value on unwind. If the RequiresNoCaptureBeforeUnwind out parameter is set
+/// to true, then the memory is only not visible if the object has not been
+/// captured prior to the unwind. Otherwise it is not visible even if captured.
+bool isNotVisibleOnUnwind(const Value *Object,
+ bool &RequiresNoCaptureBeforeUnwind);
+
/// A manager for alias analyses.
///
/// This class can have analyses registered with it and when run, it will run
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/AliasAnalysisEvaluator.h b/contrib/llvm-project/llvm/include/llvm/Analysis/AliasAnalysisEvaluator.h
index 972eceaa3ba9..043b1b7ca2dc 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/AliasAnalysisEvaluator.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/AliasAnalysisEvaluator.h
@@ -31,17 +31,15 @@ namespace llvm {
class AAResults;
class AAEvaluator : public PassInfoMixin<AAEvaluator> {
- int64_t FunctionCount;
- int64_t NoAliasCount, MayAliasCount, PartialAliasCount, MustAliasCount;
- int64_t NoModRefCount, ModCount, RefCount, ModRefCount;
- int64_t MustCount, MustRefCount, MustModCount, MustModRefCount;
+ int64_t FunctionCount = 0;
+ int64_t NoAliasCount = 0, MayAliasCount = 0, PartialAliasCount = 0;
+ int64_t MustAliasCount = 0;
+ int64_t NoModRefCount = 0, ModCount = 0, RefCount = 0, ModRefCount = 0;
+ int64_t MustCount = 0, MustRefCount = 0, MustModCount = 0;
+ int64_t MustModRefCount = 0;
public:
- AAEvaluator()
- : FunctionCount(), NoAliasCount(), MayAliasCount(), PartialAliasCount(),
- MustAliasCount(), NoModRefCount(), ModCount(), RefCount(),
- ModRefCount(), MustCount(), MustRefCount(), MustModCount(),
- MustModRefCount() {}
+ AAEvaluator() = default;
AAEvaluator(AAEvaluator &&Arg)
: FunctionCount(Arg.FunctionCount), NoAliasCount(Arg.NoAliasCount),
MayAliasCount(Arg.MayAliasCount),
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/BasicAliasAnalysis.h b/contrib/llvm-project/llvm/include/llvm/Analysis/BasicAliasAnalysis.h
index ed9d1ba4c5a7..97dda58109e9 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/BasicAliasAnalysis.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/BasicAliasAnalysis.h
@@ -25,7 +25,6 @@
namespace llvm {
-struct AAMDNodes;
class AssumptionCache;
class BasicBlock;
class DataLayout;
@@ -58,7 +57,7 @@ public:
BasicAAResult(const DataLayout &DL, const Function &F,
const TargetLibraryInfo &TLI, AssumptionCache &AC,
DominatorTree *DT = nullptr, PhiValues *PV = nullptr)
- : AAResultBase(), DL(DL), F(F), TLI(TLI), AC(AC), DT(DT), PV(PV) {}
+ : DL(DL), F(F), TLI(TLI), AC(AC), DT(DT), PV(PV) {}
BasicAAResult(const BasicAAResult &Arg)
: AAResultBase(Arg), DL(Arg.DL), F(Arg.F), TLI(Arg.TLI), AC(Arg.AC),
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h b/contrib/llvm-project/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h
index f581b18bff17..858dd369dd0b 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h
@@ -14,6 +14,7 @@
#ifndef LLVM_ANALYSIS_BLOCKFREQUENCYINFOIMPL_H
#define LLVM_ANALYSIS_BLOCKFREQUENCYINFOIMPL_H
+#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/GraphTraits.h"
@@ -1451,7 +1452,7 @@ void BlockFrequencyInfoImpl<BT>::iterativeInference(
// frequencies need to be updated based on the incoming edges.
// The set is dynamic and changes after every update. Initially all blocks
// with a positive frequency are active
- auto IsActive = std::vector<bool>(Freq.size(), false);
+ auto IsActive = BitVector(Freq.size(), false);
std::queue<size_t> ActiveSet;
for (size_t I = 0; I < Freq.size(); I++) {
if (Freq[I] > 0) {
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/CFLAliasAnalysisUtils.h b/contrib/llvm-project/llvm/include/llvm/Analysis/CFLAliasAnalysisUtils.h
index 02f999a5b913..2eae2824bec3 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/CFLAliasAnalysisUtils.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/CFLAliasAnalysisUtils.h
@@ -50,8 +50,8 @@ static inline const Function *parentFunctionOfValue(const Value *Val) {
if (auto *Arg = dyn_cast<Argument>(Val))
return Arg->getParent();
return nullptr;
+}
} // namespace cflaa
} // namespace llvm
-}
#endif // LLVM_ANALYSIS_CFLALIASANALYSISUTILS_H
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/ConstantFolding.h b/contrib/llvm-project/llvm/include/llvm/Analysis/ConstantFolding.h
index 45fb879f0c1f..37258c80e3a3 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/ConstantFolding.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/ConstantFolding.h
@@ -148,12 +148,11 @@ Constant *ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset,
Constant *ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
const DataLayout &DL);
-/// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a
-/// getelementptr constantexpr, return the constant value being addressed by the
-/// constant expression, or null if something is funny and we can't decide.
-Constant *ConstantFoldLoadThroughGEPConstantExpr(Constant *C, ConstantExpr *CE,
- Type *Ty,
- const DataLayout &DL);
+/// If C is a uniform value where all bits are the same (either all zero, all
+/// ones, all undef or all poison), return the corresponding uniform value in
+/// the new type. If the value is not uniform or the result cannot be
+/// represented, return null.
+Constant *ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty);
/// canConstantFoldCallTo - Return true if its even possible to fold a call to
/// the specified function.
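Editorial note, not part of the diff: a rough sketch of what the new ConstantFoldLoadFromUniformValue() is documented to do, assuming the usual IR construction APIs; treat it as an illustration of the comment above, not authoritative usage:

  #include "llvm/Analysis/ConstantFolding.h"
  #include "llvm/IR/Constants.h"
  #include "llvm/IR/DerivedTypes.h"
  #include "llvm/IR/LLVMContext.h"

  void uniformFold(llvm::LLVMContext &Ctx) {
    llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
    llvm::StructType *STy = llvm::StructType::get(I32, I32);
    // An all-zero aggregate is uniform, so reading it as i32 should fold to 0.
    llvm::Constant *Zero = llvm::ConstantAggregateZero::get(STy);
    llvm::Constant *Folded = llvm::ConstantFoldLoadFromUniformValue(Zero, I32);
    (void)Folded;
  }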
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/ConstraintSystem.h b/contrib/llvm-project/llvm/include/llvm/Analysis/ConstraintSystem.h
index d5b8f208172b..d7800f578325 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/ConstraintSystem.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/ConstraintSystem.h
@@ -73,7 +73,7 @@ public:
return R;
}
- bool isConditionImplied(SmallVector<int64_t, 8> R);
+ bool isConditionImplied(SmallVector<int64_t, 8> R) const;
void popLastConstraint() { Constraints.pop_back(); }
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/DDG.h b/contrib/llvm-project/llvm/include/llvm/Analysis/DDG.h
index 51dd4a738f00..4ea589ec7efc 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/DDG.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/DDG.h
@@ -52,7 +52,7 @@ public:
};
DDGNode() = delete;
- DDGNode(const NodeKind K) : DDGNodeBase(), Kind(K) {}
+ DDGNode(const NodeKind K) : Kind(K) {}
DDGNode(const DDGNode &N) : DDGNodeBase(N), Kind(N.Kind) {}
DDGNode(DDGNode &&N) : DDGNodeBase(std::move(N)), Kind(N.Kind) {}
virtual ~DDGNode() = 0;
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/DOTGraphTraitsPass.h b/contrib/llvm-project/llvm/include/llvm/Analysis/DOTGraphTraitsPass.h
index 59737744f576..d8021907b5b2 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/DOTGraphTraitsPass.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/DOTGraphTraitsPass.h
@@ -181,6 +181,25 @@ private:
std::string Name;
};
+template <typename GraphT>
+void WriteDOTGraphToFile(Function &F, GraphT &&Graph,
+ std::string FileNamePrefix, bool IsSimple) {
+ std::string Filename = FileNamePrefix + "." + F.getName().str() + ".dot";
+ std::error_code EC;
+
+ errs() << "Writing '" << Filename << "'...";
+
+ raw_fd_ostream File(Filename, EC, sys::fs::OF_TextWithCRLF);
+ std::string GraphName = DOTGraphTraits<GraphT>::getGraphName(Graph);
+ std::string Title = GraphName + " for '" + F.getName().str() + "' function";
+
+ if (!EC)
+ WriteGraph(File, Graph, IsSimple, Title);
+ else
+ errs() << " error opening file for writing!";
+ errs() << "\n";
+}
+
} // end namespace llvm
#endif
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/DependenceAnalysis.h b/contrib/llvm-project/llvm/include/llvm/Analysis/DependenceAnalysis.h
index 305c9b1d88f2..8c852e85b04a 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/DependenceAnalysis.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/DependenceAnalysis.h
@@ -74,12 +74,8 @@ namespace llvm {
Dependence &operator=(Dependence &&) = default;
public:
- Dependence(Instruction *Source,
- Instruction *Destination) :
- Src(Source),
- Dst(Destination),
- NextPredecessor(nullptr),
- NextSuccessor(nullptr) {}
+ Dependence(Instruction *Source, Instruction *Destination)
+ : Src(Source), Dst(Destination) {}
virtual ~Dependence() {}
/// Dependence::DVEntry - Each level in the distance/direction vector
@@ -99,9 +95,10 @@ namespace llvm {
bool PeelFirst : 1; // Peeling the first iteration will break dependence.
bool PeelLast : 1; // Peeling the last iteration will break the dependence.
bool Splitable : 1; // Splitting the loop will break dependence.
- const SCEV *Distance; // NULL implies no distance available.
- DVEntry() : Direction(ALL), Scalar(true), PeelFirst(false),
- PeelLast(false), Splitable(false), Distance(nullptr) { }
+ const SCEV *Distance = nullptr; // NULL implies no distance available.
+ DVEntry()
+ : Direction(ALL), Scalar(true), PeelFirst(false), PeelLast(false),
+ Splitable(false) {}
};
/// getSrc - Returns the source instruction for this dependence.
@@ -200,7 +197,7 @@ namespace llvm {
private:
Instruction *Src, *Dst;
- const Dependence *NextPredecessor, *NextSuccessor;
+ const Dependence *NextPredecessor = nullptr, *NextSuccessor = nullptr;
friend class DependenceInfo;
};
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/DivergenceAnalysis.h b/contrib/llvm-project/llvm/include/llvm/Analysis/DivergenceAnalysis.h
index 6f759a81fdef..c52b42ae8dc2 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/DivergenceAnalysis.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/DivergenceAnalysis.h
@@ -22,7 +22,6 @@
#include <vector>
namespace llvm {
-class Module;
class Value;
class Instruction;
class Loop;
@@ -147,7 +146,7 @@ class DivergenceInfo {
// analysis can run indefinitely. We set ContainsIrreducible and no
// analysis is actually performed on the function. All values in
// this function are conservatively reported as divergent instead.
- bool ContainsIrreducible;
+ bool ContainsIrreducible = false;
std::unique_ptr<SyncDependenceAnalysis> SDA;
std::unique_ptr<DivergenceAnalysisImpl> DA;
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/DomPrinter.h b/contrib/llvm-project/llvm/include/llvm/Analysis/DomPrinter.h
index a177f877b295..e6df12d88072 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/DomPrinter.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/DomPrinter.h
@@ -14,6 +14,20 @@
#ifndef LLVM_ANALYSIS_DOMPRINTER_H
#define LLVM_ANALYSIS_DOMPRINTER_H
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+class DomTreePrinterPass : public PassInfoMixin<DomTreePrinterPass> {
+public:
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+class DomTreeOnlyPrinterPass : public PassInfoMixin<DomTreeOnlyPrinterPass> {
+public:
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+} // namespace llvm
+
namespace llvm {
class FunctionPass;
FunctionPass *createDomPrinterPass();
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/IRSimilarityIdentifier.h b/contrib/llvm-project/llvm/include/llvm/Analysis/IRSimilarityIdentifier.h
index 51c5c620230b..7b81d5754930 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/IRSimilarityIdentifier.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/IRSimilarityIdentifier.h
@@ -121,13 +121,23 @@ struct IRInstructionData
/// and is used when checking when two instructions are considered similar.
/// If either instruction is not legal, the instructions are automatically not
/// considered similar.
- bool Legal;
+ bool Legal = false;
/// This is only relevant if we are wrapping a CmpInst where we needed to
/// change the predicate of a compare instruction from a greater than form
/// to a less than form. It is None otherwise.
Optional<CmpInst::Predicate> RevisedPredicate;
+ /// This is only relevant if we are wrapping a CallInst. If we are requiring
+ /// that the function calls have matching names as well as types, and the
+ /// call is not an indirect call, this will hold the name of the function. If
+  /// it is an indirect call, it will be the empty string. However, if this
+ /// requirement is not in place it will be the empty string regardless of the
+ /// function call type. The value held here is used to create the hash of the
+ /// instruction, and check to make sure two instructions are close to one
+ /// another.
+ Optional<std::string> CalleeName;
+
/// This structure holds the distances of how far "ahead of" or "behind" the
/// target blocks of a branch, or the incoming blocks of a phi nodes are.
/// If the value is negative, it means that the block was registered before
@@ -168,6 +178,10 @@ struct IRInstructionData
/// instruction. the IRInstructionData must be wrapping a CmpInst.
CmpInst::Predicate getPredicate() const;
+ /// Get the callee name that the call instruction is using for hashing the
+ /// instruction. The IRInstructionData must be wrapping a CallInst.
+ StringRef getCalleeName() const;
+
/// A function that swaps the predicates to their less than form if they are
/// in a greater than form. Otherwise, the predicate is unchanged.
///
@@ -185,6 +199,31 @@ struct IRInstructionData
void
setBranchSuccessors(DenseMap<BasicBlock *, unsigned> &BasicBlockToInteger);
+ /// For an IRInstructionData containing a CallInst, set the function name
+ /// appropriately. This will be an empty string if it is an indirect call,
+ /// or we are not matching by name of the called function. It will be the
+ /// name of the function if \p MatchByName is true and it is not an indirect
+ /// call. We may decide not to match by name in order to expand the
+  /// size of the regions we can match. If a function has the same type
+  /// signature but a different name, the region of code is still almost the
+  /// same. Since function names can be treated as constants, the name itself
+  /// could be abstracted away. However, matching by name provides more
+  /// specificity and more nearly identical code than not matching by name.
+ ///
+ /// \param MatchByName - A flag to mark whether we are using the called
+ /// function name as a differentiating parameter.
+ void setCalleeName(bool MatchByName = true);
+
+ /// For an IRInstructionData containing a PHINode, finds the
+ /// relative distances from the incoming basic block to the current block by
+ /// taking the difference of the number assigned to the current basic block
+ /// and the incoming basic block of the branch.
+ ///
+ /// \param BasicBlockToInteger - The mapping of basic blocks to their location
+ /// in the module.
+ void
+ setPHIPredecessors(DenseMap<BasicBlock *, unsigned> &BasicBlockToInteger);
+
/// Hashes \p Value based on its opcode, types, and operand types.
/// Two IRInstructionData instances produce the same hash when they perform
/// the same operation.
@@ -223,12 +262,14 @@ struct IRInstructionData
llvm::hash_value(ID.Inst->getType()),
llvm::hash_value(ID.getPredicate()),
llvm::hash_combine_range(OperTypes.begin(), OperTypes.end()));
- else if (CallInst *CI = dyn_cast<CallInst>(ID.Inst))
+ else if (isa<CallInst>(ID.Inst)) {
+ std::string FunctionName = *ID.CalleeName;
return llvm::hash_combine(
llvm::hash_value(ID.Inst->getOpcode()),
llvm::hash_value(ID.Inst->getType()),
- llvm::hash_value(CI->getCalledFunction()->getName().str()),
+ llvm::hash_value(ID.Inst->getType()), llvm::hash_value(FunctionName),
llvm::hash_combine_range(OperTypes.begin(), OperTypes.end()));
+ }
return llvm::hash_combine(
llvm::hash_value(ID.Inst->getOpcode()),
llvm::hash_value(ID.Inst->getType()),
@@ -346,6 +387,10 @@ struct IRInstructionMapper {
/// to be considered for similarity.
bool HaveLegalRange = false;
+ /// Marks whether we should use exact function names, as well as types to
+ /// find similarity between calls.
+ bool EnableMatchCallsByName = false;
+
/// This allocator pointer is in charge of holding on to the IRInstructionData
/// so it is not deallocated until whatever external tool is using it is done
/// with the information.
@@ -462,8 +507,11 @@ struct IRInstructionMapper {
return Legal;
return Illegal;
}
- // TODO: Determine a scheme to resolve when the labels are similar enough.
- InstrType visitPHINode(PHINode &PN) { return Illegal; }
+ InstrType visitPHINode(PHINode &PN) {
+ if (EnableBranches)
+ return Legal;
+ return Illegal;
+ }
// TODO: Handle allocas.
InstrType visitAllocaInst(AllocaInst &AI) { return Illegal; }
// We exclude variable argument instructions since variable arguments
@@ -483,7 +531,10 @@ struct IRInstructionMapper {
// is not an indirect call.
InstrType visitCallInst(CallInst &CI) {
Function *F = CI.getCalledFunction();
- if (!F || CI.isIndirectCall() || !F->hasName())
+ bool IsIndirectCall = CI.isIndirectCall();
+ if (IsIndirectCall && !EnableIndirectCalls)
+ return Illegal;
+ if (!F && !IsIndirectCall)
return Illegal;
return Legal;
}
@@ -498,6 +549,10 @@ struct IRInstructionMapper {
// The flag variable that lets the classifier know whether we should
// allow branches to be checked for similarity.
bool EnableBranches = false;
+
+ // The flag variable that lets the classifier know whether we should
+ // allow indirect calls to be considered legal instructions.
+ bool EnableIndirectCalls = false;
};
/// Maps an Instruction to a member of InstrType.
@@ -882,9 +937,12 @@ typedef std::vector<SimilarityGroup> SimilarityGroupList;
/// analyzing the module.
class IRSimilarityIdentifier {
public:
- IRSimilarityIdentifier(bool MatchBranches = true)
+ IRSimilarityIdentifier(bool MatchBranches = true,
+ bool MatchIndirectCalls = true,
+ bool MatchCallsWithName = false)
: Mapper(&InstDataAllocator, &InstDataListAllocator),
- EnableBranches(MatchBranches) {}
+ EnableBranches(MatchBranches), EnableIndirectCalls(MatchIndirectCalls),
+ EnableMatchingCallsByName(MatchCallsWithName) {}
private:
/// Map the instructions in the module to unsigned integers, using mapping
@@ -964,6 +1022,15 @@ private:
/// similarity, or only look within basic blocks.
bool EnableBranches = true;
+ /// The flag variable that marks whether we allow indirect calls to be checked
+ /// for similarity, or exclude them as a legal instruction.
+ bool EnableIndirectCalls = true;
+
+ /// The flag variable that marks whether we allow calls to be marked as
+ /// similar if they do not have the same name, only the same calling
+ /// convention, attributes and type signature.
+ bool EnableMatchingCallsByName = true;
+
/// The SimilarityGroups found with the most recent run of \ref
/// findSimilarity. None if there is no recent run.
Optional<SimilarityGroupList> SimilarityCandidates;
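Editorial note, not part of the diff: the extended constructor exposes the two new knobs documented above. A minimal configuration sketch, assuming the class sits in the IRSimilarity namespace like the rest of this header; the call that actually scans a Module is not shown in these hunks:

  #include "llvm/Analysis/IRSimilarityIdentifier.h"

  void configureIdentifier() {
    // Also treat indirect calls as legal and require callee names to match.
    llvm::IRSimilarity::IRSimilarityIdentifier Ident(/*MatchBranches=*/true,
                                                     /*MatchIndirectCalls=*/true,
                                                     /*MatchCallsWithName=*/true);
    (void)Ident;
  }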
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/IVDescriptors.h b/contrib/llvm-project/llvm/include/llvm/Analysis/IVDescriptors.h
index 9858a46d16a2..dec488a6f26d 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/IVDescriptors.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/IVDescriptors.h
@@ -77,10 +77,12 @@ public:
RecurrenceDescriptor(Value *Start, Instruction *Exit, RecurKind K,
FastMathFlags FMF, Instruction *ExactFP, Type *RT,
bool Signed, bool Ordered,
- SmallPtrSetImpl<Instruction *> &CI)
+ SmallPtrSetImpl<Instruction *> &CI,
+ unsigned MinWidthCastToRecurTy)
: StartValue(Start), LoopExitInstr(Exit), Kind(K), FMF(FMF),
ExactFPMathInst(ExactFP), RecurrenceType(RT), IsSigned(Signed),
- IsOrdered(Ordered) {
+ IsOrdered(Ordered),
+ MinWidthCastToRecurrenceType(MinWidthCastToRecurTy) {
CastInsts.insert(CI.begin(), CI.end());
}
@@ -251,6 +253,11 @@ public:
/// recurrence.
const SmallPtrSet<Instruction *, 8> &getCastInsts() const { return CastInsts; }
+ /// Returns the minimum width used by the recurrence in bits.
+ unsigned getMinWidthCastToRecurrenceTypeInBits() const {
+ return MinWidthCastToRecurrenceType;
+ }
+
/// Returns true if all source operands of the recurrence are SExtInsts.
bool isSigned() const { return IsSigned; }
@@ -291,6 +298,8 @@ private:
bool IsOrdered = false;
// Instructions used for type-promoting the recurrence.
SmallPtrSet<Instruction *, 8> CastInsts;
+ // The minimum width used by the recurrence.
+ unsigned MinWidthCastToRecurrenceType;
};
/// A struct for saving information about induction variables.
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/IVUsers.h b/contrib/llvm-project/llvm/include/llvm/Analysis/IVUsers.h
index e2026a4d5875..390d09848dde 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/IVUsers.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/IVUsers.h
@@ -28,7 +28,6 @@ class Value;
class ScalarEvolution;
class SCEV;
class IVUsers;
-class DataLayout;
/// IVStrideUse - Keep track of one use of a strided induction variable.
/// The Expr member keeps track of the expression, User is the actual user
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/InlineAdvisor.h b/contrib/llvm-project/llvm/include/llvm/Analysis/InlineAdvisor.h
index 9f9bc3a5e71b..0103ee7f8386 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/InlineAdvisor.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/InlineAdvisor.h
@@ -10,6 +10,7 @@
#define LLVM_ANALYSIS_INLINEADVISOR_H
#include "llvm/Analysis/InlineCost.h"
+#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/PassManager.h"
@@ -65,7 +66,9 @@ public:
/// Call after inlining succeeded, and did not result in deleting the callee.
void recordInlining();
- /// Call after inlining succeeded, and resulted in deleting the callee.
+  /// Call after inlining succeeded and resulted in the callee becoming
+  /// delete-able, meaning it has no more users and will be cleaned up
+  /// subsequently.
void recordInliningWithCalleeDeleted();
/// Call after the decision for a call site was to not inline.
@@ -159,14 +162,13 @@ public:
/// This must be called when the Inliner pass is exited, as function passes
/// may be run subsequently. This allows an implementation of InlineAdvisor
- /// to prepare for a partial update.
- virtual void onPassExit() {}
+ /// to prepare for a partial update, based on the optional SCC.
+ virtual void onPassExit(LazyCallGraph::SCC *SCC = nullptr) {}
- /// Called when the module is invalidated. We let the advisor implementation
- /// decide what to refresh - in the case of the development mode
- /// implementation, for example, we wouldn't want to delete the whole object
- /// and need to re-load the model evaluator.
- virtual void onModuleInvalidated() {}
+ /// Support for printer pass
+ virtual void print(raw_ostream &OS) const {
+ OS << "Unimplemented InlineAdvisor print\n";
+ }
protected:
InlineAdvisor(Module &M, FunctionAnalysisManager &FAM);
@@ -178,19 +180,6 @@ protected:
FunctionAnalysisManager &FAM;
std::unique_ptr<ImportedFunctionsInliningStatistics> ImportedFunctionsStats;
- /// We may want to defer deleting functions to after the inlining for a whole
- /// module has finished. This allows us to reliably use function pointers as
- /// unique identifiers, as an efficient implementation detail of the
- /// InlineAdvisor. Otherwise, it is possible the memory allocator
- /// re-allocate Function objects at the same address of a deleted Function;
- /// and Functions are potentially created during the function passes called
- /// after each SCC inlining (e.g. argument promotion does that).
- void freeDeletedFunctions();
-
- bool isFunctionDeleted(const Function *F) const {
- return DeletedFunctions.count(F);
- }
-
enum class MandatoryInliningKind { NotMandatory, Always, Never };
static MandatoryInliningKind getMandatoryKind(CallBase &CB,
@@ -201,8 +190,6 @@ protected:
private:
friend class InlineAdvice;
- void markFunctionAsDeleted(Function *F);
- std::unordered_set<const Function *> DeletedFunctions;
};
/// The default (manual heuristics) implementation of the InlineAdvisor. This
@@ -217,8 +204,6 @@ public:
private:
std::unique_ptr<InlineAdvice> getAdviceImpl(CallBase &CB) override;
- void onPassExit() override { freeDeletedFunctions(); }
-
InlineParams Params;
};
@@ -232,8 +217,6 @@ public:
Result(Module &M, ModuleAnalysisManager &MAM) : M(M), MAM(MAM) {}
bool invalidate(Module &, const PreservedAnalyses &PA,
ModuleAnalysisManager::Invalidator &) {
- if (Advisor && !PA.areAllPreserved())
- Advisor->onModuleInvalidated();
// Check whether the analysis has been explicitly invalidated. Otherwise,
// it's stateless and remains preserved.
auto PAC = PA.getChecker<InlineAdvisorAnalysis>();
@@ -252,16 +235,23 @@ public:
Result run(Module &M, ModuleAnalysisManager &MAM) { return Result(M, MAM); }
};
-#ifdef LLVM_HAVE_TF_AOT
+/// Printer pass for the InlineAdvisorAnalysis results.
+class InlineAdvisorAnalysisPrinterPass
+ : public PassInfoMixin<InlineAdvisorAnalysisPrinterPass> {
+ raw_ostream &OS;
+
+public:
+ explicit InlineAdvisorAnalysisPrinterPass(raw_ostream &OS) : OS(OS) {}
+
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);
+};
+
std::unique_ptr<InlineAdvisor>
getReleaseModeAdvisor(Module &M, ModuleAnalysisManager &MAM);
-#endif
-#ifdef LLVM_HAVE_TF_API
std::unique_ptr<InlineAdvisor>
getDevelopmentModeAdvisor(Module &M, ModuleAnalysisManager &MAM,
std::function<bool(CallBase &)> GetDefaultAdvice);
-#endif
// Default (manual policy) decision making helper APIs. Shared with the legacy
// pass manager inliner.
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/InlineCost.h b/contrib/llvm-project/llvm/include/llvm/Analysis/InlineCost.h
index 776749b9a07f..f86ee5a14874 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/InlineCost.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/InlineCost.h
@@ -21,7 +21,6 @@
#include <climits>
namespace llvm {
-class AssumptionCacheTracker;
class BlockFrequencyInfo;
class CallBase;
class DataLayout;
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/InlineOrder.h b/contrib/llvm-project/llvm/include/llvm/Analysis/InlineOrder.h
index def3192356f4..feefa9b9ddd1 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/InlineOrder.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/InlineOrder.h
@@ -20,7 +20,6 @@
namespace llvm {
class CallBase;
class Function;
-class Module;
template <typename T> class InlineOrder {
public:
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/InstSimplifyFolder.h b/contrib/llvm-project/llvm/include/llvm/Analysis/InstSimplifyFolder.h
new file mode 100644
index 000000000000..54ef1ddf6085
--- /dev/null
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/InstSimplifyFolder.h
@@ -0,0 +1,255 @@
+//===- InstSimplifyFolder.h - InstSimplify folding helper --------*- C++-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the InstSimplifyFolder class, a helper for IRBuilder.
+// It provides IRBuilder with a set of methods for folding operations to
+// existing values using InstructionSimplify. At the moment, only a subset of
+// the implementation uses InstructionSimplify. The rest of the implementation
+// only folds constants.
+//
+// The folder also applies target-specific constant folding.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_INSTSIMPLIFYFOLDER_H
+#define LLVM_ANALYSIS_INSTSIMPLIFYFOLDER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/TargetFolder.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/IRBuilderFolder.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+
+namespace llvm {
+
+/// InstSimplifyFolder - Use InstructionSimplify to fold operations to existing
+/// values. Also applies target-specific constant folding when not using
+/// InstructionSimplify.
+class InstSimplifyFolder final : public IRBuilderFolder {
+ TargetFolder ConstFolder;
+ SimplifyQuery SQ;
+
+ virtual void anchor();
+
+public:
+ InstSimplifyFolder(const DataLayout &DL) : ConstFolder(DL), SQ(DL) {}
+
+ //===--------------------------------------------------------------------===//
+ // Value-based folders.
+ //
+ // Return an existing value or a constant if the operation can be simplified.
+ // Otherwise return nullptr.
+ //===--------------------------------------------------------------------===//
+ Value *FoldAdd(Value *LHS, Value *RHS, bool HasNUW = false,
+ bool HasNSW = false) const override {
+ return SimplifyAddInst(LHS, RHS, HasNUW, HasNSW, SQ);
+ }
+
+ Value *FoldAnd(Value *LHS, Value *RHS) const override {
+ return SimplifyAndInst(LHS, RHS, SQ);
+ }
+
+ Value *FoldOr(Value *LHS, Value *RHS) const override {
+ return SimplifyOrInst(LHS, RHS, SQ);
+ }
+
+ Value *FoldICmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
+ return SimplifyICmpInst(P, LHS, RHS, SQ);
+ }
+
+ Value *FoldGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
+ bool IsInBounds = false) const override {
+ return SimplifyGEPInst(Ty, Ptr, IdxList, IsInBounds, SQ);
+ }
+
+ Value *FoldSelect(Value *C, Value *True, Value *False) const override {
+ return SimplifySelectInst(C, True, False, SQ);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Binary Operators
+ //===--------------------------------------------------------------------===//
+
+ Value *CreateFAdd(Constant *LHS, Constant *RHS) const override {
+ return ConstFolder.CreateFAdd(LHS, RHS);
+ }
+ Value *CreateSub(Constant *LHS, Constant *RHS, bool HasNUW = false,
+ bool HasNSW = false) const override {
+ return ConstFolder.CreateSub(LHS, RHS, HasNUW, HasNSW);
+ }
+ Value *CreateFSub(Constant *LHS, Constant *RHS) const override {
+ return ConstFolder.CreateFSub(LHS, RHS);
+ }
+ Value *CreateMul(Constant *LHS, Constant *RHS, bool HasNUW = false,
+ bool HasNSW = false) const override {
+ return ConstFolder.CreateMul(LHS, RHS, HasNUW, HasNSW);
+ }
+ Value *CreateFMul(Constant *LHS, Constant *RHS) const override {
+ return ConstFolder.CreateFMul(LHS, RHS);
+ }
+ Value *CreateUDiv(Constant *LHS, Constant *RHS,
+ bool isExact = false) const override {
+ return ConstFolder.CreateUDiv(LHS, RHS, isExact);
+ }
+ Value *CreateSDiv(Constant *LHS, Constant *RHS,
+ bool isExact = false) const override {
+ return ConstFolder.CreateSDiv(LHS, RHS, isExact);
+ }
+ Value *CreateFDiv(Constant *LHS, Constant *RHS) const override {
+ return ConstFolder.CreateFDiv(LHS, RHS);
+ }
+ Value *CreateURem(Constant *LHS, Constant *RHS) const override {
+ return ConstFolder.CreateURem(LHS, RHS);
+ }
+ Value *CreateSRem(Constant *LHS, Constant *RHS) const override {
+ return ConstFolder.CreateSRem(LHS, RHS);
+ }
+ Value *CreateFRem(Constant *LHS, Constant *RHS) const override {
+ return ConstFolder.CreateFRem(LHS, RHS);
+ }
+ Value *CreateShl(Constant *LHS, Constant *RHS, bool HasNUW = false,
+ bool HasNSW = false) const override {
+ return ConstFolder.CreateShl(LHS, RHS, HasNUW, HasNSW);
+ }
+ Value *CreateLShr(Constant *LHS, Constant *RHS,
+ bool isExact = false) const override {
+ return ConstFolder.CreateLShr(LHS, RHS, isExact);
+ }
+ Value *CreateAShr(Constant *LHS, Constant *RHS,
+ bool isExact = false) const override {
+ return ConstFolder.CreateAShr(LHS, RHS, isExact);
+ }
+ Value *CreateXor(Constant *LHS, Constant *RHS) const override {
+ return ConstFolder.CreateXor(LHS, RHS);
+ }
+
+ Value *CreateBinOp(Instruction::BinaryOps Opc, Constant *LHS,
+ Constant *RHS) const override {
+ return ConstFolder.CreateBinOp(Opc, LHS, RHS);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Unary Operators
+ //===--------------------------------------------------------------------===//
+
+ Value *CreateNeg(Constant *C, bool HasNUW = false,
+ bool HasNSW = false) const override {
+ return ConstFolder.CreateNeg(C, HasNUW, HasNSW);
+ }
+ Value *CreateFNeg(Constant *C) const override {
+ return ConstFolder.CreateFNeg(C);
+ }
+ Value *CreateNot(Constant *C) const override {
+ return ConstFolder.CreateNot(C);
+ }
+
+ Value *CreateUnOp(Instruction::UnaryOps Opc, Constant *C) const override {
+ return ConstFolder.CreateUnOp(Opc, C);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Cast/Conversion Operators
+ //===--------------------------------------------------------------------===//
+
+ Value *CreateCast(Instruction::CastOps Op, Constant *C,
+ Type *DestTy) const override {
+ if (C->getType() == DestTy)
+ return C; // avoid calling Fold
+ return ConstFolder.CreateCast(Op, C, DestTy);
+ }
+ Value *CreateIntCast(Constant *C, Type *DestTy,
+ bool isSigned) const override {
+ if (C->getType() == DestTy)
+ return C; // avoid calling Fold
+ return ConstFolder.CreateIntCast(C, DestTy, isSigned);
+ }
+ Value *CreatePointerCast(Constant *C, Type *DestTy) const override {
+ if (C->getType() == DestTy)
+ return C; // avoid calling Fold
+ return ConstFolder.CreatePointerCast(C, DestTy);
+ }
+ Value *CreateFPCast(Constant *C, Type *DestTy) const override {
+ if (C->getType() == DestTy)
+ return C; // avoid calling Fold
+ return ConstFolder.CreateFPCast(C, DestTy);
+ }
+ Value *CreateBitCast(Constant *C, Type *DestTy) const override {
+ return ConstFolder.CreateBitCast(C, DestTy);
+ }
+ Value *CreateIntToPtr(Constant *C, Type *DestTy) const override {
+ return ConstFolder.CreateIntToPtr(C, DestTy);
+ }
+ Value *CreatePtrToInt(Constant *C, Type *DestTy) const override {
+ return ConstFolder.CreatePtrToInt(C, DestTy);
+ }
+ Value *CreateZExtOrBitCast(Constant *C, Type *DestTy) const override {
+ if (C->getType() == DestTy)
+ return C; // avoid calling Fold
+ return ConstFolder.CreateZExtOrBitCast(C, DestTy);
+ }
+ Value *CreateSExtOrBitCast(Constant *C, Type *DestTy) const override {
+ if (C->getType() == DestTy)
+ return C; // avoid calling Fold
+ return ConstFolder.CreateSExtOrBitCast(C, DestTy);
+ }
+ Value *CreateTruncOrBitCast(Constant *C, Type *DestTy) const override {
+ if (C->getType() == DestTy)
+ return C; // avoid calling Fold
+ return ConstFolder.CreateTruncOrBitCast(C, DestTy);
+ }
+
+ Value *CreatePointerBitCastOrAddrSpaceCast(Constant *C,
+ Type *DestTy) const override {
+ if (C->getType() == DestTy)
+ return C; // avoid calling Fold
+ return ConstFolder.CreatePointerBitCastOrAddrSpaceCast(C, DestTy);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Compare Instructions
+ //===--------------------------------------------------------------------===//
+
+ Value *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
+ Constant *RHS) const override {
+ return ConstFolder.CreateFCmp(P, LHS, RHS);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Other Instructions
+ //===--------------------------------------------------------------------===//
+
+ Value *CreateExtractElement(Constant *Vec, Constant *Idx) const override {
+ return ConstFolder.CreateExtractElement(Vec, Idx);
+ }
+
+ Value *CreateInsertElement(Constant *Vec, Constant *NewElt,
+ Constant *Idx) const override {
+ return ConstFolder.CreateInsertElement(Vec, NewElt, Idx);
+ }
+
+ Value *CreateShuffleVector(Constant *V1, Constant *V2,
+ ArrayRef<int> Mask) const override {
+ return ConstFolder.CreateShuffleVector(V1, V2, Mask);
+ }
+
+ Value *CreateExtractValue(Constant *Agg,
+ ArrayRef<unsigned> IdxList) const override {
+ return ConstFolder.CreateExtractValue(Agg, IdxList);
+ }
+
+ Value *CreateInsertValue(Constant *Agg, Constant *Val,
+ ArrayRef<unsigned> IdxList) const override {
+ return ConstFolder.CreateInsertValue(Agg, Val, IdxList);
+ }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_INSTSIMPLIFYFOLDER_H
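// --- Illustrative sketch (annotation, not part of the upstream diff) ---
// InstSimplifyFolder is intended to be plugged into IRBuilder as its folder,
// so Create* calls may return an already-existing simplified value instead of
// emitting a new instruction. The function and variable names below are
// hypothetical.
#include "llvm/Analysis/InstSimplifyFolder.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
using namespace llvm;
static Value *emitSum(BasicBlock *BB, Value *A, Value *B) {
  const DataLayout &DL = BB->getModule()->getDataLayout();
  IRBuilder<InstSimplifyFolder> Builder(BB, InstSimplifyFolder(DL));
  // If A + B folds (e.g. B is a zero constant), this returns an existing
  // value rather than inserting a fresh add instruction at the end of BB.
  return Builder.CreateAdd(A, B);
}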
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/InstructionSimplify.h b/contrib/llvm-project/llvm/include/llvm/Analysis/InstructionSimplify.h
index f0f8e4bc9175..8b49c115f101 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/InstructionSimplify.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/InstructionSimplify.h
@@ -63,7 +63,7 @@ class Value;
/// results if the users specified it is safe to use.
struct InstrInfoQuery {
InstrInfoQuery(bool UMD) : UseInstrInfo(UMD) {}
- InstrInfoQuery() : UseInstrInfo(true) {}
+ InstrInfoQuery() = default;
bool UseInstrInfo = true;
MDNode *getMetadata(const Instruction *I, unsigned KindID) const {
@@ -248,8 +248,8 @@ Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
const SimplifyQuery &Q);
/// Given operands for a GetElementPtrInst, fold the result or return null.
-Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops, bool InBounds,
- const SimplifyQuery &Q);
+Value *SimplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef<Value *> Indices,
+ bool InBounds, const SimplifyQuery &Q);
/// Given operands for an InsertValueInst, fold the result or return null.
Value *SimplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
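// --- Illustrative sketch (annotation, not part of the upstream diff) ---
// SimplifyGEPInst now takes the base pointer and the index list separately
// instead of a single combined operand array. A hypothetical caller built
// only from the declaration above:
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;
static Value *trySimplifyGEP(GetElementPtrInst *GEP, const SimplifyQuery &Q) {
  SmallVector<Value *, 8> Indices(GEP->idx_begin(), GEP->idx_end());
  return SimplifyGEPInst(GEP->getSourceElementType(), GEP->getPointerOperand(),
                         Indices, GEP->isInBounds(), Q);
}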
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/LazyBlockFrequencyInfo.h b/contrib/llvm-project/llvm/include/llvm/Analysis/LazyBlockFrequencyInfo.h
index 0e7dc943bacf..a6d8b76b12ae 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/LazyBlockFrequencyInfo.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/LazyBlockFrequencyInfo.h
@@ -22,7 +22,6 @@
namespace llvm {
class AnalysisUsage;
-class BranchProbabilityInfo;
class Function;
class LoopInfo;
@@ -34,8 +33,7 @@ template <typename FunctionT, typename BranchProbabilityInfoPassT,
typename LoopInfoT, typename BlockFrequencyInfoT>
class LazyBlockFrequencyInfo {
public:
- LazyBlockFrequencyInfo()
- : Calculated(false), F(nullptr), BPIPass(nullptr), LI(nullptr) {}
+ LazyBlockFrequencyInfo() = default;
/// Set up the per-function input.
void setAnalysis(const FunctionT *F, BranchProbabilityInfoPassT *BPIPass,
@@ -68,10 +66,10 @@ public:
private:
BlockFrequencyInfoT BFI;
- bool Calculated;
- const FunctionT *F;
- BranchProbabilityInfoPassT *BPIPass;
- const LoopInfoT *LI;
+ bool Calculated = false;
+ const FunctionT *F = nullptr;
+ BranchProbabilityInfoPassT *BPIPass = nullptr;
+ const LoopInfoT *LI = nullptr;
};
/// This is an alternative analysis pass to
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/LazyBranchProbabilityInfo.h b/contrib/llvm-project/llvm/include/llvm/Analysis/LazyBranchProbabilityInfo.h
index 3c632f02905a..bad7423616b4 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/LazyBranchProbabilityInfo.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/LazyBranchProbabilityInfo.h
@@ -57,7 +57,7 @@ class LazyBranchProbabilityInfoPass : public FunctionPass {
public:
LazyBranchProbabilityInfo(const Function *F, const LoopInfo *LI,
const TargetLibraryInfo *TLI)
- : Calculated(false), F(F), LI(LI), TLI(TLI) {}
+ : F(F), LI(LI), TLI(TLI) {}
/// Retrieve the BPI with the branch probabilities computed.
BranchProbabilityInfo &getCalculated() {
@@ -75,7 +75,7 @@ class LazyBranchProbabilityInfoPass : public FunctionPass {
private:
BranchProbabilityInfo BPI;
- bool Calculated;
+ bool Calculated = false;
const Function *F;
const LoopInfo *LI;
const TargetLibraryInfo *TLI;
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/LazyCallGraph.h b/contrib/llvm-project/llvm/include/llvm/Analysis/LazyCallGraph.h
index 0580f4d7b226..eb8f66bada59 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/LazyCallGraph.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/LazyCallGraph.h
@@ -884,7 +884,9 @@ public:
RefSCC *RC = nullptr;
/// Build the begin iterator for a node.
- postorder_ref_scc_iterator(LazyCallGraph &G) : G(&G), RC(getRC(G, 0)) {}
+ postorder_ref_scc_iterator(LazyCallGraph &G) : G(&G), RC(getRC(G, 0)) {
+ incrementUntilNonEmptyRefSCC();
+ }
/// Build the end iterator for a node. This is selected purely by overload.
postorder_ref_scc_iterator(LazyCallGraph &G, IsAtEndT /*Nonce*/) : G(&G) {}
@@ -899,6 +901,17 @@ public:
return G.PostOrderRefSCCs[Index];
}
+ // Keep incrementing until RC is non-empty (or null).
+ void incrementUntilNonEmptyRefSCC() {
+ while (RC && RC->size() == 0)
+ increment();
+ }
+
+ void increment() {
+ assert(RC && "Cannot increment the end iterator!");
+ RC = getRC(*G, G->RefSCCIndices.find(RC)->second + 1);
+ }
+
public:
bool operator==(const postorder_ref_scc_iterator &Arg) const {
return G == Arg.G && RC == Arg.RC;
@@ -908,8 +921,8 @@ public:
using iterator_facade_base::operator++;
postorder_ref_scc_iterator &operator++() {
- assert(RC && "Cannot increment the end iterator!");
- RC = getRC(*G, G->RefSCCIndices.find(RC)->second + 1);
+ increment();
+ incrementUntilNonEmptyRefSCC();
return *this;
}
};
@@ -1190,7 +1203,7 @@ private:
}
};
-inline LazyCallGraph::Edge::Edge() : Value() {}
+inline LazyCallGraph::Edge::Edge() {}
inline LazyCallGraph::Edge::Edge(Node &N, Kind K) : Value(&N, K) {}
inline LazyCallGraph::Edge::operator bool() const {
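// --- Illustrative sketch (annotation, not part of the upstream diff) ---
// With the change above, both the begin iterator and operator++ skip empty
// RefSCCs, so a post-order walk never observes one. Typical traversal:
#include "llvm/Analysis/LazyCallGraph.h"
using namespace llvm;
static unsigned countRefSCCs(LazyCallGraph &CG) {
  unsigned N = 0;
  for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs()) {
    (void)RC; // each visited RefSCC is guaranteed to be non-empty here
    ++N;
  }
  return N;
}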
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/Loads.h b/contrib/llvm-project/llvm/include/llvm/Analysis/Loads.h
index ced1943b81d9..3db501c51a17 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/Loads.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/Loads.h
@@ -24,7 +24,6 @@ class DominatorTree;
class Instruction;
class LoadInst;
class Loop;
-class MDNode;
class MemoryLocation;
class ScalarEvolution;
class TargetLibraryInfo;
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/LoopAccessAnalysis.h b/contrib/llvm-project/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
index 2b4edfac61fc..c83a04991b04 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
@@ -26,7 +26,6 @@ class AAResults;
class DataLayout;
class Loop;
class LoopAccessInfo;
-class OptimizationRemarkEmitter;
class raw_ostream;
class SCEV;
class SCEVUnionPredicate;
@@ -170,10 +169,7 @@ public:
};
MemoryDepChecker(PredicatedScalarEvolution &PSE, const Loop *L)
- : PSE(PSE), InnermostLoop(L), AccessIdx(0), MaxSafeDepDistBytes(0),
- MaxSafeVectorWidthInBits(-1U),
- FoundNonConstantDistanceDependence(false),
- Status(VectorizationSafetyStatus::Safe), RecordDependences(true) {}
+ : PSE(PSE), InnermostLoop(L) {}
/// Register the location (instructions are given increasing numbers)
/// of a write access.
@@ -265,30 +261,30 @@ private:
SmallVector<Instruction *, 16> InstMap;
/// The program order index to be used for the next instruction.
- unsigned AccessIdx;
+ unsigned AccessIdx = 0;
// We can access this many bytes in parallel safely.
- uint64_t MaxSafeDepDistBytes;
+ uint64_t MaxSafeDepDistBytes = 0;
/// Number of elements (from consecutive iterations) that are safe to
/// operate on simultaneously, multiplied by the size of the element in bits.
/// The size of the element is taken from the memory access that is most
/// restrictive.
- uint64_t MaxSafeVectorWidthInBits;
+ uint64_t MaxSafeVectorWidthInBits = -1U;
/// If we see a non-constant dependence distance we can still try to
/// vectorize this loop with runtime checks.
- bool FoundNonConstantDistanceDependence;
+ bool FoundNonConstantDistanceDependence = false;
/// Result of the dependence checks, indicating whether the checked
/// dependences are safe for vectorization, require RT checks or are known to
/// be unsafe.
- VectorizationSafetyStatus Status;
+ VectorizationSafetyStatus Status = VectorizationSafetyStatus::Safe;
//// True if Dependences reflects the dependences in the
//// loop. If false we exceeded MaxDependences and
//// Dependences is invalid.
- bool RecordDependences;
+ bool RecordDependences = true;
/// Memory dependences collected during the analysis. Only valid if
/// RecordDependences is true.
@@ -396,7 +392,7 @@ public:
AliasSetId(AliasSetId), Expr(Expr) {}
};
- RuntimePointerChecking(ScalarEvolution *SE) : Need(false), SE(SE) {}
+ RuntimePointerChecking(ScalarEvolution *SE) : SE(SE) {}
/// Reset the state of the pointer runtime information.
void reset() {
@@ -445,7 +441,7 @@ public:
unsigned Depth = 0) const;
/// This flag indicates if we need to add the runtime check.
- bool Need;
+ bool Need = false;
/// Information about the pointers that may require checking.
SmallVector<PointerInfo, 2> Pointers;
@@ -621,17 +617,17 @@ private:
Loop *TheLoop;
- unsigned NumLoads;
- unsigned NumStores;
+ unsigned NumLoads = 0;
+ unsigned NumStores = 0;
- uint64_t MaxSafeDepDistBytes;
+ uint64_t MaxSafeDepDistBytes = -1;
/// Cache the result of analyzeLoop.
- bool CanVecMem;
- bool HasConvergentOp;
+ bool CanVecMem = false;
+ bool HasConvergentOp = false;
/// Indicator that there are non vectorizable stores to a uniform address.
- bool HasDependenceInvolvingLoopInvariantAddress;
+ bool HasDependenceInvolvingLoopInvariantAddress = false;
/// The diagnostics report generated for the analysis. E.g. why we
/// couldn't analyze the loop.
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/LoopAnalysisManager.h b/contrib/llvm-project/llvm/include/llvm/Analysis/LoopAnalysisManager.h
index bc8a1e74e447..d07e6977fed1 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/LoopAnalysisManager.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/LoopAnalysisManager.h
@@ -87,7 +87,7 @@ typedef InnerAnalysisManagerProxy<LoopAnalysisManager, Function>
template <> class LoopAnalysisManagerFunctionProxy::Result {
public:
explicit Result(LoopAnalysisManager &InnerAM, LoopInfo &LI)
- : InnerAM(&InnerAM), LI(&LI), MSSAUsed(false) {}
+ : InnerAM(&InnerAM), LI(&LI) {}
Result(Result &&Arg)
: InnerAM(std::move(Arg.InnerAM)), LI(Arg.LI), MSSAUsed(Arg.MSSAUsed) {
// We have to null out the analysis manager in the moved-from state
@@ -136,7 +136,7 @@ public:
private:
LoopAnalysisManager *InnerAM;
LoopInfo *LI;
- bool MSSAUsed;
+ bool MSSAUsed = false;
};
/// Provide a specialized run method for the \c LoopAnalysisManagerFunctionProxy
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/LoopInfo.h b/contrib/llvm-project/llvm/include/llvm/Analysis/LoopInfo.h
index 15c9d911ab80..b2326c4714dd 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/LoopInfo.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/LoopInfo.h
@@ -557,21 +557,24 @@ public:
/// If the given value is an instruction inside of the loop and it can be
/// hoisted, do so to make it trivially loop-invariant.
- /// Return true if the value after any hoisting is loop invariant. This
- /// function can be used as a slightly more aggressive replacement for
- /// isLoopInvariant.
+ /// Return true if \c V is already loop-invariant, and false if \c V can't
+ /// be made loop-invariant. If \c V is made loop-invariant, \c Changed is
+ /// set to true. This function can be used as a slightly more aggressive
+ /// replacement for isLoopInvariant.
///
/// If InsertPt is specified, it is the point to hoist instructions to.
/// If null, the terminator of the loop preheader is used.
+ ///
bool makeLoopInvariant(Value *V, bool &Changed,
Instruction *InsertPt = nullptr,
MemorySSAUpdater *MSSAU = nullptr) const;
/// If the given instruction is inside of the loop and it can be hoisted, do
/// so to make it trivially loop-invariant.
- /// Return true if the instruction after any hoisting is loop invariant. This
- /// function can be used as a slightly more aggressive replacement for
- /// isLoopInvariant.
+ /// Return true if \c I is already loop-invariant, and false if \c I can't
+ /// be made loop-invariant. If \c I is made loop-invariant, \c Changed is
+ /// set to true. This function can be used as a slightly more aggressive
+ /// replacement for isLoopInvariant.
///
/// If InsertPt is specified, it is the point to hoist instructions to.
/// If null, the terminator of the loop preheader is used.
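// --- Illustrative sketch (annotation, not part of the upstream diff) ---
// Per the clarified comment above: the return value says whether the value
// ends up loop-invariant, while Changed reports whether hoisting was actually
// performed. Hypothetical caller:
#include "llvm/Analysis/LoopInfo.h"
using namespace llvm;
static bool tryHoistOutOf(const Loop &L, Value *V) {
  bool Changed = false;
  bool Invariant = L.makeLoopInvariant(V, Changed);
  // Invariant: V is (or has just become) loop-invariant.
  // Changed:   an instruction was actually hoisted to make that true.
  return Invariant;
}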
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/LoopNestAnalysis.h b/contrib/llvm-project/llvm/include/llvm/Analysis/LoopNestAnalysis.h
index 3d4a064cf7e3..852a6c438d43 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/LoopNestAnalysis.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/LoopNestAnalysis.h
@@ -102,12 +102,35 @@ public:
return Loops[Index];
}
+ /// Get the loop index of the given loop \p L.
+ unsigned getLoopIndex(const Loop &L) const {
+ for (unsigned I = 0; I < getNumLoops(); ++I)
+ if (getLoop(I) == &L)
+ return I;
+ llvm_unreachable("Loop not in the loop nest");
+ }
+
/// Return the number of loops in the nest.
size_t getNumLoops() const { return Loops.size(); }
/// Get the loops in the nest.
ArrayRef<Loop *> getLoops() const { return Loops; }
+ /// Get the loops in the nest at the given \p Depth.
+ LoopVectorTy getLoopsAtDepth(unsigned Depth) const {
+ assert(Depth >= Loops.front()->getLoopDepth() &&
+ Depth <= Loops.back()->getLoopDepth() && "Invalid depth");
+ LoopVectorTy Result;
+ for (unsigned I = 0; I < getNumLoops(); ++I) {
+ Loop *L = getLoop(I);
+ if (L->getLoopDepth() == Depth)
+ Result.push_back(L);
+ else if (L->getLoopDepth() > Depth)
+ break;
+ }
+ return Result;
+ }
+
/// Retrieve a vector of perfect loop nests contained in the current loop
/// nest. For example, given the following nest containing 4 loops, this
/// member function would return {{L1,L2},{L3,L4}}.
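// --- Illustrative sketch (annotation, not part of the upstream diff) ---
// The two new accessors map between a loop and its position in the nest.
// Assuming LN is a LoopNest obtained elsewhere (e.g. from LoopNestAnalysis):
#include "llvm/Analysis/LoopNestAnalysis.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
static void dumpNestLevels(const LoopNest &LN, raw_ostream &OS) {
  for (Loop *L : LN.getLoops())
    OS << "loop #" << LN.getLoopIndex(*L) << ": "
       << LN.getLoopsAtDepth(L->getLoopDepth()).size()
       << " loop(s) at depth " << L->getLoopDepth() << "\n";
}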
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/MLInlineAdvisor.h b/contrib/llvm-project/llvm/include/llvm/Analysis/MLInlineAdvisor.h
index a218561e61c7..05411d9c99a2 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/MLInlineAdvisor.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/MLInlineAdvisor.h
@@ -9,13 +9,13 @@
#ifndef LLVM_ANALYSIS_MLINLINEADVISOR_H
#define LLVM_ANALYSIS_MLINLINEADVISOR_H
-#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InlineAdvisor.h"
+#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/MLModelRunner.h"
#include "llvm/IR/PassManager.h"
+#include <deque>
#include <memory>
-#include <unordered_map>
namespace llvm {
class Module;
@@ -26,10 +26,10 @@ public:
MLInlineAdvisor(Module &M, ModuleAnalysisManager &MAM,
std::unique_ptr<MLModelRunner> ModelRunner);
- CallGraph *callGraph() const { return CG.get(); }
virtual ~MLInlineAdvisor() = default;
void onPassEntry() override;
+ void onPassExit(LazyCallGraph::SCC *SCC) override;
int64_t getIRSize(const Function &F) const { return F.getInstructionCount(); }
void onSuccessfulInlining(const MLInlineAdvice &Advice,
@@ -38,7 +38,6 @@ public:
bool isForcedToStop() const { return ForceStop; }
int64_t getLocalCalls(Function &F);
const MLModelRunner &getModelRunner() const { return *ModelRunner.get(); }
- void onModuleInvalidated() override { Invalid = true; }
protected:
std::unique_ptr<InlineAdvice> getAdviceImpl(CallBase &CB) override;
@@ -51,20 +50,32 @@ protected:
virtual std::unique_ptr<MLInlineAdvice>
getAdviceFromModel(CallBase &CB, OptimizationRemarkEmitter &ORE);
+ // Get the initial 'level' of the function, or 0 if the function has been
+ // introduced afterwards.
+ // TODO: should we keep this updated?
+ unsigned getInitialFunctionLevel(const Function &F) const;
+
std::unique_ptr<MLModelRunner> ModelRunner;
private:
int64_t getModuleIRSize() const;
- bool Invalid = true;
- std::unique_ptr<CallGraph> CG;
+ void print(raw_ostream &OS) const override {
+ OS << "[MLInlineAdvisor] Nodes: " << NodeCount << " Edges: " << EdgeCount
+ << "\n";
+ }
+
+ LazyCallGraph &CG;
int64_t NodeCount = 0;
int64_t EdgeCount = 0;
- std::map<const Function *, unsigned> FunctionLevels;
+ int64_t EdgesOfLastSeenNodes = 0;
+
+ std::map<const LazyCallGraph::Node *, unsigned> FunctionLevels;
const int32_t InitialIRSize = 0;
int32_t CurrentIRSize = 0;
-
+ std::deque<const LazyCallGraph::Node *> NodesInLastSCC;
+ DenseSet<const LazyCallGraph::Node *> AllNodes;
bool ForceStop = false;
};
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/MLModelRunner.h b/contrib/llvm-project/llvm/include/llvm/Analysis/MLModelRunner.h
index 90b3cc7e76e6..669c02af0b3b 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/MLModelRunner.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/MLModelRunner.h
@@ -41,15 +41,22 @@ public:
getTensorUntyped(static_cast<size_t>(FeatureID)));
}
-protected:
- MLModelRunner(LLVMContext &Ctx) : Ctx(Ctx) {}
- virtual void *evaluateUntyped() = 0;
virtual void *getTensorUntyped(size_t Index) = 0;
const void *getTensorUntyped(size_t Index) const {
return (const_cast<MLModelRunner *>(this))->getTensorUntyped(Index);
}
+ enum class Kind : int { Unknown, Release, Development, NoOp };
+ Kind getKind() const { return Type; }
+
+protected:
+ MLModelRunner(LLVMContext &Ctx, Kind Type) : Ctx(Ctx), Type(Type) {
+ assert(Type != Kind::Unknown);
+ }
+ virtual void *evaluateUntyped() = 0;
+
LLVMContext &Ctx;
+ const Kind Type;
};
} // namespace llvm
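// --- Illustrative sketch (annotation, not part of the upstream diff) ---
// The new Kind tag gives MLModelRunner LLVM-style RTTI (the classof hooks in
// the concrete runners build on it). A hypothetical helper dispatching on it:
#include "llvm/Analysis/MLModelRunner.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
static const char *describeRunner(const MLModelRunner &R) {
  switch (R.getKind()) {
  case MLModelRunner::Kind::Release:     return "AOT-compiled release model";
  case MLModelRunner::Kind::Development: return "model under training";
  case MLModelRunner::Kind::NoOp:        return "no-inference runner";
  case MLModelRunner::Kind::Unknown:     return "unknown";
  }
  llvm_unreachable("fully covered switch");
}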
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/MemoryBuiltins.h b/contrib/llvm-project/llvm/include/llvm/Analysis/MemoryBuiltins.h
index 94495a518042..d5b60ee540e0 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/MemoryBuiltins.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/MemoryBuiltins.h
@@ -30,7 +30,6 @@ namespace llvm {
class AllocaInst;
class Argument;
class CallInst;
-class ConstantInt;
class ConstantPointerNull;
class DataLayout;
class ExtractElementInst;
@@ -45,7 +44,6 @@ class IntToPtrInst;
class LLVMContext;
class LoadInst;
class PHINode;
-class PointerType;
class SelectInst;
class Type;
class UndefValue;
@@ -54,118 +52,26 @@ class Value;
/// Tests if a value is a call or invoke to a library function that
/// allocates or reallocates memory (either malloc, calloc, realloc, or strdup
/// like).
-bool isAllocationFn(const Value *V, const TargetLibraryInfo *TLI,
- bool LookThroughBitCast = false);
+bool isAllocationFn(const Value *V, const TargetLibraryInfo *TLI);
bool isAllocationFn(const Value *V,
- function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
- bool LookThroughBitCast = false);
-
-/// Tests if a value is a call or invoke to a function that returns a
-/// NoAlias pointer (including malloc/calloc/realloc/strdup-like functions).
-bool isNoAliasFn(const Value *V, const TargetLibraryInfo *TLI,
- bool LookThroughBitCast = false);
-
-/// Tests if a value is a call or invoke to a library function that
-/// allocates uninitialized memory (such as malloc).
-bool isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
- bool LookThroughBitCast = false);
-bool isMallocLikeFn(const Value *V,
- function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
- bool LookThroughBitCast = false);
-
-/// Tests if a value is a call or invoke to a library function that
-/// allocates uninitialized memory with alignment (such as aligned_alloc).
-bool isAlignedAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
- bool LookThroughBitCast = false);
-bool isAlignedAllocLikeFn(
- const Value *V, function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
- bool LookThroughBitCast = false);
-
-/// Tests if a value is a call or invoke to a library function that
-/// allocates zero-filled memory (such as calloc).
-bool isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
- bool LookThroughBitCast = false);
+ function_ref<const TargetLibraryInfo &(Function &)> GetTLI);
/// Tests if a value is a call or invoke to a library function that
/// allocates memory similar to malloc or calloc.
-bool isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
- bool LookThroughBitCast = false);
+bool isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI);
/// Tests if a value is a call or invoke to a library function that
/// allocates memory (either malloc, calloc, or strdup like).
-bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
- bool LookThroughBitCast = false);
+bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI);
/// Tests if a value is a call or invoke to a library function that
/// reallocates memory (e.g., realloc).
-bool isReallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
- bool LookThroughBitCast = false);
+bool isReallocLikeFn(const Value *V, const TargetLibraryInfo *TLI);
/// Tests if a function is a call or invoke to a library function that
/// reallocates memory (e.g., realloc).
bool isReallocLikeFn(const Function *F, const TargetLibraryInfo *TLI);
-/// Tests if a value is a call or invoke to a library function that
-/// allocates memory and throws if an allocation failed (e.g., new).
-bool isOpNewLikeFn(const Value *V, const TargetLibraryInfo *TLI,
- bool LookThroughBitCast = false);
-
-/// Tests if a value is a call or invoke to a library function that
-/// allocates memory (strdup, strndup).
-bool isStrdupLikeFn(const Value *V, const TargetLibraryInfo *TLI,
- bool LookThroughBitCast = false);
-
-//===----------------------------------------------------------------------===//
-// malloc Call Utility Functions.
-//
-
-/// extractMallocCall - Returns the corresponding CallInst if the instruction
-/// is a malloc call. Since CallInst::CreateMalloc() only creates calls, we
-/// ignore InvokeInst here.
-const CallInst *
-extractMallocCall(const Value *I,
- function_ref<const TargetLibraryInfo &(Function &)> GetTLI);
-inline CallInst *
-extractMallocCall(Value *I,
- function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
- return const_cast<CallInst *>(extractMallocCall((const Value *)I, GetTLI));
-}
-
-/// getMallocType - Returns the PointerType resulting from the malloc call.
-/// The PointerType depends on the number of bitcast uses of the malloc call:
-/// 0: PointerType is the malloc calls' return type.
-/// 1: PointerType is the bitcast's result type.
-/// >1: Unique PointerType cannot be determined, return NULL.
-PointerType *getMallocType(const CallInst *CI, const TargetLibraryInfo *TLI);
-
-/// getMallocAllocatedType - Returns the Type allocated by malloc call.
-/// The Type depends on the number of bitcast uses of the malloc call:
-/// 0: PointerType is the malloc calls' return type.
-/// 1: PointerType is the bitcast's result type.
-/// >1: Unique PointerType cannot be determined, return NULL.
-Type *getMallocAllocatedType(const CallInst *CI, const TargetLibraryInfo *TLI);
-
-/// getMallocArraySize - Returns the array size of a malloc call. If the
-/// argument passed to malloc is a multiple of the size of the malloced type,
-/// then return that multiple. For non-array mallocs, the multiple is
-/// constant 1. Otherwise, return NULL for mallocs whose array size cannot be
-/// determined.
-Value *getMallocArraySize(CallInst *CI, const DataLayout &DL,
- const TargetLibraryInfo *TLI,
- bool LookThroughSExt = false);
-
-//===----------------------------------------------------------------------===//
-// calloc Call Utility Functions.
-//
-
-/// extractCallocCall - Returns the corresponding CallInst if the instruction
-/// is a calloc call.
-const CallInst *extractCallocCall(const Value *I, const TargetLibraryInfo *TLI);
-inline CallInst *extractCallocCall(Value *I, const TargetLibraryInfo *TLI) {
- return const_cast<CallInst*>(extractCallocCall((const Value*)I, TLI));
-}
-
-
//===----------------------------------------------------------------------===//
// free Call Utility Functions.
//
@@ -181,6 +87,37 @@ inline CallInst *isFreeCall(Value *I, const TargetLibraryInfo *TLI) {
}
//===----------------------------------------------------------------------===//
+// Properties of allocation functions
+//
+
+/// Return false if the allocation can have side effects on the program state
+/// we are required to preserve beyond the effect of allocating a new object.
+/// Ex: If our allocation routine has a counter for the number of objects
+/// allocated, and the program prints it on exit, can the value change due
+/// to optimization? Answer is highly language dependent.
+/// Note: *Removable* really does mean removable; it does not mean observable.
+/// A language (e.g. C++) can allow removing allocations without allowing
+/// insertion or speculative execution of allocation routines.
+bool isAllocRemovable(const CallBase *V, const TargetLibraryInfo *TLI);
+
+/// Gets the alignment argument for an aligned_alloc-like function
+Value *getAllocAlignment(const CallBase *V, const TargetLibraryInfo *TLI);
+
+/// Return the size of the requested allocation. With a trivial mapper, this is
+/// identical to calling getObjectSize(..., Exact). A mapper function can be
+/// used to replace one Value* (operand to the allocation) with another. This
+/// is useful when doing abstract interpretation.
+Optional<APInt> getAllocSize(const CallBase *CB,
+ const TargetLibraryInfo *TLI,
+ std::function<const Value*(const Value*)> Mapper);
+
+/// If this allocation function initializes memory to a fixed value, return
+/// said value in the requested type. Otherwise, return nullptr.
+Constant *getInitialValueOfAllocation(const CallBase *Alloc,
+ const TargetLibraryInfo *TLI,
+ Type *Ty);
+
+//===----------------------------------------------------------------------===//
// Utility functions to compute size of objects.
//
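// --- Illustrative sketch (annotation, not part of the upstream diff) ---
// Querying the new allocation-property helpers, using an identity mapper for
// getAllocSize. Only the declarations above come from the header; the helper
// and its parameters are hypothetical.
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
using namespace llvm;
static void inspectAllocation(const CallBase *CB, const TargetLibraryInfo *TLI,
                              Type *Int8Ty) {
  // Allocation size in bytes, if it can be computed from the call operands.
  Optional<APInt> Size =
      getAllocSize(CB, TLI, [](const Value *V) { return V; });
  // Known initial content (e.g. zero for calloc-like functions), else null.
  Constant *Init = getInitialValueOfAllocation(CB, TLI, Int8Ty);
  (void)Size;
  (void)Init;
}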
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/MemoryLocation.h b/contrib/llvm-project/llvm/include/llvm/Analysis/MemoryLocation.h
index 833fce1b1726..23e50f601e04 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/MemoryLocation.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/MemoryLocation.h
@@ -284,8 +284,7 @@ public:
return T.isScalable() ? UnknownSize : T.getFixedSize();
}
- MemoryLocation()
- : Ptr(nullptr), Size(LocationSize::beforeOrAfterPointer()), AATags() {}
+ MemoryLocation() : Ptr(nullptr), Size(LocationSize::beforeOrAfterPointer()) {}
explicit MemoryLocation(const Value *Ptr, LocationSize Size,
const AAMDNodes &AATags = AAMDNodes())
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/MemorySSA.h b/contrib/llvm-project/llvm/include/llvm/Analysis/MemorySSA.h
index 48aeef371e3d..b41f5771bacd 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/MemorySSA.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/MemorySSA.h
@@ -106,7 +106,6 @@
namespace llvm {
-class AllocaInst;
class Function;
class Instruction;
class MemoryAccess;
@@ -866,7 +865,7 @@ private:
AccessList *getOrCreateAccessList(const BasicBlock *);
DefsList *getOrCreateDefsList(const BasicBlock *);
void renumberBlock(const BasicBlock *) const;
- AliasAnalysis *AA;
+ AliasAnalysis *AA = nullptr;
DominatorTree *DT;
Function &F;
@@ -893,7 +892,7 @@ private:
std::unique_ptr<ClobberWalkerBase<AliasAnalysis>> WalkerBase;
std::unique_ptr<CachingWalker<AliasAnalysis>> Walker;
std::unique_ptr<SkipSelfWalker<AliasAnalysis>> SkipWalker;
- unsigned NextID;
+ unsigned NextID = 0;
};
/// Enables verification of MemorySSA.
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/MemorySSAUpdater.h b/contrib/llvm-project/llvm/include/llvm/Analysis/MemorySSAUpdater.h
index 659e6aff6e28..3e5ebe9cb427 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/MemorySSAUpdater.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/MemorySSAUpdater.h
@@ -44,7 +44,6 @@
namespace llvm {
class BasicBlock;
-class BranchInst;
class DominatorTree;
class Instruction;
class LoopBlocksRPO;
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h b/contrib/llvm-project/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h
index ca99d5d01eef..071ccf96fe5b 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h
@@ -26,17 +26,11 @@ namespace llvm {
/// sacrificed for ease of use while training.
class ModelUnderTrainingRunner final : public MLModelRunner {
public:
- ModelUnderTrainingRunner(LLVMContext &Ctx, const std::string &ModelPath,
- const std::vector<TensorSpec> &InputSpecs,
- const std::vector<LoggedFeatureSpec> &OutputSpecs);
-
// Disallows copy and assign.
ModelUnderTrainingRunner(const ModelUnderTrainingRunner &) = delete;
ModelUnderTrainingRunner &
operator=(const ModelUnderTrainingRunner &) = delete;
- bool isValid() const { return !!Evaluator; }
-
const std::vector<LoggedFeatureSpec> &outputLoggedFeatureSpecs() const {
return OutputSpecs;
}
@@ -45,13 +39,27 @@ public:
lastEvaluationResult() const {
return LastEvaluationResult;
}
+ static bool classof(const MLModelRunner *R) {
+ return R->getKind() == MLModelRunner::Kind::Development;
+ }
+
+ static std::unique_ptr<ModelUnderTrainingRunner>
+ createAndEnsureValid(LLVMContext &Ctx, const std::string &ModelPath,
+ StringRef DecisionName,
+ const std::vector<TensorSpec> &InputSpecs,
+ StringRef OutputSpecsPathOverride = "");
private:
+ ModelUnderTrainingRunner(LLVMContext &Ctx, const std::string &ModelPath,
+ const std::vector<TensorSpec> &InputSpecs,
+ const std::vector<LoggedFeatureSpec> &OutputSpecs);
+
std::unique_ptr<TFModelEvaluator> Evaluator;
const std::vector<LoggedFeatureSpec> OutputSpecs;
Optional<TFModelEvaluator::EvaluationResult> LastEvaluationResult;
void *evaluateUntyped() override;
void *getTensorUntyped(size_t Index) override;
+ bool isValid() const { return !!Evaluator; }
};
} // namespace llvm
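// --- Illustrative sketch (annotation, not part of the upstream diff) ---
// Construction now goes through the validating factory instead of the (now
// private) constructor. This is only available in builds with LLVM_HAVE_TF_API;
// the decision name and the caller-supplied InputSpecs below are hypothetical.
#include "llvm/Analysis/ModelUnderTrainingRunner.h"
using namespace llvm;
static std::unique_ptr<MLModelRunner>
loadTrainingModel(LLVMContext &Ctx, const std::string &Path,
                  const std::vector<TensorSpec> &InputSpecs) {
  // Returns nullptr if the saved model cannot be loaded and validated.
  return ModelUnderTrainingRunner::createAndEnsureValid(
      Ctx, Path, "inlining_decision", InputSpecs);
}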
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/NoInferenceModelRunner.h b/contrib/llvm-project/llvm/include/llvm/Analysis/NoInferenceModelRunner.h
index 60d6777c765b..5bcedf98865c 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/NoInferenceModelRunner.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/NoInferenceModelRunner.h
@@ -26,6 +26,10 @@ public:
NoInferenceModelRunner(LLVMContext &Ctx,
const std::vector<TensorSpec> &Inputs);
+ static bool classof(const MLModelRunner *R) {
+ return R->getKind() == MLModelRunner::Kind::NoOp;
+ }
+
private:
void *evaluateUntyped() override {
llvm_unreachable("We shouldn't call run on this model runner.");
@@ -36,4 +40,4 @@ private:
};
} // namespace llvm
#endif // defined(LLVM_HAVE_TF_API)
-#endif // defined(LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H)
+#endif // LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/ObjCARCAliasAnalysis.h b/contrib/llvm-project/llvm/include/llvm/Analysis/ObjCARCAliasAnalysis.h
index b4f4e5f29768..d19a6394bd48 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/ObjCARCAliasAnalysis.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/ObjCARCAliasAnalysis.h
@@ -40,7 +40,7 @@ class ObjCARCAAResult : public AAResultBase<ObjCARCAAResult> {
const DataLayout &DL;
public:
- explicit ObjCARCAAResult(const DataLayout &DL) : AAResultBase(), DL(DL) {}
+ explicit ObjCARCAAResult(const DataLayout &DL) : DL(DL) {}
ObjCARCAAResult(ObjCARCAAResult &&Arg)
: AAResultBase(std::move(Arg)), DL(Arg.DL) {}
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/ObjCARCInstKind.h b/contrib/llvm-project/llvm/include/llvm/Analysis/ObjCARCInstKind.h
index 84565b9315c7..e332bcf88be7 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/ObjCARCInstKind.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/ObjCARCInstKind.h
@@ -28,7 +28,7 @@ namespace objcarc {
enum class ARCInstKind {
Retain, ///< objc_retain
RetainRV, ///< objc_retainAutoreleasedReturnValue
- ClaimRV, ///< objc_unsafeClaimAutoreleasedReturnValue
+ UnsafeClaimRV, ///< objc_unsafeClaimAutoreleasedReturnValue
RetainBlock, ///< objc_retainBlock
Release, ///< objc_release
Autorelease, ///< objc_autorelease
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/ObjCARCUtil.h b/contrib/llvm-project/llvm/include/llvm/Analysis/ObjCARCUtil.h
index 362dd6c29992..1d330ca58a87 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/ObjCARCUtil.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/ObjCARCUtil.h
@@ -48,15 +48,15 @@ inline Optional<Function *> getAttachedARCFunction(const CallBase *CB) {
return cast<Function>(B->Inputs[0]);
}
-/// Check whether the function is retainRV/claimRV.
+/// Check whether the function is retainRV/unsafeClaimRV.
inline bool isRetainOrClaimRV(ARCInstKind Kind) {
- return Kind == ARCInstKind::RetainRV || Kind == ARCInstKind::ClaimRV;
+ return Kind == ARCInstKind::RetainRV || Kind == ARCInstKind::UnsafeClaimRV;
}
/// This function returns the ARCInstKind of the function attached to operand
/// bundle clang_arc_attachedcall. It returns None if the call doesn't have the
/// operand bundle or the operand is null. Otherwise it returns either RetainRV
-/// or ClaimRV.
+/// or UnsafeClaimRV.
inline ARCInstKind getAttachedARCFunctionKind(const CallBase *CB) {
Optional<Function *> Fn = getAttachedARCFunction(CB);
if (!Fn.hasValue())
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/PHITransAddr.h b/contrib/llvm-project/llvm/include/llvm/Analysis/PHITransAddr.h
index 54a07f053478..a23f8e61c303 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/PHITransAddr.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/PHITransAddr.h
@@ -40,7 +40,7 @@ class PHITransAddr {
const DataLayout &DL;
/// TLI - The target library info if known, otherwise null.
- const TargetLibraryInfo *TLI;
+ const TargetLibraryInfo *TLI = nullptr;
/// A cache of \@llvm.assume calls used by SimplifyInstruction.
AssumptionCache *AC;
@@ -50,7 +50,7 @@ class PHITransAddr {
public:
PHITransAddr(Value *addr, const DataLayout &DL, AssumptionCache *AC)
- : Addr(addr), DL(DL), TLI(nullptr), AC(AC) {
+ : Addr(addr), DL(DL), AC(AC) {
// If the address is an instruction, the whole thing is considered an input.
if (Instruction *I = dyn_cast<Instruction>(Addr))
InstInputs.push_back(I);
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h b/contrib/llvm-project/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h
index b684f87ea5cb..1bf2e853980c 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h
@@ -10,6 +10,10 @@
// Only inference is supported.
//
//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_RELEASEMODEMODELRUNNER_H
+#define LLVM_ANALYSIS_RELEASEMODEMODELRUNNER_H
+
#include "llvm/Analysis/MLModelRunner.h"
#include <memory>
@@ -29,7 +33,8 @@ public:
ReleaseModeModelRunner(LLVMContext &Ctx, const FType &FeatureNames,
StringRef DecisionName, StringRef FeedPrefix = "feed_",
StringRef FetchPrefix = "fetch_")
- : MLModelRunner(Ctx), CompiledModel(std::make_unique<TGen>()) {
+ : MLModelRunner(Ctx, MLModelRunner::Kind::Release),
+ CompiledModel(std::make_unique<TGen>()) {
assert(CompiledModel && "The CompiledModel should be valid");
const size_t FeatureCount = FeatureNames.size();
@@ -49,6 +54,10 @@ public:
virtual ~ReleaseModeModelRunner() = default;
+ static bool classof(const MLModelRunner *R) {
+ return R->getKind() == MLModelRunner::Kind::Release;
+ }
+
private:
void *evaluateUntyped() override {
CompiledModel->Run();
@@ -65,3 +74,5 @@ private:
std::unique_ptr<TGen> CompiledModel;
};
} // namespace llvm
+
+#endif // LLVM_ANALYSIS_RELEASEMODEMODELRUNNER_H
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/ReplayInlineAdvisor.h b/contrib/llvm-project/llvm/include/llvm/Analysis/ReplayInlineAdvisor.h
index a0eb9af62205..dc2efeafb568 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/ReplayInlineAdvisor.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/ReplayInlineAdvisor.h
@@ -14,11 +14,9 @@
#include "llvm/IR/LLVMContext.h"
namespace llvm {
-class BasicBlock;
class CallBase;
class Function;
class Module;
-class OptimizationRemarkEmitter;
struct CallSiteFormat {
enum class Format : int {
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolution.h b/contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolution.h
index df50611832ce..1e6dac44cf2b 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -568,6 +568,7 @@ public:
const SCEV *getTruncateExpr(const SCEV *Op, Type *Ty, unsigned Depth = 0);
const SCEV *getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth = 0);
const SCEV *getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth = 0);
+ const SCEV *getCastExpr(SCEVTypes Kind, const SCEV *Op, Type *Ty);
const SCEV *getAnyExtendExpr(const SCEV *Op, Type *Ty);
const SCEV *getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
@@ -629,14 +630,18 @@ public:
const SCEV *getAbsExpr(const SCEV *Op, bool IsNSW);
const SCEV *getMinMaxExpr(SCEVTypes Kind,
SmallVectorImpl<const SCEV *> &Operands);
+ const SCEV *getSequentialMinMaxExpr(SCEVTypes Kind,
+ SmallVectorImpl<const SCEV *> &Operands);
const SCEV *getSMaxExpr(const SCEV *LHS, const SCEV *RHS);
const SCEV *getSMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
const SCEV *getUMaxExpr(const SCEV *LHS, const SCEV *RHS);
const SCEV *getUMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
const SCEV *getSMinExpr(const SCEV *LHS, const SCEV *RHS);
const SCEV *getSMinExpr(SmallVectorImpl<const SCEV *> &Operands);
- const SCEV *getUMinExpr(const SCEV *LHS, const SCEV *RHS);
- const SCEV *getUMinExpr(SmallVectorImpl<const SCEV *> &Operands);
+ const SCEV *getUMinExpr(const SCEV *LHS, const SCEV *RHS,
+ bool Sequential = false);
+ const SCEV *getUMinExpr(SmallVectorImpl<const SCEV *> &Operands,
+ bool Sequential = false);
const SCEV *getUnknown(Value *V);
const SCEV *getCouldNotCompute();
@@ -728,11 +733,13 @@ public:
/// Promote the operands to the wider of the types using zero-extension, and
/// then perform a umin operation with them.
- const SCEV *getUMinFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS);
+ const SCEV *getUMinFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS,
+ bool Sequential = false);
/// Promote the operands to the wider of the types using zero-extension, and
/// then perform a umin operation with them. N-ary function.
- const SCEV *getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops);
+ const SCEV *getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops,
+ bool Sequential = false);
/// Transitively follow the chain of pointer-type operands until reaching a
/// SCEV that does not have a single pointer operand. This returns a
@@ -1713,6 +1720,15 @@ private:
bool IsSubExpr,
bool AllowPredicates = false);
+ /// Variant of previous which takes the components representing an ICmp
+ /// as opposed to the ICmpInst itself. Note that the prior version can
+ /// return more precise results in some cases and is preferred when caller
+ /// has a materialized ICmp.
+ ExitLimit computeExitLimitFromICmp(const Loop *L, ICmpInst::Predicate Pred,
+ const SCEV *LHS, const SCEV *RHS,
+ bool IsSubExpr,
+ bool AllowPredicates = false);
+
/// Compute the number of times the backedge of the specified loop will
/// execute if its exit condition were a switch with a single exiting case
/// to ExitingBB.
@@ -1882,6 +1898,15 @@ private:
const SCEV *FoundLHS, const SCEV *FoundRHS,
unsigned Depth);
+ /// Test whether the condition described by Pred, LHS, and RHS is true
+ /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
+ /// true.
+ ///
+ /// This routine tries to reason about shifts.
+ bool isImpliedCondOperandsViaShift(ICmpInst::Predicate Pred, const SCEV *LHS,
+ const SCEV *RHS, const SCEV *FoundLHS,
+ const SCEV *FoundRHS);
+
/// If we know that the specified Phi is in the header of its containing
/// loop, we know the loop executes a constant number of times, and the PHI
/// node is just a recurrence involving constants, fold it.
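// --- Illustrative sketch (annotation, not part of the upstream diff) ---
// The new Sequential flag requests the sequential form of umin (cf. the
// getSequentialMinMaxExpr declaration above), which differs from a plain umin
// in how poison in later operands is treated. Hypothetical wrapper:
#include "llvm/Analysis/ScalarEvolution.h"
using namespace llvm;
static const SCEV *sequentialUMin(ScalarEvolution &SE, const SCEV *A,
                                  const SCEV *B) {
  return SE.getUMinExpr(A, B, /*Sequential=*/true);
}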
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h b/contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h
index 20acb407ead0..ebd427354cee 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h
@@ -27,7 +27,7 @@ class SCEVAAResult : public AAResultBase<SCEVAAResult> {
ScalarEvolution &SE;
public:
- explicit SCEVAAResult(ScalarEvolution &SE) : AAResultBase(), SE(SE) {}
+ explicit SCEVAAResult(ScalarEvolution &SE) : SE(SE) {}
SCEVAAResult(SCEVAAResult &&Arg) : AAResultBase(std::move(Arg)), SE(Arg.SE) {}
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolutionDivision.h b/contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolutionDivision.h
index 24f0c51487bd..7d5902d31795 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolutionDivision.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolutionDivision.h
@@ -42,6 +42,7 @@ public:
void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
void visitSMinExpr(const SCEVSMinExpr *Numerator) {}
void visitUMinExpr(const SCEVUMinExpr *Numerator) {}
+ void visitSequentialUMinExpr(const SCEVSequentialUMinExpr *Numerator) {}
void visitUnknown(const SCEVUnknown *Numerator) {}
void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h b/contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
index c0da311e4e48..cd8e5fab6766 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
@@ -35,883 +35,929 @@ class ConstantRange;
class Loop;
class Type;
- enum SCEVTypes : unsigned short {
- // These should be ordered in terms of increasing complexity to make the
- // folders simpler.
- scConstant, scTruncate, scZeroExtend, scSignExtend, scAddExpr, scMulExpr,
- scUDivExpr, scAddRecExpr, scUMaxExpr, scSMaxExpr, scUMinExpr, scSMinExpr,
- scPtrToInt, scUnknown, scCouldNotCompute
- };
-
- /// This class represents a constant integer value.
- class SCEVConstant : public SCEV {
- friend class ScalarEvolution;
-
- ConstantInt *V;
-
- SCEVConstant(const FoldingSetNodeIDRef ID, ConstantInt *v) :
- SCEV(ID, scConstant, 1), V(v) {}
-
- public:
- ConstantInt *getValue() const { return V; }
- const APInt &getAPInt() const { return getValue()->getValue(); }
-
- Type *getType() const { return V->getType(); }
-
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const SCEV *S) {
- return S->getSCEVType() == scConstant;
- }
- };
-
- inline unsigned short computeExpressionSize(ArrayRef<const SCEV *> Args) {
- APInt Size(16, 1);
- for (auto *Arg : Args)
- Size = Size.uadd_sat(APInt(16, Arg->getExpressionSize()));
- return (unsigned short)Size.getZExtValue();
+enum SCEVTypes : unsigned short {
+ // These should be ordered in terms of increasing complexity to make the
+ // folders simpler.
+ scConstant,
+ scTruncate,
+ scZeroExtend,
+ scSignExtend,
+ scAddExpr,
+ scMulExpr,
+ scUDivExpr,
+ scAddRecExpr,
+ scUMaxExpr,
+ scSMaxExpr,
+ scUMinExpr,
+ scSMinExpr,
+ scSequentialUMinExpr,
+ scPtrToInt,
+ scUnknown,
+ scCouldNotCompute
+};
+
+/// This class represents a constant integer value.
+class SCEVConstant : public SCEV {
+ friend class ScalarEvolution;
+
+ ConstantInt *V;
+
+ SCEVConstant(const FoldingSetNodeIDRef ID, ConstantInt *v)
+ : SCEV(ID, scConstant, 1), V(v) {}
+
+public:
+ ConstantInt *getValue() const { return V; }
+ const APInt &getAPInt() const { return getValue()->getValue(); }
+
+ Type *getType() const { return V->getType(); }
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const SCEV *S) { return S->getSCEVType() == scConstant; }
+};
+
+inline unsigned short computeExpressionSize(ArrayRef<const SCEV *> Args) {
+ APInt Size(16, 1);
+ for (auto *Arg : Args)
+ Size = Size.uadd_sat(APInt(16, Arg->getExpressionSize()));
+ return (unsigned short)Size.getZExtValue();
+}
+
+/// This is the base class for unary cast operator classes.
+class SCEVCastExpr : public SCEV {
+protected:
+ std::array<const SCEV *, 1> Operands;
+ Type *Ty;
+
+ SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, const SCEV *op,
+ Type *ty);
+
+public:
+ const SCEV *getOperand() const { return Operands[0]; }
+ const SCEV *getOperand(unsigned i) const {
+ assert(i == 0 && "Operand index out of range!");
+ return Operands[0];
}
+ using op_iterator = std::array<const SCEV *, 1>::const_iterator;
+ using op_range = iterator_range<op_iterator>;
- /// This is the base class for unary cast operator classes.
- class SCEVCastExpr : public SCEV {
- protected:
- std::array<const SCEV *, 1> Operands;
- Type *Ty;
-
- SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, const SCEV *op,
- Type *ty);
-
- public:
- const SCEV *getOperand() const { return Operands[0]; }
- const SCEV *getOperand(unsigned i) const {
- assert(i == 0 && "Operand index out of range!");
- return Operands[0];
- }
- using op_iterator = std::array<const SCEV *, 1>::const_iterator;
- using op_range = iterator_range<op_iterator>;
-
- op_range operands() const {
- return make_range(Operands.begin(), Operands.end());
- }
- size_t getNumOperands() const { return 1; }
- Type *getType() const { return Ty; }
-
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const SCEV *S) {
- return S->getSCEVType() == scPtrToInt || S->getSCEVType() == scTruncate ||
- S->getSCEVType() == scZeroExtend ||
- S->getSCEVType() == scSignExtend;
- }
- };
-
- /// This class represents a cast from a pointer to a pointer-sized integer
- /// value.
- class SCEVPtrToIntExpr : public SCEVCastExpr {
- friend class ScalarEvolution;
+ op_range operands() const {
+ return make_range(Operands.begin(), Operands.end());
+ }
+ size_t getNumOperands() const { return 1; }
+ Type *getType() const { return Ty; }
- SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op, Type *ITy);
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const SCEV *S) {
+ return S->getSCEVType() == scPtrToInt || S->getSCEVType() == scTruncate ||
+ S->getSCEVType() == scZeroExtend || S->getSCEVType() == scSignExtend;
+ }
+};
- public:
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const SCEV *S) {
- return S->getSCEVType() == scPtrToInt;
- }
- };
+/// This class represents a cast from a pointer to a pointer-sized integer
+/// value.
+class SCEVPtrToIntExpr : public SCEVCastExpr {
+ friend class ScalarEvolution;
- /// This is the base class for unary integral cast operator classes.
- class SCEVIntegralCastExpr : public SCEVCastExpr {
- protected:
- SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
- const SCEV *op, Type *ty);
-
- public:
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const SCEV *S) {
- return S->getSCEVType() == scTruncate ||
- S->getSCEVType() == scZeroExtend ||
- S->getSCEVType() == scSignExtend;
- }
- };
+ SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op, Type *ITy);
- /// This class represents a truncation of an integer value to a
- /// smaller integer value.
- class SCEVTruncateExpr : public SCEVIntegralCastExpr {
- friend class ScalarEvolution;
+public:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const SCEV *S) { return S->getSCEVType() == scPtrToInt; }
+};
- SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
- const SCEV *op, Type *ty);
+/// This is the base class for unary integral cast operator classes.
+class SCEVIntegralCastExpr : public SCEVCastExpr {
+protected:
+ SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
+ const SCEV *op, Type *ty);
- public:
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const SCEV *S) {
- return S->getSCEVType() == scTruncate;
- }
- };
+public:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const SCEV *S) {
+ return S->getSCEVType() == scTruncate || S->getSCEVType() == scZeroExtend ||
+ S->getSCEVType() == scSignExtend;
+ }
+};
- /// This class represents a zero extension of a small integer value
- /// to a larger integer value.
- class SCEVZeroExtendExpr : public SCEVIntegralCastExpr {
- friend class ScalarEvolution;
+/// This class represents a truncation of an integer value to a
+/// smaller integer value.
+class SCEVTruncateExpr : public SCEVIntegralCastExpr {
+ friend class ScalarEvolution;
- SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
- const SCEV *op, Type *ty);
+ SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op, Type *ty);
- public:
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const SCEV *S) {
- return S->getSCEVType() == scZeroExtend;
- }
- };
+public:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const SCEV *S) { return S->getSCEVType() == scTruncate; }
+};
- /// This class represents a sign extension of a small integer value
- /// to a larger integer value.
- class SCEVSignExtendExpr : public SCEVIntegralCastExpr {
- friend class ScalarEvolution;
+/// This class represents a zero extension of a small integer value
+/// to a larger integer value.
+class SCEVZeroExtendExpr : public SCEVIntegralCastExpr {
+ friend class ScalarEvolution;
- SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
- const SCEV *op, Type *ty);
+ SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID, const SCEV *op, Type *ty);
- public:
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const SCEV *S) {
- return S->getSCEVType() == scSignExtend;
- }
- };
+public:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const SCEV *S) {
+ return S->getSCEVType() == scZeroExtend;
+ }
+};
- /// This node is a base class providing common functionality for
- /// n'ary operators.
- class SCEVNAryExpr : public SCEV {
- protected:
- // Since SCEVs are immutable, ScalarEvolution allocates operand
- // arrays with its SCEVAllocator, so this class just needs a simple
- // pointer rather than a more elaborate vector-like data structure.
- // This also avoids the need for a non-trivial destructor.
- const SCEV *const *Operands;
- size_t NumOperands;
-
- SCEVNAryExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T,
- const SCEV *const *O, size_t N)
- : SCEV(ID, T, computeExpressionSize(makeArrayRef(O, N))), Operands(O),
- NumOperands(N) {}
+/// This class represents a sign extension of a small integer value
+/// to a larger integer value.
+class SCEVSignExtendExpr : public SCEVIntegralCastExpr {
+ friend class ScalarEvolution;
- public:
- size_t getNumOperands() const { return NumOperands; }
+ SCEVSignExtendExpr(const FoldingSetNodeIDRef ID, const SCEV *op, Type *ty);
- const SCEV *getOperand(unsigned i) const {
- assert(i < NumOperands && "Operand index out of range!");
- return Operands[i];
- }
+public:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const SCEV *S) {
+ return S->getSCEVType() == scSignExtend;
+ }
+};
+
+/// This node is a base class providing common functionality for
+/// n'ary operators.
+class SCEVNAryExpr : public SCEV {
+protected:
+ // Since SCEVs are immutable, ScalarEvolution allocates operand
+ // arrays with its SCEVAllocator, so this class just needs a simple
+ // pointer rather than a more elaborate vector-like data structure.
+ // This also avoids the need for a non-trivial destructor.
+ const SCEV *const *Operands;
+ size_t NumOperands;
+
+ SCEVNAryExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T,
+ const SCEV *const *O, size_t N)
+ : SCEV(ID, T, computeExpressionSize(makeArrayRef(O, N))), Operands(O),
+ NumOperands(N) {}
+
+public:
+ size_t getNumOperands() const { return NumOperands; }
+
+ const SCEV *getOperand(unsigned i) const {
+ assert(i < NumOperands && "Operand index out of range!");
+ return Operands[i];
+ }
- using op_iterator = const SCEV *const *;
- using op_range = iterator_range<op_iterator>;
+ using op_iterator = const SCEV *const *;
+ using op_range = iterator_range<op_iterator>;
- op_iterator op_begin() const { return Operands; }
- op_iterator op_end() const { return Operands + NumOperands; }
- op_range operands() const {
- return make_range(op_begin(), op_end());
- }
+ op_iterator op_begin() const { return Operands; }
+ op_iterator op_end() const { return Operands + NumOperands; }
+ op_range operands() const { return make_range(op_begin(), op_end()); }
- NoWrapFlags getNoWrapFlags(NoWrapFlags Mask = NoWrapMask) const {
- return (NoWrapFlags)(SubclassData & Mask);
- }
+ NoWrapFlags getNoWrapFlags(NoWrapFlags Mask = NoWrapMask) const {
+ return (NoWrapFlags)(SubclassData & Mask);
+ }
- bool hasNoUnsignedWrap() const {
- return getNoWrapFlags(FlagNUW) != FlagAnyWrap;
- }
+ bool hasNoUnsignedWrap() const {
+ return getNoWrapFlags(FlagNUW) != FlagAnyWrap;
+ }
- bool hasNoSignedWrap() const {
- return getNoWrapFlags(FlagNSW) != FlagAnyWrap;
- }
+ bool hasNoSignedWrap() const {
+ return getNoWrapFlags(FlagNSW) != FlagAnyWrap;
+ }
- bool hasNoSelfWrap() const {
- return getNoWrapFlags(FlagNW) != FlagAnyWrap;
- }
+ bool hasNoSelfWrap() const { return getNoWrapFlags(FlagNW) != FlagAnyWrap; }
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const SCEV *S) {
- return S->getSCEVType() == scAddExpr || S->getSCEVType() == scMulExpr ||
- S->getSCEVType() == scSMaxExpr || S->getSCEVType() == scUMaxExpr ||
- S->getSCEVType() == scSMinExpr || S->getSCEVType() == scUMinExpr ||
- S->getSCEVType() == scAddRecExpr;
- }
- };
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const SCEV *S) {
+ return S->getSCEVType() == scAddExpr || S->getSCEVType() == scMulExpr ||
+ S->getSCEVType() == scSMaxExpr || S->getSCEVType() == scUMaxExpr ||
+ S->getSCEVType() == scSMinExpr || S->getSCEVType() == scUMinExpr ||
+ S->getSCEVType() == scSequentialUMinExpr ||
+ S->getSCEVType() == scAddRecExpr;
+ }
+};
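Sketch of the operand interface defined above (hypothetical helper): operands() yields the op_begin()/op_end() range, so clients rarely need to index operands manually.

    bool allOperandsAreConstant(const SCEVNAryExpr *E) {
      for (const SCEV *Op : E->operands())   // same range as op_begin()..op_end()
        if (!isa<SCEVConstant>(Op))
          return false;
      return true;
    }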
- /// This node is the base class for n'ary commutative operators.
- class SCEVCommutativeExpr : public SCEVNAryExpr {
- protected:
- SCEVCommutativeExpr(const FoldingSetNodeIDRef ID,
- enum SCEVTypes T, const SCEV *const *O, size_t N)
+/// This node is the base class for n'ary commutative operators.
+class SCEVCommutativeExpr : public SCEVNAryExpr {
+protected:
+ SCEVCommutativeExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T,
+ const SCEV *const *O, size_t N)
: SCEVNAryExpr(ID, T, O, N) {}
- public:
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const SCEV *S) {
- return S->getSCEVType() == scAddExpr || S->getSCEVType() == scMulExpr ||
- S->getSCEVType() == scSMaxExpr || S->getSCEVType() == scUMaxExpr ||
- S->getSCEVType() == scSMinExpr || S->getSCEVType() == scUMinExpr;
- }
-
- /// Set flags for a non-recurrence without clearing previously set flags.
- void setNoWrapFlags(NoWrapFlags Flags) {
- SubclassData |= Flags;
- }
- };
+public:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const SCEV *S) {
+ return S->getSCEVType() == scAddExpr || S->getSCEVType() == scMulExpr ||
+ S->getSCEVType() == scSMaxExpr || S->getSCEVType() == scUMaxExpr ||
+ S->getSCEVType() == scSMinExpr || S->getSCEVType() == scUMinExpr;
+ }
- /// This node represents an addition of some number of SCEVs.
- class SCEVAddExpr : public SCEVCommutativeExpr {
- friend class ScalarEvolution;
-
- Type *Ty;
-
- SCEVAddExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
- : SCEVCommutativeExpr(ID, scAddExpr, O, N) {
- auto *FirstPointerTypedOp = find_if(operands(), [](const SCEV *Op) {
- return Op->getType()->isPointerTy();
- });
- if (FirstPointerTypedOp != operands().end())
- Ty = (*FirstPointerTypedOp)->getType();
- else
- Ty = getOperand(0)->getType();
- }
+ /// Set flags for a non-recurrence without clearing previously set flags.
+ void setNoWrapFlags(NoWrapFlags Flags) { SubclassData |= Flags; }
+};
+
+/// This node represents an addition of some number of SCEVs.
+class SCEVAddExpr : public SCEVCommutativeExpr {
+ friend class ScalarEvolution;
+
+ Type *Ty;
+
+ SCEVAddExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
+ : SCEVCommutativeExpr(ID, scAddExpr, O, N) {
+ auto *FirstPointerTypedOp = find_if(operands(), [](const SCEV *Op) {
+ return Op->getType()->isPointerTy();
+ });
+ if (FirstPointerTypedOp != operands().end())
+ Ty = (*FirstPointerTypedOp)->getType();
+ else
+ Ty = getOperand(0)->getType();
+ }
- public:
- Type *getType() const { return Ty; }
+public:
+ Type *getType() const { return Ty; }
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const SCEV *S) {
- return S->getSCEVType() == scAddExpr;
- }
- };
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const SCEV *S) { return S->getSCEVType() == scAddExpr; }
+};
- /// This node represents multiplication of some number of SCEVs.
- class SCEVMulExpr : public SCEVCommutativeExpr {
- friend class ScalarEvolution;
+/// This node represents multiplication of some number of SCEVs.
+class SCEVMulExpr : public SCEVCommutativeExpr {
+ friend class ScalarEvolution;
- SCEVMulExpr(const FoldingSetNodeIDRef ID,
- const SCEV *const *O, size_t N)
+ SCEVMulExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
: SCEVCommutativeExpr(ID, scMulExpr, O, N) {}
- public:
- Type *getType() const { return getOperand(0)->getType(); }
+public:
+ Type *getType() const { return getOperand(0)->getType(); }
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const SCEV *S) {
- return S->getSCEVType() == scMulExpr;
- }
- };
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const SCEV *S) { return S->getSCEVType() == scMulExpr; }
+};
- /// This class represents a binary unsigned division operation.
- class SCEVUDivExpr : public SCEV {
- friend class ScalarEvolution;
+/// This class represents a binary unsigned division operation.
+class SCEVUDivExpr : public SCEV {
+ friend class ScalarEvolution;
- std::array<const SCEV *, 2> Operands;
+ std::array<const SCEV *, 2> Operands;
- SCEVUDivExpr(const FoldingSetNodeIDRef ID, const SCEV *lhs, const SCEV *rhs)
- : SCEV(ID, scUDivExpr, computeExpressionSize({lhs, rhs})) {
- Operands[0] = lhs;
- Operands[1] = rhs;
- }
-
- public:
- const SCEV *getLHS() const { return Operands[0]; }
- const SCEV *getRHS() const { return Operands[1]; }
- size_t getNumOperands() const { return 2; }
- const SCEV *getOperand(unsigned i) const {
- assert((i == 0 || i == 1) && "Operand index out of range!");
- return i == 0 ? getLHS() : getRHS();
- }
+ SCEVUDivExpr(const FoldingSetNodeIDRef ID, const SCEV *lhs, const SCEV *rhs)
+ : SCEV(ID, scUDivExpr, computeExpressionSize({lhs, rhs})) {
+ Operands[0] = lhs;
+ Operands[1] = rhs;
+ }
- using op_iterator = std::array<const SCEV *, 2>::const_iterator;
- using op_range = iterator_range<op_iterator>;
- op_range operands() const {
- return make_range(Operands.begin(), Operands.end());
- }
+public:
+ const SCEV *getLHS() const { return Operands[0]; }
+ const SCEV *getRHS() const { return Operands[1]; }
+ size_t getNumOperands() const { return 2; }
+ const SCEV *getOperand(unsigned i) const {
+ assert((i == 0 || i == 1) && "Operand index out of range!");
+ return i == 0 ? getLHS() : getRHS();
+ }
- Type *getType() const {
- // In most cases the types of LHS and RHS will be the same, but in some
- // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
- // depend on the type for correctness, but handling types carefully can
- // avoid extra casts in the SCEVExpander. The LHS is more likely to be
- // a pointer type than the RHS, so use the RHS' type here.
- return getRHS()->getType();
- }
+ using op_iterator = std::array<const SCEV *, 2>::const_iterator;
+ using op_range = iterator_range<op_iterator>;
+ op_range operands() const {
+ return make_range(Operands.begin(), Operands.end());
+ }
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const SCEV *S) {
- return S->getSCEVType() == scUDivExpr;
- }
- };
+ Type *getType() const {
+ // In most cases the types of LHS and RHS will be the same, but in some
+ // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
+ // depend on the type for correctness, but handling types carefully can
+ // avoid extra casts in the SCEVExpander. The LHS is more likely to be
+ // a pointer type than the RHS, so use the RHS' type here.
+ return getRHS()->getType();
+ }
- /// This node represents a polynomial recurrence on the trip count
- /// of the specified loop. This is the primary focus of the
- /// ScalarEvolution framework; all the other SCEV subclasses are
- /// mostly just supporting infrastructure to allow SCEVAddRecExpr
- /// expressions to be created and analyzed.
- ///
- /// All operands of an AddRec are required to be loop invariant.
- ///
- class SCEVAddRecExpr : public SCEVNAryExpr {
- friend class ScalarEvolution;
-
- const Loop *L;
-
- SCEVAddRecExpr(const FoldingSetNodeIDRef ID,
- const SCEV *const *O, size_t N, const Loop *l)
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const SCEV *S) { return S->getSCEVType() == scUDivExpr; }
+};
+
+/// This node represents a polynomial recurrence on the trip count
+/// of the specified loop. This is the primary focus of the
+/// ScalarEvolution framework; all the other SCEV subclasses are
+/// mostly just supporting infrastructure to allow SCEVAddRecExpr
+/// expressions to be created and analyzed.
+///
+/// All operands of an AddRec are required to be loop invariant.
+///
+class SCEVAddRecExpr : public SCEVNAryExpr {
+ friend class ScalarEvolution;
+
+ const Loop *L;
+
+ SCEVAddRecExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N,
+ const Loop *l)
: SCEVNAryExpr(ID, scAddRecExpr, O, N), L(l) {}
- public:
- Type *getType() const { return getStart()->getType(); }
- const SCEV *getStart() const { return Operands[0]; }
- const Loop *getLoop() const { return L; }
-
- /// Constructs and returns the recurrence indicating how much this
- /// expression steps by. If this is a polynomial of degree N, it
- /// returns a chrec of degree N-1. We cannot determine whether
- /// the step recurrence has self-wraparound.
- const SCEV *getStepRecurrence(ScalarEvolution &SE) const {
- if (isAffine()) return getOperand(1);
- return SE.getAddRecExpr(SmallVector<const SCEV *, 3>(op_begin()+1,
- op_end()),
- getLoop(), FlagAnyWrap);
- }
-
- /// Return true if this represents an expression A + B*x where A
- /// and B are loop invariant values.
- bool isAffine() const {
- // We know that the start value is invariant. This expression is thus
- // affine iff the step is also invariant.
- return getNumOperands() == 2;
- }
+public:
+ Type *getType() const { return getStart()->getType(); }
+ const SCEV *getStart() const { return Operands[0]; }
+ const Loop *getLoop() const { return L; }
+
+ /// Constructs and returns the recurrence indicating how much this
+ /// expression steps by. If this is a polynomial of degree N, it
+ /// returns a chrec of degree N-1. We cannot determine whether
+ /// the step recurrence has self-wraparound.
+ const SCEV *getStepRecurrence(ScalarEvolution &SE) const {
+ if (isAffine())
+ return getOperand(1);
+ return SE.getAddRecExpr(
+ SmallVector<const SCEV *, 3>(op_begin() + 1, op_end()), getLoop(),
+ FlagAnyWrap);
+ }
- /// Return true if this represents an expression A + B*x + C*x^2
- /// where A, B and C are loop invariant values. This corresponds
- /// to an addrec of the form {L,+,M,+,N}
- bool isQuadratic() const {
- return getNumOperands() == 3;
- }
+ /// Return true if this represents an expression A + B*x where A
+ /// and B are loop invariant values.
+ bool isAffine() const {
+ // We know that the start value is invariant. This expression is thus
+ // affine iff the step is also invariant.
+ return getNumOperands() == 2;
+ }
- /// Set flags for a recurrence without clearing any previously set flags.
- /// For AddRec, either NUW or NSW implies NW. Keep track of this fact here
- /// to make it easier to propagate flags.
- void setNoWrapFlags(NoWrapFlags Flags) {
- if (Flags & (FlagNUW | FlagNSW))
- Flags = ScalarEvolution::setFlags(Flags, FlagNW);
- SubclassData |= Flags;
- }
+ /// Return true if this represents an expression A + B*x + C*x^2
+ /// where A, B and C are loop invariant values. This corresponds
+ /// to an addrec of the form {L,+,M,+,N}
+ bool isQuadratic() const { return getNumOperands() == 3; }
+
+ /// Set flags for a recurrence without clearing any previously set flags.
+ /// For AddRec, either NUW or NSW implies NW. Keep track of this fact here
+ /// to make it easier to propagate flags.
+ void setNoWrapFlags(NoWrapFlags Flags) {
+ if (Flags & (FlagNUW | FlagNSW))
+ Flags = ScalarEvolution::setFlags(Flags, FlagNW);
+ SubclassData |= Flags;
+ }
- /// Return the value of this chain of recurrences at the specified
- /// iteration number.
- const SCEV *evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const;
-
- /// Return the value of this chain of recurrences at the specified iteration
- /// number. Takes an explicit list of operands to represent an AddRec.
- static const SCEV *evaluateAtIteration(ArrayRef<const SCEV *> Operands,
- const SCEV *It, ScalarEvolution &SE);
-
- /// Return the number of iterations of this loop that produce
- /// values in the specified constant range. Another way of
- /// looking at this is that it returns the first iteration number
- /// where the value is not in the condition, thus computing the
- /// exit count. If the iteration count can't be computed, an
- /// instance of SCEVCouldNotCompute is returned.
- const SCEV *getNumIterationsInRange(const ConstantRange &Range,
- ScalarEvolution &SE) const;
-
- /// Return an expression representing the value of this expression
- /// one iteration of the loop ahead.
- const SCEVAddRecExpr *getPostIncExpr(ScalarEvolution &SE) const;
-
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const SCEV *S) {
- return S->getSCEVType() == scAddRecExpr;
- }
- };
+ /// Return the value of this chain of recurrences at the specified
+ /// iteration number.
+ const SCEV *evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const;
+
+ /// Return the value of this chain of recurrences at the specified iteration
+ /// number. Takes an explicit list of operands to represent an AddRec.
+ static const SCEV *evaluateAtIteration(ArrayRef<const SCEV *> Operands,
+ const SCEV *It, ScalarEvolution &SE);
+
+ /// Return the number of iterations of this loop that produce
+ /// values in the specified constant range. Another way of
+ /// looking at this is that it returns the first iteration number
+ /// where the value is not in the condition, thus computing the
+ /// exit count. If the iteration count can't be computed, an
+ /// instance of SCEVCouldNotCompute is returned.
+ const SCEV *getNumIterationsInRange(const ConstantRange &Range,
+ ScalarEvolution &SE) const;
+
+ /// Return an expression representing the value of this expression
+ /// one iteration of the loop ahead.
+ const SCEVAddRecExpr *getPostIncExpr(ScalarEvolution &SE) const;
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const SCEV *S) {
+ return S->getSCEVType() == scAddRecExpr;
+ }
+};
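Illustrative sketch only: for an affine AddRec {Start,+,Step}<L>, getStart() is operand 0 and getStepRecurrence() reduces to operand 1, as the accessors above document. affineStep is a hypothetical helper.

    const SCEV *affineStep(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
      // For higher-degree recurrences getStepRecurrence() would itself be an
      // AddRec; this sketch only handles the affine case.
      return AR->isAffine() ? AR->getStepRecurrence(SE) : nullptr;
    }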
- /// This node is the base class min/max selections.
- class SCEVMinMaxExpr : public SCEVCommutativeExpr {
- friend class ScalarEvolution;
+/// This node is the base class for min/max selections.
+class SCEVMinMaxExpr : public SCEVCommutativeExpr {
+ friend class ScalarEvolution;
- static bool isMinMaxType(enum SCEVTypes T) {
- return T == scSMaxExpr || T == scUMaxExpr || T == scSMinExpr ||
- T == scUMinExpr;
- }
+ static bool isMinMaxType(enum SCEVTypes T) {
+ return T == scSMaxExpr || T == scUMaxExpr || T == scSMinExpr ||
+ T == scUMinExpr;
+ }
- protected:
- /// Note: Constructing subclasses via this constructor is allowed
- SCEVMinMaxExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T,
- const SCEV *const *O, size_t N)
- : SCEVCommutativeExpr(ID, T, O, N) {
- assert(isMinMaxType(T));
- // Min and max never overflow
- setNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW));
- }
+protected:
+ /// Note: Constructing subclasses via this constructor is allowed
+ SCEVMinMaxExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T,
+ const SCEV *const *O, size_t N)
+ : SCEVCommutativeExpr(ID, T, O, N) {
+ assert(isMinMaxType(T));
+ // Min and max never overflow
+ setNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW));
+ }
- public:
- Type *getType() const { return getOperand(0)->getType(); }
+public:
+ Type *getType() const { return getOperand(0)->getType(); }
- static bool classof(const SCEV *S) {
- return isMinMaxType(S->getSCEVType());
- }
+ static bool classof(const SCEV *S) { return isMinMaxType(S->getSCEVType()); }
- static enum SCEVTypes negate(enum SCEVTypes T) {
- switch (T) {
- case scSMaxExpr:
- return scSMinExpr;
- case scSMinExpr:
- return scSMaxExpr;
- case scUMaxExpr:
- return scUMinExpr;
- case scUMinExpr:
- return scUMaxExpr;
- default:
- llvm_unreachable("Not a min or max SCEV type!");
- }
+ static enum SCEVTypes negate(enum SCEVTypes T) {
+ switch (T) {
+ case scSMaxExpr:
+ return scSMinExpr;
+ case scSMinExpr:
+ return scSMaxExpr;
+ case scUMaxExpr:
+ return scUMinExpr;
+ case scUMinExpr:
+ return scUMaxExpr;
+ default:
+ llvm_unreachable("Not a min or max SCEV type!");
}
- };
+ }
+};
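Tiny sketch (assumes <cassert> is available): negate() maps each min/max kind to its dual, which is useful when distributing a logical negation over a min/max expression.

    void checkMinMaxDuals() {
      assert(SCEVMinMaxExpr::negate(scSMaxExpr) == scSMinExpr);
      assert(SCEVMinMaxExpr::negate(scUMinExpr) == scUMaxExpr);
    }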
+
+/// This class represents a signed maximum selection.
+class SCEVSMaxExpr : public SCEVMinMaxExpr {
+ friend class ScalarEvolution;
+
+ SCEVSMaxExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
+ : SCEVMinMaxExpr(ID, scSMaxExpr, O, N) {}
+
+public:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const SCEV *S) { return S->getSCEVType() == scSMaxExpr; }
+};
+
+/// This class represents an unsigned maximum selection.
+class SCEVUMaxExpr : public SCEVMinMaxExpr {
+ friend class ScalarEvolution;
+
+ SCEVUMaxExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
+ : SCEVMinMaxExpr(ID, scUMaxExpr, O, N) {}
+
+public:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const SCEV *S) { return S->getSCEVType() == scUMaxExpr; }
+};
+
+/// This class represents a signed minimum selection.
+class SCEVSMinExpr : public SCEVMinMaxExpr {
+ friend class ScalarEvolution;
+
+ SCEVSMinExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
+ : SCEVMinMaxExpr(ID, scSMinExpr, O, N) {}
+
+public:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const SCEV *S) { return S->getSCEVType() == scSMinExpr; }
+};
+
+/// This class represents an unsigned minimum selection.
+class SCEVUMinExpr : public SCEVMinMaxExpr {
+ friend class ScalarEvolution;
+
+ SCEVUMinExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
+ : SCEVMinMaxExpr(ID, scUMinExpr, O, N) {}
+
+public:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const SCEV *S) { return S->getSCEVType() == scUMinExpr; }
+};
+
+/// This node is the base class for sequential/in-order min/max selections.
+/// Note that their fundamental difference from SCEVMinMaxExpr is that they
+/// are early-returning upon reaching the saturation point.
+/// I.e. given `0 umin_seq poison`, the result will be `0`,
+/// while the result of `0 umin poison` is `poison`.
+class SCEVSequentialMinMaxExpr : public SCEVNAryExpr {
+ friend class ScalarEvolution;
+
+ static bool isSequentialMinMaxType(enum SCEVTypes T) {
+ return T == scSequentialUMinExpr;
+ }
- /// This class represents a signed maximum selection.
- class SCEVSMaxExpr : public SCEVMinMaxExpr {
- friend class ScalarEvolution;
+ /// Set flags for a non-recurrence without clearing previously set flags.
+ void setNoWrapFlags(NoWrapFlags Flags) { SubclassData |= Flags; }
+
+protected:
+ /// Note: Constructing subclasses via this constructor is allowed
+ SCEVSequentialMinMaxExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T,
+ const SCEV *const *O, size_t N)
+ : SCEVNAryExpr(ID, T, O, N) {
+ assert(isSequentialMinMaxType(T));
+ // Min and max never overflow
+ setNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW));
+ }
- SCEVSMaxExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
- : SCEVMinMaxExpr(ID, scSMaxExpr, O, N) {}
+public:
+ Type *getType() const { return getOperand(0)->getType(); }
- public:
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const SCEV *S) {
- return S->getSCEVType() == scSMaxExpr;
+ static SCEVTypes getEquivalentNonSequentialSCEVType(SCEVTypes Ty) {
+ assert(isSequentialMinMaxType(Ty));
+ switch (Ty) {
+ case scSequentialUMinExpr:
+ return scUMinExpr;
+ default:
+ llvm_unreachable("Not a sequential min/max type.");
}
- };
-
- /// This class represents an unsigned maximum selection.
- class SCEVUMaxExpr : public SCEVMinMaxExpr {
- friend class ScalarEvolution;
-
- SCEVUMaxExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
- : SCEVMinMaxExpr(ID, scUMaxExpr, O, N) {}
+ }
- public:
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const SCEV *S) {
- return S->getSCEVType() == scUMaxExpr;
- }
- };
+ SCEVTypes getEquivalentNonSequentialSCEVType() const {
+ return getEquivalentNonSequentialSCEVType(getSCEVType());
+ }
- /// This class represents a signed minimum selection.
- class SCEVSMinExpr : public SCEVMinMaxExpr {
- friend class ScalarEvolution;
+ static bool classof(const SCEV *S) {
+ return isSequentialMinMaxType(S->getSCEVType());
+ }
+};
- SCEVSMinExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
- : SCEVMinMaxExpr(ID, scSMinExpr, O, N) {}
+/// This class represents a sequential/in-order unsigned minimum selection.
+class SCEVSequentialUMinExpr : public SCEVSequentialMinMaxExpr {
+ friend class ScalarEvolution;
- public:
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const SCEV *S) {
- return S->getSCEVType() == scSMinExpr;
- }
- };
+ SCEVSequentialUMinExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O,
+ size_t N)
+ : SCEVSequentialMinMaxExpr(ID, scSequentialUMinExpr, O, N) {}
- /// This class represents an unsigned minimum selection.
- class SCEVUMinExpr : public SCEVMinMaxExpr {
- friend class ScalarEvolution;
+public:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const SCEV *S) {
+ return S->getSCEVType() == scSequentialUMinExpr;
+ }
+};
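Editorial sketch of how a sequential umin is typically materialized; the getUMinExpr overload with a Sequential flag is the same one visitSequentialUMinExpr uses further down in this header. makeUMinSeq is a hypothetical helper.

    const SCEV *makeUMinSeq(ScalarEvolution &SE, const SCEV *A, const SCEV *B) {
      // `A umin_seq B`: if A already saturates to 0, B (even poison) is never
      // evaluated, unlike a plain umin.
      SmallVector<const SCEV *, 2> Ops = {A, B};
      return SE.getUMinExpr(Ops, /*Sequential=*/true);
    }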
+
+/// This means that we are dealing with an entirely unknown SCEV
+/// value, and only represent it as its LLVM Value. This is the
+/// "bottom" value for the analysis.
+class SCEVUnknown final : public SCEV, private CallbackVH {
+ friend class ScalarEvolution;
+
+ /// The parent ScalarEvolution value. This is used to update the
+ /// parent's maps when the value associated with a SCEVUnknown is
+ /// deleted or RAUW'd.
+ ScalarEvolution *SE;
+
+ /// The next pointer in the linked list of all SCEVUnknown
+ /// instances owned by a ScalarEvolution.
+ SCEVUnknown *Next;
+
+ SCEVUnknown(const FoldingSetNodeIDRef ID, Value *V, ScalarEvolution *se,
+ SCEVUnknown *next)
+ : SCEV(ID, scUnknown, 1), CallbackVH(V), SE(se), Next(next) {}
+
+ // Implement CallbackVH.
+ void deleted() override;
+ void allUsesReplacedWith(Value *New) override;
+
+public:
+ Value *getValue() const { return getValPtr(); }
+
+ /// @{
+ /// Test whether this is a special constant representing a type
+ /// size, alignment, or field offset in a target-independent
+ /// manner, and hasn't happened to have been folded with other
+ /// operations into something unrecognizable. This is mainly only
+ /// useful for pretty-printing and other situations where it isn't
+ /// absolutely required for these to succeed.
+ bool isSizeOf(Type *&AllocTy) const;
+ bool isAlignOf(Type *&AllocTy) const;
+ bool isOffsetOf(Type *&STy, Constant *&FieldNo) const;
+ /// @}
+
+ Type *getType() const { return getValPtr()->getType(); }
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const SCEV *S) { return S->getSCEVType() == scUnknown; }
+};
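Minimal sketch: a SCEVUnknown is the analysis "bottom", so recovering the original IR Value is just a dyn_cast plus getValue(). The helper name is illustrative.

    Value *underlyingValue(const SCEV *S) {
      if (const auto *U = dyn_cast<SCEVUnknown>(S))
        return U->getValue();   // the Value held by the CallbackVH
      return nullptr;
    }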
+
+/// This class defines a simple visitor class that may be used for
+/// various SCEV analysis purposes.
+template <typename SC, typename RetVal = void> struct SCEVVisitor {
+ RetVal visit(const SCEV *S) {
+ switch (S->getSCEVType()) {
+ case scConstant:
+ return ((SC *)this)->visitConstant((const SCEVConstant *)S);
+ case scPtrToInt:
+ return ((SC *)this)->visitPtrToIntExpr((const SCEVPtrToIntExpr *)S);
+ case scTruncate:
+ return ((SC *)this)->visitTruncateExpr((const SCEVTruncateExpr *)S);
+ case scZeroExtend:
+ return ((SC *)this)->visitZeroExtendExpr((const SCEVZeroExtendExpr *)S);
+ case scSignExtend:
+ return ((SC *)this)->visitSignExtendExpr((const SCEVSignExtendExpr *)S);
+ case scAddExpr:
+ return ((SC *)this)->visitAddExpr((const SCEVAddExpr *)S);
+ case scMulExpr:
+ return ((SC *)this)->visitMulExpr((const SCEVMulExpr *)S);
+ case scUDivExpr:
+ return ((SC *)this)->visitUDivExpr((const SCEVUDivExpr *)S);
+ case scAddRecExpr:
+ return ((SC *)this)->visitAddRecExpr((const SCEVAddRecExpr *)S);
+ case scSMaxExpr:
+ return ((SC *)this)->visitSMaxExpr((const SCEVSMaxExpr *)S);
+ case scUMaxExpr:
+ return ((SC *)this)->visitUMaxExpr((const SCEVUMaxExpr *)S);
+ case scSMinExpr:
+ return ((SC *)this)->visitSMinExpr((const SCEVSMinExpr *)S);
+ case scUMinExpr:
+ return ((SC *)this)->visitUMinExpr((const SCEVUMinExpr *)S);
+ case scSequentialUMinExpr:
+ return ((SC *)this)
+ ->visitSequentialUMinExpr((const SCEVSequentialUMinExpr *)S);
+ case scUnknown:
+ return ((SC *)this)->visitUnknown((const SCEVUnknown *)S);
+ case scCouldNotCompute:
+ return ((SC *)this)->visitCouldNotCompute((const SCEVCouldNotCompute *)S);
+ }
+ llvm_unreachable("Unknown SCEV kind!");
+ }
- SCEVUMinExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
- : SCEVMinMaxExpr(ID, scUMinExpr, O, N) {}
+ RetVal visitCouldNotCompute(const SCEVCouldNotCompute *S) {
+ llvm_unreachable("Invalid use of SCEVCouldNotCompute!");
+ }
+};
+
+/// Visit all nodes in the expression tree using worklist traversal.
+///
+/// Visitor implements:
+/// // return true to follow this node.
+/// bool follow(const SCEV *S);
+/// // return true to terminate the search.
+/// bool isDone();
+template <typename SV> class SCEVTraversal {
+ SV &Visitor;
+ SmallVector<const SCEV *, 8> Worklist;
+ SmallPtrSet<const SCEV *, 8> Visited;
+
+ void push(const SCEV *S) {
+ if (Visited.insert(S).second && Visitor.follow(S))
+ Worklist.push_back(S);
+ }
- public:
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const SCEV *S) {
- return S->getSCEVType() == scUMinExpr;
- }
- };
+public:
+ SCEVTraversal(SV &V) : Visitor(V) {}
- /// This means that we are dealing with an entirely unknown SCEV
- /// value, and only represent it as its LLVM Value. This is the
- /// "bottom" value for the analysis.
- class SCEVUnknown final : public SCEV, private CallbackVH {
- friend class ScalarEvolution;
-
- /// The parent ScalarEvolution value. This is used to update the
- /// parent's maps when the value associated with a SCEVUnknown is
- /// deleted or RAUW'd.
- ScalarEvolution *SE;
-
- /// The next pointer in the linked list of all SCEVUnknown
- /// instances owned by a ScalarEvolution.
- SCEVUnknown *Next;
-
- SCEVUnknown(const FoldingSetNodeIDRef ID, Value *V,
- ScalarEvolution *se, SCEVUnknown *next) :
- SCEV(ID, scUnknown, 1), CallbackVH(V), SE(se), Next(next) {}
-
- // Implement CallbackVH.
- void deleted() override;
- void allUsesReplacedWith(Value *New) override;
-
- public:
- Value *getValue() const { return getValPtr(); }
-
- /// @{
- /// Test whether this is a special constant representing a type
- /// size, alignment, or field offset in a target-independent
- /// manner, and hasn't happened to have been folded with other
- /// operations into something unrecognizable. This is mainly only
- /// useful for pretty-printing and other situations where it isn't
- /// absolutely required for these to succeed.
- bool isSizeOf(Type *&AllocTy) const;
- bool isAlignOf(Type *&AllocTy) const;
- bool isOffsetOf(Type *&STy, Constant *&FieldNo) const;
- /// @}
-
- Type *getType() const { return getValPtr()->getType(); }
-
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const SCEV *S) {
- return S->getSCEVType() == scUnknown;
- }
- };
+ void visitAll(const SCEV *Root) {
+ push(Root);
+ while (!Worklist.empty() && !Visitor.isDone()) {
+ const SCEV *S = Worklist.pop_back_val();
- /// This class defines a simple visitor class that may be used for
- /// various SCEV analysis purposes.
- template<typename SC, typename RetVal=void>
- struct SCEVVisitor {
- RetVal visit(const SCEV *S) {
switch (S->getSCEVType()) {
case scConstant:
- return ((SC*)this)->visitConstant((const SCEVConstant*)S);
+ case scUnknown:
+ continue;
case scPtrToInt:
- return ((SC *)this)->visitPtrToIntExpr((const SCEVPtrToIntExpr *)S);
case scTruncate:
- return ((SC*)this)->visitTruncateExpr((const SCEVTruncateExpr*)S);
case scZeroExtend:
- return ((SC*)this)->visitZeroExtendExpr((const SCEVZeroExtendExpr*)S);
case scSignExtend:
- return ((SC*)this)->visitSignExtendExpr((const SCEVSignExtendExpr*)S);
+ push(cast<SCEVCastExpr>(S)->getOperand());
+ continue;
case scAddExpr:
- return ((SC*)this)->visitAddExpr((const SCEVAddExpr*)S);
case scMulExpr:
- return ((SC*)this)->visitMulExpr((const SCEVMulExpr*)S);
- case scUDivExpr:
- return ((SC*)this)->visitUDivExpr((const SCEVUDivExpr*)S);
- case scAddRecExpr:
- return ((SC*)this)->visitAddRecExpr((const SCEVAddRecExpr*)S);
case scSMaxExpr:
- return ((SC*)this)->visitSMaxExpr((const SCEVSMaxExpr*)S);
case scUMaxExpr:
- return ((SC*)this)->visitUMaxExpr((const SCEVUMaxExpr*)S);
case scSMinExpr:
- return ((SC *)this)->visitSMinExpr((const SCEVSMinExpr *)S);
case scUMinExpr:
- return ((SC *)this)->visitUMinExpr((const SCEVUMinExpr *)S);
- case scUnknown:
- return ((SC*)this)->visitUnknown((const SCEVUnknown*)S);
+ case scSequentialUMinExpr:
+ case scAddRecExpr:
+ for (const auto *Op : cast<SCEVNAryExpr>(S)->operands())
+ push(Op);
+ continue;
+ case scUDivExpr: {
+ const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
+ push(UDiv->getLHS());
+ push(UDiv->getRHS());
+ continue;
+ }
case scCouldNotCompute:
- return ((SC*)this)->visitCouldNotCompute((const SCEVCouldNotCompute*)S);
+ llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
}
llvm_unreachable("Unknown SCEV kind!");
}
+ }
+};
- RetVal visitCouldNotCompute(const SCEVCouldNotCompute *S) {
- llvm_unreachable("Invalid use of SCEVCouldNotCompute!");
- }
- };
+/// Use SCEVTraversal to visit all nodes in the given expression tree.
+template <typename SV> void visitAll(const SCEV *Root, SV &Visitor) {
+ SCEVTraversal<SV> T(Visitor);
+ T.visitAll(Root);
+}
- /// Visit all nodes in the expression tree using worklist traversal.
- ///
- /// Visitor implements:
- /// // return true to follow this node.
- /// bool follow(const SCEV *S);
- /// // return true to terminate the search.
- /// bool isDone();
- template<typename SV>
- class SCEVTraversal {
- SV &Visitor;
- SmallVector<const SCEV *, 8> Worklist;
- SmallPtrSet<const SCEV *, 8> Visited;
-
- void push(const SCEV *S) {
- if (Visited.insert(S).second && Visitor.follow(S))
- Worklist.push_back(S);
- }
+/// Return true if any node in \p Root satisfies the predicate \p Pred.
+template <typename PredTy>
+bool SCEVExprContains(const SCEV *Root, PredTy Pred) {
+ struct FindClosure {
+ bool Found = false;
+ PredTy Pred;
- public:
- SCEVTraversal(SV& V): Visitor(V) {}
-
- void visitAll(const SCEV *Root) {
- push(Root);
- while (!Worklist.empty() && !Visitor.isDone()) {
- const SCEV *S = Worklist.pop_back_val();
-
- switch (S->getSCEVType()) {
- case scConstant:
- case scUnknown:
- continue;
- case scPtrToInt:
- case scTruncate:
- case scZeroExtend:
- case scSignExtend:
- push(cast<SCEVCastExpr>(S)->getOperand());
- continue;
- case scAddExpr:
- case scMulExpr:
- case scSMaxExpr:
- case scUMaxExpr:
- case scSMinExpr:
- case scUMinExpr:
- case scAddRecExpr:
- for (const auto *Op : cast<SCEVNAryExpr>(S)->operands())
- push(Op);
- continue;
- case scUDivExpr: {
- const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
- push(UDiv->getLHS());
- push(UDiv->getRHS());
- continue;
- }
- case scCouldNotCompute:
- llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
- }
- llvm_unreachable("Unknown SCEV kind!");
- }
+ FindClosure(PredTy Pred) : Pred(Pred) {}
+
+ bool follow(const SCEV *S) {
+ if (!Pred(S))
+ return true;
+
+ Found = true;
+ return false;
}
+
+ bool isDone() const { return Found; }
};
- /// Use SCEVTraversal to visit all nodes in the given expression tree.
- template<typename SV>
- void visitAll(const SCEV *Root, SV& Visitor) {
- SCEVTraversal<SV> T(Visitor);
- T.visitAll(Root);
+ FindClosure FC(Pred);
+ visitAll(Root, FC);
+ return FC.Found;
+}
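Usage sketch for the predicate-based helper above; the lambda decides which nodes count. referencesUnknown is hypothetical.

    bool referencesUnknown(const SCEV *Root) {
      return SCEVExprContains(
          Root, [](const SCEV *S) { return isa<SCEVUnknown>(S); });
    }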
+
+/// This visitor recursively visits a SCEV expression and re-writes it.
+/// The result from each visit is cached, so it will return the same
+/// SCEV for the same input.
+template <typename SC>
+class SCEVRewriteVisitor : public SCEVVisitor<SC, const SCEV *> {
+protected:
+ ScalarEvolution &SE;
+ // Memoize the result of each visit so that we only compute once for
+ // the same input SCEV. This is to avoid redundant computations when
+ // a SCEV is referenced by multiple SCEVs. Without memoization, this
+ // visit algorithm would have exponential time complexity in the worst
+ // case, causing the compiler to hang on certain tests.
+ DenseMap<const SCEV *, const SCEV *> RewriteResults;
+
+public:
+ SCEVRewriteVisitor(ScalarEvolution &SE) : SE(SE) {}
+
+ const SCEV *visit(const SCEV *S) {
+ auto It = RewriteResults.find(S);
+ if (It != RewriteResults.end())
+ return It->second;
+ auto *Visited = SCEVVisitor<SC, const SCEV *>::visit(S);
+ auto Result = RewriteResults.try_emplace(S, Visited);
+ assert(Result.second && "Should insert a new entry");
+ return Result.first->second;
}
- /// Return true if any node in \p Root satisfies the predicate \p Pred.
- template <typename PredTy>
- bool SCEVExprContains(const SCEV *Root, PredTy Pred) {
- struct FindClosure {
- bool Found = false;
- PredTy Pred;
-
- FindClosure(PredTy Pred) : Pred(Pred) {}
+ const SCEV *visitConstant(const SCEVConstant *Constant) { return Constant; }
- bool follow(const SCEV *S) {
- if (!Pred(S))
- return true;
-
- Found = true;
- return false;
- }
+ const SCEV *visitPtrToIntExpr(const SCEVPtrToIntExpr *Expr) {
+ const SCEV *Operand = ((SC *)this)->visit(Expr->getOperand());
+ return Operand == Expr->getOperand()
+ ? Expr
+ : SE.getPtrToIntExpr(Operand, Expr->getType());
+ }
- bool isDone() const { return Found; }
- };
-
- FindClosure FC(Pred);
- visitAll(Root, FC);
- return FC.Found;
- }
-
- /// This visitor recursively visits a SCEV expression and re-writes it.
- /// The result from each visit is cached, so it will return the same
- /// SCEV for the same input.
- template<typename SC>
- class SCEVRewriteVisitor : public SCEVVisitor<SC, const SCEV *> {
- protected:
- ScalarEvolution &SE;
- // Memoize the result of each visit so that we only compute once for
- // the same input SCEV. This is to avoid redundant computations when
- // a SCEV is referenced by multiple SCEVs. Without memoization, this
- // visit algorithm would have exponential time complexity in the worst
- // case, causing the compiler to hang on certain tests.
- DenseMap<const SCEV *, const SCEV *> RewriteResults;
-
- public:
- SCEVRewriteVisitor(ScalarEvolution &SE) : SE(SE) {}
-
- const SCEV *visit(const SCEV *S) {
- auto It = RewriteResults.find(S);
- if (It != RewriteResults.end())
- return It->second;
- auto* Visited = SCEVVisitor<SC, const SCEV *>::visit(S);
- auto Result = RewriteResults.try_emplace(S, Visited);
- assert(Result.second && "Should insert a new entry");
- return Result.first->second;
- }
+ const SCEV *visitTruncateExpr(const SCEVTruncateExpr *Expr) {
+ const SCEV *Operand = ((SC *)this)->visit(Expr->getOperand());
+ return Operand == Expr->getOperand()
+ ? Expr
+ : SE.getTruncateExpr(Operand, Expr->getType());
+ }
- const SCEV *visitConstant(const SCEVConstant *Constant) {
- return Constant;
- }
+ const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
+ const SCEV *Operand = ((SC *)this)->visit(Expr->getOperand());
+ return Operand == Expr->getOperand()
+ ? Expr
+ : SE.getZeroExtendExpr(Operand, Expr->getType());
+ }
- const SCEV *visitPtrToIntExpr(const SCEVPtrToIntExpr *Expr) {
- const SCEV *Operand = ((SC *)this)->visit(Expr->getOperand());
- return Operand == Expr->getOperand()
- ? Expr
- : SE.getPtrToIntExpr(Operand, Expr->getType());
- }
+ const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
+ const SCEV *Operand = ((SC *)this)->visit(Expr->getOperand());
+ return Operand == Expr->getOperand()
+ ? Expr
+ : SE.getSignExtendExpr(Operand, Expr->getType());
+ }
- const SCEV *visitTruncateExpr(const SCEVTruncateExpr *Expr) {
- const SCEV *Operand = ((SC*)this)->visit(Expr->getOperand());
- return Operand == Expr->getOperand()
- ? Expr
- : SE.getTruncateExpr(Operand, Expr->getType());
+ const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
+ SmallVector<const SCEV *, 2> Operands;
+ bool Changed = false;
+ for (auto *Op : Expr->operands()) {
+ Operands.push_back(((SC *)this)->visit(Op));
+ Changed |= Op != Operands.back();
}
+ return !Changed ? Expr : SE.getAddExpr(Operands);
+ }
- const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
- const SCEV *Operand = ((SC*)this)->visit(Expr->getOperand());
- return Operand == Expr->getOperand()
- ? Expr
- : SE.getZeroExtendExpr(Operand, Expr->getType());
+ const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
+ SmallVector<const SCEV *, 2> Operands;
+ bool Changed = false;
+ for (auto *Op : Expr->operands()) {
+ Operands.push_back(((SC *)this)->visit(Op));
+ Changed |= Op != Operands.back();
}
+ return !Changed ? Expr : SE.getMulExpr(Operands);
+ }
- const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
- const SCEV *Operand = ((SC*)this)->visit(Expr->getOperand());
- return Operand == Expr->getOperand()
- ? Expr
- : SE.getSignExtendExpr(Operand, Expr->getType());
- }
+ const SCEV *visitUDivExpr(const SCEVUDivExpr *Expr) {
+ auto *LHS = ((SC *)this)->visit(Expr->getLHS());
+ auto *RHS = ((SC *)this)->visit(Expr->getRHS());
+ bool Changed = LHS != Expr->getLHS() || RHS != Expr->getRHS();
+ return !Changed ? Expr : SE.getUDivExpr(LHS, RHS);
+ }
- const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
- bool Changed = false;
- for (auto *Op : Expr->operands()) {
- Operands.push_back(((SC*)this)->visit(Op));
- Changed |= Op != Operands.back();
- }
- return !Changed ? Expr : SE.getAddExpr(Operands);
+ const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
+ SmallVector<const SCEV *, 2> Operands;
+ bool Changed = false;
+ for (auto *Op : Expr->operands()) {
+ Operands.push_back(((SC *)this)->visit(Op));
+ Changed |= Op != Operands.back();
}
+ return !Changed ? Expr
+ : SE.getAddRecExpr(Operands, Expr->getLoop(),
+ Expr->getNoWrapFlags());
+ }
- const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
- bool Changed = false;
- for (auto *Op : Expr->operands()) {
- Operands.push_back(((SC*)this)->visit(Op));
- Changed |= Op != Operands.back();
- }
- return !Changed ? Expr : SE.getMulExpr(Operands);
+ const SCEV *visitSMaxExpr(const SCEVSMaxExpr *Expr) {
+ SmallVector<const SCEV *, 2> Operands;
+ bool Changed = false;
+ for (auto *Op : Expr->operands()) {
+ Operands.push_back(((SC *)this)->visit(Op));
+ Changed |= Op != Operands.back();
}
+ return !Changed ? Expr : SE.getSMaxExpr(Operands);
+ }
- const SCEV *visitUDivExpr(const SCEVUDivExpr *Expr) {
- auto *LHS = ((SC *)this)->visit(Expr->getLHS());
- auto *RHS = ((SC *)this)->visit(Expr->getRHS());
- bool Changed = LHS != Expr->getLHS() || RHS != Expr->getRHS();
- return !Changed ? Expr : SE.getUDivExpr(LHS, RHS);
+ const SCEV *visitUMaxExpr(const SCEVUMaxExpr *Expr) {
+ SmallVector<const SCEV *, 2> Operands;
+ bool Changed = false;
+ for (auto *Op : Expr->operands()) {
+ Operands.push_back(((SC *)this)->visit(Op));
+ Changed |= Op != Operands.back();
}
+ return !Changed ? Expr : SE.getUMaxExpr(Operands);
+ }
- const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
- bool Changed = false;
- for (auto *Op : Expr->operands()) {
- Operands.push_back(((SC*)this)->visit(Op));
- Changed |= Op != Operands.back();
- }
- return !Changed ? Expr
- : SE.getAddRecExpr(Operands, Expr->getLoop(),
- Expr->getNoWrapFlags());
+ const SCEV *visitSMinExpr(const SCEVSMinExpr *Expr) {
+ SmallVector<const SCEV *, 2> Operands;
+ bool Changed = false;
+ for (auto *Op : Expr->operands()) {
+ Operands.push_back(((SC *)this)->visit(Op));
+ Changed |= Op != Operands.back();
}
+ return !Changed ? Expr : SE.getSMinExpr(Operands);
+ }
- const SCEV *visitSMaxExpr(const SCEVSMaxExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
- bool Changed = false;
- for (auto *Op : Expr->operands()) {
- Operands.push_back(((SC *)this)->visit(Op));
- Changed |= Op != Operands.back();
- }
- return !Changed ? Expr : SE.getSMaxExpr(Operands);
+ const SCEV *visitUMinExpr(const SCEVUMinExpr *Expr) {
+ SmallVector<const SCEV *, 2> Operands;
+ bool Changed = false;
+ for (auto *Op : Expr->operands()) {
+ Operands.push_back(((SC *)this)->visit(Op));
+ Changed |= Op != Operands.back();
}
+ return !Changed ? Expr : SE.getUMinExpr(Operands);
+ }
- const SCEV *visitUMaxExpr(const SCEVUMaxExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
- bool Changed = false;
- for (auto *Op : Expr->operands()) {
- Operands.push_back(((SC*)this)->visit(Op));
- Changed |= Op != Operands.back();
- }
- return !Changed ? Expr : SE.getUMaxExpr(Operands);
+ const SCEV *visitSequentialUMinExpr(const SCEVSequentialUMinExpr *Expr) {
+ SmallVector<const SCEV *, 2> Operands;
+ bool Changed = false;
+ for (auto *Op : Expr->operands()) {
+ Operands.push_back(((SC *)this)->visit(Op));
+ Changed |= Op != Operands.back();
}
+ return !Changed ? Expr : SE.getUMinExpr(Operands, /*Sequential=*/true);
+ }
- const SCEV *visitSMinExpr(const SCEVSMinExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
- bool Changed = false;
- for (auto *Op : Expr->operands()) {
- Operands.push_back(((SC *)this)->visit(Op));
- Changed |= Op != Operands.back();
- }
- return !Changed ? Expr : SE.getSMinExpr(Operands);
- }
+ const SCEV *visitUnknown(const SCEVUnknown *Expr) { return Expr; }
- const SCEV *visitUMinExpr(const SCEVUMinExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
- bool Changed = false;
- for (auto *Op : Expr->operands()) {
- Operands.push_back(((SC *)this)->visit(Op));
- Changed |= Op != Operands.back();
- }
- return !Changed ? Expr : SE.getUMinExpr(Operands);
- }
+ const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
+ return Expr;
+ }
+};
+
+using ValueToValueMap = DenseMap<const Value *, Value *>;
+using ValueToSCEVMapTy = DenseMap<const Value *, const SCEV *>;
+
+/// The SCEVParameterRewriter takes a scalar evolution expression and updates
+/// the SCEVUnknown components following the Map (Value -> SCEV).
+class SCEVParameterRewriter : public SCEVRewriteVisitor<SCEVParameterRewriter> {
+public:
+ static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE,
+ ValueToSCEVMapTy &Map) {
+ SCEVParameterRewriter Rewriter(SE, Map);
+ return Rewriter.visit(Scev);
+ }
- const SCEV *visitUnknown(const SCEVUnknown *Expr) {
- return Expr;
- }
+ SCEVParameterRewriter(ScalarEvolution &SE, ValueToSCEVMapTy &M)
+ : SCEVRewriteVisitor(SE), Map(M) {}
- const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
+ const SCEV *visitUnknown(const SCEVUnknown *Expr) {
+ auto I = Map.find(Expr->getValue());
+ if (I == Map.end())
return Expr;
- }
- };
-
- using ValueToValueMap = DenseMap<const Value *, Value *>;
- using ValueToSCEVMapTy = DenseMap<const Value *, const SCEV *>;
-
- /// The SCEVParameterRewriter takes a scalar evolution expression and updates
- /// the SCEVUnknown components following the Map (Value -> SCEV).
- class SCEVParameterRewriter : public SCEVRewriteVisitor<SCEVParameterRewriter> {
- public:
- static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE,
- ValueToSCEVMapTy &Map) {
- SCEVParameterRewriter Rewriter(SE, Map);
- return Rewriter.visit(Scev);
- }
-
- SCEVParameterRewriter(ScalarEvolution &SE, ValueToSCEVMapTy &M)
- : SCEVRewriteVisitor(SE), Map(M) {}
-
- const SCEV *visitUnknown(const SCEVUnknown *Expr) {
- auto I = Map.find(Expr->getValue());
- if (I == Map.end())
- return Expr;
- return I->second;
- }
+ return I->second;
+ }
- private:
- ValueToSCEVMapTy &Map;
- };
+private:
+ ValueToSCEVMapTy &Map;
+};
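Illustrative only (names are hypothetical): replacing one parameter, i.e. the SCEVUnknown wrapping a given Value, before re-querying the expression.

    const SCEV *substituteParam(ScalarEvolution &SE, const SCEV *Expr,
                                Value *Param, const SCEV *Replacement) {
      ValueToSCEVMapTy Map;
      Map[Param] = Replacement;
      return SCEVParameterRewriter::rewrite(Expr, SE, Map);
    }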
- using LoopToScevMapT = DenseMap<const Loop *, const SCEV *>;
+using LoopToScevMapT = DenseMap<const Loop *, const SCEV *>;
- /// The SCEVLoopAddRecRewriter takes a scalar evolution expression and applies
- /// the Map (Loop -> SCEV) to all AddRecExprs.
- class SCEVLoopAddRecRewriter
- : public SCEVRewriteVisitor<SCEVLoopAddRecRewriter> {
- public:
- SCEVLoopAddRecRewriter(ScalarEvolution &SE, LoopToScevMapT &M)
- : SCEVRewriteVisitor(SE), Map(M) {}
+/// The SCEVLoopAddRecRewriter takes a scalar evolution expression and applies
+/// the Map (Loop -> SCEV) to all AddRecExprs.
+class SCEVLoopAddRecRewriter
+ : public SCEVRewriteVisitor<SCEVLoopAddRecRewriter> {
+public:
+ SCEVLoopAddRecRewriter(ScalarEvolution &SE, LoopToScevMapT &M)
+ : SCEVRewriteVisitor(SE), Map(M) {}
- static const SCEV *rewrite(const SCEV *Scev, LoopToScevMapT &Map,
- ScalarEvolution &SE) {
- SCEVLoopAddRecRewriter Rewriter(SE, Map);
- return Rewriter.visit(Scev);
- }
+ static const SCEV *rewrite(const SCEV *Scev, LoopToScevMapT &Map,
+ ScalarEvolution &SE) {
+ SCEVLoopAddRecRewriter Rewriter(SE, Map);
+ return Rewriter.visit(Scev);
+ }
- const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
- SmallVector<const SCEV *, 2> Operands;
- for (const SCEV *Op : Expr->operands())
- Operands.push_back(visit(Op));
+ const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
+ SmallVector<const SCEV *, 2> Operands;
+ for (const SCEV *Op : Expr->operands())
+ Operands.push_back(visit(Op));
- const Loop *L = Expr->getLoop();
- if (0 == Map.count(L))
- return SE.getAddRecExpr(Operands, L, Expr->getNoWrapFlags());
+ const Loop *L = Expr->getLoop();
+ if (0 == Map.count(L))
+ return SE.getAddRecExpr(Operands, L, Expr->getNoWrapFlags());
- return SCEVAddRecExpr::evaluateAtIteration(Operands, Map[L], SE);
- }
+ return SCEVAddRecExpr::evaluateAtIteration(Operands, Map[L], SE);
+ }
- private:
- LoopToScevMapT &Map;
- };
+private:
+ LoopToScevMapT &Map;
+};
} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/SyncDependenceAnalysis.h b/contrib/llvm-project/llvm/include/llvm/Analysis/SyncDependenceAnalysis.h
index 92459ea79ab4..cfc1e20255d1 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/SyncDependenceAnalysis.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/SyncDependenceAnalysis.h
@@ -20,6 +20,7 @@
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/LoopInfo.h"
+#include <map>
#include <memory>
#include <unordered_map>
@@ -27,7 +28,6 @@ namespace llvm {
class BasicBlock;
class DominatorTree;
-class Loop;
class PostDominatorTree;
using ConstBlockSet = SmallPtrSet<const BasicBlock *, 4>;
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/TargetFolder.h b/contrib/llvm-project/llvm/include/llvm/Analysis/TargetFolder.h
index b23316ac3d9b..1df0530e40e6 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/TargetFolder.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/TargetFolder.h
@@ -43,13 +43,72 @@ public:
explicit TargetFolder(const DataLayout &DL) : DL(DL) {}
//===--------------------------------------------------------------------===//
- // Binary Operators
+ // Value-based folders.
+ //
+ // Return an existing value or a constant if the operation can be simplified.
+ // Otherwise return nullptr.
//===--------------------------------------------------------------------===//
+ Value *FoldAdd(Value *LHS, Value *RHS, bool HasNUW = false,
+ bool HasNSW = false) const override {
+ auto *LC = dyn_cast<Constant>(LHS);
+ auto *RC = dyn_cast<Constant>(RHS);
+ if (LC && RC)
+ return Fold(ConstantExpr::getAdd(LC, RC, HasNUW, HasNSW));
+ return nullptr;
+ }
- Constant *CreateAdd(Constant *LHS, Constant *RHS,
- bool HasNUW = false, bool HasNSW = false) const override {
- return Fold(ConstantExpr::getAdd(LHS, RHS, HasNUW, HasNSW));
+ Value *FoldAnd(Value *LHS, Value *RHS) const override {
+ auto *LC = dyn_cast<Constant>(LHS);
+ auto *RC = dyn_cast<Constant>(RHS);
+ if (LC && RC)
+ return Fold(ConstantExpr::getAnd(LC, RC));
+ return nullptr;
+ }
+
+ Value *FoldOr(Value *LHS, Value *RHS) const override {
+ auto *LC = dyn_cast<Constant>(LHS);
+ auto *RC = dyn_cast<Constant>(RHS);
+ if (LC && RC)
+ return Fold(ConstantExpr::getOr(LC, RC));
+ return nullptr;
+ }
+
+ Value *FoldICmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
+ auto *LC = dyn_cast<Constant>(LHS);
+ auto *RC = dyn_cast<Constant>(RHS);
+ if (LC && RC)
+ return ConstantExpr::getCompare(P, LC, RC);
+ return nullptr;
+ }
+
+ Value *FoldGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
+ bool IsInBounds = false) const override {
+ if (auto *PC = dyn_cast<Constant>(Ptr)) {
+ // Every index must be constant.
+ if (any_of(IdxList, [](Value *V) { return !isa<Constant>(V); }))
+ return nullptr;
+ if (IsInBounds)
+ return Fold(ConstantExpr::getInBoundsGetElementPtr(Ty, PC, IdxList));
+ else
+ return Fold(ConstantExpr::getGetElementPtr(Ty, PC, IdxList));
+ }
+ return nullptr;
}
+
+ Value *FoldSelect(Value *C, Value *True, Value *False) const override {
+ auto *CC = dyn_cast<Constant>(C);
+ auto *TC = dyn_cast<Constant>(True);
+ auto *FC = dyn_cast<Constant>(False);
+ if (CC && TC && FC)
+ return Fold(ConstantExpr::getSelect(CC, TC, FC));
+
+ return nullptr;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Binary Operators
+ //===--------------------------------------------------------------------===//
+
Constant *CreateFAdd(Constant *LHS, Constant *RHS) const override {
return Fold(ConstantExpr::getFAdd(LHS, RHS));
}
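Editorial sketch of the value-based contract introduced above: FoldAdd and friends return a folded Value only when both operands are already Constants, and nullptr otherwise, so the caller (typically IRBuilder) knows it must emit a real instruction. The helper name is hypothetical.

    Value *foldOrNull(const TargetFolder &Folder, Value *L, Value *R) {
      // Non-constant operands make FoldAdd return nullptr by design.
      return Folder.FoldAdd(L, R, /*HasNUW=*/false, /*HasNSW=*/false);
    }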
@@ -99,12 +158,6 @@ public:
bool isExact = false) const override {
return Fold(ConstantExpr::getAShr(LHS, RHS, isExact));
}
- Constant *CreateAnd(Constant *LHS, Constant *RHS) const override {
- return Fold(ConstantExpr::getAnd(LHS, RHS));
- }
- Constant *CreateOr(Constant *LHS, Constant *RHS) const override {
- return Fold(ConstantExpr::getOr(LHS, RHS));
- }
Constant *CreateXor(Constant *LHS, Constant *RHS) const override {
return Fold(ConstantExpr::getXor(LHS, RHS));
}
@@ -134,42 +187,6 @@ public:
}
//===--------------------------------------------------------------------===//
- // Memory Instructions
- //===--------------------------------------------------------------------===//
-
- Constant *CreateGetElementPtr(Type *Ty, Constant *C,
- ArrayRef<Constant *> IdxList) const override {
- return Fold(ConstantExpr::getGetElementPtr(Ty, C, IdxList));
- }
- Constant *CreateGetElementPtr(Type *Ty, Constant *C,
- Constant *Idx) const override {
- // This form of the function only exists to avoid ambiguous overload
- // warnings about whether to convert Idx to ArrayRef<Constant *> or
- // ArrayRef<Value *>.
- return Fold(ConstantExpr::getGetElementPtr(Ty, C, Idx));
- }
- Constant *CreateGetElementPtr(Type *Ty, Constant *C,
- ArrayRef<Value *> IdxList) const override {
- return Fold(ConstantExpr::getGetElementPtr(Ty, C, IdxList));
- }
-
- Constant *CreateInBoundsGetElementPtr(
- Type *Ty, Constant *C, ArrayRef<Constant *> IdxList) const override {
- return Fold(ConstantExpr::getInBoundsGetElementPtr(Ty, C, IdxList));
- }
- Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
- Constant *Idx) const override {
- // This form of the function only exists to avoid ambiguous overload
- // warnings about whether to convert Idx to ArrayRef<Constant *> or
- // ArrayRef<Value *>.
- return Fold(ConstantExpr::getInBoundsGetElementPtr(Ty, C, Idx));
- }
- Constant *CreateInBoundsGetElementPtr(
- Type *Ty, Constant *C, ArrayRef<Value *> IdxList) const override {
- return Fold(ConstantExpr::getInBoundsGetElementPtr(Ty, C, IdxList));
- }
-
- //===--------------------------------------------------------------------===//
// Cast/Conversion Operators
//===--------------------------------------------------------------------===//
@@ -231,10 +248,6 @@ public:
// Compare Instructions
//===--------------------------------------------------------------------===//
- Constant *CreateICmp(CmpInst::Predicate P, Constant *LHS,
- Constant *RHS) const override {
- return Fold(ConstantExpr::getCompare(P, LHS, RHS));
- }
Constant *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
Constant *RHS) const override {
return Fold(ConstantExpr::getCompare(P, LHS, RHS));
@@ -244,11 +257,6 @@ public:
// Other Instructions
//===--------------------------------------------------------------------===//
- Constant *CreateSelect(Constant *C, Constant *True,
- Constant *False) const override {
- return Fold(ConstantExpr::getSelect(C, True, False));
- }
-
Constant *CreateExtractElement(Constant *Vec, Constant *Idx) const override {
return Fold(ConstantExpr::getExtractElement(Vec, Idx));
}
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/TargetTransformInfo.h b/contrib/llvm-project/llvm/include/llvm/Analysis/TargetTransformInfo.h
index d9f5c9689d5c..34ef9cc61c4f 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -43,7 +43,6 @@ class BlockFrequencyInfo;
class DominatorTree;
class BranchInst;
class CallBase;
-class ExtractElementInst;
class Function;
class GlobalValue;
class InstCombiner;
@@ -664,6 +663,12 @@ public:
bool isLegalMaskedScatter(Type *DataType, Align Alignment) const;
/// Return true if the target supports masked gather.
bool isLegalMaskedGather(Type *DataType, Align Alignment) const;
+ /// Return true if the target forces scalarizing of llvm.masked.gather
+ /// intrinsics.
+ bool forceScalarizeMaskedGather(VectorType *Type, Align Alignment) const;
+ /// Return true if the target forces scalarizing of llvm.masked.scatter
+ /// intrinsics.
+ bool forceScalarizeMaskedScatter(VectorType *Type, Align Alignment) const;
/// Return true if the target supports masked compress store.
bool isLegalMaskedCompressStore(Type *DataType) const;
@@ -1359,10 +1364,12 @@ public:
/// Flags describing the kind of vector reduction.
struct ReductionFlags {
- ReductionFlags() : IsMaxOp(false), IsSigned(false), NoNaN(false) {}
- bool IsMaxOp; ///< If the op a min/max kind, true if it's a max operation.
- bool IsSigned; ///< Whether the operation is a signed int reduction.
- bool NoNaN; ///< If op is an fp min/max, whether NaNs may be present.
+ ReductionFlags() = default;
+  bool IsMaxOp =
+      false; ///< If the op is a min/max kind, true if it's a max operation.
+ bool IsSigned = false; ///< Whether the operation is a signed int reduction.
+ bool NoNaN =
+ false; ///< If op is an fp min/max, whether NaNs may be present.
};
/// \returns True if the target prefers reductions in loop.
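
As a side note on the ReductionFlags change above: the hand-written constructor is replaced by in-class default member initializers plus a defaulted constructor. A standalone sketch of the same pattern (names are illustrative, not LLVM's):

    #include <cassert>

    struct Flags {
      Flags() = default;     // members take the defaults written next to them
      bool IsMaxOp = false;  // previously initialized as IsMaxOp(false) in the ctor
      bool IsSigned = false;
      bool NoNaN = false;
    };

    int main() {
      Flags F;
      assert(!F.IsMaxOp && !F.IsSigned && !F.NoNaN);
      return 0;
    }
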
@@ -1545,6 +1552,10 @@ public:
virtual bool isLegalNTLoad(Type *DataType, Align Alignment) = 0;
virtual bool isLegalMaskedScatter(Type *DataType, Align Alignment) = 0;
virtual bool isLegalMaskedGather(Type *DataType, Align Alignment) = 0;
+ virtual bool forceScalarizeMaskedGather(VectorType *DataType,
+ Align Alignment) = 0;
+ virtual bool forceScalarizeMaskedScatter(VectorType *DataType,
+ Align Alignment) = 0;
virtual bool isLegalMaskedCompressStore(Type *DataType) = 0;
virtual bool isLegalMaskedExpandLoad(Type *DataType) = 0;
virtual bool enableOrderedReductions() = 0;
@@ -1948,6 +1959,14 @@ public:
bool isLegalMaskedGather(Type *DataType, Align Alignment) override {
return Impl.isLegalMaskedGather(DataType, Alignment);
}
+ bool forceScalarizeMaskedGather(VectorType *DataType,
+ Align Alignment) override {
+ return Impl.forceScalarizeMaskedGather(DataType, Alignment);
+ }
+ bool forceScalarizeMaskedScatter(VectorType *DataType,
+ Align Alignment) override {
+ return Impl.forceScalarizeMaskedScatter(DataType, Alignment);
+ }
bool isLegalMaskedCompressStore(Type *DataType) override {
return Impl.isLegalMaskedCompressStore(DataType);
}
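
A hedged usage sketch for the two hooks introduced above; shouldUseMaskedGather is a hypothetical helper, and the TTI, VTy and A arguments are assumed to be supplied by the caller:

    #include "llvm/Analysis/TargetTransformInfo.h"
    #include "llvm/IR/DerivedTypes.h"

    // Emit a masked gather only if it is legal on the target and the target
    // does not force it to be scalarized.
    bool shouldUseMaskedGather(const llvm::TargetTransformInfo &TTI,
                               llvm::VectorType *VTy, llvm::Align A) {
      return TTI.isLegalMaskedGather(VTy, A) &&
             !TTI.forceScalarizeMaskedGather(VTy, A);
    }
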
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/contrib/llvm-project/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index 26a696f09b3d..4b9ef7c57ffc 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -267,6 +267,15 @@ public:
return false;
}
+ bool forceScalarizeMaskedGather(VectorType *DataType, Align Alignment) const {
+ return false;
+ }
+
+ bool forceScalarizeMaskedScatter(VectorType *DataType,
+ Align Alignment) const {
+ return false;
+ }
+
bool isLegalMaskedCompressStore(Type *DataType) const { return false; }
bool isLegalMaskedExpandLoad(Type *DataType) const { return false; }
@@ -624,6 +633,7 @@ public:
case Intrinsic::coro_end:
case Intrinsic::coro_frame:
case Intrinsic::coro_size:
+ case Intrinsic::coro_align:
case Intrinsic::coro_suspend:
case Intrinsic::coro_subfn_addr:
// These intrinsics don't actually represent code after lowering.
@@ -1052,7 +1062,20 @@ public:
}
case Instruction::Load: {
auto *LI = cast<LoadInst>(U);
- return TargetTTI->getMemoryOpCost(Opcode, U->getType(), LI->getAlign(),
+ Type *LoadType = U->getType();
+ // If there is a non-register sized type, the cost estimation may expand
+ // it to be several instructions to load into multiple registers on the
+ // target. But, if the only use of the load is a trunc instruction to a
+ // register sized type, the instruction selector can combine these
+ // instructions to be a single load. So, in this case, we use the
+ // destination type of the trunc instruction rather than the load to
+ // accurately estimate the cost of this load instruction.
+ if (CostKind == TTI::TCK_CodeSize && LI->hasOneUse() &&
+ !LoadType->isVectorTy()) {
+ if (const TruncInst *TI = dyn_cast<TruncInst>(*LI->user_begin()))
+ LoadType = TI->getDestTy();
+ }
+ return TargetTTI->getMemoryOpCost(Opcode, LoadType, LI->getAlign(),
LI->getPointerAddressSpace(),
CostKind, I);
}
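
A hedged sketch of the load-feeding-a-single-trunc case described in the comment above; costType is a hypothetical helper and LI is assumed to be a valid LoadInst:

    #include "llvm/IR/Instructions.h"

    // Pick the type the load should be costed at: if its only user is a
    // trunc to a (presumably register-sized) scalar type, use that type.
    llvm::Type *costType(const llvm::LoadInst *LI) {
      llvm::Type *LoadType = LI->getType();
      if (LI->hasOneUse() && !LoadType->isVectorTy())
        if (const auto *TI = llvm::dyn_cast<llvm::TruncInst>(*LI->user_begin()))
          return TI->getDestTy();
      return LoadType;
    }
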
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/Utils/TFUtils.h b/contrib/llvm-project/llvm/include/llvm/Analysis/Utils/TFUtils.h
index 012fca53a200..785b9fe949a5 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/Utils/TFUtils.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/Utils/TFUtils.h
@@ -178,8 +178,14 @@ public:
// Flush the content of the log to the stream, clearing the stored data in the
// process.
+ void flush(std::string *Str);
void flush(raw_ostream &OS);
+ // Flush a set of logs that are produced from the same module, e.g.
+ // per-function regalloc traces, as a google::protobuf::Struct message.
+ static void flushLogs(raw_ostream &OS,
+ const StringMap<std::unique_ptr<Logger>> &Loggers);
+
private:
std::vector<LoggedFeatureSpec> FeatureSpecs;
TensorSpec RewardSpec;
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/ValueTracking.h b/contrib/llvm-project/llvm/include/llvm/Analysis/ValueTracking.h
index b4f38a3e976f..5b39b0244339 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/ValueTracking.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/ValueTracking.h
@@ -33,7 +33,6 @@ class APInt;
class AssumptionCache;
class DominatorTree;
class GEPOperator;
-class IntrinsicInst;
class LoadInst;
class WithOverflowInst;
struct KnownBits;
@@ -203,23 +202,14 @@ constexpr unsigned MaxAnalysisRecursionDepth = 6;
const DominatorTree *DT = nullptr,
bool UseInstrInfo = true);
- /// Get the minimum bit size for this Value \p Op as a signed integer.
- /// i.e. x == sext(trunc(x to MinSignedBits) to bitwidth(x)).
- /// Similar to the APInt::getMinSignedBits function.
- unsigned ComputeMinSignedBits(const Value *Op, const DataLayout &DL,
- unsigned Depth = 0,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr,
- const DominatorTree *DT = nullptr);
-
- /// This function computes the integer multiple of Base that equals V. If
- /// successful, it returns true and returns the multiple in Multiple. If
- /// unsuccessful, it returns false. Also, if V can be simplified to an
- /// integer, then the simplified V is returned in Val. Look through sext only
- /// if LookThroughSExt=true.
- bool ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
- bool LookThroughSExt = false,
- unsigned Depth = 0);
+ /// Get the upper bound on bit size for this Value \p Op as a signed integer.
+ /// i.e. x == sext(trunc(x to MaxSignificantBits) to bitwidth(x)).
+ /// Similar to the APInt::getSignificantBits function.
+ unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL,
+ unsigned Depth = 0,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr,
+ const DominatorTree *DT = nullptr);
/// Map a call instruction to an intrinsic ID. Libcalls which have equivalent
/// intrinsics are treated as-if they were intrinsics.
@@ -555,7 +545,8 @@ constexpr unsigned MaxAnalysisRecursionDepth = 6;
/// Determine the possible constant range of an integer or vector of integer
/// value. This is intended as a cheap, non-recursive check.
- ConstantRange computeConstantRange(const Value *V, bool UseInstrInfo = true,
+ ConstantRange computeConstantRange(const Value *V, bool ForSigned,
+ bool UseInstrInfo = true,
AssumptionCache *AC = nullptr,
const Instruction *CtxI = nullptr,
const DominatorTree *DT = nullptr,
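
A small standalone check of the property the renamed ComputeMaxSignificantBits is defined by, assuming the APInt::getSignificantBits helper referenced in the comment above:

    #include "llvm/ADT/APInt.h"
    #include <cassert>

    int main() {
      llvm::APInt X(32, -5, /*isSigned=*/true);
      unsigned W = X.getSignificantBits();  // smallest W with x == sext(trunc(x, W))
      assert(W == 4);                       // -5 needs 4 signed bits
      assert(X.trunc(W).sext(32) == X);     // round-trips exactly
      assert(X.trunc(W - 1).sext(32) != X); // one bit fewer does not
      return 0;
    }
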
diff --git a/contrib/llvm-project/llvm/include/llvm/BinaryFormat/AMDGPUMetadataVerifier.h b/contrib/llvm-project/llvm/include/llvm/BinaryFormat/AMDGPUMetadataVerifier.h
index 7332b2a7ea89..c94cafe7458b 100644
--- a/contrib/llvm-project/llvm/include/llvm/BinaryFormat/AMDGPUMetadataVerifier.h
+++ b/contrib/llvm-project/llvm/include/llvm/BinaryFormat/AMDGPUMetadataVerifier.h
@@ -16,9 +16,21 @@
#ifndef LLVM_BINARYFORMAT_AMDGPUMETADATAVERIFIER_H
#define LLVM_BINARYFORMAT_AMDGPUMETADATAVERIFIER_H
-#include "llvm/BinaryFormat/MsgPackDocument.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLFunctionalExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/BinaryFormat/MsgPackReader.h"
+
+#include <cstddef>
namespace llvm {
+
+namespace msgpack {
+ class DocNode;
+ class MapDocNode;
+}
+
namespace AMDGPU {
namespace HSAMD {
namespace V3 {
diff --git a/contrib/llvm-project/llvm/include/llvm/BinaryFormat/Dwarf.def b/contrib/llvm-project/llvm/include/llvm/BinaryFormat/Dwarf.def
index 61f3f27ebb47..76e51b3157d3 100644
--- a/contrib/llvm-project/llvm/include/llvm/BinaryFormat/Dwarf.def
+++ b/contrib/llvm-project/llvm/include/llvm/BinaryFormat/Dwarf.def
@@ -495,7 +495,8 @@ HANDLE_DW_AT(0x2133, GNU_addr_base, 0, GNU)
HANDLE_DW_AT(0x2134, GNU_pubnames, 0, GNU)
HANDLE_DW_AT(0x2135, GNU_pubtypes, 0, GNU)
HANDLE_DW_AT(0x2136, GNU_discriminator, 0, GNU)
-HANDLE_DW_AT(0x2137, GNU_entry_view, 0, GNU)
+HANDLE_DW_AT(0x2137, GNU_locviews, 0, GNU)
+HANDLE_DW_AT(0x2138, GNU_entry_view, 0, GNU)
HANDLE_DW_AT(0x2201, SUN_template, 0, SUN)
// Conflicting:
diff --git a/contrib/llvm-project/llvm/include/llvm/BinaryFormat/Dwarf.h b/contrib/llvm-project/llvm/include/llvm/BinaryFormat/Dwarf.h
index a725aff39ac6..4473f506d371 100644
--- a/contrib/llvm-project/llvm/include/llvm/BinaryFormat/Dwarf.h
+++ b/contrib/llvm-project/llvm/include/llvm/BinaryFormat/Dwarf.h
@@ -19,7 +19,6 @@
#ifndef LLVM_BINARYFORMAT_DWARF_H
#define LLVM_BINARYFORMAT_DWARF_H
-#include "llvm/ADT/Optional.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
@@ -31,9 +30,11 @@
namespace llvm {
class StringRef;
+template<typename T> class Optional;
namespace dwarf {
+
//===----------------------------------------------------------------------===//
// DWARF constants as gleaned from the DWARF Debugging Information Format V.5
// reference manual http://www.dwarfstd.org/.
@@ -649,6 +650,9 @@ struct FormParams {
uint16_t Version;
uint8_t AddrSize;
DwarfFormat Format;
+ /// True if DWARF v2 output generally uses relocations for references
+ /// to other .debug_* sections.
+ bool DwarfUsesRelocationsAcrossSections = false;
/// The definition of the size of form DW_FORM_ref_addr depends on the
/// version. In DWARF v2 it's the size of an address; after that, it's the
diff --git a/contrib/llvm-project/llvm/include/llvm/BinaryFormat/ELF.h b/contrib/llvm-project/llvm/include/llvm/BinaryFormat/ELF.h
index 065661cbd188..8840929174d6 100644
--- a/contrib/llvm-project/llvm/include/llvm/BinaryFormat/ELF.h
+++ b/contrib/llvm-project/llvm/include/llvm/BinaryFormat/ELF.h
@@ -22,7 +22,6 @@
#include "llvm/ADT/StringRef.h"
#include <cstdint>
#include <cstring>
-#include <string>
namespace llvm {
namespace ELF {
@@ -656,7 +655,8 @@ enum : unsigned {
EF_RISCV_FLOAT_ABI_SINGLE = 0x0002,
EF_RISCV_FLOAT_ABI_DOUBLE = 0x0004,
EF_RISCV_FLOAT_ABI_QUAD = 0x0006,
- EF_RISCV_RVE = 0x0008
+ EF_RISCV_RVE = 0x0008,
+ EF_RISCV_TSO = 0x0010,
};
// ELF Relocation types for RISC-V
diff --git a/contrib/llvm-project/llvm/include/llvm/BinaryFormat/MachO.h b/contrib/llvm-project/llvm/include/llvm/BinaryFormat/MachO.h
index df2ba94c7896..ce3a5c46e0d1 100644
--- a/contrib/llvm-project/llvm/include/llvm/BinaryFormat/MachO.h
+++ b/contrib/llvm-project/llvm/include/llvm/BinaryFormat/MachO.h
@@ -489,6 +489,7 @@ enum { VM_PROT_READ = 0x1, VM_PROT_WRITE = 0x2, VM_PROT_EXECUTE = 0x4 };
// Values for platform field in build_version_command.
enum PlatformType {
+ PLATFORM_UNKNOWN = 0,
PLATFORM_MACOS = 1,
PLATFORM_IOS = 2,
PLATFORM_TVOS = 3,
diff --git a/contrib/llvm-project/llvm/include/llvm/BinaryFormat/MsgPackReader.h b/contrib/llvm-project/llvm/include/llvm/BinaryFormat/MsgPackReader.h
index 5035de03cc1b..5123fb65cf09 100644
--- a/contrib/llvm-project/llvm/include/llvm/BinaryFormat/MsgPackReader.h
+++ b/contrib/llvm-project/llvm/include/llvm/BinaryFormat/MsgPackReader.h
@@ -34,8 +34,7 @@
#define LLVM_BINARYFORMAT_MSGPACKREADER_H
#include "llvm/Support/Error.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/MemoryBufferRef.h"
#include <cstdint>
namespace llvm {
diff --git a/contrib/llvm-project/llvm/include/llvm/BinaryFormat/MsgPackWriter.h b/contrib/llvm-project/llvm/include/llvm/BinaryFormat/MsgPackWriter.h
index 254b52f18bd3..b3d3c3bcdef0 100644
--- a/contrib/llvm-project/llvm/include/llvm/BinaryFormat/MsgPackWriter.h
+++ b/contrib/llvm-project/llvm/include/llvm/BinaryFormat/MsgPackWriter.h
@@ -28,12 +28,13 @@
#ifndef LLVM_BINARYFORMAT_MSGPACKWRITER_H
#define LLVM_BINARYFORMAT_MSGPACKWRITER_H
-#include "llvm/BinaryFormat/MsgPack.h"
#include "llvm/Support/EndianStream.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/MemoryBufferRef.h"
namespace llvm {
+
+class raw_ostream;
+
namespace msgpack {
/// Writes MessagePack objects to an output stream, one at a time.
diff --git a/contrib/llvm-project/llvm/include/llvm/Bitcode/LLVMBitCodes.h b/contrib/llvm-project/llvm/include/llvm/Bitcode/LLVMBitCodes.h
index 7301618d337a..6d0f51ce9c6d 100644
--- a/contrib/llvm-project/llvm/include/llvm/Bitcode/LLVMBitCodes.h
+++ b/contrib/llvm-project/llvm/include/llvm/Bitcode/LLVMBitCodes.h
@@ -381,10 +381,14 @@ enum ConstantsCodes {
CST_CODE_CE_UNOP = 25, // CE_UNOP: [opcode, opval]
CST_CODE_POISON = 26, // POISON
CST_CODE_DSO_LOCAL_EQUIVALENT = 27, // DSO_LOCAL_EQUIVALENT [gvty, gv]
- CST_CODE_INLINEASM = 28, // INLINEASM: [sideeffect|alignstack|
+ CST_CODE_INLINEASM_OLD3 = 28, // INLINEASM: [sideeffect|alignstack|
+ // asmdialect|unwind,
+ // asmstr,conststr]
+ CST_CODE_NO_CFI_VALUE = 29, // NO_CFI [ fty, f ]
+ CST_CODE_INLINEASM = 30, // INLINEASM: [fnty,
+ // sideeffect|alignstack|
// asmdialect|unwind,
// asmstr,conststr]
- CST_CODE_NO_CFI_VALUE = 29, // NO_CFI [ fty, f ]
};
/// CastOpcodes - These are values used in the bitcode files to encode which
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/AsmPrinter.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/AsmPrinter.h
index 281ecb8de251..d911bfd435ae 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/AsmPrinter.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/AsmPrinter.h
@@ -17,6 +17,7 @@
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/AsmPrinterHandler.h"
#include "llvm/CodeGen/DwarfStringPoolEntry.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -231,6 +232,9 @@ public:
/// Returns 4 for DWARF32 and 12 for DWARF64.
unsigned int getUnitLengthFieldByteSize() const;
+ /// Returns information about the byte size of DW_FORM values.
+ dwarf::FormParams getDwarfFormParams() const;
+
bool isPositionIndependent() const;
/// Return true if assembly output should contain comments.
@@ -431,7 +435,8 @@ public:
/// global value is specified, and if that global has an explicit alignment
/// requested, it will override the alignment request if required for
/// correctness.
- void emitAlignment(Align Alignment, const GlobalObject *GV = nullptr) const;
+ void emitAlignment(Align Alignment, const GlobalObject *GV = nullptr,
+ unsigned MaxBytesToEmit = 0) const;
/// Lower the specified LLVM Constant to an MCExpr.
virtual const MCExpr *lowerConstant(const Constant *CV);
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/BasicTTIImpl.h
index 324b7dcfb3ac..0b2737628923 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -875,14 +875,18 @@ public:
switch (improveShuffleKindFromMask(Kind, Mask)) {
case TTI::SK_Broadcast:
- return getBroadcastShuffleOverhead(cast<FixedVectorType>(Tp));
+ if (auto *FVT = dyn_cast<FixedVectorType>(Tp))
+ return getBroadcastShuffleOverhead(FVT);
+ return InstructionCost::getInvalid();
case TTI::SK_Select:
case TTI::SK_Splice:
case TTI::SK_Reverse:
case TTI::SK_Transpose:
case TTI::SK_PermuteSingleSrc:
case TTI::SK_PermuteTwoSrc:
- return getPermuteShuffleOverhead(cast<FixedVectorType>(Tp));
+ if (auto *FVT = dyn_cast<FixedVectorType>(Tp))
+ return getPermuteShuffleOverhead(FVT);
+ return InstructionCost::getInvalid();
case TTI::SK_ExtractSubvector:
return getExtractSubvectorOverhead(Tp, Index,
cast<FixedVectorType>(SubTp));
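
A hedged sketch of the pattern applied above: an unconditional cast<FixedVectorType> (which would assert on scalable vectors) becomes a dyn_cast with an invalid-cost fallback. The function and the placeholder cost are illustrative only:

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/Support/InstructionCost.h"

    llvm::InstructionCost shuffleOverheadOrInvalid(llvm::Type *Tp) {
      if (auto *FVT = llvm::dyn_cast<llvm::FixedVectorType>(Tp))
        return FVT->getNumElements(); // stand-in for the real overhead model
      return llvm::InstructionCost::getInvalid(); // e.g. scalable vectors
    }
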
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/CalcSpillWeights.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/CalcSpillWeights.h
index 0b6ed079b38e..bfd5bab3d1c0 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/CalcSpillWeights.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/CalcSpillWeights.h
@@ -80,6 +80,18 @@ class VirtRegMap;
/// live intervals.
void calculateSpillWeightsAndHints();
+ /// Return the preferred allocation register for reg, given a COPY
+ /// instruction.
+ static Register copyHint(const MachineInstr *MI, unsigned Reg,
+ const TargetRegisterInfo &TRI,
+ const MachineRegisterInfo &MRI);
+
+ /// Determine if all values in LI are rematerializable.
+ static bool isRematerializable(const LiveInterval &LI,
+ const LiveIntervals &LIS,
+ const VirtRegMap &VRM,
+ const TargetInstrInfo &TII);
+
protected:
/// Helper function for weight calculations.
/// (Re)compute LI's spill weight and allocation hint, or, for non null
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/CodeGenPassBuilder.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/CodeGenPassBuilder.h
index 1fd07ca2c8d4..f6563971f981 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/CodeGenPassBuilder.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/CodeGenPassBuilder.h
@@ -159,7 +159,7 @@ protected:
class AddIRPass {
public:
AddIRPass(ModulePassManager &MPM, bool DebugPM, bool Check = true)
- : MPM(MPM), FPM() {
+ : MPM(MPM) {
if (Check)
AddingFunctionPasses = false;
}
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/DIE.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/DIE.h
index 9e94c401bfae..32df448b91a1 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/DIE.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/DIE.h
@@ -191,7 +191,7 @@ public:
void setValue(uint64_t Val) { Integer = Val; }
void emitValue(const AsmPrinter *Asm, dwarf::Form Form) const;
- unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+ unsigned sizeOf(const dwarf::FormParams &FormParams, dwarf::Form Form) const;
void print(raw_ostream &O) const;
};
@@ -208,7 +208,7 @@ public:
const MCExpr *getValue() const { return Expr; }
void emitValue(const AsmPrinter *AP, dwarf::Form Form) const;
- unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+ unsigned sizeOf(const dwarf::FormParams &FormParams, dwarf::Form Form) const;
void print(raw_ostream &O) const;
};
@@ -225,7 +225,7 @@ public:
const MCSymbol *getValue() const { return Label; }
void emitValue(const AsmPrinter *AP, dwarf::Form Form) const;
- unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+ unsigned sizeOf(const dwarf::FormParams &FormParams, dwarf::Form Form) const;
void print(raw_ostream &O) const;
};
@@ -243,8 +243,8 @@ public:
/// EmitValue - Emit base type reference.
void emitValue(const AsmPrinter *AP, dwarf::Form Form) const;
- /// SizeOf - Determine size of the base type reference in bytes.
- unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+ /// sizeOf - Determine size of the base type reference in bytes.
+ unsigned sizeOf(const dwarf::FormParams &, dwarf::Form) const;
void print(raw_ostream &O) const;
uint64_t getIndex() const { return Index; }
@@ -261,7 +261,7 @@ public:
DIEDelta(const MCSymbol *Hi, const MCSymbol *Lo) : LabelHi(Hi), LabelLo(Lo) {}
void emitValue(const AsmPrinter *AP, dwarf::Form Form) const;
- unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+ unsigned sizeOf(const dwarf::FormParams &FormParams, dwarf::Form Form) const;
void print(raw_ostream &O) const;
};
@@ -280,7 +280,7 @@ public:
StringRef getString() const { return S.getString(); }
void emitValue(const AsmPrinter *AP, dwarf::Form Form) const;
- unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+ unsigned sizeOf(const dwarf::FormParams &FormParams, dwarf::Form Form) const;
void print(raw_ostream &O) const;
};
@@ -302,7 +302,7 @@ public:
StringRef getString() const { return S; }
void emitValue(const AsmPrinter *AP, dwarf::Form Form) const;
- unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+ unsigned sizeOf(const dwarf::FormParams &, dwarf::Form) const;
void print(raw_ostream &O) const;
};
@@ -321,7 +321,7 @@ public:
DIE &getEntry() const { return *Entry; }
void emitValue(const AsmPrinter *AP, dwarf::Form Form) const;
- unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+ unsigned sizeOf(const dwarf::FormParams &FormParams, dwarf::Form Form) const;
void print(raw_ostream &O) const;
};
@@ -340,7 +340,7 @@ public:
size_t getValue() const { return Index; }
void emitValue(const AsmPrinter *AP, dwarf::Form Form) const;
- unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+ unsigned sizeOf(const dwarf::FormParams &FormParams, dwarf::Form Form) const;
void print(raw_ostream &O) const;
};
@@ -356,7 +356,7 @@ public:
: Addr(Idx), Offset(Hi, Lo) {}
void emitValue(const AsmPrinter *AP, dwarf::Form Form) const;
- unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+ unsigned sizeOf(const dwarf::FormParams &FormParams, dwarf::Form Form) const;
void print(raw_ostream &O) const;
};
@@ -506,7 +506,7 @@ public:
void emitValue(const AsmPrinter *AP) const;
/// Return the size of a value in bytes.
- unsigned SizeOf(const AsmPrinter *AP) const;
+ unsigned sizeOf(const dwarf::FormParams &FormParams) const;
void print(raw_ostream &O) const;
void dump() const;
@@ -774,8 +774,16 @@ public:
unsigned getAbbrevNumber() const { return AbbrevNumber; }
dwarf::Tag getTag() const { return Tag; }
/// Get the compile/type unit relative offset of this DIE.
- unsigned getOffset() const { return Offset; }
- unsigned getSize() const { return Size; }
+ unsigned getOffset() const {
+ // A real Offset can't be zero because the unit headers are at offset zero.
+ assert(Offset && "Offset being queried before it's been computed.");
+ return Offset;
+ }
+ unsigned getSize() const {
+ // A real Size can't be zero because it includes the non-empty abbrev code.
+    assert(Size && "Size being queried before it's been computed.");
+ return Size;
+ }
bool hasChildren() const { return ForceChildren || !Children.empty(); }
void setForceChildren(bool B) { ForceChildren = B; }
@@ -817,12 +825,12 @@ public:
/// properly refer to other DIE objects since all DIEs have calculated their
/// offsets.
///
- /// \param AP AsmPrinter to use when calculating sizes.
+ /// \param FormParams Used when calculating sizes.
/// \param AbbrevSet the abbreviation used to unique DIE abbreviations.
/// \param CUOffset the compile/type unit relative offset in bytes.
/// \returns the offset for the DIE that follows this DIE within the
/// current compile/type unit.
- unsigned computeOffsetsAndAbbrevs(const AsmPrinter *AP,
+ unsigned computeOffsetsAndAbbrevs(const dwarf::FormParams &FormParams,
DIEAbbrevSet &AbbrevSet, unsigned CUOffset);
/// Climb up the parent chain to get the compile unit or type unit DIE that
@@ -925,9 +933,8 @@ class DIELoc : public DIEValueList {
public:
DIELoc() = default;
- /// ComputeSize - Calculate the size of the location expression.
- ///
- unsigned ComputeSize(const AsmPrinter *AP) const;
+ /// Calculate the size of the location expression.
+ unsigned computeSize(const dwarf::FormParams &FormParams) const;
// TODO: move setSize() and Size to DIEValueList.
void setSize(unsigned size) { Size = size; }
@@ -948,7 +955,7 @@ public:
}
void emitValue(const AsmPrinter *Asm, dwarf::Form Form) const;
- unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+ unsigned sizeOf(const dwarf::FormParams &, dwarf::Form Form) const;
void print(raw_ostream &O) const;
};
@@ -962,9 +969,8 @@ class DIEBlock : public DIEValueList {
public:
DIEBlock() = default;
- /// ComputeSize - Calculate the size of the location expression.
- ///
- unsigned ComputeSize(const AsmPrinter *AP) const;
+ /// Calculate the size of the location expression.
+ unsigned computeSize(const dwarf::FormParams &FormParams) const;
// TODO: move setSize() and Size to DIEValueList.
void setSize(unsigned size) { Size = size; }
@@ -982,7 +988,7 @@ public:
}
void emitValue(const AsmPrinter *Asm, dwarf::Form Form) const;
- unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+ unsigned sizeOf(const dwarf::FormParams &, dwarf::Form Form) const;
void print(raw_ostream &O) const;
};
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/DbgEntityHistoryCalculator.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/DbgEntityHistoryCalculator.h
index e7425dd3dc04..2ac9d938d281 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/DbgEntityHistoryCalculator.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/DbgEntityHistoryCalculator.h
@@ -17,7 +17,6 @@
namespace llvm {
-class DILocalVariable;
class DILocation;
class DINode;
class MachineFunction;
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/FaultMaps.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/FaultMaps.h
index 12d2872c8c5b..8a8b1d2e6008 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/FaultMaps.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/FaultMaps.h
@@ -18,7 +18,6 @@ namespace llvm {
class AsmPrinter;
class MCExpr;
-class raw_ostream;
class FaultMaps {
public:
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
index 9c878d4b087b..3a4b3ee18e1b 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
@@ -95,7 +95,7 @@ public:
bool IsFixed = true)
: ArgInfo(Regs, OrigValue.getType(), OrigIndex, Flags, IsFixed, &OrigValue) {}
- ArgInfo() : BaseArgInfo() {}
+ ArgInfo() {}
};
struct CallLoweringInfo {
@@ -388,12 +388,12 @@ protected:
/// \p Handler to move them to the assigned locations.
///
/// \return True if everything has succeeded, false otherwise.
- bool determineAndHandleAssignments(ValueHandler &Handler,
- ValueAssigner &Assigner,
- SmallVectorImpl<ArgInfo> &Args,
- MachineIRBuilder &MIRBuilder,
- CallingConv::ID CallConv, bool IsVarArg,
- Register ThisReturnReg = Register()) const;
+ bool
+ determineAndHandleAssignments(ValueHandler &Handler, ValueAssigner &Assigner,
+ SmallVectorImpl<ArgInfo> &Args,
+ MachineIRBuilder &MIRBuilder,
+ CallingConv::ID CallConv, bool IsVarArg,
+ ArrayRef<Register> ThisReturnRegs = None) const;
/// Use \p Handler to insert code to handle the argument/return values
/// represented by \p Args. It's expected determineAssignments previously
@@ -402,7 +402,7 @@ protected:
CCState &CCState,
SmallVectorImpl<CCValAssign> &ArgLocs,
MachineIRBuilder &MIRBuilder,
- Register ThisReturnReg = Register()) const;
+ ArrayRef<Register> ThisReturnRegs = None) const;
/// Check whether parameters to a call that are passed in callee saved
/// registers are the same as from the calling function. This needs to be
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index f3fa652b0175..45c27c25aea0 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -323,6 +323,11 @@ public:
void applyCombineUnmergeConstant(MachineInstr &MI,
SmallVectorImpl<APInt> &Csts);
+ /// Transform G_UNMERGE G_IMPLICIT_DEF -> G_IMPLICIT_DEF, G_IMPLICIT_DEF, ...
+ bool
+ matchCombineUnmergeUndef(MachineInstr &MI,
+ std::function<void(MachineIRBuilder &)> &MatchInfo);
+
/// Transform X, Y<dead> = G_UNMERGE Z -> X = G_TRUNC Z.
bool matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI);
void applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI);
@@ -353,8 +358,8 @@ public:
std::pair<Register, bool> &PtrRegAndCommute);
// Transform G_PTR_ADD (G_PTRTOINT C1), C2 -> C1 + C2
- bool matchCombineConstPtrAddToI2P(MachineInstr &MI, int64_t &NewCst);
- void applyCombineConstPtrAddToI2P(MachineInstr &MI, int64_t &NewCst);
+ bool matchCombineConstPtrAddToI2P(MachineInstr &MI, APInt &NewCst);
+ void applyCombineConstPtrAddToI2P(MachineInstr &MI, APInt &NewCst);
/// Transform anyext(trunc(x)) to x.
bool matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg);
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/CombinerInfo.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/CombinerInfo.h
index 4a1a4ff2528a..e73f8489497e 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/CombinerInfo.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/CombinerInfo.h
@@ -20,7 +20,6 @@ class GISelChangeObserver;
class LegalizerInfo;
class MachineInstr;
class MachineIRBuilder;
-class MachineRegisterInfo;
// Contains information relevant to enabling/disabling various combines for a
// pass.
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/GISelWorkList.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/GISelWorkList.h
index c5af64d2bcbe..7d198fada411 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/GISelWorkList.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/GISelWorkList.h
@@ -15,7 +15,6 @@
namespace llvm {
class MachineInstr;
-class MachineFunction;
// Worklist which mostly works similar to InstCombineWorkList, but on
// MachineInstrs. The main difference with something like a SetVector is that
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
index 886b3af834d7..38d2fe28063a 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
@@ -487,7 +487,8 @@ public:
// That is not done yet.
if (ConvertOp == 0)
return true;
- return !DestTy.isVector() && OpTy.isVector();
+ return !DestTy.isVector() && OpTy.isVector() &&
+ DestTy == OpTy.getElementType();
case TargetOpcode::G_CONCAT_VECTORS: {
if (ConvertOp == 0)
return true;
@@ -977,10 +978,13 @@ public:
Builder.setInstr(MI);
for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
- Register MergeSrc = MergeI->getOperand(Idx + 1).getReg();
Register DefReg = MI.getOperand(Idx).getReg();
- Builder.buildInstr(ConvertOp, {DefReg}, {MergeSrc});
- UpdatedDefs.push_back(DefReg);
+ Register MergeSrc = MergeI->getOperand(Idx + 1).getReg();
+
+ if (!MRI.use_empty(DefReg)) {
+ Builder.buildInstr(ConvertOp, {DefReg}, {MergeSrc});
+ UpdatedDefs.push_back(DefReg);
+ }
}
markInstAndDefDead(MI, *MergeI, DeadInsts);
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/Legalizer.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/Legalizer.h
index 4871d8d32ebd..c19f1d5330ba 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/Legalizer.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/Legalizer.h
@@ -25,7 +25,6 @@
namespace llvm {
-class MachineRegisterInfo;
class LostDebugLocObserver;
class Legalizer : public MachineFunctionPass {
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index 044f2e22cfdd..3b2f937375eb 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -30,7 +30,6 @@
namespace llvm {
// Forward declarations.
class LegalizerInfo;
-class Legalizer;
class MachineRegisterInfo;
class GISelChangeObserver;
class LostDebugLocObserver;
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
index 0b37539030b1..9507c3411b5c 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
@@ -38,7 +38,6 @@ class LegalizerHelper;
class MachineInstr;
class MachineRegisterInfo;
class MCInstrInfo;
-class GISelChangeObserver;
namespace LegalizeActions {
enum LegalizeAction : std::uint8_t {
@@ -557,7 +556,7 @@ class LegalizeRuleSet {
}
public:
- LegalizeRuleSet() : AliasOf(0), IsAliasedByAnother(false), Rules() {}
+ LegalizeRuleSet() : AliasOf(0), IsAliasedByAnother(false) {}
bool isAliasedByAnother() { return IsAliasedByAnother; }
void setIsAliasedByAnother() { IsAliasedByAnother = true; }
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LoadStoreOpt.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LoadStoreOpt.h
index 29575f386d7a..0845c001abdb 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LoadStoreOpt.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LoadStoreOpt.h
@@ -30,7 +30,6 @@
namespace llvm {
// Forward declarations.
class MachineRegisterInfo;
-class TargetTransformInfo;
namespace GISelAddressing {
/// Helper struct to store a base, index and offset that forms an address
struct BaseIndexOffset {
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
index 28bb8de11762..daf1ff052983 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
@@ -13,6 +13,7 @@
#ifndef LLVM_CODEGEN_GLOBALISEL_MIPATTERNMATCH_H
#define LLVM_CODEGEN_GLOBALISEL_MIPATTERNMATCH_H
+#include "llvm/ADT/APInt.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/InstrTypes.h"
@@ -59,11 +60,26 @@ inline OneNonDBGUse_match<SubPat> m_OneNonDBGUse(const SubPat &SP) {
return SP;
}
-struct ConstantMatch {
- int64_t &CR;
- ConstantMatch(int64_t &C) : CR(C) {}
+template <typename ConstT>
+inline Optional<ConstT> matchConstant(Register, const MachineRegisterInfo &);
+
+template <>
+inline Optional<APInt> matchConstant(Register Reg,
+ const MachineRegisterInfo &MRI) {
+ return getIConstantVRegVal(Reg, MRI);
+}
+
+template <>
+inline Optional<int64_t> matchConstant(Register Reg,
+ const MachineRegisterInfo &MRI) {
+ return getIConstantVRegSExtVal(Reg, MRI);
+}
+
+template <typename ConstT> struct ConstantMatch {
+ ConstT &CR;
+ ConstantMatch(ConstT &C) : CR(C) {}
bool match(const MachineRegisterInfo &MRI, Register Reg) {
- if (auto MaybeCst = getIConstantVRegSExtVal(Reg, MRI)) {
+ if (auto MaybeCst = matchConstant<ConstT>(Reg, MRI)) {
CR = *MaybeCst;
return true;
}
@@ -71,7 +87,12 @@ struct ConstantMatch {
}
};
-inline ConstantMatch m_ICst(int64_t &Cst) { return ConstantMatch(Cst); }
+inline ConstantMatch<APInt> m_ICst(APInt &Cst) {
+ return ConstantMatch<APInt>(Cst);
+}
+inline ConstantMatch<int64_t> m_ICst(int64_t &Cst) {
+ return ConstantMatch<int64_t>(Cst);
+}
struct GCstAndRegMatch {
Optional<ValueAndVReg> &ValReg;
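
A hedged usage sketch for the new APInt overload of m_ICst added above; readConstant is a hypothetical helper, and Reg is assumed to be defined by a G_CONSTANT:

    #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"

    // Reads the constant at its full bit width; the int64_t overload remains
    // available for callers that want the old sign-extended behavior.
    bool readConstant(llvm::Register Reg, const llvm::MachineRegisterInfo &MRI,
                      llvm::APInt &Out) {
      using namespace llvm::MIPatternMatch;
      return mi_match(Reg, MRI, m_ICst(Out));
    }
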
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index fde0cb3cf1af..c4c2fc076dd8 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -836,17 +836,38 @@ public:
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op);
+
+ /// Build and insert G_ASSERT_SEXT, G_ASSERT_ZEXT, or G_ASSERT_ALIGN
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAssertOp(unsigned Opc, const DstOp &Res, const SrcOp &Op,
+ unsigned Val) {
+ return buildInstr(Opc, Res, Op).addImm(Val);
+ }
+
/// Build and insert \p Res = G_ASSERT_ZEXT Op, Size
///
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildAssertZExt(const DstOp &Res, const SrcOp &Op,
- unsigned Size);
+ unsigned Size) {
+ return buildAssertOp(TargetOpcode::G_ASSERT_ZEXT, Res, Op, Size);
+ }
/// Build and insert \p Res = G_ASSERT_SEXT Op, Size
///
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildAssertSExt(const DstOp &Res, const SrcOp &Op,
- unsigned Size);
+ unsigned Size) {
+ return buildAssertOp(TargetOpcode::G_ASSERT_SEXT, Res, Op, Size);
+ }
+
+ /// Build and insert \p Res = G_ASSERT_ALIGN Op, AlignVal
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAssertAlign(const DstOp &Res, const SrcOp &Op,
+ Align AlignVal) {
+ return buildAssertOp(TargetOpcode::G_ASSERT_ALIGN, Res, Op, AlignVal.value());
+ }
/// Build and insert `Res = G_LOAD Addr, MMO`.
///
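
A hedged sketch of emitting the new G_ASSERT_ALIGN through the helper added above; the function name is hypothetical and DstReg/SrcReg are assumed to have matching types:

    #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

    void assertSixteenByteAligned(llvm::MachineIRBuilder &MIRB,
                                  llvm::Register DstReg, llvm::Register SrcReg) {
      // Same as buildAssertOp(TargetOpcode::G_ASSERT_ALIGN, DstReg, SrcReg, 16).
      MIRB.buildAssertAlign(DstReg, SrcReg, llvm::Align(16));
    }
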
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/RegBankSelect.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/RegBankSelect.h
index 5c693d8de521..45006eecfce6 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/RegBankSelect.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/RegBankSelect.h
@@ -253,7 +253,7 @@ public:
public:
MBBInsertPoint(MachineBasicBlock &MBB, bool Beginning = true)
- : InsertPoint(), MBB(MBB), Beginning(Beginning) {
+ : MBB(MBB), Beginning(Beginning) {
// If we try to insert before phis, we should use the insertion
// points on the incoming edges.
assert((!Beginning || MBB.getFirstNonPHI() == MBB.begin()) &&
@@ -299,7 +299,7 @@ public:
public:
EdgeInsertPoint(MachineBasicBlock &Src, MachineBasicBlock &Dst, Pass &P)
- : InsertPoint(), Src(Src), DstOrSplit(&Dst), P(P) {}
+ : Src(Src), DstOrSplit(&Dst), P(P) {}
bool isSplit() const override {
return Src.succ_size() > 1 && DstOrSplit->pred_size() > 1;
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/Utils.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
index 8fed79585fe9..aed915d2cc4b 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -44,7 +44,6 @@ class TargetLowering;
class TargetPassConfig;
class TargetRegisterInfo;
class TargetRegisterClass;
-class ConstantInt;
class ConstantFP;
class APFloat;
class MachineIRBuilder;
@@ -271,9 +270,10 @@ Optional<APFloat> ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
/// If successful, returns the G_BUILD_VECTOR representing the folded vector
/// constant. \p MIB should have an insertion point already set to create new
/// G_CONSTANT instructions as needed.
-Optional<MachineInstr *>
-ConstantFoldVectorBinop(unsigned Opcode, const Register Op1, const Register Op2,
- const MachineRegisterInfo &MRI, MachineIRBuilder &MIB);
+Register ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
+ const Register Op2,
+ const MachineRegisterInfo &MRI,
+ MachineIRBuilder &MIB);
Optional<APInt> ConstantFoldExtOp(unsigned Opcode, const Register Op1,
uint64_t Imm, const MachineRegisterInfo &MRI);
@@ -311,10 +311,11 @@ Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO);
///
/// If there is an existing live-in argument register, it will be returned.
/// This will also ensure there is a valid copy
-Register getFunctionLiveInPhysReg(MachineFunction &MF, const TargetInstrInfo &TII,
+Register getFunctionLiveInPhysReg(MachineFunction &MF,
+ const TargetInstrInfo &TII,
MCRegister PhysReg,
const TargetRegisterClass &RC,
- LLT RegTy = LLT());
+ const DebugLoc &DL, LLT RegTy = LLT());
/// Return the least common multiple type of \p OrigTy and \p TargetTy, by changing the
/// number of vector elements or scalar bitwidth. The intent is a
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/ISDOpcodes.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/ISDOpcodes.h
index fd106f55a43d..b07c7cd3db3a 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -797,6 +797,10 @@ enum NodeType {
/// The scalar width of the type given in operand 1 must be equal to, or
/// smaller than, the scalar result type width. It may end up being smaller
/// than the result width as a result of integer type legalization.
+ ///
+ /// After converting to the scalar integer type in operand 1, the value is
+ /// extended to the result VT. FP_TO_SINT_SAT sign extends and FP_TO_UINT_SAT
+ /// zero extends.
FP_TO_SINT_SAT,
FP_TO_UINT_SAT,
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/IndirectThunks.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/IndirectThunks.h
index 90f9912f0ee0..a2cdd0a9e965 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/IndirectThunks.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/IndirectThunks.h
@@ -59,7 +59,7 @@ void ThunkInserter<Derived>::createThunkFunction(MachineModuleInfo &MMI,
// Add Attributes so that we don't create a frame, unwind information, or
// inline.
- AttrBuilder B;
+ AttrBuilder B(Ctx);
B.addAttribute(llvm::Attribute::NoUnwind);
B.addAttribute(llvm::Attribute::Naked);
F->addFnAttrs(B);
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/LiveInterval.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/LiveInterval.h
index 923a45821dd4..51ffe2807434 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/LiveInterval.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/LiveInterval.h
@@ -724,7 +724,7 @@ namespace llvm {
T *P;
public:
- SingleLinkedListIterator<T>(T *P) : P(P) {}
+ SingleLinkedListIterator(T *P) : P(P) {}
SingleLinkedListIterator<T> &operator++() {
P = P->Next;
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/LiveRangeEdit.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/LiveRangeEdit.h
index fa4e80179eec..d80522f5bdac 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/LiveRangeEdit.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/LiveRangeEdit.h
@@ -34,9 +34,7 @@ namespace llvm {
class AAResults;
class LiveIntervals;
-class MachineBlockFrequencyInfo;
class MachineInstr;
-class MachineLoopInfo;
class MachineOperand;
class TargetInstrInfo;
class TargetRegisterInfo;
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/MIRFormatter.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/MIRFormatter.h
index 12c90600f6df..3f145ff224ad 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/MIRFormatter.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/MIRFormatter.h
@@ -23,7 +23,6 @@ namespace llvm {
class MachineFunction;
class MachineInstr;
struct PerFunctionMIParsingState;
-struct SlotMapping;
/// MIRFormatter - Interface to format MIR operand based on target
class MIRFormatter {
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineBasicBlock.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineBasicBlock.h
index efe22ea8f332..638b6732a543 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineBasicBlock.h
@@ -136,6 +136,10 @@ private:
/// Alignment of the basic block. One if the basic block does not need to be
/// aligned.
Align Alignment;
+ /// Maximum amount of bytes that can be added to align the basic block. If the
+ /// alignment cannot be reached in this many bytes, no bytes are emitted.
+ /// Zero to represent no maximum.
+ unsigned MaxBytesForAlignment = 0;
/// Indicate that this basic block is entered via an exception handler.
bool IsEHPad = false;
@@ -521,6 +525,19 @@ public:
/// Set alignment of the basic block.
void setAlignment(Align A) { Alignment = A; }
+ void setAlignment(Align A, unsigned MaxBytes) {
+ setAlignment(A);
+ setMaxBytesForAlignment(MaxBytes);
+ }
+
+ /// Return the maximum amount of padding allowed for aligning the basic block.
+ unsigned getMaxBytesForAlignment() const { return MaxBytesForAlignment; }
+
+ /// Set the maximum amount of padding allowed for aligning the basic block
+ void setMaxBytesForAlignment(unsigned MaxBytes) {
+ MaxBytesForAlignment = MaxBytes;
+ }
+
/// Returns true if the block is a landing pad. That is this basic block is
/// entered via an exception handler.
bool isEHPad() const { return IsEHPad; }
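
A hedged sketch of the new alignment-capping API above; alignHotBlock and the chosen numbers are illustrative only:

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include <cassert>

    void alignHotBlock(llvm::MachineBasicBlock &MBB) {
      // Request 32-byte alignment, but emit at most 8 bytes of padding for it
      // (0 would mean "no maximum").
      MBB.setAlignment(llvm::Align(32), /*MaxBytes=*/8);
      assert(MBB.getMaxBytesForAlignment() == 8);
    }
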
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineLoopUtils.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineLoopUtils.h
index 2352fbca548d..b9bf93b71e25 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineLoopUtils.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineLoopUtils.h
@@ -10,7 +10,6 @@
#define LLVM_CODEGEN_MACHINELOOPUTILS_H
namespace llvm {
-class MachineLoop;
class MachineBasicBlock;
class MachineRegisterInfo;
class TargetInstrInfo;
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineModuleInfo.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineModuleInfo.h
index 860a86ee991b..c07606e89374 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineModuleInfo.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineModuleInfo.h
@@ -44,7 +44,6 @@
namespace llvm {
class BasicBlock;
-class CallInst;
class Function;
class LLVMTargetMachine;
class MMIAddrLabelMap;
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/MachinePassManager.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/MachinePassManager.h
index f967167c65e1..75b8a89c812e 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/MachinePassManager.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/MachinePassManager.h
@@ -40,10 +40,10 @@ class MachineFunctionAnalysisManager : public AnalysisManager<MachineFunction> {
public:
using Base = AnalysisManager<MachineFunction>;
- MachineFunctionAnalysisManager() : Base(), FAM(nullptr), MAM(nullptr) {}
+ MachineFunctionAnalysisManager() : FAM(nullptr), MAM(nullptr) {}
MachineFunctionAnalysisManager(FunctionAnalysisManager &FAM,
ModuleAnalysisManager &MAM)
- : Base(), FAM(&FAM), MAM(&MAM) {}
+ : FAM(&FAM), MAM(&MAM) {}
MachineFunctionAnalysisManager(MachineFunctionAnalysisManager &&) = default;
MachineFunctionAnalysisManager &
operator=(MachineFunctionAnalysisManager &&) = default;
@@ -135,7 +135,7 @@ public:
MachineFunctionPassManager(bool DebugLogging = false,
bool RequireCodeGenSCCOrder = false,
bool VerifyMachineFunction = false)
- : Base(), RequireCodeGenSCCOrder(RequireCodeGenSCCOrder),
+ : RequireCodeGenSCCOrder(RequireCodeGenSCCOrder),
VerifyMachineFunction(VerifyMachineFunction) {}
MachineFunctionPassManager(MachineFunctionPassManager &&) = default;
MachineFunctionPassManager &
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineScheduler.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineScheduler.h
index e368fd7d056a..267c4b595eec 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineScheduler.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/MachineScheduler.h
@@ -425,10 +425,6 @@ protected:
IntervalPressure BotPressure;
RegPressureTracker BotRPTracker;
- /// True if disconnected subregister components are already renamed.
- /// The renaming is only done on demand if lane masks are tracked.
- bool DisconnectedComponentsRenamed = false;
-
public:
ScheduleDAGMILive(MachineSchedContext *C,
std::unique_ptr<MachineSchedStrategy> S)
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/Passes.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/Passes.h
index d5ad12fadfa0..616ab1034133 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/Passes.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/Passes.h
@@ -30,7 +30,6 @@ class MemoryBuffer;
class ModulePass;
class Pass;
class TargetMachine;
-class TargetRegisterClass;
class raw_ostream;
} // End llvm namespace
@@ -551,6 +550,10 @@ namespace llvm {
/// The pass transforms amx intrinsics to scalar operation if the function has
/// optnone attribute or it is O0.
FunctionPass *createX86LowerAMXIntrinsicsPass();
+
+  /// When learning an eviction policy, extract score (reward) information;
+  /// otherwise this does nothing.
+ FunctionPass *createRegAllocScoringPass();
} // End llvm namespace
#endif
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/SDNodeProperties.td b/contrib/llvm-project/llvm/include/llvm/CodeGen/SDNodeProperties.td
index d25e0bda26a9..3cb304f47f4b 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/SDNodeProperties.td
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/SDNodeProperties.td
@@ -1,4 +1,4 @@
-//===- SDNodeProperties.td - Common code for DAG isels ---*- tablegen -*-===//
+//===- SDNodeProperties.td - Common code for DAG isels -----*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/SelectionDAG.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/SelectionDAG.h
index d21844555f5b..e31719bcff0b 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -1350,13 +1350,9 @@ public:
SDValue getIndexedLoadVP(SDValue OrigLoad, const SDLoc &dl, SDValue Base,
SDValue Offset, ISD::MemIndexedMode AM);
SDValue getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
- SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo,
- Align Alignment, MachineMemOperand::Flags MMOFlags,
- const AAMDNodes &AAInfo = AAMDNodes(),
- bool IsCompressing = false);
- SDValue getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
- SDValue Mask, SDValue EVL, MachineMemOperand *MMO,
- bool IsCompressing = false);
+ SDValue Offset, SDValue Mask, SDValue EVL, EVT MemVT,
+ MachineMemOperand *MMO, ISD::MemIndexedMode AM,
+ bool IsTruncating = false, bool IsCompressing = false);
SDValue getTruncStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val,
SDValue Ptr, SDValue Mask, SDValue EVL,
MachinePointerInfo PtrInfo, EVT SVT, Align Alignment,
@@ -1833,18 +1829,18 @@ public:
unsigned ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
unsigned Depth = 0) const;
- /// Get the minimum bit size for this Value \p Op as a signed integer.
- /// i.e. x == sext(trunc(x to MinSignedBits) to bitwidth(x)).
- /// Similar to the APInt::getMinSignedBits function.
+ /// Get the upper bound on bit size for this Value \p Op as a signed integer.
+ /// i.e. x == sext(trunc(x to MaxSignedBits) to bitwidth(x)).
+ /// Similar to the APInt::getSignificantBits function.
/// Helper wrapper to ComputeNumSignBits.
- unsigned ComputeMinSignedBits(SDValue Op, unsigned Depth = 0) const;
+ unsigned ComputeMaxSignificantBits(SDValue Op, unsigned Depth = 0) const;
- /// Get the minimum bit size for this Value \p Op as a signed integer.
- /// i.e. x == sext(trunc(x to MinSignedBits) to bitwidth(x)).
- /// Similar to the APInt::getMinSignedBits function.
+ /// Get the upper bound on bit size for this Value \p Op as a signed integer.
+ /// i.e. x == sext(trunc(x to MaxSignedBits) to bitwidth(x)).
+ /// Similar to the APInt::getSignificantBits function.
/// Helper wrapper to ComputeNumSignBits.
- unsigned ComputeMinSignedBits(SDValue Op, const APInt &DemandedElts,
- unsigned Depth = 0) const;
+ unsigned ComputeMaxSignificantBits(SDValue Op, const APInt &DemandedElts,
+ unsigned Depth = 0) const;
/// Return true if this function can prove that \p Op is never poison
/// and, if \p PoisonOnly is false, does not have undef bits.
@@ -2001,6 +1997,9 @@ public:
return SplitVector(N, DL, LoVT, HiVT);
}
+ /// Split the explicit vector length parameter of a VP operation.
+ std::pair<SDValue, SDValue> SplitEVL(SDValue N, EVT VecVT, const SDLoc &DL);
+
/// Split the node's operand with EXTRACT_SUBVECTOR and
/// return the low/high part.
std::pair<SDValue, SDValue> SplitVectorOperand(const SDNode *N, unsigned OpNo)
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h
index 6a3d76be0ed6..0f3af915da64 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h
@@ -39,7 +39,7 @@ private:
public:
BaseIndexOffset() = default;
BaseIndexOffset(SDValue Base, SDValue Index, bool IsIndexSignExt)
- : Base(Base), Index(Index), Offset(), IsIndexSignExt(IsIndexSignExt) {}
+ : Base(Base), Index(Index), IsIndexSignExt(IsIndexSignExt) {}
BaseIndexOffset(SDValue Base, SDValue Index, int64_t Offset,
bool IsIndexSignExt)
: Base(Base), Index(Index), Offset(Offset),
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index 2855e1f1e587..cd62c47abce9 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -675,6 +675,9 @@ public:
}
}
+ /// Test if this node is a vector predication operation.
+ bool isVPOpcode() const { return ISD::isVPOpcode(getOpcode()); }
+
/// Test if this node has a post-isel opcode, directly
/// corresponding to a MachineInstr opcode.
bool isMachineOpcode() const { return NodeType < 0; }
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/TailDuplicator.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/TailDuplicator.h
index 6862bb2c3f44..daaa27f72d52 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/TailDuplicator.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/TailDuplicator.h
@@ -26,7 +26,6 @@
namespace llvm {
class MachineBasicBlock;
-class MachineBlockFrequencyInfo;
class MachineBranchProbabilityInfo;
class MachineFunction;
class MachineInstr;
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/TargetFrameLowering.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/TargetFrameLowering.h
index a855a0797723..f2ca1590fc39 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/TargetFrameLowering.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/TargetFrameLowering.h
@@ -216,8 +216,8 @@ public:
/// With basic block sections, emit callee saved frame moves for basic blocks
/// that are in a different section.
virtual void
- emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI) const {}
+ emitCalleeSavedFrameMovesFullCFA(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI) const {}
/// Replace a StackProbe stub (if any) with the actual probe code inline
virtual void inlineStackProbe(MachineFunction &MF,
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/TargetInstrInfo.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/TargetInstrInfo.h
index 58b8e59b68d7..411811d08c18 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/TargetInstrInfo.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/TargetInstrInfo.h
@@ -130,7 +130,7 @@ public:
}
/// Given \p MO is a PhysReg use return if it can be ignored for the purpose
- /// of instruction rematerialization.
+ /// of instruction rematerialization or sinking.
virtual bool isIgnorableUse(const MachineOperand &MO) const {
return false;
}
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/TargetLowering.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/TargetLowering.h
index b2d82e0cc6e8..bec191570594 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -63,7 +63,6 @@
namespace llvm {
-class BranchProbability;
class CCState;
class CCValAssign;
class Constant;
@@ -1802,11 +1801,14 @@ public:
/// Return the preferred loop alignment.
virtual Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const;
+ /// Return the maximum amount of bytes allowed to be emitted when padding for
+ /// alignment
+ virtual unsigned
+ getMaxPermittedBytesForAlignment(MachineBasicBlock *MBB) const;
+
/// Should loops be aligned even when the function is marked OptSize (but not
/// MinSize).
- virtual bool alignLoopsWithOptSize() const {
- return false;
- }
+ virtual bool alignLoopsWithOptSize() const { return false; }
/// If the target has a standard location for the stack protector guard,
/// returns the address of that location. Otherwise, returns nullptr.
@@ -1836,8 +1838,8 @@ public:
virtual Function *getSSPStackGuardCheck(const Module &M) const;
/// \returns true if a constant G_UBFX is legal on the target.
- virtual bool isConstantUnsignedBitfieldExtactLegal(unsigned Opc, LLT Ty1,
- LLT Ty2) const {
+ virtual bool isConstantUnsignedBitfieldExtractLegal(unsigned Opc, LLT Ty1,
+ LLT Ty2) const {
return false;
}
@@ -2341,6 +2343,9 @@ protected:
/// means the target does not care about loop alignment. The target may also
/// override getPrefLoopAlignment to provide per-loop values.
void setPrefLoopAlignment(Align Alignment) { PrefLoopAlignment = Alignment; }
+ void setMaxBytesForAlignment(unsigned MaxBytes) {
+ MaxBytesForAlignment = MaxBytes;
+ }
/// Set the minimum stack alignment of an argument.
void setMinStackArgumentAlignment(Align Alignment) {
@@ -2521,6 +2526,8 @@ public:
case ISD::SHL:
case ISD::SRL:
case ISD::SRA:
+ case ISD::ROTL:
+ case ISD::ROTR:
case ISD::SDIV:
case ISD::UDIV:
case ISD::SREM:
@@ -3030,6 +3037,8 @@ private:
/// The preferred loop alignment (in log2 bot in bytes).
Align PrefLoopAlignment;
+ /// The maximum amount of bytes permitted to be emitted for alignment.
+ unsigned MaxBytesForAlignment;
/// Size in bits of the maximum atomics size the backend supports.
/// Accesses larger than this will be expanded by AtomicExpandPass.
@@ -3283,6 +3292,17 @@ public:
return false;
}
+ // Lets target to control the following reassociation of operands: (op (op x,
+ // c1), y) -> (op (op x, y), c1) where N0 is (op x, c1) and N1 is y. By
+ // default consider profitable any case where N0 has single use. This
+ // behavior reflects the condition replaced by this target hook call in the
+ // DAGCombiner. Any particular target can implement its own heuristic to
+ // restrict common combiner.
+ virtual bool isReassocProfitable(SelectionDAG &DAG, SDValue N0,
+ SDValue N1) const {
+ return N0.hasOneUse();
+ }
+
virtual bool isSDNodeAlwaysUniform(const SDNode * N) const {
return false;
}
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/TargetRegisterInfo.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
index 8483d078ca74..c3b842052ef5 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
@@ -1094,6 +1094,13 @@ public:
inline MCRegister getSubReg(MCRegister Reg, unsigned Idx) const {
return static_cast<const MCRegisterInfo *>(this)->getSubReg(Reg, Idx);
}
+
+ /// Some targets have non-allocatable registers that aren't technically part
+ /// of the explicit callee saved register list, but should be handled as such
+ /// in certain cases.
+ virtual bool isNonallocatableRegisterCalleeSave(MCRegister Reg) const {
+ return false;
+ }
};
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/llvm/include/llvm/DWARFLinker/DWARFLinker.h b/contrib/llvm-project/llvm/include/llvm/DWARFLinker/DWARFLinker.h
index 1c6d0b1ead86..4f1c666df35f 100644
--- a/contrib/llvm-project/llvm/include/llvm/DWARFLinker/DWARFLinker.h
+++ b/contrib/llvm-project/llvm/include/llvm/DWARFLinker/DWARFLinker.h
@@ -385,8 +385,8 @@ private:
: Die(Die), Type(T), CU(CU), Flags(0), OtherInfo(OtherInfo) {}
WorklistItem(unsigned AncestorIdx, CompileUnit &CU, unsigned Flags)
- : Die(), Type(WorklistItemType::LookForParentDIEsToKeep), CU(CU),
- Flags(Flags), AncestorIdx(AncestorIdx) {}
+ : Type(WorklistItemType::LookForParentDIEsToKeep), CU(CU), Flags(Flags),
+ AncestorIdx(AncestorIdx) {}
};
/// returns true if we need to translate strings.
diff --git a/contrib/llvm-project/llvm/include/llvm/DWARFLinker/DWARFLinkerCompileUnit.h b/contrib/llvm-project/llvm/include/llvm/DWARFLinker/DWARFLinkerCompileUnit.h
index a6310bcb5df1..afba19ac7d42 100644
--- a/contrib/llvm-project/llvm/include/llvm/DWARFLinker/DWARFLinkerCompileUnit.h
+++ b/contrib/llvm-project/llvm/include/llvm/DWARFLinker/DWARFLinkerCompileUnit.h
@@ -148,9 +148,6 @@ public:
return LocationAttributes;
}
- void setHasInterestingContent() { HasInterestingContent = true; }
- bool hasInterestingContent() { return HasInterestingContent; }
-
/// Mark every DIE in this unit as kept. This function also
/// marks variables as InDebugMap so that they appear in the
/// reconstructed accelerator tables.
@@ -298,9 +295,6 @@ private:
/// Is this unit subject to the ODR rule?
bool HasODR;
- /// Did a DIE actually contain a valid reloc?
- bool HasInterestingContent;
-
/// The DW_AT_language of this unit.
uint16_t Language = 0;
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/CodeView/CodeView.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/CodeView/CodeView.h
index 9d41cb9fdd2b..d4cb6ae7a28e 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/CodeView/CodeView.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/CodeView/CodeView.h
@@ -162,6 +162,8 @@ enum SourceLanguage : uint8_t {
MSIL = 0x0f,
HLSL = 0x10,
+ Rust = 0x15,
+
/// The DMD & Swift compilers emit 'D' and 'S', respectively, for the CV
/// source language. Microsoft does not have enumerators for them yet.
D = 'D',
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/CodeView/TypeRecord.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/CodeView/TypeRecord.h
index 3b6d1b0b1a70..01916bd18fb8 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/CodeView/TypeRecord.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/CodeView/TypeRecord.h
@@ -111,7 +111,8 @@ public:
}
TypeIndex ContainingType;
- PointerToMemberRepresentation Representation;
+ PointerToMemberRepresentation Representation =
+ PointerToMemberRepresentation::Unknown;
};
class TypeRecord {
@@ -160,8 +161,8 @@ public:
TypeIndex getArgumentList() const { return ArgumentList; }
TypeIndex ReturnType;
- CallingConvention CallConv;
- FunctionOptions Options;
+ CallingConvention CallConv = CallingConvention::NearC;
+ FunctionOptions Options = FunctionOptions::None;
uint16_t ParameterCount = 0;
TypeIndex ArgumentList;
};
@@ -194,8 +195,8 @@ public:
TypeIndex ReturnType;
TypeIndex ClassType;
TypeIndex ThisType;
- CallingConvention CallConv;
- FunctionOptions Options;
+ CallingConvention CallConv = CallingConvention::NearC;
+ FunctionOptions Options = FunctionOptions::None;
uint16_t ParameterCount = 0;
TypeIndex ArgumentList;
int32_t ThisPointerAdjustment = 0;
@@ -209,7 +210,7 @@ public:
LabelRecord(LabelType Mode) : TypeRecord(TypeRecordKind::Label), Mode(Mode) {}
- LabelType Mode;
+ LabelType Mode = LabelType::Near;
};
// LF_MFUNC_ID
@@ -454,7 +455,7 @@ public:
StringRef getUniqueName() const { return UniqueName; }
uint16_t MemberCount = 0;
- ClassOptions Options;
+ ClassOptions Options = ClassOptions::None;
TypeIndex FieldList;
StringRef Name;
StringRef UniqueName;
@@ -585,7 +586,7 @@ public:
uint32_t getAge() const { return Age; }
StringRef getName() const { return Name; }
- GUID Guid;
+ GUID Guid = {};
uint32_t Age = 0;
StringRef Name;
};
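
[Editor's sketch, not part of the patch: the TypeRecord.h hunks above replace members that were previously left uninitialized (CallConv, Options, Mode, Guid, Representation) with in-class default member initializers. The standalone example below shows that pattern in isolation; the enum and struct names are illustrative only and do not come from LLVM.]

    // In-class default member initializers give default-constructed records
    // deterministic values, mirroring the CodeView record changes above.
    #include <cassert>
    #include <cstdint>

    enum class CallKind : std::uint8_t { NearC, FarC, ThisCall };

    struct ProcedureInfo {
      std::uint32_t ReturnType = 0;           // type-index placeholder
      CallKind Convention = CallKind::NearC;  // previously left uninitialized
      std::uint16_t ParameterCount = 0;
      std::uint32_t ArgumentList = 0;
    };

    int main() {
      ProcedureInfo P;  // every field now has a well-defined value
      assert(P.Convention == CallKind::NearC && P.ParameterCount == 0);
      return 0;
    }
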
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFContext.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFContext.h
index 24714ac3d101..e82faf6eeb24 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFContext.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFContext.h
@@ -52,6 +52,7 @@ class raw_ostream;
/// information parsing. The actual data is supplied through DWARFObj.
class DWARFContext : public DIContext {
DWARFUnitVector NormalUnits;
+ Optional<DenseMap<uint64_t, DWARFTypeUnit*>> NormalTypeUnits;
std::unique_ptr<DWARFUnitIndex> CUIndex;
std::unique_ptr<DWARFGdbIndex> GdbIndex;
std::unique_ptr<DWARFUnitIndex> TUIndex;
@@ -70,6 +71,7 @@ class DWARFContext : public DIContext {
std::unique_ptr<AppleAcceleratorTable> AppleObjC;
DWARFUnitVector DWOUnits;
+ Optional<DenseMap<uint64_t, DWARFTypeUnit*>> DWOTypeUnits;
std::unique_ptr<DWARFDebugAbbrev> AbbrevDWO;
std::unique_ptr<DWARFDebugMacro> MacinfoDWO;
std::unique_ptr<DWARFDebugMacro> MacroDWO;
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h
index c4370cb54113..6bdd23900182 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h
@@ -16,7 +16,6 @@
namespace llvm {
-class DataExtractor;
class DWARFUnit;
/// DWARFDebugInfoEntry - A DIE with only the minimum required data.
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFDie.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFDie.h
index 8f93ebc4ebc0..f731d440a35b 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFDie.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/DWARF/DWARFDie.h
@@ -463,13 +463,17 @@ inline bool operator!=(const std::reverse_iterator<DWARFDie::iterator> &LHS,
}
inline std::reverse_iterator<DWARFDie::iterator> DWARFDie::rbegin() const {
- return llvm::make_reverse_iterator(end());
+ return std::make_reverse_iterator(end());
}
inline std::reverse_iterator<DWARFDie::iterator> DWARFDie::rend() const {
- return llvm::make_reverse_iterator(begin());
+ return std::make_reverse_iterator(begin());
}
+void dumpTypeQualifiedName(const DWARFDie &DIE, raw_ostream &OS);
+void dumpTypeUnqualifiedName(const DWARFDie &DIE, raw_ostream &OS,
+ std::string *OriginalFullName = nullptr);
+
} // end namespace llvm
#endif // LLVM_DEBUGINFO_DWARF_DWARFDIE_H
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/GSYM/LookupResult.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/GSYM/LookupResult.h
index 211a352595fd..3dabbce32bb2 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/GSYM/LookupResult.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/GSYM/LookupResult.h
@@ -17,7 +17,6 @@
namespace llvm {
class raw_ostream;
namespace gsym {
-struct FileEntry;
struct SourceLocation {
StringRef Name; ///< Function or symbol name.
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/GSYM/ObjectFileTransformer.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/GSYM/ObjectFileTransformer.h
index bcde92e94fc5..dcbda39a7696 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/GSYM/ObjectFileTransformer.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/GSYM/ObjectFileTransformer.h
@@ -21,7 +21,6 @@ class ObjectFile;
namespace gsym {
-struct CUInfo;
class GsymCreator;
class ObjectFileTransformer {
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/GSYM/StringTable.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/GSYM/StringTable.h
index 045c9e3f3ebd..6dd90499c203 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/GSYM/StringTable.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/GSYM/StringTable.h
@@ -20,7 +20,7 @@ namespace gsym {
/// string at offset zero. Strings must be UTF8 NULL terminated strings.
struct StringTable {
StringRef Data;
- StringTable() : Data() {}
+ StringTable() {}
StringTable(StringRef D) : Data(D) {}
StringRef operator[](size_t Offset) const { return getString(Offset); }
StringRef getString(uint32_t Offset) const {
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/MSF/MSFBuilder.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/MSF/MSFBuilder.h
index 282870f5b3f1..1a03d42ded92 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/MSF/MSFBuilder.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/MSF/MSFBuilder.h
@@ -20,7 +20,6 @@
namespace llvm {
class FileBufferByteStream;
-class WritableBinaryStream;
namespace msf {
class MSFBuilder {
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/MSF/MappedBlockStream.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/MSF/MappedBlockStream.h
index 296a4840b779..b5f0596fceed 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/MSF/MappedBlockStream.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/MSF/MappedBlockStream.h
@@ -154,7 +154,7 @@ private:
WritableBinaryStreamRef WriteInterface;
};
-} // end namespace pdb
+} // namespace msf
} // end namespace llvm
#endif // LLVM_DEBUGINFO_MSF_MAPPEDBLOCKSTREAM_H
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/DIA/DIASupport.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/DIA/DIASupport.h
index 1a7c2f3aeeab..570b40c70578 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/DIA/DIASupport.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/DIA/DIASupport.h
@@ -27,7 +27,14 @@
// DIA headers must come after windows headers.
#include <cvconst.h>
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wnon-virtual-dtor"
+#endif
#include <dia2.h>
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
#include <diacreate.h>
#endif // LLVM_DEBUGINFO_PDB_DIA_DIASUPPORT_H
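
[Editor's sketch, not part of the patch: the DIASupport.h hunk wraps the third-party dia2.h include in a clang diagnostic push/ignore/pop so -Wnon-virtual-dtor is silenced only for that header. Below is a minimal, self-contained rendition of the same pattern; <vector> merely stands in for a vendor header.]

    // Suppress one warning only around a header we do not control.
    #ifdef __clang__
    #pragma clang diagnostic push
    #pragma clang diagnostic ignored "-Wnon-virtual-dtor"
    #endif
    #include <vector>  // imagine a vendor header that trips the warning
    #ifdef __clang__
    #pragma clang diagnostic pop
    #endif

    int main() {
      std::vector<int> V{1, 2, 3};
      return static_cast<int>(V.size()) - 3;  // 0 on success
    }
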
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolExe.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolExe.h
index 1a9fb240a248..cde66d399243 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolExe.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolExe.h
@@ -38,7 +38,7 @@ private:
void dumpChildren(raw_ostream &OS, StringRef Label, PDB_SymType ChildType,
int Indent) const;
};
+} // namespace pdb
} // namespace llvm
-}
#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLEXE_H
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFunc.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFunc.h
index 6be27c8d3bc7..f50057c68406 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFunc.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFunc.h
@@ -79,7 +79,7 @@ public:
uint32_t getCompilandId() const;
};
+} // namespace pdb
} // namespace llvm
-}
#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNC_H
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugEnd.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugEnd.h
index 7152249cbd03..1cdc1811bb1a 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugEnd.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugEnd.h
@@ -40,7 +40,7 @@ public:
FORWARD_SYMBOL_METHOD(getVirtualAddress)
};
+} // namespace pdb
} // namespace llvm
-}
#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNCDEBUGEND_H
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugStart.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugStart.h
index 3125c271d2e8..021f27c7f0f7 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugStart.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugStart.h
@@ -39,7 +39,7 @@ public:
FORWARD_SYMBOL_METHOD(getVirtualAddress)
};
+} // namespace pdb
} // namespace llvm
-}
#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNCDEBUGSTART_H
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolLabel.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolLabel.h
index 3625e23f014f..33eb36696cc2 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolLabel.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolLabel.h
@@ -39,7 +39,7 @@ public:
FORWARD_SYMBOL_METHOD(getVirtualAddress)
};
+} // namespace pdb
} // namespace llvm
-}
#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLLABEL_H
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolPublicSymbol.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolPublicSymbol.h
index e2b2545d78ec..f8dcb2ba9d5f 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolPublicSymbol.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolPublicSymbol.h
@@ -37,7 +37,7 @@ public:
FORWARD_SYMBOL_METHOD(getUndecoratedName)
};
+} // namespace pdb
} // namespace llvm
-}
#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLPUBLICSYMBOL_H
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolThunk.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolThunk.h
index 274de8b0b16f..a5f795cc1303 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolThunk.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolThunk.h
@@ -46,7 +46,7 @@ public:
FORWARD_SYMBOL_METHOD(getVirtualBaseOffset)
FORWARD_SYMBOL_METHOD(isVolatileType)
};
+} // namespace pdb
} // namespace llvm
-}
#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTHUNK_H
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeArray.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeArray.h
index c0215c9ee4b1..d4cd6e71423e 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeArray.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeArray.h
@@ -34,7 +34,7 @@ public:
FORWARD_SYMBOL_METHOD(isVolatileType)
};
+} // namespace pdb
} // namespace llvm
-}
#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEARRAY_H
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeBaseClass.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeBaseClass.h
index bab292ee0d46..bd2dbc914725 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeBaseClass.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeBaseClass.h
@@ -53,7 +53,7 @@ public:
FORWARD_SYMBOL_METHOD(isVolatileType)
};
+} // namespace pdb
} // namespace llvm
-}
#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEBASECLASS_H
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeBuiltin.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeBuiltin.h
index 7d94c3c97a2b..df6309b1545c 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeBuiltin.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeBuiltin.h
@@ -30,7 +30,7 @@ public:
FORWARD_SYMBOL_METHOD(isVolatileType)
};
+} // namespace pdb
} // namespace llvm
-}
#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEBUILTIN_H
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeCustom.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeCustom.h
index dc647aff48d3..7bf0317ff1ca 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeCustom.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeCustom.h
@@ -26,7 +26,7 @@ public:
FORWARD_SYMBOL_METHOD(getOemSymbolId)
};
+} // namespace pdb
} // namespace llvm
-}
#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPECUSTOM_H
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeDimension.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeDimension.h
index 7a9e43785d67..5d742237bac4 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeDimension.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeDimension.h
@@ -26,7 +26,7 @@ public:
FORWARD_SYMBOL_METHOD(getUpperBoundId)
};
+} // namespace pdb
} // namespace llvm
-}
#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEDIMENSION_H
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeEnum.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeEnum.h
index 3ac72801b202..0aab91039509 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeEnum.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeEnum.h
@@ -46,7 +46,7 @@ public:
FORWARD_SYMBOL_METHOD(isVolatileType)
};
+} // namespace pdb
} // namespace llvm
-}
#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEENUM_H
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFriend.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFriend.h
index c4d9dd6308a3..d56a90662dae 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFriend.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFriend.h
@@ -27,7 +27,7 @@ public:
FORWARD_SYMBOL_ID_METHOD(getType)
};
+} // namespace pdb
} // namespace llvm
-}
#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFRIEND_H
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionArg.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionArg.h
index 22d3623496f2..559ceec5aace 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionArg.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionArg.h
@@ -27,7 +27,7 @@ public:
FORWARD_SYMBOL_ID_METHOD(getType)
};
+} // namespace pdb
} // namespace llvm
-}
#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFUNCTIONARG_H
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionSig.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionSig.h
index a1491ca2e415..ceb4bff5b7b4 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionSig.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionSig.h
@@ -41,7 +41,7 @@ public:
FORWARD_SYMBOL_METHOD(isVolatileType)
};
+} // namespace pdb
} // namespace llvm
-}
#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFUNCTIONSIG_H
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeManaged.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeManaged.h
index 6bc70bca82e7..5e7b83ce8004 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeManaged.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeManaged.h
@@ -25,7 +25,7 @@ public:
FORWARD_SYMBOL_METHOD(getName)
};
+} // namespace pdb
} // namespace llvm
-}
#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEMANAGED_H
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypePointer.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypePointer.h
index b36f459e880c..da25eab50f9b 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypePointer.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypePointer.h
@@ -37,7 +37,7 @@ public:
FORWARD_SYMBOL_METHOD(isVolatileType)
};
+} // namespace pdb
} // namespace llvm
-}
#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEPOINTER_H
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeTypedef.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeTypedef.h
index 2712d0617e0e..8dc29ca26192 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeTypedef.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeTypedef.h
@@ -44,7 +44,7 @@ public:
FORWARD_SYMBOL_METHOD(isVolatileType)
};
+} // namespace pdb
} // namespace llvm
-}
#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPETYPEDEF_H
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTable.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTable.h
index e8161d311ea7..d08728dafa76 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTable.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTable.h
@@ -31,7 +31,7 @@ public:
FORWARD_SYMBOL_METHOD(isVolatileType)
};
+} // namespace pdb
} // namespace llvm
-}
#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEVTABLE_H
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTableShape.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTableShape.h
index 614060867042..c7e2ac148503 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTableShape.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTableShape.h
@@ -29,7 +29,7 @@ public:
FORWARD_SYMBOL_METHOD(isVolatileType)
};
+} // namespace pdb
} // namespace llvm
-}
#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEVTABLESHAPE_H
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolUnknown.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolUnknown.h
index cc29d38c2578..5b4909b800b9 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolUnknown.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolUnknown.h
@@ -24,7 +24,7 @@ public:
void dump(PDBSymDumper &Dumper) const override;
};
+} // namespace pdb
} // namespace llvm
-}
#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLUNKNOWN_H
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolUsingNamespace.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolUsingNamespace.h
index fd812cb2f793..19a8f414eb43 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolUsingNamespace.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/PDBSymbolUsingNamespace.h
@@ -27,7 +27,7 @@ public:
FORWARD_SYMBOL_METHOD(getName)
};
+} // namespace pdb
} // namespace llvm
-}
#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLUSINGNAMESPACE_H
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/Symbolize/DIPrinter.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/Symbolize/DIPrinter.h
index 4bb11bf62593..779dc885372d 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/Symbolize/DIPrinter.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/Symbolize/DIPrinter.h
@@ -87,7 +87,7 @@ private:
public:
PlainPrinterBase(raw_ostream &OS, raw_ostream &ES, PrinterConfig &Config)
- : DIPrinter(), OS(OS), ES(ES), Config(Config) {}
+ : OS(OS), ES(ES), Config(Config) {}
void print(const Request &Request, const DILineInfo &Info) override;
void print(const Request &Request, const DIInliningInfo &Info) override;
@@ -138,7 +138,7 @@ private:
public:
JSONPrinter(raw_ostream &OS, PrinterConfig &Config)
- : DIPrinter(), OS(OS), Config(Config) {}
+ : OS(OS), Config(Config) {}
void print(const Request &Request, const DILineInfo &Info) override;
void print(const Request &Request, const DIInliningInfo &Info) override;
diff --git a/contrib/llvm-project/llvm/include/llvm/Debuginfod/Debuginfod.h b/contrib/llvm-project/llvm/include/llvm/Debuginfod/Debuginfod.h
index fcb8ed3a9222..064cfa75b1a1 100644
--- a/contrib/llvm-project/llvm/include/llvm/Debuginfod/Debuginfod.h
+++ b/contrib/llvm-project/llvm/include/llvm/Debuginfod/Debuginfod.h
@@ -23,6 +23,8 @@
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"
+#include <chrono>
+
namespace llvm {
typedef ArrayRef<uint8_t> BuildIDRef;
diff --git a/contrib/llvm-project/llvm/include/llvm/Debuginfod/HTTPClient.h b/contrib/llvm-project/llvm/include/llvm/Debuginfod/HTTPClient.h
index 51de66629544..ca3b76ca9f3f 100644
--- a/contrib/llvm-project/llvm/include/llvm/Debuginfod/HTTPClient.h
+++ b/contrib/llvm-project/llvm/include/llvm/Debuginfod/HTTPClient.h
@@ -13,12 +13,14 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_SUPPORT_HTTP_CLIENT_H
-#define LLVM_SUPPORT_HTTP_CLIENT_H
+#ifndef LLVM_DEBUGINFOD_HTTPCLIENT_H
+#define LLVM_DEBUGINFOD_HTTPCLIENT_H
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"
+#include <chrono>
+
namespace llvm {
enum class HTTPMethod { GET };
@@ -116,4 +118,4 @@ public:
} // end namespace llvm
-#endif // LLVM_SUPPORT_HTTP_CLIENT_H
+#endif // LLVM_DEBUGINFOD_HTTPCLIENT_H
diff --git a/contrib/llvm-project/llvm/include/llvm/Demangle/ItaniumDemangle.h b/contrib/llvm-project/llvm/include/llvm/Demangle/ItaniumDemangle.h
index 86f5c992b63d..28545ed06836 100644
--- a/contrib/llvm-project/llvm/include/llvm/Demangle/ItaniumDemangle.h
+++ b/contrib/llvm-project/llvm/include/llvm/Demangle/ItaniumDemangle.h
@@ -1,3 +1,5 @@
+// Do not edit! -*- read-only -*-
+// See README.txt for instructions
//===------------------------- ItaniumDemangle.h ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
@@ -11,8 +13,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_DEMANGLE_ITANIUMDEMANGLE_H
-#define LLVM_DEMANGLE_ITANIUMDEMANGLE_H
+#ifndef DEMANGLE_ITANIUMDEMANGLE_H
+#define DEMANGLE_ITANIUMDEMANGLE_H
// FIXME: (possibly) incomplete list of features that clang mangles that this
// file does not yet support:
@@ -21,12 +23,13 @@
#include "DemangleConfig.h"
#include "StringView.h"
#include "Utility.h"
+#include <algorithm>
#include <cassert>
#include <cctype>
#include <cstdio>
#include <cstdlib>
#include <cstring>
-#include <numeric>
+#include <limits>
#include <utility>
#define FOR_EACH_NODE_KIND(X) \
@@ -1210,7 +1213,8 @@ public:
class ParameterPack final : public Node {
NodeArray Data;
- // Setup OutputBuffer for a pack expansion unless we're already expanding one.
+ // Setup OutputBuffer for a pack expansion, unless we're already expanding
+ // one.
void initializePackExpansion(OutputBuffer &OB) const {
if (OB.CurrentPackMax == std::numeric_limits<unsigned>::max()) {
OB.CurrentPackMax = static_cast<unsigned>(Data.size());
@@ -2473,7 +2477,7 @@ template <typename Derived, typename Alloc> struct AbstractManglingParser {
char consume() { return First != Last ? *First++ : '\0'; }
- char look(unsigned Lookahead = 0) {
+ char look(unsigned Lookahead = 0) const {
if (static_cast<size_t>(Last - First) <= Lookahead)
return '\0';
return First[Lookahead];
@@ -2591,34 +2595,38 @@ Node *AbstractManglingParser<Derived, Alloc>::parseName(NameState *State) {
if (look() == 'Z')
return getDerived().parseLocalName(State);
- // ::= <unscoped-template-name> <template-args>
- if (look() == 'S' && look(1) != 't') {
- Node *S = getDerived().parseSubstitution();
- if (S == nullptr)
- return nullptr;
- if (look() != 'I')
- return nullptr;
- Node *TA = getDerived().parseTemplateArgs(State != nullptr);
- if (TA == nullptr)
- return nullptr;
- if (State) State->EndsWithTemplateArgs = true;
- return make<NameWithTemplateArgs>(S, TA);
+ Node *Result = nullptr;
+ bool IsSubst = look() == 'S' && look(1) != 't';
+ if (IsSubst) {
+ // A substitution must lead to:
+ // ::= <unscoped-template-name> <template-args>
+ Result = getDerived().parseSubstitution();
+ } else {
+ // An unscoped name can be one of:
+ // ::= <unscoped-name>
+ // ::= <unscoped-template-name> <template-args>
+ Result = getDerived().parseUnscopedName(State);
}
-
- Node *N = getDerived().parseUnscopedName(State);
- if (N == nullptr)
+ if (Result == nullptr)
return nullptr;
- // ::= <unscoped-template-name> <template-args>
+
if (look() == 'I') {
- Subs.push_back(N);
+ // ::= <unscoped-template-name> <template-args>
+ if (!IsSubst)
+ // An unscoped-template-name is substitutable.
+ Subs.push_back(Result);
Node *TA = getDerived().parseTemplateArgs(State != nullptr);
if (TA == nullptr)
return nullptr;
- if (State) State->EndsWithTemplateArgs = true;
- return make<NameWithTemplateArgs>(N, TA);
+ if (State)
+ State->EndsWithTemplateArgs = true;
+ Result = make<NameWithTemplateArgs>(Result, TA);
+ } else if (IsSubst) {
+ // The substitution case must be followed by <template-args>.
+ return nullptr;
}
- // ::= <unscoped-name>
- return N;
+
+ return Result;
}
// <local-name> := Z <function encoding> E <entity name> [<discriminator>]
@@ -2663,13 +2671,17 @@ Node *AbstractManglingParser<Derived, Alloc>::parseLocalName(NameState *State) {
template <typename Derived, typename Alloc>
Node *
AbstractManglingParser<Derived, Alloc>::parseUnscopedName(NameState *State) {
- if (consumeIf("StL") || consumeIf("St")) {
- Node *R = getDerived().parseUnqualifiedName(State);
- if (R == nullptr)
- return nullptr;
- return make<StdQualifiedName>(R);
- }
- return getDerived().parseUnqualifiedName(State);
+ bool IsStd = consumeIf("St");
+ if (IsStd)
+ consumeIf('L');
+
+ Node *Result = getDerived().parseUnqualifiedName(State);
+ if (Result == nullptr)
+ return nullptr;
+ if (IsStd)
+ Result = make<StdQualifiedName>(Result);
+
+ return Result;
}
// <unqualified-name> ::= <operator-name> [abi-tags]
@@ -4064,9 +4076,9 @@ Node *AbstractManglingParser<Derived, Alloc>::parseType() {
}
// ::= <substitution> # See Compression below
case 'S': {
- if (look(1) && look(1) != 't') {
- Node *Sub = getDerived().parseSubstitution();
- if (Sub == nullptr)
+ if (look(1) != 't') {
+ Result = getDerived().parseSubstitution();
+ if (Result == nullptr)
return nullptr;
// Sub could be either of:
@@ -4083,13 +4095,13 @@ Node *AbstractManglingParser<Derived, Alloc>::parseType() {
Node *TA = getDerived().parseTemplateArgs();
if (TA == nullptr)
return nullptr;
- Result = make<NameWithTemplateArgs>(Sub, TA);
- break;
+ Result = make<NameWithTemplateArgs>(Result, TA);
+ } else {
+ // If all we parsed was a substitution, don't re-insert into the
+ // substitution table.
+ return Result;
}
-
- // If all we parsed was a substitution, don't re-insert into the
- // substitution table.
- return Sub;
+ break;
}
DEMANGLE_FALLTHROUGH;
}
@@ -5437,38 +5449,35 @@ Node *AbstractManglingParser<Derived, Alloc>::parseSubstitution() {
if (!consumeIf('S'))
return nullptr;
- if (std::islower(look())) {
- Node *SpecialSub;
+ if (look() >= 'a' && look() <= 'z') {
+ SpecialSubKind Kind;
switch (look()) {
case 'a':
- ++First;
- SpecialSub = make<SpecialSubstitution>(SpecialSubKind::allocator);
+ Kind = SpecialSubKind::allocator;
break;
case 'b':
- ++First;
- SpecialSub = make<SpecialSubstitution>(SpecialSubKind::basic_string);
+ Kind = SpecialSubKind::basic_string;
break;
- case 's':
- ++First;
- SpecialSub = make<SpecialSubstitution>(SpecialSubKind::string);
+ case 'd':
+ Kind = SpecialSubKind::iostream;
break;
case 'i':
- ++First;
- SpecialSub = make<SpecialSubstitution>(SpecialSubKind::istream);
+ Kind = SpecialSubKind::istream;
break;
case 'o':
- ++First;
- SpecialSub = make<SpecialSubstitution>(SpecialSubKind::ostream);
+ Kind = SpecialSubKind::ostream;
break;
- case 'd':
- ++First;
- SpecialSub = make<SpecialSubstitution>(SpecialSubKind::iostream);
+ case 's':
+ Kind = SpecialSubKind::string;
break;
default:
return nullptr;
}
+ ++First;
+ auto *SpecialSub = make<SpecialSubstitution>(Kind);
if (!SpecialSub)
return nullptr;
+
// Itanium C++ ABI 5.1.2: If a name that would use a built-in <substitution>
// has ABI tags, the tags are appended to the substitution; the result is a
// substitutable component.
@@ -5747,4 +5756,4 @@ struct ManglingParser : AbstractManglingParser<ManglingParser<Alloc>, Alloc> {
DEMANGLE_NAMESPACE_END
-#endif // LLVM_DEMANGLE_ITANIUMDEMANGLE_H
+#endif // DEMANGLE_ITANIUMDEMANGLE_H
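
[Editor's sketch, not part of the patch: the reworked parseName/parseUnscopedName/parseSubstitution paths above handle the 'St' (std::) prefix and the special substitutions such as 'Sa'. Assuming the public convenience wrapper in llvm/Demangle/Demangle.h and a link against LLVMDemangle, a name exercising those paths can be demangled as follows; exact output spacing may vary by LLVM version.]

    #include "llvm/Demangle/Demangle.h"
    #include <iostream>
    #include <string>

    int main() {
      // 'St' (std::) and 'Sa' (std::allocator) go through the parser code above.
      std::string Mangled = "_ZNSt6vectorIiSaIiEE9push_backERKi";
      std::cout << llvm::demangle(Mangled) << "\n";
      // typically prints: std::vector<int, std::allocator<int> >::push_back(int const&)
      return 0;
    }
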
diff --git a/contrib/llvm-project/llvm/include/llvm/Demangle/MicrosoftDemangle.h b/contrib/llvm-project/llvm/include/llvm/Demangle/MicrosoftDemangle.h
index 040313661601..6f2d0416901e 100644
--- a/contrib/llvm-project/llvm/include/llvm/Demangle/MicrosoftDemangle.h
+++ b/contrib/llvm-project/llvm/include/llvm/Demangle/MicrosoftDemangle.h
@@ -9,10 +9,8 @@
#ifndef LLVM_DEMANGLE_MICROSOFTDEMANGLE_H
#define LLVM_DEMANGLE_MICROSOFTDEMANGLE_H
-#include "llvm/Demangle/DemangleConfig.h"
#include "llvm/Demangle/MicrosoftDemangleNodes.h"
#include "llvm/Demangle/StringView.h"
-#include "llvm/Demangle/Utility.h"
#include <utility>
diff --git a/contrib/llvm-project/llvm/include/llvm/Demangle/MicrosoftDemangleNodes.h b/contrib/llvm-project/llvm/include/llvm/Demangle/MicrosoftDemangleNodes.h
index 46daa3885a06..8ad2472364b4 100644
--- a/contrib/llvm-project/llvm/include/llvm/Demangle/MicrosoftDemangleNodes.h
+++ b/contrib/llvm-project/llvm/include/llvm/Demangle/MicrosoftDemangleNodes.h
@@ -13,7 +13,6 @@
#ifndef LLVM_DEMANGLE_MICROSOFTDEMANGLENODES_H
#define LLVM_DEMANGLE_MICROSOFTDEMANGLENODES_H
-#include "llvm/Demangle/DemangleConfig.h"
#include "llvm/Demangle/StringView.h"
#include <array>
#include <cstdint>
@@ -283,9 +282,7 @@ struct StructorIdentifierNode;
struct ThunkSignatureNode;
struct PointerTypeNode;
struct ArrayTypeNode;
-struct CustomNode;
struct TagTypeNode;
-struct IntrinsicTypeNode;
struct NodeArrayNode;
struct QualifiedNameNode;
struct TemplateParameterReferenceNode;
diff --git a/contrib/llvm-project/llvm/include/llvm/Demangle/StringView.h b/contrib/llvm-project/llvm/include/llvm/Demangle/StringView.h
index 378e85341637..323282f69c26 100644
--- a/contrib/llvm-project/llvm/include/llvm/Demangle/StringView.h
+++ b/contrib/llvm-project/llvm/include/llvm/Demangle/StringView.h
@@ -1,3 +1,5 @@
+// Do not edit! -*- read-only -*-
+// See README.txt for instructions
//===--- StringView.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
@@ -10,11 +12,10 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_DEMANGLE_STRINGVIEW_H
-#define LLVM_DEMANGLE_STRINGVIEW_H
+#ifndef DEMANGLE_STRINGVIEW_H
+#define DEMANGLE_STRINGVIEW_H
#include "DemangleConfig.h"
-#include <algorithm>
#include <cassert>
#include <cstring>
@@ -38,15 +39,16 @@ public:
StringView substr(size_t Pos, size_t Len = npos) const {
assert(Pos <= size());
- return StringView(begin() + Pos, std::min(Len, size() - Pos));
+ if (Len > size() - Pos)
+ Len = size() - Pos;
+ return StringView(begin() + Pos, Len);
}
size_t find(char C, size_t From = 0) const {
- size_t FindBegin = std::min(From, size());
// Avoid calling memchr with nullptr.
- if (FindBegin < size()) {
+ if (From < size()) {
// Just forward to memchr, which is faster than a hand-rolled loop.
- if (const void *P = ::memchr(First + FindBegin, C, size() - FindBegin))
+ if (const void *P = ::memchr(First + From, C, size() - From))
return size_t(static_cast<const char *>(P) - First);
}
return npos;
@@ -98,7 +100,7 @@ public:
bool startsWith(StringView Str) const {
if (Str.size() > size())
return false;
- return std::equal(Str.begin(), Str.end(), begin());
+ return std::strncmp(Str.begin(), begin(), Str.size()) == 0;
}
const char &operator[](size_t Idx) const { return *(begin() + Idx); }
@@ -111,7 +113,7 @@ public:
inline bool operator==(const StringView &LHS, const StringView &RHS) {
return LHS.size() == RHS.size() &&
- std::equal(LHS.begin(), LHS.end(), RHS.begin());
+ std::strncmp(LHS.begin(), RHS.begin(), LHS.size()) == 0;
}
DEMANGLE_NAMESPACE_END
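
[Editor's sketch, not part of the patch: the StringView.h hunk switches startsWith and operator== from std::equal to std::strncmp so the header no longer needs <algorithm>. The helper below shows the same strncmp-based prefix test on its own; the function name is illustrative.]

    #include <cassert>
    #include <cstring>

    static bool startsWith(const char *Str, std::size_t Len,
                           const char *Prefix, std::size_t PrefixLen) {
      if (PrefixLen > Len)
        return false;  // prefix longer than the string itself
      return std::strncmp(Str, Prefix, PrefixLen) == 0;
    }

    int main() {
      const char Buf[] = "_ZNSt6vectorIiE";
      assert(startsWith(Buf, sizeof(Buf) - 1, "_ZN", 3));
      assert(!startsWith(Buf, sizeof(Buf) - 1, "_ZZ", 3));
      return 0;
    }
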
diff --git a/contrib/llvm-project/llvm/include/llvm/Demangle/Utility.h b/contrib/llvm-project/llvm/include/llvm/Demangle/Utility.h
index 4fea9351a4bf..bec019da8680 100644
--- a/contrib/llvm-project/llvm/include/llvm/Demangle/Utility.h
+++ b/contrib/llvm-project/llvm/include/llvm/Demangle/Utility.h
@@ -1,3 +1,5 @@
+// Do not edit! -*- read-only -*-
+// See README.txt for instructions
//===--- Utility.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
@@ -10,14 +12,15 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_DEMANGLE_UTILITY_H
-#define LLVM_DEMANGLE_UTILITY_H
+#ifndef DEMANGLE_UTILITY_H
+#define DEMANGLE_UTILITY_H
#include "StringView.h"
+#include <array>
#include <cstdint>
#include <cstdlib>
#include <cstring>
-#include <iterator>
+#include <exception>
#include <limits>
DEMANGLE_NAMESPACE_BEGIN
@@ -48,8 +51,8 @@ class OutputBuffer {
return;
}
- char Temp[21];
- char *TempPtr = std::end(Temp);
+ std::array<char, 21> Temp;
+ char *TempPtr = Temp.data() + Temp.size();
while (N) {
*--TempPtr = char('0' + N % 10);
@@ -59,7 +62,7 @@ class OutputBuffer {
// Add negative sign...
if (isNeg)
*--TempPtr = '-';
- this->operator<<(StringView(TempPtr, std::end(Temp)));
+ this->operator<<(StringView(TempPtr, Temp.data() + Temp.size()));
}
public:
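
[Editor's sketch, not part of the patch: the Utility.h hunk replaces the raw char Temp[21] buffer with std::array<char, 21>, still filling decimal digits from the back of the buffer. The standalone helper below reproduces that formatting scheme; the function name and the zero/negative handling are my own, not OutputBuffer's exact code.]

    #include <array>
    #include <cstdint>
    #include <iostream>
    #include <string>

    static std::string formatDecimal(std::uint64_t N, bool IsNegative) {
      std::array<char, 21> Temp;  // 20 digits of a 64-bit value plus a sign
      char *Ptr = Temp.data() + Temp.size();  // write digits right-to-left
      if (N == 0)
        *--Ptr = '0';
      while (N) {
        *--Ptr = static_cast<char>('0' + N % 10);
        N /= 10;
      }
      if (IsNegative)
        *--Ptr = '-';
      return std::string(Ptr, Temp.data() + Temp.size());
    }

    int main() {
      std::cout << formatDecimal(18446744073709551615ull, false) << "\n";  // 2^64 - 1
      std::cout << formatDecimal(42, true) << "\n";                        // -42
      return 0;
    }
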
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITEventListener.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITEventListener.h
index 4eefd993de2b..effff2ea5cfa 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITEventListener.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITEventListener.h
@@ -24,7 +24,6 @@
namespace llvm {
class IntelJITEventsWrapper;
-class MachineFunction;
class OProfileWrapper;
namespace object {
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/EHFrameSupport.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/EHFrameSupport.h
index ec78d9db40b6..33eee7a75f39 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/EHFrameSupport.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/EHFrameSupport.h
@@ -25,25 +25,20 @@ namespace jitlink {
class EHFrameRegistrar {
public:
virtual ~EHFrameRegistrar();
- virtual Error registerEHFrames(JITTargetAddress EHFrameSectionAddr,
- size_t EHFrameSectionSize) = 0;
- virtual Error deregisterEHFrames(JITTargetAddress EHFrameSectionAddr,
- size_t EHFrameSectionSize) = 0;
+ virtual Error registerEHFrames(orc::ExecutorAddrRange EHFrameSection) = 0;
+ virtual Error deregisterEHFrames(orc::ExecutorAddrRange EHFrameSection) = 0;
};
/// Registers / Deregisters EH-frames in the current process.
class InProcessEHFrameRegistrar final : public EHFrameRegistrar {
public:
- Error registerEHFrames(JITTargetAddress EHFrameSectionAddr,
- size_t EHFrameSectionSize) override;
+ Error registerEHFrames(orc::ExecutorAddrRange EHFrameSection) override;
- Error deregisterEHFrames(JITTargetAddress EHFrameSectionAddr,
- size_t EHFrameSectionSize) override;
+ Error deregisterEHFrames(orc::ExecutorAddrRange EHFrameSection) override;
};
-using StoreFrameRangeFunction =
- std::function<void(JITTargetAddress EHFrameSectionAddr,
- size_t EHFrameSectionSize)>;
+using StoreFrameRangeFunction = std::function<void(
+ orc::ExecutorAddr EHFrameSectionAddr, size_t EHFrameSectionSize)>;
/// Creates a pass that records the address and size of the EH frame section.
/// If no eh-frame section is found then the address and size will both be given
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/JITLink.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/JITLink.h
index 83d85953fce6..ddbb3e76f145 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/JITLink.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/JITLink.h
@@ -104,10 +104,10 @@ class Addressable {
friend class LinkGraph;
protected:
- Addressable(JITTargetAddress Address, bool IsDefined)
+ Addressable(orc::ExecutorAddr Address, bool IsDefined)
: Address(Address), IsDefined(IsDefined), IsAbsolute(false) {}
- Addressable(JITTargetAddress Address)
+ Addressable(orc::ExecutorAddr Address)
: Address(Address), IsDefined(false), IsAbsolute(true) {
assert(!(IsDefined && IsAbsolute) &&
"Block cannot be both defined and absolute");
@@ -119,8 +119,8 @@ public:
Addressable(Addressable &&) = delete;
Addressable &operator=(Addressable &&) = default;
- JITTargetAddress getAddress() const { return Address; }
- void setAddress(JITTargetAddress Address) { this->Address = Address; }
+ orc::ExecutorAddr getAddress() const { return Address; }
+ void setAddress(orc::ExecutorAddr Address) { this->Address = Address; }
/// Returns true if this is a defined addressable, in which case you
/// can downcast this to a Block.
@@ -133,7 +133,7 @@ private:
this->IsAbsolute = IsAbsolute;
}
- JITTargetAddress Address = 0;
+ orc::ExecutorAddr Address;
uint64_t IsDefined : 1;
uint64_t IsAbsolute : 1;
@@ -152,7 +152,7 @@ class Block : public Addressable {
private:
/// Create a zero-fill defined addressable.
- Block(Section &Parent, JITTargetAddress Size, JITTargetAddress Address,
+ Block(Section &Parent, orc::ExecutorAddrDiff Size, orc::ExecutorAddr Address,
uint64_t Alignment, uint64_t AlignmentOffset)
: Addressable(Address, true), Parent(&Parent), Size(Size) {
assert(isPowerOf2_64(Alignment) && "Alignment must be power of 2");
@@ -168,7 +168,7 @@ private:
/// Create a defined addressable for the given content.
/// The Content is assumed to be non-writable, and will be copied when
/// mutations are required.
- Block(Section &Parent, ArrayRef<char> Content, JITTargetAddress Address,
+ Block(Section &Parent, ArrayRef<char> Content, orc::ExecutorAddr Address,
uint64_t Alignment, uint64_t AlignmentOffset)
: Addressable(Address, true), Parent(&Parent), Data(Content.data()),
Size(Content.size()) {
@@ -188,7 +188,7 @@ private:
/// The standard way to achieve this is to allocate it on the Graph's
/// allocator.
Block(Section &Parent, MutableArrayRef<char> Content,
- JITTargetAddress Address, uint64_t Alignment, uint64_t AlignmentOffset)
+ orc::ExecutorAddr Address, uint64_t Alignment, uint64_t AlignmentOffset)
: Addressable(Address, true), Parent(&Parent), Data(Content.data()),
Size(Content.size()) {
assert(isPowerOf2_64(Alignment) && "Alignment must be power of 2");
@@ -328,7 +328,7 @@ public:
/// Returns the address of the fixup for the given edge, which is equal to
/// this block's address plus the edge's offset.
- JITTargetAddress getFixupAddress(const Edge &E) const {
+ orc::ExecutorAddr getFixupAddress(const Edge &E) const {
return getAddress() + E.getOffset();
}
@@ -343,12 +343,17 @@ private:
std::vector<Edge> Edges;
};
-// Align a JITTargetAddress to conform with block alignment requirements.
-inline JITTargetAddress alignToBlock(JITTargetAddress Addr, Block &B) {
+// Align an address to conform with block alignment requirements.
+inline uint64_t alignToBlock(uint64_t Addr, Block &B) {
uint64_t Delta = (B.getAlignmentOffset() - Addr) % B.getAlignment();
return Addr + Delta;
}
+// Align a orc::ExecutorAddr to conform with block alignment requirements.
+inline orc::ExecutorAddr alignToBlock(orc::ExecutorAddr Addr, Block &B) {
+ return orc::ExecutorAddr(alignToBlock(Addr.getValue(), B));
+}
+
/// Describes symbol linkage. This can be used to make resolve definition
/// clashes.
enum class Linkage : uint8_t {
@@ -391,8 +396,8 @@ class Symbol {
friend class LinkGraph;
private:
- Symbol(Addressable &Base, JITTargetAddress Offset, StringRef Name,
- JITTargetAddress Size, Linkage L, Scope S, bool IsLive,
+ Symbol(Addressable &Base, orc::ExecutorAddrDiff Offset, StringRef Name,
+ orc::ExecutorAddrDiff Size, Linkage L, Scope S, bool IsLive,
bool IsCallable)
: Name(Name), Base(&Base), Offset(Offset), Size(Size) {
assert(Offset <= MaxOffset && "Offset out of range");
@@ -403,7 +408,8 @@ private:
}
static Symbol &constructCommon(void *SymStorage, Block &Base, StringRef Name,
- JITTargetAddress Size, Scope S, bool IsLive) {
+ orc::ExecutorAddrDiff Size, Scope S,
+ bool IsLive) {
assert(SymStorage && "Storage cannot be null");
assert(!Name.empty() && "Common symbol name cannot be empty");
assert(Base.isDefined() &&
@@ -416,7 +422,7 @@ private:
}
static Symbol &constructExternal(void *SymStorage, Addressable &Base,
- StringRef Name, JITTargetAddress Size,
+ StringRef Name, orc::ExecutorAddrDiff Size,
Linkage L) {
assert(SymStorage && "Storage cannot be null");
assert(!Base.isDefined() &&
@@ -428,7 +434,7 @@ private:
}
static Symbol &constructAbsolute(void *SymStorage, Addressable &Base,
- StringRef Name, JITTargetAddress Size,
+ StringRef Name, orc::ExecutorAddrDiff Size,
Linkage L, Scope S, bool IsLive) {
assert(SymStorage && "Storage cannot be null");
assert(!Base.isDefined() &&
@@ -439,8 +445,8 @@ private:
}
static Symbol &constructAnonDef(void *SymStorage, Block &Base,
- JITTargetAddress Offset,
- JITTargetAddress Size, bool IsCallable,
+ orc::ExecutorAddrDiff Offset,
+ orc::ExecutorAddrDiff Size, bool IsCallable,
bool IsLive) {
assert(SymStorage && "Storage cannot be null");
assert((Offset + Size) <= Base.getSize() &&
@@ -452,9 +458,9 @@ private:
}
static Symbol &constructNamedDef(void *SymStorage, Block &Base,
- JITTargetAddress Offset, StringRef Name,
- JITTargetAddress Size, Linkage L, Scope S,
- bool IsLive, bool IsCallable) {
+ orc::ExecutorAddrDiff Offset, StringRef Name,
+ orc::ExecutorAddrDiff Size, Linkage L,
+ Scope S, bool IsLive, bool IsCallable) {
assert(SymStorage && "Storage cannot be null");
assert((Offset + Size) <= Base.getSize() &&
"Symbol extends past end of block");
@@ -552,16 +558,16 @@ public:
}
/// Returns the offset for this symbol within the underlying addressable.
- JITTargetAddress getOffset() const { return Offset; }
+ orc::ExecutorAddrDiff getOffset() const { return Offset; }
/// Returns the address of this symbol.
- JITTargetAddress getAddress() const { return Base->getAddress() + Offset; }
+ orc::ExecutorAddr getAddress() const { return Base->getAddress() + Offset; }
/// Returns the size of this symbol.
- JITTargetAddress getSize() const { return Size; }
+ orc::ExecutorAddrDiff getSize() const { return Size; }
/// Set the size of this symbol.
- void setSize(JITTargetAddress Size) {
+ void setSize(orc::ExecutorAddrDiff Size) {
assert(Base && "Cannot set size for null Symbol");
assert((Size == 0 || Base->isDefined()) &&
"Non-zero size can only be set for defined symbols");
@@ -622,7 +628,7 @@ private:
void setBlock(Block &B) { Base = &B; }
- void setOffset(uint64_t NewOffset) {
+ void setOffset(orc::ExecutorAddrDiff NewOffset) {
assert(NewOffset <= MaxOffset && "Offset out of range");
Offset = NewOffset;
}
@@ -637,7 +643,7 @@ private:
uint64_t S : 2;
uint64_t IsLive : 1;
uint64_t IsCallable : 1;
- JITTargetAddress Size = 0;
+ orc::ExecutorAddrDiff Size = 0;
};
raw_ostream &operator<<(raw_ostream &OS, const Symbol &A);
@@ -783,13 +789,17 @@ public:
assert((First || !Last) && "Last can not be null if start is non-null");
return !First;
}
- JITTargetAddress getStart() const {
- return First ? First->getAddress() : 0;
+ orc::ExecutorAddr getStart() const {
+ return First ? First->getAddress() : orc::ExecutorAddr();
+ }
+ orc::ExecutorAddr getEnd() const {
+ return Last ? Last->getAddress() + Last->getSize() : orc::ExecutorAddr();
}
- JITTargetAddress getEnd() const {
- return Last ? Last->getAddress() + Last->getSize() : 0;
+ orc::ExecutorAddrDiff getSize() const { return getEnd() - getStart(); }
+
+ orc::ExecutorAddrRange getRange() const {
+ return orc::ExecutorAddrRange(getStart(), getEnd());
}
- uint64_t getSize() const { return getEnd() - getStart(); }
private:
Block *First = nullptr;
@@ -995,7 +1005,7 @@ public:
/// Create a content block.
Block &createContentBlock(Section &Parent, ArrayRef<char> Content,
- uint64_t Address, uint64_t Alignment,
+ orc::ExecutorAddr Address, uint64_t Alignment,
uint64_t AlignmentOffset) {
return createBlock(Parent, Content, Address, Alignment, AlignmentOffset);
}
@@ -1003,15 +1013,17 @@ public:
/// Create a content block with initially mutable data.
Block &createMutableContentBlock(Section &Parent,
MutableArrayRef<char> MutableContent,
- uint64_t Address, uint64_t Alignment,
+ orc::ExecutorAddr Address,
+ uint64_t Alignment,
uint64_t AlignmentOffset) {
return createBlock(Parent, MutableContent, Address, Alignment,
AlignmentOffset);
}
/// Create a zero-fill block.
- Block &createZeroFillBlock(Section &Parent, uint64_t Size, uint64_t Address,
- uint64_t Alignment, uint64_t AlignmentOffset) {
+ Block &createZeroFillBlock(Section &Parent, orc::ExecutorAddrDiff Size,
+ orc::ExecutorAddr Address, uint64_t Alignment,
+ uint64_t AlignmentOffset) {
return createBlock(Parent, Size, Address, Alignment, AlignmentOffset);
}
@@ -1061,22 +1073,24 @@ public:
/// present during lookup: Externals with strong linkage must be found or
/// an error will be emitted. Externals with weak linkage are permitted to
/// be undefined, in which case they are assigned a value of 0.
- Symbol &addExternalSymbol(StringRef Name, uint64_t Size, Linkage L) {
+ Symbol &addExternalSymbol(StringRef Name, orc::ExecutorAddrDiff Size,
+ Linkage L) {
assert(llvm::count_if(ExternalSymbols,
[&](const Symbol *Sym) {
return Sym->getName() == Name;
}) == 0 &&
"Duplicate external symbol");
- auto &Sym =
- Symbol::constructExternal(Allocator.Allocate<Symbol>(),
- createAddressable(0, false), Name, Size, L);
+ auto &Sym = Symbol::constructExternal(
+ Allocator.Allocate<Symbol>(),
+ createAddressable(orc::ExecutorAddr(), false), Name, Size, L);
ExternalSymbols.insert(&Sym);
return Sym;
}
/// Add an absolute symbol.
- Symbol &addAbsoluteSymbol(StringRef Name, JITTargetAddress Address,
- uint64_t Size, Linkage L, Scope S, bool IsLive) {
+ Symbol &addAbsoluteSymbol(StringRef Name, orc::ExecutorAddr Address,
+ orc::ExecutorAddrDiff Size, Linkage L, Scope S,
+ bool IsLive) {
assert(llvm::count_if(AbsoluteSymbols,
[&](const Symbol *Sym) {
return Sym->getName() == Name;
@@ -1091,7 +1105,7 @@ public:
/// Convenience method for adding a weak zero-fill symbol.
Symbol &addCommonSymbol(StringRef Name, Scope S, Section &Section,
- JITTargetAddress Address, uint64_t Size,
+ orc::ExecutorAddr Address, orc::ExecutorAddrDiff Size,
uint64_t Alignment, bool IsLive) {
assert(llvm::count_if(defined_symbols(),
[&](const Symbol *Sym) {
@@ -1107,8 +1121,8 @@ public:
}
/// Add an anonymous symbol.
- Symbol &addAnonymousSymbol(Block &Content, JITTargetAddress Offset,
- JITTargetAddress Size, bool IsCallable,
+ Symbol &addAnonymousSymbol(Block &Content, orc::ExecutorAddrDiff Offset,
+ orc::ExecutorAddrDiff Size, bool IsCallable,
bool IsLive) {
auto &Sym = Symbol::constructAnonDef(Allocator.Allocate<Symbol>(), Content,
Offset, Size, IsCallable, IsLive);
@@ -1117,9 +1131,9 @@ public:
}
/// Add a named symbol.
- Symbol &addDefinedSymbol(Block &Content, JITTargetAddress Offset,
- StringRef Name, JITTargetAddress Size, Linkage L,
- Scope S, bool IsCallable, bool IsLive) {
+ Symbol &addDefinedSymbol(Block &Content, orc::ExecutorAddrDiff Offset,
+ StringRef Name, orc::ExecutorAddrDiff Size,
+ Linkage L, Scope S, bool IsCallable, bool IsLive) {
assert((S == Scope::Local || llvm::count_if(defined_symbols(),
[&](const Symbol *Sym) {
return Sym->getName() == Name;
@@ -1193,7 +1207,7 @@ public:
assert(Sym.isDefined() && "Sym is not a defined symbol");
Section &Sec = Sym.getBlock().getSection();
Sec.removeSymbol(Sym);
- Sym.makeExternal(createAddressable(0, false));
+ Sym.makeExternal(createAddressable(orc::ExecutorAddr(), false));
}
ExternalSymbols.insert(&Sym);
}
@@ -1203,7 +1217,7 @@ public:
///
/// Symbol size, linkage, scope, and callability, and liveness will be left
/// unchanged. Symbol offset will be reset to 0.
- void makeAbsolute(Symbol &Sym, JITTargetAddress Address) {
+ void makeAbsolute(Symbol &Sym, orc::ExecutorAddr Address) {
assert(!Sym.isAbsolute() && "Symbol is already absolute");
if (Sym.isExternal()) {
assert(ExternalSymbols.count(&Sym) &&
@@ -1222,8 +1236,9 @@ public:
/// Turn an absolute or external symbol into a defined one by attaching it to
/// a block. Symbol must not already be defined.
- void makeDefined(Symbol &Sym, Block &Content, JITTargetAddress Offset,
- JITTargetAddress Size, Linkage L, Scope S, bool IsLive) {
+ void makeDefined(Symbol &Sym, Block &Content, orc::ExecutorAddrDiff Offset,
+ orc::ExecutorAddrDiff Size, Linkage L, Scope S,
+ bool IsLive) {
assert(!Sym.isDefined() && "Sym is already a defined symbol");
if (Sym.isAbsolute()) {
assert(AbsoluteSymbols.count(&Sym) &&
@@ -1255,15 +1270,15 @@ public:
///
/// All other symbol attributes are unchanged.
void transferDefinedSymbol(Symbol &Sym, Block &DestBlock,
- JITTargetAddress NewOffset,
- Optional<JITTargetAddress> ExplicitNewSize) {
+ orc::ExecutorAddrDiff NewOffset,
+ Optional<orc::ExecutorAddrDiff> ExplicitNewSize) {
auto &OldSection = Sym.getBlock().getSection();
Sym.setBlock(DestBlock);
Sym.setOffset(NewOffset);
if (ExplicitNewSize)
Sym.setSize(*ExplicitNewSize);
else {
- JITTargetAddress RemainingBlockSize = DestBlock.getSize() - NewOffset;
+ auto RemainingBlockSize = DestBlock.getSize() - NewOffset;
if (Sym.getSize() > RemainingBlockSize)
Sym.setSize(RemainingBlockSize);
}
@@ -1377,7 +1392,7 @@ public:
///
/// Accessing this object after finalization will result in undefined
/// behavior.
- JITLinkMemoryManager::AllocActions &allocActions() { return AAs; }
+ orc::shared::AllocActions &allocActions() { return AAs; }
/// Dump the graph.
void dump(raw_ostream &OS);
@@ -1395,7 +1410,7 @@ private:
SectionList Sections;
ExternalSymbolSet ExternalSymbols;
ExternalSymbolSet AbsoluteSymbols;
- JITLinkMemoryManager::AllocActions AAs;
+ orc::shared::AllocActions AAs;
};
inline MutableArrayRef<char> Block::getMutableContent(LinkGraph &G) {
@@ -1407,14 +1422,14 @@ inline MutableArrayRef<char> Block::getMutableContent(LinkGraph &G) {
/// Enables easy lookup of blocks by addresses.
class BlockAddressMap {
public:
- using AddrToBlockMap = std::map<JITTargetAddress, Block *>;
+ using AddrToBlockMap = std::map<orc::ExecutorAddr, Block *>;
using const_iterator = AddrToBlockMap::const_iterator;
/// A block predicate that always adds all blocks.
static bool includeAllBlocks(const Block &B) { return true; }
/// A block predicate that always includes blocks with non-null addresses.
- static bool includeNonNull(const Block &B) { return B.getAddress(); }
+ static bool includeNonNull(const Block &B) { return !!B.getAddress(); }
BlockAddressMap() = default;
@@ -1478,7 +1493,7 @@ public:
/// Returns the block starting at the given address, or nullptr if no such
/// block exists.
- Block *getBlockAt(JITTargetAddress Addr) const {
+ Block *getBlockAt(orc::ExecutorAddr Addr) const {
auto I = AddrToBlock.find(Addr);
if (I == AddrToBlock.end())
return nullptr;
@@ -1487,7 +1502,7 @@ public:
/// Returns the block covering the given address, or nullptr if no such block
/// exists.
- Block *getBlockCovering(JITTargetAddress Addr) const {
+ Block *getBlockCovering(orc::ExecutorAddr Addr) const {
auto I = AddrToBlock.upper_bound(Addr);
if (I == AddrToBlock.begin())
return nullptr;
@@ -1504,10 +1519,11 @@ private:
ExistingBlock.getAddress() + ExistingBlock.getSize();
return make_error<JITLinkError>(
"Block at " +
- formatv("{0:x16} -- {1:x16}", NewBlock.getAddress(), NewBlockEnd) +
+ formatv("{0:x16} -- {1:x16}", NewBlock.getAddress().getValue(),
+ NewBlockEnd.getValue()) +
" overlaps " +
- formatv("{0:x16} -- {1:x16}", ExistingBlock.getAddress(),
- ExistingBlockEnd));
+ formatv("{0:x16} -- {1:x16}", ExistingBlock.getAddress().getValue(),
+ ExistingBlockEnd.getValue()));
}
AddrToBlockMap AddrToBlock;
@@ -1532,7 +1548,7 @@ public:
/// Returns the list of symbols that start at the given address, or nullptr if
/// no such symbols exist.
- const SymbolVector *getSymbolsAt(JITTargetAddress Addr) const {
+ const SymbolVector *getSymbolsAt(orc::ExecutorAddr Addr) const {
auto I = AddrToSymbols.find(Addr);
if (I == AddrToSymbols.end())
return nullptr;
@@ -1540,7 +1556,7 @@ public:
}
private:
- std::map<JITTargetAddress, SymbolVector> AddrToSymbols;
+ std::map<orc::ExecutorAddr, SymbolVector> AddrToSymbols;
};
/// A function for mutating LinkGraphs.
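Note on the LinkGraph changes above: block and symbol addresses now use orc::ExecutorAddr, and offsets/sizes use orc::ExecutorAddrDiff, instead of raw JITTargetAddress integers. A minimal caller-side sketch under those assumptions (the symbol name, size, and attributes below are illustrative, not taken from the patch):

#include "llvm/ExecutionEngine/JITLink/JITLink.h"

using namespace llvm;
using namespace llvm::jitlink;

// Hypothetical helper: add a named, callable symbol covering the first eight
// bytes of an existing block, using the new ExecutorAddrDiff-based signature.
static Symbol &addEntrySymbol(LinkGraph &G, Block &B) {
  orc::ExecutorAddrDiff Offset = 0; // plain uint64_t offsets after this change
  orc::ExecutorAddrDiff Size = 8;
  return G.addDefinedSymbol(B, Offset, "entry", Size, Linkage::Strong,
                            Scope::Default, /*IsCallable=*/true,
                            /*IsLive=*/true);
}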
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h
index 62c271dfc0b2..abfeafe798e7 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h
@@ -13,9 +13,12 @@
#ifndef LLVM_EXECUTIONENGINE_JITLINK_JITLINKMEMORYMANAGER_H
#define LLVM_EXECUTIONENGINE_JITLINK_JITLINKMEMORYMANAGER_H
+#include "llvm/ADT/FunctionExtras.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ExecutionEngine/JITLink/JITLinkDylib.h"
#include "llvm/ExecutionEngine/JITLink/MemoryFlags.h"
-#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/Shared/AllocationActions.h"
+#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MSVCErrorWorkarounds.h"
@@ -39,46 +42,6 @@ class Section;
/// and their implementations should include any necessary synchronization.
class JITLinkMemoryManager {
public:
- /// Represents a call to a graph-memory-management support function in the
- /// executor.
- ///
- /// Support functions are called as:
- ///
- /// auto *Result =
- /// ((char*(*)(const void*, size_t))FnAddr)(
- /// (const void*)CtxAddr, (size_t)CtxSize)
- ///
- /// A null result is interpreted as success.
- ///
- /// A non-null result is interpreted as a heap-allocated string containing
- /// an error message to report to the allocator (the allocator's
- /// executor-side implementation code is responsible for freeing the error
- /// string).
- struct AllocActionCall {
- JITTargetAddress FnAddr = 0;
- JITTargetAddress CtxAddr = 0;
- JITTargetAddress CtxSize = 0;
- };
-
- /// A pair of AllocActionCalls, one to be run at finalization time, one to be
- /// run at deallocation time.
- ///
- /// AllocActionCallPairs should be constructed for paired operations (e.g.
- /// __register_ehframe and __deregister_ehframe for eh-frame registration).
- /// See comments for AllocActions for execution ordering.
- ///
- /// For unpaired operations one or the other member can be left unused, as
- /// AllocationActionCalls with an FnAddr of zero will be skipped.
- struct AllocActionCallPair {
- AllocActionCall Finalize;
- AllocActionCall Dealloc;
- };
-
- /// A vector of allocation actions to be run for this allocation.
- ///
- /// Finalize allocations will be run in order at finalize time. Dealloc
- /// actions will be run in reverse order at deallocation time.
- using AllocActions = std::vector<AllocActionCallPair>;
/// Represents a finalized allocation.
///
@@ -92,47 +55,49 @@ public:
class FinalizedAlloc {
friend class JITLinkMemoryManager;
- public:
- static constexpr JITTargetAddress InvalidAddr = ~JITTargetAddress(0);
+ static constexpr auto InvalidAddr = ~uint64_t(0);
+ public:
FinalizedAlloc() = default;
- explicit FinalizedAlloc(JITTargetAddress A) : A(A) {
- assert(A != 0 && "Explicitly creating an invalid allocation?");
+ explicit FinalizedAlloc(orc::ExecutorAddr A) : A(A) {
+ assert(A.getValue() != InvalidAddr &&
+ "Explicitly creating an invalid allocation?");
}
FinalizedAlloc(const FinalizedAlloc &) = delete;
FinalizedAlloc(FinalizedAlloc &&Other) : A(Other.A) {
- Other.A = InvalidAddr;
+ Other.A.setValue(InvalidAddr);
}
FinalizedAlloc &operator=(const FinalizedAlloc &) = delete;
FinalizedAlloc &operator=(FinalizedAlloc &&Other) {
- assert(A == InvalidAddr &&
+ assert(A.getValue() == InvalidAddr &&
"Cannot overwrite active finalized allocation");
std::swap(A, Other.A);
return *this;
}
~FinalizedAlloc() {
- assert(A == InvalidAddr && "Finalized allocation was not deallocated");
+ assert(A.getValue() == InvalidAddr &&
+ "Finalized allocation was not deallocated");
}
/// FinalizedAllocs convert to false for default-constructed, and
/// true otherwise. Default-constructed allocs need not be deallocated.
- explicit operator bool() const { return A != InvalidAddr; }
+ explicit operator bool() const { return A.getValue() != InvalidAddr; }
/// Returns the address associated with this finalized allocation.
/// The allocation is unmodified.
- JITTargetAddress getAddress() const { return A; }
+ orc::ExecutorAddr getAddress() const { return A; }
/// Returns the address associated with this finalized allocation and
/// resets this object to the default state.
/// This should only be used by allocators when deallocating memory.
- JITTargetAddress release() {
- JITTargetAddress Tmp = A;
- A = InvalidAddr;
+ orc::ExecutorAddr release() {
+ orc::ExecutorAddr Tmp = A;
+ A.setValue(InvalidAddr);
return Tmp;
}
private:
- JITTargetAddress A = InvalidAddr;
+ orc::ExecutorAddr A{InvalidAddr};
};
/// Represents an allocation which has not been finalized yet.
@@ -262,7 +227,7 @@ public:
Align Alignment;
size_t ContentSize;
uint64_t ZeroFillSize;
- JITTargetAddress Addr;
+ orc::ExecutorAddr Addr;
char *WorkingMem = nullptr;
private:
@@ -312,7 +277,7 @@ public:
/// Returns a reference to the AllocActions in the graph.
/// This convenience function saves callers from having to #include
/// LinkGraph.h if all they need are allocation actions.
- JITLinkMemoryManager::AllocActions &graphAllocActions();
+ orc::shared::AllocActions &graphAllocActions();
private:
LinkGraph &G;
@@ -340,7 +305,7 @@ public:
/// Describes the segment working memory and executor address.
struct SegmentInfo {
- JITTargetAddress Addr = 0;
+ orc::ExecutorAddr Addr;
MutableArrayRef<char> WorkingMem;
};
@@ -413,12 +378,12 @@ private:
// There shouldn't need to be a heap alloc for this.
struct FinalizedAllocInfo {
sys::MemoryBlock StandardSegments;
- std::vector<AllocActionCall> DeallocActions;
+ std::vector<orc::shared::WrapperFunctionCall> DeallocActions;
};
- FinalizedAlloc
- createFinalizedAlloc(sys::MemoryBlock StandardSegments,
- std::vector<AllocActionCall> DeallocActions);
+ FinalizedAlloc createFinalizedAlloc(
+ sys::MemoryBlock StandardSegments,
+ std::vector<orc::shared::WrapperFunctionCall> DeallocActions);
uint64_t PageSize;
std::mutex FinalizedAllocsMutex;
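FinalizedAlloc is now a move-only wrapper around an orc::ExecutorAddr rather than a bare JITTargetAddress. A small sketch of the intended semantics, assuming a made-up address value (real allocs come from a JITLinkMemoryManager):

#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
#include <cassert>

using namespace llvm;

void finalizedAllocDemo() {
  // Default-constructed allocs convert to false and need no deallocation.
  jitlink::JITLinkMemoryManager::FinalizedAlloc Empty;
  assert(!Empty && "default-constructed allocs are inactive");

  // A live alloc wraps an ExecutorAddr and must be handed back exactly once.
  jitlink::JITLinkMemoryManager::FinalizedAlloc FA(orc::ExecutorAddr(0x1000));
  assert(FA && "non-default allocs convert to true");
  orc::ExecutorAddr Target = FA.release(); // resets FA to the default state
  (void)Target;
}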
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/MemoryFlags.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/MemoryFlags.h
index 8fdce93ebc56..e9771319ef06 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/MemoryFlags.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/MemoryFlags.h
@@ -16,6 +16,7 @@
#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Memory.h"
#include "llvm/Support/raw_ostream.h"
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/riscv.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/riscv.h
index b8d08d88c1c9..5abd4cf11dea 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/riscv.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/riscv.h
@@ -37,6 +37,13 @@ enum EdgeKind_riscv : Edge::Kind {
///
R_RISCV_64,
+ /// Low 12 bits of PC-relative branch pointer value relocation
+ ///
+ /// Fixup expression:
+ /// Fixup <- (Target - Fixup + Addend) & 0xFFF
+ ///
+ R_RISCV_BRANCH,
+
/// High 20 bits of 32-bit pointer value relocation
///
/// Fixup expression
@@ -72,6 +79,12 @@ enum EdgeKind_riscv : Edge::Kind {
/// Fixup <- (Target - Fixup + Addend)
R_RISCV_CALL,
+ /// 32-bit PC-relative relocation
+ ///
+ /// Fixup expression:
+ /// Fixup <- (Target - Fixup + Addend)
+ R_RISCV_32_PCREL,
+
/// PC relative GOT offset
///
/// Fixup expression:
@@ -82,8 +95,79 @@ enum EdgeKind_riscv : Edge::Kind {
///
/// Fixup expression:
/// Fixup <- (Target - Fixup + Addend)
- R_RISCV_CALL_PLT
+ R_RISCV_CALL_PLT,
+
+ /// 64-bit label addition
+ ///
+ /// Fixup expression:
+ /// Fixup <- (Target - *{8}Fixup + Addend)
+ R_RISCV_ADD64,
+
+ /// 32-bit label addition
+ ///
+ /// Fixup expression:
+ /// Fixup <- (Target - *{4}Fixup + Addend)
+ R_RISCV_ADD32,
+
+ /// 16-bit label addition
+ ///
+ /// Fixup expression:
+ /// Fixup <- (Target - *{2}Fixup + Addend)
+ R_RISCV_ADD16,
+
+ /// 8-bit label addition
+ ///
+ /// Fixup expression:
+ /// Fixup <- (Target - *{1}Fixup + Addend)
+ R_RISCV_ADD8,
+
+ /// 64-bit label subtraction
+ ///
+ /// Fixup expression:
+ /// Fixup <- (Target - *{8}Fixup - Addend)
+ R_RISCV_SUB64,
+
+ /// 32-bit label subtraction
+ ///
+ /// Fixup expression:
+ /// Fixup <- (Target - *{4}Fixup - Addend)
+ R_RISCV_SUB32,
+
+ /// 16-bit label subtraction
+ ///
+ /// Fixup expression:
+ /// Fixup <- (Target - *{2}Fixup - Addend)
+ R_RISCV_SUB16,
+
+ /// 8-bit label subtraction
+ ///
+ /// Fixup expression:
+ /// Fixup <- (Target - *{1}Fixup - Addend)
+ R_RISCV_SUB8,
+
+ /// Local label assignment
+ ///
+ /// Fixup expression:
+ /// Fixup <- (Target + Addend)
+ R_RISCV_SET6,
+
+ /// Local label assignment
+ ///
+ /// Fixup expression:
+ /// Fixup <- (Target + Addend)
+ R_RISCV_SET8,
+
+ /// Local label assignment
+ ///
+ /// Fixup expression:
+ /// Fixup <- (Target + Addend)
+ R_RISCV_SET16,
+
+ /// Local label assignment
+ ///
+ /// Fixup expression:
+ /// Fixup <- (Target + Addend)
+ R_RISCV_SET32,
};
/// Returns a string name for the given riscv edge. For debugging purposes
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/x86_64.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/x86_64.h
index 3130ea381534..4a4e8d15be66 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/x86_64.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/x86_64.h
@@ -368,18 +368,18 @@ inline Error applyFixup(LinkGraph &G, Block &B, const Edge &E,
char *BlockWorkingMem = B.getAlreadyMutableContent().data();
char *FixupPtr = BlockWorkingMem + E.getOffset();
- JITTargetAddress FixupAddress = B.getAddress() + E.getOffset();
+ auto FixupAddress = B.getAddress() + E.getOffset();
switch (E.getKind()) {
case Pointer64: {
- uint64_t Value = E.getTarget().getAddress() + E.getAddend();
+ uint64_t Value = E.getTarget().getAddress().getValue() + E.getAddend();
*(ulittle64_t *)FixupPtr = Value;
break;
}
case Pointer32: {
- uint64_t Value = E.getTarget().getAddress() + E.getAddend();
+ uint64_t Value = E.getTarget().getAddress().getValue() + E.getAddend();
if (LLVM_LIKELY(isInRangeForImmU32(Value)))
*(ulittle32_t *)FixupPtr = Value;
else
@@ -387,7 +387,7 @@ inline Error applyFixup(LinkGraph &G, Block &B, const Edge &E,
break;
}
case Pointer32Signed: {
- int64_t Value = E.getTarget().getAddress() + E.getAddend();
+ int64_t Value = E.getTarget().getAddress().getValue() + E.getAddend();
if (LLVM_LIKELY(isInRangeForImmS32(Value)))
*(little32_t *)FixupPtr = Value;
else
@@ -483,8 +483,8 @@ extern const char PointerJumpStubContent[6];
inline Symbol &createAnonymousPointer(LinkGraph &G, Section &PointerSection,
Symbol *InitialTarget = nullptr,
uint64_t InitialAddend = 0) {
- auto &B =
- G.createContentBlock(PointerSection, NullPointerContent, ~7ULL, 8, 0);
+ auto &B = G.createContentBlock(PointerSection, NullPointerContent,
+ orc::ExecutorAddr(~uint64_t(7)), 8, 0);
if (InitialTarget)
B.addEdge(Pointer64, 0, *InitialTarget, InitialAddend);
return G.addAnonymousSymbol(B, 0, 8, false, false);
@@ -498,8 +498,8 @@ inline Symbol &createAnonymousPointer(LinkGraph &G, Section &PointerSection,
/// address: highest allowable: (~5U)
inline Block &createPointerJumpStubBlock(LinkGraph &G, Section &StubSection,
Symbol &PointerSymbol) {
- auto &B =
- G.createContentBlock(StubSection, PointerJumpStubContent, ~5ULL, 1, 0);
+ auto &B = G.createContentBlock(StubSection, PointerJumpStubContent,
+ orc::ExecutorAddr(~uint64_t(5)), 1, 0);
B.addEdge(Delta32, 2, PointerSymbol, -4);
return B;
}
@@ -552,8 +552,7 @@ public:
"Fell through switch, but no new kind to set");
DEBUG_WITH_TYPE("jitlink", {
dbgs() << " Fixing " << G.getEdgeKindName(E.getKind()) << " edge at "
- << formatv("{0:x}", B->getFixupAddress(E)) << " ("
- << formatv("{0:x}", B->getAddress()) << " + "
+ << B->getFixupAddress(E) << " (" << B->getAddress() << " + "
<< formatv("{0:x}", E.getOffset()) << ")\n";
});
E.setKind(KindToSet);
@@ -586,8 +585,7 @@ public:
if (E.getKind() == x86_64::BranchPCRel32 && !E.getTarget().isDefined()) {
DEBUG_WITH_TYPE("jitlink", {
dbgs() << " Fixing " << G.getEdgeKindName(E.getKind()) << " edge at "
- << formatv("{0:x}", B->getFixupAddress(E)) << " ("
- << formatv("{0:x}", B->getAddress()) << " + "
+ << B->getFixupAddress(E) << " (" << B->getAddress() << " + "
<< formatv("{0:x}", E.getOffset()) << ")\n";
});
// Set the edge kind to Branch32ToPtrJumpStubBypassable to enable it to
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
index 30544e8a1748..30af7a628e91 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
@@ -51,13 +51,8 @@
#include <vector>
namespace llvm {
-
-class Value;
-
namespace orc {
-class ExtractingIRMaterializationUnit;
-
class CompileOnDemandLayer : public IRLayer {
friend class PartitioningIRMaterializationUnit;
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/CompileUtils.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/CompileUtils.h
index c7ba57228ab7..ee53f2383cb0 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/CompileUtils.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/CompileUtils.h
@@ -20,7 +20,6 @@
namespace llvm {
-class MCContext;
class MemoryBuffer;
class Module;
class ObjectCache;
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Core.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Core.h
index b5f5636800df..d0168f79e3d8 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Core.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Core.h
@@ -1120,32 +1120,33 @@ public:
/// DFS order (based on linkage relationships). Each JITDylib will appear
/// only once.
///
- /// It is illegal to call this method on a defunct JITDylib and the client
- /// is responsible for ensuring that they do not do so.
- static std::vector<JITDylibSP> getDFSLinkOrder(ArrayRef<JITDylibSP> JDs);
+ /// If any JITDylib in the order is defunct then this method will return an
+ /// error, otherwise returns the order.
+ static Expected<std::vector<JITDylibSP>>
+ getDFSLinkOrder(ArrayRef<JITDylibSP> JDs);
- /// Returns the given JITDylibs and all of their transitive dependensies in
+ /// Returns the given JITDylibs and all of their transitive dependencies in
/// reverse DFS order (based on linkage relationships). Each JITDylib will
/// appear only once.
///
- /// It is illegal to call this method on a defunct JITDylib and the client
- /// is responsible for ensuring that they do not do so.
- static std::vector<JITDylibSP>
+ /// If any JITDylib in the order is defunct then this method will return an
+ /// error, otherwise returns the order.
+ static Expected<std::vector<JITDylibSP>>
getReverseDFSLinkOrder(ArrayRef<JITDylibSP> JDs);
/// Return this JITDylib and its transitive dependencies in DFS order
/// based on linkage relationships.
///
- /// It is illegal to call this method on a defunct JITDylib and the client
- /// is responsible for ensuring that they do not do so.
- std::vector<JITDylibSP> getDFSLinkOrder();
+ /// If any JITDylib in the order is defunct then this method will return an
+ /// error, otherwise returns the order.
+ Expected<std::vector<JITDylibSP>> getDFSLinkOrder();
/// Return this JITDylib and its transitive dependencies in reverse DFS order
/// based on linkage relationships.
///
- /// It is illegal to call this method on a defunct JITDylib and the client
- /// is responsible for ensuring that they do not do so.
- std::vector<JITDylibSP> getReverseDFSLinkOrder();
+ /// If any JITDylib in the order is defunct then this method will return an
+ /// error, otherwise returns the order.
+ Expected<std::vector<JITDylibSP>> getReverseDFSLinkOrder();
private:
using AsynchronousSymbolQuerySet =
@@ -1309,6 +1310,10 @@ public:
/// __dso_handle).
virtual Error setupJITDylib(JITDylib &JD) = 0;
+ /// This method will be called outside the session lock each time a JITDylib
+ /// is removed to allow the Platform to remove any JITDylib-specific data.
+ virtual Error teardownJITDylib(JITDylib &JD) = 0;
+
/// This method will be called under the ExecutionSession lock each time a
/// MaterializationUnit is added to a JITDylib.
virtual Error notifyAdding(ResourceTracker &RT,
@@ -1327,8 +1332,7 @@ public:
const DenseMap<JITDylib *, SymbolLookupSet> &InitSyms);
/// Performs an async lookup for the given symbols in each of the given
- /// JITDylibs, calling the given handler with the compound result map once
- /// all lookups have completed.
+ /// JITDylibs, calling the given handler once all lookups have completed.
static void
lookupInitSymbolsAsync(unique_function<void(Error)> OnComplete,
ExecutionSession &ES,
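A consequence of the getDFSLinkOrder/getReverseDFSLinkOrder change above: callers now receive an Expected<> and must handle the defunct-JITDylib error rather than relying on an assertion. A hypothetical caller might look like this:

#include "llvm/ExecutionEngine/Orc/Core.h"

using namespace llvm;
using namespace llvm::orc;

Error visitLinkOrder(JITDylib &JD) {
  Expected<std::vector<JITDylibSP>> LinkOrder = JD.getDFSLinkOrder();
  if (!LinkOrder)
    return LinkOrder.takeError(); // e.g. a JITDylib in the order was defunct

  for (JITDylibSP &Next : *LinkOrder) {
    // ... inspect each JITDylib in DFS order ...
    (void)Next;
  }
  return Error::success();
}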
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/DebugUtils.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/DebugUtils.h
index 4b4472e0ac4d..7eb98dfc741e 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/DebugUtils.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/DebugUtils.h
@@ -73,9 +73,6 @@ raw_ostream &operator<<(raw_ostream &OS,
/// Render a SymbolLookupFlags instance.
raw_ostream &operator<<(raw_ostream &OS, const SymbolLookupFlags &LookupFlags);
-/// Render a JITDylibLookupFlags instance.
-raw_ostream &operator<<(raw_ostream &OS, const LookupKind &K);
-
/// Render a SymbolLookupSet entry.
raw_ostream &operator<<(raw_ostream &OS, const SymbolLookupSet::value_type &KV);
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/DebuggerSupportPlugin.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/DebuggerSupportPlugin.h
index af092b3287d3..d2bf8330695f 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/DebuggerSupportPlugin.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/DebuggerSupportPlugin.h
@@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_EXECUTIONENGINE_ORC_DEBUGGERSUPPORT_H
-#define LLVM_EXECUTIONENGINE_ORC_DEBUGGERSUPPORT_H
+#ifndef LLVM_EXECUTIONENGINE_ORC_DEBUGGERSUPPORTPLUGIN_H
+#define LLVM_EXECUTIONENGINE_ORC_DEBUGGERSUPPORTPLUGIN_H
#include "llvm/ExecutionEngine/Orc/Core.h"
#include "llvm/ExecutionEngine/Orc/EPCDebugObjectRegistrar.h"
@@ -61,4 +61,4 @@ private:
} // namespace orc
} // namespace llvm
-#endif // LLVM_EXECUTIONENGINE_ORC_DEBUGGERSUPPORT_H
+#endif // LLVM_EXECUTIONENGINE_ORC_DEBUGGERSUPPORTPLUGIN_H
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/ELFNixPlatform.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/ELFNixPlatform.h
index 20da3e3b89eb..6b12fe990a8a 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/ELFNixPlatform.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/ELFNixPlatform.h
@@ -101,6 +101,7 @@ public:
ObjectLinkingLayer &getObjectLinkingLayer() const { return ObjLinkingLayer; }
Error setupJITDylib(JITDylib &JD) override;
+ Error teardownJITDylib(JITDylib &JD) override;
Error notifyAdding(ResourceTracker &RT,
const MaterializationUnit &MU) override;
Error notifyRemoving(ResourceTracker &RT) override;
@@ -236,7 +237,7 @@ private:
DenseMap<JITDylib *, ELFNixJITDylibInitializers> InitSeqs;
std::vector<ELFPerObjectSectionsToRegister> BootstrapPOSRs;
- DenseMap<JITTargetAddress, JITDylib *> HandleAddrToJITDylib;
+ DenseMap<ExecutorAddr, JITDylib *> HandleAddrToJITDylib;
DenseMap<JITDylib *, uint64_t> JITDylibToPThreadKey;
};
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/EPCEHFrameRegistrar.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/EPCEHFrameRegistrar.h
index 6d113a7bdf1a..3de55a11a563 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/EPCEHFrameRegistrar.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/EPCEHFrameRegistrar.h
@@ -39,10 +39,8 @@ public:
: ES(ES), RegisterEHFrameWrapperFnAddr(RegisterEHFrameWrapperFnAddr),
DeregisterEHFrameWrapperFnAddr(DeregisterEHFRameWrapperFnAddr) {}
- Error registerEHFrames(JITTargetAddress EHFrameSectionAddr,
- size_t EHFrameSectionSize) override;
- Error deregisterEHFrames(JITTargetAddress EHFrameSectionAddr,
- size_t EHFrameSectionSize) override;
+ Error registerEHFrames(ExecutorAddrRange EHFrameSection) override;
+ Error deregisterEHFrames(ExecutorAddrRange EHFrameSection) override;
private:
ExecutionSession &ES;
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.h
index b9825f17ec17..18656d03e441 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.h
@@ -85,7 +85,7 @@ public:
ExecutorAddr A;
if (!SPSArgList<SPSExecutorAddr>::deserialize(IB, A))
return false;
- FA = jitlink::JITLinkMemoryManager::FinalizedAlloc(A.getValue());
+ FA = jitlink::JITLinkMemoryManager::FinalizedAlloc(A);
return true;
}
};
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.h
index b6fdfb92ced3..29d282372b1f 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.h
@@ -89,11 +89,6 @@ private:
ExecutorAddr RemoteAddr;
};
- struct EHFrame {
- ExecutorAddr Addr;
- uint64_t Size;
- };
-
// Group of section allocations to be allocated together in the executor. The
// RemoteCodeAddr will stand in as the id of the group for deallocation
// purposes.
@@ -107,7 +102,7 @@ private:
ExecutorAddrRange RemoteCode;
ExecutorAddrRange RemoteROData;
ExecutorAddrRange RemoteRWData;
- std::vector<EHFrame> UnfinalizedEHFrames;
+ std::vector<ExecutorAddrRange> UnfinalizedEHFrames;
std::vector<Alloc> CodeAllocs, RODataAllocs, RWDataAllocs;
};
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
index 8e572ea1d0c1..85fa0bb88688 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
@@ -33,7 +33,6 @@ class ConstantArray;
class GlobalVariable;
class Function;
class Module;
-class TargetMachine;
class Value;
namespace orc {
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h
index f368c92b3702..4f1cde32ff18 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h
@@ -19,7 +19,6 @@
#include <memory>
namespace llvm {
-class Module;
namespace orc {
/// A layer that applies a transform to emitted modules.
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/MachOPlatform.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/MachOPlatform.h
index d7b5e2eda6ee..01f3f1b2ab63 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/MachOPlatform.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/MachOPlatform.h
@@ -97,6 +97,7 @@ public:
ObjectLinkingLayer &getObjectLinkingLayer() const { return ObjLinkingLayer; }
Error setupJITDylib(JITDylib &JD) override;
+ Error teardownJITDylib(JITDylib &JD) override;
Error notifyAdding(ResourceTracker &RT,
const MaterializationUnit &MU) override;
Error notifyRemoving(ResourceTracker &RT) override;
@@ -239,7 +240,7 @@ private:
std::mutex PlatformMutex;
DenseMap<JITDylib *, MachOJITDylibInitializers> InitSeqs;
- DenseMap<JITTargetAddress, JITDylib *> HeaderAddrToJITDylib;
+ DenseMap<ExecutorAddr, JITDylib *> HeaderAddrToJITDylib;
DenseMap<JITDylib *, uint64_t> JITDylibToPThreadKey;
};
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h
index 109922a46e26..12505fc97083 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h
@@ -38,10 +38,6 @@ class LinkGraph;
class Symbol;
} // namespace jitlink
-namespace object {
-class ObjectFile;
-} // namespace object
-
namespace orc {
class ObjectLinkingLayerJITLinkContext;
@@ -220,17 +216,11 @@ public:
ResourceKey SrcKey) override;
private:
-
- struct EHFrameRange {
- JITTargetAddress Addr = 0;
- size_t Size;
- };
-
std::mutex EHFramePluginMutex;
ExecutionSession &ES;
std::unique_ptr<jitlink::EHFrameRegistrar> Registrar;
- DenseMap<MaterializationResponsibility *, EHFrameRange> InProcessLinks;
- DenseMap<ResourceKey, std::vector<EHFrameRange>> EHFrameRanges;
+ DenseMap<MaterializationResponsibility *, ExecutorAddrRange> InProcessLinks;
+ DenseMap<ResourceKey, std::vector<ExecutorAddrRange>> EHFrameRanges;
};
} // end namespace orc
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/AllocationActions.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/AllocationActions.h
new file mode 100644
index 000000000000..6469b87c816f
--- /dev/null
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/AllocationActions.h
@@ -0,0 +1,101 @@
+//===- AllocationActions.h -- JITLink allocation support calls -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Structures for making memory allocation support calls.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_SHARED_ALLOCATIONACTIONS_H
+#define LLVM_EXECUTIONENGINE_ORC_SHARED_ALLOCATIONACTIONS_H
+
+#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
+#include "llvm/ExecutionEngine/Orc/Shared/WrapperFunctionUtils.h"
+#include "llvm/Support/Memory.h"
+
+#include <vector>
+
+namespace llvm {
+namespace orc {
+namespace shared {
+
+/// A pair of WrapperFunctionCalls, one to be run at finalization time, one to
+/// be run at deallocation time.
+///
+/// AllocActionCallPairs should be constructed for paired operations (e.g.
+/// __register_ehframe and __deregister_ehframe for eh-frame registration).
+/// See comments for AllocActions for execution ordering.
+///
+/// For unpaired operations one or the other member can be left unused, as
+/// AllocationActionCalls with an FnAddr of zero will be skipped.
+struct AllocActionCallPair {
+ WrapperFunctionCall Finalize;
+ WrapperFunctionCall Dealloc;
+};
+
+/// A vector of allocation actions to be run for this allocation.
+///
+/// Finalize allocations will be run in order at finalize time. Dealloc
+/// actions will be run in reverse order at deallocation time.
+using AllocActions = std::vector<AllocActionCallPair>;
+
+/// Returns the number of deallocation actions in the given AllocActions array.
+///
+/// This can be useful if clients want to pre-allocate room for deallocation
+/// actions with the rest of their memory.
+inline size_t numDeallocActions(const AllocActions &AAs) {
+ return llvm::count_if(
+ AAs, [](const AllocActionCallPair &P) { return !!P.Dealloc; });
+}
+
+/// Run finalize actions.
+///
+/// If any finalize action fails then the corresponding dealloc actions will be
+/// run in reverse order (not including the deallocation action for the failed
+/// finalize action), and the error for the failing action will be returned.
+///
+/// If all finalize actions succeed then a vector of deallocation actions will
+/// be returned. The dealloc actions should be run by calling
+/// runDeallocActions. If this function succeeds then the AAs argument will
+/// be cleared before the function returns.
+Expected<std::vector<WrapperFunctionCall>>
+runFinalizeActions(AllocActions &AAs);
+
+/// Run deallocation actions.
+/// Dealloc actions will be run in reverse order (from last element of DAs to
+/// first).
+Error runDeallocActions(ArrayRef<WrapperFunctionCall> DAs);
+
+using SPSAllocActionCallPair =
+ SPSTuple<SPSWrapperFunctionCall, SPSWrapperFunctionCall>;
+
+template <>
+class SPSSerializationTraits<SPSAllocActionCallPair,
+ AllocActionCallPair> {
+ using AL = SPSAllocActionCallPair::AsArgList;
+
+public:
+ static size_t size(const AllocActionCallPair &AAP) {
+ return AL::size(AAP.Finalize, AAP.Dealloc);
+ }
+
+ static bool serialize(SPSOutputBuffer &OB,
+ const AllocActionCallPair &AAP) {
+ return AL::serialize(OB, AAP.Finalize, AAP.Dealloc);
+ }
+
+ static bool deserialize(SPSInputBuffer &IB,
+ AllocActionCallPair &AAP) {
+ return AL::deserialize(IB, AAP.Finalize, AAP.Dealloc);
+ }
+};
+
+} // end namespace shared
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_SHARED_ALLOCATIONACTIONS_H
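A hedged sketch of how a memory manager might drive the new API; only AllocActions, runFinalizeActions, and runDeallocActions come from the header above, the surrounding function is hypothetical:

#include "llvm/ExecutionEngine/Orc/Shared/AllocationActions.h"

using namespace llvm;
using namespace llvm::orc::shared;

Error finalizeWithActions(AllocActions &AAs) {
  // Finalize actions run in order; on success the dealloc actions are handed
  // back and AAs is cleared.
  Expected<std::vector<WrapperFunctionCall>> DeallocActions =
      runFinalizeActions(AAs);
  if (!DeallocActions)
    return DeallocActions.takeError();

  // ... memory is now finalized and usable ...

  // At teardown, dealloc actions run in reverse order.
  return runDeallocActions(*DeallocActions);
}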
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h
index 3c0b2b9edd52..dc080cfc79d1 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h
@@ -13,7 +13,10 @@
#ifndef LLVM_EXECUTIONENGINE_ORC_SHARED_EXECUTORADDRESS_H
#define LLVM_EXECUTIONENGINE_ORC_SHARED_EXECUTORADDRESS_H
+#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ExecutionEngine/Orc/Shared/SimplePackedSerialization.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <type_traits>
@@ -21,17 +24,7 @@
namespace llvm {
namespace orc {
-/// Represents the difference between two addresses in the executor process.
-class ExecutorAddrDiff {
-public:
- ExecutorAddrDiff() = default;
- explicit ExecutorAddrDiff(uint64_t Value) : Value(Value) {}
-
- uint64_t getValue() const { return Value; }
-
-private:
- int64_t Value = 0;
-};
+using ExecutorAddrDiff = uint64_t;
/// Represents an address in the executor process.
class ExecutorAddr {
@@ -39,7 +32,7 @@ public:
ExecutorAddr() = default;
/// Create an ExecutorAddr from the given value.
- explicit ExecutorAddr(uint64_t Addr) : Addr(Addr) {}
+ explicit constexpr ExecutorAddr(uint64_t Addr) : Addr(Addr) {}
/// Create an ExecutorAddr from the given pointer.
/// Warning: This should only be used when JITing in-process.
@@ -98,13 +91,13 @@ public:
ExecutorAddr operator++(int) { return ExecutorAddr(Addr++); }
ExecutorAddr operator--(int) { return ExecutorAddr(Addr--); }
- ExecutorAddr &operator+=(const ExecutorAddrDiff Delta) {
- Addr += Delta.getValue();
+ ExecutorAddr &operator+=(const ExecutorAddrDiff &Delta) {
+ Addr += Delta;
return *this;
}
- ExecutorAddr &operator-=(const ExecutorAddrDiff Delta) {
- Addr -= Delta.getValue();
+ ExecutorAddr &operator-=(const ExecutorAddrDiff &Delta) {
+ Addr -= Delta;
return *this;
}
@@ -121,13 +114,25 @@ inline ExecutorAddrDiff operator-(const ExecutorAddr &LHS,
/// Adding an offset and an address yields an address.
inline ExecutorAddr operator+(const ExecutorAddr &LHS,
const ExecutorAddrDiff &RHS) {
- return ExecutorAddr(LHS.getValue() + RHS.getValue());
+ return ExecutorAddr(LHS.getValue() + RHS);
}
/// Adding an address and an offset yields an address.
inline ExecutorAddr operator+(const ExecutorAddrDiff &LHS,
const ExecutorAddr &RHS) {
- return ExecutorAddr(LHS.getValue() + RHS.getValue());
+ return ExecutorAddr(LHS + RHS.getValue());
+}
+
+/// Subtracting an offset from an address yields an address.
+inline ExecutorAddr operator-(const ExecutorAddr &LHS,
+ const ExecutorAddrDiff &RHS) {
+ return ExecutorAddr(LHS.getValue() - RHS);
+}
+
+/// Taking the modulus of an address and a diff yields a diff.
+inline ExecutorAddrDiff operator%(const ExecutorAddr &LHS,
+ const ExecutorAddrDiff &RHS) {
+ return ExecutorAddrDiff(LHS.getValue() % RHS);
}
/// Represents an address range in the executor process.
@@ -158,6 +163,14 @@ struct ExecutorAddrRange {
ExecutorAddr End;
};
+inline raw_ostream &operator<<(raw_ostream &OS, const ExecutorAddr &A) {
+ return OS << formatv("{0:x}", A.getValue());
+}
+
+inline raw_ostream &operator<<(raw_ostream &OS, const ExecutorAddrRange &R) {
+ return OS << formatv("{0:x} -- {1:x}", R.Start.getValue(), R.End.getValue());
+}
+
namespace shared {
class SPSExecutorAddr {};
@@ -208,6 +221,26 @@ using SPSExecutorAddrRangeSequence = SPSSequence<SPSExecutorAddrRange>;
} // End namespace shared.
} // End namespace orc.
+
+// Provide DenseMapInfo for ExecutorAddrs.
+template <> struct DenseMapInfo<orc::ExecutorAddr> {
+ static inline orc::ExecutorAddr getEmptyKey() {
+ return orc::ExecutorAddr(DenseMapInfo<uint64_t>::getEmptyKey());
+ }
+ static inline orc::ExecutorAddr getTombstoneKey() {
+ return orc::ExecutorAddr(DenseMapInfo<uint64_t>::getTombstoneKey());
+ }
+
+ static unsigned getHashValue(const orc::ExecutorAddr &Addr) {
+ return DenseMapInfo<uint64_t>::getHashValue(Addr.getValue());
+ }
+
+ static bool isEqual(const orc::ExecutorAddr &LHS,
+ const orc::ExecutorAddr &RHS) {
+ return DenseMapInfo<uint64_t>::isEqual(LHS.getValue(), RHS.getValue());
+ }
+};
+
} // End namespace llvm.
#endif // LLVM_EXECUTIONENGINE_ORC_SHARED_EXECUTORADDRESS_H
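With ExecutorAddrDiff reduced to a uint64_t alias, address arithmetic mixes directly with integer math, and the new DenseMapInfo specialization lets ExecutorAddr serve as a DenseMap key (as the platform headers below now rely on). A minimal sketch with made-up address values:

#include "llvm/ADT/DenseMap.h"
#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"

using namespace llvm;
using namespace llvm::orc;

void addrArithmeticDemo() {
  ExecutorAddr Base(0x4000);
  ExecutorAddrDiff Size = 0x100;     // just a uint64_t now

  ExecutorAddr End = Base + Size;    // address + diff -> address
  ExecutorAddrDiff Len = End - Base; // address - address -> diff
  ExecutorAddrDiff Rem = End % 0x10; // address % diff -> diff (new operator)
  (void)Len; (void)Rem;

  // Enabled by the new DenseMapInfo<ExecutorAddr> specialization.
  DenseMap<ExecutorAddr, unsigned> BlockSizes;
  BlockSizes[Base] = 0x100;
}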
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h
index 3ef43f33d84c..96166ac20b2e 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h
@@ -37,8 +37,8 @@ extern const char *MemoryWriteUInt32sWrapperName;
extern const char *MemoryWriteUInt64sWrapperName;
extern const char *MemoryWriteBuffersWrapperName;
-extern const char *RegisterEHFrameSectionCustomDirectWrapperName;
-extern const char *DeregisterEHFrameSectionCustomDirectWrapperName;
+extern const char *RegisterEHFrameSectionWrapperName;
+extern const char *DeregisterEHFrameSectionWrapperName;
extern const char *RunAsMainWrapperName;
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/SimplePackedSerialization.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/SimplePackedSerialization.h
index 9ac13a493e9d..302b60b80fd0 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/SimplePackedSerialization.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/SimplePackedSerialization.h
@@ -33,6 +33,7 @@
#define LLVM_EXECUTIONENGINE_ORC_SHARED_SIMPLEPACKEDSERIALIZATION_H
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"
@@ -112,12 +113,22 @@ public:
static bool serialize(SPSOutputBuffer &OB) { return true; }
static bool deserialize(SPSInputBuffer &IB) { return true; }
+
+ static bool serializeToSmallVector(SmallVectorImpl<char> &V) { return true; }
+
+ static bool deserializeFromSmallVector(const SmallVectorImpl<char> &V) {
+ return true;
+ }
};
// Non-empty list specialization for SPSArgList.
template <typename SPSTagT, typename... SPSTagTs>
class SPSArgList<SPSTagT, SPSTagTs...> {
public:
+ // FIXME: This typedef is here to enable SPS arg serialization from
+ // JITLink. It can be removed once JITLink can access SPS directly.
+ using OutputBuffer = SPSOutputBuffer;
+
template <typename ArgT, typename... ArgTs>
static size_t size(const ArgT &Arg, const ArgTs &...Args) {
return SPSSerializationTraits<SPSTagT, ArgT>::size(Arg) +
@@ -284,6 +295,40 @@ public:
}
};
+/// Trivial SmallVectorImpl<T> -> SPSSequence<char> serialization.
+template <typename SPSElementTagT, typename T>
+class TrivialSPSSequenceSerialization<SPSElementTagT, SmallVectorImpl<T>> {
+public:
+ static constexpr bool available = true;
+};
+
+/// Trivial SPSSequence<SPSElementTagT> -> SmallVectorImpl<T> deserialization.
+template <typename SPSElementTagT, typename T>
+class TrivialSPSSequenceDeserialization<SPSElementTagT, SmallVectorImpl<T>> {
+public:
+ static constexpr bool available = true;
+
+ using element_type = typename SmallVectorImpl<T>::value_type;
+
+ static void reserve(SmallVectorImpl<T> &V, uint64_t Size) { V.reserve(Size); }
+ static bool append(SmallVectorImpl<T> &V, T E) {
+ V.push_back(std::move(E));
+ return true;
+ }
+};
+
+/// Trivial SmallVectorImpl<T> -> SPSSequence<char> serialization.
+template <typename SPSElementTagT, typename T, unsigned N>
+class TrivialSPSSequenceSerialization<SPSElementTagT, SmallVector<T, N>>
+ : public TrivialSPSSequenceSerialization<SPSElementTagT,
+ SmallVectorImpl<T>> {};
+
+/// Trivial SPSSequence<SPSElementTagT> -> SmallVectorImpl<T> deserialization.
+template <typename SPSElementTagT, typename T, unsigned N>
+class TrivialSPSSequenceDeserialization<SPSElementTagT, SmallVector<T, N>>
+ : public TrivialSPSSequenceDeserialization<SPSElementTagT,
+ SmallVectorImpl<T>> {};
+
/// Trivial ArrayRef<T> -> SPSSequence<SPSElementTagT> serialization.
template <typename SPSElementTagT, typename T>
class TrivialSPSSequenceSerialization<SPSElementTagT, ArrayRef<T>> {
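The SmallVector specializations added above allow SPS sequences to be serialized into and out of SmallVectors directly (the new WrapperFunctionCall argument buffer relies on this). A rough round-trip sketch, with buffer names invented for illustration:

#include "llvm/ADT/SmallVector.h"
#include "llvm/ExecutionEngine/Orc/Shared/SimplePackedSerialization.h"

using namespace llvm;
using namespace llvm::orc::shared;

bool roundTripCharSeq(const SmallVector<char, 16> &In,
                      SmallVector<char, 16> &Out) {
  using SPSSig = SPSArgList<SPSSequence<char>>;

  SmallVector<char, 64> Buffer;
  Buffer.resize(SPSSig::size(In));
  SPSOutputBuffer OB(Buffer.data(), Buffer.size());
  if (!SPSSig::serialize(OB, In))
    return false;

  SPSInputBuffer IB(Buffer.data(), Buffer.size());
  return SPSSig::deserialize(IB, Out);
}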
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/TargetProcessControlTypes.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/TargetProcessControlTypes.h
index 0e8b7e7d345a..d596a89a50b6 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/TargetProcessControlTypes.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/TargetProcessControlTypes.h
@@ -17,6 +17,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/Shared/AllocationActions.h"
#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
#include "llvm/ExecutionEngine/Orc/Shared/SimplePackedSerialization.h"
#include "llvm/ExecutionEngine/Orc/Shared/WrapperFunctionUtils.h"
@@ -69,50 +70,6 @@ inline std::string getWireProtectionFlagsStr(WireProtectionFlags WPF) {
return Result;
}
-struct WrapperFunctionCall {
- ExecutorAddr Func;
- ExecutorAddrRange ArgData;
-
- WrapperFunctionCall() = default;
- WrapperFunctionCall(ExecutorAddr Func, ExecutorAddr ArgData,
- ExecutorAddrDiff ArgSize)
- : Func(Func), ArgData(ArgData, ArgSize) {}
- WrapperFunctionCall(ExecutorAddr Func, ExecutorAddrRange ArgData)
- : Func(Func), ArgData(ArgData) {}
-
- shared::WrapperFunctionResult run() {
- using FnTy =
- shared::CWrapperFunctionResult(const char *ArgData, size_t ArgSize);
- return shared::WrapperFunctionResult(
- Func.toPtr<FnTy *>()(ArgData.Start.toPtr<const char *>(),
- static_cast<size_t>(ArgData.size().getValue())));
- }
-
- /// Run call and deserialize result using SPS.
- template <typename SPSRetT, typename RetT> Error runWithSPSRet(RetT &RetVal) {
- auto WFR = run();
- if (const char *ErrMsg = WFR.getOutOfBandError())
- return make_error<StringError>(ErrMsg, inconvertibleErrorCode());
- shared::SPSInputBuffer IB(WFR.data(), WFR.size());
- if (!shared::SPSSerializationTraits<SPSRetT, RetT>::deserialize(IB, RetVal))
- return make_error<StringError>("Could not deserialize result from "
- "serialized wrapper function call",
- inconvertibleErrorCode());
- return Error::success();
- }
-
- /// Overload for SPS functions returning void.
- Error runWithSPSRet() {
- shared::SPSEmpty E;
- return runWithSPSRet<shared::SPSEmpty>(E);
- }
-};
-
-struct AllocationActionsPair {
- WrapperFunctionCall Finalize;
- WrapperFunctionCall Deallocate;
-};
-
struct SegFinalizeRequest {
WireProtectionFlags Prot;
ExecutorAddr Addr;
@@ -122,7 +79,7 @@ struct SegFinalizeRequest {
struct FinalizeRequest {
std::vector<SegFinalizeRequest> Segments;
- std::vector<AllocationActionsPair> Actions;
+ shared::AllocActions Actions;
};
template <typename T> struct UIntWrite {
@@ -167,17 +124,12 @@ namespace shared {
class SPSMemoryProtectionFlags {};
-using SPSWrapperFunctionCall = SPSTuple<SPSExecutorAddr, SPSExecutorAddrRange>;
-
using SPSSegFinalizeRequest =
SPSTuple<SPSMemoryProtectionFlags, SPSExecutorAddr, uint64_t,
SPSSequence<char>>;
-using SPSAllocationActionsPair =
- SPSTuple<SPSWrapperFunctionCall, SPSWrapperFunctionCall>;
-
using SPSFinalizeRequest = SPSTuple<SPSSequence<SPSSegFinalizeRequest>,
- SPSSequence<SPSAllocationActionsPair>>;
+ SPSSequence<SPSAllocActionCallPair>>;
template <typename T>
using SPSMemoryAccessUIntWrite = SPSTuple<SPSExecutorAddr, T>;
@@ -213,48 +165,6 @@ public:
};
template <>
-class SPSSerializationTraits<SPSWrapperFunctionCall,
- tpctypes::WrapperFunctionCall> {
- using AL = SPSWrapperFunctionCall::AsArgList;
-
-public:
- static size_t size(const tpctypes::WrapperFunctionCall &WFC) {
- return AL::size(WFC.Func, WFC.ArgData);
- }
-
- static bool serialize(SPSOutputBuffer &OB,
- const tpctypes::WrapperFunctionCall &WFC) {
- return AL::serialize(OB, WFC.Func, WFC.ArgData);
- }
-
- static bool deserialize(SPSInputBuffer &IB,
- tpctypes::WrapperFunctionCall &WFC) {
- return AL::deserialize(IB, WFC.Func, WFC.ArgData);
- }
-};
-
-template <>
-class SPSSerializationTraits<SPSAllocationActionsPair,
- tpctypes::AllocationActionsPair> {
- using AL = SPSAllocationActionsPair::AsArgList;
-
-public:
- static size_t size(const tpctypes::AllocationActionsPair &AAP) {
- return AL::size(AAP.Finalize, AAP.Deallocate);
- }
-
- static bool serialize(SPSOutputBuffer &OB,
- const tpctypes::AllocationActionsPair &AAP) {
- return AL::serialize(OB, AAP.Finalize, AAP.Deallocate);
- }
-
- static bool deserialize(SPSInputBuffer &IB,
- tpctypes::AllocationActionsPair &AAP) {
- return AL::deserialize(IB, AAP.Finalize, AAP.Deallocate);
- }
-};
-
-template <>
class SPSSerializationTraits<SPSSegFinalizeRequest,
tpctypes::SegFinalizeRequest> {
using SFRAL = SPSSegFinalizeRequest::AsArgList;
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/WrapperFunctionUtils.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/WrapperFunctionUtils.h
index bf841b1f706b..eb3fb084b28b 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/WrapperFunctionUtils.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/Shared/WrapperFunctionUtils.h
@@ -356,6 +356,15 @@ public:
}
};
+template <typename SPSRetTagT>
+class ResultSerializer<SPSRetTagT, ErrorSuccess> {
+public:
+ static WrapperFunctionResult serialize(ErrorSuccess Err) {
+ return serializeViaSPSToWrapperFunctionResult<SPSArgList<SPSRetTagT>>(
+ toSPSSerializable(std::move(Err)));
+ }
+};
+
template <typename SPSRetTagT, typename T>
class ResultSerializer<SPSRetTagT, Expected<T>> {
public:
@@ -609,6 +618,117 @@ makeMethodWrapperHandler(RetT (ClassT::*Method)(ArgTs...)) {
return MethodWrapperHandler<RetT, ClassT, ArgTs...>(Method);
}
+/// Represents a serialized wrapper function call.
+/// Serializing calls themselves allows us to batch them: We can make one
+/// "run-wrapper-functions" utility and send it a list of calls to run.
+///
+/// The motivating use-case for this API is JITLink allocation actions, where
+/// we want to run multiple functions to finalize linked memory without having
+/// to make separate IPC calls for each one.
+class WrapperFunctionCall {
+public:
+ using ArgDataBufferType = SmallVector<char, 24>;
+
+ /// Create a WrapperFunctionCall using the given SPS serializer to serialize
+ /// the arguments.
+ template <typename SPSSerializer, typename... ArgTs>
+ static Expected<WrapperFunctionCall> Create(ExecutorAddr FnAddr,
+ const ArgTs &...Args) {
+ ArgDataBufferType ArgData;
+ ArgData.resize(SPSSerializer::size(Args...));
+ SPSOutputBuffer OB(&ArgData[0], ArgData.size());
+ if (SPSSerializer::serialize(OB, Args...))
+ return WrapperFunctionCall(FnAddr, std::move(ArgData));
+ return make_error<StringError>("Cannot serialize arguments for "
+ "AllocActionCall",
+ inconvertibleErrorCode());
+ }
+
+ WrapperFunctionCall() = default;
+
+ /// Create a WrapperFunctionCall from a target function and arg buffer.
+ WrapperFunctionCall(ExecutorAddr FnAddr, ArgDataBufferType ArgData)
+ : FnAddr(FnAddr), ArgData(std::move(ArgData)) {}
+
+ /// Returns the address to be called.
+ const ExecutorAddr &getCallee() const { return FnAddr; }
+
+ /// Returns the argument data.
+ const ArgDataBufferType &getArgData() const { return ArgData; }
+
+ /// WrapperFunctionCalls convert to true if the callee is non-null.
+ explicit operator bool() const { return !!FnAddr; }
+
+ /// Run call returning raw WrapperFunctionResult.
+ shared::WrapperFunctionResult run() const {
+ using FnTy =
+ shared::CWrapperFunctionResult(const char *ArgData, size_t ArgSize);
+ return shared::WrapperFunctionResult(
+ FnAddr.toPtr<FnTy *>()(ArgData.data(), ArgData.size()));
+ }
+
+ /// Run call and deserialize result using SPS.
+ template <typename SPSRetT, typename RetT>
+ std::enable_if_t<!std::is_same<SPSRetT, void>::value, Error>
+ runWithSPSRet(RetT &RetVal) const {
+ auto WFR = run();
+ if (const char *ErrMsg = WFR.getOutOfBandError())
+ return make_error<StringError>(ErrMsg, inconvertibleErrorCode());
+ shared::SPSInputBuffer IB(WFR.data(), WFR.size());
+ if (!shared::SPSSerializationTraits<SPSRetT, RetT>::deserialize(IB, RetVal))
+ return make_error<StringError>("Could not deserialize result from "
+ "serialized wrapper function call",
+ inconvertibleErrorCode());
+ return Error::success();
+ }
+
+ /// Overload for SPS functions returning void.
+ template <typename SPSRetT>
+ std::enable_if_t<std::is_same<SPSRetT, void>::value, Error>
+ runWithSPSRet() const {
+ shared::SPSEmpty E;
+ return runWithSPSRet<shared::SPSEmpty>(E);
+ }
+
+ /// Run call and deserialize an SPSError result. SPSError return values and
+ /// deserialization failures are merged into the returned error.
+ Error runWithSPSRetErrorMerged() const {
+ detail::SPSSerializableError RetErr;
+ if (auto Err = runWithSPSRet<SPSError>(RetErr))
+ return Err;
+ return detail::fromSPSSerializable(std::move(RetErr));
+ }
+
+private:
+ orc::ExecutorAddr FnAddr;
+ ArgDataBufferType ArgData;
+};
+
+using SPSWrapperFunctionCall = SPSTuple<SPSExecutorAddr, SPSSequence<char>>;
+
+template <>
+class SPSSerializationTraits<SPSWrapperFunctionCall, WrapperFunctionCall> {
+public:
+ static size_t size(const WrapperFunctionCall &WFC) {
+ return SPSWrapperFunctionCall::AsArgList::size(WFC.getCallee(),
+ WFC.getArgData());
+ }
+
+ static bool serialize(SPSOutputBuffer &OB, const WrapperFunctionCall &WFC) {
+ return SPSWrapperFunctionCall::AsArgList::serialize(OB, WFC.getCallee(),
+ WFC.getArgData());
+ }
+
+ static bool deserialize(SPSInputBuffer &IB, WrapperFunctionCall &WFC) {
+ ExecutorAddr FnAddr;
+ WrapperFunctionCall::ArgDataBufferType ArgData;
+ if (!SPSWrapperFunctionCall::AsArgList::deserialize(IB, FnAddr, ArgData))
+ return false;
+ WFC = WrapperFunctionCall(FnAddr, std::move(ArgData));
+ return true;
+ }
+};
+
} // end namespace shared
} // end namespace orc
} // end namespace llvm
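A hedged example of the relocated WrapperFunctionCall: arguments are SPS-serialized once at Create() time, and the call can be run (or batched) later. The callee address and the choice of an ExecutorAddrRange argument are assumptions for illustration, not something this patch prescribes:

#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
#include "llvm/ExecutionEngine/Orc/Shared/WrapperFunctionUtils.h"

using namespace llvm;
using namespace llvm::orc;
using namespace llvm::orc::shared;

Error makeAndRunCall(ExecutorAddr WrapperFnAddr, ExecutorAddrRange Section) {
  // Serialize the argument buffer up front with an SPS signature.
  Expected<WrapperFunctionCall> Call =
      WrapperFunctionCall::Create<SPSArgList<SPSExecutorAddrRange>>(
          WrapperFnAddr, Section);
  if (!Call)
    return Call.takeError();

  // Run the call; SPS-serialized errors and deserialization failures are
  // folded into the returned Error.
  return Call->runWithSPSRetErrorMerged();
}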
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.h
index 735aa53e41fd..04790afa6e31 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.h
@@ -33,22 +33,6 @@ Error deregisterEHFrameSection(const void *EHFrameSectionAddr,
} // end namespace orc
} // end namespace llvm
-/// An eh-frame registration utility suitable for use as a support function
-/// call. This function expects the direct address and size of the eh-frame
-/// section to register as its arguments (it does not treat its arguments as
-/// pointers to an SPS-serialized arg buffer).
-extern "C" llvm::orc::shared::CWrapperFunctionResult
-llvm_orc_registerEHFrameSectionCustomDirectWrapper(
- const char *EHFrameSectionAddr, uint64_t Size);
-
-/// An eh-frame deregistration utility suitable for use as a support function
-/// call. This function expects the direct address and size of the eh-frame
-/// section to register as its arguments (it does not treat its arguments as
-/// pointers to an SPS-serialized arg buffer).
-extern "C" llvm::orc::shared::CWrapperFunctionResult
-llvm_orc_deregisterEHFrameSectionCustomDirectWrapper(
- const char *EHFrameSectionAddr, uint64_t Size);
-
extern "C" llvm::orc::shared::CWrapperFunctionResult
llvm_orc_registerEHFrameSectionWrapper(const char *Data, uint64_t Size);
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.h
index 6858f6d4db6e..97b333c68b63 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.h
@@ -43,7 +43,7 @@ public:
private:
struct Allocation {
size_t Size = 0;
- std::vector<tpctypes::WrapperFunctionCall> DeallocationActions;
+ std::vector<shared::WrapperFunctionCall> DeallocationActions;
};
using AllocationsMap = DenseMap<void *, Allocation>;
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/RuntimeDyldChecker.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/RuntimeDyldChecker.h
index 153ffef14e6f..37fe44d5fa69 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/RuntimeDyldChecker.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/RuntimeDyldChecker.h
@@ -127,7 +127,7 @@ public:
JITTargetAddress getTargetAddress() const { return TargetAddress; }
private:
- const char *ContentPtr = 0;
+ const char *ContentPtr = nullptr;
uint64_t Size = 0;
JITTargetAddress TargetAddress = 0;
};
diff --git a/contrib/llvm-project/llvm/include/llvm/FileCheck/FileCheck.h b/contrib/llvm-project/llvm/include/llvm/FileCheck/FileCheck.h
index 6ed75e14ccb6..7a6c98db3029 100644
--- a/contrib/llvm-project/llvm/include/llvm/FileCheck/FileCheck.h
+++ b/contrib/llvm-project/llvm/include/llvm/FileCheck/FileCheck.h
@@ -80,8 +80,7 @@ class FileCheckType {
std::bitset<FileCheckKindModifier::Size> Modifiers;
public:
- FileCheckType(FileCheckKind Kind = CheckNone)
- : Kind(Kind), Count(1), Modifiers() {}
+ FileCheckType(FileCheckKind Kind = CheckNone) : Kind(Kind), Count(1) {}
FileCheckType(const FileCheckType &) = default;
FileCheckType &operator=(const FileCheckType &) = default;
diff --git a/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMP.td b/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMP.td
index 18d577dff497..c5abb16dd9e5 100644
--- a/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMP.td
+++ b/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMP.td
@@ -358,6 +358,7 @@ def OMPC_Unknown : Clause<"unknown"> {
def OMPC_Link : Clause<"link"> {
let flangClass = "OmpObjectList";
}
+def OMPC_Indirect : Clause<"indirect"> {}
def OMPC_Inbranch : Clause<"inbranch"> {}
def OMPC_Notinbranch : Clause<"notinbranch"> {}
def OMPC_Filter : Clause<"filter"> {
@@ -973,12 +974,14 @@ def OMP_BeginDeclareTarget : Directive<"begin declare target"> {
VersionedClause<OMPC_To>,
VersionedClause<OMPC_Link>,
VersionedClause<OMPC_DeviceType>,
+ VersionedClause<OMPC_Indirect>
];
}
def OMP_DeclareTarget : Directive<"declare target"> {
let allowedClauses = [
VersionedClause<OMPC_To>,
- VersionedClause<OMPC_Link>
+ VersionedClause<OMPC_Link>,
+ VersionedClause<OMPC_Indirect>
];
}
def OMP_EndDeclareTarget : Directive<"end declare target"> {}
diff --git a/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPAssume.h b/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPAssume.h
new file mode 100644
index 000000000000..c7462ffe6bc0
--- /dev/null
+++ b/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPAssume.h
@@ -0,0 +1,55 @@
+//===- OpenMP/OMPAssume.h --- OpenMP assumption helper functions - C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file provides helper functions and classes to deal with OpenMP
+/// assumptions, e.g., as used by `[begin/end] assumes` and `assume`.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FRONTEND_OPENMP_OMPASSUME_H
+#define LLVM_FRONTEND_OPENMP_OMPASSUME_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+
+namespace omp {
+
+/// Helper to describe assume clauses.
+struct AssumptionClauseMappingInfo {
+ /// The identifier describing the (beginning of the) clause.
+ llvm::StringLiteral Identifier;
+ /// Flag to determine if the identifier is a full name or the start of a name.
+ bool StartsWith;
+ /// Flag to determine if a directive list follows.
+ bool HasDirectiveList;
+ /// Flag to determine if an expression follows.
+ bool HasExpression;
+};
+
+/// All known assume clauses.
+static constexpr AssumptionClauseMappingInfo AssumptionClauseMappings[] = {
+#define OMP_ASSUME_CLAUSE(Identifier, StartsWith, HasDirectiveList, \
+ HasExpression) \
+ {Identifier, StartsWith, HasDirectiveList, HasExpression},
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
+};
+
+inline std::string getAllAssumeClauseOptions() {
+ std::string S;
+ for (const AssumptionClauseMappingInfo &ACMI : AssumptionClauseMappings)
+ S += (S.empty() ? "'" : "', '") + ACMI.Identifier.str();
+ return S + "'";
+}
+
+} // namespace omp
+
+} // namespace llvm
+
+#endif // LLVM_FRONTEND_OPENMP_OMPASSUME_H
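
getAllAssumeClauseOptions above joins every known clause identifier into a single-quoted, comma-separated list meant for diagnostics. A standalone sketch of the same joining idiom, with plain strings standing in for the X-macro table pulled from OMPKinds.def; the clause names used here are illustrative.

    #include <iostream>
    #include <string>
    #include <vector>

    // Stand-ins for the identifiers normally generated via OMP_ASSUME_CLAUSE.
    static const std::vector<std::string> ClauseNames = {"absent", "contains",
                                                         "holds", "no_openmp"};

    std::string getAllOptions() {
      std::string S;
      for (const std::string &Name : ClauseNames)
        S += (S.empty() ? "'" : "', '") + Name;   // same quoting scheme
      return S + "'";
    }

    int main() {
      // Prints: 'absent', 'contains', 'holds', 'no_openmp'
      std::cout << getAllOptions() << "\n";
    }
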
diff --git a/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPConstants.h b/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPConstants.h
index d2f9bac16e5a..2178acc90e2c 100644
--- a/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPConstants.h
+++ b/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPConstants.h
@@ -20,14 +20,6 @@
#include "llvm/Frontend/OpenMP/OMP.h.inc"
namespace llvm {
-class Type;
-class Module;
-class ArrayType;
-class StructType;
-class PointerType;
-class StringRef;
-class FunctionType;
-
namespace omp {
LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
@@ -80,33 +72,6 @@ enum class IdentFlag {
#define OMP_IDENT_FLAG(Enum, ...) constexpr auto Enum = omp::IdentFlag::Enum;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
-/// Helper to describe assume clauses.
-struct AssumptionClauseMappingInfo {
- /// The identifier describing the (beginning of the) clause.
- llvm::StringLiteral Identifier;
- /// Flag to determine if the identifier is a full name or the start of a name.
- bool StartsWith;
- /// Flag to determine if a directive lists follows.
- bool HasDirectiveList;
- /// Flag to determine if an expression follows.
- bool HasExpression;
-};
-
-/// All known assume clauses.
-static constexpr AssumptionClauseMappingInfo AssumptionClauseMappings[] = {
-#define OMP_ASSUME_CLAUSE(Identifier, StartsWith, HasDirectiveList, \
- HasExpression) \
- {Identifier, StartsWith, HasDirectiveList, HasExpression},
-#include "llvm/Frontend/OpenMP/OMPKinds.def"
-};
-
-inline std::string getAllAssumeClauseOptions() {
- std::string S;
- for (const AssumptionClauseMappingInfo &ACMI : AssumptionClauseMappings)
- S += (S.empty() ? "'" : "', '") + ACMI.Identifier.str();
- return S + "'";
-}
-
/// \note This needs to be kept in sync with kmp.h enum sched_type.
/// Todo: Update kmp.h to include this file, and remove the enums in kmp.h
/// To complete this, more enum values will need to be moved here.
@@ -140,6 +105,14 @@ enum OMPTgtExecModeFlags : int8_t {
LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue */ OMP_TGT_EXEC_MODE_GENERIC_SPMD)
};
+enum class AddressSpace : unsigned {
+ Generic = 0,
+ Global = 1,
+ Shared = 3,
+ Constant = 4,
+ Local = 5,
+};
+
} // end namespace omp
} // end namespace llvm
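
The new omp::AddressSpace enum gives symbolic names to the numeric address spaces used by OpenMP device codegen. A minimal sketch of feeding one of these values into an LLVM pointer type; whether the numbers line up with a particular GPU target's conventions is an assumption on the caller's side, not something this header states.

    #include "llvm/Frontend/OpenMP/OMPConstants.h"
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/LLVMContext.h"

    using namespace llvm;

    // Build an i8* in the address space the enum calls Shared (3).
    PointerType *getSharedI8Ptr(LLVMContext &Ctx) {
      unsigned AS = static_cast<unsigned>(omp::AddressSpace::Shared);
      return PointerType::get(Type::getInt8Ty(Ctx), AS);
    }
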
diff --git a/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h b/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
index 9976d1961ed1..85dd28ec3159 100644
--- a/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
+++ b/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
@@ -41,10 +41,7 @@ public:
/// Finalize the underlying module, e.g., by outlining regions.
/// \param Fn The function to be finalized. If not used,
/// all functions are finalized.
- /// \param AllowExtractorSinking Flag to include sinking instructions,
- /// emitted by CodeExtractor, in the
- /// outlined region. Default is false.
- void finalize(Function *Fn = nullptr, bool AllowExtractorSinking = false);
+ void finalize(Function *Fn = nullptr);
/// Add attributes known for \p FnID to \p Fn.
void addAttributes(omp::RuntimeFunction FnID, Function &Fn);
@@ -517,6 +514,12 @@ public:
void unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, int32_t Factor,
CanonicalLoopInfo **UnrolledCLI);
+ /// Add metadata to simd-ize a loop.
+ ///
+ /// \param DL Debug location for instructions added by this transformation.
+ /// \param Loop The loop to simd-ize.
+ void applySimd(DebugLoc DL, CanonicalLoopInfo *Loop);
+
/// Generator for '#omp flush'
///
/// \param Loc The location where the flush directive was encountered
@@ -663,30 +666,34 @@ public:
Function *getOrCreateRuntimeFunctionPtr(omp::RuntimeFunction FnID);
/// Return the (LLVM-IR) string describing the source location \p LocStr.
- Constant *getOrCreateSrcLocStr(StringRef LocStr);
+ Constant *getOrCreateSrcLocStr(StringRef LocStr, uint32_t &SrcLocStrSize);
/// Return the (LLVM-IR) string describing the default source location.
- Constant *getOrCreateDefaultSrcLocStr();
+ Constant *getOrCreateDefaultSrcLocStr(uint32_t &SrcLocStrSize);
/// Return the (LLVM-IR) string describing the source location identified by
/// the arguments.
Constant *getOrCreateSrcLocStr(StringRef FunctionName, StringRef FileName,
- unsigned Line, unsigned Column);
+ unsigned Line, unsigned Column,
+ uint32_t &SrcLocStrSize);
/// Return the (LLVM-IR) string describing the DebugLoc \p DL. Use \p F as
/// fallback if \p DL does not specify the function name.
- Constant *getOrCreateSrcLocStr(DebugLoc DL, Function *F = nullptr);
+ Constant *getOrCreateSrcLocStr(DebugLoc DL, uint32_t &SrcLocStrSize,
+ Function *F = nullptr);
/// Return the (LLVM-IR) string describing the source location \p Loc.
- Constant *getOrCreateSrcLocStr(const LocationDescription &Loc);
+ Constant *getOrCreateSrcLocStr(const LocationDescription &Loc,
+ uint32_t &SrcLocStrSize);
/// Return an ident_t* encoding the source location \p SrcLocStr and \p Flags.
/// TODO: Create a enum class for the Reserve2Flags
- Value *getOrCreateIdent(Constant *SrcLocStr,
- omp::IdentFlag Flags = omp::IdentFlag(0),
- unsigned Reserve2Flags = 0);
+ Constant *getOrCreateIdent(Constant *SrcLocStr, uint32_t SrcLocStrSize,
+ omp::IdentFlag Flags = omp::IdentFlag(0),
+ unsigned Reserve2Flags = 0);
- /// Create a global flag \p Namein the module with initial value \p Value.
+ /// Create a hidden global flag \p Name in the module with initial value \p
+ /// Value.
GlobalValue *createGlobalFlag(unsigned Value, StringRef Name);
/// Generate control flow and cleanup for cancellation.
@@ -754,7 +761,7 @@ public:
StringMap<Constant *> SrcLocStrMap;
/// Map to remember existing ident_t*.
- DenseMap<std::pair<Constant *, uint64_t>, Value *> IdentMap;
+ DenseMap<std::pair<Constant *, uint64_t>, Constant *> IdentMap;
/// Helper that contains information about regions we need to outline
/// during finalization.
@@ -762,6 +769,7 @@ public:
using PostOutlineCBTy = std::function<void(Function &)>;
PostOutlineCBTy PostOutlineCB;
BasicBlock *EntryBB, *ExitBB;
+ SmallVector<Value *, 2> ExcludeArgsFromAggregate;
/// Collect all blocks in between EntryBB and ExitBB in both the given
/// vector and set.
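
The source-location helpers above now report the string length through a uint32_t out-parameter, and getOrCreateIdent consumes that size and returns a Constant* instead of a Value*. A minimal sketch of the updated call sequence, built only from the signatures shown in this hunk; OMPBuilder is assumed to be an already-initialized OpenMPIRBuilder.

    #include "llvm/Frontend/OpenMP/OMPIRBuilder.h"

    using namespace llvm;

    // Produce the ident_t* for a directive location.
    Constant *buildIdent(OpenMPIRBuilder &OMPBuilder,
                         const OpenMPIRBuilder::LocationDescription &Loc) {
      uint32_t SrcLocStrSize;
      Constant *SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(Loc, SrcLocStrSize);
      return OMPBuilder.getOrCreateIdent(SrcLocStr, SrcLocStrSize);
    }
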
diff --git a/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def b/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def
index 08bf5981cdc3..d2b70edd4d87 100644
--- a/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def
+++ b/contrib/llvm-project/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def
@@ -382,6 +382,8 @@ __OMP_RTL(__kmpc_doacross_wait, false, Void, IdentPtr, Int32, Int64Ptr)
__OMP_RTL(__kmpc_doacross_fini, false, Void, IdentPtr, Int32)
__OMP_RTL(__kmpc_alloc, false, VoidPtr, /* Int */ Int32, SizeTy, VoidPtr)
+__OMP_RTL(__kmpc_aligned_alloc, false, VoidPtr, /* Int */ Int32, SizeTy, SizeTy,
+ VoidPtr)
__OMP_RTL(__kmpc_free, false, Void, /* Int */ Int32, VoidPtr, VoidPtr)
__OMP_RTL(__kmpc_init_allocator, false, /* omp_allocator_handle_t */ VoidPtr,
@@ -905,6 +907,8 @@ __OMP_RTL_ATTRS(__kmpc_free_shared, DeviceAllocAttrs, AttributeSet(),
ParamAttrs(NoCaptureAttrs))
__OMP_RTL_ATTRS(__kmpc_alloc, DefaultAttrs, ReturnPtrAttrs, ParamAttrs())
+__OMP_RTL_ATTRS(__kmpc_aligned_alloc, DefaultAttrs, ReturnPtrAttrs,
+ ParamAttrs())
__OMP_RTL_ATTRS(__kmpc_free, AllocAttrs, AttributeSet(), ParamAttrs())
__OMP_RTL_ATTRS(__kmpc_init_allocator, DefaultAttrs, ReturnPtrAttrs,
@@ -1130,6 +1134,8 @@ __OMP_TRAIT_PROPERTY(implementation, vendor, gnu)
__OMP_TRAIT_PROPERTY(implementation, vendor, ibm)
__OMP_TRAIT_PROPERTY(implementation, vendor, intel)
__OMP_TRAIT_PROPERTY(implementation, vendor, llvm)
+__OMP_TRAIT_PROPERTY(implementation, vendor, nec)
+__OMP_TRAIT_PROPERTY(implementation, vendor, nvidia)
__OMP_TRAIT_PROPERTY(implementation, vendor, pgi)
__OMP_TRAIT_PROPERTY(implementation, vendor, ti)
__OMP_TRAIT_PROPERTY(implementation, vendor, unknown)
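
The hunk above adds nec and nvidia as recognized values of the implementation/vendor context trait. A short sketch of how such a trait is typically consumed in source, assuming standard OpenMP declare variant syntax; the saxpy function names are illustrative.

    void saxpy_generic(int n, float a, const float *x, float *y);
    void saxpy_nvidia(int n, float a, const float *x, float *y);

    // Select the nvidia-tuned variant when the implementation reports that vendor.
    #pragma omp declare variant(saxpy_nvidia) \
        match(implementation = {vendor(nvidia)})
    void saxpy(int n, float a, const float *x, float *y) {
      saxpy_generic(n, a, x, y);
    }
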
diff --git a/contrib/llvm-project/llvm/include/llvm/FuzzMutate/OpDescriptor.h b/contrib/llvm-project/llvm/include/llvm/FuzzMutate/OpDescriptor.h
index d6c98cd949a2..43c810920766 100644
--- a/contrib/llvm-project/llvm/include/llvm/FuzzMutate/OpDescriptor.h
+++ b/contrib/llvm-project/llvm/include/llvm/FuzzMutate/OpDescriptor.h
@@ -146,7 +146,7 @@ static inline SourcePred sizedPtrType() {
return false;
if (const auto *PtrT = dyn_cast<PointerType>(V->getType()))
- return PtrT->getElementType()->isSized();
+ return PtrT->getPointerElementType()->isSized();
return false;
};
auto Make = [](ArrayRef<Value *>, ArrayRef<Type *> Ts) {
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/Argument.h b/contrib/llvm-project/llvm/include/llvm/IR/Argument.h
index 396ab6a9d01d..7cbfa2a7b6ce 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/Argument.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/Argument.h
@@ -162,7 +162,7 @@ public:
/// Remove attributes from an argument.
void removeAttr(Attribute::AttrKind Kind);
- void removeAttrs(const AttrBuilder &B);
+ void removeAttrs(const AttributeMask &AM);
/// Check if an argument has a given attribute.
bool hasAttribute(Attribute::AttrKind Kind) const;
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/Attributes.h b/contrib/llvm-project/llvm/include/llvm/IR/Attributes.h
index f64f15bd38ba..5e2cfe6d81ac 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/Attributes.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/Attributes.h
@@ -27,13 +27,14 @@
#include <bitset>
#include <cassert>
#include <cstdint>
-#include <map>
+#include <set>
#include <string>
#include <utility>
namespace llvm {
class AttrBuilder;
+class AttributeMask;
class AttributeImpl;
class AttributeListImpl;
class AttributeSetNode;
@@ -320,7 +321,7 @@ public:
/// Remove the specified attributes from this set. Returns a new set because
/// attribute sets are immutable.
LLVM_NODISCARD AttributeSet
- removeAttributes(LLVMContext &C, const AttrBuilder &AttrsToRemove) const;
+ removeAttributes(LLVMContext &C, const AttributeMask &AttrsToRemove) const;
/// Return the number of attributes in this set.
unsigned getNumAttributes() const;
@@ -580,7 +581,7 @@ public:
/// Remove the specified attributes at the specified index from this
/// attribute list. Returns a new list because attribute lists are immutable.
LLVM_NODISCARD AttributeList removeAttributesAtIndex(
- LLVMContext &C, unsigned Index, const AttrBuilder &AttrsToRemove) const;
+ LLVMContext &C, unsigned Index, const AttributeMask &AttrsToRemove) const;
/// Remove all attributes at the specified index from this
/// attribute list. Returns a new list because attribute lists are immutable.
@@ -604,7 +605,7 @@ public:
/// Remove the specified attribute at the function index from this
/// attribute list. Returns a new list because attribute lists are immutable.
LLVM_NODISCARD AttributeList
- removeFnAttributes(LLVMContext &C, const AttrBuilder &AttrsToRemove) const {
+ removeFnAttributes(LLVMContext &C, const AttributeMask &AttrsToRemove) const {
return removeAttributesAtIndex(C, FunctionIndex, AttrsToRemove);
}
@@ -630,8 +631,8 @@ public:
/// Remove the specified attribute at the return value index from this
/// attribute list. Returns a new list because attribute lists are immutable.
- LLVM_NODISCARD AttributeList
- removeRetAttributes(LLVMContext &C, const AttrBuilder &AttrsToRemove) const {
+ LLVM_NODISCARD AttributeList removeRetAttributes(
+ LLVMContext &C, const AttributeMask &AttrsToRemove) const {
return removeAttributesAtIndex(C, ReturnIndex, AttrsToRemove);
}
@@ -652,8 +653,9 @@ public:
/// Remove the specified attribute at the specified arg index from this
/// attribute list. Returns a new list because attribute lists are immutable.
- LLVM_NODISCARD AttributeList removeParamAttributes(
- LLVMContext &C, unsigned ArgNo, const AttrBuilder &AttrsToRemove) const {
+ LLVM_NODISCARD AttributeList
+ removeParamAttributes(LLVMContext &C, unsigned ArgNo,
+ const AttributeMask &AttrsToRemove) const {
return removeAttributesAtIndex(C, ArgNo + FirstArgIndex, AttrsToRemove);
}
@@ -929,42 +931,88 @@ template <> struct DenseMapInfo<AttributeList, void> {
//===----------------------------------------------------------------------===//
/// \class
+/// This class stores enough information to efficiently remove some attributes
+/// from an existing AttrBuilder, AttributeSet or AttributeList.
+class AttributeMask {
+ std::bitset<Attribute::EndAttrKinds> Attrs;
+ std::set<SmallString<32>, std::less<>> TargetDepAttrs;
+
+public:
+ AttributeMask() = default;
+ AttributeMask(const AttributeMask &) = delete;
+ AttributeMask(AttributeMask &&) = default;
+
+ AttributeMask(AttributeSet AS) {
+ for (Attribute A : AS)
+ addAttribute(A);
+ }
+
+ /// Add an attribute to the mask.
+ AttributeMask &addAttribute(Attribute::AttrKind Val) {
+ assert((unsigned)Val < Attribute::EndAttrKinds &&
+ "Attribute out of range!");
+ Attrs[Val] = true;
+ return *this;
+ }
+
+ /// Add the Attribute object to the mask.
+ AttributeMask &addAttribute(Attribute A) {
+ if (A.isStringAttribute())
+ addAttribute(A.getKindAsString());
+ else
+ addAttribute(A.getKindAsEnum());
+ return *this;
+ }
+
+ /// Add the target-dependent attribute to the mask.
+ AttributeMask &addAttribute(StringRef A) {
+ TargetDepAttrs.insert(A);
+ return *this;
+ }
+
+ /// Return true if the mask has the specified attribute.
+ bool contains(Attribute::AttrKind A) const {
+ assert((unsigned)A < Attribute::EndAttrKinds && "Attribute out of range!");
+ return Attrs[A];
+ }
+
+ /// Return true if the mask has the specified target-dependent
+ /// attribute.
+ bool contains(StringRef A) const { return TargetDepAttrs.count(A); }
+
+ /// Return true if the mask contains the specified attribute.
+ bool contains(Attribute A) const {
+ if (A.isStringAttribute())
+ return contains(A.getKindAsString());
+ return contains(A.getKindAsEnum());
+ }
+};
+
+//===----------------------------------------------------------------------===//
+/// \class
/// This class is used in conjunction with the Attribute::get method to
/// create an Attribute object. The object itself is uniquified. The Builder's
/// value, however, is not. So this can be used as a quick way to test for
/// equality, presence of attributes, etc.
class AttrBuilder {
- std::bitset<Attribute::EndAttrKinds> Attrs;
- std::map<SmallString<32>, SmallString<32>, std::less<>> TargetDepAttrs;
- std::array<uint64_t, Attribute::NumIntAttrKinds> IntAttrs = {};
- std::array<Type *, Attribute::NumTypeAttrKinds> TypeAttrs = {};
-
- Optional<unsigned> kindToIntIndex(Attribute::AttrKind Kind) const;
- Optional<unsigned> kindToTypeIndex(Attribute::AttrKind Kind) const;
+ LLVMContext &Ctx;
+ SmallVector<Attribute, 8> Attrs;
public:
- AttrBuilder() = default;
+ AttrBuilder(LLVMContext &Ctx) : Ctx(Ctx) {}
AttrBuilder(const AttrBuilder &) = delete;
AttrBuilder(AttrBuilder &&) = default;
- AttrBuilder(const Attribute &A) {
+ AttrBuilder(LLVMContext &Ctx, const Attribute &A) : Ctx(Ctx) {
addAttribute(A);
}
- AttrBuilder(AttributeList AS, unsigned Idx);
- AttrBuilder(AttributeSet AS);
+ AttrBuilder(LLVMContext &Ctx, AttributeSet AS);
void clear();
/// Add an attribute to the builder.
- AttrBuilder &addAttribute(Attribute::AttrKind Val) {
- assert((unsigned)Val < Attribute::EndAttrKinds &&
- "Attribute out of range!");
- assert(Attribute::isEnumAttrKind(Val) &&
- "Adding integer/type attribute without an argument!");
- Attrs[Val] = true;
- return *this;
- }
+ AttrBuilder &addAttribute(Attribute::AttrKind Val);
/// Add the Attribute object to the builder.
AttrBuilder &addAttribute(Attribute A);
@@ -975,42 +1023,49 @@ public:
/// Remove an attribute from the builder.
AttrBuilder &removeAttribute(Attribute::AttrKind Val);
- /// Remove the attributes from the builder.
- AttrBuilder &removeAttributes(AttributeList A, uint64_t WithoutIndex);
-
- /// Remove the target-dependent attribute to the builder.
+ /// Remove the target-dependent attribute from the builder.
AttrBuilder &removeAttribute(StringRef A);
- /// Add the attributes from the builder.
+ /// Remove the given attribute from the builder.
+ AttrBuilder &removeAttribute(Attribute A) {
+ if (A.isStringAttribute())
+ return removeAttribute(A.getKindAsString());
+ else
+ return removeAttribute(A.getKindAsEnum());
+ }
+
+ /// Add the attributes from the builder. Attributes in the passed builder
+ /// overwrite attributes in this builder if they have the same key.
AttrBuilder &merge(const AttrBuilder &B);
/// Remove the attributes from the builder.
- AttrBuilder &remove(const AttrBuilder &B);
+ AttrBuilder &remove(const AttributeMask &AM);
/// Return true if the builder has any attribute that's in the
/// specified builder.
- bool overlaps(const AttrBuilder &B) const;
+ bool overlaps(const AttributeMask &AM) const;
/// Return true if the builder has the specified attribute.
- bool contains(Attribute::AttrKind A) const {
- assert((unsigned)A < Attribute::EndAttrKinds && "Attribute out of range!");
- return Attrs[A];
- }
+ bool contains(Attribute::AttrKind A) const;
/// Return true if the builder has the specified target-dependent
/// attribute.
bool contains(StringRef A) const;
/// Return true if the builder has IR-level attributes.
- bool hasAttributes() const;
-
- /// Return true if the builder has any attribute that's in the
- /// specified attribute.
- bool hasAttributes(AttributeList A, uint64_t Index) const;
+ bool hasAttributes() const { return !Attrs.empty(); }
/// Return true if the builder has an alignment attribute.
bool hasAlignmentAttr() const;
+ /// Return Attribute with the given Kind. The returned attribute will be
+ /// invalid if the Kind is not present in the builder.
+ Attribute getAttribute(Attribute::AttrKind Kind) const;
+
+ /// Return Attribute with the given Kind. The returned attribute will be
+ /// invalid if the Kind is not present in the builder.
+ Attribute getAttribute(StringRef Kind) const;
+
/// Return raw (possibly packed/encoded) value of integer attribute or 0 if
/// not set.
uint64_t getRawIntAttr(Attribute::AttrKind Kind) const;
@@ -1136,30 +1191,7 @@ public:
/// Attribute.getIntValue().
AttrBuilder &addVScaleRangeAttrFromRawRepr(uint64_t RawVScaleRangeRepr);
- /// Return true if the builder contains no target-independent
- /// attributes.
- bool empty() const { return Attrs.none(); }
-
- // Iterators for target-dependent attributes.
- using td_type = decltype(TargetDepAttrs)::value_type;
- using td_iterator = decltype(TargetDepAttrs)::iterator;
- using td_const_iterator = decltype(TargetDepAttrs)::const_iterator;
- using td_range = iterator_range<td_iterator>;
- using td_const_range = iterator_range<td_const_iterator>;
-
- td_iterator td_begin() { return TargetDepAttrs.begin(); }
- td_iterator td_end() { return TargetDepAttrs.end(); }
-
- td_const_iterator td_begin() const { return TargetDepAttrs.begin(); }
- td_const_iterator td_end() const { return TargetDepAttrs.end(); }
-
- td_range td_attrs() { return td_range(td_begin(), td_end()); }
-
- td_const_range td_attrs() const {
- return td_const_range(td_begin(), td_end());
- }
-
- bool td_empty() const { return TargetDepAttrs.empty(); }
+ ArrayRef<Attribute> attrs() const { return Attrs; }
bool operator==(const AttrBuilder &B) const;
bool operator!=(const AttrBuilder &B) const { return !(*this == B); }
@@ -1168,14 +1200,14 @@ public:
namespace AttributeFuncs {
/// Which attributes cannot be applied to a type.
-AttrBuilder typeIncompatible(Type *Ty);
+AttributeMask typeIncompatible(Type *Ty);
/// Get param/return attributes which imply immediate undefined behavior if an
/// invalid value is passed. For example, this includes noundef (where undef
/// implies UB), but not nonnull (where null implies poison). It also does not
/// include attributes like nocapture, which constrain the function
/// implementation rather than the passed value.
-AttrBuilder getUBImplyingAttributes();
+AttributeMask getUBImplyingAttributes();
/// \returns Return true if the two functions have compatible target-independent
/// attributes for inlining purposes.
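
Attribute removal now goes through the new AttributeMask, and AttrBuilder requires an LLVMContext up front. A minimal sketch combining the declarations in this hunk with the Function::removeFnAttrs overload changed later in this patch; Function::addFnAttrs is assumed to be the existing builder-taking counterpart.

    #include "llvm/IR/Attributes.h"
    #include "llvm/IR/Function.h"

    using namespace llvm;

    void stripOptHints(Function &F) {
      // Masks are cheap, add-only descriptions of which attributes to drop.
      AttributeMask Mask;
      Mask.addAttribute(Attribute::NoInline);
      Mask.addAttribute(Attribute::OptimizeNone);
      F.removeFnAttrs(Mask);

      // Builders now carry the LLVMContext they create attributes in.
      AttrBuilder B(F.getContext());
      B.addAttribute(Attribute::AlwaysInline);
      if (B.hasAttributes())
        F.addFnAttrs(B); // assumed counterpart of removeFnAttrs
    }
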
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/Comdat.h b/contrib/llvm-project/llvm/include/llvm/IR/Comdat.h
index 01a047d36455..1701802e6977 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/Comdat.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/Comdat.h
@@ -16,10 +16,12 @@
#define LLVM_IR_COMDAT_H
#include "llvm-c/Types.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/CBindingWrapping.h"
namespace llvm {
+class GlobalObject;
class raw_ostream;
class StringRef;
template <typename ValueTy> class StringMapEntry;
@@ -46,15 +48,21 @@ public:
StringRef getName() const;
void print(raw_ostream &OS, bool IsForDebug = false) const;
void dump() const;
+ const SmallPtrSetImpl<GlobalObject *> &getUsers() const { return Users; }
private:
friend class Module;
+ friend class GlobalObject;
Comdat();
+ void addUser(GlobalObject *GO);
+ void removeUser(GlobalObject *GO);
// Points to the map in Module.
StringMapEntry<Comdat> *Name = nullptr;
SelectionKind SK = Any;
+ // Globals using this comdat.
+ SmallPtrSet<GlobalObject *, 2> Users;
};
// Create wrappers for C Binding types (see CBindingWrapping.h).
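
Comdats now remember the global objects attached to them, exposed through getUsers(). A minimal sketch of walking that set; C is assumed to come from an existing Module's comdat table.

    #include "llvm/IR/Comdat.h"
    #include "llvm/IR/GlobalObject.h"

    using namespace llvm;

    // Count the functions and globals currently referencing this comdat.
    unsigned countComdatUsers(const Comdat &C) {
      unsigned N = 0;
      for (const GlobalObject *GO : C.getUsers()) {
        (void)GO; // only counting here
        ++N;
      }
      return N;
    }
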
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/Constant.h b/contrib/llvm-project/llvm/include/llvm/IR/Constant.h
index c8999b71f3d1..a97372ebbad2 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/Constant.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/Constant.h
@@ -204,6 +204,12 @@ public:
/// Constant::removeDeadConstantUsers, but doesn't remove dead constants.
bool hasOneLiveUse() const;
+ /// Return true if the constant has no live uses.
+ ///
+ /// This returns the same result as calling Value::use_empty after
+ /// Constant::removeDeadConstantUsers, but doesn't remove dead constants.
+ bool hasZeroLiveUses() const;
+
const Constant *stripPointerCasts() const {
return cast<Constant>(Value::stripPointerCasts());
}
@@ -244,6 +250,8 @@ private:
/// Determine what potential relocations may be needed by this constant.
PossibleRelocationsTy getRelocationInfo() const;
+
+ bool hasNLiveUses(unsigned N) const;
};
} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/ConstantFolder.h b/contrib/llvm-project/llvm/include/llvm/IR/ConstantFolder.h
index da4a18e3c181..28dc63a5886e 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/ConstantFolder.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/ConstantFolder.h
@@ -17,10 +17,11 @@
#define LLVM_IR_CONSTANTFOLDER_H
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/IRBuilderFolder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
-#include "llvm/IR/IRBuilderFolder.h"
namespace llvm {
@@ -32,14 +33,72 @@ public:
explicit ConstantFolder() = default;
//===--------------------------------------------------------------------===//
- // Binary Operators
+ // Value-based folders.
+ //
+ // Return an existing value or a constant if the operation can be simplified.
+ // Otherwise return nullptr.
//===--------------------------------------------------------------------===//
-
- Constant *CreateAdd(Constant *LHS, Constant *RHS,
- bool HasNUW = false, bool HasNSW = false) const override {
- return ConstantExpr::getAdd(LHS, RHS, HasNUW, HasNSW);
+ Value *FoldAdd(Value *LHS, Value *RHS, bool HasNUW = false,
+ bool HasNSW = false) const override {
+ auto *LC = dyn_cast<Constant>(LHS);
+ auto *RC = dyn_cast<Constant>(RHS);
+ if (LC && RC)
+ return ConstantExpr::getAdd(LC, RC, HasNUW, HasNSW);
+ return nullptr;
+ }
+
+ Value *FoldAnd(Value *LHS, Value *RHS) const override {
+ auto *LC = dyn_cast<Constant>(LHS);
+ auto *RC = dyn_cast<Constant>(RHS);
+ if (LC && RC)
+ return ConstantExpr::getAnd(LC, RC);
+ return nullptr;
+ }
+
+ Value *FoldOr(Value *LHS, Value *RHS) const override {
+ auto *LC = dyn_cast<Constant>(LHS);
+ auto *RC = dyn_cast<Constant>(RHS);
+ if (LC && RC)
+ return ConstantExpr::getOr(LC, RC);
+ return nullptr;
+ }
+
+ Value *FoldICmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
+ auto *LC = dyn_cast<Constant>(LHS);
+ auto *RC = dyn_cast<Constant>(RHS);
+ if (LC && RC)
+ return ConstantExpr::getCompare(P, LC, RC);
+ return nullptr;
+ }
+
+ Value *FoldGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
+ bool IsInBounds = false) const override {
+ if (auto *PC = dyn_cast<Constant>(Ptr)) {
+ // Every index must be constant.
+ if (any_of(IdxList, [](Value *V) { return !isa<Constant>(V); }))
+ return nullptr;
+
+ if (IsInBounds)
+ return ConstantExpr::getInBoundsGetElementPtr(Ty, PC, IdxList);
+ else
+ return ConstantExpr::getGetElementPtr(Ty, PC, IdxList);
+ }
+ return nullptr;
+ }
+
+ Value *FoldSelect(Value *C, Value *True, Value *False) const override {
+ auto *CC = dyn_cast<Constant>(C);
+ auto *TC = dyn_cast<Constant>(True);
+ auto *FC = dyn_cast<Constant>(False);
+ if (CC && TC && FC)
+ return ConstantExpr::getSelect(CC, TC, FC);
+ return nullptr;
}
+ //===--------------------------------------------------------------------===//
+ // Binary Operators
+ //===--------------------------------------------------------------------===//
+
Constant *CreateFAdd(Constant *LHS, Constant *RHS) const override {
return ConstantExpr::getFAdd(LHS, RHS);
}
@@ -103,11 +162,7 @@ public:
return ConstantExpr::getAShr(LHS, RHS, isExact);
}
- Constant *CreateAnd(Constant *LHS, Constant *RHS) const override {
- return ConstantExpr::getAnd(LHS, RHS);
- }
-
- Constant *CreateOr(Constant *LHS, Constant *RHS) const override {
+ Constant *CreateOr(Constant *LHS, Constant *RHS) const {
return ConstantExpr::getOr(LHS, RHS);
}
@@ -142,46 +197,6 @@ public:
}
//===--------------------------------------------------------------------===//
- // Memory Instructions
- //===--------------------------------------------------------------------===//
-
- Constant *CreateGetElementPtr(Type *Ty, Constant *C,
- ArrayRef<Constant *> IdxList) const override {
- return ConstantExpr::getGetElementPtr(Ty, C, IdxList);
- }
-
- Constant *CreateGetElementPtr(Type *Ty, Constant *C,
- Constant *Idx) const override {
- // This form of the function only exists to avoid ambiguous overload
- // warnings about whether to convert Idx to ArrayRef<Constant *> or
- // ArrayRef<Value *>.
- return ConstantExpr::getGetElementPtr(Ty, C, Idx);
- }
-
- Constant *CreateGetElementPtr(Type *Ty, Constant *C,
- ArrayRef<Value *> IdxList) const override {
- return ConstantExpr::getGetElementPtr(Ty, C, IdxList);
- }
-
- Constant *CreateInBoundsGetElementPtr(
- Type *Ty, Constant *C, ArrayRef<Constant *> IdxList) const override {
- return ConstantExpr::getInBoundsGetElementPtr(Ty, C, IdxList);
- }
-
- Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
- Constant *Idx) const override {
- // This form of the function only exists to avoid ambiguous overload
- // warnings about whether to convert Idx to ArrayRef<Constant *> or
- // ArrayRef<Value *>.
- return ConstantExpr::getInBoundsGetElementPtr(Ty, C, Idx);
- }
-
- Constant *CreateInBoundsGetElementPtr(
- Type *Ty, Constant *C, ArrayRef<Value *> IdxList) const override {
- return ConstantExpr::getInBoundsGetElementPtr(Ty, C, IdxList);
- }
-
- //===--------------------------------------------------------------------===//
// Cast/Conversion Operators
//===--------------------------------------------------------------------===//
@@ -236,11 +251,6 @@ public:
// Compare Instructions
//===--------------------------------------------------------------------===//
- Constant *CreateICmp(CmpInst::Predicate P, Constant *LHS,
- Constant *RHS) const override {
- return ConstantExpr::getCompare(P, LHS, RHS);
- }
-
Constant *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
Constant *RHS) const override {
return ConstantExpr::getCompare(P, LHS, RHS);
@@ -250,11 +260,6 @@ public:
// Other Instructions
//===--------------------------------------------------------------------===//
- Constant *CreateSelect(Constant *C, Constant *True,
- Constant *False) const override {
- return ConstantExpr::getSelect(C, True, False);
- }
-
Constant *CreateExtractElement(Constant *Vec, Constant *Idx) const override {
return ConstantExpr::getExtractElement(Vec, Idx);
}
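
The folder callbacks above now accept arbitrary Values and return nullptr when nothing can be simplified, which is exactly how the IRBuilder changes later in this patch consume them. A minimal sketch of that contract in isolation, using only FoldAdd as declared in this hunk.

    #include "llvm/IR/ConstantFolder.h"

    using namespace llvm;

    // Returns the folded constant, or nullptr when either operand is not a
    // constant and a real add instruction would have to be emitted instead.
    Value *tryFoldAdd(Value *LHS, Value *RHS) {
      ConstantFolder Folder;
      return Folder.FoldAdd(LHS, RHS, /*HasNUW=*/false, /*HasNSW=*/false);
    }
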
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/Constants.h b/contrib/llvm-project/llvm/include/llvm/IR/Constants.h
index 65d453861628..fb884912b318 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/Constants.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/Constants.h
@@ -1196,13 +1196,6 @@ public:
/// and the getIndices() method may be used.
bool hasIndices() const;
- /// Return true if this is a getelementptr expression and all
- /// the index operands are compile-time known integers within the
- /// corresponding notional static array extents. Note that this is
- /// not equivalant to, a subset of, or a superset of the "inbounds"
- /// property.
- bool isGEPWithNoNotionalOverIndexing() const;
-
/// Select constant expr
///
/// \param OnlyIfReducedTy see \a getWithOperands() docs.
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/DIBuilder.h b/contrib/llvm-project/llvm/include/llvm/IR/DIBuilder.h
index 61c6dd885980..f36c9e620d43 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/DIBuilder.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/DIBuilder.h
@@ -46,6 +46,7 @@ namespace llvm {
Function *DeclareFn; ///< llvm.dbg.declare
Function *ValueFn; ///< llvm.dbg.value
Function *LabelFn; ///< llvm.dbg.label
+ Function *AddrFn; ///< llvm.dbg.addr
SmallVector<Metadata *, 4> AllEnumTypes;
/// Track the RetainTypes, since they can be updated later on.
@@ -86,12 +87,25 @@ namespace llvm {
Instruction *insertLabel(DILabel *LabelInfo, const DILocation *DL,
BasicBlock *InsertBB, Instruction *InsertBefore);
+ /// Internal helper with common code used by insertDbg{Value,Addr}Intrinsic.
+ Instruction *insertDbgIntrinsic(llvm::Function *Intrinsic, llvm::Value *Val,
+ DILocalVariable *VarInfo,
+ DIExpression *Expr, const DILocation *DL,
+ BasicBlock *InsertBB,
+ Instruction *InsertBefore);
+
/// Internal helper for insertDbgValueIntrinsic.
Instruction *
insertDbgValueIntrinsic(llvm::Value *Val, DILocalVariable *VarInfo,
DIExpression *Expr, const DILocation *DL,
BasicBlock *InsertBB, Instruction *InsertBefore);
+ /// Internal helper for insertDbgAddrIntrinsic.
+ Instruction *
+ insertDbgAddrIntrinsic(llvm::Value *Val, DILocalVariable *VarInfo,
+ DIExpression *Expr, const DILocation *DL,
+ BasicBlock *InsertBB, Instruction *InsertBefore);
+
public:
/// Construct a builder for a module.
///
@@ -698,7 +712,6 @@ namespace llvm {
/// variable which has a complex address expression for its address.
/// \param Addr An array of complex address operations.
DIExpression *createExpression(ArrayRef<uint64_t> Addr = None);
- DIExpression *createExpression(ArrayRef<int64_t> Addr);
/// Create an expression for a variable that does not have an address, but
/// does have a constant value.
@@ -930,6 +943,30 @@ namespace llvm {
const DILocation *DL,
Instruction *InsertBefore);
+ /// Insert a new llvm.dbg.addr intrinsic call.
+ /// \param Addr llvm::Value of the address
+ /// \param VarInfo Variable's debug info descriptor.
+ /// \param Expr A complex location expression.
+ /// \param DL Debug info location.
+ /// \param InsertAtEnd Location for the new intrinsic.
+ Instruction *insertDbgAddrIntrinsic(llvm::Value *Addr,
+ DILocalVariable *VarInfo,
+ DIExpression *Expr,
+ const DILocation *DL,
+ BasicBlock *InsertAtEnd);
+
+ /// Insert a new llvm.dbg.addr intrinsic call.
+ /// \param Addr llvm::Value of the address.
+ /// \param VarInfo Variable's debug info descriptor.
+ /// \param Expr A complex location expression.
+ /// \param DL Debug info location.
+ /// \param InsertBefore Location for the new intrinsic.
+ Instruction *insertDbgAddrIntrinsic(llvm::Value *Addr,
+ DILocalVariable *VarInfo,
+ DIExpression *Expr,
+ const DILocation *DL,
+ Instruction *InsertBefore);
+
/// Replace the vtable holder in the given type.
///
/// If this creates a self reference, it may orphan some unresolved cycles
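
DIBuilder gains insertDbgAddrIntrinsic overloads mirroring the existing dbg.value helpers. A minimal sketch of the instruction-anchored overload, using only the parameters listed above; the variable, location, and insertion point are assumed to be created elsewhere by the caller.

    #include "llvm/IR/DIBuilder.h"
    #include "llvm/IR/DebugInfoMetadata.h"

    using namespace llvm;

    // Emit llvm.dbg.addr describing Var at address Addr, right before InsertPt.
    Instruction *emitDbgAddr(DIBuilder &DIB, Value *Addr, DILocalVariable *Var,
                             const DILocation *Loc, Instruction *InsertPt) {
      DIExpression *Empty = DIB.createExpression(); // no complex address ops
      return DIB.insertDbgAddrIntrinsic(Addr, Var, Empty, Loc, InsertPt);
    }
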
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/DebugInfoMetadata.h b/contrib/llvm-project/llvm/include/llvm/IR/DebugInfoMetadata.h
index c04f07c534af..ba2568042c41 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/DebugInfoMetadata.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/DebugInfoMetadata.h
@@ -852,42 +852,48 @@ class DIStringType : public DIType {
static DIStringType *getImpl(LLVMContext &Context, unsigned Tag,
StringRef Name, Metadata *StringLength,
- Metadata *StrLenExp, uint64_t SizeInBits,
- uint32_t AlignInBits, unsigned Encoding,
- StorageType Storage, bool ShouldCreate = true) {
+ Metadata *StrLenExp, Metadata *StrLocationExp,
+ uint64_t SizeInBits, uint32_t AlignInBits,
+ unsigned Encoding, StorageType Storage,
+ bool ShouldCreate = true) {
return getImpl(Context, Tag, getCanonicalMDString(Context, Name),
- StringLength, StrLenExp, SizeInBits, AlignInBits, Encoding,
- Storage, ShouldCreate);
+ StringLength, StrLenExp, StrLocationExp, SizeInBits,
+ AlignInBits, Encoding, Storage, ShouldCreate);
}
static DIStringType *getImpl(LLVMContext &Context, unsigned Tag,
MDString *Name, Metadata *StringLength,
- Metadata *StrLenExp, uint64_t SizeInBits,
- uint32_t AlignInBits, unsigned Encoding,
- StorageType Storage, bool ShouldCreate = true);
+ Metadata *StrLenExp, Metadata *StrLocationExp,
+ uint64_t SizeInBits, uint32_t AlignInBits,
+ unsigned Encoding, StorageType Storage,
+ bool ShouldCreate = true);
TempDIStringType cloneImpl() const {
return getTemporary(getContext(), getTag(), getRawName(),
getRawStringLength(), getRawStringLengthExp(),
- getSizeInBits(), getAlignInBits(), getEncoding());
+ getRawStringLocationExp(), getSizeInBits(),
+ getAlignInBits(), getEncoding());
}
public:
DEFINE_MDNODE_GET(DIStringType,
(unsigned Tag, StringRef Name, uint64_t SizeInBits,
uint32_t AlignInBits),
- (Tag, Name, nullptr, nullptr, SizeInBits, AlignInBits, 0))
+ (Tag, Name, nullptr, nullptr, nullptr, SizeInBits,
+ AlignInBits, 0))
DEFINE_MDNODE_GET(DIStringType,
(unsigned Tag, MDString *Name, Metadata *StringLength,
- Metadata *StringLengthExp, uint64_t SizeInBits,
- uint32_t AlignInBits, unsigned Encoding),
- (Tag, Name, StringLength, StringLengthExp, SizeInBits,
- AlignInBits, Encoding))
+ Metadata *StringLengthExp, Metadata *StringLocationExp,
+ uint64_t SizeInBits, uint32_t AlignInBits,
+ unsigned Encoding),
+ (Tag, Name, StringLength, StringLengthExp,
+ StringLocationExp, SizeInBits, AlignInBits, Encoding))
DEFINE_MDNODE_GET(DIStringType,
(unsigned Tag, StringRef Name, Metadata *StringLength,
- Metadata *StringLengthExp, uint64_t SizeInBits,
- uint32_t AlignInBits, unsigned Encoding),
- (Tag, Name, StringLength, StringLengthExp, SizeInBits,
- AlignInBits, Encoding))
+ Metadata *StringLengthExp, Metadata *StringLocationExp,
+ uint64_t SizeInBits, uint32_t AlignInBits,
+ unsigned Encoding),
+ (Tag, Name, StringLength, StringLengthExp,
+ StringLocationExp, SizeInBits, AlignInBits, Encoding))
TempDIStringType clone() const { return cloneImpl(); }
@@ -903,11 +909,17 @@ public:
return cast_or_null<DIExpression>(getRawStringLengthExp());
}
+ DIExpression *getStringLocationExp() const {
+ return cast_or_null<DIExpression>(getRawStringLocationExp());
+ }
+
unsigned getEncoding() const { return Encoding; }
Metadata *getRawStringLength() const { return getOperand(3); }
Metadata *getRawStringLengthExp() const { return getOperand(4); }
+
+ Metadata *getRawStringLocationExp() const { return getOperand(5); }
};
/// Derived types.
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/DerivedTypes.h b/contrib/llvm-project/llvm/include/llvm/IR/DerivedTypes.h
index 8a1b26e699e3..f52ce3cde318 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/DerivedTypes.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/DerivedTypes.h
@@ -667,9 +667,11 @@ public:
unsigned AddressSpace) {
if (PT->isOpaque())
return get(PT->getContext(), AddressSpace);
- return get(PT->getElementType(), AddressSpace);
+ return get(PT->PointeeTy, AddressSpace);
}
+ [[deprecated("Pointer element types are deprecated. You can *temporarily* "
+ "use Type::getPointerElementType() instead")]]
Type *getElementType() const {
assert(!isOpaque() && "Attempting to get element type of opaque pointer");
return PointeeTy;
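
PointerType::getElementType is now deprecated in favor of the (itself transitional) Type::getPointerElementType, as part of the opaque-pointer migration. A minimal sketch of the substitution the deprecation note suggests; callers are assumed to already hold a pointer-typed Value.

    #include "llvm/IR/Type.h"
    #include "llvm/IR/Value.h"

    using namespace llvm;

    // Old style (now deprecated):
    //   cast<PointerType>(V->getType())->getElementType();
    // Transitional replacement per the deprecation message:
    Type *getPointeeType(const Value *V) {
      return V->getType()->getPointerElementType();
    }
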
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/Function.h b/contrib/llvm-project/llvm/include/llvm/IR/Function.h
index 669418eacbb0..90095cd1bc77 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/Function.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/Function.h
@@ -364,7 +364,7 @@ public:
/// Remove function attribute from this function.
void removeFnAttr(StringRef Kind);
- void removeFnAttrs(const AttrBuilder &Attrs);
+ void removeFnAttrs(const AttributeMask &Attrs);
/// removes the attribute from the return value list of attributes.
void removeRetAttr(Attribute::AttrKind Kind);
@@ -373,7 +373,7 @@ public:
void removeRetAttr(StringRef Kind);
/// removes the attributes from the return value list of attributes.
- void removeRetAttrs(const AttrBuilder &Attrs);
+ void removeRetAttrs(const AttributeMask &Attrs);
/// removes the attribute from the list of attributes.
void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind);
@@ -382,7 +382,7 @@ public:
void removeParamAttr(unsigned ArgNo, StringRef Kind);
/// removes the attribute from the list of attributes.
- void removeParamAttrs(unsigned ArgNo, const AttrBuilder &Attrs);
+ void removeParamAttrs(unsigned ArgNo, const AttributeMask &Attrs);
/// Return true if the function has the attribute.
bool hasFnAttribute(Attribute::AttrKind Kind) const;
@@ -509,10 +509,10 @@ public:
}
/// Determine if the function does not access or only writes memory.
- bool doesNotReadMemory() const {
+ bool onlyWritesMemory() const {
return doesNotAccessMemory() || hasFnAttribute(Attribute::WriteOnly);
}
- void setDoesNotReadMemory() {
+ void setOnlyWritesMemory() {
addFnAttr(Attribute::WriteOnly);
}
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/GetElementPtrTypeIterator.h b/contrib/llvm-project/llvm/include/llvm/IR/GetElementPtrTypeIterator.h
index ed854e458da2..1fa996229749 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/GetElementPtrTypeIterator.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/GetElementPtrTypeIterator.h
@@ -32,8 +32,6 @@ class generic_gep_type_iterator {
ItTy OpIt;
PointerUnion<StructType *, Type *> CurTy;
- enum : uint64_t { Unbounded = -1ull };
- uint64_t NumElements = Unbounded;
generic_gep_type_iterator() = default;
@@ -79,16 +77,11 @@ public:
generic_gep_type_iterator &operator++() { // Preincrement
Type *Ty = getIndexedType();
- if (auto *ATy = dyn_cast<ArrayType>(Ty)) {
+ if (auto *ATy = dyn_cast<ArrayType>(Ty))
CurTy = ATy->getElementType();
- NumElements = ATy->getNumElements();
- } else if (auto *VTy = dyn_cast<VectorType>(Ty)) {
+ else if (auto *VTy = dyn_cast<VectorType>(Ty))
CurTy = VTy->getElementType();
- if (isa<ScalableVectorType>(VTy))
- NumElements = Unbounded;
- else
- NumElements = cast<FixedVectorType>(VTy)->getNumElements();
- } else
+ else
CurTy = dyn_cast<StructType>(Ty);
++OpIt;
return *this;
@@ -123,15 +116,6 @@ public:
StructType *getStructTypeOrNull() const {
return CurTy.dyn_cast<StructType *>();
}
-
- bool isBoundedSequential() const {
- return isSequential() && NumElements != Unbounded;
- }
-
- uint64_t getSequentialNumElements() const {
- assert(isBoundedSequential());
- return NumElements;
- }
};
using gep_type_iterator = generic_gep_type_iterator<>;
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/GlobalObject.h b/contrib/llvm-project/llvm/include/llvm/IR/GlobalObject.h
index e15cf718bb10..0bb9fd730059 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/GlobalObject.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/GlobalObject.h
@@ -22,7 +22,6 @@
namespace llvm {
class Comdat;
-class MDNode;
class Metadata;
class GlobalObject : public GlobalValue {
@@ -48,6 +47,7 @@ protected:
ObjComdat(nullptr) {
setGlobalValueSubClassData(0);
}
+ ~GlobalObject();
Comdat *ObjComdat;
enum {
@@ -122,7 +122,7 @@ public:
bool hasComdat() const { return getComdat() != nullptr; }
const Comdat *getComdat() const { return ObjComdat; }
Comdat *getComdat() { return ObjComdat; }
- void setComdat(Comdat *C) { ObjComdat = C; }
+ void setComdat(Comdat *C);
using Value::addMetadata;
using Value::clearMetadata;
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/GlobalVariable.h b/contrib/llvm-project/llvm/include/llvm/IR/GlobalVariable.h
index 674d49eb9de6..e772964fcc6b 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/GlobalVariable.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/GlobalVariable.h
@@ -34,7 +34,6 @@ class Constant;
class Module;
template <typename ValueSubClass> class SymbolTableListTraits;
-class DIGlobalVariable;
class DIGlobalVariableExpression;
class GlobalVariable : public GlobalObject, public ilist_node<GlobalVariable> {
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/IRBuilder.h b/contrib/llvm-project/llvm/include/llvm/IR/IRBuilder.h
index bcf52278ccbb..53f517480ca1 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/IRBuilder.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/IRBuilder.h
@@ -1211,9 +1211,8 @@ private:
public:
Value *CreateAdd(Value *LHS, Value *RHS, const Twine &Name = "",
bool HasNUW = false, bool HasNSW = false) {
- if (auto *LC = dyn_cast<Constant>(LHS))
- if (auto *RC = dyn_cast<Constant>(RHS))
- return Insert(Folder.CreateAdd(LC, RC, HasNUW, HasNSW), Name);
+ if (auto *V = Folder.FoldAdd(LHS, RHS, HasNUW, HasNSW))
+ return V;
return CreateInsertNUWNSWBinOp(Instruction::Add, LHS, RHS, Name,
HasNUW, HasNSW);
}
@@ -1360,12 +1359,8 @@ public:
}
Value *CreateAnd(Value *LHS, Value *RHS, const Twine &Name = "") {
- if (auto *RC = dyn_cast<Constant>(RHS)) {
- if (isa<ConstantInt>(RC) && cast<ConstantInt>(RC)->isMinusOne())
- return LHS; // LHS & -1 -> LHS
- if (auto *LC = dyn_cast<Constant>(LHS))
- return Insert(Folder.CreateAnd(LC, RC), Name);
- }
+ if (auto *V = Folder.FoldAnd(LHS, RHS))
+ return V;
return Insert(BinaryOperator::CreateAnd(LHS, RHS), Name);
}
@@ -1386,12 +1381,8 @@ public:
}
Value *CreateOr(Value *LHS, Value *RHS, const Twine &Name = "") {
- if (auto *RC = dyn_cast<Constant>(RHS)) {
- if (RC->isNullValue())
- return LHS; // LHS | 0 -> LHS
- if (auto *LC = dyn_cast<Constant>(LHS))
- return Insert(Folder.CreateOr(LC, RC), Name);
- }
+ if (auto *V = Folder.FoldOr(LHS, RHS))
+ return V;
return Insert(BinaryOperator::CreateOr(LHS, RHS), Name);
}
@@ -1571,6 +1562,15 @@ public:
Cond2, Name);
}
+ // NOTE: this is a sequential, non-commutative, ordered reduction!
+ Value *CreateLogicalOr(ArrayRef<Value *> Ops) {
+ assert(!Ops.empty());
+ Value *Accum = Ops[0];
+ for (unsigned i = 1; i < Ops.size(); i++)
+ Accum = CreateLogicalOr(Accum, Ops[i]);
+ return Accum;
+ }
+
CallInst *CreateConstrainedFPBinOp(
Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource = nullptr,
const Twine &Name = "", MDNode *FPMathTag = nullptr,
@@ -1735,45 +1735,28 @@ public:
Value *CreateGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
const Twine &Name = "") {
- if (auto *PC = dyn_cast<Constant>(Ptr)) {
- // Every index must be constant.
- size_t i, e;
- for (i = 0, e = IdxList.size(); i != e; ++i)
- if (!isa<Constant>(IdxList[i]))
- break;
- if (i == e)
- return Insert(Folder.CreateGetElementPtr(Ty, PC, IdxList), Name);
- }
+ if (auto *V = Folder.FoldGEP(Ty, Ptr, IdxList, /*IsInBounds=*/false))
+ return V;
return Insert(GetElementPtrInst::Create(Ty, Ptr, IdxList), Name);
}
Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
const Twine &Name = "") {
- if (auto *PC = dyn_cast<Constant>(Ptr)) {
- // Every index must be constant.
- size_t i, e;
- for (i = 0, e = IdxList.size(); i != e; ++i)
- if (!isa<Constant>(IdxList[i]))
- break;
- if (i == e)
- return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IdxList),
- Name);
- }
+ if (auto *V = Folder.FoldGEP(Ty, Ptr, IdxList, /*IsInBounds=*/true))
+ return V;
return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, IdxList), Name);
}
Value *CreateGEP(Type *Ty, Value *Ptr, Value *Idx, const Twine &Name = "") {
- if (auto *PC = dyn_cast<Constant>(Ptr))
- if (auto *IC = dyn_cast<Constant>(Idx))
- return Insert(Folder.CreateGetElementPtr(Ty, PC, IC), Name);
+ if (auto *V = Folder.FoldGEP(Ty, Ptr, {Idx}, /*IsInBounds=*/false))
+ return V;
return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
}
Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, Value *Idx,
const Twine &Name = "") {
- if (auto *PC = dyn_cast<Constant>(Ptr))
- if (auto *IC = dyn_cast<Constant>(Idx))
- return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IC), Name);
+ if (auto *V = Folder.FoldGEP(Ty, Ptr, {Idx}, /*IsInBounds=*/true))
+ return V;
return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
}
@@ -1781,8 +1764,8 @@ public:
const Twine &Name = "") {
Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
- if (auto *PC = dyn_cast<Constant>(Ptr))
- return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);
+ if (auto *V = Folder.FoldGEP(Ty, Ptr, Idx, /*IsInBounds=*/false))
+ return V;
return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
}
@@ -1791,8 +1774,8 @@ public:
const Twine &Name = "") {
Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
- if (auto *PC = dyn_cast<Constant>(Ptr))
- return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);
+ if (auto *V = Folder.FoldGEP(Ty, Ptr, Idx, /*IsInBounds=*/true))
+ return V;
return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
}
@@ -1804,8 +1787,8 @@ public:
ConstantInt::get(Type::getInt32Ty(Context), Idx1)
};
- if (auto *PC = dyn_cast<Constant>(Ptr))
- return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);
+ if (auto *V = Folder.FoldGEP(Ty, Ptr, Idxs, /*IsInBounds=*/false))
+ return V;
return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
}
@@ -1817,8 +1800,8 @@ public:
ConstantInt::get(Type::getInt32Ty(Context), Idx1)
};
- if (auto *PC = dyn_cast<Constant>(Ptr))
- return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);
+ if (auto *V = Folder.FoldGEP(Ty, Ptr, Idxs, /*IsInBounds=*/true))
+ return V;
return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
}
@@ -1827,8 +1810,8 @@ public:
const Twine &Name = "") {
Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
- if (auto *PC = dyn_cast<Constant>(Ptr))
- return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);
+ if (auto *V = Folder.FoldGEP(Ty, Ptr, Idx, /*IsInBounds=*/false))
+ return V;
return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
}
@@ -1837,8 +1820,8 @@ public:
const Twine &Name = "") {
Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
- if (auto *PC = dyn_cast<Constant>(Ptr))
- return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);
+ if (auto *V = Folder.FoldGEP(Ty, Ptr, Idx, /*IsInBounds=*/true))
+ return V;
return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
}
@@ -1850,8 +1833,8 @@ public:
ConstantInt::get(Type::getInt64Ty(Context), Idx1)
};
- if (auto *PC = dyn_cast<Constant>(Ptr))
- return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);
+ if (auto *V = Folder.FoldGEP(Ty, Ptr, Idxs, /*IsInBounds=*/false))
+ return V;
return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
}
@@ -1863,8 +1846,8 @@ public:
ConstantInt::get(Type::getInt64Ty(Context), Idx1)
};
- if (auto *PC = dyn_cast<Constant>(Ptr))
- return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);
+ if (auto *V = Folder.FoldGEP(Ty, Ptr, Idxs, /*IsInBounds=*/true))
+ return V;
return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
}
@@ -2215,9 +2198,8 @@ public:
Value *CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
const Twine &Name = "") {
- if (auto *LC = dyn_cast<Constant>(LHS))
- if (auto *RC = dyn_cast<Constant>(RHS))
- return Insert(Folder.CreateICmp(P, LC, RC), Name);
+ if (auto *V = Folder.FoldICmp(P, LHS, RHS))
+ return V;
return Insert(new ICmpInst(P, LHS, RHS), Name);
}
@@ -2431,7 +2413,8 @@ public:
/// This is intended to implement C-style pointer subtraction. As such, the
/// pointers must be appropriately aligned for their element types and
/// pointing into the same object.
- Value *CreatePtrDiff(Value *LHS, Value *RHS, const Twine &Name = "");
+ Value *CreatePtrDiff(Type *ElemTy, Value *LHS, Value *RHS,
+ const Twine &Name = "");
/// Create a launder.invariant.group intrinsic call. If Ptr type is
/// different from pointer to i8, it's casted to pointer to i8 in the same
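
Two of the IRBuilder changes above are worth a concrete illustration: CreateLogicalOr now has an ArrayRef overload that folds a list strictly left to right (the sequential, ordered reduction its comment warns about), and CreatePtrDiff now takes the element type explicitly. A minimal sketch of both, assuming an already-positioned IRBuilder<> and placeholder operands.

    #include "llvm/IR/IRBuilder.h"

    using namespace llvm;

    // Short-circuit-style OR over i1 values, evaluated strictly in order.
    Value *combineConditions(IRBuilder<> &B, ArrayRef<Value *> Conds) {
      return B.CreateLogicalOr(Conds);
    }

    // The element type must now be passed explicitly (opaque-pointer ready).
    Value *elementDistance(IRBuilder<> &B, Type *ElemTy, Value *L, Value *R) {
      return B.CreatePtrDiff(ElemTy, L, R, "ptrdiff");
    }
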
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/IRBuilderFolder.h b/contrib/llvm-project/llvm/include/llvm/IR/IRBuilderFolder.h
index e781e8e094af..2827ab553adc 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/IRBuilderFolder.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/IRBuilderFolder.h
@@ -26,11 +26,30 @@ public:
virtual ~IRBuilderFolder();
//===--------------------------------------------------------------------===//
+ // Value-based folders.
+ //
+ // Return an existing value or a constant if the operation can be simplified.
+ // Otherwise return nullptr.
+ //===--------------------------------------------------------------------===//
+ virtual Value *FoldAdd(Value *LHS, Value *RHS, bool HasNUW = false,
+ bool HasNSW = false) const = 0;
+
+ virtual Value *FoldAnd(Value *LHS, Value *RHS) const = 0;
+
+ virtual Value *FoldOr(Value *LHS, Value *RHS) const = 0;
+
+ virtual Value *FoldICmp(CmpInst::Predicate P, Value *LHS,
+ Value *RHS) const = 0;
+
+ virtual Value *FoldGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
+ bool IsInBounds = false) const = 0;
+
+ virtual Value *FoldSelect(Value *C, Value *True, Value *False) const = 0;
+
+ //===--------------------------------------------------------------------===//
// Binary Operators
//===--------------------------------------------------------------------===//
- virtual Value *CreateAdd(Constant *LHS, Constant *RHS,
- bool HasNUW = false, bool HasNSW = false) const = 0;
virtual Value *CreateFAdd(Constant *LHS, Constant *RHS) const = 0;
virtual Value *CreateSub(Constant *LHS, Constant *RHS,
bool HasNUW = false, bool HasNSW = false) const = 0;
@@ -52,8 +71,6 @@ public:
bool isExact = false) const = 0;
virtual Value *CreateAShr(Constant *LHS, Constant *RHS,
bool isExact = false) const = 0;
- virtual Value *CreateAnd(Constant *LHS, Constant *RHS) const = 0;
- virtual Value *CreateOr(Constant *LHS, Constant *RHS) const = 0;
virtual Value *CreateXor(Constant *LHS, Constant *RHS) const = 0;
virtual Value *CreateBinOp(Instruction::BinaryOps Opc,
Constant *LHS, Constant *RHS) const = 0;
@@ -69,29 +86,6 @@ public:
virtual Value *CreateUnOp(Instruction::UnaryOps Opc, Constant *C) const = 0;
//===--------------------------------------------------------------------===//
- // Memory Instructions
- //===--------------------------------------------------------------------===//
-
- virtual Value *CreateGetElementPtr(Type *Ty, Constant *C,
- ArrayRef<Constant *> IdxList) const = 0;
- // This form of the function only exists to avoid ambiguous overload
- // warnings about whether to convert Idx to ArrayRef<Constant *> or
- // ArrayRef<Value *>.
- virtual Value *CreateGetElementPtr(Type *Ty, Constant *C,
- Constant *Idx) const = 0;
- virtual Value *CreateGetElementPtr(Type *Ty, Constant *C,
- ArrayRef<Value *> IdxList) const = 0;
- virtual Value *CreateInBoundsGetElementPtr(
- Type *Ty, Constant *C, ArrayRef<Constant *> IdxList) const = 0;
- // This form of the function only exists to avoid ambiguous overload
- // warnings about whether to convert Idx to ArrayRef<Constant *> or
- // ArrayRef<Value *>.
- virtual Value *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
- Constant *Idx) const = 0;
- virtual Value *CreateInBoundsGetElementPtr(
- Type *Ty, Constant *C, ArrayRef<Value *> IdxList) const = 0;
-
- //===--------------------------------------------------------------------===//
// Cast/Conversion Operators
//===--------------------------------------------------------------------===//
@@ -114,8 +108,6 @@ public:
// Compare Instructions
//===--------------------------------------------------------------------===//
- virtual Value *CreateICmp(CmpInst::Predicate P, Constant *LHS,
- Constant *RHS) const = 0;
virtual Value *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
Constant *RHS) const = 0;
@@ -123,8 +115,6 @@ public:
// Other Instructions
//===--------------------------------------------------------------------===//
- virtual Value *CreateSelect(Constant *C, Constant *True,
- Constant *False) const = 0;
virtual Value *CreateExtractElement(Constant *Vec, Constant *Idx) const = 0;
virtual Value *CreateInsertElement(Constant *Vec, Constant *NewElt,
Constant *Idx) const = 0;
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/InlineAsm.h b/contrib/llvm-project/llvm/include/llvm/IR/InlineAsm.h
index 1a0767aca142..cf6b7af96980 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/InlineAsm.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/InlineAsm.h
@@ -171,6 +171,11 @@ public:
/// selectAlternative - Point this constraint to the alternative constraint
/// indicated by the index.
void selectAlternative(unsigned index);
+
+ /// Whether this constraint corresponds to an argument.
+ bool hasArg() const {
+ return Type == isInput || (Type == isOutput && isIndirect);
+ }
};
/// ParseConstraints - Split up the constraint string into the specific
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/InstrTypes.h b/contrib/llvm-project/llvm/include/llvm/IR/InstrTypes.h
index 143a87f4997d..b3d2a2c8ed9d 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/InstrTypes.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/InstrTypes.h
@@ -1544,7 +1544,7 @@ public:
}
/// Removes the attributes from the function
- void removeFnAttrs(const AttrBuilder &AttrsToRemove) {
+ void removeFnAttrs(const AttributeMask &AttrsToRemove) {
Attrs = Attrs.removeFnAttributes(getContext(), AttrsToRemove);
}
@@ -1559,7 +1559,7 @@ public:
}
/// Removes the attributes from the return value
- void removeRetAttrs(const AttrBuilder &AttrsToRemove) {
+ void removeRetAttrs(const AttributeMask &AttrsToRemove) {
Attrs = Attrs.removeRetAttributes(getContext(), AttrsToRemove);
}
@@ -1576,7 +1576,7 @@ public:
}
/// Removes the attributes from the given argument
- void removeParamAttrs(unsigned ArgNo, const AttrBuilder &AttrsToRemove) {
+ void removeParamAttrs(unsigned ArgNo, const AttributeMask &AttrsToRemove) {
Attrs = Attrs.removeParamAttributes(getContext(), ArgNo, AttrsToRemove);
}
@@ -1717,13 +1717,19 @@ public:
// FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
// better indicate that this may return a conservative answer.
- bool doesNotReadMemory(unsigned OpNo) const {
+ bool onlyWritesMemory(unsigned OpNo) const {
return dataOperandHasImpliedAttr(OpNo, Attribute::WriteOnly) ||
dataOperandHasImpliedAttr(OpNo, Attribute::ReadNone);
}
/// Extract the alignment of the return value.
- MaybeAlign getRetAlign() const { return Attrs.getRetAlignment(); }
+ MaybeAlign getRetAlign() const {
+ if (auto Align = Attrs.getRetAlignment())
+ return Align;
+ if (const Function *F = getCalledFunction())
+ return F->getAttributes().getRetAlignment();
+ return None;
+ }
/// Extract the alignment for a call or parameter (0=unknown).
MaybeAlign getParamAlign(unsigned ArgNo) const {
@@ -1818,16 +1824,16 @@ public:
/// Determine if the call does not access or only reads memory.
bool onlyReadsMemory() const {
- return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
+ return hasImpliedFnAttr(Attribute::ReadOnly);
}
void setOnlyReadsMemory() { addFnAttr(Attribute::ReadOnly); }
/// Determine if the call does not access or only writes memory.
- bool doesNotReadMemory() const {
- return doesNotAccessMemory() || hasFnAttr(Attribute::WriteOnly);
+ bool onlyWritesMemory() const {
+ return hasImpliedFnAttr(Attribute::WriteOnly);
}
- void setDoesNotReadMemory() { addFnAttr(Attribute::WriteOnly); }
+ void setOnlyWritesMemory() { addFnAttr(Attribute::WriteOnly); }
/// Determine if the call can access memmory only using pointers based
/// on its arguments.
@@ -2087,8 +2093,8 @@ public:
/// Is the function attribute S disallowed by some operand bundle on
/// this operand bundle user?
bool isFnAttrDisallowedByOpBundle(StringRef S) const {
- // Operand bundles only possibly disallow readnone, readonly and argmemonly
- // attributes. All String attributes are fine.
+ // Operand bundles only possibly disallow memory access attributes. All
+ // String attributes are fine.
return false;
}
@@ -2113,6 +2119,9 @@ public:
case Attribute::ReadOnly:
return hasClobberingOperandBundles();
+
+ case Attribute::WriteOnly:
+ return hasReadingOperandBundles();
}
llvm_unreachable("switch has a default case!");
@@ -2285,6 +2294,26 @@ private:
return hasFnAttrOnCalledFunction(Kind);
}
+ /// A specialized version of hasFnAttrImpl for when the caller wants to
+ /// know if an attribute's semantics are implied, not whether the attribute
+ /// is actually present. This distinction only exists when checking whether
+ /// something is readonly or writeonly since readnone implies both. The case
+ /// which motivates the specialized code is a callee with readnone, and an
+ /// operand bundle on the call which disallows readnone but not either
+ /// readonly or writeonly.
+ bool hasImpliedFnAttr(Attribute::AttrKind Kind) const {
+ assert((Kind == Attribute::ReadOnly || Kind == Attribute::WriteOnly) &&
+ "use hasFnAttrImpl instead");
+ if (Attrs.hasFnAttr(Kind) || Attrs.hasFnAttr(Attribute::ReadNone))
+ return true;
+
+ if (isFnAttrDisallowedByOpBundle(Kind))
+ return false;
+
+ return hasFnAttrOnCalledFunction(Kind) ||
+ hasFnAttrOnCalledFunction(Attribute::ReadNone);
+ }
+
/// Determine whether the return value has the given attribute. Supports
/// Attribute::AttrKind and StringRef as \p AttrKind types.
template <typename AttrKind> bool hasRetAttrImpl(AttrKind Kind) const {
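A hedged illustration (not part of the change) of what the renamed query buys a caller: callMayReadMemory is an invented helper, but onlyWritesMemory() and the readnone handling in hasImpliedFnAttr() are the members shown above.
#include "llvm/IR/InstrTypes.h"

// A call may read memory unless it is writeonly or readnone; both cases are
// covered by onlyWritesMemory() via the hasImpliedFnAttr() path.
static bool callMayReadMemory(const llvm::CallBase &CB) {
  return !CB.onlyWritesMemory();
}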
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicInst.h b/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicInst.h
index 647a912b72f6..f4e571e86493 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicInst.h
@@ -530,8 +530,8 @@ public:
Value *getRHS() const { return const_cast<Value *>(getArgOperand(1)); }
/// Returns the comparison predicate underlying the intrinsic.
- ICmpInst::Predicate getPredicate() const {
- switch (getIntrinsicID()) {
+ static ICmpInst::Predicate getPredicate(Intrinsic::ID ID) {
+ switch (ID) {
case Intrinsic::umin:
return ICmpInst::Predicate::ICMP_ULT;
case Intrinsic::umax:
@@ -545,8 +545,58 @@ public:
}
}
+ /// Returns the comparison predicate underlying the intrinsic.
+ ICmpInst::Predicate getPredicate() const {
+ return getPredicate(getIntrinsicID());
+ }
+
/// Whether the intrinsic is signed or unsigned.
- bool isSigned() const { return ICmpInst::isSigned(getPredicate()); };
+ static bool isSigned(Intrinsic::ID ID) {
+ return ICmpInst::isSigned(getPredicate(ID));
+ };
+
+ /// Whether the intrinsic is signed or unsigned.
+ bool isSigned() const { return isSigned(getIntrinsicID()); };
+
+  /// Min/max intrinsics are monotonic: they operate on fixed-bitwidth values,
+  /// so there is a threshold value beyond which their result can no longer
+  /// change. Return said threshold.
+ static APInt getSaturationPoint(Intrinsic::ID ID, unsigned numBits) {
+ switch (ID) {
+ case Intrinsic::umin:
+ return APInt::getMinValue(numBits);
+ case Intrinsic::umax:
+ return APInt::getMaxValue(numBits);
+ case Intrinsic::smin:
+ return APInt::getSignedMinValue(numBits);
+ case Intrinsic::smax:
+ return APInt::getSignedMaxValue(numBits);
+ default:
+ llvm_unreachable("Invalid intrinsic");
+ }
+ }
+
+  /// Min/max intrinsics are monotonic: they operate on fixed-bitwidth values,
+  /// so there is a threshold value beyond which their result can no longer
+  /// change. Return said threshold.
+ APInt getSaturationPoint(unsigned numBits) const {
+ return getSaturationPoint(getIntrinsicID(), numBits);
+ }
+
+  /// Min/max intrinsics are monotonic: they operate on fixed-bitwidth values,
+  /// so there is a threshold value beyond which their result can no longer
+  /// change. Return said threshold.
+ static Constant *getSaturationPoint(Intrinsic::ID ID, Type *Ty) {
+ return Constant::getIntegerValue(
+ Ty, getSaturationPoint(ID, Ty->getScalarSizeInBits()));
+ }
+
+  /// Min/max intrinsics are monotonic: they operate on fixed-bitwidth values,
+  /// so there is a threshold value beyond which their result can no longer
+  /// change. Return said threshold.
+ Constant *getSaturationPoint(Type *Ty) const {
+ return getSaturationPoint(getIntrinsicID(), Ty);
+ }
};
/// This class represents an intrinsic that is based on a binary operation.
@@ -1126,36 +1176,37 @@ public:
Value *getSrc() const { return const_cast<Value *>(getArgOperand(1)); }
};
-/// This represents the llvm.instrprof_increment intrinsic.
-class InstrProfIncrementInst : public IntrinsicInst {
+/// A base class for all instrprof intrinsics.
+class InstrProfInstBase : public IntrinsicInst {
public:
- static bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::instrprof_increment;
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
-
+ // The name of the instrumented function.
GlobalVariable *getName() const {
return cast<GlobalVariable>(
const_cast<Value *>(getArgOperand(0))->stripPointerCasts());
}
-
+ // The hash of the CFG for the instrumented function.
ConstantInt *getHash() const {
return cast<ConstantInt>(const_cast<Value *>(getArgOperand(1)));
}
+ // The number of counters for the instrumented function.
+ ConstantInt *getNumCounters() const;
+ // The index of the counter that this instruction acts on.
+ ConstantInt *getIndex() const;
+};
- ConstantInt *getNumCounters() const {
- return cast<ConstantInt>(const_cast<Value *>(getArgOperand(2)));
+/// This represents the llvm.instrprof.increment intrinsic.
+class InstrProfIncrementInst : public InstrProfInstBase {
+public:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::instrprof_increment;
}
-
- ConstantInt *getIndex() const {
- return cast<ConstantInt>(const_cast<Value *>(getArgOperand(3)));
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
}
-
Value *getStep() const;
};
+/// This represents the llvm.instrprof.increment.step intrinsic.
class InstrProfIncrementInstStep : public InstrProfIncrementInst {
public:
static bool classof(const IntrinsicInst *I) {
@@ -1166,8 +1217,8 @@ public:
}
};
-/// This represents the llvm.instrprof_value_profile intrinsic.
-class InstrProfValueProfileInst : public IntrinsicInst {
+/// This represents the llvm.instrprof.value.profile intrinsic.
+class InstrProfValueProfileInst : public InstrProfInstBase {
public:
static bool classof(const IntrinsicInst *I) {
return I->getIntrinsicID() == Intrinsic::instrprof_value_profile;
@@ -1176,15 +1227,6 @@ public:
return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
}
- GlobalVariable *getName() const {
- return cast<GlobalVariable>(
- const_cast<Value *>(getArgOperand(0))->stripPointerCasts());
- }
-
- ConstantInt *getHash() const {
- return cast<ConstantInt>(const_cast<Value *>(getArgOperand(1)));
- }
-
Value *getTargetValue() const {
return cast<Value>(const_cast<Value *>(getArgOperand(2)));
}
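A rough sketch of the kind of fold the new saturation-point helpers enable, assuming the enclosing class above is llvm::MinMaxIntrinsic (the class name is not visible in this hunk); foldSaturatedMinMax is an invented helper.
#include "llvm/IR/IntrinsicInst.h"

// If the second operand already equals the saturation point, e.g.
// smax(x, SIGNED_MAX), the intrinsic's result is that constant.
static llvm::Value *foldSaturatedMinMax(llvm::MinMaxIntrinsic &MM) {
  llvm::Constant *Sat = llvm::MinMaxIntrinsic::getSaturationPoint(
      MM.getIntrinsicID(), MM.getType());
  return MM.getRHS() == Sat ? Sat : nullptr;
}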
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/Intrinsics.td b/contrib/llvm-project/llvm/include/llvm/IR/Intrinsics.td
index da580de3dbd3..3e40bbf39dd4 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/Intrinsics.td
+++ b/contrib/llvm-project/llvm/include/llvm/IR/Intrinsics.td
@@ -1272,6 +1272,7 @@ def int_coro_end_async
def int_coro_frame : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
def int_coro_noop : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
def int_coro_size : Intrinsic<[llvm_anyint_ty], [], [IntrNoMem]>;
+def int_coro_align : Intrinsic<[llvm_anyint_ty], [], [IntrNoMem]>;
def int_coro_save : Intrinsic<[llvm_token_ty], [llvm_ptr_ty], []>;
def int_coro_suspend : Intrinsic<[llvm_i8_ty], [llvm_token_ty, llvm_i1_ty], []>;
@@ -1507,6 +1508,12 @@ def int_vp_select : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
LLVMMatchType<0>,
llvm_i32_ty]>;
+def int_vp_merge : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
+ [ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ LLVMMatchType<0>,
+ LLVMMatchType<0>,
+ llvm_i32_ty]>;
+
// Reductions
let IntrProperties = [IntrSpeculatable, IntrNoMem, IntrNoSync, IntrWillReturn] in {
def int_vp_reduce_fadd : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
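A hedged sketch of emitting the new llvm.vp.merge intrinsic through IRBuilder; emitVPMerge and its parameters are placeholders, and per the definition above the single overloaded type is the vector type while the trailing i32 is the explicit vector length.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"

static llvm::Value *emitVPMerge(llvm::IRBuilder<> &B, llvm::Value *Mask,
                                llvm::Value *OnTrue, llvm::Value *OnFalse,
                                llvm::Value *EVL) {
  // Operand order follows the definition: (mask, on_true, on_false, evl).
  return B.CreateIntrinsic(llvm::Intrinsic::vp_merge, {OnTrue->getType()},
                           {Mask, OnTrue, OnFalse, EVL});
}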
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsAArch64.td b/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsAArch64.td
index c586af45f34d..e610c28a5923 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -162,6 +162,10 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
[LLVMMatchType<0>, llvm_anyint_ty, llvm_i32_ty],
[IntrNoMem]>;
+ class AdvSIMD_3IntArg_Intrinsic
+ : DefaultAttrsIntrinsic<[llvm_anyint_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
class AdvSIMD_3VectorArg_Intrinsic
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
@@ -258,6 +262,9 @@ let TargetPrefix = "aarch64", IntrProperties = [IntrNoMem] in {
def int_aarch64_neon_sqrdmulh_lane : AdvSIMD_2VectorArg_Lane_Intrinsic;
def int_aarch64_neon_sqrdmulh_laneq : AdvSIMD_2VectorArg_Lane_Intrinsic;
+ def int_aarch64_neon_sqrdmlah : AdvSIMD_3IntArg_Intrinsic;
+ def int_aarch64_neon_sqrdmlsh : AdvSIMD_3IntArg_Intrinsic;
+
// Vector Polynominal Multiply
def int_aarch64_neon_pmul : AdvSIMD_2VectorArg_Intrinsic;
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 2f2564702b87..c5d266eb57ec 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -558,6 +558,9 @@ class AMDGPUSampleVariant<string ucmod, string lcmod, list<AMDGPUArg> extra_addr
// {offset} {bias} {z-compare}
list<AMDGPUArg> ExtraAddrArgs = extra_addr;
+ bit Offset = false;
+ bit Bias = false;
+ bit ZCompare = false;
bit Gradients = false;
// Name of the {lod} or {clamp} argument that is appended to the coordinates,
@@ -571,6 +574,7 @@ defset list<AMDGPUSampleVariant> AMDGPUSampleVariants = {
multiclass AMDGPUSampleHelper_Offset<string ucmod, string lcmod,
list<AMDGPUArg> extra_addr> {
def NAME#lcmod : AMDGPUSampleVariant<ucmod, lcmod, extra_addr>;
+ let Offset = true in
def NAME#lcmod#_o : AMDGPUSampleVariant<
ucmod#"_O", lcmod#"_o", !listconcat([AMDGPUArg<llvm_i32_ty, "offset">], extra_addr)>;
}
@@ -578,6 +582,7 @@ defset list<AMDGPUSampleVariant> AMDGPUSampleVariants = {
multiclass AMDGPUSampleHelper_Compare<string ucmod, string lcmod,
list<AMDGPUArg> extra_addr> {
defm NAME : AMDGPUSampleHelper_Offset<ucmod, lcmod, extra_addr>;
+ let ZCompare = true in
defm NAME : AMDGPUSampleHelper_Offset<
"_C"#ucmod, "_c"#lcmod, !listconcat(extra_addr, [AMDGPUArg<llvm_float_ty, "zcompare">])>;
}
@@ -591,6 +596,7 @@ defset list<AMDGPUSampleVariant> AMDGPUSampleVariants = {
defset list<AMDGPUSampleVariant> AMDGPUSampleVariantsNoGradients = {
defm AMDGPUSample : AMDGPUSampleHelper_Clamp<"", "", []>;
+ let Bias = true in
defm AMDGPUSample : AMDGPUSampleHelper_Clamp<
"_B", "_b", [AMDGPUArg<llvm_anyfloat_ty, "bias">]>;
let LodOrClamp = "lod" in
@@ -618,6 +624,9 @@ class AMDGPUDimProfile<string opmod,
list<LLVMType> RetTypes = [];
list<AMDGPUArg> DataArgs = [];
list<AMDGPUArg> ExtraAddrArgs = [];
+ bit Offset = false;
+ bit Bias = false;
+ bit ZCompare = false;
bit Gradients = false;
string LodClampMip = "";
@@ -652,6 +661,9 @@ class AMDGPUDimProfileCopy<AMDGPUDimProfile base> : AMDGPUDimProfile<base.OpMod,
let RetTypes = base.RetTypes;
let DataArgs = base.DataArgs;
let ExtraAddrArgs = base.ExtraAddrArgs;
+ let Offset = base.Offset;
+ let Bias = base.Bias;
+ let ZCompare = base.ZCompare;
let Gradients = base.Gradients;
let LodClampMip = base.LodClampMip;
}
@@ -662,6 +674,9 @@ class AMDGPUDimSampleProfile<string opmod,
let IsSample = true;
let RetTypes = [llvm_any_ty];
let ExtraAddrArgs = sample.ExtraAddrArgs;
+ let Offset = sample.Offset;
+ let Bias = sample.Bias;
+ let ZCompare = sample.ZCompare;
let Gradients = sample.Gradients;
let LodClampMip = sample.LodOrClamp;
}
@@ -702,7 +717,10 @@ class AMDGPUDimGetResInfoProfile<AMDGPUDimProps dim>
class AMDGPUImageDimIntrinsicEval<AMDGPUDimProfile P_> {
int NumDataArgs = !size(P_.DataArgs);
int NumDmaskArgs = !not(P_.IsAtomic);
- int NumExtraAddrArgs = !size(P_.ExtraAddrArgs);
+ int NumOffsetArgs = !if(P_.Offset, 1, 0);
+ int NumBiasArgs = !if(P_.Bias, 1, 0);
+ int NumZCompareArgs = !if(P_.ZCompare, 1, 0);
+ int NumExtraAddrArgs = !add(NumOffsetArgs, NumBiasArgs, NumZCompareArgs);
int NumVAddrArgs = !size(P_.AddrArgs);
int NumGradientArgs = !if(P_.Gradients, !size(P_.Dim.GradientArgs), 0);
int NumCoordArgs = !if(P_.IsSample, !size(P_.Dim.CoordSliceArgs), !size(P_.Dim.CoordSliceIntArgs));
@@ -710,6 +728,9 @@ class AMDGPUImageDimIntrinsicEval<AMDGPUDimProfile P_> {
int NumSampArgs = !if(P_.IsSample, 2, 0);
int DmaskArgIndex = NumDataArgs;
int VAddrArgIndex = !add(DmaskArgIndex, NumDmaskArgs);
+ int OffsetArgIndex = VAddrArgIndex;
+ int BiasArgIndex = !add(VAddrArgIndex, NumOffsetArgs);
+ int ZCompareArgIndex = !add(BiasArgIndex, NumBiasArgs);
int GradientArgIndex = !add(VAddrArgIndex, NumExtraAddrArgs);
int CoordArgIndex = !add(GradientArgIndex, NumGradientArgs);
int LodArgIndex = !add(VAddrArgIndex, NumVAddrArgs, -1);
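As a worked illustration of the index arithmetic above (plain C++ mirroring the TableGen !add chain, not backend code): for a hypothetical sample variant that carries an offset, a bias and a z-compare operand, the per-kind address operands pack back to back right after the dmask operand.
struct ImageDimIndices {
  int VAddr, Offset, Bias, ZCompare, Gradient;
};

// One dmask operand; data args omitted for brevity.
static ImageDimIndices imageDimIndexExample() {
  const int NumDmaskArgs = 1, NumOffsetArgs = 1, NumBiasArgs = 1,
            NumZCompareArgs = 1;
  ImageDimIndices I;
  I.VAddr = /*DmaskArgIndex=*/0 + NumDmaskArgs;                         // 1
  I.Offset = I.VAddr;                                                   // 1
  I.Bias = I.VAddr + NumOffsetArgs;                                     // 2
  I.ZCompare = I.Bias + NumBiasArgs;                                    // 3
  I.Gradient = I.VAddr + NumOffsetArgs + NumBiasArgs + NumZCompareArgs; // 4
  return I;
}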
@@ -1514,12 +1535,6 @@ def int_amdgcn_writelane :
[IntrNoMem, IntrConvergent, IntrWillReturn]
>;
-// FIXME: Deprecated. This is equivalent to llvm.fshr
-def int_amdgcn_alignbit : Intrinsic<[llvm_i32_ty],
- [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable, IntrWillReturn]
->;
-
def int_amdgcn_alignbyte : GCCBuiltin<"__builtin_amdgcn_alignbyte">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
[IntrNoMem, IntrSpeculatable, IntrWillReturn]
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsARM.td b/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsARM.td
index cf375b9280db..a42484757592 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsARM.td
+++ b/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsARM.td
@@ -764,6 +764,9 @@ def int_arm_neon_sha256h: SHA_3Arg_v4i32_Intrinsic;
def int_arm_neon_sha256h2: SHA_3Arg_v4i32_Intrinsic;
def int_arm_neon_sha256su1: SHA_3Arg_v4i32_Intrinsic;
+def int_arm_neon_vqrdmlah : Neon_3Arg_Intrinsic;
+def int_arm_neon_vqrdmlsh : Neon_3Arg_Intrinsic;
+
// Armv8.2-A dot product instructions
class Neon_Dot_Intrinsic
: Intrinsic<[llvm_anyvector_ty],
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsNVVM.td b/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsNVVM.td
index 6f55d1ef730e..41b28db56c75 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsNVVM.td
+++ b/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsNVVM.td
@@ -1185,6 +1185,36 @@ let TargetPrefix = "nvvm" in {
def int_nvvm_f2h_rn : GCCBuiltin<"__nvvm_f2h_rn">,
DefaultAttrsIntrinsic<[llvm_i16_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
+ def int_nvvm_ff2bf16x2_rn : GCCBuiltin<"__nvvm_ff2bf16x2_rn">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_ff2bf16x2_rn_relu : GCCBuiltin<"__nvvm_ff2bf16x2_rn_relu">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_ff2bf16x2_rz : GCCBuiltin<"__nvvm_ff2bf16x2_rz">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_ff2bf16x2_rz_relu : GCCBuiltin<"__nvvm_ff2bf16x2_rz_relu">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_ff2f16x2_rn : GCCBuiltin<"__nvvm_ff2f16x2_rn">,
+ Intrinsic<[llvm_v2f16_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_ff2f16x2_rn_relu : GCCBuiltin<"__nvvm_ff2f16x2_rn_relu">,
+ Intrinsic<[llvm_v2f16_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_ff2f16x2_rz : GCCBuiltin<"__nvvm_ff2f16x2_rz">,
+ Intrinsic<[llvm_v2f16_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_ff2f16x2_rz_relu : GCCBuiltin<"__nvvm_ff2f16x2_rz_relu">,
+ Intrinsic<[llvm_v2f16_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_f2bf16_rn : GCCBuiltin<"__nvvm_f2bf16_rn">,
+ Intrinsic<[llvm_i16_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2bf16_rn_relu : GCCBuiltin<"__nvvm_f2bf16_rn_relu">,
+ Intrinsic<[llvm_i16_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2bf16_rz : GCCBuiltin<"__nvvm_f2bf16_rz">,
+ Intrinsic<[llvm_i16_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2bf16_rz_relu : GCCBuiltin<"__nvvm_f2bf16_rz_relu">,
+ Intrinsic<[llvm_i16_ty], [llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_f2tf32_rna : GCCBuiltin<"__nvvm_f2tf32_rna">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+
//
// Bitcast
//
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsRISCV.td b/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 747049b1035b..6780436bd701 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -80,19 +80,28 @@ let TargetPrefix = "riscv" in {
: Intrinsic<[llvm_any_ty],
[LLVMMatchType<0>, LLVMMatchType<0>],
[IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
+ class BitManipGPRGPRGRIntrinsics
+ : Intrinsic<[llvm_any_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
// Zbb
def int_riscv_orc_b : BitManipGPRIntrinsics;
- // Zbc
+ // Zbc or Zbkc
def int_riscv_clmul : BitManipGPRGPRIntrinsics;
def int_riscv_clmulh : BitManipGPRGPRIntrinsics;
+
+ // Zbc
def int_riscv_clmulr : BitManipGPRGPRIntrinsics;
// Zbe
def int_riscv_bcompress : BitManipGPRGPRIntrinsics;
def int_riscv_bdecompress : BitManipGPRGPRIntrinsics;
+ // Zbf
+ def int_riscv_bfp : BitManipGPRGPRIntrinsics;
+
// Zbp
def int_riscv_grev : BitManipGPRGPRIntrinsics;
def int_riscv_gorc : BitManipGPRGPRIntrinsics;
@@ -112,17 +121,37 @@ let TargetPrefix = "riscv" in {
def int_riscv_crc32c_h : BitManipGPRIntrinsics;
def int_riscv_crc32c_w : BitManipGPRIntrinsics;
def int_riscv_crc32c_d : BitManipGPRIntrinsics;
+
+ // Zbt
+ def int_riscv_fsl : BitManipGPRGPRGRIntrinsics;
+ def int_riscv_fsr : BitManipGPRGPRGRIntrinsics;
+
+ // Zbkb
+ def int_riscv_brev8 : BitManipGPRIntrinsics;
+ def int_riscv_zip : BitManipGPRIntrinsics;
+ def int_riscv_unzip : BitManipGPRIntrinsics;
+
+ // Zbkx
+ def int_riscv_xperm4 : BitManipGPRGPRIntrinsics;
+ def int_riscv_xperm8 : BitManipGPRGPRIntrinsics;
} // TargetPrefix = "riscv"
//===----------------------------------------------------------------------===//
// Vectors
+// The intrinsic does not have any operand that must be extended.
+defvar NoSplatOperand = 0xF;
+
+// The intrinsic does not have a VL operand.
+// (e.g., riscv_vmv_x_s and riscv_vfmv_f_s)
+defvar NoVLOperand = 0x1F;
+
class RISCVVIntrinsic {
// These intrinsics may accept illegal integer values in their llvm_any_ty
- // operand, so they have to be extended. If set to zero then the intrinsic
- // does not have any operand that must be extended.
+ // operand, so they have to be extended.
Intrinsic IntrinsicID = !cast<Intrinsic>(NAME);
- bits<4> SplatOperand = 0;
+ bits<4> SplatOperand = NoSplatOperand;
+ bits<5> VLOperand = NoVLOperand;
}
let TargetPrefix = "riscv" in {
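A hedged sketch of what the new sentinel defaults mean to a consumer; the struct below is purely illustrative (the real per-intrinsic records are generated from TableGen and are not shown in this diff).
// Mirrors the two per-intrinsic fields declared above.
struct RVVIntrinsicRecordSketch {
  unsigned SplatOperand; // 0xF  (NoSplatOperand): no operand needs extending
  unsigned VLOperand;    // 0x1F (NoVLOperand): the intrinsic has no VL operand
};

static bool hasVLOperand(const RVVIntrinsicRecordSketch &R) {
  return R.VLOperand != 0x1F; // anything else is the 0-based VL operand index
}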
@@ -141,23 +170,54 @@ let TargetPrefix = "riscv" in {
ImmArg<ArgIndex<0>>,
ImmArg<ArgIndex<1>>]>;
- // For unit stride load
+  // Versions without side effects: easier to optimize and usable when only
+  // the returned vector length is important.
+ def int_riscv_vsetvli_opt : Intrinsic<[llvm_anyint_ty],
+ /* AVL */ [LLVMMatchType<0>,
+ /* VSEW */ LLVMMatchType<0>,
+ /* VLMUL */ LLVMMatchType<0>],
+ [IntrNoMem,
+ ImmArg<ArgIndex<1>>,
+ ImmArg<ArgIndex<2>>]>;
+ def int_riscv_vsetvlimax_opt : Intrinsic<[llvm_anyint_ty],
+ /* VSEW */ [LLVMMatchType<0>,
+ /* VLMUL */ LLVMMatchType<0>],
+ [IntrNoMem,
+ ImmArg<ArgIndex<0>>,
+ ImmArg<ArgIndex<1>>]>;
+
+ // For unit stride mask load
// Input: (pointer, vl)
- class RISCVUSLoad
+ class RISCVUSMLoad
: Intrinsic<[llvm_anyvector_ty],
[LLVMPointerType<LLVMMatchType<0>>,
llvm_anyint_ty],
- [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
+ [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
+ let VLOperand = 1;
+ }
+ // For unit stride load
+ // Input: (passthru, pointer, vl)
+ class RISCVUSLoad
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>,
+ LLVMPointerType<LLVMMatchType<0>>,
+ llvm_anyint_ty],
+ [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
+ let VLOperand = 2;
+ }
// For unit stride fault-only-first load
- // Input: (pointer, vl)
+ // Input: (passthru, pointer, vl)
// Output: (data, vl)
// NOTE: We model this with default memory properties since we model writing
// VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
class RISCVUSLoadFF
: Intrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
- [LLVMPointerType<LLVMMatchType<0>>, LLVMMatchType<1>],
- [NoCapture<ArgIndex<0>>]>,
- RISCVVIntrinsic;
+ [LLVMMatchType<0>,
+ LLVMPointerType<LLVMMatchType<0>>, LLVMMatchType<1>],
+ [NoCapture<ArgIndex<1>>]>,
+ RISCVVIntrinsic {
+ let VLOperand = 2;
+ }
// For unit stride load with mask
// Input: (maskedoff, pointer, mask, vl, ta)
class RISCVUSLoadMask
@@ -167,7 +227,9 @@ let TargetPrefix = "riscv" in {
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
llvm_anyint_ty, LLVMMatchType<1>],
[NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>, IntrReadMem]>,
- RISCVVIntrinsic;
+ RISCVVIntrinsic {
+ let VLOperand = 3;
+ }
// For unit stride fault-only-first load with mask
// Input: (maskedoff, pointer, mask, vl, ta)
// Output: (data, vl)
@@ -179,14 +241,19 @@ let TargetPrefix = "riscv" in {
LLVMPointerType<LLVMMatchType<0>>,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
LLVMMatchType<1>, LLVMMatchType<1>],
- [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic;
- // For strided load
- // Input: (pointer, stride, vl)
+ [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic {
+ let VLOperand = 3;
+ }
+ // For strided load with passthru operand
+ // Input: (passthru, pointer, stride, vl)
class RISCVSLoad
: Intrinsic<[llvm_anyvector_ty],
- [LLVMPointerType<LLVMMatchType<0>>,
+ [LLVMMatchType<0>,
+ LLVMPointerType<LLVMMatchType<0>>,
llvm_anyint_ty, LLVMMatchType<1>],
- [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
+ [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
+ let VLOperand = 3;
+ }
// For strided load with mask
// Input: (maskedoff, pointer, stride, mask, vl, ta)
class RISCVSLoadMask
@@ -196,14 +263,19 @@ let TargetPrefix = "riscv" in {
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
LLVMMatchType<1>],
[NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
- RISCVVIntrinsic;
- // For indexed load
- // Input: (pointer, index, vl)
+ RISCVVIntrinsic {
+ let VLOperand = 4;
+ }
+ // For indexed load with passthru operand
+ // Input: (passthru, pointer, index, vl)
class RISCVILoad
: Intrinsic<[llvm_anyvector_ty],
- [LLVMPointerType<LLVMMatchType<0>>,
+ [LLVMMatchType<0>,
+ LLVMPointerType<LLVMMatchType<0>>,
llvm_anyvector_ty, llvm_anyint_ty],
- [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
+ [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
+ let VLOperand = 3;
+ }
// For indexed load with mask
// Input: (maskedoff, pointer, index, mask, vl, ta)
class RISCVILoadMask
@@ -213,7 +285,9 @@ let TargetPrefix = "riscv" in {
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
LLVMMatchType<2>],
[NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
- RISCVVIntrinsic;
+ RISCVVIntrinsic {
+ let VLOperand = 4;
+ }
// For unit stride store
// Input: (vector_in, pointer, vl)
class RISCVUSStore
@@ -221,7 +295,9 @@ let TargetPrefix = "riscv" in {
[llvm_anyvector_ty,
LLVMPointerType<LLVMMatchType<0>>,
llvm_anyint_ty],
- [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
+ [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
+ let VLOperand = 2;
+ }
// For unit stride store with mask
// Input: (vector_in, pointer, mask, vl)
class RISCVUSStoreMask
@@ -230,7 +306,9 @@ let TargetPrefix = "riscv" in {
LLVMPointerType<LLVMMatchType<0>>,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
llvm_anyint_ty],
- [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
+ [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
+ let VLOperand = 3;
+ }
// For strided store
// Input: (vector_in, pointer, stride, vl)
class RISCVSStore
@@ -238,7 +316,9 @@ let TargetPrefix = "riscv" in {
[llvm_anyvector_ty,
LLVMPointerType<LLVMMatchType<0>>,
llvm_anyint_ty, LLVMMatchType<1>],
- [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
+ [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
+ let VLOperand = 3;
+ }
  // For strided store with mask
  // Input: (vector_in, pointer, stride, mask, vl)
class RISCVSStoreMask
@@ -246,7 +326,9 @@ let TargetPrefix = "riscv" in {
[llvm_anyvector_ty,
LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
- [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
+ [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
+ let VLOperand = 4;
+ }
// For indexed store
// Input: (vector_in, pointer, index, vl)
class RISCVIStore
@@ -254,7 +336,9 @@ let TargetPrefix = "riscv" in {
[llvm_anyvector_ty,
LLVMPointerType<LLVMMatchType<0>>,
llvm_anyint_ty, llvm_anyint_ty],
- [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
+ [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
+ let VLOperand = 3;
+ }
// For indexed store with mask
// Input: (vector_in, pointer, index, mask, vl)
class RISCVIStoreMask
@@ -262,13 +346,17 @@ let TargetPrefix = "riscv" in {
[llvm_anyvector_ty,
LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
- [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
+ [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
+ let VLOperand = 4;
+ }
// For destination vector type is the same as source vector.
// Input: (vector_in, vl)
class RISCVUnaryAANoMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic;
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 1;
+ }
// For destination vector type is the same as first source vector (with mask).
// Input: (vector_in, mask, vl, ta)
class RISCVUnaryAAMask
@@ -276,24 +364,32 @@ let TargetPrefix = "riscv" in {
[LLVMMatchType<0>, LLVMMatchType<0>,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
LLVMMatchType<1>],
- [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic;
+ [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 3;
+ }
class RISCVUnaryAAMaskNoTA
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic;
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 3;
+ }
// For destination vector type is the same as first and second source vector.
// Input: (vector_in, vector_in, vl)
class RISCVBinaryAAANoMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic;
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 2;
+ }
// For destination vector type is the same as first and second source vector.
// Input: (vector_in, int_vector_in, vl)
class RISCVRGatherVVNoMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic;
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 2;
+ }
// For destination vector type is the same as first and second source vector.
// Input: (vector_in, vector_in, int_vector_in, vl, ta)
class RISCVRGatherVVMask
@@ -301,22 +397,28 @@ let TargetPrefix = "riscv" in {
[LLVMMatchType<0>, LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
LLVMMatchType<1>],
- [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic;
+ [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 4;
+ }
// Input: (vector_in, int16_vector_in, vl)
class RISCVRGatherEI16VVNoMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic;
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 2;
+ }
// For destination vector type is the same as first and second source vector.
// Input: (vector_in, vector_in, int16_vector_in, vl, ta)
class RISCVRGatherEI16VVMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>,
- LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
LLVMMatchType<1>],
- [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic;
+ [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 4;
+ }
// For destination vector type is the same as first source vector, and the
// second operand is XLen.
// Input: (vector_in, xlen_in, vl)
@@ -324,6 +426,7 @@ let TargetPrefix = "riscv" in {
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>],
[IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 2;
}
// For destination vector type is the same as first source vector (with mask).
// Second operand is XLen.
@@ -334,6 +437,7 @@ let TargetPrefix = "riscv" in {
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
LLVMMatchType<1>],
[ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 4;
}
// For destination vector type is the same as first source vector.
// Input: (vector_in, vector_in/scalar_in, vl)
@@ -341,7 +445,8 @@ let TargetPrefix = "riscv" in {
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
- let SplatOperand = 2;
+ let SplatOperand = 1;
+ let VLOperand = 2;
}
// For destination vector type is the same as first source vector (with mask).
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
@@ -351,7 +456,8 @@ let TargetPrefix = "riscv" in {
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
LLVMMatchType<2>],
[ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
- let SplatOperand = 3;
+ let SplatOperand = 2;
+ let VLOperand = 4;
}
// For destination vector type is the same as first source vector. The
// second source operand must match the destination type or be an XLen scalar.
@@ -359,7 +465,9 @@ let TargetPrefix = "riscv" in {
class RISCVBinaryAAShiftNoMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic;
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 2;
+ }
// For destination vector type is the same as first source vector (with mask).
// The second source operand must match the destination type or be an XLen scalar.
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
@@ -368,14 +476,17 @@ let TargetPrefix = "riscv" in {
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
LLVMMatchType<2>],
- [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic;
+ [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 4;
+ }
// For destination vector type is NOT the same as first source vector.
// Input: (vector_in, vector_in/scalar_in, vl)
class RISCVBinaryABXNoMask
: Intrinsic<[llvm_anyvector_ty],
[llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
- let SplatOperand = 2;
+ let SplatOperand = 1;
+ let VLOperand = 2;
}
// For destination vector type is NOT the same as first source vector (with mask).
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
@@ -385,7 +496,8 @@ let TargetPrefix = "riscv" in {
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
LLVMMatchType<3>],
[ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
- let SplatOperand = 3;
+ let SplatOperand = 2;
+ let VLOperand = 4;
}
// For destination vector type is NOT the same as first source vector. The
// second source operand must match the destination type or be an XLen scalar.
@@ -393,7 +505,9 @@ let TargetPrefix = "riscv" in {
class RISCVBinaryABShiftNoMask
: Intrinsic<[llvm_anyvector_ty],
[llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic;
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 2;
+ }
// For destination vector type is NOT the same as first source vector (with mask).
// The second source operand must match the destination type or be an XLen scalar.
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
@@ -402,7 +516,9 @@ let TargetPrefix = "riscv" in {
[LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
LLVMMatchType<3>],
- [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic;
+ [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 4;
+ }
// For binary operations with V0 as input.
// Input: (vector_in, vector_in/scalar_in, V0, vl)
class RISCVBinaryWithV0
@@ -411,7 +527,8 @@ let TargetPrefix = "riscv" in {
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
- let SplatOperand = 2;
+ let SplatOperand = 1;
+ let VLOperand = 3;
}
// For binary operations with mask type output and V0 as input.
// Output: (mask type output)
@@ -422,7 +539,8 @@ let TargetPrefix = "riscv" in {
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
- let SplatOperand = 2;
+ let SplatOperand = 1;
+ let VLOperand = 3;
}
// For binary operations with mask type output.
// Output: (mask type output)
@@ -431,7 +549,8 @@ let TargetPrefix = "riscv" in {
: Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
[llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
- let SplatOperand = 2;
+ let SplatOperand = 1;
+ let VLOperand = 2;
}
// For binary operations with mask type output without mask.
// Output: (mask type output)
@@ -440,7 +559,8 @@ let TargetPrefix = "riscv" in {
: Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
[llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
- let SplatOperand = 2;
+ let SplatOperand = 1;
+ let VLOperand = 2;
}
// For binary operations with mask type output with mask.
// Output: (mask type output)
@@ -451,7 +571,8 @@ let TargetPrefix = "riscv" in {
llvm_anyvector_ty, llvm_any_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
- let SplatOperand = 3;
+ let SplatOperand = 2;
+ let VLOperand = 4;
}
// For FP classify operations.
// Output: (bit mask type output)
@@ -459,7 +580,9 @@ let TargetPrefix = "riscv" in {
class RISCVClassifyNoMask
: Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
[llvm_anyvector_ty, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic;
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 1;
+ }
// For FP classify operations with mask.
// Output: (bit mask type output)
// Input: (maskedoff, vector_in, mask, vl)
@@ -467,7 +590,9 @@ let TargetPrefix = "riscv" in {
: Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
[LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic;
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 3;
+ }
// For Saturating binary operations.
// The destination vector type is the same as first source vector.
// Input: (vector_in, vector_in/scalar_in, vl)
@@ -475,7 +600,8 @@ let TargetPrefix = "riscv" in {
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
[IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
- let SplatOperand = 2;
+ let SplatOperand = 1;
+ let VLOperand = 2;
}
// For Saturating binary operations with mask.
// The destination vector type is the same as first source vector.
@@ -486,7 +612,8 @@ let TargetPrefix = "riscv" in {
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
LLVMMatchType<2>],
[ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
- let SplatOperand = 3;
+ let SplatOperand = 2;
+ let VLOperand = 4;
}
// For Saturating binary operations.
// The destination vector type is the same as first source vector.
@@ -495,7 +622,9 @@ let TargetPrefix = "riscv" in {
class RISCVSaturatingBinaryAAShiftNoMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
- [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic;
+ [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
+ let VLOperand = 2;
+ }
// For Saturating binary operations with mask.
// The destination vector type is the same as first source vector.
// The second source operand matches the destination type or is an XLen scalar.
@@ -505,7 +634,9 @@ let TargetPrefix = "riscv" in {
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
LLVMMatchType<2>],
- [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic;
+ [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
+ let VLOperand = 4;
+ }
// For Saturating binary operations.
// The destination vector type is NOT the same as first source vector.
// The second source operand matches the destination type or is an XLen scalar.
@@ -513,7 +644,9 @@ let TargetPrefix = "riscv" in {
class RISCVSaturatingBinaryABShiftNoMask
: Intrinsic<[llvm_anyvector_ty],
[llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
- [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic;
+ [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
+ let VLOperand = 2;
+ }
// For Saturating binary operations with mask.
// The destination vector type is NOT the same as first source vector (with mask).
// The second source operand matches the destination type or is an XLen scalar.
@@ -523,44 +656,54 @@ let TargetPrefix = "riscv" in {
[LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
LLVMMatchType<3>],
- [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic;
+ [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
+ let VLOperand = 4;
+ }
class RISCVTernaryAAAXNoMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
LLVMMatchType<1>],
- [IntrNoMem]>, RISCVVIntrinsic;
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 3;
+ }
class RISCVTernaryAAAXMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
- [IntrNoMem]>, RISCVVIntrinsic;
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 4;
+ }
class RISCVTernaryAAXANoMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
- let SplatOperand = 2;
+ let SplatOperand = 1;
+ let VLOperand = 3;
}
class RISCVTernaryAAXAMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
- let SplatOperand = 2;
+ let SplatOperand = 1;
+ let VLOperand = 4;
}
class RISCVTernaryWideNoMask
: Intrinsic< [llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
llvm_anyint_ty],
[IntrNoMem] >, RISCVVIntrinsic {
- let SplatOperand = 2;
+ let SplatOperand = 1;
+ let VLOperand = 3;
}
class RISCVTernaryWideMask
: Intrinsic< [llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
- let SplatOperand = 2;
+ let SplatOperand = 1;
+ let VLOperand = 4;
}
// For Reduction ternary operations.
// For destination vector type is the same as first and third source vector.
@@ -569,7 +712,9 @@ let TargetPrefix = "riscv" in {
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic;
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 3;
+ }
// For Reduction ternary operations with mask.
// For destination vector type is the same as first and third source vector.
  // The mask type comes from the second source vector.
@@ -578,27 +723,35 @@ let TargetPrefix = "riscv" in {
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic;
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 4;
+ }
// For unary operations with scalar type output without mask
// Output: (scalar type)
// Input: (vector_in, vl)
class RISCVMaskUnarySOutNoMask
: Intrinsic<[LLVMMatchType<1>],
[llvm_anyvector_ty, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic;
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 1;
+ }
// For unary operations with scalar type output with mask
// Output: (scalar type)
// Input: (vector_in, mask, vl)
class RISCVMaskUnarySOutMask
: Intrinsic<[LLVMMatchType<1>],
[llvm_anyvector_ty, LLVMMatchType<0>, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic;
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 2;
+ }
// For destination vector type is NOT the same as source vector.
// Input: (vector_in, vl)
class RISCVUnaryABNoMask
: Intrinsic<[llvm_anyvector_ty],
[llvm_anyvector_ty, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic;
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 1;
+ }
// For destination vector type is NOT the same as source vector (with mask).
// Input: (maskedoff, vector_in, mask, vl, ta)
class RISCVUnaryABMask
@@ -606,14 +759,18 @@ let TargetPrefix = "riscv" in {
[LLVMMatchType<0>, llvm_anyvector_ty,
LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
llvm_anyint_ty, LLVMMatchType<2>],
- [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic;
+ [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 3;
+ }
// For unary operations with the same vector type in/out without mask
// Output: (vector)
// Input: (vector_in, vl)
class RISCVUnaryNoMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic;
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 1;
+ }
// For mask unary operations with mask type in/out with mask
// Output: (mask type output)
// Input: (mask type maskedoff, mask type vector_in, mask, vl)
@@ -621,19 +778,25 @@ let TargetPrefix = "riscv" in {
: Intrinsic<[llvm_anyint_ty],
[LLVMMatchType<0>, LLVMMatchType<0>,
LLVMMatchType<0>, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic;
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 3;
+ }
// Output: (vector)
// Input: (vl)
class RISCVNullaryIntrinsic
: Intrinsic<[llvm_anyvector_ty],
[llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic;
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 0;
+ }
// For Conversion unary operations.
// Input: (vector_in, vl)
class RISCVConversionNoMask
: Intrinsic<[llvm_anyvector_ty],
[llvm_anyvector_ty, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic;
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 1;
+ }
// For Conversion unary operations with mask.
// Input: (maskedoff, vector_in, mask, vl, ta)
class RISCVConversionMask
@@ -641,7 +804,9 @@ let TargetPrefix = "riscv" in {
[LLVMMatchType<0>, llvm_anyvector_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
LLVMMatchType<2>],
- [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic;
+ [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 3;
+ }
// For unit stride segment load
// Input: (pointer, vl)
@@ -649,7 +814,9 @@ let TargetPrefix = "riscv" in {
: Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
!add(nf, -1))),
[LLVMPointerToElt<0>, llvm_anyint_ty],
- [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
+ [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
+ let VLOperand = 1;
+ }
// For unit stride segment load with mask
// Input: (maskedoff, pointer, mask, vl, ta)
class RISCVUSSegLoadMask<int nf>
@@ -660,7 +827,9 @@ let TargetPrefix = "riscv" in {
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
llvm_anyint_ty, LLVMMatchType<1>]),
[ImmArg<ArgIndex<!add(nf, 3)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
- RISCVVIntrinsic;
+ RISCVVIntrinsic {
+ let VLOperand = !add(nf, 2);
+ }
// For unit stride fault-only-first segment load
// Input: (pointer, vl)
@@ -671,7 +840,9 @@ let TargetPrefix = "riscv" in {
: Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
!add(nf, -1)), [llvm_anyint_ty]),
[LLVMPointerToElt<0>, LLVMMatchType<1>],
- [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;
+ [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic {
+ let VLOperand = 1;
+ }
// For unit stride fault-only-first segment load with mask
// Input: (maskedoff, pointer, mask, vl, ta)
// Output: (data, vl)
@@ -685,7 +856,9 @@ let TargetPrefix = "riscv" in {
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
LLVMMatchType<1>, LLVMMatchType<1>]),
[ImmArg<ArgIndex<!add(nf, 3)>>, NoCapture<ArgIndex<nf>>]>,
- RISCVVIntrinsic;
+ RISCVVIntrinsic {
+ let VLOperand = !add(nf, 2);
+ }
// For stride segment load
// Input: (pointer, offset, vl)
@@ -693,7 +866,9 @@ let TargetPrefix = "riscv" in {
: Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
!add(nf, -1))),
[LLVMPointerToElt<0>, llvm_anyint_ty, LLVMMatchType<1>],
- [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
+ [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
+ let VLOperand = 2;
+ }
// For stride segment load with mask
// Input: (maskedoff, pointer, offset, mask, vl, ta)
class RISCVSSegLoadMask<int nf>
@@ -705,7 +880,9 @@ let TargetPrefix = "riscv" in {
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
LLVMMatchType<1>, LLVMMatchType<1>]),
[ImmArg<ArgIndex<!add(nf, 4)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
- RISCVVIntrinsic;
+ RISCVVIntrinsic {
+ let VLOperand = !add(nf, 3);
+ }
// For indexed segment load
// Input: (pointer, index, vl)
@@ -713,7 +890,9 @@ let TargetPrefix = "riscv" in {
: Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
!add(nf, -1))),
[LLVMPointerToElt<0>, llvm_anyvector_ty, llvm_anyint_ty],
- [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
+ [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
+ let VLOperand = 2;
+ }
// For indexed segment load with mask
// Input: (maskedoff, pointer, index, mask, vl, ta)
class RISCVISegLoadMask<int nf>
@@ -725,7 +904,9 @@ let TargetPrefix = "riscv" in {
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
llvm_anyint_ty, LLVMMatchType<2>]),
[ImmArg<ArgIndex<!add(nf, 4)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
- RISCVVIntrinsic;
+ RISCVVIntrinsic {
+ let VLOperand = !add(nf, 3);
+ }
// For unit stride segment store
// Input: (value, pointer, vl)
@@ -734,7 +915,9 @@ let TargetPrefix = "riscv" in {
!listconcat([llvm_anyvector_ty],
!listsplat(LLVMMatchType<0>, !add(nf, -1)),
[LLVMPointerToElt<0>, llvm_anyint_ty]),
- [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
+ [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
+ let VLOperand = !add(nf, 1);
+ }
// For unit stride segment store with mask
// Input: (value, pointer, mask, vl)
class RISCVUSSegStoreMask<int nf>
@@ -744,7 +927,9 @@ let TargetPrefix = "riscv" in {
[LLVMPointerToElt<0>,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
llvm_anyint_ty]),
- [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
+ [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
+ let VLOperand = !add(nf, 2);
+ }
// For stride segment store
// Input: (value, pointer, offset, vl)
@@ -754,7 +939,9 @@ let TargetPrefix = "riscv" in {
!listsplat(LLVMMatchType<0>, !add(nf, -1)),
[LLVMPointerToElt<0>, llvm_anyint_ty,
LLVMMatchType<1>]),
- [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
+ [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
+ let VLOperand = !add(nf, 2);
+ }
// For stride segment store with mask
// Input: (value, pointer, offset, mask, vl)
class RISCVSSegStoreMask<int nf>
@@ -764,7 +951,9 @@ let TargetPrefix = "riscv" in {
[LLVMPointerToElt<0>, llvm_anyint_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
LLVMMatchType<1>]),
- [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
+ [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
+ let VLOperand = !add(nf, 3);
+ }
// For indexed segment store
// Input: (value, pointer, offset, vl)
@@ -774,7 +963,9 @@ let TargetPrefix = "riscv" in {
!listsplat(LLVMMatchType<0>, !add(nf, -1)),
[LLVMPointerToElt<0>, llvm_anyvector_ty,
llvm_anyint_ty]),
- [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
+ [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
+ let VLOperand = !add(nf, 2);
+ }
// For indexed segment store with mask
// Input: (value, pointer, offset, mask, vl)
class RISCVISegStoreMask<int nf>
@@ -784,7 +975,9 @@ let TargetPrefix = "riscv" in {
[LLVMPointerToElt<0>, llvm_anyvector_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
llvm_anyint_ty]),
- [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
+ [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
+ let VLOperand = !add(nf, 3);
+ }
multiclass RISCVUSLoad {
def "int_riscv_" # NAME : RISCVUSLoad;
@@ -955,7 +1148,7 @@ let TargetPrefix = "riscv" in {
defm vsoxei : RISCVIStore;
defm vsuxei : RISCVIStore;
- def int_riscv_vlm : RISCVUSLoad;
+ def int_riscv_vlm : RISCVUSMLoad;
def int_riscv_vsm : RISCVUSStore;
defm vadd : RISCVBinaryAAX;
@@ -1051,13 +1244,19 @@ let TargetPrefix = "riscv" in {
def int_riscv_vmv_v_v : Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic;
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 1;
+ }
def int_riscv_vmv_v_x : Intrinsic<[llvm_anyint_ty],
[LLVMVectorElementType<0>, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic;
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 1;
+ }
def int_riscv_vfmv_v_f : Intrinsic<[llvm_anyfloat_ty],
[LLVMVectorElementType<0>, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic;
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 1;
+ }
def int_riscv_vmv_x_s : Intrinsic<[LLVMVectorElementType<0>],
[llvm_anyint_ty],
@@ -1065,7 +1264,9 @@ let TargetPrefix = "riscv" in {
def int_riscv_vmv_s_x : Intrinsic<[llvm_anyint_ty],
[LLVMMatchType<0>, LLVMVectorElementType<0>,
llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic;
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 2;
+ }
def int_riscv_vfmv_f_s : Intrinsic<[LLVMVectorElementType<0>],
[llvm_anyfloat_ty],
@@ -1073,7 +1274,9 @@ let TargetPrefix = "riscv" in {
def int_riscv_vfmv_s_f : Intrinsic<[llvm_anyfloat_ty],
[LLVMMatchType<0>, LLVMVectorElementType<0>,
llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic;
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 2;
+ }
defm vfmul : RISCVBinaryAAX;
defm vfdiv : RISCVBinaryAAX;
@@ -1210,7 +1413,9 @@ let TargetPrefix = "riscv" in {
def int_riscv_viota : Intrinsic<[llvm_anyvector_ty],
[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic;
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 1;
+ }
// Output: (vector)
// Input: (maskedoff, mask type vector_in, mask, vl)
def int_riscv_viota_mask : Intrinsic<[llvm_anyvector_ty],
@@ -1218,7 +1423,9 @@ let TargetPrefix = "riscv" in {
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic;
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 3;
+ }
// Output: (vector)
// Input: (vl)
def int_riscv_vid : RISCVNullaryIntrinsic;
@@ -1229,7 +1436,9 @@ let TargetPrefix = "riscv" in {
[LLVMMatchType<0>,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic;
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 2;
+ }
foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
defm vlseg # nf : RISCVUSSegLoad<nf>;
@@ -1255,3 +1464,92 @@ let TargetPrefix = "riscv" in {
llvm_anyint_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
[NoCapture<ArgIndex<1>>, IntrWriteMem]>;
} // TargetPrefix = "riscv"
+
+//===----------------------------------------------------------------------===//
+// Scalar Cryptography
+//
+// These intrinsics will lower directly into the corresponding instructions
+// added by the scalar cryptography extension, if the extension is present.
+
+let TargetPrefix = "riscv" in {
+
+class ScalarCryptoGprIntrinsicAny
+ : Intrinsic<[llvm_anyint_ty],
+ [LLVMMatchType<0>],
+ [IntrNoMem, IntrSpeculatable]>;
+
+class ScalarCryptoByteSelect32
+ : Intrinsic<[llvm_i32_ty],
+ [llvm_i32_ty, llvm_i32_ty, llvm_i8_ty],
+ [IntrNoMem, IntrWillReturn, IntrSpeculatable,
+ ImmArg<ArgIndex<2>>]>;
+
+class ScalarCryptoGprGprIntrinsic32
+ : Intrinsic<[llvm_i32_ty],
+ [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;
+
+class ScalarCryptoGprGprIntrinsic64
+ : Intrinsic<[llvm_i64_ty],
+ [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;
+
+class ScalarCryptoGprIntrinsic64
+ : Intrinsic<[llvm_i64_ty],
+ [llvm_i64_ty],
+ [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;
+
+class ScalarCryptoByteSelectAny
+ : Intrinsic<[llvm_anyint_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i8_ty],
+ [IntrNoMem, IntrSpeculatable, IntrWillReturn,
+ ImmArg<ArgIndex<2>>, Returned<ArgIndex<0>>]>;
+
+// Zknd
+def int_riscv_aes32dsi : ScalarCryptoByteSelect32;
+def int_riscv_aes32dsmi : ScalarCryptoByteSelect32;
+
+def int_riscv_aes64ds : ScalarCryptoGprGprIntrinsic64;
+def int_riscv_aes64dsm : ScalarCryptoGprGprIntrinsic64;
+
+def int_riscv_aes64im : ScalarCryptoGprIntrinsic64;
+
+// Zkne
+def int_riscv_aes32esi : ScalarCryptoByteSelect32;
+def int_riscv_aes32esmi : ScalarCryptoByteSelect32;
+
+def int_riscv_aes64es : ScalarCryptoGprGprIntrinsic64;
+def int_riscv_aes64esm : ScalarCryptoGprGprIntrinsic64;
+
+// Zknd & Zkne
+def int_riscv_aes64ks2 : ScalarCryptoGprGprIntrinsic64;
+def int_riscv_aes64ks1i : Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty],
+ [IntrNoMem, IntrSpeculatable,
+ IntrWillReturn, ImmArg<ArgIndex<1>>]>;
+
+// Zknh
+def int_riscv_sha256sig0 : ScalarCryptoGprIntrinsicAny;
+def int_riscv_sha256sig1 : ScalarCryptoGprIntrinsicAny;
+def int_riscv_sha256sum0 : ScalarCryptoGprIntrinsicAny;
+def int_riscv_sha256sum1 : ScalarCryptoGprIntrinsicAny;
+
+def int_riscv_sha512sig0l : ScalarCryptoGprGprIntrinsic32;
+def int_riscv_sha512sig0h : ScalarCryptoGprGprIntrinsic32;
+def int_riscv_sha512sig1l : ScalarCryptoGprGprIntrinsic32;
+def int_riscv_sha512sig1h : ScalarCryptoGprGprIntrinsic32;
+def int_riscv_sha512sum0r : ScalarCryptoGprGprIntrinsic32;
+def int_riscv_sha512sum1r : ScalarCryptoGprGprIntrinsic32;
+
+def int_riscv_sha512sig0 : ScalarCryptoGprIntrinsic64;
+def int_riscv_sha512sig1 : ScalarCryptoGprIntrinsic64;
+def int_riscv_sha512sum0 : ScalarCryptoGprIntrinsic64;
+def int_riscv_sha512sum1 : ScalarCryptoGprIntrinsic64;
+
+// Zksed
+def int_riscv_sm4ks : ScalarCryptoByteSelectAny;
+def int_riscv_sm4ed : ScalarCryptoByteSelectAny;
+
+// Zksh
+def int_riscv_sm3p0 : ScalarCryptoGprIntrinsicAny;
+def int_riscv_sm3p1 : ScalarCryptoGprIntrinsicAny;
+} // TargetPrefix = "riscv"
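A hedged sketch of front-end emission for one of the new scalar-crypto intrinsics; emitSha256Sig0 is an invented helper, and since riscv.sha256sig0 is overloaded on any integer type (ScalarCryptoGprIntrinsicAny above), the operand's type is passed as the overload type.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicsRISCV.h"

static llvm::Value *emitSha256Sig0(llvm::IRBuilder<> &B, llvm::Value *X) {
  // One overloaded type (the XLen-sized integer), one operand.
  return B.CreateIntrinsic(llvm::Intrinsic::riscv_sha256sig0, {X->getType()},
                           {X});
}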
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/LLVMContext.h b/contrib/llvm-project/llvm/include/llvm/IR/LLVMContext.h
index 1c902ebce5ad..d165a405ce22 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/LLVMContext.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/LLVMContext.h
@@ -15,6 +15,7 @@
#define LLVM_IR_LLVMCONTEXT_H
#include "llvm-c/Types.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/IR/DiagnosticHandler.h"
#include "llvm/Support/CBindingWrapping.h"
#include <cstdint>
@@ -32,7 +33,6 @@ class Module;
class OptPassGate;
template <typename T> class SmallVectorImpl;
template <typename T> class StringMapEntry;
-class SMDiagnostic;
class StringRef;
class Twine;
class LLVMRemarkStreamer;
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/LegacyPassManagers.h b/contrib/llvm-project/llvm/include/llvm/IR/LegacyPassManagers.h
index 0bcb408d4929..311a407f1a19 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/LegacyPassManagers.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/LegacyPassManagers.h
@@ -90,7 +90,6 @@ template <typename T> class ArrayRef;
class Module;
class StringRef;
class Value;
-class Timer;
class PMDataManager;
// enums for debugging strings
@@ -460,8 +459,7 @@ private:
class FPPassManager : public ModulePass, public PMDataManager {
public:
static char ID;
- explicit FPPassManager()
- : ModulePass(ID), PMDataManager() { }
+ explicit FPPassManager() : ModulePass(ID) {}
/// run - Execute all of the passes scheduled for execution. Keep track of
/// whether any of the passes modifies the module, and if so, return true.
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/MatrixBuilder.h b/contrib/llvm-project/llvm/include/llvm/IR/MatrixBuilder.h
index 6cc5797269e2..4c8286692ebf 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/MatrixBuilder.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/MatrixBuilder.h
@@ -68,7 +68,7 @@ public:
// Deal with the pointer
PointerType *PtrTy = cast<PointerType>(DataPtr->getType());
- Type *EltTy = PtrTy->getElementType();
+ Type *EltTy = PtrTy->getPointerElementType();
auto *RetType = FixedVectorType::get(EltTy, Rows * Columns);
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/Module.h b/contrib/llvm-project/llvm/include/llvm/IR/Module.h
index 4ddbd6ff14b3..7b834fbeeebf 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/Module.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/Module.h
@@ -47,9 +47,7 @@ class GVMaterializer;
class LLVMContext;
class MemoryBuffer;
class ModuleSummaryIndex;
-class Pass;
class RandomNumberGenerator;
-template <class PtrType> class SmallPtrSetImpl;
class StructType;
class VersionTuple;
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/NoFolder.h b/contrib/llvm-project/llvm/include/llvm/IR/NoFolder.h
index dcffa6b2f9da..ec149747e3f4 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/NoFolder.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/NoFolder.h
@@ -38,18 +38,37 @@ public:
explicit NoFolder() = default;
//===--------------------------------------------------------------------===//
- // Binary Operators
+ // Value-based folders.
+ //
+ // Return an existing value or a constant if the operation can be simplified.
+ // Otherwise return nullptr.
//===--------------------------------------------------------------------===//
+ Value *FoldAdd(Value *LHS, Value *RHS, bool HasNUW = false,
+ bool HasNSW = false) const override {
+ return nullptr;
+ }
- Instruction *CreateAdd(Constant *LHS, Constant *RHS,
- bool HasNUW = false,
- bool HasNSW = false) const override {
- BinaryOperator *BO = BinaryOperator::CreateAdd(LHS, RHS);
- if (HasNUW) BO->setHasNoUnsignedWrap();
- if (HasNSW) BO->setHasNoSignedWrap();
- return BO;
+ Value *FoldAnd(Value *LHS, Value *RHS) const override { return nullptr; }
+
+ Value *FoldOr(Value *LHS, Value *RHS) const override { return nullptr; }
+
+ Value *FoldICmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
+ return nullptr;
+ }
+
+ Value *FoldGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
+ bool IsInBounds = false) const override {
+ return nullptr;
}
+ Value *FoldSelect(Value *C, Value *True, Value *False) const override {
+ return nullptr;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Binary Operators
+ //===--------------------------------------------------------------------===//
+
Instruction *CreateFAdd(Constant *LHS, Constant *RHS) const override {
return BinaryOperator::CreateFAdd(LHS, RHS);
}
@@ -132,14 +151,6 @@ public:
return BinaryOperator::CreateExactAShr(LHS, RHS);
}
- Instruction *CreateAnd(Constant *LHS, Constant *RHS) const override {
- return BinaryOperator::CreateAnd(LHS, RHS);
- }
-
- Instruction *CreateOr(Constant *LHS, Constant *RHS) const override {
- return BinaryOperator::CreateOr(LHS, RHS);
- }
-
Instruction *CreateXor(Constant *LHS, Constant *RHS) const override {
return BinaryOperator::CreateXor(LHS, RHS);
}
@@ -176,46 +187,6 @@ public:
}
//===--------------------------------------------------------------------===//
- // Memory Instructions
- //===--------------------------------------------------------------------===//
-
- Constant *CreateGetElementPtr(Type *Ty, Constant *C,
- ArrayRef<Constant *> IdxList) const override {
- return ConstantExpr::getGetElementPtr(Ty, C, IdxList);
- }
-
- Constant *CreateGetElementPtr(Type *Ty, Constant *C,
- Constant *Idx) const override {
- // This form of the function only exists to avoid ambiguous overload
- // warnings about whether to convert Idx to ArrayRef<Constant *> or
- // ArrayRef<Value *>.
- return ConstantExpr::getGetElementPtr(Ty, C, Idx);
- }
-
- Instruction *CreateGetElementPtr(Type *Ty, Constant *C,
- ArrayRef<Value *> IdxList) const override {
- return GetElementPtrInst::Create(Ty, C, IdxList);
- }
-
- Constant *CreateInBoundsGetElementPtr(
- Type *Ty, Constant *C, ArrayRef<Constant *> IdxList) const override {
- return ConstantExpr::getInBoundsGetElementPtr(Ty, C, IdxList);
- }
-
- Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
- Constant *Idx) const override {
- // This form of the function only exists to avoid ambiguous overload
- // warnings about whether to convert Idx to ArrayRef<Constant *> or
- // ArrayRef<Value *>.
- return ConstantExpr::getInBoundsGetElementPtr(Ty, C, Idx);
- }
-
- Instruction *CreateInBoundsGetElementPtr(
- Type *Ty, Constant *C, ArrayRef<Value *> IdxList) const override {
- return GetElementPtrInst::CreateInBounds(Ty, C, IdxList);
- }
-
- //===--------------------------------------------------------------------===//
// Cast/Conversion Operators
//===--------------------------------------------------------------------===//
@@ -270,11 +241,6 @@ public:
// Compare Instructions
//===--------------------------------------------------------------------===//
- Instruction *CreateICmp(CmpInst::Predicate P,
- Constant *LHS, Constant *RHS) const override {
- return new ICmpInst(P, LHS, RHS);
- }
-
Instruction *CreateFCmp(CmpInst::Predicate P,
Constant *LHS, Constant *RHS) const override {
return new FCmpInst(P, LHS, RHS);
@@ -284,11 +250,6 @@ public:
// Other Instructions
//===--------------------------------------------------------------------===//
- Instruction *CreateSelect(Constant *C,
- Constant *True, Constant *False) const override {
- return SelectInst::Create(C, True, False);
- }
-
Instruction *CreateExtractElement(Constant *Vec,
Constant *Idx) const override {
return ExtractElementInst::Create(Vec, Idx);
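// Illustrative sketch (not part of the merged change): the contract behind the
// new Fold* hooks. A folder may hand back an existing value or constant, or
// return nullptr to make IRBuilder materialize an instruction. NoFolder always
// returns nullptr, so the add below is emitted as a real 'add' even when both
// operands are constants.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/NoFolder.h"

static llvm::Value *buildUnfoldedAdd(llvm::BasicBlock *BB, llvm::Value *L,
                                     llvm::Value *R) {
  llvm::IRBuilder<llvm::NoFolder> Builder(BB);
  return Builder.CreateAdd(L, R); // routed through FoldAdd, which returns null
}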
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/PatternMatch.h b/contrib/llvm-project/llvm/include/llvm/IR/PatternMatch.h
index 320deb80bb1f..f9f4f1603861 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/PatternMatch.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/PatternMatch.h
@@ -589,6 +589,9 @@ struct is_lowbit_mask {
inline cst_pred_ty<is_lowbit_mask> m_LowBitMask() {
return cst_pred_ty<is_lowbit_mask>();
}
+inline api_pred_ty<is_lowbit_mask> m_LowBitMask(const APInt *&V) {
+ return V;
+}
struct icmp_pred_with_threshold {
ICmpInst::Predicate Pred;
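// Illustrative sketch (not part of the merged change): the new capturing
// overload mirrors other api_pred_ty matchers such as m_Power2(V) and binds
// the matched constant.

#include "llvm/IR/PatternMatch.h"

static bool isLowBitMaskConstant(llvm::Value *V, const llvm::APInt *&Mask) {
  using namespace llvm::PatternMatch;
  // Succeeds for low-bit-mask constants such as 0x00ff and captures the value.
  return match(V, m_LowBitMask(Mask));
}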
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/PseudoProbe.h b/contrib/llvm-project/llvm/include/llvm/IR/PseudoProbe.h
index 51ba7e675efe..7d14213143c0 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/PseudoProbe.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/PseudoProbe.h
@@ -21,7 +21,6 @@
namespace llvm {
class Instruction;
-class BasicBlock;
constexpr const char *PseudoProbeDescMetadataName = "llvm.pseudo_probe_desc";
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/SSAContext.h b/contrib/llvm-project/llvm/include/llvm/IR/SSAContext.h
index 9d9290a2c1d7..8879512610c2 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/SSAContext.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/SSAContext.h
@@ -26,7 +26,6 @@ class BasicBlock;
class Function;
class Instruction;
class Value;
-template <typename> class SmallVectorImpl;
template <typename, bool> class DominatorTreeBase;
template <> class GenericSSAContext<Function> {
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/Statepoint.h b/contrib/llvm-project/llvm/include/llvm/IR/Statepoint.h
index c6251b9bf5c9..a254a67e6b1f 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/Statepoint.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/Statepoint.h
@@ -123,7 +123,7 @@ public:
/// statepoint.
Type *getActualReturnType() const {
auto *CalleeTy =
- cast<PointerType>(getActualCalledOperand()->getType())->getElementType();
+ getActualCalledOperand()->getType()->getPointerElementType();
return cast<FunctionType>(CalleeTy)->getReturnType();
}
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/Type.h b/contrib/llvm-project/llvm/include/llvm/IR/Type.h
index c899c46d4055..98c97375ad7b 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/Type.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/Type.h
@@ -366,7 +366,16 @@ public:
return ContainedTys[0];
}
+ /// This method is deprecated without replacement. Pointer element types are
+ /// not available with opaque pointers.
Type *getPointerElementType() const {
+ return getNonOpaquePointerElementType();
+ }
+
+ /// Only use this method in code that is not reachable with opaque pointers,
+ /// or part of deprecated methods that will be removed as part of the opaque
+ /// pointers transition.
+ Type *getNonOpaquePointerElementType() const {
assert(getTypeID() == PointerTyID);
assert(NumContainedTys &&
"Attempting to get element type of opaque pointer");
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/TypeFinder.h b/contrib/llvm-project/llvm/include/llvm/IR/TypeFinder.h
index a83f85ea84c3..dd2b70c65c2d 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/TypeFinder.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/TypeFinder.h
@@ -14,6 +14,7 @@
#define LLVM_IR_TYPEFINDER_H
#include "llvm/ADT/DenseSet.h"
+#include "llvm/IR/Attributes.h"
#include <cstddef>
#include <vector>
@@ -32,6 +33,7 @@ class TypeFinder {
// objects, we keep several helper maps.
DenseSet<const Value*> VisitedConstants;
DenseSet<const MDNode *> VisitedMetadata;
+ DenseSet<AttributeList> VisitedAttributes;
DenseSet<Type*> VisitedTypes;
std::vector<StructType*> StructTypes;
@@ -74,6 +76,9 @@ private:
/// incorporateMDNode - This method is used to walk the operands of an MDNode
/// to find types hiding within.
void incorporateMDNode(const MDNode *V);
+
+ /// Incorporate types referenced by attributes.
+ void incorporateAttributes(AttributeList AL);
};
} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/VPIntrinsics.def b/contrib/llvm-project/llvm/include/llvm/IR/VPIntrinsics.def
index 121c8bbc6c27..1abcbb874a8d 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/VPIntrinsics.def
+++ b/contrib/llvm-project/llvm/include/llvm/IR/VPIntrinsics.def
@@ -39,9 +39,9 @@
// same name. Since the operands are also the same, we open the property
// scopes for both the VPIntrinsic and the SDNode at once.
// \p VPSD The SelectionDAG Node id (eg VP_ADD).
-// \p LEGALPOS The operand position of the SDNode that is used for legalizing
-// this SDNode. This can be `-1`, in which case the return type of
-// the SDNode is used.
+// \p LEGALPOS The operand position of the SDNode that is used for legalizing.
+// If LEGALPOS < 0, then the return type given by
+// TheNode->getValueType(-1-LEGALPOS) is used.
// \p TDNAME The name of the TableGen definition of this SDNode.
// \p MASKPOS The mask operand position.
// \p EVLPOS The explicit vector length operand position.
@@ -349,6 +349,10 @@ BEGIN_REGISTER_VP(vp_select, 0, 3, VP_SELECT, -1)
VP_PROPERTY_FUNCTIONAL_OPC(Select)
END_REGISTER_VP(vp_select, VP_SELECT)
+// llvm.vp.merge(mask,on_true,on_false,pivot)
+BEGIN_REGISTER_VP(vp_merge, 0, 3, VP_MERGE, -1)
+END_REGISTER_VP(vp_merge, VP_MERGE)
+
BEGIN_REGISTER_VP(experimental_vp_splice, 3, 5, EXPERIMENTAL_VP_SPLICE, -1)
END_REGISTER_VP(experimental_vp_splice, EXPERIMENTAL_VP_SPLICE)
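// Illustrative sketch (not part of the merged change): the newly registered
// llvm.vp.merge takes on_true[i] for lanes where the mask is set and i is
// below the pivot, and on_false[i] otherwise. With LEGALPOS = -1, as
// registered above, legalization uses getValueType(-1 - (-1)), i.e. the
// result type. The builder call assumes the intrinsic is overloaded on the
// result vector type.

#include "llvm/IR/IRBuilder.h"

static llvm::Value *emitVPMerge(llvm::IRBuilder<> &B, llvm::Value *Mask,
                                llvm::Value *OnTrue, llvm::Value *OnFalse,
                                llvm::Value *Pivot) {
  return B.CreateIntrinsic(llvm::Intrinsic::vp_merge, {OnTrue->getType()},
                           {Mask, OnTrue, OnFalse, Pivot});
}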
diff --git a/contrib/llvm-project/llvm/include/llvm/InitializePasses.h b/contrib/llvm-project/llvm/include/llvm/InitializePasses.h
index 0c5ebc9a2f28..489ef045796f 100644
--- a/contrib/llvm-project/llvm/include/llvm/InitializePasses.h
+++ b/contrib/llvm-project/llvm/include/llvm/InitializePasses.h
@@ -380,6 +380,7 @@ void initializeReassociateLegacyPassPass(PassRegistry&);
void initializeRedundantDbgInstEliminationPass(PassRegistry&);
void initializeRegAllocEvictionAdvisorAnalysisPass(PassRegistry &);
void initializeRegAllocFastPass(PassRegistry&);
+void initializeRegAllocScoringPass(PassRegistry &);
void initializeRegBankSelectPass(PassRegistry&);
void initializeRegToMemLegacyPass(PassRegistry&);
void initializeRegUsageInfoCollectorPass(PassRegistry&);
diff --git a/contrib/llvm-project/llvm/include/llvm/LTO/LTO.h b/contrib/llvm-project/llvm/include/llvm/LTO/LTO.h
index d2b0fef1ca47..0d085a88a193 100644
--- a/contrib/llvm-project/llvm/include/llvm/LTO/LTO.h
+++ b/contrib/llvm-project/llvm/include/llvm/LTO/LTO.h
@@ -35,7 +35,6 @@ class LLVMContext;
class MemoryBufferRef;
class Module;
class raw_pwrite_stream;
-class Target;
class ToolOutputFile;
/// Resolve linkage for prevailing symbols in the \p Index. Linkage changes
diff --git a/contrib/llvm-project/llvm/include/llvm/LTO/legacy/LTOModule.h b/contrib/llvm-project/llvm/include/llvm/LTO/legacy/LTOModule.h
index 01e63db4bab3..1b2de3b33385 100644
--- a/contrib/llvm-project/llvm/include/llvm/LTO/legacy/LTOModule.h
+++ b/contrib/llvm-project/llvm/include/llvm/LTO/legacy/LTOModule.h
@@ -40,8 +40,8 @@ private:
struct NameAndAttributes {
StringRef name;
uint32_t attributes = 0;
- bool isFunction = 0;
- const GlobalValue *symbol = 0;
+ bool isFunction = false;
+ const GlobalValue *symbol = nullptr;
};
std::unique_ptr<LLVMContext> OwnedContext;
diff --git a/contrib/llvm-project/llvm/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h b/contrib/llvm-project/llvm/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h
index b8daea9441e0..be1f3154029c 100644
--- a/contrib/llvm-project/llvm/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h
+++ b/contrib/llvm-project/llvm/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h
@@ -29,7 +29,6 @@
namespace llvm {
class StringRef;
-class LLVMContext;
class TargetMachine;
/// Helper to gather options relevant to the target machine creation
diff --git a/contrib/llvm-project/llvm/include/llvm/Linker/Linker.h b/contrib/llvm-project/llvm/include/llvm/Linker/Linker.h
index c9b1d42b3903..ac8041d8df1a 100644
--- a/contrib/llvm-project/llvm/include/llvm/Linker/Linker.h
+++ b/contrib/llvm-project/llvm/include/llvm/Linker/Linker.h
@@ -14,8 +14,6 @@
namespace llvm {
class Module;
-class StructType;
-class Type;
/// This class provides the core functionality of linking in LLVM. It keeps a
/// pointer to the merged module so far. It doesn't take ownership of the
diff --git a/contrib/llvm-project/llvm/include/llvm/MC/MCContext.h b/contrib/llvm-project/llvm/include/llvm/MC/MCContext.h
index bde750759a0b..88d86d5b675a 100644
--- a/contrib/llvm-project/llvm/include/llvm/MC/MCContext.h
+++ b/contrib/llvm-project/llvm/include/llvm/MC/MCContext.h
@@ -814,10 +814,6 @@ namespace llvm {
void diagnose(const SMDiagnostic &SMD);
void reportError(SMLoc L, const Twine &Msg);
void reportWarning(SMLoc L, const Twine &Msg);
- // Unrecoverable error has occurred. Display the best diagnostic we can
- // and bail via exit(1). For now, most MC backend errors are unrecoverable.
- // FIXME: We should really do something about that.
- [[noreturn]] void reportFatalError(SMLoc L, const Twine &Msg);
const MCAsmMacro *lookupMacro(StringRef Name) {
StringMap<MCAsmMacro>::iterator I = MacroMap.find(Name);
diff --git a/contrib/llvm-project/llvm/include/llvm/MC/MCFixedLenDisassembler.h b/contrib/llvm-project/llvm/include/llvm/MC/MCFixedLenDisassembler.h
index 218ae0d13189..1edf3899c130 100644
--- a/contrib/llvm-project/llvm/include/llvm/MC/MCFixedLenDisassembler.h
+++ b/contrib/llvm-project/llvm/include/llvm/MC/MCFixedLenDisassembler.h
@@ -27,7 +27,7 @@ enum DecoderOps {
OPC_Fail // OPC_Fail()
};
-} // namespace MCDecode
+} // namespace MCD
} // namespace llvm
#endif
diff --git a/contrib/llvm-project/llvm/include/llvm/MC/MCParser/MCAsmParser.h b/contrib/llvm-project/llvm/include/llvm/MC/MCParser/MCAsmParser.h
index dbb76bd36591..29386ffc45ac 100644
--- a/contrib/llvm-project/llvm/include/llvm/MC/MCParser/MCAsmParser.h
+++ b/contrib/llvm-project/llvm/include/llvm/MC/MCParser/MCAsmParser.h
@@ -262,6 +262,7 @@ public:
bool parseOptionalToken(AsmToken::TokenKind T);
bool parseComma() { return parseToken(AsmToken::Comma, "expected comma"); }
+ bool parseRParen() { return parseToken(AsmToken::RParen, "expected ')'"); }
bool parseEOL();
bool parseEOL(const Twine &ErrMsg);
diff --git a/contrib/llvm-project/llvm/include/llvm/MC/MCPseudoProbe.h b/contrib/llvm-project/llvm/include/llvm/MC/MCPseudoProbe.h
index abc9705f0851..17b7446baae8 100644
--- a/contrib/llvm-project/llvm/include/llvm/MC/MCPseudoProbe.h
+++ b/contrib/llvm-project/llvm/include/llvm/MC/MCPseudoProbe.h
@@ -60,7 +60,6 @@
namespace llvm {
class MCSection;
-class MCStreamer;
class MCSymbol;
class MCObjectStreamer;
class raw_ostream;
diff --git a/contrib/llvm-project/llvm/include/llvm/MC/MCStreamer.h b/contrib/llvm-project/llvm/include/llvm/MC/MCStreamer.h
index 7bfbdb880098..3d6c512bfe73 100644
--- a/contrib/llvm-project/llvm/include/llvm/MC/MCStreamer.h
+++ b/contrib/llvm-project/llvm/include/llvm/MC/MCStreamer.h
@@ -27,6 +27,7 @@
#include "llvm/Support/Error.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/SMLoc.h"
+#include "llvm/Support/ARMTargetParser.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/VersionTuple.h"
#include <cassert>
@@ -40,7 +41,6 @@ namespace llvm {
class AssemblerConstantPools;
class MCAsmBackend;
-class MCCodeEmitter;
class MCContext;
struct MCDwarfFrameInfo;
class MCExpr;
diff --git a/contrib/llvm-project/llvm/include/llvm/MC/MCTargetOptions.h b/contrib/llvm-project/llvm/include/llvm/MC/MCTargetOptions.h
index 3510eeca8953..db50dc6749e2 100644
--- a/contrib/llvm-project/llvm/include/llvm/MC/MCTargetOptions.h
+++ b/contrib/llvm-project/llvm/include/llvm/MC/MCTargetOptions.h
@@ -62,7 +62,6 @@ public:
std::string ABIName;
std::string AssemblyLanguage;
std::string SplitDwarfFile;
- std::string COFFOutputFilename;
const char *Argv0 = nullptr;
ArrayRef<std::string> CommandLineArgs;
diff --git a/contrib/llvm-project/llvm/include/llvm/MC/SubtargetFeature.h b/contrib/llvm-project/llvm/include/llvm/MC/SubtargetFeature.h
index cc36b25a4965..032e2a7df1f2 100644
--- a/contrib/llvm-project/llvm/include/llvm/MC/SubtargetFeature.h
+++ b/contrib/llvm-project/llvm/include/llvm/MC/SubtargetFeature.h
@@ -18,6 +18,7 @@
#define LLVM_MC_SUBTARGETFEATURE_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/MathExtras.h"
#include <array>
#include <bitset>
diff --git a/contrib/llvm-project/llvm/include/llvm/MCA/CustomBehaviour.h b/contrib/llvm-project/llvm/include/llvm/MCA/CustomBehaviour.h
index 395b07cf722b..5b993c6a5345 100644
--- a/contrib/llvm-project/llvm/include/llvm/MCA/CustomBehaviour.h
+++ b/contrib/llvm-project/llvm/include/llvm/MCA/CustomBehaviour.h
@@ -43,6 +43,10 @@ public:
virtual ~InstrPostProcess() {}
+ /// This method can be overridden by targets to modify the mca::Instruction
+ /// object after it has been lowered from the MCInst.
+ /// This is generally a less disruptive alternative to modifying the
+ /// scheduling model.
virtual void postProcessInstruction(std::unique_ptr<Instruction> &Inst,
const MCInst &MCI) {}
};
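// Illustrative sketch (not part of the merged change): a target-specific
// InstrPostProcess can now adjust the lowered mca::Instruction instead of
// distorting the scheduling model. The opcode test is a placeholder;
// setLoadBarrier() is the InstructionBase flag introduced elsewhere in this
// merge.

#include "llvm/MCA/CustomBehaviour.h"

namespace {
class ExampleInstrPostProcess : public llvm::mca::InstrPostProcess {
public:
  using InstrPostProcess::InstrPostProcess;

  void postProcessInstruction(std::unique_ptr<llvm::mca::Instruction> &Inst,
                              const llvm::MCInst &MCI) override {
    if (MCI.getOpcode() == 0 /* placeholder for a target fence opcode */)
      Inst->setLoadBarrier(true);
  }
};
} // namespace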
diff --git a/contrib/llvm-project/llvm/include/llvm/MCA/HardwareUnits/LSUnit.h b/contrib/llvm-project/llvm/include/llvm/MCA/HardwareUnits/LSUnit.h
index 7eddd067aa0c..c05f770df8eb 100644
--- a/contrib/llvm-project/llvm/include/llvm/MCA/HardwareUnits/LSUnit.h
+++ b/contrib/llvm-project/llvm/include/llvm/MCA/HardwareUnits/LSUnit.h
@@ -55,7 +55,7 @@ public:
MemoryGroup()
: NumPredecessors(0), NumExecutingPredecessors(0),
NumExecutedPredecessors(0), NumInstructions(0), NumExecuting(0),
- NumExecuted(0), CriticalPredecessor(), CriticalMemoryInstruction() {}
+ NumExecuted(0), CriticalPredecessor() {}
MemoryGroup(MemoryGroup &&) = default;
size_t getNumSuccessors() const {
diff --git a/contrib/llvm-project/llvm/include/llvm/MCA/HardwareUnits/ResourceManager.h b/contrib/llvm-project/llvm/include/llvm/MCA/HardwareUnits/ResourceManager.h
index b679b0d7d537..7467fd6754f0 100644
--- a/contrib/llvm-project/llvm/include/llvm/MCA/HardwareUnits/ResourceManager.h
+++ b/contrib/llvm-project/llvm/include/llvm/MCA/HardwareUnits/ResourceManager.h
@@ -118,8 +118,8 @@ class DefaultResourceStrategy final : public ResourceStrategy {
public:
DefaultResourceStrategy(uint64_t UnitMask)
- : ResourceStrategy(), ResourceUnitMask(UnitMask),
- NextInSequenceMask(UnitMask), RemovedFromNextInSequence(0) {}
+ : ResourceUnitMask(UnitMask), NextInSequenceMask(UnitMask),
+ RemovedFromNextInSequence(0) {}
virtual ~DefaultResourceStrategy() = default;
uint64_t select(uint64_t ReadyMask) override;
diff --git a/contrib/llvm-project/llvm/include/llvm/MCA/Instruction.h b/contrib/llvm-project/llvm/include/llvm/MCA/Instruction.h
index 3eb32186d551..33e3c8a2e630 100644
--- a/contrib/llvm-project/llvm/include/llvm/MCA/Instruction.h
+++ b/contrib/llvm-project/llvm/include/llvm/MCA/Instruction.h
@@ -406,7 +406,7 @@ public:
bool operator<(const CycleSegment &Other) const {
return Begin < Other.Begin;
}
- CycleSegment &operator--(void) {
+ CycleSegment &operator--() {
if (Begin)
Begin--;
if (End)
@@ -517,9 +517,14 @@ class InstructionBase {
// Instruction opcode which can be used by mca::CustomBehaviour
unsigned Opcode;
+ // Flags used by the LSUnit.
+ bool IsALoadBarrier;
+ bool IsAStoreBarrier;
+
public:
InstructionBase(const InstrDesc &D, const unsigned Opcode)
- : Desc(D), IsOptimizableMove(false), Operands(0), Opcode(Opcode) {}
+ : Desc(D), IsOptimizableMove(false), Operands(0), Opcode(Opcode),
+ IsALoadBarrier(false), IsAStoreBarrier(false) {}
SmallVectorImpl<WriteState> &getDefs() { return Defs; }
ArrayRef<WriteState> getDefs() const { return Defs; }
@@ -530,6 +535,10 @@ public:
unsigned getLatency() const { return Desc.MaxLatency; }
unsigned getNumMicroOps() const { return Desc.NumMicroOps; }
unsigned getOpcode() const { return Opcode; }
+ bool isALoadBarrier() const { return IsALoadBarrier; }
+ bool isAStoreBarrier() const { return IsAStoreBarrier; }
+ void setLoadBarrier(bool IsBarrier) { IsALoadBarrier = IsBarrier; }
+ void setStoreBarrier(bool IsBarrier) { IsAStoreBarrier = IsBarrier; }
/// Return the MCAOperand which corresponds to index Idx within the original
/// MCInst.
diff --git a/contrib/llvm-project/llvm/include/llvm/MCA/Stages/EntryStage.h b/contrib/llvm-project/llvm/include/llvm/MCA/Stages/EntryStage.h
index 1c133898d603..4c50838bef4b 100644
--- a/contrib/llvm-project/llvm/include/llvm/MCA/Stages/EntryStage.h
+++ b/contrib/llvm-project/llvm/include/llvm/MCA/Stages/EntryStage.h
@@ -36,7 +36,7 @@ class EntryStage final : public Stage {
EntryStage &operator=(const EntryStage &Other) = delete;
public:
- EntryStage(SourceMgr &SM) : CurrentInstruction(), SM(SM), NumRetired(0) { }
+ EntryStage(SourceMgr &SM) : SM(SM), NumRetired(0) {}
bool isAvailable(const InstRef &IR) const override;
bool hasWorkToComplete() const override;
diff --git a/contrib/llvm-project/llvm/include/llvm/MCA/Stages/ExecuteStage.h b/contrib/llvm-project/llvm/include/llvm/MCA/Stages/ExecuteStage.h
index 4c09ca8255ff..03a78a8b6b85 100644
--- a/contrib/llvm-project/llvm/include/llvm/MCA/Stages/ExecuteStage.h
+++ b/contrib/llvm-project/llvm/include/llvm/MCA/Stages/ExecuteStage.h
@@ -49,7 +49,7 @@ class ExecuteStage final : public Stage {
public:
ExecuteStage(Scheduler &S) : ExecuteStage(S, false) {}
ExecuteStage(Scheduler &S, bool ShouldPerformBottleneckAnalysis)
- : Stage(), HWS(S), NumDispatchedOpcodes(0), NumIssuedOpcodes(0),
+ : HWS(S), NumDispatchedOpcodes(0), NumIssuedOpcodes(0),
EnablePressureEvents(ShouldPerformBottleneckAnalysis) {}
// This stage works under the assumption that the Pipeline will eventually
diff --git a/contrib/llvm-project/llvm/include/llvm/MCA/Stages/InOrderIssueStage.h b/contrib/llvm-project/llvm/include/llvm/MCA/Stages/InOrderIssueStage.h
index 42f386a13d85..40bc3b5aed94 100644
--- a/contrib/llvm-project/llvm/include/llvm/MCA/Stages/InOrderIssueStage.h
+++ b/contrib/llvm-project/llvm/include/llvm/MCA/Stages/InOrderIssueStage.h
@@ -38,7 +38,7 @@ struct StallInfo {
unsigned CyclesLeft;
StallKind Kind;
- StallInfo() : IR(), CyclesLeft(), Kind(StallKind::DEFAULT) {}
+ StallInfo() : CyclesLeft(), Kind(StallKind::DEFAULT) {}
StallKind getStallKind() const { return Kind; }
unsigned getCyclesLeft() const { return CyclesLeft; }
diff --git a/contrib/llvm-project/llvm/include/llvm/MCA/Stages/InstructionTables.h b/contrib/llvm-project/llvm/include/llvm/MCA/Stages/InstructionTables.h
index 35b21b0ba94d..9617fd49db6e 100644
--- a/contrib/llvm-project/llvm/include/llvm/MCA/Stages/InstructionTables.h
+++ b/contrib/llvm-project/llvm/include/llvm/MCA/Stages/InstructionTables.h
@@ -32,7 +32,7 @@ class InstructionTables final : public Stage {
public:
InstructionTables(const MCSchedModel &Model)
- : Stage(), SM(Model), Masks(Model.getNumProcResourceKinds()) {
+ : SM(Model), Masks(Model.getNumProcResourceKinds()) {
computeProcResourceMasks(Model, Masks);
}
diff --git a/contrib/llvm-project/llvm/include/llvm/MCA/Stages/RetireStage.h b/contrib/llvm-project/llvm/include/llvm/MCA/Stages/RetireStage.h
index b635a01db85e..aafe2815df15 100644
--- a/contrib/llvm-project/llvm/include/llvm/MCA/Stages/RetireStage.h
+++ b/contrib/llvm-project/llvm/include/llvm/MCA/Stages/RetireStage.h
@@ -36,7 +36,7 @@ class RetireStage final : public Stage {
public:
RetireStage(RetireControlUnit &R, RegisterFile &F, LSUnitBase &LS)
- : Stage(), RCU(R), PRF(F), LSU(LS) {}
+ : RCU(R), PRF(F), LSU(LS) {}
bool hasWorkToComplete() const override { return !RCU.isEmpty(); }
Error cycleStart() override;
diff --git a/contrib/llvm-project/llvm/include/llvm/Object/Archive.h b/contrib/llvm-project/llvm/include/llvm/Object/Archive.h
index 5b024c7baebc..5a5fc90f18bd 100644
--- a/contrib/llvm-project/llvm/include/llvm/Object/Archive.h
+++ b/contrib/llvm-project/llvm/include/llvm/Object/Archive.h
@@ -32,50 +32,127 @@
namespace llvm {
namespace object {
+const char ArchiveMagic[] = "!<arch>\n";
+const char ThinArchiveMagic[] = "!<thin>\n";
+const char BigArchiveMagic[] = "<bigaf>\n";
+
class Archive;
-class ArchiveMemberHeader {
+class AbstractArchiveMemberHeader {
+protected:
+ AbstractArchiveMemberHeader(const Archive *Parent) : Parent(Parent){};
+
public:
friend class Archive;
-
- ArchiveMemberHeader(Archive const *Parent, const char *RawHeaderPtr,
- uint64_t Size, Error *Err);
- // ArchiveMemberHeader() = default;
+ virtual std::unique_ptr<AbstractArchiveMemberHeader> clone() const = 0;
+ virtual ~AbstractArchiveMemberHeader(){};
/// Get the name without looking up long names.
- Expected<StringRef> getRawName() const;
+ virtual Expected<StringRef> getRawName() const = 0;
+ virtual StringRef getRawAccessMode() const = 0;
+ virtual StringRef getRawLastModified() const = 0;
+ virtual StringRef getRawUID() const = 0;
+ virtual StringRef getRawGID() const = 0;
/// Get the name looking up long names.
- Expected<StringRef> getName(uint64_t Size) const;
+ virtual Expected<StringRef> getName(uint64_t Size) const = 0;
+ virtual Expected<uint64_t> getSize() const = 0;
+ virtual uint64_t getOffset() const = 0;
- Expected<uint64_t> getSize() const;
+ /// Get next file member location.
+ virtual Expected<const char *> getNextChildLoc() const = 0;
+ virtual Expected<bool> isThin() const = 0;
Expected<sys::fs::perms> getAccessMode() const;
Expected<sys::TimePoint<std::chrono::seconds>> getLastModified() const;
+ Expected<unsigned> getUID() const;
+ Expected<unsigned> getGID() const;
+
+ /// Returns the size in bytes of the format-defined member header of the
+ /// concrete archive type.
+ virtual uint64_t getSizeOf() const = 0;
+
+ const Archive *Parent;
+};
+
+template <typename T>
+class CommonArchiveMemberHeader : public AbstractArchiveMemberHeader {
+public:
+ CommonArchiveMemberHeader(const Archive *Parent, const T *RawHeaderPtr)
+ : AbstractArchiveMemberHeader(Parent), ArMemHdr(RawHeaderPtr){};
+ StringRef getRawAccessMode() const override;
+ StringRef getRawLastModified() const override;
+ StringRef getRawUID() const override;
+ StringRef getRawGID() const override;
+
+ uint64_t getOffset() const override;
+ uint64_t getSizeOf() const override { return sizeof(T); }
+
+ T const *ArMemHdr;
+};
- StringRef getRawLastModified() const {
- return StringRef(ArMemHdr->LastModified, sizeof(ArMemHdr->LastModified))
- .rtrim(' ');
+struct UnixArMemHdrType {
+ char Name[16];
+ char LastModified[12];
+ char UID[6];
+ char GID[6];
+ char AccessMode[8];
+ char Size[10]; ///< Size of data, not including header or padding.
+ char Terminator[2];
+};
+
+class ArchiveMemberHeader : public CommonArchiveMemberHeader<UnixArMemHdrType> {
+public:
+ ArchiveMemberHeader(const Archive *Parent, const char *RawHeaderPtr,
+ uint64_t Size, Error *Err);
+
+ std::unique_ptr<AbstractArchiveMemberHeader> clone() const override {
+ return std::make_unique<ArchiveMemberHeader>(*this);
}
- Expected<unsigned> getUID() const;
- Expected<unsigned> getGID() const;
+ Expected<StringRef> getRawName() const override;
- // This returns the size of the private struct ArMemHdrType
- uint64_t getSizeOf() const { return sizeof(ArMemHdrType); }
+ Expected<StringRef> getName(uint64_t Size) const override;
+ Expected<uint64_t> getSize() const override;
+ Expected<const char *> getNextChildLoc() const override;
+ Expected<bool> isThin() const override;
+};
-private:
- struct ArMemHdrType {
- char Name[16];
- char LastModified[12];
- char UID[6];
- char GID[6];
- char AccessMode[8];
- char Size[10]; ///< Size of data, not including header or padding.
+// File Member Header
+struct BigArMemHdrType {
+ char Size[20]; // File member size in decimal
+ char NextOffset[20]; // Next member offset in decimal
+ char PrevOffset[20]; // Previous member offset in decimal
+ char LastModified[12];
+ char UID[12];
+ char GID[12];
+ char AccessMode[12];
+ char NameLen[4]; // File member name length in decimal
+ union {
+ char Name[2]; // Start of member name
char Terminator[2];
};
- Archive const *Parent;
- ArMemHdrType const *ArMemHdr;
+};
+
+// Define file member header of AIX big archive.
+class BigArchiveMemberHeader
+ : public CommonArchiveMemberHeader<BigArMemHdrType> {
+
+public:
+ BigArchiveMemberHeader(Archive const *Parent, const char *RawHeaderPtr,
+ uint64_t Size, Error *Err);
+ std::unique_ptr<AbstractArchiveMemberHeader> clone() const override {
+ return std::make_unique<BigArchiveMemberHeader>(*this);
+ }
+
+ Expected<StringRef> getRawName() const override;
+ Expected<uint64_t> getRawNameSize() const;
+
+ Expected<StringRef> getName(uint64_t Size) const override;
+ Expected<uint64_t> getSize() const override;
+ Expected<const char *> getNextChildLoc() const override;
+ Expected<uint64_t> getNextOffset() const;
+ Expected<bool> isThin() const override { return false; }
};
class Archive : public Binary {
@@ -84,10 +161,10 @@ class Archive : public Binary {
public:
class Child {
friend Archive;
- friend ArchiveMemberHeader;
+ friend AbstractArchiveMemberHeader;
const Archive *Parent;
- ArchiveMemberHeader Header;
+ std::unique_ptr<AbstractArchiveMemberHeader> Header;
/// Includes header but not padding byte.
StringRef Data;
/// Offset from Data to the start of the file.
@@ -99,6 +176,44 @@ public:
Child(const Archive *Parent, const char *Start, Error *Err);
Child(const Archive *Parent, StringRef Data, uint16_t StartOfFile);
+ Child(const Child &C)
+ : Parent(C.Parent), Data(C.Data), StartOfFile(C.StartOfFile) {
+ if (C.Header)
+ Header = C.Header->clone();
+ }
+
+ Child(Child &&C) {
+ Parent = std::move(C.Parent);
+ Header = std::move(C.Header);
+ Data = C.Data;
+ StartOfFile = C.StartOfFile;
+ }
+
+ Child &operator=(Child &&C) noexcept {
+ if (&C == this)
+ return *this;
+
+ Parent = std::move(C.Parent);
+ Header = std::move(C.Header);
+ Data = C.Data;
+ StartOfFile = C.StartOfFile;
+
+ return *this;
+ }
+
+ Child &operator=(const Child &C) {
+ if (&C == this)
+ return *this;
+
+ Parent = C.Parent;
+ if (C.Header)
+ Header = C.Header->clone();
+ Data = C.Data;
+ StartOfFile = C.StartOfFile;
+
+ return *this;
+ }
+
bool operator==(const Child &other) const {
assert(!Parent || !other.Parent || Parent == other.Parent);
return Data.begin() == other.Data.begin();
@@ -109,19 +224,21 @@ public:
Expected<StringRef> getName() const;
Expected<std::string> getFullName() const;
- Expected<StringRef> getRawName() const { return Header.getRawName(); }
+ Expected<StringRef> getRawName() const { return Header->getRawName(); }
Expected<sys::TimePoint<std::chrono::seconds>> getLastModified() const {
- return Header.getLastModified();
+ return Header->getLastModified();
}
- StringRef getRawLastModified() const { return Header.getRawLastModified(); }
+ StringRef getRawLastModified() const {
+ return Header->getRawLastModified();
+ }
- Expected<unsigned> getUID() const { return Header.getUID(); }
- Expected<unsigned> getGID() const { return Header.getGID(); }
+ Expected<unsigned> getUID() const { return Header->getUID(); }
+ Expected<unsigned> getGID() const { return Header->getGID(); }
Expected<sys::fs::perms> getAccessMode() const {
- return Header.getAccessMode();
+ return Header->getAccessMode();
}
/// \return the size of the archive member without the header or padding.
@@ -218,7 +335,7 @@ public:
/// Size field is 10 decimal digits long
static const uint64_t MaxMemberSize = 9999999999;
- enum Kind { K_GNU, K_GNU64, K_BSD, K_DARWIN, K_DARWIN64, K_COFF };
+ enum Kind { K_GNU, K_GNU64, K_BSD, K_DARWIN, K_DARWIN64, K_COFF, K_AIXBIG };
Kind kind() const { return (Kind)Format; }
bool isThin() const { return IsThin; }
@@ -236,7 +353,6 @@ public:
return make_range(symbol_begin(), symbol_end());
}
- // Cast methods.
static bool classof(Binary const *v) { return v->isArchive(); }
// check if a symbol is in the archive
@@ -247,24 +363,55 @@ public:
StringRef getSymbolTable() const { return SymbolTable; }
StringRef getStringTable() const { return StringTable; }
uint32_t getNumberOfSymbols() const;
+ virtual uint64_t getFirstChildOffset() const { return getArchiveMagicLen(); }
std::vector<std::unique_ptr<MemoryBuffer>> takeThinBuffers() {
return std::move(ThinBuffers);
}
+ std::unique_ptr<AbstractArchiveMemberHeader>
+ createArchiveMemberHeader(const char *RawHeaderPtr, uint64_t Size,
+ Error *Err) const;
+
+protected:
+ uint64_t getArchiveMagicLen() const;
+ void setFirstRegular(const Child &C);
+
private:
StringRef SymbolTable;
StringRef StringTable;
StringRef FirstRegularData;
uint16_t FirstRegularStartOfFile = -1;
- void setFirstRegular(const Child &C);
unsigned Format : 3;
unsigned IsThin : 1;
mutable std::vector<std::unique_ptr<MemoryBuffer>> ThinBuffers;
};
+class BigArchive : public Archive {
+ /// Fixed-Length Header.
+ struct FixLenHdr {
+ char Magic[sizeof(BigArchiveMagic) - 1]; ///< Big archive magic string.
+ char MemOffset[20]; ///< Offset to member table.
+ char GlobSymOffset[20]; ///< Offset to global symbol table.
+ char
+ GlobSym64Offset[20]; ///< Offset global symbol table for 64-bit objects.
+ char FirstChildOffset[20]; ///< Offset to first archive member.
+ char LastChildOffset[20]; ///< Offset to last archive member.
+ char FreeOffset[20]; ///< Offset to first mem on free list.
+ };
+
+ const FixLenHdr *ArFixLenHdr;
+ uint64_t FirstChildOffset = 0;
+ uint64_t LastChildOffset = 0;
+
+public:
+ BigArchive(MemoryBufferRef Source, Error &Err);
+ uint64_t getFirstChildOffset() const override { return FirstChildOffset; }
+ uint64_t getLastChildOffset() const { return LastChildOffset; }
+};
+
} // end namespace object
} // end namespace llvm
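// Illustrative sketch (not part of the merged change): existing iteration code
// is unaffected by the refactoring. Each Child now owns a cloned
// AbstractArchiveMemberHeader, and an AIX big archive ("<bigaf>\n" magic) can
// be handled through the same interface via the new BigArchive subclass.

#include "llvm/Object/Archive.h"
#include "llvm/Support/raw_ostream.h"

static llvm::Error listMemberNames(const llvm::object::Archive &A,
                                   llvm::raw_ostream &OS) {
  llvm::Error Err = llvm::Error::success();
  for (const llvm::object::Archive::Child &C : A.children(Err)) {
    if (llvm::Expected<llvm::StringRef> Name = C.getName())
      OS << *Name << "\n";
    else
      return llvm::joinErrors(std::move(Err), Name.takeError());
  }
  return Err;
}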
diff --git a/contrib/llvm-project/llvm/include/llvm/Object/ELFObjectFile.h b/contrib/llvm-project/llvm/include/llvm/Object/ELFObjectFile.h
index 716b94d92d03..e2d2784d4f23 100644
--- a/contrib/llvm-project/llvm/include/llvm/Object/ELFObjectFile.h
+++ b/contrib/llvm-project/llvm/include/llvm/Object/ELFObjectFile.h
@@ -34,6 +34,7 @@
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/ScopedPrinter.h"
#include <cassert>
#include <cstdint>
#include <system_error>
diff --git a/contrib/llvm-project/llvm/include/llvm/Object/Error.h b/contrib/llvm-project/llvm/include/llvm/Object/Error.h
index 1fc1f6603a36..af334fc42658 100644
--- a/contrib/llvm-project/llvm/include/llvm/Object/Error.h
+++ b/contrib/llvm-project/llvm/include/llvm/Object/Error.h
@@ -22,8 +22,6 @@ class Twine;
namespace object {
-class Binary;
-
const std::error_category &object_category();
enum class object_error {
diff --git a/contrib/llvm-project/llvm/include/llvm/Object/IRObjectFile.h b/contrib/llvm-project/llvm/include/llvm/Object/IRObjectFile.h
index 338b1941eca1..db47960237a0 100644
--- a/contrib/llvm-project/llvm/include/llvm/Object/IRObjectFile.h
+++ b/contrib/llvm-project/llvm/include/llvm/Object/IRObjectFile.h
@@ -20,10 +20,7 @@
namespace llvm {
class BitcodeModule;
-class Mangler;
class Module;
-class GlobalValue;
-class Triple;
namespace object {
class ObjectFile;
diff --git a/contrib/llvm-project/llvm/include/llvm/Object/MachOUniversal.h b/contrib/llvm-project/llvm/include/llvm/Object/MachOUniversal.h
index 9bcacb510108..e87eb31aad4e 100644
--- a/contrib/llvm-project/llvm/include/llvm/Object/MachOUniversal.h
+++ b/contrib/llvm-project/llvm/include/llvm/Object/MachOUniversal.h
@@ -22,7 +22,6 @@
namespace llvm {
class StringRef;
-class Module;
class LLVMContext;
namespace object {
diff --git a/contrib/llvm-project/llvm/include/llvm/Object/ObjectFile.h b/contrib/llvm-project/llvm/include/llvm/Object/ObjectFile.h
index 267fe3046738..12704b1fc88e 100644
--- a/contrib/llvm-project/llvm/include/llvm/Object/ObjectFile.h
+++ b/contrib/llvm-project/llvm/include/llvm/Object/ObjectFile.h
@@ -31,7 +31,6 @@
namespace llvm {
-class ARMAttributeParser;
class SubtargetFeatures;
namespace object {
diff --git a/contrib/llvm-project/llvm/include/llvm/Object/XCOFFObjectFile.h b/contrib/llvm-project/llvm/include/llvm/Object/XCOFFObjectFile.h
index 94136afc45ea..ac911e534f34 100644
--- a/contrib/llvm-project/llvm/include/llvm/Object/XCOFFObjectFile.h
+++ b/contrib/llvm-project/llvm/include/llvm/Object/XCOFFObjectFile.h
@@ -348,6 +348,57 @@ struct XCOFFSectAuxEntForStat {
uint8_t Pad[10];
}; // 32-bit XCOFF file only.
+struct XCOFFFunctionAuxEnt32 {
+ support::ubig32_t OffsetToExceptionTbl;
+ support::ubig32_t SizeOfFunction;
+ support::ubig32_t PtrToLineNum;
+ support::big32_t SymIdxOfNextBeyond;
+ uint8_t Pad[2];
+};
+
+struct XCOFFFunctionAuxEnt64 {
+ support::ubig64_t PtrToLineNum;
+ support::ubig32_t SizeOfFunction;
+ support::big32_t SymIdxOfNextBeyond;
+ uint8_t Pad;
+ XCOFF::SymbolAuxType AuxType; // Contains _AUX_FCN; Type of auxiliary entry
+};
+
+struct XCOFFExceptionAuxEnt {
+ support::ubig64_t OffsetToExceptionTbl;
+ support::ubig32_t SizeOfFunction;
+ support::big32_t SymIdxOfNextBeyond;
+ uint8_t Pad;
+ XCOFF::SymbolAuxType AuxType; // Contains _AUX_EXCEPT; Type of auxiliary entry
+};
+
+struct XCOFFBlockAuxEnt32 {
+ uint8_t ReservedZeros1[2];
+ support::ubig16_t LineNumHi;
+ support::ubig16_t LineNumLo;
+ uint8_t ReservedZeros2[12];
+};
+
+struct XCOFFBlockAuxEnt64 {
+ support::ubig32_t LineNum;
+ uint8_t Pad[13];
+ XCOFF::SymbolAuxType AuxType; // Contains _AUX_SYM; Type of auxiliary entry
+};
+
+struct XCOFFSectAuxEntForDWARF32 {
+ support::ubig32_t LengthOfSectionPortion;
+ uint8_t Pad1[4];
+ support::ubig32_t NumberOfRelocEnt;
+ uint8_t Pad2[6];
+};
+
+struct XCOFFSectAuxEntForDWARF64 {
+ support::ubig64_t LengthOfSectionPortion;
+ support::ubig64_t NumberOfRelocEnt;
+ uint8_t Pad;
+ XCOFF::SymbolAuxType AuxType; // Contains _AUX_SECT; Type of auxiliary entry
+};
+
template <typename AddressType> struct XCOFFRelocation {
// Masks for packing/unpacking the r_rsize field of relocations.
diff --git a/contrib/llvm-project/llvm/include/llvm/ObjectYAML/DWARFEmitter.h b/contrib/llvm-project/llvm/include/llvm/ObjectYAML/DWARFEmitter.h
index eb56d1e29326..30bb16deb810 100644
--- a/contrib/llvm-project/llvm/include/llvm/ObjectYAML/DWARFEmitter.h
+++ b/contrib/llvm-project/llvm/include/llvm/ObjectYAML/DWARFEmitter.h
@@ -26,7 +26,6 @@ class raw_ostream;
namespace DWARFYAML {
struct Data;
-struct PubSection;
Error emitDebugAbbrev(raw_ostream &OS, const Data &DI);
Error emitDebugStr(raw_ostream &OS, const Data &DI);
diff --git a/contrib/llvm-project/llvm/include/llvm/ObjectYAML/XCOFFYAML.h b/contrib/llvm-project/llvm/include/llvm/ObjectYAML/XCOFFYAML.h
index aa1bc396f134..39bc334e1a89 100644
--- a/contrib/llvm-project/llvm/include/llvm/ObjectYAML/XCOFFYAML.h
+++ b/contrib/llvm-project/llvm/include/llvm/ObjectYAML/XCOFFYAML.h
@@ -82,6 +82,110 @@ struct Section {
std::vector<Relocation> Relocations;
};
+enum AuxSymbolType : uint8_t {
+ AUX_EXCEPT = 255,
+ AUX_FCN = 254,
+ AUX_SYM = 253,
+ AUX_FILE = 252,
+ AUX_CSECT = 251,
+ AUX_SECT = 250,
+ AUX_STAT = 249
+};
+
+struct AuxSymbolEnt {
+ AuxSymbolType Type;
+
+ explicit AuxSymbolEnt(AuxSymbolType T) : Type(T) {}
+ virtual ~AuxSymbolEnt();
+};
+
+struct FileAuxEnt : AuxSymbolEnt {
+ Optional<StringRef> FileNameOrString;
+ Optional<XCOFF::CFileStringType> FileStringType;
+
+ FileAuxEnt() : AuxSymbolEnt(AuxSymbolType::AUX_FILE) {}
+ static bool classof(const AuxSymbolEnt *S) {
+ return S->Type == AuxSymbolType::AUX_FILE;
+ }
+};
+
+struct CsectAuxEnt : AuxSymbolEnt {
+ // Only for XCOFF32.
+ Optional<uint32_t> SectionOrLength;
+ Optional<uint32_t> StabInfoIndex;
+ Optional<uint16_t> StabSectNum;
+ // Only for XCOFF64.
+ Optional<uint32_t> SectionOrLengthLo;
+ Optional<uint32_t> SectionOrLengthHi;
+ // Common fields for both XCOFF32 and XCOFF64.
+ Optional<uint32_t> ParameterHashIndex;
+ Optional<uint16_t> TypeChkSectNum;
+ Optional<uint8_t> SymbolAlignmentAndType;
+ Optional<XCOFF::StorageMappingClass> StorageMappingClass;
+
+ CsectAuxEnt() : AuxSymbolEnt(AuxSymbolType::AUX_CSECT) {}
+ static bool classof(const AuxSymbolEnt *S) {
+ return S->Type == AuxSymbolType::AUX_CSECT;
+ }
+};
+
+struct FunctionAuxEnt : AuxSymbolEnt {
+ Optional<uint32_t> OffsetToExceptionTbl; // Only for XCOFF32.
+ Optional<uint64_t> PtrToLineNum;
+ Optional<uint32_t> SizeOfFunction;
+ Optional<int32_t> SymIdxOfNextBeyond;
+
+ FunctionAuxEnt() : AuxSymbolEnt(AuxSymbolType::AUX_FCN) {}
+ static bool classof(const AuxSymbolEnt *S) {
+ return S->Type == AuxSymbolType::AUX_FCN;
+ }
+};
+
+struct ExcpetionAuxEnt : AuxSymbolEnt {
+ Optional<uint64_t> OffsetToExceptionTbl;
+ Optional<uint32_t> SizeOfFunction;
+ Optional<int32_t> SymIdxOfNextBeyond;
+
+ ExcpetionAuxEnt() : AuxSymbolEnt(AuxSymbolType::AUX_EXCEPT) {}
+ static bool classof(const AuxSymbolEnt *S) {
+ return S->Type == AuxSymbolType::AUX_EXCEPT;
+ }
+}; // Only for XCOFF64.
+
+struct BlockAuxEnt : AuxSymbolEnt {
+ // Only for XCOFF32.
+ Optional<uint16_t> LineNumHi;
+ Optional<uint16_t> LineNumLo;
+ // Only for XCOFF64.
+ Optional<uint32_t> LineNum;
+
+ BlockAuxEnt() : AuxSymbolEnt(AuxSymbolType::AUX_SYM) {}
+ static bool classof(const AuxSymbolEnt *S) {
+ return S->Type == AuxSymbolType::AUX_SYM;
+ }
+};
+
+struct SectAuxEntForDWARF : AuxSymbolEnt {
+ Optional<uint32_t> LengthOfSectionPortion;
+ Optional<uint32_t> NumberOfRelocEnt;
+
+ SectAuxEntForDWARF() : AuxSymbolEnt(AuxSymbolType::AUX_SECT) {}
+ static bool classof(const AuxSymbolEnt *S) {
+ return S->Type == AuxSymbolType::AUX_SECT;
+ }
+};
+
+struct SectAuxEntForStat : AuxSymbolEnt {
+ Optional<uint32_t> SectionLength;
+ Optional<uint16_t> NumberOfRelocEnt;
+ Optional<uint16_t> NumberOfLineNum;
+
+ SectAuxEntForStat() : AuxSymbolEnt(AuxSymbolType::AUX_STAT) {}
+ static bool classof(const AuxSymbolEnt *S) {
+ return S->Type == AuxSymbolType::AUX_STAT;
+ }
+}; // Only for XCOFF32.
+
struct Symbol {
StringRef SymbolName;
llvm::yaml::Hex64 Value; // Symbol value; storage class-dependent.
@@ -89,7 +193,8 @@ struct Symbol {
Optional<uint16_t> SectionIndex;
llvm::yaml::Hex16 Type;
XCOFF::StorageClass StorageClass;
- uint8_t NumberOfAuxEntries;
+ Optional<uint8_t> NumberOfAuxEntries;
+ std::vector<std::unique_ptr<AuxSymbolEnt>> AuxEntries;
};
struct StringTable {
@@ -114,6 +219,7 @@ struct Object {
LLVM_YAML_IS_SEQUENCE_VECTOR(XCOFFYAML::Symbol)
LLVM_YAML_IS_SEQUENCE_VECTOR(XCOFFYAML::Relocation)
LLVM_YAML_IS_SEQUENCE_VECTOR(XCOFFYAML::Section)
+LLVM_YAML_IS_SEQUENCE_VECTOR(std::unique_ptr<llvm::XCOFFYAML::AuxSymbolEnt>)
namespace llvm {
namespace yaml {
@@ -126,6 +232,18 @@ template <> struct ScalarEnumerationTraits<XCOFF::StorageClass> {
static void enumeration(IO &IO, XCOFF::StorageClass &Value);
};
+template <> struct ScalarEnumerationTraits<XCOFF::StorageMappingClass> {
+ static void enumeration(IO &IO, XCOFF::StorageMappingClass &Value);
+};
+
+template <> struct ScalarEnumerationTraits<XCOFF::CFileStringType> {
+ static void enumeration(IO &IO, XCOFF::CFileStringType &Type);
+};
+
+template <> struct ScalarEnumerationTraits<XCOFFYAML::AuxSymbolType> {
+ static void enumeration(IO &IO, XCOFFYAML::AuxSymbolType &Type);
+};
+
template <> struct MappingTraits<XCOFFYAML::FileHeader> {
static void mapping(IO &IO, XCOFFYAML::FileHeader &H);
};
@@ -134,6 +252,10 @@ template <> struct MappingTraits<XCOFFYAML::AuxiliaryHeader> {
static void mapping(IO &IO, XCOFFYAML::AuxiliaryHeader &AuxHdr);
};
+template <> struct MappingTraits<std::unique_ptr<XCOFFYAML::AuxSymbolEnt>> {
+ static void mapping(IO &IO, std::unique_ptr<XCOFFYAML::AuxSymbolEnt> &AuxSym);
+};
+
template <> struct MappingTraits<XCOFFYAML::Symbol> {
static void mapping(IO &IO, XCOFFYAML::Symbol &S);
};
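// Illustrative sketch (not part of the merged change): with the typed
// aux-entry support, yaml2obj-style code can attach auxiliary records to a
// symbol directly. The storage-mapping class below is just an example value.

#include "llvm/ObjectYAML/XCOFFYAML.h"
#include <memory>

static void addCsectAuxEntry(llvm::XCOFFYAML::Symbol &Sym) {
  auto Csect = std::make_unique<llvm::XCOFFYAML::CsectAuxEnt>();
  Csect->StorageMappingClass = llvm::XCOFF::XMC_PR;
  Sym.NumberOfAuxEntries = 1;                 // now Optional<uint8_t>
  Sym.AuxEntries.push_back(std::move(Csect)); // new vector of typed entries
}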
diff --git a/contrib/llvm-project/llvm/include/llvm/Passes/StandardInstrumentations.h b/contrib/llvm-project/llvm/include/llvm/Passes/StandardInstrumentations.h
index 6cab4ce7d138..9eb754a4d824 100644
--- a/contrib/llvm-project/llvm/include/llvm/Passes/StandardInstrumentations.h
+++ b/contrib/llvm-project/llvm/include/llvm/Passes/StandardInstrumentations.h
@@ -432,7 +432,7 @@ public:
}
// Return the label of the basic block reached on a transition on \p S.
- const StringRef getSuccessorLabel(StringRef S) const {
+ StringRef getSuccessorLabel(StringRef S) const {
assert(Successors.count(S) == 1 && "Expected to find successor.");
return Successors.find(S)->getValue();
}
diff --git a/contrib/llvm-project/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h b/contrib/llvm-project/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h
index d3a5d44ce8dd..e1f45019b1a9 100644
--- a/contrib/llvm-project/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h
+++ b/contrib/llvm-project/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h
@@ -702,7 +702,7 @@ public:
LineCoverageIterator(const CoverageData &CD, unsigned Line)
: CD(CD), WrappedSegment(nullptr), Next(CD.begin()), Ended(false),
- Line(Line), Segments(), Stats() {
+ Line(Line) {
this->operator++();
}
diff --git a/contrib/llvm-project/llvm/include/llvm/ProfileData/Coverage/CoverageMappingReader.h b/contrib/llvm-project/llvm/include/llvm/ProfileData/Coverage/CoverageMappingReader.h
index 242ffdd16e3f..39c0045369be 100644
--- a/contrib/llvm-project/llvm/include/llvm/ProfileData/Coverage/CoverageMappingReader.h
+++ b/contrib/llvm-project/llvm/include/llvm/ProfileData/Coverage/CoverageMappingReader.h
@@ -56,10 +56,10 @@ public:
using reference = value_type &;
CoverageMappingIterator()
- : Reader(nullptr), Record(), ReadErr(coveragemap_error::success) {}
+ : Reader(nullptr), ReadErr(coveragemap_error::success) {}
CoverageMappingIterator(CoverageMappingReader *Reader)
- : Reader(Reader), Record(), ReadErr(coveragemap_error::success) {
+ : Reader(Reader), ReadErr(coveragemap_error::success) {
increment();
}
diff --git a/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProf.h b/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProf.h
index 6c5efb2f6d5d..4d3bb0e8ff10 100644
--- a/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProf.h
+++ b/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProf.h
@@ -293,7 +293,6 @@ enum class instrprof_error {
missing_debug_info_for_correlation,
unexpected_debug_info_for_correlation,
unable_to_correlate_profile,
- unsupported_debug_format,
unknown_function,
invalid_prof,
hash_mismatch,
@@ -530,6 +529,12 @@ public:
/// Return the name section data.
inline StringRef getNameData() const { return Data; }
+
+ /// Dump the symbols in this table.
+ void dumpNames(raw_ostream &OS) const {
+ for (StringRef S : NameTab.keys())
+ OS << S << "\n";
+ }
};
Error InstrProfSymtab::create(StringRef D, uint64_t BaseAddr) {
diff --git a/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfCorrelator.h b/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfCorrelator.h
index eae7b4e0322c..135936b99f24 100644
--- a/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfCorrelator.h
+++ b/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfCorrelator.h
@@ -12,6 +12,7 @@
#ifndef LLVM_PROFILEDATA_INSTRPROFCORRELATOR_H
#define LLVM_PROFILEDATA_INSTRPROFCORRELATOR_H
+#include "llvm/ADT/DenseSet.h"
#include "llvm/DebugInfo/DWARF/DWARFContext.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/ObjectFile.h"
@@ -34,6 +35,20 @@ public:
/// to their functions.
virtual Error correlateProfileData() = 0;
+ /// Return the number of ProfileData elements.
+ llvm::Optional<size_t> getDataSize() const;
+
+ /// Return a pointer to the names string that this class constructs.
+ const char *getNamesPointer() const { return Names.c_str(); }
+
+ /// Return the number of bytes in the names string.
+ size_t getNamesSize() const { return Names.size(); }
+
+ /// Return the size of the counters section in bytes.
+ uint64_t getCountersSectionSize() const {
+ return Ctx->CountersSectionEnd - Ctx->CountersSectionStart;
+ }
+
static const char *FunctionNameAttributeName;
static const char *CFGHashAttributeName;
static const char *NumCountersAttributeName;
@@ -58,6 +73,9 @@ protected:
InstrProfCorrelator(InstrProfCorrelatorKind K, std::unique_ptr<Context> Ctx)
: Ctx(std::move(Ctx)), Kind(K) {}
+ std::string Names;
+ std::vector<std::string> NamesVec;
+
private:
static llvm::Expected<std::unique_ptr<InstrProfCorrelator>>
get(std::unique_ptr<MemoryBuffer> Buffer);
@@ -82,22 +100,12 @@ public:
/// Return the number of ProfileData elements.
size_t getDataSize() const { return Data.size(); }
- /// Return a pointer to the compressed names string that this class
- /// constructs.
- const char *getCompressedNamesPointer() const {
- return CompressedNames.c_str();
- }
-
- /// Return the number of bytes in the compressed names string.
- size_t getCompressedNamesSize() const { return CompressedNames.size(); }
-
static llvm::Expected<std::unique_ptr<InstrProfCorrelatorImpl<IntPtrT>>>
get(std::unique_ptr<InstrProfCorrelator::Context> Ctx,
const object::ObjectFile &Obj);
protected:
std::vector<RawInstrProf::ProfileData<IntPtrT>> Data;
- std::string CompressedNames;
Error correlateProfileData() override;
virtual void correlateProfileDataImpl() = 0;
@@ -109,7 +117,7 @@ private:
InstrProfCorrelatorImpl(InstrProfCorrelatorKind Kind,
std::unique_ptr<InstrProfCorrelator::Context> Ctx)
: InstrProfCorrelator(Kind, std::move(Ctx)){};
- std::vector<std::string> Names;
+ llvm::DenseSet<IntPtrT> CounterOffsets;
// Byte-swap the value if necessary.
template <class T> T maybeSwap(T Value) const {
@@ -138,7 +146,7 @@ private:
static bool isDIEOfProbe(const DWARFDie &Die);
/// Iterate over DWARF DIEs to find those that symbolize instrumentation
- /// probes and construct the ProfileData vector and CompressedNames string.
+ /// probes and construct the ProfileData vector and Names string.
///
/// Here is some example DWARF for an instrumentation probe we are looking
/// for:
diff --git a/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfData.inc b/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfData.inc
index 44719126b596..0544b6b2ef71 100644
--- a/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfData.inc
+++ b/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfData.inc
@@ -128,8 +128,10 @@ INSTR_PROF_VALUE_NODE(PtrToNodeT, llvm::Type::getInt8PtrTy(Ctx), Next, \
INSTR_PROF_RAW_HEADER(uint64_t, Magic, __llvm_profile_get_magic())
INSTR_PROF_RAW_HEADER(uint64_t, Version, __llvm_profile_get_version())
INSTR_PROF_RAW_HEADER(uint64_t, BinaryIdsSize, __llvm_write_binary_ids(NULL))
+/* FIXME: A more accurate name is NumData */
INSTR_PROF_RAW_HEADER(uint64_t, DataSize, DataSize)
INSTR_PROF_RAW_HEADER(uint64_t, PaddingBytesBeforeCounters, PaddingBytesBeforeCounters)
+/* FIXME: A more accurate name is NumCounters */
INSTR_PROF_RAW_HEADER(uint64_t, CountersSize, CountersSize)
INSTR_PROF_RAW_HEADER(uint64_t, PaddingBytesAfterCounters, PaddingBytesAfterCounters)
INSTR_PROF_RAW_HEADER(uint64_t, NamesSize, NamesSize)
@@ -644,6 +646,7 @@ serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
(uint64_t)'p' << 40 | (uint64_t)'r' << 32 | (uint64_t)'o' << 24 | \
(uint64_t)'f' << 16 | (uint64_t)'R' << 8 | (uint64_t)129
+/* FIXME: Please remedy the fixme in the header before bumping the version. */
/* Raw profile format version (start from 1). */
#define INSTR_PROF_RAW_VERSION 8
/* Indexed profile format version (start from 1). */
diff --git a/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfReader.h b/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfReader.h
index c615e8533178..1326cbf0e1ce 100644
--- a/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfReader.h
+++ b/contrib/llvm-project/llvm/include/llvm/ProfileData/InstrProfReader.h
@@ -233,7 +233,8 @@ private:
uint64_t NamesDelta;
const RawInstrProf::ProfileData<IntPtrT> *Data;
const RawInstrProf::ProfileData<IntPtrT> *DataEnd;
- const uint64_t *CountersStart;
+ const char *CountersStart;
+ const char *CountersEnd;
const char *NamesStart;
const char *NamesEnd;
// After value profile is all read, this pointer points to
@@ -310,6 +311,15 @@ private:
bool atEnd() const { return Data == DataEnd; }
void advanceData() {
+ // `CountersDelta` is a constant zero when using debug info correlation.
+ if (!Correlator) {
+ // The initial CountersDelta is the in-memory address difference between
+ // the data and counts sections:
+ // start(__llvm_prf_cnts) - start(__llvm_prf_data)
+ // As we advance to the next record, we maintain the correct CountersDelta
+ // with respect to the next record.
+ CountersDelta -= sizeof(*Data);
+ }
Data++;
ValueDataStart += CurValueDataSize;
}
@@ -319,20 +329,11 @@ private:
return (const char *)ValueDataStart;
}
- /// Get the offset of \p CounterPtr from the start of the counters section of
- /// the profile. The offset has units of "number of counters", i.e. increasing
- /// the offset by 1 corresponds to an increase in the *byte offset* by 8.
- ptrdiff_t getCounterOffset(IntPtrT CounterPtr) const {
- return (swap(CounterPtr) - CountersDelta) / sizeof(uint64_t);
- }
-
- const uint64_t *getCounter(ptrdiff_t Offset) const {
- return CountersStart + Offset;
- }
-
StringRef getName(uint64_t NameRef) const {
return Symtab->getFuncName(swap(NameRef));
}
+
+ int getCounterTypeSize() const { return sizeof(uint64_t); }
};
using RawInstrProfReader32 = RawInstrProfReader<uint32_t>;
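// Illustrative note (not part of the merged change): why advanceData() shrinks
// CountersDelta by sizeof(*Data). Starting from
//   CountersDelta = start(__llvm_prf_cnts) - start(__llvm_prf_data)
//                 = start(__llvm_prf_cnts) - &Data[0]
// subtracting sizeof(*Data) on each advance maintains
//   CountersDelta = start(__llvm_prf_cnts) - &Data[k]
// for the k-th record, so counter references remain relative to the record
// currently being read.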
diff --git a/contrib/llvm-project/llvm/include/llvm/ProfileData/MemProfData.inc b/contrib/llvm-project/llvm/include/llvm/ProfileData/MemProfData.inc
index d64227e4ba31..f2cb3738f053 100644
--- a/contrib/llvm-project/llvm/include/llvm/ProfileData/MemProfData.inc
+++ b/contrib/llvm-project/llvm/include/llvm/ProfileData/MemProfData.inc
@@ -1,5 +1,5 @@
-#ifndef MEMPROF_DATA_INC
-#define MEMPROF_DATA_INC
+#ifndef LLVM_PROFILEDATA_MEMPROFDATA_INC
+#define LLVM_PROFILEDATA_MEMPROFDATA_INC
/*===-- MemProfData.inc - MemProf profiling runtime structures -*- C++ -*-=== *\
|*
|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
diff --git a/contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProf.h b/contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProf.h
index dc6522f4ec4c..bad2139fe8f0 100644
--- a/contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProf.h
+++ b/contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProf.h
@@ -195,19 +195,21 @@ enum class SecProfSummaryFlags : uint32_t {
/// The common profile is usually merged from profiles collected
/// from running other targets.
SecFlagPartial = (1 << 0),
- /// SecFlagContext means this is context-sensitive profile for
+ /// SecFlagContext means this is context-sensitive flat profile for
/// CSSPGO
SecFlagFullContext = (1 << 1),
/// SecFlagFSDiscriminator means this profile uses flow-sensitive
/// discriminators.
- SecFlagFSDiscriminator = (1 << 2)
+ SecFlagFSDiscriminator = (1 << 2),
+ /// SecFlagIsCSNested means this is context-sensitive nested profile for
+ /// CSSPGO
+ SecFlagIsCSNested = (1 << 4),
};
enum class SecFuncMetadataFlags : uint32_t {
SecFlagInvalid = 0,
SecFlagIsProbeBased = (1 << 0),
SecFlagHasAttribute = (1 << 1),
- SecFlagIsCSNested = (1 << 2),
};
enum class SecFuncOffsetFlags : uint32_t {
@@ -448,7 +450,7 @@ static inline hash_code hash_value(const SampleContextFrame &arg) {
arg.Location.Discriminator);
}
-using SampleContextFrameVector = SmallVector<SampleContextFrame, 10>;
+using SampleContextFrameVector = SmallVector<SampleContextFrame, 1>;
using SampleContextFrames = ArrayRef<SampleContextFrame>;
struct SampleContextFrameHash {
diff --git a/contrib/llvm-project/llvm/include/llvm/Remarks/RemarkSerializer.h b/contrib/llvm-project/llvm/include/llvm/Remarks/RemarkSerializer.h
index 97fd224ea082..90e556df87e7 100644
--- a/contrib/llvm-project/llvm/include/llvm/Remarks/RemarkSerializer.h
+++ b/contrib/llvm-project/llvm/include/llvm/Remarks/RemarkSerializer.h
@@ -48,7 +48,7 @@ struct RemarkSerializer {
RemarkSerializer(Format SerializerFormat, raw_ostream &OS,
SerializerMode Mode)
- : SerializerFormat(SerializerFormat), OS(OS), Mode(Mode), StrTab() {}
+ : SerializerFormat(SerializerFormat), OS(OS), Mode(Mode) {}
/// This is just an interface.
virtual ~RemarkSerializer() = default;
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/AArch64TargetParser.def b/contrib/llvm-project/llvm/include/llvm/Support/AArch64TargetParser.def
index 48e82fa55a0f..26f4bae53119 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/AArch64TargetParser.def
+++ b/contrib/llvm-project/llvm/include/llvm/Support/AArch64TargetParser.def
@@ -58,6 +58,13 @@ AARCH64_ARCH("armv8.7-a", ARMV8_7A, "8.7-A", "v8.7a",
AArch64::AEK_RDM | AArch64::AEK_RCPC | AArch64::AEK_DOTPROD |
AArch64::AEK_SM4 | AArch64::AEK_SHA3 | AArch64::AEK_BF16 |
AArch64::AEK_SHA2 | AArch64::AEK_AES | AArch64::AEK_I8MM))
+AARCH64_ARCH("armv8.8-a", ARMV8_8A, "8.8-A", "v8.8a",
+ ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
+ (AArch64::AEK_CRC | AArch64::AEK_FP |
+ AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
+ AArch64::AEK_RDM | AArch64::AEK_RCPC | AArch64::AEK_DOTPROD |
+ AArch64::AEK_SM4 | AArch64::AEK_SHA3 | AArch64::AEK_BF16 |
+ AArch64::AEK_SHA2 | AArch64::AEK_AES | AArch64::AEK_I8MM))
AARCH64_ARCH("armv9-a", ARMV9A, "9-A", "v9a",
ARMBuildAttrs::CPUArch::v8_A, FK_NEON_FP_ARMV8,
(AArch64::AEK_CRC | AArch64::AEK_FP |
@@ -76,6 +83,12 @@ AARCH64_ARCH("armv9.2-a", ARMV9_2A, "9.2-A", "v9.2a",
AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
AArch64::AEK_RDM | AArch64::AEK_RCPC | AArch64::AEK_DOTPROD |
AArch64::AEK_SVE2))
+AARCH64_ARCH("armv9.3-a", ARMV9_3A, "9.3-A", "v9.3",
+ ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
+ (AArch64::AEK_CRC | AArch64::AEK_FP |
+ AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
+ AArch64::AEK_RDM | AArch64::AEK_RCPC | AArch64::AEK_DOTPROD |
+ AArch64::AEK_SVE2))
// For v8-R, we do not enable crypto and align with GCC that enables a more
// minimal set of optional architecture extensions.
AARCH64_ARCH("armv8-r", ARMV8R, "8-R", "v8r",
@@ -90,47 +103,50 @@ AARCH64_ARCH("armv8-r", ARMV8R, "8-R", "v8r",
#define AARCH64_ARCH_EXT_NAME(NAME, ID, FEATURE, NEGFEATURE)
#endif
// FIXME: This would be nicer were it tablegen
-AARCH64_ARCH_EXT_NAME("invalid", AArch64::AEK_INVALID, nullptr, nullptr)
-AARCH64_ARCH_EXT_NAME("none", AArch64::AEK_NONE, nullptr, nullptr)
-AARCH64_ARCH_EXT_NAME("crc", AArch64::AEK_CRC, "+crc", "-crc")
-AARCH64_ARCH_EXT_NAME("lse", AArch64::AEK_LSE, "+lse", "-lse")
-AARCH64_ARCH_EXT_NAME("rdm", AArch64::AEK_RDM, "+rdm", "-rdm")
-AARCH64_ARCH_EXT_NAME("crypto", AArch64::AEK_CRYPTO, "+crypto","-crypto")
-AARCH64_ARCH_EXT_NAME("sm4", AArch64::AEK_SM4, "+sm4", "-sm4")
-AARCH64_ARCH_EXT_NAME("sha3", AArch64::AEK_SHA3, "+sha3", "-sha3")
-AARCH64_ARCH_EXT_NAME("sha2", AArch64::AEK_SHA2, "+sha2", "-sha2")
-AARCH64_ARCH_EXT_NAME("aes", AArch64::AEK_AES, "+aes", "-aes")
-AARCH64_ARCH_EXT_NAME("dotprod", AArch64::AEK_DOTPROD, "+dotprod","-dotprod")
-AARCH64_ARCH_EXT_NAME("fp", AArch64::AEK_FP, "+fp-armv8", "-fp-armv8")
-AARCH64_ARCH_EXT_NAME("simd", AArch64::AEK_SIMD, "+neon", "-neon")
-AARCH64_ARCH_EXT_NAME("fp16", AArch64::AEK_FP16, "+fullfp16", "-fullfp16")
-AARCH64_ARCH_EXT_NAME("fp16fml", AArch64::AEK_FP16FML, "+fp16fml", "-fp16fml")
-AARCH64_ARCH_EXT_NAME("profile", AArch64::AEK_PROFILE, "+spe", "-spe")
-AARCH64_ARCH_EXT_NAME("ras", AArch64::AEK_RAS, "+ras", "-ras")
-AARCH64_ARCH_EXT_NAME("sve", AArch64::AEK_SVE, "+sve", "-sve")
-AARCH64_ARCH_EXT_NAME("sve2", AArch64::AEK_SVE2, "+sve2", "-sve2")
-AARCH64_ARCH_EXT_NAME("sve2-aes", AArch64::AEK_SVE2AES, "+sve2-aes", "-sve2-aes")
-AARCH64_ARCH_EXT_NAME("sve2-sm4", AArch64::AEK_SVE2SM4, "+sve2-sm4", "-sve2-sm4")
-AARCH64_ARCH_EXT_NAME("sve2-sha3", AArch64::AEK_SVE2SHA3, "+sve2-sha3", "-sve2-sha3")
+AARCH64_ARCH_EXT_NAME("invalid", AArch64::AEK_INVALID, nullptr, nullptr)
+AARCH64_ARCH_EXT_NAME("none", AArch64::AEK_NONE, nullptr, nullptr)
+AARCH64_ARCH_EXT_NAME("crc", AArch64::AEK_CRC, "+crc", "-crc")
+AARCH64_ARCH_EXT_NAME("lse", AArch64::AEK_LSE, "+lse", "-lse")
+AARCH64_ARCH_EXT_NAME("rdm", AArch64::AEK_RDM, "+rdm", "-rdm")
+AARCH64_ARCH_EXT_NAME("crypto", AArch64::AEK_CRYPTO, "+crypto", "-crypto")
+AARCH64_ARCH_EXT_NAME("sm4", AArch64::AEK_SM4, "+sm4", "-sm4")
+AARCH64_ARCH_EXT_NAME("sha3", AArch64::AEK_SHA3, "+sha3", "-sha3")
+AARCH64_ARCH_EXT_NAME("sha2", AArch64::AEK_SHA2, "+sha2", "-sha2")
+AARCH64_ARCH_EXT_NAME("aes", AArch64::AEK_AES, "+aes", "-aes")
+AARCH64_ARCH_EXT_NAME("dotprod", AArch64::AEK_DOTPROD, "+dotprod", "-dotprod")
+AARCH64_ARCH_EXT_NAME("fp", AArch64::AEK_FP, "+fp-armv8", "-fp-armv8")
+AARCH64_ARCH_EXT_NAME("simd", AArch64::AEK_SIMD, "+neon", "-neon")
+AARCH64_ARCH_EXT_NAME("fp16", AArch64::AEK_FP16, "+fullfp16", "-fullfp16")
+AARCH64_ARCH_EXT_NAME("fp16fml", AArch64::AEK_FP16FML, "+fp16fml", "-fp16fml")
+AARCH64_ARCH_EXT_NAME("profile", AArch64::AEK_PROFILE, "+spe", "-spe")
+AARCH64_ARCH_EXT_NAME("ras", AArch64::AEK_RAS, "+ras", "-ras")
+AARCH64_ARCH_EXT_NAME("sve", AArch64::AEK_SVE, "+sve", "-sve")
+AARCH64_ARCH_EXT_NAME("sve2", AArch64::AEK_SVE2, "+sve2", "-sve2")
+AARCH64_ARCH_EXT_NAME("sve2-aes", AArch64::AEK_SVE2AES, "+sve2-aes", "-sve2-aes")
+AARCH64_ARCH_EXT_NAME("sve2-sm4", AArch64::AEK_SVE2SM4, "+sve2-sm4", "-sve2-sm4")
+AARCH64_ARCH_EXT_NAME("sve2-sha3", AArch64::AEK_SVE2SHA3, "+sve2-sha3", "-sve2-sha3")
AARCH64_ARCH_EXT_NAME("sve2-bitperm", AArch64::AEK_SVE2BITPERM, "+sve2-bitperm", "-sve2-bitperm")
-AARCH64_ARCH_EXT_NAME("rcpc", AArch64::AEK_RCPC, "+rcpc", "-rcpc")
-AARCH64_ARCH_EXT_NAME("rng", AArch64::AEK_RAND, "+rand", "-rand")
-AARCH64_ARCH_EXT_NAME("memtag", AArch64::AEK_MTE, "+mte", "-mte")
-AARCH64_ARCH_EXT_NAME("ssbs", AArch64::AEK_SSBS, "+ssbs", "-ssbs")
-AARCH64_ARCH_EXT_NAME("sb", AArch64::AEK_SB, "+sb", "-sb")
-AARCH64_ARCH_EXT_NAME("predres", AArch64::AEK_PREDRES, "+predres", "-predres")
-AARCH64_ARCH_EXT_NAME("bf16", AArch64::AEK_BF16, "+bf16", "-bf16")
-AARCH64_ARCH_EXT_NAME("i8mm", AArch64::AEK_I8MM, "+i8mm", "-i8mm")
-AARCH64_ARCH_EXT_NAME("f32mm", AArch64::AEK_F32MM, "+f32mm", "-f32mm")
-AARCH64_ARCH_EXT_NAME("f64mm", AArch64::AEK_F64MM, "+f64mm", "-f64mm")
-AARCH64_ARCH_EXT_NAME("tme", AArch64::AEK_TME, "+tme", "-tme")
-AARCH64_ARCH_EXT_NAME("ls64", AArch64::AEK_LS64, "+ls64", "-ls64")
-AARCH64_ARCH_EXT_NAME("brbe", AArch64::AEK_BRBE, "+brbe", "-brbe")
-AARCH64_ARCH_EXT_NAME("pauth", AArch64::AEK_PAUTH, "+pauth", "-pauth")
-AARCH64_ARCH_EXT_NAME("flagm", AArch64::AEK_FLAGM, "+flagm", "-flagm")
-AARCH64_ARCH_EXT_NAME("sme", AArch64::AEK_SME, "+sme", "-sme")
-AARCH64_ARCH_EXT_NAME("sme-f64", AArch64::AEK_SMEF64, "+sme-f64", "-sme-f64")
-AARCH64_ARCH_EXT_NAME("sme-i64", AArch64::AEK_SMEI64, "+sme-i64", "-sme-i64")
+AARCH64_ARCH_EXT_NAME("rcpc", AArch64::AEK_RCPC, "+rcpc", "-rcpc")
+AARCH64_ARCH_EXT_NAME("rng", AArch64::AEK_RAND, "+rand", "-rand")
+AARCH64_ARCH_EXT_NAME("memtag", AArch64::AEK_MTE, "+mte", "-mte")
+AARCH64_ARCH_EXT_NAME("ssbs", AArch64::AEK_SSBS, "+ssbs", "-ssbs")
+AARCH64_ARCH_EXT_NAME("sb", AArch64::AEK_SB, "+sb", "-sb")
+AARCH64_ARCH_EXT_NAME("predres", AArch64::AEK_PREDRES, "+predres", "-predres")
+AARCH64_ARCH_EXT_NAME("bf16", AArch64::AEK_BF16, "+bf16", "-bf16")
+AARCH64_ARCH_EXT_NAME("i8mm", AArch64::AEK_I8MM, "+i8mm", "-i8mm")
+AARCH64_ARCH_EXT_NAME("f32mm", AArch64::AEK_F32MM, "+f32mm", "-f32mm")
+AARCH64_ARCH_EXT_NAME("f64mm", AArch64::AEK_F64MM, "+f64mm", "-f64mm")
+AARCH64_ARCH_EXT_NAME("tme", AArch64::AEK_TME, "+tme", "-tme")
+AARCH64_ARCH_EXT_NAME("ls64", AArch64::AEK_LS64, "+ls64", "-ls64")
+AARCH64_ARCH_EXT_NAME("brbe", AArch64::AEK_BRBE, "+brbe", "-brbe")
+AARCH64_ARCH_EXT_NAME("pauth", AArch64::AEK_PAUTH, "+pauth", "-pauth")
+AARCH64_ARCH_EXT_NAME("flagm", AArch64::AEK_FLAGM, "+flagm", "-flagm")
+AARCH64_ARCH_EXT_NAME("sme", AArch64::AEK_SME, "+sme", "-sme")
+AARCH64_ARCH_EXT_NAME("sme-f64", AArch64::AEK_SMEF64, "+sme-f64", "-sme-f64")
+AARCH64_ARCH_EXT_NAME("sme-i64", AArch64::AEK_SMEI64, "+sme-i64", "-sme-i64")
+AARCH64_ARCH_EXT_NAME("hbc", AArch64::AEK_HBC, "+hbc", "-hbc")
+AARCH64_ARCH_EXT_NAME("mops", AArch64::AEK_MOPS, "+mops", "-mops")
+AARCH64_ARCH_EXT_NAME("pmuv3", AArch64::AEK_PERFMON, "+perfmon", "-perfmon")
#undef AARCH64_ARCH_EXT_NAME
#ifndef AARCH64_CPU_NAME
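These AARCH64_ARCH_EXT_NAME rows form an X-macro table: a client defines the macro, includes the .def, and gets one expansion per row. A minimal sketch of that consumption pattern follows; the struct and array names are illustrative, not LLVM's, though LLVM's own target-parser code builds its extension table in essentially this way.

```cpp
#include <cstdio>

struct ExtRow {
  const char *Name;
  const char *Feature;     // e.g. "+hbc"
  const char *NegFeature;  // e.g. "-hbc"
};

static const ExtRow AArch64ExtRows[] = {
// Discard the ID argument; keep the user-facing name and the +/- features.
#define AARCH64_ARCH_EXT_NAME(NAME, ID, FEATURE, NEGFEATURE)                   \
  {NAME, FEATURE, NEGFEATURE},
#include "llvm/Support/AArch64TargetParser.def"
};

int main() {
  for (const ExtRow &E : AArch64ExtRows)
    if (E.Feature)   // "invalid" and "none" carry null feature strings
      std::printf("%-14s %s / %s\n", E.Name, E.Feature, E.NegFeature);
}
```

With this hunk, such a table automatically picks up the new hbc, mops and pmuv3 rows.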
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/AArch64TargetParser.h b/contrib/llvm-project/llvm/include/llvm/Support/AArch64TargetParser.h
index 15bb428f19bc..d094c704d291 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/AArch64TargetParser.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/AArch64TargetParser.h
@@ -14,7 +14,6 @@
#ifndef LLVM_SUPPORT_AARCH64TARGETPARSER_H
#define LLVM_SUPPORT_AARCH64TARGETPARSER_H
-#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/ARMTargetParser.h"
#include <vector>
@@ -69,6 +68,9 @@ enum ArchExtKind : uint64_t {
AEK_SME = 1ULL << 37,
AEK_SMEF64 = 1ULL << 38,
AEK_SMEI64 = 1ULL << 39,
+ AEK_HBC = 1ULL << 40,
+ AEK_MOPS = 1ULL << 41,
+ AEK_PERFMON = 1ULL << 42,
};
enum class ArchKind {
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/ARMAttributeParser.h b/contrib/llvm-project/llvm/include/llvm/Support/ARMAttributeParser.h
index b46a4d9f690f..cbb5701540e1 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/ARMAttributeParser.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/ARMAttributeParser.h
@@ -11,14 +11,12 @@
#include "ARMBuildAttributes.h"
#include "ELFAttributeParser.h"
-#include "ScopedPrinter.h"
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/Support/DataExtractor.h"
-#include "llvm/Support/Endian.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"
namespace llvm {
-class StringRef;
+
+class ScopedPrinter;
class ARMAttributeParser : public ELFAttributeParser {
struct DisplayHandler {
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/ARMTargetParser.def b/contrib/llvm-project/llvm/include/llvm/Support/ARMTargetParser.def
index 7d29808f0501..433d7fdc2c3b 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/ARMTargetParser.def
+++ b/contrib/llvm-project/llvm/include/llvm/Support/ARMTargetParser.def
@@ -122,6 +122,12 @@ ARM_ARCH("armv8.7-a", ARMV8_7A, "8.7-A", "v8.7a",
(ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS |
ARM::AEK_DOTPROD | ARM::AEK_BF16 | ARM::AEK_I8MM))
+ARM_ARCH("armv8.8-a", ARMV8_8A, "8.8-A", "v8.8a",
+ ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
+ (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
+ ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS |
+ ARM::AEK_DOTPROD | ARM::AEK_BF16 | ARM::AEK_SHA2 | ARM::AEK_AES |
+ ARM::AEK_I8MM))
ARM_ARCH("armv9-a", ARMV9A, "9-A", "v9a",
ARMBuildAttrs::CPUArch::v8_A, FK_NEON_FP_ARMV8,
(ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
@@ -137,6 +143,11 @@ ARM_ARCH("armv9.2-a", ARMV9_2A, "9.2-A", "v9.2a",
(ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS |
ARM::AEK_DOTPROD | ARM::AEK_BF16 | ARM::AEK_I8MM))
+ARM_ARCH("armv9.3-a", ARMV9_3A, "9.3-A", "v9.3a",
+ ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
+ (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
+ ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS |
+ ARM::AEK_DOTPROD | ARM::AEK_BF16 | ARM::AEK_I8MM))
ARM_ARCH("armv8-r", ARMV8R, "8-R", "v8r", ARMBuildAttrs::CPUArch::v8_R,
FK_NEON_FP_ARMV8,
(ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB |
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/ARMTargetParser.h b/contrib/llvm-project/llvm/include/llvm/Support/ARMTargetParser.h
index b40704c24e87..1ada6daaad3b 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/ARMTargetParser.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/ARMTargetParser.h
@@ -14,7 +14,6 @@
#ifndef LLVM_SUPPORT_ARMTARGETPARSER_H
#define LLVM_SUPPORT_ARMTARGETPARSER_H
-#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/ARMBuildAttributes.h"
#include <vector>
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/Allocator.h b/contrib/llvm-project/llvm/include/llvm/Support/Allocator.h
index 9e8ce4e36197..ec5ed06b7fa4 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/Allocator.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/Allocator.h
@@ -22,16 +22,12 @@
#include "llvm/Support/Alignment.h"
#include "llvm/Support/AllocatorBase.h"
#include "llvm/Support/Compiler.h"
-#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/MemAlloc.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
-#include <cstdlib>
#include <iterator>
-#include <type_traits>
#include <utility>
namespace llvm {
@@ -141,8 +137,10 @@ public:
}
/// Allocate space at the specified alignment.
- LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void *
- Allocate(size_t Size, Align Alignment) {
+ // This method is *not* marked noalias, because
+ // SpecificBumpPtrAllocator::DestroyAll() loops over all allocations, and
+ // that loop is not based on the Allocate() return value.
+ LLVM_ATTRIBUTE_RETURNS_NONNULL void *Allocate(size_t Size, Align Alignment) {
// Keep track of how many bytes we've allocated.
BytesAllocated += Size;
@@ -198,7 +196,7 @@ public:
return AlignedPtr;
}
- inline LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void *
+ inline LLVM_ATTRIBUTE_RETURNS_NONNULL void *
Allocate(size_t Size, size_t Alignment) {
assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.");
return Allocate(Size, Align(Alignment));
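The new comment explains why the `noalias` return attribute had to go: SpecificBumpPtrAllocator reaches the same memory again through its slab pointers rather than through the value Allocate() returned. A minimal sketch of that usage pattern, assuming nothing beyond the public SpecificBumpPtrAllocator interface:

```cpp
#include "llvm/Support/Allocator.h"
#include <cstdio>
#include <new>

struct Widget {
  int Id;
  explicit Widget(int Id) : Id(Id) {}
  ~Widget() { std::printf("destroying widget %d\n", Id); }
};

int main() {
  llvm::SpecificBumpPtrAllocator<Widget> Alloc;
  for (int I = 0; I != 3; ++I)
    new (Alloc.Allocate()) Widget(I);
  // No explicit frees: the allocator's destructor runs DestroyAll(), which
  // walks the underlying slabs and calls ~Widget on every object it finds.
  // Those slab-derived pointers alias the pointers Allocate() handed out,
  // which is exactly the aliasing a "returns noalias" promise would rule out.
  return 0;
}
```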
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/AllocatorBase.h b/contrib/llvm-project/llvm/include/llvm/Support/AllocatorBase.h
index e5549d111622..eccced1d1ff4 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/AllocatorBase.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/AllocatorBase.h
@@ -21,6 +21,7 @@
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MemAlloc.h"
+#include <type_traits>
namespace llvm {
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/BinaryByteStream.h b/contrib/llvm-project/llvm/include/llvm/Support/BinaryByteStream.h
index 7d8b6d2dc43d..dc4adba26f16 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/BinaryByteStream.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/BinaryByteStream.h
@@ -17,7 +17,6 @@
#include "llvm/Support/Error.h"
#include "llvm/Support/FileOutputBuffer.h"
#include "llvm/Support/MemoryBuffer.h"
-#include <algorithm>
#include <cstdint>
#include <cstring>
#include <memory>
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/BinaryStreamArray.h b/contrib/llvm-project/llvm/include/llvm/Support/BinaryStreamArray.h
index 85d29be26ca9..c3e0db4dcff0 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/BinaryStreamArray.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/BinaryStreamArray.h
@@ -323,7 +323,7 @@ public:
FixedStreamArrayIterator(const FixedStreamArray<T> &Array, uint32_t Index)
: Array(Array), Index(Index) {}
- FixedStreamArrayIterator<T>(const FixedStreamArrayIterator<T> &Other)
+ FixedStreamArrayIterator(const FixedStreamArrayIterator<T> &Other)
: Array(Other.Array), Index(Other.Index) {}
FixedStreamArrayIterator<T> &
operator=(const FixedStreamArrayIterator<T> &Other) {
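The one-token change to the copy constructor is, as far as I can tell, a conformance fix: naming a constructor with a template-id such as `FixedStreamArrayIterator<T>` is rejected by compilers enforcing the newer C++20 rules, while the plain injected-class-name is always valid. A stand-in reproduction (the class here is illustrative, not LLVM's iterator):

```cpp
template <typename T> struct Iter {
  T Value;
  explicit Iter(T V) : Value(V) {}
  Iter(const Iter &Other) = default;   // OK in every language mode
  // Iter<T>(const Iter<T> &Other);    // a template-id as the constructor
                                       // name is ill-formed under the rules
                                       // C++20-mode compilers enforce
};

int main() {
  Iter<int> A(1);
  Iter<int> B(A);                      // exercises the copy constructor
  return B.Value - 1;
}
```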
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/BinaryStreamReader.h b/contrib/llvm-project/llvm/include/llvm/Support/BinaryStreamReader.h
index 29b4b09b848c..c664ac48daad 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/BinaryStreamReader.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/BinaryStreamReader.h
@@ -10,7 +10,6 @@
#define LLVM_SUPPORT_BINARYSTREAMREADER_H
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/BinaryStreamArray.h"
@@ -18,7 +17,6 @@
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
-#include "llvm/Support/type_traits.h"
#include <type_traits>
namespace llvm {
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/BinaryStreamRef.h b/contrib/llvm-project/llvm/include/llvm/Support/BinaryStreamRef.h
index e0aaab82ffab..bc8c6a496ecf 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/BinaryStreamRef.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/BinaryStreamRef.h
@@ -14,7 +14,6 @@
#include "llvm/Support/BinaryStream.h"
#include "llvm/Support/BinaryStreamError.h"
#include "llvm/Support/Error.h"
-#include <algorithm>
#include <cstdint>
#include <memory>
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/BinaryStreamWriter.h b/contrib/llvm-project/llvm/include/llvm/Support/BinaryStreamWriter.h
index 3054f4ac7ef0..c05b0420aaa3 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/BinaryStreamWriter.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/BinaryStreamWriter.h
@@ -10,7 +10,6 @@
#define LLVM_SUPPORT_BINARYSTREAMWRITER_H
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/BinaryStreamError.h"
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/BlockFrequency.h b/contrib/llvm-project/llvm/include/llvm/Support/BlockFrequency.h
index 18fb60e1904b..bf0ad46ab499 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/BlockFrequency.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/BlockFrequency.h
@@ -13,12 +13,11 @@
#ifndef LLVM_SUPPORT_BLOCKFREQUENCY_H
#define LLVM_SUPPORT_BLOCKFREQUENCY_H
-#include "llvm/Support/BranchProbability.h"
-#include "llvm/Support/DataTypes.h"
+#include <cstdint>
namespace llvm {
-class raw_ostream;
+class BranchProbability;
// This class represents Block Frequency as a 64-bit value.
class BlockFrequency {
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/BranchProbability.h b/contrib/llvm-project/llvm/include/llvm/Support/BranchProbability.h
index 6c7ad1fe2a52..6f071c15421f 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/BranchProbability.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/BranchProbability.h
@@ -16,7 +16,6 @@
#include "llvm/Support/DataTypes.h"
#include <algorithm>
#include <cassert>
-#include <climits>
#include <numeric>
namespace llvm {
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/Caching.h b/contrib/llvm-project/llvm/include/llvm/Support/Caching.h
index 5c30a822ef38..bef23ae757f2 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/Caching.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/Caching.h
@@ -62,10 +62,11 @@ using AddBufferFn =
std::function<void(unsigned Task, std::unique_ptr<MemoryBuffer> MB)>;
/// Create a local file system cache which uses the given cache name, temporary
-/// file prefix, cache directory and file callback. This function also creates
-/// the cache directory if it does not already exist. The cache name appears in
-/// error messages for errors during caching. The temporary file prefix is used
-/// in the temporary file naming scheme used when writing files atomically.
+/// file prefix, cache directory and file callback. This function does not
+/// immediately create the cache directory if it does not yet exist; this is
+/// done lazily the first time a file is added. The cache name appears in error
+/// messages for errors during caching. The temporary file prefix is used in the
+/// temporary file naming scheme used when writing files atomically.
Expected<FileCache> localCache(
Twine CacheNameRef, Twine TempFilePrefixRef, Twine CacheDirectoryPathRef,
AddBufferFn AddBuffer = [](size_t Task, std::unique_ptr<MemoryBuffer> MB) {
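For context, a hedged usage sketch of localCache with the lazier behaviour documented above; the cache name and directory are made up, and the default AddBuffer callback is left in place:

```cpp
#include "llvm/Support/Caching.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::Expected<llvm::FileCache> Cache =
      llvm::localCache("MyToolCache", "Tmp", "/tmp/mytool-cache");
  if (!Cache) {
    llvm::logAllUnhandledErrors(Cache.takeError(), llvm::errs(), "cache: ");
    return 1;
  }
  // With this change, /tmp/mytool-cache is *not* created at this point; it
  // appears only once the first cached file is actually added.
  return 0;
}
```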
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/Chrono.h b/contrib/llvm-project/llvm/include/llvm/Support/Chrono.h
index 629a37a90aae..9c2bd45d2803 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/Chrono.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/Chrono.h
@@ -14,6 +14,7 @@
#include <chrono>
#include <ctime>
+#include <ratio>
namespace llvm {
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/CodeGenCoverage.h b/contrib/llvm-project/llvm/include/llvm/Support/CodeGenCoverage.h
index a5b1796ca422..2acdd6a36a51 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/CodeGenCoverage.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/CodeGenCoverage.h
@@ -14,7 +14,6 @@
#include "llvm/ADT/BitVector.h"
namespace llvm {
-class LLVMContext;
class MemoryBuffer;
class CodeGenCoverage {
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/CommandLine.h b/contrib/llvm-project/llvm/include/llvm/Support/CommandLine.h
index 2ee02010ff1d..120ab1840915 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/CommandLine.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/CommandLine.h
@@ -1550,8 +1550,9 @@ public:
}
template <class T> void addValue(const T &V) {
- assert(Location != 0 && "cl::location(...) not specified for a command "
- "line option with external storage!");
+ assert(Location != nullptr &&
+ "cl::location(...) not specified for a command "
+ "line option with external storage!");
Location->push_back(V);
}
};
@@ -1754,8 +1755,9 @@ public:
}
template <class T> void addValue(const T &V) {
- assert(Location != 0 && "cl::location(...) not specified for a command "
- "line option with external storage!");
+ assert(Location != nullptr &&
+ "cl::location(...) not specified for a command "
+ "line option with external storage!");
*Location |= Bit(V);
}
@@ -2080,7 +2082,8 @@ void tokenizeConfigFile(StringRef Source, StringSaver &Saver,
///
/// It reads content of the specified file, tokenizes it and expands "@file"
/// commands resolving file names in them relative to the directory where
-/// CfgFilename resides.
+/// CfgFilename resides. It also expands "<CFGDIR>" to the base path of the
+/// current config file.
///
bool readConfigFile(StringRef CfgFileName, StringSaver &Saver,
SmallVectorImpl<const char *> &Argv);
@@ -2100,13 +2103,15 @@ bool readConfigFile(StringRef CfgFileName, StringSaver &Saver,
/// with nullptrs in the Argv vector.
/// \param [in] RelativeNames true if names of nested response files must be
/// resolved relative to including file.
+/// \param [in] ExpandBasePath If true, "<CFGDIR>" expands to the base path of
+/// the current response file.
/// \param [in] FS File system used for all file access when running the tool.
/// \param [in] CurrentDir Path used to resolve relative rsp files. If set to
/// None, process' cwd is used instead.
/// \return true if all @files were expanded successfully or there were none.
bool ExpandResponseFiles(StringSaver &Saver, TokenizerCallback Tokenizer,
SmallVectorImpl<const char *> &Argv, bool MarkEOLs,
- bool RelativeNames,
+ bool RelativeNames, bool ExpandBasePath,
llvm::Optional<llvm::StringRef> CurrentDir,
llvm::vfs::FileSystem &FS);
@@ -2115,7 +2120,7 @@ bool ExpandResponseFiles(StringSaver &Saver, TokenizerCallback Tokenizer,
bool ExpandResponseFiles(
StringSaver &Saver, TokenizerCallback Tokenizer,
SmallVectorImpl<const char *> &Argv, bool MarkEOLs = false,
- bool RelativeNames = false,
+ bool RelativeNames = false, bool ExpandBasePath = false,
llvm::Optional<llvm::StringRef> CurrentDir = llvm::None);
/// A convenience helper which concatenates the options specified by the
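A short sketch of the documented `<CFGDIR>` expansion from the caller's side. The config path and its contents are invented for illustration; only the readConfigFile signature above is taken from the header.

```cpp
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/StringSaver.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  // Suppose /etc/mytool/mytool.cfg contains:
  //   -I<CFGDIR>/include
  //   @<CFGDIR>/extra.rsp
  // <CFGDIR> now expands to /etc/mytool, the directory holding the file.
  llvm::BumpPtrAllocator Alloc;
  llvm::StringSaver Saver(Alloc);
  llvm::SmallVector<const char *, 16> Argv;
  if (!llvm::cl::readConfigFile("/etc/mytool/mytool.cfg", Saver, Argv))
    return 1;
  for (const char *Arg : Argv)
    llvm::outs() << Arg << "\n";
  return 0;
}
```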
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/Compiler.h b/contrib/llvm-project/llvm/include/llvm/Support/Compiler.h
index b31ba6bc7fc2..f4c277fae7cc 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/Compiler.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/Compiler.h
@@ -17,9 +17,6 @@
#include "llvm/Config/llvm-config.h"
-#ifdef __cplusplus
-#include <new>
-#endif
#include <stddef.h>
#if defined(_MSC_VER)
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/ConvertUTF.h b/contrib/llvm-project/llvm/include/llvm/Support/ConvertUTF.h
index 1add185330fa..374cdb907fdc 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/ConvertUTF.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/ConvertUTF.h
@@ -91,7 +91,10 @@
#include <cstddef>
#include <string>
+
+#if defined(_WIN32)
#include <system_error>
+#endif
// Wrap everything in namespace llvm so that programs can link with llvm and
// their own version of the unicode libraries.
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/CrashRecoveryContext.h b/contrib/llvm-project/llvm/include/llvm/Support/CrashRecoveryContext.h
index 2604ccb38431..f60e7335e197 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/CrashRecoveryContext.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/CrashRecoveryContext.h
@@ -9,7 +9,7 @@
#ifndef LLVM_SUPPORT_CRASHRECOVERYCONTEXT_H
#define LLVM_SUPPORT_CRASHRECOVERYCONTEXT_H
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/STLFunctionalExtras.h"
namespace llvm {
class CrashRecoveryContextCleanup;
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/DivisionByConstantInfo.h b/contrib/llvm-project/llvm/include/llvm/Support/DivisionByConstantInfo.h
index 5bb326178c3e..896bc679885e 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/DivisionByConstantInfo.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/DivisionByConstantInfo.h
@@ -10,8 +10,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_SUPPORT_DIVISON_BY_CONSTANT_INFO_H
-#define LLVM_SUPPORT_DIVISON_BY_CONSTANT_INFO_H
+#ifndef LLVM_SUPPORT_DIVISIONBYCONSTANTINFO_H
+#define LLVM_SUPPORT_DIVISIONBYCONSTANTINFO_H
#include "llvm/ADT/APInt.h"
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/Duration.h b/contrib/llvm-project/llvm/include/llvm/Support/Duration.h
new file mode 100644
index 000000000000..a5a0e2a3357a
--- /dev/null
+++ b/contrib/llvm-project/llvm/include/llvm/Support/Duration.h
@@ -0,0 +1,28 @@
+//===--- Duration.h - wrapper around std::chrono::Duration ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// The sole purpose of this file is to avoid the dependency on <chrono> in
+// raw_ostream.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_DURATION_H
+#define LLVM_SUPPORT_DURATION_H
+
+#include <chrono>
+
+namespace llvm {
+class Duration {
+ std::chrono::milliseconds Value;
+ public:
+ Duration(std::chrono::milliseconds Value) : Value(Value) {}
+ std::chrono::milliseconds getDuration() const { return Value; }
+};
+}
+
+#endif
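The only consumer here appears to be raw_fd_ostream::tryLockFor (see the raw_ostream.h hunk later in this diff), which previously forced raw_ostream.h to pull in <chrono>. A hedged sketch with an invented output path:

```cpp
#include "llvm/Support/Duration.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"
#include <chrono>
#include <system_error>

int main() {
  std::error_code EC;
  llvm::raw_fd_ostream OS("/tmp/duration-demo.txt", EC);
  if (EC)
    return 1;
  // The wrapper carries the std::chrono value across the now chrono-free
  // raw_ostream.h interface.
  auto Locked = OS.tryLockFor(llvm::Duration(std::chrono::milliseconds(250)));
  if (!Locked) {
    llvm::consumeError(Locked.takeError());
    return 1;
  }
  OS << "locked and written\n";   // the FileLocker releases the lock at scope exit
  return 0;
}
```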
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/ELFAttributeParser.h b/contrib/llvm-project/llvm/include/llvm/Support/ELFAttributeParser.h
index 8bf87b2d84f0..3062dfffff68 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/ELFAttributeParser.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/ELFAttributeParser.h
@@ -10,15 +10,16 @@
#define LLVM_SUPPORT_ELFATTRIBUTEPARSER_H
#include "ELFAttributes.h"
-#include "ScopedPrinter.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/DataExtractor.h"
+#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include <unordered_map>
namespace llvm {
class StringRef;
+class ScopedPrinter;
class ELFAttributeParser {
StringRef vendor;
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/Error.h b/contrib/llvm-project/llvm/include/llvm/Support/Error.h
index e2002b89ada2..881049b15b0d 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/Error.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/Error.h
@@ -14,7 +14,6 @@
#define LLVM_SUPPORT_ERROR_H
#include "llvm-c/Error.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
@@ -26,7 +25,6 @@
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
-#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/ExtensibleRTTI.h b/contrib/llvm-project/llvm/include/llvm/Support/ExtensibleRTTI.h
index 21055247e932..d3193be6f529 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/ExtensibleRTTI.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/ExtensibleRTTI.h
@@ -62,8 +62,6 @@
namespace llvm {
-template <typename ThisT, typename ParentT> class RTTIExtends;
-
/// Base class for the extensible RTTI hierarchy.
///
/// This class defines virtual methods, dynamicClassID and isA, that enable
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/FileCollector.h b/contrib/llvm-project/llvm/include/llvm/Support/FileCollector.h
index 264fb55c9dba..232dc8658aa3 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/FileCollector.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/FileCollector.h
@@ -9,7 +9,6 @@
#ifndef LLVM_SUPPORT_FILECOLLECTOR_H
#define LLVM_SUPPORT_FILECOLLECTOR_H
-#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Support/VirtualFileSystem.h"
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/FileSystem.h b/contrib/llvm-project/llvm/include/llvm/Support/FileSystem.h
index 1a049533b82b..033482977a10 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/FileSystem.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/FileSystem.h
@@ -1014,6 +1014,25 @@ file_t getStderrHandle();
/// @returns The number of bytes read, or error.
Expected<size_t> readNativeFile(file_t FileHandle, MutableArrayRef<char> Buf);
+/// Default chunk size for \a readNativeFileToEOF().
+enum : size_t { DefaultReadChunkSize = 4 * 4096 };
+
+/// Reads from \p FileHandle until EOF, appending to \p Buffer in chunks of
+/// size \p ChunkSize.
+///
+/// This calls \a readNativeFile() in a loop. On Error, previous chunks that
+/// were read successfully are left in \p Buffer and returned.
+///
+/// Note: For reading the final chunk at EOF, \p Buffer's capacity needs extra
+/// storage of \p ChunkSize.
+///
+/// \param FileHandle File to read from.
+/// \param Buffer Where to put the file content.
+/// \param ChunkSize Size of chunks.
+/// \returns The error if EOF was not found.
+Error readNativeFileToEOF(file_t FileHandle, SmallVectorImpl<char> &Buffer,
+ ssize_t ChunkSize = DefaultReadChunkSize);
+
/// Reads \p Buf.size() bytes from \p FileHandle at offset \p Offset into \p
/// Buf. If 'pread' is available, this will use that, otherwise it will use
/// 'lseek'. Returns the number of bytes actually read. Returns 0 when reaching
@@ -1279,6 +1298,7 @@ private:
}
void unmapImpl();
+ void dontNeedImpl();
std::error_code init(sys::fs::file_t FD, uint64_t Offset, mapmode Mode);
@@ -1308,6 +1328,7 @@ public:
unmapImpl();
copyFrom(mapped_file_region());
}
+ void dontNeed() { dontNeedImpl(); }
size_t size() const;
char *data() const;
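A usage sketch for the new readNativeFileToEOF; the path is a placeholder, and openNativeFileForRead/closeFile are assumed from the existing FileSystem.h interface rather than from this hunk.

```cpp
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::Expected<llvm::sys::fs::file_t> FD =
      llvm::sys::fs::openNativeFileForRead("/etc/hosts");
  if (!FD) {
    llvm::consumeError(FD.takeError());
    return 1;
  }
  // Reads in DefaultReadChunkSize pieces until EOF; note the documented
  // extra ChunkSize of capacity used for the final read.
  llvm::SmallVector<char, 0> Buffer;
  if (llvm::Error E = llvm::sys::fs::readNativeFileToEOF(*FD, Buffer)) {
    llvm::consumeError(std::move(E));
  } else {
    llvm::outs() << "read " << Buffer.size() << " bytes\n";
  }
  (void)llvm::sys::fs::closeFile(*FD);
  return 0;
}
```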
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/FileUtilities.h b/contrib/llvm-project/llvm/include/llvm/Support/FileUtilities.h
index 04efdced32a4..f8a37fe1177d 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/FileUtilities.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/FileUtilities.h
@@ -15,10 +15,10 @@
#define LLVM_SUPPORT_FILEUTILITIES_H
#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/Errc.h"
-#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Error.h"
#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/Path.h"
+
+#include <system_error>
namespace llvm {
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/FormatVariadic.h b/contrib/llvm-project/llvm/include/llvm/Support/FormatVariadic.h
index 89575f01b717..a872afb5e45e 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/FormatVariadic.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/FormatVariadic.h
@@ -29,16 +29,17 @@
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FormatCommon.h"
#include "llvm/Support/FormatProviders.h"
#include "llvm/Support/FormatVariadicDetails.h"
#include "llvm/Support/raw_ostream.h"
+#include <array>
#include <cstddef>
#include <string>
#include <tuple>
#include <utility>
-#include <vector>
namespace llvm {
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/FormatVariadicDetails.h b/contrib/llvm-project/llvm/include/llvm/Support/FormatVariadicDetails.h
index 08f8fc61f69b..2cafc120c1d7 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/FormatVariadicDetails.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/FormatVariadicDetails.h
@@ -10,6 +10,7 @@
#define LLVM_SUPPORT_FORMATVARIADICDETAILS_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <type_traits>
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/GraphWriter.h b/contrib/llvm-project/llvm/include/llvm/Support/GraphWriter.h
index 1c0f5f702c6d..515057e7e312 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/GraphWriter.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/GraphWriter.h
@@ -28,8 +28,6 @@
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
-#include <algorithm>
-#include <cstddef>
#include <iterator>
#include <string>
#include <type_traits>
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/ItaniumManglingCanonicalizer.h b/contrib/llvm-project/llvm/include/llvm/Support/ItaniumManglingCanonicalizer.h
index 8e1b3d631983..aa7997a0228b 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/ItaniumManglingCanonicalizer.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/ItaniumManglingCanonicalizer.h
@@ -14,7 +14,6 @@
#ifndef LLVM_SUPPORT_ITANIUMMANGLINGCANONICALIZER_H
#define LLVM_SUPPORT_ITANIUMMANGLINGCANONICALIZER_H
-#include <cstddef>
#include <cstdint>
namespace llvm {
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/JSON.h b/contrib/llvm-project/llvm/include/llvm/Support/JSON.h
index 469f50be40e0..719e8b60d0fa 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/JSON.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/JSON.h
@@ -49,6 +49,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/KnownBits.h b/contrib/llvm-project/llvm/include/llvm/Support/KnownBits.h
index 1f32760a6fd1..5ef0ba31f785 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/KnownBits.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/KnownBits.h
@@ -249,7 +249,17 @@ public:
return countMinLeadingZeros();
if (isNegative())
return countMinLeadingOnes();
- return 0;
+ // Every value has at least 1 sign bit.
+ return 1;
+ }
+
+ /// Returns the maximum number of bits needed to represent all possible
+ /// signed values with these known bits. This is the inverse of the minimum
+ /// number of known sign bits. Examples for bitwidth 5:
+ /// 110?? --> 4
+ /// 0000? --> 2
+ unsigned countMaxSignificantBits() const {
+ return getBitWidth() - countMinSignBits() + 1;
}
/// Returns the maximum number of trailing zero bits possible.
@@ -282,6 +292,9 @@ public:
return getBitWidth() - Zero.countPopulation();
}
+ /// Returns the maximum number of bits needed to represent all possible
+ /// unsigned values with these known bits. This is the inverse of the
+ /// minimum number of leading zeros.
unsigned countMaxActiveBits() const {
return getBitWidth() - countMinLeadingZeros();
}
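The 110?? example in the new doc comment can be checked directly against the API; this sketch assumes only KnownBits' public One/Zero members and the two counting helpers touched here.

```cpp
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  // Bit width 5, pattern 110??: bits 4 and 3 known one, bit 2 known zero,
  // bits 1 and 0 unknown.
  llvm::KnownBits Known(5);
  Known.One.setBit(4);
  Known.One.setBit(3);
  Known.Zero.setBit(2);
  // Two known sign bits (the leading 1s), so at most 5 - 2 + 1 = 4 bits are
  // needed to represent any signed value matching the pattern.
  llvm::outs() << "countMinSignBits        = " << Known.countMinSignBits()
               << "\n";                                        // prints 2
  llvm::outs() << "countMaxSignificantBits = "
               << Known.countMaxSignificantBits() << "\n";     // prints 4
  return 0;
}
```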
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/LowLevelTypeImpl.h b/contrib/llvm-project/llvm/include/llvm/Support/LowLevelTypeImpl.h
index 2071a08d8711..dd286f5228fe 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/LowLevelTypeImpl.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/LowLevelTypeImpl.h
@@ -33,7 +33,6 @@
namespace llvm {
-class DataLayout;
class Type;
class raw_ostream;
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/MD5.h b/contrib/llvm-project/llvm/include/llvm/Support/MD5.h
index 3b960cd4fd88..70d046601346 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/MD5.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/MD5.h
@@ -88,7 +88,7 @@ public:
/// Translates the bytes in \p Res to a hex string that is
/// deposited into \p Str. The result will be of length 32.
- static void stringifyResult(MD5Result &Result, SmallString<32> &Str);
+ static void stringifyResult(MD5Result &Result, SmallVectorImpl<char> &Str);
/// Computes the hash for a given bytes.
static std::array<uint8_t, 16> hash(ArrayRef<uint8_t> Data);
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/MachineValueType.h b/contrib/llvm-project/llvm/include/llvm/Support/MachineValueType.h
index ce10a4c58dfe..643c2d8ce981 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/MachineValueType.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/MachineValueType.h
@@ -848,7 +848,11 @@ namespace llvm {
}
unsigned getVectorNumElements() const {
- // TODO: Check that this isn't a scalable vector.
+ if (isScalableVector())
+ llvm::reportInvalidSizeRequest(
+ "Possible incorrect use of MVT::getVectorNumElements() for "
+ "scalable vector. Scalable flag may be dropped, use "
+ "MVT::getVectorElementCount() instead");
return getVectorMinNumElements();
}
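In other words, code that might see a scalable type should switch to getVectorElementCount(), which preserves the scalable flag. A small sketch using MVT::nxv4i32 as a sample scalable type:

```cpp
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::MVT VT = llvm::MVT::nxv4i32;   // <vscale x 4 x i32>
  // VT.getVectorNumElements() would now diagnose an invalid size request for
  // this type; the element count keeps the scalable bit instead.
  llvm::ElementCount EC = VT.getVectorElementCount();
  llvm::outs() << "min elements: " << EC.getKnownMinValue() << ", scalable: "
               << (EC.isScalable() ? "yes" : "no") << "\n";
  return 0;
}
```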
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/MemoryBuffer.h b/contrib/llvm-project/llvm/include/llvm/Support/MemoryBuffer.h
index c9ceeedbf3dc..6385805eba1d 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/MemoryBuffer.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/MemoryBuffer.h
@@ -74,6 +74,13 @@ public:
/// from.
virtual StringRef getBufferIdentifier() const { return "Unknown buffer"; }
+ /// For read-only MemoryBuffer_MMap, mark the buffer as unused in the near
+ /// future and the kernel can free resources associated with it. Further
+ /// access is supported but may be expensive. This calls
+ /// madvise(MADV_DONTNEED) on read-only file mappings on *NIX systems. This
+ /// function should not be called on a writable buffer.
+ virtual void dontNeedIfMmap() {}
+
/// Open the specified file as a MemoryBuffer, returning a new MemoryBuffer
/// if successful, otherwise returning null.
///
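A hedged sketch of the intended call pattern for dontNeedIfMmap; the log path is invented, and whether the buffer is actually mmap-backed depends on MemoryBuffer::getFile's usual heuristics, which is why the no-op default above matters:

```cpp
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>

int main() {
  auto BufOrErr = llvm::MemoryBuffer::getFile("/var/log/some-large-file.log");
  if (std::error_code EC = BufOrErr.getError()) {
    llvm::errs() << "open failed: " << EC.message() << "\n";
    return 1;
  }
  std::unique_ptr<llvm::MemoryBuffer> Buf = std::move(*BufOrErr);
  // ... scan Buf->getBuffer() once ...
  // Afterwards, hint that the pages are no longer needed. For a read-only
  // mmap-backed buffer this becomes madvise(MADV_DONTNEED) on *NIX; for any
  // other buffer kind the base-class default makes it a no-op.
  Buf->dontNeedIfMmap();
  return 0;
}
```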
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/Parallel.h b/contrib/llvm-project/llvm/include/llvm/Support/Parallel.h
index 5c3b26d5754c..04caf5eac961 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/Parallel.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/Parallel.h
@@ -130,64 +130,6 @@ void parallel_sort(RandomAccessIterator Start, RandomAccessIterator End,
// improving to take the number of available cores into account.)
enum { MaxTasksPerGroup = 1024 };
-template <class IterTy, class FuncTy>
-void parallel_for_each(IterTy Begin, IterTy End, FuncTy Fn) {
- // If we have zero or one items, then do not incur the overhead of spinning up
- // a task group. They are surprisingly expensive, and because they do not
- // support nested parallelism, a single entry task group can block parallel
- // execution underneath them.
- auto NumItems = std::distance(Begin, End);
- if (NumItems <= 1) {
- if (NumItems)
- Fn(*Begin);
- return;
- }
-
- // Limit the number of tasks to MaxTasksPerGroup to limit job scheduling
- // overhead on large inputs.
- ptrdiff_t TaskSize = NumItems / MaxTasksPerGroup;
- if (TaskSize == 0)
- TaskSize = 1;
-
- TaskGroup TG;
- while (TaskSize < std::distance(Begin, End)) {
- TG.spawn([=, &Fn] { std::for_each(Begin, Begin + TaskSize, Fn); });
- Begin += TaskSize;
- }
- std::for_each(Begin, End, Fn);
-}
-
-template <class IndexTy, class FuncTy>
-void parallel_for_each_n(IndexTy Begin, IndexTy End, FuncTy Fn) {
- // If we have zero or one items, then do not incur the overhead of spinning up
- // a task group. They are surprisingly expensive, and because they do not
- // support nested parallelism, a single entry task group can block parallel
- // execution underneath them.
- auto NumItems = End - Begin;
- if (NumItems <= 1) {
- if (NumItems)
- Fn(Begin);
- return;
- }
-
- // Limit the number of tasks to MaxTasksPerGroup to limit job scheduling
- // overhead on large inputs.
- ptrdiff_t TaskSize = NumItems / MaxTasksPerGroup;
- if (TaskSize == 0)
- TaskSize = 1;
-
- TaskGroup TG;
- IndexTy I = Begin;
- for (; I + TaskSize < End; I += TaskSize) {
- TG.spawn([=, &Fn] {
- for (IndexTy J = I, E = I + TaskSize; J != E; ++J)
- Fn(J);
- });
- }
- for (IndexTy J = I; J < End; ++J)
- Fn(J);
-}
-
template <class IterTy, class ResultTy, class ReduceFuncTy,
class TransformFuncTy>
ResultTy parallel_transform_reduce(IterTy Begin, IterTy End, ResultTy Init,
@@ -251,27 +193,11 @@ void parallelSort(RandomAccessIterator Start, RandomAccessIterator End,
llvm::sort(Start, End, Comp);
}
+void parallelForEachN(size_t Begin, size_t End, function_ref<void(size_t)> Fn);
+
template <class IterTy, class FuncTy>
void parallelForEach(IterTy Begin, IterTy End, FuncTy Fn) {
-#if LLVM_ENABLE_THREADS
- if (parallel::strategy.ThreadsRequested != 1) {
- parallel::detail::parallel_for_each(Begin, End, Fn);
- return;
- }
-#endif
- std::for_each(Begin, End, Fn);
-}
-
-template <class FuncTy>
-void parallelForEachN(size_t Begin, size_t End, FuncTy Fn) {
-#if LLVM_ENABLE_THREADS
- if (parallel::strategy.ThreadsRequested != 1) {
- parallel::detail::parallel_for_each_n(Begin, End, Fn);
- return;
- }
-#endif
- for (size_t I = Begin; I != End; ++I)
- Fn(I);
+ parallelForEachN(0, End - Begin, [&](size_t I) { Fn(Begin[I]); });
}
template <class IterTy, class ResultTy, class ReduceFuncTy,
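Call sites are unchanged by this refactor; both entry points behave as before, with the chunking into at most MaxTasksPerGroup tasks now living behind the single out-of-line parallelForEachN. A small usage sketch:

```cpp
#include "llvm/Support/Parallel.h"
#include <atomic>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> Values(1000, 1);
  std::atomic<int> Sum{0};

  // Iterator form: now a thin wrapper forwarding to parallelForEachN.
  llvm::parallelForEach(Values.begin(), Values.end(),
                        [&](int V) { Sum += V; });

  // Index form: the single out-of-line entry point taking a function_ref.
  llvm::parallelForEachN(0, Values.size(), [&](size_t I) { Values[I] *= 2; });

  std::printf("sum = %d, first element = %d\n", Sum.load(), Values[0]);
  return 0;
}
```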
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/RISCVISAInfo.h b/contrib/llvm-project/llvm/include/llvm/Support/RISCVISAInfo.h
index 1ba4d449b709..b450c1df3558 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/RISCVISAInfo.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/RISCVISAInfo.h
@@ -9,8 +9,6 @@
#ifndef LLVM_SUPPORT_RISCVISAINFO_H
#define LLVM_SUPPORT_RISCVISAINFO_H
-#include "llvm/ADT/Optional.h"
-#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"
@@ -61,9 +59,13 @@ public:
unsigned getXLen() const { return XLen; };
unsigned getFLen() const { return FLen; };
+ unsigned getMinVLen() const { return MinVLen; }
+ unsigned getMaxELen() const { return MaxELen; }
+ unsigned getMaxELenFp() const { return MaxELenFp; }
bool hasExtension(StringRef Ext) const;
std::string toString() const;
+ std::vector<std::string> toFeatureVector() const;
static bool isSupportedExtensionFeature(StringRef Ext);
static bool isSupportedExtension(StringRef Ext);
@@ -71,10 +73,13 @@ public:
unsigned MinorVersion);
private:
- RISCVISAInfo(unsigned XLen) : XLen(XLen), FLen(0) {}
+ RISCVISAInfo(unsigned XLen)
+ : XLen(XLen), FLen(0), MinVLen(0), MaxELen(0), MaxELenFp(0) {}
unsigned XLen;
unsigned FLen;
+ unsigned MinVLen;
+ unsigned MaxELen, MaxELenFp;
OrderedExtensionMap Exts;
@@ -85,6 +90,8 @@ private:
void updateImplication();
void updateFLen();
+ void updateMinVLen();
+ void updateMaxELen();
};
} // namespace llvm
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/ScopedPrinter.h b/contrib/llvm-project/llvm/include/llvm/Support/ScopedPrinter.h
index 865337e3cc7f..9bde4f455a2d 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/ScopedPrinter.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/ScopedPrinter.h
@@ -18,7 +18,6 @@
#include "llvm/Support/Endian.h"
#include "llvm/Support/JSON.h"
#include "llvm/Support/raw_ostream.h"
-#include <algorithm>
namespace llvm {
@@ -123,7 +122,7 @@ public:
void indent(int Levels = 1) { IndentLevel += Levels; }
void unindent(int Levels = 1) {
- IndentLevel = std::max(0, IndentLevel - Levels);
+ IndentLevel = IndentLevel > Levels ? IndentLevel - Levels : 0;
}
void resetIndent() { IndentLevel = 0; }
@@ -799,7 +798,7 @@ struct DelimitedScope {
};
struct DictScope : DelimitedScope {
- explicit DictScope() : DelimitedScope() {}
+ explicit DictScope() {}
explicit DictScope(ScopedPrinter &W) : DelimitedScope(W) { W.objectBegin(); }
DictScope(ScopedPrinter &W, StringRef N) : DelimitedScope(W) {
@@ -818,7 +817,7 @@ struct DictScope : DelimitedScope {
};
struct ListScope : DelimitedScope {
- explicit ListScope() : DelimitedScope() {}
+ explicit ListScope() {}
explicit ListScope(ScopedPrinter &W) : DelimitedScope(W) { W.arrayBegin(); }
ListScope(ScopedPrinter &W, StringRef N) : DelimitedScope(W) {
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/SymbolRemappingReader.h b/contrib/llvm-project/llvm/include/llvm/Support/SymbolRemappingReader.h
index 820cf9e02192..4fdaf87be082 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/SymbolRemappingReader.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/SymbolRemappingReader.h
@@ -62,10 +62,11 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ItaniumManglingCanonicalizer.h"
-#include "llvm/Support/MemoryBuffer.h"
namespace llvm {
+class MemoryBuffer;
+
class SymbolRemappingParseError : public ErrorInfo<SymbolRemappingParseError> {
public:
SymbolRemappingParseError(StringRef File, int64_t Line, const Twine &Message)
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/TargetOpcodes.def b/contrib/llvm-project/llvm/include/llvm/Support/TargetOpcodes.def
index b34b885ddc35..428cbb44705d 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/TargetOpcodes.def
+++ b/contrib/llvm-project/llvm/include/llvm/Support/TargetOpcodes.def
@@ -228,10 +228,11 @@ HANDLE_TARGET_OPCODE(ICALL_BRANCH_FUNNEL)
/// generate code. These instructions only act as optimization hints.
HANDLE_TARGET_OPCODE(G_ASSERT_SEXT)
HANDLE_TARGET_OPCODE(G_ASSERT_ZEXT)
+HANDLE_TARGET_OPCODE(G_ASSERT_ALIGN)
HANDLE_TARGET_OPCODE_MARKER(PRE_ISEL_GENERIC_OPTIMIZATION_HINT_START,
G_ASSERT_SEXT)
HANDLE_TARGET_OPCODE_MARKER(PRE_ISEL_GENERIC_OPTIMIZATION_HINT_END,
- G_ASSERT_ZEXT)
+ G_ASSERT_ALIGN)
/// Generic ADD instruction. This is an integer add.
HANDLE_TARGET_OPCODE(G_ADD)
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/TargetParser.h b/contrib/llvm-project/llvm/include/llvm/Support/TargetParser.h
index 01e25a0ea857..02a8d72483db 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/TargetParser.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/TargetParser.h
@@ -16,14 +16,14 @@
// FIXME: vector is used because that's what clang uses for subtarget feature
// lists, but SmallVector would probably be better
-#include "llvm/ADT/Triple.h"
-#include "llvm/Support/AArch64TargetParser.h"
-#include "llvm/Support/ARMTargetParser.h"
#include "llvm/Support/RISCVISAInfo.h"
#include <vector>
namespace llvm {
+
class StringRef;
+template <typename T> class SmallVectorImpl;
+class Triple;
// Target specific information in their own namespaces.
// (ARM/AArch64/X86 are declared in ARM/AArch64/X86TargetParser.h)
@@ -158,12 +158,7 @@ enum CPUKind : unsigned {
enum FeatureKind : unsigned {
FK_INVALID = 0,
FK_NONE = 1,
- FK_STDEXTM = 1 << 2,
- FK_STDEXTA = 1 << 3,
- FK_STDEXTF = 1 << 4,
- FK_STDEXTD = 1 << 5,
- FK_STDEXTC = 1 << 6,
- FK_64BIT = 1 << 7,
+ FK_64BIT = 1 << 2,
};
bool checkCPUKind(CPUKind Kind, bool IsRV64);
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/ThreadPool.h b/contrib/llvm-project/llvm/include/llvm/Support/ThreadPool.h
index aecff122d3cb..868dd2819f83 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/ThreadPool.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/ThreadPool.h
@@ -19,7 +19,6 @@
#include <future>
-#include <atomic>
#include <condition_variable>
#include <functional>
#include <memory>
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/TimeProfiler.h b/contrib/llvm-project/llvm/include/llvm/Support/TimeProfiler.h
index 84794a25f78e..378253dc2ab9 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/TimeProfiler.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/TimeProfiler.h
@@ -10,10 +10,12 @@
#define LLVM_SUPPORT_TIMEPROFILER_H
#include "llvm/Support/Error.h"
-#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/STLFunctionalExtras.h"
namespace llvm {
+class raw_pwrite_stream;
+
struct TimeTraceProfiler;
TimeTraceProfiler *getTimeTraceProfilerInstance();
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/Timer.h b/contrib/llvm-project/llvm/include/llvm/Support/Timer.h
index c5874ed35698..eb49e805b40d 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/Timer.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/Timer.h
@@ -13,13 +13,12 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DataTypes.h"
#include <cassert>
+#include <memory>
#include <string>
-#include <utility>
#include <vector>
namespace llvm {
-class Timer;
class TimerGroup;
class raw_ostream;
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/TrigramIndex.h b/contrib/llvm-project/llvm/include/llvm/Support/TrigramIndex.h
index 0be6a1012718..f772deca0301 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/TrigramIndex.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/TrigramIndex.h
@@ -33,7 +33,6 @@
#include <vector>
namespace llvm {
-class StringRef;
class TrigramIndex {
public:
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/TypeSize.h b/contrib/llvm-project/llvm/include/llvm/Support/TypeSize.h
index 7d1274735a37..6bddb602e8c1 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/TypeSize.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/TypeSize.h
@@ -17,7 +17,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/WithColor.h"
+#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <array>
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/VirtualFileSystem.h b/contrib/llvm-project/llvm/include/llvm/Support/VirtualFileSystem.h
index 78bcdbf3e932..f5dde334b0a7 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/VirtualFileSystem.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/VirtualFileSystem.h
@@ -19,6 +19,7 @@
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/Support/Chrono.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/FileSystem.h"
@@ -419,6 +420,21 @@ namespace detail {
class InMemoryDirectory;
class InMemoryFile;
+class InMemoryNode;
+
+struct NewInMemoryNodeInfo {
+ llvm::sys::fs::UniqueID DirUID;
+ StringRef Path;
+ StringRef Name;
+ time_t ModificationTime;
+ std::unique_ptr<llvm::MemoryBuffer> Buffer;
+ uint32_t User;
+ uint32_t Group;
+ llvm::sys::fs::file_type Type;
+ llvm::sys::fs::perms Perms;
+
+ Status makeStatus() const;
+};
} // namespace detail
@@ -428,14 +444,15 @@ class InMemoryFileSystem : public FileSystem {
std::string WorkingDirectory;
bool UseNormalizedPaths = true;
- /// If HardLinkTarget is non-null, a hardlink is created to the To path which
- /// must be a file. If it is null then it adds the file as the public addFile.
+ using MakeNodeFn = llvm::function_ref<std::unique_ptr<detail::InMemoryNode>(
+ detail::NewInMemoryNodeInfo)>;
+
+ /// Create node with \p MakeNode and add it into this filesystem at \p Path.
bool addFile(const Twine &Path, time_t ModificationTime,
std::unique_ptr<llvm::MemoryBuffer> Buffer,
Optional<uint32_t> User, Optional<uint32_t> Group,
Optional<llvm::sys::fs::file_type> Type,
- Optional<llvm::sys::fs::perms> Perms,
- const detail::InMemoryFile *HardLinkTarget);
+ Optional<llvm::sys::fs::perms> Perms, MakeNodeFn MakeNode);
public:
explicit InMemoryFileSystem(bool UseNormalizedPaths = true);
@@ -547,6 +564,9 @@ class RedirectingFileSystemParser;
/// }
/// \endverbatim
///
+/// The roots may be absolute or relative. If relative they will be made
+/// absolute against the current working directory.
+///
/// All configuration options are optional.
/// 'case-sensitive': <boolean, default=(true for Posix, false for Windows)>
/// 'use-external-names': <boolean, default=true>
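The public InMemoryFileSystem surface is unchanged by the MakeNodeFn refactor above; callers keep using addFile and friends as before. A short sketch, with an invented virtual path:

```cpp
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> FS(
      new llvm::vfs::InMemoryFileSystem());
  // Internally this now routes through the private MakeNodeFn factory, but
  // the call looks exactly as it did before the refactor.
  FS->addFile("/virtual/hello.txt", /*ModificationTime=*/0,
              llvm::MemoryBuffer::getMemBuffer("hello, vfs\n"));
  auto Buf = FS->getBufferForFile("/virtual/hello.txt");
  if (Buf)
    llvm::outs() << (*Buf)->getBuffer();
  return 0;
}
```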
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/X86TargetParser.h b/contrib/llvm-project/llvm/include/llvm/Support/X86TargetParser.h
index bfa3e23dbd9d..612046f3b2d9 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/X86TargetParser.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/X86TargetParser.h
@@ -14,10 +14,10 @@
#define LLVM_SUPPORT_X86TARGETPARSER_H
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
namespace llvm {
+template <typename T> class SmallVectorImpl;
class StringRef;
namespace X86 {
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/YAMLTraits.h b/contrib/llvm-project/llvm/include/llvm/Support/YAMLTraits.h
index bea232e6e000..7ad73543fc6e 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/YAMLTraits.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/YAMLTraits.h
@@ -9,6 +9,7 @@
#ifndef LLVM_SUPPORT_YAMLTRAITS_H
#define LLVM_SUPPORT_YAMLTRAITS_H
+#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
@@ -18,17 +19,12 @@
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Endian.h"
-#include "llvm/Support/Regex.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/SourceMgr.h"
-#include "llvm/Support/VersionTuple.h"
#include "llvm/Support/YAMLParser.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cctype>
-#include <cstddef>
-#include <cstdint>
-#include <iterator>
#include <map>
#include <memory>
#include <new>
@@ -38,6 +34,9 @@
#include <vector>
namespace llvm {
+
+class VersionTuple;
+
namespace yaml {
enum class NodeKind : uint8_t {
@@ -1523,7 +1522,7 @@ private:
std::error_code EC;
BumpPtrAllocator StringAllocator;
document_iterator DocIterator;
- std::vector<bool> BitValuesUsed;
+ llvm::BitVector BitValuesUsed;
HNode *CurrentNode = nullptr;
bool ScalarMatchFound = false;
bool AllowUnknownKeys = false;
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/raw_ostream.h b/contrib/llvm-project/llvm/include/llvm/Support/raw_ostream.h
index 98c26ef0b1e5..58adb41cb0ef 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/raw_ostream.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/raw_ostream.h
@@ -15,9 +15,9 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/Support/DataTypes.h"
#include <cassert>
-#include <chrono>
#include <cstddef>
#include <cstdint>
#include <cstring>
@@ -30,6 +30,7 @@
namespace llvm {
+class Duration;
class formatv_object_base;
class format_object_base;
class FormattedString;
@@ -444,6 +445,7 @@ class raw_fd_ostream : public raw_pwrite_stream {
int FD;
bool ShouldClose;
bool SupportsSeeking = false;
+ bool IsRegularFile = false;
mutable Optional<bool> HasColors;
#ifdef _WIN32
@@ -514,6 +516,8 @@ public:
bool supportsSeeking() const { return SupportsSeeking; }
+ bool isRegularFile() const { return IsRegularFile; }
+
/// Flushes the stream and repositions the underlying file descriptor position
/// to the offset specified from the beginning of the file.
uint64_t seek(uint64_t off);
@@ -571,7 +575,7 @@ public:
///
/// It is used as @ref lock.
LLVM_NODISCARD
- Expected<sys::fs::FileLocker> tryLockFor(std::chrono::milliseconds Timeout);
+ Expected<sys::fs::FileLocker> tryLockFor(Duration const& Timeout);
};
/// This returns a reference to a raw_fd_ostream for standard output. Use it
@@ -622,6 +626,9 @@ public:
/// A raw_ostream that writes to an std::string. This is a simple adaptor
/// class. This class does not encounter output errors.
+/// raw_string_ostream operates without a buffer, delegating all memory
+/// management to the std::string. Thus the std::string is always up-to-date,
+/// may be used directly and there is no need to call flush().
class raw_string_ostream : public raw_ostream {
std::string &OS;
@@ -636,14 +643,11 @@ public:
explicit raw_string_ostream(std::string &O) : OS(O) {
SetUnbuffered();
}
- ~raw_string_ostream() override;
- /// Flushes the stream contents to the target string and returns the string's
- /// reference.
- std::string& str() {
- flush();
- return OS;
- }
+ /// Returns the string's reference. In most cases it is better to simply use
+ /// the underlying std::string directly.
+ /// TODO: Consider removing this API.
+ std::string &str() { return OS; }
void reserveExtraSpace(uint64_t ExtraSize) override {
OS.reserve(tell() + ExtraSize);
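The raw_ostream.h hunk above documents that raw_string_ostream is unbuffered and that str() no longer flushes. A minimal sketch of the resulting usage pattern (renderValue is a made-up helper, not part of the change):

#include "llvm/Support/raw_ostream.h"
#include <string>

std::string renderValue(int N) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  OS << "value = " << N; // written straight through to Buf, no buffering
  return Buf;            // already up to date, no flush() required
}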
diff --git a/contrib/llvm-project/llvm/include/llvm/TableGen/Record.h b/contrib/llvm-project/llvm/include/llvm/TableGen/Record.h
index 5869a5cf0423..add05bd078d6 100644
--- a/contrib/llvm-project/llvm/include/llvm/TableGen/Record.h
+++ b/contrib/llvm-project/llvm/include/llvm/TableGen/Record.h
@@ -44,7 +44,6 @@ struct RecordContext;
} // namespace detail
class ListRecTy;
-struct MultiClass;
class Record;
class RecordKeeper;
class RecordVal;
diff --git a/contrib/llvm-project/llvm/include/llvm/Target/CGPassBuilderOption.h b/contrib/llvm-project/llvm/include/llvm/Target/CGPassBuilderOption.h
index 11b904e6e7fe..f84889392d13 100644
--- a/contrib/llvm-project/llvm/include/llvm/Target/CGPassBuilderOption.h
+++ b/contrib/llvm-project/llvm/include/llvm/Target/CGPassBuilderOption.h
@@ -19,7 +19,6 @@
#include "llvm/Target/TargetOptions.h"
namespace llvm {
-class TargetMachine;
enum class RunOutliner { TargetDefault, AlwaysOutline, NeverOutline };
enum class RegAllocType { Default, Basic, Fast, Greedy, PBQP };
diff --git a/contrib/llvm-project/llvm/include/llvm/Target/GenericOpcodes.td b/contrib/llvm-project/llvm/include/llvm/Target/GenericOpcodes.td
index 72c974834a2f..2af20ab6a53f 100644
--- a/contrib/llvm-project/llvm/include/llvm/Target/GenericOpcodes.td
+++ b/contrib/llvm-project/llvm/include/llvm/Target/GenericOpcodes.td
@@ -1434,3 +1434,10 @@ def G_ASSERT_SEXT : GenericInstruction {
let InOperandList = (ins type0:$src, untyped_imm_0:$sz);
let hasSideEffects = false;
}
+
+// Asserts that a value has at least the given alignment.
+def G_ASSERT_ALIGN : GenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src, untyped_imm_0:$align);
+ let hasSideEffects = false;
+}
diff --git a/contrib/llvm-project/llvm/include/llvm/Target/GlobalISel/Combine.td b/contrib/llvm-project/llvm/include/llvm/Target/GlobalISel/Combine.td
index 1d189c6dea6d..4859cf6b57b7 100644
--- a/contrib/llvm-project/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/contrib/llvm-project/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -391,7 +391,7 @@ def add_p2i_to_ptradd : GICombineRule<
>;
// Fold (ptr_add (int2ptr C1), C2) -> C1 + C2
-def const_ptradd_to_i2p_matchinfo : GIDefMatchData<"int64_t">;
+def const_ptradd_to_i2p_matchinfo : GIDefMatchData<"APInt">;
def const_ptradd_to_i2p: GICombineRule<
(defs root:$root, const_ptradd_to_i2p_matchinfo:$info),
(match (wip_match_opcode G_PTR_ADD):$root,
@@ -535,6 +535,14 @@ def unmerge_cst : GICombineRule<
(apply [{ Helper.applyCombineUnmergeConstant(*${d}, ${info}); }])
>;
+// Fold (unmerge undef) -> undef, undef, ...
+def unmerge_undef : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_UNMERGE_VALUES): $root,
+ [{ return Helper.matchCombineUnmergeUndef(*${root}, ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
+>;
+
// Transform x,y<dead> = unmerge z -> x = trunc z.
def unmerge_dead_to_trunc : GICombineRule<
(defs root:$d),
@@ -844,7 +852,8 @@ def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero,
propagate_undef_any_op,
propagate_undef_all_ops,
propagate_undef_shuffle_mask,
- erase_undef_store]>;
+ erase_undef_store,
+ unmerge_undef]>;
def identity_combines : GICombineGroup<[select_same_val, right_identity_zero,
binop_same_val, binop_left_to_zero,
diff --git a/contrib/llvm-project/llvm/include/llvm/Target/Target.td b/contrib/llvm-project/llvm/include/llvm/Target/Target.td
index 7ae690b83770..d8faa63ee877 100644
--- a/contrib/llvm-project/llvm/include/llvm/Target/Target.td
+++ b/contrib/llvm-project/llvm/include/llvm/Target/Target.td
@@ -993,7 +993,7 @@ class InstrInfo {
// by default, and TableGen will infer their value from the instruction
// pattern when possible.
//
- // Normally, TableGen will issue an error it it can't infer the value of a
+ // Normally, TableGen will issue an error if it can't infer the value of a
// property that hasn't been set explicitly. When guessInstructionProperties
// is set, it will guess a safe value instead.
//
diff --git a/contrib/llvm-project/llvm/include/llvm/Target/TargetLoweringObjectFile.h b/contrib/llvm-project/llvm/include/llvm/Target/TargetLoweringObjectFile.h
index 752032d3d04d..392ee4334cb5 100644
--- a/contrib/llvm-project/llvm/include/llvm/Target/TargetLoweringObjectFile.h
+++ b/contrib/llvm-project/llvm/include/llvm/Target/TargetLoweringObjectFile.h
@@ -221,7 +221,7 @@ public:
}
/// Returns the register used as static base in RWPI variants.
- virtual const MCRegister getStaticBase() const { return MCRegister::NoRegister; }
+ virtual MCRegister getStaticBase() const { return MCRegister::NoRegister; }
/// Get the target specific RWPI relocation.
virtual const MCExpr *getIndirectSymViaRWPI(const MCSymbol *Sym) const {
diff --git a/contrib/llvm-project/llvm/include/llvm/Target/TargetOptions.h b/contrib/llvm-project/llvm/include/llvm/Target/TargetOptions.h
index c639f326abc9..a636c4822832 100644
--- a/contrib/llvm-project/llvm/include/llvm/Target/TargetOptions.h
+++ b/contrib/llvm-project/llvm/include/llvm/Target/TargetOptions.h
@@ -140,9 +140,9 @@ namespace llvm {
EnableMachineFunctionSplitter(false), SupportsDefaultOutlining(false),
EmitAddrsig(false), EmitCallSiteInfo(false),
SupportsDebugEntryValues(false), EnableDebugEntryValues(false),
- ValueTrackingVariableLocations(false),
- ForceDwarfFrameSection(false), XRayOmitFunctionIndex(false),
- DebugStrictDwarf(false),
+ ValueTrackingVariableLocations(false), ForceDwarfFrameSection(false),
+ XRayOmitFunctionIndex(false), DebugStrictDwarf(false),
+ Hotpatch(false),
FPDenormalMode(DenormalMode::IEEE, DenormalMode::IEEE) {}
/// DisableFramePointerElim - This returns true if frame pointer elimination
@@ -342,6 +342,9 @@ namespace llvm {
/// By default, it is set to false.
unsigned DebugStrictDwarf : 1;
+ /// Emit the hotpatch flag in CodeView debug.
+ unsigned Hotpatch : 1;
+
/// Name of the stack usage file (i.e., .su file) if user passes
/// -fstack-usage. If empty, it can be implied that -fstack-usage is not
/// passed on the command line.
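The TargetOptions.h hunk above adds the Hotpatch bit. A minimal sketch of setting it, assuming the same bitfield-style assignment as the neighbouring flags (makeHotpatchOptions is a hypothetical helper):

#include "llvm/Target/TargetOptions.h"

llvm::TargetOptions makeHotpatchOptions() {
  llvm::TargetOptions Opts;
  Opts.Hotpatch = 1; // request the hotpatch flag in CodeView debug info
  return Opts;
}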
diff --git a/contrib/llvm-project/llvm/include/llvm/Testing/Support/Annotations.h b/contrib/llvm-project/llvm/include/llvm/Testing/Support/Annotations.h
index cc99d1061520..4e442269600d 100644
--- a/contrib/llvm-project/llvm/include/llvm/Testing/Support/Annotations.h
+++ b/contrib/llvm-project/llvm/include/llvm/Testing/Support/Annotations.h
@@ -16,6 +16,8 @@
namespace llvm {
+class raw_ostream;
+
/// Annotations lets you mark points and ranges inside source code, for tests:
///
/// Annotations Example(R"cpp(
diff --git a/contrib/llvm-project/llvm/include/llvm/TextAPI/InterfaceFile.h b/contrib/llvm-project/llvm/include/llvm/TextAPI/InterfaceFile.h
index 6ef4db2ae158..5f07397adaca 100644
--- a/contrib/llvm-project/llvm/include/llvm/TextAPI/InterfaceFile.h
+++ b/contrib/llvm-project/llvm/include/llvm/TextAPI/InterfaceFile.h
@@ -19,11 +19,7 @@
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
-#include "llvm/BinaryFormat/MachO.h"
-#include "llvm/BinaryFormat/Magic.h"
#include "llvm/Support/Allocator.h"
-#include "llvm/Support/Error.h"
-#include "llvm/TextAPI/Architecture.h"
#include "llvm/TextAPI/ArchitectureSet.h"
#include "llvm/TextAPI/PackedVersion.h"
#include "llvm/TextAPI/Platform.h"
diff --git a/contrib/llvm-project/llvm/include/llvm/TextAPI/Platform.h b/contrib/llvm-project/llvm/include/llvm/TextAPI/Platform.h
index f7affc3ae980..d4225ca533fc 100644
--- a/contrib/llvm-project/llvm/include/llvm/TextAPI/Platform.h
+++ b/contrib/llvm-project/llvm/include/llvm/TextAPI/Platform.h
@@ -18,29 +18,14 @@
namespace llvm {
namespace MachO {
-/// Defines the list of MachO platforms.
-enum class PlatformKind : unsigned {
- unknown,
- macOS = MachO::PLATFORM_MACOS,
- iOS = MachO::PLATFORM_IOS,
- tvOS = MachO::PLATFORM_TVOS,
- watchOS = MachO::PLATFORM_WATCHOS,
- bridgeOS = MachO::PLATFORM_BRIDGEOS,
- macCatalyst = MachO::PLATFORM_MACCATALYST,
- iOSSimulator = MachO::PLATFORM_IOSSIMULATOR,
- tvOSSimulator = MachO::PLATFORM_TVOSSIMULATOR,
- watchOSSimulator = MachO::PLATFORM_WATCHOSSIMULATOR,
- driverKit = MachO::PLATFORM_DRIVERKIT,
-};
+using PlatformSet = SmallSet<PlatformType, 3>;
-using PlatformSet = SmallSet<PlatformKind, 3>;
-
-PlatformKind mapToPlatformKind(PlatformKind Platform, bool WantSim);
-PlatformKind mapToPlatformKind(const Triple &Target);
+PlatformType mapToPlatformType(PlatformType Platform, bool WantSim);
+PlatformType mapToPlatformType(const Triple &Target);
PlatformSet mapToPlatformSet(ArrayRef<Triple> Targets);
-StringRef getPlatformName(PlatformKind Platform);
-PlatformKind getPlatformFromName(StringRef Name);
-std::string getOSAndEnvironmentName(PlatformKind Platform,
+StringRef getPlatformName(PlatformType Platform);
+PlatformType getPlatformFromName(StringRef Name);
+std::string getOSAndEnvironmentName(PlatformType Platform,
std::string Version = "");
} // end namespace MachO.
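The Platform.h hunk above drops the TextAPI PlatformKind enum in favour of the BinaryFormat MachO::PlatformType. A sketch of the renamed mapping helper (platformOf is a hypothetical wrapper):

#include "llvm/ADT/Triple.h"
#include "llvm/TextAPI/Platform.h"

llvm::MachO::PlatformType platformOf(const llvm::Triple &T) {
  // mapToPlatformKind(Triple) is now spelled mapToPlatformType(Triple).
  return llvm::MachO::mapToPlatformType(T);
}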
diff --git a/contrib/llvm-project/llvm/include/llvm/TextAPI/Target.h b/contrib/llvm-project/llvm/include/llvm/TextAPI/Target.h
index c2588b9d5a21..fbb76295f706 100644
--- a/contrib/llvm-project/llvm/include/llvm/TextAPI/Target.h
+++ b/contrib/llvm-project/llvm/include/llvm/TextAPI/Target.h
@@ -9,13 +9,15 @@
#ifndef LLVM_TEXTAPI_TARGET_H
#define LLVM_TEXTAPI_TARGET_H
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Error.h"
#include "llvm/TextAPI/Architecture.h"
#include "llvm/TextAPI/ArchitectureSet.h"
#include "llvm/TextAPI/Platform.h"
namespace llvm {
+
+class Triple;
+
namespace MachO {
// This is similar to a llvm Triple, but the triple doesn't have all the
@@ -24,17 +26,17 @@ namespace MachO {
class Target {
public:
Target() = default;
- Target(Architecture Arch, PlatformKind Platform)
+ Target(Architecture Arch, PlatformType Platform)
: Arch(Arch), Platform(Platform) {}
explicit Target(const llvm::Triple &Triple)
- : Arch(mapToArchitecture(Triple)), Platform(mapToPlatformKind(Triple)) {}
+ : Arch(mapToArchitecture(Triple)), Platform(mapToPlatformType(Triple)) {}
static llvm::Expected<Target> create(StringRef Target);
operator std::string() const;
Architecture Arch;
- PlatformKind Platform;
+ PlatformType Platform;
};
inline bool operator==(const Target &LHS, const Target &RHS) {
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Coroutines/CoroSplit.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Coroutines/CoroSplit.h
index ffca51578551..7623c9c0eb68 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Coroutines/CoroSplit.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Coroutines/CoroSplit.h
@@ -22,14 +22,14 @@
namespace llvm {
struct CoroSplitPass : PassInfoMixin<CoroSplitPass> {
- CoroSplitPass(bool ReuseFrameSlot = false) : ReuseFrameSlot(ReuseFrameSlot) {}
+ CoroSplitPass(bool OptimizeFrame = false) : OptimizeFrame(OptimizeFrame) {}
PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
LazyCallGraph &CG, CGSCCUpdateResult &UR);
static bool isRequired() { return true; }
// Would be true if the Optimization level isn't O0.
- bool ReuseFrameSlot;
+ bool OptimizeFrame;
};
} // end namespace llvm
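The CoroSplit.h hunk above renames ReuseFrameSlot to OptimizeFrame without changing the constructor shape. A sketch of constructing the pass under that rename (makeCoroSplitPass is a hypothetical helper):

#include "llvm/Transforms/Coroutines/CoroSplit.h"

llvm::CoroSplitPass makeCoroSplitPass(bool Optimizing) {
  // Per the header comment, true roughly corresponds to building above -O0.
  return llvm::CoroSplitPass(/*OptimizeFrame=*/Optimizing);
}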
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/Attributor.h b/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/Attributor.h
index d4cbc9bd20b7..d56a43ec7961 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/Attributor.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/Attributor.h
@@ -133,7 +133,6 @@ struct InformationCache;
struct AAIsDead;
struct AttributorCallGraph;
-class AAManager;
class AAResults;
class Function;
@@ -173,7 +172,8 @@ combineOptionalValuesInAAValueLatice(const Optional<Value *> &A,
const Optional<Value *> &B, Type *Ty);
/// Return the initial value of \p Obj with type \p Ty if that is a constant.
-Constant *getInitialValueForObj(Value &Obj, Type &Ty);
+Constant *getInitialValueForObj(Value &Obj, Type &Ty,
+ const TargetLibraryInfo *TLI);
/// Collect all potential underlying objects of \p Ptr at position \p CtxI in
/// \p Objects. Assumed information is used and dependences onto \p QueryingAA
@@ -1623,10 +1623,17 @@ public:
///
/// This method will evaluate \p Pred on all (transitive) uses of the
/// associated value and return true if \p Pred holds every time.
+ /// If uses are skipped in favor of equivalent ones, e.g., if we look through
+ /// memory, the \p EquivalentUseCB will be used to give the caller an idea
+ /// what original use was replaced by a new one (or new ones). The visit is
+ /// cut short if \p EquivalentUseCB returns false and the function will return
+ /// false as well.
bool checkForAllUses(function_ref<bool(const Use &, bool &)> Pred,
const AbstractAttribute &QueryingAA, const Value &V,
bool CheckBBLivenessOnly = false,
- DepClassTy LivenessDepClass = DepClassTy::OPTIONAL);
+ DepClassTy LivenessDepClass = DepClassTy::OPTIONAL,
+ function_ref<bool(const Use &OldU, const Use &NewU)>
+ EquivalentUseCB = nullptr);
/// Emit a remark generically.
///
@@ -2354,11 +2361,11 @@ private:
};
/// Simple wrapper for a single bit (boolean) state.
-struct BooleanState : public IntegerStateBase<bool, 1, 0> {
- using super = IntegerStateBase<bool, 1, 0>;
+struct BooleanState : public IntegerStateBase<bool, true, false> {
+ using super = IntegerStateBase<bool, true, false>;
using base_t = IntegerStateBase::base_t;
- BooleanState() : super() {}
+ BooleanState() {}
BooleanState(base_t Assumed) : super(Assumed) {}
/// Set the assumed value to \p Value but never below the known one.
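The Attributor.h hunk above adds an optional EquivalentUseCB parameter to checkForAllUses. A sketch of supplying it, assuming the caller already has an Attributor, a querying attribute and a value in scope (allUsesVisited is a hypothetical helper):

#include "llvm/Transforms/IPO/Attributor.h"

static bool allUsesVisited(llvm::Attributor &A,
                           const llvm::AbstractAttribute &QueryingAA,
                           const llvm::Value &V) {
  using namespace llvm;
  return A.checkForAllUses(
      [](const Use &U, bool &Follow) {
        Follow = true; // keep walking transitive uses
        return true;
      },
      QueryingAA, V,
      /*CheckBBLivenessOnly=*/false, DepClassTy::OPTIONAL,
      [](const Use &OldU, const Use &NewU) {
        // Called when a use is replaced by an equivalent one, e.g. when the
        // Attributor looks through memory; returning false cuts the visit short.
        return true;
      });
}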
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/IROutliner.h b/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/IROutliner.h
index 110c0b4dcf16..ed74c8ed0e96 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/IROutliner.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/IROutliner.h
@@ -95,6 +95,10 @@ struct OutlinableRegion {
/// required for the following basic blocks in this case.
bool EndsInBranch = false;
+ /// The PHIBlocks with their corresponding return block based on the return
+ /// value as the key.
+ DenseMap<Value *, BasicBlock *> PHIBlocks;
+
/// Mapping of the argument number in the deduplicated function
/// to a given constant, which is used when creating the arguments to the call
/// to the newly created deduplicated function. This is handled separately
@@ -182,7 +186,14 @@ public:
IROutliner(function_ref<TargetTransformInfo &(Function &)> GTTI,
function_ref<IRSimilarityIdentifier &(Module &)> GIRSI,
function_ref<OptimizationRemarkEmitter &(Function &)> GORE)
- : getTTI(GTTI), getIRSI(GIRSI), getORE(GORE) {}
+ : getTTI(GTTI), getIRSI(GIRSI), getORE(GORE) {
+
+ // Check that the DenseMap implementation has not changed.
+ assert(DenseMapInfo<unsigned>::getEmptyKey() == (unsigned)-1 &&
+ "DenseMapInfo<unsigned>'s empty key isn't -1!");
+ assert(DenseMapInfo<unsigned>::getTombstoneKey() == (unsigned)-2 &&
+ "DenseMapInfo<unsigned>'s tombstone key isn't -2!");
+ }
bool run(Module &M);
private:
@@ -331,8 +342,7 @@ private:
bool visitBranchInst(BranchInst &BI) {
return EnableBranches;
}
- // TODO: Determine a scheme to resolve when the labels are similar enough.
- bool visitPHINode(PHINode &PN) { return false; }
+ bool visitPHINode(PHINode &PN) { return EnableBranches; }
// TODO: Handle allocas.
bool visitAllocaInst(AllocaInst &AI) { return false; }
// VAArg instructions are not allowed since this could cause difficulty when
@@ -354,7 +364,15 @@ private:
// that they have a name in these cases.
bool visitCallInst(CallInst &CI) {
Function *F = CI.getCalledFunction();
- if (!F || CI.isIndirectCall() || !F->hasName())
+ bool IsIndirectCall = CI.isIndirectCall();
+ if (IsIndirectCall && !EnableIndirectCalls)
+ return false;
+ if (!F && !IsIndirectCall)
+ return false;
+ // Returning twice can cause issues with the state of the function call
+ // that were not expected when the function was used, so we do not include
+ // the call in outlined functions.
+ if (CI.canReturnTwice())
return false;
return true;
}
@@ -373,6 +391,10 @@ private:
// The flag variable that marks whether we should allow branch instructions
// to be outlined.
bool EnableBranches = false;
+
+ // The flag variable that marks whether we should allow indirect calls
+ // to be outlined.
+ bool EnableIndirectCalls = true;
};
/// A InstVisitor used to exclude certain instructions from being outlined.
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/ModuleInliner.h b/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/ModuleInliner.h
index 963d74d71003..7474e48aafaf 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/ModuleInliner.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/ModuleInliner.h
@@ -18,9 +18,6 @@
namespace llvm {
-class AssumptionCacheTracker;
-class ProfileSummaryInfo;
-
/// The module inliner pass for the new pass manager.
///
/// This pass wires together the inlining utilities and the inline cost
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/OpenMPOpt.h b/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/OpenMPOpt.h
index c6ac5bd46f57..bf08336663b6 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/OpenMPOpt.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/OpenMPOpt.h
@@ -21,7 +21,7 @@ namespace omp {
using Kernel = Function *;
/// Set of kernels in the module
-using KernelSet = SmallPtrSet<Kernel, 4>;
+using KernelSet = SetVector<Kernel>;
/// Helper to determine if \p M contains OpenMP.
bool containsOpenMP(Module &M);
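The OpenMPOpt.h hunk above switches KernelSet from SmallPtrSet to SetVector, so iteration now follows insertion order. A minimal sketch (visitKernels is a hypothetical helper):

#include "llvm/Transforms/IPO/OpenMPOpt.h"

void visitKernels(llvm::omp::KernelSet &Kernels) {
  for (llvm::omp::Kernel K : Kernels) // Kernel is just a Function *
    (void)K; // visited in deterministic insertion order
}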
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h b/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h
index 7f321a688aff..3b944878a810 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h
@@ -24,7 +24,6 @@ namespace llvm {
class ModuleSummaryIndex;
class Pass;
class TargetLibraryInfoImpl;
-class TargetMachine;
// The old pass manager infrastructure is hidden in a legacy namespace now.
namespace legacy {
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h b/contrib/llvm-project/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h
index c6aee439b5a0..f8cb6dc73a6f 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h
@@ -480,9 +480,9 @@ public:
return llvm::ComputeNumSignBits(Op, DL, Depth, &AC, CxtI, &DT);
}
- unsigned ComputeMinSignedBits(const Value *Op, unsigned Depth = 0,
- const Instruction *CxtI = nullptr) const {
- return llvm::ComputeMinSignedBits(Op, DL, Depth, &AC, CxtI, &DT);
+ unsigned ComputeMaxSignificantBits(const Value *Op, unsigned Depth = 0,
+ const Instruction *CxtI = nullptr) const {
+ return llvm::ComputeMaxSignificantBits(Op, DL, Depth, &AC, CxtI, &DT);
}
OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
index 6c351e3f8e1f..5a0fb835606a 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
@@ -26,6 +26,7 @@ class InterestingMemoryOperand {
public:
Use *PtrUse;
bool IsWrite;
+ Type *OpType;
uint64_t TypeSize;
MaybeAlign Alignment;
// The mask Value, if we're looking at a masked load/store.
@@ -34,7 +35,8 @@ public:
InterestingMemoryOperand(Instruction *I, unsigned OperandNo, bool IsWrite,
class Type *OpType, MaybeAlign Alignment,
Value *MaybeMask = nullptr)
- : IsWrite(IsWrite), Alignment(Alignment), MaybeMask(MaybeMask) {
+ : IsWrite(IsWrite), OpType(OpType), Alignment(Alignment),
+ MaybeMask(MaybeMask) {
const DataLayout &DL = I->getModule()->getDataLayout();
TypeSize = DL.getTypeStoreSizeInBits(OpType);
PtrUse = &I->getOperandUse(OperandNo);
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Instrumentation/InstrProfiling.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Instrumentation/InstrProfiling.h
index 94b156f3b137..64523d7d073c 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Instrumentation/InstrProfiling.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Instrumentation/InstrProfiling.h
@@ -100,7 +100,7 @@ private:
///
/// If the counter array doesn't yet exist, the profile data variables
/// referring to them will also be created.
- GlobalVariable *getOrCreateRegionCounters(InstrProfIncrementInst *Inc);
+ GlobalVariable *getOrCreateRegionCounters(InstrProfInstBase *Inc);
/// Emit the section with compressed function names.
void emitNameData();
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Instrumentation/MemorySanitizer.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Instrumentation/MemorySanitizer.h
index d47beb93397e..e5779dc775ba 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Instrumentation/MemorySanitizer.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Instrumentation/MemorySanitizer.h
@@ -19,11 +19,15 @@
namespace llvm {
struct MemorySanitizerOptions {
- MemorySanitizerOptions() : MemorySanitizerOptions(0, false, false){};
- MemorySanitizerOptions(int TrackOrigins, bool Recover, bool Kernel);
+ MemorySanitizerOptions() : MemorySanitizerOptions(0, false, false, false){};
+ MemorySanitizerOptions(int TrackOrigins, bool Recover, bool Kernel)
+ : MemorySanitizerOptions(TrackOrigins, Recover, Kernel, false) {}
+ MemorySanitizerOptions(int TrackOrigins, bool Recover, bool Kernel,
+ bool EagerChecks);
bool Kernel;
int TrackOrigins;
bool Recover;
+ bool EagerChecks;
};
// Insert MemorySanitizer instrumentation (detection of uninitialized reads)
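The MemorySanitizer.h hunk above grows MemorySanitizerOptions by an EagerChecks flag while keeping the older three-argument constructor. A minimal sketch (makeMsanOptions is a hypothetical helper):

#include "llvm/Transforms/Instrumentation/MemorySanitizer.h"

llvm::MemorySanitizerOptions makeMsanOptions() {
  // The three-argument form still compiles and leaves EagerChecks false.
  return llvm::MemorySanitizerOptions(/*TrackOrigins=*/2, /*Recover=*/false,
                                      /*Kernel=*/false, /*EagerChecks=*/true);
}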
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/GVN.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/GVN.h
index cbe5057b9cde..9e660c92124e 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/GVN.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/GVN.h
@@ -39,11 +39,9 @@ class AssumptionCache;
class BasicBlock;
class BranchInst;
class CallInst;
-class Constant;
class ExtractValueInst;
class Function;
class FunctionPass;
-class IntrinsicInst;
class LoadInst;
class LoopInfo;
class MemDepResult;
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/InstSimplifyPass.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/InstSimplifyPass.h
index f5781e085f7b..09a4a95401d8 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/InstSimplifyPass.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/InstSimplifyPass.h
@@ -18,8 +18,6 @@
namespace llvm {
-class FunctionPass;
-
/// Run instruction simplification across each instruction in the function.
///
/// Instruction simplification has useful constraints in some contexts:
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h
index 419729271a23..7ba9d65cae55 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h
@@ -435,8 +435,7 @@ public:
bool UseBlockFrequencyInfo = false,
bool UseBranchProbabilityInfo = false,
bool LoopNestMode = false)
- : Pass(std::move(Pass)), LoopCanonicalizationFPM(),
- UseMemorySSA(UseMemorySSA),
+ : Pass(std::move(Pass)), UseMemorySSA(UseMemorySSA),
UseBlockFrequencyInfo(UseBlockFrequencyInfo),
UseBranchProbabilityInfo(UseBranchProbabilityInfo),
LoopNestMode(LoopNestMode) {
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/LoopReroll.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/LoopReroll.h
index 6ae309e48a28..496e8df85ea0 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/LoopReroll.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/LoopReroll.h
@@ -14,8 +14,6 @@
namespace llvm {
-class Function;
-
class LoopRerollPass : public PassInfoMixin<LoopRerollPass> {
public:
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/LoopUnrollAndJamPass.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/LoopUnrollAndJamPass.h
index 6125fc7636a0..72663d3d62a8 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/LoopUnrollAndJamPass.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/LoopUnrollAndJamPass.h
@@ -13,7 +13,6 @@
#include "llvm/Transforms/Scalar/LoopPassManager.h"
namespace llvm {
-class Function;
/// A simple loop rotation transformation.
class LoopUnrollAndJamPass : public PassInfoMixin<LoopUnrollAndJamPass> {
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/SCCP.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/SCCP.h
index 2d7c94918699..cd4100447880 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/SCCP.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/SCCP.h
@@ -32,8 +32,6 @@
namespace llvm {
-class PostDominatorTree;
-
/// This pass performs function-level constant propagation and merging.
class SCCPPass : public PassInfoMixin<SCCPPass> {
public:
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/SROA.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/SROA.h
index f1a43435d89a..b74c45e71d95 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/SROA.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/SROA.h
@@ -27,7 +27,6 @@ class AllocaInst;
class AssumptionCache;
class DominatorTree;
class Function;
-class Instruction;
class LLVMContext;
class PHINode;
class SelectInst;
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/WarnMissedTransforms.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/WarnMissedTransforms.h
index 2d5942a3f569..04a5f7e6ff38 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/WarnMissedTransforms.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/WarnMissedTransforms.h
@@ -17,8 +17,6 @@
namespace llvm {
class Function;
-class Loop;
-class LPMUpdater;
// New pass manager boilerplate.
class WarnMissedTransformationsPass
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/AssumeBundleBuilder.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/AssumeBundleBuilder.h
index 5c06af9bc84c..a497722eece6 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/AssumeBundleBuilder.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/AssumeBundleBuilder.h
@@ -22,7 +22,6 @@
#include "llvm/IR/PassManager.h"
namespace llvm {
-class IntrinsicInst;
class AssumptionCache;
class DominatorTree;
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/Cloning.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/Cloning.h
index 5a1f322b2054..fdc55bea99e7 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/Cloning.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/Cloning.h
@@ -33,13 +33,11 @@ class AAResults;
class AllocaInst;
class BasicBlock;
class BlockFrequencyInfo;
-class CallInst;
class CallGraph;
class DebugInfoFinder;
class DominatorTree;
class Function;
class Instruction;
-class InvokeInst;
class Loop;
class LoopInfo;
class Module;
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/CodeExtractor.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/CodeExtractor.h
index f08173e45a5b..8aed3d0e40d9 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/CodeExtractor.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/CodeExtractor.h
@@ -168,7 +168,7 @@ public:
///
/// Based on the blocks used when constructing the code extractor,
/// determine whether it is eligible for extraction.
- ///
+ ///
/// Checks that varargs handling (with vastart and vaend) is only done in
/// the outlined blocks.
bool isEligible() const;
@@ -214,6 +214,10 @@ public:
/// original block will be added to the outline region.
BasicBlock *findOrCreateBlockForHoisting(BasicBlock *CommonExitBlock);
+ /// Exclude a value from aggregate argument passing when extracting a code
+ /// region, passing it instead as a scalar.
+ void excludeArgFromAggregate(Value *Arg);
+
private:
struct LifetimeMarkerInfo {
bool SinkLifeStart = false;
@@ -222,6 +226,8 @@ public:
Instruction *LifeEnd = nullptr;
};
+ ValueSet ExcludeArgsFromAggregate;
+
LifetimeMarkerInfo
getLifetimeMarkers(const CodeExtractorAnalysisCache &CEAC,
Instruction *Addr, BasicBlock *ExitBlock) const;
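The CodeExtractor.h hunk above adds excludeArgFromAggregate so one input can bypass the aggregate struct argument. A sketch under the assumption that aggregated arguments are enabled (outlineWithScalarArg is a hypothetical helper):

#include "llvm/Transforms/Utils/CodeExtractor.h"

llvm::Function *
outlineWithScalarArg(llvm::ArrayRef<llvm::BasicBlock *> Blocks,
                     llvm::Value *ScalarArg,
                     const llvm::CodeExtractorAnalysisCache &CEAC) {
  llvm::CodeExtractor CE(Blocks, /*DT=*/nullptr, /*AggregateArgs=*/true);
  CE.excludeArgFromAggregate(ScalarArg); // ScalarArg is passed as a plain value
  return CE.extractCodeRegion(CEAC);
}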
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/CodeLayout.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/CodeLayout.h
index 987a5651a8b6..a0e5f8c7d014 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/CodeLayout.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/CodeLayout.h
@@ -20,8 +20,6 @@
namespace llvm {
-class MachineBasicBlock;
-
/// Find a layout of nodes (basic blocks) of a given CFG optimizing jump
/// locality and thus processor I-cache utilization. This is achieved via
/// increasing the number of fall-through jumps and co-locating frequently
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/CtorUtils.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/CtorUtils.h
index 3625ee662b1c..3ef3ba244b43 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/CtorUtils.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/CtorUtils.h
@@ -17,7 +17,6 @@
namespace llvm {
-class GlobalVariable;
class Function;
class Module;
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/Evaluator.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/Evaluator.h
index 1b93b0af86e2..99e826bf855f 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/Evaluator.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/Evaluator.h
@@ -36,6 +36,49 @@ class TargetLibraryInfo;
/// be iterated over after the evaluation is complete. Once an evaluation call
/// fails, the evaluation object should not be reused.
class Evaluator {
+ struct MutableAggregate;
+
+ /// The evaluator represents values either as a Constant*, or as a
+ /// MutableAggregate, which allows changing individual aggregate elements
+ /// without creating a new interned Constant.
+ class MutableValue {
+ PointerUnion<Constant *, MutableAggregate *> Val;
+ void clear();
+ bool makeMutable();
+
+ public:
+ MutableValue(Constant *C) { Val = C; }
+ MutableValue(const MutableValue &) = delete;
+ MutableValue(MutableValue &&Other) {
+ Val = Other.Val;
+ Other.Val = nullptr;
+ }
+ ~MutableValue() { clear(); }
+
+ Type *getType() const {
+ if (auto *C = Val.dyn_cast<Constant *>())
+ return C->getType();
+ return Val.get<MutableAggregate *>()->Ty;
+ }
+
+ Constant *toConstant() const {
+ if (auto *C = Val.dyn_cast<Constant *>())
+ return C;
+ return Val.get<MutableAggregate *>()->toConstant();
+ }
+
+ Constant *read(Type *Ty, APInt Offset, const DataLayout &DL) const;
+ bool write(Constant *V, APInt Offset, const DataLayout &DL);
+ };
+
+ struct MutableAggregate {
+ Type *Ty;
+ SmallVector<MutableValue> Elements;
+
+ MutableAggregate(Type *Ty) : Ty(Ty) {}
+ Constant *toConstant() const;
+ };
+
public:
Evaluator(const DataLayout &DL, const TargetLibraryInfo *TLI)
: DL(DL), TLI(TLI) {
@@ -57,8 +100,11 @@ public:
bool EvaluateFunction(Function *F, Constant *&RetVal,
const SmallVectorImpl<Constant*> &ActualArgs);
- const DenseMap<Constant *, Constant *> &getMutatedMemory() const {
- return MutatedMemory;
+ DenseMap<GlobalVariable *, Constant *> getMutatedInitializers() const {
+ DenseMap<GlobalVariable *, Constant *> Result;
+ for (auto &Pair : MutatedMemory)
+ Result[Pair.first] = Pair.second.toConstant();
+ return Result;
}
const SmallPtrSetImpl<GlobalVariable *> &getInvariants() const {
@@ -81,7 +127,7 @@ private:
}
/// Casts call result to a type of bitcast call expression
- Constant *castCallResultIfNeeded(Value *CallExpr, Constant *RV);
+ Constant *castCallResultIfNeeded(Type *ReturnType, Constant *RV);
/// Given call site return callee and list of its formal arguments
Function *getCalleeWithFormalArgs(CallBase &CB,
@@ -106,7 +152,7 @@ private:
/// For each store we execute, we update this map. Loads check this to get
/// the most up-to-date value. If evaluation is successful, this state is
/// committed to the process.
- DenseMap<Constant*, Constant*> MutatedMemory;
+ DenseMap<GlobalVariable *, MutableValue> MutatedMemory;
/// To 'execute' an alloca, we create a temporary global variable to represent
/// its body. This vector is needed so we can delete the temporary globals
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/GlobalStatus.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/GlobalStatus.h
index 78d7845c4353..775dd23d8f23 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/GlobalStatus.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/GlobalStatus.h
@@ -73,10 +73,6 @@ struct GlobalStatus {
const Function *AccessingFunction = nullptr;
bool HasMultipleAccessingFunctions = false;
- /// Set to true if this global has a user that is not an instruction (e.g. a
- /// constant expr or GV initializer).
- bool HasNonInstructionUser = false;
-
/// Set to the strongest atomic ordering requirement.
AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/Local.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/Local.h
index a914c6e0925f..873127554b47 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/Local.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/Local.h
@@ -42,9 +42,7 @@ class BasicBlock;
class BranchInst;
class CallBase;
class CallInst;
-class DbgDeclareInst;
class DbgVariableIntrinsic;
-class DbgValueInst;
class DIBuilder;
class DomTreeUpdater;
class Function;
@@ -243,7 +241,7 @@ inline Align getKnownAlignment(Value *V, const DataLayout &DL,
CallInst *createCallMatchingInvoke(InvokeInst *II);
/// This function converts the specified invoke into a normal call.
-void changeToCall(InvokeInst *II, DomTreeUpdater *DTU = nullptr);
+CallInst *changeToCall(InvokeInst *II, DomTreeUpdater *DTU = nullptr);
///===---------------------------------------------------------------------===//
/// Dbg Intrinsic utilities
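The Local.h hunk above makes changeToCall return the CallInst it creates. A sketch of a caller taking advantage of that (convertInvoke is a hypothetical helper):

#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/Local.h"

llvm::CallInst *convertInvoke(llvm::InvokeInst *II, llvm::DomTreeUpdater *DTU) {
  llvm::CallInst *CI = llvm::changeToCall(II, DTU);
  CI->setTailCall(false); // the returned call stands in for the removed invoke
  return CI;
}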
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/LoopPeel.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/LoopPeel.h
index 6f1b4a880457..7b6595c192de 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/LoopPeel.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/LoopPeel.h
@@ -32,7 +32,7 @@ gatherPeelingPreferences(Loop *L, ScalarEvolution &SE,
void computePeelCount(Loop *L, unsigned LoopSize,
TargetTransformInfo::PeelingPreferences &PP,
- unsigned &TripCount, DominatorTree &DT,
+ unsigned TripCount, DominatorTree &DT,
ScalarEvolution &SE, unsigned Threshold = UINT_MAX);
} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/LoopUtils.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/LoopUtils.h
index e0a9115f61b0..3a712d78df67 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/LoopUtils.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/LoopUtils.h
@@ -37,7 +37,6 @@ class MemorySSAUpdater;
class OptimizationRemarkEmitter;
class PredIteratorCache;
class ScalarEvolution;
-class ScalarEvolutionExpander;
class SCEV;
class SCEVExpander;
class TargetLibraryInfo;
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/MemoryOpRemark.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/MemoryOpRemark.h
index e5f8a46eaf23..8dc0f1e26a92 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/MemoryOpRemark.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/MemoryOpRemark.h
@@ -27,8 +27,6 @@ class Instruction;
class IntrinsicInst;
class Value;
class OptimizationRemarkEmitter;
-class OptimizationRemarkMissed;
-class OptimizationRemarkAnalysis;
class StoreInst;
// FIXME: Once we get to more remarks like this one, we need to re-evaluate how
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/ModuleUtils.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/ModuleUtils.h
index f53ba546dc5c..9bbe8ea7e1e8 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/ModuleUtils.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/ModuleUtils.h
@@ -92,7 +92,7 @@ void appendToCompilerUsed(Module &M, ArrayRef<GlobalValue *> Values);
/// DeadComdatFunctions are those where every member of the comdat is listed
/// and thus removing them is safe (provided *all* are removed).
void filterDeadComdatFunctions(
- Module &M, SmallVectorImpl<Function *> &DeadComdatFunctions);
+ SmallVectorImpl<Function *> &DeadComdatFunctions);
/// Produce a unique identifier for this module by taking the MD5 sum of
/// the names of the module's strong external symbols that are not comdat
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h
index efc3cc775e11..277eb7acf238 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h
@@ -18,6 +18,7 @@
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/InstSimplifyFolder.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ScalarEvolutionNormalization.h"
#include "llvm/Analysis/TargetFolder.h"
@@ -122,7 +123,7 @@ class SCEVExpander : public SCEVVisitor<SCEVExpander, Value *> {
/// "expanded" form.
bool LSRMode;
- typedef IRBuilder<TargetFolder, IRBuilderCallbackInserter> BuilderType;
+ typedef IRBuilder<InstSimplifyFolder, IRBuilderCallbackInserter> BuilderType;
BuilderType Builder;
// RAII object that stores the current insertion point and restores it when
@@ -178,7 +179,7 @@ public:
: SE(se), DL(DL), IVName(name), PreserveLCSSA(PreserveLCSSA),
IVIncInsertLoop(nullptr), IVIncInsertPos(nullptr), CanonicalMode(true),
LSRMode(false),
- Builder(se.getContext(), TargetFolder(DL),
+ Builder(se.getContext(), InstSimplifyFolder(DL),
IRBuilderCallbackInserter(
[this](Instruction *I) { rememberInstruction(I); })) {
#ifdef LLVM_ENABLE_ABI_BREAKING_CHECKS
@@ -450,6 +451,14 @@ private:
/// Determine the most "relevant" loop for the given SCEV.
const Loop *getRelevantLoop(const SCEV *);
+ Value *expandSMaxExpr(const SCEVNAryExpr *S);
+
+ Value *expandUMaxExpr(const SCEVNAryExpr *S);
+
+ Value *expandSMinExpr(const SCEVNAryExpr *S);
+
+ Value *expandUMinExpr(const SCEVNAryExpr *S);
+
Value *visitConstant(const SCEVConstant *S) { return S->getValue(); }
Value *visitPtrToIntExpr(const SCEVPtrToIntExpr *S);
@@ -476,6 +485,8 @@ private:
Value *visitUMinExpr(const SCEVUMinExpr *S);
+ Value *visitSequentialUMinExpr(const SCEVSequentialUMinExpr *S);
+
Value *visitUnknown(const SCEVUnknown *S) { return S->getValue(); }
void rememberInstruction(Value *I);
@@ -504,15 +515,13 @@ private:
class SCEVExpanderCleaner {
SCEVExpander &Expander;
- DominatorTree &DT;
-
/// Indicates whether the result of the expansion is used. If false, the
/// instructions added during expansion are removed.
bool ResultUsed;
public:
- SCEVExpanderCleaner(SCEVExpander &Expander, DominatorTree &DT)
- : Expander(Expander), DT(DT), ResultUsed(false) {}
+ SCEVExpanderCleaner(SCEVExpander &Expander)
+ : Expander(Expander), ResultUsed(false) {}
~SCEVExpanderCleaner() { cleanup(); }
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
index 20b360212506..461669d6a217 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
@@ -19,8 +19,6 @@
namespace llvm {
-class BasicBlock;
-
class UnifyFunctionExitNodesLegacyPass : public FunctionPass {
public:
static char ID; // Pass identification, replacement for typeid
diff --git a/contrib/llvm-project/llvm/include/llvm/XRay/BlockIndexer.h b/contrib/llvm-project/llvm/include/llvm/XRay/BlockIndexer.h
index eabc4e3f5c6e..77af77e5ec26 100644
--- a/contrib/llvm-project/llvm/include/llvm/XRay/BlockIndexer.h
+++ b/contrib/llvm-project/llvm/include/llvm/XRay/BlockIndexer.h
@@ -41,7 +41,7 @@ private:
Block CurrentBlock{0, 0, nullptr, {}};
public:
- explicit BlockIndexer(Index &I) : RecordVisitor(), Indices(I) {}
+ explicit BlockIndexer(Index &I) : Indices(I) {}
Error visit(BufferExtents &) override;
Error visit(WallclockRecord &) override;
diff --git a/contrib/llvm-project/llvm/include/llvm/XRay/BlockPrinter.h b/contrib/llvm-project/llvm/include/llvm/XRay/BlockPrinter.h
index 9215d64e73b9..2f9fed668069 100644
--- a/contrib/llvm-project/llvm/include/llvm/XRay/BlockPrinter.h
+++ b/contrib/llvm-project/llvm/include/llvm/XRay/BlockPrinter.h
@@ -36,8 +36,7 @@ class BlockPrinter : public RecordVisitor {
State CurrentState = State::Start;
public:
- explicit BlockPrinter(raw_ostream &O, RecordPrinter &P)
- : RecordVisitor(), OS(O), RP(P) {}
+ explicit BlockPrinter(raw_ostream &O, RecordPrinter &P) : OS(O), RP(P) {}
Error visit(BufferExtents &) override;
Error visit(WallclockRecord &) override;
diff --git a/contrib/llvm-project/llvm/include/llvm/XRay/FDRRecordConsumer.h b/contrib/llvm-project/llvm/include/llvm/XRay/FDRRecordConsumer.h
index 91020b0a4fef..8fff9fb86158 100644
--- a/contrib/llvm-project/llvm/include/llvm/XRay/FDRRecordConsumer.h
+++ b/contrib/llvm-project/llvm/include/llvm/XRay/FDRRecordConsumer.h
@@ -30,7 +30,7 @@ class LogBuilderConsumer : public RecordConsumer {
public:
explicit LogBuilderConsumer(std::vector<std::unique_ptr<Record>> &R)
- : RecordConsumer(), Records(R) {}
+ : Records(R) {}
Error consume(std::unique_ptr<Record> R) override;
};
@@ -42,8 +42,7 @@ class PipelineConsumer : public RecordConsumer {
std::vector<RecordVisitor *> Visitors;
public:
- PipelineConsumer(std::initializer_list<RecordVisitor *> V)
- : RecordConsumer(), Visitors(V) {}
+ PipelineConsumer(std::initializer_list<RecordVisitor *> V) : Visitors(V) {}
Error consume(std::unique_ptr<Record> R) override;
};
diff --git a/contrib/llvm-project/llvm/include/llvm/XRay/FDRRecords.h b/contrib/llvm-project/llvm/include/llvm/XRay/FDRRecords.h
index 9c318805d61b..8af88f5b0e13 100644
--- a/contrib/llvm-project/llvm/include/llvm/XRay/FDRRecords.h
+++ b/contrib/llvm-project/llvm/include/llvm/XRay/FDRRecords.h
@@ -424,7 +424,7 @@ public:
static constexpr uint16_t DefaultVersion = 5u;
explicit RecordInitializer(DataExtractor &DE, uint64_t &OP, uint16_t V)
- : RecordVisitor(), E(DE), OffsetPtr(OP), Version(V) {}
+ : E(DE), OffsetPtr(OP), Version(V) {}
explicit RecordInitializer(DataExtractor &DE, uint64_t &OP)
: RecordInitializer(DE, OP, DefaultVersion) {}
diff --git a/contrib/llvm-project/llvm/include/llvm/XRay/FDRTraceExpander.h b/contrib/llvm-project/llvm/include/llvm/XRay/FDRTraceExpander.h
index 4a0bd24cfa9b..197c123fff1e 100644
--- a/contrib/llvm-project/llvm/include/llvm/XRay/FDRTraceExpander.h
+++ b/contrib/llvm-project/llvm/include/llvm/XRay/FDRTraceExpander.h
@@ -36,7 +36,7 @@ class TraceExpander : public RecordVisitor {
public:
explicit TraceExpander(function_ref<void(const XRayRecord &)> F, uint16_t L)
- : RecordVisitor(), C(std::move(F)), LogVersion(L) {}
+ : C(std::move(F)), LogVersion(L) {}
Error visit(BufferExtents &) override;
Error visit(WallclockRecord &) override;
diff --git a/contrib/llvm-project/llvm/include/llvm/XRay/RecordPrinter.h b/contrib/llvm-project/llvm/include/llvm/XRay/RecordPrinter.h
index f7b809c062f2..8ca4794dce5e 100644
--- a/contrib/llvm-project/llvm/include/llvm/XRay/RecordPrinter.h
+++ b/contrib/llvm-project/llvm/include/llvm/XRay/RecordPrinter.h
@@ -25,7 +25,7 @@ class RecordPrinter : public RecordVisitor {
public:
explicit RecordPrinter(raw_ostream &O, std::string D)
- : RecordVisitor(), OS(O), Delim(std::move(D)) {}
+ : OS(O), Delim(std::move(D)) {}
explicit RecordPrinter(raw_ostream &O) : RecordPrinter(O, ""){};
diff --git a/contrib/llvm-project/llvm/include/llvm/module.modulemap b/contrib/llvm-project/llvm/include/llvm/module.modulemap
index b0f7f2120606..25c7aeee148e 100644
--- a/contrib/llvm-project/llvm/include/llvm/module.modulemap
+++ b/contrib/llvm-project/llvm/include/llvm/module.modulemap
@@ -251,6 +251,7 @@ module LLVM_intrinsic_gen {
module IR_Function { header "IR/Function.h" export * }
module IR_InstrTypes { header "IR/InstrTypes.h" export * }
module IR_Instructions { header "IR/Instructions.h" export * }
+ module IR_TypeFinder { header "IR/TypeFinder.h" export * }
// Intrinsics.h
diff --git a/contrib/llvm-project/llvm/lib/Analysis/AliasAnalysis.cpp b/contrib/llvm-project/llvm/lib/Analysis/AliasAnalysis.cpp
index 49199060786c..a8132e5abf54 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/AliasAnalysis.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/AliasAnalysis.cpp
@@ -242,7 +242,7 @@ ModRefInfo AAResults::getModRefInfo(const CallBase *Call,
if (onlyReadsMemory(MRB))
Result = clearMod(Result);
- else if (doesNotReadMemory(MRB))
+ else if (onlyWritesMemory(MRB))
Result = clearRef(Result);
if (onlyAccessesArgPointees(MRB) || onlyAccessesInaccessibleOrArgMem(MRB)) {
@@ -320,7 +320,7 @@ ModRefInfo AAResults::getModRefInfo(const CallBase *Call1,
// from Call1 reading memory written by Call2.
if (onlyReadsMemory(Call1B))
Result = clearMod(Result);
- else if (doesNotReadMemory(Call1B))
+ else if (onlyWritesMemory(Call1B))
Result = clearRef(Result);
// If Call2 only accesses memory through arguments, accumulate the mod/ref
@@ -988,6 +988,29 @@ bool llvm::isIdentifiedFunctionLocal(const Value *V) {
return isa<AllocaInst>(V) || isNoAliasCall(V) || isNoAliasOrByValArgument(V);
}
+bool llvm::isNotVisibleOnUnwind(const Value *Object,
+ bool &RequiresNoCaptureBeforeUnwind) {
+ RequiresNoCaptureBeforeUnwind = false;
+
+ // Alloca goes out of scope on unwind.
+ if (isa<AllocaInst>(Object))
+ return true;
+
+ // Byval goes out of scope on unwind.
+ if (auto *A = dyn_cast<Argument>(Object))
+ return A->hasByValAttr();
+
+ // A noalias return is not accessible from any other code. If the pointer
+ // does not escape prior to the unwind, then the caller cannot access the
+ // memory either.
+ if (isNoAliasCall(Object)) {
+ RequiresNoCaptureBeforeUnwind = true;
+ return true;
+ }
+
+ return false;
+}
+
void llvm::getAAResultsAnalysisUsage(AnalysisUsage &AU) {
// This function needs to be in sync with llvm::createLegacyPMAAResults -- if
// more alias analyses are added to llvm::createLegacyPMAAResults, they need
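The AliasAnalysis.cpp hunk above adds isNotVisibleOnUnwind with an out-parameter for the noalias-call case. A sketch of how a client might consume it (objectDiesOnUnwind is a hypothetical helper; a real caller would also consult capture tracking):

#include "llvm/Analysis/AliasAnalysis.h"

bool objectDiesOnUnwind(const llvm::Value *Obj) {
  bool RequiresNoCaptureBeforeUnwind = false;
  if (!llvm::isNotVisibleOnUnwind(Obj, RequiresNoCaptureBeforeUnwind))
    return false;
  // For noalias calls the answer only holds if the pointer is not captured
  // before the unwind, which this sketch does not verify.
  return !RequiresNoCaptureBeforeUnwind;
}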
diff --git a/contrib/llvm-project/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp b/contrib/llvm-project/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
index 0c097b2fa302..1577f1eb70b1 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
@@ -142,13 +142,13 @@ void AAEvaluator::runInternal(Function &F, AAResults &AA) {
for (SetVector<Value *>::iterator I1 = Pointers.begin(), E = Pointers.end();
I1 != E; ++I1) {
auto I1Size = LocationSize::afterPointer();
- Type *I1ElTy = cast<PointerType>((*I1)->getType())->getElementType();
+ Type *I1ElTy = (*I1)->getType()->getPointerElementType();
if (I1ElTy->isSized())
I1Size = LocationSize::precise(DL.getTypeStoreSize(I1ElTy));
for (SetVector<Value *>::iterator I2 = Pointers.begin(); I2 != I1; ++I2) {
auto I2Size = LocationSize::afterPointer();
- Type *I2ElTy = cast<PointerType>((*I2)->getType())->getElementType();
+ Type *I2ElTy = (*I2)->getType()->getPointerElementType();
if (I2ElTy->isSized())
I2Size = LocationSize::precise(DL.getTypeStoreSize(I2ElTy));
@@ -233,7 +233,7 @@ void AAEvaluator::runInternal(Function &F, AAResults &AA) {
for (CallBase *Call : Calls) {
for (auto Pointer : Pointers) {
auto Size = LocationSize::afterPointer();
- Type *ElTy = cast<PointerType>(Pointer->getType())->getElementType();
+ Type *ElTy = Pointer->getType()->getPointerElementType();
if (ElTy->isSized())
Size = LocationSize::precise(DL.getTypeStoreSize(ElTy));
diff --git a/contrib/llvm-project/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/contrib/llvm-project/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 5f1bf2001d47..b4c985962837 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -779,7 +779,7 @@ FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
// than that.
if (Call->onlyReadsMemory())
Min = FMRB_OnlyReadsMemory;
- else if (Call->doesNotReadMemory())
+ else if (Call->onlyWritesMemory())
Min = FMRB_OnlyWritesMemory;
if (Call->onlyAccessesArgMemory())
@@ -812,7 +812,7 @@ FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
// If the function declares it only reads memory, go with that.
if (F->onlyReadsMemory())
Min = FMRB_OnlyReadsMemory;
- else if (F->doesNotReadMemory())
+ else if (F->onlyWritesMemory())
Min = FMRB_OnlyWritesMemory;
if (F->onlyAccessesArgMemory())
@@ -972,7 +972,7 @@ ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
continue;
}
// Operand aliases 'Object' but call only writes into it.
- if (Call->doesNotReadMemory(OperandNo)) {
+ if (Call->onlyWritesMemory(OperandNo)) {
Result = setMod(Result);
continue;
}
@@ -1020,9 +1020,9 @@ ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc, AAQI);
// It's also possible for Loc to alias both src and dest, or neither.
ModRefInfo rv = ModRefInfo::NoModRef;
- if (SrcAA != AliasResult::NoAlias)
+ if (SrcAA != AliasResult::NoAlias || Call->hasReadingOperandBundles())
rv = setRef(rv);
- if (DestAA != AliasResult::NoAlias)
+ if (DestAA != AliasResult::NoAlias || Call->hasClobberingOperandBundles())
rv = setMod(rv);
return rv;
}
@@ -1248,8 +1248,8 @@ AliasResult BasicAAResult::aliasGEP(
else
GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());
- ConstantRange CR =
- computeConstantRange(Index.Val.V, true, &AC, Index.CxtI);
+ ConstantRange CR = computeConstantRange(Index.Val.V, /* ForSigned */ false,
+ true, &AC, Index.CxtI);
KnownBits Known =
computeKnownBits(Index.Val.V, DL, 0, &AC, Index.CxtI, DT);
CR = CR.intersectWith(
diff --git a/contrib/llvm-project/llvm/lib/Analysis/BranchProbabilityInfo.cpp b/contrib/llvm-project/llvm/lib/Analysis/BranchProbabilityInfo.cpp
index 856d7e90acb2..ffb80134749a 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/BranchProbabilityInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/BranchProbabilityInfo.cpp
@@ -42,6 +42,7 @@
#include <cassert>
#include <cstdint>
#include <iterator>
+#include <map>
#include <utility>
using namespace llvm;
diff --git a/contrib/llvm-project/llvm/lib/Analysis/CFLSteensAliasAnalysis.cpp b/contrib/llvm-project/llvm/lib/Analysis/CFLSteensAliasAnalysis.cpp
index 9467bb3c9b2d..090dccc53b6e 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/CFLSteensAliasAnalysis.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/CFLSteensAliasAnalysis.cpp
@@ -63,7 +63,7 @@ using namespace llvm::cflaa;
CFLSteensAAResult::CFLSteensAAResult(
std::function<const TargetLibraryInfo &(Function &F)> GetTLI)
- : AAResultBase(), GetTLI(std::move(GetTLI)) {}
+ : GetTLI(std::move(GetTLI)) {}
CFLSteensAAResult::CFLSteensAAResult(CFLSteensAAResult &&Arg)
: AAResultBase(std::move(Arg)), GetTLI(std::move(Arg.GetTLI)) {}
CFLSteensAAResult::~CFLSteensAAResult() = default;
diff --git a/contrib/llvm-project/llvm/lib/Analysis/CallGraphSCCPass.cpp b/contrib/llvm-project/llvm/lib/Analysis/CallGraphSCCPass.cpp
index f2e5eab72bf2..930cb13c0cb3 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/CallGraphSCCPass.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/CallGraphSCCPass.cpp
@@ -61,7 +61,7 @@ class CGPassManager : public ModulePass, public PMDataManager {
public:
static char ID;
- explicit CGPassManager() : ModulePass(ID), PMDataManager() {}
+ explicit CGPassManager() : ModulePass(ID) {}
/// Execute all of the passes scheduled for execution. Keep track of
/// whether any of the passes modifies the module, and if so, return true.
diff --git a/contrib/llvm-project/llvm/lib/Analysis/CaptureTracking.cpp b/contrib/llvm-project/llvm/lib/Analysis/CaptureTracking.cpp
index 9b45f455be08..ba8462e659d5 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/CaptureTracking.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/CaptureTracking.cpp
@@ -75,7 +75,7 @@ bool CaptureTracker::isDereferenceableOrNull(Value *O, const DataLayout &DL) {
namespace {
struct SimpleCaptureTracker : public CaptureTracker {
explicit SimpleCaptureTracker(bool ReturnCaptures)
- : ReturnCaptures(ReturnCaptures), Captured(false) {}
+ : ReturnCaptures(ReturnCaptures) {}
void tooManyUses() override { Captured = true; }
@@ -89,7 +89,7 @@ namespace {
bool ReturnCaptures;
- bool Captured;
+ bool Captured = false;
};
/// Only find pointer captures which happen before the given instruction. Uses
@@ -101,7 +101,7 @@ namespace {
CapturesBefore(bool ReturnCaptures, const Instruction *I,
const DominatorTree *DT, bool IncludeI, const LoopInfo *LI)
: BeforeHere(I), DT(DT), ReturnCaptures(ReturnCaptures),
- IncludeI(IncludeI), Captured(false), LI(LI) {}
+ IncludeI(IncludeI), LI(LI) {}
void tooManyUses() override { Captured = true; }
@@ -139,7 +139,7 @@ namespace {
bool ReturnCaptures;
bool IncludeI;
- bool Captured;
+ bool Captured = false;
const LoopInfo *LI;
};
@@ -155,7 +155,7 @@ namespace {
struct EarliestCaptures : public CaptureTracker {
EarliestCaptures(bool ReturnCaptures, Function &F, const DominatorTree &DT)
- : DT(DT), ReturnCaptures(ReturnCaptures), Captured(false), F(F) {}
+ : DT(DT), ReturnCaptures(ReturnCaptures), F(F) {}
void tooManyUses() override {
Captured = true;
@@ -199,7 +199,7 @@ namespace {
bool ReturnCaptures;
- bool Captured;
+ bool Captured = false;
Function &F;
};
diff --git a/contrib/llvm-project/llvm/lib/Analysis/ConstantFolding.cpp b/contrib/llvm-project/llvm/lib/Analysis/ConstantFolding.cpp
index 922b38e92785..7cf69f613c66 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/ConstantFolding.cpp
@@ -106,11 +106,8 @@ Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
"Invalid constantexpr bitcast!");
// Catch the obvious splat cases.
- if (C->isNullValue() && !DestTy->isX86_MMXTy() && !DestTy->isX86_AMXTy())
- return Constant::getNullValue(DestTy);
- if (C->isAllOnesValue() && !DestTy->isX86_MMXTy() && !DestTy->isX86_AMXTy() &&
- !DestTy->isPtrOrPtrVectorTy()) // Don't get ones for ptr types!
- return Constant::getAllOnesValue(DestTy);
+ if (Constant *Res = ConstantFoldLoadFromUniformValue(C, DestTy))
+ return Res;
if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
// Handle a vector->scalar integer/fp cast.
@@ -362,16 +359,8 @@ Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
// Catch the obvious splat cases (since all-zeros can coerce non-integral
// pointers legally).
- if (C->isNullValue() && !DestTy->isX86_MMXTy() && !DestTy->isX86_AMXTy())
- return Constant::getNullValue(DestTy);
- if (C->isAllOnesValue() &&
- (DestTy->isIntegerTy() || DestTy->isFloatingPointTy() ||
- DestTy->isVectorTy()) &&
- !DestTy->isX86_AMXTy() && !DestTy->isX86_MMXTy() &&
- !DestTy->isPtrOrPtrVectorTy())
- // Get ones when the input is trivial, but
- // only for supported types inside getAllOnesValue.
- return Constant::getAllOnesValue(DestTy);
+ if (Constant *Res = ConstantFoldLoadFromUniformValue(C, DestTy))
+ return Res;
// If the type sizes are the same and a cast is legal, just directly
// cast the constant.
@@ -410,6 +399,12 @@ Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
} while (ElemC && DL.getTypeSizeInBits(ElemC->getType()).isZero());
C = ElemC;
} else {
+ // For non-byte-sized vector elements, the first element is not
+ // necessarily located at the vector base address.
+ if (auto *VT = dyn_cast<VectorType>(SrcTy))
+ if (!DL.typeSizeEqualsStoreSize(VT->getElementType()))
+ return nullptr;
+
C = C->getAggregateElement(0u);
}
} while (C);
@@ -558,23 +553,16 @@ Constant *FoldReinterpretLoadFromConst(Constant *C, Type *LoadTy,
// If this isn't an integer load we can't fold it directly.
if (!IntType) {
- // If this is a float/double load, we can try folding it as an int32/64 load
- // and then bitcast the result. This can be useful for union cases. Note
+ // If this is a non-integer load, we can try folding it as an int load and
+ // then bitcast the result. This can be useful for union cases. Note
// that address spaces don't matter here since we're not going to result in
// an actual new load.
- Type *MapTy;
- if (LoadTy->isHalfTy())
- MapTy = Type::getInt16Ty(C->getContext());
- else if (LoadTy->isFloatTy())
- MapTy = Type::getInt32Ty(C->getContext());
- else if (LoadTy->isDoubleTy())
- MapTy = Type::getInt64Ty(C->getContext());
- else if (LoadTy->isVectorTy()) {
- MapTy = PointerType::getIntNTy(
- C->getContext(), DL.getTypeSizeInBits(LoadTy).getFixedSize());
- } else
+ if (!LoadTy->isFloatingPointTy() && !LoadTy->isPointerTy() &&
+ !LoadTy->isVectorTy())
return nullptr;
+ Type *MapTy = Type::getIntNTy(
+ C->getContext(), DL.getTypeSizeInBits(LoadTy).getFixedSize());
if (Constant *Res = FoldReinterpretLoadFromConst(C, MapTy, Offset, DL)) {
if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
!LoadTy->isX86_AMXTy())
@@ -680,9 +668,21 @@ Constant *llvm::ConstantFoldLoadFromConst(Constant *C, Type *Ty,
if (Constant *Result = ConstantFoldLoadThroughBitcast(AtOffset, Ty, DL))
return Result;
+ // Explicitly check for out-of-bounds access, so we return undef even if the
+ // constant is a uniform value.
+ TypeSize Size = DL.getTypeAllocSize(C->getType());
+ if (!Size.isScalable() && Offset.sge(Size.getFixedSize()))
+ return UndefValue::get(Ty);
+
+ // Try an offset-independent fold of a uniform value.
+ if (Constant *Result = ConstantFoldLoadFromUniformValue(C, Ty))
+ return Result;
+
// Try hard to fold loads from bitcasted strange and non-type-safe things.
if (Offset.getMinSignedBits() <= 64)
- return FoldReinterpretLoadFromConst(C, Ty, Offset.getSExtValue(), DL);
+ if (Constant *Result =
+ FoldReinterpretLoadFromConst(C, Ty, Offset.getSExtValue(), DL))
+ return Result;
return nullptr;
}
@@ -704,15 +704,13 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
Offset, DL))
return Result;
- // If this load comes from anywhere in a constant global, and if the global
- // is all undef or zero, we know what it loads.
+ // If this load comes from anywhere in a uniform constant global, the value
+ // is always the same, regardless of the loaded offset.
if (auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(C))) {
if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
- if (GV->getInitializer()->isNullValue() && !Ty->isX86_MMXTy() &&
- !Ty->isX86_AMXTy())
- return Constant::getNullValue(Ty);
- if (isa<UndefValue>(GV->getInitializer()))
- return UndefValue::get(Ty);
+ if (Constant *Res =
+ ConstantFoldLoadFromUniformValue(GV->getInitializer(), Ty))
+ return Res;
}
}
@@ -725,6 +723,19 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
return ConstantFoldLoadFromConstPtr(C, Ty, Offset, DL);
}
+Constant *llvm::ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty) {
+ if (isa<PoisonValue>(C))
+ return PoisonValue::get(Ty);
+ if (isa<UndefValue>(C))
+ return UndefValue::get(Ty);
+ if (C->isNullValue() && !Ty->isX86_MMXTy() && !Ty->isX86_AMXTy())
+ return Constant::getNullValue(Ty);
+ if (C->isAllOnesValue() &&
+ (Ty->isIntOrIntVectorTy() || Ty->isFPOrFPVectorTy()))
+ return Constant::getAllOnesValue(Ty);
+ return nullptr;
+}
+
namespace {
/// One of Op0/Op1 is a constant expression.
@@ -930,7 +941,7 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
if (auto *GV = dyn_cast<GlobalValue>(Ptr))
SrcElemTy = GV->getValueType();
else if (!PTy->isOpaque())
- SrcElemTy = PTy->getElementType();
+ SrcElemTy = PTy->getNonOpaquePointerElementType();
else
SrcElemTy = Type::getInt8Ty(Ptr->getContext());
@@ -1171,10 +1182,11 @@ Constant *llvm::ConstantFoldInstOperands(Instruction *I,
return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI);
}
-Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
+Constant *llvm::ConstantFoldCompareInstOperands(unsigned IntPredicate,
Constant *Ops0, Constant *Ops1,
const DataLayout &DL,
const TargetLibraryInfo *TLI) {
+ CmpInst::Predicate Predicate = (CmpInst::Predicate)IntPredicate;
// fold: icmp (inttoptr x), null -> icmp x, 0
// fold: icmp null, (inttoptr x) -> icmp 0, x
// fold: icmp (ptrtoint x), 0 -> icmp x, null
@@ -1248,10 +1260,30 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
return ConstantFoldBinaryOpOperands(OpC, LHS, RHS, DL);
}
+
+ // Convert pointer comparison (base+offset1) pred (base+offset2) into
+ // offset1 pred offset2, for the case where the offset is inbounds. This
+ // only works for equality and unsigned comparison, as inbounds permits
+ // crossing the sign boundary. However, the offset comparison itself is
+ // signed.
+ if (Ops0->getType()->isPointerTy() && !ICmpInst::isSigned(Predicate)) {
+ unsigned IndexWidth = DL.getIndexTypeSizeInBits(Ops0->getType());
+ APInt Offset0(IndexWidth, 0);
+ Value *Stripped0 =
+ Ops0->stripAndAccumulateInBoundsConstantOffsets(DL, Offset0);
+ APInt Offset1(IndexWidth, 0);
+ Value *Stripped1 =
+ Ops1->stripAndAccumulateInBoundsConstantOffsets(DL, Offset1);
+ if (Stripped0 == Stripped1)
+ return ConstantExpr::getCompare(
+ ICmpInst::getSignedPredicate(Predicate),
+ ConstantInt::get(CE0->getContext(), Offset0),
+ ConstantInt::get(CE0->getContext(), Offset1));
+ }
} else if (isa<ConstantExpr>(Ops1)) {
// If RHS is a constant expression, but the left side isn't, swap the
// operands and try again.
- Predicate = ICmpInst::getSwappedPredicate((ICmpInst::Predicate)Predicate);
+ Predicate = ICmpInst::getSwappedPredicate(Predicate);
return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
}
@@ -1347,23 +1379,6 @@ Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
}
}
-Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
- ConstantExpr *CE,
- Type *Ty,
- const DataLayout &DL) {
- if (!CE->getOperand(1)->isNullValue())
- return nullptr; // Do not allow stepping over the value!
-
- // Loop over all of the operands, tracking down which value we are
- // addressing.
- for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
- C = C->getAggregateElement(CE->getOperand(i));
- if (!C)
- return nullptr;
- }
- return ConstantFoldLoadThroughBitcast(C, Ty, DL);
-}
-
//===----------------------------------------------------------------------===//
// Constant Folding for Calls
//
@@ -2463,36 +2478,21 @@ static Constant *ConstantFoldScalarCall2(StringRef Name,
!getConstIntOrUndef(Operands[1], C1))
return nullptr;
- unsigned BitWidth = Ty->getScalarSizeInBits();
switch (IntrinsicID) {
default: break;
case Intrinsic::smax:
- if (!C0 && !C1)
- return UndefValue::get(Ty);
- if (!C0 || !C1)
- return ConstantInt::get(Ty, APInt::getSignedMaxValue(BitWidth));
- return ConstantInt::get(Ty, C0->sgt(*C1) ? *C0 : *C1);
-
case Intrinsic::smin:
- if (!C0 && !C1)
- return UndefValue::get(Ty);
- if (!C0 || !C1)
- return ConstantInt::get(Ty, APInt::getSignedMinValue(BitWidth));
- return ConstantInt::get(Ty, C0->slt(*C1) ? *C0 : *C1);
-
case Intrinsic::umax:
- if (!C0 && !C1)
- return UndefValue::get(Ty);
- if (!C0 || !C1)
- return ConstantInt::get(Ty, APInt::getMaxValue(BitWidth));
- return ConstantInt::get(Ty, C0->ugt(*C1) ? *C0 : *C1);
-
case Intrinsic::umin:
if (!C0 && !C1)
return UndefValue::get(Ty);
if (!C0 || !C1)
- return ConstantInt::get(Ty, APInt::getMinValue(BitWidth));
- return ConstantInt::get(Ty, C0->ult(*C1) ? *C0 : *C1);
+ return MinMaxIntrinsic::getSaturationPoint(IntrinsicID, Ty);
+ return ConstantInt::get(
+ Ty, ICmpInst::compare(*C0, *C1,
+ MinMaxIntrinsic::getPredicate(IntrinsicID))
+ ? *C0
+ : *C1);
case Intrinsic::usub_with_overflow:
case Intrinsic::ssub_with_overflow:
@@ -2572,9 +2572,9 @@ static Constant *ConstantFoldScalarCall2(StringRef Name,
case Intrinsic::ctlz:
assert(C1 && "Must be constant int");
- // cttz(0, 1) and ctlz(0, 1) are undef.
+ // cttz(0, 1) and ctlz(0, 1) are poison.
if (C1->isOne() && (!C0 || C0->isZero()))
- return UndefValue::get(Ty);
+ return PoisonValue::get(Ty);
if (!C0)
return Constant::getNullValue(Ty);
if (IntrinsicID == Intrinsic::cttz)
@@ -2583,13 +2583,15 @@ static Constant *ConstantFoldScalarCall2(StringRef Name,
return ConstantInt::get(Ty, C0->countLeadingZeros());
case Intrinsic::abs:
- // Undef or minimum val operand with poison min --> undef
assert(C1 && "Must be constant int");
+ assert((C1->isOne() || C1->isZero()) && "Must be 0 or 1");
+
+ // Undef or minimum val operand with poison min --> undef
if (C1->isOne() && (!C0 || C0->isMinSignedValue()))
return UndefValue::get(Ty);
// Undef operand with no poison min --> 0 (sign bit must be clear)
- if (C1->isZero() && !C0)
+ if (!C0)
return Constant::getNullValue(Ty);
return ConstantInt::get(Ty, C0->abs());
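The smax/smin/umax/umin folding above replaces four hand-written limit cases with MinMaxIntrinsic::getSaturationPoint plus a shared predicate. The saturation point is the absorbing operand of the operation: min/max of it with anything returns it (for example umax(x, 255) is 255 for i8), which is why a matching constant folds the whole call and why an undef operand is folded to that value. A standalone sketch of those limits in plain C++ (the enum and helper are illustrative, not the LLVM APInt API):

    #include <cassert>
    #include <cstdint>

    enum class MinMaxKind { SMax, SMin, UMax, UMin };

    // Absorbing value of each min/max flavor for a given bit width.
    uint64_t saturationPoint(MinMaxKind Kind, unsigned BitWidth) {
      assert(BitWidth >= 1 && BitWidth <= 64);
      uint64_t UnsignedMax = BitWidth == 64 ? ~0ULL : ((1ULL << BitWidth) - 1);
      uint64_t SignedMinBits = 1ULL << (BitWidth - 1); // e.g. 0x80 for i8
      uint64_t SignedMaxBits = SignedMinBits - 1;      // e.g. 0x7f for i8
      switch (Kind) {
      case MinMaxKind::SMax: return SignedMaxBits;
      case MinMaxKind::SMin: return SignedMinBits;
      case MinMaxKind::UMax: return UnsignedMax;
      case MinMaxKind::UMin: return 0;
      }
      return 0;
    }

    int main() {
      assert(saturationPoint(MinMaxKind::UMax, 8) == 0xff);  // umax(x, 255) --> 255
      assert(saturationPoint(MinMaxKind::SMax, 8) == 0x7f);
      assert(saturationPoint(MinMaxKind::UMin, 16) == 0);
    }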
diff --git a/contrib/llvm-project/llvm/lib/Analysis/ConstraintSystem.cpp b/contrib/llvm-project/llvm/lib/Analysis/ConstraintSystem.cpp
index 9739c6af5769..773f71ada0ee 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/ConstraintSystem.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/ConstraintSystem.cpp
@@ -142,7 +142,7 @@ bool ConstraintSystem::mayHaveSolution() {
return HasSolution;
}
-bool ConstraintSystem::isConditionImplied(SmallVector<int64_t, 8> R) {
+bool ConstraintSystem::isConditionImplied(SmallVector<int64_t, 8> R) const {
// If all variable coefficients are 0, we have 'C >= 0'. If the constant is >=
// 0, R is always true, regardless of the system.
if (all_of(makeArrayRef(R).drop_front(1), [](int64_t C) { return C == 0; }))
diff --git a/contrib/llvm-project/llvm/lib/Analysis/CostModel.cpp b/contrib/llvm-project/llvm/lib/Analysis/CostModel.cpp
index f407ec0d017a..326bacad01fe 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/CostModel.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/CostModel.cpp
@@ -50,7 +50,7 @@ namespace {
public:
static char ID; // Class identification, replacement for typeinfo
- CostModelAnalysis() : FunctionPass(ID), F(nullptr), TTI(nullptr) {
+ CostModelAnalysis() : FunctionPass(ID) {
initializeCostModelAnalysisPass(
*PassRegistry::getPassRegistry());
}
@@ -69,9 +69,9 @@ namespace {
void print(raw_ostream &OS, const Module*) const override;
/// The function that we analyze.
- Function *F;
+ Function *F = nullptr;
/// Target information.
- const TargetTransformInfo *TTI;
+ const TargetTransformInfo *TTI = nullptr;
};
} // End of anonymous namespace
diff --git a/contrib/llvm-project/llvm/lib/Analysis/DDG.cpp b/contrib/llvm-project/llvm/lib/Analysis/DDG.cpp
index da5de75a038c..7e1357959a3f 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/DDG.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/DDG.cpp
@@ -106,7 +106,7 @@ raw_ostream &llvm::operator<<(raw_ostream &OS, const DDGNode &N) {
//===--------------------------------------------------------------------===//
SimpleDDGNode::SimpleDDGNode(Instruction &I)
- : DDGNode(NodeKind::SingleInstruction), InstList() {
+ : DDGNode(NodeKind::SingleInstruction) {
assert(InstList.empty() && "Expected empty list.");
InstList.push_back(&I);
}
diff --git a/contrib/llvm-project/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp b/contrib/llvm-project/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
index 31b2dafa29b4..4a792fce51d1 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
@@ -11,8 +11,10 @@
//
//===----------------------------------------------------------------------===//
#include "llvm/Config/config.h"
+#include "llvm/Support/Casting.h"
#if defined(LLVM_HAVE_TF_API)
+#include "llvm/ADT/BitVector.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InlineSizeEstimatorAnalysis.h"
#include "llvm/Analysis/MLInlineAdvisor.h"
@@ -111,7 +113,7 @@ private:
StringRef LogFileName;
const ModelUnderTrainingRunner *const MUTR;
std::unique_ptr<Logger> L;
- std::vector<bool> Effects;
+ BitVector Effects;
/// There's at least one output. We'll set this to a different value if MUTR
/// is available.
size_t OutputCount = 1;
@@ -150,7 +152,7 @@ public:
DevelopmentModeMLInlineAdvisor(
Module &M, ModuleAnalysisManager &MAM,
std::unique_ptr<MLModelRunner> ModelRunner,
- std::function<bool(CallBase &)> GetDefaultAdvice, bool IsDoingInference,
+ std::function<bool(CallBase &)> GetDefaultAdvice,
std::unique_ptr<TrainingLogger> Logger);
size_t getTotalSizeEstimate();
@@ -341,10 +343,11 @@ void TrainingLogger::print() {
DevelopmentModeMLInlineAdvisor::DevelopmentModeMLInlineAdvisor(
Module &M, ModuleAnalysisManager &MAM,
std::unique_ptr<MLModelRunner> ModelRunner,
- std::function<bool(CallBase &)> GetDefaultAdvice, bool IsDoingInference,
+ std::function<bool(CallBase &)> GetDefaultAdvice,
std::unique_ptr<TrainingLogger> Logger)
: MLInlineAdvisor(M, MAM, std::move(ModelRunner)),
- GetDefaultAdvice(GetDefaultAdvice), IsDoingInference(IsDoingInference),
+ GetDefaultAdvice(GetDefaultAdvice),
+ IsDoingInference(isa<ModelUnderTrainingRunner>(getModelRunner())),
Logger(std::move(Logger)),
InitialNativeSize(isLogging() ? getTotalSizeEstimate() : 0),
CurrentNativeSize(InitialNativeSize) {
@@ -410,8 +413,6 @@ size_t DevelopmentModeMLInlineAdvisor::getTotalSizeEstimate() {
for (auto &F : M) {
if (F.isDeclaration())
continue;
- if (isFunctionDeleted(&F))
- continue;
Ret += *getNativeSizeEstimate(F);
}
return Ret;
@@ -422,30 +423,20 @@ std::unique_ptr<InlineAdvisor> llvm::getDevelopmentModeAdvisor(
std::function<bool(CallBase &)> GetDefaultAdvice) {
auto &Ctx = M.getContext();
std::unique_ptr<MLModelRunner> Runner;
- ModelUnderTrainingRunner *MUTRPtr = nullptr;
- bool IsDoingInference = false;
if (TFModelUnderTrainingPath.empty())
Runner.reset(new NoInferenceModelRunner(Ctx, getInputFeatures()));
- else {
- std::unique_ptr<ModelUnderTrainingRunner> MUTR;
- if (auto MaybeOutputSpecs = loadOutputSpecs(
- Ctx, DecisionName, TFModelUnderTrainingPath, TFOutputSpecOverride))
- MUTR = std::make_unique<ModelUnderTrainingRunner>(
- Ctx, TFModelUnderTrainingPath, getInputFeatures(), *MaybeOutputSpecs);
- if (!MUTR || !MUTR->isValid()) {
- Ctx.emitError("Could not load the policy model from the provided path");
- return nullptr;
- }
- IsDoingInference = true;
- MUTRPtr = MUTR.get();
- Runner = std::move(MUTR);
- }
+ else
+ Runner = ModelUnderTrainingRunner::createAndEnsureValid(
+ Ctx, TFModelUnderTrainingPath, DecisionName, getInputFeatures(),
+ TFOutputSpecOverride);
+ if (!Runner)
+ return nullptr;
std::unique_ptr<TrainingLogger> Logger;
if (!TrainingLog.empty())
- Logger = std::make_unique<TrainingLogger>(TrainingLog, MUTRPtr);
+ Logger = std::make_unique<TrainingLogger>(
+ TrainingLog, dyn_cast<ModelUnderTrainingRunner>(Runner.get()));
return std::make_unique<DevelopmentModeMLInlineAdvisor>(
- M, MAM, std::move(Runner), GetDefaultAdvice, IsDoingInference,
- std::move(Logger));
+ M, MAM, std::move(Runner), GetDefaultAdvice, std::move(Logger));
}
#endif // defined(LLVM_HAVE_TF_API)
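DevelopmentModeMLInlineAdvisor above stops taking IsDoingInference as a constructor parameter and instead derives it from the dynamic type of the model runner (isa<ModelUnderTrainingRunner>), so the flag can never disagree with the runner that was actually built. A small sketch of that idea, using plain C++ dynamic_cast in place of LLVM's isa<> machinery (names are illustrative):

    #include <cassert>
    #include <memory>

    struct ModelRunner {
      virtual ~ModelRunner() = default;
    };
    struct NoInferenceRunner : ModelRunner {};
    struct TrainingRunner : ModelRunner {};

    // The flag is computed from the runner itself: one source of truth.
    struct Advisor {
      explicit Advisor(std::unique_ptr<ModelRunner> R)
          : Runner(std::move(R)),
            IsDoingInference(dynamic_cast<TrainingRunner *>(Runner.get()) != nullptr) {}
      std::unique_ptr<ModelRunner> Runner; // initialized first
      bool IsDoingInference;               // then derived from it
    };

    int main() {
      assert(!Advisor(std::make_unique<NoInferenceRunner>()).IsDoingInference);
      assert(Advisor(std::make_unique<TrainingRunner>()).IsDoingInference);
    }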
diff --git a/contrib/llvm-project/llvm/lib/Analysis/DivergenceAnalysis.cpp b/contrib/llvm-project/llvm/lib/Analysis/DivergenceAnalysis.cpp
index 7426d0c07592..39e80c2ad51c 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/DivergenceAnalysis.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/DivergenceAnalysis.cpp
@@ -130,7 +130,7 @@ bool DivergenceAnalysisImpl::inRegion(const Instruction &I) const {
}
bool DivergenceAnalysisImpl::inRegion(const BasicBlock &BB) const {
- return (!RegionLoop && BB.getParent() == &F) || RegionLoop->contains(&BB);
+ return RegionLoop ? RegionLoop->contains(&BB) : (BB.getParent() == &F);
}
void DivergenceAnalysisImpl::pushUsers(const Value &V) {
@@ -348,7 +348,7 @@ DivergenceInfo::DivergenceInfo(Function &F, const DominatorTree &DT,
const PostDominatorTree &PDT, const LoopInfo &LI,
const TargetTransformInfo &TTI,
bool KnownReducible)
- : F(F), ContainsIrreducible(false) {
+ : F(F) {
if (!KnownReducible) {
using RPOTraversal = ReversePostOrderTraversal<const Function *>;
RPOTraversal FuncRPOT(&F);
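The inRegion change above is not just stylistic: the old expression (!RegionLoop && BB.getParent() == &F) || RegionLoop->contains(&BB) could, in principle, dereference a null RegionLoop whenever the first clause failed, while the ternary form tests the pointer exactly once. A minimal sketch of the hazard and the fix (illustrative types):

    #include <cassert>

    struct Loop {
      bool contains(int BB) const { return BB >= Lo && BB <= Hi; }
      int Lo, Hi;
    };

    // Old shape: (!RegionLoop && InFunction) || RegionLoop->contains(BB).
    // With RegionLoop null and InFunction false, the right-hand side still runs
    // and dereferences the null pointer. The ternary picks one test up front.
    bool inRegion(const Loop *RegionLoop, bool InFunction, int BB) {
      return RegionLoop ? RegionLoop->contains(BB) : InFunction;
    }

    int main() {
      Loop L{2, 5};
      assert(inRegion(&L, /*InFunction=*/false, 3));
      assert(!inRegion(&L, /*InFunction=*/true, 9));
      assert(inRegion(nullptr, /*InFunction=*/true, 9)); // no dereference
    }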
diff --git a/contrib/llvm-project/llvm/lib/Analysis/DomPrinter.cpp b/contrib/llvm-project/llvm/lib/Analysis/DomPrinter.cpp
index ebbe0d3e2c5f..6088de53028d 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/DomPrinter.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/DomPrinter.cpp
@@ -80,6 +80,19 @@ struct DOTGraphTraits<PostDominatorTree*>
};
}
+PreservedAnalyses DomTreePrinterPass::run(Function &F,
+ FunctionAnalysisManager &AM) {
+ WriteDOTGraphToFile(F, &AM.getResult<DominatorTreeAnalysis>(F), "dom", false);
+ return PreservedAnalyses::all();
+}
+
+PreservedAnalyses DomTreeOnlyPrinterPass::run(Function &F,
+ FunctionAnalysisManager &AM) {
+ WriteDOTGraphToFile(F, &AM.getResult<DominatorTreeAnalysis>(F), "domonly",
+ true);
+ return PreservedAnalyses::all();
+}
+
void DominatorTree::viewGraph(const Twine &Name, const Twine &Title) {
#ifndef NDEBUG
ViewGraph(this, Name, false, Title);
diff --git a/contrib/llvm-project/llvm/lib/Analysis/DominanceFrontier.cpp b/contrib/llvm-project/llvm/lib/Analysis/DominanceFrontier.cpp
index 14e6965f1259..a8806fe5a480 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/DominanceFrontier.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/DominanceFrontier.cpp
@@ -37,7 +37,7 @@ INITIALIZE_PASS_END(DominanceFrontierWrapperPass, "domfrontier",
"Dominance Frontier Construction", true, true)
DominanceFrontierWrapperPass::DominanceFrontierWrapperPass()
- : FunctionPass(ID), DF() {
+ : FunctionPass(ID) {
initializeDominanceFrontierWrapperPassPass(*PassRegistry::getPassRegistry());
}
diff --git a/contrib/llvm-project/llvm/lib/Analysis/GlobalsModRef.cpp b/contrib/llvm-project/llvm/lib/Analysis/GlobalsModRef.cpp
index d00a7c944f10..6869530148c5 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/GlobalsModRef.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/GlobalsModRef.cpp
@@ -102,7 +102,7 @@ class GlobalsAAResult::FunctionInfo {
"Insufficient low bits to store our flag and ModRef info.");
public:
- FunctionInfo() : Info() {}
+ FunctionInfo() {}
~FunctionInfo() {
delete Info.getPointer();
}
@@ -401,14 +401,14 @@ bool GlobalsAAResult::AnalyzeUsesOfPointer(Value *V,
/// AnalyzeIndirectGlobalMemory - We found a non-address-taken global variable
/// which holds a pointer type. See if the global always points to non-aliased
-/// heap memory: that is, all initializers of the globals are allocations, and
-/// those allocations have no use other than initialization of the global.
+/// heap memory: that is, all initializers of the globals store a value known
+/// to come from a call that returns a noalias pointer and has no other use.
/// Further, all loads out of GV must directly use the memory, not store the
/// pointer somewhere. If this is true, we consider the memory pointed to by
/// GV to be owned by GV and can disambiguate other pointers from it.
bool GlobalsAAResult::AnalyzeIndirectGlobalMemory(GlobalVariable *GV) {
// Keep track of values related to the allocation of the memory, e.g. the
- // value produced by the malloc call and any casts.
+ // value produced by the noalias call and any casts.
std::vector<Value *> AllocRelatedValues;
// If the initializer is a valid pointer, bail.
@@ -438,7 +438,7 @@ bool GlobalsAAResult::AnalyzeIndirectGlobalMemory(GlobalVariable *GV) {
// Check the value being stored.
Value *Ptr = getUnderlyingObject(SI->getOperand(0));
- if (!isAllocLikeFn(Ptr, &GetTLI(*SI->getFunction())))
+ if (!isNoAliasCall(Ptr))
return false; // Too hard to analyze.
// Analyze all uses of the allocation. If any of them are used in a
@@ -963,7 +963,7 @@ ModRefInfo GlobalsAAResult::getModRefInfo(const CallBase *Call,
GlobalsAAResult::GlobalsAAResult(
const DataLayout &DL,
std::function<const TargetLibraryInfo &(Function &F)> GetTLI)
- : AAResultBase(), DL(DL), GetTLI(std::move(GetTLI)) {}
+ : DL(DL), GetTLI(std::move(GetTLI)) {}
GlobalsAAResult::GlobalsAAResult(GlobalsAAResult &&Arg)
: AAResultBase(std::move(Arg)), DL(Arg.DL), GetTLI(std::move(Arg.GetTLI)),
diff --git a/contrib/llvm-project/llvm/lib/Analysis/IRSimilarityIdentifier.cpp b/contrib/llvm-project/llvm/lib/Analysis/IRSimilarityIdentifier.cpp
index 2ec6cbeabda2..d2f0c57f6dab 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/IRSimilarityIdentifier.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/IRSimilarityIdentifier.cpp
@@ -23,12 +23,24 @@
using namespace llvm;
using namespace IRSimilarity;
+namespace llvm {
cl::opt<bool>
DisableBranches("no-ir-sim-branch-matching", cl::init(false),
cl::ReallyHidden,
cl::desc("disable similarity matching, and outlining, "
"across branches for debugging purposes."));
+cl::opt<bool>
+ DisableIndirectCalls("no-ir-sim-indirect-calls", cl::init(false),
+ cl::ReallyHidden,
+ cl::desc("disable outlining indirect calls."));
+
+cl::opt<bool>
+ MatchCallsByName("ir-sim-calls-by-name", cl::init(false), cl::ReallyHidden,
+ cl::desc("only allow matching call instructions if the "
+ "name and type signature match."));
+} // namespace llvm
+
IRInstructionData::IRInstructionData(Instruction &I, bool Legality,
IRInstructionDataList &IDList)
: Inst(&I), Legal(Legality), IDL(&IDList) {
@@ -57,10 +69,16 @@ void IRInstructionData::initializeInstruction() {
OperVals.push_back(OI.get());
}
+
+ // We capture the incoming BasicBlocks as values as well as the incoming
+ // Values in order to check for structural similarity.
+ if (PHINode *PN = dyn_cast<PHINode>(Inst))
+ for (BasicBlock *BB : PN->blocks())
+ OperVals.push_back(BB);
}
IRInstructionData::IRInstructionData(IRInstructionDataList &IDList)
- : Inst(nullptr), Legal(false), IDL(&IDList) {}
+ : IDL(&IDList) {}
void IRInstructionData::setBranchSuccessors(
DenseMap<BasicBlock *, unsigned> &BasicBlockToInteger) {
@@ -86,6 +104,43 @@ void IRInstructionData::setBranchSuccessors(
}
}
+void IRInstructionData::setCalleeName(bool MatchByName) {
+ CallInst *CI = dyn_cast<CallInst>(Inst);
+ assert(CI && "Instruction must be call");
+
+ CalleeName = "";
+ if (!CI->isIndirectCall() && MatchByName)
+ CalleeName = CI->getCalledFunction()->getName().str();
+}
+
+void IRInstructionData::setPHIPredecessors(
+ DenseMap<BasicBlock *, unsigned> &BasicBlockToInteger) {
+ assert(isa<PHINode>(Inst) && "Instruction must be phi node");
+
+ PHINode *PN = cast<PHINode>(Inst);
+ DenseMap<BasicBlock *, unsigned>::iterator BBNumIt;
+
+ BBNumIt = BasicBlockToInteger.find(PN->getParent());
+ assert(BBNumIt != BasicBlockToInteger.end() &&
+ "Could not find location for BasicBlock!");
+
+ int CurrentBlockNumber = static_cast<int>(BBNumIt->second);
+
+ // Convert the incoming blocks of the PHINode to an integer value, based on
+ // the relative distances between the current block and the incoming block.
+ for (unsigned Idx = 0; Idx < PN->getNumIncomingValues(); Idx++) {
+ BasicBlock *Incoming = PN->getIncomingBlock(Idx);
+ BBNumIt = BasicBlockToInteger.find(Incoming);
+ assert(BBNumIt != BasicBlockToInteger.end() &&
+ "Could not find number for BasicBlock!");
+ int OtherBlockNumber = static_cast<int>(BBNumIt->second);
+
+ int Relative = OtherBlockNumber - CurrentBlockNumber;
+ RelativeBlockLocations.push_back(Relative);
+ RelativeBlockLocations.push_back(Relative);
+ }
+}
+
CmpInst::Predicate IRInstructionData::predicateForConsistency(CmpInst *CI) {
switch (CI->getPredicate()) {
case CmpInst::FCMP_OGT:
@@ -112,10 +167,13 @@ CmpInst::Predicate IRInstructionData::getPredicate() const {
return cast<CmpInst>(Inst)->getPredicate();
}
-static StringRef getCalledFunctionName(CallInst &CI) {
- assert(CI.getCalledFunction() != nullptr && "Called Function is nullptr?");
+StringRef IRInstructionData::getCalleeName() const {
+ assert(isa<CallInst>(Inst) &&
+ "Can only get a name from a call instruction");
- return CI.getCalledFunction()->getName();
+ assert(CalleeName.hasValue() && "CalleeName has not been set");
+
+ return *CalleeName;
}
bool IRSimilarity::isClose(const IRInstructionData &A,
@@ -170,13 +228,11 @@ bool IRSimilarity::isClose(const IRInstructionData &A,
});
}
- // If the instructions are functions, we make sure that the function name is
- // the same. We already know that the types are since is isSameOperationAs is
- // true.
+ // If the instructions are function calls, we make sure that the function
+ // name is the same. We already know that the types are, since
+ // isSameOperationAs is true.
if (isa<CallInst>(A.Inst) && isa<CallInst>(B.Inst)) {
- CallInst *CIA = cast<CallInst>(A.Inst);
- CallInst *CIB = cast<CallInst>(B.Inst);
- if (getCalledFunctionName(*CIA).compare(getCalledFunctionName(*CIB)) != 0)
+ if (A.getCalleeName().str().compare(B.getCalleeName().str()) != 0)
return false;
}
@@ -244,6 +300,12 @@ unsigned IRInstructionMapper::mapToLegalUnsigned(
if (isa<BranchInst>(*It))
ID->setBranchSuccessors(BasicBlockToInteger);
+ if (isa<CallInst>(*It))
+ ID->setCalleeName(EnableMatchCallsByName);
+
+ if (isa<PHINode>(*It))
+ ID->setPHIPredecessors(BasicBlockToInteger);
+
// Add to the instruction list
bool WasInserted;
DenseMap<IRInstructionData *, unsigned, IRInstructionDataTraits>::iterator
@@ -1075,6 +1137,8 @@ SimilarityGroupList &IRSimilarityIdentifier::findSimilarity(
std::vector<IRInstructionData *> InstrList;
std::vector<unsigned> IntegerMapping;
Mapper.InstClassifier.EnableBranches = this->EnableBranches;
+ Mapper.InstClassifier.EnableIndirectCalls = EnableIndirectCalls;
+ Mapper.EnableMatchCallsByName = EnableMatchingCallsByName;
populateMapper(Modules, InstrList, IntegerMapping);
findCandidates(InstrList, IntegerMapping);
@@ -1085,6 +1149,8 @@ SimilarityGroupList &IRSimilarityIdentifier::findSimilarity(
SimilarityGroupList &IRSimilarityIdentifier::findSimilarity(Module &M) {
resetSimilarityCandidates();
Mapper.InstClassifier.EnableBranches = this->EnableBranches;
+ Mapper.InstClassifier.EnableIndirectCalls = EnableIndirectCalls;
+ Mapper.EnableMatchCallsByName = EnableMatchingCallsByName;
std::vector<IRInstructionData *> InstrList;
std::vector<unsigned> IntegerMapping;
@@ -1105,7 +1171,8 @@ IRSimilarityIdentifierWrapperPass::IRSimilarityIdentifierWrapperPass()
}
bool IRSimilarityIdentifierWrapperPass::doInitialization(Module &M) {
- IRSI.reset(new IRSimilarityIdentifier(!DisableBranches));
+ IRSI.reset(new IRSimilarityIdentifier(!DisableBranches, !DisableIndirectCalls,
+ MatchCallsByName));
return false;
}
@@ -1123,7 +1190,8 @@ AnalysisKey IRSimilarityAnalysis::Key;
IRSimilarityIdentifier IRSimilarityAnalysis::run(Module &M,
ModuleAnalysisManager &) {
- auto IRSI = IRSimilarityIdentifier(!DisableBranches);
+ auto IRSI = IRSimilarityIdentifier(!DisableBranches, !DisableIndirectCalls,
+ MatchCallsByName);
IRSI.findSimilarity(M);
return IRSI;
}
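setPHIPredecessors above encodes each incoming block of a PHI as its numeric distance from the block containing the PHI, so two structurally identical regions map to the same integers even though their absolute block numbers differ. A standalone sketch of that relative encoding (names and containers are illustrative, not the LLVM API):

    #include <cassert>
    #include <map>
    #include <string>
    #include <vector>

    std::vector<int> relativePredecessors(
        const std::map<std::string, unsigned> &BlockNumber,
        const std::string &PhiBlock, const std::vector<std::string> &Incoming) {
      std::vector<int> Relative;
      int Current = static_cast<int>(BlockNumber.at(PhiBlock));
      // Distance of each incoming block from the PHI's own block.
      for (const std::string &BB : Incoming)
        Relative.push_back(static_cast<int>(BlockNumber.at(BB)) - Current);
      return Relative;
    }

    int main() {
      std::map<std::string, unsigned> First{{"entry", 0}, {"then", 1}, {"join", 2}};
      std::map<std::string, unsigned> Second{{"a", 7}, {"b", 8}, {"c", 9}};
      // Structurally identical PHIs in different functions encode identically.
      assert(relativePredecessors(First, "join", {"entry", "then"}) ==
             relativePredecessors(Second, "c", {"a", "b"}));
    }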
diff --git a/contrib/llvm-project/llvm/lib/Analysis/IVDescriptors.cpp b/contrib/llvm-project/llvm/lib/Analysis/IVDescriptors.cpp
index f5fa6748d053..44b1d94ebdc8 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/IVDescriptors.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/IVDescriptors.cpp
@@ -161,19 +161,22 @@ static std::pair<Type *, bool> computeRecurrenceType(Instruction *Exit,
/// Collect cast instructions that can be ignored in the vectorizer's cost
/// model, given a reduction exit value and the minimal type in which the
-/// reduction can be represented.
-static void collectCastsToIgnore(Loop *TheLoop, Instruction *Exit,
- Type *RecurrenceType,
- SmallPtrSetImpl<Instruction *> &Casts) {
+// reduction can be represented. Also search casts to the recurrence type
+// to find the minimum width used by the recurrence.
+static void collectCastInstrs(Loop *TheLoop, Instruction *Exit,
+ Type *RecurrenceType,
+ SmallPtrSetImpl<Instruction *> &Casts,
+ unsigned &MinWidthCastToRecurTy) {
SmallVector<Instruction *, 8> Worklist;
SmallPtrSet<Instruction *, 8> Visited;
Worklist.push_back(Exit);
+ MinWidthCastToRecurTy = -1U;
while (!Worklist.empty()) {
Instruction *Val = Worklist.pop_back_val();
Visited.insert(Val);
- if (auto *Cast = dyn_cast<CastInst>(Val))
+ if (auto *Cast = dyn_cast<CastInst>(Val)) {
if (Cast->getSrcTy() == RecurrenceType) {
// If the source type of a cast instruction is equal to the recurrence
// type, it will be eliminated, and should be ignored in the vectorizer
@@ -181,7 +184,16 @@ static void collectCastsToIgnore(Loop *TheLoop, Instruction *Exit,
Casts.insert(Cast);
continue;
}
-
+ if (Cast->getDestTy() == RecurrenceType) {
+ // The minimum width used by the recurrence is found by checking for
+ // casts on its operands. The minimum width is used by the vectorizer
+ // when finding the widest type for in-loop reductions without any
+ // loads/stores.
+ MinWidthCastToRecurTy = std::min<unsigned>(
+ MinWidthCastToRecurTy, Cast->getSrcTy()->getScalarSizeInBits());
+ continue;
+ }
+ }
// Add all operands to the work list if they are loop-varying values that
// we haven't yet visited.
for (Value *O : cast<User>(Val)->operands())
@@ -265,6 +277,7 @@ bool RecurrenceDescriptor::AddReductionVar(PHINode *Phi, RecurKind Kind,
// Data used for determining if the recurrence has been type-promoted.
Type *RecurrenceType = Phi->getType();
SmallPtrSet<Instruction *, 4> CastInsts;
+ unsigned MinWidthCastToRecurrenceType;
Instruction *Start = Phi;
bool IsSigned = false;
@@ -296,6 +309,10 @@ bool RecurrenceDescriptor::AddReductionVar(PHINode *Phi, RecurKind Kind,
// flags from all the reduction operations.
FastMathFlags FMF = FastMathFlags::getFast();
+ // The first instruction in the use-def chain of the Phi node that requires
+ // exact floating point operations.
+ Instruction *ExactFPMathInst = nullptr;
+
// A value in the reduction can be used:
// - By the reduction:
// - Reduction operation:
@@ -339,6 +356,9 @@ bool RecurrenceDescriptor::AddReductionVar(PHINode *Phi, RecurKind Kind,
if (Cur != Start) {
ReduxDesc =
isRecurrenceInstr(TheLoop, Phi, Cur, Kind, ReduxDesc, FuncFMF);
+ ExactFPMathInst = ExactFPMathInst == nullptr
+ ? ReduxDesc.getExactFPMathInst()
+ : ExactFPMathInst;
if (!ReduxDesc.isRecurrence())
return false;
// FIXME: FMF is allowed on phi, but propagation is not handled correctly.
@@ -467,8 +487,8 @@ bool RecurrenceDescriptor::AddReductionVar(PHINode *Phi, RecurKind Kind,
if (!FoundStartPHI || !FoundReduxOp || !ExitInstruction)
return false;
- const bool IsOrdered = checkOrderedReduction(
- Kind, ReduxDesc.getExactFPMathInst(), ExitInstruction, Phi);
+ const bool IsOrdered =
+ checkOrderedReduction(Kind, ExactFPMathInst, ExitInstruction, Phi);
if (Start != Phi) {
// If the starting value is not the same as the phi node, we speculatively
@@ -500,21 +520,24 @@ bool RecurrenceDescriptor::AddReductionVar(PHINode *Phi, RecurKind Kind,
computeRecurrenceType(ExitInstruction, DB, AC, DT);
if (ComputedType != RecurrenceType)
return false;
-
- // The recurrence expression will be represented in a narrower type. If
- // there are any cast instructions that will be unnecessary, collect them
- // in CastInsts. Note that the 'and' instruction was already included in
- // this list.
- //
- // TODO: A better way to represent this may be to tag in some way all the
- // instructions that are a part of the reduction. The vectorizer cost
- // model could then apply the recurrence type to these instructions,
- // without needing a white list of instructions to ignore.
- // This may also be useful for the inloop reductions, if it can be
- // kept simple enough.
- collectCastsToIgnore(TheLoop, ExitInstruction, RecurrenceType, CastInsts);
}
+ // Collect cast instructions and the minimum width used by the recurrence.
+ // If the starting value is not the same as the phi node and the computed
+ // recurrence type is equal to the recurrence type, the recurrence expression
+ // will be represented in a narrower or wider type. If there are any cast
+ // instructions that will be unnecessary, collect them in CastsFromRecurTy.
+ // Note that the 'and' instruction was already included in this list.
+ //
+ // TODO: A better way to represent this may be to tag in some way all the
+ // instructions that are a part of the reduction. The vectorizer cost
+ // model could then apply the recurrence type to these instructions,
+ // without needing a white list of instructions to ignore.
+ // This may also be useful for the inloop reductions, if it can be
+ // kept simple enough.
+ collectCastInstrs(TheLoop, ExitInstruction, RecurrenceType, CastInsts,
+ MinWidthCastToRecurrenceType);
+
// We found a reduction var if we have reached the original phi node and we
// only have a single instruction with out-of-loop users.
@@ -522,9 +545,9 @@ bool RecurrenceDescriptor::AddReductionVar(PHINode *Phi, RecurKind Kind,
// is saved as part of the RecurrenceDescriptor.
// Save the description of this reduction variable.
- RecurrenceDescriptor RD(RdxStart, ExitInstruction, Kind, FMF,
- ReduxDesc.getExactFPMathInst(), RecurrenceType,
- IsSigned, IsOrdered, CastInsts);
+ RecurrenceDescriptor RD(RdxStart, ExitInstruction, Kind, FMF, ExactFPMathInst,
+ RecurrenceType, IsSigned, IsOrdered, CastInsts,
+ MinWidthCastToRecurrenceType);
RedDes = RD;
return true;
@@ -1397,8 +1420,9 @@ bool InductionDescriptor::isInductionPHI(
// Always use i8 element type for opaque pointer inductions.
PointerType *PtrTy = cast<PointerType>(PhiTy);
- Type *ElementType = PtrTy->isOpaque() ? Type::getInt8Ty(PtrTy->getContext())
- : PtrTy->getElementType();
+ Type *ElementType = PtrTy->isOpaque()
+ ? Type::getInt8Ty(PtrTy->getContext())
+ : PtrTy->getNonOpaquePointerElementType();
if (!ElementType->isSized())
return false;
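collectCastInstrs above seeds MinWidthCastToRecurTy with -1U ("no cast seen yet") and then takes the minimum source width over every cast whose destination is the recurrence type, so the vectorizer can later pick the narrowest width an in-loop reduction really needs. A standalone sketch of that accumulation:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    unsigned minCastWidth(const std::vector<unsigned> &CastSourceBits) {
      unsigned MinWidth = -1U; // stays -1U when no relevant casts are found
      for (unsigned Bits : CastSourceBits)
        MinWidth = std::min(MinWidth, Bits);
      return MinWidth;
    }

    int main() {
      assert(minCastWidth({32, 8, 16}) == 8); // an i8 operand bounds the recurrence
      assert(minCastWidth({}) == -1U);        // no casts to the recurrence type
    }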
diff --git a/contrib/llvm-project/llvm/lib/Analysis/IVUsers.cpp b/contrib/llvm-project/llvm/lib/Analysis/IVUsers.cpp
index d7b202f83189..0f3929f45506 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/IVUsers.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/IVUsers.cpp
@@ -254,7 +254,7 @@ IVStrideUse &IVUsers::AddUser(Instruction *User, Value *Operand) {
IVUsers::IVUsers(Loop *L, AssumptionCache *AC, LoopInfo *LI, DominatorTree *DT,
ScalarEvolution *SE)
- : L(L), AC(AC), LI(LI), DT(DT), SE(SE), IVUses() {
+ : L(L), AC(AC), LI(LI), DT(DT), SE(SE) {
// Collect ephemeral values so that AddUsersIfInteresting skips them.
EphValues.clear();
CodeMetrics::collectEphemeralValues(L, AC, EphValues);
diff --git a/contrib/llvm-project/llvm/lib/Analysis/InlineAdvisor.cpp b/contrib/llvm-project/llvm/lib/Analysis/InlineAdvisor.cpp
index 140c88eb8b0d..f6e3dd354ff8 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/InlineAdvisor.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/InlineAdvisor.cpp
@@ -21,11 +21,15 @@
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/PassManager.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
#define DEBUG_TYPE "inline"
+#ifdef LLVM_HAVE_TF_AOT_INLINERSIZEMODEL
+#define LLVM_HAVE_TF_AOT
+#endif
// This weirdly named statistic tracks the number of times that, when attempting
// to inline a function A into B, we analyze the callers of B in order to see
@@ -160,18 +164,6 @@ InlineAdvice::InlineAdvice(InlineAdvisor *Advisor, CallBase &CB,
DLoc(CB.getDebugLoc()), Block(CB.getParent()), ORE(ORE),
IsInliningRecommended(IsInliningRecommended) {}
-void InlineAdvisor::markFunctionAsDeleted(Function *F) {
- assert((!DeletedFunctions.count(F)) &&
- "Cannot put cause a function to become dead twice!");
- DeletedFunctions.insert(F);
-}
-
-void InlineAdvisor::freeDeletedFunctions() {
- for (auto *F : DeletedFunctions)
- delete F;
- DeletedFunctions.clear();
-}
-
void InlineAdvice::recordInlineStatsIfNeeded() {
if (Advisor->ImportedFunctionsStats)
Advisor->ImportedFunctionsStats->recordInline(*Caller, *Callee);
@@ -186,7 +178,6 @@ void InlineAdvice::recordInlining() {
void InlineAdvice::recordInliningWithCalleeDeleted() {
markRecorded();
recordInlineStatsIfNeeded();
- Advisor->markFunctionAsDeleted(Callee);
recordInliningWithCalleeDeletedImpl();
}
@@ -523,8 +514,6 @@ InlineAdvisor::~InlineAdvisor() {
ImportedFunctionsStats->dump(InlinerFunctionImportStats ==
InlinerFunctionImportStatsOpts::Verbose);
}
-
- freeDeletedFunctions();
}
std::unique_ptr<InlineAdvice> InlineAdvisor::getMandatoryAdvice(CallBase &CB,
@@ -569,3 +558,13 @@ std::unique_ptr<InlineAdvice> InlineAdvisor::getAdvice(CallBase &CB,
OptimizationRemarkEmitter &InlineAdvisor::getCallerORE(CallBase &CB) {
return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*CB.getCaller());
}
+
+PreservedAnalyses
+InlineAdvisorAnalysisPrinterPass::run(Module &M, ModuleAnalysisManager &MAM) {
+ const auto *IA = MAM.getCachedResult<InlineAdvisorAnalysis>(M);
+ if (!IA)
+ OS << "No Inline Advisor\n";
+ else
+ IA->getAdvisor()->print(OS);
+ return PreservedAnalyses::all();
+}
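The new InlineAdvisorAnalysisPrinterPass above only asks the analysis manager for a cached advisor and prints a fallback line when none exists, rather than forcing the analysis to run. A toy stand-in for that lookup-without-compute shape, using std::optional in place of getCachedResult (illustrative, not the LLVM pass API):

    #include <iostream>
    #include <optional>
    #include <string>

    std::optional<std::string> getCachedAdvisor(bool AdvisorWasRegistered) {
      if (!AdvisorWasRegistered)
        return std::nullopt; // mirrors getCachedResult returning null
      return std::string("default inline advisor");
    }

    int main() {
      for (bool Registered : {false, true}) {
        if (auto Advisor = getCachedAdvisor(Registered))
          std::cout << *Advisor << "\n";
        else
          std::cout << "No Inline Advisor\n"; // same fallback as the pass above
      }
    }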
diff --git a/contrib/llvm-project/llvm/lib/Analysis/InlineCost.cpp b/contrib/llvm-project/llvm/lib/Analysis/InlineCost.cpp
index ff31e81aad08..d5411d916c77 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/InlineCost.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/InlineCost.cpp
@@ -361,10 +361,10 @@ protected:
/// Model the elimination of repeated loads that is expected to happen
/// whenever we simplify away the stores that would otherwise cause them to be
/// loads.
- bool EnableLoadElimination;
+ bool EnableLoadElimination = true;
/// Whether we allow inlining for recursive call.
- bool AllowRecursiveCall;
+ bool AllowRecursiveCall = false;
SmallPtrSet<Value *, 16> LoadAddrSet;
@@ -455,8 +455,7 @@ public:
OptimizationRemarkEmitter *ORE = nullptr)
: TTI(TTI), GetAssumptionCache(GetAssumptionCache), GetBFI(GetBFI),
PSI(PSI), F(Callee), DL(F.getParent()->getDataLayout()), ORE(ORE),
- CandidateCall(Call), EnableLoadElimination(true),
- AllowRecursiveCall(false) {}
+ CandidateCall(Call) {}
InlineResult analyze();
@@ -2898,15 +2897,6 @@ Optional<InlineResult> llvm::getAttributeBasedInliningDecision(
if (Call.isNoInline())
return InlineResult::failure("noinline call site attribute");
- // Don't inline functions if one does not have any stack protector attribute
- // but the other does.
- if (Caller->hasStackProtectorFnAttr() && !Callee->hasStackProtectorFnAttr())
- return InlineResult::failure(
- "stack protected caller but callee requested no stack protector");
- if (Callee->hasStackProtectorFnAttr() && !Caller->hasStackProtectorFnAttr())
- return InlineResult::failure(
- "stack protected callee but caller requested no stack protector");
-
return None;
}
diff --git a/contrib/llvm-project/llvm/lib/Analysis/InstructionSimplify.cpp b/contrib/llvm-project/llvm/lib/Analysis/InstructionSimplify.cpp
index 4831b22b1d46..b71b39334ace 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -27,6 +27,7 @@
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/Analysis/InstSimplifyFolder.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OverflowInstAnalysis.h"
@@ -70,7 +71,7 @@ static Value *SimplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned);
static Value *SimplifyXorInst(Value *, Value *, const SimplifyQuery &, unsigned);
static Value *SimplifyCastInst(unsigned, Value *, Type *,
const SimplifyQuery &, unsigned);
-static Value *SimplifyGEPInst(Type *, ArrayRef<Value *>, bool,
+static Value *SimplifyGEPInst(Type *, Value *, ArrayRef<Value *>, bool,
const SimplifyQuery &, unsigned);
static Value *SimplifySelectInst(Value *, Value *, Value *,
const SimplifyQuery &, unsigned);
@@ -620,6 +621,10 @@ static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
if (Constant *C = foldOrCommuteConstant(Instruction::Add, Op0, Op1, Q))
return C;
+ // X + poison -> poison
+ if (isa<PoisonValue>(Op1))
+ return Op1;
+
// X + undef -> undef
if (Q.isUndefValue(Op1))
return Op1;
@@ -1074,6 +1079,16 @@ static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q,
}
// IsSigned == false.
+
+ // Is the unsigned dividend known to be less than a constant divisor?
+ // TODO: Convert this (and above) to range analysis
+ // ("computeConstantRangeIncludingKnownBits")?
+ const APInt *C;
+ if (match(Y, m_APInt(C)) &&
+ computeKnownBits(X, Q.DL, 0, Q.AC, Q.CxtI, Q.DT).getMaxValue().ult(*C))
+ return true;
+
+ // Try again for any divisor:
// Is the dividend unsigned less than the divisor?
return isICmpTrue(ICmpInst::ICMP_ULT, X, Y, Q, MaxRecurse);
}
@@ -2254,14 +2269,21 @@ static Value *simplifyOrLogic(Value *X, Value *Y) {
match(Y, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
return NotA;
- // ~(A ^ B) | (A & B) --> ~(A & B)
- // ~(A ^ B) | (B & A) --> ~(A & B)
+ // ~(A ^ B) | (A & B) --> ~(A ^ B)
+ // ~(A ^ B) | (B & A) --> ~(A ^ B)
Value *NotAB;
if (match(X, m_CombineAnd(m_NotForbidUndef(m_Xor(m_Value(A), m_Value(B))),
m_Value(NotAB))) &&
match(Y, m_c_And(m_Specific(A), m_Specific(B))))
return NotAB;
+ // ~(A & B) | (A ^ B) --> ~(A & B)
+ // ~(A & B) | (B ^ A) --> ~(A & B)
+ if (match(X, m_CombineAnd(m_NotForbidUndef(m_And(m_Value(A), m_Value(B))),
+ m_Value(NotAB))) &&
+ match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
+ return NotAB;
+
return nullptr;
}
@@ -2685,7 +2707,9 @@ computePointerICmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
// Fold comparisons for non-escaping pointer even if the allocation call
// cannot be elided. We cannot fold malloc comparison to null. Also, the
- // dynamic allocation call could be either of the operands.
+ // dynamic allocation call could be either of the operands. Note that
+ // the other operand can not be based on the alloc - if it were, then
+ // the cmp itself would be a capture.
Value *MI = nullptr;
if (isAllocLikeFn(LHS, TLI) &&
llvm::isKnownNonZero(RHS, DL, 0, nullptr, CxtI, DT))
@@ -2890,7 +2914,8 @@ static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS,
if (RHS_CR.isFullSet())
return ConstantInt::getTrue(ITy);
- ConstantRange LHS_CR = computeConstantRange(LHS, IIQ.UseInstrInfo);
+ ConstantRange LHS_CR =
+ computeConstantRange(LHS, CmpInst::isSigned(Pred), IIQ.UseInstrInfo);
if (!LHS_CR.isFullSet()) {
if (RHS_CR.contains(LHS_CR))
return ConstantInt::getTrue(ITy);
@@ -4057,9 +4082,9 @@ static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
NewOps[1], Q, MaxRecurse - 1));
if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
- return PreventSelfSimplify(SimplifyGEPInst(GEP->getSourceElementType(),
- NewOps, GEP->isInBounds(), Q,
- MaxRecurse - 1));
+ return PreventSelfSimplify(SimplifyGEPInst(
+ GEP->getSourceElementType(), NewOps[0], makeArrayRef(NewOps).slice(1),
+ GEP->isInBounds(), Q, MaxRecurse - 1));
if (isa<SelectInst>(I))
return PreventSelfSimplify(
@@ -4417,45 +4442,52 @@ Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
/// Given operands for an GetElementPtrInst, see if we can fold the result.
/// If not, this returns null.
-static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops, bool InBounds,
+static Value *SimplifyGEPInst(Type *SrcTy, Value *Ptr,
+ ArrayRef<Value *> Indices, bool InBounds,
const SimplifyQuery &Q, unsigned) {
// The type of the GEP pointer operand.
unsigned AS =
- cast<PointerType>(Ops[0]->getType()->getScalarType())->getAddressSpace();
+ cast<PointerType>(Ptr->getType()->getScalarType())->getAddressSpace();
// getelementptr P -> P.
- if (Ops.size() == 1)
- return Ops[0];
+ if (Indices.empty())
+ return Ptr;
// Compute the (pointer) type returned by the GEP instruction.
- Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Ops.slice(1));
+ Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Indices);
Type *GEPTy = PointerType::get(LastType, AS);
- for (Value *Op : Ops) {
- // If one of the operands is a vector, the result type is a vector of
- // pointers. All vector operands must have the same number of elements.
- if (VectorType *VT = dyn_cast<VectorType>(Op->getType())) {
- GEPTy = VectorType::get(GEPTy, VT->getElementCount());
- break;
+ if (VectorType *VT = dyn_cast<VectorType>(Ptr->getType()))
+ GEPTy = VectorType::get(GEPTy, VT->getElementCount());
+ else {
+ for (Value *Op : Indices) {
+ // If one of the operands is a vector, the result type is a vector of
+ // pointers. All vector operands must have the same number of elements.
+ if (VectorType *VT = dyn_cast<VectorType>(Op->getType())) {
+ GEPTy = VectorType::get(GEPTy, VT->getElementCount());
+ break;
+ }
}
}
// getelementptr poison, idx -> poison
// getelementptr baseptr, poison -> poison
- if (any_of(Ops, [](const auto *V) { return isa<PoisonValue>(V); }))
+ if (isa<PoisonValue>(Ptr) ||
+ any_of(Indices, [](const auto *V) { return isa<PoisonValue>(V); }))
return PoisonValue::get(GEPTy);
- if (Q.isUndefValue(Ops[0]))
- return UndefValue::get(GEPTy);
+ if (Q.isUndefValue(Ptr))
+ // If inbounds, we can choose an out-of-bounds pointer as a base pointer.
+ return InBounds ? PoisonValue::get(GEPTy) : UndefValue::get(GEPTy);
bool IsScalableVec =
- isa<ScalableVectorType>(SrcTy) || any_of(Ops, [](const Value *V) {
+ isa<ScalableVectorType>(SrcTy) || any_of(Indices, [](const Value *V) {
return isa<ScalableVectorType>(V->getType());
});
- if (Ops.size() == 2) {
+ if (Indices.size() == 1) {
// getelementptr P, 0 -> P.
- if (match(Ops[1], m_Zero()) && Ops[0]->getType() == GEPTy)
- return Ops[0];
+ if (match(Indices[0], m_Zero()) && Ptr->getType() == GEPTy)
+ return Ptr;
Type *Ty = SrcTy;
if (!IsScalableVec && Ty->isSized()) {
@@ -4463,37 +4495,37 @@ static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops, bool InBounds,
uint64_t C;
uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty);
// getelementptr P, N -> P if P points to a type of zero size.
- if (TyAllocSize == 0 && Ops[0]->getType() == GEPTy)
- return Ops[0];
+ if (TyAllocSize == 0 && Ptr->getType() == GEPTy)
+ return Ptr;
// The following transforms are only safe if the ptrtoint cast
// doesn't truncate the pointers.
- if (Ops[1]->getType()->getScalarSizeInBits() ==
+ if (Indices[0]->getType()->getScalarSizeInBits() ==
Q.DL.getPointerSizeInBits(AS)) {
- auto CanSimplify = [GEPTy, &P, V = Ops[0]]() -> bool {
+ auto CanSimplify = [GEPTy, &P, Ptr]() -> bool {
return P->getType() == GEPTy &&
- getUnderlyingObject(P) == getUnderlyingObject(V);
+ getUnderlyingObject(P) == getUnderlyingObject(Ptr);
};
// getelementptr V, (sub P, V) -> P if P points to a type of size 1.
if (TyAllocSize == 1 &&
- match(Ops[1], m_Sub(m_PtrToInt(m_Value(P)),
- m_PtrToInt(m_Specific(Ops[0])))) &&
+ match(Indices[0],
+ m_Sub(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Specific(Ptr)))) &&
CanSimplify())
return P;
// getelementptr V, (ashr (sub P, V), C) -> P if P points to a type of
// size 1 << C.
- if (match(Ops[1], m_AShr(m_Sub(m_PtrToInt(m_Value(P)),
- m_PtrToInt(m_Specific(Ops[0]))),
- m_ConstantInt(C))) &&
+ if (match(Indices[0], m_AShr(m_Sub(m_PtrToInt(m_Value(P)),
+ m_PtrToInt(m_Specific(Ptr))),
+ m_ConstantInt(C))) &&
TyAllocSize == 1ULL << C && CanSimplify())
return P;
// getelementptr V, (sdiv (sub P, V), C) -> P if P points to a type of
// size C.
- if (match(Ops[1], m_SDiv(m_Sub(m_PtrToInt(m_Value(P)),
- m_PtrToInt(m_Specific(Ops[0]))),
- m_SpecificInt(TyAllocSize))) &&
+ if (match(Indices[0], m_SDiv(m_Sub(m_PtrToInt(m_Value(P)),
+ m_PtrToInt(m_Specific(Ptr))),
+ m_SpecificInt(TyAllocSize))) &&
CanSimplify())
return P;
}
@@ -4501,29 +4533,28 @@ static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops, bool InBounds,
}
if (!IsScalableVec && Q.DL.getTypeAllocSize(LastType) == 1 &&
- all_of(Ops.slice(1).drop_back(1),
+ all_of(Indices.drop_back(1),
[](Value *Idx) { return match(Idx, m_Zero()); })) {
unsigned IdxWidth =
- Q.DL.getIndexSizeInBits(Ops[0]->getType()->getPointerAddressSpace());
- if (Q.DL.getTypeSizeInBits(Ops.back()->getType()) == IdxWidth) {
+ Q.DL.getIndexSizeInBits(Ptr->getType()->getPointerAddressSpace());
+ if (Q.DL.getTypeSizeInBits(Indices.back()->getType()) == IdxWidth) {
APInt BasePtrOffset(IdxWidth, 0);
Value *StrippedBasePtr =
- Ops[0]->stripAndAccumulateInBoundsConstantOffsets(Q.DL,
- BasePtrOffset);
+ Ptr->stripAndAccumulateInBoundsConstantOffsets(Q.DL, BasePtrOffset);
// Avoid creating inttoptr of zero here: While LLVM's treatment of
// inttoptr is generally conservative, this particular case is folded to
// a null pointer, which will have incorrect provenance.
// gep (gep V, C), (sub 0, V) -> C
- if (match(Ops.back(),
+ if (match(Indices.back(),
m_Sub(m_Zero(), m_PtrToInt(m_Specific(StrippedBasePtr)))) &&
!BasePtrOffset.isZero()) {
auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset);
return ConstantExpr::getIntToPtr(CI, GEPTy);
}
// gep (gep V, C), (xor V, -1) -> C-1
- if (match(Ops.back(),
+ if (match(Indices.back(),
m_Xor(m_PtrToInt(m_Specific(StrippedBasePtr)), m_AllOnes())) &&
!BasePtrOffset.isOne()) {
auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1);
@@ -4533,17 +4564,18 @@ static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops, bool InBounds,
}
// Check to see if this is constant foldable.
- if (!all_of(Ops, [](Value *V) { return isa<Constant>(V); }))
+ if (!isa<Constant>(Ptr) ||
+ !all_of(Indices, [](Value *V) { return isa<Constant>(V); }))
return nullptr;
- auto *CE = ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ops[0]),
- Ops.slice(1), InBounds);
+ auto *CE = ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ptr), Indices,
+ InBounds);
return ConstantFoldConstant(CE, Q.DL);
}
-Value *llvm::SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops, bool InBounds,
- const SimplifyQuery &Q) {
- return ::SimplifyGEPInst(SrcTy, Ops, InBounds, Q, RecursionLimit);
+Value *llvm::SimplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef<Value *> Indices,
+ bool InBounds, const SimplifyQuery &Q) {
+ return ::SimplifyGEPInst(SrcTy, Ptr, Indices, InBounds, Q, RecursionLimit);
}
/// Given operands for an InsertValueInst, see if we can fold the result.
@@ -5603,26 +5635,6 @@ static Value *simplifyUnaryIntrinsic(Function *F, Value *Op0,
return nullptr;
}
-static APInt getMaxMinLimit(Intrinsic::ID IID, unsigned BitWidth) {
- switch (IID) {
- case Intrinsic::smax: return APInt::getSignedMaxValue(BitWidth);
- case Intrinsic::smin: return APInt::getSignedMinValue(BitWidth);
- case Intrinsic::umax: return APInt::getMaxValue(BitWidth);
- case Intrinsic::umin: return APInt::getMinValue(BitWidth);
- default: llvm_unreachable("Unexpected intrinsic");
- }
-}
-
-static ICmpInst::Predicate getMaxMinPredicate(Intrinsic::ID IID) {
- switch (IID) {
- case Intrinsic::smax: return ICmpInst::ICMP_SGE;
- case Intrinsic::smin: return ICmpInst::ICMP_SLE;
- case Intrinsic::umax: return ICmpInst::ICMP_UGE;
- case Intrinsic::umin: return ICmpInst::ICMP_ULE;
- default: llvm_unreachable("Unexpected intrinsic");
- }
-}
-
/// Given a min/max intrinsic, see if it can be removed based on having an
/// operand that is another min/max intrinsic with shared operand(s). The caller
/// is expected to swap the operand arguments to handle commutation.
@@ -5690,19 +5702,21 @@ static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
// Assume undef is the limit value.
if (Q.isUndefValue(Op1))
- return ConstantInt::get(ReturnType, getMaxMinLimit(IID, BitWidth));
+ return ConstantInt::get(
+ ReturnType, MinMaxIntrinsic::getSaturationPoint(IID, BitWidth));
const APInt *C;
if (match(Op1, m_APIntAllowUndef(C))) {
// Clamp to limit value. For example:
// umax(i8 %x, i8 255) --> 255
- if (*C == getMaxMinLimit(IID, BitWidth))
+ if (*C == MinMaxIntrinsic::getSaturationPoint(IID, BitWidth))
return ConstantInt::get(ReturnType, *C);
// If the constant op is the opposite of the limit value, the other must
// be larger/smaller or equal. For example:
// umin(i8 %x, i8 255) --> %x
- if (*C == getMaxMinLimit(getInverseMinMaxIntrinsic(IID), BitWidth))
+ if (*C == MinMaxIntrinsic::getSaturationPoint(
+ getInverseMinMaxIntrinsic(IID), BitWidth))
return Op0;
// Remove nested call if constant operands allow it. Example:
@@ -5713,10 +5727,9 @@ static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
Value *M00 = MinMax0->getOperand(0), *M01 = MinMax0->getOperand(1);
const APInt *InnerC;
if ((match(M00, m_APInt(InnerC)) || match(M01, m_APInt(InnerC))) &&
- ((IID == Intrinsic::smax && InnerC->sge(*C)) ||
- (IID == Intrinsic::smin && InnerC->sle(*C)) ||
- (IID == Intrinsic::umax && InnerC->uge(*C)) ||
- (IID == Intrinsic::umin && InnerC->ule(*C))))
+ ICmpInst::compare(*InnerC, *C,
+ ICmpInst::getNonStrictPredicate(
+ MinMaxIntrinsic::getPredicate(IID))))
return Op0;
}
}
@@ -5726,7 +5739,8 @@ static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
if (Value *V = foldMinMaxSharedOp(IID, Op1, Op0))
return V;
- ICmpInst::Predicate Pred = getMaxMinPredicate(IID);
+ ICmpInst::Predicate Pred =
+ ICmpInst::getNonStrictPredicate(MinMaxIntrinsic::getPredicate(IID));
if (isICmpTrue(Pred, Op0, Op1, Q.getWithoutUndef(), RecursionLimit))
return Op0;
if (isICmpTrue(Pred, Op1, Op0, Q.getWithoutUndef(), RecursionLimit))
@@ -6277,8 +6291,9 @@ static Value *simplifyInstructionWithOperands(Instruction *I,
break;
case Instruction::GetElementPtr: {
auto *GEPI = cast<GetElementPtrInst>(I);
- Result = SimplifyGEPInst(GEPI->getSourceElementType(), NewOps,
- GEPI->isInBounds(), Q);
+ Result =
+ SimplifyGEPInst(GEPI->getSourceElementType(), NewOps[0],
+ makeArrayRef(NewOps).slice(1), GEPI->isInBounds(), Q);
break;
}
case Instruction::InsertValue: {
@@ -6460,3 +6475,5 @@ const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &AM,
template const SimplifyQuery getBestSimplifyQuery(AnalysisManager<Function> &,
Function &);
}
+
+void InstSimplifyFolder::anchor() {}
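Among the InstructionSimplify changes above is the new fold ~(A & B) | (A ^ B) --> ~(A & B). It holds bitwise because any bit set in A ^ B (exactly one of A, B set) is already set in ~(A & B) (not both set). A brute-force check over all 8-bit values:

    #include <cassert>
    #include <cstdint>

    int main() {
      for (unsigned A = 0; A < 256; ++A)
        for (unsigned B = 0; B < 256; ++B) {
          uint8_t NotAnd = static_cast<uint8_t>(~(A & B));
          uint8_t Xor = static_cast<uint8_t>(A ^ B);
          // OR-ing in the xor adds no new bits, so the result is ~(A & B).
          assert(static_cast<uint8_t>(NotAnd | Xor) == NotAnd);
        }
      return 0;
    }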
diff --git a/contrib/llvm-project/llvm/lib/Analysis/LazyCallGraph.cpp b/contrib/llvm-project/llvm/lib/Analysis/LazyCallGraph.cpp
index 0007c54b16d0..e8e9593d7030 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/LazyCallGraph.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/LazyCallGraph.cpp
@@ -1503,7 +1503,7 @@ void LazyCallGraph::removeEdge(Node &SourceN, Node &TargetN) {
void LazyCallGraph::removeDeadFunction(Function &F) {
// FIXME: This is unnecessarily restrictive. We should be able to remove
// functions which recursively call themselves.
- assert(F.use_empty() &&
+ assert(F.hasZeroLiveUses() &&
"This routine should only be called on trivially dead functions!");
// We shouldn't remove library functions as they are never really dead while
@@ -1522,13 +1522,6 @@ void LazyCallGraph::removeDeadFunction(Function &F) {
// Remove this from the entry edges if present.
EntryEdges.removeEdgeInternal(N);
- if (SCCMap.empty()) {
- // No SCCs have been formed, so removing this is fine and there is nothing
- // else necessary at this point but clearing out the node.
- N.clear();
- return;
- }
-
// Cannot remove a function which has yet to be visited in the DFS walk, so
// if we have a node at all then we must have an SCC and RefSCC.
auto CI = SCCMap.find(&N);
@@ -1544,15 +1537,9 @@ void LazyCallGraph::removeDeadFunction(Function &F) {
assert(C.size() == 1 && "Dead functions must be in a singular SCC");
assert(RC.size() == 1 && "Dead functions must be in a singular RefSCC");
- auto RCIndexI = RefSCCIndices.find(&RC);
- int RCIndex = RCIndexI->second;
- PostOrderRefSCCs.erase(PostOrderRefSCCs.begin() + RCIndex);
- RefSCCIndices.erase(RCIndexI);
- for (int i = RCIndex, Size = PostOrderRefSCCs.size(); i < Size; ++i)
- RefSCCIndices[PostOrderRefSCCs[i]] = i;
-
// Finally clear out all the data structures from the node down through the
- // components.
+ // components. postorder_ref_scc_iterator will skip empty RefSCCs, so no need
+ // to adjust LazyCallGraph data structures.
N.clear();
N.G = nullptr;
N.F = nullptr;
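removeDeadFunction above no longer rebuilds PostOrderRefSCCs and RefSCCIndices when a singleton RefSCC is emptied, because (per the new comment) the post-order iterator simply skips empty RefSCCs. A toy version of that "leave the slot, skip it while iterating" trade-off:

    #include <cassert>
    #include <vector>

    int sumSkippingEmpty(const std::vector<std::vector<int>> &Buckets) {
      int Sum = 0;
      for (const auto &Bucket : Buckets) {
        if (Bucket.empty())
          continue; // emptied buckets stay in place; no index fix-up required
        for (int V : Bucket)
          Sum += V;
      }
      return Sum;
    }

    int main() {
      std::vector<std::vector<int>> Buckets{{1, 2}, {}, {4}};
      Buckets.front().clear(); // "remove" a bucket without erasing its slot
      assert(sumSkippingEmpty(Buckets) == 4);
    }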
diff --git a/contrib/llvm-project/llvm/lib/Analysis/LazyValueInfo.cpp b/contrib/llvm-project/llvm/lib/Analysis/LazyValueInfo.cpp
index 5b5d48bf6fe5..e311b40ab25c 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -395,7 +395,8 @@ class LazyValueInfoImpl {
/// if it exists in the module.
Function *GuardDecl;
- Optional<ValueLatticeElement> getBlockValue(Value *Val, BasicBlock *BB);
+ Optional<ValueLatticeElement> getBlockValue(Value *Val, BasicBlock *BB,
+ Instruction *CxtI);
Optional<ValueLatticeElement> getEdgeValue(Value *V, BasicBlock *F,
BasicBlock *T, Instruction *CxtI = nullptr);
@@ -533,15 +534,17 @@ void LazyValueInfoImpl::solve() {
}
}
-Optional<ValueLatticeElement> LazyValueInfoImpl::getBlockValue(Value *Val,
- BasicBlock *BB) {
+Optional<ValueLatticeElement> LazyValueInfoImpl::getBlockValue(
+ Value *Val, BasicBlock *BB, Instruction *CxtI) {
// If already a constant, there is nothing to compute.
if (Constant *VC = dyn_cast<Constant>(Val))
return ValueLatticeElement::get(VC);
if (Optional<ValueLatticeElement> OptLatticeVal =
- TheCache.getCachedValueInfo(Val, BB))
+ TheCache.getCachedValueInfo(Val, BB)) {
+ intersectAssumeOrGuardBlockValueConstantRange(Val, *OptLatticeVal, CxtI);
return OptLatticeVal;
+ }
// We have hit a cycle, assume overdefined.
if (!pushBlockValue({ BB, Val }))
@@ -792,31 +795,41 @@ void LazyValueInfoImpl::intersectAssumeOrGuardBlockValueConstantRange(
}
}
+static ConstantRange getConstantRangeOrFull(const ValueLatticeElement &Val,
+ Type *Ty, const DataLayout &DL) {
+ if (Val.isConstantRange())
+ return Val.getConstantRange();
+ return ConstantRange::getFull(DL.getTypeSizeInBits(Ty));
+}
+
Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueSelect(
SelectInst *SI, BasicBlock *BB) {
// Recurse on our inputs if needed
Optional<ValueLatticeElement> OptTrueVal =
- getBlockValue(SI->getTrueValue(), BB);
+ getBlockValue(SI->getTrueValue(), BB, SI);
if (!OptTrueVal)
return None;
ValueLatticeElement &TrueVal = *OptTrueVal;
Optional<ValueLatticeElement> OptFalseVal =
- getBlockValue(SI->getFalseValue(), BB);
+ getBlockValue(SI->getFalseValue(), BB, SI);
if (!OptFalseVal)
return None;
ValueLatticeElement &FalseVal = *OptFalseVal;
- if (TrueVal.isConstantRange() && FalseVal.isConstantRange()) {
- const ConstantRange &TrueCR = TrueVal.getConstantRange();
- const ConstantRange &FalseCR = FalseVal.getConstantRange();
+ if (TrueVal.isConstantRange() || FalseVal.isConstantRange()) {
+ const ConstantRange &TrueCR =
+ getConstantRangeOrFull(TrueVal, SI->getType(), DL);
+ const ConstantRange &FalseCR =
+ getConstantRangeOrFull(FalseVal, SI->getType(), DL);
Value *LHS = nullptr;
Value *RHS = nullptr;
SelectPatternResult SPR = matchSelectPattern(SI, LHS, RHS);
// Is this a min specifically of our two inputs? (Avoid the risk of
// ValueTracking getting smarter looking back past our immediate inputs.)
if (SelectPatternResult::isMinOrMax(SPR.Flavor) &&
- LHS == SI->getTrueValue() && RHS == SI->getFalseValue()) {
+ ((LHS == SI->getTrueValue() && RHS == SI->getFalseValue()) ||
+ (RHS == SI->getTrueValue() && LHS == SI->getFalseValue()))) {
ConstantRange ResultCR = [&]() {
switch (SPR.Flavor) {
default:
@@ -873,17 +886,10 @@ Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueSelect(
Optional<ConstantRange> LazyValueInfoImpl::getRangeFor(Value *V,
Instruction *CxtI,
BasicBlock *BB) {
- Optional<ValueLatticeElement> OptVal = getBlockValue(V, BB);
+ Optional<ValueLatticeElement> OptVal = getBlockValue(V, BB, CxtI);
if (!OptVal)
return None;
-
- ValueLatticeElement &Val = *OptVal;
- intersectAssumeOrGuardBlockValueConstantRange(V, Val, CxtI);
- if (Val.isConstantRange())
- return Val.getConstantRange();
-
- const unsigned OperandBitWidth = DL.getTypeSizeInBits(V->getType());
- return ConstantRange::getFull(OperandBitWidth);
+ return getConstantRangeOrFull(*OptVal, V->getType(), DL);
}
Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueCast(
@@ -1017,7 +1023,7 @@ Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueExtractValue(
if (Value *V = SimplifyExtractValueInst(
EVI->getAggregateOperand(), EVI->getIndices(),
EVI->getModule()->getDataLayout()))
- return getBlockValue(V, BB);
+ return getBlockValue(V, BB, EVI);
LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
<< "' - overdefined (unknown extractvalue).\n");
@@ -1126,14 +1132,16 @@ static ValueLatticeElement getValueFromICmpCondition(Value *Val, ICmpInst *ICI,
}
// If (X urem Modulus) >= C, then X >= C.
+ // If trunc X >= C, then X >= C.
// TODO: An upper bound could be computed as well.
- if (match(LHS, m_URem(m_Specific(Val), m_Value())) &&
+ if (match(LHS, m_CombineOr(m_URem(m_Specific(Val), m_Value()),
+ m_Trunc(m_Specific(Val)))) &&
match(RHS, m_APInt(C))) {
// Use the icmp region so we don't have to deal with different predicates.
ConstantRange CR = ConstantRange::makeExactICmpRegion(EdgePred, *C);
if (!CR.isEmptySet())
return ValueLatticeElement::getRange(ConstantRange::getNonEmpty(
- CR.getUnsignedMin(), APInt(BitWidth, 0)));
+ CR.getUnsignedMin().zextOrSelf(BitWidth), APInt(BitWidth, 0)));
}
return ValueLatticeElement::getOverdefined();
@@ -1430,14 +1438,12 @@ Optional<ValueLatticeElement> LazyValueInfoImpl::getEdgeValue(
// Can't get any more precise here
return LocalResult;
- Optional<ValueLatticeElement> OptInBlock = getBlockValue(Val, BBFrom);
+ Optional<ValueLatticeElement> OptInBlock =
+ getBlockValue(Val, BBFrom, BBFrom->getTerminator());
if (!OptInBlock)
return None;
ValueLatticeElement &InBlock = *OptInBlock;
- // Try to intersect ranges of the BB and the constraint on the edge.
- intersectAssumeOrGuardBlockValueConstantRange(Val, InBlock,
- BBFrom->getTerminator());
// We can use the context instruction (generically the ultimate instruction
// the calling pass is trying to simplify) here, even though the result of
// this function is generally cached when called from the solve* functions
@@ -1457,15 +1463,14 @@ ValueLatticeElement LazyValueInfoImpl::getValueInBlock(Value *V, BasicBlock *BB,
<< BB->getName() << "'\n");
assert(BlockValueStack.empty() && BlockValueSet.empty());
- Optional<ValueLatticeElement> OptResult = getBlockValue(V, BB);
+ Optional<ValueLatticeElement> OptResult = getBlockValue(V, BB, CxtI);
if (!OptResult) {
solve();
- OptResult = getBlockValue(V, BB);
+ OptResult = getBlockValue(V, BB, CxtI);
assert(OptResult && "Value not available after solving");
}
- ValueLatticeElement Result = *OptResult;
- intersectAssumeOrGuardBlockValueConstantRange(V, Result, CxtI);
+ ValueLatticeElement Result = *OptResult;
LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
return Result;
}
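The LazyValueInfo changes above add getConstantRangeOrFull and move the assume/guard intersection into getBlockValue, so cached block values are refined against the context instruction in one place. A toy sketch of the fallback behaviour, assuming a simple half-open range type rather than LLVM's ConstantRange:

#include <cstdint>
#include <optional>

// Half-open [Lo, Hi) range; an illustrative stand-in for a constant range.
struct Range { uint64_t Lo, Hi; };

// If the lattice element carried a constant range, use it; otherwise fall
// back to the full range of the value's type, e.g. [0, 256) for an i8.
Range rangeOrFull(const std::optional<Range> &Known, unsigned Bits) {
  if (Known)
    return *Known;
  return {0, uint64_t(1) << Bits}; // full range of the type (assumes Bits < 64)
}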
diff --git a/contrib/llvm-project/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/contrib/llvm-project/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index 6444518dc70c..2ab78d2b7ee2 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -519,8 +519,7 @@ public:
AccessAnalysis(Loop *TheLoop, AAResults *AA, LoopInfo *LI,
MemoryDepChecker::DepCandidates &DA,
PredicatedScalarEvolution &PSE)
- : TheLoop(TheLoop), AST(*AA), LI(LI), DepCands(DA),
- IsRTCheckAnalysisNeeded(false), PSE(PSE) {}
+ : TheLoop(TheLoop), AST(*AA), LI(LI), DepCands(DA), PSE(PSE) {}
/// Register a load and whether it is only read from.
void addLoad(MemoryLocation &Loc, bool IsReadOnly) {
@@ -620,7 +619,7 @@ private:
/// memcheck analysis without dependency checking
/// (i.e. FoundNonConstantDistanceDependence), isDependencyCheckNeeded is
/// cleared while this remains set if we have potentially dependent accesses.
- bool IsRTCheckAnalysisNeeded;
+ bool IsRTCheckAnalysisNeeded = false;
/// The SCEV predicate containing all the SCEV-related assumptions.
PredicatedScalarEvolution &PSE;
@@ -1055,7 +1054,6 @@ int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy,
bool ShouldCheckWrap) {
Type *Ty = Ptr->getType();
assert(Ty->isPointerTy() && "Unexpected non-ptr");
- assert(!AccessTy->isAggregateType() && "Bad stride - Not a pointer to a scalar type");
if (isa<ScalableVectorType>(AccessTy)) {
LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
@@ -2245,10 +2243,7 @@ LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
DominatorTree *DT, LoopInfo *LI)
: PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
PtrRtChecking(std::make_unique<RuntimePointerChecking>(SE)),
- DepChecker(std::make_unique<MemoryDepChecker>(*PSE, L)), TheLoop(L),
- NumLoads(0), NumStores(0), MaxSafeDepDistBytes(-1), CanVecMem(false),
- HasConvergentOp(false),
- HasDependenceInvolvingLoopInvariantAddress(false) {
+ DepChecker(std::make_unique<MemoryDepChecker>(*PSE, L)), TheLoop(L) {
if (canAnalyzeLoop())
analyzeLoop(AA, LI, TLI, DT);
}
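The two hunks above are a pure cleanup: flags such as IsRTCheckAnalysisNeeded and the LoopAccessInfo counters now use C++11 in-class default member initializers instead of being zeroed in every constructor initializer list. The same pattern in miniature, illustrative only:

// The flag gets its default at the declaration, so no constructor has to
// repeat "NeedsCheck(false)" in its initializer list.
struct Checker {
  explicit Checker(int LoopDepth) : Depth(LoopDepth) {}

  int Depth = 0;
  bool NeedsCheck = false; // in-class default member initializer
};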
diff --git a/contrib/llvm-project/llvm/lib/Analysis/LoopCacheAnalysis.cpp b/contrib/llvm-project/llvm/lib/Analysis/LoopCacheAnalysis.cpp
index 7b895d8a5dc2..ba014bd08c98 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/LoopCacheAnalysis.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/LoopCacheAnalysis.cpp
@@ -477,9 +477,8 @@ raw_ostream &llvm::operator<<(raw_ostream &OS, const CacheCost &CC) {
CacheCost::CacheCost(const LoopVectorTy &Loops, const LoopInfo &LI,
ScalarEvolution &SE, TargetTransformInfo &TTI,
- AAResults &AA, DependenceInfo &DI,
- Optional<unsigned> TRT)
- : Loops(Loops), TripCounts(), LoopCosts(),
+ AAResults &AA, DependenceInfo &DI, Optional<unsigned> TRT)
+ : Loops(Loops),
TRT((TRT == None) ? Optional<unsigned>(TemporalReuseThreshold) : TRT),
LI(LI), SE(SE), TTI(TTI), AA(AA), DI(DI) {
assert(!Loops.empty() && "Expecting a non-empty loop vector.");
diff --git a/contrib/llvm-project/llvm/lib/Analysis/LoopInfo.cpp b/contrib/llvm-project/llvm/lib/Analysis/LoopInfo.cpp
index b35fb2a190f6..dd6958716127 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/LoopInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/LoopInfo.cpp
@@ -695,11 +695,10 @@ class UnloopUpdater {
// Flag the presence of an irreducible backedge whose destination is a block
// directly contained by the original unloop.
- bool FoundIB;
+ bool FoundIB = false;
public:
- UnloopUpdater(Loop *UL, LoopInfo *LInfo)
- : Unloop(*UL), LI(LInfo), DFS(UL), FoundIB(false) {}
+ UnloopUpdater(Loop *UL, LoopInfo *LInfo) : Unloop(*UL), LI(LInfo), DFS(UL) {}
void updateBlockParents();
diff --git a/contrib/llvm-project/llvm/lib/Analysis/LoopPass.cpp b/contrib/llvm-project/llvm/lib/Analysis/LoopPass.cpp
index 9e470e998e67..b720bab454e9 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/LoopPass.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/LoopPass.cpp
@@ -69,8 +69,7 @@ char PrintLoopPassWrapper::ID = 0;
char LPPassManager::ID = 0;
-LPPassManager::LPPassManager()
- : FunctionPass(ID), PMDataManager() {
+LPPassManager::LPPassManager() : FunctionPass(ID) {
LI = nullptr;
CurrentLoop = nullptr;
}
diff --git a/contrib/llvm-project/llvm/lib/Analysis/MLInlineAdvisor.cpp b/contrib/llvm-project/llvm/lib/Analysis/MLInlineAdvisor.cpp
index f5a65cd2b689..0480c1cd2842 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/MLInlineAdvisor.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/MLInlineAdvisor.cpp
@@ -11,35 +11,34 @@
// 'release' mode) or a runtime-loaded model (the 'development' case).
//
//===----------------------------------------------------------------------===//
-#include "llvm/Config/config.h"
-#if defined(LLVM_HAVE_TF_AOT) || defined(LLVM_HAVE_TF_API)
-
-#include <limits>
-#include <unordered_map>
-#include <unordered_set>
-
+#include "llvm/Analysis/MLInlineAdvisor.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/FunctionPropertiesAnalysis.h"
#include "llvm/Analysis/InlineCost.h"
-#include "llvm/Analysis/MLInlineAdvisor.h"
+#include "llvm/Analysis/InlineModelFeatureMaps.h"
+#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/MLModelRunner.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
+#include "llvm/Analysis/ReleaseModeModelRunner.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Config/config.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Path.h"
+#include <limits>
+#include <unordered_map>
+#include <unordered_set>
+
using namespace llvm;
-#ifdef LLVM_HAVE_TF_AOT
-#include "llvm/Analysis/ReleaseModeModelRunner.h"
+#if defined(LLVM_HAVE_TF_AOT_INLINERSIZEMODEL)
// codegen-ed file
#include "InlinerSizeModel.h" // NOLINT
-#include "llvm/Analysis/InlineModelFeatureMaps.h"
std::unique_ptr<InlineAdvisor>
llvm::getReleaseModeAdvisor(Module &M, ModuleAnalysisManager &MAM) {
@@ -90,7 +89,8 @@ MLInlineAdvisor::MLInlineAdvisor(Module &M, ModuleAnalysisManager &MAM,
std::unique_ptr<MLModelRunner> Runner)
: InlineAdvisor(
M, MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager()),
- ModelRunner(std::move(Runner)), CG(new CallGraph(M)),
+ ModelRunner(std::move(Runner)),
+ CG(MAM.getResult<LazyCallGraphAnalysis>(M)),
InitialIRSize(getModuleIRSize()), CurrentIRSize(InitialIRSize) {
assert(ModelRunner);
@@ -100,7 +100,8 @@ MLInlineAdvisor::MLInlineAdvisor(Module &M, ModuleAnalysisManager &MAM,
// critical in behavioral cloning - i.e. training a model to mimic the manual
// heuristic's decisions - and, thus, equally important for training for
// improvement.
- for (auto I = scc_begin(CG.get()); !I.isAtEnd(); ++I) {
+ CallGraph CGraph(M);
+ for (auto I = scc_begin(&CGraph); !I.isAtEnd(); ++I) {
const std::vector<CallGraphNode *> &CGNodes = *I;
unsigned Level = 0;
for (auto *CGNode : CGNodes) {
@@ -110,7 +111,7 @@ MLInlineAdvisor::MLInlineAdvisor(Module &M, ModuleAnalysisManager &MAM,
for (auto &I : instructions(F)) {
if (auto *CS = getInlinableCS(I)) {
auto *Called = CS->getCalledFunction();
- auto Pos = FunctionLevels.find(Called);
+ auto Pos = FunctionLevels.find(&CG.get(*Called));
// In bottom up traversal, an inlinable callee is either in the
// same SCC, or to a function in a visited SCC. So not finding its
// level means we haven't visited it yet, meaning it's in this SCC.
@@ -123,24 +124,73 @@ MLInlineAdvisor::MLInlineAdvisor(Module &M, ModuleAnalysisManager &MAM,
for (auto *CGNode : CGNodes) {
Function *F = CGNode->getFunction();
if (F && !F->isDeclaration())
- FunctionLevels[F] = Level;
+ FunctionLevels[&CG.get(*F)] = Level;
}
}
+ for (auto KVP : FunctionLevels) {
+ AllNodes.insert(KVP.first);
+ EdgeCount += getLocalCalls(KVP.first->getFunction());
+ }
+ NodeCount = AllNodes.size();
+}
+
+unsigned MLInlineAdvisor::getInitialFunctionLevel(const Function &F) const {
+ return CG.lookup(F) ? FunctionLevels.at(CG.lookup(F)) : 0;
}
void MLInlineAdvisor::onPassEntry() {
// Function passes executed between InlinerPass runs may have changed the
// module-wide features.
- if (!Invalid)
- return;
- NodeCount = 0;
- EdgeCount = 0;
- for (auto &F : M)
- if (!F.isDeclaration()) {
- ++NodeCount;
- EdgeCount += getLocalCalls(F);
+ // The cgscc pass manager rules are such that:
+ // - if a pass leads to merging SCCs, then the pipeline is restarted on the
+ // merged SCC
+ // - if a pass leads to splitting the SCC, then we continue with one of the
+ // splits
+ // This means that the NodesInLastSCC is a superset (not strict) of the nodes
+ // that subsequent passes would have processed
+ // - in addition, if new Nodes were created by a pass (e.g. CoroSplit),
+ // they'd be adjacent to Nodes in the last SCC. So we just need to check the
+ // boundary of Nodes in NodesInLastSCC for Nodes we haven't seen. We don't
+ // care about the nature of the Edge (call or ref).
+ NodeCount -= static_cast<int64_t>(NodesInLastSCC.size());
+ while (!NodesInLastSCC.empty()) {
+ const auto *N = NodesInLastSCC.front();
+ NodesInLastSCC.pop_front();
+ // The Function wrapped by N could have been deleted since we last saw it.
+ if (N->isDead()) {
+ assert(!N->getFunction().isDeclaration());
+ continue;
+ }
+ ++NodeCount;
+ EdgeCount += getLocalCalls(N->getFunction());
+ for (const auto &E : *(*N)) {
+ const auto *AdjNode = &E.getNode();
+ assert(!AdjNode->isDead() && !AdjNode->getFunction().isDeclaration());
+ auto I = AllNodes.insert(AdjNode);
+ if (I.second)
+ NodesInLastSCC.push_back(AdjNode);
}
- Invalid = false;
+ }
+
+ EdgeCount -= EdgesOfLastSeenNodes;
+ EdgesOfLastSeenNodes = 0;
+}
+
+void MLInlineAdvisor::onPassExit(LazyCallGraph::SCC *LastSCC) {
+ if (!LastSCC)
+ return;
+ // Keep track of the nodes and edges we last saw. Then, in onPassEntry,
+ // we update the node count and edge count from the subset of these nodes that
+ // survived.
+ assert(NodesInLastSCC.empty());
+ assert(NodeCount >= LastSCC->size());
+ EdgesOfLastSeenNodes = 0;
+ for (const auto &N : *LastSCC) {
+ assert(!N.isDead());
+ EdgesOfLastSeenNodes += getLocalCalls(N.getFunction());
+ NodesInLastSCC.push_back(&N);
+ }
+ assert(EdgeCount >= EdgesOfLastSeenNodes);
}
int64_t MLInlineAdvisor::getLocalCalls(Function &F) {
@@ -192,7 +242,7 @@ void MLInlineAdvisor::onSuccessfulInlining(const MLInlineAdvice &Advice,
int64_t MLInlineAdvisor::getModuleIRSize() const {
int64_t Ret = 0;
- for (auto &F : CG->getModule())
+ for (auto &F : M)
if (!F.isDeclaration())
Ret += getIRSize(F);
return Ret;
@@ -263,7 +313,7 @@ std::unique_ptr<InlineAdvice> MLInlineAdvisor::getAdviceImpl(CallBase &CB) {
*ModelRunner->getTensor<int64_t>(FeatureIndex::CalleeBasicBlockCount) =
CalleeBefore.BasicBlockCount;
*ModelRunner->getTensor<int64_t>(FeatureIndex::CallSiteHeight) =
- FunctionLevels[&Caller];
+ getInitialFunctionLevel(Caller);
*ModelRunner->getTensor<int64_t>(FeatureIndex::NodeCount) = NodeCount;
*ModelRunner->getTensor<int64_t>(FeatureIndex::NrCtantParams) = NrCtantParams;
*ModelRunner->getTensor<int64_t>(FeatureIndex::EdgeCount) = EdgeCount;
@@ -361,4 +411,3 @@ void MLInlineAdvice::recordUnattemptedInliningImpl() {
return R;
});
}
-#endif // defined(LLVM_HAVE_TF_AOT) || defined(LLVM_HAVE_TF_API)
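The onPassEntry/onPassExit pair above keeps NodeCount and EdgeCount incremental: onPassExit remembers the nodes of the SCC it last saw, and onPassEntry re-walks outward from them, skipping functions that were deleted in between and counting neighbors that were created. A self-contained sketch of that frontier walk, with hypothetical graph types standing in for LazyCallGraph:

#include <deque>
#include <set>
#include <vector>

// Minimal stand-in for a call graph node; not LLVM's type.
struct Node {
  bool Dead = false;
  long LocalCalls = 0;
  std::vector<const Node *> Neighbors;
};

void refreshCounts(std::deque<const Node *> Frontier,
                   std::set<const Node *> &Seen,
                   long &NodeCount, long &EdgeCount) {
  // Forget the nodes we counted last time; they are re-added below if alive.
  NodeCount -= static_cast<long>(Frontier.size());
  while (!Frontier.empty()) {
    const Node *N = Frontier.front();
    Frontier.pop_front();
    if (N->Dead) // the underlying function may have been deleted meanwhile
      continue;
    ++NodeCount;
    EdgeCount += N->LocalCalls;
    for (const Node *Adj : N->Neighbors)
      if (Seen.insert(Adj).second) // newly created neighbor: count it too
        Frontier.push_back(Adj);
  }
}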
diff --git a/contrib/llvm-project/llvm/lib/Analysis/MemoryBuiltins.cpp b/contrib/llvm-project/llvm/lib/Analysis/MemoryBuiltins.cpp
index ffdd7a2cfd4b..208f93aa1ac6 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -51,12 +51,13 @@ using namespace llvm;
enum AllocType : uint8_t {
OpNewLike = 1<<0, // allocates; never returns null
- MallocLike = 1<<1 | OpNewLike, // allocates; may return null
+ MallocLike = 1<<1, // allocates; may return null
AlignedAllocLike = 1<<2, // allocates with alignment; may return null
CallocLike = 1<<3, // allocates + bzero
ReallocLike = 1<<4, // reallocates
StrDupLike = 1<<5,
- MallocOrCallocLike = MallocLike | CallocLike | AlignedAllocLike,
+ MallocOrOpNewLike = MallocLike | OpNewLike,
+ MallocOrCallocLike = MallocLike | OpNewLike | CallocLike | AlignedAllocLike,
AllocLike = MallocOrCallocLike | StrDupLike,
AnyAlloc = AllocLike | ReallocLike
};
@@ -66,64 +67,59 @@ struct AllocFnsTy {
unsigned NumParams;
// First and Second size parameters (or -1 if unused)
int FstParam, SndParam;
+ // Alignment parameter for aligned_alloc and aligned new
+ int AlignParam;
};
// FIXME: certain users need more information. E.g., SimplifyLibCalls needs to
// know which functions are nounwind, noalias, nocapture parameters, etc.
static const std::pair<LibFunc, AllocFnsTy> AllocationFnData[] = {
- {LibFunc_malloc, {MallocLike, 1, 0, -1}},
- {LibFunc_vec_malloc, {MallocLike, 1, 0, -1}},
- {LibFunc_valloc, {MallocLike, 1, 0, -1}},
- {LibFunc_Znwj, {OpNewLike, 1, 0, -1}}, // new(unsigned int)
- {LibFunc_ZnwjRKSt9nothrow_t, {MallocLike, 2, 0, -1}}, // new(unsigned int, nothrow)
- {LibFunc_ZnwjSt11align_val_t, {OpNewLike, 2, 0, -1}}, // new(unsigned int, align_val_t)
- {LibFunc_ZnwjSt11align_val_tRKSt9nothrow_t, // new(unsigned int, align_val_t, nothrow)
- {MallocLike, 3, 0, -1}},
- {LibFunc_Znwm, {OpNewLike, 1, 0, -1}}, // new(unsigned long)
- {LibFunc_ZnwmRKSt9nothrow_t, {MallocLike, 2, 0, -1}}, // new(unsigned long, nothrow)
- {LibFunc_ZnwmSt11align_val_t, {OpNewLike, 2, 0, -1}}, // new(unsigned long, align_val_t)
- {LibFunc_ZnwmSt11align_val_tRKSt9nothrow_t, // new(unsigned long, align_val_t, nothrow)
- {MallocLike, 3, 0, -1}},
- {LibFunc_Znaj, {OpNewLike, 1, 0, -1}}, // new[](unsigned int)
- {LibFunc_ZnajRKSt9nothrow_t, {MallocLike, 2, 0, -1}}, // new[](unsigned int, nothrow)
- {LibFunc_ZnajSt11align_val_t, {OpNewLike, 2, 0, -1}}, // new[](unsigned int, align_val_t)
- {LibFunc_ZnajSt11align_val_tRKSt9nothrow_t, // new[](unsigned int, align_val_t, nothrow)
- {MallocLike, 3, 0, -1}},
- {LibFunc_Znam, {OpNewLike, 1, 0, -1}}, // new[](unsigned long)
- {LibFunc_ZnamRKSt9nothrow_t, {MallocLike, 2, 0, -1}}, // new[](unsigned long, nothrow)
- {LibFunc_ZnamSt11align_val_t, {OpNewLike, 2, 0, -1}}, // new[](unsigned long, align_val_t)
- {LibFunc_ZnamSt11align_val_tRKSt9nothrow_t, // new[](unsigned long, align_val_t, nothrow)
- {MallocLike, 3, 0, -1}},
- {LibFunc_msvc_new_int, {OpNewLike, 1, 0, -1}}, // new(unsigned int)
- {LibFunc_msvc_new_int_nothrow, {MallocLike, 2, 0, -1}}, // new(unsigned int, nothrow)
- {LibFunc_msvc_new_longlong, {OpNewLike, 1, 0, -1}}, // new(unsigned long long)
- {LibFunc_msvc_new_longlong_nothrow, {MallocLike, 2, 0, -1}}, // new(unsigned long long, nothrow)
- {LibFunc_msvc_new_array_int, {OpNewLike, 1, 0, -1}}, // new[](unsigned int)
- {LibFunc_msvc_new_array_int_nothrow, {MallocLike, 2, 0, -1}}, // new[](unsigned int, nothrow)
- {LibFunc_msvc_new_array_longlong, {OpNewLike, 1, 0, -1}}, // new[](unsigned long long)
- {LibFunc_msvc_new_array_longlong_nothrow, {MallocLike, 2, 0, -1}}, // new[](unsigned long long, nothrow)
- {LibFunc_aligned_alloc, {AlignedAllocLike, 2, 1, -1}},
- {LibFunc_memalign, {AlignedAllocLike, 2, 1, -1}},
- {LibFunc_calloc, {CallocLike, 2, 0, 1}},
- {LibFunc_vec_calloc, {CallocLike, 2, 0, 1}},
- {LibFunc_realloc, {ReallocLike, 2, 1, -1}},
- {LibFunc_vec_realloc, {ReallocLike, 2, 1, -1}},
- {LibFunc_reallocf, {ReallocLike, 2, 1, -1}},
- {LibFunc_strdup, {StrDupLike, 1, -1, -1}},
- {LibFunc_strndup, {StrDupLike, 2, 1, -1}},
- {LibFunc___kmpc_alloc_shared, {MallocLike, 1, 0, -1}},
- // TODO: Handle "int posix_memalign(void **, size_t, size_t)"
+ {LibFunc_malloc, {MallocLike, 1, 0, -1, -1}},
+ {LibFunc_vec_malloc, {MallocLike, 1, 0, -1, -1}},
+ {LibFunc_valloc, {MallocLike, 1, 0, -1, -1}},
+ {LibFunc_Znwj, {OpNewLike, 1, 0, -1, -1}}, // new(unsigned int)
+ {LibFunc_ZnwjRKSt9nothrow_t, {MallocLike, 2, 0, -1, -1}}, // new(unsigned int, nothrow)
+ {LibFunc_ZnwjSt11align_val_t, {OpNewLike, 2, 0, -1, 1}}, // new(unsigned int, align_val_t)
+ {LibFunc_ZnwjSt11align_val_tRKSt9nothrow_t, {MallocLike, 3, 0, -1, 1}}, // new(unsigned int, align_val_t, nothrow)
+ {LibFunc_Znwm, {OpNewLike, 1, 0, -1, -1}}, // new(unsigned long)
+ {LibFunc_ZnwmRKSt9nothrow_t, {MallocLike, 2, 0, -1, -1}}, // new(unsigned long, nothrow)
+ {LibFunc_ZnwmSt11align_val_t, {OpNewLike, 2, 0, -1, 1}}, // new(unsigned long, align_val_t)
+ {LibFunc_ZnwmSt11align_val_tRKSt9nothrow_t, {MallocLike, 3, 0, -1, 1}}, // new(unsigned long, align_val_t, nothrow)
+ {LibFunc_Znaj, {OpNewLike, 1, 0, -1, -1}}, // new[](unsigned int)
+ {LibFunc_ZnajRKSt9nothrow_t, {MallocLike, 2, 0, -1, -1}}, // new[](unsigned int, nothrow)
+ {LibFunc_ZnajSt11align_val_t, {OpNewLike, 2, 0, -1, 1}}, // new[](unsigned int, align_val_t)
+ {LibFunc_ZnajSt11align_val_tRKSt9nothrow_t, {MallocLike, 3, 0, -1, 1}}, // new[](unsigned int, align_val_t, nothrow)
+ {LibFunc_Znam, {OpNewLike, 1, 0, -1, -1}}, // new[](unsigned long)
+ {LibFunc_ZnamRKSt9nothrow_t, {MallocLike, 2, 0, -1, -1}}, // new[](unsigned long, nothrow)
+ {LibFunc_ZnamSt11align_val_t, {OpNewLike, 2, 0, -1, 1}}, // new[](unsigned long, align_val_t)
+ {LibFunc_ZnamSt11align_val_tRKSt9nothrow_t, {MallocLike, 3, 0, -1, 1}}, // new[](unsigned long, align_val_t, nothrow)
+ {LibFunc_msvc_new_int, {OpNewLike, 1, 0, -1, -1}}, // new(unsigned int)
+ {LibFunc_msvc_new_int_nothrow, {MallocLike, 2, 0, -1, -1}}, // new(unsigned int, nothrow)
+ {LibFunc_msvc_new_longlong, {OpNewLike, 1, 0, -1, -1}}, // new(unsigned long long)
+ {LibFunc_msvc_new_longlong_nothrow, {MallocLike, 2, 0, -1, -1}}, // new(unsigned long long, nothrow)
+ {LibFunc_msvc_new_array_int, {OpNewLike, 1, 0, -1, -1}}, // new[](unsigned int)
+ {LibFunc_msvc_new_array_int_nothrow, {MallocLike, 2, 0, -1, -1}}, // new[](unsigned int, nothrow)
+ {LibFunc_msvc_new_array_longlong, {OpNewLike, 1, 0, -1, -1}}, // new[](unsigned long long)
+ {LibFunc_msvc_new_array_longlong_nothrow, {MallocLike, 2, 0, -1, -1}}, // new[](unsigned long long, nothrow)
+ {LibFunc_aligned_alloc, {AlignedAllocLike, 2, 1, -1, 0}},
+ {LibFunc_memalign, {AlignedAllocLike, 2, 1, -1, 0}},
+ {LibFunc_calloc, {CallocLike, 2, 0, 1, -1}},
+ {LibFunc_vec_calloc, {CallocLike, 2, 0, 1, -1}},
+ {LibFunc_realloc, {ReallocLike, 2, 1, -1, -1}},
+ {LibFunc_vec_realloc, {ReallocLike, 2, 1, -1, -1}},
+ {LibFunc_reallocf, {ReallocLike, 2, 1, -1, -1}},
+ {LibFunc_strdup, {StrDupLike, 1, -1, -1, -1}},
+ {LibFunc_strndup, {StrDupLike, 2, 1, -1, -1}},
+ {LibFunc___kmpc_alloc_shared, {MallocLike, 1, 0, -1, -1}},
+ // TODO: Handle "int posix_memalign(void **, size_t, size_t)"
};
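Each entry of the rewritten table is {AllocType, NumParams, FstParam, SndParam, AlignParam}, with -1 meaning "not applicable". Reading one row above as a worked example:

// {LibFunc_aligned_alloc, {AlignedAllocLike, 2, 1, -1, 0}}
//   NumParams  = 2   // aligned_alloc(align, size) takes two arguments
//   FstParam   = 1   // the (first) size parameter is argument #1
//   SndParam   = -1  // no second size parameter to multiply in
//   AlignParam = 0   // the new field: the alignment is argument #0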
-static const Function *getCalledFunction(const Value *V, bool LookThroughBitCast,
+static const Function *getCalledFunction(const Value *V,
bool &IsNoBuiltin) {
// Don't care about intrinsics in this case.
if (isa<IntrinsicInst>(V))
return nullptr;
- if (LookThroughBitCast)
- V = V->stripPointerCasts();
-
const auto *CB = dyn_cast<CallBase>(V);
if (!CB)
return nullptr;
@@ -175,11 +171,9 @@ getAllocationDataForFunction(const Function *Callee, AllocType AllocTy,
}
static Optional<AllocFnsTy> getAllocationData(const Value *V, AllocType AllocTy,
- const TargetLibraryInfo *TLI,
- bool LookThroughBitCast = false) {
+ const TargetLibraryInfo *TLI) {
bool IsNoBuiltinCall;
- if (const Function *Callee =
- getCalledFunction(V, LookThroughBitCast, IsNoBuiltinCall))
+ if (const Function *Callee = getCalledFunction(V, IsNoBuiltinCall))
if (!IsNoBuiltinCall)
return getAllocationDataForFunction(Callee, AllocTy, TLI);
return None;
@@ -187,11 +181,9 @@ static Optional<AllocFnsTy> getAllocationData(const Value *V, AllocType AllocTy,
static Optional<AllocFnsTy>
getAllocationData(const Value *V, AllocType AllocTy,
- function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
- bool LookThroughBitCast = false) {
+ function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
bool IsNoBuiltinCall;
- if (const Function *Callee =
- getCalledFunction(V, LookThroughBitCast, IsNoBuiltinCall))
+ if (const Function *Callee = getCalledFunction(V, IsNoBuiltinCall))
if (!IsNoBuiltinCall)
return getAllocationDataForFunction(
Callee, AllocTy, &GetTLI(const_cast<Function &>(*Callee)));
@@ -202,7 +194,7 @@ static Optional<AllocFnsTy> getAllocationSize(const Value *V,
const TargetLibraryInfo *TLI) {
bool IsNoBuiltinCall;
const Function *Callee =
- getCalledFunction(V, /*LookThroughBitCast=*/false, IsNoBuiltinCall);
+ getCalledFunction(V, IsNoBuiltinCall);
if (!Callee)
return None;
@@ -226,92 +218,57 @@ static Optional<AllocFnsTy> getAllocationSize(const Value *V,
Result.NumParams = Callee->getNumOperands();
Result.FstParam = Args.first;
Result.SndParam = Args.second.getValueOr(-1);
+ // Allocsize has no way to specify an alignment argument
+ Result.AlignParam = -1;
return Result;
}
-static bool hasNoAliasAttr(const Value *V, bool LookThroughBitCast) {
- const auto *CB =
- dyn_cast<CallBase>(LookThroughBitCast ? V->stripPointerCasts() : V);
- return CB && CB->hasRetAttr(Attribute::NoAlias);
-}
-
/// Tests if a value is a call or invoke to a library function that
/// allocates or reallocates memory (either malloc, calloc, realloc, or strdup
/// like).
-bool llvm::isAllocationFn(const Value *V, const TargetLibraryInfo *TLI,
- bool LookThroughBitCast) {
- return getAllocationData(V, AnyAlloc, TLI, LookThroughBitCast).hasValue();
+bool llvm::isAllocationFn(const Value *V, const TargetLibraryInfo *TLI) {
+ return getAllocationData(V, AnyAlloc, TLI).hasValue();
}
bool llvm::isAllocationFn(
- const Value *V, function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
- bool LookThroughBitCast) {
- return getAllocationData(V, AnyAlloc, GetTLI, LookThroughBitCast).hasValue();
-}
-
-/// Tests if a value is a call or invoke to a function that returns a
-/// NoAlias pointer (including malloc/calloc/realloc/strdup-like functions).
-bool llvm::isNoAliasFn(const Value *V, const TargetLibraryInfo *TLI,
- bool LookThroughBitCast) {
- // it's safe to consider realloc as noalias since accessing the original
- // pointer is undefined behavior
- return isAllocationFn(V, TLI, LookThroughBitCast) ||
- hasNoAliasAttr(V, LookThroughBitCast);
+ const Value *V, function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
+ return getAllocationData(V, AnyAlloc, GetTLI).hasValue();
}
/// Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory (such as malloc).
-bool llvm::isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
- bool LookThroughBitCast) {
- return getAllocationData(V, MallocLike, TLI, LookThroughBitCast).hasValue();
-}
-bool llvm::isMallocLikeFn(
- const Value *V, function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
- bool LookThroughBitCast) {
- return getAllocationData(V, MallocLike, GetTLI, LookThroughBitCast)
- .hasValue();
+static bool isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
+ return getAllocationData(V, MallocOrOpNewLike, TLI).hasValue();
}
/// Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory with alignment (such as aligned_alloc).
-bool llvm::isAlignedAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
- bool LookThroughBitCast) {
- return getAllocationData(V, AlignedAllocLike, TLI, LookThroughBitCast)
- .hasValue();
-}
-bool llvm::isAlignedAllocLikeFn(
- const Value *V, function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
- bool LookThroughBitCast) {
- return getAllocationData(V, AlignedAllocLike, GetTLI, LookThroughBitCast)
+static bool isAlignedAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
+ return getAllocationData(V, AlignedAllocLike, TLI)
.hasValue();
}
/// Tests if a value is a call or invoke to a library function that
/// allocates zero-filled memory (such as calloc).
-bool llvm::isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
- bool LookThroughBitCast) {
- return getAllocationData(V, CallocLike, TLI, LookThroughBitCast).hasValue();
+static bool isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
+ return getAllocationData(V, CallocLike, TLI).hasValue();
}
/// Tests if a value is a call or invoke to a library function that
/// allocates memory similar to malloc or calloc.
-bool llvm::isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
- bool LookThroughBitCast) {
- return getAllocationData(V, MallocOrCallocLike, TLI,
- LookThroughBitCast).hasValue();
+bool llvm::isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
+ return getAllocationData(V, MallocOrCallocLike, TLI).hasValue();
}
/// Tests if a value is a call or invoke to a library function that
/// allocates memory (either malloc, calloc, or strdup like).
-bool llvm::isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
- bool LookThroughBitCast) {
- return getAllocationData(V, AllocLike, TLI, LookThroughBitCast).hasValue();
+bool llvm::isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
+ return getAllocationData(V, AllocLike, TLI).hasValue();
}
/// Tests if a value is a call or invoke to a library function that
/// reallocates memory (e.g., realloc).
-bool llvm::isReallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
- bool LookThroughBitCast) {
- return getAllocationData(V, ReallocLike, TLI, LookThroughBitCast).hasValue();
+bool llvm::isReallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
+ return getAllocationData(V, ReallocLike, TLI).hasValue();
}
/// Tests if a functions is a call or invoke to a library function that
@@ -320,113 +277,122 @@ bool llvm::isReallocLikeFn(const Function *F, const TargetLibraryInfo *TLI) {
return getAllocationDataForFunction(F, ReallocLike, TLI).hasValue();
}
-/// Tests if a value is a call or invoke to a library function that
-/// allocates memory and throws if an allocation failed (e.g., new).
-bool llvm::isOpNewLikeFn(const Value *V, const TargetLibraryInfo *TLI,
- bool LookThroughBitCast) {
- return getAllocationData(V, OpNewLike, TLI, LookThroughBitCast).hasValue();
-}
+bool llvm::isAllocRemovable(const CallBase *CB, const TargetLibraryInfo *TLI) {
+ assert(isAllocationFn(CB, TLI));
-/// Tests if a value is a call or invoke to a library function that
-/// allocates memory (strdup, strndup).
-bool llvm::isStrdupLikeFn(const Value *V, const TargetLibraryInfo *TLI,
- bool LookThroughBitCast) {
- return getAllocationData(V, StrDupLike, TLI, LookThroughBitCast).hasValue();
-}
+ // Note: Removability is highly dependent on the source language. For
+ // example, recent C++ requires direct calls to the global allocation
+ // [basic.stc.dynamic.allocation] to be observable unless part of a new
+ // expression [expr.new paragraph 13].
-/// extractMallocCall - Returns the corresponding CallInst if the instruction
-/// is a malloc call. Since CallInst::CreateMalloc() only creates calls, we
-/// ignore InvokeInst here.
-const CallInst *llvm::extractMallocCall(
- const Value *I,
- function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
- return isMallocLikeFn(I, GetTLI) ? dyn_cast<CallInst>(I) : nullptr;
+ // Historically we've treated the C family allocation routines as removable
+ return isAllocLikeFn(CB, TLI);
}
-static Value *computeArraySize(const CallInst *CI, const DataLayout &DL,
- const TargetLibraryInfo *TLI,
- bool LookThroughSExt = false) {
- if (!CI)
- return nullptr;
+Value *llvm::getAllocAlignment(const CallBase *V,
+ const TargetLibraryInfo *TLI) {
+ assert(isAllocationFn(V, TLI));
- // The size of the malloc's result type must be known to determine array size.
- Type *T = getMallocAllocatedType(CI, TLI);
- if (!T || !T->isSized())
+ const Optional<AllocFnsTy> FnData = getAllocationData(V, AnyAlloc, TLI);
+ if (!FnData.hasValue() || FnData->AlignParam < 0) {
return nullptr;
+ }
+ return V->getOperand(FnData->AlignParam);
+}
- unsigned ElementSize = DL.getTypeAllocSize(T);
- if (StructType *ST = dyn_cast<StructType>(T))
- ElementSize = DL.getStructLayout(ST)->getSizeInBytes();
+/// When we're compiling N-bit code, and the user uses parameters that are
+/// greater than N bits (e.g. uint64_t on a 32-bit build), we can run into
+/// trouble with APInt size issues. This function handles resizing + overflow
+/// checks for us. Check and zext or trunc \p I depending on IntTyBits and
+/// I's value.
+static bool CheckedZextOrTrunc(APInt &I, unsigned IntTyBits) {
+ // More bits than we can handle. Checking the bit width isn't necessary, but
+ // it's faster than checking active bits, and should give `false` in the
+ // vast majority of cases.
+ if (I.getBitWidth() > IntTyBits && I.getActiveBits() > IntTyBits)
+ return false;
+ if (I.getBitWidth() != IntTyBits)
+ I = I.zextOrTrunc(IntTyBits);
+ return true;
+}
- // If malloc call's arg can be determined to be a multiple of ElementSize,
- // return the multiple. Otherwise, return NULL.
- Value *MallocArg = CI->getArgOperand(0);
- Value *Multiple = nullptr;
- if (ComputeMultiple(MallocArg, ElementSize, Multiple, LookThroughSExt))
- return Multiple;
+Optional<APInt>
+llvm::getAllocSize(const CallBase *CB,
+ const TargetLibraryInfo *TLI,
+ std::function<const Value*(const Value*)> Mapper) {
+ // Note: This handles both explicitly listed allocation functions and
+ // allocsize. The code structure could stand to be cleaned up a bit.
+ Optional<AllocFnsTy> FnData = getAllocationSize(CB, TLI);
+ if (!FnData)
+ return None;
- return nullptr;
-}
+ // Get the index type for this address space, results and intermediate
+ // computations are performed at that width.
+ auto &DL = CB->getModule()->getDataLayout();
+ const unsigned IntTyBits = DL.getIndexTypeSizeInBits(CB->getType());
+
+ // Handle strdup-like functions separately.
+ if (FnData->AllocTy == StrDupLike) {
+ APInt Size(IntTyBits, GetStringLength(Mapper(CB->getArgOperand(0))));
+ if (!Size)
+ return None;
-/// getMallocType - Returns the PointerType resulting from the malloc call.
-/// The PointerType depends on the number of bitcast uses of the malloc call:
-/// 0: PointerType is the calls' return type.
-/// 1: PointerType is the bitcast's result type.
-/// >1: Unique PointerType cannot be determined, return NULL.
-PointerType *llvm::getMallocType(const CallInst *CI,
- const TargetLibraryInfo *TLI) {
- assert(isMallocLikeFn(CI, TLI) && "getMallocType and not malloc call");
-
- PointerType *MallocType = nullptr;
- unsigned NumOfBitCastUses = 0;
-
- // Determine if CallInst has a bitcast use.
- for (const User *U : CI->users())
- if (const BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
- MallocType = cast<PointerType>(BCI->getDestTy());
- NumOfBitCastUses++;
+ // Strndup limits strlen.
+ if (FnData->FstParam > 0) {
+ const ConstantInt *Arg =
+ dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->FstParam)));
+ if (!Arg)
+ return None;
+
+ APInt MaxSize = Arg->getValue().zextOrSelf(IntTyBits);
+ if (Size.ugt(MaxSize))
+ Size = MaxSize + 1;
}
+ return Size;
+ }
- // Malloc call has 1 bitcast use, so type is the bitcast's destination type.
- if (NumOfBitCastUses == 1)
- return MallocType;
+ const ConstantInt *Arg =
+ dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->FstParam)));
+ if (!Arg)
+ return None;
- // Malloc call was not bitcast, so type is the malloc function's return type.
- if (NumOfBitCastUses == 0)
- return cast<PointerType>(CI->getType());
+ APInt Size = Arg->getValue();
+ if (!CheckedZextOrTrunc(Size, IntTyBits))
+ return None;
- // Type could not be determined.
- return nullptr;
-}
+ // Size is determined by just 1 parameter.
+ if (FnData->SndParam < 0)
+ return Size;
-/// getMallocAllocatedType - Returns the Type allocated by malloc call.
-/// The Type depends on the number of bitcast uses of the malloc call:
-/// 0: PointerType is the malloc calls' return type.
-/// 1: PointerType is the bitcast's result type.
-/// >1: Unique PointerType cannot be determined, return NULL.
-Type *llvm::getMallocAllocatedType(const CallInst *CI,
- const TargetLibraryInfo *TLI) {
- PointerType *PT = getMallocType(CI, TLI);
- return PT ? PT->getElementType() : nullptr;
-}
+ Arg = dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->SndParam)));
+ if (!Arg)
+ return None;
+
+ APInt NumElems = Arg->getValue();
+ if (!CheckedZextOrTrunc(NumElems, IntTyBits))
+ return None;
-/// getMallocArraySize - Returns the array size of a malloc call. If the
-/// argument passed to malloc is a multiple of the size of the malloced type,
-/// then return that multiple. For non-array mallocs, the multiple is
-/// constant 1. Otherwise, return NULL for mallocs whose array size cannot be
-/// determined.
-Value *llvm::getMallocArraySize(CallInst *CI, const DataLayout &DL,
- const TargetLibraryInfo *TLI,
- bool LookThroughSExt) {
- assert(isMallocLikeFn(CI, TLI) && "getMallocArraySize and not malloc call");
- return computeArraySize(CI, DL, TLI, LookThroughSExt);
+ bool Overflow;
+ Size = Size.umul_ov(NumElems, Overflow);
+ if (Overflow)
+ return None;
+ return Size;
}
-/// extractCallocCall - Returns the corresponding CallInst if the instruction
-/// is a calloc call.
-const CallInst *llvm::extractCallocCall(const Value *I,
- const TargetLibraryInfo *TLI) {
- return isCallocLikeFn(I, TLI) ? cast<CallInst>(I) : nullptr;
+Constant *llvm::getInitialValueOfAllocation(const CallBase *Alloc,
+ const TargetLibraryInfo *TLI,
+ Type *Ty) {
+ assert(isAllocationFn(Alloc, TLI));
+
+ // malloc and aligned_alloc are uninitialized (undef)
+ if (isMallocLikeFn(Alloc, TLI) || isAlignedAllocLikeFn(Alloc, TLI))
+ return UndefValue::get(Ty);
+
+ // calloc zero initializes
+ if (isCallocLikeFn(Alloc, TLI))
+ return Constant::getNullValue(Ty);
+
+ return nullptr;
}
/// isLibFreeFunction - Returns true if the function is a builtin free()
@@ -485,8 +451,7 @@ bool llvm::isLibFreeFunction(const Function *F, const LibFunc TLIFn) {
/// isFreeCall - Returns non-null if the value is a call to the builtin free()
const CallInst *llvm::isFreeCall(const Value *I, const TargetLibraryInfo *TLI) {
bool IsNoBuiltinCall;
- const Function *Callee =
- getCalledFunction(I, /*LookThroughBitCast=*/false, IsNoBuiltinCall);
+ const Function *Callee = getCalledFunction(I, IsNoBuiltinCall);
if (Callee == nullptr || IsNoBuiltinCall)
return nullptr;
@@ -644,20 +609,8 @@ SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
return unknown();
}
-/// When we're compiling N-bit code, and the user uses parameters that are
-/// greater than N bits (e.g. uint64_t on a 32-bit build), we can run into
-/// trouble with APInt size issues. This function handles resizing + overflow
-/// checks for us. Check and zext or trunc \p I depending on IntTyBits and
-/// I's value.
bool ObjectSizeOffsetVisitor::CheckedZextOrTrunc(APInt &I) {
- // More bits than we can handle. Checking the bit width isn't necessary, but
- // it's faster than checking active bits, and should give `false` in the
- // vast majority of cases.
- if (I.getBitWidth() > IntTyBits && I.getActiveBits() > IntTyBits)
- return false;
- if (I.getBitWidth() != IntTyBits)
- I = I.zextOrTrunc(IntTyBits);
- return true;
+ return ::CheckedZextOrTrunc(I, IntTyBits);
}
SizeOffsetType ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
@@ -698,61 +651,10 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
}
SizeOffsetType ObjectSizeOffsetVisitor::visitCallBase(CallBase &CB) {
- Optional<AllocFnsTy> FnData = getAllocationSize(&CB, TLI);
- if (!FnData)
- return unknown();
-
- // Handle strdup-like functions separately.
- if (FnData->AllocTy == StrDupLike) {
- APInt Size(IntTyBits, GetStringLength(CB.getArgOperand(0)));
- if (!Size)
- return unknown();
-
- // Strndup limits strlen.
- if (FnData->FstParam > 0) {
- ConstantInt *Arg =
- dyn_cast<ConstantInt>(CB.getArgOperand(FnData->FstParam));
- if (!Arg)
- return unknown();
-
- APInt MaxSize = Arg->getValue().zextOrSelf(IntTyBits);
- if (Size.ugt(MaxSize))
- Size = MaxSize + 1;
- }
- return std::make_pair(Size, Zero);
- }
-
- ConstantInt *Arg = dyn_cast<ConstantInt>(CB.getArgOperand(FnData->FstParam));
- if (!Arg)
- return unknown();
-
- APInt Size = Arg->getValue();
- if (!CheckedZextOrTrunc(Size))
- return unknown();
-
- // Size is determined by just 1 parameter.
- if (FnData->SndParam < 0)
- return std::make_pair(Size, Zero);
-
- Arg = dyn_cast<ConstantInt>(CB.getArgOperand(FnData->SndParam));
- if (!Arg)
- return unknown();
-
- APInt NumElems = Arg->getValue();
- if (!CheckedZextOrTrunc(NumElems))
- return unknown();
-
- bool Overflow;
- Size = Size.umul_ov(NumElems, Overflow);
- return Overflow ? unknown() : std::make_pair(Size, Zero);
-
- // TODO: handle more standard functions (+ wchar cousins):
- // - strdup / strndup
- // - strcpy / strncpy
- // - strcat / strncat
- // - memcpy / memmove
- // - strcat / strncat
- // - memset
+ auto Mapper = [](const Value *V) { return V; };
+ if (Optional<APInt> Size = getAllocSize(&CB, TLI, Mapper))
+ return std::make_pair(*Size, Zero);
+ return unknown();
}
SizeOffsetType
@@ -976,7 +878,7 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitCallBase(CallBase &CB) {
// Handle strdup-like functions separately.
if (FnData->AllocTy == StrDupLike) {
- // TODO
+ // TODO: implement evaluation of strdup/strndup
return unknown();
}
@@ -989,14 +891,6 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitCallBase(CallBase &CB) {
SecondArg = Builder.CreateZExtOrTrunc(SecondArg, IntTy);
Value *Size = Builder.CreateMul(FirstArg, SecondArg);
return std::make_pair(Size, Zero);
-
- // TODO: handle more standard functions (+ wchar cousins):
- // - strdup / strndup
- // - strcpy / strncpy
- // - strcat / strncat
- // - memcpy / memmove
- // - strcat / strncat
- // - memset
}
SizeOffsetEvalType
diff --git a/contrib/llvm-project/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/contrib/llvm-project/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index da6bb4c49cba..36df462c7a66 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -594,7 +594,7 @@ MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
// turn into undef. Note that we can bypass the allocation itself when
// looking for a clobber in many cases; that's an alias property and is
// handled by BasicAA.
- if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, &TLI)) {
+ if (isa<AllocaInst>(Inst) || isNoAliasCall(Inst)) {
const Value *AccessPtr = getUnderlyingObject(MemLoc.Ptr);
if (AccessPtr == Inst || BatchAA.isMustAlias(Inst, AccessPtr))
return MemDepResult::getDef(Inst);
diff --git a/contrib/llvm-project/llvm/lib/Analysis/MemorySSA.cpp b/contrib/llvm-project/llvm/lib/Analysis/MemorySSA.cpp
index ac20e20f0c0d..57f431ec21f5 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/MemorySSA.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/MemorySSA.cpp
@@ -1265,8 +1265,8 @@ void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
}
MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
- : AA(nullptr), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
- SkipWalker(nullptr), NextID(0) {
+ : DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
+ SkipWalker(nullptr) {
// Build MemorySSA using a batch alias analysis. This reuses the internal
// state that AA collects during an alias()/getModRefInfo() call. This is
// safe because there are no CFG changes while building MemorySSA and can
diff --git a/contrib/llvm-project/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp b/contrib/llvm-project/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp
index 941458f648bc..fab51d6a7aaf 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp
@@ -22,12 +22,13 @@ ModelUnderTrainingRunner::ModelUnderTrainingRunner(
LLVMContext &Ctx, const std::string &ModelPath,
const std::vector<TensorSpec> &InputSpecs,
const std::vector<LoggedFeatureSpec> &OutputSpecs)
- : MLModelRunner(Ctx), OutputSpecs(OutputSpecs) {
+ : MLModelRunner(Ctx, MLModelRunner::Kind::Development),
+ OutputSpecs(OutputSpecs) {
Evaluator = std::make_unique<TFModelEvaluator>(
ModelPath, InputSpecs, [&](size_t I) { return OutputSpecs[I].Spec; },
OutputSpecs.size());
if (!Evaluator || !Evaluator->isValid()) {
- Ctx.emitError("Failed to create inliner saved model evaluator");
+ Ctx.emitError("Failed to create saved model evaluator");
Evaluator.reset();
return;
}
@@ -46,4 +47,21 @@ void *ModelUnderTrainingRunner::getTensorUntyped(size_t Index) {
return Evaluator->getUntypedInput(Index);
}
+std::unique_ptr<ModelUnderTrainingRunner>
+ModelUnderTrainingRunner::createAndEnsureValid(
+ LLVMContext &Ctx, const std::string &ModelPath, StringRef DecisionName,
+ const std::vector<TensorSpec> &InputSpecs,
+ StringRef OutputSpecsPathOverride) {
+ std::unique_ptr<ModelUnderTrainingRunner> MUTR;
+ if (auto MaybeOutputSpecs = loadOutputSpecs(Ctx, DecisionName, ModelPath,
+ OutputSpecsPathOverride))
+ MUTR.reset(new ModelUnderTrainingRunner(Ctx, ModelPath, InputSpecs,
+ *MaybeOutputSpecs));
+ if (MUTR && MUTR->isValid())
+ return MUTR;
+
+ Ctx.emitError("Could not load the policy model from the provided path");
+ return nullptr;
+}
+
#endif // defined(LLVM_HAVE_TF_API)
diff --git a/contrib/llvm-project/llvm/lib/Analysis/NoInferenceModelRunner.cpp b/contrib/llvm-project/llvm/lib/Analysis/NoInferenceModelRunner.cpp
index 02ece6aa3900..7178120ebe4f 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/NoInferenceModelRunner.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/NoInferenceModelRunner.cpp
@@ -20,7 +20,7 @@ using namespace llvm;
NoInferenceModelRunner::NoInferenceModelRunner(
LLVMContext &Ctx, const std::vector<TensorSpec> &Inputs)
- : MLModelRunner(Ctx) {
+ : MLModelRunner(Ctx, MLModelRunner::Kind::NoOp) {
ValuesBuffer.reserve(Inputs.size());
for (const auto &TS : Inputs)
ValuesBuffer.push_back(std::make_unique<char[]>(TS.getElementCount() *
diff --git a/contrib/llvm-project/llvm/lib/Analysis/ObjCARCInstKind.cpp b/contrib/llvm-project/llvm/lib/Analysis/ObjCARCInstKind.cpp
index f74a9f7f104f..d177ee056a93 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/ObjCARCInstKind.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/ObjCARCInstKind.cpp
@@ -32,8 +32,8 @@ raw_ostream &llvm::objcarc::operator<<(raw_ostream &OS,
return OS << "ARCInstKind::Retain";
case ARCInstKind::RetainRV:
return OS << "ARCInstKind::RetainRV";
- case ARCInstKind::ClaimRV:
- return OS << "ARCInstKind::ClaimRV";
+ case ARCInstKind::UnsafeClaimRV:
+ return OS << "ARCInstKind::UnsafeClaimRV";
case ARCInstKind::RetainBlock:
return OS << "ARCInstKind::RetainBlock";
case ARCInstKind::Release:
@@ -127,7 +127,7 @@ ARCInstKind llvm::objcarc::GetFunctionClass(const Function *F) {
case Intrinsic::objc_clang_arc_use:
return ARCInstKind::IntrinsicUser;
case Intrinsic::objc_unsafeClaimAutoreleasedReturnValue:
- return ARCInstKind::ClaimRV;
+ return ARCInstKind::UnsafeClaimRV;
case Intrinsic::objc_retainedObject:
return ARCInstKind::NoopCast;
case Intrinsic::objc_unretainedObject:
@@ -334,7 +334,7 @@ bool llvm::objcarc::IsUser(ARCInstKind Class) {
case ARCInstKind::StoreStrong:
case ARCInstKind::Call:
case ARCInstKind::None:
- case ARCInstKind::ClaimRV:
+ case ARCInstKind::UnsafeClaimRV:
return false;
}
llvm_unreachable("covered switch isn't covered?");
@@ -370,7 +370,7 @@ bool llvm::objcarc::IsRetain(ARCInstKind Class) {
case ARCInstKind::Call:
case ARCInstKind::User:
case ARCInstKind::None:
- case ARCInstKind::ClaimRV:
+ case ARCInstKind::UnsafeClaimRV:
return false;
}
llvm_unreachable("covered switch isn't covered?");
@@ -384,7 +384,7 @@ bool llvm::objcarc::IsAutorelease(ARCInstKind Class) {
return true;
case ARCInstKind::Retain:
case ARCInstKind::RetainRV:
- case ARCInstKind::ClaimRV:
+ case ARCInstKind::UnsafeClaimRV:
case ARCInstKind::RetainBlock:
case ARCInstKind::Release:
case ARCInstKind::AutoreleasepoolPush:
@@ -416,7 +416,7 @@ bool llvm::objcarc::IsForwarding(ARCInstKind Class) {
switch (Class) {
case ARCInstKind::Retain:
case ARCInstKind::RetainRV:
- case ARCInstKind::ClaimRV:
+ case ARCInstKind::UnsafeClaimRV:
case ARCInstKind::Autorelease:
case ARCInstKind::AutoreleaseRV:
case ARCInstKind::NoopCast:
@@ -451,7 +451,7 @@ bool llvm::objcarc::IsNoopOnNull(ARCInstKind Class) {
switch (Class) {
case ARCInstKind::Retain:
case ARCInstKind::RetainRV:
- case ARCInstKind::ClaimRV:
+ case ARCInstKind::UnsafeClaimRV:
case ARCInstKind::Release:
case ARCInstKind::Autorelease:
case ARCInstKind::AutoreleaseRV:
@@ -486,7 +486,7 @@ bool llvm::objcarc::IsNoopOnGlobal(ARCInstKind Class) {
switch (Class) {
case ARCInstKind::Retain:
case ARCInstKind::RetainRV:
- case ARCInstKind::ClaimRV:
+ case ARCInstKind::UnsafeClaimRV:
case ARCInstKind::Release:
case ARCInstKind::Autorelease:
case ARCInstKind::AutoreleaseRV:
@@ -522,7 +522,7 @@ bool llvm::objcarc::IsAlwaysTail(ARCInstKind Class) {
switch (Class) {
case ARCInstKind::Retain:
case ARCInstKind::RetainRV:
- case ARCInstKind::ClaimRV:
+ case ARCInstKind::UnsafeClaimRV:
case ARCInstKind::AutoreleaseRV:
return true;
case ARCInstKind::Release:
@@ -563,7 +563,7 @@ bool llvm::objcarc::IsNeverTail(ARCInstKind Class) {
return true;
case ARCInstKind::Retain:
case ARCInstKind::RetainRV:
- case ARCInstKind::ClaimRV:
+ case ARCInstKind::UnsafeClaimRV:
case ARCInstKind::AutoreleaseRV:
case ARCInstKind::Release:
case ARCInstKind::RetainBlock:
@@ -598,7 +598,7 @@ bool llvm::objcarc::IsNoThrow(ARCInstKind Class) {
switch (Class) {
case ARCInstKind::Retain:
case ARCInstKind::RetainRV:
- case ARCInstKind::ClaimRV:
+ case ARCInstKind::UnsafeClaimRV:
case ARCInstKind::Release:
case ARCInstKind::Autorelease:
case ARCInstKind::AutoreleaseRV:
@@ -643,7 +643,7 @@ bool llvm::objcarc::CanInterruptRV(ARCInstKind Class) {
return true;
case ARCInstKind::Retain:
case ARCInstKind::RetainRV:
- case ARCInstKind::ClaimRV:
+ case ARCInstKind::UnsafeClaimRV:
case ARCInstKind::Release:
case ARCInstKind::AutoreleasepoolPush:
case ARCInstKind::RetainBlock:
@@ -696,7 +696,7 @@ bool llvm::objcarc::CanDecrementRefCount(ARCInstKind Kind) {
case ARCInstKind::StoreStrong:
case ARCInstKind::CallOrUser:
case ARCInstKind::Call:
- case ARCInstKind::ClaimRV:
+ case ARCInstKind::UnsafeClaimRV:
return true;
}
diff --git a/contrib/llvm-project/llvm/lib/Analysis/PHITransAddr.cpp b/contrib/llvm-project/llvm/lib/Analysis/PHITransAddr.cpp
index 4c80f6743411..02d084937ccb 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/PHITransAddr.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/PHITransAddr.cpp
@@ -226,7 +226,8 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
return GEP;
// Simplify the GEP to handle 'gep x, 0' -> x etc.
- if (Value *V = SimplifyGEPInst(GEP->getSourceElementType(), GEPOps,
+ if (Value *V = SimplifyGEPInst(GEP->getSourceElementType(), GEPOps[0],
+ ArrayRef<Value *>(GEPOps).slice(1),
GEP->isInBounds(), {DL, TLI, DT, AC})) {
for (unsigned i = 0, e = GEPOps.size(); i != e; ++i)
RemoveInstInputs(GEPOps[i], InstInputs);
diff --git a/contrib/llvm-project/llvm/lib/Analysis/RegionPass.cpp b/contrib/llvm-project/llvm/lib/Analysis/RegionPass.cpp
index c20ecff5f912..10c8569096c6 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/RegionPass.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/RegionPass.cpp
@@ -30,8 +30,7 @@ using namespace llvm;
char RGPassManager::ID = 0;
-RGPassManager::RGPassManager()
- : FunctionPass(ID), PMDataManager() {
+RGPassManager::RGPassManager() : FunctionPass(ID) {
RI = nullptr;
CurrentRegion = nullptr;
}
diff --git a/contrib/llvm-project/llvm/lib/Analysis/ReplayInlineAdvisor.cpp b/contrib/llvm-project/llvm/lib/Analysis/ReplayInlineAdvisor.cpp
index f83d8b0fd230..294bc38c17ad 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/ReplayInlineAdvisor.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/ReplayInlineAdvisor.cpp
@@ -28,8 +28,7 @@ ReplayInlineAdvisor::ReplayInlineAdvisor(
std::unique_ptr<InlineAdvisor> OriginalAdvisor,
const ReplayInlinerSettings &ReplaySettings, bool EmitRemarks)
: InlineAdvisor(M, FAM), OriginalAdvisor(std::move(OriginalAdvisor)),
- HasReplayRemarks(false), ReplaySettings(ReplaySettings),
- EmitRemarks(EmitRemarks) {
+ ReplaySettings(ReplaySettings), EmitRemarks(EmitRemarks) {
auto BufferOrErr = MemoryBuffer::getFileOrSTDIN(ReplaySettings.ReplayFile);
std::error_code EC = BufferOrErr.getError();
diff --git a/contrib/llvm-project/llvm/lib/Analysis/ScalarEvolution.cpp b/contrib/llvm-project/llvm/lib/Analysis/ScalarEvolution.cpp
index 0c3f32295ae1..07aac1523b47 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -301,7 +301,8 @@ void SCEV::print(raw_ostream &OS) const {
case scUMaxExpr:
case scSMaxExpr:
case scUMinExpr:
- case scSMinExpr: {
+ case scSMinExpr:
+ case scSequentialUMinExpr: {
const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
const char *OpStr = nullptr;
switch (NAry->getSCEVType()) {
@@ -315,6 +316,9 @@ void SCEV::print(raw_ostream &OS) const {
case scSMinExpr:
OpStr = " smin ";
break;
+ case scSequentialUMinExpr:
+ OpStr = " umin_seq ";
+ break;
default:
llvm_unreachable("There are no other nary expression types.");
}
@@ -392,6 +396,8 @@ Type *SCEV::getType() const {
case scUMinExpr:
case scSMinExpr:
return cast<SCEVMinMaxExpr>(this)->getType();
+ case scSequentialUMinExpr:
+ return cast<SCEVSequentialMinMaxExpr>(this)->getType();
case scAddExpr:
return cast<SCEVAddExpr>(this)->getType();
case scUDivExpr:
@@ -774,7 +780,8 @@ CompareSCEVComplexity(EquivalenceClasses<const SCEV *> &EqCacheSCEV,
case scSMaxExpr:
case scUMaxExpr:
case scSMinExpr:
- case scUMinExpr: {
+ case scUMinExpr:
+ case scSequentialUMinExpr: {
const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
@@ -2110,6 +2117,22 @@ ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
return S;
}
+const SCEV *ScalarEvolution::getCastExpr(SCEVTypes Kind, const SCEV *Op,
+ Type *Ty) {
+ switch (Kind) {
+ case scTruncate:
+ return getTruncateExpr(Op, Ty);
+ case scZeroExtend:
+ return getZeroExtendExpr(Op, Ty);
+ case scSignExtend:
+ return getSignExtendExpr(Op, Ty);
+ case scPtrToInt:
+ return getPtrToIntExpr(Op, Ty);
+ default:
+ llvm_unreachable("Not a SCEV cast expression!");
+ }
+}
+
/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
@@ -3463,7 +3486,7 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
return S;
}
-static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
+const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
APInt A = C1->getAPInt().abs();
APInt B = C2->getAPInt().abs();
uint32_t ABW = A.getBitWidth();
@@ -3721,6 +3744,7 @@ const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) {
const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind,
SmallVectorImpl<const SCEV *> &Ops) {
+ assert(SCEVMinMaxExpr::isMinMaxType(Kind) && "Not a SCEVMinMaxExpr!");
assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
@@ -3857,6 +3881,209 @@ const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind,
return S;
}
+namespace {
+
+class SCEVSequentialMinMaxDeduplicatingVisitor final
+ : public SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor,
+ Optional<const SCEV *>> {
+ using RetVal = Optional<const SCEV *>;
+ using Base = SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor, RetVal>;
+
+ ScalarEvolution &SE;
+ const SCEVTypes RootKind; // Must be a sequential min/max expression.
+ const SCEVTypes NonSequentialRootKind; // Non-sequential variant of RootKind.
+ SmallPtrSet<const SCEV *, 16> SeenOps;
+
+ bool canRecurseInto(SCEVTypes Kind) const {
+ // We can only recurse into the SCEV expression of the same effective type
+ // as the type of our root SCEV expression.
+ return RootKind == Kind || NonSequentialRootKind == Kind;
+ };
+
+ RetVal visitAnyMinMaxExpr(const SCEV *S) {
+ assert((isa<SCEVMinMaxExpr>(S) || isa<SCEVSequentialMinMaxExpr>(S)) &&
+ "Only for min/max expressions.");
+ SCEVTypes Kind = S->getSCEVType();
+
+ if (!canRecurseInto(Kind))
+ return S;
+
+ auto *NAry = cast<SCEVNAryExpr>(S);
+ SmallVector<const SCEV *> NewOps;
+ bool Changed =
+ visit(Kind, makeArrayRef(NAry->op_begin(), NAry->op_end()), NewOps);
+
+ if (!Changed)
+ return S;
+ if (NewOps.empty())
+ return None;
+
+ return isa<SCEVSequentialMinMaxExpr>(S)
+ ? SE.getSequentialMinMaxExpr(Kind, NewOps)
+ : SE.getMinMaxExpr(Kind, NewOps);
+ }
+
+ RetVal visit(const SCEV *S) {
+ // Has the whole operand been seen already?
+ if (!SeenOps.insert(S).second)
+ return None;
+ return Base::visit(S);
+ }
+
+public:
+ SCEVSequentialMinMaxDeduplicatingVisitor(ScalarEvolution &SE,
+ SCEVTypes RootKind)
+ : SE(SE), RootKind(RootKind),
+ NonSequentialRootKind(
+ SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(
+ RootKind)) {}
+
+ bool /*Changed*/ visit(SCEVTypes Kind, ArrayRef<const SCEV *> OrigOps,
+ SmallVectorImpl<const SCEV *> &NewOps) {
+ bool Changed = false;
+ SmallVector<const SCEV *> Ops;
+ Ops.reserve(OrigOps.size());
+
+ for (const SCEV *Op : OrigOps) {
+ RetVal NewOp = visit(Op);
+ if (NewOp != Op)
+ Changed = true;
+ if (NewOp)
+ Ops.emplace_back(*NewOp);
+ }
+
+ if (Changed)
+ NewOps = std::move(Ops);
+ return Changed;
+ }
+
+ RetVal visitConstant(const SCEVConstant *Constant) { return Constant; }
+
+ RetVal visitPtrToIntExpr(const SCEVPtrToIntExpr *Expr) { return Expr; }
+
+ RetVal visitTruncateExpr(const SCEVTruncateExpr *Expr) { return Expr; }
+
+ RetVal visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { return Expr; }
+
+ RetVal visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { return Expr; }
+
+ RetVal visitAddExpr(const SCEVAddExpr *Expr) { return Expr; }
+
+ RetVal visitMulExpr(const SCEVMulExpr *Expr) { return Expr; }
+
+ RetVal visitUDivExpr(const SCEVUDivExpr *Expr) { return Expr; }
+
+ RetVal visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }
+
+ RetVal visitSMaxExpr(const SCEVSMaxExpr *Expr) {
+ return visitAnyMinMaxExpr(Expr);
+ }
+
+ RetVal visitUMaxExpr(const SCEVUMaxExpr *Expr) {
+ return visitAnyMinMaxExpr(Expr);
+ }
+
+ RetVal visitSMinExpr(const SCEVSMinExpr *Expr) {
+ return visitAnyMinMaxExpr(Expr);
+ }
+
+ RetVal visitUMinExpr(const SCEVUMinExpr *Expr) {
+ return visitAnyMinMaxExpr(Expr);
+ }
+
+ RetVal visitSequentialUMinExpr(const SCEVSequentialUMinExpr *Expr) {
+ return visitAnyMinMaxExpr(Expr);
+ }
+
+ RetVal visitUnknown(const SCEVUnknown *Expr) { return Expr; }
+
+ RetVal visitCouldNotCompute(const SCEVCouldNotCompute *Expr) { return Expr; }
+};
+
+} // namespace
+
+const SCEV *
+ScalarEvolution::getSequentialMinMaxExpr(SCEVTypes Kind,
+ SmallVectorImpl<const SCEV *> &Ops) {
+ assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) &&
+ "Not a SCEVSequentialMinMaxExpr!");
+ assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
+ if (Ops.size() == 1)
+ return Ops[0];
+ if (Ops.size() == 2 &&
+ any_of(Ops, [](const SCEV *Op) { return isa<SCEVConstant>(Op); }))
+ return getMinMaxExpr(
+ SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(Kind),
+ Ops);
+#ifndef NDEBUG
+ Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
+ for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
+ assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
+ "Operand types don't match!");
+ assert(Ops[0]->getType()->isPointerTy() ==
+ Ops[i]->getType()->isPointerTy() &&
+ "min/max should be consistently pointerish");
+ }
+#endif
+
+ // Note that SCEVSequentialMinMaxExpr is *NOT* commutative,
+ // so we can *NOT* do any kind of sorting of the expressions!
+
+ // Check if we have created the same expression before.
+ if (const SCEV *S = findExistingSCEVInCache(Kind, Ops))
+ return S;
+
+ // FIXME: there are *some* simplifications that we can do here.
+
+ // Keep only the first instance of an operand.
+ {
+ SCEVSequentialMinMaxDeduplicatingVisitor Deduplicator(*this, Kind);
+ bool Changed = Deduplicator.visit(Kind, Ops, Ops);
+ if (Changed)
+ return getSequentialMinMaxExpr(Kind, Ops);
+ }
+
+ // Check to see if one of the operands is of the same kind. If so, expand its
+ // operands onto our operand list, and recurse to simplify.
+ {
+ unsigned Idx = 0;
+ bool DeletedAny = false;
+ while (Idx < Ops.size()) {
+ if (Ops[Idx]->getSCEVType() != Kind) {
+ ++Idx;
+ continue;
+ }
+ const auto *SMME = cast<SCEVSequentialMinMaxExpr>(Ops[Idx]);
+ Ops.erase(Ops.begin() + Idx);
+ Ops.insert(Ops.begin() + Idx, SMME->op_begin(), SMME->op_end());
+ DeletedAny = true;
+ }
+
+ if (DeletedAny)
+ return getSequentialMinMaxExpr(Kind, Ops);
+ }
+
+ // Okay, it looks like we really DO need an expr. Check to see if we
+ // already have one, otherwise create a new one.
+ FoldingSetNodeID ID;
+ ID.AddInteger(Kind);
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+ ID.AddPointer(Ops[i]);
+ void *IP = nullptr;
+ const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
+ if (ExistingSCEV)
+ return ExistingSCEV;
+
+ const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+ std::uninitialized_copy(Ops.begin(), Ops.end(), O);
+ SCEV *S = new (SCEVAllocator)
+ SCEVSequentialMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());
+
+ UniqueSCEVs.InsertNode(S, IP);
+ registerUser(S, Ops);
+ return S;
+}
+
const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) {
SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
return getSMaxExpr(Ops);
@@ -3885,14 +4112,16 @@ const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
return getMinMaxExpr(scSMinExpr, Ops);
}
-const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
- const SCEV *RHS) {
+const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, const SCEV *RHS,
+ bool Sequential) {
SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
- return getUMinExpr(Ops);
+ return getUMinExpr(Ops, Sequential);
}
-const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
- return getMinMaxExpr(scUMinExpr, Ops);
+const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops,
+ bool Sequential) {
+ return Sequential ? getSequentialMinMaxExpr(scSequentialUMinExpr, Ops)
+ : getMinMaxExpr(scUMinExpr, Ops);
}
const SCEV *
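
The sequential unsigned min introduced here (scSequentialUMinExpr, printed as umin_seq) is assumed to follow the short-circuit definition umin_seq(a, b) == (a == 0 ? 0 : umin(a, b)): operands are considered left to right and the expression saturates at the first zero, so poison in later operands cannot leak into the result. That is why the expression is not commutative, why getSequentialMinMaxExpr above refuses to sort its operands, and why the deduplicating visitor keeps only the first occurrence of a repeated operand. A standalone toy model of the value semantics, with the poison aspect only described in comments:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <initializer_list>

    // Toy model, not the LLVM implementation: scan left to right and stop at
    // the first zero; operands past a zero are never inspected, which is what
    // keeps their poison from propagating in the real expression.
    static uint64_t uminSeq(std::initializer_list<uint64_t> Ops) {
      uint64_t Result = UINT64_MAX;
      for (uint64_t Op : Ops) {
        Result = std::min<uint64_t>(Result, Op);
        if (Op == 0)
          break; // later operands cannot change the result
      }
      return Result;
    }

    int main() {
      assert(uminSeq({3, 7, 5}) == 3);
      assert(uminSeq({0, 42}) == 0); // the 42 is never reached
      return 0;
    }
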
@@ -4375,13 +4604,15 @@ const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
}
const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
- const SCEV *RHS) {
+ const SCEV *RHS,
+ bool Sequential) {
SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
- return getUMinFromMismatchedTypes(Ops);
+ return getUMinFromMismatchedTypes(Ops, Sequential);
}
-const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(
- SmallVectorImpl<const SCEV *> &Ops) {
+const SCEV *
+ScalarEvolution::getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops,
+ bool Sequential) {
assert(!Ops.empty() && "At least one operand must be!");
// Trivial case.
if (Ops.size() == 1)
@@ -4402,7 +4633,7 @@ const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(
PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType));
// Generate umin.
- return getUMinExpr(PromotedOps);
+ return getUMinExpr(PromotedOps, Sequential);
}
const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
@@ -5513,6 +5744,7 @@ static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
case scSMaxExpr:
case scUMinExpr:
case scSMinExpr:
+ case scSequentialUMinExpr:
// These expressions are available if their operand(s) is/are.
return true;
@@ -6060,35 +6292,31 @@ ScalarEvolution::getRangeRef(const SCEV *S,
ConservativeResult.intersectWith(X, RangeType));
}
- if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
- ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint);
- for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
- X = X.smax(getRangeRef(SMax->getOperand(i), SignHint));
- return setRange(SMax, SignHint,
- ConservativeResult.intersectWith(X, RangeType));
- }
-
- if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
- ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint);
- for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
- X = X.umax(getRangeRef(UMax->getOperand(i), SignHint));
- return setRange(UMax, SignHint,
- ConservativeResult.intersectWith(X, RangeType));
- }
-
- if (const SCEVSMinExpr *SMin = dyn_cast<SCEVSMinExpr>(S)) {
- ConstantRange X = getRangeRef(SMin->getOperand(0), SignHint);
- for (unsigned i = 1, e = SMin->getNumOperands(); i != e; ++i)
- X = X.smin(getRangeRef(SMin->getOperand(i), SignHint));
- return setRange(SMin, SignHint,
- ConservativeResult.intersectWith(X, RangeType));
- }
+ if (isa<SCEVMinMaxExpr>(S) || isa<SCEVSequentialMinMaxExpr>(S)) {
+ Intrinsic::ID ID;
+ switch (S->getSCEVType()) {
+ case scUMaxExpr:
+ ID = Intrinsic::umax;
+ break;
+ case scSMaxExpr:
+ ID = Intrinsic::smax;
+ break;
+ case scUMinExpr:
+ case scSequentialUMinExpr:
+ ID = Intrinsic::umin;
+ break;
+ case scSMinExpr:
+ ID = Intrinsic::smin;
+ break;
+ default:
+ llvm_unreachable("Unknown SCEVMinMaxExpr/SCEVSequentialMinMaxExpr.");
+ }
- if (const SCEVUMinExpr *UMin = dyn_cast<SCEVUMinExpr>(S)) {
- ConstantRange X = getRangeRef(UMin->getOperand(0), SignHint);
- for (unsigned i = 1, e = UMin->getNumOperands(); i != e; ++i)
- X = X.umin(getRangeRef(UMin->getOperand(i), SignHint));
- return setRange(UMin, SignHint,
+ const auto *NAry = cast<SCEVNAryExpr>(S);
+ ConstantRange X = getRangeRef(NAry->getOperand(0), SignHint);
+ for (unsigned i = 1, e = NAry->getNumOperands(); i != e; ++i)
+ X = X.intrinsic(ID, {X, getRangeRef(NAry->getOperand(i), SignHint)});
+ return setRange(S, SignHint,
ConservativeResult.intersectWith(X, RangeType));
}
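
The range computation above folds the four near-identical min/max loops into a single loop keyed by the matching integer min/max intrinsic; the sequential umin maps to the same Intrinsic::umin because its set of possible values matches a plain umin, only its poison behaviour differs. For ordinary non-wrapping unsigned ranges the per-operand fold is just an elementwise bound computation, as in this standalone sketch (an illustration under that non-wrapping assumption, not the ConstantRange implementation):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    // For non-wrapping unsigned ranges [Lo, Hi], the range of umin(a, b) is
    // [min(ALo, BLo), min(AHi, BHi)]: the smallest value either operand can
    // take, and the largest value the pointwise minimum can still reach.
    struct URange { uint64_t Lo, Hi; };

    static URange uminRange(URange A, URange B) {
      return {std::min(A.Lo, B.Lo), std::min(A.Hi, B.Hi)};
    }

    int main() {
      URange R = uminRange({4, 10}, {6, 20});
      assert(R.Lo == 4 && R.Hi == 10);
      return 0;
    }
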
@@ -7368,11 +7596,6 @@ const SCEV *ScalarEvolution::getConstantMaxTripCountFromArray(const Loop *L) {
auto *ArrSize = dyn_cast<ConstantInt>(AllocateInst->getArraySize());
if (!Ty || !ArrSize || !ArrSize->isOne())
continue;
- // Also make sure step was increased the same with sizeof allocated
- // element type.
- const PointerType *GEPT = dyn_cast<PointerType>(GEP->getType());
- if (Ty->getElementType() != GEPT->getElementType())
- continue;
// FIXME: Since gep indices are silently zext to the indexing type,
// we will have a narrow gep index which wraps around rather than
@@ -8093,6 +8316,29 @@ ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl(
return getZero(CI->getType());
}
+ // If we're exiting based on the overflow flag of an x.with.overflow intrinsic
+ // with a constant step, we can form an equivalent icmp predicate and figure
+ // out how many iterations will be taken before we exit.
+ const WithOverflowInst *WO;
+ const APInt *C;
+ if (match(ExitCond, m_ExtractValue<1>(m_WithOverflowInst(WO))) &&
+ match(WO->getRHS(), m_APInt(C))) {
+ ConstantRange NWR =
+ ConstantRange::makeExactNoWrapRegion(WO->getBinaryOp(), *C,
+ WO->getNoWrapKind());
+ CmpInst::Predicate Pred;
+ APInt NewRHSC, Offset;
+ NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
+ if (!ExitIfTrue)
+ Pred = ICmpInst::getInversePredicate(Pred);
+ auto *LHS = getSCEV(WO->getLHS());
+ if (Offset != 0)
+ LHS = getAddExpr(LHS, getConstant(Offset));
+ auto EL = computeExitLimitFromICmp(L, Pred, LHS, getConstant(NewRHSC),
+ ControlsExit, AllowPredicates);
+ if (EL.hasAnyInfo()) return EL;
+ }
+
// If it's not an integer or pointer comparison then compute it the hard way.
return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
}
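
The block above recognizes exit conditions that test the overflow flag of an *.with.overflow intrinsic whose second operand is a constant: makeExactNoWrapRegion yields the input range in which the operation does not wrap, getEquivalentICmp turns that range into a predicate plus constant, and the result is fed to the ordinary icmp exit-limit logic. As a hand-worked, hypothetical instance: an i8 counter stepped by llvm.uadd.with.overflow(i, 1) that exits when the overflow bit is set leaves the loop on the first iteration with i == 255, so the backedge is taken 255 - Start times, which this toy model checks by simulation:

    #include <cassert>
    #include <cstdint>

    // Hand-worked example, not derived from the LLVM code: backedge-taken
    // count of an i8 loop that exits as soon as (i + 1) overflows.
    static unsigned backedgeTakenCount(uint8_t Start) { return 255u - Start; }

    int main() {
      for (unsigned Start = 0; Start < 256; ++Start) {
        uint8_t I = uint8_t(Start);
        unsigned Taken = 0;
        while (unsigned(I) + 1 <= 255) { // negated overflow bit
          ++I;
          ++Taken;
        }
        assert(Taken == backedgeTakenCount(uint8_t(Start)));
      }
      return 0;
    }
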
@@ -8134,26 +8380,11 @@ ScalarEvolution::computeExitLimitFromCondFromBinOp(
if (EitherMayExit) {
// Both conditions must be same for the loop to continue executing.
// Choose the less conservative count.
- // If ExitCond is a short-circuit form (select), using
- // umin(EL0.ExactNotTaken, EL1.ExactNotTaken) is unsafe in general.
- // To see the detailed examples, please see
- // test/Analysis/ScalarEvolution/exit-count-select.ll
- bool PoisonSafe = isa<BinaryOperator>(ExitCond);
- if (!PoisonSafe)
- // Even if ExitCond is select, we can safely derive BECount using both
- // EL0 and EL1 in these cases:
- // (1) EL0.ExactNotTaken is non-zero
- // (2) EL1.ExactNotTaken is non-poison
- // (3) EL0.ExactNotTaken is zero (BECount should be simply zero and
- // it cannot be umin(0, ..))
- // The PoisonSafe assignment below is simplified and the assertion after
- // BECount calculation fully guarantees the condition (3).
- PoisonSafe = isa<SCEVConstant>(EL0.ExactNotTaken) ||
- isa<SCEVConstant>(EL1.ExactNotTaken);
if (EL0.ExactNotTaken != getCouldNotCompute() &&
- EL1.ExactNotTaken != getCouldNotCompute() && PoisonSafe) {
- BECount =
- getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken);
+ EL1.ExactNotTaken != getCouldNotCompute()) {
+ BECount = getUMinFromMismatchedTypes(
+ EL0.ExactNotTaken, EL1.ExactNotTaken,
+ /*Sequential=*/!isa<BinaryOperator>(ExitCond));
// If EL0.ExactNotTaken was zero and ExitCond was a short-circuit form,
// it should have been simplified to zero (see the condition (3) above)
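
The hunk above is where the sequential umin pays off: for a short-circuit exit condition written as a select, the old code could only combine the two exit counts when one of them was provably a constant, because a plain umin of the counts may turn a well-defined zero into poison. With umin_seq a zero first count shields whatever the second count is, so the counts can always be combined, and Sequential is set exactly when ExitCond is not a plain binary and/or. A toy model of the distinction, with poison represented by an explicit flag:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <utility>

    // Toy model only: {value, isPoison} stands in for a possibly-poison exit
    // count. Plain umin propagates poison; the sequential form stops at the
    // first zero and never looks at the second operand.
    using Count = std::pair<uint64_t, bool>;

    static Count uminPlain(Count A, Count B) {
      if (A.second || B.second)
        return {0, true};
      return {std::min(A.first, B.first), false};
    }

    static Count uminSeq(Count A, Count B) {
      if (A.second)
        return {0, true};
      if (A.first == 0)
        return {0, false}; // B is never evaluated
      return uminPlain(A, B);
    }

    int main() {
      Count EL0{0, false}, EL1{0, true};  // EL1 models a poison count
      assert(uminPlain(EL0, EL1).second); // plain umin would be poison
      assert(!uminSeq(EL0, EL1).second);  // umin_seq is a well-defined 0
      return 0;
    }
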
@@ -8203,6 +8434,26 @@ ScalarEvolution::computeExitLimitFromICmp(const Loop *L,
const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
+ ExitLimit EL = computeExitLimitFromICmp(L, Pred, LHS, RHS, ControlsExit,
+ AllowPredicates);
+ if (EL.hasAnyInfo()) return EL;
+
+ auto *ExhaustiveCount =
+ computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
+
+ if (!isa<SCEVCouldNotCompute>(ExhaustiveCount))
+ return ExhaustiveCount;
+
+ return computeShiftCompareExitLimit(ExitCond->getOperand(0),
+ ExitCond->getOperand(1), L, OriginalPred);
+}
+ScalarEvolution::ExitLimit
+ScalarEvolution::computeExitLimitFromICmp(const Loop *L,
+ ICmpInst::Predicate Pred,
+ const SCEV *LHS, const SCEV *RHS,
+ bool ControlsExit,
+ bool AllowPredicates) {
+
// Try to evaluate any dependencies out of the loop.
LHS = getSCEVAtScope(LHS, L);
RHS = getSCEVAtScope(RHS, L);
@@ -8312,14 +8563,7 @@ ScalarEvolution::computeExitLimitFromICmp(const Loop *L,
break;
}
- auto *ExhaustiveCount =
- computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
-
- if (!isa<SCEVCouldNotCompute>(ExhaustiveCount))
- return ExhaustiveCount;
-
- return computeShiftCompareExitLimit(ExitCond->getOperand(0),
- ExitCond->getOperand(1), L, OriginalPred);
+ return getCouldNotCompute();
}
ScalarEvolution::ExitLimit
@@ -8941,7 +9185,8 @@ static Constant *BuildConstantFromSCEV(const SCEV *V) {
case scUMaxExpr:
case scSMinExpr:
case scUMinExpr:
- return nullptr; // TODO: smax, umax, smin, umax.
+ case scSequentialUMinExpr:
+ return nullptr; // TODO: smax, umax, smin, umin, umin_seq.
}
llvm_unreachable("Unknown SCEV kind!");
}
@@ -9070,7 +9315,8 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
return V;
}
- if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
+ if (isa<SCEVCommutativeExpr>(V) || isa<SCEVSequentialMinMaxExpr>(V)) {
+ const auto *Comm = cast<SCEVNAryExpr>(V);
// Avoid performing the look-up in the common case where the specified
// expression has no loop-variant portions.
for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
@@ -9092,7 +9338,9 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
return getMulExpr(NewOps, Comm->getNoWrapFlags());
if (isa<SCEVMinMaxExpr>(Comm))
return getMinMaxExpr(Comm->getSCEVType(), NewOps);
- llvm_unreachable("Unknown commutative SCEV type!");
+ if (isa<SCEVSequentialMinMaxExpr>(Comm))
+ return getSequentialMinMaxExpr(Comm->getSCEVType(), NewOps);
+ llvm_unreachable("Unknown commutative / sequential min/max SCEV type!");
}
}
// If we got here, all operands are loop invariant.
@@ -9153,32 +9401,11 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
return AddRec;
}
- if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
- const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
- if (Op == Cast->getOperand())
- return Cast; // must be loop invariant
- return getZeroExtendExpr(Op, Cast->getType());
- }
-
- if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
- const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
- if (Op == Cast->getOperand())
- return Cast; // must be loop invariant
- return getSignExtendExpr(Op, Cast->getType());
- }
-
- if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
+ if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
if (Op == Cast->getOperand())
return Cast; // must be loop invariant
- return getTruncateExpr(Op, Cast->getType());
- }
-
- if (const SCEVPtrToIntExpr *Cast = dyn_cast<SCEVPtrToIntExpr>(V)) {
- const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
- if (Op == Cast->getOperand())
- return Cast; // must be loop invariant
- return getPtrToIntExpr(Op, Cast->getType());
+ return getCastExpr(Cast->getSCEVType(), Op, Cast->getType());
}
llvm_unreachable("Unknown SCEV type!");
@@ -11236,6 +11463,48 @@ bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
return true;
}
+bool ScalarEvolution::isImpliedCondOperandsViaShift(ICmpInst::Predicate Pred,
+ const SCEV *LHS,
+ const SCEV *RHS,
+ const SCEV *FoundLHS,
+ const SCEV *FoundRHS) {
+ // We want to imply LHS < RHS from LHS < (RHS >> shiftvalue). First, make
+ // sure that we are dealing with same LHS.
+ if (RHS == FoundRHS) {
+ std::swap(LHS, RHS);
+ std::swap(FoundLHS, FoundRHS);
+ Pred = ICmpInst::getSwappedPredicate(Pred);
+ }
+ if (LHS != FoundLHS)
+ return false;
+
+ auto *SUFoundRHS = dyn_cast<SCEVUnknown>(FoundRHS);
+ if (!SUFoundRHS)
+ return false;
+
+ Value *Shiftee, *ShiftValue;
+
+ using namespace PatternMatch;
+ if (match(SUFoundRHS->getValue(),
+ m_LShr(m_Value(Shiftee), m_Value(ShiftValue)))) {
+ auto *ShifteeS = getSCEV(Shiftee);
+ // Prove one of the following:
+ // LHS <u (shiftee >> shiftvalue) && shiftee <=u RHS ---> LHS <u RHS
+ // LHS <=u (shiftee >> shiftvalue) && shiftee <=u RHS ---> LHS <=u RHS
+ // LHS <s (shiftee >> shiftvalue) && shiftee <=s RHS && shiftee >=s 0
+ // ---> LHS <s RHS
+ // LHS <=s (shiftee >> shiftvalue) && shiftee <=s RHS && shiftee >=s 0
+ // ---> LHS <=s RHS
+ if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
+ return isKnownPredicate(ICmpInst::ICMP_ULE, ShifteeS, RHS);
+ if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
+ if (isKnownNonNegative(ShifteeS))
+ return isKnownPredicate(ICmpInst::ICMP_SLE, ShifteeS, RHS);
+ }
+
+ return false;
+}
+
bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
const SCEV *LHS, const SCEV *RHS,
const SCEV *FoundLHS,
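
The new isImpliedCondOperandsViaShift proves LHS <u RHS from the facts LHS <u (Shiftee >> ShiftValue) and Shiftee <=u RHS (plus the analogous signed forms when the shiftee is known non-negative), relying on the chain LHS < (Shiftee >> n) <= Shiftee <= RHS. The unsigned form can be sanity-checked exhaustively on 8-bit values:

    #include <cassert>

    // Exhaustive check: LHS <u (Shiftee >> Shift) && Shiftee <=u RHS
    // implies LHS <u RHS for all 8-bit values and shift amounts 0..7.
    int main() {
      for (unsigned Shiftee = 0; Shiftee < 256; ++Shiftee)
        for (unsigned Shift = 0; Shift < 8; ++Shift)
          for (unsigned LHS = 0; LHS < 256; ++LHS)
            for (unsigned RHS = 0; RHS < 256; ++RHS)
              if (LHS < (Shiftee >> Shift) && Shiftee <= RHS)
                assert(LHS < RHS);
      return 0;
    }
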
@@ -11247,6 +11516,9 @@ bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
return true;
+ if (isImpliedCondOperandsViaShift(Pred, LHS, RHS, FoundLHS, FoundRHS))
+ return true;
+
if (isImpliedCondOperandsViaAddRecStart(Pred, LHS, RHS, FoundLHS, FoundRHS,
CtxI))
return true;
@@ -11323,6 +11595,7 @@ static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
case ICmpInst::ICMP_ULE:
return
// min(A, ...) <= A
+ // FIXME: what about umin_seq?
IsMinMaxConsistingOf<SCEVUMinExpr>(LHS, RHS) ||
// A <= max(A, ...)
IsMinMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
@@ -12723,7 +12996,8 @@ ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
case scUMaxExpr:
case scSMaxExpr:
case scUMinExpr:
- case scSMinExpr: {
+ case scSMinExpr:
+ case scSequentialUMinExpr: {
bool HasVarying = false;
for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) {
LoopDisposition D = getLoopDisposition(Op, L);
@@ -12813,7 +13087,8 @@ ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
case scUMaxExpr:
case scSMaxExpr:
case scUMinExpr:
- case scSMinExpr: {
+ case scSMinExpr:
+ case scSequentialUMinExpr: {
const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
bool Proper = true;
for (const SCEV *NAryOp : NAry->operands()) {
diff --git a/contrib/llvm-project/llvm/lib/Analysis/TFUtils.cpp b/contrib/llvm-project/llvm/lib/Analysis/TFUtils.cpp
index 3d10479c4544..26bc63983b4e 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/TFUtils.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/TFUtils.cpp
@@ -14,6 +14,7 @@
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/Utils/TFUtils.h"
+#include "llvm/Support/Base64.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/JSON.h"
@@ -22,6 +23,7 @@
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
+#include "google/protobuf/struct.pb.h"
#include "google/protobuf/text_format.h"
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/c_api_experimental.h"
@@ -72,6 +74,14 @@ TFStatusPtr createTFStatus() {
TFSessionOptionsPtr createTFSessionOptions() {
return TFSessionOptionsPtr(TF_NewSessionOptions(), &TF_DeleteSessionOptions);
}
+
+void serialize(const Message &SE, std::string *OutStr) {
+ if (ProtobufTextMode) {
+ TextFormat::PrintToString(SE, OutStr);
+ } else {
+ *OutStr = SE.SerializeAsString();
+ }
+}
} // namespace
namespace llvm {
@@ -307,19 +317,13 @@ public:
IncludeReward(IncludeReward), FeatureLists(LoggedFeatureSpecs.size()) {}
// flush the logged info to a stream and clear the log contents.
- void flush(raw_ostream &OS) {
+ void flush(std::string *Str) {
size_t NrRecords = getNrRecords();
(void)NrRecords;
tensorflow::SequenceExample SE;
transferLog(SE);
assert(isSelfConsistent(SE, NrRecords));
- std::string OutStr;
- if (ProtobufTextMode)
- google::protobuf::TextFormat::PrintToString(SE, &OutStr);
- else
- OutStr = SE.SerializeAsString();
-
- OS << OutStr;
+ serialize(SE, Str);
}
char *addNewTensor(size_t FeatureID) {
@@ -567,5 +571,31 @@ char *Logger::addEntryAndGetFloatOrInt64Buffer(size_t FeatureID) {
return reinterpret_cast<char *>(LoggerData->addNewTensor(FeatureID));
}
-void Logger::flush(raw_ostream &OS) { LoggerData->flush(OS); }
+void Logger::flush(std::string *Str) { LoggerData->flush(Str); }
+
+void Logger::flush(raw_ostream &OS) {
+ std::string Buff;
+ LoggerData->flush(&Buff);
+ OS << Buff;
+}
+
+void Logger::flushLogs(raw_ostream &OS,
+ const StringMap<std::unique_ptr<Logger>> &Loggers) {
+ google::protobuf::Struct Msg;
+ for (const auto &NamedLogger : Loggers) {
+ tensorflow::SequenceExample SE;
+ const auto &Logger = NamedLogger.second;
+ std::string Unencoded;
+ if (Logger->LoggerData->getNrRecords() > 0)
+ Logger->flush(&Unencoded);
+
+ (*Msg.mutable_fields())[NamedLogger.first().str()]
+ .mutable_string_value()
+ ->append(ProtobufTextMode ? Unencoded : encodeBase64(Unencoded));
+ }
+
+ std::string OutStr;
+ serialize(Msg, &OutStr);
+ OS << OutStr;
+}
#endif // defined(LLVM_HAVE_TF_API)
diff --git a/contrib/llvm-project/llvm/lib/Analysis/TargetTransformInfo.cpp b/contrib/llvm-project/llvm/lib/Analysis/TargetTransformInfo.cpp
index 6aa9a77391dc..25e9dee98e13 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -408,6 +408,16 @@ bool TargetTransformInfo::isLegalMaskedScatter(Type *DataType,
return TTIImpl->isLegalMaskedScatter(DataType, Alignment);
}
+bool TargetTransformInfo::forceScalarizeMaskedGather(VectorType *DataType,
+ Align Alignment) const {
+ return TTIImpl->forceScalarizeMaskedGather(DataType, Alignment);
+}
+
+bool TargetTransformInfo::forceScalarizeMaskedScatter(VectorType *DataType,
+ Align Alignment) const {
+ return TTIImpl->forceScalarizeMaskedScatter(DataType, Alignment);
+}
+
bool TargetTransformInfo::isLegalMaskedCompressStore(Type *DataType) const {
return TTIImpl->isLegalMaskedCompressStore(DataType);
}
diff --git a/contrib/llvm-project/llvm/lib/Analysis/ValueTracking.cpp b/contrib/llvm-project/llvm/lib/Analysis/ValueTracking.cpp
index fc378f97de0b..34358739f9a8 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/ValueTracking.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/ValueTracking.cpp
@@ -396,10 +396,10 @@ unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}
-unsigned llvm::ComputeMinSignedBits(const Value *V, const DataLayout &DL,
- unsigned Depth, AssumptionCache *AC,
- const Instruction *CxtI,
- const DominatorTree *DT) {
+unsigned llvm::ComputeMaxSignificantBits(const Value *V, const DataLayout &DL,
+ unsigned Depth, AssumptionCache *AC,
+ const Instruction *CxtI,
+ const DominatorTree *DT) {
unsigned SignBits = ComputeNumSignBits(V, DL, Depth, AC, CxtI, DT);
return V->getType()->getScalarSizeInBits() - SignBits + 1;
}
@@ -1593,7 +1593,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
// If we have a known 1, its position is our upper bound.
unsigned PossibleLZ = Known2.countMaxLeadingZeros();
- // If this call is undefined for 0, the result will be less than 2^n.
+ // If this call is poison for 0 input, the result will be less than 2^n.
if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
unsigned LowBits = Log2_32(PossibleLZ)+1;
@@ -1604,7 +1604,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
// If we have a known 1, its position is our upper bound.
unsigned PossibleTZ = Known2.countMaxTrailingZeros();
- // If this call is undefined for 0, the result will be less than 2^n.
+ // If this call is poison for 0 input, the result will be less than 2^n.
if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
unsigned LowBits = Log2_32(PossibleTZ)+1;
@@ -3248,125 +3248,6 @@ static unsigned ComputeNumSignBitsImpl(const Value *V,
return std::max(FirstAnswer, Known.countMinSignBits());
}
-/// This function computes the integer multiple of Base that equals V.
-/// If successful, it returns true and returns the multiple in
-/// Multiple. If unsuccessful, it returns false. It looks
-/// through SExt instructions only if LookThroughSExt is true.
-bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
- bool LookThroughSExt, unsigned Depth) {
- assert(V && "No Value?");
- assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
- assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
-
- Type *T = V->getType();
-
- ConstantInt *CI = dyn_cast<ConstantInt>(V);
-
- if (Base == 0)
- return false;
-
- if (Base == 1) {
- Multiple = V;
- return true;
- }
-
- ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
- Constant *BaseVal = ConstantInt::get(T, Base);
- if (CO && CO == BaseVal) {
- // Multiple is 1.
- Multiple = ConstantInt::get(T, 1);
- return true;
- }
-
- if (CI && CI->getZExtValue() % Base == 0) {
- Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
- return true;
- }
-
- if (Depth == MaxAnalysisRecursionDepth) return false;
-
- Operator *I = dyn_cast<Operator>(V);
- if (!I) return false;
-
- switch (I->getOpcode()) {
- default: break;
- case Instruction::SExt:
- if (!LookThroughSExt) return false;
- // otherwise fall through to ZExt
- LLVM_FALLTHROUGH;
- case Instruction::ZExt:
- return ComputeMultiple(I->getOperand(0), Base, Multiple,
- LookThroughSExt, Depth+1);
- case Instruction::Shl:
- case Instruction::Mul: {
- Value *Op0 = I->getOperand(0);
- Value *Op1 = I->getOperand(1);
-
- if (I->getOpcode() == Instruction::Shl) {
- ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
- if (!Op1CI) return false;
- // Turn Op0 << Op1 into Op0 * 2^Op1
- APInt Op1Int = Op1CI->getValue();
- uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
- APInt API(Op1Int.getBitWidth(), 0);
- API.setBit(BitToSet);
- Op1 = ConstantInt::get(V->getContext(), API);
- }
-
- Value *Mul0 = nullptr;
- if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
- if (Constant *Op1C = dyn_cast<Constant>(Op1))
- if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
- if (Op1C->getType()->getPrimitiveSizeInBits().getFixedSize() <
- MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
- Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
- if (Op1C->getType()->getPrimitiveSizeInBits().getFixedSize() >
- MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
- MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
-
- // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
- Multiple = ConstantExpr::getMul(MulC, Op1C);
- return true;
- }
-
- if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
- if (Mul0CI->getValue() == 1) {
- // V == Base * Op1, so return Op1
- Multiple = Op1;
- return true;
- }
- }
-
- Value *Mul1 = nullptr;
- if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
- if (Constant *Op0C = dyn_cast<Constant>(Op0))
- if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
- if (Op0C->getType()->getPrimitiveSizeInBits().getFixedSize() <
- MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
- Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
- if (Op0C->getType()->getPrimitiveSizeInBits().getFixedSize() >
- MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
- MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
-
- // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
- Multiple = ConstantExpr::getMul(MulC, Op0C);
- return true;
- }
-
- if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
- if (Mul1CI->getValue() == 1) {
- // V == Base * Op0, so return Op0
- Multiple = Op0;
- return true;
- }
- }
- }
- }
-
- // We could not determine if V is a multiple of Base.
- return false;
-}
-
Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB,
const TargetLibraryInfo *TLI) {
const Function *F = CB.getCalledFunction();
@@ -6756,17 +6637,27 @@ Optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred,
}
static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
- APInt &Upper, const InstrInfoQuery &IIQ) {
+ APInt &Upper, const InstrInfoQuery &IIQ,
+ bool PreferSignedRange) {
unsigned Width = Lower.getBitWidth();
const APInt *C;
switch (BO.getOpcode()) {
case Instruction::Add:
if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
- // FIXME: If we have both nuw and nsw, we should reduce the range further.
- if (IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
+ bool HasNSW = IIQ.hasNoSignedWrap(&BO);
+ bool HasNUW = IIQ.hasNoUnsignedWrap(&BO);
+
+ // If the caller expects a signed compare, then try to use a signed range.
+ // Otherwise if both no-wraps are set, use the unsigned range because it
+ // is never larger than the signed range. Example:
+ // "add nuw nsw i8 X, -2" is unsigned [254,255] vs. signed [-128, 125].
+ if (PreferSignedRange && HasNSW && HasNUW)
+ HasNUW = false;
+
+ if (HasNUW) {
// 'add nuw x, C' produces [C, UINT_MAX].
Lower = *C;
- } else if (IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
+ } else if (HasNSW) {
if (C->isNegative()) {
// 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
Lower = APInt::getSignedMinValue(Width);
@@ -7083,8 +6974,8 @@ static void setLimitForFPToI(const Instruction *I, APInt &Lower, APInt &Upper) {
}
}
-ConstantRange llvm::computeConstantRange(const Value *V, bool UseInstrInfo,
- AssumptionCache *AC,
+ConstantRange llvm::computeConstantRange(const Value *V, bool ForSigned,
+ bool UseInstrInfo, AssumptionCache *AC,
const Instruction *CtxI,
const DominatorTree *DT,
unsigned Depth) {
@@ -7102,7 +6993,7 @@ ConstantRange llvm::computeConstantRange(const Value *V, bool UseInstrInfo,
APInt Lower = APInt(BitWidth, 0);
APInt Upper = APInt(BitWidth, 0);
if (auto *BO = dyn_cast<BinaryOperator>(V))
- setLimitsForBinOp(*BO, Lower, Upper, IIQ);
+ setLimitsForBinOp(*BO, Lower, Upper, IIQ, ForSigned);
else if (auto *II = dyn_cast<IntrinsicInst>(V))
setLimitsForIntrinsic(*II, Lower, Upper);
else if (auto *SI = dyn_cast<SelectInst>(V))
@@ -7134,8 +7025,10 @@ ConstantRange llvm::computeConstantRange(const Value *V, bool UseInstrInfo,
// Currently we just use information from comparisons.
if (!Cmp || Cmp->getOperand(0) != V)
continue;
- ConstantRange RHS = computeConstantRange(Cmp->getOperand(1), UseInstrInfo,
- AC, I, DT, Depth + 1);
+ // TODO: Set "ForSigned" parameter via Cmp->isSigned()?
+ ConstantRange RHS =
+ computeConstantRange(Cmp->getOperand(1), /* ForSigned */ false,
+ UseInstrInfo, AC, I, DT, Depth + 1);
CR = CR.intersectWith(
ConstantRange::makeAllowedICmpRegion(Cmp->getPredicate(), RHS));
}
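
In this file, setLimitsForBinOp gains a PreferSignedRange flag and computeConstantRange threads it through as ForSigned: when an add carries both nuw and nsw, the unsigned range is never wider than the signed one and is therefore used by default, but a caller about to reason about a signed comparison can ask for the signed range instead. The example quoted in the comment can be reproduced by enumeration; a small standalone check under the 8-bit assumption spelled out there:

    #include <cstdio>

    // Enumerate "add nuw nsw i8 X, -2" (X + 254 mod 256 with neither unsigned
    // nor signed wrap). Only X = 0 and X = 1 qualify, giving results
    // {254, 255} unsigned, i.e. {-2, -1} signed, so the unsigned range
    // [254, 255] is exact while the signed range is the wider [-128, 125].
    int main() {
      for (unsigned X = 0; X < 256; ++X) {
        unsigned R = (X + 254u) & 0xffu;
        bool NUW = X + 254u <= 255u;                // unsigned add did not wrap
        int SX = X < 128 ? int(X) : int(X) - 256;
        bool NSW = SX - 2 >= -128 && SX - 2 <= 127; // signed add did not wrap
        if (NUW && NSW)
          std::printf("X=%u -> unsigned %u, signed %d\n", X, R, SX - 2);
      }
      return 0;
    }
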
diff --git a/contrib/llvm-project/llvm/lib/AsmParser/LLParser.cpp b/contrib/llvm-project/llvm/lib/AsmParser/LLParser.cpp
index 35c615522fe2..432ec151cf8a 100644
--- a/contrib/llvm-project/llvm/lib/AsmParser/LLParser.cpp
+++ b/contrib/llvm-project/llvm/lib/AsmParser/LLParser.cpp
@@ -133,14 +133,17 @@ bool LLParser::validateEndOfModule(bool UpgradeDebugInfo) {
for (const auto &RAG : ForwardRefAttrGroups) {
Value *V = RAG.first;
const std::vector<unsigned> &Attrs = RAG.second;
- AttrBuilder B;
+ AttrBuilder B(Context);
- for (const auto &Attr : Attrs)
- B.merge(NumberedAttrBuilders[Attr]);
+ for (const auto &Attr : Attrs) {
+ auto R = NumberedAttrBuilders.find(Attr);
+ if (R != NumberedAttrBuilders.end())
+ B.merge(R->second);
+ }
if (Function *Fn = dyn_cast<Function>(V)) {
AttributeList AS = Fn->getAttributes();
- AttrBuilder FnAttrs(AS.getFnAttrs());
+ AttrBuilder FnAttrs(M->getContext(), AS.getFnAttrs());
AS = AS.removeFnAttributes(Context);
FnAttrs.merge(B);
@@ -156,27 +159,27 @@ bool LLParser::validateEndOfModule(bool UpgradeDebugInfo) {
Fn->setAttributes(AS);
} else if (CallInst *CI = dyn_cast<CallInst>(V)) {
AttributeList AS = CI->getAttributes();
- AttrBuilder FnAttrs(AS.getFnAttrs());
+ AttrBuilder FnAttrs(M->getContext(), AS.getFnAttrs());
AS = AS.removeFnAttributes(Context);
FnAttrs.merge(B);
AS = AS.addFnAttributes(Context, FnAttrs);
CI->setAttributes(AS);
} else if (InvokeInst *II = dyn_cast<InvokeInst>(V)) {
AttributeList AS = II->getAttributes();
- AttrBuilder FnAttrs(AS.getFnAttrs());
+ AttrBuilder FnAttrs(M->getContext(), AS.getFnAttrs());
AS = AS.removeFnAttributes(Context);
FnAttrs.merge(B);
AS = AS.addFnAttributes(Context, FnAttrs);
II->setAttributes(AS);
} else if (CallBrInst *CBI = dyn_cast<CallBrInst>(V)) {
AttributeList AS = CBI->getAttributes();
- AttrBuilder FnAttrs(AS.getFnAttrs());
+ AttrBuilder FnAttrs(M->getContext(), AS.getFnAttrs());
AS = AS.removeFnAttributes(Context);
FnAttrs.merge(B);
AS = AS.addFnAttributes(Context, FnAttrs);
CBI->setAttributes(AS);
} else if (auto *GV = dyn_cast<GlobalVariable>(V)) {
- AttrBuilder Attrs(GV->getAttributes());
+ AttrBuilder Attrs(M->getContext(), GV->getAttributes());
Attrs.merge(B);
GV->setAttributes(AttributeSet::get(Context,Attrs));
} else {
@@ -982,17 +985,18 @@ bool LLParser::parseAliasOrIFunc(const std::string &Name, LocTy NameLoc,
return error(AliaseeLoc, "An alias or ifunc must have pointer type");
unsigned AddrSpace = PTy->getAddressSpace();
- if (IsAlias && !PTy->isOpaqueOrPointeeTypeMatches(Ty)) {
- return error(
- ExplicitTypeLoc,
- typeComparisonErrorMessage(
- "explicit pointee type doesn't match operand's pointee type", Ty,
- PTy->getElementType()));
- }
-
- if (!IsAlias && !PTy->getElementType()->isFunctionTy()) {
- return error(ExplicitTypeLoc,
- "explicit pointee type should be a function type");
+ if (IsAlias) {
+ if (!PTy->isOpaqueOrPointeeTypeMatches(Ty))
+ return error(
+ ExplicitTypeLoc,
+ typeComparisonErrorMessage(
+ "explicit pointee type doesn't match operand's pointee type", Ty,
+ PTy->getNonOpaquePointerElementType()));
+ } else {
+ if (!PTy->isOpaque() &&
+ !PTy->getNonOpaquePointerElementType()->isFunctionTy())
+ return error(ExplicitTypeLoc,
+ "explicit pointee type should be a function type");
}
GlobalValue *GVal = nullptr;
@@ -1206,7 +1210,7 @@ bool LLParser::parseGlobal(const std::string &Name, LocTy NameLoc,
}
}
- AttrBuilder Attrs;
+ AttrBuilder Attrs(M->getContext());
LocTy BuiltinLoc;
std::vector<unsigned> FwdRefAttrGrps;
if (parseFnAttributeValuePairs(Attrs, FwdRefAttrGrps, false, BuiltinLoc))
@@ -1235,13 +1239,18 @@ bool LLParser::parseUnnamedAttrGrp() {
Lex.Lex();
if (parseToken(lltok::equal, "expected '=' here") ||
- parseToken(lltok::lbrace, "expected '{' here") ||
- parseFnAttributeValuePairs(NumberedAttrBuilders[VarID], unused, true,
- BuiltinLoc) ||
+ parseToken(lltok::lbrace, "expected '{' here"))
+ return true;
+
+ auto R = NumberedAttrBuilders.find(VarID);
+ if (R == NumberedAttrBuilders.end())
+ R = NumberedAttrBuilders.emplace(VarID, AttrBuilder(M->getContext())).first;
+
+ if (parseFnAttributeValuePairs(R->second, unused, true, BuiltinLoc) ||
parseToken(lltok::rbrace, "expected end of attribute group"))
return true;
- if (!NumberedAttrBuilders[VarID].hasAttributes())
+ if (!R->second.hasAttributes())
return error(AttrGrpLoc, "attribute group has no attributes");
return false;
@@ -1402,14 +1411,14 @@ static inline GlobalValue *createGlobalFwdRef(Module *M, PointerType *PTy) {
nullptr, GlobalVariable::NotThreadLocal,
PTy->getAddressSpace());
- if (auto *FT = dyn_cast<FunctionType>(PTy->getPointerElementType()))
+ Type *ElemTy = PTy->getNonOpaquePointerElementType();
+ if (auto *FT = dyn_cast<FunctionType>(ElemTy))
return Function::Create(FT, GlobalValue::ExternalWeakLinkage,
PTy->getAddressSpace(), "", M);
else
- return new GlobalVariable(*M, PTy->getPointerElementType(), false,
- GlobalValue::ExternalWeakLinkage, nullptr, "",
- nullptr, GlobalVariable::NotThreadLocal,
- PTy->getAddressSpace());
+ return new GlobalVariable(
+ *M, ElemTy, false, GlobalValue::ExternalWeakLinkage, nullptr, "",
+ nullptr, GlobalVariable::NotThreadLocal, PTy->getAddressSpace());
}
Value *LLParser::checkValidVariableType(LocTy Loc, const Twine &Name, Type *Ty,
@@ -2372,11 +2381,12 @@ bool LLParser::parseParameterList(SmallVectorImpl<ParamInfo> &ArgList,
// parse the argument.
LocTy ArgLoc;
Type *ArgTy = nullptr;
- AttrBuilder ArgAttrs;
Value *V;
if (parseType(ArgTy, ArgLoc))
return true;
+ AttrBuilder ArgAttrs(M->getContext());
+
if (ArgTy->isMetadataTy()) {
if (parseMetadataAsValue(V, PFS))
return true;
@@ -2493,7 +2503,7 @@ bool LLParser::parseArgumentList(SmallVectorImpl<ArgInfo> &ArgList,
} else {
LocTy TypeLoc = Lex.getLoc();
Type *ArgTy = nullptr;
- AttrBuilder Attrs;
+ AttrBuilder Attrs(M->getContext());
std::string Name;
if (parseType(ArgTy) || parseOptionalParamAttrs(Attrs))
@@ -3579,7 +3589,7 @@ bool LLParser::parseValID(ValID &ID, PerFunctionState *PFS, Type *ExpectedTy) {
ExplicitTypeLoc,
typeComparisonErrorMessage(
"explicit pointee type doesn't match operand's pointee type",
- Ty, BasePointerType->getElementType()));
+ Ty, BasePointerType->getNonOpaquePointerElementType()));
}
unsigned GEPWidth =
@@ -4541,16 +4551,17 @@ bool LLParser::parseDIStringType(MDNode *&Result, bool IsDistinct) {
OPTIONAL(name, MDStringField, ); \
OPTIONAL(stringLength, MDField, ); \
OPTIONAL(stringLengthExpression, MDField, ); \
+ OPTIONAL(stringLocationExpression, MDField, ); \
OPTIONAL(size, MDUnsignedField, (0, UINT64_MAX)); \
OPTIONAL(align, MDUnsignedField, (0, UINT32_MAX)); \
OPTIONAL(encoding, DwarfAttEncodingField, );
PARSE_MD_FIELDS();
#undef VISIT_MD_FIELDS
- Result = GET_OR_DISTINCT(DIStringType,
- (Context, tag.Val, name.Val, stringLength.Val,
- stringLengthExpression.Val, size.Val, align.Val,
- encoding.Val));
+ Result = GET_OR_DISTINCT(
+ DIStringType,
+ (Context, tag.Val, name.Val, stringLength.Val, stringLengthExpression.Val,
+ stringLocationExpression.Val, size.Val, align.Val, encoding.Val));
return false;
}
@@ -5462,7 +5473,7 @@ bool LLParser::parseFunctionHeader(Function *&Fn, bool IsDefine) {
unsigned Visibility;
unsigned DLLStorageClass;
bool DSOLocal;
- AttrBuilder RetAttrs;
+ AttrBuilder RetAttrs(M->getContext());
unsigned CC;
bool HasLinkage;
Type *RetType = nullptr;
@@ -5525,7 +5536,7 @@ bool LLParser::parseFunctionHeader(Function *&Fn, bool IsDefine) {
SmallVector<ArgInfo, 8> ArgList;
bool IsVarArg;
- AttrBuilder FuncAttrs;
+ AttrBuilder FuncAttrs(M->getContext());
std::vector<unsigned> FwdRefAttrGrps;
LocTy BuiltinLoc;
std::string Section;
@@ -5593,7 +5604,7 @@ bool LLParser::parseFunctionHeader(Function *&Fn, bool IsDefine) {
if (FRVI != ForwardRefVals.end()) {
FwdFn = FRVI->second.first;
if (!FwdFn->getType()->isOpaque()) {
- if (!FwdFn->getType()->getPointerElementType()->isFunctionTy())
+ if (!FwdFn->getType()->getNonOpaquePointerElementType()->isFunctionTy())
return error(FRVI->second.second, "invalid forward reference to "
"function as global value!");
if (FwdFn->getType() != PFT)
@@ -6248,7 +6259,7 @@ bool LLParser::parseIndirectBr(Instruction *&Inst, PerFunctionState &PFS) {
/// OptionalAttrs 'to' TypeAndValue 'unwind' TypeAndValue
bool LLParser::parseInvoke(Instruction *&Inst, PerFunctionState &PFS) {
LocTy CallLoc = Lex.getLoc();
- AttrBuilder RetAttrs, FnAttrs;
+ AttrBuilder RetAttrs(M->getContext()), FnAttrs(M->getContext());
std::vector<unsigned> FwdRefAttrGrps;
LocTy NoBuiltinLoc;
unsigned CC;
@@ -6558,7 +6569,7 @@ bool LLParser::parseUnaryOp(Instruction *&Inst, PerFunctionState &PFS,
/// '[' LabelList ']'
bool LLParser::parseCallBr(Instruction *&Inst, PerFunctionState &PFS) {
LocTy CallLoc = Lex.getLoc();
- AttrBuilder RetAttrs, FnAttrs;
+ AttrBuilder RetAttrs(M->getContext()), FnAttrs(M->getContext());
std::vector<unsigned> FwdRefAttrGrps;
LocTy NoBuiltinLoc;
unsigned CC;
@@ -6975,7 +6986,7 @@ bool LLParser::parseFreeze(Instruction *&Inst, PerFunctionState &PFS) {
/// OptionalAttrs Type Value ParameterList OptionalAttrs
bool LLParser::parseCall(Instruction *&Inst, PerFunctionState &PFS,
CallInst::TailCallKind TCK) {
- AttrBuilder RetAttrs, FnAttrs;
+ AttrBuilder RetAttrs(M->getContext()), FnAttrs(M->getContext());
std::vector<unsigned> FwdRefAttrGrps;
LocTy BuiltinLoc;
unsigned CallAddrSpace;
@@ -7196,7 +7207,7 @@ int LLParser::parseLoad(Instruction *&Inst, PerFunctionState &PFS) {
ExplicitTypeLoc,
typeComparisonErrorMessage(
"explicit pointee type doesn't match operand's pointee type", Ty,
- cast<PointerType>(Val->getType())->getElementType()));
+ Val->getType()->getNonOpaquePointerElementType()));
}
SmallPtrSet<Type *, 4> Visited;
if (!Alignment && !Ty->isSized(&Visited))
@@ -7456,7 +7467,7 @@ int LLParser::parseGetElementPtr(Instruction *&Inst, PerFunctionState &PFS) {
ExplicitTypeLoc,
typeComparisonErrorMessage(
"explicit pointee type doesn't match operand's pointee type", Ty,
- BasePointerType->getElementType()));
+ BasePointerType->getNonOpaquePointerElementType()));
}
SmallVector<Value*, 16> Indices;
diff --git a/contrib/llvm-project/llvm/lib/BinaryFormat/AMDGPUMetadataVerifier.cpp b/contrib/llvm-project/llvm/lib/BinaryFormat/AMDGPUMetadataVerifier.cpp
index 284e469a1d2f..99d2c8221281 100644
--- a/contrib/llvm-project/llvm/lib/BinaryFormat/AMDGPUMetadataVerifier.cpp
+++ b/contrib/llvm-project/llvm/lib/BinaryFormat/AMDGPUMetadataVerifier.cpp
@@ -12,8 +12,14 @@
//===----------------------------------------------------------------------===//
#include "llvm/BinaryFormat/AMDGPUMetadataVerifier.h"
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Support/AMDGPUMetadata.h"
+#include "llvm/BinaryFormat/MsgPackDocument.h"
+
+#include <map>
+#include <utility>
namespace llvm {
namespace AMDGPU {
diff --git a/contrib/llvm-project/llvm/lib/BinaryFormat/ELF.cpp b/contrib/llvm-project/llvm/lib/BinaryFormat/ELF.cpp
index 2ede63f464d3..e2e601b6d90f 100644
--- a/contrib/llvm-project/llvm/lib/BinaryFormat/ELF.cpp
+++ b/contrib/llvm-project/llvm/lib/BinaryFormat/ELF.cpp
@@ -7,9 +7,8 @@
//===----------------------------------------------------------------------===//
#include "llvm/BinaryFormat/ELF.h"
-#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Support/Error.h"
using namespace llvm;
using namespace ELF;
diff --git a/contrib/llvm-project/llvm/lib/BinaryFormat/Magic.cpp b/contrib/llvm-project/llvm/lib/BinaryFormat/Magic.cpp
index 8c7f7b7043a0..044e4840cb3b 100644
--- a/contrib/llvm-project/llvm/lib/BinaryFormat/Magic.cpp
+++ b/contrib/llvm-project/llvm/lib/BinaryFormat/Magic.cpp
@@ -10,10 +10,8 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/COFF.h"
-#include "llvm/BinaryFormat/ELF.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Support/Endian.h"
-#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
#if !defined(_MSC_VER) && !defined(__MINGW32__)
@@ -88,7 +86,10 @@ file_magic llvm::identify_magic(StringRef Magic) {
if (startswith(Magic, "!<arch>\n") || startswith(Magic, "!<thin>\n"))
return file_magic::archive;
break;
-
+ case '<':
+ if (startswith(Magic, "<bigaf>\n"))
+ return file_magic::archive;
+ break;
case '\177':
if (startswith(Magic, "\177ELF") && Magic.size() >= 18) {
bool Data2MSB = Magic[5] == 2;
diff --git a/contrib/llvm-project/llvm/lib/Bitcode/Reader/BitcodeAnalyzer.cpp b/contrib/llvm-project/llvm/lib/Bitcode/Reader/BitcodeAnalyzer.cpp
index a36b256c29b6..ffef35299981 100644
--- a/contrib/llvm-project/llvm/lib/Bitcode/Reader/BitcodeAnalyzer.cpp
+++ b/contrib/llvm-project/llvm/lib/Bitcode/Reader/BitcodeAnalyzer.cpp
@@ -781,7 +781,7 @@ Error BitcodeAnalyzer::parseBlock(unsigned BlockID, unsigned IndentLevel,
uint64_t MetadataIndexOffset = 0;
// Read all the records for this block.
- while (1) {
+ while (true) {
if (Stream.AtEndOfStream())
return reportError("Premature end of bitstream");
diff --git a/contrib/llvm-project/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/contrib/llvm-project/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index f5a878f8788a..720ab560f988 100644
--- a/contrib/llvm-project/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/contrib/llvm-project/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -1349,7 +1349,7 @@ Error BitcodeReader::parseAttributeBlock() {
return error("Invalid record");
for (unsigned i = 0, e = Record.size(); i != e; i += 2) {
- AttrBuilder B;
+ AttrBuilder B(Context);
decodeLLVMAttributesForBitcode(B, Record[i+1]);
Attrs.push_back(AttributeList::get(Context, Record[i], B));
}
@@ -1591,7 +1591,7 @@ Error BitcodeReader::parseAttributeGroupBlock() {
uint64_t GrpID = Record[0];
uint64_t Idx = Record[1]; // Index of the object this attribute refers to.
- AttrBuilder B;
+ AttrBuilder B(Context);
for (unsigned i = 2, e = Record.size(); i != e; ++i) {
if (Record[i] == 0) { // Enum attribute
Attribute::AttrKind Kind;
@@ -2702,7 +2702,7 @@ Error BitcodeReader::parseConstants() {
PointerType *OrigPtrTy = cast<PointerType>(Elt0FullTy->getScalarType());
if (!PointeeType)
- PointeeType = OrigPtrTy->getElementType();
+ PointeeType = OrigPtrTy->getPointerElementType();
else if (!OrigPtrTy->isOpaqueOrPointeeTypeMatches(PointeeType))
return error("Explicit gep operator type does not match pointee type "
"of pointer operand");
@@ -2824,9 +2824,9 @@ Error BitcodeReader::parseConstants() {
for (unsigned i = 0; i != ConstStrSize; ++i)
ConstrStr += (char)Record[3+AsmStrSize+i];
UpgradeInlineAsmString(&AsmStr);
- V = InlineAsm::get(
- cast<FunctionType>(cast<PointerType>(CurTy)->getElementType()),
- AsmStr, ConstrStr, HasSideEffects, IsAlignStack);
+ // FIXME: support upgrading in opaque pointers mode.
+ V = InlineAsm::get(cast<FunctionType>(CurTy->getPointerElementType()),
+ AsmStr, ConstrStr, HasSideEffects, IsAlignStack);
break;
}
// This version adds support for the asm dialect keywords (e.g.,
@@ -2850,37 +2850,74 @@ Error BitcodeReader::parseConstants() {
for (unsigned i = 0; i != ConstStrSize; ++i)
ConstrStr += (char)Record[3+AsmStrSize+i];
UpgradeInlineAsmString(&AsmStr);
- V = InlineAsm::get(
- cast<FunctionType>(cast<PointerType>(CurTy)->getElementType()),
- AsmStr, ConstrStr, HasSideEffects, IsAlignStack,
- InlineAsm::AsmDialect(AsmDialect));
+ // FIXME: support upgrading in opaque pointers mode.
+ V = InlineAsm::get(cast<FunctionType>(CurTy->getPointerElementType()),
+ AsmStr, ConstrStr, HasSideEffects, IsAlignStack,
+ InlineAsm::AsmDialect(AsmDialect));
break;
}
// This version adds support for the unwind keyword.
- case bitc::CST_CODE_INLINEASM: {
+ case bitc::CST_CODE_INLINEASM_OLD3: {
if (Record.size() < 2)
return error("Invalid record");
+ unsigned OpNum = 0;
std::string AsmStr, ConstrStr;
- bool HasSideEffects = Record[0] & 1;
- bool IsAlignStack = (Record[0] >> 1) & 1;
- unsigned AsmDialect = (Record[0] >> 2) & 1;
- bool CanThrow = (Record[0] >> 3) & 1;
- unsigned AsmStrSize = Record[1];
- if (2 + AsmStrSize >= Record.size())
+ bool HasSideEffects = Record[OpNum] & 1;
+ bool IsAlignStack = (Record[OpNum] >> 1) & 1;
+ unsigned AsmDialect = (Record[OpNum] >> 2) & 1;
+ bool CanThrow = (Record[OpNum] >> 3) & 1;
+ ++OpNum;
+ unsigned AsmStrSize = Record[OpNum];
+ ++OpNum;
+ if (OpNum + AsmStrSize >= Record.size())
return error("Invalid record");
- unsigned ConstStrSize = Record[2 + AsmStrSize];
- if (3 + AsmStrSize + ConstStrSize > Record.size())
+ unsigned ConstStrSize = Record[OpNum + AsmStrSize];
+ if (OpNum + 1 + AsmStrSize + ConstStrSize > Record.size())
return error("Invalid record");
for (unsigned i = 0; i != AsmStrSize; ++i)
- AsmStr += (char)Record[2 + i];
+ AsmStr += (char)Record[OpNum + i];
+ ++OpNum;
for (unsigned i = 0; i != ConstStrSize; ++i)
- ConstrStr += (char)Record[3 + AsmStrSize + i];
+ ConstrStr += (char)Record[OpNum + AsmStrSize + i];
UpgradeInlineAsmString(&AsmStr);
- V = InlineAsm::get(
- cast<FunctionType>(cast<PointerType>(CurTy)->getElementType()),
- AsmStr, ConstrStr, HasSideEffects, IsAlignStack,
- InlineAsm::AsmDialect(AsmDialect), CanThrow);
+ // FIXME: support upgrading in opaque pointers mode.
+ V = InlineAsm::get(cast<FunctionType>(CurTy->getPointerElementType()),
+ AsmStr, ConstrStr, HasSideEffects, IsAlignStack,
+ InlineAsm::AsmDialect(AsmDialect), CanThrow);
+ break;
+ }
+ // This version adds explicit function type.
+ case bitc::CST_CODE_INLINEASM: {
+ if (Record.size() < 3)
+ return error("Invalid record");
+ unsigned OpNum = 0;
+ auto *FnTy = dyn_cast_or_null<FunctionType>(getTypeByID(Record[OpNum]));
+ ++OpNum;
+ if (!FnTy)
+ return error("Invalid record");
+ std::string AsmStr, ConstrStr;
+ bool HasSideEffects = Record[OpNum] & 1;
+ bool IsAlignStack = (Record[OpNum] >> 1) & 1;
+ unsigned AsmDialect = (Record[OpNum] >> 2) & 1;
+ bool CanThrow = (Record[OpNum] >> 3) & 1;
+ ++OpNum;
+ unsigned AsmStrSize = Record[OpNum];
+ ++OpNum;
+ if (OpNum + AsmStrSize >= Record.size())
+ return error("Invalid record");
+ unsigned ConstStrSize = Record[OpNum + AsmStrSize];
+ if (OpNum + 1 + AsmStrSize + ConstStrSize > Record.size())
+ return error("Invalid record");
+
+ for (unsigned i = 0; i != AsmStrSize; ++i)
+ AsmStr += (char)Record[OpNum + i];
+ ++OpNum;
+ for (unsigned i = 0; i != ConstStrSize; ++i)
+ ConstrStr += (char)Record[OpNum + AsmStrSize + i];
+ UpgradeInlineAsmString(&AsmStr);
+ V = InlineAsm::get(FnTy, AsmStr, ConstrStr, HasSideEffects, IsAlignStack,
+ InlineAsm::AsmDialect(AsmDialect), CanThrow);
break;
}
case bitc::CST_CODE_BLOCKADDRESS:{
@@ -3242,7 +3279,7 @@ Error BitcodeReader::parseGlobalVarRecord(ArrayRef<uint64_t> Record) {
if (!Ty->isPointerTy())
return error("Invalid type for value");
AddressSpace = cast<PointerType>(Ty)->getAddressSpace();
- Ty = cast<PointerType>(Ty)->getElementType();
+ Ty = Ty->getPointerElementType();
}
uint64_t RawLinkage = Record[3];
@@ -3335,7 +3372,7 @@ Error BitcodeReader::parseFunctionRecord(ArrayRef<uint64_t> Record) {
if (!FTy)
return error("Invalid record");
if (auto *PTy = dyn_cast<PointerType>(FTy))
- FTy = PTy->getElementType();
+ FTy = PTy->getPointerElementType();
if (!isa<FunctionType>(FTy))
return error("Invalid type for value");
@@ -3376,7 +3413,7 @@ Error BitcodeReader::parseFunctionRecord(ArrayRef<uint64_t> Record) {
Func->removeParamAttr(i, Kind);
Type *PTy = cast<FunctionType>(FTy)->getParamType(i);
- Type *PtrEltTy = cast<PointerType>(PTy)->getElementType();
+ Type *PtrEltTy = PTy->getPointerElementType();
Attribute NewAttr;
switch (Kind) {
case Attribute::ByVal:
@@ -3499,7 +3536,7 @@ Error BitcodeReader::parseGlobalIndirectSymbolRecord(
auto *PTy = dyn_cast<PointerType>(Ty);
if (!PTy)
return error("Invalid type for value");
- Ty = PTy->getElementType();
+ Ty = PTy->getPointerElementType();
AddrSpace = PTy->getAddressSpace();
} else {
AddrSpace = Record[OpNum++];
@@ -3795,6 +3832,11 @@ Error BitcodeReader::parseModule(uint64_t ResumeBit,
if (Error Err = parseComdatRecord(Record))
return Err;
break;
+ // FIXME: BitcodeReader should handle {GLOBALVAR, FUNCTION, ALIAS, IFUNC}
+ // written by ThinLinkBitcodeWriter. See
+ // `ThinLinkBitcodeWriter::writeSimplifiedModuleInfo` for the format of each
+ // record
+ // (https://github.com/llvm/llvm-project/blob/b6a93967d9c11e79802b5e75cec1584d6c8aa472/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp#L4714)
case bitc::MODULE_CODE_GLOBALVAR:
if (Error Err = parseGlobalVarRecord(Record))
return Err;
@@ -3857,12 +3899,13 @@ void BitcodeReader::propagateAttributeTypes(CallBase *CB,
for (unsigned i = 0; i != CB->arg_size(); ++i) {
for (Attribute::AttrKind Kind : {Attribute::ByVal, Attribute::StructRet,
Attribute::InAlloca}) {
- if (!CB->paramHasAttr(i, Kind))
+ if (!CB->paramHasAttr(i, Kind) ||
+ CB->getParamAttr(i, Kind).getValueAsType())
continue;
CB->removeParamAttr(i, Kind);
- Type *PtrEltTy = cast<PointerType>(ArgsTys[i])->getElementType();
+ Type *PtrEltTy = ArgsTys[i]->getPointerElementType();
Attribute NewAttr;
switch (Kind) {
case Attribute::ByVal:
@@ -3882,11 +3925,28 @@ void BitcodeReader::propagateAttributeTypes(CallBase *CB,
}
}
+ if (CB->isInlineAsm()) {
+ const InlineAsm *IA = cast<InlineAsm>(CB->getCalledOperand());
+ unsigned ArgNo = 0;
+ for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
+ if (!CI.hasArg())
+ continue;
+
+ if (CI.isIndirect && !CB->getAttributes().getParamElementType(ArgNo)) {
+ Type *ElemTy = ArgsTys[ArgNo]->getPointerElementType();
+ CB->addParamAttr(
+ ArgNo, Attribute::get(Context, Attribute::ElementType, ElemTy));
+ }
+
+ ArgNo++;
+ }
+ }
+
switch (CB->getIntrinsicID()) {
case Intrinsic::preserve_array_access_index:
case Intrinsic::preserve_struct_access_index:
if (!CB->getAttributes().getParamElementType(0)) {
- Type *ElTy = cast<PointerType>(ArgsTys[0])->getElementType();
+ Type *ElTy = ArgsTys[0]->getPointerElementType();
Attribute NewAttr = Attribute::get(Context, Attribute::ElementType, ElTy);
CB->addParamAttr(0, NewAttr);
}
@@ -4176,8 +4236,7 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
return error("Invalid record");
if (!Ty) {
- Ty = cast<PointerType>(BasePtr->getType()->getScalarType())
- ->getElementType();
+ Ty = BasePtr->getType()->getScalarType()->getPointerElementType();
} else if (!cast<PointerType>(BasePtr->getType()->getScalarType())
->isOpaqueOrPointeeTypeMatches(Ty)) {
return error(
@@ -4693,8 +4752,8 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
if (!CalleeTy)
return error("Callee is not a pointer");
if (!FTy) {
- FTy = dyn_cast<FunctionType>(
- cast<PointerType>(Callee->getType())->getElementType());
+ FTy =
+ dyn_cast<FunctionType>(Callee->getType()->getPointerElementType());
if (!FTy)
return error("Callee is not of pointer to function type");
} else if (!CalleeTy->isOpaqueOrPointeeTypeMatches(FTy))
@@ -4774,26 +4833,29 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
if (!OpTy)
return error("Callee is not a pointer type");
if (!FTy) {
- FTy = dyn_cast<FunctionType>(
- cast<PointerType>(Callee->getType())->getElementType());
+ FTy =
+ dyn_cast<FunctionType>(Callee->getType()->getPointerElementType());
if (!FTy)
return error("Callee is not of pointer to function type");
- } else if (cast<PointerType>(Callee->getType())->getElementType() != FTy)
+ } else if (!OpTy->isOpaqueOrPointeeTypeMatches(FTy))
return error("Explicit call type does not match pointee type of "
"callee operand");
if (Record.size() < FTy->getNumParams() + OpNum)
return error("Insufficient operands to call");
SmallVector<Value*, 16> Args;
+ SmallVector<Type *, 16> ArgsTys;
// Read the fixed params.
for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i, ++OpNum) {
+ Value *Arg;
if (FTy->getParamType(i)->isLabelTy())
- Args.push_back(getBasicBlock(Record[OpNum]));
+ Arg = getBasicBlock(Record[OpNum]);
else
- Args.push_back(getValue(Record, OpNum, NextValueNo,
- FTy->getParamType(i)));
- if (!Args.back())
+ Arg = getValue(Record, OpNum, NextValueNo, FTy->getParamType(i));
+ if (!Arg)
return error("Invalid record");
+ Args.push_back(Arg);
+ ArgsTys.push_back(Arg->getType());
}
// Read type/value pairs for varargs params.
@@ -4806,6 +4868,7 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
if (getValueTypePair(Record, OpNum, NextValueNo, Op))
return error("Invalid record");
Args.push_back(Op);
+ ArgsTys.push_back(Op->getType());
}
}
@@ -4816,6 +4879,7 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
cast<CallBrInst>(I)->setCallingConv(
static_cast<CallingConv::ID>((0x7ff & CCInfo) >> bitc::CALL_CCONV));
cast<CallBrInst>(I)->setAttributes(PAL);
+ propagateAttributeTypes(cast<CallBase>(I), ArgsTys);
break;
}
case bitc::FUNC_CODE_INST_UNREACHABLE: // UNREACHABLE
@@ -4932,7 +4996,7 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
auto *PTy = dyn_cast_or_null<PointerType>(Ty);
if (!PTy)
return error("Old-style alloca with a non-pointer type");
- Ty = PTy->getElementType();
+ Ty = PTy->getPointerElementType();
}
Type *OpTy = getTypeByID(Record[1]);
Value *Size = getFnValueByID(Record[2], OpTy);
@@ -4977,7 +5041,7 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
if (OpNum + 3 == Record.size()) {
Ty = getTypeByID(Record[OpNum++]);
} else {
- Ty = cast<PointerType>(Op->getType())->getElementType();
+ Ty = Op->getType()->getPointerElementType();
}
if (Error Err = typeCheckLoadStoreInst(Ty, Op->getType()))
@@ -5010,7 +5074,7 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
if (OpNum + 5 == Record.size()) {
Ty = getTypeByID(Record[OpNum++]);
} else {
- Ty = cast<PointerType>(Op->getType())->getElementType();
+ Ty = Op->getType()->getPointerElementType();
}
if (Error Err = typeCheckLoadStoreInst(Ty, Op->getType()))
@@ -5042,8 +5106,7 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
(BitCode == bitc::FUNC_CODE_INST_STORE
? getValueTypePair(Record, OpNum, NextValueNo, Val)
: popValue(Record, OpNum, NextValueNo,
- cast<PointerType>(Ptr->getType())->getElementType(),
- Val)) ||
+ Ptr->getType()->getPointerElementType(), Val)) ||
OpNum + 2 != Record.size())
return error("Invalid record");
@@ -5071,8 +5134,7 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
(BitCode == bitc::FUNC_CODE_INST_STOREATOMIC
? getValueTypePair(Record, OpNum, NextValueNo, Val)
: popValue(Record, OpNum, NextValueNo,
- cast<PointerType>(Ptr->getType())->getElementType(),
- Val)) ||
+ Ptr->getType()->getPointerElementType(), Val)) ||
OpNum + 4 != Record.size())
return error("Invalid record");
@@ -5323,8 +5385,8 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
if (!OpTy)
return error("Callee is not a pointer type");
if (!FTy) {
- FTy = dyn_cast<FunctionType>(
- cast<PointerType>(Callee->getType())->getElementType());
+ FTy =
+ dyn_cast<FunctionType>(Callee->getType()->getPointerElementType());
if (!FTy)
return error("Callee is not of pointer to function type");
} else if (!OpTy->isOpaqueOrPointeeTypeMatches(FTy))
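
The BitcodeReader hunks above consistently replace cast<PointerType>(V->getType())->getElementType() with V->getType()->getPointerElementType(), and check explicit types with isOpaqueOrPointeeTypeMatches(), so the reader keeps working once pointers may be opaque. A minimal sketch of that callee-type resolution pattern (an illustrative helper, not code from this patch):

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"

// Resolve the function type of a call: prefer the explicit type carried by
// the bitcode record; otherwise fall back to the pointee type, which only
// works for non-opaque pointers.
static llvm::FunctionType *resolveCalleeType(llvm::Value *Callee,
                                             llvm::FunctionType *Explicit) {
  auto *PTy = llvm::dyn_cast<llvm::PointerType>(Callee->getType());
  if (!PTy)
    return nullptr;                       // "Callee is not a pointer"
  if (!Explicit)
    return llvm::dyn_cast<llvm::FunctionType>(PTy->getPointerElementType());
  // Opaque pointers match any explicit type; typed pointers must agree.
  return PTy->isOpaqueOrPointeeTypeMatches(Explicit) ? Explicit : nullptr;
}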
diff --git a/contrib/llvm-project/llvm/lib/Bitcode/Reader/MetadataLoader.cpp b/contrib/llvm-project/llvm/lib/Bitcode/Reader/MetadataLoader.cpp
index 60530d7f7a00..0f4111514057 100644
--- a/contrib/llvm-project/llvm/lib/Bitcode/Reader/MetadataLoader.cpp
+++ b/contrib/llvm-project/llvm/lib/Bitcode/Reader/MetadataLoader.cpp
@@ -1105,7 +1105,7 @@ void MetadataLoader::MetadataLoaderImpl::lazyLoadOneMetadata(
void MetadataLoader::MetadataLoaderImpl::resolveForwardRefsAndPlaceholders(
PlaceholderQueue &Placeholders) {
DenseSet<unsigned> Temporaries;
- while (1) {
+ while (true) {
// Populate Temporaries with the placeholders that haven't been loaded yet.
Placeholders.getTemporaries(MetadataList, Temporaries);
@@ -1423,15 +1423,21 @@ Error MetadataLoader::MetadataLoaderImpl::parseOneMetadata(
break;
}
case bitc::METADATA_STRING_TYPE: {
- if (Record.size() != 8)
+ if (Record.size() > 9 || Record.size() < 8)
return error("Invalid record");
IsDistinct = Record[0];
+ bool SizeIs8 = Record.size() == 8;
+ // StringLocationExp (i.e. Record[5]) is added at a later time
+ // than the other fields. The code here enables backward compatibility.
+ Metadata *StringLocationExp = SizeIs8 ? nullptr : getMDOrNull(Record[5]);
+ unsigned Offset = SizeIs8 ? 5 : 6;
MetadataList.assignValue(
GET_OR_DISTINCT(DIStringType,
(Context, Record[1], getMDString(Record[2]),
getMDOrNull(Record[3]), getMDOrNull(Record[4]),
- Record[5], Record[6], Record[7])),
+ StringLocationExp, Record[Offset], Record[Offset + 1],
+ Record[Offset + 2])),
NextMetadataNo);
NextMetadataNo++;
break;
@@ -1632,7 +1638,7 @@ Error MetadataLoader::MetadataLoaderImpl::parseOneMetadata(
Record.size() <= 16 ? true : Record[16],
Record.size() <= 17 ? false : Record[17],
Record.size() <= 18 ? 0 : Record[18],
- Record.size() <= 19 ? 0 : Record[19],
+ Record.size() <= 19 ? false : Record[19],
Record.size() <= 20 ? nullptr : getMDString(Record[20]),
Record.size() <= 21 ? nullptr : getMDString(Record[21]));
@@ -1675,7 +1681,7 @@ Error MetadataLoader::MetadataLoaderImpl::parseOneMetadata(
SPFlags = DISubprogram::toSPFlags(
/*IsLocalToUnit=*/Record[7], /*IsDefinition=*/Record[8],
/*IsOptimized=*/Record[14], /*Virtuality=*/Record[11],
- /*DIFlagMainSubprogram=*/HasOldMainSubprogramFlag);
+ /*IsMainSubprogram=*/HasOldMainSubprogramFlag);
// All definitions should be distinct.
IsDistinct = (Record[0] & 1) || (SPFlags & DISubprogram::SPFlagDefinition);
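
The METADATA_STRING_TYPE hunk above accepts both the old 8-field record and the new 9-field one that carries StringLocationExp at index 5, shifting the trailing size/align/encoding fields by one. A self-contained sketch of that offset arithmetic (the field names here are illustrative, not the DIStringType API):

#include <cstdint>
#include <vector>

struct StringTypeFields {
  bool HasLocationExp = false; // present only in 9-field records
  uint64_t LocationExpId = 0;  // Record[5] when present
  uint64_t SizeInBits = 0, AlignInBits = 0, Encoding = 0;
};

static bool parseStringTypeRecord(const std::vector<uint64_t> &Record,
                                  StringTypeFields &Out) {
  if (Record.size() < 8 || Record.size() > 9)
    return false;                              // invalid record
  const bool SizeIs8 = Record.size() == 8;
  if (!SizeIs8) {
    Out.HasLocationExp = true;
    Out.LocationExpId = Record[5];
  }
  const unsigned Offset = SizeIs8 ? 5 : 6;     // trailing fields shift by one
  Out.SizeInBits = Record[Offset];
  Out.AlignInBits = Record[Offset + 1];
  Out.Encoding = Record[Offset + 2];
  return true;
}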
diff --git a/contrib/llvm-project/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/contrib/llvm-project/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
index dc06bc10cf95..eb4e09ea3a26 100644
--- a/contrib/llvm-project/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/contrib/llvm-project/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -948,7 +948,7 @@ void ModuleBitcodeWriter::writeTypeTable() {
} else {
// POINTER: [pointee type, address space]
Code = bitc::TYPE_CODE_POINTER;
- TypeVals.push_back(VE.getTypeID(PTy->getElementType()));
+ TypeVals.push_back(VE.getTypeID(PTy->getNonOpaquePointerElementType()));
TypeVals.push_back(AddressSpace);
if (AddressSpace == 0)
AbbrevToUse = PtrAbbrev;
@@ -1657,6 +1657,7 @@ void ModuleBitcodeWriter::writeDIStringType(const DIStringType *N,
Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
Record.push_back(VE.getMetadataOrNullID(N->getStringLength()));
Record.push_back(VE.getMetadataOrNullID(N->getStringLengthExp()));
+ Record.push_back(VE.getMetadataOrNullID(N->getStringLocationExp()));
Record.push_back(N->getSizeInBits());
Record.push_back(N->getAlignInBits());
Record.push_back(N->getEncoding());
@@ -2458,6 +2459,7 @@ void ModuleBitcodeWriter::writeConstants(unsigned FirstVal, unsigned LastVal,
}
if (const InlineAsm *IA = dyn_cast<InlineAsm>(V)) {
+ Record.push_back(VE.getTypeID(IA->getFunctionType()));
Record.push_back(
unsigned(IA->hasSideEffects()) | unsigned(IA->isAlignStack()) << 1 |
unsigned(IA->getDialect() & 1) << 2 | unsigned(IA->canThrow()) << 3);
diff --git a/contrib/llvm-project/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp b/contrib/llvm-project/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
index df4f1a1873d7..01f7e85bd60e 100644
--- a/contrib/llvm-project/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
+++ b/contrib/llvm-project/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
@@ -386,8 +386,10 @@ ValueEnumerator::ValueEnumerator(const Module &M,
}
// Enumerate the ifuncs.
- for (const GlobalIFunc &GIF : M.ifuncs())
+ for (const GlobalIFunc &GIF : M.ifuncs()) {
EnumerateValue(&GIF);
+ EnumerateType(GIF.getValueType());
+ }
// Remember what is the cutoff between globalvalue's and other constants.
unsigned FirstConstant = Values.size();
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp b/contrib/llvm-project/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
index 5c64622c7245..bb71d72256d8 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
@@ -120,8 +120,7 @@ bool AggressiveAntiDepState::IsLive(unsigned Reg) {
AggressiveAntiDepBreaker::AggressiveAntiDepBreaker(
MachineFunction &MFi, const RegisterClassInfo &RCI,
TargetSubtargetInfo::RegClassVector &CriticalPathRCs)
- : AntiDepBreaker(), MF(MFi), MRI(MF.getRegInfo()),
- TII(MF.getSubtarget().getInstrInfo()),
+ : MF(MFi), MRI(MF.getRegInfo()), TII(MF.getSubtarget().getInstrInfo()),
TRI(MF.getSubtarget().getRegisterInfo()), RegClassInfo(RCI) {
/* Collect a bitset of all registers that are only broken if they
are on the critical path. */
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/Analysis.cpp b/contrib/llvm-project/llvm/lib/CodeGen/Analysis.cpp
index 7e68e5e22879..e8fef505e43d 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/Analysis.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/Analysis.cpp
@@ -577,9 +577,9 @@ bool llvm::attributesPermitTailCall(const Function *F, const Instruction *I,
bool &ADS = AllowDifferingSizes ? *AllowDifferingSizes : DummyADS;
ADS = true;
- AttrBuilder CallerAttrs(F->getAttributes(), AttributeList::ReturnIndex);
- AttrBuilder CalleeAttrs(cast<CallInst>(I)->getAttributes(),
- AttributeList::ReturnIndex);
+ AttrBuilder CallerAttrs(F->getContext(), F->getAttributes().getRetAttrs());
+ AttrBuilder CalleeAttrs(F->getContext(),
+ cast<CallInst>(I)->getAttributes().getRetAttrs());
// Following attributes are completely benign as far as calling convention
// goes, they shouldn't affect whether the call is a tail call.
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/AIXException.cpp b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/AIXException.cpp
index 964cef75d164..03e63321e3c4 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/AIXException.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/AIXException.cpp
@@ -23,6 +23,8 @@ namespace llvm {
AIXException::AIXException(AsmPrinter *A) : DwarfCFIExceptionBase(A) {}
+void AIXException::markFunctionEnd() { endFragment(); }
+
void AIXException::emitExceptionInfoTable(const MCSymbol *LSDA,
const MCSymbol *PerSym) {
// Generate EH Info Table.
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 533f20535655..4f3f798fe6f8 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -247,6 +247,11 @@ void AsmPrinter::emitInitialRawDwarfLocDirective(const MachineFunction &MF) {
if (DD) {
assert(OutStreamer->hasRawTextSupport() &&
"Expected assembly output mode.");
+ // This is NVPTX specific and it's unclear why.
+ // PR51079: If we have code without debug information we need to give up.
+ DISubprogram *MFSP = MF.getFunction().getSubprogram();
+ if (!MFSP)
+ return;
(void)DD->emitInitialLocDirective(MF, /*CUID=*/0);
}
}
@@ -2477,7 +2482,8 @@ void AsmPrinter::emitLabelPlusOffset(const MCSymbol *Label, uint64_t Offset,
// two boundary. If a global value is specified, and if that global has
// an explicit alignment requested, it will override the alignment request
// if required for correctness.
-void AsmPrinter::emitAlignment(Align Alignment, const GlobalObject *GV) const {
+void AsmPrinter::emitAlignment(Align Alignment, const GlobalObject *GV,
+ unsigned MaxBytesToEmit) const {
if (GV)
Alignment = getGVAlignment(GV, GV->getParent()->getDataLayout(), Alignment);
@@ -2490,9 +2496,9 @@ void AsmPrinter::emitAlignment(Align Alignment, const GlobalObject *GV) const {
STI = &getSubtargetInfo();
else
STI = TM.getMCSubtargetInfo();
- OutStreamer->emitCodeAlignment(Alignment.value(), STI);
+ OutStreamer->emitCodeAlignment(Alignment.value(), STI, MaxBytesToEmit);
} else
- OutStreamer->emitValueToAlignment(Alignment.value());
+ OutStreamer->emitValueToAlignment(Alignment.value(), 0, 1, MaxBytesToEmit);
}
//===----------------------------------------------------------------------===//
@@ -3286,7 +3292,7 @@ void AsmPrinter::emitBasicBlockStart(const MachineBasicBlock &MBB) {
// Emit an alignment directive for this block, if needed.
const Align Alignment = MBB.getAlignment();
if (Alignment != Align(1))
- emitAlignment(Alignment);
+ emitAlignment(Alignment, nullptr, MBB.getMaxBytesForAlignment());
// Switch to a new section if this basic block must begin a section. The
// entry block is always placed in the function section and is handled
@@ -3648,6 +3654,12 @@ unsigned int AsmPrinter::getDwarfOffsetByteSize() const {
OutStreamer->getContext().getDwarfFormat());
}
+dwarf::FormParams AsmPrinter::getDwarfFormParams() const {
+ return {getDwarfVersion(), uint8_t(getPointerSize()),
+ OutStreamer->getContext().getDwarfFormat(),
+ MAI->doesDwarfUseRelocationsAcrossSections()};
+}
+
unsigned int AsmPrinter::getUnitLengthFieldByteSize() const {
return dwarf::getUnitLengthFieldByteSize(
OutStreamer->getContext().getDwarfFormat());
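
The emitAlignment() change above threads a MaxBytesToEmit bound through to emitCodeAlignment/emitValueToAlignment, which corresponds to the optional "maximum bytes to skip" operand of .p2align: when reaching the alignment would cost more padding than the bound allows, no padding is emitted. A rough sketch of that rule (not the MCStreamer implementation; 0 is treated as "no bound"):

#include <cstdint>

static unsigned alignmentPadding(uint64_t Offset, unsigned Align,
                                 unsigned MaxBytesToEmit) {
  unsigned Pad = static_cast<unsigned>((Align - Offset % Align) % Align);
  if (MaxBytesToEmit && Pad > MaxBytesToEmit)
    return 0; // too expensive: skip the alignment entirely
  return Pad;
}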
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/ByteStreamer.h b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/ByteStreamer.h
index 5e7db1f2f76c..bd2c60eadd61 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/ByteStreamer.h
+++ b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/ByteStreamer.h
@@ -33,6 +33,7 @@ class ByteStreamer {
virtual void emitSLEB128(uint64_t DWord, const Twine &Comment = "") = 0;
virtual void emitULEB128(uint64_t DWord, const Twine &Comment = "",
unsigned PadTo = 0) = 0;
+ virtual unsigned emitDIERef(const DIE &D) = 0;
};
class APByteStreamer final : public ByteStreamer {
@@ -54,15 +55,24 @@ public:
AP.OutStreamer->AddComment(Comment);
AP.emitULEB128(DWord, nullptr, PadTo);
}
+ unsigned emitDIERef(const DIE &D) override {
+ uint64_t Offset = D.getOffset();
+ static constexpr unsigned ULEB128PadSize = 4;
+ assert(Offset < (1ULL << (ULEB128PadSize * 7)) && "Offset wont fit");
+ emitULEB128(Offset, "", ULEB128PadSize);
+ // Return how many comments to skip in DwarfDebug::emitDebugLocEntry to keep
+ // comments aligned with debug loc entries.
+ return ULEB128PadSize;
+ }
};
class HashingByteStreamer final : public ByteStreamer {
private:
DIEHash &Hash;
public:
- HashingByteStreamer(DIEHash &H) : Hash(H) {}
- void emitInt8(uint8_t Byte, const Twine &Comment) override {
- Hash.update(Byte);
+ HashingByteStreamer(DIEHash &H) : Hash(H) {}
+ void emitInt8(uint8_t Byte, const Twine &Comment) override {
+ Hash.update(Byte);
}
void emitSLEB128(uint64_t DWord, const Twine &Comment) override {
Hash.addSLEB128(DWord);
@@ -71,6 +81,10 @@ class HashingByteStreamer final : public ByteStreamer {
unsigned PadTo) override {
Hash.addULEB128(DWord);
}
+ unsigned emitDIERef(const DIE &D) override {
+ Hash.hashRawTypeReference(D);
+ return 0; // Only used together with the APByteStreamer.
+ }
};
class BufferByteStreamer final : public ByteStreamer {
@@ -115,9 +129,15 @@ public:
// with each other.
for (size_t i = 1; i < Length; ++i)
Comments.push_back("");
-
}
}
+ unsigned emitDIERef(const DIE &D) override {
+ uint64_t Offset = D.getOffset();
+ static constexpr unsigned ULEB128PadSize = 4;
+ assert(Offset < (1ULL << (ULEB128PadSize * 7)) && "Offset wont fit");
+ emitULEB128(Offset, "", ULEB128PadSize);
+ return 0; // Only used together with the APByteStreamer.
+ }
};
}
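
emitDIERef() above writes a DIE offset as a ULEB128 padded to a fixed 4 bytes, so the value can be patched once DIE offsets are finalized; 4 ULEB bytes carry 4 * 7 = 28 payload bits, which is exactly what the assert checks. A small standalone sketch of padded ULEB128 emission (not the AsmPrinter/ByteStreamer code):

#include <cassert>
#include <cstdint>
#include <vector>

// Encode Value as exactly PadTo ULEB128 bytes: 7 payload bits per byte,
// continuation bit set on every byte except the last.
static void emitPaddedULEB128(uint64_t Value, unsigned PadTo,
                              std::vector<uint8_t> &Out) {
  assert(PadTo && PadTo < 10 && Value < (1ULL << (PadTo * 7)) &&
         "offset does not fit in the pad");
  for (unsigned I = 0; I != PadTo; ++I) {
    uint8_t Byte = Value & 0x7f;
    Value >>= 7;
    if (I + 1 != PadTo)
      Byte |= 0x80;
    Out.push_back(Byte);
  }
}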
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
index d621108408f0..52c74713551c 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
@@ -68,6 +68,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/Program.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
@@ -600,6 +601,8 @@ static SourceLanguage MapDWLangToCVLang(unsigned DWLang) {
return SourceLanguage::D;
case dwarf::DW_LANG_Swift:
return SourceLanguage::Swift;
+ case dwarf::DW_LANG_Rust:
+ return SourceLanguage::Rust;
default:
// There's no CodeView representation for this language, and CV doesn't
// have an "unknown" option for the language field, so we'll use MASM,
@@ -843,6 +846,12 @@ void CodeViewDebug::emitCompilerInformation() {
if (MMI->getModule()->getProfileSummary(/*IsCS*/ false) != nullptr) {
Flags |= static_cast<uint32_t>(CompileSym3Flags::PGO);
}
+ using ArchType = llvm::Triple::ArchType;
+ ArchType Arch = Triple(MMI->getModule()->getTargetTriple()).getArch();
+ if (Asm->TM.Options.Hotpatch || Arch == ArchType::thumb ||
+ Arch == ArchType::aarch64) {
+ Flags |= static_cast<uint32_t>(CompileSym3Flags::HotPatch);
+ }
OS.AddComment("Flags and language");
OS.emitInt32(Flags);
@@ -857,8 +866,10 @@ void CodeViewDebug::emitCompilerInformation() {
StringRef CompilerVersion = CU->getProducer();
Version FrontVer = parseVersion(CompilerVersion);
OS.AddComment("Frontend version");
- for (int N : FrontVer.Part)
+ for (int N : FrontVer.Part) {
+ N = std::min<int>(N, std::numeric_limits<uint16_t>::max());
OS.emitInt16(N);
+ }
// Some Microsoft tools, like Binscope, expect a backend version number of at
// least 8.something, so we'll coerce the LLVM version into a form that
@@ -885,6 +896,34 @@ static TypeIndex getStringIdTypeIdx(GlobalTypeTableBuilder &TypeTable,
return TypeTable.writeLeafType(SIR);
}
+static std::string flattenCommandLine(ArrayRef<std::string> Args,
+ StringRef MainFilename) {
+ std::string FlatCmdLine;
+ raw_string_ostream OS(FlatCmdLine);
+ bool PrintedOneArg = false;
+ if (!StringRef(Args[0]).contains("-cc1")) {
+ llvm::sys::printArg(OS, "-cc1", /*Quote=*/true);
+ PrintedOneArg = true;
+ }
+ for (unsigned i = 0; i < Args.size(); i++) {
+ StringRef Arg = Args[i];
+ if (Arg.empty())
+ continue;
+ if (Arg == "-main-file-name" || Arg == "-o") {
+ i++; // Skip this argument and next one.
+ continue;
+ }
+ if (Arg.startswith("-object-file-name") || Arg == MainFilename)
+ continue;
+ if (PrintedOneArg)
+ OS << " ";
+ llvm::sys::printArg(OS, Arg, /*Quote=*/true);
+ PrintedOneArg = true;
+ }
+ OS.flush();
+ return FlatCmdLine;
+}
+
void CodeViewDebug::emitBuildInfo() {
// First, make LF_BUILDINFO. It's a sequence of strings with various bits of
// build info. The known prefix is:
@@ -905,8 +944,16 @@ void CodeViewDebug::emitBuildInfo() {
getStringIdTypeIdx(TypeTable, MainSourceFile->getDirectory());
BuildInfoArgs[BuildInfoRecord::SourceFile] =
getStringIdTypeIdx(TypeTable, MainSourceFile->getFilename());
- // FIXME: Path to compiler and command line. PDB is intentionally blank unless
- // we implement /Zi type servers.
+ // FIXME: PDB is intentionally blank unless we implement /Zi type servers.
+ BuildInfoArgs[BuildInfoRecord::TypeServerPDB] =
+ getStringIdTypeIdx(TypeTable, "");
+ if (Asm->TM.Options.MCOptions.Argv0 != nullptr) {
+ BuildInfoArgs[BuildInfoRecord::BuildTool] =
+ getStringIdTypeIdx(TypeTable, Asm->TM.Options.MCOptions.Argv0);
+ BuildInfoArgs[BuildInfoRecord::CommandLine] = getStringIdTypeIdx(
+ TypeTable, flattenCommandLine(Asm->TM.Options.MCOptions.CommandLineArgs,
+ MainSourceFile->getFilename()));
+ }
BuildInfoRecord BIR(BuildInfoArgs);
TypeIndex BuildInfoIndex = TypeTable.writeLeafType(BIR);
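
To make the LF_BUILDINFO command line above concrete: flattenCommandLine() drops the arguments that vary per object file and keeps the rest, prepending "-cc1" when it is absent. A hypothetical input/output pair (illustrative values only):

// Args:         {"-cc1", "-triple", "x86_64", "-main-file-name", "a.c",
//                "-o", "a.o", "a.c"}
// MainFilename: "a.c"
// Result:       "-cc1 -triple x86_64"
// "-main-file-name" and "-o" are skipped together with their operands,
// "-object-file-name*" and the main file name itself are dropped, and the
// surviving arguments are joined with single spaces via llvm::sys::printArg.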
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DIE.cpp b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DIE.cpp
index 2834d9c3ebbf..1a0256f30d41 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DIE.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DIE.cpp
@@ -274,7 +274,7 @@ LLVM_DUMP_METHOD void DIE::dump() const {
}
#endif
-unsigned DIE::computeOffsetsAndAbbrevs(const AsmPrinter *AP,
+unsigned DIE::computeOffsetsAndAbbrevs(const dwarf::FormParams &FormParams,
DIEAbbrevSet &AbbrevSet,
unsigned CUOffset) {
// Unique the abbreviation and fill in the abbreviation number so this DIE
@@ -289,7 +289,7 @@ unsigned DIE::computeOffsetsAndAbbrevs(const AsmPrinter *AP,
// Add the byte size of all the DIE attribute values.
for (const auto &V : values())
- CUOffset += V.SizeOf(AP);
+ CUOffset += V.sizeOf(FormParams);
// Let the children compute their offsets and abbreviation numbers.
if (hasChildren()) {
@@ -297,7 +297,8 @@ unsigned DIE::computeOffsetsAndAbbrevs(const AsmPrinter *AP,
assert(Abbrev.hasChildren() && "Children flag not set");
for (auto &Child : children())
- CUOffset = Child.computeOffsetsAndAbbrevs(AP, AbbrevSet, CUOffset);
+ CUOffset =
+ Child.computeOffsetsAndAbbrevs(FormParams, AbbrevSet, CUOffset);
// Each child chain is terminated with a zero byte, adjust the offset.
CUOffset += sizeof(int8_t);
@@ -335,13 +336,13 @@ void DIEValue::emitValue(const AsmPrinter *AP) const {
}
}
-unsigned DIEValue::SizeOf(const AsmPrinter *AP) const {
+unsigned DIEValue::sizeOf(const dwarf::FormParams &FormParams) const {
switch (Ty) {
case isNone:
llvm_unreachable("Expected valid DIEValue");
#define HANDLE_DIEVALUE(T) \
case is##T: \
- return getDIE##T().SizeOf(AP, Form);
+ return getDIE##T().sizeOf(FormParams, Form);
#include "llvm/CodeGen/DIEValue.def"
}
llvm_unreachable("Unknown DIE kind");
@@ -407,7 +408,8 @@ void DIEInteger::emitValue(const AsmPrinter *Asm, dwarf::Form Form) const {
case dwarf::DW_FORM_strp_sup:
case dwarf::DW_FORM_addr:
case dwarf::DW_FORM_ref_addr:
- Asm->OutStreamer->emitIntValue(Integer, SizeOf(Asm, Form));
+ Asm->OutStreamer->emitIntValue(Integer,
+ sizeOf(Asm->getDwarfFormParams(), Form));
return;
case dwarf::DW_FORM_GNU_str_index:
case dwarf::DW_FORM_GNU_addr_index:
@@ -425,15 +427,12 @@ void DIEInteger::emitValue(const AsmPrinter *Asm, dwarf::Form Form) const {
}
}
-/// SizeOf - Determine size of integer value in bytes.
+/// sizeOf - Determine size of integer value in bytes.
///
-unsigned DIEInteger::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
- assert(AP && "AsmPrinter is required to set FormParams");
- dwarf::FormParams Params = {AP->getDwarfVersion(),
- uint8_t(AP->getPointerSize()),
- AP->OutStreamer->getContext().getDwarfFormat()};
-
- if (Optional<uint8_t> FixedSize = dwarf::getFixedFormByteSize(Form, Params))
+unsigned DIEInteger::sizeOf(const dwarf::FormParams &FormParams,
+ dwarf::Form Form) const {
+ if (Optional<uint8_t> FixedSize =
+ dwarf::getFixedFormByteSize(Form, FormParams))
return *FixedSize;
switch (Form) {
@@ -464,19 +463,20 @@ void DIEInteger::print(raw_ostream &O) const {
/// EmitValue - Emit expression value.
///
void DIEExpr::emitValue(const AsmPrinter *AP, dwarf::Form Form) const {
- AP->emitDebugValue(Expr, SizeOf(AP, Form));
+ AP->emitDebugValue(Expr, sizeOf(AP->getDwarfFormParams(), Form));
}
/// SizeOf - Determine size of expression value in bytes.
///
-unsigned DIEExpr::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
+unsigned DIEExpr::sizeOf(const dwarf::FormParams &FormParams,
+ dwarf::Form Form) const {
switch (Form) {
case dwarf::DW_FORM_data4:
return 4;
case dwarf::DW_FORM_data8:
return 8;
case dwarf::DW_FORM_sec_offset:
- return AP->getDwarfOffsetByteSize();
+ return FormParams.getDwarfOffsetByteSize();
default:
llvm_unreachable("DIE Value form not supported yet");
}
@@ -493,12 +493,14 @@ void DIEExpr::print(raw_ostream &O) const { O << "Expr: " << *Expr; }
///
void DIELabel::emitValue(const AsmPrinter *AP, dwarf::Form Form) const {
bool IsSectionRelative = Form != dwarf::DW_FORM_addr;
- AP->emitLabelReference(Label, SizeOf(AP, Form), IsSectionRelative);
+ AP->emitLabelReference(Label, sizeOf(AP->getDwarfFormParams(), Form),
+ IsSectionRelative);
}
-/// SizeOf - Determine size of label value in bytes.
+/// sizeOf - Determine size of label value in bytes.
///
-unsigned DIELabel::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
+unsigned DIELabel::sizeOf(const dwarf::FormParams &FormParams,
+ dwarf::Form Form) const {
switch (Form) {
case dwarf::DW_FORM_data4:
return 4;
@@ -506,9 +508,9 @@ unsigned DIELabel::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
return 8;
case dwarf::DW_FORM_sec_offset:
case dwarf::DW_FORM_strp:
- return AP->getDwarfOffsetByteSize();
+ return FormParams.getDwarfOffsetByteSize();
case dwarf::DW_FORM_addr:
- return AP->MAI->getCodePointerSize();
+ return FormParams.AddrSize;
default:
llvm_unreachable("DIE Value form not supported yet");
}
@@ -527,7 +529,7 @@ void DIEBaseTypeRef::emitValue(const AsmPrinter *AP, dwarf::Form Form) const {
AP->emitULEB128(Offset, nullptr, ULEB128PadSize);
}
-unsigned DIEBaseTypeRef::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
+unsigned DIEBaseTypeRef::sizeOf(const dwarf::FormParams &, dwarf::Form) const {
return ULEB128PadSize;
}
@@ -541,19 +543,21 @@ void DIEBaseTypeRef::print(raw_ostream &O) const { O << "BaseTypeRef: " << Index
/// EmitValue - Emit delta value.
///
void DIEDelta::emitValue(const AsmPrinter *AP, dwarf::Form Form) const {
- AP->emitLabelDifference(LabelHi, LabelLo, SizeOf(AP, Form));
+ AP->emitLabelDifference(LabelHi, LabelLo,
+ sizeOf(AP->getDwarfFormParams(), Form));
}
/// SizeOf - Determine size of delta value in bytes.
///
-unsigned DIEDelta::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
+unsigned DIEDelta::sizeOf(const dwarf::FormParams &FormParams,
+ dwarf::Form Form) const {
switch (Form) {
case dwarf::DW_FORM_data4:
return 4;
case dwarf::DW_FORM_data8:
return 8;
case dwarf::DW_FORM_sec_offset:
- return AP->getDwarfOffsetByteSize();
+ return FormParams.getDwarfOffsetByteSize();
default:
llvm_unreachable("DIE Value form not supported yet");
}
@@ -592,9 +596,10 @@ void DIEString::emitValue(const AsmPrinter *AP, dwarf::Form Form) const {
}
}
-/// SizeOf - Determine size of delta value in bytes.
+/// sizeOf - Determine size of delta value in bytes.
///
-unsigned DIEString::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
+unsigned DIEString::sizeOf(const dwarf::FormParams &FormParams,
+ dwarf::Form Form) const {
// Index of string in symbol table.
switch (Form) {
case dwarf::DW_FORM_GNU_str_index:
@@ -603,11 +608,11 @@ unsigned DIEString::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
case dwarf::DW_FORM_strx2:
case dwarf::DW_FORM_strx3:
case dwarf::DW_FORM_strx4:
- return DIEInteger(S.getIndex()).SizeOf(AP, Form);
+ return DIEInteger(S.getIndex()).sizeOf(FormParams, Form);
case dwarf::DW_FORM_strp:
- if (AP->MAI->doesDwarfUseRelocationsAcrossSections())
- return DIELabel(S.getSymbol()).SizeOf(AP, Form);
- return DIEInteger(S.getOffset()).SizeOf(AP, Form);
+ if (FormParams.DwarfUsesRelocationsAcrossSections)
+ return DIELabel(S.getSymbol()).sizeOf(FormParams, Form);
+ return DIEInteger(S.getOffset()).sizeOf(FormParams, Form);
default:
llvm_unreachable("Expected valid string form");
}
@@ -630,7 +635,7 @@ void DIEInlineString::emitValue(const AsmPrinter *AP, dwarf::Form Form) const {
llvm_unreachable("Expected valid string form");
}
-unsigned DIEInlineString::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
+unsigned DIEInlineString::sizeOf(const dwarf::FormParams &, dwarf::Form) const {
// Emit string bytes + NULL byte.
return S.size() + 1;
}
@@ -653,7 +658,8 @@ void DIEEntry::emitValue(const AsmPrinter *AP, dwarf::Form Form) const {
case dwarf::DW_FORM_ref2:
case dwarf::DW_FORM_ref4:
case dwarf::DW_FORM_ref8:
- AP->OutStreamer->emitIntValue(Entry->getOffset(), SizeOf(AP, Form));
+ AP->OutStreamer->emitIntValue(Entry->getOffset(),
+ sizeOf(AP->getDwarfFormParams(), Form));
return;
case dwarf::DW_FORM_ref_udata:
@@ -665,11 +671,12 @@ void DIEEntry::emitValue(const AsmPrinter *AP, dwarf::Form Form) const {
uint64_t Addr = Entry->getDebugSectionOffset();
if (const MCSymbol *SectionSym =
Entry->getUnit()->getCrossSectionRelativeBaseAddress()) {
- AP->emitLabelPlusOffset(SectionSym, Addr, SizeOf(AP, Form), true);
+ AP->emitLabelPlusOffset(SectionSym, Addr,
+ sizeOf(AP->getDwarfFormParams(), Form), true);
return;
}
- AP->OutStreamer->emitIntValue(Addr, SizeOf(AP, Form));
+ AP->OutStreamer->emitIntValue(Addr, sizeOf(AP->getDwarfFormParams(), Form));
return;
}
default:
@@ -677,7 +684,8 @@ void DIEEntry::emitValue(const AsmPrinter *AP, dwarf::Form Form) const {
}
}
-unsigned DIEEntry::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
+unsigned DIEEntry::sizeOf(const dwarf::FormParams &FormParams,
+ dwarf::Form Form) const {
switch (Form) {
case dwarf::DW_FORM_ref1:
return 1;
@@ -690,15 +698,7 @@ unsigned DIEEntry::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
case dwarf::DW_FORM_ref_udata:
return getULEB128Size(Entry->getOffset());
case dwarf::DW_FORM_ref_addr:
- if (AP->getDwarfVersion() == 2)
- return AP->MAI->getCodePointerSize();
- switch (AP->OutStreamer->getContext().getDwarfFormat()) {
- case dwarf::DWARF32:
- return 4;
- case dwarf::DWARF64:
- return 8;
- }
- llvm_unreachable("Invalid DWARF format");
+ return FormParams.getRefAddrByteSize();
default:
llvm_unreachable("Improper form for DIE reference");
@@ -714,12 +714,10 @@ void DIEEntry::print(raw_ostream &O) const {
// DIELoc Implementation
//===----------------------------------------------------------------------===//
-/// ComputeSize - calculate the size of the location expression.
-///
-unsigned DIELoc::ComputeSize(const AsmPrinter *AP) const {
+unsigned DIELoc::computeSize(const dwarf::FormParams &FormParams) const {
if (!Size) {
for (const auto &V : values())
- Size += V.SizeOf(AP);
+ Size += V.sizeOf(FormParams);
}
return Size;
@@ -743,9 +741,9 @@ void DIELoc::emitValue(const AsmPrinter *Asm, dwarf::Form Form) const {
V.emitValue(Asm);
}
-/// SizeOf - Determine size of location data in bytes.
+/// sizeOf - Determine size of location data in bytes.
///
-unsigned DIELoc::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
+unsigned DIELoc::sizeOf(const dwarf::FormParams &, dwarf::Form Form) const {
switch (Form) {
case dwarf::DW_FORM_block1: return Size + sizeof(int8_t);
case dwarf::DW_FORM_block2: return Size + sizeof(int16_t);
@@ -766,12 +764,10 @@ void DIELoc::print(raw_ostream &O) const {
// DIEBlock Implementation
//===----------------------------------------------------------------------===//
-/// ComputeSize - calculate the size of the block.
-///
-unsigned DIEBlock::ComputeSize(const AsmPrinter *AP) const {
+unsigned DIEBlock::computeSize(const dwarf::FormParams &FormParams) const {
if (!Size) {
for (const auto &V : values())
- Size += V.SizeOf(AP);
+ Size += V.sizeOf(FormParams);
}
return Size;
@@ -797,9 +793,9 @@ void DIEBlock::emitValue(const AsmPrinter *Asm, dwarf::Form Form) const {
V.emitValue(Asm);
}
-/// SizeOf - Determine size of block data in bytes.
+/// sizeOf - Determine size of block data in bytes.
///
-unsigned DIEBlock::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
+unsigned DIEBlock::sizeOf(const dwarf::FormParams &, dwarf::Form Form) const {
switch (Form) {
case dwarf::DW_FORM_block1: return Size + sizeof(int8_t);
case dwarf::DW_FORM_block2: return Size + sizeof(int16_t);
@@ -820,22 +816,23 @@ void DIEBlock::print(raw_ostream &O) const {
// DIELocList Implementation
//===----------------------------------------------------------------------===//
-unsigned DIELocList::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
+unsigned DIELocList::sizeOf(const dwarf::FormParams &FormParams,
+ dwarf::Form Form) const {
switch (Form) {
case dwarf::DW_FORM_loclistx:
return getULEB128Size(Index);
case dwarf::DW_FORM_data4:
- assert(!AP->isDwarf64() &&
+ assert(FormParams.Format != dwarf::DWARF64 &&
"DW_FORM_data4 is not suitable to emit a pointer to a location list "
"in the 64-bit DWARF format");
return 4;
case dwarf::DW_FORM_data8:
- assert(AP->isDwarf64() &&
+ assert(FormParams.Format == dwarf::DWARF64 &&
"DW_FORM_data8 is not suitable to emit a pointer to a location list "
"in the 32-bit DWARF format");
return 8;
case dwarf::DW_FORM_sec_offset:
- return AP->getDwarfOffsetByteSize();
+ return FormParams.getDwarfOffsetByteSize();
default:
llvm_unreachable("DIE Value form not supported yet");
}
@@ -860,9 +857,10 @@ void DIELocList::print(raw_ostream &O) const { O << "LocList: " << Index; }
// DIEAddrOffset Implementation
//===----------------------------------------------------------------------===//
-unsigned DIEAddrOffset::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
- return Addr.SizeOf(AP, dwarf::DW_FORM_addrx) +
- Offset.SizeOf(AP, dwarf::DW_FORM_data4);
+unsigned DIEAddrOffset::sizeOf(const dwarf::FormParams &FormParams,
+ dwarf::Form) const {
+ return Addr.sizeOf(FormParams, dwarf::DW_FORM_addrx) +
+ Offset.sizeOf(FormParams, dwarf::DW_FORM_data4);
}
/// EmitValue - Emit label value.
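
The sizeOf() refactor above sizes DIE values from a dwarf::FormParams (version, address size, DWARF32/64 format) instead of reaching back into the AsmPrinter. The format-dependent cases reduce to two small rules, sketched here as a plain reimplementation for illustration (not LLVM's FormParams itself):

#include <cstdint>

enum class DwarfFormat { DWARF32, DWARF64 };

// Size of a section offset (DW_FORM_sec_offset, DW_FORM_strp, ...).
static unsigned dwarfOffsetByteSize(DwarfFormat F) {
  return F == DwarfFormat::DWARF64 ? 8 : 4;
}

// Size of DW_FORM_ref_addr: address-sized in DWARF v2, offset-sized later.
static unsigned refAddrByteSize(DwarfFormat F, uint16_t Version,
                                uint8_t AddrSize) {
  return Version == 2 ? AddrSize : dwarfOffsetByteSize(F);
}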
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DIEHash.cpp b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DIEHash.cpp
index 5f4ee747fcca..e175854f7b93 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DIEHash.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DIEHash.cpp
@@ -207,6 +207,18 @@ void DIEHash::hashDIEEntry(dwarf::Attribute Attribute, dwarf::Tag Tag,
computeHash(Entry);
}
+void DIEHash::hashRawTypeReference(const DIE &Entry) {
+ unsigned &DieNumber = Numbering[&Entry];
+ if (DieNumber) {
+ addULEB128('R');
+ addULEB128(DieNumber);
+ return;
+ }
+ DieNumber = Numbering.size();
+ addULEB128('T');
+ computeHash(Entry);
+}
+
// Hash all of the values in a block like set of values. This assumes that
// all of the data is going to be added as integers.
void DIEHash::hashBlockData(const DIE::const_value_range &Values) {
@@ -298,10 +310,10 @@ void DIEHash::hashAttribute(const DIEValue &Value, dwarf::Tag Tag) {
addULEB128(Attribute);
addULEB128(dwarf::DW_FORM_block);
if (Value.getType() == DIEValue::isBlock) {
- addULEB128(Value.getDIEBlock().ComputeSize(AP));
+ addULEB128(Value.getDIEBlock().computeSize(AP->getDwarfFormParams()));
hashBlockData(Value.getDIEBlock().values());
} else if (Value.getType() == DIEValue::isLoc) {
- addULEB128(Value.getDIELoc().ComputeSize(AP));
+ addULEB128(Value.getDIELoc().computeSize(AP->getDwarfFormParams()));
hashBlockData(Value.getDIELoc().values());
} else {
// We could add the block length, but that would take
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DIEHash.h b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DIEHash.h
index 29e1da4c5d60..24a973b39271 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DIEHash.h
+++ b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DIEHash.h
@@ -62,6 +62,8 @@ public:
/// Encodes and adds \param Value to the hash as a SLEB128.
void addSLEB128(int64_t Value);
+ void hashRawTypeReference(const DIE &Entry);
+
private:
/// Adds \param Str to the hash and includes a NULL byte.
void addString(StringRef Str);
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp
index 4df34d2c9402..18fc46c74eb4 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp
@@ -155,7 +155,8 @@ uint64_t DebugHandlerBase::getBaseTypeSize(const DIType *Ty) {
if (Tag != dwarf::DW_TAG_member && Tag != dwarf::DW_TAG_typedef &&
Tag != dwarf::DW_TAG_const_type && Tag != dwarf::DW_TAG_volatile_type &&
- Tag != dwarf::DW_TAG_restrict_type && Tag != dwarf::DW_TAG_atomic_type)
+ Tag != dwarf::DW_TAG_restrict_type && Tag != dwarf::DW_TAG_atomic_type &&
+ Tag != dwarf::DW_TAG_immutable_type)
return DDTy->getSizeInBits();
DIType *BaseType = DDTy->getBaseType();
@@ -210,7 +211,8 @@ bool DebugHandlerBase::isUnsignedDIType(const DIType *Ty) {
return true;
assert(T == dwarf::DW_TAG_typedef || T == dwarf::DW_TAG_const_type ||
T == dwarf::DW_TAG_volatile_type ||
- T == dwarf::DW_TAG_restrict_type || T == dwarf::DW_TAG_atomic_type);
+ T == dwarf::DW_TAG_restrict_type || T == dwarf::DW_TAG_atomic_type ||
+ T == dwarf::DW_TAG_immutable_type);
assert(DTy->getBaseType() && "Expected valid base type");
return isUnsignedDIType(DTy->getBaseType());
}
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
index 9b73f0ab2f05..5913c687db48 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
@@ -127,9 +127,14 @@ unsigned DwarfCompileUnit::getOrCreateSourceID(const DIFile *File) {
if (!File)
return Asm->OutStreamer->emitDwarfFileDirective(0, "", "", None, None,
CUID);
- return Asm->OutStreamer->emitDwarfFileDirective(
- 0, File->getDirectory(), File->getFilename(), DD->getMD5AsBytes(File),
- File->getSource(), CUID);
+
+ if (LastFile != File) {
+ LastFile = File;
+ LastFileID = Asm->OutStreamer->emitDwarfFileDirective(
+ 0, File->getDirectory(), File->getFilename(), DD->getMD5AsBytes(File),
+ File->getSource(), CUID);
+ }
+ return LastFileID;
}
DIE *DwarfCompileUnit::getOrCreateGlobalVariableDIE(
@@ -260,9 +265,20 @@ void DwarfCompileUnit::addLocationAttribute(
if (Global) {
const MCSymbol *Sym = Asm->getSymbol(Global);
- unsigned PointerSize = Asm->getDataLayout().getPointerSize();
- assert((PointerSize == 4 || PointerSize == 8) &&
- "Add support for other sizes if necessary");
+ // 16-bit platforms like MSP430 and AVR take this path, so sink this
+ // assert to platforms that use it.
+ auto GetPointerSizedFormAndOp = [this]() {
+ unsigned PointerSize = Asm->getDataLayout().getPointerSize();
+ assert((PointerSize == 4 || PointerSize == 8) &&
+ "Add support for other sizes if necessary");
+ struct FormAndOp {
+ dwarf::Form Form;
+ dwarf::LocationAtom Op;
+ };
+ return PointerSize == 4
+ ? FormAndOp{dwarf::DW_FORM_data4, dwarf::DW_OP_const4u}
+ : FormAndOp{dwarf::DW_FORM_data8, dwarf::DW_OP_const8u};
+ };
if (Global->isThreadLocal()) {
if (Asm->TM.useEmulatedTLS()) {
// TODO: add debug info for emulated thread local mode.
@@ -270,15 +286,12 @@ void DwarfCompileUnit::addLocationAttribute(
// FIXME: Make this work with -gsplit-dwarf.
// Based on GCC's support for TLS:
if (!DD->useSplitDwarf()) {
+ auto FormAndOp = GetPointerSizedFormAndOp();
// 1) Start with a constNu of the appropriate pointer size
- addUInt(*Loc, dwarf::DW_FORM_data1,
- PointerSize == 4 ? dwarf::DW_OP_const4u
- : dwarf::DW_OP_const8u);
+ addUInt(*Loc, dwarf::DW_FORM_data1, FormAndOp.Op);
// 2) containing the (relocated) offset of the TLS variable
// within the module's TLS block.
- addExpr(*Loc,
- PointerSize == 4 ? dwarf::DW_FORM_data4
- : dwarf::DW_FORM_data8,
+ addExpr(*Loc, FormAndOp.Form,
Asm->getObjFileLowering().getDebugThreadLocalSymbol(Sym));
} else {
addUInt(*Loc, dwarf::DW_FORM_data1, dwarf::DW_OP_GNU_const_index);
@@ -292,13 +305,11 @@ void DwarfCompileUnit::addLocationAttribute(
}
} else if (Asm->TM.getRelocationModel() == Reloc::RWPI ||
Asm->TM.getRelocationModel() == Reloc::ROPI_RWPI) {
+ auto FormAndOp = GetPointerSizedFormAndOp();
// Constant
- addUInt(*Loc, dwarf::DW_FORM_data1,
- PointerSize == 4 ? dwarf::DW_OP_const4u
- : dwarf::DW_OP_const8u);
+ addUInt(*Loc, dwarf::DW_FORM_data1, FormAndOp.Op);
// Relocation offset
- addExpr(*Loc, PointerSize == 4 ? dwarf::DW_FORM_data4
- : dwarf::DW_FORM_data8,
+ addExpr(*Loc, FormAndOp.Form,
Asm->getObjFileLowering().getIndirectSymViaRWPI(Sym));
// Base register
Register BaseReg = Asm->getObjFileLowering().getStaticBase();
@@ -1575,7 +1586,8 @@ void DwarfCompileUnit::createBaseTypeDIEs() {
Twine(dwarf::AttributeEncodingString(Btr.Encoding) +
"_" + Twine(Btr.BitSize)).toStringRef(Str));
addUInt(Die, dwarf::DW_AT_encoding, dwarf::DW_FORM_data1, Btr.Encoding);
- addUInt(Die, dwarf::DW_AT_byte_size, None, Btr.BitSize / 8);
+ // Round up to smallest number of bytes that contains this number of bits.
+ addUInt(Die, dwarf::DW_AT_byte_size, None, divideCeil(Btr.BitSize, 8));
Btr.Die = &Die;
}
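
The DW_AT_byte_size fix above rounds the bit size up instead of truncating, so sub-byte base types (for example a 1-bit boolean) get a byte size of 1 rather than 0. The arithmetic is just a ceiling division:

#include <cstdint>

// Equivalent of llvm::divideCeil(BitSize, 8) for this purpose.
static uint64_t bitsToWholeBytes(uint64_t BitSize) {
  return (BitSize + 7) / 8;
}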
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
index fb03982b5e4a..f2e1f6346803 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
+++ b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
@@ -86,6 +86,9 @@ class DwarfCompileUnit final : public DwarfUnit {
/// DWO ID for correlating skeleton and split units.
uint64_t DWOId = 0;
+ const DIFile *LastFile = nullptr;
+ unsigned LastFileID;
+
/// Construct a DIE for the given DbgVariable without initializing the
/// DbgVariable's DIE reference.
DIE *constructVariableDIEImpl(const DbgVariable &DV, bool Abstract);
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
index 48134f1fd774..680b9586228f 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -2539,12 +2539,10 @@ void DwarfDebug::emitDebugLocEntry(ByteStreamer &Streamer,
if (Op.getDescription().Op[I] == Encoding::SizeNA)
continue;
if (Op.getDescription().Op[I] == Encoding::BaseTypeRef) {
- uint64_t Offset =
- CU->ExprRefedBaseTypes[Op.getRawOperand(I)].Die->getOffset();
- assert(Offset < (1ULL << (ULEB128PadSize * 7)) && "Offset wont fit");
- Streamer.emitULEB128(Offset, "", ULEB128PadSize);
+ unsigned Length =
+ Streamer.emitDIERef(*CU->ExprRefedBaseTypes[Op.getRawOperand(I)].Die);
// Make sure comments stay aligned.
- for (unsigned J = 0; J < ULEB128PadSize; ++J)
+ for (unsigned J = 0; J < Length; ++J)
if (Comment != End)
Comment++;
} else {
@@ -3369,7 +3367,8 @@ void DwarfDebug::addDwarfTypeUnitType(DwarfCompileUnit &CU,
// Fast path if we're building some type units and one has already used the
// address pool we know we're going to throw away all this work anyway, so
// don't bother building dependent types.
- if (!TypeUnitsUnderConstruction.empty() && AddrPool.hasBeenUsed())
+ if (!TypeUnitsUnderConstruction.empty() &&
+ (AddrPool.hasBeenUsed() || SeenLocalType))
return;
auto Ins = TypeSignatures.insert(std::make_pair(CTy, 0));
@@ -3380,6 +3379,7 @@ void DwarfDebug::addDwarfTypeUnitType(DwarfCompileUnit &CU,
bool TopLevelType = TypeUnitsUnderConstruction.empty();
AddrPool.resetUsedFlag();
+ SeenLocalType = false;
auto OwnedUnit = std::make_unique<DwarfTypeUnit>(CU, Asm, this, &InfoHolder,
getDwoLineTable(CU));
@@ -3423,7 +3423,7 @@ void DwarfDebug::addDwarfTypeUnitType(DwarfCompileUnit &CU,
// Types referencing entries in the address table cannot be placed in type
// units.
- if (AddrPool.hasBeenUsed()) {
+ if (AddrPool.hasBeenUsed() || SeenLocalType) {
// Remove all the types built while building this type.
// This is pessimistic as some of these types might not be dependent on
@@ -3451,14 +3451,18 @@ void DwarfDebug::addDwarfTypeUnitType(DwarfCompileUnit &CU,
DwarfDebug::NonTypeUnitContext::NonTypeUnitContext(DwarfDebug *DD)
: DD(DD),
- TypeUnitsUnderConstruction(std::move(DD->TypeUnitsUnderConstruction)), AddrPoolUsed(DD->AddrPool.hasBeenUsed()) {
+ TypeUnitsUnderConstruction(std::move(DD->TypeUnitsUnderConstruction)),
+ AddrPoolUsed(DD->AddrPool.hasBeenUsed()),
+ SeenLocalType(DD->SeenLocalType) {
DD->TypeUnitsUnderConstruction.clear();
DD->AddrPool.resetUsedFlag();
+ DD->SeenLocalType = false;
}
DwarfDebug::NonTypeUnitContext::~NonTypeUnitContext() {
DD->TypeUnitsUnderConstruction = std::move(TypeUnitsUnderConstruction);
DD->AddrPool.resetUsedFlag(AddrPoolUsed);
+ DD->SeenLocalType = SeenLocalType;
}
DwarfDebug::NonTypeUnitContext DwarfDebug::enterNonTypeUnitContext() {
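
NonTypeUnitContext above now saves, clears, and later restores the new SeenLocalType flag alongside the type-units-under-construction state, following the usual RAII save/restore shape. A generic sketch of that shape (not the LLVM class):

// Save the current value on entry, clear it for the nested scope, and
// restore it on exit -- mirroring what NonTypeUnitContext does with
// DD->SeenLocalType.
class SaveAndClearFlag {
  bool &Flag;
  bool Saved;
public:
  explicit SaveAndClearFlag(bool &F) : Flag(F), Saved(F) { Flag = false; }
  ~SaveAndClearFlag() { Flag = Saved; }
};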
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
index 4e1a1b1e068d..0043000652e8 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
+++ b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
@@ -433,6 +433,7 @@ private:
DenseMap<const DIStringType *, unsigned> StringTypeLocMap;
AddressPool AddrPool;
+ bool SeenLocalType = false;
/// Accelerator tables.
AccelTable<DWARF5AccelTableData> AccelDebugNames;
@@ -671,6 +672,7 @@ public:
DwarfDebug *DD;
decltype(DwarfDebug::TypeUnitsUnderConstruction) TypeUnitsUnderConstruction;
bool AddrPoolUsed;
+ bool SeenLocalType;
friend class DwarfDebug;
NonTypeUnitContext(DwarfDebug *DD);
public:
@@ -679,6 +681,7 @@ public:
};
NonTypeUnitContext enterNonTypeUnitContext();
+ void seenLocalType() { SeenLocalType = true; }
/// Add a label so that arange data can be generated for it.
void addArangeLabel(SymbolCU SCU) { ArangeLabels.push_back(SCU); }
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfException.h b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfException.h
index 40898c9fc855..4defa8a30855 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfException.h
+++ b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfException.h
@@ -98,6 +98,8 @@ class LLVM_LIBRARY_VISIBILITY AIXException : public DwarfCFIExceptionBase {
public:
AIXException(AsmPrinter *A);
+ void markFunctionEnd() override;
+
void endModule() override {}
void beginFunction(const MachineFunction *MF) override {}
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
index 37407c98e75f..ee932d105107 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
@@ -681,9 +681,25 @@ void DwarfExpression::emitLegacySExt(unsigned FromBits) {
}
void DwarfExpression::emitLegacyZExt(unsigned FromBits) {
- // (X & (1 << FromBits - 1))
- emitOp(dwarf::DW_OP_constu);
- emitUnsigned((1ULL << FromBits) - 1);
+ // Heuristic to decide the most efficient encoding.
+ // A ULEB can encode 7 1-bits per byte.
+ if (FromBits / 7 < 1+1+1+1+1) {
+ // (X & (1 << FromBits - 1))
+ emitOp(dwarf::DW_OP_constu);
+ emitUnsigned((1ULL << FromBits) - 1);
+ } else {
+ // Note that the DWARF 4 stack consists of pointer-sized elements,
+ // so technically it doesn't make sense to shift left more than 64
+ // bits. We leave that for the consumer to decide though. LLDB for
+ // example uses APInt for the stack elements and can still deal
+ // with this.
+ emitOp(dwarf::DW_OP_lit1);
+ emitOp(dwarf::DW_OP_constu);
+ emitUnsigned(FromBits);
+ emitOp(dwarf::DW_OP_shl);
+ emitOp(dwarf::DW_OP_lit1);
+ emitOp(dwarf::DW_OP_minus);
+ }
emitOp(dwarf::DW_OP_and);
}
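
The emitLegacyZExt() change above trades the literal mask (1 << FromBits) - 1, whose ULEB128 encoding grows with FromBits (7 bits per byte), against a fixed five-operator shift sequence, hence the FromBits / 7 < 5 cutoff. A rough cost comparison under that assumption (sketch only; the exact cutoff used by the patch is the integer-division test shown above):

#include <cstdint>

static unsigned ulebSize(uint64_t V) {
  unsigned N = 1;
  while (V >>= 7)
    ++N;
  return N;
}

// Bytes for "DW_OP_constu <mask>" vs. "lit1, constu <FromBits>, shl, lit1,
// minus"; the trailing DW_OP_and is common to both forms and cancels out.
// Assumes FromBits > 0.
static bool literalMaskIsSmaller(unsigned FromBits) {
  unsigned MaskCost = 1 + (FromBits + 6) / 7;  // opcode + ULEB of the mask
  unsigned ShiftCost = 5 + ulebSize(FromBits); // five opcodes + ULEB operand
  return MaskCost <= ShiftCost;
}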
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfFile.cpp b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfFile.cpp
index 838e1c9a10be..a67d0f032cf6 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfFile.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfFile.cpp
@@ -92,7 +92,8 @@ unsigned DwarfFile::computeSizeAndOffsetsForUnit(DwarfUnit *TheU) {
// Compute the size and offset of a DIE. The offset is relative to start of the
// CU. It returns the offset after laying out the DIE.
unsigned DwarfFile::computeSizeAndOffset(DIE &Die, unsigned Offset) {
- return Die.computeOffsetsAndAbbrevs(Asm, Abbrevs, Offset);
+ return Die.computeOffsetsAndAbbrevs(Asm->getDwarfFormParams(), Abbrevs,
+ Offset);
}
void DwarfFile::emitAbbrevs(MCSection *Section) { Abbrevs.Emit(Asm, Section); }
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
index 6b6d63f14f87..15d90c54adfc 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
@@ -77,7 +77,7 @@ void DIEDwarfExpression::enableTemporaryBuffer() {
void DIEDwarfExpression::disableTemporaryBuffer() { IsBuffering = false; }
unsigned DIEDwarfExpression::getTemporaryBufferSize() {
- return TmpDIE.ComputeSize(&AP);
+ return TmpDIE.computeSize(AP.getDwarfFormParams());
}
void DIEDwarfExpression::commitTemporaryBuffer() { OutDIE.takeValues(TmpDIE); }
@@ -394,14 +394,14 @@ DIE &DwarfUnit::createAndAddDIE(dwarf::Tag Tag, DIE &Parent, const DINode *N) {
}
void DwarfUnit::addBlock(DIE &Die, dwarf::Attribute Attribute, DIELoc *Loc) {
- Loc->ComputeSize(Asm);
+ Loc->computeSize(Asm->getDwarfFormParams());
DIELocs.push_back(Loc); // Memoize so we can call the destructor later on.
addAttribute(Die, Attribute, Loc->BestForm(DD->getDwarfVersion()), Loc);
}
void DwarfUnit::addBlock(DIE &Die, dwarf::Attribute Attribute, dwarf::Form Form,
DIEBlock *Block) {
- Block->ComputeSize(Asm);
+ Block->computeSize(Asm->getDwarfFormParams());
DIEBlocks.push_back(Block); // Memoize so we can call the destructor later on.
addAttribute(Die, Attribute, Form, Block);
}
@@ -597,10 +597,8 @@ DIE *DwarfUnit::createTypeDIE(const DIScope *Context, DIE &ContextDIE,
// Skip updating the accelerator tables since this is not the full type.
if (MDString *TypeId = CTy->getRawIdentifier())
DD->addDwarfTypeUnitType(getCU(), TypeId->getString(), TyDIE, CTy);
- else {
- auto X = DD->enterNonTypeUnitContext();
+ else
finishNonUnitTypeDIE(TyDIE, CTy);
- }
return &TyDIE;
}
constructTypeDIE(TyDIE, CTy);
@@ -744,6 +742,16 @@ void DwarfUnit::constructTypeDIE(DIE &Buffer, const DIStringType *STy) {
addUInt(Buffer, dwarf::DW_AT_byte_size, None, Size);
}
+ if (DIExpression *Expr = STy->getStringLocationExp()) {
+ DIELoc *Loc = new (DIEValueAllocator) DIELoc;
+ DIEDwarfExpression DwarfExpr(*Asm, getCU(), *Loc);
+ // This is to describe the memory location of the
+ // string, so lock it down as such.
+ DwarfExpr.setMemoryLocationKind();
+ DwarfExpr.addExpression(Expr);
+ addBlock(Buffer, dwarf::DW_AT_data_location, DwarfExpr.finalize());
+ }
+
if (STy->getEncoding()) {
// For eventual Unicode support.
addUInt(Buffer, dwarf::DW_AT_encoding, dwarf::DW_FORM_data1,
@@ -1189,7 +1197,7 @@ bool DwarfUnit::applySubprogramDefinitionAttributes(const DISubprogram *SP,
DefinitionArgs = SP->getType()->getTypeArray();
if (DeclArgs.size() && DefinitionArgs.size())
- if (DefinitionArgs[0] != NULL && DeclArgs[0] != DefinitionArgs[0])
+ if (DefinitionArgs[0] != nullptr && DeclArgs[0] != DefinitionArgs[0])
addType(SPDie, DefinitionArgs[0]);
DeclDie = getDIE(SPDecl);
@@ -1842,5 +1850,25 @@ void DwarfTypeUnit::finishNonUnitTypeDIE(DIE& D, const DICompositeType *CTy) {
StringRef Name = CTy->getName();
if (!Name.empty())
addString(D, dwarf::DW_AT_name, Name);
+ if (Name.startswith("_STN") || !Name.contains('<'))
+ addTemplateParams(D, CTy->getTemplateParams());
+ // If the type is in an anonymous namespace, we can't reference it from a TU
+ // (since the type would be CU local and the TU doesn't specify which TU has
+ // the appropriate type definition) - so flag this emission as such and skip
+ // the rest of the emission now since we're going to throw out all this work
+ // and put the outer/referencing type in the CU instead.
+ // FIXME: Probably good to generalize this to a DICompositeType flag populated
+ // by the frontend, then we could use that to have types that can have
+ // decl+def merged by LTO but where the definition still doesn't go in a type
+ // unit because the type has only one definition.
+ for (DIScope *S = CTy->getScope(); S; S = S->getScope()) {
+ if (auto *NS = dyn_cast<DINamespace>(S)) {
+ if (NS->getName().empty()) {
+ DD->seenLocalType();
+ break;
+ }
+ }
+ }
+ auto X = DD->enterNonTypeUnitContext();
getCU().createTypeDIE(CTy);
}
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h
index 54b0079dd7ce..330f3bacca43 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h
+++ b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h
@@ -25,9 +25,7 @@ namespace llvm {
class ConstantFP;
class ConstantInt;
-class DbgVariable;
class DwarfCompileUnit;
-class MachineOperand;
class MCDwarfDwoLineTable;
class MCSymbol;
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/PseudoProbePrinter.h b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/PseudoProbePrinter.h
index 7d5e51218693..a92a89084cad 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/PseudoProbePrinter.h
+++ b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/PseudoProbePrinter.h
@@ -19,8 +19,6 @@
namespace llvm {
class AsmPrinter;
-class MCStreamer;
-class Module;
class DILocation;
class PseudoProbeHandler : public AsmPrinterHandler {
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/WinCFGuard.cpp b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/WinCFGuard.cpp
index 1e3f33e70715..ad8432343a60 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/WinCFGuard.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/WinCFGuard.cpp
@@ -27,7 +27,7 @@
using namespace llvm;
-WinCFGuard::WinCFGuard(AsmPrinter *A) : AsmPrinterHandler(), Asm(A) {}
+WinCFGuard::WinCFGuard(AsmPrinter *A) : Asm(A) {}
WinCFGuard::~WinCFGuard() {}
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/BranchFolding.h b/contrib/llvm-project/llvm/lib/CodeGen/BranchFolding.h
index 2a4ea92a92aa..95d5dcfbbd0f 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/BranchFolding.h
+++ b/contrib/llvm-project/llvm/lib/CodeGen/BranchFolding.h
@@ -23,7 +23,6 @@ class BasicBlock;
class MachineBranchProbabilityInfo;
class MachineFunction;
class MachineLoopInfo;
-class MachineModuleInfo;
class MachineRegisterInfo;
class MBFIWrapper;
class ProfileSummaryInfo;
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/CFIInstrInserter.cpp b/contrib/llvm-project/llvm/lib/CodeGen/CFIInstrInserter.cpp
index 1c2e3f998449..de173a9dfd62 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/CFIInstrInserter.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/CFIInstrInserter.cpp
@@ -347,7 +347,7 @@ bool CFIInstrInserter::insertCFIInstrs(MachineFunction &MF) {
}
if (ForceFullCFA) {
- MF.getSubtarget().getFrameLowering()->emitCalleeSavedFrameMoves(
+ MF.getSubtarget().getFrameLowering()->emitCalleeSavedFrameMovesFullCFA(
*MBBInfo.MBB, MBBI);
InsertedCFIInstr = true;
PrevMBBInfo = &MBBInfo;
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/CalcSpillWeights.cpp b/contrib/llvm-project/llvm/lib/CodeGen/CalcSpillWeights.cpp
index 5f9982cd155d..84a0e4142bb6 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/CalcSpillWeights.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/CalcSpillWeights.cpp
@@ -43,9 +43,9 @@ void VirtRegAuxInfo::calculateSpillWeightsAndHints() {
}
// Return the preferred allocation register for reg, given a COPY instruction.
-static Register copyHint(const MachineInstr *MI, unsigned Reg,
- const TargetRegisterInfo &TRI,
- const MachineRegisterInfo &MRI) {
+Register VirtRegAuxInfo::copyHint(const MachineInstr *MI, unsigned Reg,
+ const TargetRegisterInfo &TRI,
+ const MachineRegisterInfo &MRI) {
unsigned Sub, HSub;
Register HReg;
if (MI->getOperand(0).getReg() == Reg) {
@@ -77,9 +77,10 @@ static Register copyHint(const MachineInstr *MI, unsigned Reg,
}
// Check if all values in LI are rematerializable
-static bool isRematerializable(const LiveInterval &LI, const LiveIntervals &LIS,
- const VirtRegMap &VRM,
- const TargetInstrInfo &TII) {
+bool VirtRegAuxInfo::isRematerializable(const LiveInterval &LI,
+ const LiveIntervals &LIS,
+ const VirtRegMap &VRM,
+ const TargetInstrInfo &TII) {
Register Reg = LI.reg();
Register Original = VRM.getOriginal(Reg);
for (LiveInterval::const_vni_iterator I = LI.vni_begin(), E = LI.vni_end();
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/CodeGenPrepare.cpp b/contrib/llvm-project/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 747f4e4fdecc..28f24e5ea908 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -4168,11 +4168,11 @@ bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
// We can get through binary operator, if it is legal. In other words, the
// binary operator must have a nuw or nsw flag.
- const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
- if (isa_and_nonnull<OverflowingBinaryOperator>(BinOp) &&
- ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
- (IsSExt && BinOp->hasNoSignedWrap())))
- return true;
+ if (const auto *BinOp = dyn_cast<BinaryOperator>(Inst))
+ if (isa<OverflowingBinaryOperator>(BinOp) &&
+ ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
+ (IsSExt && BinOp->hasNoSignedWrap())))
+ return true;
// ext(and(opnd, cst)) --> and(ext(opnd), ext(cst))
if ((Inst->getOpcode() == Instruction::And ||
@@ -4181,10 +4181,10 @@ bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
// ext(xor(opnd, cst)) --> xor(ext(opnd), ext(cst))
if (Inst->getOpcode() == Instruction::Xor) {
- const ConstantInt *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1));
// Make sure it is not a NOT.
- if (Cst && !Cst->getValue().isAllOnes())
- return true;
+ if (const auto *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1)))
+ if (!Cst->getValue().isAllOnes())
+ return true;
}
// zext(shrl(opnd, cst)) --> shrl(zext(opnd), zext(cst))
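Note on the CodeGenPrepare change above: it swaps a dyn_cast followed by isa_and_nonnull for the init-statement-in-if idiom, so the narrowed pointer is only in scope where it is known non-null. A minimal standalone C++ analogy of that idiom (using dynamic_cast and a made-up Shape/Circle hierarchy instead of LLVM's cast machinery, purely as a sketch):

    #include <iostream>

    struct Shape { virtual ~Shape() = default; };
    struct Circle : Shape { double Radius = 1.0; };

    // New style: declare the narrowed pointer inside the if, so it is only
    // visible (and only dereferenced) where the cast is known to succeed.
    static double areaIfCircle(const Shape *S) {
      if (const auto *C = dynamic_cast<const Circle *>(S))
        return 3.14159265358979 * C->Radius * C->Radius;
      return 0.0;
    }

    int main() {
      Circle C;
      Shape Plain;
      std::cout << areaIfCircle(&C) << ' ' << areaIfCircle(&Plain) << '\n';
    }
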
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/CommandFlags.cpp b/contrib/llvm-project/llvm/lib/CodeGen/CommandFlags.cpp
index 3bed81d5841d..1d50e1d22b95 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/CommandFlags.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/CommandFlags.cpp
@@ -90,7 +90,6 @@ CGOPT(bool, EnableAddrsig)
CGOPT(bool, EmitCallSiteInfo)
CGOPT(bool, EnableMachineFunctionSplitter)
CGOPT(bool, EnableDebugEntryValues)
-CGOPT_EXP(bool, ValueTrackingVariableLocations)
CGOPT(bool, ForceDwarfFrameSection)
CGOPT(bool, XRayOmitFunctionIndex)
CGOPT(bool, DebugStrictDwarf)
@@ -433,12 +432,6 @@ codegen::RegisterCodeGenFlags::RegisterCodeGenFlags() {
cl::init(false));
CGBINDOPT(EnableDebugEntryValues);
- static cl::opt<bool> ValueTrackingVariableLocations(
- "experimental-debug-variable-locations",
- cl::desc("Use experimental new value-tracking variable locations"),
- cl::init(false));
- CGBINDOPT(ValueTrackingVariableLocations);
-
static cl::opt<bool> EnableMachineFunctionSplitter(
"split-machine-functions",
cl::desc("Split out cold basic blocks from machine functions based on "
@@ -539,12 +532,6 @@ codegen::InitTargetOptionsFromCodeGenFlags(const Triple &TheTriple) {
Options.DebugStrictDwarf = getDebugStrictDwarf();
Options.LoopAlignment = getAlignLoops();
- if (auto Opt = getExplicitValueTrackingVariableLocations())
- Options.ValueTrackingVariableLocations = *Opt;
- else
- Options.ValueTrackingVariableLocations =
- getDefaultValueTrackingVariableLocations(TheTriple);
-
Options.MCOptions = mc::InitMCTargetOptionsFromFlags();
Options.ThreadModel = getThreadModel();
@@ -620,7 +607,7 @@ void codegen::setFunctionAttributes(StringRef CPU, StringRef Features,
Function &F) {
auto &Ctx = F.getContext();
AttributeList Attrs = F.getAttributes();
- AttrBuilder NewAttrs;
+ AttrBuilder NewAttrs(Ctx);
if (!CPU.empty() && !F.hasFnAttribute("target-cpu"))
NewAttrs.addAttribute("target-cpu", CPU);
@@ -698,8 +685,3 @@ void codegen::setFunctionAttributes(StringRef CPU, StringRef Features,
setFunctionAttributes(CPU, Features, F);
}
-bool codegen::getDefaultValueTrackingVariableLocations(const llvm::Triple &T) {
- if (T.getArch() == llvm::Triple::x86_64)
- return true;
- return false;
-}
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp b/contrib/llvm-project/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp
index 901409ea9f8f..eb2d449bc4af 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp
@@ -40,8 +40,7 @@ using namespace llvm;
CriticalAntiDepBreaker::CriticalAntiDepBreaker(MachineFunction &MFi,
const RegisterClassInfo &RCI)
- : AntiDepBreaker(), MF(MFi), MRI(MF.getRegInfo()),
- TII(MF.getSubtarget().getInstrInfo()),
+ : MF(MFi), MRI(MF.getRegInfo()), TII(MF.getSubtarget().getInstrInfo()),
TRI(MF.getSubtarget().getRegisterInfo()), RegClassInfo(RCI),
Classes(TRI->getNumRegs(), nullptr), KillIndices(TRI->getNumRegs(), 0),
DefIndices(TRI->getNumRegs(), 0), KeepRegs(TRI->getNumRegs(), false) {}
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp b/contrib/llvm-project/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp
index 7300ea6b50ee..d9caa8ad42d0 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp
@@ -68,9 +68,16 @@ void ExpandPostRA::TransferImplicitOperands(MachineInstr *MI) {
MachineBasicBlock::iterator CopyMI = MI;
--CopyMI;
- for (const MachineOperand &MO : MI->implicit_operands())
- if (MO.isReg())
- CopyMI->addOperand(MO);
+ Register DstReg = MI->getOperand(0).getReg();
+ for (const MachineOperand &MO : MI->implicit_operands()) {
+ CopyMI->addOperand(MO);
+
+ // Be conservative about preserving kills when subregister defs are
+  // involved. If there was an implicit kill of a super-register overlapping
+  // the copy result, we would kill the subregisters that previous copies defined.
+ if (MO.isKill() && TRI->regsOverlap(DstReg, MO.getReg()))
+ CopyMI->getOperand(CopyMI->getNumOperands() - 1).setIsKill(false);
+ }
}
bool ExpandPostRA::LowerSubregToReg(MachineInstr *MI) {
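Note on the ExpandPostRA change above: all implicit operands are now copied, but kill flags are cleared on registers that overlap the copy's destination, so a stale super-register kill cannot invalidate sub-registers the copy just defined. A small self-contained sketch of that rule, with hypothetical integer register ids and an assumed overlap predicate standing in for TRI->regsOverlap:

    #include <iostream>
    #include <vector>

    struct Operand {
      unsigned Reg;
      bool IsKill;
    };

    // Hypothetical overlap test: pretend registers sharing the same "unit"
    // (Reg / 2) alias, e.g. a 32-bit sub-register of a 64-bit register.
    static bool regsOverlap(unsigned A, unsigned B) { return A / 2 == B / 2; }

    // Append implicit operands to a copy, clearing kill flags that would
    // otherwise kill the value the copy itself just defined.
    static void transferImplicitOperands(unsigned DstReg,
                                         const std::vector<Operand> &Implicit,
                                         std::vector<Operand> &CopyOps) {
      for (Operand MO : Implicit) {
        if (MO.IsKill && regsOverlap(DstReg, MO.Reg))
          MO.IsKill = false; // be conservative: keep the value live
        CopyOps.push_back(MO);
      }
    }

    int main() {
      std::vector<Operand> Implicit = {{2, true}, {7, true}};
      std::vector<Operand> CopyOps;
      transferImplicitOperands(/*DstReg=*/3, Implicit, CopyOps);
      for (const Operand &O : CopyOps)
        std::cout << O.Reg << (O.IsKill ? " kill" : " live") << '\n';
    }
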
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp
index 2676becdd807..1a642e233a6a 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp
@@ -191,10 +191,10 @@ MachineInstrBuilder CSEMIRBuilder::buildInstr(unsigned Opc,
assert(DstOps.size() == 1 && "Invalid dsts");
if (SrcOps[0].getLLTTy(*getMRI()).isVector()) {
// Try to constant fold vector constants.
- auto VecCst = ConstantFoldVectorBinop(
+ Register VecCst = ConstantFoldVectorBinop(
Opc, SrcOps[0].getReg(), SrcOps[1].getReg(), *getMRI(), *this);
if (VecCst)
- return MachineInstrBuilder(getMF(), *VecCst);
+ return buildCopy(DstOps[0], VecCst);
break;
}
if (Optional<APInt> Cst = ConstantFoldBinOp(Opc, SrcOps[0].getReg(),
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
index d061664e8c5d..1ec7868f2234 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -86,6 +86,7 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
CallLoweringInfo Info;
const DataLayout &DL = MIRBuilder.getDataLayout();
MachineFunction &MF = MIRBuilder.getMF();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
bool CanBeTailCalled = CB.isTailCall() &&
isInTailCallPosition(CB, MF.getTarget()) &&
(MF.getFunction()
@@ -109,6 +110,7 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
CanBeTailCalled = false;
}
+
// First step is to marshall all the function's parameters into the correct
// physregs and memory locations. Gather the sequence of argument types that
// we'll pass to the assigner function.
@@ -136,10 +138,23 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
else
Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);
+ Register ReturnHintAlignReg;
+ Align ReturnHintAlign;
+
Info.OrigRet = ArgInfo{ResRegs, RetTy, 0, ISD::ArgFlagsTy{}};
- if (!Info.OrigRet.Ty->isVoidTy())
+
+ if (!Info.OrigRet.Ty->isVoidTy()) {
setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CB);
+ if (MaybeAlign Alignment = CB.getRetAlign()) {
+ if (*Alignment > Align(1)) {
+ ReturnHintAlignReg = MRI.cloneVirtualRegister(ResRegs[0]);
+ Info.OrigRet.Regs[0] = ReturnHintAlignReg;
+ ReturnHintAlign = *Alignment;
+ }
+ }
+ }
+
Info.CB = &CB;
Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
Info.CallConv = CallConv;
@@ -147,7 +162,15 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
Info.IsMustTailCall = CB.isMustTailCall();
Info.IsTailCall = CanBeTailCalled;
Info.IsVarArg = IsVarArg;
- return lowerCall(MIRBuilder, Info);
+ if (!lowerCall(MIRBuilder, Info))
+ return false;
+
+ if (ReturnHintAlignReg && !Info.IsTailCall) {
+ MIRBuilder.buildAssertAlign(ResRegs[0], ReturnHintAlignReg,
+ ReturnHintAlign);
+ }
+
+ return true;
}
template <typename FuncInfoTy>
@@ -509,7 +532,8 @@ static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
bool CallLowering::determineAndHandleAssignments(
ValueHandler &Handler, ValueAssigner &Assigner,
SmallVectorImpl<ArgInfo> &Args, MachineIRBuilder &MIRBuilder,
- CallingConv::ID CallConv, bool IsVarArg, Register ThisReturnReg) const {
+ CallingConv::ID CallConv, bool IsVarArg,
+ ArrayRef<Register> ThisReturnRegs) const {
MachineFunction &MF = MIRBuilder.getMF();
const Function &F = MF.getFunction();
SmallVector<CCValAssign, 16> ArgLocs;
@@ -519,7 +543,7 @@ bool CallLowering::determineAndHandleAssignments(
return false;
return handleAssignments(Handler, Args, CCInfo, ArgLocs, MIRBuilder,
- ThisReturnReg);
+ ThisReturnRegs);
}
static unsigned extendOpFromFlags(llvm::ISD::ArgFlagsTy Flags) {
@@ -596,7 +620,7 @@ bool CallLowering::handleAssignments(ValueHandler &Handler,
CCState &CCInfo,
SmallVectorImpl<CCValAssign> &ArgLocs,
MachineIRBuilder &MIRBuilder,
- Register ThisReturnReg) const {
+ ArrayRef<Register> ThisReturnRegs) const {
MachineFunction &MF = MIRBuilder.getMF();
MachineRegisterInfo &MRI = MF.getRegInfo();
const Function &F = MF.getFunction();
@@ -740,10 +764,10 @@ bool CallLowering::handleAssignments(ValueHandler &Handler,
assert(!VA.needsCustom() && "custom loc should have been handled already");
- if (i == 0 && ThisReturnReg.isValid() &&
+ if (i == 0 && !ThisReturnRegs.empty() &&
Handler.isIncomingArgumentHandler() &&
isTypeIsValidForThisReturn(ValVT)) {
- Handler.assignValueToReg(Args[i].Regs[i], ThisReturnReg, VA);
+ Handler.assignValueToReg(ArgReg, ThisReturnRegs[Part], VA);
continue;
}
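Note on the CallLowering change above: the callee's return-alignment attribute is propagated by lowering the call into a cloned virtual register and then emitting G_ASSERT_ALIGN from the clone into the original result registers. At the source level, the closest everyday analogue of such an alignment hint is __builtin_assume_aligned; a minimal C++ sketch of the same idea, assuming a Clang or GCC toolchain:

    #include <cstdio>
    #include <cstdlib>

    // Wrapper that promises the optimizer the returned pointer is 64-byte
    // aligned, much as G_ASSERT_ALIGN re-asserts the callee's align attribute
    // on the call result. std::aligned_alloc already guarantees this at runtime.
    static void *alloc64(std::size_t N) {
      void *P = std::aligned_alloc(64, N);
      return P ? __builtin_assume_aligned(P, 64) : nullptr;
    }

    int main() {
      auto *Buf = static_cast<double *>(alloc64(64 * sizeof(double)));
      if (!Buf)
        return 1;
      Buf[0] = 42.0;
      std::printf("%p -> %f\n", static_cast<void *>(Buf), Buf[0]);
      std::free(Buf);
    }
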
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/Combiner.cpp b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/Combiner.cpp
index dd1ef74e8ad0..30f8838805b5 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/Combiner.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/Combiner.cpp
@@ -56,8 +56,7 @@ class WorkListMaintainer : public GISelChangeObserver {
SmallPtrSet<const MachineInstr *, 4> CreatedInstrs;
public:
- WorkListMaintainer(WorkListTy &WorkList)
- : GISelChangeObserver(), WorkList(WorkList) {}
+ WorkListMaintainer(WorkListTy &WorkList) : WorkList(WorkList) {}
virtual ~WorkListMaintainer() {
}
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index f7a634dad61a..d6a009744161 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -1748,6 +1748,20 @@ void CombinerHelper::applyCombineUnmergeConstant(MachineInstr &MI,
MI.eraseFromParent();
}
+bool CombinerHelper::matchCombineUnmergeUndef(
+ MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
+ unsigned SrcIdx = MI.getNumOperands() - 1;
+ Register SrcReg = MI.getOperand(SrcIdx).getReg();
+ MatchInfo = [&MI](MachineIRBuilder &B) {
+ unsigned NumElems = MI.getNumOperands() - 1;
+ for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
+ Register DstReg = MI.getOperand(Idx).getReg();
+ B.buildUndef(DstReg);
+ }
+ };
+ return isa<GImplicitDef>(MRI.getVRegDef(SrcReg));
+}
+
bool CombinerHelper::matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) {
assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
"Expected an unmerge");
@@ -2025,16 +2039,19 @@ void CombinerHelper::applyCombineAddP2IToPtrAdd(
}
bool CombinerHelper::matchCombineConstPtrAddToI2P(MachineInstr &MI,
- int64_t &NewCst) {
+ APInt &NewCst) {
auto &PtrAdd = cast<GPtrAdd>(MI);
Register LHS = PtrAdd.getBaseReg();
Register RHS = PtrAdd.getOffsetReg();
MachineRegisterInfo &MRI = Builder.getMF().getRegInfo();
- if (auto RHSCst = getIConstantVRegSExtVal(RHS, MRI)) {
- int64_t Cst;
+ if (auto RHSCst = getIConstantVRegVal(RHS, MRI)) {
+ APInt Cst;
if (mi_match(LHS, MRI, m_GIntToPtr(m_ICst(Cst)))) {
- NewCst = Cst + *RHSCst;
+ auto DstTy = MRI.getType(PtrAdd.getReg(0));
+ // G_INTTOPTR uses zero-extension
+ NewCst = Cst.zextOrTrunc(DstTy.getSizeInBits());
+ NewCst += RHSCst->sextOrTrunc(DstTy.getSizeInBits());
return true;
}
}
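Note on the matchCombineConstPtrAddToI2P change above: the fold is now done at the pointer width, zero-extending or truncating the G_INTTOPTR source and sign-extending the offset before adding, instead of doing signed 64-bit arithmetic. A standalone C++ sketch of that arithmetic for a hypothetical 32-bit pointer width (plain integer casts standing in for APInt::zextOrTrunc/sextOrTrunc):

    #include <cstdint>
    #include <iostream>

    // Fold inttoptr(C0) + C1 into a single 32-bit pointer constant.
    // C0 is zero-extended/truncated (G_INTTOPTR semantics), C1 sign-extended.
    static uint32_t foldConstPtrAdd(uint64_t C0, int64_t C1) {
      uint32_t Base = static_cast<uint32_t>(C0); // zextOrTrunc to 32 bits
      uint32_t Off = static_cast<uint32_t>(C1);  // sextOrTrunc to 32 bits
      return Base + Off;                         // modular pointer add
    }

    int main() {
      // 0x1'0000'0010 truncates to 0x10; adding -8 yields 0x8.
      std::cout << std::hex << foldConstPtrAdd(0x100000010ULL, -8) << '\n';
    }
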
@@ -2043,7 +2060,7 @@ bool CombinerHelper::matchCombineConstPtrAddToI2P(MachineInstr &MI,
}
void CombinerHelper::applyCombineConstPtrAddToI2P(MachineInstr &MI,
- int64_t &NewCst) {
+ APInt &NewCst) {
auto &PtrAdd = cast<GPtrAdd>(MI);
Register Dst = PtrAdd.getReg(0);
@@ -3875,39 +3892,48 @@ bool CombinerHelper::matchOrShiftToFunnelShift(MachineInstr &MI,
LLT Ty = MRI.getType(Dst);
unsigned BitWidth = Ty.getScalarSizeInBits();
- Register ShlSrc, ShlAmt, LShrSrc, LShrAmt;
+ Register ShlSrc, ShlAmt, LShrSrc, LShrAmt, Amt;
unsigned FshOpc = 0;
- // Match (or (shl x, amt), (lshr y, sub(bw, amt))).
- if (mi_match(
- Dst, MRI,
- // m_GOr() handles the commuted version as well.
- m_GOr(m_GShl(m_Reg(ShlSrc), m_Reg(ShlAmt)),
- m_GLShr(m_Reg(LShrSrc), m_GSub(m_SpecificICstOrSplat(BitWidth),
- m_Reg(LShrAmt)))))) {
+ // Match (or (shl ...), (lshr ...)).
+ if (!mi_match(Dst, MRI,
+ // m_GOr() handles the commuted version as well.
+ m_GOr(m_GShl(m_Reg(ShlSrc), m_Reg(ShlAmt)),
+ m_GLShr(m_Reg(LShrSrc), m_Reg(LShrAmt)))))
+ return false;
+
+ // Given constants C0 and C1 such that C0 + C1 is bit-width:
+ // (or (shl x, C0), (lshr y, C1)) -> (fshl x, y, C0) or (fshr x, y, C1)
+ // TODO: Match constant splat.
+ int64_t CstShlAmt, CstLShrAmt;
+ if (mi_match(ShlAmt, MRI, m_ICst(CstShlAmt)) &&
+ mi_match(LShrAmt, MRI, m_ICst(CstLShrAmt)) &&
+ CstShlAmt + CstLShrAmt == BitWidth) {
+ FshOpc = TargetOpcode::G_FSHR;
+ Amt = LShrAmt;
+
+ } else if (mi_match(LShrAmt, MRI,
+ m_GSub(m_SpecificICstOrSplat(BitWidth), m_Reg(Amt))) &&
+ ShlAmt == Amt) {
+ // (or (shl x, amt), (lshr y, (sub bw, amt))) -> (fshl x, y, amt)
FshOpc = TargetOpcode::G_FSHL;
- // Match (or (shl x, sub(bw, amt)), (lshr y, amt)).
- } else if (mi_match(Dst, MRI,
- m_GOr(m_GLShr(m_Reg(LShrSrc), m_Reg(LShrAmt)),
- m_GShl(m_Reg(ShlSrc),
- m_GSub(m_SpecificICstOrSplat(BitWidth),
- m_Reg(ShlAmt)))))) {
+ } else if (mi_match(ShlAmt, MRI,
+ m_GSub(m_SpecificICstOrSplat(BitWidth), m_Reg(Amt))) &&
+ LShrAmt == Amt) {
+ // (or (shl x, (sub bw, amt)), (lshr y, amt)) -> (fshr x, y, amt)
FshOpc = TargetOpcode::G_FSHR;
} else {
return false;
}
- if (ShlAmt != LShrAmt)
- return false;
-
- LLT AmtTy = MRI.getType(ShlAmt);
+ LLT AmtTy = MRI.getType(Amt);
if (!isLegalOrBeforeLegalizer({FshOpc, {Ty, AmtTy}}))
return false;
MatchInfo = [=](MachineIRBuilder &B) {
- B.buildInstr(FshOpc, {Dst}, {ShlSrc, LShrSrc, ShlAmt});
+ B.buildInstr(FshOpc, {Dst}, {ShlSrc, LShrSrc, Amt});
};
return true;
}
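Note on the rewritten matchOrShiftToFunnelShift above: besides the (sub bw, amt) forms, it now accepts the fully constant case, where C0 + C1 equal to the bit width makes (or (shl x, C0), (lshr y, C1)) exactly a funnel shift. A self-contained C++ check of that identity on 32-bit values (hand-written fshl/fshr references, for illustration only):

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    // Reference funnel shifts on 32-bit lanes, amount taken modulo 32.
    static uint32_t fshl(uint32_t X, uint32_t Y, unsigned Amt) {
      Amt %= 32;
      return Amt ? (X << Amt) | (Y >> (32 - Amt)) : X;
    }
    static uint32_t fshr(uint32_t X, uint32_t Y, unsigned Amt) {
      Amt %= 32;
      return Amt ? (X << (32 - Amt)) | (Y >> Amt) : Y;
    }

    int main() {
      uint32_t X = 0x12345678, Y = 0x9ABCDEF0;
      for (unsigned C0 = 1; C0 < 32; ++C0) {
        unsigned C1 = 32 - C0; // C0 + C1 == bit width
        uint32_t Or = (X << C0) | (Y >> C1);
        assert(Or == fshr(X, Y, C1) && Or == fshl(X, Y, C0));
      }
      std::cout << "funnel-shift identity holds\n";
    }
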
@@ -4127,8 +4153,9 @@ bool CombinerHelper::matchBitfieldExtractFromAnd(
assert(MI.getOpcode() == TargetOpcode::G_AND);
Register Dst = MI.getOperand(0).getReg();
LLT Ty = MRI.getType(Dst);
- if (!getTargetLowering().isConstantUnsignedBitfieldExtactLegal(
- TargetOpcode::G_UBFX, Ty, Ty))
+ LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
+ if (!getTargetLowering().isConstantUnsignedBitfieldExtractLegal(
+ TargetOpcode::G_UBFX, Ty, ExtractTy))
return false;
int64_t AndImm, LSBImm;
@@ -4148,7 +4175,6 @@ bool CombinerHelper::matchBitfieldExtractFromAnd(
if (static_cast<uint64_t>(LSBImm) >= Size)
return false;
- LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
uint64_t Width = APInt(Size, AndImm).countTrailingOnes();
MatchInfo = [=](MachineIRBuilder &B) {
auto WidthCst = B.buildConstant(ExtractTy, Width);
@@ -4214,8 +4240,9 @@ bool CombinerHelper::matchBitfieldExtractFromShrAnd(
const Register Dst = MI.getOperand(0).getReg();
LLT Ty = MRI.getType(Dst);
- if (!getTargetLowering().isConstantUnsignedBitfieldExtactLegal(
- TargetOpcode::G_UBFX, Ty, Ty))
+ LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
+ if (!getTargetLowering().isConstantUnsignedBitfieldExtractLegal(
+ TargetOpcode::G_UBFX, Ty, ExtractTy))
return false;
// Try to match shr (and x, c1), c2
@@ -4249,8 +4276,8 @@ bool CombinerHelper::matchBitfieldExtractFromShrAnd(
return false;
MatchInfo = [=](MachineIRBuilder &B) {
- auto WidthCst = B.buildConstant(Ty, Width);
- auto PosCst = B.buildConstant(Ty, Pos);
+ auto WidthCst = B.buildConstant(ExtractTy, Width);
+ auto PosCst = B.buildConstant(ExtractTy, Pos);
B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {AndSrc, PosCst, WidthCst});
};
return true;
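Note on the two bitfield-extract combines above: the G_UBFX position and width constants are now built in the target's preferred shift-amount type rather than the value type, but the matched pattern is still the usual mask-and-shift form of an unsigned bitfield extract. A small C++ reference of that pattern (constants chosen arbitrarily for the example):

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    // Unsigned bitfield extract: take Width bits of X starting at bit Pos.
    static uint64_t ubfx(uint64_t X, unsigned Pos, unsigned Width) {
      uint64_t Mask = (Width >= 64) ? ~0ULL : ((1ULL << Width) - 1);
      return (X >> Pos) & Mask;
    }

    int main() {
      uint64_t X = 0xABCD1234u;
      // shr (and x, c1), c2  ==  ubfx x, c2, popcount(c1 >> c2)  when c1 is a
      // mask of contiguous ones starting at bit 0 (here c1 = 0xFFFF, c2 = 4).
      uint64_t ViaShrAnd = (X & 0xFFFFu) >> 4;
      assert(ViaShrAnd == ubfx(X, 4, 12));
      std::cout << std::hex << ViaShrAnd << '\n';
    }
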
@@ -4850,37 +4877,39 @@ bool CombinerHelper::matchCombineFAddFMulToFMadOrFMA(
if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
return false;
- MachineInstr *LHS = MRI.getVRegDef(MI.getOperand(1).getReg());
- MachineInstr *RHS = MRI.getVRegDef(MI.getOperand(2).getReg());
+ Register Op1 = MI.getOperand(1).getReg();
+ Register Op2 = MI.getOperand(2).getReg();
+ DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
+ DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
unsigned PreferredFusedOpcode =
HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
// If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
// prefer to fold the multiply with fewer uses.
- if (Aggressive && isContractableFMul(*LHS, AllowFusionGlobally) &&
- isContractableFMul(*RHS, AllowFusionGlobally)) {
- if (hasMoreUses(*LHS, *RHS, MRI))
+ if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
+ isContractableFMul(*RHS.MI, AllowFusionGlobally)) {
+ if (hasMoreUses(*LHS.MI, *RHS.MI, MRI))
std::swap(LHS, RHS);
}
// fold (fadd (fmul x, y), z) -> (fma x, y, z)
- if (isContractableFMul(*LHS, AllowFusionGlobally) &&
- (Aggressive || MRI.hasOneNonDBGUse(LHS->getOperand(0).getReg()))) {
+ if (isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
+ (Aggressive || MRI.hasOneNonDBGUse(LHS.Reg))) {
MatchInfo = [=, &MI](MachineIRBuilder &B) {
B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
- {LHS->getOperand(1).getReg(), LHS->getOperand(2).getReg(),
- RHS->getOperand(0).getReg()});
+ {LHS.MI->getOperand(1).getReg(),
+ LHS.MI->getOperand(2).getReg(), RHS.Reg});
};
return true;
}
// fold (fadd x, (fmul y, z)) -> (fma y, z, x)
- if (isContractableFMul(*RHS, AllowFusionGlobally) &&
- (Aggressive || MRI.hasOneNonDBGUse(RHS->getOperand(0).getReg()))) {
+ if (isContractableFMul(*RHS.MI, AllowFusionGlobally) &&
+ (Aggressive || MRI.hasOneNonDBGUse(RHS.Reg))) {
MatchInfo = [=, &MI](MachineIRBuilder &B) {
B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
- {RHS->getOperand(1).getReg(), RHS->getOperand(2).getReg(),
- LHS->getOperand(0).getReg()});
+ {RHS.MI->getOperand(1).getReg(),
+ RHS.MI->getOperand(2).getReg(), LHS.Reg});
};
return true;
}
@@ -4897,8 +4926,10 @@ bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMA(
return false;
const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering();
- MachineInstr *LHS = MRI.getVRegDef(MI.getOperand(1).getReg());
- MachineInstr *RHS = MRI.getVRegDef(MI.getOperand(2).getReg());
+ Register Op1 = MI.getOperand(1).getReg();
+ Register Op2 = MI.getOperand(2).getReg();
+ DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
+ DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
LLT DstType = MRI.getType(MI.getOperand(0).getReg());
unsigned PreferredFusedOpcode =
@@ -4906,42 +4937,38 @@ bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMA(
// If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
// prefer to fold the multiply with fewer uses.
- if (Aggressive && isContractableFMul(*LHS, AllowFusionGlobally) &&
- isContractableFMul(*RHS, AllowFusionGlobally)) {
- if (hasMoreUses(*LHS, *RHS, MRI))
+ if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
+ isContractableFMul(*RHS.MI, AllowFusionGlobally)) {
+ if (hasMoreUses(*LHS.MI, *RHS.MI, MRI))
std::swap(LHS, RHS);
}
// fold (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
MachineInstr *FpExtSrc;
- if (mi_match(LHS->getOperand(0).getReg(), MRI,
- m_GFPExt(m_MInstr(FpExtSrc))) &&
+ if (mi_match(LHS.Reg, MRI, m_GFPExt(m_MInstr(FpExtSrc))) &&
isContractableFMul(*FpExtSrc, AllowFusionGlobally) &&
TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
MRI.getType(FpExtSrc->getOperand(1).getReg()))) {
MatchInfo = [=, &MI](MachineIRBuilder &B) {
auto FpExtX = B.buildFPExt(DstType, FpExtSrc->getOperand(1).getReg());
auto FpExtY = B.buildFPExt(DstType, FpExtSrc->getOperand(2).getReg());
- B.buildInstr(
- PreferredFusedOpcode, {MI.getOperand(0).getReg()},
- {FpExtX.getReg(0), FpExtY.getReg(0), RHS->getOperand(0).getReg()});
+ B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
+ {FpExtX.getReg(0), FpExtY.getReg(0), RHS.Reg});
};
return true;
}
// fold (fadd z, (fpext (fmul x, y))) -> (fma (fpext x), (fpext y), z)
// Note: Commutes FADD operands.
- if (mi_match(RHS->getOperand(0).getReg(), MRI,
- m_GFPExt(m_MInstr(FpExtSrc))) &&
+ if (mi_match(RHS.Reg, MRI, m_GFPExt(m_MInstr(FpExtSrc))) &&
isContractableFMul(*FpExtSrc, AllowFusionGlobally) &&
TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
MRI.getType(FpExtSrc->getOperand(1).getReg()))) {
MatchInfo = [=, &MI](MachineIRBuilder &B) {
auto FpExtX = B.buildFPExt(DstType, FpExtSrc->getOperand(1).getReg());
auto FpExtY = B.buildFPExt(DstType, FpExtSrc->getOperand(2).getReg());
- B.buildInstr(
- PreferredFusedOpcode, {MI.getOperand(0).getReg()},
- {FpExtX.getReg(0), FpExtY.getReg(0), LHS->getOperand(0).getReg()});
+ B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
+ {FpExtX.getReg(0), FpExtY.getReg(0), LHS.Reg});
};
return true;
}
@@ -4957,8 +4984,10 @@ bool CombinerHelper::matchCombineFAddFMAFMulToFMadOrFMA(
if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive, true))
return false;
- MachineInstr *LHS = MRI.getVRegDef(MI.getOperand(1).getReg());
- MachineInstr *RHS = MRI.getVRegDef(MI.getOperand(2).getReg());
+ Register Op1 = MI.getOperand(1).getReg();
+ Register Op2 = MI.getOperand(2).getReg();
+ DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
+ DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
unsigned PreferredFusedOpcode =
@@ -4966,31 +4995,31 @@ bool CombinerHelper::matchCombineFAddFMAFMulToFMadOrFMA(
// If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
// prefer to fold the multiply with fewer uses.
- if (Aggressive && isContractableFMul(*LHS, AllowFusionGlobally) &&
- isContractableFMul(*RHS, AllowFusionGlobally)) {
- if (hasMoreUses(*LHS, *RHS, MRI))
+ if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
+ isContractableFMul(*RHS.MI, AllowFusionGlobally)) {
+ if (hasMoreUses(*LHS.MI, *RHS.MI, MRI))
std::swap(LHS, RHS);
}
MachineInstr *FMA = nullptr;
Register Z;
// fold (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z))
- if (LHS->getOpcode() == PreferredFusedOpcode &&
- (MRI.getVRegDef(LHS->getOperand(3).getReg())->getOpcode() ==
+ if (LHS.MI->getOpcode() == PreferredFusedOpcode &&
+ (MRI.getVRegDef(LHS.MI->getOperand(3).getReg())->getOpcode() ==
TargetOpcode::G_FMUL) &&
- MRI.hasOneNonDBGUse(LHS->getOperand(0).getReg()) &&
- MRI.hasOneNonDBGUse(LHS->getOperand(3).getReg())) {
- FMA = LHS;
- Z = RHS->getOperand(0).getReg();
+ MRI.hasOneNonDBGUse(LHS.MI->getOperand(0).getReg()) &&
+ MRI.hasOneNonDBGUse(LHS.MI->getOperand(3).getReg())) {
+ FMA = LHS.MI;
+ Z = RHS.Reg;
}
// fold (fadd z, (fma x, y, (fmul u, v))) -> (fma x, y, (fma u, v, z))
- else if (RHS->getOpcode() == PreferredFusedOpcode &&
- (MRI.getVRegDef(RHS->getOperand(3).getReg())->getOpcode() ==
+ else if (RHS.MI->getOpcode() == PreferredFusedOpcode &&
+ (MRI.getVRegDef(RHS.MI->getOperand(3).getReg())->getOpcode() ==
TargetOpcode::G_FMUL) &&
- MRI.hasOneNonDBGUse(RHS->getOperand(0).getReg()) &&
- MRI.hasOneNonDBGUse(RHS->getOperand(3).getReg())) {
- Z = LHS->getOperand(0).getReg();
- FMA = RHS;
+ MRI.hasOneNonDBGUse(RHS.MI->getOperand(0).getReg()) &&
+ MRI.hasOneNonDBGUse(RHS.MI->getOperand(3).getReg())) {
+ Z = LHS.Reg;
+ FMA = RHS.MI;
}
if (FMA) {
@@ -5025,17 +5054,19 @@ bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering();
LLT DstType = MRI.getType(MI.getOperand(0).getReg());
- MachineInstr *LHS = MRI.getVRegDef(MI.getOperand(1).getReg());
- MachineInstr *RHS = MRI.getVRegDef(MI.getOperand(2).getReg());
+ Register Op1 = MI.getOperand(1).getReg();
+ Register Op2 = MI.getOperand(2).getReg();
+ DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
+ DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
unsigned PreferredFusedOpcode =
HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
// If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
// prefer to fold the multiply with fewer uses.
- if (Aggressive && isContractableFMul(*LHS, AllowFusionGlobally) &&
- isContractableFMul(*RHS, AllowFusionGlobally)) {
- if (hasMoreUses(*LHS, *RHS, MRI))
+ if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
+ isContractableFMul(*RHS.MI, AllowFusionGlobally)) {
+ if (hasMoreUses(*LHS.MI, *RHS.MI, MRI))
std::swap(LHS, RHS);
}
@@ -5054,16 +5085,17 @@ bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
MachineInstr *FMulMI, *FMAMI;
// fold (fadd (fma x, y, (fpext (fmul u, v))), z)
// -> (fma x, y, (fma (fpext u), (fpext v), z))
- if (LHS->getOpcode() == PreferredFusedOpcode &&
- mi_match(LHS->getOperand(3).getReg(), MRI, m_GFPExt(m_MInstr(FMulMI))) &&
+ if (LHS.MI->getOpcode() == PreferredFusedOpcode &&
+ mi_match(LHS.MI->getOperand(3).getReg(), MRI,
+ m_GFPExt(m_MInstr(FMulMI))) &&
isContractableFMul(*FMulMI, AllowFusionGlobally) &&
TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
MRI.getType(FMulMI->getOperand(0).getReg()))) {
MatchInfo = [=](MachineIRBuilder &B) {
buildMatchInfo(FMulMI->getOperand(1).getReg(),
- FMulMI->getOperand(2).getReg(),
- RHS->getOperand(0).getReg(), LHS->getOperand(1).getReg(),
- LHS->getOperand(2).getReg(), B);
+ FMulMI->getOperand(2).getReg(), RHS.Reg,
+ LHS.MI->getOperand(1).getReg(),
+ LHS.MI->getOperand(2).getReg(), B);
};
return true;
}
@@ -5073,7 +5105,7 @@ bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
// FIXME: This turns two single-precision and one double-precision
// operation into two double-precision operations, which might not be
// interesting for all targets, especially GPUs.
- if (mi_match(LHS->getOperand(0).getReg(), MRI, m_GFPExt(m_MInstr(FMAMI))) &&
+ if (mi_match(LHS.Reg, MRI, m_GFPExt(m_MInstr(FMAMI))) &&
FMAMI->getOpcode() == PreferredFusedOpcode) {
MachineInstr *FMulMI = MRI.getVRegDef(FMAMI->getOperand(3).getReg());
if (isContractableFMul(*FMulMI, AllowFusionGlobally) &&
@@ -5085,8 +5117,7 @@ bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
X = B.buildFPExt(DstType, X).getReg(0);
Y = B.buildFPExt(DstType, Y).getReg(0);
buildMatchInfo(FMulMI->getOperand(1).getReg(),
- FMulMI->getOperand(2).getReg(),
- RHS->getOperand(0).getReg(), X, Y, B);
+ FMulMI->getOperand(2).getReg(), RHS.Reg, X, Y, B);
};
return true;
@@ -5095,16 +5126,17 @@ bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
// fold (fadd z, (fma x, y, (fpext (fmul u, v)))
// -> (fma x, y, (fma (fpext u), (fpext v), z))
- if (RHS->getOpcode() == PreferredFusedOpcode &&
- mi_match(RHS->getOperand(3).getReg(), MRI, m_GFPExt(m_MInstr(FMulMI))) &&
+ if (RHS.MI->getOpcode() == PreferredFusedOpcode &&
+ mi_match(RHS.MI->getOperand(3).getReg(), MRI,
+ m_GFPExt(m_MInstr(FMulMI))) &&
isContractableFMul(*FMulMI, AllowFusionGlobally) &&
TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
MRI.getType(FMulMI->getOperand(0).getReg()))) {
MatchInfo = [=](MachineIRBuilder &B) {
buildMatchInfo(FMulMI->getOperand(1).getReg(),
- FMulMI->getOperand(2).getReg(),
- LHS->getOperand(0).getReg(), RHS->getOperand(1).getReg(),
- RHS->getOperand(2).getReg(), B);
+ FMulMI->getOperand(2).getReg(), LHS.Reg,
+ RHS.MI->getOperand(1).getReg(),
+ RHS.MI->getOperand(2).getReg(), B);
};
return true;
}
@@ -5114,7 +5146,7 @@ bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
// FIXME: This turns two single-precision and one double-precision
// operation into two double-precision operations, which might not be
// interesting for all targets, especially GPUs.
- if (mi_match(RHS->getOperand(0).getReg(), MRI, m_GFPExt(m_MInstr(FMAMI))) &&
+ if (mi_match(RHS.Reg, MRI, m_GFPExt(m_MInstr(FMAMI))) &&
FMAMI->getOpcode() == PreferredFusedOpcode) {
MachineInstr *FMulMI = MRI.getVRegDef(FMAMI->getOperand(3).getReg());
if (isContractableFMul(*FMulMI, AllowFusionGlobally) &&
@@ -5126,8 +5158,7 @@ bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
X = B.buildFPExt(DstType, X).getReg(0);
Y = B.buildFPExt(DstType, Y).getReg(0);
buildMatchInfo(FMulMI->getOperand(1).getReg(),
- FMulMI->getOperand(2).getReg(),
- LHS->getOperand(0).getReg(), X, Y, B);
+ FMulMI->getOperand(2).getReg(), LHS.Reg, X, Y, B);
};
return true;
}
@@ -5144,16 +5175,18 @@ bool CombinerHelper::matchCombineFSubFMulToFMadOrFMA(
if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
return false;
- MachineInstr *LHS = MRI.getVRegDef(MI.getOperand(1).getReg());
- MachineInstr *RHS = MRI.getVRegDef(MI.getOperand(2).getReg());
+ Register Op1 = MI.getOperand(1).getReg();
+ Register Op2 = MI.getOperand(2).getReg();
+ DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
+ DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
// If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
// prefer to fold the multiply with fewer uses.
int FirstMulHasFewerUses = true;
- if (isContractableFMul(*LHS, AllowFusionGlobally) &&
- isContractableFMul(*RHS, AllowFusionGlobally) &&
- hasMoreUses(*LHS, *RHS, MRI))
+ if (isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
+ isContractableFMul(*RHS.MI, AllowFusionGlobally) &&
+ hasMoreUses(*LHS.MI, *RHS.MI, MRI))
FirstMulHasFewerUses = false;
unsigned PreferredFusedOpcode =
@@ -5161,24 +5194,24 @@ bool CombinerHelper::matchCombineFSubFMulToFMadOrFMA(
// fold (fsub (fmul x, y), z) -> (fma x, y, -z)
if (FirstMulHasFewerUses &&
- (isContractableFMul(*LHS, AllowFusionGlobally) &&
- (Aggressive || MRI.hasOneNonDBGUse(LHS->getOperand(0).getReg())))) {
+ (isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
+ (Aggressive || MRI.hasOneNonDBGUse(LHS.Reg)))) {
MatchInfo = [=, &MI](MachineIRBuilder &B) {
- Register NegZ = B.buildFNeg(DstTy, RHS->getOperand(0).getReg()).getReg(0);
- B.buildInstr(
- PreferredFusedOpcode, {MI.getOperand(0).getReg()},
- {LHS->getOperand(1).getReg(), LHS->getOperand(2).getReg(), NegZ});
+ Register NegZ = B.buildFNeg(DstTy, RHS.Reg).getReg(0);
+ B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
+ {LHS.MI->getOperand(1).getReg(),
+ LHS.MI->getOperand(2).getReg(), NegZ});
};
return true;
}
// fold (fsub x, (fmul y, z)) -> (fma -y, z, x)
- else if ((isContractableFMul(*RHS, AllowFusionGlobally) &&
- (Aggressive || MRI.hasOneNonDBGUse(RHS->getOperand(0).getReg())))) {
+ else if ((isContractableFMul(*RHS.MI, AllowFusionGlobally) &&
+ (Aggressive || MRI.hasOneNonDBGUse(RHS.Reg)))) {
MatchInfo = [=, &MI](MachineIRBuilder &B) {
- Register NegY = B.buildFNeg(DstTy, RHS->getOperand(1).getReg()).getReg(0);
- B.buildInstr(
- PreferredFusedOpcode, {MI.getOperand(0).getReg()},
- {NegY, RHS->getOperand(2).getReg(), LHS->getOperand(0).getReg()});
+ Register NegY =
+ B.buildFNeg(DstTy, RHS.MI->getOperand(1).getReg()).getReg(0);
+ B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
+ {NegY, RHS.MI->getOperand(2).getReg(), LHS.Reg});
};
return true;
}
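Note on the fadd/fmul contraction combines above: each operand is now tracked as a DefinitionAndSourceRegister pair, but the transform itself is unchanged, (fadd (fmul x, y), z) still becomes one fused multiply-add when contraction is allowed. A tiny C++ illustration of why the fused form can differ from the separate operations (std::fma rounds once, the expanded form rounds twice):

    #include <cmath>
    #include <cstdio>

    int main() {
      double X = 1.0 + 1e-16, Y = 1.0 - 1e-16, Z = -1.0;
      double Separate = X * Y + Z;      // rounds the product, then the sum
      double Fused = std::fma(X, Y, Z); // single rounding at the end
      std::printf("separate=%.20g fused=%.20g\n", Separate, Fused);
      return 0;
    }
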
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
index 306af808659a..64c2f0d5f8e4 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
@@ -37,6 +37,11 @@ Align GISelKnownBits::computeKnownAlignment(Register R, unsigned Depth) {
switch (MI->getOpcode()) {
case TargetOpcode::COPY:
return computeKnownAlignment(MI->getOperand(1).getReg(), Depth);
+ case TargetOpcode::G_ASSERT_ALIGN: {
+ // TODO: Min with source
+ int64_t LogAlign = MI->getOperand(2).getImm();
+ return Align(1ull << LogAlign);
+ }
case TargetOpcode::G_FRAME_INDEX: {
int FrameIdx = MI->getOperand(1).getIndex();
return MF.getFrameInfo().getObjectAlign(FrameIdx);
@@ -466,6 +471,18 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
Known.Zero.setBitsFrom(SrcBitWidth);
break;
}
+ case TargetOpcode::G_ASSERT_ALIGN: {
+ int64_t LogOfAlign = MI.getOperand(2).getImm();
+ if (LogOfAlign == 0)
+ break;
+
+ // TODO: Should use maximum with source
+    // If a node is guaranteed to be aligned, mark its low bits as known zero
+    // accordingly, and clear any known-one bits among them.
+ Known.Zero.setLowBits(LogOfAlign);
+ Known.One.clearLowBits(LogOfAlign);
+ break;
+ }
case TargetOpcode::G_MERGE_VALUES: {
unsigned NumOps = MI.getNumOperands();
unsigned OpSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
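Note on the GISelKnownBits change above: G_ASSERT_ALIGN is now understood, and an alignment of 2^k implies the low k bits of the value are zero, which is what both computeKnownAlignment and computeKnownBitsImpl exploit. A short C++ sketch of that relationship (plain integers standing in for the KnownBits structure):

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    // If a value is known to be aligned to 2^LogAlign, its low LogAlign bits
    // are zero, so a mask of known-zero bits follows directly from it.
    static uint64_t knownZeroLowBits(unsigned LogAlign) {
      return LogAlign ? ((1ULL << LogAlign) - 1) : 0;
    }

    int main() {
      unsigned LogAlign = 4;        // 16-byte alignment
      uint64_t P = 0x1000 + 16 * 7; // some 16-byte aligned address
      assert((P & knownZeroLowBits(LogAlign)) == 0);
      std::cout << std::hex << knownZeroLowBits(LogAlign) << '\n';
    }
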
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
index 4ae427484945..e5f95ca5aa73 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
@@ -297,10 +297,8 @@ bool InlineAsmLowering::lowerInlineAsm(
GISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
// Compute the value type for each operand.
- if (OpInfo.Type == InlineAsm::isInput ||
- (OpInfo.Type == InlineAsm::isOutput && OpInfo.isIndirect)) {
-
- OpInfo.CallOperandVal = const_cast<Value *>(Call.getArgOperand(ArgNo++));
+ if (OpInfo.hasArg()) {
+ OpInfo.CallOperandVal = const_cast<Value *>(Call.getArgOperand(ArgNo));
if (isa<BasicBlock>(OpInfo.CallOperandVal)) {
LLVM_DEBUG(dbgs() << "Basic block input operands not supported yet\n");
@@ -312,10 +310,8 @@ bool InlineAsmLowering::lowerInlineAsm(
// If this is an indirect operand, the operand is a pointer to the
// accessed type.
if (OpInfo.isIndirect) {
- PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
- if (!PtrTy)
- report_fatal_error("Indirect operand for inline asm not a pointer!");
- OpTy = PtrTy->getElementType();
+ OpTy = Call.getAttributes().getParamElementType(ArgNo);
+ assert(OpTy && "Indirect operand must have elementtype attribute");
}
// FIXME: Support aggregate input operands
@@ -327,7 +323,7 @@ bool InlineAsmLowering::lowerInlineAsm(
OpInfo.ConstraintVT =
TLI->getAsmOperandValueType(DL, OpTy, true).getSimpleVT();
-
+ ++ArgNo;
} else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
if (StructType *STy = dyn_cast<StructType>(Call.getType())) {
@@ -627,7 +623,8 @@ bool InlineAsmLowering::lowerInlineAsm(
Register SrcReg = OpInfo.Regs[0];
unsigned SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
- if (MRI->getType(ResRegs[i]).getSizeInBits() < SrcSize) {
+ LLT ResTy = MRI->getType(ResRegs[i]);
+ if (ResTy.isScalar() && ResTy.getSizeInBits() < SrcSize) {
// First copy the non-typed virtual register into a generic virtual
// register
Register Tmp1Reg =
@@ -635,9 +632,14 @@ bool InlineAsmLowering::lowerInlineAsm(
MIRBuilder.buildCopy(Tmp1Reg, SrcReg);
// Need to truncate the result of the register
MIRBuilder.buildTrunc(ResRegs[i], Tmp1Reg);
- } else {
+ } else if (ResTy.getSizeInBits() == SrcSize) {
MIRBuilder.buildCopy(ResRegs[i], SrcReg);
+ } else {
+ LLVM_DEBUG(dbgs() << "Unhandled output operand with "
+ "mismatched register size\n");
+ return false;
}
+
break;
}
case TargetLowering::C_Immediate:
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
index b10c9272a508..2bb5addefe48 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
@@ -71,9 +71,10 @@ InstructionSelect::InstructionSelect()
void InstructionSelect::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TargetPassConfig>();
+ AU.addRequired<GISelKnownBitsAnalysis>();
+ AU.addPreserved<GISelKnownBitsAnalysis>();
+
if (OptLevel != CodeGenOpt::None) {
- AU.addRequired<GISelKnownBitsAnalysis>();
- AU.addPreserved<GISelKnownBitsAnalysis>();
AU.addRequired<ProfileSummaryInfoWrapperPass>();
LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
}
@@ -97,9 +98,8 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
OptLevel = MF.getFunction().hasOptNone() ? CodeGenOpt::None
: MF.getTarget().getOptLevel();
- GISelKnownBits *KB = nullptr;
+ GISelKnownBits *KB = &getAnalysis<GISelKnownBitsAnalysis>().get(MF);
if (OptLevel != CodeGenOpt::None) {
- KB = &getAnalysis<GISelKnownBitsAnalysis>().get(MF);
PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
if (PSI && PSI->hasProfileSummary())
BFI = &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI();
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp
index dc5a4d8f85aa..1d0c106fd5db 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp
@@ -29,7 +29,7 @@
using namespace llvm;
InstructionSelector::MatcherState::MatcherState(unsigned MaxRenderers)
- : Renderers(MaxRenderers), MIs() {}
+ : Renderers(MaxRenderers) {}
InstructionSelector::InstructionSelector() = default;
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index e8a8efd5dad4..37bc8a65dc7c 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -564,7 +564,7 @@ static bool isLibCallInTailPosition(MachineInstr &MI,
// the return. Ignore NoAlias and NonNull because they don't affect the
// call sequence.
AttributeList CallerAttrs = F.getAttributes();
- if (AttrBuilder(CallerAttrs, AttributeList::ReturnIndex)
+ if (AttrBuilder(F.getContext(), CallerAttrs.getRetAttrs())
.removeAttribute(Attribute::NoAlias)
.removeAttribute(Attribute::NonNull)
.hasAttributes())
@@ -1677,7 +1677,7 @@ LegalizerHelper::widenScalarUnmergeValues(MachineInstr &MI, unsigned TypeIdx,
// Widen SrcTy to WideTy. This does not affect the result, but since the
// user requested this size, it is probably better handled than SrcTy and
- // should reduce the total number of legalization artifacts
+ // should reduce the total number of legalization artifacts.
if (WideTy.getSizeInBits() > SrcTy.getSizeInBits()) {
SrcTy = WideTy;
SrcReg = MIRBuilder.buildAnyExt(WideTy, SrcReg).getReg(0);
@@ -3655,7 +3655,6 @@ static bool hasSameNumEltsOnAllVectorOperands(
if (!Ty.isVector()) {
if (!is_contained(NonVecOpIndices, OpIdx))
return false;
- is_contained(NonVecOpIndices, OpIdx);
continue;
}
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/Localizer.cpp b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/Localizer.cpp
index a1acc4195840..328a278f3d68 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/Localizer.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/Localizer.cpp
@@ -124,14 +124,13 @@ bool Localizer::localizeInterBlock(MachineFunction &MF,
LocalizedInstrs.insert(LocalizedMI);
MachineInstr &UseMI = *MOUse.getParent();
if (MRI->hasOneUse(Reg) && !UseMI.isPHI())
- InsertMBB->insert(InsertMBB->SkipPHIsAndLabels(UseMI), LocalizedMI);
+ InsertMBB->insert(UseMI, LocalizedMI);
else
InsertMBB->insert(InsertMBB->SkipPHIsAndLabels(InsertMBB->begin()),
LocalizedMI);
// Set a new register for the definition.
- Register NewReg = MRI->createGenericVirtualRegister(MRI->getType(Reg));
- MRI->setRegClassOrRegBank(NewReg, MRI->getRegClassOrRegBank(Reg));
+ Register NewReg = MRI->cloneVirtualRegister(Reg);
LocalizedMI->getOperand(0).setReg(NewReg);
NewVRegIt =
MBBWithLocalDef.insert(std::make_pair(MBBAndReg, NewReg)).first;
@@ -174,9 +173,10 @@ bool Localizer::localizeIntraBlock(LocalizedSetVecT &LocalizedInstrs) {
while (II != MBB.end() && !Users.count(&*II))
++II;
- LLVM_DEBUG(dbgs() << "Intra-block: moving " << *MI << " before " << *&*II
- << "\n");
assert(II != MBB.end() && "Didn't find the user in the MBB");
+ LLVM_DEBUG(dbgs() << "Intra-block: moving " << *MI << " before " << *II
+ << '\n');
+
MI->removeFromParent();
MBB.insert(II, MI);
Changed = true;
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index 391251886fbb..c6720568b362 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -282,18 +282,6 @@ MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
return buildInstr(TargetOpcode::COPY, Res, Op);
}
-MachineInstrBuilder MachineIRBuilder::buildAssertSExt(const DstOp &Res,
- const SrcOp &Op,
- unsigned Size) {
- return buildInstr(TargetOpcode::G_ASSERT_SEXT, Res, Op).addImm(Size);
-}
-
-MachineInstrBuilder MachineIRBuilder::buildAssertZExt(const DstOp &Res,
- const SrcOp &Op,
- unsigned Size) {
- return buildInstr(TargetOpcode::G_ASSERT_ZEXT, Res, Op).addImm(Size);
-}
-
MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
const ConstantInt &Val) {
LLT Ty = Res.getLLTTy(*getMRI());
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp
index 937d94764be1..01af6bb51bb7 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp
@@ -626,7 +626,8 @@ bool RegBankSelect::assignInstr(MachineInstr &MI) {
unsigned Opc = MI.getOpcode();
if (isPreISelGenericOptimizationHint(Opc)) {
assert((Opc == TargetOpcode::G_ASSERT_ZEXT ||
- Opc == TargetOpcode::G_ASSERT_SEXT) &&
+ Opc == TargetOpcode::G_ASSERT_SEXT ||
+ Opc == TargetOpcode::G_ASSERT_ALIGN) &&
"Unexpected hint opcode!");
// The only correct mapping for these is to always use the source register
// bank.
@@ -856,7 +857,7 @@ void RegBankSelect::RepairingPlacement::addInsertPoint(
RegBankSelect::InstrInsertPoint::InstrInsertPoint(MachineInstr &Instr,
bool Before)
- : InsertPoint(), Instr(Instr), Before(Before) {
+ : Instr(Instr), Before(Before) {
// Since we do not support splitting, we do not need to update
// liveness and such, so do not do anything with P.
assert((!Before || !Instr.isPHI()) &&
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/Utils.cpp
index 4981a537dc7c..544af9a2954f 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -592,17 +592,17 @@ Optional<APFloat> llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
return None;
}
-Optional<MachineInstr *>
-llvm::ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
- const Register Op2,
- const MachineRegisterInfo &MRI,
- MachineIRBuilder &MIB) {
- auto *SrcVec1 = getOpcodeDef<GBuildVector>(Op1, MRI);
- if (!SrcVec1)
- return None;
+Register llvm::ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
+ const Register Op2,
+ const MachineRegisterInfo &MRI,
+ MachineIRBuilder &MIB) {
auto *SrcVec2 = getOpcodeDef<GBuildVector>(Op2, MRI);
if (!SrcVec2)
- return None;
+ return Register();
+
+ auto *SrcVec1 = getOpcodeDef<GBuildVector>(Op1, MRI);
+ if (!SrcVec1)
+ return Register();
const LLT EltTy = MRI.getType(SrcVec1->getSourceReg(0));
@@ -611,14 +611,14 @@ llvm::ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
auto MaybeCst = ConstantFoldBinOp(Opcode, SrcVec1->getSourceReg(Idx),
SrcVec2->getSourceReg(Idx), MRI);
if (!MaybeCst)
- return None;
+ return Register();
auto FoldedCstReg = MIB.buildConstant(EltTy, *MaybeCst).getReg(0);
FoldedElements.emplace_back(FoldedCstReg);
}
// Create the new vector constant.
auto CstVec =
MIB.buildBuildVector(MRI.getType(SrcVec1->getReg(0)), FoldedElements);
- return &*CstVec;
+ return CstVec.getReg(0);
}
bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
@@ -704,8 +704,7 @@ Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
const TargetInstrInfo &TII,
MCRegister PhysReg,
const TargetRegisterClass &RC,
- LLT RegTy) {
- DebugLoc DL; // FIXME: Is no location the right choice?
+ const DebugLoc &DL, LLT RegTy) {
MachineBasicBlock &EntryMBB = MF.front();
MachineRegisterInfo &MRI = MF.getRegInfo();
Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp b/contrib/llvm-project/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp
index 9fabcfb1f326..2ee9379cb286 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp
@@ -185,7 +185,7 @@ class Polynomial {
APInt A;
public:
- Polynomial(Value *V) : ErrorMSBs((unsigned)-1), V(V), B(), A() {
+ Polynomial(Value *V) : ErrorMSBs((unsigned)-1), V(V) {
IntegerType *Ty = dyn_cast<IntegerType>(V->getType());
if (Ty) {
ErrorMSBs = 0;
@@ -195,12 +195,12 @@ public:
}
Polynomial(const APInt &A, unsigned ErrorMSBs = 0)
- : ErrorMSBs(ErrorMSBs), V(NULL), B(), A(A) {}
+ : ErrorMSBs(ErrorMSBs), V(nullptr), A(A) {}
Polynomial(unsigned BitWidth, uint64_t A, unsigned ErrorMSBs = 0)
- : ErrorMSBs(ErrorMSBs), V(NULL), B(), A(BitWidth, A) {}
+ : ErrorMSBs(ErrorMSBs), V(nullptr), A(BitWidth, A) {}
- Polynomial() : ErrorMSBs((unsigned)-1), V(NULL), B(), A() {}
+ Polynomial() : ErrorMSBs((unsigned)-1), V(nullptr) {}
/// Increment and clamp the number of undefined bits.
void incErrorMSBs(unsigned amt) {
@@ -677,7 +677,7 @@ public:
FixedVectorType *const VTy;
VectorInfo(FixedVectorType *VTy)
- : BB(nullptr), PV(nullptr), LIs(), Is(), SVI(nullptr), VTy(VTy) {
+ : BB(nullptr), PV(nullptr), SVI(nullptr), VTy(VTy) {
EI = new ElementInfo[VTy->getNumElements()];
}
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp b/contrib/llvm-project/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
index e97dcca201e8..8a190e769941 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
@@ -251,9 +251,10 @@ public:
/// creates DBG_VALUEs and puts them in #Transfers, then prepares the other
/// object fields to track variable locations as we step through the block.
/// FIXME: could just examine mloctracker instead of passing in \p mlocs?
- void loadInlocs(MachineBasicBlock &MBB, ValueIDNum *MLocs,
- SmallVectorImpl<std::pair<DebugVariable, DbgValue>> &VLocs,
- unsigned NumLocs) {
+ void
+ loadInlocs(MachineBasicBlock &MBB, ValueIDNum *MLocs,
+ const SmallVectorImpl<std::pair<DebugVariable, DbgValue>> &VLocs,
+ unsigned NumLocs) {
ActiveMLocs.clear();
ActiveVLocs.clear();
VarLocs.clear();
@@ -272,7 +273,7 @@ public:
};
// Map of the preferred location for each value.
- std::map<ValueIDNum, LocIdx> ValueToLoc;
+ DenseMap<ValueIDNum, LocIdx> ValueToLoc;
ActiveMLocs.reserve(VLocs.size());
ActiveVLocs.reserve(VLocs.size());
@@ -283,6 +284,11 @@ public:
LocIdx Idx = Location.Idx;
ValueIDNum &VNum = MLocs[Idx.asU64()];
VarLocs.push_back(VNum);
+
+ // Short-circuit unnecessary preferred location update.
+ if (VLocs.empty())
+ continue;
+
auto it = ValueToLoc.find(VNum);
// In order of preference, pick:
// * Callee saved registers,
@@ -298,7 +304,7 @@ public:
}
// Now map variables to their picked LocIdxes.
- for (auto Var : VLocs) {
+ for (const auto &Var : VLocs) {
if (Var.second.Kind == DbgValue::Const) {
PendingDbgValues.push_back(
emitMOLoc(*Var.second.MO, Var.first, Var.second.Properties));
@@ -413,7 +419,8 @@ public:
return Reg != SP && Reg != FP;
}
- bool recoverAsEntryValue(const DebugVariable &Var, DbgValueProperties &Prop,
+ bool recoverAsEntryValue(const DebugVariable &Var,
+ const DbgValueProperties &Prop,
const ValueIDNum &Num) {
// Is this variable location a candidate to be an entry value. First,
// should we be trying this at all?
@@ -2799,31 +2806,28 @@ void InstrRefBasedLDV::emitLocations(
}
}
- // We have to insert DBG_VALUEs in a consistent order, otherwise they appeaer
- // in DWARF in different orders. Use the order that they appear when walking
- // through each block / each instruction, stored in AllVarsNumbering.
- auto OrderDbgValues = [&](const MachineInstr *A,
- const MachineInstr *B) -> bool {
- DebugVariable VarA(A->getDebugVariable(), A->getDebugExpression(),
- A->getDebugLoc()->getInlinedAt());
- DebugVariable VarB(B->getDebugVariable(), B->getDebugExpression(),
- B->getDebugLoc()->getInlinedAt());
- return AllVarsNumbering.find(VarA)->second <
- AllVarsNumbering.find(VarB)->second;
- };
-
// Go through all the transfers recorded in the TransferTracker -- this is
// both the live-ins to a block, and any movements of values that happen
// in the middle.
- for (auto &P : TTracker->Transfers) {
- // Sort them according to appearance order.
- llvm::sort(P.Insts, OrderDbgValues);
+ for (const auto &P : TTracker->Transfers) {
+ // We have to insert DBG_VALUEs in a consistent order, otherwise they
+ // appear in DWARF in different orders. Use the order that they appear
+ // when walking through each block / each instruction, stored in
+ // AllVarsNumbering.
+ SmallVector<std::pair<unsigned, MachineInstr *>> Insts;
+ for (MachineInstr *MI : P.Insts) {
+ DebugVariable Var(MI->getDebugVariable(), MI->getDebugExpression(),
+ MI->getDebugLoc()->getInlinedAt());
+ Insts.emplace_back(AllVarsNumbering.find(Var)->second, MI);
+ }
+ llvm::sort(Insts,
+ [](const auto &A, const auto &B) { return A.first < B.first; });
+
// Insert either before or after the designated point...
if (P.MBB) {
MachineBasicBlock &MBB = *P.MBB;
- for (auto *MI : P.Insts) {
- MBB.insert(P.Pos, MI);
- }
+ for (const auto &Pair : Insts)
+ MBB.insert(P.Pos, Pair.second);
} else {
// Terminators, like tail calls, can clobber things. Don't try and place
// transfers after them.
@@ -2831,9 +2835,8 @@ void InstrRefBasedLDV::emitLocations(
continue;
MachineBasicBlock &MBB = *P.Pos->getParent();
- for (auto *MI : P.Insts) {
- MBB.insertAfterBundle(P.Pos, MI);
- }
+ for (const auto &Pair : Insts)
+ MBB.insertAfterBundle(P.Pos, Pair.second);
}
}
}
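Note on the emitLocations rewrite above: each DBG_VALUE's ordering number is computed once and the (key, instruction) pairs are sorted, instead of rebuilding a DebugVariable inside the comparator for every comparison. The decorate-then-sort pattern in isolation, in plain C++ (arbitrary strings standing in for the instructions):

    #include <algorithm>
    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    int main() {
      // Pretend these are DBG_VALUEs and the key is their appearance order
      // (AllVarsNumbering in the pass). Compute the key once per element...
      std::vector<std::string> Insts = {"var_c", "var_a", "var_b"};
      auto keyOf = [](const std::string &S) { return S.back() - 'a'; };

      std::vector<std::pair<int, std::string>> Decorated;
      for (const std::string &I : Insts)
        Decorated.emplace_back(keyOf(I), I);

      // ...then sort by that key alone, so the comparator stays trivial.
      std::sort(Decorated.begin(), Decorated.end(),
                [](const auto &A, const auto &B) { return A.first < B.first; });

      for (const auto &P : Decorated)
        std::cout << P.second << '\n';
    }
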
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.h b/contrib/llvm-project/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.h
index 789205e61cdb..9e9c0ce394fd 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.h
+++ b/contrib/llvm-project/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.h
@@ -494,7 +494,7 @@ public:
return StackIdxesToPos.find(Idx)->second;
}
- unsigned getNumLocs(void) const { return LocIdxToIDNum.size(); }
+ unsigned getNumLocs() const { return LocIdxToIDNum.size(); }
/// Reset all locations to contain a PHI value at the designated block. Used
/// sometimes for actual PHI values, othertimes to indicate the block entry
@@ -516,7 +516,7 @@ public:
}
/// Wipe any un-necessary location records after traversing a block.
- void reset(void) {
+ void reset() {
// We could reset all the location values too; however either loadFromArray
// or setMPhis should be called before this object is re-used. Just
// clear Masks, they're definitely not needed.
@@ -525,7 +525,7 @@ public:
/// Clear all data. Destroys the LocID <=> LocIdx map, which makes most of
/// the information in this pass uninterpretable.
- void clear(void) {
+ void clear() {
reset();
LocIDToLocIdx.clear();
LocIdxToLocID.clear();
@@ -1082,7 +1082,9 @@ template <> struct DenseMapInfo<ValueIDNum> {
return ValueIDNum::TombstoneValue;
}
- static unsigned getHashValue(const ValueIDNum &Val) { return Val.asU64(); }
+ static unsigned getHashValue(const ValueIDNum &Val) {
+ return hash_value(Val.asU64());
+ }
static bool isEqual(const ValueIDNum &A, const ValueIDNum &B) {
return A == B;
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/LiveDebugValues/LiveDebugValues.cpp b/contrib/llvm-project/llvm/lib/CodeGen/LiveDebugValues/LiveDebugValues.cpp
index 691977dc34e6..8f697611a82c 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/LiveDebugValues/LiveDebugValues.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/LiveDebugValues/LiveDebugValues.cpp
@@ -40,6 +40,10 @@ static cl::opt<bool>
"normal DBG_VALUE inputs"),
cl::init(false));
+static cl::opt<cl::boolOrDefault> ValueTrackingVariableLocations(
+ "experimental-debug-variable-locations",
+ cl::desc("Use experimental new value-tracking variable locations"));
+
// Options to prevent pathological compile-time behavior. If InputBBLimit and
// InputDbgValueLimit are both exceeded, range extension is disabled.
static cl::opt<unsigned> InputBBLimit(
@@ -117,3 +121,8 @@ bool LiveDebugValues::runOnMachineFunction(MachineFunction &MF) {
return TheImpl->ExtendRanges(MF, DomTree, TPC, InputBBLimit,
InputDbgValueLimit);
}
+
+bool llvm::debuginfoShouldUseDebugInstrRef(const Triple &T) {
+ // Enable if explicitly requested on command line.
+ return ValueTrackingVariableLocations == cl::boolOrDefault::BOU_TRUE;
+}
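Note on the LiveDebugValues change above: the experimental-debug-variable-locations flag is re-introduced here as a tri-state cl::boolOrDefault, so "not given on the command line" can later be distinguished from an explicit false and filled in with a per-target default. A minimal standalone C++ model of that tri-state pattern (a hand-rolled enum instead of LLVM's cl::boolOrDefault; names hypothetical):

    #include <iostream>

    enum class TriState { Unset, True, False };

    // Resolve a tri-state flag: an explicit value wins, otherwise fall back
    // to a (possibly target-dependent) default.
    static bool resolve(TriState Flag, bool DefaultValue) {
      if (Flag == TriState::True)
        return true;
      if (Flag == TriState::False)
        return false;
      return DefaultValue;
    }

    int main() {
      std::cout << resolve(TriState::Unset, /*DefaultValue=*/true) << ' '
                << resolve(TriState::False, /*DefaultValue=*/true) << '\n';
    }
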
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/LiveDebugValues/LiveDebugValues.h b/contrib/llvm-project/llvm/lib/CodeGen/LiveDebugValues/LiveDebugValues.h
index a5936c8a96f0..8f0b2ec3e1fc 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/LiveDebugValues/LiveDebugValues.h
+++ b/contrib/llvm-project/llvm/lib/CodeGen/LiveDebugValues/LiveDebugValues.h
@@ -12,6 +12,7 @@
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/ADT/Triple.h"
namespace llvm {
@@ -35,6 +36,9 @@ public:
// Factory functions for LiveDebugValues implementations.
extern LDVImpl *makeVarLocBasedLiveDebugValues();
extern LDVImpl *makeInstrRefBasedLiveDebugValues();
+
+extern bool debuginfoShouldUseDebugInstrRef(const Triple &T);
+
} // namespace llvm
#endif // LLVM_LIB_CODEGEN_LIVEDEBUGVALUES_LIVEDEBUGVALUES_H
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/LiveDebugVariables.cpp b/contrib/llvm-project/llvm/lib/CodeGen/LiveDebugVariables.cpp
index e6661e5135c3..6d806135240e 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/LiveDebugVariables.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/LiveDebugVariables.cpp
@@ -152,7 +152,7 @@ public:
}
}
- DbgVariableValue() : LocNoCount(0), WasIndirect(0), WasList(0) {}
+ DbgVariableValue() : LocNoCount(0), WasIndirect(false), WasList(false) {}
DbgVariableValue(const DbgVariableValue &Other)
: LocNoCount(Other.LocNoCount), WasIndirect(Other.getWasIndirect()),
WasList(Other.getWasList()), Expression(Other.getExpression()) {
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/LiveIntervals.cpp b/contrib/llvm-project/llvm/lib/CodeGen/LiveIntervals.cpp
index 2f97386b6d18..9571afa434c1 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/LiveIntervals.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/LiveIntervals.cpp
@@ -827,6 +827,8 @@ CancelKill:
MachineBasicBlock*
LiveIntervals::intervalIsInOneMBB(const LiveInterval &LI) const {
+ assert(!LI.empty() && "LiveInterval is empty.");
+
// A local live range must be fully contained inside the block, meaning it is
// defined and killed at instructions, not at block boundaries. It is not
// live in or out of any block.
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/contrib/llvm-project/llvm/lib/CodeGen/MIRParser/MIParser.cpp
index 1a04e1ca56a9..6477965bdc21 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -875,11 +875,11 @@ bool MIParser::parseBasicBlock(MachineBasicBlock &MBB,
// N.B: Multiple lists of successors and liveins are allowed and they're
// merged into one.
// Example:
- // liveins: %edi
- // liveins: %esi
+ // liveins: $edi
+ // liveins: $esi
//
// is equivalent to
- // liveins: %edi, %esi
+ // liveins: $edi, $esi
bool ExplicitSuccessors = false;
while (true) {
if (Token.is(MIToken::kw_successors)) {
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/MIRParser/MIRParser.cpp b/contrib/llvm-project/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
index d0323eaf3d78..f144639770bc 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
@@ -182,8 +182,7 @@ static void handleYAMLDiag(const SMDiagnostic &Diag, void *Context) {
MIRParserImpl::MIRParserImpl(std::unique_ptr<MemoryBuffer> Contents,
StringRef Filename, LLVMContext &Context,
std::function<void(Function &)> Callback)
- : SM(),
- Context(Context),
+ : Context(Context),
In(SM.getMemoryBuffer(SM.AddNewSourceBuffer(std::move(Contents), SMLoc()))
->getBuffer(),
nullptr, handleYAMLDiag, this),
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp b/contrib/llvm-project/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp
new file mode 100644
index 000000000000..a74c57690640
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp
@@ -0,0 +1,862 @@
+//===- MLRegAllocEvictAdvisor.cpp - ML eviction advisor -------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of the ML eviction advisor and reward injection pass
+//
+//===----------------------------------------------------------------------===//
+
+#include "RegAllocEvictionAdvisor.h"
+#include "RegAllocGreedy.h"
+#include "RegAllocScore.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/MLModelRunner.h"
+#include "llvm/Analysis/ModelUnderTrainingRunner.h"
+#include "llvm/Analysis/NoInferenceModelRunner.h"
+#include "llvm/Analysis/ReleaseModeModelRunner.h"
+#include "llvm/Analysis/Utils/TFUtils.h"
+#include "llvm/CodeGen/CalcSpillWeights.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/RegisterClassInfo.h"
+#include "llvm/CodeGen/VirtRegMap.h"
+#include "llvm/Config/config.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Pass.h"
+#include "llvm/PassRegistry.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Target/TargetMachine.h"
+
+#include <array>
+#include <memory>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "ml-regalloc"
+
+// Generated header in release (AOT) mode
+#if defined(LLVM_HAVE_TF_AOT_REGALLOCEVICTMODEL)
+#include "RegallocEvictModel.h"
+#endif
+
+// Options that only make sense in development mode
+#ifdef LLVM_HAVE_TF_API
+static cl::opt<std::string> TrainingLog(
+ "regalloc-training-log", cl::Hidden,
+ cl::desc("Training log for the register allocator eviction model"));
+
+static cl::opt<std::string> ModelUnderTraining(
+ "regalloc-model", cl::Hidden,
+ cl::desc("The model being trained for register allocation eviction"));
+
+#endif // #ifdef LLVM_HAVE_TF_API
+
+/// The score injection pass.
+/// This pass calculates the score for a function and inserts it in the log, but
+/// this happens only in development mode. It's a no-op otherwise.
+namespace llvm {
+class RegAllocScoring : public MachineFunctionPass {
+public:
+ static char ID;
+
+ RegAllocScoring() : MachineFunctionPass(ID) {
+ initializeRegAllocScoringPass(*PassRegistry::getPassRegistry());
+ }
+
+ ~RegAllocScoring() override = default;
+
+ StringRef getPassName() const override {
+ return "Register Allocation Pass Scoring";
+ }
+
+ /// RegAllocReward analysis usage.
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ AU.addRequired<RegAllocEvictionAdvisorAnalysis>();
+ AU.addRequired<MachineBlockFrequencyInfo>();
+ AU.addRequired<AAResultsWrapperPass>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ /// Performs this pass
+ bool runOnMachineFunction(MachineFunction &) override;
+};
+
+char RegAllocScoring::ID = 0;
+FunctionPass *createRegAllocScoringPass() { return new RegAllocScoring(); }
+
+} // namespace llvm
+
+INITIALIZE_PASS(RegAllocScoring, "regallocscoringpass",
+ "Register Allocation Scoring Pass", false, false)
+
+// ===================================
+// Common ML Advisor declarations
+// ===================================
+namespace {
+// This is the maximum number of interfering ranges. That's the number of
+// distinct AllocationOrder values, which comes from MCRegisterClass::RegsSize.
+// For X86, that's 32.
+// TODO: find a way to get this, statically, in a programmatic way.
+static const int64_t MaxInterferences = 32;
+
+// Logically, we can think of the feature set given to the evaluator as a 2D
+// matrix. The rows are the features (see next). The columns correspond to the
+// interferences. We treat the candidate virt reg as an 'interference', too, as
+// its feature set is the same as that of the interfering ranges. So we'll have
+// MaxInterferences + 1 columns and by convention, we will use the last column
+// for the virt reg seeking allocation.
+static const int64_t CandidateVirtRegPos = MaxInterferences;
+static const int64_t NumberOfInterferences = CandidateVirtRegPos + 1;
+
+// Most features are as described above, so we'll reuse this vector in defining
+// them.
+static const std::vector<int64_t> PerLiveRangeShape{1, NumberOfInterferences};
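Editorial illustration (not part of the patch): to make the layout above concrete, one per-live-range feature row can be pictured as a flat buffer indexed by the AllocationOrder position, with the last column reserved for the candidate. The names reuse the constants defined just above.

  // Each feature owns a 1 x NumberOfInterferences row. Column i holds the value
  // for the i-th register in AllocationOrder; the last column is the candidate.
  float FeatureRow[NumberOfInterferences] = {}; // starts zeroed, like the model input
  size_t Pos = 3;                               // hypothetical AllocationOrder index
  FeatureRow[Pos] = 42.0f;                      // value for an interfering live range
  FeatureRow[CandidateVirtRegPos] = 7.0f;       // value for the virt reg seeking allocation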
+
+// --------------
+// Features table
+// --------------
+// For each interfering live range (incl. the candidate) we collect a number of
+// features. However, because the features are of different types (and because
+// of ML best practices), we organize the tensors per feature, not per
+// candidate. Each such tensor has a scalar value corresponding to the
+// interfering live range at that position, in the order given by AllocationOrder.
+// The last position corresponds to the virt reg seeking allocation.
+// Exception to all that is the progression feature, which is just a scalar (see
+// its documentation for details).
+// Note on naming: the "_by_max" features are normalized using the largest
+// value of that tensor, as observed in the current decision-making stage
+// (i.e. for the current call to the advisor's tryFindEvictionCandidate).
+//
+// The feature list format: type, name, shape, documentation.
+// Note: we can really just use int64 and float, hence the modeling of some
+// bools as int64 values.
+#define RA_EVICT_FEATURES_LIST(M) \
+ M(int64_t, mask, PerLiveRangeShape, \
+ "boolean values, 0 for unavailable candidates (i.e. if a position is 0, " \
+ "it " \
+ "can't be evicted)") \
+ M(int64_t, is_free, PerLiveRangeShape, \
+ "boolean values, 1 if this phys reg is actually free (no interferences)") \
+ M(float, nr_urgent, PerLiveRangeShape, \
+ "number of 'urgent' intervals, normalized. Urgent are those that are OK " \
+ "to break cascades") \
+ M(float, nr_broken_hints, PerLiveRangeShape, \
+ "if this position were evicted, how many broken hints would there be") \
+ M(int64_t, is_hint, PerLiveRangeShape, \
+ "is this a preferred phys reg for the candidate") \
+ M(int64_t, is_local, PerLiveRangeShape, \
+ "is this live range local to a basic block") \
+ M(float, nr_rematerializable, PerLiveRangeShape, \
+ "nr rematerializable ranges") \
+ M(float, nr_defs_and_uses, PerLiveRangeShape, \
+ "bb freq - weighed nr defs and uses") \
+ M(float, weighed_reads_by_max, PerLiveRangeShape, \
+ "bb freq - weighed nr of reads, normalized") \
+ M(float, weighed_writes_by_max, PerLiveRangeShape, \
+ "bb feq - weighed nr of writes, normalized") \
+ M(float, weighed_read_writes_by_max, PerLiveRangeShape, \
+ "bb freq - weighed nr of uses that are both read and writes, normalized") \
+ M(float, weighed_indvars_by_max, PerLiveRangeShape, \
+ "bb freq - weighed nr of uses that are indvars, normalized") \
+ M(float, hint_weights_by_max, PerLiveRangeShape, \
+ "bb freq - weighed nr of uses that are hints, normalized") \
+ M(float, start_bb_freq_by_max, PerLiveRangeShape, \
+ "the freq in the start block, normalized") \
+ M(float, end_bb_freq_by_max, PerLiveRangeShape, \
+ "freq of end block, normalized") \
+ M(float, hottest_bb_freq_by_max, PerLiveRangeShape, \
+ "hottest BB freq, normalized") \
+ M(float, liverange_size, PerLiveRangeShape, \
+ "size (instr index diff) of the LR") \
+ M(float, use_def_density, PerLiveRangeShape, \
+ "the max weight, as computed by the manual heuristic") \
+ M(int64_t, max_stage, PerLiveRangeShape, \
+ "largest stage of an interval in this LR") \
+ M(int64_t, min_stage, PerLiveRangeShape, \
+ "lowest stage of an interval in this LR") \
+ M(float, progress, {1}, "ratio of current queue size to initial size")
+
+// The model learns to pick one of the mask == 1 interferences. This is the name
+// of the output tensor.
+// The contract with the model is that the output is guaranteed to be a
+// mask == 1 position.
+// Using a macro here to avoid 'not used' warnings (and keep conditional
+// compilation to a minimum).
+#define DecisionName "index_to_evict"
+
+// Named features index.
+enum FeatureIDs {
+#define _FEATURE_IDX(_, name, __, ___) name,
+ RA_EVICT_FEATURES_LIST(_FEATURE_IDX)
+#undef _FEATURE_IDX
+ FeatureCount
+};
+
+// The ML advisor will typically have a sparse input to the evaluator, because
+// various phys regs won't be available. It's easier (maintenance-wise) to
+// bulk-reset the state of the evaluator each time we are about to use it again.
+template <typename T> size_t getTotalSize(const std::vector<int64_t> &Shape) {
+ size_t Ret = sizeof(T);
+ for (const auto V : Shape)
+ Ret *= V;
+ return Ret;
+}
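A quick worked example of the helper above (editorial note): with MaxInterferences == 32, NumberOfInterferences is 33, so a float feature shaped by PerLiveRangeShape {1, 33} occupies sizeof(float) * 1 * 33 = 132 bytes, while the scalar progress feature ({1}) occupies only sizeof(float) bytes.

  // Editorial check of the arithmetic, assuming 4-byte floats:
  static_assert(sizeof(float) == 4, "illustration assumes 4-byte float");
  // getTotalSize<float>(PerLiveRangeShape) == 4 * 1 * 33 == 132 bytes
  // getTotalSize<float>({1})               == 4 * 1      ==   4 bytes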
+
+void resetInputs(MLModelRunner &Runner) {
+#define _RESET(TYPE, NAME, SHAPE, __) \
+ std::memset(Runner.getTensorUntyped(FeatureIDs::NAME), 0, \
+ getTotalSize<TYPE>(SHAPE));
+ RA_EVICT_FEATURES_LIST(_RESET)
+#undef _RESET
+}
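For reference (editorial illustration), the X-macro expansion of _RESET for a single feature amounts to the statement below, which is why one pass over RA_EVICT_FEATURES_LIST clears the whole sparse input.

  // What RA_EVICT_FEATURES_LIST(_RESET) expands to for the 'mask' feature,
  // inside resetInputs:
  std::memset(Runner.getTensorUntyped(FeatureIDs::mask), 0,
              getTotalSize<int64_t>(PerLiveRangeShape));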
+
+using CandidateRegList =
+ std::array<std::pair<MCRegister, bool>, NumberOfInterferences>;
+using FeaturesListNormalizer = std::array<float, FeatureIDs::FeatureCount>;
+
+/// The ML evictor (commonalities between release and development mode)
+class MLEvictAdvisor : public RegAllocEvictionAdvisor {
+public:
+ MLEvictAdvisor(const MachineFunction &MF, const RAGreedy &RA,
+ MLModelRunner *Runner, const MachineBlockFrequencyInfo &MBFI,
+ const MachineLoopInfo &Loops);
+
+protected:
+ const RegAllocEvictionAdvisor &getDefaultAdvisor() const {
+ return static_cast<const RegAllocEvictionAdvisor &>(DefaultAdvisor);
+ }
+
+  // The assumption is that if the Runner could not be constructed, we emitted
+  // an error, and we shouldn't be asking for it here.
+ const MLModelRunner &getRunner() const { return *Runner; }
+
+ /// This just calls Evaluate on the Runner, but in the development mode case,
+ /// if we're just capturing the log of the default advisor, it needs to call
+ /// the latter instead, so we need to pass all the necessary parameters for
+ /// it. In the development case, it will also log.
+ virtual int64_t tryFindEvictionCandidatePosition(
+ LiveInterval &VirtReg, const AllocationOrder &Order, unsigned OrderLimit,
+ uint8_t CostPerUseLimit, const SmallVirtRegSet &FixedRegisters) const;
+
+ /// Load the features of the given VirtReg (allocated or not) at column Pos,
+ /// but if that can't be evicted, return false instead.
+ bool
+ loadInterferenceFeatures(LiveInterval &VirtReg, MCRegister PhysReg,
+ bool IsHint, const SmallVirtRegSet &FixedRegisters,
+ std::array<float, FeatureIDs::FeatureCount> &Largest,
+ size_t Pos) const;
+
+private:
+ static float getInitialQueueSize(const MachineFunction &MF);
+
+ MCRegister tryFindEvictionCandidate(
+ LiveInterval &VirtReg, const AllocationOrder &Order,
+ uint8_t CostPerUseLimit,
+ const SmallVirtRegSet &FixedRegisters) const override;
+
+ void extractFeatures(const SmallVectorImpl<LiveInterval *> &Intervals,
+ std::array<float, FeatureIDs::FeatureCount> &Largest,
+ size_t Pos, int64_t IsHint, int64_t LocalIntfsCount,
+ float NrUrgent) const;
+
+ // Point-in-time: we didn't learn this, so we always delegate to the default.
+ bool canEvictHintInterference(
+ LiveInterval &VirtReg, MCRegister PhysReg,
+ const SmallVirtRegSet &FixedRegisters) const override {
+ return getDefaultAdvisor().canEvictHintInterference(VirtReg, PhysReg,
+ FixedRegisters);
+ }
+
+ // Hold on to a default advisor for:
+ // 1) the implementation of canEvictHintInterference, because we didn't learn
+ // that nuance yet;
+ // 2) for bootstrapping (logging) in the development mode case.
+ const DefaultEvictionAdvisor DefaultAdvisor;
+ MLModelRunner *const Runner;
+ const MachineBlockFrequencyInfo &MBFI;
+ const MachineLoopInfo &Loops;
+
+ // Indices of those features we don't want to normalize.
+ // This could be static and shared, but its initialization is non-trivial.
+ std::bitset<FeatureIDs::FeatureCount> DoNotNormalize;
+ const float InitialQSize;
+};
+
+// ===================================
+// Release (AOT) - specifics
+// ===================================
+#if defined(LLVM_HAVE_TF_AOT_REGALLOCEVICTMODEL)
+const std::array<std::string, FeatureIDs::FeatureCount> FeatureNames{
+#define _GETNAME(_, NAME, __, ___) #NAME,
+ RA_EVICT_FEATURES_LIST(_GETNAME)
+#undef _GETNAME
+};
+class ReleaseModeEvictionAdvisorAnalysis final
+ : public RegAllocEvictionAdvisorAnalysis {
+public:
+ ReleaseModeEvictionAdvisorAnalysis()
+ : RegAllocEvictionAdvisorAnalysis(AdvisorMode::Release) {}
+ // support for isa<> and dyn_cast.
+ static bool classof(const RegAllocEvictionAdvisorAnalysis *R) {
+ return R->getAdvisorMode() == AdvisorMode::Release;
+ }
+
+private:
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<MachineBlockFrequencyInfo>();
+ AU.addRequired<MachineLoopInfo>();
+ RegAllocEvictionAdvisorAnalysis::getAnalysisUsage(AU);
+ }
+
+ std::unique_ptr<RegAllocEvictionAdvisor>
+ getAdvisor(const MachineFunction &MF, const RAGreedy &RA) override {
+ if (!Runner)
+ Runner = std::make_unique<ReleaseModeModelRunner<RegallocEvictModel>>(
+ MF.getFunction().getContext(), FeatureNames, DecisionName);
+ return std::make_unique<MLEvictAdvisor>(
+ MF, RA, Runner.get(), getAnalysis<MachineBlockFrequencyInfo>(),
+ getAnalysis<MachineLoopInfo>());
+ }
+ std::unique_ptr<ReleaseModeModelRunner<RegallocEvictModel>> Runner;
+};
+#endif
+
+// ===================================
+// Development mode-specifics
+// ===================================
+//
+// Features we log
+#ifdef LLVM_HAVE_TF_API
+#define _DECL_FEATURES(type, name, shape, _) \
+ TensorSpec::createSpec<type>(#name, shape),
+
+static const std::vector<TensorSpec> InputFeatures{
+ {RA_EVICT_FEATURES_LIST(_DECL_FEATURES)},
+};
+#undef _DECL_FEATURES
+static const TensorSpec Output =
+ TensorSpec::createSpec<int64_t>(DecisionName, {1});
+static const TensorSpec Reward = TensorSpec::createSpec<float>("reward", {1});
+
+// Features we bind to the model. The tensor names have a prefix, and we also
+// need to include some tensors that the training algorithm expects to be
+// present.
+// TODO: can we just get rid of these?
+#define _DECL_TRAIN_FEATURES(type, name, shape, _) \
+ TensorSpec::createSpec<type>(std::string("action_") + #name, shape),
+
+static const std::vector<TensorSpec> TrainingInputFeatures{
+ {RA_EVICT_FEATURES_LIST(_DECL_TRAIN_FEATURES)
+ TensorSpec::createSpec<float>("action_discount", {1}),
+ TensorSpec::createSpec<int32_t>("action_step_type", {1}),
+ TensorSpec::createSpec<float>("action_reward", {1})}};
+#undef _DECL_TRAIN_FEATURES
+
+class DevelopmentModeEvictAdvisor : public MLEvictAdvisor {
+public:
+ DevelopmentModeEvictAdvisor(const MachineFunction &MF, const RAGreedy &RA,
+ MLModelRunner *Runner,
+ const MachineBlockFrequencyInfo &MBFI,
+ const MachineLoopInfo &Loops, Logger *Log)
+ : MLEvictAdvisor(MF, RA, Runner, MBFI, Loops), Log(Log) {}
+
+private:
+ int64_t tryFindEvictionCandidatePosition(
+ LiveInterval &VirtReg, const AllocationOrder &Order, unsigned OrderLimit,
+ uint8_t CostPerUseLimit,
+ const SmallVirtRegSet &FixedRegisters) const override;
+
+ Logger *const Log;
+};
+
+class DevelopmentModeEvictionAdvisorAnalysis final
+ : public RegAllocEvictionAdvisorAnalysis {
+public:
+ DevelopmentModeEvictionAdvisorAnalysis()
+ : RegAllocEvictionAdvisorAnalysis(AdvisorMode::Development) {}
+ // support for isa<> and dyn_cast.
+ static bool classof(const RegAllocEvictionAdvisorAnalysis *R) {
+ return R->getAdvisorMode() == AdvisorMode::Development;
+ }
+
+  /// Get the logger for the given function, or nullptr if we didn't collect
+  /// one. This is used by the RegAllocScoring pass to inject the score.
+ Logger *getLogger(const MachineFunction &MF) const {
+ auto I = LogMap.find(MF.getName());
+ if (I == LogMap.end())
+ return nullptr;
+ return I->second.get();
+ }
+
+private:
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<MachineBlockFrequencyInfo>();
+ AU.addRequired<MachineLoopInfo>();
+ RegAllocEvictionAdvisorAnalysis::getAnalysisUsage(AU);
+ }
+
+ // Save all the logs (when requested).
+ bool doFinalization(Module &M) override {
+ if (TrainingLog.empty())
+ return false;
+ std::error_code EC;
+ auto OS = std::make_unique<raw_fd_ostream>(TrainingLog, EC);
+ if (EC) {
+ M.getContext().emitError(EC.message() + ":" + TrainingLog);
+ return false;
+ }
+ Logger::flushLogs(*OS, LogMap);
+ return false;
+ }
+
+ std::unique_ptr<RegAllocEvictionAdvisor>
+ getAdvisor(const MachineFunction &MF, const RAGreedy &RA) override {
+ LLVMContext &Ctx = MF.getFunction().getContext();
+ if (ModelUnderTraining.empty() && TrainingLog.empty()) {
+ Ctx.emitError("Regalloc development mode should be requested with at "
+ "least logging enabled and/or a training model");
+ return nullptr;
+ }
+ if (!Runner) {
+ if (ModelUnderTraining.empty())
+ Runner = std::make_unique<NoInferenceModelRunner>(Ctx, InputFeatures);
+ else
+ Runner = ModelUnderTrainingRunner::createAndEnsureValid(
+ Ctx, ModelUnderTraining, DecisionName, TrainingInputFeatures);
+ if (!Runner) {
+ Ctx.emitError("Regalloc: could not set up the model runner");
+ return nullptr;
+ }
+ }
+
+ Logger *Log = nullptr;
+ if (!TrainingLog.empty()) {
+ std::vector<LoggedFeatureSpec> LFS;
+ for (const auto &FS : InputFeatures)
+ LFS.push_back({FS, None});
+ if (auto *MUTR = dyn_cast<ModelUnderTrainingRunner>(Runner.get()))
+ if (MUTR->outputLoggedFeatureSpecs().size() > 1)
+ append_range(LFS, drop_begin(MUTR->outputLoggedFeatureSpecs()));
+ // We always log the output; in particular, if we're not evaluating, we
+ // don't have an output spec json file. That's why we handle the
+ // 'normal' output separately.
+ LFS.push_back({Output, None});
+ auto I = LogMap.insert(std::make_pair(
+ MF.getFunction().getName(),
+ std::make_unique<Logger>(LFS, Reward, /*IncludeReward*/ true)));
+ assert(I.second);
+ Log = I.first->second.get();
+ }
+ return std::make_unique<DevelopmentModeEvictAdvisor>(
+ MF, RA, Runner.get(), getAnalysis<MachineBlockFrequencyInfo>(),
+ getAnalysis<MachineLoopInfo>(), Log);
+ }
+
+ std::unique_ptr<MLModelRunner> Runner;
+ StringMap<std::unique_ptr<Logger>> LogMap;
+};
+#endif // #ifdef LLVM_HAVE_TF_API
+} // namespace
+
+float MLEvictAdvisor::getInitialQueueSize(const MachineFunction &MF) {
+ auto &MRI = MF.getRegInfo();
+ float Ret = 0.0;
+ for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
+ Register Reg = Register::index2VirtReg(I);
+ if (MRI.reg_nodbg_empty(Reg))
+ continue;
+ ++Ret;
+ }
+ return Ret;
+}
+
+MLEvictAdvisor::MLEvictAdvisor(const MachineFunction &MF, const RAGreedy &RA,
+ MLModelRunner *Runner,
+ const MachineBlockFrequencyInfo &MBFI,
+ const MachineLoopInfo &Loops)
+ : RegAllocEvictionAdvisor(MF, RA), DefaultAdvisor(MF, RA),
+ Runner(std::move(Runner)), MBFI(MBFI), Loops(Loops),
+ InitialQSize(MLEvictAdvisor::getInitialQueueSize(MF)) {
+ assert(this->Runner);
+ DoNotNormalize.set(FeatureIDs::mask);
+ DoNotNormalize.set(FeatureIDs::is_free);
+ DoNotNormalize.set(FeatureIDs::is_hint);
+ DoNotNormalize.set(FeatureIDs::is_local);
+ DoNotNormalize.set(FeatureIDs::min_stage);
+ DoNotNormalize.set(FeatureIDs::max_stage);
+ DoNotNormalize.set(FeatureIDs::progress);
+}
+
+int64_t MLEvictAdvisor::tryFindEvictionCandidatePosition(
+ LiveInterval &, const AllocationOrder &, unsigned, uint8_t,
+ const SmallVirtRegSet &) const {
+ int64_t Ret = Runner->evaluate<int64_t>();
+ assert(Ret >= 0);
+ assert(Ret <= CandidateVirtRegPos);
+ return Ret;
+}
+
+bool MLEvictAdvisor::loadInterferenceFeatures(
+ LiveInterval &VirtReg, MCRegister PhysReg, bool IsHint,
+ const SmallVirtRegSet &FixedRegisters, FeaturesListNormalizer &Largest,
+ size_t Pos) const {
+ // It is only possible to evict virtual register interference.
+ if (Matrix->checkInterference(VirtReg, PhysReg) > LiveRegMatrix::IK_VirtReg) {
+ // leave unavailable
+ return false;
+ }
+
+ const bool IsLocal = LIS->intervalIsInOneMBB(VirtReg);
+ int64_t LocalIntfs = 0;
+ float NrUrgent = 0.0f;
+
+ // The cascade tracking is the same as in the default advisor
+ unsigned Cascade = RA.getExtraInfo().getCascadeOrCurrentNext(VirtReg.reg());
+
+ SmallVector<LiveInterval *, MaxInterferences> InterferingIntervals;
+ for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
+ LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
+    // Unlike the default heuristic, we don't attach any special meaning to
+    // the query returning more than 10 interfering ranges.
+ const auto &IFIntervals = Q.interferingVRegs();
+ if (IFIntervals.empty() && InterferingIntervals.empty())
+ continue;
+ InterferingIntervals.append(IFIntervals.begin(), IFIntervals.end());
+ for (LiveInterval *Intf : reverse(IFIntervals)) {
+ assert(Register::isVirtualRegister(Intf->reg()) &&
+ "Only expecting virtual register interference from query");
+ // This is the same set of legality checks as in the default case: don't
+ // try to evict fixed regs or 'done' ones. Also don't break cascades,
+ // except in the urgent case, with the same nuances used in the default
+ // heuristic.
+ // We could try sharing this between the advisors, but it may end up
+ // more complex than it is right now.
+ if (FixedRegisters.count(Intf->reg()))
+ return false;
+ if (RA.getExtraInfo().getStage(*Intf) == RS_Done)
+ return false;
+ bool Urgent =
+ !VirtReg.isSpillable() &&
+ (Intf->isSpillable() ||
+ RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg())) <
+ RegClassInfo.getNumAllocatableRegs(
+ MRI->getRegClass(Intf->reg())));
+ // Only evict older cascades or live ranges without a cascade.
+ unsigned IntfCascade = RA.getExtraInfo().getCascade(Intf->reg());
+ if (Cascade <= IntfCascade) {
+ if (!Urgent)
+ return false;
+ ++NrUrgent;
+ }
+
+ LocalIntfs += (IsLocal && LIS->intervalIsInOneMBB(*Intf) &&
+ (!EnableLocalReassign || !canReassign(*Intf, PhysReg)));
+ }
+ }
+ // OK, so if we made it this far, this LR is an eviction candidate, load its
+ // features.
+ extractFeatures(InterferingIntervals, Largest, Pos, IsHint, LocalIntfs,
+ NrUrgent);
+ return true;
+}
+
+MCRegister MLEvictAdvisor::tryFindEvictionCandidate(
+ LiveInterval &VirtReg, const AllocationOrder &Order,
+ uint8_t CostPerUseLimit, const SmallVirtRegSet &FixedRegisters) const {
+ auto MaybeOrderLimit = getOrderLimit(VirtReg, Order, CostPerUseLimit);
+ if (!MaybeOrderLimit)
+ return MCRegister::NoRegister;
+ unsigned OrderLimit = *MaybeOrderLimit;
+
+  // The heuristic sets initial costs such that, if CostPerUseLimit is
+  // max<uint8_t>, then any of the costs of the legally-evictable intervals
+  // would be lower. When that happens, one of those will be selected.
+  // Therefore, we allow the candidate to be selected, unless the candidate is
+  // unspillable, in which case it would be incorrect not to find a register
+  // for it.
+ const bool MustFindEviction =
+ (!VirtReg.isSpillable() && CostPerUseLimit == static_cast<uint8_t>(~0u));
+ // Number of available candidates - if 0, no need to continue.
+ size_t Available = 0;
+ // Make sure we don't have leftover partial state from an attempt where we had
+ // no available candidates and bailed out early.
+ resetInputs(*Runner);
+
+ // Track the index->register mapping because AllocationOrder doesn't do that
+ // and we'd have to scan it.
+  // Also track each register's mask, for asserts and debugging.
+ CandidateRegList Regs;
+ Regs.fill({0, false});
+
+ // Track the largest value of features seen during this eviction session. We
+ // only normalize (some of) the float features, but it's just simpler to
+ // dimension 'Largest' to all the features, especially since we have the
+ // 'DoNotNormalize' list.
+ FeaturesListNormalizer Largest;
+ Largest.fill(0.0);
+
+  // Same overall idea as in the default eviction policy - we visit the values
+  // of AllocationOrder one at a time. If a register isn't legally available,
+  // we mask off the corresponding feature column (i.e. do nothing, because we
+  // already reset all the features to 0).
+  // Use Pos to capture the column we load features at - in AllocationOrder
+  // order.
+ size_t Pos = 0;
+ for (auto I = Order.begin(), E = Order.getOrderLimitEnd(OrderLimit); I != E;
+ ++I, ++Pos) {
+ MCRegister PhysReg = *I;
+ Regs[Pos] = std::make_pair(PhysReg, true);
+ assert(PhysReg);
+ if (!canAllocatePhysReg(CostPerUseLimit, PhysReg)) {
+ Regs[Pos].second = false;
+ continue;
+ }
+ if (loadInterferenceFeatures(VirtReg, PhysReg, I.isHint(), FixedRegisters,
+ Largest, Pos)) {
+ ++Available;
+ Regs[Pos].second = true;
+ }
+ }
+ if (Available == 0) {
+ // Nothing to decide, nothing to learn.
+ assert(!MustFindEviction);
+ return MCRegister::NoRegister;
+ }
+ // If we must find eviction, the candidate should be masked out of the
+ // decision making process.
+ Regs[CandidateVirtRegPos].second = !MustFindEviction;
+ if (!MustFindEviction)
+ extractFeatures(SmallVector<LiveInterval *, 1>(1, &VirtReg), Largest,
+ CandidateVirtRegPos, /*IsHint*/ 0, /*LocalIntfsCount*/ 0,
+ /*NrUrgent*/ 0.0);
+ assert(InitialQSize > 0.0 && "We couldn't have gotten here if we had "
+ "nothing to allocate initially.");
+ // Normalize the features.
+ for (auto &V : Largest)
+ V = V ? V : 1.0;
+ for (size_t FeatureIndex = 0; FeatureIndex < FeatureIDs::FeatureCount;
+ ++FeatureIndex) {
+ if (DoNotNormalize.test(FeatureIndex))
+ continue;
+ for (size_t Pos = 0; Pos < NumberOfInterferences; ++Pos) {
+ Runner->getTensor<float>(FeatureIndex)[Pos] /= Largest[FeatureIndex];
+ }
+ }
+ *Runner->getTensor<float>(FeatureIDs::progress) =
+ static_cast<float>(RA.getQueueSize()) / InitialQSize;
+
+ // Get a decision.
+ size_t CandidatePos = tryFindEvictionCandidatePosition(
+ VirtReg, Order, OrderLimit, CostPerUseLimit, FixedRegisters);
+ // The contract with the ML side is that CandidatePos is mask == 1 (i.e.
+ // Regs[CandidatePos].second)
+ assert(Regs[CandidatePos].second);
+ if (CandidatePos == CandidateVirtRegPos) {
+ assert(!MustFindEviction);
+ return MCRegister::NoRegister;
+ }
+ return Regs[CandidatePos].first;
+}
+
+// Overall, this currently mimics what we do for weight calculation, but
+// instead of accumulating the various features, we keep them separate.
+void MLEvictAdvisor::extractFeatures(
+ const SmallVectorImpl<LiveInterval *> &Intervals,
+ std::array<float, FeatureIDs::FeatureCount> &Largest, size_t Pos,
+ int64_t IsHint, int64_t LocalIntfsCount, float NrUrgent) const {
+ int64_t NrDefsAndUses = 0;
+ int64_t NrBrokenHints = 0;
+ float R = 0;
+ float W = 0;
+ float RW = 0;
+ float IndVarUpdates = 0;
+ float HintWeights = 0.0;
+ float StartBBFreq = 0.0;
+ float EndBBFreq = 0.0;
+ float HottestBlockFreq = 0.0;
+ int32_t NrRematerializable = 0;
+ float TotalWeight = 0.0;
+
+ SlotIndex EndSI = LIS->getSlotIndexes()->getZeroIndex();
+ SlotIndex StartSI = LIS->getSlotIndexes()->getLastIndex();
+ int64_t MaxStage = 0;
+ int64_t MinStage =
+ Intervals.empty() ? 0 : std::numeric_limits<int64_t>::max();
+
+ for (const auto *L : Intervals) {
+ const LiveInterval &LI = *L;
+ MaxStage = std::max<int64_t>(
+ MaxStage, static_cast<int64_t>(RA.getExtraInfo().getStage(LI)));
+ MinStage = std::min<int64_t>(
+ MinStage, static_cast<int64_t>(RA.getExtraInfo().getStage(LI)));
+
+ TotalWeight = std::max(TotalWeight, LI.weight());
+
+ if (LI.beginIndex() < StartSI)
+ StartSI = LI.beginIndex();
+
+ if (LI.endIndex() > EndSI)
+ EndSI = LI.endIndex();
+
+ SmallPtrSet<MachineInstr *, 8> Visited;
+ const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
+ NrBrokenHints += VRM->hasPreferredPhys(LI.reg());
+
+ for (MachineRegisterInfo::reg_instr_nodbg_iterator
+ I = MRI->reg_instr_nodbg_begin(LI.reg()),
+ E = MRI->reg_instr_nodbg_end();
+ I != E;) {
+ MachineInstr *MI = &*(I++);
+
+ ++NrDefsAndUses;
+ if (!Visited.insert(MI).second)
+ continue;
+
+ if (MI->isIdentityCopy() || MI->isImplicitDef())
+ continue;
+
+ bool Reads, Writes;
+ std::tie(Reads, Writes) = MI->readsWritesVirtualRegister(LI.reg());
+
+ float Freq = MBFI.getBlockFreqRelativeToEntryBlock(MI->getParent());
+ if (Freq > HottestBlockFreq)
+ HottestBlockFreq = Freq;
+ R += (Reads && !Writes) * Freq;
+ W += (!Reads && Writes) * Freq;
+ RW += (Reads && Writes) * Freq;
+
+ auto *MBB = MI->getParent();
+ auto *Loop = Loops.getLoopFor(MBB);
+ bool IsExiting = Loop ? Loop->isLoopExiting(MBB) : false;
+
+ if (Writes && IsExiting && LIS->isLiveOutOfMBB(LI, MBB))
+ IndVarUpdates += Freq;
+
+ if (MI->isCopy() && VirtRegAuxInfo::copyHint(MI, LI.reg(), TRI, *MRI))
+ HintWeights += Freq;
+ }
+ NrRematerializable += VirtRegAuxInfo::isRematerializable(
+ LI, *LIS, *VRM, *MF.getSubtarget().getInstrInfo());
+ }
+ size_t Size = 0;
+ if (!Intervals.empty()) {
+ StartBBFreq =
+ MBFI.getBlockFreqRelativeToEntryBlock(LIS->getMBBFromIndex(StartSI));
+ if (EndSI >= LIS->getSlotIndexes()->getLastIndex())
+ EndSI = LIS->getSlotIndexes()->getLastIndex().getPrevIndex();
+ EndBBFreq =
+ MBFI.getBlockFreqRelativeToEntryBlock(LIS->getMBBFromIndex(EndSI));
+ Size = StartSI.distance(EndSI);
+ }
+ // Set the features at the column 'Pos'.
+#define SET(ID, TYPE, VAL) \
+ do { \
+ Runner->getTensor<TYPE>(FeatureIDs::ID)[Pos] = static_cast<TYPE>(VAL); \
+ if (!DoNotNormalize.test(FeatureIDs::ID)) \
+ Largest[FeatureIDs::ID] = \
+ std::max(Largest[FeatureIDs::ID], static_cast<float>(VAL)); \
+ } while (false)
+ SET(mask, int64_t, 1);
+ SET(is_free, int64_t, Intervals.empty());
+ SET(nr_urgent, float, NrUrgent);
+ SET(nr_broken_hints, float, NrBrokenHints);
+ SET(is_hint, int64_t, IsHint);
+ SET(is_local, int64_t, LocalIntfsCount);
+ SET(nr_rematerializable, float, NrRematerializable);
+ SET(nr_defs_and_uses, float, NrDefsAndUses);
+ SET(weighed_reads_by_max, float, R);
+ SET(weighed_writes_by_max, float, W);
+ SET(weighed_read_writes_by_max, float, RW);
+ SET(weighed_indvars_by_max, float, IndVarUpdates);
+ SET(hint_weights_by_max, float, HintWeights);
+ SET(start_bb_freq_by_max, float, StartBBFreq);
+ SET(end_bb_freq_by_max, float, EndBBFreq);
+ SET(hottest_bb_freq_by_max, float, HottestBlockFreq);
+ SET(liverange_size, float, Size);
+ SET(use_def_density, float, TotalWeight);
+ SET(max_stage, int64_t, MaxStage);
+ SET(min_stage, int64_t, MinStage);
+#undef SET
+}
+
+// Development mode-specific implementations
+#ifdef LLVM_HAVE_TF_API
+RegAllocEvictionAdvisorAnalysis *llvm::createDevelopmentModeAdvisor() {
+ return new DevelopmentModeEvictionAdvisorAnalysis();
+}
+
+int64_t DevelopmentModeEvictAdvisor::tryFindEvictionCandidatePosition(
+ LiveInterval &VirtReg, const AllocationOrder &Order, unsigned OrderLimit,
+ uint8_t CostPerUseLimit, const SmallVirtRegSet &FixedRegisters) const {
+ int64_t Ret = 0;
+ if (isa<ModelUnderTrainingRunner>(getRunner())) {
+ Ret = MLEvictAdvisor::tryFindEvictionCandidatePosition(
+ VirtReg, Order, OrderLimit, CostPerUseLimit, FixedRegisters);
+ } else {
+ MCRegister PhysReg = getDefaultAdvisor().tryFindEvictionCandidate(
+ VirtReg, Order, CostPerUseLimit, FixedRegisters);
+    // Find the index of the selected PhysReg. We need it for logging;
+    // otherwise this is wasted cycles (but so would be starting development
+    // mode without a model or logging).
+ if (!PhysReg)
+ Ret = CandidateVirtRegPos;
+ else
+ for (auto I = Order.begin(), E = Order.getOrderLimitEnd(OrderLimit);
+ I != E; ++I, ++Ret)
+ if (*I == PhysReg)
+ break;
+ }
+ if (TrainingLog.empty())
+ return Ret;
+ size_t CurrentFeature = 0;
+ for (; CurrentFeature < FeatureIDs::FeatureCount; ++CurrentFeature) {
+ Log->logSpecifiedTensorValue(
+ CurrentFeature, reinterpret_cast<const char *>(
+ getRunner().getTensorUntyped(CurrentFeature)));
+ }
+ if (auto *MUTR = dyn_cast<ModelUnderTrainingRunner>(&getRunner()))
+ for (size_t I = 1; I < MUTR->outputLoggedFeatureSpecs().size();
+ ++I, ++CurrentFeature)
+ Log->logSpecifiedTensorValue(
+ CurrentFeature,
+ reinterpret_cast<const char *>(
+ MUTR->lastEvaluationResult()->getUntypedTensorValue(I)));
+ // The output is right after the features and the extra outputs
+ Log->logInt64Value(CurrentFeature, &Ret);
+ return Ret;
+}
+
+bool RegAllocScoring::runOnMachineFunction(MachineFunction &MF) {
+ if (auto *DevModeAnalysis = dyn_cast<DevelopmentModeEvictionAdvisorAnalysis>(
+ &getAnalysis<RegAllocEvictionAdvisorAnalysis>()))
+ if (auto *Log = DevModeAnalysis->getLogger(MF))
+ Log->logFloatFinalReward(static_cast<float>(
+ calculateRegAllocScore(
+ MF, getAnalysis<MachineBlockFrequencyInfo>(),
+ getAnalysis<AAResultsWrapperPass>().getAAResults())
+ .getScore()));
+
+ return false;
+}
+#endif // #ifdef LLVM_HAVE_TF_API
+
+#if defined(LLVM_HAVE_TF_AOT_REGALLOCEVICTMODEL)
+RegAllocEvictionAdvisorAnalysis *llvm::createReleaseModeAdvisor() {
+ return new ReleaseModeEvictionAdvisorAnalysis();
+}
+#endif
+
+// In all cases except development mode, we don't need scoring.
+#if !defined(LLVM_HAVE_TF_API)
+bool RegAllocScoring::runOnMachineFunction(MachineFunction &) { return false; }
+#endif
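For orientation, an editorial sketch (not code from this patch) of how the advisor is expected to be consumed: the greedy allocator obtains an advisor once per function from the analysis and then queries it per virtual register that failed direct assignment; the actual call sites live in RegAllocGreedy.cpp. The wiring below assumes the public interface declared in RegAllocEvictionAdvisor.h.

  // Rough usage sketch from inside RAGreedy (hypothetical call site):
  std::unique_ptr<RegAllocEvictionAdvisor> Advisor =
      getAnalysis<RegAllocEvictionAdvisorAnalysis>().getAdvisor(MF, *this);
  MCRegister Evictable = Advisor->tryFindEvictionCandidate(
      VirtReg, Order, CostPerUseLimit, FixedRegisters);
  if (Evictable != MCRegister::NoRegister) {
    // Evict the interferences on 'Evictable', then retry the assignment.
  }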
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/MachineBlockPlacement.cpp b/contrib/llvm-project/llvm/lib/CodeGen/MachineBlockPlacement.cpp
index 692587cd58fa..c93ffaabf74c 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/MachineBlockPlacement.cpp
@@ -96,6 +96,12 @@ static cl::opt<unsigned> AlignAllNonFallThruBlocks(
"format (e.g 4 means align on 16B boundaries)."),
cl::init(0), cl::Hidden);
+static cl::opt<unsigned> MaxBytesForAlignmentOverride(
+ "max-bytes-for-alignment",
+ cl::desc("Forces the maximum bytes allowed to be emitted when padding for "
+ "alignment"),
+ cl::init(0), cl::Hidden);
+
// FIXME: Find a good default for this flag and remove the flag.
static cl::opt<unsigned> ExitBlockBias(
"block-placement-exit-block-bias",
@@ -2929,10 +2935,21 @@ void MachineBlockPlacement::alignBlocks() {
MachineBasicBlock *LayoutPred =
&*std::prev(MachineFunction::iterator(ChainBB));
+ auto DetermineMaxAlignmentPadding = [&]() {
+ // Set the maximum bytes allowed to be emitted for alignment.
+ unsigned MaxBytes;
+ if (MaxBytesForAlignmentOverride.getNumOccurrences() > 0)
+ MaxBytes = MaxBytesForAlignmentOverride;
+ else
+ MaxBytes = TLI->getMaxPermittedBytesForAlignment(ChainBB);
+ ChainBB->setMaxBytesForAlignment(MaxBytes);
+ };
+
// Force alignment if all the predecessors are jumps. We already checked
// that the block isn't cold above.
if (!LayoutPred->isSuccessor(ChainBB)) {
ChainBB->setAlignment(Align);
+ DetermineMaxAlignmentPadding();
continue;
}
@@ -2943,8 +2960,10 @@ void MachineBlockPlacement::alignBlocks() {
BranchProbability LayoutProb =
MBPI->getEdgeProbability(LayoutPred, ChainBB);
BlockFrequency LayoutEdgeFreq = MBFI->getBlockFreq(LayoutPred) * LayoutProb;
- if (LayoutEdgeFreq <= (Freq * ColdProb))
+ if (LayoutEdgeFreq <= (Freq * ColdProb)) {
ChainBB->setAlignment(Align);
+ DetermineMaxAlignmentPadding();
+ }
}
}
@@ -3418,17 +3437,30 @@ bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &MF) {
ComputedEdges.clear();
ChainAllocator.DestroyAll();
+ bool HasMaxBytesOverride =
+ MaxBytesForAlignmentOverride.getNumOccurrences() > 0;
+
if (AlignAllBlock)
// Align all of the blocks in the function to a specific alignment.
- for (MachineBasicBlock &MBB : MF)
- MBB.setAlignment(Align(1ULL << AlignAllBlock));
+ for (MachineBasicBlock &MBB : MF) {
+ if (HasMaxBytesOverride)
+ MBB.setAlignment(Align(1ULL << AlignAllBlock),
+ MaxBytesForAlignmentOverride);
+ else
+ MBB.setAlignment(Align(1ULL << AlignAllBlock));
+ }
else if (AlignAllNonFallThruBlocks) {
// Align all of the blocks that have no fall-through predecessors to a
// specific alignment.
for (auto MBI = std::next(MF.begin()), MBE = MF.end(); MBI != MBE; ++MBI) {
auto LayoutPred = std::prev(MBI);
- if (!LayoutPred->isSuccessor(&*MBI))
- MBI->setAlignment(Align(1ULL << AlignAllNonFallThruBlocks));
+ if (!LayoutPred->isSuccessor(&*MBI)) {
+ if (HasMaxBytesOverride)
+ MBI->setAlignment(Align(1ULL << AlignAllNonFallThruBlocks),
+ MaxBytesForAlignmentOverride);
+ else
+ MBI->setAlignment(Align(1ULL << AlignAllNonFallThruBlocks));
+ }
}
}
if (ViewBlockLayoutWithBFI != GVDT_None &&
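Editorial note on the new knob: when -max-bytes-for-alignment is given a non-zero value it overrides the target's per-block limit; otherwise TLI->getMaxPermittedBytesForAlignment(ChainBB) decides. In terms of the MachineBasicBlock API used above, the intended effect is roughly the following (values hypothetical; this follows the usual .p2align max-bytes semantics).

  // Given a MachineBasicBlock &MBB: align it to 16 bytes, but emit at most
  // 8 bytes of padding to get there; skip the alignment if more is needed.
  MBB.setAlignment(Align(16), /*MaxBytes=*/8);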
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/MachineDominanceFrontier.cpp b/contrib/llvm-project/llvm/lib/CodeGen/MachineDominanceFrontier.cpp
index 6ddb1758719b..a39dc79baaa8 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/MachineDominanceFrontier.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/MachineDominanceFrontier.cpp
@@ -29,9 +29,7 @@ INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(MachineDominanceFrontier, "machine-domfrontier",
"Machine Dominance Frontier Construction", true, true)
-MachineDominanceFrontier::MachineDominanceFrontier()
- : MachineFunctionPass(ID),
- Base() {
+MachineDominanceFrontier::MachineDominanceFrontier() : MachineFunctionPass(ID) {
initializeMachineDominanceFrontierPass(*PassRegistry::getPassRegistry());
}
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/MachineFunction.cpp b/contrib/llvm-project/llvm/lib/CodeGen/MachineFunction.cpp
index 81ed3d0e93ff..fd5ea5cad072 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/MachineFunction.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/MachineFunction.cpp
@@ -76,6 +76,8 @@
#include <utility>
#include <vector>
+#include "LiveDebugValues/LiveDebugValues.h"
+
using namespace llvm;
#define DEBUG_TYPE "codegen"
@@ -1238,7 +1240,7 @@ bool MachineFunction::useDebugInstrRef() const {
if (F.hasFnAttribute(Attribute::OptimizeNone))
return false;
- if (getTarget().Options.ValueTrackingVariableLocations)
+ if (llvm::debuginfoShouldUseDebugInstrRef(getTarget().getTargetTriple()))
return true;
return false;
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/MachineInstrBundle.cpp b/contrib/llvm-project/llvm/lib/CodeGen/MachineInstrBundle.cpp
index 6ca97031b92a..759cff179790 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/MachineInstrBundle.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/MachineInstrBundle.cpp
@@ -144,6 +144,10 @@ void llvm::finalizeBundle(MachineBasicBlock &MBB,
SmallSet<Register, 8> UndefUseSet;
SmallVector<MachineOperand*, 4> Defs;
for (auto MII = FirstMI; MII != LastMI; ++MII) {
+ // Debug instructions have no effects to track.
+ if (MII->isDebugInstr())
+ continue;
+
for (unsigned i = 0, e = MII->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MII->getOperand(i);
if (!MO.isReg())
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/MachineOptimizationRemarkEmitter.cpp b/contrib/llvm-project/llvm/lib/CodeGen/MachineOptimizationRemarkEmitter.cpp
index 59fc23983d3d..5347a7b0d890 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/MachineOptimizationRemarkEmitter.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/MachineOptimizationRemarkEmitter.cpp
@@ -22,8 +22,7 @@
using namespace llvm;
DiagnosticInfoMIROptimization::MachineArgument::MachineArgument(
- StringRef MKey, const MachineInstr &MI)
- : Argument() {
+ StringRef MKey, const MachineInstr &MI) {
Key = std::string(MKey);
raw_string_ostream OS(Val);
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/MachineSink.cpp b/contrib/llvm-project/llvm/lib/CodeGen/MachineSink.cpp
index 54c478645dcf..0dbbc218e946 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/MachineSink.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/MachineSink.cpp
@@ -796,9 +796,14 @@ bool MachineSinking::isProfitableToSinkTo(Register Reg, MachineInstr &MI,
if (Reg == 0)
continue;
- // Don't handle physical register.
- if (Register::isPhysicalRegister(Reg))
+ if (Register::isPhysicalRegister(Reg)) {
+ if (MO.isUse() &&
+ (MRI->isConstantPhysReg(Reg) || TII->isIgnorableUse(MO)))
+ continue;
+
+      // Don't handle non-constant, non-ignorable physical registers.
return false;
+ }
// Users for the defs are all dominated by SuccToSinkTo.
if (MO.isDef()) {
@@ -898,7 +903,7 @@ MachineSinking::FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB,
// If the physreg has no defs anywhere, it's just an ambient register
// and we can freely move its uses. Alternatively, if it's allocatable,
// it could get allocated to something with a def during allocation.
- if (!MRI->isConstantPhysReg(Reg))
+ if (!MRI->isConstantPhysReg(Reg) && !TII->isIgnorableUse(MO))
return nullptr;
} else if (!MO.isDead()) {
// A def that isn't dead. We can't move it.
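The sinking change above relies on the TargetInstrInfo::isIgnorableUse hook (whose base implementation presumably returns false). A hypothetical target override, with invented names purely for illustration, could mark uses of an always-constant register as ignorable so they no longer block sinking:

  // Hypothetical override for an imaginary target 'Foo': a hard-wired zero
  // register can never change, so reading it should not pin an instruction.
  bool FooInstrInfo::isIgnorableUse(const MachineOperand &MO) const {
    return MO.isReg() && MO.getReg() == Foo::ZERO_REG; // invented register name
  }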
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/ModuloSchedule.cpp b/contrib/llvm-project/llvm/lib/CodeGen/ModuloSchedule.cpp
index aaa6403cc978..f91a9d2c3a32 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/ModuloSchedule.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/ModuloSchedule.cpp
@@ -1704,7 +1704,7 @@ void PeelingModuloScheduleExpander::peelPrologAndEpilogs() {
// Peel out the prologs.
LS.reset();
for (int I = 0; I < Schedule.getNumStages() - 1; ++I) {
- LS[I] = 1;
+ LS[I] = true;
Prologs.push_back(peelKernel(LPD_Front));
LiveStages[Prologs.back()] = LS;
AvailableStages[Prologs.back()] = LS;
@@ -1752,7 +1752,7 @@ void PeelingModuloScheduleExpander::peelPrologAndEpilogs() {
// Move stage one block at a time so that Phi nodes are updated correctly.
for (size_t K = Iteration; K > I; K--)
moveStageBetweenBlocks(Epilogs[K - 1], Epilogs[K], Stage);
- LS[Stage] = 1;
+ LS[Stage] = true;
}
LiveStages[Epilogs[I]] = LS;
AvailableStages[Epilogs[I]] = AS;
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/NonRelocatableStringpool.cpp b/contrib/llvm-project/llvm/lib/CodeGen/NonRelocatableStringpool.cpp
index 9ed3471c0fc9..db5217469fba 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/NonRelocatableStringpool.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/NonRelocatableStringpool.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/NonRelocatableStringpool.h"
+#include "llvm/ADT/STLExtras.h"
namespace llvm {
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp b/contrib/llvm-project/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
index e3eb3f825851..74b903f99284 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
@@ -97,7 +97,7 @@ static bool lowerObjCCall(Function &F, const char *NewFn,
objcarc::ARCInstKind Kind = objcarc::getAttachedARCFunctionKind(CB);
(void)Kind;
assert((Kind == objcarc::ARCInstKind::RetainRV ||
- Kind == objcarc::ARCInstKind::ClaimRV) &&
+ Kind == objcarc::ARCInstKind::UnsafeClaimRV) &&
"use expected to be the argument of operand bundle "
"\"clang.arc.attachedcall\"");
U.set(FCache.getCallee());
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/RegAllocEvictionAdvisor.cpp b/contrib/llvm-project/llvm/lib/CodeGen/RegAllocEvictionAdvisor.cpp
index 9f1012c95964..87df7bb4a689 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/RegAllocEvictionAdvisor.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/RegAllocEvictionAdvisor.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "RegAllocEvictionAdvisor.h"
+#include "RegAllocGreedy.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
@@ -42,6 +43,9 @@ static cl::opt<bool> EnableLocalReassignment(
cl::init(false));
#define DEBUG_TYPE "regalloc"
+#ifdef LLVM_HAVE_TF_AOT_REGALLOCEVICTMODEL
+#define LLVM_HAVE_TF_AOT
+#endif
char RegAllocEvictionAdvisorAnalysis::ID = 0;
INITIALIZE_PASS(RegAllocEvictionAdvisorAnalysis, "regalloc-evict",
@@ -62,12 +66,8 @@ public:
private:
std::unique_ptr<RegAllocEvictionAdvisor>
- getAdvisor(const MachineFunction &MF, LiveRegMatrix *Matrix,
- LiveIntervals *LIS, VirtRegMap *VRM,
- const RegisterClassInfo &RegClassInfo,
- ExtraRegInfo *ExtraInfo) override {
- return std::make_unique<DefaultEvictionAdvisor>(MF, Matrix, LIS, VRM,
- RegClassInfo, ExtraInfo);
+ getAdvisor(const MachineFunction &MF, const RAGreedy &RA) override {
+ return std::make_unique<DefaultEvictionAdvisor>(MF, RA);
}
bool doInitialization(Module &M) override {
if (NotAsRequested)
@@ -86,10 +86,14 @@ template <> Pass *llvm::callDefaultCtor<RegAllocEvictionAdvisorAnalysis>() {
Ret = new DefaultEvictionAdvisorAnalysis(/*NotAsRequested*/ false);
break;
case RegAllocEvictionAdvisorAnalysis::AdvisorMode::Development:
- // TODO(mtrofin): add implementation
+#if defined(LLVM_HAVE_TF_API)
+ Ret = createDevelopmentModeAdvisor();
+#endif
break;
case RegAllocEvictionAdvisorAnalysis::AdvisorMode::Release:
- // TODO(mtrofin): add implementation
+#if defined(LLVM_HAVE_TF_AOT)
+ Ret = createReleaseModeAdvisor();
+#endif
break;
}
if (Ret)
@@ -109,13 +113,12 @@ StringRef RegAllocEvictionAdvisorAnalysis::getPassName() const {
llvm_unreachable("Unknown advisor kind");
}
-RegAllocEvictionAdvisor::RegAllocEvictionAdvisor(
- const MachineFunction &MF, LiveRegMatrix *Matrix, LiveIntervals *LIS,
- VirtRegMap *VRM, const RegisterClassInfo &RegClassInfo,
- ExtraRegInfo *ExtraInfo)
- : MF(MF), Matrix(Matrix), LIS(LIS), VRM(VRM), MRI(&VRM->getRegInfo()),
- TRI(MF.getSubtarget().getRegisterInfo()), RegClassInfo(RegClassInfo),
- RegCosts(TRI->getRegisterCosts(MF)), ExtraInfo(ExtraInfo),
+RegAllocEvictionAdvisor::RegAllocEvictionAdvisor(const MachineFunction &MF,
+ const RAGreedy &RA)
+ : MF(MF), RA(RA), Matrix(RA.getInterferenceMatrix()),
+ LIS(RA.getLiveIntervals()), VRM(RA.getVirtRegMap()),
+ MRI(&VRM->getRegInfo()), TRI(MF.getSubtarget().getRegisterInfo()),
+ RegClassInfo(RA.getRegClassInfo()), RegCosts(TRI->getRegisterCosts(MF)),
EnableLocalReassign(EnableLocalReassignment ||
MF.getSubtarget().enableRALocalReassignment(
MF.getTarget().getOptLevel())) {}
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/RegAllocEvictionAdvisor.h b/contrib/llvm-project/llvm/lib/CodeGen/RegAllocEvictionAdvisor.h
index debb75ed5020..33e03aed81a7 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/RegAllocEvictionAdvisor.h
+++ b/contrib/llvm-project/llvm/lib/CodeGen/RegAllocEvictionAdvisor.h
@@ -87,87 +87,9 @@ struct EvictionCost {
}
};
-/// Track allocation stage and eviction loop prevention during allocation.
-// TODO(mtrofin): Consider exposing RAGreedy in a header instead, and folding
-// this back into it.
-class ExtraRegInfo final {
- // RegInfo - Keep additional information about each live range.
- struct RegInfo {
- LiveRangeStage Stage = RS_New;
-
- // Cascade - Eviction loop prevention. See
- // canEvictInterferenceBasedOnCost().
- unsigned Cascade = 0;
-
- RegInfo() = default;
- };
-
- IndexedMap<RegInfo, VirtReg2IndexFunctor> Info;
- unsigned NextCascade = 1;
-
-public:
- ExtraRegInfo() = default;
- ExtraRegInfo(const ExtraRegInfo &) = delete;
-
- LiveRangeStage getStage(Register Reg) const { return Info[Reg].Stage; }
-
- LiveRangeStage getStage(const LiveInterval &VirtReg) const {
- return getStage(VirtReg.reg());
- }
-
- void setStage(Register Reg, LiveRangeStage Stage) {
- Info.grow(Reg.id());
- Info[Reg].Stage = Stage;
- }
-
- void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
- setStage(VirtReg.reg(), Stage);
- }
-
- /// Return the current stage of the register, if present, otherwise initialize
- /// it and return that.
- LiveRangeStage getOrInitStage(Register Reg) {
- Info.grow(Reg.id());
- return getStage(Reg);
- }
-
- unsigned getCascade(Register Reg) const { return Info[Reg].Cascade; }
-
- void setCascade(Register Reg, unsigned Cascade) {
- Info.grow(Reg.id());
- Info[Reg].Cascade = Cascade;
- }
-
- unsigned getOrAssignNewCascade(Register Reg) {
- unsigned Cascade = getCascade(Reg);
- if (!Cascade) {
- Cascade = NextCascade++;
- setCascade(Reg, Cascade);
- }
- return Cascade;
- }
-
- unsigned getCascadeOrCurrentNext(Register Reg) const {
- unsigned Cascade = getCascade(Reg);
- if (!Cascade)
- Cascade = NextCascade;
- return Cascade;
- }
-
- template <typename Iterator>
- void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
- for (; Begin != End; ++Begin) {
- Register Reg = *Begin;
- Info.grow(Reg.id());
- if (Info[Reg].Stage == RS_New)
- Info[Reg].Stage = NewStage;
- }
- }
- void LRE_DidCloneVirtReg(Register New, Register Old);
-};
-
/// Interface to the eviction advisor, which is responsible for making a
/// decision as to which live ranges should be evicted (if any).
+class RAGreedy;
class RegAllocEvictionAdvisor {
public:
RegAllocEvictionAdvisor(const RegAllocEvictionAdvisor &) = delete;
@@ -193,14 +115,23 @@ public:
bool isUnusedCalleeSavedReg(MCRegister PhysReg) const;
protected:
- RegAllocEvictionAdvisor(const MachineFunction &MF, LiveRegMatrix *Matrix,
- LiveIntervals *LIS, VirtRegMap *VRM,
- const RegisterClassInfo &RegClassInfo,
- ExtraRegInfo *ExtraInfo);
+ RegAllocEvictionAdvisor(const MachineFunction &MF, const RAGreedy &RA);
Register canReassign(LiveInterval &VirtReg, Register PrevReg) const;
+  // Get the upper limit of elements in the given Order we need to analyze.
+  // TODO: this is a heuristic; we could consider learning it.
+ Optional<unsigned> getOrderLimit(const LiveInterval &VirtReg,
+ const AllocationOrder &Order,
+ unsigned CostPerUseLimit) const;
+
+ // Determine if it's worth trying to allocate this reg, given the
+  // CostPerUseLimit.
+ // TODO: this is a heuristic component we could consider learning, too.
+ bool canAllocatePhysReg(unsigned CostPerUseLimit, MCRegister PhysReg) const;
+
const MachineFunction &MF;
+ const RAGreedy &RA;
LiveRegMatrix *const Matrix;
LiveIntervals *const LIS;
VirtRegMap *const VRM;
@@ -208,7 +139,6 @@ protected:
const TargetRegisterInfo *const TRI;
const RegisterClassInfo &RegClassInfo;
const ArrayRef<uint8_t> RegCosts;
- ExtraRegInfo *const ExtraInfo;
/// Run or not the local reassignment heuristic. This information is
/// obtained from the TargetSubtargetInfo.
@@ -243,19 +173,17 @@ public:
/// Get an advisor for the given context (i.e. machine function, etc)
virtual std::unique_ptr<RegAllocEvictionAdvisor>
- getAdvisor(const MachineFunction &MF, LiveRegMatrix *Matrix,
- LiveIntervals *LIS, VirtRegMap *VRM,
- const RegisterClassInfo &RegClassInfo,
- ExtraRegInfo *ExtraInfo) = 0;
+ getAdvisor(const MachineFunction &MF, const RAGreedy &RA) = 0;
AdvisorMode getAdvisorMode() const { return Mode; }
-private:
+protected:
// This analysis preserves everything, and subclasses may have additional
// requirements.
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
+private:
StringRef getPassName() const override;
const AdvisorMode Mode;
};
@@ -264,25 +192,16 @@ private:
/// an instance of the eviction advisor.
template <> Pass *callDefaultCtor<RegAllocEvictionAdvisorAnalysis>();
-// TODO(mtrofin): implement these.
-#ifdef LLVM_HAVE_TF_AOT
RegAllocEvictionAdvisorAnalysis *createReleaseModeAdvisor();
-#endif
-#ifdef LLVM_HAVE_TF_API
RegAllocEvictionAdvisorAnalysis *createDevelopmentModeAdvisor();
-#endif
// TODO: move to RegAllocEvictionAdvisor.cpp when we move implementation
// out of RegAllocGreedy.cpp
class DefaultEvictionAdvisor : public RegAllocEvictionAdvisor {
public:
- DefaultEvictionAdvisor(const MachineFunction &MF, LiveRegMatrix *Matrix,
- LiveIntervals *LIS, VirtRegMap *VRM,
- const RegisterClassInfo &RegClassInfo,
- ExtraRegInfo *ExtraInfo)
- : RegAllocEvictionAdvisor(MF, Matrix, LIS, VRM, RegClassInfo, ExtraInfo) {
- }
+ DefaultEvictionAdvisor(const MachineFunction &MF, const RAGreedy &RA)
+ : RegAllocEvictionAdvisor(MF, RA) {}
private:
MCRegister tryFindEvictionCandidate(LiveInterval &, const AllocationOrder &,
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/RegAllocGreedy.cpp b/contrib/llvm-project/llvm/lib/CodeGen/RegAllocGreedy.cpp
index ce3cf31dbd6b..6ea6dbcbbb74 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/RegAllocGreedy.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/RegAllocGreedy.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "RegAllocGreedy.h"
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
@@ -135,362 +136,6 @@ static cl::opt<bool> ConsiderLocalIntervalCost(
static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
createGreedyRegisterAllocator);
-namespace {
-
-class RAGreedy : public MachineFunctionPass,
- public RegAllocBase,
- private LiveRangeEdit::Delegate {
- // Convenient shortcuts.
- using PQueue = std::priority_queue<std::pair<unsigned, unsigned>>;
- using SmallLISet = SmallPtrSet<LiveInterval *, 4>;
-
- // context
- MachineFunction *MF;
-
- // Shortcuts to some useful interface.
- const TargetInstrInfo *TII;
- const TargetRegisterInfo *TRI;
- RegisterClassInfo RCI;
-
- // analyses
- SlotIndexes *Indexes;
- MachineBlockFrequencyInfo *MBFI;
- MachineDominatorTree *DomTree;
- MachineLoopInfo *Loops;
- MachineOptimizationRemarkEmitter *ORE;
- EdgeBundles *Bundles;
- SpillPlacement *SpillPlacer;
- LiveDebugVariables *DebugVars;
- AliasAnalysis *AA;
-
- // state
- std::unique_ptr<Spiller> SpillerInstance;
- PQueue Queue;
- std::unique_ptr<VirtRegAuxInfo> VRAI;
- Optional<ExtraRegInfo> ExtraInfo;
- std::unique_ptr<RegAllocEvictionAdvisor> EvictAdvisor;
-
- // Enum CutOffStage to keep a track whether the register allocation failed
- // because of the cutoffs encountered in last chance recoloring.
- // Note: This is used as bitmask. New value should be next power of 2.
- enum CutOffStage {
- // No cutoffs encountered
- CO_None = 0,
-
- // lcr-max-depth cutoff encountered
- CO_Depth = 1,
-
- // lcr-max-interf cutoff encountered
- CO_Interf = 2
- };
-
- uint8_t CutOffInfo;
-
-#ifndef NDEBUG
- static const char *const StageName[];
-#endif
-
- /// EvictionTrack - Keeps track of past evictions in order to optimize region
- /// split decision.
- class EvictionTrack {
-
- public:
- using EvictorInfo =
- std::pair<Register /* evictor */, MCRegister /* physreg */>;
- using EvicteeInfo = llvm::DenseMap<Register /* evictee */, EvictorInfo>;
-
- private:
- /// Each Vreg that has been evicted in the last stage of selectOrSplit will
- /// be mapped to the evictor Vreg and the PhysReg it was evicted from.
- EvicteeInfo Evictees;
-
- public:
- /// Clear all eviction information.
- void clear() { Evictees.clear(); }
-
- /// Clear eviction information for the given evictee Vreg.
- /// E.g. when Vreg get's a new allocation, the old eviction info is no
- /// longer relevant.
- /// \param Evictee The evictee Vreg for whom we want to clear collected
- /// eviction info.
- void clearEvicteeInfo(Register Evictee) { Evictees.erase(Evictee); }
-
- /// Track new eviction.
- /// The Evictor vreg has evicted the Evictee vreg from Physreg.
- /// \param PhysReg The physical register Evictee was evicted from.
- /// \param Evictor The evictor Vreg that evicted Evictee.
- /// \param Evictee The evictee Vreg.
- void addEviction(MCRegister PhysReg, Register Evictor, Register Evictee) {
- Evictees[Evictee].first = Evictor;
- Evictees[Evictee].second = PhysReg;
- }
-
- /// Return the Evictor Vreg which evicted Evictee Vreg from PhysReg.
- /// \param Evictee The evictee vreg.
- /// \return The Evictor vreg which evicted Evictee vreg from PhysReg. 0 if
- /// nobody has evicted Evictee from PhysReg.
- EvictorInfo getEvictor(Register Evictee) {
- if (Evictees.count(Evictee)) {
- return Evictees[Evictee];
- }
-
- return EvictorInfo(0, 0);
- }
- };
-
- // Keeps track of past evictions in order to optimize region split decision.
- EvictionTrack LastEvicted;
-
- // splitting state.
- std::unique_ptr<SplitAnalysis> SA;
- std::unique_ptr<SplitEditor> SE;
-
- /// Cached per-block interference maps
- InterferenceCache IntfCache;
-
- /// All basic blocks where the current register has uses.
- SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;
-
- /// Global live range splitting candidate info.
- struct GlobalSplitCandidate {
- // Register intended for assignment, or 0.
- MCRegister PhysReg;
-
- // SplitKit interval index for this candidate.
- unsigned IntvIdx;
-
- // Interference for PhysReg.
- InterferenceCache::Cursor Intf;
-
- // Bundles where this candidate should be live.
- BitVector LiveBundles;
- SmallVector<unsigned, 8> ActiveBlocks;
-
- void reset(InterferenceCache &Cache, MCRegister Reg) {
- PhysReg = Reg;
- IntvIdx = 0;
- Intf.setPhysReg(Cache, Reg);
- LiveBundles.clear();
- ActiveBlocks.clear();
- }
-
- // Set B[I] = C for every live bundle where B[I] was NoCand.
- unsigned getBundles(SmallVectorImpl<unsigned> &B, unsigned C) {
- unsigned Count = 0;
- for (unsigned I : LiveBundles.set_bits())
- if (B[I] == NoCand) {
- B[I] = C;
- Count++;
- }
- return Count;
- }
- };
-
- /// Candidate info for each PhysReg in AllocationOrder.
- /// This vector never shrinks, but grows to the size of the largest register
- /// class.
- SmallVector<GlobalSplitCandidate, 32> GlobalCand;
-
- enum : unsigned { NoCand = ~0u };
-
- /// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to
- /// NoCand which indicates the stack interval.
- SmallVector<unsigned, 32> BundleCand;
-
- /// Callee-save register cost, calculated once per machine function.
- BlockFrequency CSRCost;
-
- /// Enable or not the consideration of the cost of local intervals created
- /// by a split candidate when choosing the best split candidate.
- bool EnableAdvancedRASplitCost;
-
- /// Set of broken hints that may be reconciled later because of eviction.
- SmallSetVector<LiveInterval *, 8> SetOfBrokenHints;
-
- /// The register cost values. This list will be recreated for each Machine
- /// Function
- ArrayRef<uint8_t> RegCosts;
-
-public:
- RAGreedy(const RegClassFilterFunc F = allocateAllRegClasses);
-
- /// Return the pass name.
- StringRef getPassName() const override { return "Greedy Register Allocator"; }
-
- /// RAGreedy analysis usage.
- void getAnalysisUsage(AnalysisUsage &AU) const override;
- void releaseMemory() override;
- Spiller &spiller() override { return *SpillerInstance; }
- void enqueueImpl(LiveInterval *LI) override;
- LiveInterval *dequeue() override;
- MCRegister selectOrSplit(LiveInterval &,
- SmallVectorImpl<Register> &) override;
- void aboutToRemoveInterval(LiveInterval &) override;
-
- /// Perform register allocation.
- bool runOnMachineFunction(MachineFunction &mf) override;
-
- MachineFunctionProperties getRequiredProperties() const override {
- return MachineFunctionProperties().set(
- MachineFunctionProperties::Property::NoPHIs);
- }
-
- MachineFunctionProperties getClearedProperties() const override {
- return MachineFunctionProperties().set(
- MachineFunctionProperties::Property::IsSSA);
- }
-
- static char ID;
-
-private:
- MCRegister selectOrSplitImpl(LiveInterval &, SmallVectorImpl<Register> &,
- SmallVirtRegSet &, unsigned = 0);
-
- bool LRE_CanEraseVirtReg(Register) override;
- void LRE_WillShrinkVirtReg(Register) override;
- void LRE_DidCloneVirtReg(Register, Register) override;
- void enqueue(PQueue &CurQueue, LiveInterval *LI);
- LiveInterval *dequeue(PQueue &CurQueue);
-
- BlockFrequency calcSpillCost();
- bool addSplitConstraints(InterferenceCache::Cursor, BlockFrequency&);
- bool addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
- bool growRegion(GlobalSplitCandidate &Cand);
- bool splitCanCauseEvictionChain(Register Evictee, GlobalSplitCandidate &Cand,
- unsigned BBNumber,
- const AllocationOrder &Order);
- bool splitCanCauseLocalSpill(unsigned VirtRegToSplit,
- GlobalSplitCandidate &Cand, unsigned BBNumber,
- const AllocationOrder &Order);
- BlockFrequency calcGlobalSplitCost(GlobalSplitCandidate &,
- const AllocationOrder &Order,
- bool *CanCauseEvictionChain);
- bool calcCompactRegion(GlobalSplitCandidate&);
- void splitAroundRegion(LiveRangeEdit&, ArrayRef<unsigned>);
- void calcGapWeights(MCRegister, SmallVectorImpl<float> &);
- bool canEvictInterferenceInRange(const LiveInterval &VirtReg,
- MCRegister PhysReg, SlotIndex Start,
- SlotIndex End, EvictionCost &MaxCost) const;
- MCRegister getCheapestEvicteeWeight(const AllocationOrder &Order,
- const LiveInterval &VirtReg,
- SlotIndex Start, SlotIndex End,
- float *BestEvictWeight) const;
- void evictInterference(LiveInterval &, MCRegister,
- SmallVectorImpl<Register> &);
- bool mayRecolorAllInterferences(MCRegister PhysReg, LiveInterval &VirtReg,
- SmallLISet &RecoloringCandidates,
- const SmallVirtRegSet &FixedRegisters);
-
- MCRegister tryAssign(LiveInterval&, AllocationOrder&,
- SmallVectorImpl<Register>&,
- const SmallVirtRegSet&);
- MCRegister tryFindEvictionCandidate(LiveInterval &, const AllocationOrder &,
- uint8_t, const SmallVirtRegSet &) const;
- MCRegister tryEvict(LiveInterval &, AllocationOrder &,
- SmallVectorImpl<Register> &, uint8_t,
- const SmallVirtRegSet &);
- MCRegister tryRegionSplit(LiveInterval &, AllocationOrder &,
- SmallVectorImpl<Register> &);
- /// Calculate cost of region splitting.
- unsigned calculateRegionSplitCost(LiveInterval &VirtReg,
- AllocationOrder &Order,
- BlockFrequency &BestCost,
- unsigned &NumCands, bool IgnoreCSR,
- bool *CanCauseEvictionChain = nullptr);
- /// Perform region splitting.
- unsigned doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
- bool HasCompact,
- SmallVectorImpl<Register> &NewVRegs);
- /// Check other options before using a callee-saved register for the first
- /// time.
- MCRegister tryAssignCSRFirstTime(LiveInterval &VirtReg,
- AllocationOrder &Order, MCRegister PhysReg,
- uint8_t &CostPerUseLimit,
- SmallVectorImpl<Register> &NewVRegs);
- void initializeCSRCost();
- unsigned tryBlockSplit(LiveInterval&, AllocationOrder&,
- SmallVectorImpl<Register>&);
- unsigned tryInstructionSplit(LiveInterval&, AllocationOrder&,
- SmallVectorImpl<Register>&);
- unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
- SmallVectorImpl<Register>&);
- unsigned trySplit(LiveInterval&, AllocationOrder&,
- SmallVectorImpl<Register>&,
- const SmallVirtRegSet&);
- unsigned tryLastChanceRecoloring(LiveInterval &, AllocationOrder &,
- SmallVectorImpl<Register> &,
- SmallVirtRegSet &, unsigned);
- bool tryRecoloringCandidates(PQueue &, SmallVectorImpl<Register> &,
- SmallVirtRegSet &, unsigned);
- void tryHintRecoloring(LiveInterval &);
- void tryHintsRecoloring();
-
- /// Model the information carried by one end of a copy.
- struct HintInfo {
- /// The frequency of the copy.
- BlockFrequency Freq;
- /// The virtual register or physical register.
- Register Reg;
- /// Its currently assigned register.
- /// In case of a physical register Reg == PhysReg.
- MCRegister PhysReg;
-
- HintInfo(BlockFrequency Freq, Register Reg, MCRegister PhysReg)
- : Freq(Freq), Reg(Reg), PhysReg(PhysReg) {}
- };
- using HintsInfo = SmallVector<HintInfo, 4>;
-
- BlockFrequency getBrokenHintFreq(const HintsInfo &, MCRegister);
- void collectHintInfo(Register, HintsInfo &);
-
- /// Greedy RA statistic to remark.
- struct RAGreedyStats {
- unsigned Reloads = 0;
- unsigned FoldedReloads = 0;
- unsigned ZeroCostFoldedReloads = 0;
- unsigned Spills = 0;
- unsigned FoldedSpills = 0;
- unsigned Copies = 0;
- float ReloadsCost = 0.0f;
- float FoldedReloadsCost = 0.0f;
- float SpillsCost = 0.0f;
- float FoldedSpillsCost = 0.0f;
- float CopiesCost = 0.0f;
-
- bool isEmpty() {
- return !(Reloads || FoldedReloads || Spills || FoldedSpills ||
- ZeroCostFoldedReloads || Copies);
- }
-
- void add(RAGreedyStats other) {
- Reloads += other.Reloads;
- FoldedReloads += other.FoldedReloads;
- ZeroCostFoldedReloads += other.ZeroCostFoldedReloads;
- Spills += other.Spills;
- FoldedSpills += other.FoldedSpills;
- Copies += other.Copies;
- ReloadsCost += other.ReloadsCost;
- FoldedReloadsCost += other.FoldedReloadsCost;
- SpillsCost += other.SpillsCost;
- FoldedSpillsCost += other.FoldedSpillsCost;
- CopiesCost += other.CopiesCost;
- }
-
- void report(MachineOptimizationRemarkMissed &R);
- };
-
- /// Compute statistic for a basic block.
- RAGreedyStats computeStats(MachineBasicBlock &MBB);
-
- /// Compute and report statistic through a remark.
- RAGreedyStats reportStats(MachineLoop *L);
-
- /// Report the statistic for each loop.
- void reportStats();
-};
-
-} // end anonymous namespace
-
char RAGreedy::ID = 0;
char &llvm::RAGreedyID = RAGreedy::ID;
@@ -613,7 +258,7 @@ void RAGreedy::LRE_DidCloneVirtReg(Register New, Register Old) {
ExtraInfo->LRE_DidCloneVirtReg(New, Old);
}
-void ExtraRegInfo::LRE_DidCloneVirtReg(Register New, Register Old) {
+void RAGreedy::ExtraRegInfo::LRE_DidCloneVirtReg(Register New, Register Old) {
// Cloning a register we haven't even heard about yet? Just ignore it.
if (!Info.inBounds(Old))
return;
@@ -811,7 +456,7 @@ Register RegAllocEvictionAdvisor::canReassign(LiveInterval &VirtReg,
bool DefaultEvictionAdvisor::shouldEvict(LiveInterval &A, bool IsHint,
LiveInterval &B,
bool BreaksHint) const {
- bool CanSplit = ExtraInfo->getStage(B) < RS_Spill;
+ bool CanSplit = RA.getExtraInfo().getStage(B) < RS_Spill;
// Be fairly aggressive about following hints as long as the evictee can be
// split.
@@ -852,7 +497,7 @@ bool DefaultEvictionAdvisor::canEvictInterferenceBasedOnCost(
if (Matrix->checkInterference(VirtReg, PhysReg) > LiveRegMatrix::IK_VirtReg)
return false;
- bool IsLocal = LIS->intervalIsInOneMBB(VirtReg);
+ bool IsLocal = VirtReg.empty() || LIS->intervalIsInOneMBB(VirtReg);
// Find VirtReg's cascade number. This will be unassigned if VirtReg was never
// involved in an eviction before. If a cascade number was assigned, deny
@@ -861,7 +506,7 @@ bool DefaultEvictionAdvisor::canEvictInterferenceBasedOnCost(
//
// This works out so a register without a cascade number is allowed to evict
// anything, and it can be evicted by anything.
- unsigned Cascade = ExtraInfo->getCascadeOrCurrentNext(VirtReg.reg());
+ unsigned Cascade = RA.getExtraInfo().getCascadeOrCurrentNext(VirtReg.reg());
EvictionCost Cost;
for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
@@ -883,7 +528,7 @@ bool DefaultEvictionAdvisor::canEvictInterferenceBasedOnCost(
return false;
// Never evict spill products. They cannot split or spill.
- if (ExtraInfo->getStage(*Intf) == RS_Done)
+ if (RA.getExtraInfo().getStage(*Intf) == RS_Done)
return false;
// Once a live range becomes small enough, it is urgent that we find a
// register for it. This is indicated by an infinite spill weight. These
@@ -898,7 +543,7 @@ bool DefaultEvictionAdvisor::canEvictInterferenceBasedOnCost(
RegClassInfo.getNumAllocatableRegs(
MRI->getRegClass(Intf->reg())));
// Only evict older cascades or live ranges without a cascade.
- unsigned IntfCascade = ExtraInfo->getCascade(Intf->reg());
+ unsigned IntfCascade = RA.getExtraInfo().getCascade(Intf->reg());
if (Cascade <= IntfCascade) {
if (!Urgent)
return false;
@@ -1069,28 +714,20 @@ bool RegAllocEvictionAdvisor::isUnusedCalleeSavedReg(MCRegister PhysReg) const {
return !Matrix->isPhysRegUsed(PhysReg);
}
-MCRegister DefaultEvictionAdvisor::tryFindEvictionCandidate(
- LiveInterval &VirtReg, const AllocationOrder &Order,
- uint8_t CostPerUseLimit, const SmallVirtRegSet &FixedRegisters) const {
- // Keep track of the cheapest interference seen so far.
- EvictionCost BestCost;
- BestCost.setMax();
- MCRegister BestPhys;
+Optional<unsigned>
+RegAllocEvictionAdvisor::getOrderLimit(const LiveInterval &VirtReg,
+ const AllocationOrder &Order,
+ unsigned CostPerUseLimit) const {
unsigned OrderLimit = Order.getOrder().size();
- // When we are just looking for a reduced cost per use, don't break any
- // hints, and only evict smaller spill weights.
if (CostPerUseLimit < uint8_t(~0u)) {
- BestCost.BrokenHints = 0;
- BestCost.MaxWeight = VirtReg.weight();
-
 // Check if any registers in RC are below CostPerUseLimit.
const TargetRegisterClass *RC = MRI->getRegClass(VirtReg.reg());
uint8_t MinCost = RegClassInfo.getMinCost(RC);
if (MinCost >= CostPerUseLimit) {
LLVM_DEBUG(dbgs() << TRI->getRegClassName(RC) << " minimum cost = "
<< MinCost << ", no cheaper registers to be found.\n");
- return 0;
+ return None;
}
// It is normal for register classes to have a long tail of registers with
@@ -1101,24 +738,50 @@ MCRegister DefaultEvictionAdvisor::tryFindEvictionCandidate(
<< " regs.\n");
}
}
+ return OrderLimit;
+}
+
+bool RegAllocEvictionAdvisor::canAllocatePhysReg(unsigned CostPerUseLimit,
+ MCRegister PhysReg) const {
+ if (RegCosts[PhysReg] >= CostPerUseLimit)
+ return false;
+ // The first use of a callee-saved register in a function has cost 1.
+ // Don't start using a CSR when the CostPerUseLimit is low.
+ if (CostPerUseLimit == 1 && isUnusedCalleeSavedReg(PhysReg)) {
+ LLVM_DEBUG(
+ dbgs() << printReg(PhysReg, TRI) << " would clobber CSR "
+ << printReg(RegClassInfo.getLastCalleeSavedAlias(PhysReg), TRI)
+ << '\n');
+ return false;
+ }
+ return true;
+}
+
+MCRegister DefaultEvictionAdvisor::tryFindEvictionCandidate(
+ LiveInterval &VirtReg, const AllocationOrder &Order,
+ uint8_t CostPerUseLimit, const SmallVirtRegSet &FixedRegisters) const {
+ // Keep track of the cheapest interference seen so far.
+ EvictionCost BestCost;
+ BestCost.setMax();
+ MCRegister BestPhys;
+ auto MaybeOrderLimit = getOrderLimit(VirtReg, Order, CostPerUseLimit);
+ if (!MaybeOrderLimit)
+ return MCRegister::NoRegister;
+ unsigned OrderLimit = *MaybeOrderLimit;
+
+ // When we are just looking for a reduced cost per use, don't break any
+ // hints, and only evict smaller spill weights.
+ if (CostPerUseLimit < uint8_t(~0u)) {
+ BestCost.BrokenHints = 0;
+ BestCost.MaxWeight = VirtReg.weight();
+ }
for (auto I = Order.begin(), E = Order.getOrderLimitEnd(OrderLimit); I != E;
++I) {
MCRegister PhysReg = *I;
assert(PhysReg);
- if (RegCosts[PhysReg] >= CostPerUseLimit)
- continue;
- // The first use of a callee-saved register in a function has cost 1.
- // Don't start using a CSR when the CostPerUseLimit is low.
- if (CostPerUseLimit == 1 && isUnusedCalleeSavedReg(PhysReg)) {
- LLVM_DEBUG(
- dbgs() << printReg(PhysReg, TRI) << " would clobber CSR "
- << printReg(RegClassInfo.getLastCalleeSavedAlias(PhysReg), TRI)
- << '\n');
- continue;
- }
-
- if (!canEvictInterferenceBasedOnCost(VirtReg, PhysReg, false, BestCost,
+ if (!canAllocatePhysReg(CostPerUseLimit, PhysReg) ||
+ !canEvictInterferenceBasedOnCost(VirtReg, PhysReg, false, BestCost,
FixedRegisters))
continue;
@@ -3269,8 +2932,8 @@ bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
SE.reset(new SplitEditor(*SA, *AA, *LIS, *VRM, *DomTree, *MBFI, *VRAI));
ExtraInfo.emplace();
- EvictAdvisor = getAnalysis<RegAllocEvictionAdvisorAnalysis>().getAdvisor(
- *MF, Matrix, LIS, VRM, RegClassInfo, &*ExtraInfo);
+ EvictAdvisor =
+ getAnalysis<RegAllocEvictionAdvisorAnalysis>().getAdvisor(*MF, *this);
IntfCache.init(MF, Matrix->getLiveUnions(), Indexes, LIS, TRI);
GlobalCand.resize(32); // This will grow as needed.
SetOfBrokenHints.clear();
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/RegAllocGreedy.h b/contrib/llvm-project/llvm/lib/CodeGen/RegAllocGreedy.h
new file mode 100644
index 000000000000..e9a5fe635f26
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/CodeGen/RegAllocGreedy.h
@@ -0,0 +1,507 @@
+//==- RegAllocGreedy.h ------- greedy register allocator ----------*-C++-*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This file defines the RAGreedy function pass for register allocation in
+// optimized builds.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_REGALLOCGREEDY_H_
+#define LLVM_CODEGEN_REGALLOCGREEDY_H_
+
+#include "AllocationOrder.h"
+#include "InterferenceCache.h"
+#include "LiveDebugVariables.h"
+#include "RegAllocBase.h"
+#include "RegAllocEvictionAdvisor.h"
+#include "SpillPlacement.h"
+#include "SplitKit.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/IndexedMap.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/CalcSpillWeights.h"
+#include "llvm/CodeGen/EdgeBundles.h"
+#include "llvm/CodeGen/LiveInterval.h"
+#include "llvm/CodeGen/LiveIntervalUnion.h"
+#include "llvm/CodeGen/LiveIntervals.h"
+#include "llvm/CodeGen/LiveRangeEdit.h"
+#include "llvm/CodeGen/LiveRegMatrix.h"
+#include "llvm/CodeGen/LiveStacks.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RegisterClassInfo.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/CodeGen/Spiller.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/CodeGen/VirtRegMap.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/BranchProbability.h"
+#include "llvm/Target/TargetMachine.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <memory>
+#include <queue>
+#include <tuple>
+#include <utility>
+
+namespace llvm {
+class LLVM_LIBRARY_VISIBILITY RAGreedy : public MachineFunctionPass,
+ public RegAllocBase,
+ private LiveRangeEdit::Delegate {
+ // Interface to eviction advisers
+public:
+ /// Track allocation stage and eviction loop prevention during allocation.
+ class ExtraRegInfo final {
+ // RegInfo - Keep additional information about each live range.
+ struct RegInfo {
+ LiveRangeStage Stage = RS_New;
+
+ // Cascade - Eviction loop prevention. See
+ // canEvictInterferenceBasedOnCost().
+ unsigned Cascade = 0;
+
+ RegInfo() = default;
+ };
+
+ IndexedMap<RegInfo, VirtReg2IndexFunctor> Info;
+ unsigned NextCascade = 1;
+
+ public:
+ ExtraRegInfo() = default;
+ ExtraRegInfo(const ExtraRegInfo &) = delete;
+
+ LiveRangeStage getStage(Register Reg) const { return Info[Reg].Stage; }
+
+ LiveRangeStage getStage(const LiveInterval &VirtReg) const {
+ return getStage(VirtReg.reg());
+ }
+
+ void setStage(Register Reg, LiveRangeStage Stage) {
+ Info.grow(Reg.id());
+ Info[Reg].Stage = Stage;
+ }
+
+ void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
+ setStage(VirtReg.reg(), Stage);
+ }
+
+ /// Return the current stage of the register, if present, otherwise
+ /// initialize it and return that.
+ LiveRangeStage getOrInitStage(Register Reg) {
+ Info.grow(Reg.id());
+ return getStage(Reg);
+ }
+
+ unsigned getCascade(Register Reg) const { return Info[Reg].Cascade; }
+
+ void setCascade(Register Reg, unsigned Cascade) {
+ Info.grow(Reg.id());
+ Info[Reg].Cascade = Cascade;
+ }
+
+ unsigned getOrAssignNewCascade(Register Reg) {
+ unsigned Cascade = getCascade(Reg);
+ if (!Cascade) {
+ Cascade = NextCascade++;
+ setCascade(Reg, Cascade);
+ }
+ return Cascade;
+ }
+
+ unsigned getCascadeOrCurrentNext(Register Reg) const {
+ unsigned Cascade = getCascade(Reg);
+ if (!Cascade)
+ Cascade = NextCascade;
+ return Cascade;
+ }
+
+ template <typename Iterator>
+ void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
+ for (; Begin != End; ++Begin) {
+ Register Reg = *Begin;
+ Info.grow(Reg.id());
+ if (Info[Reg].Stage == RS_New)
+ Info[Reg].Stage = NewStage;
+ }
+ }
+ void LRE_DidCloneVirtReg(Register New, Register Old);
+ };
+
+ LiveRegMatrix *getInterferenceMatrix() const { return Matrix; }
+ LiveIntervals *getLiveIntervals() const { return LIS; }
+ VirtRegMap *getVirtRegMap() const { return VRM; }
+ const RegisterClassInfo &getRegClassInfo() const { return RegClassInfo; }
+ const ExtraRegInfo &getExtraInfo() const { return *ExtraInfo; }
+ size_t getQueueSize() const { return Queue.size(); }
+ // end (interface to eviction advisers)
+
+private:
+ // Convenient shortcuts.
+ using PQueue = std::priority_queue<std::pair<unsigned, unsigned>>;
+ using SmallLISet = SmallPtrSet<LiveInterval *, 4>;
+
+ // context
+ MachineFunction *MF;
+
+ // Shortcuts to some useful interface.
+ const TargetInstrInfo *TII;
+ const TargetRegisterInfo *TRI;
+ RegisterClassInfo RCI;
+
+ // analyses
+ SlotIndexes *Indexes;
+ MachineBlockFrequencyInfo *MBFI;
+ MachineDominatorTree *DomTree;
+ MachineLoopInfo *Loops;
+ MachineOptimizationRemarkEmitter *ORE;
+ EdgeBundles *Bundles;
+ SpillPlacement *SpillPlacer;
+ LiveDebugVariables *DebugVars;
+ AliasAnalysis *AA;
+
+ // state
+ std::unique_ptr<Spiller> SpillerInstance;
+ PQueue Queue;
+ std::unique_ptr<VirtRegAuxInfo> VRAI;
+ Optional<ExtraRegInfo> ExtraInfo;
+ std::unique_ptr<RegAllocEvictionAdvisor> EvictAdvisor;
+
+ // Enum CutOffStage to keep track of whether the register allocation failed
+ // because of the cutoffs encountered in last chance recoloring.
+ // Note: This is used as a bitmask. Any new value should be the next power of 2.
+ enum CutOffStage {
+ // No cutoffs encountered
+ CO_None = 0,
+
+ // lcr-max-depth cutoff encountered
+ CO_Depth = 1,
+
+ // lcr-max-interf cutoff encountered
+ CO_Interf = 2
+ };
+
+ uint8_t CutOffInfo;
+
+#ifndef NDEBUG
+ static const char *const StageName[];
+#endif
+
+ /// EvictionTrack - Keeps track of past evictions in order to optimize region
+ /// split decision.
+ class EvictionTrack {
+
+ public:
+ using EvictorInfo =
+ std::pair<Register /* evictor */, MCRegister /* physreg */>;
+ using EvicteeInfo = llvm::DenseMap<Register /* evictee */, EvictorInfo>;
+
+ private:
+ /// Each Vreg that has been evicted in the last stage of selectOrSplit will
+ /// be mapped to the evictor Vreg and the PhysReg it was evicted from.
+ EvicteeInfo Evictees;
+
+ public:
+ /// Clear all eviction information.
+ void clear() { Evictees.clear(); }
+
+ /// Clear eviction information for the given evictee Vreg.
+ /// E.g. when Vreg gets a new allocation, the old eviction info is no
+ /// longer relevant.
+ /// \param Evictee The evictee Vreg for whom we want to clear collected
+ /// eviction info.
+ void clearEvicteeInfo(Register Evictee) { Evictees.erase(Evictee); }
+
+ /// Track new eviction.
+ /// The Evictor vreg has evicted the Evictee vreg from Physreg.
+ /// \param PhysReg The physical register Evictee was evicted from.
+ /// \param Evictor The evictor Vreg that evicted Evictee.
+ /// \param Evictee The evictee Vreg.
+ void addEviction(MCRegister PhysReg, Register Evictor, Register Evictee) {
+ Evictees[Evictee].first = Evictor;
+ Evictees[Evictee].second = PhysReg;
+ }
+
+ /// Return the Evictor Vreg which evicted Evictee Vreg from PhysReg.
+ /// \param Evictee The evictee vreg.
+ /// \return The Evictor vreg which evicted Evictee vreg from PhysReg. 0 if
+ /// nobody has evicted Evictee from PhysReg.
+ EvictorInfo getEvictor(Register Evictee) {
+ if (Evictees.count(Evictee)) {
+ return Evictees[Evictee];
+ }
+
+ return EvictorInfo(0, 0);
+ }
+ };
+
+ // Keeps track of past evictions in order to optimize region split decision.
+ EvictionTrack LastEvicted;
+
+ // splitting state.
+ std::unique_ptr<SplitAnalysis> SA;
+ std::unique_ptr<SplitEditor> SE;
+
+ /// Cached per-block interference maps
+ InterferenceCache IntfCache;
+
+ /// All basic blocks where the current register has uses.
+ SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;
+
+ /// Global live range splitting candidate info.
+ struct GlobalSplitCandidate {
+ // Register intended for assignment, or 0.
+ MCRegister PhysReg;
+
+ // SplitKit interval index for this candidate.
+ unsigned IntvIdx;
+
+ // Interference for PhysReg.
+ InterferenceCache::Cursor Intf;
+
+ // Bundles where this candidate should be live.
+ BitVector LiveBundles;
+ SmallVector<unsigned, 8> ActiveBlocks;
+
+ void reset(InterferenceCache &Cache, MCRegister Reg) {
+ PhysReg = Reg;
+ IntvIdx = 0;
+ Intf.setPhysReg(Cache, Reg);
+ LiveBundles.clear();
+ ActiveBlocks.clear();
+ }
+
+ // Set B[I] = C for every live bundle where B[I] was NoCand.
+ unsigned getBundles(SmallVectorImpl<unsigned> &B, unsigned C) {
+ unsigned Count = 0;
+ for (unsigned I : LiveBundles.set_bits())
+ if (B[I] == NoCand) {
+ B[I] = C;
+ Count++;
+ }
+ return Count;
+ }
+ };
+
+ /// Candidate info for each PhysReg in AllocationOrder.
+ /// This vector never shrinks, but grows to the size of the largest register
+ /// class.
+ SmallVector<GlobalSplitCandidate, 32> GlobalCand;
+
+ enum : unsigned { NoCand = ~0u };
+
+ /// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to
+ /// NoCand which indicates the stack interval.
+ SmallVector<unsigned, 32> BundleCand;
+
+ /// Callee-save register cost, calculated once per machine function.
+ BlockFrequency CSRCost;
+
+ /// Whether to consider the cost of local intervals created by a split
+ /// candidate when choosing the best split candidate.
+ bool EnableAdvancedRASplitCost;
+
+ /// Set of broken hints that may be reconciled later because of eviction.
+ SmallSetVector<LiveInterval *, 8> SetOfBrokenHints;
+
+ /// The register cost values. This list will be recreated for each machine
+ /// function.
+ ArrayRef<uint8_t> RegCosts;
+
+public:
+ RAGreedy(const RegClassFilterFunc F = allocateAllRegClasses);
+
+ /// Return the pass name.
+ StringRef getPassName() const override { return "Greedy Register Allocator"; }
+
+ /// RAGreedy analysis usage.
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ void releaseMemory() override;
+ Spiller &spiller() override { return *SpillerInstance; }
+ void enqueueImpl(LiveInterval *LI) override;
+ LiveInterval *dequeue() override;
+ MCRegister selectOrSplit(LiveInterval &,
+ SmallVectorImpl<Register> &) override;
+ void aboutToRemoveInterval(LiveInterval &) override;
+
+ /// Perform register allocation.
+ bool runOnMachineFunction(MachineFunction &mf) override;
+
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::NoPHIs);
+ }
+
+ MachineFunctionProperties getClearedProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::IsSSA);
+ }
+
+ static char ID;
+
+private:
+ MCRegister selectOrSplitImpl(LiveInterval &, SmallVectorImpl<Register> &,
+ SmallVirtRegSet &, unsigned = 0);
+
+ bool LRE_CanEraseVirtReg(Register) override;
+ void LRE_WillShrinkVirtReg(Register) override;
+ void LRE_DidCloneVirtReg(Register, Register) override;
+ void enqueue(PQueue &CurQueue, LiveInterval *LI);
+ LiveInterval *dequeue(PQueue &CurQueue);
+
+ BlockFrequency calcSpillCost();
+ bool addSplitConstraints(InterferenceCache::Cursor, BlockFrequency &);
+ bool addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
+ bool growRegion(GlobalSplitCandidate &Cand);
+ bool splitCanCauseEvictionChain(Register Evictee, GlobalSplitCandidate &Cand,
+ unsigned BBNumber,
+ const AllocationOrder &Order);
+ bool splitCanCauseLocalSpill(unsigned VirtRegToSplit,
+ GlobalSplitCandidate &Cand, unsigned BBNumber,
+ const AllocationOrder &Order);
+ BlockFrequency calcGlobalSplitCost(GlobalSplitCandidate &,
+ const AllocationOrder &Order,
+ bool *CanCauseEvictionChain);
+ bool calcCompactRegion(GlobalSplitCandidate &);
+ void splitAroundRegion(LiveRangeEdit &, ArrayRef<unsigned>);
+ void calcGapWeights(MCRegister, SmallVectorImpl<float> &);
+ bool canEvictInterferenceInRange(const LiveInterval &VirtReg,
+ MCRegister PhysReg, SlotIndex Start,
+ SlotIndex End, EvictionCost &MaxCost) const;
+ MCRegister getCheapestEvicteeWeight(const AllocationOrder &Order,
+ const LiveInterval &VirtReg,
+ SlotIndex Start, SlotIndex End,
+ float *BestEvictWeight) const;
+ void evictInterference(LiveInterval &, MCRegister,
+ SmallVectorImpl<Register> &);
+ bool mayRecolorAllInterferences(MCRegister PhysReg, LiveInterval &VirtReg,
+ SmallLISet &RecoloringCandidates,
+ const SmallVirtRegSet &FixedRegisters);
+
+ MCRegister tryAssign(LiveInterval &, AllocationOrder &,
+ SmallVectorImpl<Register> &, const SmallVirtRegSet &);
+ MCRegister tryEvict(LiveInterval &, AllocationOrder &,
+ SmallVectorImpl<Register> &, uint8_t,
+ const SmallVirtRegSet &);
+ MCRegister tryRegionSplit(LiveInterval &, AllocationOrder &,
+ SmallVectorImpl<Register> &);
+ /// Calculate cost of region splitting.
+ unsigned calculateRegionSplitCost(LiveInterval &VirtReg,
+ AllocationOrder &Order,
+ BlockFrequency &BestCost,
+ unsigned &NumCands, bool IgnoreCSR,
+ bool *CanCauseEvictionChain = nullptr);
+ /// Perform region splitting.
+ unsigned doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
+ bool HasCompact, SmallVectorImpl<Register> &NewVRegs);
+ /// Check other options before using a callee-saved register for the first
+ /// time.
+ MCRegister tryAssignCSRFirstTime(LiveInterval &VirtReg,
+ AllocationOrder &Order, MCRegister PhysReg,
+ uint8_t &CostPerUseLimit,
+ SmallVectorImpl<Register> &NewVRegs);
+ void initializeCSRCost();
+ unsigned tryBlockSplit(LiveInterval &, AllocationOrder &,
+ SmallVectorImpl<Register> &);
+ unsigned tryInstructionSplit(LiveInterval &, AllocationOrder &,
+ SmallVectorImpl<Register> &);
+ unsigned tryLocalSplit(LiveInterval &, AllocationOrder &,
+ SmallVectorImpl<Register> &);
+ unsigned trySplit(LiveInterval &, AllocationOrder &,
+ SmallVectorImpl<Register> &, const SmallVirtRegSet &);
+ unsigned tryLastChanceRecoloring(LiveInterval &, AllocationOrder &,
+ SmallVectorImpl<Register> &,
+ SmallVirtRegSet &, unsigned);
+ bool tryRecoloringCandidates(PQueue &, SmallVectorImpl<Register> &,
+ SmallVirtRegSet &, unsigned);
+ void tryHintRecoloring(LiveInterval &);
+ void tryHintsRecoloring();
+
+ /// Model the information carried by one end of a copy.
+ struct HintInfo {
+ /// The frequency of the copy.
+ BlockFrequency Freq;
+ /// The virtual register or physical register.
+ Register Reg;
+ /// Its currently assigned register.
+ /// In case of a physical register Reg == PhysReg.
+ MCRegister PhysReg;
+
+ HintInfo(BlockFrequency Freq, Register Reg, MCRegister PhysReg)
+ : Freq(Freq), Reg(Reg), PhysReg(PhysReg) {}
+ };
+ using HintsInfo = SmallVector<HintInfo, 4>;
+
+ BlockFrequency getBrokenHintFreq(const HintsInfo &, MCRegister);
+ void collectHintInfo(Register, HintsInfo &);
+
+ /// Greedy RA statistic to remark.
+ struct RAGreedyStats {
+ unsigned Reloads = 0;
+ unsigned FoldedReloads = 0;
+ unsigned ZeroCostFoldedReloads = 0;
+ unsigned Spills = 0;
+ unsigned FoldedSpills = 0;
+ unsigned Copies = 0;
+ float ReloadsCost = 0.0f;
+ float FoldedReloadsCost = 0.0f;
+ float SpillsCost = 0.0f;
+ float FoldedSpillsCost = 0.0f;
+ float CopiesCost = 0.0f;
+
+ bool isEmpty() {
+ return !(Reloads || FoldedReloads || Spills || FoldedSpills ||
+ ZeroCostFoldedReloads || Copies);
+ }
+
+ void add(RAGreedyStats other) {
+ Reloads += other.Reloads;
+ FoldedReloads += other.FoldedReloads;
+ ZeroCostFoldedReloads += other.ZeroCostFoldedReloads;
+ Spills += other.Spills;
+ FoldedSpills += other.FoldedSpills;
+ Copies += other.Copies;
+ ReloadsCost += other.ReloadsCost;
+ FoldedReloadsCost += other.FoldedReloadsCost;
+ SpillsCost += other.SpillsCost;
+ FoldedSpillsCost += other.FoldedSpillsCost;
+ CopiesCost += other.CopiesCost;
+ }
+
+ void report(MachineOptimizationRemarkMissed &R);
+ };
+
+ /// Compute statistic for a basic block.
+ RAGreedyStats computeStats(MachineBasicBlock &MBB);
+
+ /// Compute and report statistic through a remark.
+ RAGreedyStats reportStats(MachineLoop *L);
+
+ /// Report the statistic for each loop.
+ void reportStats();
+};
+} // namespace llvm
+#endif // #ifndef LLVM_CODEGEN_REGALLOCGREEDY_H_
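The Cascade bookkeeping in the ExtraRegInfo class added above exists to prevent eviction ping-pong between live ranges. The following is a minimal standalone sketch of that rule, not the in-tree code: CascadeModel and its members are hypothetical names, and the "urgent" exception taken in canEvictInterferenceBasedOnCost is deliberately ignored. The idea is that a live range may evict interference only when its cascade number is strictly newer, and whatever it evicts inherits that number, so the evictee can never evict its evictor back.

#include <unordered_map>

struct CascadeModel {
  std::unordered_map<unsigned, unsigned> Cascade; // vreg id -> cascade (0 = none yet)
  unsigned NextCascade = 1;

  unsigned getOrAssignNewCascade(unsigned VReg) {
    unsigned &C = Cascade[VReg];
    if (!C)
      C = NextCascade++;
    return C;
  }

  // Mirrors the non-urgent "Cascade <= IntfCascade -> refuse" test: an evictor
  // may only displace live ranges from an older cascade (or with none at all).
  bool mayEvict(unsigned Evictor, unsigned Evictee) {
    unsigned EvictorC = getOrAssignNewCascade(Evictor);
    auto It = Cascade.find(Evictee);
    unsigned EvicteeC = (It == Cascade.end()) ? 0 : It->second;
    if (EvictorC <= EvicteeC)
      return false;
    Cascade[Evictee] = EvictorC; // the evictee joins the evictor's cascade
    return true;
  }
};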
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/RegisterScavenging.cpp b/contrib/llvm-project/llvm/lib/CodeGen/RegisterScavenging.cpp
index c0a07ec4c91d..424ad7419165 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/RegisterScavenging.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/RegisterScavenging.cpp
@@ -533,6 +533,22 @@ Register RegScavenger::scavengeRegister(const TargetRegisterClass *RC,
Candidates.reset(*AI);
}
+ // If we have already scavenged some registers, remove them from the
+ // candidates. If we end up recursively calling eliminateFrameIndex, we don't
+ // want to be clobbering previously scavenged registers or their associated
+ // stack slots.
+ for (ScavengedInfo &SI : Scavenged) {
+ if (SI.Reg) {
+ if (isRegUsed(SI.Reg)) {
+ LLVM_DEBUG(
+ dbgs() << "Removing " << printReg(SI.Reg, TRI) <<
+ " from scavenging candidates since it was already scavenged\n");
+ for (MCRegAliasIterator AI(SI.Reg, TRI, true); AI.isValid(); ++AI)
+ Candidates.reset(*AI);
+ }
+ }
+ }
+
// Try to find a register that's unused if there is one, as then we won't
// have to spill.
BitVector Available = getRegsAvailable(RC);
@@ -553,6 +569,12 @@ Register RegScavenger::scavengeRegister(const TargetRegisterClass *RC,
if (!AllowSpill)
return 0;
+#ifndef NDEBUG
+ for (ScavengedInfo &SI : Scavenged) {
+ assert(SI.Reg != SReg && "scavenged a previously scavenged register");
+ }
+#endif
+
ScavengedInfo &Scavenged = spill(SReg, *RC, SPAdj, I, UseMI);
Scavenged.Restore = &*std::prev(UseMI);
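A compact model of the candidate filtering added in the hunk above, using illustrative stand-in types rather than the RegScavenger API: every register that is still scavenged, together with all of its aliases, is struck from the candidate mask before a new scavenge register is picked, so a recursive eliminateFrameIndex cannot clobber it or its emergency spill slot.

#include <bitset>
#include <functional>
#include <vector>

using RegMask = std::bitset<128>; // hypothetical: one bit per physical register

RegMask filterScavengedCandidates(
    RegMask Candidates, const std::vector<unsigned> &StillScavenged,
    const std::function<std::vector<unsigned>(unsigned)> &AliasesOf) {
  for (unsigned Reg : StillScavenged)
    for (unsigned A : AliasesOf(Reg)) // assume AliasesOf(Reg) includes Reg itself
      Candidates.reset(A);
  return Candidates;
}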
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp b/contrib/llvm-project/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp
index 6e05de888cc0..a61a2b2728fa 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp
@@ -30,8 +30,7 @@ using namespace llvm;
ScoreboardHazardRecognizer::ScoreboardHazardRecognizer(
const InstrItineraryData *II, const ScheduleDAG *SchedDAG,
const char *ParentDebugType)
- : ScheduleHazardRecognizer(), DebugType(ParentDebugType), ItinData(II),
- DAG(SchedDAG) {
+ : DebugType(ParentDebugType), ItinData(II), DAG(SchedDAG) {
(void)DebugType;
// Determine the maximum depth of any itinerary. This determines the depth of
// the scoreboard. We always make the scoreboard at least 1 cycle deep to
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 067ad819e0d2..932f263d2558 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -593,7 +593,7 @@ namespace {
SDValue MatchRotate(SDValue LHS, SDValue RHS, const SDLoc &DL);
SDValue MatchLoadCombine(SDNode *N);
SDValue mergeTruncStores(StoreSDNode *N);
- SDValue ReduceLoadWidth(SDNode *N);
+ SDValue reduceLoadWidth(SDNode *N);
SDValue ReduceLoadOpStoreWidth(SDNode *N);
SDValue splitMergedValStore(StoreSDNode *ST);
SDValue TransformFPLoadStorePair(SDNode *N);
@@ -1070,7 +1070,7 @@ SDValue DAGCombiner::reassociateOpsCommutative(unsigned Opc, const SDLoc &DL,
return DAG.getNode(Opc, DL, VT, N00, OpNode);
return SDValue();
}
- if (N0.hasOneUse()) {
+ if (TLI.isReassocProfitable(DAG, N0, N1)) {
// Reassociate: (op (op x, c1), y) -> (op (op x, y), c1)
// iff (op x, c1) has one use
if (SDValue OpNode = DAG.getNode(Opc, SDLoc(N0), VT, N00, N1))
@@ -3058,9 +3058,8 @@ static SDValue combineADDCARRYDiamond(DAGCombiner &Combiner, SelectionDAG &DAG,
//
// Our goal is to identify A, B, and CarryIn and produce ADDCARRY/SUBCARRY with
// a single path for carry/borrow out propagation:
-static SDValue combineCarryDiamond(DAGCombiner &Combiner, SelectionDAG &DAG,
- const TargetLowering &TLI, SDValue Carry0,
- SDValue Carry1, SDNode *N) {
+static SDValue combineCarryDiamond(SelectionDAG &DAG, const TargetLowering &TLI,
+ SDValue Carry0, SDValue Carry1, SDNode *N) {
if (Carry0.getResNo() != 1 || Carry1.getResNo() != 1)
return SDValue();
unsigned Opcode = Carry0.getOpcode();
@@ -3908,7 +3907,7 @@ SDValue DAGCombiner::visitMUL(SDNode *N) {
// Change (mul (shl X, C), Y) -> (shl (mul X, Y), C) when the shift has one
// use.
{
- SDValue Sh(nullptr, 0), Y(nullptr, 0);
+ SDValue Sh, Y;
// Check for both (mul (shl X, C), Y) and (mul Y, (shl X, C)).
if (N0.getOpcode() == ISD::SHL &&
@@ -4471,15 +4470,15 @@ SDValue DAGCombiner::visitMULHS(SDNode *N) {
return FoldedVOp;
// fold (mulhs x, 0) -> 0
- // do not return N0/N1, because undef node may exist.
- if (ISD::isConstantSplatVectorAllZeros(N0.getNode()) ||
- ISD::isConstantSplatVectorAllZeros(N1.getNode()))
+ // do not return N1, because undef node may exist.
+ if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
return DAG.getConstant(0, DL, VT);
}
// fold (mulhs x, 0) -> 0
if (isNullConstant(N1))
return N1;
+
// fold (mulhs x, 1) -> (sra x, size(x)-1)
if (isOneConstant(N1))
return DAG.getNode(ISD::SRA, DL, N0.getValueType(), N0,
@@ -4531,18 +4530,19 @@ SDValue DAGCombiner::visitMULHU(SDNode *N) {
return FoldedVOp;
// fold (mulhu x, 0) -> 0
- // do not return N0/N1, because undef node may exist.
- if (ISD::isConstantSplatVectorAllZeros(N0.getNode()) ||
- ISD::isConstantSplatVectorAllZeros(N1.getNode()))
+ // do not return N1, because undef node may exist.
+ if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
return DAG.getConstant(0, DL, VT);
}
// fold (mulhu x, 0) -> 0
if (isNullConstant(N1))
return N1;
+
// fold (mulhu x, 1) -> 0
if (isOneConstant(N1))
return DAG.getConstant(0, DL, N0.getValueType());
+
// fold (mulhu x, undef) -> 0
if (N0.isUndef() || N1.isUndef())
return DAG.getConstant(0, DL, VT);
@@ -4892,6 +4892,42 @@ static SDValue PerformMinMaxFpToSatCombine(SDValue N0, SDValue N1, SDValue N2,
: DAG.getSExtOrTrunc(Sat, DL, N2->getValueType(0));
}
+static SDValue PerformUMinFpToSatCombine(SDValue N0, SDValue N1, SDValue N2,
+ SDValue N3, ISD::CondCode CC,
+ SelectionDAG &DAG) {
+ // We are looking for UMIN(FPTOUI(X), (2^n)-1), which may have come via a
+ // select/vselect/select_cc. The two operand pairs for the select (N2/N3) may
+ // be truncated versions of the setcc (N0/N1).
+ if ((N0 != N2 &&
+ (N2.getOpcode() != ISD::TRUNCATE || N0 != N2.getOperand(0))) ||
+ N0.getOpcode() != ISD::FP_TO_UINT || CC != ISD::SETULT)
+ return SDValue();
+ ConstantSDNode *N1C = isConstOrConstSplat(N1);
+ ConstantSDNode *N3C = isConstOrConstSplat(N3);
+ if (!N1C || !N3C)
+ return SDValue();
+ const APInt &C1 = N1C->getAPIntValue();
+ const APInt &C3 = N3C->getAPIntValue();
+ if (!(C1 + 1).isPowerOf2() || C1.getBitWidth() < C3.getBitWidth() ||
+ C1 != C3.zextOrSelf(C1.getBitWidth()))
+ return SDValue();
+
+ unsigned BW = (C1 + 1).exactLogBase2();
+ EVT FPVT = N0.getOperand(0).getValueType();
+ EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), BW);
+ if (FPVT.isVector())
+ NewVT = EVT::getVectorVT(*DAG.getContext(), NewVT,
+ FPVT.getVectorElementCount());
+ if (!DAG.getTargetLoweringInfo().shouldConvertFpToSat(ISD::FP_TO_UINT_SAT,
+ FPVT, NewVT))
+ return SDValue();
+
+ SDValue Sat =
+ DAG.getNode(ISD::FP_TO_UINT_SAT, SDLoc(N0), NewVT, N0.getOperand(0),
+ DAG.getValueType(NewVT.getScalarType()));
+ return DAG.getZExtOrTrunc(Sat, SDLoc(N0), N3.getValueType());
+}
+
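As a scalar illustration of the new PerformUMinFpToSatCombine (a sketch, not the DAG code; the helper names below are invented): clamping an unsaturated fptoui with umin against 2^n - 1 gives the same defined results as one saturating conversion, and the inputs where plain fptoui is undefined (NaN, negatives, out-of-range values) are exactly the slack that lets the combine substitute FP_TO_UINT_SAT. Model for n = 8:

#include <algorithm>
#include <cstdint>

// What ISD::FP_TO_UINT_SAT to i8 computes: NaN and negatives go to 0,
// anything at or above 255 saturates to 255.
static uint8_t fptoui_sat_u8(float X) {
  if (!(X > 0.0f))
    return 0;
  if (X >= 255.0f)
    return 255;
  return static_cast<uint8_t>(X);
}

// The matched pattern: a wider unsaturated fptoui, then umin with (2^8)-1 and
// a truncate. For in-range inputs this agrees with fptoui_sat_u8; elsewhere
// the cast below is undefined, which is the freedom the rewrite relies on.
static uint8_t umin_then_trunc_u8(float X) {
  uint32_t Wide = static_cast<uint32_t>(X);
  return static_cast<uint8_t>(std::min<uint32_t>(Wide, 255u));
}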
SDValue DAGCombiner::visitIMINMAX(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
@@ -4934,6 +4970,9 @@ SDValue DAGCombiner::visitIMINMAX(SDNode *N) {
if (SDValue S = PerformMinMaxFpToSatCombine(
N0, N1, N0, N1, Opcode == ISD::SMIN ? ISD::SETLT : ISD::SETGT, DAG))
return S;
+ if (Opcode == ISD::UMIN)
+ if (SDValue S = PerformUMinFpToSatCombine(N0, N1, N0, N1, ISD::SETULT, DAG))
+ return S;
// Simplify the operands using demanded-bits information.
if (SimplifyDemandedBits(SDValue(N, 0)))
@@ -5491,6 +5530,8 @@ bool DAGCombiner::SearchForAndLoads(SDNode *N,
// Some constants may need fixing up later if they are too large.
if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
+ if (Mask->getValueType(0) != C->getValueType(0))
+ return false;
if ((N->getOpcode() == ISD::OR || N->getOpcode() == ISD::XOR) &&
(Mask->getAPIntValue() & C->getAPIntValue()) != C->getAPIntValue())
NodesWithConsts.insert(N);
@@ -5524,9 +5565,9 @@ bool DAGCombiner::SearchForAndLoads(SDNode *N,
case ISD::AssertZext: {
unsigned ActiveBits = Mask->getAPIntValue().countTrailingOnes();
EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits);
- EVT VT = Op.getOpcode() == ISD::AssertZext ?
- cast<VTSDNode>(Op.getOperand(1))->getVT() :
- Op.getOperand(0).getValueType();
+ EVT VT = Op.getOpcode() == ISD::AssertZext
+ ? cast<VTSDNode>(Op.getOperand(1))->getVT()
+ : Op.getOperand(0).getValueType();
// We can accept extending nodes if the mask is wider or an equal
// width to the original type.
@@ -5534,6 +5575,15 @@ bool DAGCombiner::SearchForAndLoads(SDNode *N,
continue;
break;
}
+ case ISD::ANY_EXTEND: {
+ unsigned ActiveBits = Mask->getAPIntValue().countTrailingOnes();
+ EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits);
+ EVT VT = Op.getOperand(0).getValueType();
+ if (ExtVT.bitsGE(VT))
+ break;
+ // Fallthrough to searching for nodes from the operands of the extend.
+ LLVM_FALLTHROUGH;
+ }
case ISD::OR:
case ISD::XOR:
case ISD::AND:
@@ -5593,12 +5643,14 @@ bool DAGCombiner::BackwardsPropagateMask(SDNode *N) {
// masking.
if (FixupNode) {
LLVM_DEBUG(dbgs() << "First, need to fix up: "; FixupNode->dump());
- SDValue And = DAG.getNode(ISD::AND, SDLoc(FixupNode),
- FixupNode->getValueType(0),
- SDValue(FixupNode, 0), MaskOp);
+ SDValue MaskOpT = DAG.getZExtOrTrunc(MaskOp, SDLoc(FixupNode),
+ FixupNode->getValueType(0));
+ SDValue And =
+ DAG.getNode(ISD::AND, SDLoc(FixupNode), FixupNode->getValueType(0),
+ SDValue(FixupNode, 0), MaskOpT);
DAG.ReplaceAllUsesOfValueWith(SDValue(FixupNode, 0), And);
if (And.getOpcode() == ISD ::AND)
- DAG.UpdateNodeOperands(And.getNode(), SDValue(FixupNode, 0), MaskOp);
+ DAG.UpdateNodeOperands(And.getNode(), SDValue(FixupNode, 0), MaskOpT);
}
// Narrow any constants that need it.
@@ -5607,10 +5659,12 @@ bool DAGCombiner::BackwardsPropagateMask(SDNode *N) {
SDValue Op1 = LogicN->getOperand(1);
if (isa<ConstantSDNode>(Op0))
- std::swap(Op0, Op1);
+ std::swap(Op0, Op1);
- SDValue And = DAG.getNode(ISD::AND, SDLoc(Op1), Op1.getValueType(),
- Op1, MaskOp);
+ SDValue MaskOpT =
+ DAG.getZExtOrTrunc(MaskOp, SDLoc(Op1), Op1.getValueType());
+ SDValue And =
+ DAG.getNode(ISD::AND, SDLoc(Op1), Op1.getValueType(), Op1, MaskOpT);
DAG.UpdateNodeOperands(LogicN, Op0, And);
}
@@ -5618,13 +5672,15 @@ bool DAGCombiner::BackwardsPropagateMask(SDNode *N) {
// Create narrow loads.
for (auto *Load : Loads) {
LLVM_DEBUG(dbgs() << "Propagate AND back to: "; Load->dump());
+ SDValue MaskOpT =
+ DAG.getZExtOrTrunc(MaskOp, SDLoc(Load), Load->getValueType(0));
SDValue And = DAG.getNode(ISD::AND, SDLoc(Load), Load->getValueType(0),
- SDValue(Load, 0), MaskOp);
+ SDValue(Load, 0), MaskOpT);
DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), And);
if (And.getOpcode() == ISD ::AND)
And = SDValue(
- DAG.UpdateNodeOperands(And.getNode(), SDValue(Load, 0), MaskOp), 0);
- SDValue NewLoad = ReduceLoadWidth(And.getNode());
+ DAG.UpdateNodeOperands(And.getNode(), SDValue(Load, 0), MaskOpT), 0);
+ SDValue NewLoad = reduceLoadWidth(And.getNode());
assert(NewLoad &&
"Shouldn't be masking the load if it can't be narrowed");
CombineTo(Load, NewLoad, NewLoad.getValue(1));
@@ -5799,18 +5855,12 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
return FoldedVOp;
// fold (and x, 0) -> 0, vector edition
- if (ISD::isConstantSplatVectorAllZeros(N0.getNode()))
- // do not return N0, because undef node may exist in N0
- return DAG.getConstant(APInt::getZero(N0.getScalarValueSizeInBits()),
- SDLoc(N), N0.getValueType());
if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
// do not return N1, because undef node may exist in N1
return DAG.getConstant(APInt::getZero(N1.getScalarValueSizeInBits()),
SDLoc(N), N1.getValueType());
// fold (and x, -1) -> x, vector edition
- if (ISD::isConstantSplatVectorAllOnes(N0.getNode()))
- return N1;
if (ISD::isConstantSplatVectorAllOnes(N1.getNode()))
return N0;
@@ -5862,7 +5912,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
if (SDValue Shuffle = XformToShuffleWithZero(N))
return Shuffle;
- if (SDValue Combined = combineCarryDiamond(*this, DAG, TLI, N0, N1, N))
+ if (SDValue Combined = combineCarryDiamond(DAG, TLI, N0, N1, N))
return Combined;
// fold (and (or x, C), D) -> D if (C & D) == D
@@ -6024,7 +6074,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
if (!VT.isVector() && N1C && (N0.getOpcode() == ISD::LOAD ||
(N0.getOpcode() == ISD::ANY_EXTEND &&
N0.getOperand(0).getOpcode() == ISD::LOAD))) {
- if (SDValue Res = ReduceLoadWidth(N)) {
+ if (SDValue Res = reduceLoadWidth(N)) {
LoadSDNode *LN0 = N0->getOpcode() == ISD::ANY_EXTEND
? cast<LoadSDNode>(N0.getOperand(0)) : cast<LoadSDNode>(N0);
AddToWorklist(N);
@@ -6659,7 +6709,7 @@ SDValue DAGCombiner::visitOR(SDNode *N) {
if (SDValue Combined = visitORLike(N0, N1, N))
return Combined;
- if (SDValue Combined = combineCarryDiamond(*this, DAG, TLI, N0, N1, N))
+ if (SDValue Combined = combineCarryDiamond(DAG, TLI, N0, N1, N))
return Combined;
// Recognize halfword bswaps as (bswap + rotl 16) or (bswap + shl 16)
@@ -8156,7 +8206,7 @@ SDValue DAGCombiner::visitXOR(SDNode *N) {
if (SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
- if (SDValue Combined = combineCarryDiamond(*this, DAG, TLI, N0, N1, N))
+ if (SDValue Combined = combineCarryDiamond(DAG, TLI, N0, N1, N))
return Combined;
return SDValue();
@@ -8948,6 +8998,10 @@ SDValue DAGCombiner::visitSRA(SDNode *N) {
if (SDValue MULH = combineShiftToMULH(N, DAG, TLI))
return MULH;
+ // Attempt to convert a sra of a load into a narrower sign-extending load.
+ if (SDValue NarrowLoad = reduceLoadWidth(N))
+ return NarrowLoad;
+
return SDValue();
}
@@ -9140,7 +9194,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
return NewSRL;
// Attempt to convert a srl of a load into a narrower zero-extending load.
- if (SDValue NarrowLoad = ReduceLoadWidth(N))
+ if (SDValue NarrowLoad = reduceLoadWidth(N))
return NarrowLoad;
// Here is a common situation. We want to optimize:
@@ -9358,6 +9412,17 @@ SDValue DAGCombiner::visitBSWAP(SDNode *N) {
// fold (bswap (bswap x)) -> x
if (N0.getOpcode() == ISD::BSWAP)
return N0->getOperand(0);
+
+ // Canonicalize bswap(bitreverse(x)) -> bitreverse(bswap(x)). If bitreverse
+ // isn't supported, it will be expanded to bswap followed by a manual reversal
+ // of bits in each byte. By placing bswaps before bitreverse, we can remove
+ // the two bswaps if the bitreverse gets expanded.
+ if (N0.getOpcode() == ISD::BITREVERSE && N0.hasOneUse()) {
+ SDLoc DL(N);
+ SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT, N0.getOperand(0));
+ return DAG.getNode(ISD::BITREVERSE, DL, VT, BSwap);
+ }
+
return SDValue();
}
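A small self-contained check of the identity behind the bswap/bitreverse canonicalization above (a sketch only; the helper names are invented): a full bit reversal decomposes into a byte swap composed with a per-byte bit reversal, and those two steps commute, so bswap(bitreverse(x)) and bitreverse(bswap(x)) agree, which is why pushing the bswap inward is safe.

#include <cassert>
#include <cstdint>
#include <initializer_list>

static uint8_t revBits8(uint8_t B) {
  uint8_t R = 0;
  for (int I = 0; I < 8; ++I)
    R = static_cast<uint8_t>((R << 1) | ((B >> I) & 1));
  return R;
}

static uint32_t perByteRev(uint32_t X) {
  uint32_t R = 0;
  for (int B = 0; B < 4; ++B)
    R |= static_cast<uint32_t>(revBits8(static_cast<uint8_t>(X >> (8 * B)))) << (8 * B);
  return R;
}

static uint32_t bitReverse32(uint32_t X) {
  uint32_t R = 0;
  for (int I = 0; I < 32; ++I)
    R = (R << 1) | ((X >> I) & 1);
  return R;
}

static uint32_t bswap32(uint32_t X) {
  return (X >> 24) | ((X >> 8) & 0xFF00u) | ((X << 8) & 0xFF0000u) | (X << 24);
}

int main() {
  for (uint32_t X : {0u, 1u, 0x12345678u, 0xDEADBEEFu, 0xFFFFFFFFu}) {
    assert(bitReverse32(X) == bswap32(perByteRev(X)));            // decomposition
    assert(bswap32(bitReverse32(X)) == bitReverse32(bswap32(X))); // commutation
  }
  return 0;
}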
@@ -10288,6 +10353,8 @@ SDValue DAGCombiner::visitVSELECT(SDNode *N) {
if (SDValue S = PerformMinMaxFpToSatCombine(LHS, RHS, N1, N2, CC, DAG))
return S;
+ if (SDValue S = PerformUMinFpToSatCombine(LHS, RHS, N1, N2, CC, DAG))
+ return S;
// If this select has a condition (setcc) with narrower operands than the
// select, try to widen the compare to match the select width.
@@ -11357,7 +11424,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
if (N0.getOpcode() == ISD::TRUNCATE) {
// fold (sext (truncate (load x))) -> (sext (smaller load x))
// fold (sext (truncate (srl (load x), c))) -> (sext (smaller load (x+c/n)))
- if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) {
+ if (SDValue NarrowLoad = reduceLoadWidth(N0.getNode())) {
SDNode *oye = N0.getOperand(0).getNode();
if (NarrowLoad.getNode() != N0.getNode()) {
CombineTo(N0.getNode(), NarrowLoad);
@@ -11621,7 +11688,7 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
if (N0.getOpcode() == ISD::TRUNCATE) {
// fold (zext (truncate (load x))) -> (zext (smaller load x))
// fold (zext (truncate (srl (load x), c))) -> (zext (smaller load (x+c/n)))
- if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) {
+ if (SDValue NarrowLoad = reduceLoadWidth(N0.getNode())) {
SDNode *oye = N0.getOperand(0).getNode();
if (NarrowLoad.getNode() != N0.getNode()) {
CombineTo(N0.getNode(), NarrowLoad);
@@ -11864,7 +11931,7 @@ SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
// fold (aext (truncate (load x))) -> (aext (smaller load x))
// fold (aext (truncate (srl (load x), c))) -> (aext (small load (x+c/n)))
if (N0.getOpcode() == ISD::TRUNCATE) {
- if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) {
+ if (SDValue NarrowLoad = reduceLoadWidth(N0.getNode())) {
SDNode *oye = N0.getOperand(0).getNode();
if (NarrowLoad.getNode() != N0.getNode()) {
CombineTo(N0.getNode(), NarrowLoad);
@@ -12095,13 +12162,10 @@ SDValue DAGCombiner::visitAssertAlign(SDNode *N) {
return SDValue();
}
-/// If the result of a wider load is shifted to right of N bits and then
-/// truncated to a narrower type and where N is a multiple of number of bits of
-/// the narrower type, transform it to a narrower load from address + N / num of
-/// bits of new type. Also narrow the load if the result is masked with an AND
-/// to effectively produce a smaller type. If the result is to be extended, also
-/// fold the extension to form a extending load.
-SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
+/// If the result of a load is shifted/masked/truncated to an effectively
+/// narrower type, try to transform the load to a narrower type and/or
+/// use an extending load.
+SDValue DAGCombiner::reduceLoadWidth(SDNode *N) {
unsigned Opc = N->getOpcode();
ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
@@ -12113,32 +12177,48 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
if (VT.isVector())
return SDValue();
+ // The ShAmt variable is used to indicate that we've consumed a right
+ // shift. I.e. we want to narrow the width of the load by not loading the
+ // ShAmt least significant bits.
unsigned ShAmt = 0;
+ // A special case is when the least significant bits from the load are masked
+ // away, but using an AND rather than a right shift. HasShiftedOffset is used
+ // to indicate that the narrowed load should be left-shifted ShAmt bits to get
+ // the result.
bool HasShiftedOffset = false;
// Special case: SIGN_EXTEND_INREG is basically truncating to ExtVT then
// extended to VT.
if (Opc == ISD::SIGN_EXTEND_INREG) {
ExtType = ISD::SEXTLOAD;
ExtVT = cast<VTSDNode>(N->getOperand(1))->getVT();
- } else if (Opc == ISD::SRL) {
- // Another special-case: SRL is basically zero-extending a narrower value,
- // or it maybe shifting a higher subword, half or byte into the lowest
- // bits.
- ExtType = ISD::ZEXTLOAD;
- N0 = SDValue(N, 0);
+ } else if (Opc == ISD::SRL || Opc == ISD::SRA) {
+ // Another special-case: SRL/SRA is basically zero/sign-extending a narrower
+ // value, or it may be shifting a higher subword, half or byte into the
+ // lowest bits.
- auto *LN0 = dyn_cast<LoadSDNode>(N0.getOperand(0));
- auto *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1));
- if (!N01 || !LN0)
+ // Only handle shift with constant shift amount, and the shiftee must be a
+ // load.
+ auto *LN = dyn_cast<LoadSDNode>(N0);
+ auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
+ if (!N1C || !LN)
+ return SDValue();
+ // If the shift amount is larger than the memory type then we're not
+ // accessing any of the loaded bytes.
+ ShAmt = N1C->getZExtValue();
+ uint64_t MemoryWidth = LN->getMemoryVT().getScalarSizeInBits();
+ if (MemoryWidth <= ShAmt)
+ return SDValue();
+ // Attempt to fold away the SRL by using ZEXTLOAD and SRA by using SEXTLOAD.
+ ExtType = Opc == ISD::SRL ? ISD::ZEXTLOAD : ISD::SEXTLOAD;
+ ExtVT = EVT::getIntegerVT(*DAG.getContext(), MemoryWidth - ShAmt);
+ // If the original load is a SEXTLOAD then we can't simply replace it by a
+ // ZEXTLOAD (we could potentially replace it by a narrower SEXTLOAD
+ // followed by a ZEXT, but that is not handled at the moment). The same
+ // applies if the original load is a ZEXTLOAD and we want to use a SEXTLOAD.
+ if ((LN->getExtensionType() == ISD::SEXTLOAD ||
+ LN->getExtensionType() == ISD::ZEXTLOAD) &&
+ LN->getExtensionType() != ExtType)
return SDValue();
-
- uint64_t ShiftAmt = N01->getZExtValue();
- uint64_t MemoryWidth = LN0->getMemoryVT().getScalarSizeInBits();
- if (LN0->getExtensionType() != ISD::SEXTLOAD && MemoryWidth > ShiftAmt)
- ExtVT = EVT::getIntegerVT(*DAG.getContext(), MemoryWidth - ShiftAmt);
- else
- ExtVT = EVT::getIntegerVT(*DAG.getContext(),
- VT.getScalarSizeInBits() - ShiftAmt);
} else if (Opc == ISD::AND) {
// An AND with a constant mask is the same as a truncate + zero-extend.
auto AndC = dyn_cast<ConstantSDNode>(N->getOperand(1));
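A host-side sanity check of the narrowing described in this hunk (a sketch assuming a little-endian host; the patch handles big-endian pointers separately via AdjustBigEndianShift, and load32/load16 below are stand-ins, not DAG nodes): right-shifting a wide load by a whole number of bytes is the same as loading fewer bytes at base + ShAmt/8, which is what the narrower zext/sext load plus adjusted base pointer expresses.

#include <cassert>
#include <cstdint>
#include <cstring>

static uint32_t load32(const unsigned char *P) {
  uint32_t V;
  std::memcpy(&V, P, sizeof(V));
  return V;
}

static uint16_t load16(const unsigned char *P) {
  uint16_t V;
  std::memcpy(&V, P, sizeof(V));
  return V;
}

int main() {
  unsigned char Mem[4] = {0x78, 0x56, 0x34, 0x12}; // 0x12345678, little-endian
  // (srl (load i32 @ Mem), 16) == (zext i16 -> i32 (load i16 @ Mem+2))
  assert((load32(Mem) >> 16) == static_cast<uint32_t>(load16(Mem + 2)));
  return 0;
}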
@@ -12161,55 +12241,80 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits);
}
- if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
- SDValue SRL = N0;
- if (auto *ConstShift = dyn_cast<ConstantSDNode>(SRL.getOperand(1))) {
- ShAmt = ConstShift->getZExtValue();
- unsigned EVTBits = ExtVT.getScalarSizeInBits();
- // Is the shift amount a multiple of size of VT?
- if ((ShAmt & (EVTBits-1)) == 0) {
- N0 = N0.getOperand(0);
- // Is the load width a multiple of size of VT?
- if ((N0.getScalarValueSizeInBits() & (EVTBits - 1)) != 0)
- return SDValue();
- }
+ // In case Opc==SRL we've already prepared ExtVT/ExtType/ShAmt based on doing
+ // a right shift. Here we redo some of those checks, to possibly adjust the
+ // ExtVT even further based on "a masking AND". We could also end up here for
+ // other reasons (e.g. based on Opc==TRUNCATE) and that is why some checks
+ // need to be done here as well.
+ if (Opc == ISD::SRL || N0.getOpcode() == ISD::SRL) {
+ SDValue SRL = Opc == ISD::SRL ? SDValue(N, 0) : N0;
+ // Bail out when the SRL has more than one use. This is done for historical
+ // (undocumented) reasons. Maybe the intent was to guard the AND-masking
+ // check below? And maybe it could be non-profitable to do the transform in
+ // case the SRL has multiple uses and we get here with Opc!=ISD::SRL?
+ // FIXME: Can't we just skip this check for the Opc==ISD::SRL case?
+ if (!SRL.hasOneUse())
+ return SDValue();
+
+ // Only handle shift with constant shift amount, and the shiftee must be a
+ // load.
+ auto *LN = dyn_cast<LoadSDNode>(SRL.getOperand(0));
+ auto *SRL1C = dyn_cast<ConstantSDNode>(SRL.getOperand(1));
+ if (!SRL1C || !LN)
+ return SDValue();
- // At this point, we must have a load or else we can't do the transform.
- auto *LN0 = dyn_cast<LoadSDNode>(N0);
- if (!LN0) return SDValue();
+ // If the shift amount is larger than the input type then we're not
+ // accessing any of the loaded bytes. If the load was a zextload/extload
+ // then the result of the shift+trunc is zero/undef (handled elsewhere).
+ ShAmt = SRL1C->getZExtValue();
+ uint64_t MemoryWidth = LN->getMemoryVT().getSizeInBits();
+ if (ShAmt >= MemoryWidth)
+ return SDValue();
- // Because a SRL must be assumed to *need* to zero-extend the high bits
- // (as opposed to anyext the high bits), we can't combine the zextload
- // lowering of SRL and an sextload.
- if (LN0->getExtensionType() == ISD::SEXTLOAD)
- return SDValue();
+ // Because a SRL must be assumed to *need* to zero-extend the high bits
+ // (as opposed to anyext the high bits), we can't combine the zextload
+ // lowering of SRL and an sextload.
+ if (LN->getExtensionType() == ISD::SEXTLOAD)
+ return SDValue();
- // If the shift amount is larger than the input type then we're not
- // accessing any of the loaded bytes. If the load was a zextload/extload
- // then the result of the shift+trunc is zero/undef (handled elsewhere).
- if (ShAmt >= LN0->getMemoryVT().getSizeInBits())
+ // Avoid reading outside the memory accessed by the original load (could
+ // happen if we only adjust the load base pointer by ShAmt). Instead we
+ // try to narrow the load even further. The typical scenario here is:
+ // (i64 (truncate (i96 (srl (load x), 64)))) ->
+ // (i64 (truncate (i96 (zextload (load i32 + offset) from i32))))
+ if (ExtVT.getScalarSizeInBits() > MemoryWidth - ShAmt) {
+ // Don't replace sextload by zextload.
+ if (ExtType == ISD::SEXTLOAD)
return SDValue();
-
- // If the SRL is only used by a masking AND, we may be able to adjust
- // the ExtVT to make the AND redundant.
- SDNode *Mask = *(SRL->use_begin());
- if (Mask->getOpcode() == ISD::AND &&
- isa<ConstantSDNode>(Mask->getOperand(1))) {
- const APInt& ShiftMask = Mask->getConstantOperandAPInt(1);
- if (ShiftMask.isMask()) {
- EVT MaskedVT = EVT::getIntegerVT(*DAG.getContext(),
- ShiftMask.countTrailingOnes());
- // If the mask is smaller, recompute the type.
- if ((ExtVT.getScalarSizeInBits() > MaskedVT.getScalarSizeInBits()) &&
- TLI.isLoadExtLegal(ExtType, N0.getValueType(), MaskedVT))
- ExtVT = MaskedVT;
- }
+ // Narrow the load.
+ ExtType = ISD::ZEXTLOAD;
+ ExtVT = EVT::getIntegerVT(*DAG.getContext(), MemoryWidth - ShAmt);
+ }
+
+ // If the SRL is only used by a masking AND, we may be able to adjust
+ // the ExtVT to make the AND redundant.
+ SDNode *Mask = *(SRL->use_begin());
+ if (SRL.hasOneUse() && Mask->getOpcode() == ISD::AND &&
+ isa<ConstantSDNode>(Mask->getOperand(1))) {
+ const APInt& ShiftMask = Mask->getConstantOperandAPInt(1);
+ if (ShiftMask.isMask()) {
+ EVT MaskedVT = EVT::getIntegerVT(*DAG.getContext(),
+ ShiftMask.countTrailingOnes());
+ // If the mask is smaller, recompute the type.
+ if ((ExtVT.getScalarSizeInBits() > MaskedVT.getScalarSizeInBits()) &&
+ TLI.isLoadExtLegal(ExtType, SRL.getValueType(), MaskedVT))
+ ExtVT = MaskedVT;
}
}
+
+ N0 = SRL.getOperand(0);
}
- // If the load is shifted left (and the result isn't shifted back right),
- // we can fold the truncate through the shift.
+ // If the load is shifted left (and the result isn't shifted back right), we
+ // can fold a truncate through the shift. The typical scenario is that N
+ // points at a TRUNCATE here so the attempted fold is:
+  // (truncate (shl (load x), c)) -> (shl (narrow load x), c)
+ // ShLeftAmt will indicate how much a narrowed load should be shifted left.
unsigned ShLeftAmt = 0;
if (ShAmt == 0 && N0.getOpcode() == ISD::SHL && N0.hasOneUse() &&
ExtVT == VT && TLI.isNarrowingProfitable(N0.getValueType(), VT)) {
@@ -12237,12 +12342,12 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
return LVTStoreBits - EVTStoreBits - ShAmt;
};
- // For big endian targets, we need to adjust the offset to the pointer to
- // load the correct bytes.
- if (DAG.getDataLayout().isBigEndian())
- ShAmt = AdjustBigEndianShift(ShAmt);
+ // We need to adjust the pointer to the load by ShAmt bits in order to load
+ // the correct bytes.
+ unsigned PtrAdjustmentInBits =
+ DAG.getDataLayout().isBigEndian() ? AdjustBigEndianShift(ShAmt) : ShAmt;
- uint64_t PtrOff = ShAmt / 8;
+ uint64_t PtrOff = PtrAdjustmentInBits / 8;
Align NewAlign = commonAlignment(LN0->getAlign(), PtrOff);
SDLoc DL(LN0);
// The original load itself didn't wrap, so an offset within it doesn't.
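// A worked example of the byte offset computed above: on a little-endian
// target the narrowed value sits ShAmt bits from the base, while on a
// big-endian target the offset is measured from the opposite end (what the
// AdjustBigEndianShift lambda returns). Plain C++ model; names are
// illustrative.
#include <cassert>

static unsigned pointerOffsetBytes(bool bigEndian, unsigned wideStoreBits,
                                   unsigned narrowStoreBits, unsigned shAmt) {
  unsigned bits = bigEndian ? wideStoreBits - narrowStoreBits - shAmt : shAmt;
  return bits / 8;
}

int main() {
  // Narrow a 64-bit load to the 16 bits at [16, 32).
  assert(pointerOffsetBytes(false, 64, 16, 16) == 2); // little endian
  assert(pointerOffsetBytes(true, 64, 16, 16) == 4);  // big endian
  return 0;
}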
@@ -12285,11 +12390,6 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
}
if (HasShiftedOffset) {
- // Recalculate the shift amount after it has been altered to calculate
- // the offset.
- if (DAG.getDataLayout().isBigEndian())
- ShAmt = AdjustBigEndianShift(ShAmt);
-
// We're using a shifted mask, so the load now has an offset. This means
    // that data has been loaded into lower bytes than it would have been
// before, so we need to shl the loaded data into the correct position in the
@@ -12320,7 +12420,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT, N0, N1);
// If the input is already sign extended, just drop the extension.
- if (ExtVTBits >= DAG.ComputeMinSignedBits(N0))
+ if (ExtVTBits >= DAG.ComputeMaxSignificantBits(N0))
return N0;
// fold (sext_in_reg (sext_in_reg x, VT2), VT1) -> (sext_in_reg x, minVT) pt2
@@ -12336,7 +12436,8 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND) {
SDValue N00 = N0.getOperand(0);
unsigned N00Bits = N00.getScalarValueSizeInBits();
- if ((N00Bits <= ExtVTBits || DAG.ComputeMinSignedBits(N00) <= ExtVTBits) &&
+ if ((N00Bits <= ExtVTBits ||
+ DAG.ComputeMaxSignificantBits(N00) <= ExtVTBits) &&
(!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND, VT)))
return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, N00);
}
@@ -12355,7 +12456,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
APInt DemandedSrcElts = APInt::getLowBitsSet(SrcElts, DstElts);
if ((N00Bits == ExtVTBits ||
(!IsZext && (N00Bits < ExtVTBits ||
- DAG.ComputeMinSignedBits(N00) <= ExtVTBits))) &&
+ DAG.ComputeMaxSignificantBits(N00) <= ExtVTBits))) &&
(!LegalOperations ||
TLI.isOperationLegal(ISD::SIGN_EXTEND_VECTOR_INREG, VT)))
return DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, SDLoc(N), VT, N00);
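// The folds above compare ComputeMaxSignificantBits against the sext_in_reg
// width: if the value already fits in that many signed bits, the in-register
// extension is a no-op. A small constant-only model of "max significant bits"
// (the helper name and the fixed 64-bit width are illustrative assumptions):
#include <cassert>
#include <cstdint>

static unsigned maxSignificantBits(uint64_t u) {
  unsigned bits = 64;
  // While the top two bits of the current width agree, the top bit is just a
  // redundant copy of the sign bit and the width can shrink.
  while (bits > 1 && ((u >> (bits - 1)) & 1) == ((u >> (bits - 2)) & 1))
    --bits;
  return bits;
}

int main() {
  assert(maxSignificantBits(0) == 1);
  assert(maxSignificantBits(~0ULL) == 1);                       // -1
  assert(maxSignificantBits(127) == 8);                         // fits i8
  assert(maxSignificantBits(static_cast<uint64_t>(-128)) == 8); // fits i8
  assert(maxSignificantBits(128) == 9);                         // needs i9
  return 0;
}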
@@ -12381,7 +12482,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
// fold (sext_in_reg (load x)) -> (smaller sextload x)
// fold (sext_in_reg (srl (load x), c)) -> (smaller sextload (x+c/evtbits))
- if (SDValue NarrowLoad = ReduceLoadWidth(N))
+ if (SDValue NarrowLoad = reduceLoadWidth(N))
return NarrowLoad;
// fold (sext_in_reg (srl X, 24), i8) -> (sra X, 24)
@@ -12668,7 +12769,7 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
// fold (truncate (load x)) -> (smaller load x)
// fold (truncate (srl (load x), c)) -> (smaller load (x+c/evtbits))
if (!LegalTypes || TLI.isTypeDesirableForOp(N0.getOpcode(), VT)) {
- if (SDValue Reduced = ReduceLoadWidth(N))
+ if (SDValue Reduced = reduceLoadWidth(N))
return Reduced;
// Handle the case where the load remains an extending load even
@@ -17491,6 +17592,10 @@ void DAGCombiner::getStoreMergeCandidates(
for (auto I2 = (*I)->use_begin(), E2 = (*I)->use_end(); I2 != E2; ++I2)
TryToAddCandidate(I2);
}
+ // Check stores that depend on the root (e.g. Store 3 in the chart above).
+ if (I.getOperandNo() == 0 && isa<StoreSDNode>(*I)) {
+ TryToAddCandidate(I);
+ }
}
} else {
for (auto I = RootNode->use_begin(), E = RootNode->use_end();
@@ -18351,6 +18456,15 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
Value.getValueType().isInteger() &&
(!isa<ConstantSDNode>(Value) ||
!cast<ConstantSDNode>(Value)->isOpaque())) {
+    // Convert a truncating store of an extension into a standard store.
+ if ((Value.getOpcode() == ISD::ZERO_EXTEND ||
+ Value.getOpcode() == ISD::SIGN_EXTEND ||
+ Value.getOpcode() == ISD::ANY_EXTEND) &&
+ Value.getOperand(0).getValueType() == ST->getMemoryVT() &&
+ TLI.isOperationLegalOrCustom(ISD::STORE, ST->getMemoryVT()))
+ return DAG.getStore(Chain, SDLoc(N), Value.getOperand(0), Ptr,
+ ST->getMemOperand());
+
APInt TruncDemandedBits =
APInt::getLowBitsSet(Value.getScalarValueSizeInBits(),
ST->getMemoryVT().getScalarSizeInBits());
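// The new fold above rewrites (truncstore:MemVT (ext x:MemVT), p) into a
// plain (store x, p): truncating back to MemVT discards exactly the bits the
// extension added, so the bytes written are identical. A scalar model:
#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  uint16_t x = 0xBEEF;
  uint8_t memA[2], memB[2];

  // Truncating store of the 32-bit extension: keep only the low 16 bits.
  uint32_t extended = x; // zext; sext/anyext behave the same after truncation
  uint16_t truncated = static_cast<uint16_t>(extended);
  std::memcpy(memA, &truncated, sizeof truncated);

  // Plain store of the original 16-bit value.
  std::memcpy(memB, &x, sizeof x);

  assert(std::memcmp(memA, memB, sizeof memA) == 0);
  return 0;
}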
@@ -23299,6 +23413,8 @@ SDValue DAGCombiner::SimplifySelectCC(const SDLoc &DL, SDValue N0, SDValue N1,
if (SDValue S = PerformMinMaxFpToSatCombine(N0, N1, N2, N3, CC, DAG))
return S;
+ if (SDValue S = PerformUMinFpToSatCombine(N0, N1, N2, N3, CC, DAG))
+ return S;
return SDValue();
}
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
index 4d1449bc2751..bfde35935c7b 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -1775,12 +1775,13 @@ bool FastISel::selectOperator(const User *I, unsigned Opcode) {
return false;
case Instruction::Call:
- // On AIX, call lowering uses the DAG-ISEL path currently so that the
+ // On AIX, normal call lowering uses the DAG-ISEL path currently so that the
// callee of the direct function call instruction will be mapped to the
// symbol for the function's entry point, which is distinct from the
// function descriptor symbol. The latter is the symbol whose XCOFF symbol
// name is the C-linkage name of the source level function.
- if (TM.getTargetTriple().isOSAIX())
+ // But fast isel still has the ability to do selection for intrinsics.
+ if (TM.getTargetTriple().isOSAIX() && !isa<IntrinsicInst>(I))
return false;
return selectCall(I);
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 5dfb65ef131a..54481b94fdd8 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -3593,9 +3593,16 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
if (Legalized) {
// If we expanded the SETCC by swapping LHS and RHS, or by inverting the
// condition code, create a new SETCC node.
- if (Tmp3.getNode())
- Tmp1 = DAG.getNode(ISD::SETCC, dl, Node->getValueType(0),
- Tmp1, Tmp2, Tmp3, Node->getFlags());
+ if (Tmp3.getNode()) {
+ if (IsStrict) {
+ Tmp1 = DAG.getNode(Node->getOpcode(), dl, Node->getVTList(),
+ {Chain, Tmp1, Tmp2, Tmp3}, Node->getFlags());
+ Chain = Tmp1.getValue(1);
+ } else {
+ Tmp1 = DAG.getNode(Node->getOpcode(), dl, Node->getValueType(0), Tmp1,
+ Tmp2, Tmp3, Node->getFlags());
+ }
+ }
// If we expanded the SETCC by inverting the condition code, then wrap
// the existing SETCC in a NOT to restore the intended condition.
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index 27f9cede1922..6bf38d7296a8 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -1193,7 +1193,7 @@ void DAGTypeLegalizer::ExpandFloatResult(SDNode *N, unsigned ResNo) {
llvm_unreachable("Do not know how to expand the result of this operator!");
case ISD::UNDEF: SplitRes_UNDEF(N, Lo, Hi); break;
- case ISD::SELECT: SplitRes_SELECT(N, Lo, Hi); break;
+ case ISD::SELECT: SplitRes_Select(N, Lo, Hi); break;
case ISD::SELECT_CC: SplitRes_SELECT_CC(N, Lo, Hi); break;
case ISD::MERGE_VALUES: ExpandRes_MERGE_VALUES(N, ResNo, Lo, Hi); break;
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 518e525e13d0..8c7b90b6cd33 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -75,30 +75,28 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
break;
case ISD::MGATHER: Res = PromoteIntRes_MGATHER(cast<MaskedGatherSDNode>(N));
break;
- case ISD::SELECT: Res = PromoteIntRes_SELECT(N); break;
- case ISD::VSELECT: Res = PromoteIntRes_VSELECT(N); break;
+ case ISD::SELECT:
+ case ISD::VSELECT:
+ case ISD::VP_SELECT:
+ Res = PromoteIntRes_Select(N);
+ break;
case ISD::SELECT_CC: Res = PromoteIntRes_SELECT_CC(N); break;
case ISD::STRICT_FSETCC:
case ISD::STRICT_FSETCCS:
case ISD::SETCC: Res = PromoteIntRes_SETCC(N); break;
case ISD::SMIN:
- case ISD::SMAX:
- Res = PromoteIntRes_SExtIntBinOp(N, /*IsVP*/ false);
- break;
+ case ISD::SMAX: Res = PromoteIntRes_SExtIntBinOp(N); break;
case ISD::UMIN:
case ISD::UMAX: Res = PromoteIntRes_UMINUMAX(N); break;
case ISD::SHL:
- Res = PromoteIntRes_SHL(N, /*IsVP*/ false);
- break;
+ case ISD::VP_SHL: Res = PromoteIntRes_SHL(N); break;
case ISD::SIGN_EXTEND_INREG:
Res = PromoteIntRes_SIGN_EXTEND_INREG(N); break;
case ISD::SRA:
- Res = PromoteIntRes_SRA(N, /*IsVP*/ false);
- break;
+ case ISD::VP_ASHR: Res = PromoteIntRes_SRA(N); break;
case ISD::SRL:
- Res = PromoteIntRes_SRL(N, /*IsVP*/ false);
- break;
+ case ISD::VP_LSHR: Res = PromoteIntRes_SRL(N); break;
case ISD::TRUNCATE: Res = PromoteIntRes_TRUNCATE(N); break;
case ISD::UNDEF: Res = PromoteIntRes_UNDEF(N); break;
case ISD::VAARG: Res = PromoteIntRes_VAARG(N); break;
@@ -154,18 +152,22 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::ADD:
case ISD::SUB:
case ISD::MUL:
- Res = PromoteIntRes_SimpleIntBinOp(N, /*IsVP*/ false);
- break;
+ case ISD::VP_AND:
+ case ISD::VP_OR:
+ case ISD::VP_XOR:
+ case ISD::VP_ADD:
+ case ISD::VP_SUB:
+ case ISD::VP_MUL: Res = PromoteIntRes_SimpleIntBinOp(N); break;
case ISD::SDIV:
case ISD::SREM:
- Res = PromoteIntRes_SExtIntBinOp(N, /*IsVP*/ false);
- break;
+ case ISD::VP_SDIV:
+ case ISD::VP_SREM: Res = PromoteIntRes_SExtIntBinOp(N); break;
case ISD::UDIV:
case ISD::UREM:
- Res = PromoteIntRes_ZExtIntBinOp(N, /*IsVP*/ false);
- break;
+ case ISD::VP_UDIV:
+ case ISD::VP_UREM: Res = PromoteIntRes_ZExtIntBinOp(N); break;
case ISD::SADDO:
case ISD::SSUBO: Res = PromoteIntRes_SADDSUBO(N, ResNo); break;
@@ -260,32 +262,6 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::FSHR:
Res = PromoteIntRes_FunnelShift(N);
break;
-
- case ISD::VP_AND:
- case ISD::VP_OR:
- case ISD::VP_XOR:
- case ISD::VP_ADD:
- case ISD::VP_SUB:
- case ISD::VP_MUL:
- Res = PromoteIntRes_SimpleIntBinOp(N, /*IsVP*/ true);
- break;
- case ISD::VP_SDIV:
- case ISD::VP_SREM:
- Res = PromoteIntRes_SExtIntBinOp(N, /*IsVP*/ true);
- break;
- case ISD::VP_UDIV:
- case ISD::VP_UREM:
- Res = PromoteIntRes_ZExtIntBinOp(N, /*IsVP*/ true);
- break;
- case ISD::VP_SHL:
- Res = PromoteIntRes_SHL(N, /*IsVP*/ true);
- break;
- case ISD::VP_ASHR:
- Res = PromoteIntRes_SRA(N, /*IsVP*/ true);
- break;
- case ISD::VP_LSHR:
- Res = PromoteIntRes_SRL(N, /*IsVP*/ true);
- break;
}
// If the result is null then the sub-method took care of registering it.
@@ -1127,20 +1103,18 @@ SDValue DAGTypeLegalizer::PromoteIntRes_SADDSUBO(SDNode *N, unsigned ResNo) {
return Res;
}
-SDValue DAGTypeLegalizer::PromoteIntRes_SELECT(SDNode *N) {
- SDValue LHS = GetPromotedInteger(N->getOperand(1));
- SDValue RHS = GetPromotedInteger(N->getOperand(2));
- return DAG.getSelect(SDLoc(N),
- LHS.getValueType(), N->getOperand(0), LHS, RHS);
-}
-
-SDValue DAGTypeLegalizer::PromoteIntRes_VSELECT(SDNode *N) {
+SDValue DAGTypeLegalizer::PromoteIntRes_Select(SDNode *N) {
SDValue Mask = N->getOperand(0);
SDValue LHS = GetPromotedInteger(N->getOperand(1));
SDValue RHS = GetPromotedInteger(N->getOperand(2));
- return DAG.getNode(ISD::VSELECT, SDLoc(N),
- LHS.getValueType(), Mask, LHS, RHS);
+
+ unsigned Opcode = N->getOpcode();
+ return Opcode == ISD::VP_SELECT
+ ? DAG.getNode(Opcode, SDLoc(N), LHS.getValueType(), Mask, LHS, RHS,
+ N->getOperand(3))
+ : DAG.getNode(Opcode, SDLoc(N), LHS.getValueType(), Mask, LHS,
+ RHS);
}
SDValue DAGTypeLegalizer::PromoteIntRes_SELECT_CC(SDNode *N) {
@@ -1193,12 +1167,12 @@ SDValue DAGTypeLegalizer::PromoteIntRes_SETCC(SDNode *N) {
return DAG.getSExtOrTrunc(SetCC, dl, NVT);
}
-SDValue DAGTypeLegalizer::PromoteIntRes_SHL(SDNode *N, bool IsVP) {
+SDValue DAGTypeLegalizer::PromoteIntRes_SHL(SDNode *N) {
SDValue LHS = GetPromotedInteger(N->getOperand(0));
SDValue RHS = N->getOperand(1);
if (getTypeAction(RHS.getValueType()) == TargetLowering::TypePromoteInteger)
RHS = ZExtPromotedInteger(RHS);
- if (!IsVP)
+ if (N->getOpcode() != ISD::VP_SHL)
return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS);
return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS,
N->getOperand(2), N->getOperand(3));
@@ -1210,34 +1184,40 @@ SDValue DAGTypeLegalizer::PromoteIntRes_SIGN_EXTEND_INREG(SDNode *N) {
Op.getValueType(), Op, N->getOperand(1));
}
-SDValue DAGTypeLegalizer::PromoteIntRes_SimpleIntBinOp(SDNode *N, bool IsVP) {
+SDValue DAGTypeLegalizer::PromoteIntRes_SimpleIntBinOp(SDNode *N) {
// The input may have strange things in the top bits of the registers, but
// these operations don't care. They may have weird bits going out, but
// that too is okay if they are integer operations.
SDValue LHS = GetPromotedInteger(N->getOperand(0));
SDValue RHS = GetPromotedInteger(N->getOperand(1));
- if (!IsVP)
+ if (N->getNumOperands() == 2)
return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS);
+ assert(N->getNumOperands() == 4 && "Unexpected number of operands!");
+ assert(N->isVPOpcode() && "Expected VP opcode");
return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS,
N->getOperand(2), N->getOperand(3));
}
-SDValue DAGTypeLegalizer::PromoteIntRes_SExtIntBinOp(SDNode *N, bool IsVP) {
+SDValue DAGTypeLegalizer::PromoteIntRes_SExtIntBinOp(SDNode *N) {
// Sign extend the input.
SDValue LHS = SExtPromotedInteger(N->getOperand(0));
SDValue RHS = SExtPromotedInteger(N->getOperand(1));
- if (!IsVP)
+ if (N->getNumOperands() == 2)
return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS);
+ assert(N->getNumOperands() == 4 && "Unexpected number of operands!");
+ assert(N->isVPOpcode() && "Expected VP opcode");
return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS,
N->getOperand(2), N->getOperand(3));
}
-SDValue DAGTypeLegalizer::PromoteIntRes_ZExtIntBinOp(SDNode *N, bool IsVP) {
+SDValue DAGTypeLegalizer::PromoteIntRes_ZExtIntBinOp(SDNode *N) {
// Zero extend the input.
SDValue LHS = ZExtPromotedInteger(N->getOperand(0));
SDValue RHS = ZExtPromotedInteger(N->getOperand(1));
- if (!IsVP)
+ if (N->getNumOperands() == 2)
return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS);
+ assert(N->getNumOperands() == 4 && "Unexpected number of operands!");
+ assert(N->isVPOpcode() && "Expected VP opcode");
return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS,
N->getOperand(2), N->getOperand(3));
}
@@ -1251,25 +1231,25 @@ SDValue DAGTypeLegalizer::PromoteIntRes_UMINUMAX(SDNode *N) {
LHS.getValueType(), LHS, RHS);
}
-SDValue DAGTypeLegalizer::PromoteIntRes_SRA(SDNode *N, bool IsVP) {
+SDValue DAGTypeLegalizer::PromoteIntRes_SRA(SDNode *N) {
// The input value must be properly sign extended.
SDValue LHS = SExtPromotedInteger(N->getOperand(0));
SDValue RHS = N->getOperand(1);
if (getTypeAction(RHS.getValueType()) == TargetLowering::TypePromoteInteger)
RHS = ZExtPromotedInteger(RHS);
- if (!IsVP)
+ if (N->getOpcode() != ISD::VP_ASHR)
return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS);
return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS,
N->getOperand(2), N->getOperand(3));
}
-SDValue DAGTypeLegalizer::PromoteIntRes_SRL(SDNode *N, bool IsVP) {
+SDValue DAGTypeLegalizer::PromoteIntRes_SRL(SDNode *N) {
// The input value must be properly zero extended.
SDValue LHS = ZExtPromotedInteger(N->getOperand(0));
SDValue RHS = N->getOperand(1);
if (getTypeAction(RHS.getValueType()) == TargetLowering::TypePromoteInteger)
RHS = ZExtPromotedInteger(RHS);
- if (!IsVP)
+ if (N->getOpcode() != ISD::VP_LSHR)
return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS);
return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS,
N->getOperand(2), N->getOperand(3));
@@ -1653,7 +1633,8 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
case ISD::UDIVFIX:
case ISD::UDIVFIXSAT: Res = PromoteIntOp_FIX(N); break;
- case ISD::FPOWI: Res = PromoteIntOp_FPOWI(N); break;
+ case ISD::FPOWI:
+ case ISD::STRICT_FPOWI: Res = PromoteIntOp_FPOWI(N); break;
case ISD::VECREDUCE_ADD:
case ISD::VECREDUCE_MUL:
@@ -1703,50 +1684,64 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
/// PromoteSetCCOperands - Promote the operands of a comparison. This code is
/// shared among BR_CC, SELECT_CC, and SETCC handlers.
-void DAGTypeLegalizer::PromoteSetCCOperands(SDValue &NewLHS,SDValue &NewRHS,
+void DAGTypeLegalizer::PromoteSetCCOperands(SDValue &LHS, SDValue &RHS,
ISD::CondCode CCCode) {
// We have to insert explicit sign or zero extends. Note that we could
// insert sign extends for ALL conditions. For those operations where either
- // zero or sign extension would be valid, use SExtOrZExtPromotedInteger
- // which will choose the cheapest for the target.
- switch (CCCode) {
- default: llvm_unreachable("Unknown integer comparison!");
- case ISD::SETEQ:
- case ISD::SETNE: {
- SDValue OpL = GetPromotedInteger(NewLHS);
- SDValue OpR = GetPromotedInteger(NewRHS);
-
- // We would prefer to promote the comparison operand with sign extension.
- // If the width of OpL/OpR excluding the duplicated sign bits is no greater
- // than the width of NewLHS/NewRH, we can avoid inserting real truncate
- // instruction, which is redundant eventually.
- unsigned OpLEffectiveBits = DAG.ComputeMinSignedBits(OpL);
- unsigned OpREffectiveBits = DAG.ComputeMinSignedBits(OpR);
- if (OpLEffectiveBits <= NewLHS.getScalarValueSizeInBits() &&
- OpREffectiveBits <= NewRHS.getScalarValueSizeInBits()) {
- NewLHS = OpL;
- NewRHS = OpR;
- } else {
- NewLHS = SExtOrZExtPromotedInteger(NewLHS);
- NewRHS = SExtOrZExtPromotedInteger(NewRHS);
+ // zero or sign extension would be valid, we ask the target which extension
+ // it would prefer.
+
+ // Signed comparisons always require sign extension.
+ if (ISD::isSignedIntSetCC(CCCode)) {
+ LHS = SExtPromotedInteger(LHS);
+ RHS = SExtPromotedInteger(RHS);
+ return;
+ }
+
+ assert((ISD::isUnsignedIntSetCC(CCCode) || ISD::isIntEqualitySetCC(CCCode)) &&
+ "Unknown integer comparison!");
+
+ SDValue OpL = GetPromotedInteger(LHS);
+ SDValue OpR = GetPromotedInteger(RHS);
+
+ if (TLI.isSExtCheaperThanZExt(LHS.getValueType(), OpL.getValueType())) {
+ // The target would prefer to promote the comparison operand with sign
+ // extension. Honor that unless the promoted values are already zero
+ // extended.
+ unsigned OpLEffectiveBits =
+ DAG.computeKnownBits(OpL).countMaxActiveBits();
+ unsigned OpREffectiveBits =
+ DAG.computeKnownBits(OpR).countMaxActiveBits();
+ if (OpLEffectiveBits <= LHS.getScalarValueSizeInBits() &&
+ OpREffectiveBits <= RHS.getScalarValueSizeInBits()) {
+ LHS = OpL;
+ RHS = OpR;
+ return;
}
- break;
+
+ // The promoted values aren't zero extended, use a sext_inreg.
+ LHS = SExtPromotedInteger(LHS);
+ RHS = SExtPromotedInteger(RHS);
+ return;
}
- case ISD::SETUGE:
- case ISD::SETUGT:
- case ISD::SETULE:
- case ISD::SETULT:
- NewLHS = SExtOrZExtPromotedInteger(NewLHS);
- NewRHS = SExtOrZExtPromotedInteger(NewRHS);
- break;
- case ISD::SETGE:
- case ISD::SETGT:
- case ISD::SETLT:
- case ISD::SETLE:
- NewLHS = SExtPromotedInteger(NewLHS);
- NewRHS = SExtPromotedInteger(NewRHS);
- break;
+
+ // Prefer to promote the comparison operand with zero extension.
+
+ // If the width of OpL/OpR excluding the duplicated sign bits is no greater
+  // than the width of LHS/RHS, we can avoid inserting a zext_inreg operation
+ // that we might not be able to remove.
+ unsigned OpLEffectiveBits = DAG.ComputeMaxSignificantBits(OpL);
+ unsigned OpREffectiveBits = DAG.ComputeMaxSignificantBits(OpR);
+ if (OpLEffectiveBits <= LHS.getScalarValueSizeInBits() &&
+ OpREffectiveBits <= RHS.getScalarValueSizeInBits()) {
+ LHS = OpL;
+ RHS = OpR;
+ return;
}
+
+ // Otherwise, use zext_inreg.
+ LHS = ZExtPromotedInteger(LHS);
+ RHS = ZExtPromotedInteger(RHS);
}
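// A plain C++ illustration of why PromoteSetCCOperands chooses its extension:
// a signed compare of narrow values is only preserved by sign extension,
// while unsigned and equality compares are preserved by any extension applied
// consistently to both sides (so the cheaper one for the target can be used).
#include <cassert>
#include <cstdint>

int main() {
  int8_t a = -1, b = 1;

  // Signed compare: sign extension preserves the result ...
  assert((a < b) == (int32_t{a} < int32_t{b}));
  // ... but zero extension does not (-1 becomes 255).
  assert((uint32_t{uint8_t(a)} < uint32_t{uint8_t(b)}) == false);

  // Unsigned compare: zero extension matches the semantics directly.
  assert((uint8_t(a) > uint8_t(b)) ==
         (uint32_t{uint8_t(a)} > uint32_t{uint8_t(b)}));

  // Equality: any consistent extension preserves the answer.
  assert((a == b) == (int32_t{a} == int32_t{b}));
  assert((a == b) == (uint32_t{uint8_t(a)} == uint32_t{uint8_t(b)}));
  return 0;
}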
SDValue DAGTypeLegalizer::PromoteIntOp_ANY_EXTEND(SDNode *N) {
@@ -2099,8 +2094,8 @@ SDValue DAGTypeLegalizer::PromoteIntOp_PREFETCH(SDNode *N, unsigned OpNo) {
}
SDValue DAGTypeLegalizer::PromoteIntOp_FPOWI(SDNode *N) {
- // FIXME: Support for promotion of STRICT_FPOWI is not implemented yet.
- assert(N->getOpcode() == ISD::FPOWI && "No STRICT_FPOWI support here yet.");
+ bool IsStrict = N->isStrictFPOpcode();
+ SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
// The integer operand is the last operand in FPOWI (so the result and
  // floating point operand are already type legalized).
@@ -2118,17 +2113,19 @@ SDValue DAGTypeLegalizer::PromoteIntOp_FPOWI(SDNode *N) {
DAG.getContext()->emitError("Don't know how to promote fpowi to fpow");
return DAG.getUNDEF(N->getValueType(0));
}
+ unsigned OpOffset = IsStrict ? 1 : 0;
// The exponent should fit in a sizeof(int) type for the libcall to be valid.
assert(DAG.getLibInfo().getIntSize() ==
- N->getOperand(1).getValueType().getSizeInBits() &&
+ N->getOperand(1 + OpOffset).getValueType().getSizeInBits() &&
"POWI exponent should match with sizeof(int) when doing the libcall.");
TargetLowering::MakeLibCallOptions CallOptions;
CallOptions.setSExt(true);
- SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) };
- std::pair<SDValue, SDValue> Tmp =
- TLI.makeLibCall(DAG, LC, N->getValueType(0), Ops,
- CallOptions, SDLoc(N), SDValue());
+ SDValue Ops[2] = {N->getOperand(0 + OpOffset), N->getOperand(1 + OpOffset)};
+ std::pair<SDValue, SDValue> Tmp = TLI.makeLibCall(
+ DAG, LC, N->getValueType(0), Ops, CallOptions, SDLoc(N), Chain);
ReplaceValueWith(SDValue(N, 0), Tmp.first);
+ if (IsStrict)
+ ReplaceValueWith(SDValue(N, 1), Tmp.second);
return SDValue();
}
@@ -2255,7 +2252,7 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::ARITH_FENCE: SplitRes_ARITH_FENCE(N, Lo, Hi); break;
case ISD::MERGE_VALUES: SplitRes_MERGE_VALUES(N, ResNo, Lo, Hi); break;
- case ISD::SELECT: SplitRes_SELECT(N, Lo, Hi); break;
+ case ISD::SELECT: SplitRes_Select(N, Lo, Hi); break;
case ISD::SELECT_CC: SplitRes_SELECT_CC(N, Lo, Hi); break;
case ISD::UNDEF: SplitRes_UNDEF(N, Lo, Hi); break;
case ISD::FREEZE: SplitRes_FREEZE(N, Lo, Hi); break;
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index da282ecad282..4d8daa82d8c0 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -334,18 +334,17 @@ private:
SDValue PromoteIntRes_MGATHER(MaskedGatherSDNode *N);
SDValue PromoteIntRes_Overflow(SDNode *N);
SDValue PromoteIntRes_SADDSUBO(SDNode *N, unsigned ResNo);
- SDValue PromoteIntRes_SELECT(SDNode *N);
- SDValue PromoteIntRes_VSELECT(SDNode *N);
+ SDValue PromoteIntRes_Select(SDNode *N);
SDValue PromoteIntRes_SELECT_CC(SDNode *N);
SDValue PromoteIntRes_SETCC(SDNode *N);
- SDValue PromoteIntRes_SHL(SDNode *N, bool IsVP);
- SDValue PromoteIntRes_SimpleIntBinOp(SDNode *N, bool IsVP);
- SDValue PromoteIntRes_ZExtIntBinOp(SDNode *N, bool IsVP);
- SDValue PromoteIntRes_SExtIntBinOp(SDNode *N, bool IsVP);
+ SDValue PromoteIntRes_SHL(SDNode *N);
+ SDValue PromoteIntRes_SimpleIntBinOp(SDNode *N);
+ SDValue PromoteIntRes_ZExtIntBinOp(SDNode *N);
+ SDValue PromoteIntRes_SExtIntBinOp(SDNode *N);
SDValue PromoteIntRes_UMINUMAX(SDNode *N);
SDValue PromoteIntRes_SIGN_EXTEND_INREG(SDNode *N);
- SDValue PromoteIntRes_SRA(SDNode *N, bool IsVP);
- SDValue PromoteIntRes_SRL(SDNode *N, bool IsVP);
+ SDValue PromoteIntRes_SRA(SDNode *N);
+ SDValue PromoteIntRes_SRL(SDNode *N);
SDValue PromoteIntRes_TRUNCATE(SDNode *N);
SDValue PromoteIntRes_UADDSUBO(SDNode *N, unsigned ResNo);
SDValue PromoteIntRes_ADDSUBCARRY(SDNode *N, unsigned ResNo);
@@ -819,6 +818,12 @@ private:
void GetSplitVector(SDValue Op, SDValue &Lo, SDValue &Hi);
void SetSplitVector(SDValue Op, SDValue Lo, SDValue Hi);
+  /// Split mask operand of a VP intrinsic.
+ std::pair<SDValue, SDValue> SplitMask(SDValue Mask);
+
+  /// Split mask operand of a VP intrinsic at a given location.
+ std::pair<SDValue, SDValue> SplitMask(SDValue Mask, const SDLoc &DL);
+
// Helper function for incrementing the pointer when splitting
// memory operations
void IncrementPointer(MemSDNode *N, EVT MemVT, MachinePointerInfo &MPI,
@@ -826,7 +831,7 @@ private:
// Vector Result Splitting: <128 x ty> -> 2 x <64 x ty>.
void SplitVectorResult(SDNode *N, unsigned ResNo);
- void SplitVecRes_BinOp(SDNode *N, SDValue &Lo, SDValue &Hi, bool IsVP);
+ void SplitVecRes_BinOp(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_TernaryOp(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_UnaryOp(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_ExtendOp(SDNode *N, SDValue &Lo, SDValue &Hi);
@@ -847,8 +852,10 @@ private:
void SplitVecRes_FCOPYSIGN(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_LOAD(LoadSDNode *LD, SDValue &Lo, SDValue &Hi);
+ void SplitVecRes_VP_LOAD(VPLoadSDNode *LD, SDValue &Lo, SDValue &Hi);
void SplitVecRes_MLOAD(MaskedLoadSDNode *MLD, SDValue &Lo, SDValue &Hi);
- void SplitVecRes_MGATHER(MaskedGatherSDNode *MGT, SDValue &Lo, SDValue &Hi);
+ void SplitVecRes_Gather(MemSDNode *VPGT, SDValue &Lo, SDValue &Hi,
+ bool SplitSETCC = false);
void SplitVecRes_ScalarOp(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_STEP_VECTOR(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_SETCC(SDNode *N, SDValue &Lo, SDValue &Hi);
@@ -864,6 +871,7 @@ private:
SDValue SplitVecOp_VSELECT(SDNode *N, unsigned OpNo);
SDValue SplitVecOp_VECREDUCE(SDNode *N, unsigned OpNo);
SDValue SplitVecOp_VECREDUCE_SEQ(SDNode *N);
+ SDValue SplitVecOp_VP_REDUCE(SDNode *N, unsigned OpNo);
SDValue SplitVecOp_UnaryOp(SDNode *N);
SDValue SplitVecOp_TruncateHelper(SDNode *N);
@@ -873,9 +881,10 @@ private:
SDValue SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N);
SDValue SplitVecOp_ExtVecInRegOp(SDNode *N);
SDValue SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo);
+ SDValue SplitVecOp_VP_STORE(VPStoreSDNode *N, unsigned OpNo);
SDValue SplitVecOp_MSTORE(MaskedStoreSDNode *N, unsigned OpNo);
- SDValue SplitVecOp_MSCATTER(MaskedScatterSDNode *N, unsigned OpNo);
- SDValue SplitVecOp_MGATHER(MaskedGatherSDNode *MGT, unsigned OpNo);
+ SDValue SplitVecOp_Scatter(MemSDNode *N, unsigned OpNo);
+ SDValue SplitVecOp_Gather(MemSDNode *MGT, unsigned OpNo);
SDValue SplitVecOp_CONCAT_VECTORS(SDNode *N);
SDValue SplitVecOp_VSETCC(SDNode *N);
SDValue SplitVecOp_FP_ROUND(SDNode *N);
@@ -900,6 +909,23 @@ private:
}
void SetWidenedVector(SDValue Op, SDValue Result);
+ /// Given a mask Mask, returns the larger vector into which Mask was widened.
+ SDValue GetWidenedMask(SDValue Mask, ElementCount EC) {
+ // For VP operations, we must also widen the mask. Note that the mask type
+    // may not actually need widening, leading it to be split along with the VP
+ // operation.
+ // FIXME: This could lead to an infinite split/widen loop. We only handle
+ // the case where the mask needs widening to an identically-sized type as
+ // the vector inputs.
+ assert(getTypeAction(Mask.getValueType()) ==
+ TargetLowering::TypeWidenVector &&
+ "Unable to widen binary VP op");
+ Mask = GetWidenedVector(Mask);
+ assert(Mask.getValueType().getVectorElementCount() == EC &&
+ "Unable to widen binary VP op");
+ return Mask;
+ }
+
// Widen Vector Result Promotion.
void WidenVectorResult(SDNode *N, unsigned ResNo);
SDValue WidenVecRes_MERGE_VALUES(SDNode* N, unsigned ResNo);
@@ -911,10 +937,12 @@ private:
SDValue WidenVecRes_INSERT_SUBVECTOR(SDNode *N);
SDValue WidenVecRes_INSERT_VECTOR_ELT(SDNode* N);
SDValue WidenVecRes_LOAD(SDNode* N);
+ SDValue WidenVecRes_VP_LOAD(VPLoadSDNode *N);
SDValue WidenVecRes_MLOAD(MaskedLoadSDNode* N);
SDValue WidenVecRes_MGATHER(MaskedGatherSDNode* N);
+ SDValue WidenVecRes_VP_GATHER(VPGatherSDNode* N);
SDValue WidenVecRes_ScalarOp(SDNode* N);
- SDValue WidenVecRes_SELECT(SDNode* N);
+ SDValue WidenVecRes_Select(SDNode *N);
SDValue WidenVSELECTMask(SDNode *N);
SDValue WidenVecRes_SELECT_CC(SDNode* N);
SDValue WidenVecRes_SETCC(SDNode* N);
@@ -923,7 +951,7 @@ private:
SDValue WidenVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N);
SDValue WidenVecRes_Ternary(SDNode *N);
- SDValue WidenVecRes_Binary(SDNode *N, bool IsVP);
+ SDValue WidenVecRes_Binary(SDNode *N);
SDValue WidenVecRes_BinaryCanTrap(SDNode *N);
SDValue WidenVecRes_BinaryWithExtraScalarOp(SDNode *N);
SDValue WidenVecRes_StrictFP(SDNode *N);
@@ -945,9 +973,11 @@ private:
SDValue WidenVecOp_INSERT_SUBVECTOR(SDNode *N);
SDValue WidenVecOp_EXTRACT_SUBVECTOR(SDNode *N);
SDValue WidenVecOp_STORE(SDNode* N);
+ SDValue WidenVecOp_VP_STORE(SDNode *N, unsigned OpNo);
SDValue WidenVecOp_MSTORE(SDNode* N, unsigned OpNo);
SDValue WidenVecOp_MGATHER(SDNode* N, unsigned OpNo);
SDValue WidenVecOp_MSCATTER(SDNode* N, unsigned OpNo);
+ SDValue WidenVecOp_VP_SCATTER(SDNode* N, unsigned OpNo);
SDValue WidenVecOp_SETCC(SDNode* N);
SDValue WidenVecOp_STRICT_FSETCC(SDNode* N);
SDValue WidenVecOp_VSELECT(SDNode *N);
@@ -957,6 +987,7 @@ private:
SDValue WidenVecOp_FCOPYSIGN(SDNode *N);
SDValue WidenVecOp_VECREDUCE(SDNode *N);
SDValue WidenVecOp_VECREDUCE_SEQ(SDNode *N);
+ SDValue WidenVecOp_VP_REDUCE(SDNode *N);
/// Helper function to generate a set of operations to perform
/// a vector operation for a wider type.
@@ -1023,7 +1054,7 @@ private:
void SplitRes_MERGE_VALUES(SDNode *N, unsigned ResNo,
SDValue &Lo, SDValue &Hi);
void SplitRes_ARITH_FENCE (SDNode *N, SDValue &Lo, SDValue &Hi);
- void SplitRes_SELECT (SDNode *N, SDValue &Lo, SDValue &Hi);
+ void SplitRes_Select (SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitRes_SELECT_CC (SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitRes_UNDEF (SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitRes_FREEZE (SDNode *N, SDValue &Lo, SDValue &Hi);
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
index 3d3c9a2ad837..c6885677d644 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
@@ -506,9 +506,10 @@ void DAGTypeLegalizer::SplitRes_MERGE_VALUES(SDNode *N, unsigned ResNo,
GetSplitOp(Op, Lo, Hi);
}
-void DAGTypeLegalizer::SplitRes_SELECT(SDNode *N, SDValue &Lo, SDValue &Hi) {
+void DAGTypeLegalizer::SplitRes_Select(SDNode *N, SDValue &Lo, SDValue &Hi) {
SDValue LL, LH, RL, RH, CL, CH;
SDLoc dl(N);
+ unsigned Opcode = N->getOpcode();
GetSplitOp(N->getOperand(1), LL, LH);
GetSplitOp(N->getOperand(2), RL, RH);
@@ -539,8 +540,18 @@ void DAGTypeLegalizer::SplitRes_SELECT(SDNode *N, SDValue &Lo, SDValue &Hi) {
std::tie(CL, CH) = DAG.SplitVector(Cond, dl);
}
- Lo = DAG.getNode(N->getOpcode(), dl, LL.getValueType(), CL, LL, RL);
- Hi = DAG.getNode(N->getOpcode(), dl, LH.getValueType(), CH, LH, RH);
+ if (Opcode != ISD::VP_SELECT && Opcode != ISD::VP_MERGE) {
+ Lo = DAG.getNode(Opcode, dl, LL.getValueType(), CL, LL, RL);
+ Hi = DAG.getNode(Opcode, dl, LH.getValueType(), CH, LH, RH);
+ return;
+ }
+
+ SDValue EVLLo, EVLHi;
+ std::tie(EVLLo, EVLHi) =
+ DAG.SplitEVL(N->getOperand(3), N->getValueType(0), dl);
+
+ Lo = DAG.getNode(Opcode, dl, LL.getValueType(), CL, LL, RL, EVLLo);
+ Hi = DAG.getNode(Opcode, dl, LH.getValueType(), CH, LH, RH, EVLHi);
}
void DAGTypeLegalizer::SplitRes_SELECT_CC(SDNode *N, SDValue &Lo,
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 1493f36fcd3e..abf6a3ac6916 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -133,6 +133,8 @@ class VectorLegalizer {
/// Implement vselect in terms of XOR, AND, OR when blend is not
/// supported by the target.
SDValue ExpandVSELECT(SDNode *Node);
+ SDValue ExpandVP_SELECT(SDNode *Node);
+ SDValue ExpandVP_MERGE(SDNode *Node);
SDValue ExpandSELECT(SDNode *Node);
std::pair<SDValue, SDValue> ExpandLoad(SDNode *N);
SDValue ExpandStore(SDNode *N);
@@ -457,6 +459,14 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
break;
}
+
+#define BEGIN_REGISTER_VP_SDNODE(VPID, LEGALPOS, ...) \
+ case ISD::VPID: { \
+ EVT LegalizeVT = LEGALPOS < 0 ? Node->getValueType(-(1 + LEGALPOS)) \
+ : Node->getOperand(LEGALPOS).getValueType(); \
+ Action = TLI.getOperationAction(Node->getOpcode(), LegalizeVT); \
+ } break;
+#include "llvm/IR/VPIntrinsics.def"
}
LLVM_DEBUG(dbgs() << "\nLegalizing vector op: "; Node->dump(&DAG));
@@ -718,6 +728,9 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
case ISD::VSELECT:
Results.push_back(ExpandVSELECT(Node));
return;
+ case ISD::VP_SELECT:
+ Results.push_back(ExpandVP_SELECT(Node));
+ return;
case ISD::SELECT:
Results.push_back(ExpandSELECT(Node));
return;
@@ -865,6 +878,9 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
case ISD::UREM:
ExpandREM(Node, Results);
return;
+ case ISD::VP_MERGE:
+ Results.push_back(ExpandVP_MERGE(Node));
+ return;
}
Results.push_back(DAG.UnrollVectorOp(Node));
@@ -1195,6 +1211,79 @@ SDValue VectorLegalizer::ExpandVSELECT(SDNode *Node) {
return DAG.getNode(ISD::BITCAST, DL, Node->getValueType(0), Val);
}
+SDValue VectorLegalizer::ExpandVP_SELECT(SDNode *Node) {
+ // Implement VP_SELECT in terms of VP_XOR, VP_AND and VP_OR on platforms which
+ // do not support it natively.
+ SDLoc DL(Node);
+
+ SDValue Mask = Node->getOperand(0);
+ SDValue Op1 = Node->getOperand(1);
+ SDValue Op2 = Node->getOperand(2);
+ SDValue EVL = Node->getOperand(3);
+
+ EVT VT = Mask.getValueType();
+
+ // If we can't even use the basic vector operations of
+ // VP_AND,VP_OR,VP_XOR, we will have to scalarize the op.
+ if (TLI.getOperationAction(ISD::VP_AND, VT) == TargetLowering::Expand ||
+ TLI.getOperationAction(ISD::VP_XOR, VT) == TargetLowering::Expand ||
+ TLI.getOperationAction(ISD::VP_OR, VT) == TargetLowering::Expand)
+ return DAG.UnrollVectorOp(Node);
+
+  // This expansion is also only safe when the operands themselves are booleans.
+ if (Op1.getValueType().getVectorElementType() != MVT::i1)
+ return DAG.UnrollVectorOp(Node);
+
+ SDValue Ones = DAG.getAllOnesConstant(DL, VT);
+ SDValue NotMask = DAG.getNode(ISD::VP_XOR, DL, VT, Mask, Ones, Mask, EVL);
+
+ Op1 = DAG.getNode(ISD::VP_AND, DL, VT, Op1, Mask, Mask, EVL);
+ Op2 = DAG.getNode(ISD::VP_AND, DL, VT, Op2, NotMask, Mask, EVL);
+ return DAG.getNode(ISD::VP_OR, DL, VT, Op1, Op2, Mask, EVL);
+}
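// Reference check for the expansion above: for i1 elements,
// select(m, a, b) == (a & m) | (b & ~m), which is what the VP_AND/VP_XOR/
// VP_OR sequence computes lane by lane (lanes at or beyond the explicit
// vector length may hold anything, so reusing Mask/EVL is fine). Scalar
// sketch; the helper name is illustrative.
#include <cassert>

static bool expandedSelect(bool m, bool a, bool b) {
  bool notM = m ^ true;           // VP_XOR of the mask with all-ones
  return (a && m) || (b && notM); // VP_AND, VP_AND, VP_OR
}

int main() {
  for (int m = 0; m < 2; ++m)
    for (int a = 0; a < 2; ++a)
      for (int b = 0; b < 2; ++b)
        assert(expandedSelect(m, a, b) == (m ? bool(a) : bool(b)));
  return 0;
}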
+
+SDValue VectorLegalizer::ExpandVP_MERGE(SDNode *Node) {
+ // Implement VP_MERGE in terms of VSELECT. Construct a mask where vector
+ // indices less than the EVL/pivot are true. Combine that with the original
+ // mask for a full-length mask. Use a full-length VSELECT to select between
+ // the true and false values.
+ SDLoc DL(Node);
+
+ SDValue Mask = Node->getOperand(0);
+ SDValue Op1 = Node->getOperand(1);
+ SDValue Op2 = Node->getOperand(2);
+ SDValue EVL = Node->getOperand(3);
+
+ EVT MaskVT = Mask.getValueType();
+ bool IsFixedLen = MaskVT.isFixedLengthVector();
+
+ EVT EVLVecVT = EVT::getVectorVT(*DAG.getContext(), EVL.getValueType(),
+ MaskVT.getVectorElementCount());
+
+ // If we can't construct the EVL mask efficiently, it's better to unroll.
+ if ((IsFixedLen &&
+ !TLI.isOperationLegalOrCustom(ISD::BUILD_VECTOR, EVLVecVT)) ||
+ (!IsFixedLen &&
+ (!TLI.isOperationLegalOrCustom(ISD::STEP_VECTOR, EVLVecVT) ||
+ !TLI.isOperationLegalOrCustom(ISD::SPLAT_VECTOR, EVLVecVT))))
+ return DAG.UnrollVectorOp(Node);
+
+ // If using a SETCC would result in a different type than the mask type,
+ // unroll.
+ if (TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
+ EVLVecVT) != MaskVT)
+ return DAG.UnrollVectorOp(Node);
+
+ SDValue StepVec = DAG.getStepVector(DL, EVLVecVT);
+ SDValue SplatEVL = IsFixedLen ? DAG.getSplatBuildVector(EVLVecVT, DL, EVL)
+ : DAG.getSplatVector(EVLVecVT, DL, EVL);
+ SDValue EVLMask =
+ DAG.getSetCC(DL, MaskVT, StepVec, SplatEVL, ISD::CondCode::SETULT);
+
+ SDValue FullMask = DAG.getNode(ISD::AND, DL, MaskVT, Mask, EVLMask);
+ return DAG.getSelect(DL, Node->getValueType(0), FullMask, Op1, Op2);
+}
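// Reference semantics for the VP_MERGE expansion above: build a lane-index
// mask (step_vector < splat(EVL)), AND it with the original mask, then do a
// full-length select. Scalar loop model; the vector length and values are
// illustrative.
#include <array>
#include <cassert>

int main() {
  constexpr unsigned N = 8;
  std::array<bool, N> mask = {1, 0, 1, 1, 0, 1, 0, 1};
  std::array<int, N> a = {10, 11, 12, 13, 14, 15, 16, 17};
  std::array<int, N> b = {20, 21, 22, 23, 24, 25, 26, 27};
  unsigned evl = 5;

  std::array<int, N> merged{};
  for (unsigned i = 0; i < N; ++i) {
    bool evlMask = i < evl;             // SETULT(step_vector, splat(EVL))
    bool fullMask = mask[i] && evlMask; // AND with the original mask
    merged[i] = fullMask ? a[i] : b[i]; // full-length VSELECT
  }

  // Lanes below EVL follow the mask; lanes at or above EVL take the false
  // operand, matching vp.merge semantics.
  assert(merged[0] == 10 && merged[1] == 21 && merged[3] == 13);
  assert(merged[5] == 25 && merged[7] == 27);
  return 0;
}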
+
void VectorLegalizer::ExpandFP_TO_UINT(SDNode *Node,
SmallVectorImpl<SDValue> &Results) {
// Attempt to expand using TargetLowering.
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 7ec2638b1e71..0bd44ce4c872 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -914,7 +914,9 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
case ISD::MERGE_VALUES: SplitRes_MERGE_VALUES(N, ResNo, Lo, Hi); break;
case ISD::VSELECT:
- case ISD::SELECT: SplitRes_SELECT(N, Lo, Hi); break;
+ case ISD::SELECT:
+ case ISD::VP_MERGE:
+ case ISD::VP_SELECT: SplitRes_Select(N, Lo, Hi); break;
case ISD::SELECT_CC: SplitRes_SELECT_CC(N, Lo, Hi); break;
case ISD::UNDEF: SplitRes_UNDEF(N, Lo, Hi); break;
case ISD::BITCAST: SplitVecRes_BITCAST(N, Lo, Hi); break;
@@ -936,11 +938,15 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
case ISD::LOAD:
SplitVecRes_LOAD(cast<LoadSDNode>(N), Lo, Hi);
break;
+ case ISD::VP_LOAD:
+ SplitVecRes_VP_LOAD(cast<VPLoadSDNode>(N), Lo, Hi);
+ break;
case ISD::MLOAD:
SplitVecRes_MLOAD(cast<MaskedLoadSDNode>(N), Lo, Hi);
break;
case ISD::MGATHER:
- SplitVecRes_MGATHER(cast<MaskedGatherSDNode>(N), Lo, Hi);
+ case ISD::VP_GATHER:
+ SplitVecRes_Gather(cast<MemSDNode>(N), Lo, Hi, /*SplitSETCC*/ true);
break;
case ISD::SETCC:
SplitVecRes_SETCC(N, Lo, Hi);
@@ -1008,31 +1014,31 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
SplitVecRes_ExtendOp(N, Lo, Hi);
break;
- case ISD::ADD:
- case ISD::SUB:
- case ISD::MUL:
+ case ISD::ADD: case ISD::VP_ADD:
+ case ISD::SUB: case ISD::VP_SUB:
+ case ISD::MUL: case ISD::VP_MUL:
case ISD::MULHS:
case ISD::MULHU:
- case ISD::FADD:
- case ISD::FSUB:
- case ISD::FMUL:
+ case ISD::FADD: case ISD::VP_FADD:
+ case ISD::FSUB: case ISD::VP_FSUB:
+ case ISD::FMUL: case ISD::VP_FMUL:
case ISD::FMINNUM:
case ISD::FMAXNUM:
case ISD::FMINIMUM:
case ISD::FMAXIMUM:
- case ISD::SDIV:
- case ISD::UDIV:
- case ISD::FDIV:
+ case ISD::SDIV: case ISD::VP_SDIV:
+ case ISD::UDIV: case ISD::VP_UDIV:
+ case ISD::FDIV: case ISD::VP_FDIV:
case ISD::FPOW:
- case ISD::AND:
- case ISD::OR:
- case ISD::XOR:
- case ISD::SHL:
- case ISD::SRA:
- case ISD::SRL:
- case ISD::UREM:
- case ISD::SREM:
- case ISD::FREM:
+ case ISD::AND: case ISD::VP_AND:
+ case ISD::OR: case ISD::VP_OR:
+ case ISD::XOR: case ISD::VP_XOR:
+ case ISD::SHL: case ISD::VP_SHL:
+ case ISD::SRA: case ISD::VP_ASHR:
+ case ISD::SRL: case ISD::VP_LSHR:
+ case ISD::UREM: case ISD::VP_UREM:
+ case ISD::SREM: case ISD::VP_SREM:
+ case ISD::FREM: case ISD::VP_FREM:
case ISD::SMIN:
case ISD::SMAX:
case ISD::UMIN:
@@ -1045,7 +1051,7 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
case ISD::USHLSAT:
case ISD::ROTL:
case ISD::ROTR:
- SplitVecRes_BinOp(N, Lo, Hi, /*IsVP*/ false);
+ SplitVecRes_BinOp(N, Lo, Hi);
break;
case ISD::FMA:
case ISD::FSHL:
@@ -1082,26 +1088,6 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
case ISD::UDIVFIXSAT:
SplitVecRes_FIX(N, Lo, Hi);
break;
- case ISD::VP_ADD:
- case ISD::VP_AND:
- case ISD::VP_MUL:
- case ISD::VP_OR:
- case ISD::VP_SUB:
- case ISD::VP_XOR:
- case ISD::VP_SHL:
- case ISD::VP_LSHR:
- case ISD::VP_ASHR:
- case ISD::VP_SDIV:
- case ISD::VP_UDIV:
- case ISD::VP_SREM:
- case ISD::VP_UREM:
- case ISD::VP_FADD:
- case ISD::VP_FSUB:
- case ISD::VP_FMUL:
- case ISD::VP_FDIV:
- case ISD::VP_FREM:
- SplitVecRes_BinOp(N, Lo, Hi, /*IsVP*/ true);
- break;
}
// If Lo/Hi is null, the sub-method took care of registering results etc.
@@ -1133,8 +1119,22 @@ void DAGTypeLegalizer::IncrementPointer(MemSDNode *N, EVT MemVT,
}
}
-void DAGTypeLegalizer::SplitVecRes_BinOp(SDNode *N, SDValue &Lo, SDValue &Hi,
- bool IsVP) {
+std::pair<SDValue, SDValue> DAGTypeLegalizer::SplitMask(SDValue Mask) {
+ return SplitMask(Mask, SDLoc(Mask));
+}
+
+std::pair<SDValue, SDValue> DAGTypeLegalizer::SplitMask(SDValue Mask,
+ const SDLoc &DL) {
+ SDValue MaskLo, MaskHi;
+ EVT MaskVT = Mask.getValueType();
+ if (getTypeAction(MaskVT) == TargetLowering::TypeSplitVector)
+ GetSplitVector(Mask, MaskLo, MaskHi);
+ else
+ std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, DL);
+ return std::make_pair(MaskLo, MaskHi);
+}
+
+void DAGTypeLegalizer::SplitVecRes_BinOp(SDNode *N, SDValue &Lo, SDValue &Hi) {
SDValue LHSLo, LHSHi;
GetSplitVector(N->getOperand(0), LHSLo, LHSHi);
SDValue RHSLo, RHSHi;
@@ -1143,36 +1143,21 @@ void DAGTypeLegalizer::SplitVecRes_BinOp(SDNode *N, SDValue &Lo, SDValue &Hi,
const SDNodeFlags Flags = N->getFlags();
unsigned Opcode = N->getOpcode();
- if (!IsVP) {
+ if (N->getNumOperands() == 2) {
Lo = DAG.getNode(Opcode, dl, LHSLo.getValueType(), LHSLo, RHSLo, Flags);
Hi = DAG.getNode(Opcode, dl, LHSHi.getValueType(), LHSHi, RHSHi, Flags);
return;
}
- // Split the mask.
+ assert(N->getNumOperands() == 4 && "Unexpected number of operands!");
+ assert(N->isVPOpcode() && "Expected VP opcode");
+
SDValue MaskLo, MaskHi;
- SDValue Mask = N->getOperand(2);
- EVT MaskVT = Mask.getValueType();
- if (getTypeAction(MaskVT) == TargetLowering::TypeSplitVector)
- GetSplitVector(Mask, MaskLo, MaskHi);
- else
- std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, SDLoc(Mask));
-
- // Split the vector length parameter.
- // %evl -> umin(%evl, %halfnumelts) and usubsat(%evl - %halfnumelts).
- SDValue EVL = N->getOperand(3);
- EVT VecVT = N->getValueType(0);
- EVT EVLVT = EVL.getValueType();
- assert(VecVT.getVectorElementCount().isKnownEven() &&
- "Expecting the mask to be an evenly-sized vector");
- unsigned HalfMinNumElts = VecVT.getVectorMinNumElements() / 2;
- SDValue HalfNumElts =
- VecVT.isFixedLengthVector()
- ? DAG.getConstant(HalfMinNumElts, dl, EVLVT)
- : DAG.getVScale(dl, EVLVT,
- APInt(EVLVT.getScalarSizeInBits(), HalfMinNumElts));
- SDValue EVLLo = DAG.getNode(ISD::UMIN, dl, EVLVT, EVL, HalfNumElts);
- SDValue EVLHi = DAG.getNode(ISD::USUBSAT, dl, EVLVT, EVL, HalfNumElts);
+ std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(2));
+
+ SDValue EVLLo, EVLHi;
+ std::tie(EVLLo, EVLHi) =
+ DAG.SplitEVL(N->getOperand(3), N->getValueType(0), dl);
Lo = DAG.getNode(Opcode, dl, LHSLo.getValueType(),
{LHSLo, RHSLo, MaskLo, EVLLo}, Flags);
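// DAG.SplitEVL (used above) splits the explicit vector length the same way
// the removed inline code did: the low half gets umin(EVL, HalfNumElts) and
// the high half gets the saturating remainder. Scalar sketch for a
// fixed-width split; names are illustrative.
#include <algorithm>
#include <cassert>

static void splitEVL(unsigned evl, unsigned halfNumElts, unsigned &lo,
                     unsigned &hi) {
  lo = std::min(evl, halfNumElts);                // umin
  hi = evl > halfNumElts ? evl - halfNumElts : 0; // usubsat
}

int main() {
  unsigned lo, hi;
  splitEVL(3, 4, lo, hi); // EVL entirely within the low half
  assert(lo == 3 && hi == 0);
  splitEVL(6, 4, lo, hi); // EVL spills into the high half
  assert(lo == 4 && hi == 2);
  return 0;
}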
@@ -1781,6 +1766,86 @@ void DAGTypeLegalizer::SplitVecRes_LOAD(LoadSDNode *LD, SDValue &Lo,
ReplaceValueWith(SDValue(LD, 1), Ch);
}
+void DAGTypeLegalizer::SplitVecRes_VP_LOAD(VPLoadSDNode *LD, SDValue &Lo,
+ SDValue &Hi) {
+ assert(LD->isUnindexed() && "Indexed VP load during type legalization!");
+ EVT LoVT, HiVT;
+ SDLoc dl(LD);
+ std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(LD->getValueType(0));
+
+ ISD::LoadExtType ExtType = LD->getExtensionType();
+ SDValue Ch = LD->getChain();
+ SDValue Ptr = LD->getBasePtr();
+ SDValue Offset = LD->getOffset();
+ assert(Offset.isUndef() && "Unexpected indexed variable-length load offset");
+ Align Alignment = LD->getOriginalAlign();
+ SDValue Mask = LD->getMask();
+ SDValue EVL = LD->getVectorLength();
+ EVT MemoryVT = LD->getMemoryVT();
+
+ EVT LoMemVT, HiMemVT;
+ bool HiIsEmpty = false;
+ std::tie(LoMemVT, HiMemVT) =
+ DAG.GetDependentSplitDestVTs(MemoryVT, LoVT, &HiIsEmpty);
+
+ // Split Mask operand
+ SDValue MaskLo, MaskHi;
+ if (Mask.getOpcode() == ISD::SETCC) {
+ SplitVecRes_SETCC(Mask.getNode(), MaskLo, MaskHi);
+ } else {
+ if (getTypeAction(Mask.getValueType()) == TargetLowering::TypeSplitVector)
+ GetSplitVector(Mask, MaskLo, MaskHi);
+ else
+ std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, dl);
+ }
+
+ // Split EVL operand
+ SDValue EVLLo, EVLHi;
+ std::tie(EVLLo, EVLHi) = DAG.SplitEVL(EVL, LD->getValueType(0), dl);
+
+ MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
+ LD->getPointerInfo(), MachineMemOperand::MOLoad,
+ MemoryLocation::UnknownSize, Alignment, LD->getAAInfo(), LD->getRanges());
+
+ Lo =
+ DAG.getLoadVP(LD->getAddressingMode(), ExtType, LoVT, dl, Ch, Ptr, Offset,
+ MaskLo, EVLLo, LoMemVT, MMO, LD->isExpandingLoad());
+
+ if (HiIsEmpty) {
+ // The hi vp_load has zero storage size. We therefore simply set it to
+ // the low vp_load and rely on subsequent removal from the chain.
+ Hi = Lo;
+ } else {
+ // Generate hi vp_load.
+ Ptr = TLI.IncrementMemoryAddress(Ptr, MaskLo, dl, LoMemVT, DAG,
+ LD->isExpandingLoad());
+
+ MachinePointerInfo MPI;
+ if (LoMemVT.isScalableVector())
+ MPI = MachinePointerInfo(LD->getPointerInfo().getAddrSpace());
+ else
+ MPI = LD->getPointerInfo().getWithOffset(
+ LoMemVT.getStoreSize().getFixedSize());
+
+ MMO = DAG.getMachineFunction().getMachineMemOperand(
+ MPI, MachineMemOperand::MOLoad, MemoryLocation::UnknownSize, Alignment,
+ LD->getAAInfo(), LD->getRanges());
+
+ Hi = DAG.getLoadVP(LD->getAddressingMode(), ExtType, HiVT, dl, Ch, Ptr,
+ Offset, MaskHi, EVLHi, HiMemVT, MMO,
+ LD->isExpandingLoad());
+ }
+
+ // Build a factor node to remember that this load is independent of the
+ // other one.
+ Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
+ Hi.getValue(1));
+
+ // Legalize the chain result - switch anything that used the old chain to
+ // use the new one.
+ ReplaceValueWith(SDValue(LD, 1), Ch);
+}
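// Reference model for the lo/hi split above: the low half loads the first
// half of the lanes with (MaskLo, EVLLo), and the high half loads from a base
// advanced by the low half's store size with (MaskHi, EVLHi). Inactive lanes
// are modeled as 0 on both sides here (in the real node they are undefined);
// sizes and names are illustrative.
#include <algorithm>
#include <array>
#include <cassert>

template <unsigned N>
static std::array<int, N> vpLoad(const int *base, std::array<bool, N> mask,
                                 unsigned evl) {
  std::array<int, N> out{};
  for (unsigned i = 0; i < N; ++i)
    if (i < evl && mask[i])
      out[i] = base[i];
  return out;
}

int main() {
  int mem[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  std::array<bool, 8> mask = {1, 1, 0, 1, 1, 1, 1, 0};
  unsigned evl = 6;

  auto whole = vpLoad<8>(mem, mask, evl);
  auto lo = vpLoad<4>(mem, {mask[0], mask[1], mask[2], mask[3]},
                      std::min(evl, 4u));
  auto hi = vpLoad<4>(mem + 4, {mask[4], mask[5], mask[6], mask[7]},
                      evl > 4 ? evl - 4 : 0);

  for (unsigned i = 0; i < 4; ++i) {
    assert(whole[i] == lo[i]);
    assert(whole[i + 4] == hi[i]);
  }
  return 0;
}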
+
void DAGTypeLegalizer::SplitVecRes_MLOAD(MaskedLoadSDNode *MLD,
SDValue &Lo, SDValue &Hi) {
assert(MLD->isUnindexed() && "Indexed masked load during type legalization!");
@@ -1865,61 +1930,85 @@ void DAGTypeLegalizer::SplitVecRes_MLOAD(MaskedLoadSDNode *MLD,
}
-void DAGTypeLegalizer::SplitVecRes_MGATHER(MaskedGatherSDNode *MGT,
- SDValue &Lo, SDValue &Hi) {
+void DAGTypeLegalizer::SplitVecRes_Gather(MemSDNode *N, SDValue &Lo,
+ SDValue &Hi, bool SplitSETCC) {
EVT LoVT, HiVT;
- SDLoc dl(MGT);
- std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(MGT->getValueType(0));
-
- SDValue Ch = MGT->getChain();
- SDValue Ptr = MGT->getBasePtr();
- SDValue Mask = MGT->getMask();
- SDValue PassThru = MGT->getPassThru();
- SDValue Index = MGT->getIndex();
- SDValue Scale = MGT->getScale();
- EVT MemoryVT = MGT->getMemoryVT();
- Align Alignment = MGT->getOriginalAlign();
- ISD::LoadExtType ExtType = MGT->getExtensionType();
+ SDLoc dl(N);
+ std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
+
+ SDValue Ch = N->getChain();
+ SDValue Ptr = N->getBasePtr();
+ struct Operands {
+ SDValue Mask;
+ SDValue Index;
+ SDValue Scale;
+ } Ops = [&]() -> Operands {
+ if (auto *MSC = dyn_cast<MaskedGatherSDNode>(N)) {
+ return {MSC->getMask(), MSC->getIndex(), MSC->getScale()};
+ }
+ auto *VPSC = cast<VPGatherSDNode>(N);
+ return {VPSC->getMask(), VPSC->getIndex(), VPSC->getScale()};
+ }();
+
+ EVT MemoryVT = N->getMemoryVT();
+ Align Alignment = N->getOriginalAlign();
// Split Mask operand
SDValue MaskLo, MaskHi;
- if (Mask.getOpcode() == ISD::SETCC) {
- SplitVecRes_SETCC(Mask.getNode(), MaskLo, MaskHi);
+ if (SplitSETCC && Ops.Mask.getOpcode() == ISD::SETCC) {
+ SplitVecRes_SETCC(Ops.Mask.getNode(), MaskLo, MaskHi);
} else {
- if (getTypeAction(Mask.getValueType()) == TargetLowering::TypeSplitVector)
- GetSplitVector(Mask, MaskLo, MaskHi);
- else
- std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, dl);
+ std::tie(MaskLo, MaskHi) = SplitMask(Ops.Mask, dl);
}
EVT LoMemVT, HiMemVT;
// Split MemoryVT
std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
- SDValue PassThruLo, PassThruHi;
- if (getTypeAction(PassThru.getValueType()) == TargetLowering::TypeSplitVector)
- GetSplitVector(PassThru, PassThruLo, PassThruHi);
- else
- std::tie(PassThruLo, PassThruHi) = DAG.SplitVector(PassThru, dl);
-
SDValue IndexHi, IndexLo;
- if (getTypeAction(Index.getValueType()) == TargetLowering::TypeSplitVector)
- GetSplitVector(Index, IndexLo, IndexHi);
+ if (getTypeAction(Ops.Index.getValueType()) ==
+ TargetLowering::TypeSplitVector)
+ GetSplitVector(Ops.Index, IndexLo, IndexHi);
else
- std::tie(IndexLo, IndexHi) = DAG.SplitVector(Index, dl);
+ std::tie(IndexLo, IndexHi) = DAG.SplitVector(Ops.Index, dl);
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
- MGT->getPointerInfo(), MachineMemOperand::MOLoad,
- MemoryLocation::UnknownSize, Alignment, MGT->getAAInfo(),
- MGT->getRanges());
+ N->getPointerInfo(), MachineMemOperand::MOLoad,
+ MemoryLocation::UnknownSize, Alignment, N->getAAInfo(), N->getRanges());
+
+ if (auto *MGT = dyn_cast<MaskedGatherSDNode>(N)) {
+ SDValue PassThru = MGT->getPassThru();
+ SDValue PassThruLo, PassThruHi;
+ if (getTypeAction(PassThru.getValueType()) ==
+ TargetLowering::TypeSplitVector)
+ GetSplitVector(PassThru, PassThruLo, PassThruHi);
+ else
+ std::tie(PassThruLo, PassThruHi) = DAG.SplitVector(PassThru, dl);
- SDValue OpsLo[] = {Ch, PassThruLo, MaskLo, Ptr, IndexLo, Scale};
- Lo = DAG.getMaskedGather(DAG.getVTList(LoVT, MVT::Other), LoMemVT, dl, OpsLo,
- MMO, MGT->getIndexType(), ExtType);
+ ISD::LoadExtType ExtType = MGT->getExtensionType();
+ ISD::MemIndexType IndexTy = MGT->getIndexType();
- SDValue OpsHi[] = {Ch, PassThruHi, MaskHi, Ptr, IndexHi, Scale};
- Hi = DAG.getMaskedGather(DAG.getVTList(HiVT, MVT::Other), HiMemVT, dl, OpsHi,
- MMO, MGT->getIndexType(), ExtType);
+ SDValue OpsLo[] = {Ch, PassThruLo, MaskLo, Ptr, IndexLo, Ops.Scale};
+ Lo = DAG.getMaskedGather(DAG.getVTList(LoVT, MVT::Other), LoMemVT, dl,
+ OpsLo, MMO, IndexTy, ExtType);
+
+ SDValue OpsHi[] = {Ch, PassThruHi, MaskHi, Ptr, IndexHi, Ops.Scale};
+ Hi = DAG.getMaskedGather(DAG.getVTList(HiVT, MVT::Other), HiMemVT, dl,
+ OpsHi, MMO, IndexTy, ExtType);
+ } else {
+ auto *VPGT = cast<VPGatherSDNode>(N);
+ SDValue EVLLo, EVLHi;
+ std::tie(EVLLo, EVLHi) =
+ DAG.SplitEVL(VPGT->getVectorLength(), MemoryVT, dl);
+
+ SDValue OpsLo[] = {Ch, Ptr, IndexLo, Ops.Scale, MaskLo, EVLLo};
+ Lo = DAG.getGatherVP(DAG.getVTList(LoVT, MVT::Other), LoMemVT, dl, OpsLo,
+ MMO, VPGT->getIndexType());
+
+ SDValue OpsHi[] = {Ch, Ptr, IndexHi, Ops.Scale, MaskHi, EVLHi};
+ Hi = DAG.getGatherVP(DAG.getVTList(HiVT, MVT::Other), HiMemVT, dl, OpsHi,
+ MMO, VPGT->getIndexType());
+ }
// Build a factor node to remember that this load is independent of the
// other one.
@@ -1928,10 +2017,9 @@ void DAGTypeLegalizer::SplitVecRes_MGATHER(MaskedGatherSDNode *MGT,
// Legalize the chain result - switch anything that used the old chain to
// use the new one.
- ReplaceValueWith(SDValue(MGT, 1), Ch);
+ ReplaceValueWith(SDValue(N, 1), Ch);
}
-
void DAGTypeLegalizer::SplitVecRes_SETCC(SDNode *N, SDValue &Lo, SDValue &Hi) {
assert(N->getValueType(0).isVector() &&
N->getOperand(0).getValueType().isVector() &&
@@ -2221,14 +2309,19 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
case ISD::STORE:
Res = SplitVecOp_STORE(cast<StoreSDNode>(N), OpNo);
break;
+ case ISD::VP_STORE:
+ Res = SplitVecOp_VP_STORE(cast<VPStoreSDNode>(N), OpNo);
+ break;
case ISD::MSTORE:
Res = SplitVecOp_MSTORE(cast<MaskedStoreSDNode>(N), OpNo);
break;
case ISD::MSCATTER:
- Res = SplitVecOp_MSCATTER(cast<MaskedScatterSDNode>(N), OpNo);
+ case ISD::VP_SCATTER:
+ Res = SplitVecOp_Scatter(cast<MemSDNode>(N), OpNo);
break;
case ISD::MGATHER:
- Res = SplitVecOp_MGATHER(cast<MaskedGatherSDNode>(N), OpNo);
+ case ISD::VP_GATHER:
+ Res = SplitVecOp_Gather(cast<MemSDNode>(N), OpNo);
break;
case ISD::VSELECT:
Res = SplitVecOp_VSELECT(N, OpNo);
@@ -2285,6 +2378,23 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
case ISD::VECREDUCE_SEQ_FMUL:
Res = SplitVecOp_VECREDUCE_SEQ(N);
break;
+ case ISD::VP_REDUCE_FADD:
+ case ISD::VP_REDUCE_SEQ_FADD:
+ case ISD::VP_REDUCE_FMUL:
+ case ISD::VP_REDUCE_SEQ_FMUL:
+ case ISD::VP_REDUCE_ADD:
+ case ISD::VP_REDUCE_MUL:
+ case ISD::VP_REDUCE_AND:
+ case ISD::VP_REDUCE_OR:
+ case ISD::VP_REDUCE_XOR:
+ case ISD::VP_REDUCE_SMAX:
+ case ISD::VP_REDUCE_SMIN:
+ case ISD::VP_REDUCE_UMAX:
+ case ISD::VP_REDUCE_UMIN:
+ case ISD::VP_REDUCE_FMAX:
+ case ISD::VP_REDUCE_FMIN:
+ Res = SplitVecOp_VP_REDUCE(N, OpNo);
+ break;
}
// If the result is null, the sub-method took care of registering results etc.
@@ -2381,6 +2491,33 @@ SDValue DAGTypeLegalizer::SplitVecOp_VECREDUCE_SEQ(SDNode *N) {
return DAG.getNode(N->getOpcode(), dl, ResVT, Partial, Hi, Flags);
}
+SDValue DAGTypeLegalizer::SplitVecOp_VP_REDUCE(SDNode *N, unsigned OpNo) {
+ assert(N->isVPOpcode() && "Expected VP opcode");
+ assert(OpNo == 1 && "Can only split reduce vector operand");
+
+ unsigned Opc = N->getOpcode();
+ EVT ResVT = N->getValueType(0);
+ SDValue Lo, Hi;
+ SDLoc dl(N);
+
+ SDValue VecOp = N->getOperand(OpNo);
+ EVT VecVT = VecOp.getValueType();
+ assert(VecVT.isVector() && "Can only split reduce vector operand");
+ GetSplitVector(VecOp, Lo, Hi);
+
+ SDValue MaskLo, MaskHi;
+ std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(2));
+
+ SDValue EVLLo, EVLHi;
+ std::tie(EVLLo, EVLHi) = DAG.SplitEVL(N->getOperand(3), VecVT, dl);
+
+ const SDNodeFlags Flags = N->getFlags();
+
+ SDValue ResLo =
+ DAG.getNode(Opc, dl, ResVT, {N->getOperand(0), Lo, MaskLo, EVLLo}, Flags);
+ return DAG.getNode(Opc, dl, ResVT, {ResLo, Hi, MaskHi, EVLHi}, Flags);
+}
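// SplitVecOp_VP_REDUCE above chains the two halves through the start value:
// the low half reduces onto the original start operand and the high half
// reduces onto that partial result, each with its own mask and EVL half.
// Scalar model for an integer vp.reduce.add; names are illustrative.
#include <algorithm>
#include <array>
#include <cassert>

template <unsigned N>
static int vpReduceAdd(int start, std::array<int, N> v,
                       std::array<bool, N> mask, unsigned evl) {
  int acc = start;
  for (unsigned i = 0; i < N; ++i)
    if (i < evl && mask[i])
      acc += v[i];
  return acc;
}

int main() {
  std::array<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8};
  std::array<bool, 8> m = {1, 0, 1, 1, 1, 1, 0, 1};
  unsigned evl = 7;

  int whole = vpReduceAdd<8>(5, v, m, evl);

  // Split: the low half starts from 5, the high half starts from the partial.
  int lo = vpReduceAdd<4>(5, {v[0], v[1], v[2], v[3]},
                          {m[0], m[1], m[2], m[3]}, std::min(evl, 4u));
  int split = vpReduceAdd<4>(lo, {v[4], v[5], v[6], v[7]},
                             {m[4], m[5], m[6], m[7]},
                             evl > 4 ? evl - 4 : 0);
  assert(whole == split);
  return 0;
}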
+
SDValue DAGTypeLegalizer::SplitVecOp_UnaryOp(SDNode *N) {
// The result has a legal vector type, but the input needs splitting.
EVT ResVT = N->getValueType(0);
@@ -2558,70 +2695,92 @@ SDValue DAGTypeLegalizer::SplitVecOp_ExtVecInRegOp(SDNode *N) {
return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), N->getValueType(0), Lo, Hi);
}
-SDValue DAGTypeLegalizer::SplitVecOp_MGATHER(MaskedGatherSDNode *MGT,
- unsigned OpNo) {
- EVT LoVT, HiVT;
- SDLoc dl(MGT);
- std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(MGT->getValueType(0));
-
- SDValue Ch = MGT->getChain();
- SDValue Ptr = MGT->getBasePtr();
- SDValue Index = MGT->getIndex();
- SDValue Scale = MGT->getScale();
- SDValue Mask = MGT->getMask();
- SDValue PassThru = MGT->getPassThru();
- Align Alignment = MGT->getOriginalAlign();
- ISD::LoadExtType ExtType = MGT->getExtensionType();
+SDValue DAGTypeLegalizer::SplitVecOp_Gather(MemSDNode *N, unsigned OpNo) {
+ (void)OpNo;
+ SDValue Lo, Hi;
+ SplitVecRes_Gather(N, Lo, Hi);
- SDValue MaskLo, MaskHi;
- if (getTypeAction(Mask.getValueType()) == TargetLowering::TypeSplitVector)
- // Split Mask operand
- GetSplitVector(Mask, MaskLo, MaskHi);
- else
- std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, dl);
+ SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, N, N->getValueType(0), Lo, Hi);
+ ReplaceValueWith(SDValue(N, 0), Res);
+ return SDValue();
+}
- EVT MemoryVT = MGT->getMemoryVT();
- EVT LoMemVT, HiMemVT;
- std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
+SDValue DAGTypeLegalizer::SplitVecOp_VP_STORE(VPStoreSDNode *N, unsigned OpNo) {
+ assert(N->isUnindexed() && "Indexed vp_store of vector?");
+ SDValue Ch = N->getChain();
+ SDValue Ptr = N->getBasePtr();
+ SDValue Offset = N->getOffset();
+ assert(Offset.isUndef() && "Unexpected VP store offset");
+ SDValue Mask = N->getMask();
+ SDValue EVL = N->getVectorLength();
+ SDValue Data = N->getValue();
+ Align Alignment = N->getOriginalAlign();
+ SDLoc DL(N);
- SDValue PassThruLo, PassThruHi;
- if (getTypeAction(PassThru.getValueType()) == TargetLowering::TypeSplitVector)
- GetSplitVector(PassThru, PassThruLo, PassThruHi);
+ SDValue DataLo, DataHi;
+ if (getTypeAction(Data.getValueType()) == TargetLowering::TypeSplitVector)
+ // Split Data operand
+ GetSplitVector(Data, DataLo, DataHi);
else
- std::tie(PassThruLo, PassThruHi) = DAG.SplitVector(PassThru, dl);
+ std::tie(DataLo, DataHi) = DAG.SplitVector(Data, DL);
- SDValue IndexHi, IndexLo;
- if (getTypeAction(Index.getValueType()) == TargetLowering::TypeSplitVector)
- GetSplitVector(Index, IndexLo, IndexHi);
- else
- std::tie(IndexLo, IndexHi) = DAG.SplitVector(Index, dl);
+ // Split Mask operand
+ SDValue MaskLo, MaskHi;
+ if (OpNo == 1 && Mask.getOpcode() == ISD::SETCC) {
+ SplitVecRes_SETCC(Mask.getNode(), MaskLo, MaskHi);
+ } else {
+ if (getTypeAction(Mask.getValueType()) == TargetLowering::TypeSplitVector)
+ GetSplitVector(Mask, MaskLo, MaskHi);
+ else
+ std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, DL);
+ }
+ EVT MemoryVT = N->getMemoryVT();
+ EVT LoMemVT, HiMemVT;
+ bool HiIsEmpty = false;
+ std::tie(LoMemVT, HiMemVT) =
+ DAG.GetDependentSplitDestVTs(MemoryVT, DataLo.getValueType(), &HiIsEmpty);
+
+ // Split EVL
+ SDValue EVLLo, EVLHi;
+ std::tie(EVLLo, EVLHi) = DAG.SplitEVL(EVL, Data.getValueType(), DL);
+
+ SDValue Lo, Hi;
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
- MGT->getPointerInfo(), MachineMemOperand::MOLoad,
- MemoryLocation::UnknownSize, Alignment, MGT->getAAInfo(),
- MGT->getRanges());
+ N->getPointerInfo(), MachineMemOperand::MOStore,
+ MemoryLocation::UnknownSize, Alignment, N->getAAInfo(), N->getRanges());
- SDValue OpsLo[] = {Ch, PassThruLo, MaskLo, Ptr, IndexLo, Scale};
- SDValue Lo = DAG.getMaskedGather(DAG.getVTList(LoVT, MVT::Other), LoMemVT, dl,
- OpsLo, MMO, MGT->getIndexType(), ExtType);
+ Lo = DAG.getStoreVP(Ch, DL, DataLo, Ptr, Offset, MaskLo, EVLLo, LoMemVT, MMO,
+ N->getAddressingMode(), N->isTruncatingStore(),
+ N->isCompressingStore());
- SDValue OpsHi[] = {Ch, PassThruHi, MaskHi, Ptr, IndexHi, Scale};
- SDValue Hi = DAG.getMaskedGather(DAG.getVTList(HiVT, MVT::Other), HiMemVT, dl,
- OpsHi, MMO, MGT->getIndexType(), ExtType);
+ // If the hi vp_store has zero storage size, only the lo vp_store is needed.
+ if (HiIsEmpty)
+ return Lo;
- // Build a factor node to remember that this load is independent of the
- // other one.
- Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
- Hi.getValue(1));
+ Ptr = TLI.IncrementMemoryAddress(Ptr, MaskLo, DL, LoMemVT, DAG,
+ N->isCompressingStore());
- // Legalize the chain result - switch anything that used the old chain to
- // use the new one.
- ReplaceValueWith(SDValue(MGT, 1), Ch);
+ MachinePointerInfo MPI;
+ if (LoMemVT.isScalableVector()) {
+ Alignment = commonAlignment(Alignment,
+ LoMemVT.getSizeInBits().getKnownMinSize() / 8);
+ MPI = MachinePointerInfo(N->getPointerInfo().getAddrSpace());
+ } else
+ MPI = N->getPointerInfo().getWithOffset(
+ LoMemVT.getStoreSize().getFixedSize());
- SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MGT->getValueType(0), Lo,
- Hi);
- ReplaceValueWith(SDValue(MGT, 0), Res);
- return SDValue();
+ MMO = DAG.getMachineFunction().getMachineMemOperand(
+ MPI, MachineMemOperand::MOStore, MemoryLocation::UnknownSize, Alignment,
+ N->getAAInfo(), N->getRanges());
+
+ Hi = DAG.getStoreVP(Ch, DL, DataHi, Ptr, Offset, MaskHi, EVLHi, HiMemVT, MMO,
+ N->getAddressingMode(), N->isTruncatingStore(),
+ N->isCompressingStore());
+
+ // Build a factor node to remember that this store is independent of the
+ // other one.
+ return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
}
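To read the store split above: the low half is written at the original address with the low mask and low EVL; unless GetDependentSplitDestVTs reports that the high half has zero storage size, the high half is then written at the address advanced by the low half's store size, and the two chains are joined with a TokenFactor. A rough scalar model of the memory effect for the fixed-length, non-truncating, non-compressing case (helper names are illustrative):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

// Model of a VP store: write lane I iff it is masked on and I < EVL.
static void vpStore(uint8_t *Ptr, const std::vector<uint8_t> &Data,
                    const std::vector<bool> &Mask, unsigned EVL) {
  for (unsigned I = 0; I < Data.size() && I < EVL; ++I)
    if (Mask[I])
      Ptr[I] = Data[I];
}

int main() {
  std::vector<uint8_t> Data = {1, 2, 3, 4, 5, 6, 7, 8};
  std::vector<bool> Mask = {true, true, false, true, true, true, true, false};
  unsigned EVL = 5;

  uint8_t Whole[8] = {0}, Halves[8] = {0};
  vpStore(Whole, Data, Mask, EVL);

  // Split as in SplitVecOp_VP_STORE: the lo half goes to the original
  // pointer, the hi half to the pointer advanced by the lo store size.
  std::vector<uint8_t> DataLo(Data.begin(), Data.begin() + 4);
  std::vector<uint8_t> DataHi(Data.begin() + 4, Data.end());
  std::vector<bool> MaskLo(Mask.begin(), Mask.begin() + 4);
  std::vector<bool> MaskHi(Mask.begin() + 4, Mask.end());
  unsigned EVLLo = std::min(EVL, 4u);
  unsigned EVLHi = EVL > 4 ? EVL - 4 : 0;
  vpStore(Halves, DataLo, MaskLo, EVLLo);
  vpStore(Halves + 4, DataHi, MaskHi, EVLHi);

  for (unsigned I = 0; I < 8; ++I)
    assert(Whole[I] == Halves[I]);
  return 0;
}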
SDValue DAGTypeLegalizer::SplitVecOp_MSTORE(MaskedStoreSDNode *N,
@@ -2703,64 +2862,87 @@ SDValue DAGTypeLegalizer::SplitVecOp_MSTORE(MaskedStoreSDNode *N,
return Res;
}
-SDValue DAGTypeLegalizer::SplitVecOp_MSCATTER(MaskedScatterSDNode *N,
- unsigned OpNo) {
- SDValue Ch = N->getChain();
+SDValue DAGTypeLegalizer::SplitVecOp_Scatter(MemSDNode *N, unsigned OpNo) {
+ SDValue Ch = N->getChain();
SDValue Ptr = N->getBasePtr();
- SDValue Mask = N->getMask();
- SDValue Index = N->getIndex();
- SDValue Scale = N->getScale();
- SDValue Data = N->getValue();
EVT MemoryVT = N->getMemoryVT();
Align Alignment = N->getOriginalAlign();
SDLoc DL(N);
-
+ struct Operands {
+ SDValue Mask;
+ SDValue Index;
+ SDValue Scale;
+ SDValue Data;
+ } Ops = [&]() -> Operands {
+ if (auto *MSC = dyn_cast<MaskedScatterSDNode>(N)) {
+ return {MSC->getMask(), MSC->getIndex(), MSC->getScale(),
+ MSC->getValue()};
+ }
+ auto *VPSC = cast<VPScatterSDNode>(N);
+ return {VPSC->getMask(), VPSC->getIndex(), VPSC->getScale(),
+ VPSC->getValue()};
+ }();
// Split all operands
EVT LoMemVT, HiMemVT;
std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
SDValue DataLo, DataHi;
- if (getTypeAction(Data.getValueType()) == TargetLowering::TypeSplitVector)
+ if (getTypeAction(Ops.Data.getValueType()) == TargetLowering::TypeSplitVector)
// Split Data operand
- GetSplitVector(Data, DataLo, DataHi);
+ GetSplitVector(Ops.Data, DataLo, DataHi);
else
- std::tie(DataLo, DataHi) = DAG.SplitVector(Data, DL);
+ std::tie(DataLo, DataHi) = DAG.SplitVector(Ops.Data, DL);
// Split Mask operand
SDValue MaskLo, MaskHi;
- if (OpNo == 1 && Mask.getOpcode() == ISD::SETCC) {
- SplitVecRes_SETCC(Mask.getNode(), MaskLo, MaskHi);
+ if (OpNo == 1 && Ops.Mask.getOpcode() == ISD::SETCC) {
+ SplitVecRes_SETCC(Ops.Mask.getNode(), MaskLo, MaskHi);
} else {
- if (getTypeAction(Mask.getValueType()) == TargetLowering::TypeSplitVector)
- GetSplitVector(Mask, MaskLo, MaskHi);
- else
- std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, DL);
+ std::tie(MaskLo, MaskHi) = SplitMask(Ops.Mask, DL);
}
SDValue IndexHi, IndexLo;
- if (getTypeAction(Index.getValueType()) == TargetLowering::TypeSplitVector)
- GetSplitVector(Index, IndexLo, IndexHi);
+ if (getTypeAction(Ops.Index.getValueType()) ==
+ TargetLowering::TypeSplitVector)
+ GetSplitVector(Ops.Index, IndexLo, IndexHi);
else
- std::tie(IndexLo, IndexHi) = DAG.SplitVector(Index, DL);
+ std::tie(IndexLo, IndexHi) = DAG.SplitVector(Ops.Index, DL);
SDValue Lo;
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
N->getPointerInfo(), MachineMemOperand::MOStore,
MemoryLocation::UnknownSize, Alignment, N->getAAInfo(), N->getRanges());
- SDValue OpsLo[] = {Ch, DataLo, MaskLo, Ptr, IndexLo, Scale};
- Lo = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), LoMemVT,
- DL, OpsLo, MMO, N->getIndexType(),
- N->isTruncatingStore());
+ if (auto *MSC = dyn_cast<MaskedScatterSDNode>(N)) {
+ SDValue OpsLo[] = {Ch, DataLo, MaskLo, Ptr, IndexLo, Ops.Scale};
+ Lo =
+ DAG.getMaskedScatter(DAG.getVTList(MVT::Other), LoMemVT, DL, OpsLo, MMO,
+ MSC->getIndexType(), MSC->isTruncatingStore());
+
+ // The order of the Scatter operation after split is well defined. The "Hi"
+ // part comes after the "Lo". So these two operations should be chained one
+ // after another.
+ SDValue OpsHi[] = {Lo, DataHi, MaskHi, Ptr, IndexHi, Ops.Scale};
+ return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), HiMemVT, DL, OpsHi,
+ MMO, MSC->getIndexType(),
+ MSC->isTruncatingStore());
+ }
+ auto *VPSC = cast<VPScatterSDNode>(N);
+ SDValue EVLLo, EVLHi;
+ std::tie(EVLLo, EVLHi) =
+ DAG.SplitEVL(VPSC->getVectorLength(), Ops.Data.getValueType(), DL);
+
+ SDValue OpsLo[] = {Ch, DataLo, Ptr, IndexLo, Ops.Scale, MaskLo, EVLLo};
+ Lo = DAG.getScatterVP(DAG.getVTList(MVT::Other), LoMemVT, DL, OpsLo, MMO,
+ VPSC->getIndexType());
// The order of the Scatter operation after split is well defined. The "Hi"
// part comes after the "Lo". So these two operations should be chained one
// after another.
- SDValue OpsHi[] = {Lo, DataHi, MaskHi, Ptr, IndexHi, Scale};
- return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), HiMemVT,
- DL, OpsHi, MMO, N->getIndexType(),
- N->isTruncatingStore());
+ SDValue OpsHi[] = {Lo, DataHi, Ptr, IndexHi, Ops.Scale, MaskHi, EVLHi};
+ return DAG.getScatterVP(DAG.getVTList(MVT::Other), HiMemVT, DL, OpsHi, MMO,
+ VPSC->getIndexType());
}
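A note on the shape of that refactor: the operands that differ between MSCATTER and VP_SCATTER are gathered once through an immediately-invoked lambda returning a small aggregate, so the splitting of data, mask and index stays a single code path for both node kinds. The same pattern in isolation, with made-up types purely for illustration:

#include <cassert>
#include <string>

struct ScatterOperands {
  std::string Mask, Index, Scale, Data;
};

// Branch on the node flavour exactly once, up front, and hand one aggregate
// to the shared code path that follows.
static ScatterOperands collect(bool IsVP) {
  ScatterOperands Ops = [&]() -> ScatterOperands {
    if (IsVP)
      return {"vp.mask", "vp.index", "vp.scale", "vp.data"};
    return {"masked.mask", "masked.index", "masked.scale", "masked.data"};
  }();
  return Ops;
}

int main() {
  assert(collect(true).Data == "vp.data");
  assert(collect(false).Data == "masked.data");
  return 0;
}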
SDValue DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) {
@@ -3047,31 +3229,41 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
break;
case ISD::SIGN_EXTEND_INREG: Res = WidenVecRes_InregOp(N); break;
case ISD::VSELECT:
- case ISD::SELECT: Res = WidenVecRes_SELECT(N); break;
+ case ISD::SELECT:
+ case ISD::VP_SELECT:
+ case ISD::VP_MERGE:
+ Res = WidenVecRes_Select(N);
+ break;
case ISD::SELECT_CC: Res = WidenVecRes_SELECT_CC(N); break;
case ISD::SETCC: Res = WidenVecRes_SETCC(N); break;
case ISD::UNDEF: Res = WidenVecRes_UNDEF(N); break;
case ISD::VECTOR_SHUFFLE:
Res = WidenVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(N));
break;
+ case ISD::VP_LOAD:
+ Res = WidenVecRes_VP_LOAD(cast<VPLoadSDNode>(N));
+ break;
case ISD::MLOAD:
Res = WidenVecRes_MLOAD(cast<MaskedLoadSDNode>(N));
break;
case ISD::MGATHER:
Res = WidenVecRes_MGATHER(cast<MaskedGatherSDNode>(N));
break;
+ case ISD::VP_GATHER:
+ Res = WidenVecRes_VP_GATHER(cast<VPGatherSDNode>(N));
+ break;
- case ISD::ADD:
- case ISD::AND:
- case ISD::MUL:
+ case ISD::ADD: case ISD::VP_ADD:
+ case ISD::AND: case ISD::VP_AND:
+ case ISD::MUL: case ISD::VP_MUL:
case ISD::MULHS:
case ISD::MULHU:
- case ISD::OR:
- case ISD::SUB:
- case ISD::XOR:
- case ISD::SHL:
- case ISD::SRA:
- case ISD::SRL:
+ case ISD::OR: case ISD::VP_OR:
+ case ISD::SUB: case ISD::VP_SUB:
+ case ISD::XOR: case ISD::VP_XOR:
+ case ISD::SHL: case ISD::VP_SHL:
+ case ISD::SRA: case ISD::VP_ASHR:
+ case ISD::SRL: case ISD::VP_LSHR:
case ISD::FMINNUM:
case ISD::FMAXNUM:
case ISD::FMINIMUM:
@@ -3088,7 +3280,21 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
case ISD::USHLSAT:
case ISD::ROTL:
case ISD::ROTR:
- Res = WidenVecRes_Binary(N, /*IsVP*/ false);
+ // Vector-predicated binary op widening. Note that -- unlike the
+ // unpredicated versions -- we don't have to worry about trapping on
+ // operations like UDIV, FADD, etc., as we pass on the original vector
+ // length parameter. This means the widened elements containing garbage
+ // aren't active.
+ case ISD::VP_SDIV:
+ case ISD::VP_UDIV:
+ case ISD::VP_SREM:
+ case ISD::VP_UREM:
+ case ISD::VP_FADD:
+ case ISD::VP_FSUB:
+ case ISD::VP_FMUL:
+ case ISD::VP_FDIV:
+ case ISD::VP_FREM:
+ Res = WidenVecRes_Binary(N);
break;
case ISD::FADD:
@@ -3212,31 +3418,6 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
case ISD::FSHR:
Res = WidenVecRes_Ternary(N);
break;
- case ISD::VP_ADD:
- case ISD::VP_AND:
- case ISD::VP_MUL:
- case ISD::VP_OR:
- case ISD::VP_SUB:
- case ISD::VP_XOR:
- case ISD::VP_SHL:
- case ISD::VP_LSHR:
- case ISD::VP_ASHR:
- case ISD::VP_SDIV:
- case ISD::VP_UDIV:
- case ISD::VP_SREM:
- case ISD::VP_UREM:
- case ISD::VP_FADD:
- case ISD::VP_FSUB:
- case ISD::VP_FMUL:
- case ISD::VP_FDIV:
- case ISD::VP_FREM:
- // Vector-predicated binary op widening. Note that -- unlike the
- // unpredicated versions -- we don't have to worry about trapping on
- // operations like UDIV, FADD, etc., as we pass on the original vector
- // length parameter. This means the widened elements containing garbage
- // aren't active.
- Res = WidenVecRes_Binary(N, /*IsVP*/ true);
- break;
}
// If Res is null, the sub-method took care of registering the result.
@@ -3254,29 +3435,21 @@ SDValue DAGTypeLegalizer::WidenVecRes_Ternary(SDNode *N) {
return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3);
}
-SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N, bool IsVP) {
+SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
// Binary op widening.
SDLoc dl(N);
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue InOp1 = GetWidenedVector(N->getOperand(0));
SDValue InOp2 = GetWidenedVector(N->getOperand(1));
- if (!IsVP)
+ if (N->getNumOperands() == 2)
return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2,
N->getFlags());
- // For VP operations, we must also widen the mask. Note that the mask type
- // may not actually need widening, leading it be split along with the VP
- // operation.
- // FIXME: This could lead to an infinite split/widen loop. We only handle the
- // case where the mask needs widening to an identically-sized type as the
- // vector inputs.
- SDValue Mask = N->getOperand(2);
- assert(getTypeAction(Mask.getValueType()) ==
- TargetLowering::TypeWidenVector &&
- "Unable to widen binary VP op");
- Mask = GetWidenedVector(Mask);
- assert(Mask.getValueType().getVectorElementCount() ==
- WidenVT.getVectorElementCount() &&
- "Unable to widen binary VP op");
+
+ assert(N->getNumOperands() == 4 && "Unexpected number of operands!");
+ assert(N->isVPOpcode() && "Expected VP opcode");
+
+ SDValue Mask =
+ GetWidenedMask(N->getOperand(2), WidenVT.getVectorElementCount());
return DAG.getNode(N->getOpcode(), dl, WidenVT,
{InOp1, InOp2, Mask, N->getOperand(3)}, N->getFlags());
}
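The comment hoisted into the case list above carries the key safety argument: because the original EVL operand is forwarded unchanged, the lanes introduced by widening are never active, so even trapping operations such as division cannot fault on widened garbage. A scalar model of that claim (vpSDiv is an illustrative stand-in for the VP_SDIV semantics, not LLVM code):

#include <cassert>
#include <vector>

// Model of a VP division: lanes at or beyond EVL (or masked off) are never
// evaluated, so garbage in them, including zero divisors, is harmless.
static std::vector<int> vpSDiv(const std::vector<int> &A,
                               const std::vector<int> &B,
                               const std::vector<bool> &Mask, unsigned EVL) {
  std::vector<int> R(A.size(), 0);
  for (unsigned I = 0; I < A.size() && I < EVL; ++I)
    if (Mask[I])
      R[I] = A[I] / B[I]; // only reached for active lanes
  return R;
}

int main() {
  // "Widened" 4-element vectors carrying a real 3-element computation; the
  // fourth lane is garbage and even has a zero divisor.
  std::vector<int> A = {8, 9, 10, 123};
  std::vector<int> B = {2, 3, 5, 0};
  std::vector<bool> Mask = {true, true, true, true};
  unsigned EVL = 3; // the original vector length, passed through unchanged

  std::vector<int> R = vpSDiv(A, B, Mask, EVL);
  assert(R[0] == 4 && R[1] == 3 && R[2] == 2 && R[3] == 0);
  return 0;
}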
@@ -4226,6 +4399,33 @@ SDValue DAGTypeLegalizer::WidenVecRes_LOAD(SDNode *N) {
report_fatal_error("Unable to widen vector load");
}
+SDValue DAGTypeLegalizer::WidenVecRes_VP_LOAD(VPLoadSDNode *N) {
+ EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
+ SDValue Mask = N->getMask();
+ SDValue EVL = N->getVectorLength();
+ ISD::LoadExtType ExtType = N->getExtensionType();
+ SDLoc dl(N);
+
+ // The mask should be widened as well
+ assert(getTypeAction(Mask.getValueType()) ==
+ TargetLowering::TypeWidenVector &&
+ "Unable to widen binary VP op");
+ Mask = GetWidenedVector(Mask);
+ assert(Mask.getValueType().getVectorElementCount() ==
+ TLI.getTypeToTransformTo(*DAG.getContext(), Mask.getValueType())
+ .getVectorElementCount() &&
+ "Unable to widen vector load");
+
+ SDValue Res =
+ DAG.getLoadVP(N->getAddressingMode(), ExtType, WidenVT, dl, N->getChain(),
+ N->getBasePtr(), N->getOffset(), Mask, EVL,
+ N->getMemoryVT(), N->getMemOperand(), N->isExpandingLoad());
+ // Legalize the chain result - switch anything that used the old chain to
+ // use the new one.
+ ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
+ return Res;
+}
+
SDValue DAGTypeLegalizer::WidenVecRes_MLOAD(MaskedLoadSDNode *N) {
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(),N->getValueType(0));
@@ -4289,6 +4489,29 @@ SDValue DAGTypeLegalizer::WidenVecRes_MGATHER(MaskedGatherSDNode *N) {
return Res;
}
+SDValue DAGTypeLegalizer::WidenVecRes_VP_GATHER(VPGatherSDNode *N) {
+ EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
+ SDValue Mask = N->getMask();
+ SDValue Scale = N->getScale();
+ ElementCount WideEC = WideVT.getVectorElementCount();
+ SDLoc dl(N);
+
+ SDValue Index = GetWidenedVector(N->getIndex());
+ EVT WideMemVT = EVT::getVectorVT(*DAG.getContext(),
+ N->getMemoryVT().getScalarType(), WideEC);
+ Mask = GetWidenedMask(Mask, WideEC);
+
+ SDValue Ops[] = {N->getChain(), N->getBasePtr(), Index, Scale,
+ Mask, N->getVectorLength()};
+ SDValue Res = DAG.getGatherVP(DAG.getVTList(WideVT, MVT::Other), WideMemVT,
+ dl, Ops, N->getMemOperand(), N->getIndexType());
+
+ // Legalize the chain result - switch anything that used the old chain to
+ // use the new one.
+ ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
+ return Res;
+}
+
SDValue DAGTypeLegalizer::WidenVecRes_ScalarOp(SDNode *N) {
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, N->getOperand(0));
@@ -4522,19 +4745,19 @@ SDValue DAGTypeLegalizer::WidenVSELECTMask(SDNode *N) {
return Mask;
}
-SDValue DAGTypeLegalizer::WidenVecRes_SELECT(SDNode *N) {
+SDValue DAGTypeLegalizer::WidenVecRes_Select(SDNode *N) {
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
ElementCount WidenEC = WidenVT.getVectorElementCount();
SDValue Cond1 = N->getOperand(0);
EVT CondVT = Cond1.getValueType();
+ unsigned Opcode = N->getOpcode();
if (CondVT.isVector()) {
if (SDValue WideCond = WidenVSELECTMask(N)) {
SDValue InOp1 = GetWidenedVector(N->getOperand(1));
SDValue InOp2 = GetWidenedVector(N->getOperand(2));
assert(InOp1.getValueType() == WidenVT && InOp2.getValueType() == WidenVT);
- return DAG.getNode(N->getOpcode(), SDLoc(N),
- WidenVT, WideCond, InOp1, InOp2);
+ return DAG.getNode(Opcode, SDLoc(N), WidenVT, WideCond, InOp1, InOp2);
}
EVT CondEltVT = CondVT.getVectorElementType();
@@ -4560,8 +4783,10 @@ SDValue DAGTypeLegalizer::WidenVecRes_SELECT(SDNode *N) {
SDValue InOp1 = GetWidenedVector(N->getOperand(1));
SDValue InOp2 = GetWidenedVector(N->getOperand(2));
assert(InOp1.getValueType() == WidenVT && InOp2.getValueType() == WidenVT);
- return DAG.getNode(N->getOpcode(), SDLoc(N),
- WidenVT, Cond1, InOp1, InOp2);
+ return Opcode == ISD::VP_SELECT || Opcode == ISD::VP_MERGE
+ ? DAG.getNode(Opcode, SDLoc(N), WidenVT, Cond1, InOp1, InOp2,
+ N->getOperand(3))
+ : DAG.getNode(Opcode, SDLoc(N), WidenVT, Cond1, InOp1, InOp2);
}
SDValue DAGTypeLegalizer::WidenVecRes_SELECT_CC(SDNode *N) {
@@ -4711,9 +4936,11 @@ bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned OpNo) {
case ISD::EXTRACT_SUBVECTOR: Res = WidenVecOp_EXTRACT_SUBVECTOR(N); break;
case ISD::EXTRACT_VECTOR_ELT: Res = WidenVecOp_EXTRACT_VECTOR_ELT(N); break;
case ISD::STORE: Res = WidenVecOp_STORE(N); break;
+ case ISD::VP_STORE: Res = WidenVecOp_VP_STORE(N, OpNo); break;
case ISD::MSTORE: Res = WidenVecOp_MSTORE(N, OpNo); break;
case ISD::MGATHER: Res = WidenVecOp_MGATHER(N, OpNo); break;
case ISD::MSCATTER: Res = WidenVecOp_MSCATTER(N, OpNo); break;
+ case ISD::VP_SCATTER: Res = WidenVecOp_VP_SCATTER(N, OpNo); break;
case ISD::SETCC: Res = WidenVecOp_SETCC(N); break;
case ISD::STRICT_FSETCC:
case ISD::STRICT_FSETCCS: Res = WidenVecOp_STRICT_FSETCC(N); break;
@@ -4766,6 +4993,23 @@ bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned OpNo) {
case ISD::VECREDUCE_SEQ_FMUL:
Res = WidenVecOp_VECREDUCE_SEQ(N);
break;
+ case ISD::VP_REDUCE_FADD:
+ case ISD::VP_REDUCE_SEQ_FADD:
+ case ISD::VP_REDUCE_FMUL:
+ case ISD::VP_REDUCE_SEQ_FMUL:
+ case ISD::VP_REDUCE_ADD:
+ case ISD::VP_REDUCE_MUL:
+ case ISD::VP_REDUCE_AND:
+ case ISD::VP_REDUCE_OR:
+ case ISD::VP_REDUCE_XOR:
+ case ISD::VP_REDUCE_SMAX:
+ case ISD::VP_REDUCE_SMIN:
+ case ISD::VP_REDUCE_UMAX:
+ case ISD::VP_REDUCE_UMIN:
+ case ISD::VP_REDUCE_FMAX:
+ case ISD::VP_REDUCE_FMIN:
+ Res = WidenVecOp_VP_REDUCE(N);
+ break;
}
// If Res is null, the sub-method took care of registering the result.
@@ -5092,15 +5336,54 @@ SDValue DAGTypeLegalizer::WidenVecOp_STORE(SDNode *N) {
unsigned NumVTElts = StVT.getVectorMinNumElements();
SDValue EVL =
DAG.getVScale(DL, EVLVT, APInt(EVLVT.getScalarSizeInBits(), NumVTElts));
- const auto *MMO = ST->getMemOperand();
- return DAG.getStoreVP(ST->getChain(), DL, StVal, ST->getBasePtr(), Mask,
- EVL, MMO->getPointerInfo(), MMO->getAlign(),
- MMO->getFlags(), MMO->getAAInfo());
+ return DAG.getStoreVP(ST->getChain(), DL, StVal, ST->getBasePtr(),
+ DAG.getUNDEF(ST->getBasePtr().getValueType()), Mask,
+ EVL, StVal.getValueType(), ST->getMemOperand(),
+ ST->getAddressingMode());
}
report_fatal_error("Unable to widen vector store");
}
+SDValue DAGTypeLegalizer::WidenVecOp_VP_STORE(SDNode *N, unsigned OpNo) {
+ assert((OpNo == 1 || OpNo == 3) &&
+ "Can widen only data or mask operand of vp_store");
+ VPStoreSDNode *ST = cast<VPStoreSDNode>(N);
+ SDValue Mask = ST->getMask();
+ SDValue StVal = ST->getValue();
+ SDLoc dl(N);
+
+ if (OpNo == 1) {
+ // Widen the value.
+ StVal = GetWidenedVector(StVal);
+
+ // We only handle the case where the mask needs widening to an
+ // identically-sized type as the vector inputs.
+ assert(getTypeAction(Mask.getValueType()) ==
+ TargetLowering::TypeWidenVector &&
+ "Unable to widen VP store");
+ Mask = GetWidenedVector(Mask);
+ } else {
+ Mask = GetWidenedVector(Mask);
+
+ // We only handle the case where the stored value needs widening to an
+ // identically-sized type as the mask.
+ assert(getTypeAction(StVal.getValueType()) ==
+ TargetLowering::TypeWidenVector &&
+ "Unable to widen VP store");
+ StVal = GetWidenedVector(StVal);
+ }
+
+ assert(Mask.getValueType().getVectorElementCount() ==
+ StVal.getValueType().getVectorElementCount() &&
+ "Mask and data vectors should have the same number of elements");
+ return DAG.getStoreVP(ST->getChain(), dl, StVal, ST->getBasePtr(),
+ ST->getOffset(), Mask, ST->getVectorLength(),
+ ST->getMemoryVT(), ST->getMemOperand(),
+ ST->getAddressingMode(), ST->isTruncatingStore(),
+ ST->isCompressingStore());
+}
+
SDValue DAGTypeLegalizer::WidenVecOp_MSTORE(SDNode *N, unsigned OpNo) {
assert((OpNo == 1 || OpNo == 3) &&
"Can widen only data or mask operand of mstore");
@@ -5202,6 +5485,34 @@ SDValue DAGTypeLegalizer::WidenVecOp_MSCATTER(SDNode *N, unsigned OpNo) {
MSC->isTruncatingStore());
}
+SDValue DAGTypeLegalizer::WidenVecOp_VP_SCATTER(SDNode *N, unsigned OpNo) {
+ VPScatterSDNode *VPSC = cast<VPScatterSDNode>(N);
+ SDValue DataOp = VPSC->getValue();
+ SDValue Mask = VPSC->getMask();
+ SDValue Index = VPSC->getIndex();
+ SDValue Scale = VPSC->getScale();
+ EVT WideMemVT = VPSC->getMemoryVT();
+
+ if (OpNo == 1) {
+ DataOp = GetWidenedVector(DataOp);
+ Index = GetWidenedVector(Index);
+ const auto WideEC = DataOp.getValueType().getVectorElementCount();
+ Mask = GetWidenedMask(Mask, WideEC);
+ WideMemVT = EVT::getVectorVT(*DAG.getContext(),
+ VPSC->getMemoryVT().getScalarType(), WideEC);
+ } else if (OpNo == 4) {
+ // Just widen the index. It's allowed to have extra elements.
+ Index = GetWidenedVector(Index);
+ } else
+    llvm_unreachable("Can't widen this operand of vp_scatter");
+    llvm_unreachable("Can't widen this operand of vp_scatter");
+
+ SDValue Ops[] = {
+ VPSC->getChain(), DataOp, VPSC->getBasePtr(), Index, Scale, Mask,
+ VPSC->getVectorLength()};
+ return DAG.getScatterVP(DAG.getVTList(MVT::Other), WideMemVT, SDLoc(N), Ops,
+ VPSC->getMemOperand(), VPSC->getIndexType());
+}
+
SDValue DAGTypeLegalizer::WidenVecOp_SETCC(SDNode *N) {
SDValue InOp0 = GetWidenedVector(N->getOperand(0));
SDValue InOp1 = GetWidenedVector(N->getOperand(1));
@@ -5320,6 +5631,19 @@ SDValue DAGTypeLegalizer::WidenVecOp_VECREDUCE_SEQ(SDNode *N) {
return DAG.getNode(Opc, dl, N->getValueType(0), AccOp, Op, Flags);
}
+SDValue DAGTypeLegalizer::WidenVecOp_VP_REDUCE(SDNode *N) {
+ assert(N->isVPOpcode() && "Expected VP opcode");
+
+ SDLoc dl(N);
+ SDValue Op = GetWidenedVector(N->getOperand(1));
+ SDValue Mask = GetWidenedMask(N->getOperand(2),
+ Op.getValueType().getVectorElementCount());
+
+ return DAG.getNode(N->getOpcode(), dl, N->getValueType(0),
+ {N->getOperand(0), Op, Mask, N->getOperand(3)},
+ N->getFlags());
+}
+
SDValue DAGTypeLegalizer::WidenVecOp_VSELECT(SDNode *N) {
// This only gets called in the case that the left and right inputs and
// result are of a legal odd vector type, and the condition is illegal i1 of
@@ -5779,6 +6103,8 @@ SDValue DAGTypeLegalizer::ModifyToType(SDValue InOp, EVT NVT,
EVT InVT = InOp.getValueType();
assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
"input and widen element type must match");
+ assert(!InVT.isScalableVector() && !NVT.isScalableVector() &&
+ "cannot modify scalable vectors in this way");
SDLoc dl(InOp);
// Check if InOp already has the right width.
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
index aec2cf38b400..403f34573899 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
@@ -286,7 +286,7 @@ void ScheduleDAGSDNodes::ClusterNeighboringLoads(SDNode *Node) {
// Cluster loads by adding MVT::Glue outputs and inputs. This also
// ensure they are scheduled in order of increasing addresses.
SDNode *Lead = Loads[0];
- SDValue InGlue = SDValue(nullptr, 0);
+ SDValue InGlue;
if (AddGlue(Lead, InGlue, true, DAG))
InGlue = SDValue(Lead, Lead->getNumValues() - 1);
for (unsigned I = 1, E = Loads.size(); I != E; ++I) {
@@ -1057,12 +1057,13 @@ EmitSchedule(MachineBasicBlock::iterator &InsertPos) {
"first terminator cannot be a debug value");
for (MachineInstr &MI : make_early_inc_range(
make_range(std::next(FirstTerm), InsertBB->end()))) {
+ // Only scan up to insertion point.
+ if (&MI == InsertPos)
+ break;
+
if (!MI.isDebugValue())
continue;
- if (&MI == InsertPos)
- InsertPos = std::prev(InsertPos->getIterator());
-
// The DBG_VALUE was referencing a value produced by a terminator. By
// moving the DBG_VALUE, the referenced value also needs invalidating.
MI.getOperand(0).ChangeToRegister(0, false);
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 2ae0d4df7b77..45f3005e8f57 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -373,31 +373,46 @@ ISD::NodeType ISD::getVecReduceBaseOpcode(unsigned VecReduceOpcode) {
llvm_unreachable("Expected VECREDUCE opcode");
case ISD::VECREDUCE_FADD:
case ISD::VECREDUCE_SEQ_FADD:
+ case ISD::VP_REDUCE_FADD:
+ case ISD::VP_REDUCE_SEQ_FADD:
return ISD::FADD;
case ISD::VECREDUCE_FMUL:
case ISD::VECREDUCE_SEQ_FMUL:
+ case ISD::VP_REDUCE_FMUL:
+ case ISD::VP_REDUCE_SEQ_FMUL:
return ISD::FMUL;
case ISD::VECREDUCE_ADD:
+ case ISD::VP_REDUCE_ADD:
return ISD::ADD;
case ISD::VECREDUCE_MUL:
+ case ISD::VP_REDUCE_MUL:
return ISD::MUL;
case ISD::VECREDUCE_AND:
+ case ISD::VP_REDUCE_AND:
return ISD::AND;
case ISD::VECREDUCE_OR:
+ case ISD::VP_REDUCE_OR:
return ISD::OR;
case ISD::VECREDUCE_XOR:
+ case ISD::VP_REDUCE_XOR:
return ISD::XOR;
case ISD::VECREDUCE_SMAX:
+ case ISD::VP_REDUCE_SMAX:
return ISD::SMAX;
case ISD::VECREDUCE_SMIN:
+ case ISD::VP_REDUCE_SMIN:
return ISD::SMIN;
case ISD::VECREDUCE_UMAX:
+ case ISD::VP_REDUCE_UMAX:
return ISD::UMAX;
case ISD::VECREDUCE_UMIN:
+ case ISD::VP_REDUCE_UMIN:
return ISD::UMIN;
case ISD::VECREDUCE_FMAX:
+ case ISD::VP_REDUCE_FMAX:
return ISD::FMAXNUM;
case ISD::VECREDUCE_FMIN:
+ case ISD::VP_REDUCE_FMIN:
return ISD::FMINNUM;
}
}
@@ -3066,7 +3081,8 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
case ISD::MUL: {
Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
- Known = KnownBits::mul(Known, Known2);
+ bool SelfMultiply = Op.getOperand(0) == Op.getOperand(1);
+ Known = KnownBits::mul(Known, Known2, SelfMultiply);
break;
}
case ISD::MULHU: {
@@ -3085,8 +3101,9 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");
Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
+ bool SelfMultiply = Op.getOperand(0) == Op.getOperand(1);
if (Op.getResNo() == 0)
- Known = KnownBits::mul(Known, Known2);
+ Known = KnownBits::mul(Known, Known2, SelfMultiply);
else
Known = KnownBits::mulhu(Known, Known2);
break;
@@ -3095,8 +3112,9 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");
Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
+ bool SelfMultiply = Op.getOperand(0) == Op.getOperand(1);
if (Op.getResNo() == 0)
- Known = KnownBits::mul(Known, Known2);
+ Known = KnownBits::mul(Known, Known2, SelfMultiply);
else
Known = KnownBits::mulhs(Known, Known2);
break;
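The new SelfMultiply flag tells KnownBits::mul that both factors are the same node, and a square carries extra bit-level information: for any integer x, x*x mod 4 is either 0 or 1, so bit 1 of the product is always clear. A quick standalone check of that arithmetic fact (how KnownBits::mul uses the flag beyond this is not shown in these hunks):

#include <cassert>
#include <cstdint>

int main() {
  // For every x, x*x mod 4 is 0 (x even) or 1 (x odd), so bit 1 of a square
  // is always zero; wrapping at 32 bits does not change the low two bits.
  for (uint32_t X = 0; X < 100000; ++X) {
    uint32_t Sq = X * X;
    assert((Sq & 2u) == 0u);
  }
  return 0;
}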
@@ -3363,6 +3381,8 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
case ISD::AssertAlign: {
unsigned LogOfAlign = Log2(cast<AssertAlignSDNode>(Op)->getAlign());
assert(LogOfAlign != 0);
+
+ // TODO: Should use maximum with source
// If a node is guaranteed to be aligned, set low zero bits accordingly as
// well as clearing one bits.
Known.Zero.setLowBits(LogOfAlign);
@@ -3584,6 +3604,12 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
Known = KnownBits::smin(Known, Known2);
break;
}
+ case ISD::FP_TO_UINT_SAT: {
+ // FP_TO_UINT_SAT produces an unsigned value that fits in the saturating VT.
+ EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
+ Known.Zero |= APInt::getBitsSetFrom(BitWidth, VT.getScalarSizeInBits());
+ break;
+ }
case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
if (Op.getResNo() == 1) {
// The boolean result conforms to getBooleanContents.
@@ -3860,6 +3886,10 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
break;
}
+ case ISD::FP_TO_SINT_SAT:
+ // FP_TO_SINT_SAT produces a signed value that fits in the saturating VT.
+ Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
+ return VTBits - Tmp + 1;
case ISD::SIGN_EXTEND:
Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits();
return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp;
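Both saturating-conversion cases rest on the same observation: the result is clamped into the range of the saturation type, so only its low SatBits bits can vary. FP_TO_UINT_SAT therefore has all bits above SatBits known zero, and FP_TO_SINT_SAT has at least VTBits - SatBits + 1 copies of the sign bit; for an i32 result saturated to i16 that is 32 - 16 + 1 = 17 sign bits. A small self-contained check of the signed case (numSignBits here is a plain reimplementation of the exact leading-sign-bit count, of which ComputeNumSignBits reports a lower bound):

#include <cassert>
#include <cstdint>

// Exact count of leading bits equal to the sign bit, including the sign bit
// itself.
static unsigned numSignBits(int32_t V) {
  uint32_t U = static_cast<uint32_t>(V);
  uint32_t Sign = U >> 31;
  unsigned N = 0;
  for (int Bit = 31; Bit >= 0; --Bit) {
    if (((U >> Bit) & 1) != Sign)
      break;
    ++N;
  }
  return N;
}

int main() {
  // Any i32 value an FP_TO_SINT_SAT with an i16 saturation type can produce
  // lies in [-32768, 32767] and so has at least 32 - 16 + 1 == 17 sign bits.
  for (int32_t V : {-32768, -1, 0, 1, 32767})
    assert(numSignBits(V) >= 17);
  return 0;
}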
@@ -4252,7 +4282,8 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
// scalar cases.
Type *CstTy = Cst->getType();
if (CstTy->isVectorTy() &&
- (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits()) {
+ (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits() &&
+ VTBits == CstTy->getScalarSizeInBits()) {
Tmp = VTBits;
for (unsigned i = 0; i != NumElts; ++i) {
if (!DemandedElts[i])
@@ -4294,31 +4325,18 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
// Finally, if we can prove that the top bits of the result are 0's or 1's,
// use this information.
KnownBits Known = computeKnownBits(Op, DemandedElts, Depth);
-
- APInt Mask;
- if (Known.isNonNegative()) { // sign bit is 0
- Mask = Known.Zero;
- } else if (Known.isNegative()) { // sign bit is 1;
- Mask = Known.One;
- } else {
- // Nothing known.
- return FirstAnswer;
- }
-
- // Okay, we know that the sign bit in Mask is set. Use CLO to determine
- // the number of identical bits in the top of the input value.
- Mask <<= Mask.getBitWidth()-VTBits;
- return std::max(FirstAnswer, Mask.countLeadingOnes());
+ return std::max(FirstAnswer, Known.countMinSignBits());
}
-unsigned SelectionDAG::ComputeMinSignedBits(SDValue Op, unsigned Depth) const {
+unsigned SelectionDAG::ComputeMaxSignificantBits(SDValue Op,
+ unsigned Depth) const {
unsigned SignBits = ComputeNumSignBits(Op, Depth);
return Op.getScalarValueSizeInBits() - SignBits + 1;
}
-unsigned SelectionDAG::ComputeMinSignedBits(SDValue Op,
- const APInt &DemandedElts,
- unsigned Depth) const {
+unsigned SelectionDAG::ComputeMaxSignificantBits(SDValue Op,
+ const APInt &DemandedElts,
+ unsigned Depth) const {
unsigned SignBits = ComputeNumSignBits(Op, DemandedElts, Depth);
return Op.getScalarValueSizeInBits() - SignBits + 1;
}
@@ -5102,6 +5120,9 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
"BSWAP types must be a multiple of 16 bits!");
if (OpOpcode == ISD::UNDEF)
return getUNDEF(VT);
+ // bswap(bswap(X)) -> X.
+ if (OpOpcode == ISD::BSWAP)
+ return Operand.getOperand(0);
break;
case ISD::BITREVERSE:
assert(VT.isInteger() && VT == Operand.getValueType() &&
@@ -5398,6 +5419,19 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
}
}
+ // Fold (mul step_vector(C0), C1) to (step_vector(C0 * C1)).
+ // (shl step_vector(C0), C1) -> (step_vector(C0 << C1))
+ if ((Opcode == ISD::MUL || Opcode == ISD::SHL) &&
+ Ops[0].getOpcode() == ISD::STEP_VECTOR) {
+ APInt RHSVal;
+ if (ISD::isConstantSplatVector(Ops[1].getNode(), RHSVal)) {
+ APInt NewStep = Opcode == ISD::MUL
+ ? Ops[0].getConstantOperandAPInt(0) * RHSVal
+ : Ops[0].getConstantOperandAPInt(0) << RHSVal;
+ return getStepVector(DL, VT, NewStep);
+ }
+ }
+
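The new fold simply distributes a splatted constant over the linear sequence: step_vector(C0) is <0, C0, 2*C0, ...>, so multiplying every lane by C1 gives step_vector(C0*C1), and a uniform left shift by C1 scales the step to C0 << C1. A small fixed-length check of the MUL case using plain integers rather than DAG nodes:

#include <cassert>
#include <cstdint>
#include <vector>

// step_vector(Step) over NumElts lanes: <0, Step, 2*Step, ...>.
static std::vector<int64_t> stepVector(int64_t Step, unsigned NumElts) {
  std::vector<int64_t> V(NumElts);
  for (unsigned I = 0; I < NumElts; ++I)
    V[I] = Step * static_cast<int64_t>(I);
  return V;
}

int main() {
  const int64_t C0 = 2, C1 = 3;
  const unsigned NumElts = 8;

  // (mul step_vector(C0), splat(C1)) lane by lane...
  std::vector<int64_t> Mul = stepVector(C0, NumElts);
  for (int64_t &Lane : Mul)
    Lane *= C1;

  // ...equals step_vector(C0 * C1).
  assert(Mul == stepVector(C0 * C1, NumElts));
  return 0;
}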
auto IsScalarOrSameVectorSize = [NumElts](const SDValue &Op) {
return !Op.getValueType().isVector() ||
Op.getValueType().getVectorElementCount() == NumElts;
@@ -5595,22 +5629,24 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
assert(N1.getOpcode() != ISD::DELETED_NODE &&
N2.getOpcode() != ISD::DELETED_NODE &&
"Operand is DELETED_NODE!");
- ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
- ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
- ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
- ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
-
// Canonicalize constant to RHS if commutative.
if (TLI->isCommutativeBinOp(Opcode)) {
- if (N1C && !N2C) {
- std::swap(N1C, N2C);
+ bool IsN1C = isConstantIntBuildVectorOrConstantInt(N1);
+ bool IsN2C = isConstantIntBuildVectorOrConstantInt(N2);
+ bool IsN1CFP = isConstantFPBuildVectorOrConstantFP(N1);
+ bool IsN2CFP = isConstantFPBuildVectorOrConstantFP(N2);
+ if ((IsN1C && !IsN2C) || (IsN1CFP && !IsN2CFP))
std::swap(N1, N2);
- } else if (N1CFP && !N2CFP) {
- std::swap(N1CFP, N2CFP);
- std::swap(N1, N2);
- }
}
+ auto *N1C = dyn_cast<ConstantSDNode>(N1);
+ auto *N2C = dyn_cast<ConstantSDNode>(N2);
+
+ // Don't allow undefs in vector splats - we might be returning N2 when folding
+ // to zero etc.
+ ConstantSDNode *N2CV =
+ isConstOrConstSplat(N2, /*AllowUndefs*/ false, /*AllowTruncation*/ true);
+
switch (Opcode) {
default: break;
case ISD::TokenFactor:
@@ -5640,9 +5676,9 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
N1.getValueType() == VT && "Binary operator types must match!");
// (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
// worth handling here.
- if (N2C && N2C->isZero())
+ if (N2CV && N2CV->isZero())
return N2;
- if (N2C && N2C->isAllOnes()) // X & -1 -> X
+ if (N2CV && N2CV->isAllOnes()) // X & -1 -> X
return N1;
break;
case ISD::OR:
@@ -5654,7 +5690,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
N1.getValueType() == VT && "Binary operator types must match!");
// (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
// it's worth handling here.
- if (N2C && N2C->isZero())
+ if (N2CV && N2CV->isZero())
return N1;
if ((Opcode == ISD::ADD || Opcode == ISD::SUB) && VT.isVector() &&
VT.getVectorElementType() == MVT::i1)
@@ -5760,7 +5796,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
// size of the value, the shift/rotate count is guaranteed to be zero.
if (VT == MVT::i1)
return N1;
- if (N2C && N2C->isZero())
+ if (N2CV && N2CV->isZero())
return N1;
break;
case ISD::FP_ROUND:
@@ -6358,7 +6394,7 @@ static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG,
Type *Ty = VT.getTypeForEVT(*DAG.getContext());
if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
return DAG.getConstant(Val, dl, VT);
- return SDValue(nullptr, 0);
+ return SDValue();
}
SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, TypeSize Offset,
@@ -7697,23 +7733,6 @@ SDValue SelectionDAG::getLoadVP(ISD::MemIndexedMode AM,
SDValue Offset, SDValue Mask, SDValue EVL,
EVT MemVT, MachineMemOperand *MMO,
bool IsExpanding) {
- if (VT == MemVT) {
- ExtType = ISD::NON_EXTLOAD;
- } else if (ExtType == ISD::NON_EXTLOAD) {
- assert(VT == MemVT && "Non-extending load from different memory type!");
- } else {
- // Extending load.
- assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
- "Should only be an extending load, not truncating!");
- assert(VT.isInteger() == MemVT.isInteger() &&
- "Cannot convert from FP to Int or Int -> FP!");
- assert(VT.isVector() == MemVT.isVector() &&
- "Cannot use an ext load to convert to or from a vector!");
- assert((!VT.isVector() ||
- VT.getVectorElementCount() == MemVT.getVectorElementCount()) &&
- "Cannot use an ext load to change the number of vector elements!");
- }
-
bool Indexed = AM != ISD::UNINDEXED;
assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
@@ -7802,48 +7821,29 @@ SDValue SelectionDAG::getIndexedLoadVP(SDValue OrigLoad, const SDLoc &dl,
}
SDValue SelectionDAG::getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val,
- SDValue Ptr, SDValue Mask, SDValue EVL,
- MachinePointerInfo PtrInfo, Align Alignment,
- MachineMemOperand::Flags MMOFlags,
- const AAMDNodes &AAInfo, bool IsCompressing) {
+ SDValue Ptr, SDValue Offset, SDValue Mask,
+ SDValue EVL, EVT MemVT, MachineMemOperand *MMO,
+ ISD::MemIndexedMode AM, bool IsTruncating,
+ bool IsCompressing) {
assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
-
- MMOFlags |= MachineMemOperand::MOStore;
- assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
-
- if (PtrInfo.V.isNull())
- PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
-
- MachineFunction &MF = getMachineFunction();
- uint64_t Size =
- MemoryLocation::getSizeOrUnknown(Val.getValueType().getStoreSize());
- MachineMemOperand *MMO =
- MF.getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo);
- return getStoreVP(Chain, dl, Val, Ptr, Mask, EVL, MMO, IsCompressing);
-}
-
-SDValue SelectionDAG::getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val,
- SDValue Ptr, SDValue Mask, SDValue EVL,
- MachineMemOperand *MMO, bool IsCompressing) {
- assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
- EVT VT = Val.getValueType();
- SDVTList VTs = getVTList(MVT::Other);
- SDValue Undef = getUNDEF(Ptr.getValueType());
- SDValue Ops[] = {Chain, Val, Ptr, Undef, Mask, EVL};
+ bool Indexed = AM != ISD::UNINDEXED;
+ assert((Indexed || Offset.isUndef()) && "Unindexed vp_store with an offset!");
+ SDVTList VTs = Indexed ? getVTList(Ptr.getValueType(), MVT::Other)
+ : getVTList(MVT::Other);
+ SDValue Ops[] = {Chain, Val, Ptr, Offset, Mask, EVL};
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::VP_STORE, VTs, Ops);
- ID.AddInteger(VT.getRawBits());
+ ID.AddInteger(MemVT.getRawBits());
ID.AddInteger(getSyntheticNodeSubclassData<VPStoreSDNode>(
- dl.getIROrder(), VTs, ISD::UNINDEXED, false, IsCompressing, VT, MMO));
+ dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
cast<VPStoreSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
}
- auto *N =
- newSDNode<VPStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
- ISD::UNINDEXED, false, IsCompressing, VT, MMO);
+ auto *N = newSDNode<VPStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
+ IsTruncating, IsCompressing, MemVT, MMO);
createOperands(N, Ops);
CSEMap.InsertNode(N, IP);
@@ -7885,7 +7885,9 @@ SDValue SelectionDAG::getTruncStoreVP(SDValue Chain, const SDLoc &dl,
assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
if (VT == SVT)
- return getStoreVP(Chain, dl, Val, Ptr, Mask, EVL, MMO, IsCompressing);
+ return getStoreVP(Chain, dl, Val, Ptr, getUNDEF(Ptr.getValueType()), Mask,
+ EVL, VT, MMO, ISD::UNINDEXED,
+ /*IsTruncating*/ false, IsCompressing);
assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
"Should only be a truncating store, not extending!");
@@ -10661,6 +10663,23 @@ SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
return std::make_pair(Lo, Hi);
}
+std::pair<SDValue, SDValue> SelectionDAG::SplitEVL(SDValue N, EVT VecVT,
+ const SDLoc &DL) {
+ // Split the vector length parameter.
+ // %evl -> umin(%evl, %halfnumelts) and usubsat(%evl - %halfnumelts).
+ EVT VT = N.getValueType();
+ assert(VecVT.getVectorElementCount().isKnownEven() &&
+ "Expecting the mask to be an evenly-sized vector");
+ unsigned HalfMinNumElts = VecVT.getVectorMinNumElements() / 2;
+ SDValue HalfNumElts =
+ VecVT.isFixedLengthVector()
+ ? getConstant(HalfMinNumElts, DL, VT)
+ : getVScale(DL, VT, APInt(VT.getScalarSizeInBits(), HalfMinNumElts));
+ SDValue Lo = getNode(ISD::UMIN, DL, VT, N, HalfNumElts);
+ SDValue Hi = getNode(ISD::USUBSAT, DL, VT, N, HalfNumElts);
+ return std::make_pair(Lo, Hi);
+}
+
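Splitting the explicit vector length is pure lane-count arithmetic: the low half is active for umin(EVL, HalfNumElts) lanes and the high half for whatever remains, clamped at zero by usubsat; for a fixed v8 vector, EVL = 5 becomes 4 and 1, EVL = 3 becomes 3 and 0, while the scalable case substitutes vscale * HalfMinNumElts for the constant. A tiny check of the fixed-length arithmetic:

#include <algorithm>
#include <cassert>

int main() {
  const unsigned HalfNumElts = 4; // half of a fixed v8 vector
  for (unsigned EVL = 0; EVL <= 8; ++EVL) {
    unsigned Lo = std::min(EVL, HalfNumElts);                // ISD::UMIN
    unsigned Hi = EVL > HalfNumElts ? EVL - HalfNumElts : 0; // ISD::USUBSAT
    assert(Lo + Hi == EVL);
    assert(Lo <= HalfNumElts && Hi <= HalfNumElts);
  }
  return 0;
}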
/// Widen the vector up to the next power of two using INSERT_SUBVECTOR.
SDValue SelectionDAG::WidenVector(const SDValue &N, const SDLoc &DL) {
EVT VT = N.getValueType();
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 63cd723cf6da..41460f78e1c2 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -1683,6 +1683,8 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
if (const MetadataAsValue *MD = dyn_cast<MetadataAsValue>(V)) {
return DAG.getMDNode(cast<MDNode>(MD->getMetadata()));
}
+ if (const auto *BB = dyn_cast<BasicBlock>(V))
+ return DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
llvm_unreachable("Can't get register for value!");
}
@@ -4846,10 +4848,7 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
}
if (!I.getType()->isVoidTy()) {
- if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
- EVT VT = TLI.getValueType(DAG.getDataLayout(), PTy);
- Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result);
- } else
+ if (!isa<VectorType>(I.getType()))
Result = lowerRangeToAssertZExt(DAG, I, Result);
MaybeAlign Alignment = I.getRetAlign();
@@ -7327,8 +7326,6 @@ void SelectionDAGBuilder::visitVPLoadGather(const VPIntrinsic &VPIntrin, EVT VT,
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
Value *PtrOperand = VPIntrin.getArgOperand(0);
MaybeAlign Alignment = VPIntrin.getPointerAlignment();
- if (!Alignment)
- Alignment = DAG.getEVTAlign(VT);
AAMDNodes AAInfo = VPIntrin.getAAMetadata();
const MDNode *Ranges = VPIntrin.getMetadata(LLVMContext::MD_range);
SDValue LD;
@@ -7336,6 +7333,8 @@ void SelectionDAGBuilder::visitVPLoadGather(const VPIntrinsic &VPIntrin, EVT VT,
if (!IsGather) {
// Do not serialize variable-length loads of constant memory with
// anything.
+ if (!Alignment)
+ Alignment = DAG.getEVTAlign(VT);
MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
AddToChain = !AA || !AA->pointsToConstantMemory(ML);
SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
@@ -7345,6 +7344,8 @@ void SelectionDAGBuilder::visitVPLoadGather(const VPIntrinsic &VPIntrin, EVT VT,
LD = DAG.getLoadVP(VT, DL, InChain, OpValues[0], OpValues[1], OpValues[2],
MMO, false /*IsExpanding */);
} else {
+ if (!Alignment)
+ Alignment = DAG.getEVTAlign(VT.getScalarType());
unsigned AS =
PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
@@ -7385,18 +7386,22 @@ void SelectionDAGBuilder::visitVPStoreScatter(const VPIntrinsic &VPIntrin,
Value *PtrOperand = VPIntrin.getArgOperand(1);
EVT VT = OpValues[0].getValueType();
MaybeAlign Alignment = VPIntrin.getPointerAlignment();
- if (!Alignment)
- Alignment = DAG.getEVTAlign(VT);
AAMDNodes AAInfo = VPIntrin.getAAMetadata();
SDValue ST;
if (!IsScatter) {
+ if (!Alignment)
+ Alignment = DAG.getEVTAlign(VT);
+ SDValue Ptr = OpValues[1];
+ SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
MemoryLocation::UnknownSize, *Alignment, AAInfo);
- ST =
- DAG.getStoreVP(getMemoryRoot(), DL, OpValues[0], OpValues[1],
- OpValues[2], OpValues[3], MMO, false /* IsTruncating */);
+ ST = DAG.getStoreVP(getMemoryRoot(), DL, OpValues[0], Ptr, Offset,
+ OpValues[2], OpValues[3], VT, MMO, ISD::UNINDEXED,
+ /* IsTruncating */ false, /*IsCompressing*/ false);
} else {
+ if (!Alignment)
+ Alignment = DAG.getEVTAlign(VT.getScalarType());
unsigned AS =
PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
@@ -8250,7 +8255,8 @@ public:
/// corresponds to. If there is no Value* for this operand, it returns
/// MVT::Other.
EVT getCallOperandValEVT(LLVMContext &Context, const TargetLowering &TLI,
- const DataLayout &DL) const {
+ const DataLayout &DL,
+ llvm::Type *ParamElemType) const {
if (!CallOperandVal) return MVT::Other;
if (isa<BasicBlock>(CallOperandVal))
@@ -8262,10 +8268,8 @@ public:
// If this is an indirect operand, the operand is a pointer to the
// accessed type.
if (isIndirect) {
- PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
- if (!PtrTy)
- report_fatal_error("Indirect operand for inline asm not a pointer!");
- OpTy = PtrTy->getElementType();
+ OpTy = ParamElemType;
+      assert(OpTy && "Indirect operand must have elementtype attribute");
}
// Look for vector wrapped in a struct. e.g. { <16 x i8> }.
@@ -8559,37 +8563,19 @@ void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call,
unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
unsigned ResNo = 0; // ResNo - The result number of the next output.
- unsigned NumMatchingOps = 0;
for (auto &T : TargetConstraints) {
ConstraintOperands.push_back(SDISelAsmOperandInfo(T));
SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
// Compute the value type for each operand.
- if (OpInfo.Type == InlineAsm::isInput ||
- (OpInfo.Type == InlineAsm::isOutput && OpInfo.isIndirect)) {
- OpInfo.CallOperandVal = Call.getArgOperand(ArgNo++);
-
- // Process the call argument. BasicBlocks are labels, currently appearing
- // only in asm's.
- if (isa<CallBrInst>(Call) &&
- ArgNo - 1 >= (cast<CallBrInst>(&Call)->arg_size() -
- cast<CallBrInst>(&Call)->getNumIndirectDests() -
- NumMatchingOps) &&
- (NumMatchingOps == 0 ||
- ArgNo - 1 <
- (cast<CallBrInst>(&Call)->arg_size() - NumMatchingOps))) {
- const auto *BA = cast<BlockAddress>(OpInfo.CallOperandVal);
- EVT VT = TLI.getValueType(DAG.getDataLayout(), BA->getType(), true);
- OpInfo.CallOperand = DAG.getTargetBlockAddress(BA, VT);
- } else if (const auto *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
- OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
- } else {
- OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
- }
-
+ if (OpInfo.hasArg()) {
+ OpInfo.CallOperandVal = Call.getArgOperand(ArgNo);
+ OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
+ Type *ParamElemTy = Call.getAttributes().getParamElementType(ArgNo);
EVT VT = OpInfo.getCallOperandValEVT(*DAG.getContext(), TLI,
- DAG.getDataLayout());
+ DAG.getDataLayout(), ParamElemTy);
OpInfo.ConstraintVT = VT.isSimple() ? VT.getSimpleVT() : MVT::Other;
+ ArgNo++;
} else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
// The return value of the call is this value. As such, there is no
// corresponding argument.
@@ -8607,9 +8593,6 @@ void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call,
OpInfo.ConstraintVT = MVT::Other;
}
- if (OpInfo.hasMatchingInput())
- ++NumMatchingOps;
-
if (!HasSideEffect)
HasSideEffect = OpInfo.hasMemory(TLI);
@@ -11246,12 +11229,6 @@ void SelectionDAGBuilder::visitVectorSplice(const CallInst &I) {
unsigned NumElts = VT.getVectorNumElements();
- if ((-Imm > NumElts) || (Imm >= NumElts)) {
- // Result is undefined if immediate is out-of-bounds.
- setValue(&I, DAG.getUNDEF(VT));
- return;
- }
-
uint64_t Idx = (NumElts + Imm) % NumElts;
// Use VECTOR_SHUFFLE to maintain original behaviour for fixed-length vectors.
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index e6b06ab93d6b..a98c21f16c71 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -60,7 +60,7 @@ bool TargetLowering::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
// Conservatively require the attributes of the call to match those of
// the return. Ignore following attributes because they don't affect the
// call sequence.
- AttrBuilder CallerAttrs(F.getAttributes(), AttributeList::ReturnIndex);
+ AttrBuilder CallerAttrs(F.getContext(), F.getAttributes().getRetAttrs());
for (const auto &Attr : {Attribute::Alignment, Attribute::Dereferenceable,
Attribute::DereferenceableOrNull, Attribute::NoAlias,
Attribute::NonNull})
@@ -1806,6 +1806,31 @@ bool TargetLowering::SimplifyDemandedBits(
}
case ISD::BSWAP: {
SDValue Src = Op.getOperand(0);
+
+ // If the only bits demanded come from one byte of the bswap result,
+ // just shift the input byte into position to eliminate the bswap.
+ unsigned NLZ = DemandedBits.countLeadingZeros();
+ unsigned NTZ = DemandedBits.countTrailingZeros();
+
+ // Round NTZ down to the next byte. If we have 11 trailing zeros, then
+ // we need all the bits down to bit 8. Likewise, round NLZ. If we
+ // have 14 leading zeros, round to 8.
+ NLZ = alignDown(NLZ, 8);
+ NTZ = alignDown(NTZ, 8);
+ // If we need exactly one byte, we can do this transformation.
+ if (BitWidth - NLZ - NTZ == 8) {
+ // Replace this with either a left or right shift to get the byte into
+ // the right place.
+ unsigned ShiftOpcode = NLZ > NTZ ? ISD::SRL : ISD::SHL;
+ if (!TLO.LegalOperations() || isOperationLegal(ShiftOpcode, VT)) {
+ EVT ShiftAmtTy = getShiftAmountTy(VT, DL);
+ unsigned ShiftAmount = NLZ > NTZ ? NLZ - NTZ : NTZ - NLZ;
+ SDValue ShAmt = TLO.DAG.getConstant(ShiftAmount, dl, ShiftAmtTy);
+ SDValue NewOp = TLO.DAG.getNode(ShiftOpcode, dl, VT, Src, ShAmt);
+ return TLO.CombineTo(Op, NewOp);
+ }
+ }
+
APInt DemandedSrcBits = DemandedBits.byteSwap();
if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,
Depth + 1))
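The new BSWAP simplification says: if every demanded bit of bswap(x) comes from a single byte, that byte can be produced with one shift of x instead of a full byte swap. Worked through for i32 with DemandedBits = 0x00FF0000: NLZ = 8 and NTZ = 16 (both already byte-aligned), 32 - 8 - 16 = 8, and since NLZ < NTZ the code emits x << (NTZ - NLZ) = x << 8, which places x's bits [15:8], the byte bswap would deliver there, into bits [23:16]. A standalone check of that equivalence on the demanded byte:

#include <cassert>
#include <cstdint>

static uint32_t bswap32(uint32_t X) {
  return (X >> 24) | ((X >> 8) & 0x0000FF00u) | ((X << 8) & 0x00FF0000u) |
         (X << 24);
}

int main() {
  const uint32_t Demanded = 0x00FF0000u; // only byte 2 of the bswap result
  for (uint32_t X : {0x12345678u, 0xDEADBEEFu, 0u, 0xFFFFFFFFu, 0x00008000u})
    // bswap(X) and X << 8 agree on every demanded bit.
    assert((bswap32(X) & Demanded) == ((X << 8) & Demanded));
  return 0;
}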
@@ -1833,19 +1858,15 @@ bool TargetLowering::SimplifyDemandedBits(
// If we only care about the highest bit, don't bother shifting right.
if (DemandedBits.isSignMask()) {
unsigned MinSignedBits =
- TLO.DAG.ComputeMinSignedBits(Op0, DemandedElts, Depth + 1);
+ TLO.DAG.ComputeMaxSignificantBits(Op0, DemandedElts, Depth + 1);
bool AlreadySignExtended = ExVTBits >= MinSignedBits;
// However if the input is already sign extended we expect the sign
// extension to be dropped altogether later and do not simplify.
if (!AlreadySignExtended) {
// Compute the correct shift amount type, which must be getShiftAmountTy
// for scalar types after legalization.
- EVT ShiftAmtTy = VT;
- if (TLO.LegalTypes() && !ShiftAmtTy.isVector())
- ShiftAmtTy = getShiftAmountTy(ShiftAmtTy, DL);
-
- SDValue ShiftAmt =
- TLO.DAG.getConstant(BitWidth - ExVTBits, dl, ShiftAmtTy);
+ SDValue ShiftAmt = TLO.DAG.getConstant(BitWidth - ExVTBits, dl,
+ getShiftAmountTy(VT, DL));
return TLO.CombineTo(Op,
TLO.DAG.getNode(ISD::SHL, dl, VT, Op0, ShiftAmt));
}
@@ -3233,17 +3254,29 @@ bool TargetLowering::isExtendedTrueVal(const ConstantSDNode *N, EVT VT,
SDValue TargetLowering::foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1,
ISD::CondCode Cond, const SDLoc &DL,
DAGCombinerInfo &DCI) const {
- // Match these patterns in any of their permutations:
- // (X & Y) == Y
- // (X & Y) != Y
if (N1.getOpcode() == ISD::AND && N0.getOpcode() != ISD::AND)
std::swap(N0, N1);
+ SelectionDAG &DAG = DCI.DAG;
EVT OpVT = N0.getValueType();
if (N0.getOpcode() != ISD::AND || !OpVT.isInteger() ||
(Cond != ISD::SETEQ && Cond != ISD::SETNE))
return SDValue();
+ // (X & Y) != 0 --> zextOrTrunc(X & Y)
+ // iff everything but LSB is known zero:
+ if (Cond == ISD::SETNE && isNullConstant(N1) &&
+ (getBooleanContents(OpVT) == TargetLowering::UndefinedBooleanContent ||
+ getBooleanContents(OpVT) == TargetLowering::ZeroOrOneBooleanContent)) {
+ unsigned NumEltBits = OpVT.getScalarSizeInBits();
+ APInt UpperBits = APInt::getHighBitsSet(NumEltBits, NumEltBits - 1);
+ if (DAG.MaskedValueIsZero(N0, UpperBits))
+ return DAG.getBoolExtOrTrunc(N0, DL, VT, OpVT);
+ }
+
+ // Match these patterns in any of their permutations:
+ // (X & Y) == Y
+ // (X & Y) != Y
SDValue X, Y;
if (N0.getOperand(0) == N1) {
X = N0.getOperand(1);
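The added early-out uses a simple fact: when every bit of X & Y above the LSB is known zero, the value is already 0 or 1, so comparing it against zero with SETNE yields exactly the same 0/1 value and, for zero-or-one (or undefined) boolean contents, the whole setcc can be replaced by a zero-extend or truncate of X & Y itself. A scalar illustration:

#include <cassert>
#include <cstdint>

int main() {
  // Y forces every bit of (X & Y) above the LSB to zero, which is the
  // MaskedValueIsZero(N0, UpperBits) precondition of the fold.
  const uint32_t Y = 1;
  for (uint32_t X : {0u, 1u, 2u, 3u, 0xABCDu, 0xFFFFFFFFu}) {
    uint32_t And = X & Y;                  // known to be 0 or 1
    uint32_t SetNE = (And != 0) ? 1u : 0u; // (X & Y) != 0 as a 0/1 boolean
    assert(SetNE == And);                  // ...is just (X & Y) itself
  }
  return 0;
}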
@@ -3255,7 +3288,6 @@ SDValue TargetLowering::foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1,
return SDValue();
}
- SelectionDAG &DAG = DCI.DAG;
SDValue Zero = DAG.getConstant(0, DL, OpVT);
if (DAG.isKnownToBeAPowerOfTwo(Y)) {
// Simplify X & Y == Y to X & Y != 0 if Y has exactly one bit set.
@@ -3678,9 +3710,7 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
}
// Figure out how many bits we need to preserve this constant.
- unsigned ReqdBits = Signed ?
- C1.getBitWidth() - C1.getNumSignBits() + 1 :
- C1.getActiveBits();
+ unsigned ReqdBits = Signed ? C1.getMinSignedBits() : C1.getActiveBits();
// Make sure we're not losing bits from the constant.
if (MinBits > 0 &&
@@ -4594,20 +4624,12 @@ void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
char ConstraintLetter = Constraint[0];
switch (ConstraintLetter) {
default: break;
- case 'X': // Allows any operand; labels (basic block) use this.
- if (Op.getOpcode() == ISD::BasicBlock ||
- Op.getOpcode() == ISD::TargetBlockAddress) {
- Ops.push_back(Op);
- return;
- }
- LLVM_FALLTHROUGH;
+ case 'X': // Allows any operand
case 'i': // Simple Integer or Relocatable Constant
case 'n': // Simple Integer
case 's': { // Relocatable Constant
- GlobalAddressSDNode *GA;
ConstantSDNode *C;
- BlockAddressSDNode *BA;
uint64_t Offset = 0;
// Match (GA) or (C) or (GA+C) or (GA-C) or ((GA+C)+C) or (((GA+C)+C)+C),
@@ -4615,13 +4637,7 @@ void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
// SelectionDAG::FoldSymbolOffset because it expects the GA to be accessible
// while in this case the GA may be furthest from the root node which is
// likely an ISD::ADD.
- while (1) {
- if ((GA = dyn_cast<GlobalAddressSDNode>(Op)) && ConstraintLetter != 'n') {
- Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
- GA->getValueType(0),
- Offset + GA->getOffset()));
- return;
- }
+ while (true) {
if ((C = dyn_cast<ConstantSDNode>(Op)) && ConstraintLetter != 's') {
// gcc prints these as sign extended. Sign extend value to 64 bits
// now; without this it would get ZExt'd later in
@@ -4636,11 +4652,23 @@ void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
DAG.getTargetConstant(Offset + ExtVal, SDLoc(C), MVT::i64));
return;
}
- if ((BA = dyn_cast<BlockAddressSDNode>(Op)) && ConstraintLetter != 'n') {
- Ops.push_back(DAG.getTargetBlockAddress(
- BA->getBlockAddress(), BA->getValueType(0),
- Offset + BA->getOffset(), BA->getTargetFlags()));
- return;
+ if (ConstraintLetter != 'n') {
+ if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
+ Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
+ GA->getValueType(0),
+ Offset + GA->getOffset()));
+ return;
+ }
+ if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
+ Ops.push_back(DAG.getTargetBlockAddress(
+ BA->getBlockAddress(), BA->getValueType(0),
+ Offset + BA->getOffset(), BA->getTargetFlags()));
+ return;
+ }
+ if (isa<BasicBlockSDNode>(Op)) {
+ Ops.push_back(Op);
+ return;
+ }
}
const unsigned OpCode = Op.getOpcode();
if (OpCode == ISD::ADD || OpCode == ISD::SUB) {
@@ -4753,7 +4781,7 @@ TargetLowering::ParseConstraints(const DataLayout &DL,
case InlineAsm::isOutput:
// Indirect outputs just consume an argument.
if (OpInfo.isIndirect) {
- OpInfo.CallOperandVal = Call.getArgOperand(ArgNo++);
+ OpInfo.CallOperandVal = Call.getArgOperand(ArgNo);
break;
}
@@ -4771,7 +4799,7 @@ TargetLowering::ParseConstraints(const DataLayout &DL,
++ResNo;
break;
case InlineAsm::isInput:
- OpInfo.CallOperandVal = Call.getArgOperand(ArgNo++);
+ OpInfo.CallOperandVal = Call.getArgOperand(ArgNo);
break;
case InlineAsm::isClobber:
// Nothing to do.
@@ -4781,10 +4809,8 @@ TargetLowering::ParseConstraints(const DataLayout &DL,
if (OpInfo.CallOperandVal) {
llvm::Type *OpTy = OpInfo.CallOperandVal->getType();
if (OpInfo.isIndirect) {
- llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
- if (!PtrTy)
- report_fatal_error("Indirect operand for inline asm not a pointer!");
- OpTy = PtrTy->getElementType();
+ OpTy = Call.getAttributes().getParamElementType(ArgNo);
+      assert(OpTy && "Indirect operand must have elementtype attribute");
}
// Look for vector wrapped in a struct. e.g. { <16 x i8> }.
@@ -4814,6 +4840,8 @@ TargetLowering::ParseConstraints(const DataLayout &DL,
} else {
OpInfo.ConstraintVT = MVT::getVT(OpTy, true);
}
+
+ ArgNo++;
}
}
@@ -5087,17 +5115,18 @@ void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
// 'X' matches anything.
if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
- // Labels and constants are handled elsewhere ('X' is the only thing
- // that matches labels). For Functions, the type here is the type of
- // the result, which is not what we want to look at; leave them alone.
+ // Constants are handled elsewhere. For Functions, the type here is the
+ // type of the result, which is not what we want to look at; leave them
+ // alone.
Value *v = OpInfo.CallOperandVal;
- if (isa<BasicBlock>(v) || isa<ConstantInt>(v) || isa<Function>(v)) {
- OpInfo.CallOperandVal = v;
+ if (isa<ConstantInt>(v) || isa<Function>(v)) {
return;
}
- if (Op.getNode() && Op.getOpcode() == ISD::TargetBlockAddress)
+ if (isa<BasicBlock>(v) || isa<BlockAddress>(v)) {
+ OpInfo.ConstraintCode = "i";
return;
+ }
// Otherwise, try to resolve it to something we know about by looking at
// the actual operand type.
@@ -6438,12 +6467,6 @@ bool TargetLowering::expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl,
unsigned ShiftAmount = OuterBitSize - InnerBitSize;
EVT ShiftAmountTy = getShiftAmountTy(VT, DAG.getDataLayout());
- if (APInt::getMaxValue(ShiftAmountTy.getSizeInBits()).ult(ShiftAmount)) {
- // FIXME getShiftAmountTy does not always return a sensible result when VT
- // is an illegal type, and so the type may be too small to fit the shift
- // amount. Override it with i32. The shift will have to be legalized.
- ShiftAmountTy = MVT::i32;
- }
SDValue Shift = DAG.getConstant(ShiftAmount, dl, ShiftAmountTy);
if (!LH.getNode() && !RH.getNode() &&
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/ShrinkWrap.cpp b/contrib/llvm-project/llvm/lib/CodeGen/ShrinkWrap.cpp
index f89069e9f728..f6ad2b50abcd 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/ShrinkWrap.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/ShrinkWrap.cpp
@@ -273,6 +273,8 @@ bool ShrinkWrap::useOrDefCSROrFI(const MachineInstr &MI,
LLVM_DEBUG(dbgs() << "Frame instruction: " << MI << '\n');
return true;
}
+ const MachineFunction *MF = MI.getParent()->getParent();
+ const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
for (const MachineOperand &MO : MI.operands()) {
bool UseOrDefCSR = false;
if (MO.isReg()) {
@@ -288,8 +290,14 @@ bool ShrinkWrap::useOrDefCSROrFI(const MachineInstr &MI,
// separately. An SP mentioned by a call instruction, we can ignore,
// though, as it's harmless and we do not want to effectively disable tail
// calls by forcing the restore point to post-dominate them.
- UseOrDefCSR = (!MI.isCall() && PhysReg == SP) ||
- RCI.getLastCalleeSavedAlias(PhysReg);
+ // PPC's LR is also not normally described as a callee-saved register in
+ // calling convention definitions, so we need to watch for it, too. An LR
+ // mentioned implicitly by a return (or "branch to link register")
+ // instruction we can ignore, otherwise we may pessimize shrinkwrapping.
+ UseOrDefCSR =
+ (!MI.isCall() && PhysReg == SP) ||
+ RCI.getLastCalleeSavedAlias(PhysReg) ||
+ (!MI.isReturn() && TRI->isNonallocatableRegisterCalleeSave(PhysReg));
} else if (MO.isRegMask()) {
// Check if this regmask clobbers any of the CSRs.
for (unsigned Reg : getCurrentCSRs(RS)) {
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/TargetLoweringBase.cpp b/contrib/llvm-project/llvm/lib/CodeGen/TargetLoweringBase.cpp
index f0d342d26cc4..f69e50eaa0ca 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -715,6 +715,7 @@ TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
SchedPreferenceInfo = Sched::ILP;
GatherAllAliasesMaxDepth = 18;
IsStrictFPEnabled = DisableStrictNodeMutation;
+ MaxBytesForAlignment = 0;
// TODO: the default will be switched to 0 in the next commit, along
// with the Target-specific changes necessary.
MaxAtomicSizeInBitsSupported = 1024;
@@ -2040,6 +2041,11 @@ Align TargetLoweringBase::getPrefLoopAlignment(MachineLoop *ML) const {
return PrefLoopAlignment;
}
+unsigned TargetLoweringBase::getMaxPermittedBytesForAlignment(
+ MachineBasicBlock *MBB) const {
+ return MaxBytesForAlignment;
+}
+
//===----------------------------------------------------------------------===//
// Reciprocal Estimates
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/contrib/llvm-project/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
index d1c2cdeb133b..ce350034d073 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
@@ -108,8 +108,7 @@ static void GetObjCImageInfo(Module &M, unsigned &Version, unsigned &Flags,
// ELF
//===----------------------------------------------------------------------===//
-TargetLoweringObjectFileELF::TargetLoweringObjectFileELF()
- : TargetLoweringObjectFile() {
+TargetLoweringObjectFileELF::TargetLoweringObjectFileELF() {
SupportDSOLocalEquivalentLowering = true;
}
@@ -478,6 +477,11 @@ static SectionKind getELFKindForNamedSection(StringRef Name, SectionKind K) {
return K;
}
+static bool hasPrefix(StringRef SectionName, StringRef Prefix) {
+ return SectionName.consume_front(Prefix) &&
+ (SectionName.empty() || SectionName[0] == '.');
+}
+
static unsigned getELFSectionType(StringRef Name, SectionKind K) {
// Use SHT_NOTE for section whose name starts with ".note" to allow
// emitting ELF notes from C variable declaration.
@@ -485,13 +489,13 @@ static unsigned getELFSectionType(StringRef Name, SectionKind K) {
if (Name.startswith(".note"))
return ELF::SHT_NOTE;
- if (Name == ".init_array")
+ if (hasPrefix(Name, ".init_array"))
return ELF::SHT_INIT_ARRAY;
- if (Name == ".fini_array")
+ if (hasPrefix(Name, ".fini_array"))
return ELF::SHT_FINI_ARRAY;
- if (Name == ".preinit_array")
+ if (hasPrefix(Name, ".preinit_array"))
return ELF::SHT_PREINIT_ARRAY;
if (K.isBSS() || K.isThreadBSS())
@@ -1139,8 +1143,7 @@ TargetLoweringObjectFileELF::InitializeELF(bool UseInitArray_) {
// MachO
//===----------------------------------------------------------------------===//
-TargetLoweringObjectFileMachO::TargetLoweringObjectFileMachO()
- : TargetLoweringObjectFile() {
+TargetLoweringObjectFileMachO::TargetLoweringObjectFileMachO() {
SupportIndirectSymViaGOTPCRel = true;
}
@@ -1185,6 +1188,7 @@ void TargetLoweringObjectFileMachO::emitModuleMetadata(MCStreamer &Streamer,
StringRef SectionVal;
GetObjCImageInfo(M, VersionVal, ImageInfoFlags, SectionVal);
+ emitCGProfileMetadata(Streamer, M);
// The section is mandatory. If we don't have it, then we don't have GC info.
if (SectionVal.empty())
@@ -2543,8 +2547,7 @@ MCSection *TargetLoweringObjectFileXCOFF::getSectionForTOCEntry(
//===----------------------------------------------------------------------===//
// GOFF
//===----------------------------------------------------------------------===//
-TargetLoweringObjectFileGOFF::TargetLoweringObjectFileGOFF()
- : TargetLoweringObjectFile() {}
+TargetLoweringObjectFileGOFF::TargetLoweringObjectFileGOFF() {}
MCSection *TargetLoweringObjectFileGOFF::getExplicitSectionGlobal(
const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const {
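The getELFSectionType() change above matches .init_array/.fini_array/.preinit_array by prefix, but only when the prefix is followed by nothing or by a '.' separator, so ".init_array.100" still gets SHT_INIT_ARRAY while ".init_array_custom" does not. A minimal standalone sketch of that rule, using std::string_view instead of llvm::StringRef purely to keep the example self-contained (hasSectionPrefix is a made-up name):

    #include <cassert>
    #include <string_view>

    // Returns true when Name is exactly Prefix, or Prefix followed by a '.'
    // suffix component, mirroring the prefix rule used for the
    // .init_array/.fini_array/.preinit_array section-type selection above.
    static bool hasSectionPrefix(std::string_view Name, std::string_view Prefix) {
      if (Name.substr(0, Prefix.size()) != Prefix)
        return false;
      Name.remove_prefix(Prefix.size());
      return Name.empty() || Name.front() == '.';
    }

    int main() {
      assert(hasSectionPrefix(".init_array", ".init_array"));
      assert(hasSectionPrefix(".init_array.100", ".init_array"));
      assert(!hasSectionPrefix(".init_array_custom", ".init_array"));
    }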
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/TargetPassConfig.cpp b/contrib/llvm-project/llvm/lib/CodeGen/TargetPassConfig.cpp
index 402e21d3708b..05004fb935df 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/TargetPassConfig.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/TargetPassConfig.cpp
@@ -328,7 +328,7 @@ static IdentifyingPassPtr overridePass(AnalysisID StandardID,
// Find the FSProfile file name. The internal option takes the precedence
// before getting from TargetMachine.
-static const std::string getFSProfileFile(const TargetMachine *TM) {
+static std::string getFSProfileFile(const TargetMachine *TM) {
if (!FSProfileFile.empty())
return FSProfileFile.getValue();
const Optional<PGOOptions> &PGOOpt = TM->getPGOOption();
@@ -339,7 +339,7 @@ static const std::string getFSProfileFile(const TargetMachine *TM) {
// Find the Profile remapping file name. The internal option takes the
// precedence before getting from TargetMachine.
-static const std::string getFSRemappingFile(const TargetMachine *TM) {
+static std::string getFSRemappingFile(const TargetMachine *TM) {
if (!FSRemappingFile.empty())
return FSRemappingFile.getValue();
const Optional<PGOOptions> &PGOOpt = TM->getPGOOption();
@@ -1399,6 +1399,9 @@ bool TargetPassConfig::addRegAssignAndRewriteOptimized() {
// Finally rewrite virtual registers.
addPass(&VirtRegRewriterID);
+ // Regalloc scoring for ML-driven eviction - noop except when learning a new
+ // eviction policy.
+ addPass(createRegAllocScoringPass());
return true;
}
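The getFSProfileFile()/getFSRemappingFile() signature change above drops a top-level const from a by-value return type. That const buys callers nothing and blocks move semantics on the returned temporary; a small illustration with made-up function names:

    #include <string>

    static const std::string makeConst()    { return std::string(1000, 'x'); }
    static std::string       makeNonConst() { return std::string(1000, 'x'); }

    int main() {
      std::string S;
      S = makeConst();    // const rvalue: binds to operator=(const std::string&), a copy
      S = makeNonConst(); // non-const rvalue: binds to operator=(std::string&&), a move
    }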
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/TargetRegisterInfo.cpp b/contrib/llvm-project/llvm/lib/CodeGen/TargetRegisterInfo.cpp
index f5cb518fce3e..6bcf79547056 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/TargetRegisterInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/TargetRegisterInfo.cpp
@@ -552,7 +552,7 @@ bool TargetRegisterInfo::getCoveringSubRegIndexes(
// Abort if we cannot possibly implement the COPY with the given indexes.
if (BestIdx == 0)
- return 0;
+ return false;
NeededIndexes.push_back(BestIdx);
@@ -581,7 +581,7 @@ bool TargetRegisterInfo::getCoveringSubRegIndexes(
}
if (BestIdx == 0)
- return 0; // Impossible to handle
+ return false; // Impossible to handle
NeededIndexes.push_back(BestIdx);
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/TypePromotion.cpp b/contrib/llvm-project/llvm/lib/CodeGen/TypePromotion.cpp
index d042deefd746..01ea171e5ea2 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/TypePromotion.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/TypePromotion.cpp
@@ -116,11 +116,11 @@ class IRPromoter {
SmallPtrSet<Value*, 8> Promoted;
void ReplaceAllUsersOfWith(Value *From, Value *To);
- void ExtendSources(void);
- void ConvertTruncs(void);
- void PromoteTree(void);
- void TruncateSinks(void);
- void Cleanup(void);
+ void ExtendSources();
+ void ConvertTruncs();
+ void PromoteTree();
+ void TruncateSinks();
+ void Cleanup();
public:
IRPromoter(LLVMContext &C, IntegerType *Ty, unsigned Width,
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/VLIWMachineScheduler.cpp b/contrib/llvm-project/llvm/lib/CodeGen/VLIWMachineScheduler.cpp
index cbc5d9ec169b..5f59cb4643f2 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/VLIWMachineScheduler.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/VLIWMachineScheduler.cpp
@@ -293,7 +293,7 @@ void ConvergingVLIWScheduler::initialize(ScheduleDAGMI *dag) {
const std::vector<unsigned> &MaxPressure =
DAG->getRegPressure().MaxSetPressure;
- HighPressureSets.assign(MaxPressure.size(), 0);
+ HighPressureSets.assign(MaxPressure.size(), false);
for (unsigned i = 0, e = MaxPressure.size(); i < e; ++i) {
unsigned Limit = DAG->getRegClassInfo()->getRegPressureSetLimit(i);
HighPressureSets[i] =
diff --git a/contrib/llvm-project/llvm/lib/DWARFLinker/DWARFLinker.cpp b/contrib/llvm-project/llvm/lib/DWARFLinker/DWARFLinker.cpp
index ae0859e1ecfd..b56095ca9a96 100644
--- a/contrib/llvm-project/llvm/lib/DWARFLinker/DWARFLinker.cpp
+++ b/contrib/llvm-project/llvm/lib/DWARFLinker/DWARFLinker.cpp
@@ -124,6 +124,7 @@ static bool isTypeTag(uint16_t Tag) {
case dwarf::DW_TAG_interface_type:
case dwarf::DW_TAG_unspecified_type:
case dwarf::DW_TAG_shared_type:
+ case dwarf::DW_TAG_immutable_type:
return true;
default:
break;
@@ -1934,7 +1935,7 @@ uint32_t DWARFLinker::DIECloner::hashFullyQualifiedName(DWARFDie DIE,
CompileUnit *CU = &U;
Optional<DWARFFormValue> Ref;
- while (1) {
+ while (true) {
if (const char *CurrentName = DIE.getName(DINameKind::ShortName))
Name = CurrentName;
@@ -2107,7 +2108,6 @@ Error DWARFLinker::loadClangModule(
// Add this module.
Unit = std::make_unique<CompileUnit>(*CU, UnitID++, !Options.NoODR,
ModuleName);
- Unit->setHasInterestingContent();
analyzeContextInfo(CUDie, 0, *Unit, &ODRContexts.getRoot(), ODRContexts,
ModulesEndOffset, Options.ParseableSwiftInterfaces,
[&](const Twine &Warning, const DWARFDie &DIE) {
diff --git a/contrib/llvm-project/llvm/lib/DWARFLinker/DWARFLinkerCompileUnit.cpp b/contrib/llvm-project/llvm/lib/DWARFLinker/DWARFLinkerCompileUnit.cpp
index 925ab3d295c2..acecb1788d10 100644
--- a/contrib/llvm-project/llvm/lib/DWARFLinker/DWARFLinkerCompileUnit.cpp
+++ b/contrib/llvm-project/llvm/lib/DWARFLinker/DWARFLinkerCompileUnit.cpp
@@ -40,8 +40,6 @@ StringRef CompileUnit::getSysRoot() {
void CompileUnit::markEverythingAsKept() {
unsigned Idx = 0;
- setHasInterestingContent();
-
for (auto &I : Info) {
// Mark everything that wasn't explicit marked for pruning.
I.Keep = !I.Prune;
diff --git a/contrib/llvm-project/llvm/lib/DWARFLinker/DWARFLinkerDeclContext.cpp b/contrib/llvm-project/llvm/lib/DWARFLinker/DWARFLinkerDeclContext.cpp
index d9b3c4235b4d..5ab2ad0780a2 100644
--- a/contrib/llvm-project/llvm/lib/DWARFLinker/DWARFLinkerDeclContext.cpp
+++ b/contrib/llvm-project/llvm/lib/DWARFLinker/DWARFLinkerDeclContext.cpp
@@ -173,7 +173,7 @@ DeclContextTree::getChildDeclContext(DeclContext &Context, const DWARFDie &DIE,
!(*ContextIter)->setLastSeenDIE(U, DIE)) {
// The context was found, but it is ambiguous with another context
// in the same file. Mark it invalid.
- return PointerIntPair<DeclContext *, 1>(*ContextIter, /* Invalid= */ 1);
+ return PointerIntPair<DeclContext *, 1>(*ContextIter, /* IntVal= */ 1);
}
assert(ContextIter != Contexts.end());
@@ -183,7 +183,7 @@ DeclContextTree::getChildDeclContext(DeclContext &Context, const DWARFDie &DIE,
Context.getTag() != dwarf::DW_TAG_structure_type &&
Context.getTag() != dwarf::DW_TAG_class_type) ||
(Tag == dwarf::DW_TAG_union_type))
- return PointerIntPair<DeclContext *, 1>(*ContextIter, /* Invalid= */ 1);
+ return PointerIntPair<DeclContext *, 1>(*ContextIter, /* IntVal= */ 1);
return PointerIntPair<DeclContext *, 1>(*ContextIter);
}
diff --git a/contrib/llvm-project/llvm/lib/DebugInfo/CodeView/EnumTables.cpp b/contrib/llvm-project/llvm/lib/DebugInfo/CodeView/EnumTables.cpp
index b4a2a0031b2d..adf4ae519dae 100644
--- a/contrib/llvm-project/llvm/lib/DebugInfo/CodeView/EnumTables.cpp
+++ b/contrib/llvm-project/llvm/lib/DebugInfo/CodeView/EnumTables.cpp
@@ -104,7 +104,7 @@ static const EnumEntry<codeview::SourceLanguage> SourceLanguages[] = {
CV_ENUM_ENT(SourceLanguage, ILAsm), CV_ENUM_ENT(SourceLanguage, Java),
CV_ENUM_ENT(SourceLanguage, JScript), CV_ENUM_ENT(SourceLanguage, MSIL),
CV_ENUM_ENT(SourceLanguage, HLSL), CV_ENUM_ENT(SourceLanguage, D),
- CV_ENUM_ENT(SourceLanguage, Swift),
+ CV_ENUM_ENT(SourceLanguage, Swift), CV_ENUM_ENT(SourceLanguage, Rust),
};
static const EnumEntry<uint32_t> CompileSym2FlagNames[] = {
diff --git a/contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFCompileUnit.cpp b/contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFCompileUnit.cpp
index 6e30309ae94a..d68ecd4f8a42 100644
--- a/contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFCompileUnit.cpp
+++ b/contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFCompileUnit.cpp
@@ -15,6 +15,8 @@
using namespace llvm;
void DWARFCompileUnit::dump(raw_ostream &OS, DIDumpOptions DumpOpts) {
+ if (DumpOpts.SummarizeTypes)
+ return;
int OffsetDumpWidth = 2 * dwarf::getDwarfOffsetByteSize(getFormat());
OS << format("0x%08" PRIx64, getOffset()) << ": Compile Unit:"
<< " length = " << format("0x%0*" PRIx64, OffsetDumpWidth, getLength())
diff --git a/contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp b/contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp
index 95135c95e8d2..ef50ad53650a 100644
--- a/contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp
+++ b/contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp
@@ -695,14 +695,30 @@ void DWARFContext::dump(
DWARFTypeUnit *DWARFContext::getTypeUnitForHash(uint16_t Version, uint64_t Hash,
bool IsDWO) {
- // FIXME: Check for/use the tu_index here, if there is one.
- for (const auto &U : IsDWO ? dwo_units() : normal_units()) {
- if (DWARFTypeUnit *TU = dyn_cast<DWARFTypeUnit>(U.get())) {
- if (TU->getTypeHash() == Hash)
- return TU;
+ parseDWOUnits(LazyParse);
+
+ if (const auto &TUI = getTUIndex()) {
+ if (const auto *R = TUI.getFromHash(Hash))
+ return dyn_cast_or_null<DWARFTypeUnit>(
+ DWOUnits.getUnitForIndexEntry(*R));
+ return nullptr;
+ }
+
+ struct UnitContainers {
+ const DWARFUnitVector &Units;
+ Optional<DenseMap<uint64_t, DWARFTypeUnit *>> &Map;
+ };
+ UnitContainers Units = IsDWO ? UnitContainers{DWOUnits, DWOTypeUnits}
+ : UnitContainers{NormalUnits, NormalTypeUnits};
+ if (!Units.Map) {
+ Units.Map.emplace();
+ for (const auto &U : IsDWO ? dwo_units() : normal_units()) {
+ if (DWARFTypeUnit *TU = dyn_cast<DWARFTypeUnit>(U.get()))
+ (*Units.Map)[TU->getTypeHash()] = TU;
}
}
- return nullptr;
+
+ return (*Units.Map)[Hash];
}
DWARFCompileUnit *DWARFContext::getDWOCompileUnitForHash(uint64_t Hash) {
@@ -1098,6 +1114,7 @@ static Optional<uint64_t> getTypeSize(DWARFDie Type, uint64_t PointerSize) {
return PointerSize;
}
case DW_TAG_const_type:
+ case DW_TAG_immutable_type:
case DW_TAG_volatile_type:
case DW_TAG_restrict_type:
case DW_TAG_typedef: {
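The getTypeUnitForHash() rewrite above consults the TU index when one is present and otherwise builds a hash-to-type-unit map lazily on first use, instead of scanning every unit per query. The caching pattern, reduced to standard-library stand-ins (TypeUnit and Context here are placeholders, not the DWARF classes):

    #include <cstdint>
    #include <optional>
    #include <unordered_map>
    #include <vector>

    struct TypeUnit { uint64_t Hash; };

    class Context {
      std::vector<TypeUnit> Units;
      // Built on the first lookup, then reused for every later query.
      std::optional<std::unordered_map<uint64_t, TypeUnit *>> TypeUnitMap;

    public:
      explicit Context(std::vector<TypeUnit> Us) : Units(std::move(Us)) {}

      TypeUnit *getTypeUnitForHash(uint64_t Hash) {
        if (!TypeUnitMap) {
          TypeUnitMap.emplace();
          for (TypeUnit &U : Units)
            (*TypeUnitMap)[U.Hash] = &U;
        }
        auto It = TypeUnitMap->find(Hash);
        return It == TypeUnitMap->end() ? nullptr : It->second;
      }
    };

    int main() {
      Context Ctx({{0x1234}, {0xabcd}});
      return Ctx.getTypeUnitForHash(0xabcd) ? 0 : 1;
    }

The real code keeps the map in a DenseMap behind an Optional and relies on operator[] yielding a default-constructed null pointer for unknown hashes; the sketch uses find() for the same effect.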
diff --git a/contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFDebugAbbrev.cpp b/contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFDebugAbbrev.cpp
index d91a630256d6..ee54fc754803 100644
--- a/contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFDebugAbbrev.cpp
+++ b/contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFDebugAbbrev.cpp
@@ -74,7 +74,7 @@ std::string DWARFAbbreviationDeclarationSet::getCodeRange() const {
for (const auto &Decl : Decls)
Codes.push_back(Decl.getCode());
- std::string Buffer = "";
+ std::string Buffer;
raw_string_ostream Stream(Buffer);
// Each iteration through this loop represents a single contiguous range in
// the set of codes.
diff --git a/contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFDie.cpp b/contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFDie.cpp
index 5421b2d59a1b..ec7889a3728a 100644
--- a/contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFDie.cpp
+++ b/contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFDie.cpp
@@ -215,15 +215,16 @@ struct DWARFTypePrinter {
OS << "void";
return DWARFDie();
}
- DWARFDie Inner = resolveReferencedType(D);
+ DWARFDie InnerDIE;
+ auto Inner = [&] { return InnerDIE = resolveReferencedType(D); };
const dwarf::Tag T = D.getTag();
switch (T) {
case DW_TAG_pointer_type: {
- appendPointerLikeTypeBefore(D, Inner, "*");
+ appendPointerLikeTypeBefore(D, Inner(), "*");
break;
}
case DW_TAG_subroutine_type: {
- appendQualifiedNameBefore(Inner);
+ appendQualifiedNameBefore(Inner());
if (Word) {
OS << ' ';
}
@@ -231,18 +232,18 @@ struct DWARFTypePrinter {
break;
}
case DW_TAG_array_type: {
- appendQualifiedNameBefore(Inner);
+ appendQualifiedNameBefore(Inner());
break;
}
case DW_TAG_reference_type:
- appendPointerLikeTypeBefore(D, Inner, "&");
+ appendPointerLikeTypeBefore(D, Inner(), "&");
break;
case DW_TAG_rvalue_reference_type:
- appendPointerLikeTypeBefore(D, Inner, "&&");
+ appendPointerLikeTypeBefore(D, Inner(), "&&");
break;
case DW_TAG_ptr_to_member_type: {
- appendQualifiedNameBefore(Inner);
- if (needsParens(Inner))
+ appendQualifiedNameBefore(Inner());
+ if (needsParens(InnerDIE))
OS << '(';
else if (Word)
OS << ' ';
@@ -284,7 +285,7 @@ struct DWARFTypePrinter {
const char *NamePtr = dwarf::toString(D.find(DW_AT_name), nullptr);
if (!NamePtr) {
appendTypeTagName(D.getTag());
- return Inner;
+ return DWARFDie();
}
Word = true;
StringRef Name = NamePtr;
@@ -317,7 +318,7 @@ struct DWARFTypePrinter {
break;
}
}
- return Inner;
+ return InnerDIE;
}
void appendUnqualifiedNameAfter(DWARFDie D, DWARFDie Inner,
@@ -610,7 +611,8 @@ struct DWARFTypePrinter {
bool First = true;
bool RealFirst = true;
for (DWARFDie P : D) {
- if (P.getTag() != DW_TAG_formal_parameter)
+ if (P.getTag() != DW_TAG_formal_parameter &&
+ P.getTag() != DW_TAG_unspecified_parameters)
return;
DWARFDie T = resolveReferencedType(P);
if (SkipFirstParamIfArtificial && RealFirst && P.find(DW_AT_artificial)) {
@@ -622,7 +624,10 @@ struct DWARFTypePrinter {
OS << ", ";
}
First = false;
- appendQualifiedName(T);
+ if (P.getTag() == DW_TAG_unspecified_parameters)
+ OS << "...";
+ else
+ appendQualifiedName(T);
}
EndedWithTemplate = false;
OS << ')';
@@ -767,7 +772,7 @@ static void dumpAttribute(raw_ostream &OS, const DWARFDie &Die,
DWARFDie D = resolveReferencedType(Die, FormValue);
if (D && !D.isNULL()) {
OS << Space << "\"";
- DWARFTypePrinter(OS).appendQualifiedName(D);
+ dumpTypeQualifiedName(D, OS);
OS << '"';
}
} else if (Attr == DW_AT_APPLE_property_attribute) {
@@ -801,7 +806,9 @@ void DWARFDie::getFullName(raw_string_ostream &OS,
const char *NamePtr = getShortName();
if (!NamePtr)
return;
- DWARFTypePrinter(OS).appendUnqualifiedName(*this, OriginalFullName);
+ if (getTag() == DW_TAG_GNU_template_parameter_pack)
+ return;
+ dumpTypeUnqualifiedName(*this, OS, OriginalFullName);
}
bool DWARFDie::isSubprogramDIE() const { return getTag() == DW_TAG_subprogram; }
@@ -1263,3 +1270,16 @@ bool DWARFAttribute::mayHaveLocationExpr(dwarf::Attribute Attr) {
return false;
}
}
+
+namespace llvm {
+
+void dumpTypeQualifiedName(const DWARFDie &DIE, raw_ostream &OS) {
+ DWARFTypePrinter(OS).appendQualifiedName(DIE);
+}
+
+void dumpTypeUnqualifiedName(const DWARFDie &DIE, raw_ostream &OS,
+ std::string *OriginalFullName) {
+ DWARFTypePrinter(OS).appendUnqualifiedName(DIE, OriginalFullName);
+}
+
+} // namespace llvm
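The DWARFTypePrinter change above replaces an eagerly computed Inner DIE with a lambda that resolves the referenced type only on the switch cases that need it, while caching the result in InnerDIE for the return value. The same defer-and-cache idiom in isolation (expensiveLookup() and describe() are placeholders):

    #include <iostream>

    static int expensiveLookup() {
      std::cout << "resolving...\n"; // runs at most once per call of describe()
      return 42;
    }

    static void describe(bool NeedsInner) {
      int Cached = 0;
      auto Inner = [&] { return Cached = expensiveLookup(); };
      if (NeedsInner)
        std::cout << "inner = " << Inner() << "\n";
      // Cached still holds the resolved value (or 0 if it was never needed),
      // mirroring how InnerDIE is returned at the end of the printer.
      std::cout << "cached = " << Cached << "\n";
    }

    int main() {
      describe(true);
      describe(false);
    }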
diff --git a/contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFVerifier.cpp b/contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFVerifier.cpp
index 6424c2f59844..ca7ac785b550 100644
--- a/contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFVerifier.cpp
+++ b/contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFVerifier.cpp
@@ -173,7 +173,7 @@ bool DWARFVerifier::verifyName(const DWARFDie &Die) {
Die.getFullName(OS, &OriginalFullName);
OS.flush();
if (OriginalFullName.empty() || OriginalFullName == ReconstructedName)
- return 0;
+ return false;
error() << "Simplified template DW_AT_name could not be reconstituted:\n"
<< formatv(" original: {0}\n"
@@ -181,7 +181,7 @@ bool DWARFVerifier::verifyName(const DWARFDie &Die) {
OriginalFullName, ReconstructedName);
dump(Die) << '\n';
dump(Die.getDwarfUnit()->getUnitDIE()) << '\n';
- return 1;
+ return true;
}
unsigned DWARFVerifier::verifyUnitContents(DWARFUnit &Unit,
@@ -322,12 +322,19 @@ unsigned DWARFVerifier::verifyUnits(const DWARFUnitVector &Units) {
unsigned NumDebugInfoErrors = 0;
ReferenceMap CrossUnitReferences;
+ unsigned Index = 1;
for (const auto &Unit : Units) {
- ReferenceMap UnitLocalReferences;
- NumDebugInfoErrors +=
- verifyUnitContents(*Unit, UnitLocalReferences, CrossUnitReferences);
- NumDebugInfoErrors += verifyDebugInfoReferences(
- UnitLocalReferences, [&](uint64_t Offset) { return Unit.get(); });
+ OS << "Verifying unit: " << Index << " / " << Units.getNumUnits();
+ if (const char* Name = Unit->getUnitDIE(true).getShortName())
+ OS << ", \"" << Name << '\"';
+ OS << '\n';
+ OS.flush();
+ ReferenceMap UnitLocalReferences;
+ NumDebugInfoErrors +=
+ verifyUnitContents(*Unit, UnitLocalReferences, CrossUnitReferences);
+ NumDebugInfoErrors += verifyDebugInfoReferences(
+ UnitLocalReferences, [&](uint64_t Offset) { return Unit.get(); });
+ ++Index;
}
NumDebugInfoErrors += verifyDebugInfoReferences(
diff --git a/contrib/llvm-project/llvm/lib/DebugInfo/PDB/Native/NativeEnumTypes.cpp b/contrib/llvm-project/llvm/lib/DebugInfo/PDB/Native/NativeEnumTypes.cpp
index ac217df1ee48..2524e10cb6c5 100644
--- a/contrib/llvm-project/llvm/lib/DebugInfo/PDB/Native/NativeEnumTypes.cpp
+++ b/contrib/llvm-project/llvm/lib/DebugInfo/PDB/Native/NativeEnumTypes.cpp
@@ -23,7 +23,7 @@ using namespace llvm::pdb;
NativeEnumTypes::NativeEnumTypes(NativeSession &PDBSession,
LazyRandomTypeCollection &Types,
std::vector<codeview::TypeLeafKind> Kinds)
- : Matches(), Index(0), Session(PDBSession) {
+ : Index(0), Session(PDBSession) {
Optional<TypeIndex> TI = Types.getFirst();
while (TI) {
CVType CVT = Types.getType(*TI);
diff --git a/contrib/llvm-project/llvm/lib/DebugInfo/PDB/PDBExtras.cpp b/contrib/llvm-project/llvm/lib/DebugInfo/PDB/PDBExtras.cpp
index 25962e5152eb..a6d7ca0da7a9 100644
--- a/contrib/llvm-project/llvm/lib/DebugInfo/PDB/PDBExtras.cpp
+++ b/contrib/llvm-project/llvm/lib/DebugInfo/PDB/PDBExtras.cpp
@@ -231,6 +231,7 @@ raw_ostream &llvm::pdb::operator<<(raw_ostream &OS, const PDB_Lang &Lang) {
CASE_OUTPUT_ENUM_CLASS_NAME(PDB_Lang, HLSL, OS)
CASE_OUTPUT_ENUM_CLASS_NAME(PDB_Lang, D, OS)
CASE_OUTPUT_ENUM_CLASS_NAME(PDB_Lang, Swift, OS)
+ CASE_OUTPUT_ENUM_CLASS_NAME(PDB_Lang, Rust, OS)
}
return OS;
}
diff --git a/contrib/llvm-project/llvm/lib/DebugInfo/PDB/PDBSymbolCompiland.cpp b/contrib/llvm-project/llvm/lib/DebugInfo/PDB/PDBSymbolCompiland.cpp
index 9b2883546305..529100b23ba5 100644
--- a/contrib/llvm-project/llvm/lib/DebugInfo/PDB/PDBSymbolCompiland.cpp
+++ b/contrib/llvm-project/llvm/lib/DebugInfo/PDB/PDBSymbolCompiland.cpp
@@ -100,6 +100,7 @@ std::string PDBSymbolCompiland::getSourceFileFullPath() const {
.Case(".c", Lang == PDB_Lang::C)
.Case(".asm", Lang == PDB_Lang::Masm)
.Case(".swift", Lang == PDB_Lang::Swift)
+ .Case(".rs", Lang == PDB_Lang::Rust)
.Default(false))
return File->getFileName();
}
diff --git a/contrib/llvm-project/llvm/lib/DebugInfo/Symbolize/DIPrinter.cpp b/contrib/llvm-project/llvm/lib/DebugInfo/Symbolize/DIPrinter.cpp
index 555d29fe184b..e29968d113bd 100644
--- a/contrib/llvm-project/llvm/lib/DebugInfo/Symbolize/DIPrinter.cpp
+++ b/contrib/llvm-project/llvm/lib/DebugInfo/Symbolize/DIPrinter.cpp
@@ -33,8 +33,8 @@ namespace symbolize {
class SourceCode {
std::unique_ptr<MemoryBuffer> MemBuf;
- const Optional<StringRef> load(StringRef FileName,
- const Optional<StringRef> &EmbeddedSource) {
+ Optional<StringRef> load(StringRef FileName,
+ const Optional<StringRef> &EmbeddedSource) {
if (Lines <= 0)
return None;
@@ -50,7 +50,7 @@ class SourceCode {
}
}
- const Optional<StringRef> pruneSource(const Optional<StringRef> &Source) {
+ Optional<StringRef> pruneSource(const Optional<StringRef> &Source) {
if (!Source)
return None;
size_t FirstLinePos = StringRef::npos, Pos = 0;
diff --git a/contrib/llvm-project/llvm/lib/Debuginfod/Debuginfod.cpp b/contrib/llvm-project/llvm/lib/Debuginfod/Debuginfod.cpp
index 389b18fd62ac..27614572766d 100644
--- a/contrib/llvm-project/llvm/lib/Debuginfod/Debuginfod.cpp
+++ b/contrib/llvm-project/llvm/lib/Debuginfod/Debuginfod.cpp
@@ -21,8 +21,10 @@
#include "llvm/Debuginfod/HTTPClient.h"
#include "llvm/Support/CachePruning.h"
#include "llvm/Support/Caching.h"
+#include "llvm/Support/Errc.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FileUtilities.h"
+#include "llvm/Support/Path.h"
#include "llvm/Support/xxhash.h"
namespace llvm {
@@ -36,7 +38,7 @@ static std::string buildIDToString(BuildIDRef ID) {
Expected<SmallVector<StringRef>> getDefaultDebuginfodUrls() {
const char *DebuginfodUrlsEnv = std::getenv("DEBUGINFOD_URLS");
- if (DebuginfodUrlsEnv == NULL)
+ if (DebuginfodUrlsEnv == nullptr)
return SmallVector<StringRef>();
SmallVector<StringRef> DebuginfodUrls;
@@ -52,6 +54,7 @@ Expected<std::string> getDefaultDebuginfodCacheDirectory() {
if (!sys::path::cache_directory(CacheDirectory))
return createStringError(
errc::io_error, "Unable to determine appropriate cache directory.");
+ sys::path::append(CacheDirectory, "llvm-debuginfod", "client");
return std::string(CacheDirectory);
}
diff --git a/contrib/llvm-project/llvm/lib/Demangle/DLangDemangle.cpp b/contrib/llvm-project/llvm/lib/Demangle/DLangDemangle.cpp
index 0cefbd63a7ae..7cecd8007087 100644
--- a/contrib/llvm-project/llvm/lib/Demangle/DLangDemangle.cpp
+++ b/contrib/llvm-project/llvm/lib/Demangle/DLangDemangle.cpp
@@ -68,7 +68,53 @@ private:
/// \note A result larger than UINT_MAX is considered a failure.
///
/// \see https://dlang.org/spec/abi.html#Number .
- const char *decodeNumber(const char *Mangled, unsigned long *Ret);
+ const char *decodeNumber(const char *Mangled, unsigned long &Ret);
+
+ /// Extract the back reference position from a given string.
+ ///
+ /// \param Mangled string to extract the back reference position from.
+ /// \param Ret assigned result value.
+ ///
+ /// \return the remaining string on success or nullptr on failure.
+ ///
+ /// \note Ret is always >= 0 on success, and unspecified on failure
+ ///
+ /// \see https://dlang.org/spec/abi.html#back_ref .
+ /// \see https://dlang.org/spec/abi.html#NumberBackRef .
+ const char *decodeBackrefPos(const char *Mangled, long &Ret);
+
+ /// Extract the symbol pointed to by the back reference from a given string.
+ ///
+ /// \param Mangled string to extract the back reference position from.
+ /// \param Ret assigned result value.
+ ///
+ /// \return the remaining string on success or nullptr on failure.
+ ///
+ /// \see https://dlang.org/spec/abi.html#back_ref .
+ const char *decodeBackref(const char *Mangled, const char *&Ret);
+
+ /// Extract and demangle backreferenced symbol from a given mangled symbol
+ /// and append it to the output string.
+ ///
+ /// \param Demangled output buffer to write the demangled name.
+ /// \param Mangled mangled symbol to be demangled.
+ ///
+ /// \return the remaining string on success or nullptr on failure.
+ ///
+ /// \see https://dlang.org/spec/abi.html#back_ref .
+ /// \see https://dlang.org/spec/abi.html#IdentifierBackRef .
+ const char *parseSymbolBackref(OutputBuffer *Demangled, const char *Mangled);
+
+ /// Extract and demangle backreferenced type from a given mangled symbol
+ /// and append it to the output string.
+ ///
+ /// \param Mangled mangled symbol to be demangled.
+ ///
+ /// \return the remaining string on success or nullptr on failure.
+ ///
+ /// \see https://dlang.org/spec/abi.html#back_ref .
+ /// \see https://dlang.org/spec/abi.html#TypeBackRef .
+ const char *parseTypeBackref(const char *Mangled);
/// Check whether it is the beginning of a symbol name.
///
@@ -115,13 +161,25 @@ private:
/// \see https://dlang.org/spec/abi.html#QualifiedName .
const char *parseQualified(OutputBuffer *Demangled, const char *Mangled);
+ /// Extract and demangle a type from a given mangled symbol and append it to
+ /// the output string.
+ ///
+ /// \param Mangled mangled symbol to be demangled.
+ ///
+ /// \return the remaining string on success or nullptr on failure.
+ ///
+ /// \see https://dlang.org/spec/abi.html#Type .
+ const char *parseType(const char *Mangled);
+
/// The string we are demangling.
const char *Str;
+ /// The index of the last back reference.
+ int LastBackref;
};
} // namespace
-const char *Demangler::decodeNumber(const char *Mangled, unsigned long *Ret) {
+const char *Demangler::decodeNumber(const char *Mangled, unsigned long &Ret) {
// Return nullptr if trying to extract something that isn't a digit.
if (Mangled == nullptr || !std::isdigit(*Mangled))
return nullptr;
@@ -142,16 +200,145 @@ const char *Demangler::decodeNumber(const char *Mangled, unsigned long *Ret) {
if (*Mangled == '\0')
return nullptr;
- *Ret = Val;
+ Ret = Val;
+ return Mangled;
+}
+
+const char *Demangler::decodeBackrefPos(const char *Mangled, long &Ret) {
+ // Return nullptr if trying to extract something that isn't a letter
+ if (Mangled == nullptr || !std::isalpha(*Mangled))
+ return nullptr;
+
+ // Any identifier or non-basic type that has been emitted to the mangled
+ // symbol before will not be emitted again, but is referenced by a special
+ // sequence encoding the relative position of the original occurrence in the
+ // mangled symbol name.
+ // Numbers in back references are encoded with base 26 by upper case letters
+ // A-Z for higher digits but lower case letters a-z for the last digit.
+ // NumberBackRef:
+ // [a-z]
+ // [A-Z] NumberBackRef
+ // ^
+ unsigned long Val = 0;
+
+ while (std::isalpha(*Mangled)) {
+ // Check for overflow
+ if (Val > (std::numeric_limits<unsigned long>::max() - 25) / 26)
+ break;
+
+ Val *= 26;
+
+ if (Mangled[0] >= 'a' && Mangled[0] <= 'z') {
+ Val += Mangled[0] - 'a';
+ if ((long)Val <= 0)
+ break;
+ Ret = Val;
+ return Mangled + 1;
+ }
+
+ Val += Mangled[0] - 'A';
+ ++Mangled;
+ }
+
+ return nullptr;
+}
+
+const char *Demangler::decodeBackref(const char *Mangled, const char *&Ret) {
+ assert(Mangled != nullptr && *Mangled == 'Q' && "Invalid back reference!");
+ Ret = nullptr;
+
+ // Position of 'Q'
+ const char *Qpos = Mangled;
+ long RefPos;
+ ++Mangled;
+
+ Mangled = decodeBackrefPos(Mangled, RefPos);
+ if (Mangled == nullptr)
+ return nullptr;
+
+ if (RefPos > Qpos - Str)
+ return nullptr;
+
+ // Set the position of the back reference.
+ Ret = Qpos - RefPos;
+
+ return Mangled;
+}
+
+const char *Demangler::parseSymbolBackref(OutputBuffer *Demangled,
+ const char *Mangled) {
+ // An identifier back reference always points to a digit 0 to 9.
+ // IdentifierBackRef:
+ // Q NumberBackRef
+ // ^
+ const char *Backref;
+ unsigned long Len;
+
+ // Get position of the back reference
+ Mangled = decodeBackref(Mangled, Backref);
+
+ // Must point to a simple identifier
+ Backref = decodeNumber(Backref, Len);
+ if (Backref == nullptr || strlen(Backref) < Len)
+ return nullptr;
+
+ Backref = parseLName(Demangled, Backref, Len);
+ if (Backref == nullptr)
+ return nullptr;
+
+ return Mangled;
+}
+
+const char *Demangler::parseTypeBackref(const char *Mangled) {
+ // A type back reference always points to a letter.
+ // TypeBackRef:
+ // Q NumberBackRef
+ // ^
+ const char *Backref;
+
+ // If we appear to be moving backwards through the mangle string, then
+ // bail as this may be a recursive back reference.
+ if (Mangled - Str >= LastBackref)
+ return nullptr;
+
+ int SaveRefPos = LastBackref;
+ LastBackref = Mangled - Str;
+
+ // Get position of the back reference.
+ Mangled = decodeBackref(Mangled, Backref);
+
+ // Can't decode back reference.
+ if (Backref == nullptr)
+ return nullptr;
+
+ // TODO: Add support for function type back references.
+ Backref = parseType(Backref);
+
+ LastBackref = SaveRefPos;
+
+ if (Backref == nullptr)
+ return nullptr;
+
return Mangled;
}
bool Demangler::isSymbolName(const char *Mangled) {
+ long Ret;
+ const char *Qref = Mangled;
+
if (std::isdigit(*Mangled))
return true;
- // TODO: Handle symbol back references and template instances.
- return false;
+ // TODO: Handle template instances.
+
+ if (*Mangled != 'Q')
+ return false;
+
+ Mangled = decodeBackrefPos(Mangled + 1, Ret);
+ if (Mangled == nullptr || Ret > Qref - Str)
+ return false;
+
+ return std::isdigit(Qref[-Ret]);
}
const char *Demangler::parseMangle(OutputBuffer *Demangled,
@@ -174,8 +361,7 @@ const char *Demangler::parseMangle(OutputBuffer *Demangled,
if (*Mangled == 'Z')
++Mangled;
else {
- // TODO: Implement symbols with types.
- return nullptr;
+ Mangled = parseType(Mangled);
}
}
@@ -228,9 +414,12 @@ const char *Demangler::parseIdentifier(OutputBuffer *Demangled,
if (Mangled == nullptr || *Mangled == '\0')
return nullptr;
- // TODO: Parse back references and lengthless template instances.
+ if (*Mangled == 'Q')
+ return parseSymbolBackref(Demangled, Mangled);
+
+ // TODO: Parse lengthless template instances.
- const char *Endptr = decodeNumber(Mangled, &Len);
+ const char *Endptr = decodeNumber(Mangled, Len);
if (Endptr == nullptr || Len == 0)
return nullptr;
@@ -262,6 +451,34 @@ const char *Demangler::parseIdentifier(OutputBuffer *Demangled,
return parseLName(Demangled, Mangled, Len);
}
+const char *Demangler::parseType(const char *Mangled) {
+ if (*Mangled == '\0')
+ return nullptr;
+
+ switch (*Mangled) {
+ // TODO: Parse type qualifiers.
+ // TODO: Parse function types.
+ // TODO: Parse compound types.
+ // TODO: Parse delegate types.
+ // TODO: Parse tuple types.
+
+ // Basic types.
+ case 'i':
+ ++Mangled;
+ // TODO: Add type name dumping
+ return Mangled;
+
+ // TODO: Add support for the rest of the basic types.
+
+ // Back referenced type.
+ case 'Q':
+ return parseTypeBackref(Mangled);
+
+ default: // unhandled.
+ return nullptr;
+ }
+}
+
const char *Demangler::parseLName(OutputBuffer *Demangled, const char *Mangled,
unsigned long Len) {
switch (Len) {
@@ -319,7 +536,8 @@ const char *Demangler::parseLName(OutputBuffer *Demangled, const char *Mangled,
return Mangled;
}
-Demangler::Demangler(const char *Mangled) : Str(Mangled) {}
+Demangler::Demangler(const char *Mangled)
+ : Str(Mangled), LastBackref(strlen(Mangled)) {}
const char *Demangler::parseMangle(OutputBuffer *Demangled) {
return parseMangle(Demangled, this->Str);
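The back-reference support added above encodes positions in base 26: upper-case letters carry the leading digits and a single lower-case letter terminates the number (NumberBackRef in the D ABI). A standalone decoder for just that encoding, as a sketch with the overflow checks of decodeBackrefPos() omitted (decodeNumberBackref is a made-up name):

    #include <cassert>
    #include <cctype>

    // Decodes a NumberBackRef ([A-Z]* [a-z]) starting at S.
    // Returns the pointer past the number, or nullptr on malformed input.
    static const char *decodeNumberBackref(const char *S, unsigned long &Value) {
      Value = 0;
      while (std::isalpha(static_cast<unsigned char>(*S))) {
        Value *= 26;
        if (*S >= 'a' && *S <= 'z') {
          Value += static_cast<unsigned long>(*S - 'a');
          return S + 1;          // a lower-case letter ends the number
        }
        Value += static_cast<unsigned long>(*S - 'A');
        ++S;
      }
      return nullptr;            // ran out of letters before a lower-case digit
    }

    int main() {
      unsigned long V;
      assert(decodeNumberBackref("c", V) && V == 2);   // single digit: 'c' == 2
      assert(decodeNumberBackref("Bc", V) && V == 28); // 'B' (1) * 26 + 'c' (2)
    }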
diff --git a/contrib/llvm-project/llvm/lib/Demangle/ItaniumDemangle.cpp b/contrib/llvm-project/llvm/lib/Demangle/ItaniumDemangle.cpp
index 3f68f76761ce..1a5db755e37b 100644
--- a/contrib/llvm-project/llvm/lib/Demangle/ItaniumDemangle.cpp
+++ b/contrib/llvm-project/llvm/lib/Demangle/ItaniumDemangle.cpp
@@ -19,9 +19,7 @@
#include <cstdlib>
#include <cstring>
#include <functional>
-#include <numeric>
#include <utility>
-#include <vector>
using namespace llvm;
using namespace llvm::itanium_demangle;
diff --git a/contrib/llvm-project/llvm/lib/Demangle/MicrosoftDemangleNodes.cpp b/contrib/llvm-project/llvm/lib/Demangle/MicrosoftDemangleNodes.cpp
index 32d8dff66c3f..d07d05a08c55 100644
--- a/contrib/llvm-project/llvm/lib/Demangle/MicrosoftDemangleNodes.cpp
+++ b/contrib/llvm-project/llvm/lib/Demangle/MicrosoftDemangleNodes.cpp
@@ -11,7 +11,6 @@
//===----------------------------------------------------------------------===//
#include "llvm/Demangle/MicrosoftDemangleNodes.h"
-#include "llvm/Demangle/DemangleConfig.h"
#include "llvm/Demangle/Utility.h"
#include <cctype>
#include <string>
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/GDBRegistrationListener.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/GDBRegistrationListener.cpp
index e15bce0d6c4b..1fb37ce7c57c 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/GDBRegistrationListener.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/GDBRegistrationListener.cpp
@@ -96,7 +96,7 @@ class GDBJITRegistrationListener : public JITEventListener {
public:
/// Instantiates the JIT service.
- GDBJITRegistrationListener() : ObjectBufferMap() {}
+ GDBJITRegistrationListener() {}
/// Unregisters each object that was previously registered and releases all
/// internal resources.
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/DefineExternalSectionStartAndEndSymbols.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/DefineExternalSectionStartAndEndSymbols.h
index 8ae3bc2bf61d..159880e4b152 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/DefineExternalSectionStartAndEndSymbols.h
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/DefineExternalSectionStartAndEndSymbols.h
@@ -52,13 +52,13 @@ public:
auto &SR = getSectionRange(*D.Sec);
if (D.IsStart) {
if (SR.empty())
- G.makeAbsolute(*Sym, 0);
+ G.makeAbsolute(*Sym, orc::ExecutorAddr());
else
G.makeDefined(*Sym, *SR.getFirstBlock(), 0, 0, Linkage::Strong,
Scope::Local, false);
} else {
if (SR.empty())
- G.makeAbsolute(*Sym, 0);
+ G.makeAbsolute(*Sym, orc::ExecutorAddr());
else
G.makeDefined(*Sym, *SR.getLastBlock(),
SR.getLastBlock()->getSize(), 0, Linkage::Strong,
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/EHFrameSupport.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/EHFrameSupport.cpp
index 4d7d5ce26668..2ae193595fc0 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/EHFrameSupport.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/EHFrameSupport.cpp
@@ -65,10 +65,7 @@ Error EHFrameSplitter::operator()(LinkGraph &G) {
Error EHFrameSplitter::processBlock(LinkGraph &G, Block &B,
LinkGraph::SplitBlockCache &Cache) {
- LLVM_DEBUG({
- dbgs() << " Processing block at " << formatv("{0:x16}", B.getAddress())
- << "\n";
- });
+ LLVM_DEBUG(dbgs() << " Processing block at " << B.getAddress() << "\n");
// eh-frame should not contain zero-fill blocks.
if (B.isZeroFill())
@@ -400,7 +397,7 @@ Error EHFrameEdgeFixer::processFDE(ParseContext &PC, Block &B,
BlockEdgeMap &BlockEdges) {
LLVM_DEBUG(dbgs() << " Record is FDE\n");
- JITTargetAddress RecordAddress = B.getAddress() + RecordOffset;
+ orc::ExecutorAddr RecordAddress = B.getAddress() + RecordOffset;
auto RecordContent = B.getContent().slice(RecordOffset, RecordLength);
BinaryStreamReader RecordReader(
@@ -418,8 +415,9 @@ Error EHFrameEdgeFixer::processFDE(ParseContext &PC, Block &B,
{
// Process the CIE pointer field.
auto CIEEdgeItr = BlockEdges.find(RecordOffset + CIEDeltaFieldOffset);
- JITTargetAddress CIEAddress =
- RecordAddress + CIEDeltaFieldOffset - CIEDelta;
+ orc::ExecutorAddr CIEAddress =
+ RecordAddress + orc::ExecutorAddrDiff(CIEDeltaFieldOffset) -
+ orc::ExecutorAddrDiff(CIEDelta);
if (CIEEdgeItr == BlockEdges.end()) {
LLVM_DEBUG({
@@ -456,7 +454,7 @@ Error EHFrameEdgeFixer::processFDE(ParseContext &PC, Block &B,
{
// Process the PC-Begin field.
Block *PCBeginBlock = nullptr;
- JITTargetAddress PCBeginFieldOffset = RecordReader.getOffset();
+ orc::ExecutorAddrDiff PCBeginFieldOffset = RecordReader.getOffset();
auto PCEdgeItr = BlockEdges.find(RecordOffset + PCBeginFieldOffset);
if (PCEdgeItr == BlockEdges.end()) {
auto PCBeginPtrInfo =
@@ -464,12 +462,12 @@ Error EHFrameEdgeFixer::processFDE(ParseContext &PC, Block &B,
RecordAddress + PCBeginFieldOffset, RecordReader);
if (!PCBeginPtrInfo)
return PCBeginPtrInfo.takeError();
- JITTargetAddress PCBegin = PCBeginPtrInfo->first;
+ orc::ExecutorAddr PCBegin = PCBeginPtrInfo->first;
Edge::Kind PCBeginEdgeKind = PCBeginPtrInfo->second;
LLVM_DEBUG({
dbgs() << " Adding edge at "
- << formatv("{0:x16}", RecordAddress + PCBeginFieldOffset)
- << " to PC at " << formatv("{0:x16}", PCBegin) << "\n";
+ << (RecordAddress + PCBeginFieldOffset) << " to PC at "
+ << formatv("{0:x16}", PCBegin) << "\n";
});
auto PCBeginSym = getOrCreateSymbol(PC, PCBegin);
if (!PCBeginSym)
@@ -522,7 +520,7 @@ Error EHFrameEdgeFixer::processFDE(ParseContext &PC, Block &B,
if (auto Err = RecordReader.readULEB128(AugmentationDataSize))
return Err;
- JITTargetAddress LSDAFieldOffset = RecordReader.getOffset();
+ orc::ExecutorAddrDiff LSDAFieldOffset = RecordReader.getOffset();
auto LSDAEdgeItr = BlockEdges.find(RecordOffset + LSDAFieldOffset);
if (LSDAEdgeItr == BlockEdges.end()) {
auto LSDAPointerInfo =
@@ -530,7 +528,7 @@ Error EHFrameEdgeFixer::processFDE(ParseContext &PC, Block &B,
RecordAddress + LSDAFieldOffset, RecordReader);
if (!LSDAPointerInfo)
return LSDAPointerInfo.takeError();
- JITTargetAddress LSDA = LSDAPointerInfo->first;
+ orc::ExecutorAddr LSDA = LSDAPointerInfo->first;
Edge::Kind LSDAEdgeKind = LSDAPointerInfo->second;
auto LSDASym = getOrCreateSymbol(PC, LSDA);
if (!LSDASym)
@@ -645,12 +643,10 @@ unsigned EHFrameEdgeFixer::getPointerEncodingDataSize(uint8_t PointerEncoding) {
}
}
-Expected<std::pair<JITTargetAddress, Edge::Kind>>
+Expected<std::pair<orc::ExecutorAddr, Edge::Kind>>
EHFrameEdgeFixer::readEncodedPointer(uint8_t PointerEncoding,
- JITTargetAddress PointerFieldAddress,
+ orc::ExecutorAddr PointerFieldAddress,
BinaryStreamReader &RecordReader) {
- static_assert(sizeof(JITTargetAddress) == sizeof(uint64_t),
- "Result must be able to hold a uint64_t");
assert(isSupportedPointerEncoding(PointerEncoding) &&
"Unsupported pointer encoding");
@@ -663,7 +659,7 @@ EHFrameEdgeFixer::readEncodedPointer(uint8_t PointerEncoding,
if (EffectiveType == DW_EH_PE_absptr)
EffectiveType = (PointerSize == 8) ? DW_EH_PE_udata8 : DW_EH_PE_udata4;
- JITTargetAddress Addr;
+ orc::ExecutorAddr Addr;
Edge::Kind PointerEdgeKind = Edge::Invalid;
switch (EffectiveType) {
case DW_EH_PE_udata4: {
@@ -709,7 +705,7 @@ EHFrameEdgeFixer::readEncodedPointer(uint8_t PointerEncoding,
}
Expected<Symbol &> EHFrameEdgeFixer::getOrCreateSymbol(ParseContext &PC,
- JITTargetAddress Addr) {
+ orc::ExecutorAddr Addr) {
Symbol *CanonicalSym = nullptr;
auto UpdateCanonicalSym = [&](Symbol *Sym) {
@@ -753,8 +749,9 @@ Error EHFrameNullTerminator::operator()(LinkGraph &G) {
<< EHFrameSectionName << "\n";
});
- auto &NullTerminatorBlock = G.createContentBlock(
- *EHFrame, NullTerminatorBlockContent, 0xfffffffffffffffc, 1, 0);
+ auto &NullTerminatorBlock =
+ G.createContentBlock(*EHFrame, NullTerminatorBlockContent,
+ orc::ExecutorAddr(~uint64_t(4)), 1, 0);
G.addAnonymousSymbol(NullTerminatorBlock, 0, 4, false, true);
return Error::success();
}
@@ -762,17 +759,15 @@ Error EHFrameNullTerminator::operator()(LinkGraph &G) {
EHFrameRegistrar::~EHFrameRegistrar() {}
Error InProcessEHFrameRegistrar::registerEHFrames(
- JITTargetAddress EHFrameSectionAddr, size_t EHFrameSectionSize) {
- return orc::registerEHFrameSection(
- jitTargetAddressToPointer<void *>(EHFrameSectionAddr),
- EHFrameSectionSize);
+ orc::ExecutorAddrRange EHFrameSection) {
+ return orc::registerEHFrameSection(EHFrameSection.Start.toPtr<void *>(),
+ EHFrameSection.size());
}
Error InProcessEHFrameRegistrar::deregisterEHFrames(
- JITTargetAddress EHFrameSectionAddr, size_t EHFrameSectionSize) {
- return orc::deregisterEHFrameSection(
- jitTargetAddressToPointer<void *>(EHFrameSectionAddr),
- EHFrameSectionSize);
+ orc::ExecutorAddrRange EHFrameSection) {
+ return orc::deregisterEHFrameSection(EHFrameSection.Start.toPtr<void *>(),
+ EHFrameSection.size());
}
LinkGraphPassFunction
@@ -789,14 +784,14 @@ createEHFrameRecorderPass(const Triple &TT,
StoreFrameRange = std::move(StoreRangeAddress)](LinkGraph &G) -> Error {
// Search for a non-empty eh-frame and record the address of the first
// symbol in it.
- JITTargetAddress Addr = 0;
+ orc::ExecutorAddr Addr;
size_t Size = 0;
if (auto *S = G.findSectionByName(EHFrameSectionName)) {
auto R = SectionRange(*S);
Addr = R.getStart();
Size = R.getSize();
}
- if (Addr == 0 && Size != 0)
+ if (!Addr && Size != 0)
return make_error<JITLinkError>(
StringRef(EHFrameSectionName) +
" section can not have zero address with non-zero size");
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/EHFrameSupportImpl.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/EHFrameSupportImpl.h
index b4c4b0f7b097..ef4b47b9aa28 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/EHFrameSupportImpl.h
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/EHFrameSupportImpl.h
@@ -71,12 +71,12 @@ private:
};
using BlockEdgeMap = DenseMap<Edge::OffsetT, EdgeTarget>;
- using CIEInfosMap = DenseMap<JITTargetAddress, CIEInformation>;
+ using CIEInfosMap = DenseMap<orc::ExecutorAddr, CIEInformation>;
struct ParseContext {
ParseContext(LinkGraph &G) : G(G) {}
- Expected<CIEInformation *> findCIEInfo(JITTargetAddress Address) {
+ Expected<CIEInformation *> findCIEInfo(orc::ExecutorAddr Address) {
auto I = CIEInfos.find(Address);
if (I == CIEInfos.end())
return make_error<JITLinkError>("No CIE found at address " +
@@ -102,12 +102,13 @@ private:
static bool isSupportedPointerEncoding(uint8_t PointerEncoding);
unsigned getPointerEncodingDataSize(uint8_t PointerEncoding);
- Expected<std::pair<JITTargetAddress, Edge::Kind>>
+ Expected<std::pair<orc::ExecutorAddr, Edge::Kind>>
readEncodedPointer(uint8_t PointerEncoding,
- JITTargetAddress PointerFieldAddress,
+ orc::ExecutorAddr PointerFieldAddress,
BinaryStreamReader &RecordReader);
- Expected<Symbol &> getOrCreateSymbol(ParseContext &PC, JITTargetAddress Addr);
+ Expected<Symbol &> getOrCreateSymbol(ParseContext &PC,
+ orc::ExecutorAddr Addr);
StringRef EHFrameSectionName;
unsigned PointerSize;
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.h
index f9101d71dfa8..2ab7ed61f71b 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.h
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.h
@@ -77,14 +77,14 @@ protected:
return Obj.getHeader().e_type == llvm::ELF::ET_REL;
}
- void setGraphSection(ELFSectionIndex SecIndex, Section &Sec) {
- assert(!GraphSections.count(SecIndex) && "Duplicate section at index");
- GraphSections[SecIndex] = &Sec;
+ void setGraphBlock(ELFSectionIndex SecIndex, Block *B) {
+ assert(!GraphBlocks.count(SecIndex) && "Duplicate section at index");
+ GraphBlocks[SecIndex] = B;
}
- Section *getGraphSection(ELFSectionIndex SecIndex) {
- auto I = GraphSections.find(SecIndex);
- if (I == GraphSections.end())
+ Block *getGraphBlock(ELFSectionIndex SecIndex) {
+ auto I = GraphBlocks.find(SecIndex);
+ if (I == GraphBlocks.end())
return nullptr;
return I->second;
}
@@ -139,9 +139,9 @@ protected:
const typename ELFFile::Elf_Shdr *SymTabSec = nullptr;
StringRef SectionStringTab;
- // Maps ELF section indexes to LinkGraph Sections.
- // Only SHF_ALLOC sections will have graph sections.
- DenseMap<ELFSectionIndex, Section *> GraphSections;
+ // Maps ELF section indexes to LinkGraph Blocks.
+ // Only SHF_ALLOC sections will have graph blocks.
+ DenseMap<ELFSectionIndex, Block *> GraphBlocks;
DenseMap<ELFSymbolIndex, Symbol *> GraphSymbols;
DenseMap<const typename ELFFile::Elf_Shdr *,
ArrayRef<typename ELFFile::Elf_Word>>
@@ -316,18 +316,27 @@ template <typename ELFT> Error ELFLinkGraphBuilder<ELFT>::graphifySections() {
else
Prot = MemProt::Read | MemProt::Write;
- auto &GraphSec = G->createSection(*Name, Prot);
+ // Look for existing sections first.
+ auto *GraphSec = G->findSectionByName(*Name);
+ if (!GraphSec)
+ GraphSec = &G->createSection(*Name, Prot);
+ assert(GraphSec->getMemProt() == Prot && "MemProt should match");
+
+ Block *B = nullptr;
if (Sec.sh_type != ELF::SHT_NOBITS) {
auto Data = Obj.template getSectionContentsAsArray<char>(Sec);
if (!Data)
return Data.takeError();
- G->createContentBlock(GraphSec, *Data, Sec.sh_addr, Sec.sh_addralign, 0);
+ B = &G->createContentBlock(*GraphSec, *Data,
+ orc::ExecutorAddr(Sec.sh_addr),
+ Sec.sh_addralign, 0);
} else
- G->createZeroFillBlock(GraphSec, Sec.sh_size, Sec.sh_addr,
- Sec.sh_addralign, 0);
+ B = &G->createZeroFillBlock(*GraphSec, Sec.sh_size,
+ orc::ExecutorAddr(Sec.sh_addr),
+ Sec.sh_addralign, 0);
- setGraphSection(SecIndex, GraphSec);
+ setGraphBlock(SecIndex, B);
}
return Error::success();
@@ -393,9 +402,9 @@ template <typename ELFT> Error ELFLinkGraphBuilder<ELFT>::graphifySymbols() {
// Handle common symbols specially.
if (Sym.isCommon()) {
- Symbol &GSym =
- G->addCommonSymbol(*Name, Scope::Default, getCommonSection(), 0,
- Sym.st_size, Sym.getValue(), false);
+ Symbol &GSym = G->addCommonSymbol(*Name, Scope::Default,
+ getCommonSection(), orc::ExecutorAddr(),
+ Sym.st_size, Sym.getValue(), false);
setGraphSymbol(SymIndex, GSym);
continue;
}
@@ -425,28 +434,24 @@ template <typename ELFT> Error ELFLinkGraphBuilder<ELFT>::graphifySymbols() {
return NdxOrErr.takeError();
Shndx = *NdxOrErr;
}
- if (auto *GraphSec = getGraphSection(Shndx)) {
- Block *B = nullptr;
- {
- auto Blocks = GraphSec->blocks();
- assert(Blocks.begin() != Blocks.end() && "No blocks for section");
- assert(std::next(Blocks.begin()) == Blocks.end() &&
- "Multiple blocks for section");
- B = *Blocks.begin();
- }
-
+ if (auto *B = getGraphBlock(Shndx)) {
LLVM_DEBUG({
dbgs() << " " << SymIndex
<< ": Creating defined graph symbol for ELF symbol \"" << *Name
<< "\"\n";
});
- if (Sym.getType() == ELF::STT_SECTION)
- *Name = GraphSec->getName();
-
+ // In RISCV, temporary symbols (used to generate dwarf, eh_frame
+ // sections, ...) appear in the object code's symbol table, and LLVM does
+ // not give these temporary symbols names (the RISCV GNU toolchain does).
+ // If the symbol is unnamed, add an anonymous symbol instead.
auto &GSym =
- G->addDefinedSymbol(*B, Sym.getValue(), *Name, Sym.st_size, L, S,
- Sym.getType() == ELF::STT_FUNC, false);
+ Name->empty()
+ ? G->addAnonymousSymbol(*B, Sym.getValue(), Sym.st_size,
+ false, false)
+ : G->addDefinedSymbol(*B, Sym.getValue(), *Name, Sym.st_size, L,
+ S, Sym.getType() == ELF::STT_FUNC, false);
setGraphSymbol(SymIndex, GSym);
}
} else if (Sym.isUndefined() && Sym.isExternal()) {
@@ -498,8 +503,8 @@ Error ELFLinkGraphBuilder<ELFT>::forEachRelocation(
}
// Lookup the link-graph node corresponding to the target section name.
- Section *GraphSect = G->findSectionByName(*Name);
- if (!GraphSect)
+ auto *BlockToFix = getGraphBlock(RelSect.sh_info);
+ if (!BlockToFix)
return make_error<StringError>(
"Refencing a section that wasn't added to the graph: " + *Name,
inconvertibleErrorCode());
@@ -510,7 +515,7 @@ Error ELFLinkGraphBuilder<ELFT>::forEachRelocation(
// Let the callee process relocation entries one by one.
for (const typename ELFT::Rela &R : *RelEntries)
- if (Error Err = Func(R, **FixupSection, *GraphSect))
+ if (Error Err = Func(R, **FixupSection, *BlockToFix))
return Err;
LLVM_DEBUG(dbgs() << "\n");
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp
index dc183dfddfae..dd3eb97c21a0 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp
@@ -16,6 +16,7 @@
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/ExecutionEngine/JITLink/aarch64.h"
#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Support/MathExtras.h"
#define DEBUG_TYPE "jitlink"
@@ -41,16 +42,17 @@ private:
char *BlockWorkingMem = B.getAlreadyMutableContent().data();
char *FixupPtr = BlockWorkingMem + E.getOffset();
- JITTargetAddress FixupAddress = B.getAddress() + E.getOffset();
+ auto FixupAddress = B.getAddress() + E.getOffset();
switch (E.getKind()) {
case aarch64::R_AARCH64_CALL26: {
- assert((FixupAddress & 0x3) == 0 && "Call-inst is not 32-bit aligned");
+ assert((FixupAddress.getValue() & 0x3) == 0 &&
+ "Call-inst is not 32-bit aligned");
int64_t Value = E.getTarget().getAddress() - FixupAddress + E.getAddend();
if (static_cast<uint64_t>(Value) & 0x3)
return make_error<JITLinkError>("Call target is not 32-bit aligned");
- if (!fitsRangeSignedInt<27>(Value))
+ if (!isInt<28>(Value))
return makeTargetOutOfRangeError(G, B, E);
uint32_t RawInstr = *(little32_t *)FixupPtr;
@@ -64,10 +66,6 @@ private:
}
return Error::success();
}
-
- template <uint8_t Bits> static bool fitsRangeSignedInt(int64_t Value) {
- return Value >= -(1ll << Bits) && Value < (1ll << Bits);
- }
};
template <typename ELFT>
@@ -100,7 +98,7 @@ private:
Error addSingleRelocation(const typename ELFT::Rela &Rel,
const typename ELFT::Shdr &FixupSect,
- Section &GraphSection) {
+ Block &BlockToFix) {
using Base = ELFLinkGraphBuilder<ELFT>;
uint32_t SymbolIndex = Rel.getSymbol(false);
@@ -123,17 +121,17 @@ private:
return Kind.takeError();
int64_t Addend = Rel.r_addend;
- Block *BlockToFix = *(GraphSection.blocks().begin());
- JITTargetAddress FixupAddress = FixupSect.sh_addr + Rel.r_offset;
- Edge::OffsetT Offset = FixupAddress - BlockToFix->getAddress();
+ orc::ExecutorAddr FixupAddress =
+ orc::ExecutorAddr(FixupSect.sh_addr) + Rel.r_offset;
+ Edge::OffsetT Offset = FixupAddress - BlockToFix.getAddress();
Edge GE(*Kind, Offset, *GraphSymbol, Addend);
LLVM_DEBUG({
dbgs() << " ";
- printEdge(dbgs(), *BlockToFix, GE, aarch64::getEdgeKindName(*Kind));
+ printEdge(dbgs(), BlockToFix, GE, aarch64::getEdgeKindName(*Kind));
dbgs() << "\n";
});
- BlockToFix->addEdge(std::move(GE));
+ BlockToFix.addEdge(std::move(GE));
return Error::success();
}
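The aarch64 fixup above replaces the local fitsRangeSignedInt<27> helper with llvm::isInt<28>; both accept exactly the values representable as a 28-bit signed immediate, which is the ±128 MiB range of a 26-bit word-aligned branch offset. A quick check of that equivalence using a local stand-in for isInt (fitsSigned is not the LLVM function):

    #include <cassert>
    #include <cstdint>

    // Same predicate as llvm::isInt<N>: does Value fit in an N-bit signed integer?
    template <unsigned N> static bool fitsSigned(int64_t Value) {
      return Value >= -(int64_t(1) << (N - 1)) && Value < (int64_t(1) << (N - 1));
    }

    int main() {
      const int64_t Max = (int64_t(1) << 27) - 1; // largest 28-bit signed value
      const int64_t Min = -(int64_t(1) << 27);    // smallest 28-bit signed value
      assert(fitsSigned<28>(Max) && fitsSigned<28>(Min));
      assert(!fitsSigned<28>(Max + 1) && !fitsSigned<28>(Min - 1));
    }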
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_riscv.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_riscv.cpp
index b057788ce3ef..f83001417e94 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_riscv.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_riscv.cpp
@@ -19,6 +19,7 @@
#include "llvm/ExecutionEngine/JITLink/riscv.h"
#include "llvm/Object/ELF.h"
#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Support/Endian.h"
#define DEBUG_TYPE "jitlink"
using namespace llvm;
@@ -44,15 +45,16 @@ public:
bool isGOTEdgeToFix(Edge &E) const { return E.getKind() == R_RISCV_GOT_HI20; }
Symbol &createGOTEntry(Symbol &Target) {
- Block &GOTBlock = G.createContentBlock(
- getGOTSection(), getGOTEntryBlockContent(), 0, G.getPointerSize(), 0);
+ Block &GOTBlock =
+ G.createContentBlock(getGOTSection(), getGOTEntryBlockContent(),
+ orc::ExecutorAddr(), G.getPointerSize(), 0);
GOTBlock.addEdge(isRV64() ? R_RISCV_64 : R_RISCV_32, 0, Target, 0);
return G.addAnonymousSymbol(GOTBlock, 0, G.getPointerSize(), false, false);
}
Symbol &createPLTStub(Symbol &Target) {
- Block &StubContentBlock =
- G.createContentBlock(getStubsSection(), getStubBlockContent(), 0, 4, 0);
+ Block &StubContentBlock = G.createContentBlock(
+ getStubsSection(), getStubBlockContent(), orc::ExecutorAddr(), 4, 0);
auto &GOTEntrySymbol = getGOTEntry(Target);
StubContentBlock.addEdge(R_RISCV_CALL, 0, GOTEntrySymbol, 0);
return G.addAnonymousSymbol(StubContentBlock, 0, StubEntrySize, true,
@@ -134,13 +136,13 @@ static Expected<const Edge &> getRISCVPCRelHi20(const Edge &E) {
const Symbol &Sym = E.getTarget();
const Block &B = Sym.getBlock();
- JITTargetAddress Offset = Sym.getOffset();
+ orc::ExecutorAddrDiff Offset = Sym.getOffset();
struct Comp {
- bool operator()(const Edge &Lhs, JITTargetAddress Offset) {
+ bool operator()(const Edge &Lhs, orc::ExecutorAddrDiff Offset) {
return Lhs.getOffset() < Offset;
}
- bool operator()(JITTargetAddress Offset, const Edge &Rhs) {
+ bool operator()(orc::ExecutorAddrDiff Offset, const Edge &Rhs) {
return Offset < Rhs.getOffset();
}
};
@@ -157,8 +159,24 @@ static Expected<const Edge &> getRISCVPCRelHi20(const Edge &E) {
"No HI20 PCREL relocation type be found for LO12 PCREL relocation type");
}
-static uint32_t extractBits(uint64_t Num, unsigned High, unsigned Low) {
- return (Num & ((1ULL << (High + 1)) - 1)) >> Low;
+static uint32_t extractBits(uint32_t Num, unsigned Low, unsigned Size) {
+ return (Num & (((1ULL << (Size + 1)) - 1) << Low)) >> Low;
+}
+
+inline Error checkAlignment(llvm::orc::ExecutorAddr loc, uint64_t v, int n,
+ const Edge &E) {
+ if (v & (n - 1))
+ return make_error<JITLinkError>("0x" + llvm::utohexstr(loc.getValue()) +
+ " improper alignment for relocation " +
+ formatv("{0:d}", E.getKind()) + ": 0x" +
+ llvm::utohexstr(v) + " is not aligned to " +
+ Twine(n) + " bytes");
+ return Error::success();
+}
+
+static inline bool isInRangeForImmS32(int64_t Value) {
+ return (Value >= std::numeric_limits<int32_t>::min() &&
+ Value <= std::numeric_limits<int32_t>::max());
}
class ELFJITLinker_riscv : public JITLinker<ELFJITLinker_riscv> {
@@ -176,27 +194,47 @@ private:
char *BlockWorkingMem = B.getAlreadyMutableContent().data();
char *FixupPtr = BlockWorkingMem + E.getOffset();
- JITTargetAddress FixupAddress = B.getAddress() + E.getOffset();
+ orc::ExecutorAddr FixupAddress = B.getAddress() + E.getOffset();
switch (E.getKind()) {
case R_RISCV_32: {
- int64_t Value = E.getTarget().getAddress() + E.getAddend();
+ int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
*(little32_t *)FixupPtr = static_cast<uint32_t>(Value);
break;
}
case R_RISCV_64: {
- int64_t Value = E.getTarget().getAddress() + E.getAddend();
+ int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
*(little64_t *)FixupPtr = static_cast<uint64_t>(Value);
break;
}
+ case R_RISCV_BRANCH: {
+ int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
+ Error AlignmentIssue = checkAlignment(FixupAddress, Value, 2, E);
+ if (AlignmentIssue) {
+ return AlignmentIssue;
+ }
+ int64_t Lo = Value & 0xFFF;
+ uint32_t Imm31_25 = extractBits(Lo, 5, 6) << 25 | extractBits(Lo, 12, 1)
+ << 31;
+ uint32_t Imm11_7 = extractBits(Lo, 1, 4) << 8 | extractBits(Lo, 11, 1)
+ << 7;
+ uint32_t RawInstr = *(little32_t *)FixupPtr;
+ *(little32_t *)FixupPtr = (RawInstr & 0x1FFF07F) | Imm31_25 | Imm11_7;
+ break;
+ }
case R_RISCV_HI20: {
- int64_t Value = E.getTarget().getAddress() + E.getAddend();
- int32_t Hi = (Value + 0x800) & 0xFFFFF000;
+ int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
+ int64_t Hi = Value + 0x800;
+ if (LLVM_UNLIKELY(!isInRangeForImmS32(Hi)))
+ return makeTargetOutOfRangeError(G, B, E);
uint32_t RawInstr = *(little32_t *)FixupPtr;
- *(little32_t *)FixupPtr = (RawInstr & 0xFFF) | static_cast<uint32_t>(Hi);
+ *(little32_t *)FixupPtr =
+ (RawInstr & 0xFFF) | (static_cast<uint32_t>(Hi & 0xFFFFF000));
break;
}
case R_RISCV_LO12_I: {
- int64_t Value = E.getTarget().getAddress() + E.getAddend();
+ // FIXME: We assume a matching R_RISCV_HI20 is present in the object code
+ // and pairs with this R_RISCV_LO12_I relocation; a check may be needed here.
+ int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
int32_t Lo = Value & 0xFFF;
uint32_t RawInstr = *(little32_t *)FixupPtr;
*(little32_t *)FixupPtr =
@@ -205,23 +243,32 @@ private:
}
case R_RISCV_CALL: {
int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
- int32_t Hi = (Value + 0x800) & 0xFFFFF000;
+ int64_t Hi = Value + 0x800;
+ if (LLVM_UNLIKELY(!isInRangeForImmS32(Hi)))
+ return makeTargetOutOfRangeError(G, B, E);
int32_t Lo = Value & 0xFFF;
uint32_t RawInstrAuipc = *(little32_t *)FixupPtr;
uint32_t RawInstrJalr = *(little32_t *)(FixupPtr + 4);
- *(little32_t *)FixupPtr = RawInstrAuipc | static_cast<uint32_t>(Hi);
+ *(little32_t *)FixupPtr =
+ RawInstrAuipc | (static_cast<uint32_t>(Hi & 0xFFFFF000));
*(little32_t *)(FixupPtr + 4) =
RawInstrJalr | (static_cast<uint32_t>(Lo) << 20);
break;
}
case R_RISCV_PCREL_HI20: {
int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
- int32_t Hi = (Value + 0x800) & 0xFFFFF000;
+ int64_t Hi = Value + 0x800;
+ if (LLVM_UNLIKELY(!isInRangeForImmS32(Hi)))
+ return makeTargetOutOfRangeError(G, B, E);
uint32_t RawInstr = *(little32_t *)FixupPtr;
- *(little32_t *)FixupPtr = (RawInstr & 0xFFF) | static_cast<uint32_t>(Hi);
+ *(little32_t *)FixupPtr =
+ (RawInstr & 0xFFF) | (static_cast<uint32_t>(Hi & 0xFFFFF000));
break;
}
case R_RISCV_PCREL_LO12_I: {
+ // FIXME: We assume a matching R_RISCV_PCREL_HI20 is present in the object
+ // code and pairs with this R_RISCV_PCREL_LO12_I relocation; a check may be
+ // needed here.
auto RelHI20 = getRISCVPCRelHi20(E);
if (!RelHI20)
return RelHI20.takeError();
@@ -234,17 +281,117 @@ private:
break;
}
case R_RISCV_PCREL_LO12_S: {
+ // FIXME: We assume a matching R_RISCV_PCREL_HI20 is present in the object
+ // code and pairs with this R_RISCV_PCREL_LO12_S relocation; a check may be
+ // needed here.
auto RelHI20 = getRISCVPCRelHi20(E);
int64_t Value = RelHI20->getTarget().getAddress() +
RelHI20->getAddend() - E.getTarget().getAddress();
int64_t Lo = Value & 0xFFF;
- uint32_t Imm31_25 = extractBits(Lo, 11, 5) << 25;
- uint32_t Imm11_7 = extractBits(Lo, 4, 0) << 7;
+ uint32_t Imm31_25 = extractBits(Lo, 5, 7) << 25;
+ uint32_t Imm11_7 = extractBits(Lo, 0, 5) << 7;
uint32_t RawInstr = *(little32_t *)FixupPtr;
*(little32_t *)FixupPtr = (RawInstr & 0x1FFF07F) | Imm31_25 | Imm11_7;
break;
}
+ case R_RISCV_ADD64: {
+ int64_t Value = (E.getTarget().getAddress() +
+ support::endian::read64le(reinterpret_cast<const void *>(
+ FixupAddress.getValue())) +
+ E.getAddend())
+ .getValue();
+ *(little64_t *)FixupPtr = static_cast<uint64_t>(Value);
+ break;
+ }
+ case R_RISCV_ADD32: {
+ int64_t Value = (E.getTarget().getAddress() +
+ support::endian::read32le(reinterpret_cast<const void *>(
+ FixupAddress.getValue())) +
+ E.getAddend())
+ .getValue();
+ *(little32_t *)FixupPtr = static_cast<uint32_t>(Value);
+ break;
+ }
+ case R_RISCV_ADD16: {
+ int64_t Value = (E.getTarget().getAddress() +
+ support::endian::read16le(reinterpret_cast<const void *>(
+ FixupAddress.getValue())) +
+ E.getAddend())
+ .getValue();
+ *(little16_t *)FixupPtr = static_cast<uint32_t>(Value);
+ break;
+ }
+ case R_RISCV_ADD8: {
+ int64_t Value =
+ (E.getTarget().getAddress() +
+ *(reinterpret_cast<const uint8_t *>(FixupAddress.getValue())) +
+ E.getAddend())
+ .getValue();
+ *FixupPtr = static_cast<uint8_t>(Value);
+ break;
+ }
+ case R_RISCV_SUB64: {
+ int64_t Value = support::endian::read64le(reinterpret_cast<const void *>(
+ FixupAddress.getValue())) -
+ E.getTarget().getAddress().getValue() - E.getAddend();
+ *(little64_t *)FixupPtr = static_cast<uint64_t>(Value);
+ break;
+ }
+ case R_RISCV_SUB32: {
+ int64_t Value = support::endian::read32le(reinterpret_cast<const void *>(
+ FixupAddress.getValue())) -
+ E.getTarget().getAddress().getValue() - E.getAddend();
+ *(little32_t *)FixupPtr = static_cast<uint32_t>(Value);
+ break;
+ }
+ case R_RISCV_SUB16: {
+ int64_t Value = support::endian::read16le(reinterpret_cast<const void *>(
+ FixupAddress.getValue())) -
+ E.getTarget().getAddress().getValue() - E.getAddend();
+ *(little16_t *)FixupPtr = static_cast<uint32_t>(Value);
+ break;
+ }
+ case R_RISCV_SUB8: {
+ int64_t Value =
+ *(reinterpret_cast<const uint8_t *>(FixupAddress.getValue())) -
+ E.getTarget().getAddress().getValue() - E.getAddend();
+ *FixupPtr = static_cast<uint8_t>(Value);
+ break;
+ }
+ case R_RISCV_SET6: {
+ int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
+ uint32_t RawData = *(little32_t *)FixupPtr;
+ int64_t Word6 = Value & 0x3f;
+ *(little32_t *)FixupPtr = (RawData & 0xffffffc0) | Word6;
+ break;
+ }
+ case R_RISCV_SET8: {
+ int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
+ uint32_t RawData = *(little32_t *)FixupPtr;
+ int64_t Word8 = Value & 0xff;
+ *(little32_t *)FixupPtr = (RawData & 0xffffff00) | Word8;
+ break;
+ }
+ case R_RISCV_SET16: {
+ int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
+ uint32_t RawData = *(little32_t *)FixupPtr;
+ int64_t Word16 = Value & 0xffff;
+ *(little32_t *)FixupPtr = (RawData & 0xffff0000) | Word16;
+ break;
+ }
+ case R_RISCV_SET32: {
+ int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
+ int64_t Word32 = Value & 0xffffffff;
+ *(little32_t *)FixupPtr = Word32;
+ break;
+ }
+ case R_RISCV_32_PCREL: {
+ int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
+ int64_t Word32 = Value & 0xffffffff;
+ *(little32_t *)FixupPtr = Word32;
+ break;
+ }
}
return Error::success();
}
@@ -261,6 +408,8 @@ private:
return EdgeKind_riscv::R_RISCV_32;
case ELF::R_RISCV_64:
return EdgeKind_riscv::R_RISCV_64;
+ case ELF::R_RISCV_BRANCH:
+ return EdgeKind_riscv::R_RISCV_BRANCH;
case ELF::R_RISCV_HI20:
return EdgeKind_riscv::R_RISCV_HI20;
case ELF::R_RISCV_LO12_I:
@@ -277,6 +426,32 @@ private:
return EdgeKind_riscv::R_RISCV_GOT_HI20;
case ELF::R_RISCV_CALL_PLT:
return EdgeKind_riscv::R_RISCV_CALL_PLT;
+ case ELF::R_RISCV_ADD64:
+ return EdgeKind_riscv::R_RISCV_ADD64;
+ case ELF::R_RISCV_ADD32:
+ return EdgeKind_riscv::R_RISCV_ADD32;
+ case ELF::R_RISCV_ADD16:
+ return EdgeKind_riscv::R_RISCV_ADD16;
+ case ELF::R_RISCV_ADD8:
+ return EdgeKind_riscv::R_RISCV_ADD8;
+ case ELF::R_RISCV_SUB64:
+ return EdgeKind_riscv::R_RISCV_SUB64;
+ case ELF::R_RISCV_SUB32:
+ return EdgeKind_riscv::R_RISCV_SUB32;
+ case ELF::R_RISCV_SUB16:
+ return EdgeKind_riscv::R_RISCV_SUB16;
+ case ELF::R_RISCV_SUB8:
+ return EdgeKind_riscv::R_RISCV_SUB8;
+ case ELF::R_RISCV_SET6:
+ return EdgeKind_riscv::R_RISCV_SET6;
+ case ELF::R_RISCV_SET8:
+ return EdgeKind_riscv::R_RISCV_SET8;
+ case ELF::R_RISCV_SET16:
+ return EdgeKind_riscv::R_RISCV_SET16;
+ case ELF::R_RISCV_SET32:
+ return EdgeKind_riscv::R_RISCV_SET32;
+ case ELF::R_RISCV_32_PCREL:
+ return EdgeKind_riscv::R_RISCV_32_PCREL;
}
return make_error<JITLinkError>("Unsupported riscv relocation:" +
@@ -298,7 +473,7 @@ private:
Error addSingleRelocation(const typename ELFT::Rela &Rel,
const typename ELFT::Shdr &FixupSect,
- Section &GraphSection) {
+ Block &BlockToFix) {
using Base = ELFLinkGraphBuilder<ELFT>;
uint32_t SymbolIndex = Rel.getSymbol(false);
@@ -321,17 +496,16 @@ private:
return Kind.takeError();
int64_t Addend = Rel.r_addend;
- Block *BlockToFix = *(GraphSection.blocks().begin());
- JITTargetAddress FixupAddress = FixupSect.sh_addr + Rel.r_offset;
- Edge::OffsetT Offset = FixupAddress - BlockToFix->getAddress();
+ auto FixupAddress = orc::ExecutorAddr(FixupSect.sh_addr) + Rel.r_offset;
+ Edge::OffsetT Offset = FixupAddress - BlockToFix.getAddress();
Edge GE(*Kind, Offset, *GraphSymbol, Addend);
LLVM_DEBUG({
dbgs() << " ";
- printEdge(dbgs(), *BlockToFix, GE, riscv::getEdgeKindName(*Kind));
+ printEdge(dbgs(), BlockToFix, GE, riscv::getEdgeKindName(*Kind));
dbgs() << "\n";
});
- BlockToFix->addEdge(std::move(GE));
+ BlockToFix.addEdge(std::move(GE));
return Error::success();
}
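
The R_RISCV_HI20/LO12 and PCREL_HI20/LO12 cases above split a 32-bit value into a 20-bit upper immediate and a sign-extended 12-bit lower immediate, rounding the upper part with +0x800 so the pair recombines exactly. A small standalone check of that arithmetic follows (plain C++, no JITLink types; recombine is an illustrative helper, not part of the patch):

#include <cassert>
#include <cstdint>

// Recombine a HI20/LO12 pair the way the patched instruction pair does: the
// upper instruction contributes the rounded top 20 bits, the lower one adds
// its 12-bit immediate sign-extended. The +0x800 above rounds the upper part
// so the sign-extended low part brings the sum back to the original value.
static int64_t recombine(int64_t Value) {
  int64_t Hi = (Value + 0x800) & ~int64_t(0xFFF);
  int64_t Lo = Value & 0xFFF;
  int64_t LoSExt = (Lo >= 0x800) ? Lo - 0x1000 : Lo; // sign-extend 12 bits
  return Hi + LoSExt;
}

int main() {
  const int64_t Tests[] = {0, 0x7FF, 0x800, 0xFFF, 0x12345678, -1, -0x800};
  for (int64_t V : Tests)
    assert(recombine(V) == V);
  return 0;
}
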
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp
index 27d8833ae19e..79d2cdbb30f1 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp
@@ -59,8 +59,8 @@ public:
// the TLS Info entry's key value will be written by the fixTLVSectionByName
// pass, so create mutable content.
auto &TLSInfoEntry = G.createMutableContentBlock(
- getTLSInfoSection(G), G.allocateContent(getTLSInfoEntryContent()), 0, 8,
- 0);
+ getTLSInfoSection(G), G.allocateContent(getTLSInfoEntryContent()),
+ orc::ExecutorAddr(), 8, 0);
TLSInfoEntry.addEdge(x86_64::Pointer64, 8, Target, 0);
return G.addAnonymousSymbol(TLSInfoEntry, 0, 16, false, false);
}
@@ -172,7 +172,7 @@ private:
Error addSingleRelocation(const typename ELFT::Rela &Rel,
const typename ELFT::Shdr &FixupSection,
- Section &GraphSection) {
+ Block &BlockToFix) {
using Base = ELFLinkGraphBuilder<ELFT>;
uint32_t SymbolIndex = Rel.getSymbol(false);
@@ -248,17 +248,16 @@ private:
}
}
- Block *BlockToFix = *(GraphSection.blocks().begin());
- JITTargetAddress FixupAddress = FixupSection.sh_addr + Rel.r_offset;
- Edge::OffsetT Offset = FixupAddress - BlockToFix->getAddress();
+ auto FixupAddress = orc::ExecutorAddr(FixupSection.sh_addr) + Rel.r_offset;
+ Edge::OffsetT Offset = FixupAddress - BlockToFix.getAddress();
Edge GE(Kind, Offset, *GraphSymbol, Addend);
LLVM_DEBUG({
dbgs() << " ";
- printEdge(dbgs(), *BlockToFix, GE, x86_64::getEdgeKindName(Kind));
+ printEdge(dbgs(), BlockToFix, GE, x86_64::getEdgeKindName(Kind));
dbgs() << "\n";
});
- BlockToFix->addEdge(std::move(GE));
+ BlockToFix.addEdge(std::move(GE));
return Error::success();
}
@@ -322,8 +321,9 @@ private:
// If there's no defined symbol then create one.
SectionRange SR(*GOTSection);
if (SR.empty())
- GOTSymbol = &G.addAbsoluteSymbol(ELFGOTSymbolName, 0, 0,
- Linkage::Strong, Scope::Local, true);
+ GOTSymbol =
+ &G.addAbsoluteSymbol(ELFGOTSymbolName, orc::ExecutorAddr(), 0,
+ Linkage::Strong, Scope::Local, true);
else
GOTSymbol =
&G.addDefinedSymbol(*SR.getFirstBlock(), 0, ELFGOTSymbolName, 0,
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLink.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLink.cpp
index 51dcc1c35fad..78a603cfed17 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLink.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLink.cpp
@@ -90,8 +90,8 @@ const char *getScopeName(Scope S) {
}
raw_ostream &operator<<(raw_ostream &OS, const Block &B) {
- return OS << formatv("{0:x16}", B.getAddress()) << " -- "
- << formatv("{0:x8}", B.getAddress() + B.getSize()) << ": "
+ return OS << B.getAddress() << " -- " << (B.getAddress() + B.getSize())
+ << ": "
<< "size = " << formatv("{0:x8}", B.getSize()) << ", "
<< (B.isZeroFill() ? "zero-fill" : "content")
<< ", align = " << B.getAlignment()
@@ -100,9 +100,8 @@ raw_ostream &operator<<(raw_ostream &OS, const Block &B) {
}
raw_ostream &operator<<(raw_ostream &OS, const Symbol &Sym) {
- OS << formatv("{0:x16}", Sym.getAddress()) << " ("
- << (Sym.isDefined() ? "block" : "addressable") << " + "
- << formatv("{0:x8}", Sym.getOffset())
+ OS << Sym.getAddress() << " (" << (Sym.isDefined() ? "block" : "addressable")
+ << " + " << formatv("{0:x8}", Sym.getOffset())
<< "): size: " << formatv("{0:x8}", Sym.getSize())
<< ", linkage: " << formatv("{0:6}", getLinkageName(Sym.getLinkage()))
<< ", scope: " << formatv("{0:8}", getScopeName(Sym.getScope())) << ", "
@@ -113,9 +112,9 @@ raw_ostream &operator<<(raw_ostream &OS, const Symbol &Sym) {
void printEdge(raw_ostream &OS, const Block &B, const Edge &E,
StringRef EdgeKindName) {
- OS << "edge@" << formatv("{0:x16}", B.getAddress() + E.getOffset()) << ": "
- << formatv("{0:x16}", B.getAddress()) << " + "
- << formatv("{0:x}", E.getOffset()) << " -- " << EdgeKindName << " -> ";
+ OS << "edge@" << B.getAddress() + E.getOffset() << ": " << B.getAddress()
+ << " + " << formatv("{0:x}", E.getOffset()) << " -- " << EdgeKindName
+ << " -> ";
auto &TargetSym = E.getTarget();
if (TargetSym.hasName())
@@ -123,17 +122,16 @@ void printEdge(raw_ostream &OS, const Block &B, const Edge &E,
else {
auto &TargetBlock = TargetSym.getBlock();
auto &TargetSec = TargetBlock.getSection();
- JITTargetAddress SecAddress = ~JITTargetAddress(0);
+ orc::ExecutorAddr SecAddress(~uint64_t(0));
for (auto *B : TargetSec.blocks())
if (B->getAddress() < SecAddress)
SecAddress = B->getAddress();
- JITTargetAddress SecDelta = TargetSym.getAddress() - SecAddress;
- OS << formatv("{0:x16}", TargetSym.getAddress()) << " (section "
- << TargetSec.getName();
+ orc::ExecutorAddrDiff SecDelta = TargetSym.getAddress() - SecAddress;
+ OS << TargetSym.getAddress() << " (section " << TargetSec.getName();
if (SecDelta)
OS << " + " << formatv("{0:x}", SecDelta);
- OS << " / block " << formatv("{0:x16}", TargetBlock.getAddress());
+ OS << " / block " << TargetBlock.getAddress();
if (TargetSym.getOffset())
OS << " + " << formatv("{0:x}", TargetSym.getOffset());
OS << ")";
@@ -265,7 +263,7 @@ void LinkGraph::dump(raw_ostream &OS) {
});
for (auto *B : SortedBlocks) {
- OS << " block " << formatv("{0:x16}", B->getAddress())
+ OS << " block " << B->getAddress()
<< " size = " << formatv("{0:x8}", B->getSize())
<< ", align = " << B->getAlignment()
<< ", alignment-offset = " << B->getAlignmentOffset();
@@ -290,9 +288,8 @@ void LinkGraph::dump(raw_ostream &OS) {
return LHS.getOffset() < RHS.getOffset();
});
for (auto &E : SortedEdges) {
- OS << " " << formatv("{0:x16}", B->getFixupAddress(E))
- << " (block + " << formatv("{0:x8}", E.getOffset())
- << "), addend = ";
+ OS << " " << B->getFixupAddress(E) << " (block + "
+ << formatv("{0:x8}", E.getOffset()) << "), addend = ";
if (E.getAddend() >= 0)
OS << formatv("+{0:x8}", E.getAddend());
else
@@ -315,16 +312,14 @@ void LinkGraph::dump(raw_ostream &OS) {
OS << "Absolute symbols:\n";
if (!llvm::empty(absolute_symbols())) {
for (auto *Sym : absolute_symbols())
- OS << " " << format("0x%016" PRIx64, Sym->getAddress()) << ": " << *Sym
- << "\n";
+ OS << " " << Sym->getAddress() << ": " << *Sym << "\n";
} else
OS << " none\n";
OS << "\nExternal symbols:\n";
if (!llvm::empty(external_symbols())) {
for (auto *Sym : external_symbols())
- OS << " " << format("0x%016" PRIx64, Sym->getAddress()) << ": " << *Sym
- << "\n";
+ OS << " " << Sym->getAddress() << ": " << *Sym << "\n";
} else
OS << " none\n";
}
@@ -370,10 +365,13 @@ Error makeTargetOutOfRangeError(const LinkGraph &G, const Block &B,
Section &Sec = B.getSection();
ErrStream << "In graph " << G.getName() << ", section " << Sec.getName()
<< ": relocation target ";
- if (E.getTarget().hasName())
- ErrStream << "\"" << E.getTarget().getName() << "\" ";
- ErrStream << "at address " << formatv("{0:x}", E.getTarget().getAddress());
- ErrStream << " is out of range of " << G.getEdgeKindName(E.getKind())
+ if (E.getTarget().hasName()) {
+ ErrStream << "\"" << E.getTarget().getName() << "\"";
+ } else
+ ErrStream << E.getTarget().getBlock().getSection().getName() << " + "
+ << formatv("{0:x}", E.getOffset());
+ ErrStream << " at address " << formatv("{0:x}", E.getTarget().getAddress())
+ << " is out of range of " << G.getEdgeKindName(E.getKind())
<< " fixup at " << formatv("{0:x}", B.getFixupAddress(E)) << " (";
Symbol *BestSymbolForBlock = nullptr;
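
The JITLink.cpp hunks above drop the explicit formatv("{0:x16}", ...) wrappers and stream Block and Symbol addresses directly, which assumes the address type provides its own stream operator. A rough stand-alone illustration of that pattern follows; AddrSketch and its std::ostream operator are illustrative only, and the real ExecutorAddr printer may format differently.

#include <cstdint>
#include <iomanip>
#include <iostream>
#include <ostream>

// Illustrative address wrapper with its own stream operator, so call sites
// can write `OS << B.getAddress()` instead of formatv("{0:x16}", ...).
struct AddrSketch {
  uint64_t Value = 0;
};

std::ostream &operator<<(std::ostream &OS, AddrSketch A) {
  auto Flags = OS.flags();
  auto Fill = OS.fill('0');
  OS << "0x" << std::hex << std::setw(16) << A.Value;
  OS.flags(Flags);
  OS.fill(Fill);
  return OS;
}

int main() {
  std::cout << AddrSketch{0x1000} << "\n"; // prints 0x0000000000001000
  return 0;
}
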
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp
index 706688aba4ec..35ee050c8566 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp
@@ -192,7 +192,7 @@ JITLinkContext::LookupMap JITLinkerBase::getExternalSymbolNames() const {
// Identify unresolved external symbols.
JITLinkContext::LookupMap UnresolvedExternals;
for (auto *Sym : G->external_symbols()) {
- assert(Sym->getAddress() == 0 &&
+ assert(!Sym->getAddress() &&
"External has already been assigned an address");
assert(Sym->getName() != StringRef() && Sym->getName() != "" &&
"Externals must be named");
@@ -209,11 +209,12 @@ void JITLinkerBase::applyLookupResult(AsyncLookupResult Result) {
for (auto *Sym : G->external_symbols()) {
assert(Sym->getOffset() == 0 &&
"External symbol is not at the start of its addressable block");
- assert(Sym->getAddress() == 0 && "Symbol already resolved");
+ assert(!Sym->getAddress() && "Symbol already resolved");
assert(!Sym->isDefined() && "Symbol being resolved is already defined");
auto ResultI = Result.find(Sym->getName());
if (ResultI != Result.end())
- Sym->getAddressable().setAddress(ResultI->second.getAddress());
+ Sym->getAddressable().setAddress(
+ orc::ExecutorAddr(ResultI->second.getAddress()));
else
assert(Sym->getLinkage() == Linkage::Weak &&
"Failed to resolve non-weak reference");
@@ -223,7 +224,7 @@ void JITLinkerBase::applyLookupResult(AsyncLookupResult Result) {
dbgs() << "Externals after applying lookup result:\n";
for (auto *Sym : G->external_symbols())
dbgs() << " " << Sym->getName() << ": "
- << formatv("{0:x16}", Sym->getAddress()) << "\n";
+ << formatv("{0:x16}", Sym->getAddress().getValue()) << "\n";
});
}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.h
index e4fdda0783a4..1095fa5ce701 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.h
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.h
@@ -19,9 +19,6 @@
#define DEBUG_TYPE "jitlink"
namespace llvm {
-
-class MemoryBufferRef;
-
namespace jitlink {
/// Base class for a JIT linker.
@@ -161,4 +158,4 @@ void prune(LinkGraph &G);
#undef DEBUG_TYPE // "jitlink"
-#endif // LLVM_EXECUTIONENGINE_JITLINK_JITLINKGENERIC_H
+#endif // LIB_EXECUTIONENGINE_JITLINK_JITLINKGENERIC_H
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp
index 831b9b26d2fd..9315ac4f6120 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp
@@ -15,63 +15,12 @@
using namespace llvm;
-namespace {
-
-// FIXME: Remove this copy of CWrapperFunctionResult as soon as JITLink can
-// depend on shared utils from Orc.
-
-// Must be kept in-sync with compiler-rt/lib/orc/c-api.h.
-union CWrapperFunctionResultDataUnion {
- char *ValuePtr;
- char Value[sizeof(ValuePtr)];
-};
-
-// Must be kept in-sync with compiler-rt/lib/orc/c-api.h.
-typedef struct {
- CWrapperFunctionResultDataUnion Data;
- size_t Size;
-} CWrapperFunctionResult;
-
-Error toError(CWrapperFunctionResult R) {
- bool HasError = false;
- std::string ErrMsg;
- if (R.Size) {
- bool Large = R.Size > sizeof(CWrapperFunctionResultDataUnion);
- char *Content = Large ? R.Data.ValuePtr : R.Data.Value;
- if (Content[0]) {
- HasError = true;
- constexpr unsigned StrStart = 1 + sizeof(uint64_t);
- ErrMsg.resize(R.Size - StrStart);
- memcpy(&ErrMsg[0], Content + StrStart, R.Size - StrStart);
- }
- if (Large)
- free(R.Data.ValuePtr);
- } else if (R.Data.ValuePtr) {
- HasError = true;
- ErrMsg = R.Data.ValuePtr;
- free(R.Data.ValuePtr);
- }
-
- if (HasError)
- return make_error<StringError>(std::move(ErrMsg), inconvertibleErrorCode());
- return Error::success();
-}
-} // namespace
-
namespace llvm {
namespace jitlink {
JITLinkMemoryManager::~JITLinkMemoryManager() = default;
JITLinkMemoryManager::InFlightAlloc::~InFlightAlloc() = default;
-static Error runAllocAction(JITLinkMemoryManager::AllocActionCall &C) {
- using WrapperFnTy = CWrapperFunctionResult (*)(const void *, size_t);
- auto *Fn = jitTargetAddressToPointer<WrapperFnTy>(C.FnAddr);
-
- return toError(Fn(jitTargetAddressToPointer<const void *>(C.CtxAddr),
- static_cast<size_t>(C.CtxSize)));
-}
-
BasicLayout::BasicLayout(LinkGraph &G) : G(G) {
for (auto &Sec : G.sections()) {
@@ -189,7 +138,7 @@ Error BasicLayout::apply() {
return Error::success();
}
-JITLinkMemoryManager::AllocActions &BasicLayout::graphAllocActions() {
+orc::shared::AllocActions &BasicLayout::graphAllocActions() {
return G.allocActions();
}
@@ -209,7 +158,7 @@ void SimpleSegmentAlloc::Create(JITLinkMemoryManager &MemMgr,
std::make_unique<LinkGraph>("", Triple(), 0, support::native, nullptr);
AllocGroupSmallMap<Block *> ContentBlocks;
- JITTargetAddress NextAddr = 0x100000;
+ orc::ExecutorAddr NextAddr(0x100000);
for (auto &KV : Segments) {
auto &AG = KV.first;
auto &Seg = KV.second;
@@ -222,7 +171,8 @@ void SimpleSegmentAlloc::Create(JITLinkMemoryManager &MemMgr,
Sec.setMemDeallocPolicy(AG.getMemDeallocPolicy());
if (Seg.ContentSize != 0) {
- NextAddr = alignTo(NextAddr, Seg.ContentAlign);
+ NextAddr =
+ orc::ExecutorAddr(alignTo(NextAddr.getValue(), Seg.ContentAlign));
auto &B =
G->createMutableContentBlock(Sec, G->allocateBuffer(Seg.ContentSize),
NextAddr, Seg.ContentAlign.value(), 0);
@@ -297,19 +247,11 @@ public:
}
// Run finalization actions.
- // FIXME: Roll back previous successful actions on failure.
- std::vector<AllocActionCall> DeallocActions;
- DeallocActions.reserve(G.allocActions().size());
- for (auto &ActPair : G.allocActions()) {
- if (ActPair.Finalize.FnAddr)
- if (auto Err = runAllocAction(ActPair.Finalize)) {
- OnFinalized(std::move(Err));
- return;
- }
- if (ActPair.Dealloc.FnAddr)
- DeallocActions.push_back(ActPair.Dealloc);
+ auto DeallocActions = runFinalizeActions(G.allocActions());
+ if (!DeallocActions) {
+ OnFinalized(DeallocActions.takeError());
+ return;
}
- G.allocActions().clear();
// Release the finalize segments slab.
if (auto EC = sys::Memory::releaseMappedMemory(FinalizationSegments)) {
@@ -319,7 +261,7 @@ public:
// Continue with finalized allocation.
OnFinalized(MemMgr.createFinalizedAlloc(std::move(StandardSegments),
- std::move(DeallocActions)));
+ std::move(*DeallocActions)));
}
void abandon(OnAbandonedFunction OnAbandoned) override {
@@ -428,8 +370,8 @@ void InProcessMemoryManager::allocate(const JITLinkDylib *JD, LinkGraph &G,
static_cast<size_t>(SegsSizes->FinalizeSegs)};
}
- auto NextStandardSegAddr = pointerToJITTargetAddress(StandardSegsMem.base());
- auto NextFinalizeSegAddr = pointerToJITTargetAddress(FinalizeSegsMem.base());
+ auto NextStandardSegAddr = orc::ExecutorAddr::fromPtr(StandardSegsMem.base());
+ auto NextFinalizeSegAddr = orc::ExecutorAddr::fromPtr(FinalizeSegsMem.base());
LLVM_DEBUG({
dbgs() << "InProcessMemoryManager allocated:\n";
@@ -456,7 +398,7 @@ void InProcessMemoryManager::allocate(const JITLinkDylib *JD, LinkGraph &G,
? NextStandardSegAddr
: NextFinalizeSegAddr;
- Seg.WorkingMem = jitTargetAddressToPointer<char *>(SegAddr);
+ Seg.WorkingMem = SegAddr.toPtr<char *>();
Seg.Addr = SegAddr;
SegAddr += alignTo(Seg.ContentSize + Seg.ZeroFillSize, PageSize);
@@ -475,13 +417,12 @@ void InProcessMemoryManager::allocate(const JITLinkDylib *JD, LinkGraph &G,
void InProcessMemoryManager::deallocate(std::vector<FinalizedAlloc> Allocs,
OnDeallocatedFunction OnDeallocated) {
std::vector<sys::MemoryBlock> StandardSegmentsList;
- std::vector<std::vector<AllocActionCall>> DeallocActionsList;
+ std::vector<std::vector<orc::shared::WrapperFunctionCall>> DeallocActionsList;
{
std::lock_guard<std::mutex> Lock(FinalizedAllocsMutex);
for (auto &Alloc : Allocs) {
- auto *FA =
- jitTargetAddressToPointer<FinalizedAllocInfo *>(Alloc.release());
+ auto *FA = Alloc.release().toPtr<FinalizedAllocInfo *>();
StandardSegmentsList.push_back(std::move(FA->StandardSegments));
if (!FA->DeallocActions.empty())
DeallocActionsList.push_back(std::move(FA->DeallocActions));
@@ -498,7 +439,7 @@ void InProcessMemoryManager::deallocate(std::vector<FinalizedAlloc> Allocs,
/// Run any deallocate calls.
while (!DeallocActions.empty()) {
- if (auto Err = runAllocAction(DeallocActions.back()))
+ if (auto Err = DeallocActions.back().runWithSPSRetErrorMerged())
DeallocErr = joinErrors(std::move(DeallocErr), std::move(Err));
DeallocActions.pop_back();
}
@@ -517,12 +458,12 @@ void InProcessMemoryManager::deallocate(std::vector<FinalizedAlloc> Allocs,
JITLinkMemoryManager::FinalizedAlloc
InProcessMemoryManager::createFinalizedAlloc(
sys::MemoryBlock StandardSegments,
- std::vector<AllocActionCall> DeallocActions) {
+ std::vector<orc::shared::WrapperFunctionCall> DeallocActions) {
std::lock_guard<std::mutex> Lock(FinalizedAllocsMutex);
auto *FA = FinalizedAllocInfos.Allocate<FinalizedAllocInfo>();
new (FA) FinalizedAllocInfo(
{std::move(StandardSegments), std::move(DeallocActions)});
- return FinalizedAlloc(pointerToJITTargetAddress(FA));
+ return FinalizedAlloc(orc::ExecutorAddr::fromPtr(FA));
}
} // end namespace jitlink
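
The JITLinkMemoryManager.cpp hunks above delete the local CWrapperFunctionResult copy and run allocation actions through the shared Orc utilities: finalize actions run in order during finalization, and the surviving dealloc actions are stashed to run when the allocation is destroyed. The sketch below shows that pattern in isolation; the names (ActionPair, runFinalizeActionsSketch) are illustrative and do not match the Orc API.

#include <functional>
#include <optional>
#include <string>
#include <vector>

// Illustrative stand-ins, not the Orc API: each allocation carries a pair of
// actions. Finalize runs when memory is finalized, Dealloc when it is freed.
using Action = std::function<std::optional<std::string>()>; // returns error text
struct ActionPair {
  Action Finalize, Dealloc;
};

// Run every finalize action in order. On success, hand back the matching
// dealloc actions (to be run later) and clear the input list; on failure,
// surface the error. A full implementation would also roll back the finalize
// actions that already ran.
std::optional<std::vector<Action>>
runFinalizeActionsSketch(std::vector<ActionPair> &Actions, std::string &Err) {
  std::vector<Action> DeallocActions;
  for (auto &AP : Actions) {
    if (AP.Finalize)
      if (auto E = AP.Finalize()) {
        Err = *E;
        return std::nullopt;
      }
    if (AP.Dealloc)
      DeallocActions.push_back(std::move(AP.Dealloc));
  }
  Actions.clear();
  return DeallocActions;
}

int main() {
  std::string Err;
  std::vector<ActionPair> Actions;
  Actions.push_back({[]() -> std::optional<std::string> { return std::nullopt; },
                     []() -> std::optional<std::string> { return std::nullopt; }});
  auto Dealloc = runFinalizeActionsSketch(Actions, Err);
  return (Dealloc && Dealloc->size() == 1) ? 0 : 1;
}
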
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp
index d588b63d9e88..62574604458c 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp
@@ -134,7 +134,7 @@ Error MachOLinkGraphBuilder::createNormalizedSections() {
memcpy(&NSec.SegName, Sec64.segname, 16);
NSec.SegName[16] = '\0';
- NSec.Address = Sec64.addr;
+ NSec.Address = orc::ExecutorAddr(Sec64.addr);
NSec.Size = Sec64.size;
NSec.Alignment = 1ULL << Sec64.align;
NSec.Flags = Sec64.flags;
@@ -147,7 +147,7 @@ Error MachOLinkGraphBuilder::createNormalizedSections() {
memcpy(&NSec.SegName, Sec32.segname, 16);
NSec.SegName[16] = '\0';
- NSec.Address = Sec32.addr;
+ NSec.Address = orc::ExecutorAddr(Sec32.addr);
NSec.Size = Sec32.size;
NSec.Alignment = 1ULL << Sec32.align;
NSec.Flags = Sec32.flags;
@@ -287,7 +287,8 @@ Error MachOLinkGraphBuilder::createNormalizedSymbols() {
if (!NSec)
return NSec.takeError();
- if (Value < NSec->Address || Value > NSec->Address + NSec->Size)
+ if (orc::ExecutorAddr(Value) < NSec->Address ||
+ orc::ExecutorAddr(Value) > NSec->Address + NSec->Size)
return make_error<JITLinkError>("Address " + formatv("{0:x}", Value) +
" for symbol " + *Name +
" does not fall within section");
@@ -311,8 +312,9 @@ Error MachOLinkGraphBuilder::createNormalizedSymbols() {
}
void MachOLinkGraphBuilder::addSectionStartSymAndBlock(
- unsigned SecIndex, Section &GraphSec, uint64_t Address, const char *Data,
- uint64_t Size, uint32_t Alignment, bool IsLive) {
+ unsigned SecIndex, Section &GraphSec, orc::ExecutorAddr Address,
+ const char *Data, orc::ExecutorAddrDiff Size, uint32_t Alignment,
+ bool IsLive) {
Block &B =
Data ? G->createContentBlock(GraphSec, ArrayRef<char>(Data, Size),
Address, Alignment, 0)
@@ -346,7 +348,8 @@ Error MachOLinkGraphBuilder::graphifyRegularSymbols() {
return make_error<JITLinkError>("Anonymous common symbol at index " +
Twine(KV.first));
NSym.GraphSymbol = &G->addCommonSymbol(
- *NSym.Name, NSym.S, getCommonSection(), 0, NSym.Value,
+ *NSym.Name, NSym.S, getCommonSection(), orc::ExecutorAddr(),
+ orc::ExecutorAddrDiff(NSym.Value),
1ull << MachO::GET_COMM_ALIGN(NSym.Desc),
NSym.Desc & MachO::N_NO_DEAD_STRIP);
} else {
@@ -364,8 +367,8 @@ Error MachOLinkGraphBuilder::graphifyRegularSymbols() {
return make_error<JITLinkError>("Anonymous absolute symbol at index " +
Twine(KV.first));
NSym.GraphSymbol = &G->addAbsoluteSymbol(
- *NSym.Name, NSym.Value, 0, Linkage::Strong, Scope::Default,
- NSym.Desc & MachO::N_NO_DEAD_STRIP);
+ *NSym.Name, orc::ExecutorAddr(NSym.Value), 0, Linkage::Strong,
+ Scope::Default, NSym.Desc & MachO::N_NO_DEAD_STRIP);
break;
case MachO::N_SECT:
SecIndexToSymbols[NSym.Sect - 1].push_back(&NSym);
@@ -468,13 +471,13 @@ Error MachOLinkGraphBuilder::graphifyRegularSymbols() {
// If the section is non-empty but there is no symbol covering the start
// address then add an anonymous one.
- if (SecNSymStack.back()->Value != NSec.Address) {
- auto AnonBlockSize = SecNSymStack.back()->Value - NSec.Address;
+ if (orc::ExecutorAddr(SecNSymStack.back()->Value) != NSec.Address) {
+ auto AnonBlockSize =
+ orc::ExecutorAddr(SecNSymStack.back()->Value) - NSec.Address;
LLVM_DEBUG({
dbgs() << " Section start not covered by symbol. "
- << "Creating anonymous block to cover [ "
- << formatv("{0:x16}", NSec.Address) << " -- "
- << formatv("{0:x16}", NSec.Address + AnonBlockSize) << " ]\n";
+ << "Creating anonymous block to cover [ " << NSec.Address
+ << " -- " << (NSec.Address + AnonBlockSize) << " ]\n";
});
addSectionStartSymAndBlock(SecIndex, *NSec.GraphSection, NSec.Address,
NSec.Data, AnonBlockSize, NSec.Alignment,
@@ -496,12 +499,12 @@ Error MachOLinkGraphBuilder::graphifyRegularSymbols() {
}
// BlockNSyms now contains the block symbols in reverse canonical order.
- JITTargetAddress BlockStart = BlockSyms.front()->Value;
- JITTargetAddress BlockEnd = SecNSymStack.empty()
- ? NSec.Address + NSec.Size
- : SecNSymStack.back()->Value;
- JITTargetAddress BlockOffset = BlockStart - NSec.Address;
- JITTargetAddress BlockSize = BlockEnd - BlockStart;
+ auto BlockStart = orc::ExecutorAddr(BlockSyms.front()->Value);
+ orc::ExecutorAddr BlockEnd =
+ SecNSymStack.empty() ? NSec.Address + NSec.Size
+ : orc::ExecutorAddr(SecNSymStack.back()->Value);
+ orc::ExecutorAddrDiff BlockOffset = BlockStart - NSec.Address;
+ orc::ExecutorAddrDiff BlockSize = BlockEnd - BlockStart;
LLVM_DEBUG({
dbgs() << " Creating block for " << formatv("{0:x16}", BlockStart)
@@ -521,8 +524,8 @@ Error MachOLinkGraphBuilder::graphifyRegularSymbols() {
BlockStart, NSec.Alignment,
BlockStart % NSec.Alignment);
- Optional<JITTargetAddress> LastCanonicalAddr;
- JITTargetAddress SymEnd = BlockEnd;
+ Optional<orc::ExecutorAddr> LastCanonicalAddr;
+ auto SymEnd = BlockEnd;
while (!BlockSyms.empty()) {
auto &NSym = *BlockSyms.back();
BlockSyms.pop_back();
@@ -530,9 +533,9 @@ Error MachOLinkGraphBuilder::graphifyRegularSymbols() {
bool SymLive =
(NSym.Desc & MachO::N_NO_DEAD_STRIP) || SectionIsNoDeadStrip;
- auto &Sym = createStandardGraphSymbol(NSym, B, SymEnd - NSym.Value,
- SectionIsText, SymLive,
- LastCanonicalAddr != NSym.Value);
+ auto &Sym = createStandardGraphSymbol(
+ NSym, B, SymEnd - orc::ExecutorAddr(NSym.Value), SectionIsText,
+ SymLive, LastCanonicalAddr != orc::ExecutorAddr(NSym.Value));
if (LastCanonicalAddr != Sym.getAddress()) {
if (LastCanonicalAddr)
@@ -568,11 +571,12 @@ Symbol &MachOLinkGraphBuilder::createStandardGraphSymbol(NormalizedSymbol &NSym,
dbgs() << "\n";
});
- auto &Sym = NSym.Name ? G->addDefinedSymbol(B, NSym.Value - B.getAddress(),
- *NSym.Name, Size, NSym.L, NSym.S,
- IsText, IsNoDeadStrip)
- : G->addAnonymousSymbol(B, NSym.Value - B.getAddress(),
- Size, IsText, IsNoDeadStrip);
+ auto SymOffset = orc::ExecutorAddr(NSym.Value) - B.getAddress();
+ auto &Sym =
+ NSym.Name
+ ? G->addDefinedSymbol(B, SymOffset, *NSym.Name, Size, NSym.L, NSym.S,
+ IsText, IsNoDeadStrip)
+ : G->addAnonymousSymbol(B, SymOffset, Size, IsText, IsNoDeadStrip);
NSym.GraphSymbol = &Sym;
if (IsCanonical)
@@ -635,12 +639,12 @@ Error MachOLinkGraphBuilder::graphifyCStringSection(
bool SectionIsNoDeadStrip = NSec.Flags & MachO::S_ATTR_NO_DEAD_STRIP;
bool SectionIsText = NSec.Flags & MachO::S_ATTR_PURE_INSTRUCTIONS;
- JITTargetAddress BlockStart = 0;
+ orc::ExecutorAddrDiff BlockStart = 0;
// Scan section for null characters.
for (size_t I = 0; I != NSec.Size; ++I)
if (NSec.Data[I] == '\0') {
- JITTargetAddress BlockEnd = I + 1;
+ orc::ExecutorAddrDiff BlockEnd = I + 1;
size_t BlockSize = BlockEnd - BlockStart;
// Create a block for this null terminated string.
auto &B = G->createContentBlock(*NSec.GraphSection,
@@ -654,7 +658,8 @@ Error MachOLinkGraphBuilder::graphifyCStringSection(
});
// If there's no symbol at the start of this block then create one.
- if (NSyms.empty() || NSyms.back()->Value != B.getAddress()) {
+ if (NSyms.empty() ||
+ orc::ExecutorAddr(NSyms.back()->Value) != B.getAddress()) {
auto &S = G->addAnonymousSymbol(B, 0, BlockSize, false, false);
setCanonicalSymbol(NSec, S);
LLVM_DEBUG({
@@ -666,18 +671,19 @@ Error MachOLinkGraphBuilder::graphifyCStringSection(
}
// Process any remaining symbols that point into this block.
- JITTargetAddress LastCanonicalAddr = B.getAddress() + BlockEnd;
- while (!NSyms.empty() &&
- NSyms.back()->Value < (B.getAddress() + BlockSize)) {
+ auto LastCanonicalAddr = B.getAddress() + BlockEnd;
+ while (!NSyms.empty() && orc::ExecutorAddr(NSyms.back()->Value) <
+ B.getAddress() + BlockSize) {
auto &NSym = *NSyms.back();
- size_t SymSize = (B.getAddress() + BlockSize) - NSyms.back()->Value;
+ size_t SymSize = (B.getAddress() + BlockSize) -
+ orc::ExecutorAddr(NSyms.back()->Value);
bool SymLive =
(NSym.Desc & MachO::N_NO_DEAD_STRIP) || SectionIsNoDeadStrip;
bool IsCanonical = false;
- if (LastCanonicalAddr != NSym.Value) {
+ if (LastCanonicalAddr != orc::ExecutorAddr(NSym.Value)) {
IsCanonical = true;
- LastCanonicalAddr = NSym.Value;
+ LastCanonicalAddr = orc::ExecutorAddr(NSym.Value);
}
createStandardGraphSymbol(NSym, B, SymSize, SectionIsText, SymLive,
@@ -785,7 +791,7 @@ Error CompactUnwindSplitter::operator()(LinkGraph &G) {
E.getTarget().getName() + " is an external symbol");
auto &TgtBlock = E.getTarget().getBlock();
auto &CURecSym =
- G.addAnonymousSymbol(CURec, 0, CURecordSize, 0, false);
+ G.addAnonymousSymbol(CURec, 0, CURecordSize, false, false);
TgtBlock.addEdge(Edge::KeepAlive, 0, CURecSym, 0);
AddedKeepAlive = true;
} else if (E.getOffset() != PersonalityEdgeOffset &&
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.h
index d29732ebdba8..2951a8533098 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.h
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.h
@@ -71,13 +71,13 @@ protected:
public:
char SectName[17];
char SegName[17];
- uint64_t Address = 0;
+ orc::ExecutorAddr Address;
uint64_t Size = 0;
uint64_t Alignment = 0;
uint32_t Flags = 0;
const char *Data = nullptr;
Section *GraphSection = nullptr;
- std::map<JITTargetAddress, Symbol *> CanonicalSymbols;
+ std::map<orc::ExecutorAddr, Symbol *> CanonicalSymbols;
};
using SectionParserFunction = std::function<Error(NormalizedSection &S)>;
@@ -137,7 +137,7 @@ protected:
/// Returns the symbol with the highest address not greater than the search
/// address, or null if no such symbol exists.
Symbol *getSymbolByAddress(NormalizedSection &NSec,
- JITTargetAddress Address) {
+ orc::ExecutorAddr Address) {
auto I = NSec.CanonicalSymbols.upper_bound(Address);
if (I == NSec.CanonicalSymbols.begin())
return nullptr;
@@ -147,7 +147,7 @@ protected:
/// Returns the symbol with the highest address not greater than the search
/// address, or an error if no such symbol exists.
Expected<Symbol &> findSymbolByAddress(NormalizedSection &NSec,
- JITTargetAddress Address) {
+ orc::ExecutorAddr Address) {
auto *Sym = getSymbolByAddress(NSec, Address);
if (Sym)
if (Address <= Sym->getAddress() + Sym->getSize())
@@ -193,9 +193,9 @@ private:
Section &getCommonSection();
void addSectionStartSymAndBlock(unsigned SecIndex, Section &GraphSec,
- uint64_t Address, const char *Data,
- uint64_t Size, uint32_t Alignment,
- bool IsLive);
+ orc::ExecutorAddr Address, const char *Data,
+ orc::ExecutorAddrDiff Size,
+ uint32_t Alignment, bool IsLive);
Error createNormalizedSections();
Error createNormalizedSymbols();
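
getSymbolByAddress above answers "which canonical symbol covers this address" by taking upper_bound on an address-keyed std::map and stepping back one entry. A self-contained illustration of that map idiom, using plain uint64_t keys in place of orc::ExecutorAddr:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <map>

// Return the symbol whose address is the greatest key <= Addr, or nullptr.
static const char *findByAddress(const std::map<uint64_t, const char *> &Syms,
                                 uint64_t Addr) {
  auto I = Syms.upper_bound(Addr); // first entry strictly above Addr
  if (I == Syms.begin())
    return nullptr;                // everything lies above Addr
  return std::prev(I)->second;     // greatest entry at or below Addr
}

int main() {
  const std::map<uint64_t, const char *> Syms = {
      {0x1000, "foo"}, {0x1040, "bar"}, {0x2000, "baz"}};
  assert(findByAddress(Syms, 0x0FFF) == nullptr);
  assert(std::strcmp(findByAddress(Syms, 0x1000), "foo") == 0);
  assert(std::strcmp(findByAddress(Syms, 0x1050), "bar") == 0);
  return 0;
}
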
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp
index f2a029d35cd5..3ca2e40c7263 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp
@@ -109,7 +109,7 @@ private:
Expected<PairRelocInfo>
parsePairRelocation(Block &BlockToFix, Edge::Kind SubtractorKind,
const MachO::relocation_info &SubRI,
- JITTargetAddress FixupAddress, const char *FixupContent,
+ orc::ExecutorAddr FixupAddress, const char *FixupContent,
object::relocation_iterator &UnsignedRelItr,
object::relocation_iterator &RelEnd) {
using namespace support;
@@ -162,7 +162,7 @@ private:
return ToSymbolSec.takeError();
ToSymbol = getSymbolByAddress(*ToSymbolSec, ToSymbolSec->Address);
assert(ToSymbol && "No symbol for section");
- FixupValue -= ToSymbol->getAddress();
+ FixupValue -= ToSymbol->getAddress().getValue();
}
MachOARM64RelocationKind DeltaKind;
@@ -195,7 +195,7 @@ private:
for (auto &S : Obj.sections()) {
- JITTargetAddress SectionAddress = S.getAddress();
+ orc::ExecutorAddr SectionAddress(S.getAddress());
// Skip relocations virtual sections.
if (S.isVirtual()) {
@@ -234,7 +234,8 @@ private:
return Kind.takeError();
// Find the address of the value to fix up.
- JITTargetAddress FixupAddress = SectionAddress + (uint32_t)RI.r_address;
+ orc::ExecutorAddr FixupAddress =
+ SectionAddress + (uint32_t)RI.r_address;
LLVM_DEBUG({
dbgs() << " " << NSec->SectName << " + "
<< formatv("{0:x8}", RI.r_address) << ":\n";
@@ -249,7 +250,7 @@ private:
BlockToFix = &SymbolToFixOrErr->getBlock();
}
- if (FixupAddress + static_cast<JITTargetAddress>(1ULL << RI.r_length) >
+ if (FixupAddress + orc::ExecutorAddrDiff(1ULL << RI.r_length) >
BlockToFix->getAddress() + BlockToFix->getContent().size())
return make_error<JITLinkError>(
"Relocation content extends past end of fixup block");
@@ -290,7 +291,7 @@ private:
});
// Find the address of the value to fix up.
- JITTargetAddress PairedFixupAddress =
+ orc::ExecutorAddr PairedFixupAddress =
SectionAddress + (uint32_t)RI.r_address;
if (PairedFixupAddress != FixupAddress)
return make_error<JITLinkError>("Paired relocation points at "
@@ -324,7 +325,7 @@ private:
Addend = *(const ulittle64_t *)FixupContent;
break;
case Pointer64Anon: {
- JITTargetAddress TargetAddress = *(const ulittle64_t *)FixupContent;
+ orc::ExecutorAddr TargetAddress(*(const ulittle64_t *)FixupContent);
auto TargetNSec = findSectionByIndex(RI.r_symbolnum - 1);
if (!TargetNSec)
return TargetNSec.takeError();
@@ -435,7 +436,7 @@ public:
Symbol &createGOTEntry(Symbol &Target) {
auto &GOTEntryBlock = G.createContentBlock(
- getGOTSection(), getGOTEntryBlockContent(), 0, 8, 0);
+ getGOTSection(), getGOTEntryBlockContent(), orc::ExecutorAddr(), 8, 0);
GOTEntryBlock.addEdge(Pointer64, 0, Target, 0);
return G.addAnonymousSymbol(GOTEntryBlock, 0, 8, false, false);
}
@@ -457,8 +458,8 @@ public:
}
Symbol &createPLTStub(Symbol &Target) {
- auto &StubContentBlock =
- G.createContentBlock(getStubsSection(), getStubBlockContent(), 0, 1, 0);
+ auto &StubContentBlock = G.createContentBlock(
+ getStubsSection(), getStubBlockContent(), orc::ExecutorAddr(), 1, 0);
// Re-use GOT entries for stub targets.
auto &GOTEntrySymbol = getGOTEntry(Target);
StubContentBlock.addEdge(LDRLiteral19, 0, GOTEntrySymbol, 0);
@@ -474,7 +475,7 @@ public:
private:
Section &getGOTSection() {
if (!GOTSection)
- GOTSection = &G.createSection("$__GOT", MemProt::Read);
+ GOTSection = &G.createSection("$__GOT", MemProt::Read | MemProt::Exec);
return *GOTSection;
}
@@ -545,11 +546,12 @@ private:
char *BlockWorkingMem = B.getAlreadyMutableContent().data();
char *FixupPtr = BlockWorkingMem + E.getOffset();
- JITTargetAddress FixupAddress = B.getAddress() + E.getOffset();
+ orc::ExecutorAddr FixupAddress = B.getAddress() + E.getOffset();
switch (E.getKind()) {
case Branch26: {
- assert((FixupAddress & 0x3) == 0 && "Branch-inst is not 32-bit aligned");
+ assert((FixupAddress.getValue() & 0x3) == 0 &&
+ "Branch-inst is not 32-bit aligned");
int64_t Value = E.getTarget().getAddress() - FixupAddress + E.getAddend();
@@ -569,7 +571,7 @@ private:
break;
}
case Pointer32: {
- uint64_t Value = E.getTarget().getAddress() + E.getAddend();
+ uint64_t Value = E.getTarget().getAddress().getValue() + E.getAddend();
if (Value > std::numeric_limits<uint32_t>::max())
return makeTargetOutOfRangeError(G, B, E);
*(ulittle32_t *)FixupPtr = Value;
@@ -577,7 +579,7 @@ private:
}
case Pointer64:
case Pointer64Anon: {
- uint64_t Value = E.getTarget().getAddress() + E.getAddend();
+ uint64_t Value = E.getTarget().getAddress().getValue() + E.getAddend();
*(ulittle64_t *)FixupPtr = Value;
break;
}
@@ -587,9 +589,10 @@ private:
assert((E.getKind() != GOTPage21 || E.getAddend() == 0) &&
"GOTPAGE21 with non-zero addend");
uint64_t TargetPage =
- (E.getTarget().getAddress() + E.getAddend()) &
- ~static_cast<uint64_t>(4096 - 1);
- uint64_t PCPage = FixupAddress & ~static_cast<uint64_t>(4096 - 1);
+ (E.getTarget().getAddress().getValue() + E.getAddend()) &
+ ~static_cast<uint64_t>(4096 - 1);
+ uint64_t PCPage =
+ FixupAddress.getValue() & ~static_cast<uint64_t>(4096 - 1);
int64_t PageDelta = TargetPage - PCPage;
if (PageDelta < -(1 << 30) || PageDelta > ((1 << 30) - 1))
@@ -606,7 +609,7 @@ private:
}
case PageOffset12: {
uint64_t TargetOffset =
- (E.getTarget().getAddress() + E.getAddend()) & 0xfff;
+ (E.getTarget().getAddress() + E.getAddend()).getValue() & 0xfff;
uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
unsigned ImmShift = getPageOffset12Shift(RawInstr);
@@ -627,7 +630,7 @@ private:
assert((RawInstr & 0xfffffc00) == 0xf9400000 &&
"RawInstr isn't a 64-bit LDR immediate");
- uint32_t TargetOffset = E.getTarget().getAddress() & 0xfff;
+ uint32_t TargetOffset = E.getTarget().getAddress().getValue() & 0xfff;
assert((TargetOffset & 0x7) == 0 && "GOT entry is not 8-byte aligned");
uint32_t EncodedImm = (TargetOffset >> 3) << 10;
uint32_t FixedInstr = RawInstr | EncodedImm;
@@ -635,7 +638,8 @@ private:
break;
}
case LDRLiteral19: {
- assert((FixupAddress & 0x3) == 0 && "LDR is not 32-bit aligned");
+ assert((FixupAddress.getValue() & 0x3) == 0 &&
+ "LDR is not 32-bit aligned");
assert(E.getAddend() == 0 && "LDRLiteral19 with non-zero addend");
uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
assert(RawInstr == 0x58000010 && "RawInstr isn't a 64-bit LDR literal");
@@ -705,6 +709,13 @@ void link_MachO_arm64(std::unique_ptr<LinkGraph> G,
Config.PrePrunePasses.push_back(
CompactUnwindSplitter("__LD,__compact_unwind"));
+ // Add eh-frame passses.
+ // FIXME: Prune eh-frames for which compact-unwind is available once
+ // we support compact-unwind registration with libunwind.
+ Config.PrePrunePasses.push_back(EHFrameSplitter("__TEXT,__eh_frame"));
+ Config.PrePrunePasses.push_back(
+ EHFrameEdgeFixer("__TEXT,__eh_frame", 8, Delta64, Delta32, NegDelta32));
+
// Add an in-place GOT/Stubs pass.
Config.PostPrunePasses.push_back(
PerGraphGOTAndPLTStubsBuilder_MachO_arm64::asPass);
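
The Page21/GOTPage21 case above computes the 4 KiB page delta between the adrp fixup and its target, checks it against the +/- 1 GiB window accepted there, and leaves the low 12 bits to the paired PageOffset12 fixup. A quick standalone check of that split, with made-up addresses for illustration:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t Target = 0x100003f2c; // hypothetical GOT entry address
  uint64_t Fixup = 0x100001004;  // hypothetical address of the adrp
  // adrp materializes the target's 4 KiB page relative to the PC's page...
  uint64_t TargetPage = Target & ~uint64_t(4096 - 1);
  uint64_t PCPage = Fixup & ~uint64_t(4096 - 1);
  int64_t PageDelta = int64_t(TargetPage - PCPage);
  // ...and the delta must sit inside the +/- 1 GiB window checked above.
  assert(PageDelta >= -(int64_t(1) << 30) && PageDelta <= (int64_t(1) << 30) - 1);
  // The paired PageOffset12 fixup supplies only the offset within the page.
  uint64_t PageOffset = Target & 0xfff;
  assert(TargetPage + PageOffset == Target);
  (void)PageDelta;
  (void)PageOffset;
  return 0;
}
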
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp
index a4fcd3b9a5f5..82afaa3aa3c5 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp
@@ -119,7 +119,7 @@ private:
// returns the edge kind and addend to be used.
Expected<PairRelocInfo> parsePairRelocation(
Block &BlockToFix, MachONormalizedRelocationType SubtractorKind,
- const MachO::relocation_info &SubRI, JITTargetAddress FixupAddress,
+ const MachO::relocation_info &SubRI, orc::ExecutorAddr FixupAddress,
const char *FixupContent, object::relocation_iterator &UnsignedRelItr,
object::relocation_iterator &RelEnd) {
using namespace support;
@@ -172,7 +172,7 @@ private:
return ToSymbolSec.takeError();
ToSymbol = getSymbolByAddress(*ToSymbolSec, ToSymbolSec->Address);
assert(ToSymbol && "No symbol for section");
- FixupValue -= ToSymbol->getAddress();
+ FixupValue -= ToSymbol->getAddress().getValue();
}
Edge::Kind DeltaKind;
@@ -206,7 +206,7 @@ private:
for (auto &S : Obj.sections()) {
- JITTargetAddress SectionAddress = S.getAddress();
+ orc::ExecutorAddr SectionAddress(S.getAddress());
// Skip relocations virtual sections.
if (S.isVirtual()) {
@@ -241,7 +241,7 @@ private:
MachO::relocation_info RI = getRelocationInfo(RelItr);
// Find the address of the value to fix up.
- JITTargetAddress FixupAddress = SectionAddress + (uint32_t)RI.r_address;
+ auto FixupAddress = SectionAddress + (uint32_t)RI.r_address;
LLVM_DEBUG({
dbgs() << " " << NSec->SectName << " + "
@@ -257,7 +257,7 @@ private:
BlockToFix = &SymbolToFixOrErr->getBlock();
}
- if (FixupAddress + static_cast<JITTargetAddress>(1ULL << RI.r_length) >
+ if (FixupAddress + orc::ExecutorAddrDiff(1ULL << RI.r_length) >
BlockToFix->getAddress() + BlockToFix->getContent().size())
return make_error<JITLinkError>(
"Relocation extends past end of fixup block");
@@ -343,7 +343,7 @@ private:
Kind = x86_64::Pointer64;
break;
case MachOPointer64Anon: {
- JITTargetAddress TargetAddress = *(const ulittle64_t *)FixupContent;
+ orc::ExecutorAddr TargetAddress(*(const ulittle64_t *)FixupContent);
auto TargetNSec = findSectionByIndex(RI.r_symbolnum - 1);
if (!TargetNSec)
return TargetNSec.takeError();
@@ -367,8 +367,8 @@ private:
Kind = x86_64::Delta32;
break;
case MachOPCRel32Anon: {
- JITTargetAddress TargetAddress =
- FixupAddress + 4 + *(const little32_t *)FixupContent;
+ orc::ExecutorAddr TargetAddress(FixupAddress + 4 +
+ *(const little32_t *)FixupContent);
auto TargetNSec = findSectionByIndex(RI.r_symbolnum - 1);
if (!TargetNSec)
return TargetNSec.takeError();
@@ -384,10 +384,10 @@ private:
case MachOPCRel32Minus1Anon:
case MachOPCRel32Minus2Anon:
case MachOPCRel32Minus4Anon: {
- JITTargetAddress Delta =
- 4 + static_cast<JITTargetAddress>(
+ orc::ExecutorAddrDiff Delta =
+ 4 + orc::ExecutorAddrDiff(
1ULL << (*MachORelocKind - MachOPCRel32Minus1Anon));
- JITTargetAddress TargetAddress =
+ orc::ExecutorAddr TargetAddress =
FixupAddress + Delta + *(const little32_t *)FixupContent;
auto TargetNSec = findSectionByIndex(RI.r_symbolnum - 1);
if (!TargetNSec)
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/PerGraphGOTAndPLTStubsBuilder.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/PerGraphGOTAndPLTStubsBuilder.h
index 6e9df9c75a65..6e325f92bafb 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/PerGraphGOTAndPLTStubsBuilder.h
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/PerGraphGOTAndPLTStubsBuilder.h
@@ -47,16 +47,16 @@ public:
if (impl().isGOTEdgeToFix(E)) {
LLVM_DEBUG({
dbgs() << " Fixing " << G.getEdgeKindName(E.getKind())
- << " edge at " << formatv("{0:x}", B->getFixupAddress(E))
- << " (" << formatv("{0:x}", B->getAddress()) << " + "
+ << " edge at " << B->getFixupAddress(E) << " ("
+ << B->getAddress() << " + "
<< formatv("{0:x}", E.getOffset()) << ")\n";
});
impl().fixGOTEdge(E, getGOTEntry(E.getTarget()));
} else if (impl().isExternalBranchEdge(E)) {
LLVM_DEBUG({
dbgs() << " Fixing " << G.getEdgeKindName(E.getKind())
- << " edge at " << formatv("{0:x}", B->getFixupAddress(E))
- << " (" << formatv("{0:x}", B->getAddress()) << " + "
+ << " edge at " << B->getFixupAddress(E) << " ("
+ << B->getAddress() << " + "
<< formatv("{0:x}", E.getOffset()) << ")\n";
});
impl().fixPLTEdge(E, getPLTStub(E.getTarget()));
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/riscv.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/riscv.cpp
index 6b73ff95a3b0..3ce2cf10a24c 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/riscv.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/riscv.cpp
@@ -24,6 +24,8 @@ const char *getEdgeKindName(Edge::Kind K) {
return "R_RISCV_32";
case R_RISCV_64:
return "R_RISCV_64";
+ case R_RISCV_BRANCH:
+ return "R_RISCV_BRANCH";
case R_RISCV_HI20:
return "R_RISCV_HI20";
case R_RISCV_LO12_I:
@@ -36,6 +38,32 @@ const char *getEdgeKindName(Edge::Kind K) {
return "R_RISCV_PCREL_LO12_S";
case R_RISCV_CALL:
return "R_RISCV_CALL";
+ case R_RISCV_32_PCREL:
+ return "R_RISCV_32_PCREL";
+ case R_RISCV_ADD64:
+ return "R_RISCV_ADD64";
+ case R_RISCV_ADD32:
+ return "R_RISCV_ADD32";
+ case R_RISCV_ADD16:
+ return "R_RISCV_ADD16";
+ case R_RISCV_ADD8:
+ return "R_RISCV_ADD8";
+ case R_RISCV_SUB64:
+ return "R_RISCV_SUB64";
+ case R_RISCV_SUB32:
+ return "R_RISCV_SUB32";
+ case R_RISCV_SUB16:
+ return "R_RISCV_SUB16";
+ case R_RISCV_SUB8:
+ return "R_RISCV_SUB8";
+ case R_RISCV_SET6:
+ return "R_RISCV_SET6";
+ case R_RISCV_SET8:
+ return "R_RISCV_SET8";
+ case R_RISCV_SET16:
+ return "R_RISCV_SET16";
+ case R_RISCV_SET32:
+ return "R_RISCV_SET32";
}
return getGenericEdgeKindName(K);
}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/x86_64.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/x86_64.cpp
index 48521280059d..df9979b47e88 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/x86_64.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/x86_64.cpp
@@ -95,10 +95,10 @@ Error optimizeGOTAndStubAccesses(LinkGraph &G) {
assert(GOTEntryBlock.edges_size() == 1 &&
"GOT entry should only have one outgoing edge");
auto &GOTTarget = GOTEntryBlock.edges().begin()->getTarget();
- JITTargetAddress TargetAddr = GOTTarget.getAddress();
- JITTargetAddress EdgeAddr = B->getFixupAddress(E);
+ orc::ExecutorAddr TargetAddr = GOTTarget.getAddress();
+ orc::ExecutorAddr EdgeAddr = B->getFixupAddress(E);
int64_t Displacement = TargetAddr - EdgeAddr + 4;
- bool TargetInRangeForImmU32 = isInRangeForImmU32(TargetAddr);
+ bool TargetInRangeForImmU32 = isInRangeForImmU32(TargetAddr.getValue());
bool DisplacementInRangeForImmS32 = isInRangeForImmS32(Displacement);
// If both of the Target and displacement is out of range, then
@@ -165,8 +165,8 @@ Error optimizeGOTAndStubAccesses(LinkGraph &G) {
"GOT block should only have one outgoing edge");
auto &GOTTarget = GOTBlock.edges().begin()->getTarget();
- JITTargetAddress EdgeAddr = B->getAddress() + E.getOffset();
- JITTargetAddress TargetAddr = GOTTarget.getAddress();
+ orc::ExecutorAddr EdgeAddr = B->getAddress() + E.getOffset();
+ orc::ExecutorAddr TargetAddr = GOTTarget.getAddress();
int64_t Displacement = TargetAddr - EdgeAddr + 4;
if (isInRangeForImmS32(Displacement)) {
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Core.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Core.cpp
index aa82cf38c45d..e5cb8103919a 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Core.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Core.cpp
@@ -1933,9 +1933,14 @@ Error ExecutionSession::removeJITDylib(JITDylib &JD) {
JDs.erase(I);
});
- // Clear the JITDylib.
+ // Clear the JITDylib. Hold on to any error while we clean up the
+ // JITDylib members below.
auto Err = JD.clear();
+ // Notify the platform of the teardown.
+ if (P)
+ Err = joinErrors(std::move(Err), P->teardownJITDylib(JD));
+
// Set JD to closed state. Clear remaining data structures.
runSessionLocked([&] {
assert(JD.State == JITDylib::Closing && "JD should be closing");
@@ -1953,19 +1958,22 @@ Error ExecutionSession::removeJITDylib(JITDylib &JD) {
return Err;
}
-std::vector<JITDylibSP> JITDylib::getDFSLinkOrder(ArrayRef<JITDylibSP> JDs) {
+Expected<std::vector<JITDylibSP>>
+JITDylib::getDFSLinkOrder(ArrayRef<JITDylibSP> JDs) {
if (JDs.empty())
- return {};
+ return std::vector<JITDylibSP>();
auto &ES = JDs.front()->getExecutionSession();
- return ES.runSessionLocked([&]() {
+ return ES.runSessionLocked([&]() -> Expected<std::vector<JITDylibSP>> {
DenseSet<JITDylib *> Visited;
std::vector<JITDylibSP> Result;
for (auto &JD : JDs) {
- assert(JD->State == Open && "JD is defunct");
-
+ if (JD->State != Open)
+ return make_error<StringError>(
+ "Error building link order: " + JD->getName() + " is defunct",
+ inconvertibleErrorCode());
if (Visited.count(JD.get()))
continue;
@@ -1990,18 +1998,19 @@ std::vector<JITDylibSP> JITDylib::getDFSLinkOrder(ArrayRef<JITDylibSP> JDs) {
});
}
-std::vector<JITDylibSP>
+Expected<std::vector<JITDylibSP>>
JITDylib::getReverseDFSLinkOrder(ArrayRef<JITDylibSP> JDs) {
- auto Tmp = getDFSLinkOrder(JDs);
- std::reverse(Tmp.begin(), Tmp.end());
- return Tmp;
+ auto Result = getDFSLinkOrder(JDs);
+ if (Result)
+ std::reverse(Result->begin(), Result->end());
+ return Result;
}
-std::vector<JITDylibSP> JITDylib::getDFSLinkOrder() {
+Expected<std::vector<JITDylibSP>> JITDylib::getDFSLinkOrder() {
return getDFSLinkOrder({this});
}
-std::vector<JITDylibSP> JITDylib::getReverseDFSLinkOrder() {
+Expected<std::vector<JITDylibSP>> JITDylib::getReverseDFSLinkOrder() {
return getReverseDFSLinkOrder({this});
}
@@ -2201,7 +2210,7 @@ void ExecutionSession::dump(raw_ostream &OS) {
void ExecutionSession::dispatchOutstandingMUs() {
LLVM_DEBUG(dbgs() << "Dispatching MaterializationUnits...\n");
- while (1) {
+ while (true) {
Optional<std::pair<std::unique_ptr<MaterializationUnit>,
std::unique_ptr<MaterializationResponsibility>>>
JMU;
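
getDFSLinkOrder above now returns Expected<> and reports a defunct JITDylib as an error instead of asserting. The traversal itself is an ordinary iterative depth-first walk over each dylib's link order with a visited set; the sketch below is loosely modelled on it using a generic Node type, not the JITDylib API.

#include <string>
#include <unordered_set>
#include <vector>

// Generic stand-in for a JITDylib: a name plus the dylibs it links against.
struct Node {
  std::string Name;
  std::vector<Node *> LinkOrder;
};

// Iterative depth-first link order over the given roots, visiting each node
// at most once.
std::vector<Node *> dfsLinkOrder(const std::vector<Node *> &Roots) {
  std::vector<Node *> Result;
  std::unordered_set<Node *> Visited;
  for (Node *Root : Roots) {
    if (!Visited.insert(Root).second)
      continue; // already reached through an earlier root
    std::vector<Node *> Stack{Root};
    while (!Stack.empty()) {
      Node *N = Stack.back();
      Stack.pop_back();
      Result.push_back(N);
      for (Node *Next : N->LinkOrder)
        if (Visited.insert(Next).second)
          Stack.push_back(Next);
    }
  }
  return Result;
}

int main() {
  Node C{"C", {}}, B{"B", {&C}}, A{"A", {&B, &C}};
  auto Order = dfsLinkOrder({&A});
  return (Order.size() == 3 && Order.front() == &A) ? 0 : 1;
}
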
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/DebugObjectManagerPlugin.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/DebugObjectManagerPlugin.cpp
index fcfe389f82a8..4ff6b7fd54df 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/DebugObjectManagerPlugin.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/DebugObjectManagerPlugin.cpp
@@ -67,9 +67,9 @@ private:
template <typename ELFT>
void ELFDebugObjectSection<ELFT>::setTargetMemoryRange(SectionRange Range) {
// Only patch load-addresses for executable and data sections.
- if (isTextOrDataSection()) {
- Header->sh_addr = static_cast<typename ELFT::uint>(Range.getStart());
- }
+ if (isTextOrDataSection())
+ Header->sh_addr =
+ static_cast<typename ELFT::uint>(Range.getStart().getValue());
}
template <typename ELFT>
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/DebuggerSupportPlugin.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/DebuggerSupportPlugin.cpp
index fe62138c790c..6916ee4a827f 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/DebuggerSupportPlugin.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/DebuggerSupportPlugin.cpp
@@ -129,8 +129,8 @@ public:
Section *Sec = nullptr;
StringRef SegName;
StringRef SecName;
- JITTargetAddress Alignment = 0;
- JITTargetAddress StartAddr = 0;
+ uint64_t Alignment = 0;
+ orc::ExecutorAddr StartAddr;
uint64_t Size = 0;
};
@@ -153,7 +153,8 @@ public:
return Error::success();
}
DebugSecInfos.push_back({&Sec, Sec.getName().substr(0, SepPos),
- Sec.getName().substr(SepPos + 1), 0, 0});
+ Sec.getName().substr(SepPos + 1), 0,
+ orc::ExecutorAddr(), 0});
} else {
NonDebugSections.push_back(&Sec);
@@ -182,11 +183,11 @@ public:
size_t ContainerBlockSize =
sizeof(typename MachOTraits::Header) + SegmentLCSize;
auto ContainerBlockContent = G.allocateBuffer(ContainerBlockSize);
- MachOContainerBlock =
- &G.createMutableContentBlock(SDOSec, ContainerBlockContent, 0, 8, 0);
+ MachOContainerBlock = &G.createMutableContentBlock(
+ SDOSec, ContainerBlockContent, orc::ExecutorAddr(), 8, 0);
// Copy debug section blocks and symbols.
- JITTargetAddress NextBlockAddr = MachOContainerBlock->getSize();
+ orc::ExecutorAddr NextBlockAddr(MachOContainerBlock->getSize());
for (auto &SI : DebugSecInfos) {
assert(!llvm::empty(SI.Sec->blocks()) && "Empty debug info section?");
@@ -219,7 +220,8 @@ public:
G.mergeSections(SDOSec, *SI.Sec);
SI.Sec = nullptr;
}
- size_t DebugSectionsSize = NextBlockAddr - MachOContainerBlock->getSize();
+ size_t DebugSectionsSize =
+ NextBlockAddr - orc::ExecutorAddr(MachOContainerBlock->getSize());
// Write MachO header and debug section load commands.
MachOStructWriter Writer(MachOContainerBlock->getAlreadyMutableContent());
@@ -266,9 +268,9 @@ public:
memset(&Sec, 0, sizeof(Sec));
memcpy(Sec.sectname, SI.SecName.data(), SI.SecName.size());
memcpy(Sec.segname, SI.SegName.data(), SI.SegName.size());
- Sec.addr = SI.StartAddr;
+ Sec.addr = SI.StartAddr.getValue();
Sec.size = SI.Size;
- Sec.offset = SI.StartAddr;
+ Sec.offset = SI.StartAddr.getValue();
Sec.align = SI.Alignment;
Sec.reloff = 0;
Sec.nreloc = 0;
@@ -336,7 +338,7 @@ public:
memset(&SecCmd, 0, sizeof(SecCmd));
memcpy(SecCmd.sectname, SecName.data(), SecName.size());
memcpy(SecCmd.segname, SegName.data(), SegName.size());
- SecCmd.addr = R.getStart();
+ SecCmd.addr = R.getStart().getValue();
SecCmd.size = R.getSize();
SecCmd.offset = 0;
SecCmd.align = R.getFirstBlock()->getAlignment();
@@ -347,8 +349,10 @@ public:
}
SectionRange R(MachOContainerBlock->getSection());
- G.allocActions().push_back(
- {{RegisterActionAddr.getValue(), R.getStart(), R.getSize()}, {}});
+ G.allocActions().push_back({cantFail(shared::WrapperFunctionCall::Create<
+ SPSArgList<SPSExecutorAddrRange>>(
+ RegisterActionAddr, R.getRange())),
+ {}});
return Error::success();
}
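
DebuggerSupportPlugin.cpp is part of the broader JITTargetAddress-to-ExecutorAddr migration in this merge: section start addresses are carried as orc::ExecutorAddr and widened to a raw integer only where a binary format (the MachO header fields above) requires it, via getValue(). A small sketch of that round trip, assuming the in-tree ExecutorAddress.h header location:

#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
#include <cstdint>

using namespace llvm::orc;

// Widen only at the serialization boundary, as the MachO writer above does.
static uint64_t toRawSectionAddr(ExecutorAddr Start) { return Start.getValue(); }

// Construction from a raw integer is explicit, which keeps plain integers and
// executor addresses from mixing silently.
static ExecutorAddr fromRawSectionAddr(uint64_t Raw) { return ExecutorAddr(Raw); }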
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ELFNixPlatform.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ELFNixPlatform.cpp
index eded54f4bfb3..d02760703f06 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ELFNixPlatform.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ELFNixPlatform.cpp
@@ -58,7 +58,8 @@ public:
auto &DSOHandleSection =
G->createSection(".data.__dso_handle", jitlink::MemProt::Read);
auto &DSOHandleBlock = G->createContentBlock(
- DSOHandleSection, getDSOHandleContent(PointerSize), 0, 8, 0);
+ DSOHandleSection, getDSOHandleContent(PointerSize), orc::ExecutorAddr(),
+ 8, 0);
auto &DSOHandleSymbol = G->addDefinedSymbol(
DSOHandleBlock, 0, *R->getInitializerSymbol(), DSOHandleBlock.getSize(),
jitlink::Linkage::Strong, jitlink::Scope::Default, false, true);
@@ -154,6 +155,10 @@ Error ELFNixPlatform::setupJITDylib(JITDylib &JD) {
std::make_unique<DSOHandleMaterializationUnit>(*this, DSOHandleSymbol));
}
+Error ELFNixPlatform::teardownJITDylib(JITDylib &JD) {
+ return Error::success();
+}
+
Error ELFNixPlatform::notifyAdding(ResourceTracker &RT,
const MaterializationUnit &MU) {
auto &JD = RT.getJITDylib();
@@ -315,9 +320,14 @@ void ELFNixPlatform::getInitializersLookupPhase(
SendInitializerSequenceFn SendResult, JITDylib &JD) {
auto DFSLinkOrder = JD.getDFSLinkOrder();
+ if (!DFSLinkOrder) {
+ SendResult(DFSLinkOrder.takeError());
+ return;
+ }
+
DenseMap<JITDylib *, SymbolLookupSet> NewInitSymbols;
ES.runSessionLocked([&]() {
- for (auto &InitJD : DFSLinkOrder) {
+ for (auto &InitJD : *DFSLinkOrder) {
auto RISItr = RegisteredInitSymbols.find(InitJD.get());
if (RISItr != RegisteredInitSymbols.end()) {
NewInitSymbols[InitJD.get()] = std::move(RISItr->second);
@@ -330,7 +340,7 @@ void ELFNixPlatform::getInitializersLookupPhase(
// phase.
if (NewInitSymbols.empty()) {
getInitializersBuildSequencePhase(std::move(SendResult), JD,
- std::move(DFSLinkOrder));
+ std::move(*DFSLinkOrder));
return;
}
@@ -375,7 +385,7 @@ void ELFNixPlatform::rt_getDeinitializers(
{
std::lock_guard<std::mutex> Lock(PlatformMutex);
- auto I = HandleAddrToJITDylib.find(Handle.getValue());
+ auto I = HandleAddrToJITDylib.find(Handle);
if (I != HandleAddrToJITDylib.end())
JD = I->second;
}
@@ -406,7 +416,7 @@ void ELFNixPlatform::rt_lookupSymbol(SendSymbolAddressFn SendResult,
{
std::lock_guard<std::mutex> Lock(PlatformMutex);
- auto I = HandleAddrToJITDylib.find(Handle.getValue());
+ auto I = HandleAddrToJITDylib.find(Handle);
if (I != HandleAddrToJITDylib.end())
JD = I->second;
}
@@ -630,12 +640,11 @@ void ELFNixPlatform::ELFNixPlatformPlugin::addDSOHandleSupportPasses(
assert(I != G.defined_symbols().end() && "Missing DSO handle symbol");
{
std::lock_guard<std::mutex> Lock(MP.PlatformMutex);
- JITTargetAddress HandleAddr = (*I)->getAddress();
+ auto HandleAddr = (*I)->getAddress();
MP.HandleAddrToJITDylib[HandleAddr] = &JD;
assert(!MP.InitSeqs.count(&JD) && "InitSeq entry for JD already exists");
MP.InitSeqs.insert(std::make_pair(
- &JD,
- ELFNixJITDylibInitializers(JD.getName(), ExecutorAddr(HandleAddr))));
+ &JD, ELFNixJITDylibInitializers(JD.getName(), HandleAddr)));
}
return Error::success();
});
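
Because the dso-handle address is now held as an ExecutorAddr, HandleAddrToJITDylib above can be keyed on the strong address type directly instead of on getValue(). A minimal sketch of that lookup, mirroring the hunks above and assuming the DenseMapInfo<ExecutorAddr> specialization provided by ExecutorAddress.h (the map and helper names are placeholders):

#include "llvm/ADT/DenseMap.h"
#include "llvm/ExecutionEngine/Orc/Core.h"
#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"

using namespace llvm;
using namespace llvm::orc;

// ExecutorAddr can serve as a DenseMap key without unwrapping it first.
static DenseMap<ExecutorAddr, JITDylib *> HandleAddrToDylib;

static JITDylib *lookupByHandle(ExecutorAddr Handle) {
  auto I = HandleAddrToDylib.find(Handle);
  return I == HandleAddrToDylib.end() ? nullptr : I->second;
}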
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCEHFrameRegistrar.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCEHFrameRegistrar.cpp
index 4c0fab8aa9fa..256ce94690f0 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCEHFrameRegistrar.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCEHFrameRegistrar.cpp
@@ -56,18 +56,15 @@ EPCEHFrameRegistrar::Create(ExecutionSession &ES) {
ExecutorAddr(DeregisterEHFrameWrapperFnAddr));
}
-Error EPCEHFrameRegistrar::registerEHFrames(JITTargetAddress EHFrameSectionAddr,
- size_t EHFrameSectionSize) {
- return ES.callSPSWrapper<void(SPSExecutorAddr, uint64_t)>(
- RegisterEHFrameWrapperFnAddr, ExecutorAddr(EHFrameSectionAddr),
- static_cast<uint64_t>(EHFrameSectionSize));
+Error EPCEHFrameRegistrar::registerEHFrames(ExecutorAddrRange EHFrameSection) {
+ return ES.callSPSWrapper<void(SPSExecutorAddrRange)>(
+ RegisterEHFrameWrapperFnAddr, EHFrameSection);
}
Error EPCEHFrameRegistrar::deregisterEHFrames(
- JITTargetAddress EHFrameSectionAddr, size_t EHFrameSectionSize) {
- return ES.callSPSWrapper<void(SPSExecutorAddr, uint64_t)>(
- DeregisterEHFrameWrapperFnAddr, ExecutorAddr(EHFrameSectionAddr),
- static_cast<uint64_t>(EHFrameSectionSize));
+ ExecutorAddrRange EHFrameSection) {
+ return ES.callSPSWrapper<void(SPSExecutorAddrRange)>(
+ DeregisterEHFrameWrapperFnAddr, EHFrameSection);
}
} // end namespace orc
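
EPCEHFrameRegistrar's register/deregister entry points now take a single ExecutorAddrRange instead of an address plus size, matching the SPSExecutorAddrRange payload of the wrappers they call. A usage sketch relying only on the two signatures shown above; the range would typically come from jitlink::SectionRange::getRange(), as in the plugins later in this diff:

#include "llvm/ExecutionEngine/Orc/EPCEHFrameRegistrar.h"

using namespace llvm;
using namespace llvm::orc;

// Register an eh-frame section, then deregister the same range on teardown.
static Error registerThenDeregister(EPCEHFrameRegistrar &Registrar,
                                    ExecutorAddrRange EHFrame) {
  if (Error Err = Registrar.registerEHFrames(EHFrame))
    return Err;
  return Registrar.deregisterEHFrames(EHFrame);
}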
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.cpp
index 9b712cb8f7ca..75cc30753f41 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.cpp
@@ -56,17 +56,7 @@ public:
}
// Transfer allocation actions.
- // FIXME: Merge JITLink and ORC SupportFunctionCall and Action list types,
- // turn this into a std::swap.
- FR.Actions.reserve(G.allocActions().size());
- for (auto &ActPair : G.allocActions())
- FR.Actions.push_back({{ExecutorAddr(ActPair.Finalize.FnAddr),
- {ExecutorAddr(ActPair.Finalize.CtxAddr),
- ExecutorAddrDiff(ActPair.Finalize.CtxSize)}},
- {ExecutorAddr(ActPair.Dealloc.FnAddr),
- {ExecutorAddr(ActPair.Dealloc.CtxAddr),
- ExecutorAddrDiff(ActPair.Dealloc.CtxSize)}}});
- G.allocActions().clear();
+ std::swap(FR.Actions, G.allocActions());
Parent.EPC.callSPSWrapperAsync<
rt::SPSSimpleExecutorMemoryManagerFinalizeSignature>(
@@ -80,7 +70,7 @@ public:
} else if (FinalizeErr)
OnFinalize(std::move(FinalizeErr));
else
- OnFinalize(FinalizedAlloc(AllocAddr.getValue()));
+ OnFinalize(FinalizedAlloc(AllocAddr));
},
Parent.SAs.Allocator, std::move(FR));
}
@@ -161,7 +151,7 @@ void EPCGenericJITLinkMemoryManager::completeAllocation(
const auto &AG = KV.first;
auto &Seg = KV.second;
- Seg.Addr = NextSegAddr.getValue();
+ Seg.Addr = NextSegAddr;
KV.second.WorkingMem = BL.getGraph().allocateBuffer(Seg.ContentSize).data();
NextSegAddr += ExecutorAddrDiff(
alignTo(Seg.ContentSize + Seg.ZeroFillSize, EPC.getPageSize()));
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.cpp
index 1d98e104a4d7..cdac367e11a3 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.cpp
@@ -14,6 +14,8 @@
#define DEBUG_TYPE "orc"
+using namespace llvm::orc::shared;
+
namespace llvm {
namespace orc {
@@ -27,10 +29,8 @@ EPCGenericRTDyldMemoryManager::CreateWithDefaultBootstrapSymbols(
{SAs.Finalize, rt::SimpleExecutorMemoryManagerFinalizeWrapperName},
{SAs.Deallocate,
rt::SimpleExecutorMemoryManagerDeallocateWrapperName},
- {SAs.RegisterEHFrame,
- rt::RegisterEHFrameSectionCustomDirectWrapperName},
- {SAs.DeregisterEHFrame,
- rt::DeregisterEHFrameSectionCustomDirectWrapperName}}))
+ {SAs.RegisterEHFrame, rt::RegisterEHFrameSectionWrapperName},
+ {SAs.DeregisterEHFrame, rt::DeregisterEHFrameSectionWrapperName}}))
return std::move(Err);
return std::make_unique<EPCGenericRTDyldMemoryManager>(EPC, std::move(SAs));
}
@@ -263,10 +263,12 @@ bool EPCGenericRTDyldMemoryManager::finalizeMemory(std::string *ErrMsg) {
for (auto &Frame : ObjAllocs.UnfinalizedEHFrames)
FR.Actions.push_back(
- {{SAs.RegisterEHFrame,
- {ExecutorAddr(Frame.Addr), ExecutorAddrDiff(Frame.Size)}},
- {SAs.DeregisterEHFrame,
- {ExecutorAddr(Frame.Addr), ExecutorAddrDiff(Frame.Size)}}});
+ {cantFail(
+ WrapperFunctionCall::Create<SPSArgList<SPSExecutorAddrRange>>(
+ SAs.RegisterEHFrame, Frame)),
+ cantFail(
+ WrapperFunctionCall::Create<SPSArgList<SPSExecutorAddrRange>>(
+ SAs.DeregisterEHFrame, Frame))});
// We'll also need to make an extra allocation for the eh-frame wrapper call
// arguments.
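
The finalize/deallocate actions above are now expressed as paired shared::WrapperFunctionCall objects whose ExecutorAddrRange argument is SPS-serialized up front, rather than as raw function-address/context triples. A sketch of building one such pair; only WrapperFunctionCall::Create as used in this hunk is relied on, and the header locations are assumed to match the ORC Shared headers touched elsewhere in this merge:

#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
#include "llvm/ExecutionEngine/Orc/Shared/WrapperFunctionUtils.h"
#include <utility>

using namespace llvm;
using namespace llvm::orc;
using namespace llvm::orc::shared;

// Serialize the range argument now; the calls run later in the executor when
// the allocation is finalized (register) or deallocated (deregister).
static Expected<std::pair<WrapperFunctionCall, WrapperFunctionCall>>
makeEHFrameActionPair(ExecutorAddr RegisterFn, ExecutorAddr DeregisterFn,
                      ExecutorAddrRange Frame) {
  auto Reg = WrapperFunctionCall::Create<SPSArgList<SPSExecutorAddrRange>>(
      RegisterFn, Frame);
  if (!Reg)
    return Reg.takeError();
  auto Dereg = WrapperFunctionCall::Create<SPSArgList<SPSExecutorAddrRange>>(
      DeregisterFn, Frame);
  if (!Dereg)
    return Dereg.takeError();
  return std::make_pair(std::move(*Reg), std::move(*Dereg));
}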
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCIndirectionUtils.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCIndirectionUtils.cpp
index 818b6b52ff83..b901a2d2da23 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCIndirectionUtils.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCIndirectionUtils.cpp
@@ -119,10 +119,12 @@ Error EPCTrampolinePool::grow() {
unsigned NumTrampolines = TrampolinesPerPage;
auto SegInfo = Alloc->getSegInfo(MemProt::Read | MemProt::Exec);
- EPCIU.getABISupport().writeTrampolines(
- SegInfo.WorkingMem.data(), SegInfo.Addr, ResolverAddress, NumTrampolines);
+ EPCIU.getABISupport().writeTrampolines(SegInfo.WorkingMem.data(),
+ SegInfo.Addr.getValue(),
+ ResolverAddress, NumTrampolines);
for (unsigned I = 0; I < NumTrampolines; ++I)
- AvailableTrampolines.push_back(SegInfo.Addr + (I * TrampolineSize));
+ AvailableTrampolines.push_back(SegInfo.Addr.getValue() +
+ (I * TrampolineSize));
auto FA = Alloc->finalize();
if (!FA)
@@ -300,15 +302,15 @@ EPCIndirectionUtils::writeResolverBlock(JITTargetAddress ReentryFnAddr,
return Alloc.takeError();
auto SegInfo = Alloc->getSegInfo(MemProt::Read | MemProt::Exec);
- ABI->writeResolverCode(SegInfo.WorkingMem.data(), SegInfo.Addr, ReentryFnAddr,
- ReentryCtxAddr);
+ ABI->writeResolverCode(SegInfo.WorkingMem.data(), SegInfo.Addr.getValue(),
+ ReentryFnAddr, ReentryCtxAddr);
auto FA = Alloc->finalize();
if (!FA)
return FA.takeError();
ResolverBlock = std::move(*FA);
- return SegInfo.Addr;
+ return SegInfo.Addr.getValue();
}
std::unique_ptr<IndirectStubsManager>
@@ -369,8 +371,9 @@ EPCIndirectionUtils::getIndirectStubs(unsigned NumStubs) {
auto StubSeg = Alloc->getSegInfo(StubProt);
auto PtrSeg = Alloc->getSegInfo(PtrProt);
- ABI->writeIndirectStubsBlock(StubSeg.WorkingMem.data(), StubSeg.Addr,
- PtrSeg.Addr, NumStubsToAllocate);
+ ABI->writeIndirectStubsBlock(StubSeg.WorkingMem.data(),
+ StubSeg.Addr.getValue(),
+ PtrSeg.Addr.getValue(), NumStubsToAllocate);
auto FA = Alloc->finalize();
if (!FA)
@@ -381,8 +384,8 @@ EPCIndirectionUtils::getIndirectStubs(unsigned NumStubs) {
auto StubExecutorAddr = StubSeg.Addr;
auto PtrExecutorAddr = PtrSeg.Addr;
for (unsigned I = 0; I != NumStubsToAllocate; ++I) {
- AvailableIndirectStubs.push_back(
- IndirectStubInfo(StubExecutorAddr, PtrExecutorAddr));
+ AvailableIndirectStubs.push_back(IndirectStubInfo(
+ StubExecutorAddr.getValue(), PtrExecutorAddr.getValue()));
StubExecutorAddr += ABI->getStubSize();
PtrExecutorAddr += ABI->getPointerSize();
}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/IndirectionUtils.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/IndirectionUtils.cpp
index f427271bb45d..7a71d2f781d7 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/IndirectionUtils.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/IndirectionUtils.cpp
@@ -410,7 +410,7 @@ Error addFunctionPointerRelocationsToCurrentSymbol(jitlink::Symbol &Sym,
while (I < Content.size()) {
MCInst Instr;
uint64_t InstrSize = 0;
- uint64_t InstrStart = SymAddress + I;
+ uint64_t InstrStart = SymAddress.getValue() + I;
auto DecodeStatus = Disassembler.getInstruction(
Instr, InstrSize, Content.drop_front(I), InstrStart, CommentStream);
if (DecodeStatus != MCDisassembler::Success) {
@@ -426,7 +426,7 @@ Error addFunctionPointerRelocationsToCurrentSymbol(jitlink::Symbol &Sym,
// Check for a PC-relative address equal to the symbol itself.
auto PCRelAddr =
MIA.evaluateMemoryOperandAddress(Instr, &STI, InstrStart, InstrSize);
- if (!PCRelAddr.hasValue() || PCRelAddr.getValue() != SymAddress)
+ if (!PCRelAddr || *PCRelAddr != SymAddress.getValue())
continue;
auto RelocOffInInstr =
@@ -438,8 +438,8 @@ Error addFunctionPointerRelocationsToCurrentSymbol(jitlink::Symbol &Sym,
continue;
}
- auto RelocOffInBlock =
- InstrStart + *RelocOffInInstr - SymAddress + Sym.getOffset();
+ auto RelocOffInBlock = orc::ExecutorAddr(InstrStart) + *RelocOffInInstr -
+ SymAddress + Sym.getOffset();
if (ExistingRelocations.contains(RelocOffInBlock))
continue;
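
IndirectionUtils.cpp also moves from Optional::hasValue()/getValue() to the contextual-bool and dereference operators, the spelling used throughout this merge. A tiny self-contained illustration (the helper is hypothetical):

#include "llvm/ADT/Optional.h"

// Prefer `if (Opt)` and `*Opt` over hasValue()/getValue().
static int valueOrDefault(llvm::Optional<int> Opt, int Default) {
  return Opt ? *Opt : Default;
}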
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/LLJIT.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/LLJIT.cpp
index 0ab0d7d2e2b6..91949c9d7eeb 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/LLJIT.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/LLJIT.cpp
@@ -89,6 +89,7 @@ class GenericLLVMIRPlatform : public Platform {
public:
GenericLLVMIRPlatform(GenericLLVMIRPlatformSupport &S) : S(S) {}
Error setupJITDylib(JITDylib &JD) override;
+ Error teardownJITDylib(JITDylib &JD) override;
Error notifyAdding(ResourceTracker &RT,
const MaterializationUnit &MU) override;
Error notifyRemoving(ResourceTracker &RT) override {
@@ -276,17 +277,22 @@ private:
DenseMap<JITDylib *, SymbolLookupSet> LookupSymbols;
std::vector<JITDylibSP> DFSLinkOrder;
- getExecutionSession().runSessionLocked([&]() {
- DFSLinkOrder = JD.getDFSLinkOrder();
-
- for (auto &NextJD : DFSLinkOrder) {
- auto IFItr = InitFunctions.find(NextJD.get());
- if (IFItr != InitFunctions.end()) {
- LookupSymbols[NextJD.get()] = std::move(IFItr->second);
- InitFunctions.erase(IFItr);
- }
- }
- });
+ if (auto Err = getExecutionSession().runSessionLocked([&]() -> Error {
+ if (auto DFSLinkOrderOrErr = JD.getDFSLinkOrder())
+ DFSLinkOrder = std::move(*DFSLinkOrderOrErr);
+ else
+ return DFSLinkOrderOrErr.takeError();
+
+ for (auto &NextJD : DFSLinkOrder) {
+ auto IFItr = InitFunctions.find(NextJD.get());
+ if (IFItr != InitFunctions.end()) {
+ LookupSymbols[NextJD.get()] = std::move(IFItr->second);
+ InitFunctions.erase(IFItr);
+ }
+ }
+ return Error::success();
+ }))
+ return std::move(Err);
LLVM_DEBUG({
dbgs() << "JITDylib init order is [ ";
@@ -326,20 +332,25 @@ private:
DenseMap<JITDylib *, SymbolLookupSet> LookupSymbols;
std::vector<JITDylibSP> DFSLinkOrder;
- ES.runSessionLocked([&]() {
- DFSLinkOrder = JD.getDFSLinkOrder();
-
- for (auto &NextJD : DFSLinkOrder) {
- auto &JDLookupSymbols = LookupSymbols[NextJD.get()];
- auto DIFItr = DeInitFunctions.find(NextJD.get());
- if (DIFItr != DeInitFunctions.end()) {
- LookupSymbols[NextJD.get()] = std::move(DIFItr->second);
- DeInitFunctions.erase(DIFItr);
- }
- JDLookupSymbols.add(LLJITRunAtExits,
- SymbolLookupFlags::WeaklyReferencedSymbol);
- }
- });
+ if (auto Err = ES.runSessionLocked([&]() -> Error {
+ if (auto DFSLinkOrderOrErr = JD.getDFSLinkOrder())
+ DFSLinkOrder = std::move(*DFSLinkOrderOrErr);
+ else
+ return DFSLinkOrderOrErr.takeError();
+
+ for (auto &NextJD : DFSLinkOrder) {
+ auto &JDLookupSymbols = LookupSymbols[NextJD.get()];
+ auto DIFItr = DeInitFunctions.find(NextJD.get());
+ if (DIFItr != DeInitFunctions.end()) {
+ LookupSymbols[NextJD.get()] = std::move(DIFItr->second);
+ DeInitFunctions.erase(DIFItr);
+ }
+ JDLookupSymbols.add(LLJITRunAtExits,
+ SymbolLookupFlags::WeaklyReferencedSymbol);
+ }
+ return Error::success();
+ }))
+ return std::move(Err);
LLVM_DEBUG({
dbgs() << "JITDylib deinit order is [ ";
@@ -380,17 +391,22 @@ private:
DenseMap<JITDylib *, SymbolLookupSet> RequiredInitSymbols;
std::vector<JITDylibSP> DFSLinkOrder;
- getExecutionSession().runSessionLocked([&]() {
- DFSLinkOrder = JD.getDFSLinkOrder();
-
- for (auto &NextJD : DFSLinkOrder) {
- auto ISItr = InitSymbols.find(NextJD.get());
- if (ISItr != InitSymbols.end()) {
- RequiredInitSymbols[NextJD.get()] = std::move(ISItr->second);
- InitSymbols.erase(ISItr);
- }
- }
- });
+ if (auto Err = getExecutionSession().runSessionLocked([&]() -> Error {
+ if (auto DFSLinkOrderOrErr = JD.getDFSLinkOrder())
+ DFSLinkOrder = std::move(*DFSLinkOrderOrErr);
+ else
+ return DFSLinkOrderOrErr.takeError();
+
+ for (auto &NextJD : DFSLinkOrder) {
+ auto ISItr = InitSymbols.find(NextJD.get());
+ if (ISItr != InitSymbols.end()) {
+ RequiredInitSymbols[NextJD.get()] = std::move(ISItr->second);
+ InitSymbols.erase(ISItr);
+ }
+ }
+ return Error::success();
+ }))
+ return Err;
return Platform::lookupInitSymbols(getExecutionSession(),
RequiredInitSymbols)
@@ -460,6 +476,10 @@ Error GenericLLVMIRPlatform::setupJITDylib(JITDylib &JD) {
return S.setupJITDylib(JD);
}
+Error GenericLLVMIRPlatform::teardownJITDylib(JITDylib &JD) {
+ return Error::success();
+}
+
Error GenericLLVMIRPlatform::notifyAdding(ResourceTracker &RT,
const MaterializationUnit &MU) {
return S.notifyAdding(RT, MU);
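
LLJIT.cpp picks up the new Platform::teardownJITDylib hook, which platforms now override alongside setupJITDylib; for the generic IR platform it is a no-op. A minimal sketch of a custom platform against the new interface, assuming the four virtual methods visible in these hunks are the complete overridable surface:

#include "llvm/ExecutionEngine/Orc/Core.h"

using namespace llvm;
using namespace llvm::orc;

class NullPlatform : public Platform {
public:
  Error setupJITDylib(JITDylib &JD) override { return Error::success(); }
  // New in this merge: invoked when a JITDylib is being removed.
  Error teardownJITDylib(JITDylib &JD) override { return Error::success(); }
  Error notifyAdding(ResourceTracker &RT,
                     const MaterializationUnit &MU) override {
    return Error::success();
  }
  Error notifyRemoving(ResourceTracker &RT) override {
    return Error::success();
  }
};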
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/MachOPlatform.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/MachOPlatform.cpp
index fb2e90e1c9c5..a364719855b4 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/MachOPlatform.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/MachOPlatform.cpp
@@ -106,7 +106,8 @@ private:
auto HeaderContent = G.allocateString(
StringRef(reinterpret_cast<const char *>(&Hdr), sizeof(Hdr)));
- return G.createContentBlock(HeaderSection, HeaderContent, 0, 8, 0);
+ return G.createContentBlock(HeaderSection, HeaderContent, ExecutorAddr(), 8,
+ 0);
}
static MaterializationUnit::Interface
@@ -202,6 +203,8 @@ Error MachOPlatform::setupJITDylib(JITDylib &JD) {
*this, MachOHeaderStartSymbol));
}
+Error MachOPlatform::teardownJITDylib(JITDylib &JD) { return Error::success(); }
+
Error MachOPlatform::notifyAdding(ResourceTracker &RT,
const MaterializationUnit &MU) {
auto &JD = RT.getJITDylib();
@@ -379,9 +382,14 @@ void MachOPlatform::getInitializersLookupPhase(
SendInitializerSequenceFn SendResult, JITDylib &JD) {
auto DFSLinkOrder = JD.getDFSLinkOrder();
+ if (!DFSLinkOrder) {
+ SendResult(DFSLinkOrder.takeError());
+ return;
+ }
+
DenseMap<JITDylib *, SymbolLookupSet> NewInitSymbols;
ES.runSessionLocked([&]() {
- for (auto &InitJD : DFSLinkOrder) {
+ for (auto &InitJD : *DFSLinkOrder) {
auto RISItr = RegisteredInitSymbols.find(InitJD.get());
if (RISItr != RegisteredInitSymbols.end()) {
NewInitSymbols[InitJD.get()] = std::move(RISItr->second);
@@ -394,7 +402,7 @@ void MachOPlatform::getInitializersLookupPhase(
// phase.
if (NewInitSymbols.empty()) {
getInitializersBuildSequencePhase(std::move(SendResult), JD,
- std::move(DFSLinkOrder));
+ std::move(*DFSLinkOrder));
return;
}
@@ -439,7 +447,7 @@ void MachOPlatform::rt_getDeinitializers(SendDeinitializerSequenceFn SendResult,
{
std::lock_guard<std::mutex> Lock(PlatformMutex);
- auto I = HeaderAddrToJITDylib.find(Handle.getValue());
+ auto I = HeaderAddrToJITDylib.find(Handle);
if (I != HeaderAddrToJITDylib.end())
JD = I->second;
}
@@ -469,7 +477,7 @@ void MachOPlatform::rt_lookupSymbol(SendSymbolAddressFn SendResult,
{
std::lock_guard<std::mutex> Lock(PlatformMutex);
- auto I = HeaderAddrToJITDylib.find(Handle.getValue());
+ auto I = HeaderAddrToJITDylib.find(Handle);
if (I != HeaderAddrToJITDylib.end())
JD = I->second;
}
@@ -661,11 +669,11 @@ Error MachOPlatform::MachOPlatformPlugin::associateJITDylibHeaderSymbol(
auto &JD = MR.getTargetJITDylib();
std::lock_guard<std::mutex> Lock(MP.PlatformMutex);
- JITTargetAddress HeaderAddr = (*I)->getAddress();
+ auto HeaderAddr = (*I)->getAddress();
MP.HeaderAddrToJITDylib[HeaderAddr] = &JD;
assert(!MP.InitSeqs.count(&JD) && "InitSeq entry for JD already exists");
- MP.InitSeqs.insert(std::make_pair(
- &JD, MachOJITDylibInitializers(JD.getName(), ExecutorAddr(HeaderAddr))));
+ MP.InitSeqs.insert(
+ std::make_pair(&JD, MachOJITDylibInitializers(JD.getName(), HeaderAddr)));
return Error::success();
}
@@ -792,7 +800,7 @@ Error MachOPlatform::MachOPlatformPlugin::registerInitSections(
if (auto *ObjCImageInfoSec = G.findSectionByName(ObjCImageInfoSectionName)) {
if (auto Addr = jitlink::SectionRange(*ObjCImageInfoSec).getStart())
- ObjCImageInfoAddr.setValue(Addr);
+ ObjCImageInfoAddr = Addr;
}
for (auto InitSectionName : InitSectionNames)
@@ -880,10 +888,12 @@ Error MachOPlatform::MachOPlatformPlugin::registerEHAndTLVSections(
jitlink::SectionRange R(*EHFrameSection);
if (!R.empty())
G.allocActions().push_back(
- {{MP.orc_rt_macho_register_ehframe_section.getValue(), R.getStart(),
- R.getSize()},
- {MP.orc_rt_macho_deregister_ehframe_section.getValue(), R.getStart(),
- R.getSize()}});
+ {cantFail(
+ WrapperFunctionCall::Create<SPSArgList<SPSExecutorAddrRange>>(
+ MP.orc_rt_macho_register_ehframe_section, R.getRange())),
+ cantFail(
+ WrapperFunctionCall::Create<SPSArgList<SPSExecutorAddrRange>>(
+ MP.orc_rt_macho_deregister_ehframe_section, R.getRange()))});
}
// Get a pointer to the thread data section if there is one. It will be used
@@ -913,10 +923,13 @@ Error MachOPlatform::MachOPlatformPlugin::registerEHAndTLVSections(
inconvertibleErrorCode());
G.allocActions().push_back(
- {{MP.orc_rt_macho_register_thread_data_section.getValue(),
- R.getStart(), R.getSize()},
- {MP.orc_rt_macho_deregister_thread_data_section.getValue(),
- R.getStart(), R.getSize()}});
+ {cantFail(
+ WrapperFunctionCall::Create<SPSArgList<SPSExecutorAddrRange>>(
+ MP.orc_rt_macho_register_thread_data_section, R.getRange())),
+ cantFail(
+ WrapperFunctionCall::Create<SPSArgList<SPSExecutorAddrRange>>(
+ MP.orc_rt_macho_deregister_thread_data_section,
+ R.getRange()))});
}
}
return Error::success();
@@ -963,10 +976,10 @@ Error MachOPlatform::MachOPlatformPlugin::registerEHSectionsPhase1(
// Otherwise, add allocation actions to the graph to register eh-frames for
// this object.
G.allocActions().push_back(
- {{orc_rt_macho_register_ehframe_section.getValue(), R.getStart(),
- R.getSize()},
- {orc_rt_macho_deregister_ehframe_section.getValue(), R.getStart(),
- R.getSize()}});
+ {cantFail(WrapperFunctionCall::Create<SPSArgList<SPSExecutorAddrRange>>(
+ orc_rt_macho_register_ehframe_section, R.getRange())),
+ cantFail(WrapperFunctionCall::Create<SPSArgList<SPSExecutorAddrRange>>(
+ orc_rt_macho_deregister_ehframe_section, R.getRange()))});
return Error::success();
}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ObjectLinkingLayer.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ObjectLinkingLayer.cpp
index 0d6a33c5685e..32c5998a789b 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ObjectLinkingLayer.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ObjectLinkingLayer.cpp
@@ -217,7 +217,7 @@ public:
Flags |= JITSymbolFlags::Exported;
InternedResult[InternedName] =
- JITEvaluatedSymbol(Sym->getAddress(), Flags);
+ JITEvaluatedSymbol(Sym->getAddress().getValue(), Flags);
if (AutoClaim && !MR->getSymbols().count(InternedName)) {
assert(!ExtraSymbolsToClaim.count(InternedName) &&
"Duplicate symbol to claim?");
@@ -235,7 +235,7 @@ public:
if (Sym->getLinkage() == Linkage::Weak)
Flags |= JITSymbolFlags::Weak;
InternedResult[InternedName] =
- JITEvaluatedSymbol(Sym->getAddress(), Flags);
+ JITEvaluatedSymbol(Sym->getAddress().getValue(), Flags);
if (AutoClaim && !MR->getSymbols().count(InternedName)) {
assert(!ExtraSymbolsToClaim.count(InternedName) &&
"Duplicate symbol to claim?");
@@ -743,7 +743,7 @@ void EHFrameRegistrationPlugin::modifyPassConfig(
PassConfiguration &PassConfig) {
PassConfig.PostFixupPasses.push_back(createEHFrameRecorderPass(
- G.getTargetTriple(), [this, &MR](JITTargetAddress Addr, size_t Size) {
+ G.getTargetTriple(), [this, &MR](ExecutorAddr Addr, size_t Size) {
if (Addr) {
std::lock_guard<std::mutex> Lock(EHFramePluginMutex);
assert(!InProcessLinks.count(&MR) &&
@@ -756,7 +756,7 @@ void EHFrameRegistrationPlugin::modifyPassConfig(
Error EHFrameRegistrationPlugin::notifyEmitted(
MaterializationResponsibility &MR) {
- EHFrameRange EmittedRange;
+ ExecutorAddrRange EmittedRange;
{
std::lock_guard<std::mutex> Lock(EHFramePluginMutex);
@@ -765,7 +765,7 @@ Error EHFrameRegistrationPlugin::notifyEmitted(
return Error::success();
EmittedRange = EHFrameRangeItr->second;
- assert(EmittedRange.Addr && "eh-frame addr to register can not be null");
+ assert(EmittedRange.Start && "eh-frame addr to register can not be null");
InProcessLinks.erase(EHFrameRangeItr);
}
@@ -773,7 +773,7 @@ Error EHFrameRegistrationPlugin::notifyEmitted(
[&](ResourceKey K) { EHFrameRanges[K].push_back(EmittedRange); }))
return Err;
- return Registrar->registerEHFrames(EmittedRange.Addr, EmittedRange.Size);
+ return Registrar->registerEHFrames(EmittedRange);
}
Error EHFrameRegistrationPlugin::notifyFailed(
@@ -784,7 +784,7 @@ Error EHFrameRegistrationPlugin::notifyFailed(
}
Error EHFrameRegistrationPlugin::notifyRemovingResources(ResourceKey K) {
- std::vector<EHFrameRange> RangesToRemove;
+ std::vector<ExecutorAddrRange> RangesToRemove;
ES.runSessionLocked([&] {
auto I = EHFrameRanges.find(K);
@@ -798,10 +798,9 @@ Error EHFrameRegistrationPlugin::notifyRemovingResources(ResourceKey K) {
while (!RangesToRemove.empty()) {
auto RangeToRemove = RangesToRemove.back();
RangesToRemove.pop_back();
- assert(RangeToRemove.Addr && "Untracked eh-frame range must not be null");
- Err = joinErrors(
- std::move(Err),
- Registrar->deregisterEHFrames(RangeToRemove.Addr, RangeToRemove.Size));
+ assert(RangeToRemove.Start && "Untracked eh-frame range must not be null");
+ Err = joinErrors(std::move(Err),
+ Registrar->deregisterEHFrames(RangeToRemove));
}
return Err;
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/OrcV2CBindings.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/OrcV2CBindings.cpp
index 77a8f5af8ba0..71be8dfdc004 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/OrcV2CBindings.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/OrcV2CBindings.cpp
@@ -611,7 +611,7 @@ LLVMErrorRef LLVMOrcCreateDynamicLibrarySearchGeneratorForProcess(
DynamicLibrarySearchGenerator::GetForCurrentProcess(GlobalPrefix, Pred);
if (!ProcessSymsGenerator) {
- *Result = 0;
+ *Result = nullptr;
return wrap(ProcessSymsGenerator.takeError());
}
@@ -637,7 +637,7 @@ LLVMErrorRef LLVMOrcCreateDynamicLibrarySearchGeneratorForPath(
DynamicLibrarySearchGenerator::Load(FileName, GlobalPrefix, Pred);
if (!LibrarySymsGenerator) {
- *Result = 0;
+ *Result = nullptr;
return wrap(LibrarySymsGenerator.takeError());
}
@@ -657,7 +657,7 @@ LLVMErrorRef LLVMOrcCreateStaticLibrarySearchGeneratorForPath(
auto LibrarySymsGenerator =
StaticLibraryDefinitionGenerator::Load(*unwrap(ObjLayer), FileName, TT);
if (!LibrarySymsGenerator) {
- *Result = 0;
+ *Result = nullptr;
return wrap(LibrarySymsGenerator.takeError());
}
*Result = wrap(LibrarySymsGenerator->release());
@@ -666,7 +666,7 @@ LLVMErrorRef LLVMOrcCreateStaticLibrarySearchGeneratorForPath(
auto LibrarySymsGenerator =
StaticLibraryDefinitionGenerator::Load(*unwrap(ObjLayer), FileName);
if (!LibrarySymsGenerator) {
- *Result = 0;
+ *Result = nullptr;
return wrap(LibrarySymsGenerator.takeError());
}
*Result = wrap(LibrarySymsGenerator->release());
@@ -712,7 +712,7 @@ LLVMErrorRef LLVMOrcJITTargetMachineBuilderDetectHost(
auto JTMB = JITTargetMachineBuilder::detectHost();
if (!JTMB) {
- Result = 0;
+ Result = nullptr;
return wrap(JTMB.takeError());
}
@@ -876,7 +876,7 @@ LLVMErrorRef LLVMOrcCreateLLJIT(LLVMOrcLLJITRef *Result,
LLVMOrcDisposeLLJITBuilder(Builder);
if (!J) {
- Result = 0;
+ Result = nullptr;
return wrap(J.takeError());
}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/AllocationActions.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/AllocationActions.cpp
new file mode 100644
index 000000000000..91f2899449ef
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/AllocationActions.cpp
@@ -0,0 +1,44 @@
+//===----- AllocationActions.cpp -- JITLink allocation support calls -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Shared/AllocationActions.h"
+
+namespace llvm {
+namespace orc {
+namespace shared {
+
+Expected<std::vector<WrapperFunctionCall>>
+runFinalizeActions(AllocActions &AAs) {
+ std::vector<WrapperFunctionCall> DeallocActions;
+ DeallocActions.reserve(numDeallocActions(AAs));
+
+ for (auto &AA : AAs) {
+ if (AA.Finalize)
+ if (auto Err = AA.Finalize.runWithSPSRetErrorMerged())
+ return joinErrors(std::move(Err), runDeallocActions(DeallocActions));
+
+ if (AA.Dealloc)
+ DeallocActions.push_back(std::move(AA.Dealloc));
+ }
+
+ AAs.clear();
+ return DeallocActions;
+}
+
+Error runDeallocActions(ArrayRef<WrapperFunctionCall> DAs) {
+ Error Err = Error::success();
+ while (!DAs.empty()) {
+ Err = joinErrors(std::move(Err), DAs.back().runWithSPSRetErrorMerged());
+ DAs = DAs.drop_back();
+ }
+ return Err;
+}
+
+} // namespace shared
+} // namespace orc
+} // namespace llvm
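
The new AllocationActions.cpp provides the shared driver that the memory managers in this merge call into: runFinalizeActions runs each Finalize call, collects the surviving Dealloc calls, and bails out with joined errors if a finalize step fails; runDeallocActions later runs those calls in reverse order. A usage sketch built only from the two functions defined above (the caller is hypothetical):

#include "llvm/ExecutionEngine/Orc/Shared/AllocationActions.h"

using namespace llvm;
using namespace llvm::orc::shared;

static Error finalizeThenTearDown(AllocActions &AAs) {
  // Run the finalize actions; on success we receive the dealloc actions
  // that must be kept for the lifetime of the allocation.
  auto DeallocActions = runFinalizeActions(AAs);
  if (!DeallocActions)
    return DeallocActions.takeError();
  // ... allocation is live here ...
  // Tear down: dealloc actions run last-registered-first.
  return runDeallocActions(*DeallocActions);
}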
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/OrcRTBridge.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/OrcRTBridge.cpp
index 02044e4af29a..5eae33121eb9 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/OrcRTBridge.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/OrcRTBridge.cpp
@@ -36,10 +36,10 @@ const char *MemoryWriteUInt64sWrapperName =
"__llvm_orc_bootstrap_mem_write_uint64s_wrapper";
const char *MemoryWriteBuffersWrapperName =
"__llvm_orc_bootstrap_mem_write_buffers_wrapper";
-const char *RegisterEHFrameSectionCustomDirectWrapperName =
- "__llvm_orc_bootstrap_register_ehframe_section_custom_direct_wrapper";
-const char *DeregisterEHFrameSectionCustomDirectWrapperName =
- "__llvm_orc_bootstrap_deregister_ehframe_section_custom_direct_wrapper";
+const char *RegisterEHFrameSectionWrapperName =
+ "__llvm_orc_bootstrap_register_ehframe_section_wrapper";
+const char *DeregisterEHFrameSectionWrapperName =
+ "__llvm_orc_bootstrap_deregister_ehframe_section_wrapper";
const char *RunAsMainWrapperName = "__llvm_orc_bootstrap_run_as_main_wrapper";
} // end namespace rt
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.cpp
index 4c15e25b1d89..ffa2969536e7 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.cpp
@@ -105,23 +105,25 @@ static void registerJITLoaderGDBImpl(const char *ObjAddr, size_t Size) {
extern "C" orc::shared::CWrapperFunctionResult
llvm_orc_registerJITLoaderGDBAllocAction(const char *Data, size_t Size) {
using namespace orc::shared;
- return WrapperFunction<SPSError()>::handle(nullptr, 0,
- [=]() -> Error {
- registerJITLoaderGDBImpl(Data,
- Size);
- return Error::success();
- })
+ return WrapperFunction<SPSError(SPSExecutorAddrRange)>::handle(
+ Data, Size,
+ [](ExecutorAddrRange R) {
+ registerJITLoaderGDBImpl(R.Start.toPtr<const char *>(),
+ R.size());
+ return Error::success();
+ })
.release();
}
extern "C" orc::shared::CWrapperFunctionResult
llvm_orc_registerJITLoaderGDBWrapper(const char *Data, uint64_t Size) {
using namespace orc::shared;
- return WrapperFunction<void(SPSExecutorAddrRange)>::handle(
+ return WrapperFunction<SPSError(SPSExecutorAddrRange)>::handle(
Data, Size,
[](ExecutorAddrRange R) {
- registerJITLoaderGDBImpl(R.Start.toPtr<char *>(),
- R.size().getValue());
+ registerJITLoaderGDBImpl(R.Start.toPtr<const char *>(),
+ R.size());
+ return Error::success();
})
.release();
}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.cpp
index 82aa62a0c0d9..909d47deef59 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.cpp
@@ -71,11 +71,10 @@ void addTo(StringMap<ExecutorAddr> &M) {
shared::SPSMemoryAccessUInt64Write>);
M[rt::MemoryWriteBuffersWrapperName] =
ExecutorAddr::fromPtr(&writeBuffersWrapper);
- M[rt::RegisterEHFrameSectionCustomDirectWrapperName] = ExecutorAddr::fromPtr(
- &llvm_orc_registerEHFrameSectionCustomDirectWrapper);
- M[rt::DeregisterEHFrameSectionCustomDirectWrapperName] =
- ExecutorAddr::fromPtr(
- &llvm_orc_deregisterEHFrameSectionCustomDirectWrapper);
+ M[rt::RegisterEHFrameSectionWrapperName] =
+ ExecutorAddr::fromPtr(&llvm_orc_registerEHFrameSectionWrapper);
+ M[rt::DeregisterEHFrameSectionWrapperName] =
+ ExecutorAddr::fromPtr(&llvm_orc_deregisterEHFrameSectionWrapper);
M[rt::RunAsMainWrapperName] = ExecutorAddr::fromPtr(&runAsMainWrapper);
}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.h
index 6b7ff79a3efc..92b513d0bb53 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.h
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.h
@@ -33,4 +33,4 @@ void addTo(StringMap<ExecutorAddr> &M);
} // end namespace orc
} // end namespace llvm
-#endif // LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_ORCRTBOOTSTRAP_H
+#endif // LIB_EXECUTIONENGINE_ORC_TARGETPROCESS_ORCRTBOOTSTRAP_H
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.cpp
index e331bad84200..fdae0e45da65 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.cpp
@@ -158,42 +158,26 @@ Error deregisterEHFrameSection(const void *EHFrameSectionAddr,
} // end namespace orc
} // end namespace llvm
-extern "C" llvm::orc::shared::CWrapperFunctionResult
-llvm_orc_registerEHFrameSectionCustomDirectWrapper(
- const char *EHFrameSectionAddr, uint64_t Size) {
- if (auto Err = registerEHFrameSection(EHFrameSectionAddr, Size))
- return WrapperFunctionResult::createOutOfBandError(toString(std::move(Err)))
- .release();
- return llvm::orc::shared::CWrapperFunctionResult();
+static Error registerEHFrameWrapper(ExecutorAddrRange EHFrame) {
+ return llvm::orc::registerEHFrameSection(EHFrame.Start.toPtr<const void *>(),
+ EHFrame.size());
}
-extern "C" llvm::orc::shared::CWrapperFunctionResult
-llvm_orc_deregisterEHFrameSectionCustomDirectWrapper(
- const char *EHFrameSectionAddr, uint64_t Size) {
- if (auto Err = deregisterEHFrameSection(EHFrameSectionAddr, Size))
- return WrapperFunctionResult::createOutOfBandError(toString(std::move(Err)))
- .release();
- return llvm::orc::shared::CWrapperFunctionResult();
-}
-
-static Error registerEHFrameWrapper(ExecutorAddr Addr, uint64_t Size) {
- return llvm::orc::registerEHFrameSection(Addr.toPtr<const void *>(), Size);
-}
-
-static Error deregisterEHFrameWrapper(ExecutorAddr Addr, uint64_t Size) {
- return llvm::orc::deregisterEHFrameSection(Addr.toPtr<const void *>(), Size);
+static Error deregisterEHFrameWrapper(ExecutorAddrRange EHFrame) {
+ return llvm::orc::deregisterEHFrameSection(
+ EHFrame.Start.toPtr<const void *>(), EHFrame.size());
}
extern "C" orc::shared::CWrapperFunctionResult
llvm_orc_registerEHFrameSectionWrapper(const char *Data, uint64_t Size) {
- return WrapperFunction<SPSError(SPSExecutorAddr, uint64_t)>::handle(
+ return WrapperFunction<SPSError(SPSExecutorAddrRange)>::handle(
Data, Size, registerEHFrameWrapper)
.release();
}
extern "C" orc::shared::CWrapperFunctionResult
llvm_orc_deregisterEHFrameSectionWrapper(const char *Data, uint64_t Size) {
- return WrapperFunction<SPSError(SPSExecutorAddr, uint64_t)>::handle(
+ return WrapperFunction<SPSError(SPSExecutorAddrRange)>::handle(
Data, Size, deregisterEHFrameWrapper)
.release();
}
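
RegisterEHFrames.cpp now exposes one pair of SPS wrappers whose payload is an SPSExecutorAddrRange; each handler deserializes the range and forwards it to the plain register/deregister functions. A sketch of that handler shape, with a hypothetical doSomethingWithRange standing in for the real work (header locations assumed to match the ORC Shared headers):

#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
#include "llvm/ExecutionEngine/Orc/Shared/WrapperFunctionUtils.h"

using namespace llvm;
using namespace llvm::orc;
using namespace llvm::orc::shared;

static Error doSomethingWithRange(ExecutorAddrRange R) {
  // Placeholder: a real handler would act on the bytes in [R.Start, R.End).
  return Error::success();
}

// C-ABI entry point: Data/Size carry the SPS-serialized ExecutorAddrRange.
extern "C" CWrapperFunctionResult myRangeWrapper(const char *Data,
                                                 uint64_t Size) {
  return WrapperFunction<SPSError(SPSExecutorAddrRange)>::handle(
             Data, Size, doSomethingWithRange)
      .release();
}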
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.cpp
index 232340c22a32..7cadf3bb51a7 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.cpp
@@ -24,7 +24,7 @@ SimpleExecutorMemoryManager::~SimpleExecutorMemoryManager() {
Expected<ExecutorAddr> SimpleExecutorMemoryManager::allocate(uint64_t Size) {
std::error_code EC;
auto MB = sys::Memory::allocateMappedMemory(
- Size, 0, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
+ Size, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
if (EC)
return errorCodeToError(EC);
std::lock_guard<std::mutex> Lock(M);
@@ -35,7 +35,7 @@ Expected<ExecutorAddr> SimpleExecutorMemoryManager::allocate(uint64_t Size) {
Error SimpleExecutorMemoryManager::finalize(tpctypes::FinalizeRequest &FR) {
ExecutorAddr Base(~0ULL);
- std::vector<tpctypes::WrapperFunctionCall> DeallocationActions;
+ std::vector<shared::WrapperFunctionCall> DeallocationActions;
size_t SuccessfulFinalizationActions = 0;
if (FR.Segments.empty()) {
@@ -52,8 +52,8 @@ Error SimpleExecutorMemoryManager::finalize(tpctypes::FinalizeRequest &FR) {
Base = std::min(Base, Seg.Addr);
for (auto &ActPair : FR.Actions)
- if (ActPair.Deallocate.Func)
- DeallocationActions.push_back(ActPair.Deallocate);
+ if (ActPair.Dealloc)
+ DeallocationActions.push_back(ActPair.Dealloc);
// Get the Allocation for this finalization.
size_t AllocSize = 0;
@@ -96,7 +96,7 @@ Error SimpleExecutorMemoryManager::finalize(tpctypes::FinalizeRequest &FR) {
while (SuccessfulFinalizationActions)
Err =
joinErrors(std::move(Err), FR.Actions[--SuccessfulFinalizationActions]
- .Deallocate.runWithSPSRet());
+ .Dealloc.runWithSPSRetErrorMerged());
// Deallocate memory.
sys::MemoryBlock MB(AllocToDestroy.first, AllocToDestroy.second.Size);
@@ -139,7 +139,7 @@ Error SimpleExecutorMemoryManager::finalize(tpctypes::FinalizeRequest &FR) {
// Run finalization actions.
for (auto &ActPair : FR.Actions) {
- if (auto Err = ActPair.Finalize.runWithSPSRet())
+ if (auto Err = ActPair.Finalize.runWithSPSRetErrorMerged())
return BailOut(std::move(Err));
++SuccessfulFinalizationActions;
}
@@ -212,7 +212,7 @@ Error SimpleExecutorMemoryManager::deallocateImpl(void *Base, Allocation &A) {
while (!A.DeallocationActions.empty()) {
Err = joinErrors(std::move(Err),
- A.DeallocationActions.back().runWithSPSRet());
+ A.DeallocationActions.back().runWithSPSRetErrorMerged());
A.DeallocationActions.pop_back();
}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
index 2b88c481dab0..33db23408cf2 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
@@ -97,8 +97,8 @@ private:
class EvalResult {
public:
- EvalResult() : Value(0), ErrorMsg("") {}
- EvalResult(uint64_t Value) : Value(Value), ErrorMsg("") {}
+ EvalResult() : Value(0) {}
+ EvalResult(uint64_t Value) : Value(Value) {}
EvalResult(std::string ErrorMsg)
: Value(0), ErrorMsg(std::move(ErrorMsg)) {}
uint64_t getValue() const { return Value; }
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
index 0de76ab78e0f..f92618afdff6 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
@@ -422,6 +422,8 @@ void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
default:
report_fatal_error("Relocation type not implemented yet!");
break;
+ case ELF::R_AARCH64_NONE:
+ break;
case ELF::R_AARCH64_ABS16: {
uint64_t Result = Value + Addend;
assert(static_cast<int64_t>(Result) >= INT16_MIN && Result < UINT16_MAX);
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp
index 6690dd07d99b..56b232b9dbcd 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp
@@ -114,11 +114,11 @@ uint8_t *SectionMemoryManager::allocateSection(
// Copy the address to all the other groups, if they have not
// been initialized.
- if (CodeMem.Near.base() == 0)
+ if (CodeMem.Near.base() == nullptr)
CodeMem.Near = MB;
- if (RODataMem.Near.base() == 0)
+ if (RODataMem.Near.base() == nullptr)
RODataMem.Near = MB;
- if (RWDataMem.Near.base() == 0)
+ if (RWDataMem.Near.base() == nullptr)
RWDataMem.Near = MB;
// Remember that we allocated this memory
diff --git a/contrib/llvm-project/llvm/lib/FileCheck/FileCheck.cpp b/contrib/llvm-project/llvm/lib/FileCheck/FileCheck.cpp
index c962231cbdc1..6186af444e73 100644
--- a/contrib/llvm-project/llvm/lib/FileCheck/FileCheck.cpp
+++ b/contrib/llvm-project/llvm/lib/FileCheck/FileCheck.cpp
@@ -1007,8 +1007,9 @@ bool Pattern::parsePattern(StringRef PatternStr, StringRef Prefix,
// brackets. They also accept a combined form which sets a numeric variable
// to the evaluation of an expression. Both string and numeric variable
// names must satisfy the regular expression "[a-zA-Z_][0-9a-zA-Z_]*" to be
- // valid, as this helps catch some common errors.
- if (PatternStr.startswith("[[")) {
+ // valid, as this helps catch some common errors. If there are extra '['s
+ // before the "[[", treat them literally.
+ if (PatternStr.startswith("[[") && !PatternStr.startswith("[[[")) {
StringRef UnparsedPatternStr = PatternStr.substr(2);
// Find the closing bracket pair ending the match. End is going to be an
// offset relative to the beginning of the match string.
@@ -1183,12 +1184,14 @@ bool Pattern::parsePattern(StringRef PatternStr, StringRef Prefix,
Substitutions.push_back(Substitution);
}
}
+
+ continue;
}
// Handle fixed string matches.
// Find the end, which is the start of the next regex.
- size_t FixedMatchEnd = PatternStr.find("{{");
- FixedMatchEnd = std::min(FixedMatchEnd, PatternStr.find("[["));
+ size_t FixedMatchEnd =
+ std::min(PatternStr.find("{{", 1), PatternStr.find("[[", 1));
RegExStr += Regex::escape(PatternStr.substr(0, FixedMatchEnd));
PatternStr = PatternStr.substr(FixedMatchEnd);
}
@@ -2215,7 +2218,7 @@ static Error reportMatchResult(bool ExpectedMatch, const SourceMgr &SM,
static unsigned CountNumNewlinesBetween(StringRef Range,
const char *&FirstNewLine) {
unsigned NumNewLines = 0;
- while (1) {
+ while (true) {
// Scan for newline.
Range = Range.substr(Range.find_first_of("\n\r"));
if (Range.empty())
diff --git a/contrib/llvm-project/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/contrib/llvm-project/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
index 5157d51fd18c..3b8d80c4eeec 100644
--- a/contrib/llvm-project/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
+++ b/contrib/llvm-project/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/AssumptionCache.h"
@@ -21,7 +22,9 @@
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/CFG.h"
+#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PassManager.h"
@@ -37,6 +40,7 @@
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"
+#include <cstdint>
#include <sstream>
#define DEBUG_TYPE "openmp-ir-builder"
@@ -56,6 +60,20 @@ static cl::opt<double> UnrollThresholdFactor(
"simplifications still taking place"),
cl::init(1.5));
+#ifndef NDEBUG
+/// Return whether IP1 and IP2 are ambiguous, i.e. that inserting instructions
+/// at position IP1 may change the meaning of IP2 or vice-versa. This is because
+/// an InsertPoint stores the instruction before something is inserted. For
+/// instance, if both point to the same instruction, two IRBuilders alternating
+/// creating instruction will cause the instructions to be interleaved.
+static bool isConflictIP(IRBuilder<>::InsertPoint IP1,
+ IRBuilder<>::InsertPoint IP2) {
+ if (!IP1.isSet() || !IP2.isSet())
+ return false;
+ return IP1.getBlock() == IP2.getBlock() && IP1.getPoint() == IP2.getPoint();
+}
+#endif
+
void OpenMPIRBuilder::addAttributes(omp::RuntimeFunction FnID, Function &Fn) {
LLVMContext &Ctx = Fn.getContext();
@@ -156,7 +174,7 @@ Function *OpenMPIRBuilder::getOrCreateRuntimeFunctionPtr(RuntimeFunction FnID) {
void OpenMPIRBuilder::initialize() { initializeTypes(M); }
-void OpenMPIRBuilder::finalize(Function *Fn, bool AllowExtractorSinking) {
+void OpenMPIRBuilder::finalize(Function *Fn) {
SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet;
SmallVector<BasicBlock *, 32> Blocks;
SmallVector<OutlineInfo, 16> DeferredOutlines;
@@ -175,7 +193,7 @@ void OpenMPIRBuilder::finalize(Function *Fn, bool AllowExtractorSinking) {
Function *OuterFn = OI.getFunction();
CodeExtractorAnalysisCache CEAC(*OuterFn);
CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr,
- /* AggregateArgs */ false,
+ /* AggregateArgs */ true,
/* BlockFrequencyInfo */ nullptr,
/* BranchProbabilityInfo */ nullptr,
/* AssumptionCache */ nullptr,
@@ -189,6 +207,9 @@ void OpenMPIRBuilder::finalize(Function *Fn, bool AllowExtractorSinking) {
assert(Extractor.isEligible() &&
"Expected OpenMP outlining to be possible!");
+ for (auto *V : OI.ExcludeArgsFromAggregate)
+ Extractor.excludeArgFromAggregate(V);
+
Function *OutlinedFn = Extractor.extractCodeRegion(CEAC);
LLVM_DEBUG(dbgs() << "After outlining: " << *OuterFn << "\n");
@@ -207,25 +228,25 @@ void OpenMPIRBuilder::finalize(Function *Fn, bool AllowExtractorSinking) {
BasicBlock &ArtificialEntry = OutlinedFn->getEntryBlock();
assert(ArtificialEntry.getUniqueSuccessor() == OI.EntryBB);
assert(OI.EntryBB->getUniquePredecessor() == &ArtificialEntry);
- if (AllowExtractorSinking) {
- // Move instructions from the to-be-deleted ArtificialEntry to the entry
- // basic block of the parallel region. CodeExtractor may have sunk
- // allocas/bitcasts for values that are solely used in the outlined
- // region and do not escape.
- assert(!ArtificialEntry.empty() &&
- "Expected instructions to sink in the outlined region");
- for (BasicBlock::iterator It = ArtificialEntry.begin(),
- End = ArtificialEntry.end();
- It != End;) {
- Instruction &I = *It;
- It++;
-
- if (I.isTerminator())
- continue;
-
- I.moveBefore(*OI.EntryBB, OI.EntryBB->getFirstInsertionPt());
- }
+ // Move instructions from the to-be-deleted ArtificialEntry to the entry
+ // basic block of the parallel region. CodeExtractor generates
+ // instructions to unwrap the aggregate argument and may sink
+ // allocas/bitcasts for values that are solely used in the outlined region
+ // and do not escape.
+ assert(!ArtificialEntry.empty() &&
+ "Expected instructions to add in the outlined region entry");
+ for (BasicBlock::reverse_iterator It = ArtificialEntry.rbegin(),
+ End = ArtificialEntry.rend();
+ It != End;) {
+ Instruction &I = *It;
+ It++;
+
+ if (I.isTerminator())
+ continue;
+
+ I.moveBefore(*OI.EntryBB, OI.EntryBB->getFirstInsertionPt());
}
+
OI.EntryBB->moveBefore(&ArtificialEntry);
ArtificialEntry.eraseFromParent();
}
@@ -251,23 +272,26 @@ GlobalValue *OpenMPIRBuilder::createGlobalFlag(unsigned Value, StringRef Name) {
new GlobalVariable(M, I32Ty,
/* isConstant = */ true, GlobalValue::WeakODRLinkage,
ConstantInt::get(I32Ty, Value), Name);
+ GV->setVisibility(GlobalValue::HiddenVisibility);
return GV;
}
-Value *OpenMPIRBuilder::getOrCreateIdent(Constant *SrcLocStr,
- IdentFlag LocFlags,
- unsigned Reserve2Flags) {
+Constant *OpenMPIRBuilder::getOrCreateIdent(Constant *SrcLocStr,
+ uint32_t SrcLocStrSize,
+ IdentFlag LocFlags,
+ unsigned Reserve2Flags) {
// Enable "C-mode".
LocFlags |= OMP_IDENT_FLAG_KMPC;
- Value *&Ident =
+ Constant *&Ident =
IdentMap[{SrcLocStr, uint64_t(LocFlags) << 31 | Reserve2Flags}];
if (!Ident) {
Constant *I32Null = ConstantInt::getNullValue(Int32);
- Constant *IdentData[] = {
- I32Null, ConstantInt::get(Int32, uint32_t(LocFlags)),
- ConstantInt::get(Int32, Reserve2Flags), I32Null, SrcLocStr};
+ Constant *IdentData[] = {I32Null,
+ ConstantInt::get(Int32, uint32_t(LocFlags)),
+ ConstantInt::get(Int32, Reserve2Flags),
+ ConstantInt::get(Int32, SrcLocStrSize), SrcLocStr};
Constant *Initializer =
ConstantStruct::get(OpenMPIRBuilder::Ident, IdentData);
@@ -290,10 +314,12 @@ Value *OpenMPIRBuilder::getOrCreateIdent(Constant *SrcLocStr,
}
}
- return Builder.CreatePointerCast(Ident, IdentPtr);
+ return ConstantExpr::getPointerBitCastOrAddrSpaceCast(Ident, IdentPtr);
}
-Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef LocStr) {
+Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef LocStr,
+ uint32_t &SrcLocStrSize) {
+ SrcLocStrSize = LocStr.size();
Constant *&SrcLocStr = SrcLocStrMap[LocStr];
if (!SrcLocStr) {
Constant *Initializer =
@@ -314,8 +340,8 @@ Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef LocStr) {
Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef FunctionName,
StringRef FileName,
- unsigned Line,
- unsigned Column) {
+ unsigned Line, unsigned Column,
+ uint32_t &SrcLocStrSize) {
SmallString<128> Buffer;
Buffer.push_back(';');
Buffer.append(FileName);
@@ -327,17 +353,21 @@ Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef FunctionName,
Buffer.append(std::to_string(Column));
Buffer.push_back(';');
Buffer.push_back(';');
- return getOrCreateSrcLocStr(Buffer.str());
+ return getOrCreateSrcLocStr(Buffer.str(), SrcLocStrSize);
}
-Constant *OpenMPIRBuilder::getOrCreateDefaultSrcLocStr() {
- return getOrCreateSrcLocStr(";unknown;unknown;0;0;;");
+Constant *
+OpenMPIRBuilder::getOrCreateDefaultSrcLocStr(uint32_t &SrcLocStrSize) {
+ StringRef UnknownLoc = ";unknown;unknown;0;0;;";
+ return getOrCreateSrcLocStr(UnknownLoc, SrcLocStrSize);
}
-Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(DebugLoc DL, Function *F) {
+Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(DebugLoc DL,
+ uint32_t &SrcLocStrSize,
+ Function *F) {
DILocation *DIL = DL.get();
if (!DIL)
- return getOrCreateDefaultSrcLocStr();
+ return getOrCreateDefaultSrcLocStr(SrcLocStrSize);
StringRef FileName = M.getName();
if (DIFile *DIF = DIL->getFile())
if (Optional<StringRef> Source = DIF->getSource())
@@ -346,12 +376,13 @@ Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(DebugLoc DL, Function *F) {
if (Function.empty() && F)
Function = F->getName();
return getOrCreateSrcLocStr(Function, FileName, DIL->getLine(),
- DIL->getColumn());
+ DIL->getColumn(), SrcLocStrSize);
}
-Constant *
-OpenMPIRBuilder::getOrCreateSrcLocStr(const LocationDescription &Loc) {
- return getOrCreateSrcLocStr(Loc.DL, Loc.IP.getBlock()->getParent());
+Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(const LocationDescription &Loc,
+ uint32_t &SrcLocStrSize) {
+ return getOrCreateSrcLocStr(Loc.DL, SrcLocStrSize,
+ Loc.IP.getBlock()->getParent());
}
Value *OpenMPIRBuilder::getOrCreateThreadID(Value *Ident) {
@@ -393,9 +424,11 @@ OpenMPIRBuilder::emitBarrierImpl(const LocationDescription &Loc, Directive Kind,
break;
}
- Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
- Value *Args[] = {getOrCreateIdent(SrcLocStr, BarrierLocFlags),
- getOrCreateThreadID(getOrCreateIdent(SrcLocStr))};
+ uint32_t SrcLocStrSize;
+ Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
+ Value *Args[] = {
+ getOrCreateIdent(SrcLocStr, SrcLocStrSize, BarrierLocFlags),
+ getOrCreateThreadID(getOrCreateIdent(SrcLocStr, SrcLocStrSize))};
// If we are in a cancellable parallel region, barriers are cancellation
// points.
@@ -441,8 +474,9 @@ OpenMPIRBuilder::createCancel(const LocationDescription &Loc,
llvm_unreachable("Unknown cancel kind!");
}
- Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
- Value *Ident = getOrCreateIdent(SrcLocStr);
+ uint32_t SrcLocStrSize;
+ Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
+ Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
Value *Args[] = {Ident, getOrCreateThreadID(Ident), CancelKind};
Value *Result = Builder.CreateCall(
getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_cancel), Args);
@@ -510,11 +544,14 @@ IRBuilder<>::InsertPoint OpenMPIRBuilder::createParallel(
BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
FinalizeCallbackTy FiniCB, Value *IfCondition, Value *NumThreads,
omp::ProcBindKind ProcBind, bool IsCancellable) {
+ assert(!isConflictIP(Loc.IP, OuterAllocaIP) && "IPs must not be ambiguous");
+
if (!updateToLocation(Loc))
return Loc.IP;
- Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
- Value *Ident = getOrCreateIdent(SrcLocStr);
+ uint32_t SrcLocStrSize;
+ Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
+ Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
Value *ThreadID = getOrCreateThreadID(Ident);
if (NumThreads) {
@@ -777,8 +814,10 @@ IRBuilder<>::InsertPoint OpenMPIRBuilder::createParallel(
getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num);
auto PrivHelper = [&](Value &V) {
- if (&V == TIDAddr || &V == ZeroAddr)
+ if (&V == TIDAddr || &V == ZeroAddr) {
+ OI.ExcludeArgsFromAggregate.push_back(&V);
return;
+ }
SetVector<Use *> Uses;
for (Use &U : V.uses())
@@ -871,8 +910,9 @@ IRBuilder<>::InsertPoint OpenMPIRBuilder::createParallel(
void OpenMPIRBuilder::emitFlush(const LocationDescription &Loc) {
// Build call void __kmpc_flush(ident_t *loc)
- Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
- Value *Args[] = {getOrCreateIdent(SrcLocStr)};
+ uint32_t SrcLocStrSize;
+ Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
+ Value *Args[] = {getOrCreateIdent(SrcLocStr, SrcLocStrSize)};
Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_flush), Args);
}
@@ -886,8 +926,9 @@ void OpenMPIRBuilder::createFlush(const LocationDescription &Loc) {
void OpenMPIRBuilder::emitTaskwaitImpl(const LocationDescription &Loc) {
// Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
// global_tid);
- Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
- Value *Ident = getOrCreateIdent(SrcLocStr);
+ uint32_t SrcLocStrSize;
+ Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
+ Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
Value *Args[] = {Ident, getOrCreateThreadID(Ident)};
// Ignore return result until untied tasks are supported.
@@ -903,8 +944,9 @@ void OpenMPIRBuilder::createTaskwait(const LocationDescription &Loc) {
void OpenMPIRBuilder::emitTaskyieldImpl(const LocationDescription &Loc) {
// Build call __kmpc_omp_taskyield(loc, thread_id, 0);
- Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
- Value *Ident = getOrCreateIdent(SrcLocStr);
+ uint32_t SrcLocStrSize;
+ Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
+ Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
Constant *I32Null = ConstantInt::getNullValue(Int32);
Value *Args[] = {Ident, getOrCreateThreadID(Ident), I32Null};
@@ -1114,14 +1156,16 @@ OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createReductions(
Module *Module = Func->getParent();
Value *RedArrayPtr =
Builder.CreateBitCast(RedArray, Builder.getInt8PtrTy(), "red.array.ptr");
- Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
+ uint32_t SrcLocStrSize;
+ Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
bool CanGenerateAtomic =
llvm::all_of(ReductionInfos, [](const ReductionInfo &RI) {
return RI.AtomicReductionGen;
});
- Value *Ident = getOrCreateIdent(
- SrcLocStr, CanGenerateAtomic ? IdentFlag::OMP_IDENT_FLAG_ATOMIC_REDUCE
- : IdentFlag(0));
+ Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize,
+ CanGenerateAtomic
+ ? IdentFlag::OMP_IDENT_FLAG_ATOMIC_REDUCE
+ : IdentFlag(0));
Value *ThreadId = getOrCreateThreadID(Ident);
Constant *NumVariables = Builder.getInt32(NumReductions);
const DataLayout &DL = Module->getDataLayout();
@@ -1235,8 +1279,9 @@ OpenMPIRBuilder::createMaster(const LocationDescription &Loc,
return Loc.IP;
Directive OMPD = Directive::OMPD_master;
- Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
- Value *Ident = getOrCreateIdent(SrcLocStr);
+ uint32_t SrcLocStrSize;
+ Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
+ Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
Value *ThreadId = getOrCreateThreadID(Ident);
Value *Args[] = {Ident, ThreadId};
@@ -1258,8 +1303,9 @@ OpenMPIRBuilder::createMasked(const LocationDescription &Loc,
return Loc.IP;
Directive OMPD = Directive::OMPD_masked;
- Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
- Value *Ident = getOrCreateIdent(SrcLocStr);
+ uint32_t SrcLocStrSize;
+ Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
+ Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
Value *ThreadId = getOrCreateThreadID(Ident);
Value *Args[] = {Ident, ThreadId, Filter};
Value *ArgsEnd[] = {Ident, ThreadId};
@@ -1475,13 +1521,16 @@ OpenMPIRBuilder::applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP,
bool NeedsBarrier, Value *Chunk) {
assert(CLI->isValid() && "Requires a valid canonical loop");
+ assert(!isConflictIP(AllocaIP, CLI->getPreheaderIP()) &&
+ "Require dedicated allocate IP");
// Set up the source location value for OpenMP runtime.
Builder.restoreIP(CLI->getPreheaderIP());
Builder.SetCurrentDebugLocation(DL);
- Constant *SrcLocStr = getOrCreateSrcLocStr(DL);
- Value *SrcLoc = getOrCreateIdent(SrcLocStr);
+ uint32_t SrcLocStrSize;
+ Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize);
+ Value *SrcLoc = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
// Declare useful OpenMP runtime functions.
Value *IV = CLI->getIndVar();
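applyStaticWorkshareLoop above (and applyDynamicWorkshareLoop below) now assert that the alloca insertion point is distinct from the loop preheader insertion point. A toy sketch of that kind of guard, using stand-in types rather than IRBuilder insertion points, and an isConflictIP whose exact definition here is an assumption:

    #include <cassert>

    // Stand-ins for llvm::BasicBlock and a (block, position) insertion point.
    struct BasicBlockTag {};
    struct InsertPoint {
      const BasicBlockTag *Block = nullptr;
      int Pos = 0;
    };

    // Two insertion points are treated as ambiguous when they name the same
    // spot, so callers must pass a dedicated alloca insertion point.
    static bool isConflictIP(InsertPoint A, InsertPoint B) {
      return A.Block == B.Block && A.Pos == B.Pos;
    }

    int main() {
      BasicBlockTag Entry, Preheader;
      InsertPoint AllocaIP{&Entry, 0}, PreheaderIP{&Preheader, 0};
      assert(!isConflictIP(AllocaIP, PreheaderIP)); // dedicated alloca IP is fine
      assert(isConflictIP(PreheaderIP, PreheaderIP)); // reusing one IP would trip the new assert
    }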
@@ -1604,12 +1653,15 @@ OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyDynamicWorkshareLoop(
DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP,
OMPScheduleType SchedType, bool NeedsBarrier, Value *Chunk) {
assert(CLI->isValid() && "Requires a valid canonical loop");
+ assert(!isConflictIP(AllocaIP, CLI->getPreheaderIP()) &&
+ "Require dedicated allocate IP");
// Set up the source location value for OpenMP runtime.
Builder.SetCurrentDebugLocation(DL);
- Constant *SrcLocStr = getOrCreateSrcLocStr(DL);
- Value *SrcLoc = getOrCreateIdent(SrcLocStr);
+ uint32_t SrcLocStrSize;
+ Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize);
+ Value *SrcLoc = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
// Declare useful OpenMP runtime functions.
Value *IV = CLI->getIndVar();
@@ -2119,6 +2171,19 @@ static void addLoopMetadata(CanonicalLoopInfo *Loop,
Latch->getTerminator()->setMetadata(LLVMContext::MD_loop, LoopID);
}
+/// Attach llvm.access.group metadata to the memref instructions of \p Block
+static void addSimdMetadata(BasicBlock *Block, MDNode *AccessGroup,
+ LoopInfo &LI) {
+ for (Instruction &I : *Block) {
+ if (I.mayReadOrWriteMemory()) {
+ // TODO: This instruction may already have access group from
+ // other pragmas e.g. #pragma clang loop vectorize. Append
+ // so that the existing metadata is not overwritten.
+ I.setMetadata(LLVMContext::MD_access_group, AccessGroup);
+ }
+ }
+}
+
void OpenMPIRBuilder::unrollLoopFull(DebugLoc, CanonicalLoopInfo *Loop) {
LLVMContext &Ctx = Builder.getContext();
addLoopMetadata(
@@ -2134,6 +2199,53 @@ void OpenMPIRBuilder::unrollLoopHeuristic(DebugLoc, CanonicalLoopInfo *Loop) {
});
}
+void OpenMPIRBuilder::applySimd(DebugLoc, CanonicalLoopInfo *CanonicalLoop) {
+ LLVMContext &Ctx = Builder.getContext();
+
+ Function *F = CanonicalLoop->getFunction();
+
+ FunctionAnalysisManager FAM;
+ FAM.registerPass([]() { return DominatorTreeAnalysis(); });
+ FAM.registerPass([]() { return LoopAnalysis(); });
+ FAM.registerPass([]() { return PassInstrumentationAnalysis(); });
+
+ LoopAnalysis LIA;
+ LoopInfo &&LI = LIA.run(*F, FAM);
+
+ Loop *L = LI.getLoopFor(CanonicalLoop->getHeader());
+
+ SmallSet<BasicBlock *, 8> Reachable;
+
+ // Get the basic blocks from the loop in which memref instructions
+ // can be found.
+ // TODO: Generalize getting all blocks inside a CanonicalizeLoopInfo,
+ // preferably without running any passes.
+ for (BasicBlock *Block : L->getBlocks()) {
+ if (Block == CanonicalLoop->getCond() ||
+ Block == CanonicalLoop->getHeader())
+ continue;
+ Reachable.insert(Block);
+ }
+
+ // Add access group metadata to memory-access instructions.
+ MDNode *AccessGroup = MDNode::getDistinct(Ctx, {});
+ for (BasicBlock *BB : Reachable)
+ addSimdMetadata(BB, AccessGroup, LI);
+
+ // Use the above access group metadata to create loop level
+ // metadata, which should be distinct for each loop.
+ ConstantAsMetadata *BoolConst =
+ ConstantAsMetadata::get(ConstantInt::getTrue(Type::getInt1Ty(Ctx)));
+ // TODO: If the loop has existing parallel access metadata, have
+ // to combine two lists.
+ addLoopMetadata(
+ CanonicalLoop,
+ {MDNode::get(Ctx, {MDString::get(Ctx, "llvm.loop.parallel_accesses"),
+ AccessGroup}),
+ MDNode::get(Ctx, {MDString::get(Ctx, "llvm.loop.vectorize.enable"),
+ BoolConst})});
+}
+
/// Create the TargetMachine object to query the backend for optimization
/// preferences.
///
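The new applySimd path above works in two layers: addSimdMetadata tags every memory access in the loop body with one distinct access group, and addLoopMetadata then declares that group parallel and enables vectorization at the loop level. A toy model of that scheme, with plain structs standing in for the LLVM metadata nodes:

    #include <cstdio>
    #include <string>
    #include <vector>

    // Stand-ins for a distinct !llvm.access.group node and the loop-level
    // metadata applySimd attaches; names are illustrative only.
    struct AccessGroup { int Id; };

    struct MemAccess {
      std::string Desc;
      const AccessGroup *Group = nullptr; // llvm.access.group on the instruction
    };

    struct LoopMD {
      std::vector<const AccessGroup *> ParallelAccesses; // llvm.loop.parallel_accesses
      bool VectorizeEnable = false;                      // llvm.loop.vectorize.enable
    };

    int main() {
      AccessGroup G{0};
      std::vector<MemAccess> Body = {{"load a[i]"}, {"store b[i]"}};
      for (MemAccess &MA : Body)
        MA.Group = &G; // addSimdMetadata: tag each memory access in the body
      LoopMD MD;
      MD.ParallelAccesses.push_back(&G); // addLoopMetadata: the group is parallel
      MD.VectorizeEnable = true;
      std::printf("%zu accesses in group %d, vectorize=%d\n", Body.size(), G.Id,
                  (int)MD.VectorizeEnable);
    }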
@@ -2243,7 +2355,7 @@ static int32_t computeHeuristicUnrollFactor(CanonicalLoopInfo *CLI) {
gatherPeelingPreferences(L, SE, TTI,
/*UserAllowPeeling=*/false,
/*UserAllowProfileBasedPeeling=*/false,
- /*UserUnrollingSpecficValues=*/false);
+ /*UnrollingSpecficValues=*/false);
SmallPtrSet<const Value *, 32> EphValues;
CodeMetrics::collectEphemeralValues(L, &AC, EphValues);
@@ -2379,8 +2491,9 @@ OpenMPIRBuilder::createCopyPrivate(const LocationDescription &Loc,
if (!updateToLocation(Loc))
return Loc.IP;
- Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
- Value *Ident = getOrCreateIdent(SrcLocStr);
+ uint32_t SrcLocStrSize;
+ Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
+ Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
Value *ThreadId = getOrCreateThreadID(Ident);
llvm::Value *DidItLD = Builder.CreateLoad(Builder.getInt32Ty(), DidIt);
@@ -2407,8 +2520,9 @@ OpenMPIRBuilder::createSingle(const LocationDescription &Loc,
}
Directive OMPD = Directive::OMPD_single;
- Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
- Value *Ident = getOrCreateIdent(SrcLocStr);
+ uint32_t SrcLocStrSize;
+ Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
+ Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
Value *ThreadId = getOrCreateThreadID(Ident);
Value *Args[] = {Ident, ThreadId};
@@ -2436,8 +2550,9 @@ OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createCritical(
return Loc.IP;
Directive OMPD = Directive::OMPD_critical;
- Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
- Value *Ident = getOrCreateIdent(SrcLocStr);
+ uint32_t SrcLocStrSize;
+ Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
+ Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
Value *ThreadId = getOrCreateThreadID(Ident);
Value *LockVar = getOMPCriticalRegionLock(CriticalName);
Value *Args[] = {Ident, ThreadId, LockVar};
@@ -2466,6 +2581,10 @@ OpenMPIRBuilder::createOrderedDepend(const LocationDescription &Loc,
InsertPointTy AllocaIP, unsigned NumLoops,
ArrayRef<llvm::Value *> StoreValues,
const Twine &Name, bool IsDependSource) {
+ for (size_t I = 0; I < StoreValues.size(); I++)
+ assert(StoreValues[I]->getType()->isIntegerTy(64) &&
+ "OpenMP runtime requires depend vec with i64 type");
+
if (!updateToLocation(Loc))
return Loc.IP;
@@ -2480,14 +2599,16 @@ OpenMPIRBuilder::createOrderedDepend(const LocationDescription &Loc,
for (unsigned I = 0; I < NumLoops; ++I) {
Value *DependAddrGEPIter = Builder.CreateInBoundsGEP(
ArrI64Ty, ArgsBase, {Builder.getInt64(0), Builder.getInt64(I)});
- Builder.CreateStore(StoreValues[I], DependAddrGEPIter);
+ StoreInst *STInst = Builder.CreateStore(StoreValues[I], DependAddrGEPIter);
+ STInst->setAlignment(Align(8));
}
Value *DependBaseAddrGEP = Builder.CreateInBoundsGEP(
ArrI64Ty, ArgsBase, {Builder.getInt64(0), Builder.getInt64(0)});
- Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
- Value *Ident = getOrCreateIdent(SrcLocStr);
+ uint32_t SrcLocStrSize;
+ Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
+ Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
Value *ThreadId = getOrCreateThreadID(Ident);
Value *Args[] = {Ident, ThreadId, DependBaseAddrGEP};
@@ -2512,8 +2633,9 @@ OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createOrderedThreadsSimd(
Instruction *ExitCall = nullptr;
if (IsThreads) {
- Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
- Value *Ident = getOrCreateIdent(SrcLocStr);
+ uint32_t SrcLocStrSize;
+ Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
+ Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
Value *ThreadId = getOrCreateThreadID(Ident);
Value *Args[] = {Ident, ThreadId};
@@ -2718,8 +2840,9 @@ CallInst *OpenMPIRBuilder::createOMPAlloc(const LocationDescription &Loc,
IRBuilder<>::InsertPointGuard IPG(Builder);
Builder.restoreIP(Loc.IP);
- Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
- Value *Ident = getOrCreateIdent(SrcLocStr);
+ uint32_t SrcLocStrSize;
+ Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
+ Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
Value *ThreadId = getOrCreateThreadID(Ident);
Value *Args[] = {ThreadId, Size, Allocator};
@@ -2734,8 +2857,9 @@ CallInst *OpenMPIRBuilder::createOMPFree(const LocationDescription &Loc,
IRBuilder<>::InsertPointGuard IPG(Builder);
Builder.restoreIP(Loc.IP);
- Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
- Value *Ident = getOrCreateIdent(SrcLocStr);
+ uint32_t SrcLocStrSize;
+ Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
+ Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
Value *ThreadId = getOrCreateThreadID(Ident);
Value *Args[] = {ThreadId, Addr, Allocator};
Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_free);
@@ -2748,8 +2872,9 @@ CallInst *OpenMPIRBuilder::createCachedThreadPrivate(
IRBuilder<>::InsertPointGuard IPG(Builder);
Builder.restoreIP(Loc.IP);
- Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
- Value *Ident = getOrCreateIdent(SrcLocStr);
+ uint32_t SrcLocStrSize;
+ Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
+ Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
Value *ThreadId = getOrCreateThreadID(Ident);
Constant *ThreadPrivateCache =
getOrCreateOMPInternalVariable(Int8PtrPtr, Name);
@@ -2767,8 +2892,9 @@ OpenMPIRBuilder::createTargetInit(const LocationDescription &Loc, bool IsSPMD,
if (!updateToLocation(Loc))
return Loc.IP;
- Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
- Value *Ident = getOrCreateIdent(SrcLocStr);
+ uint32_t SrcLocStrSize;
+ Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
+ Constant *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
ConstantInt *IsSPMDVal = ConstantInt::getSigned(
IntegerType::getInt8Ty(Int8->getContext()),
IsSPMD ? OMP_TGT_EXEC_MODE_SPMD : OMP_TGT_EXEC_MODE_GENERIC);
@@ -2820,8 +2946,9 @@ void OpenMPIRBuilder::createTargetDeinit(const LocationDescription &Loc,
if (!updateToLocation(Loc))
return;
- Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
- Value *Ident = getOrCreateIdent(SrcLocStr);
+ uint32_t SrcLocStrSize;
+ Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
+ Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
ConstantInt *IsSPMDVal = ConstantInt::getSigned(
IntegerType::getInt8Ty(Int8->getContext()),
IsSPMD ? OMP_TGT_EXEC_MODE_SPMD : OMP_TGT_EXEC_MODE_GENERIC);
@@ -2860,7 +2987,8 @@ Constant *OpenMPIRBuilder::getOrCreateOMPInternalVariable(
StringRef RuntimeName = Out.str();
auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first;
if (Elem.second) {
- assert(Elem.second->getType()->getPointerElementType() == Ty &&
+ assert(cast<PointerType>(Elem.second->getType())
+ ->isOpaqueOrPointeeTypeMatches(Ty) &&
"OMP internal variable has different type than requested");
} else {
// TODO: investigate the appropriate linkage type used for the global
diff --git a/contrib/llvm-project/llvm/lib/FuzzMutate/Operations.cpp b/contrib/llvm-project/llvm/lib/FuzzMutate/Operations.cpp
index a37fd5454dd4..221a3a84b49b 100644
--- a/contrib/llvm-project/llvm/lib/FuzzMutate/Operations.cpp
+++ b/contrib/llvm-project/llvm/lib/FuzzMutate/Operations.cpp
@@ -169,7 +169,7 @@ OpDescriptor llvm::fuzzerop::splitBlockDescriptor(unsigned Weight) {
OpDescriptor llvm::fuzzerop::gepDescriptor(unsigned Weight) {
auto buildGEP = [](ArrayRef<Value *> Srcs, Instruction *Inst) {
- Type *Ty = cast<PointerType>(Srcs[0]->getType())->getElementType();
+ Type *Ty = Srcs[0]->getType()->getPointerElementType();
auto Indices = makeArrayRef(Srcs).drop_front(1);
return GetElementPtrInst::Create(Ty, Srcs[0], Indices, "G", Inst);
};
diff --git a/contrib/llvm-project/llvm/lib/FuzzMutate/RandomIRBuilder.cpp b/contrib/llvm-project/llvm/lib/FuzzMutate/RandomIRBuilder.cpp
index 1295714839e8..27c3bdfb22a8 100644
--- a/contrib/llvm-project/llvm/lib/FuzzMutate/RandomIRBuilder.cpp
+++ b/contrib/llvm-project/llvm/lib/FuzzMutate/RandomIRBuilder.cpp
@@ -53,8 +53,8 @@ Value *RandomIRBuilder::newSource(BasicBlock &BB, ArrayRef<Instruction *> Insts,
IP = ++I->getIterator();
assert(IP != BB.end() && "guaranteed by the findPointer");
}
- auto *NewLoad = new LoadInst(
- cast<PointerType>(Ptr->getType())->getElementType(), Ptr, "L", &*IP);
+ auto *NewLoad =
+ new LoadInst(Ptr->getType()->getPointerElementType(), Ptr, "L", &*IP);
// Only sample this load if it really matches the descriptor
if (Pred.matches(Srcs, NewLoad))
@@ -141,12 +141,12 @@ Value *RandomIRBuilder::findPointer(BasicBlock &BB,
if (auto PtrTy = dyn_cast<PointerType>(Inst->getType())) {
// We can never generate loads from non first class or non sized types
- if (!PtrTy->getElementType()->isSized() ||
- !PtrTy->getElementType()->isFirstClassType())
+ Type *ElemTy = PtrTy->getPointerElementType();
+ if (!ElemTy->isSized() || !ElemTy->isFirstClassType())
return false;
// TODO: Check if this is horribly expensive.
- return Pred.matches(Srcs, UndefValue::get(PtrTy->getElementType()));
+ return Pred.matches(Srcs, UndefValue::get(ElemTy));
}
return false;
};
diff --git a/contrib/llvm-project/llvm/lib/IR/AsmWriter.cpp b/contrib/llvm-project/llvm/lib/IR/AsmWriter.cpp
index bbe0c97e60a2..179754e275b0 100644
--- a/contrib/llvm-project/llvm/lib/IR/AsmWriter.cpp
+++ b/contrib/llvm-project/llvm/lib/IR/AsmWriter.cpp
@@ -587,7 +587,7 @@ void TypePrinting::print(Type *Ty, raw_ostream &OS) {
OS << " addrspace(" << AddressSpace << ')';
return;
}
- print(PTy->getElementType(), OS);
+ print(PTy->getNonOpaquePointerElementType(), OS);
if (unsigned AddressSpace = PTy->getAddressSpace())
OS << " addrspace(" << AddressSpace << ')';
OS << '*';
@@ -1986,6 +1986,8 @@ static void writeDIStringType(raw_ostream &Out, const DIStringType *N,
Printer.printString("name", N->getName());
Printer.printMetadata("stringLength", N->getRawStringLength());
Printer.printMetadata("stringLengthExpression", N->getRawStringLengthExp());
+ Printer.printMetadata("stringLocationExpression",
+ N->getRawStringLocationExp());
Printer.printInt("size", N->getSizeInBits());
Printer.printInt("align", N->getAlignInBits());
Printer.printDwarfEnum("encoding", N->getEncoding(),
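The pointee-type churn in this file and in the two FuzzMutate files above is part of the opaque-pointer migration: code may no longer assume a PointerType knows its element type, so it either calls an explicitly non-opaque accessor or checks isOpaqueOrPointeeTypeMatches, as in the OpenMPIRBuilder assert earlier. A toy model of that contract, with simplified stand-in types rather than the LLVM classes:

    #include <cassert>
    #include <cstdio>

    struct Type { const char *Name; };

    // Stand-in for a pointer type under the opaque-pointer transition: the
    // pointee may be unknown, and callers must state which case they expect.
    struct PointerType {
      const Type *Pointee; // nullptr models an opaque pointer
      const Type *getNonOpaquePointerElementType() const {
        assert(Pointee && "only valid on typed (non-opaque) pointers");
        return Pointee;
      }
      bool isOpaqueOrPointeeTypeMatches(const Type *Ty) const {
        return Pointee == nullptr || Pointee == Ty;
      }
    };

    int main() {
      Type I64{"i64"};
      PointerType Typed{&I64}, Opaque{nullptr};
      std::printf("typed pointee: %s\n",
                  Typed.getNonOpaquePointerElementType()->Name);
      std::printf("opaque matches i64: %d\n",
                  (int)Opaque.isOpaqueOrPointeeTypeMatches(&I64));
    }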
diff --git a/contrib/llvm-project/llvm/lib/IR/Attributes.cpp b/contrib/llvm-project/llvm/lib/IR/Attributes.cpp
index c899afae6cce..c92bacaee36d 100644
--- a/contrib/llvm-project/llvm/lib/IR/Attributes.cpp
+++ b/contrib/llvm-project/llvm/lib/IR/Attributes.cpp
@@ -607,14 +607,14 @@ AttributeSet AttributeSet::get(LLVMContext &C, ArrayRef<Attribute> Attrs) {
AttributeSet AttributeSet::addAttribute(LLVMContext &C,
Attribute::AttrKind Kind) const {
if (hasAttribute(Kind)) return *this;
- AttrBuilder B;
+ AttrBuilder B(C);
B.addAttribute(Kind);
return addAttributes(C, AttributeSet::get(C, B));
}
AttributeSet AttributeSet::addAttribute(LLVMContext &C, StringRef Kind,
StringRef Value) const {
- AttrBuilder B;
+ AttrBuilder B(C);
B.addAttribute(Kind, Value);
return addAttributes(C, AttributeSet::get(C, B));
}
@@ -627,17 +627,15 @@ AttributeSet AttributeSet::addAttributes(LLVMContext &C,
if (!AS.hasAttributes())
return *this;
- AttrBuilder B(AS);
- for (const auto &I : *this)
- B.addAttribute(I);
-
- return get(C, B);
+ AttrBuilder B(C, *this);
+ B.merge(AttrBuilder(C, AS));
+ return get(C, B);
}
AttributeSet AttributeSet::removeAttribute(LLVMContext &C,
Attribute::AttrKind Kind) const {
if (!hasAttribute(Kind)) return *this;
- AttrBuilder B(*this);
+ AttrBuilder B(C, *this);
B.removeAttribute(Kind);
return get(C, B);
}
@@ -645,14 +643,14 @@ AttributeSet AttributeSet::removeAttribute(LLVMContext &C,
AttributeSet AttributeSet::removeAttribute(LLVMContext &C,
StringRef Kind) const {
if (!hasAttribute(Kind)) return *this;
- AttrBuilder B(*this);
+ AttrBuilder B(C, *this);
B.removeAttribute(Kind);
return get(C, B);
}
AttributeSet AttributeSet::removeAttributes(LLVMContext &C,
- const AttrBuilder &Attrs) const {
- AttrBuilder B(*this);
+ const AttributeMask &Attrs) const {
+ AttrBuilder B(C, *this);
// If there is nothing to remove, directly return the original set.
if (!B.overlaps(Attrs))
return *this;
@@ -817,28 +815,7 @@ AttributeSetNode *AttributeSetNode::getSorted(LLVMContext &C,
}
AttributeSetNode *AttributeSetNode::get(LLVMContext &C, const AttrBuilder &B) {
- // Add target-independent attributes.
- SmallVector<Attribute, 8> Attrs;
- for (Attribute::AttrKind Kind = Attribute::None;
- Kind != Attribute::EndAttrKinds; Kind = Attribute::AttrKind(Kind + 1)) {
- if (!B.contains(Kind))
- continue;
-
- Attribute Attr;
- if (Attribute::isTypeAttrKind(Kind))
- Attr = Attribute::get(C, Kind, B.getTypeAttr(Kind));
- else if (Attribute::isIntAttrKind(Kind))
- Attr = Attribute::get(C, Kind, B.getRawIntAttr(Kind));
- else
- Attr = Attribute::get(C, Kind);
- Attrs.push_back(Attr);
- }
-
- // Add target-dependent (string) attributes.
- for (const auto &TDA : B.td_attrs())
- Attrs.emplace_back(Attribute::get(C, TDA.first, TDA.second));
-
- return getSorted(C, Attrs);
+ return getSorted(C, B.attrs());
}
bool AttributeSetNode::hasAttribute(StringRef Kind) const {
@@ -1194,9 +1171,9 @@ AttributeList AttributeList::get(LLVMContext &C,
SmallVector<AttributeSet, 8> NewAttrSets(MaxSize);
for (unsigned I = 0; I < MaxSize; ++I) {
- AttrBuilder CurBuilder;
+ AttrBuilder CurBuilder(C);
for (const auto &List : Attrs)
- CurBuilder.merge(List.getAttributes(I - 1));
+ CurBuilder.merge(AttrBuilder(C, List.getAttributes(I - 1)));
NewAttrSets[I] = AttributeSet::get(C, CurBuilder);
}
@@ -1218,14 +1195,14 @@ AttributeList::addAttributeAtIndex(LLVMContext &C, unsigned Index,
AttributeList AttributeList::addAttributeAtIndex(LLVMContext &C, unsigned Index,
StringRef Kind,
StringRef Value) const {
- AttrBuilder B;
+ AttrBuilder B(C);
B.addAttribute(Kind, Value);
return addAttributesAtIndex(C, Index, B);
}
AttributeList AttributeList::addAttributeAtIndex(LLVMContext &C, unsigned Index,
Attribute A) const {
- AttrBuilder B;
+ AttrBuilder B(C);
B.addAttribute(A);
return addAttributesAtIndex(C, Index, B);
}
@@ -1250,16 +1227,7 @@ AttributeList AttributeList::addAttributesAtIndex(LLVMContext &C,
if (!pImpl)
return AttributeList::get(C, {{Index, AttributeSet::get(C, B)}});
-#ifndef NDEBUG
- // FIXME it is not obvious how this should work for alignment. For now, say
- // we can't change a known alignment.
- const MaybeAlign OldAlign = getAttributes(Index).getAlignment();
- const MaybeAlign NewAlign = B.getAlignment();
- assert((!OldAlign || !NewAlign || OldAlign == NewAlign) &&
- "Attempt to change alignment!");
-#endif
-
- AttrBuilder Merged(getAttributes(Index));
+ AttrBuilder Merged(C, getAttributes(Index));
Merged.merge(B);
return setAttributesAtIndex(C, Index, AttributeSet::get(C, Merged));
}
@@ -1276,7 +1244,7 @@ AttributeList AttributeList::addParamAttribute(LLVMContext &C,
for (unsigned ArgNo : ArgNos) {
unsigned Index = attrIdxToArrayIdx(ArgNo + FirstArgIndex);
- AttrBuilder B(AttrSets[Index]);
+ AttrBuilder B(C, AttrSets[Index]);
B.addAttribute(A);
AttrSets[Index] = AttributeSet::get(C, B);
}
@@ -1314,9 +1282,8 @@ AttributeList AttributeList::removeAttributeAtIndex(LLVMContext &C,
return getImpl(C, AttrSets);
}
-AttributeList
-AttributeList::removeAttributesAtIndex(LLVMContext &C, unsigned Index,
- const AttrBuilder &AttrsToRemove) const {
+AttributeList AttributeList::removeAttributesAtIndex(
+ LLVMContext &C, unsigned Index, const AttributeMask &AttrsToRemove) const {
AttributeSet Attrs = getAttributes(Index);
AttributeSet NewAttrs = Attrs.removeAttributes(C, AttrsToRemove);
// If nothing was removed, return the original list.
@@ -1340,7 +1307,7 @@ AttributeList::removeAttributesAtIndex(LLVMContext &C,
AttributeList AttributeList::addDereferenceableRetAttr(LLVMContext &C,
uint64_t Bytes) const {
- AttrBuilder B;
+ AttrBuilder B(C);
B.addDereferenceableAttr(Bytes);
return addRetAttributes(C, B);
}
@@ -1348,7 +1315,7 @@ AttributeList AttributeList::addDereferenceableRetAttr(LLVMContext &C,
AttributeList AttributeList::addDereferenceableParamAttr(LLVMContext &C,
unsigned Index,
uint64_t Bytes) const {
- AttrBuilder B;
+ AttrBuilder B(C);
B.addDereferenceableAttr(Bytes);
return addParamAttributes(C, Index, B);
}
@@ -1356,7 +1323,7 @@ AttributeList AttributeList::addDereferenceableParamAttr(LLVMContext &C,
AttributeList
AttributeList::addDereferenceableOrNullParamAttr(LLVMContext &C, unsigned Index,
uint64_t Bytes) const {
- AttrBuilder B;
+ AttrBuilder B(C);
B.addDereferenceableOrNullAttr(Bytes);
return addParamAttributes(C, Index, B);
}
@@ -1365,7 +1332,7 @@ AttributeList
AttributeList::addAllocSizeParamAttr(LLVMContext &C, unsigned Index,
unsigned ElemSizeArg,
const Optional<unsigned> &NumElemsArg) {
- AttrBuilder B;
+ AttrBuilder B(C);
B.addAllocSizeAttr(ElemSizeArg, NumElemsArg);
return addParamAttributes(C, Index, B);
}
@@ -1549,97 +1516,93 @@ LLVM_DUMP_METHOD void AttributeList::dump() const { print(dbgs()); }
// AttrBuilder Method Implementations
//===----------------------------------------------------------------------===//
-// FIXME: Remove this ctor, use AttributeSet.
-AttrBuilder::AttrBuilder(AttributeList AL, unsigned Index) {
- AttributeSet AS = AL.getAttributes(Index);
- for (const auto &A : AS)
- addAttribute(A);
-}
-
-AttrBuilder::AttrBuilder(AttributeSet AS) {
- for (const auto &A : AS)
- addAttribute(A);
+AttrBuilder::AttrBuilder(LLVMContext &Ctx, AttributeSet AS) : Ctx(Ctx) {
+ append_range(Attrs, AS);
+ assert(is_sorted(Attrs) && "AttributeSet should be sorted");
}
-void AttrBuilder::clear() {
- Attrs.reset();
- TargetDepAttrs.clear();
- IntAttrs = {};
- TypeAttrs = {};
-}
+void AttrBuilder::clear() { Attrs.clear(); }
-Optional<unsigned>
-AttrBuilder::kindToIntIndex(Attribute::AttrKind Kind) const {
- if (Attribute::isIntAttrKind(Kind))
- return Kind - Attribute::FirstIntAttr;
- return None;
-}
+/// Attribute comparator that only compares attribute keys. Enum attributes are
+/// sorted before string attributes.
+struct AttributeComparator {
+ bool operator()(Attribute A0, Attribute A1) const {
+ bool A0IsString = A0.isStringAttribute();
+ bool A1IsString = A1.isStringAttribute();
+ if (A0IsString) {
+ if (A1IsString)
+ return A0.getKindAsString() < A1.getKindAsString();
+ else
+ return false;
+ }
+ if (A1IsString)
+ return true;
+ return A0.getKindAsEnum() < A1.getKindAsEnum();
+ }
+ bool operator()(Attribute A0, Attribute::AttrKind Kind) const {
+ if (A0.isStringAttribute())
+ return false;
+ return A0.getKindAsEnum() < Kind;
+ }
+ bool operator()(Attribute A0, StringRef Kind) const {
+ if (A0.isStringAttribute())
+ return A0.getKindAsString() < Kind;
+ return true;
+ }
+};
-Optional<unsigned>
-AttrBuilder::kindToTypeIndex(Attribute::AttrKind Kind) const {
- if (Attribute::isTypeAttrKind(Kind))
- return Kind - Attribute::FirstTypeAttr;
- return None;
+template <typename K>
+static void addAttributeImpl(SmallVectorImpl<Attribute> &Attrs, K Kind,
+ Attribute Attr) {
+ auto It = lower_bound(Attrs, Kind, AttributeComparator());
+ if (It != Attrs.end() && It->hasAttribute(Kind))
+ std::swap(*It, Attr);
+ else
+ Attrs.insert(It, Attr);
}
AttrBuilder &AttrBuilder::addAttribute(Attribute Attr) {
- if (Attr.isStringAttribute()) {
- addAttribute(Attr.getKindAsString(), Attr.getValueAsString());
- return *this;
- }
-
- Attribute::AttrKind Kind = Attr.getKindAsEnum();
- Attrs[Kind] = true;
-
- if (Optional<unsigned> TypeIndex = kindToTypeIndex(Kind))
- TypeAttrs[*TypeIndex] = Attr.getValueAsType();
- else if (Optional<unsigned> IntIndex = kindToIntIndex(Kind))
- IntAttrs[*IntIndex] = Attr.getValueAsInt();
+ if (Attr.isStringAttribute())
+ addAttributeImpl(Attrs, Attr.getKindAsString(), Attr);
+ else
+ addAttributeImpl(Attrs, Attr.getKindAsEnum(), Attr);
+ return *this;
+}
+AttrBuilder &AttrBuilder::addAttribute(Attribute::AttrKind Kind) {
+ addAttributeImpl(Attrs, Kind, Attribute::get(Ctx, Kind));
return *this;
}
AttrBuilder &AttrBuilder::addAttribute(StringRef A, StringRef V) {
- TargetDepAttrs[A] = V;
+ addAttributeImpl(Attrs, A, Attribute::get(Ctx, A, V));
return *this;
}
AttrBuilder &AttrBuilder::removeAttribute(Attribute::AttrKind Val) {
assert((unsigned)Val < Attribute::EndAttrKinds && "Attribute out of range!");
- Attrs[Val] = false;
-
- if (Optional<unsigned> TypeIndex = kindToTypeIndex(Val))
- TypeAttrs[*TypeIndex] = nullptr;
- else if (Optional<unsigned> IntIndex = kindToIntIndex(Val))
- IntAttrs[*IntIndex] = 0;
-
- return *this;
-}
-
-AttrBuilder &AttrBuilder::removeAttributes(AttributeList A, uint64_t Index) {
- remove(A.getAttributes(Index));
+ auto It = lower_bound(Attrs, Val, AttributeComparator());
+ if (It != Attrs.end() && It->hasAttribute(Val))
+ Attrs.erase(It);
return *this;
}
AttrBuilder &AttrBuilder::removeAttribute(StringRef A) {
- TargetDepAttrs.erase(A);
+ auto It = lower_bound(Attrs, A, AttributeComparator());
+ if (It != Attrs.end() && It->hasAttribute(A))
+ Attrs.erase(It);
return *this;
}
uint64_t AttrBuilder::getRawIntAttr(Attribute::AttrKind Kind) const {
- Optional<unsigned> IntIndex = kindToIntIndex(Kind);
- assert(IntIndex && "Not an int attribute");
- return IntAttrs[*IntIndex];
+ assert(Attribute::isIntAttrKind(Kind) && "Not an int attribute");
+ Attribute A = getAttribute(Kind);
+ return A.isValid() ? A.getValueAsInt() : 0;
}
AttrBuilder &AttrBuilder::addRawIntAttr(Attribute::AttrKind Kind,
uint64_t Value) {
- Optional<unsigned> IntIndex = kindToIntIndex(Kind);
- assert(IntIndex && "Not an int attribute");
- assert(Value && "Value cannot be zero");
- Attrs[Kind] = true;
- IntAttrs[*IntIndex] = Value;
- return *this;
+ return addAttribute(Attribute::get(Ctx, Kind, Value));
}
std::pair<unsigned, Optional<unsigned>> AttrBuilder::getAllocSizeArgs() const {
@@ -1709,17 +1672,13 @@ AttrBuilder &AttrBuilder::addVScaleRangeAttrFromRawRepr(uint64_t RawArgs) {
}
Type *AttrBuilder::getTypeAttr(Attribute::AttrKind Kind) const {
- Optional<unsigned> TypeIndex = kindToTypeIndex(Kind);
- assert(TypeIndex && "Not a type attribute");
- return TypeAttrs[*TypeIndex];
+ assert(Attribute::isTypeAttrKind(Kind) && "Not a type attribute");
+ Attribute A = getAttribute(Kind);
+ return A.isValid() ? A.getValueAsType() : nullptr;
}
AttrBuilder &AttrBuilder::addTypeAttr(Attribute::AttrKind Kind, Type *Ty) {
- Optional<unsigned> TypeIndex = kindToTypeIndex(Kind);
- assert(TypeIndex && "Not a type attribute");
- Attrs[Kind] = true;
- TypeAttrs[*TypeIndex] = Ty;
- return *this;
+ return addAttribute(Attribute::get(Ctx, Kind, Ty));
}
AttrBuilder &AttrBuilder::addByValAttr(Type *Ty) {
@@ -1743,76 +1702,43 @@ AttrBuilder &AttrBuilder::addInAllocaAttr(Type *Ty) {
}
AttrBuilder &AttrBuilder::merge(const AttrBuilder &B) {
- // FIXME: What if both have an int/type attribute, but they don't match?!
- for (unsigned Index = 0; Index < Attribute::NumIntAttrKinds; ++Index)
- if (!IntAttrs[Index])
- IntAttrs[Index] = B.IntAttrs[Index];
-
- for (unsigned Index = 0; Index < Attribute::NumTypeAttrKinds; ++Index)
- if (!TypeAttrs[Index])
- TypeAttrs[Index] = B.TypeAttrs[Index];
-
- Attrs |= B.Attrs;
-
- for (const auto &I : B.td_attrs())
- TargetDepAttrs[I.first] = I.second;
+ // TODO: Could make this O(n) as we're merging two sorted lists.
+ for (const auto &I : B.attrs())
+ addAttribute(I);
return *this;
}
-AttrBuilder &AttrBuilder::remove(const AttrBuilder &B) {
- // FIXME: What if both have an int/type attribute, but they don't match?!
- for (unsigned Index = 0; Index < Attribute::NumIntAttrKinds; ++Index)
- if (B.IntAttrs[Index])
- IntAttrs[Index] = 0;
-
- for (unsigned Index = 0; Index < Attribute::NumTypeAttrKinds; ++Index)
- if (B.TypeAttrs[Index])
- TypeAttrs[Index] = nullptr;
-
- Attrs &= ~B.Attrs;
-
- for (const auto &I : B.td_attrs())
- TargetDepAttrs.erase(I.first);
-
+AttrBuilder &AttrBuilder::remove(const AttributeMask &AM) {
+ erase_if(Attrs, [&](Attribute A) { return AM.contains(A); });
return *this;
}
-bool AttrBuilder::overlaps(const AttrBuilder &B) const {
- // First check if any of the target independent attributes overlap.
- if ((Attrs & B.Attrs).any())
- return true;
-
- // Then check if any target dependent ones do.
- for (const auto &I : td_attrs())
- if (B.contains(I.first))
- return true;
-
- return false;
+bool AttrBuilder::overlaps(const AttributeMask &AM) const {
+ return any_of(Attrs, [&](Attribute A) { return AM.contains(A); });
}
-bool AttrBuilder::contains(StringRef A) const {
- return TargetDepAttrs.find(A) != TargetDepAttrs.end();
+Attribute AttrBuilder::getAttribute(Attribute::AttrKind A) const {
+ assert((unsigned)A < Attribute::EndAttrKinds && "Attribute out of range!");
+ auto It = lower_bound(Attrs, A, AttributeComparator());
+ if (It != Attrs.end() && It->hasAttribute(A))
+ return *It;
+ return {};
}
-bool AttrBuilder::hasAttributes() const {
- return !Attrs.none() || !TargetDepAttrs.empty();
+Attribute AttrBuilder::getAttribute(StringRef A) const {
+ auto It = lower_bound(Attrs, A, AttributeComparator());
+ if (It != Attrs.end() && It->hasAttribute(A))
+ return *It;
+ return {};
}
-bool AttrBuilder::hasAttributes(AttributeList AL, uint64_t Index) const {
- AttributeSet AS = AL.getAttributes(Index);
-
- for (const auto &Attr : AS) {
- if (Attr.isEnumAttribute() || Attr.isIntAttribute()) {
- if (contains(Attr.getKindAsEnum()))
- return true;
- } else {
- assert(Attr.isStringAttribute() && "Invalid attribute kind!");
- return contains(Attr.getKindAsString());
- }
- }
+bool AttrBuilder::contains(Attribute::AttrKind A) const {
+ return getAttribute(A).isValid();
+}
- return false;
+bool AttrBuilder::contains(StringRef A) const {
+ return getAttribute(A).isValid();
}
bool AttrBuilder::hasAlignmentAttr() const {
@@ -1820,14 +1746,7 @@ bool AttrBuilder::hasAlignmentAttr() const {
}
bool AttrBuilder::operator==(const AttrBuilder &B) const {
- if (Attrs != B.Attrs)
- return false;
-
- for (const auto &TDA : TargetDepAttrs)
- if (B.TargetDepAttrs.find(TDA.first) == B.TargetDepAttrs.end())
- return false;
-
- return IntAttrs == B.IntAttrs && TypeAttrs == B.TypeAttrs;
+ return Attrs == B.Attrs;
}
//===----------------------------------------------------------------------===//
@@ -1835,16 +1754,16 @@ bool AttrBuilder::operator==(const AttrBuilder &B) const {
//===----------------------------------------------------------------------===//
/// Which attributes cannot be applied to a type.
-AttrBuilder AttributeFuncs::typeIncompatible(Type *Ty) {
- AttrBuilder Incompatible;
+AttributeMask AttributeFuncs::typeIncompatible(Type *Ty) {
+ AttributeMask Incompatible;
if (!Ty->isIntegerTy())
- // Attribute that only apply to integers.
+ // Attributes that only apply to integers.
Incompatible.addAttribute(Attribute::SExt)
.addAttribute(Attribute::ZExt);
if (!Ty->isPointerTy())
- // Attribute that only apply to pointers.
+ // Attributes that only apply to pointers.
Incompatible.addAttribute(Attribute::Nest)
.addAttribute(Attribute::NoAlias)
.addAttribute(Attribute::NoCapture)
@@ -1852,15 +1771,18 @@ AttrBuilder AttributeFuncs::typeIncompatible(Type *Ty) {
.addAttribute(Attribute::ReadNone)
.addAttribute(Attribute::ReadOnly)
.addAttribute(Attribute::SwiftError)
- .addAlignmentAttr(1) // the int here is ignored
- .addDereferenceableAttr(1) // the int here is ignored
- .addDereferenceableOrNullAttr(1) // the int here is ignored
- .addPreallocatedAttr(Ty)
- .addInAllocaAttr(Ty)
- .addByValAttr(Ty)
- .addStructRetAttr(Ty)
- .addByRefAttr(Ty)
- .addTypeAttr(Attribute::ElementType, Ty);
+ .addAttribute(Attribute::Dereferenceable)
+ .addAttribute(Attribute::DereferenceableOrNull)
+ .addAttribute(Attribute::Preallocated)
+ .addAttribute(Attribute::InAlloca)
+ .addAttribute(Attribute::ByVal)
+ .addAttribute(Attribute::StructRet)
+ .addAttribute(Attribute::ByRef)
+ .addAttribute(Attribute::ElementType);
+
+ if (!Ty->isPtrOrPtrVectorTy())
+ // Attributes that only apply to pointers or vectors of pointers.
+ Incompatible.addAttribute(Attribute::Alignment);
// Some attributes can apply to all "values" but there are no `void` values.
if (Ty->isVoidTy())
@@ -1869,12 +1791,12 @@ AttrBuilder AttributeFuncs::typeIncompatible(Type *Ty) {
return Incompatible;
}
-AttrBuilder AttributeFuncs::getUBImplyingAttributes() {
- AttrBuilder B;
- B.addAttribute(Attribute::NoUndef);
- B.addDereferenceableAttr(1);
- B.addDereferenceableOrNullAttr(1);
- return B;
+AttributeMask AttributeFuncs::getUBImplyingAttributes() {
+ AttributeMask AM;
+ AM.addAttribute(Attribute::NoUndef);
+ AM.addAttribute(Attribute::Dereferenceable);
+ AM.addAttribute(Attribute::DereferenceableOrNull);
+ return AM;
}
template<typename AttrClass>
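typeIncompatible and getUBImplyingAttributes now return an AttributeMask instead of an AttrBuilder: a mask names only the attribute kinds to strip, so removing value-carrying attributes such as dereferenceable or byval no longer needs placeholder payloads (the old addDereferenceableAttr(1) trick above). A simplified standalone sketch of that distinction, with stand-in types:

    #include <cassert>
    #include <set>
    #include <string>

    // Simplified stand-in for AttributeMask: it records which attribute kinds
    // to remove, with no attribute values attached.
    enum class AttrKind { SExt, ZExt, Dereferenceable, ByVal, Alignment };

    struct AttributeMask {
      std::set<AttrKind> Kinds;
      std::set<std::string> StringKinds;
      AttributeMask &addAttribute(AttrKind K) {
        Kinds.insert(K);
        return *this;
      }
      bool contains(AttrKind K) const { return Kinds.count(K) != 0; }
    };

    int main() {
      AttributeMask Incompatible;
      Incompatible.addAttribute(AttrKind::Dereferenceable)
          .addAttribute(AttrKind::ByVal); // kind only; no dummy byte count or type
      assert(Incompatible.contains(AttrKind::ByVal));
      assert(!Incompatible.contains(AttrKind::SExt));
    }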
@@ -1910,10 +1832,16 @@ static void setOR(Function &Caller, const Function &Callee) {
/// If the inlined function had a higher stack protection level than the
/// calling function, then bump up the caller's stack protection level.
static void adjustCallerSSPLevel(Function &Caller, const Function &Callee) {
+ // If the calling function has *no* stack protection level (e.g. it was built
+ // with Clang's -fno-stack-protector or no_stack_protector attribute), don't
+ // change it as that could change the program's semantics.
+ if (!Caller.hasStackProtectorFnAttr())
+ return;
+
// If upgrading the SSP attribute, clear out the old SSP Attributes first.
// Having multiple SSP attributes doesn't actually hurt, but it adds useless
// clutter to the IR.
- AttrBuilder OldSSPAttr;
+ AttributeMask OldSSPAttr;
OldSSPAttr.addAttribute(Attribute::StackProtect)
.addAttribute(Attribute::StackProtectStrong)
.addAttribute(Attribute::StackProtectReq);
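The larger rewrite in this file replaces AttrBuilder's bitset-plus-side-tables storage with a single sorted vector of Attribute objects, keyed by attribute kind (enum kinds before string kinds) and maintained with lower_bound insertion so a duplicate key overwrites in place. A standalone sketch of that storage scheme, using simplified types rather than llvm::Attribute:

    #include <algorithm>
    #include <cassert>
    #include <string>
    #include <vector>

    // Simplified attribute: either an enum kind or a string kind with a value.
    enum class Kind { NoUndef, SExt, ZExt, String };

    struct Attr {
      Kind K;
      std::string Str;   // key, used only when K == Kind::String
      std::string Value; // payload for string attributes
    };

    // Key-only comparator mirroring the AttributeComparator above: enum kinds
    // sort before string kinds, string kinds sort by their key.
    struct KeyLess {
      bool operator()(const Attr &A, const Attr &B) const {
        if (A.K == Kind::String && B.K == Kind::String)
          return A.Str < B.Str;
        if (A.K == Kind::String)
          return false;
        if (B.K == Kind::String)
          return true;
        return A.K < B.K;
      }
    };

    struct Builder {
      std::vector<Attr> Attrs; // kept sorted by key, like the new AttrBuilder
      void add(Attr A) {
        auto It = std::lower_bound(Attrs.begin(), Attrs.end(), A, KeyLess());
        if (It != Attrs.end() && !KeyLess()(A, *It))
          *It = A; // same key already present: replace its value
        else
          Attrs.insert(It, A);
      }
    };

    int main() {
      Builder B;
      B.add({Kind::ZExt, "", ""});
      B.add({Kind::String, "frame-pointer", "all"});
      B.add({Kind::NoUndef, "", ""});
      B.add({Kind::String, "frame-pointer", "none"}); // overwrites, no duplicate
      assert(B.Attrs.size() == 3);
      assert(B.Attrs.front().K == Kind::NoUndef); // enum kinds come first, sorted
    }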
diff --git a/contrib/llvm-project/llvm/lib/IR/AutoUpgrade.cpp b/contrib/llvm-project/llvm/lib/IR/AutoUpgrade.cpp
index b8ad2b294b87..45459e200b3d 100644
--- a/contrib/llvm-project/llvm/lib/IR/AutoUpgrade.cpp
+++ b/contrib/llvm-project/llvm/lib/IR/AutoUpgrade.cpp
@@ -727,6 +727,13 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
Name == "arm.cde.vcx3qa.predicated.v2i64.v4i1")
return true;
+ if (Name == "amdgcn.alignbit") {
+ // Target specific intrinsic became redundant
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::fshr,
+ {F->getReturnType()});
+ return true;
+ }
+
break;
}
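The auto-upgrade above maps amdgcn.alignbit onto the generic funnel-shift intrinsic; both produce the low 32 bits of the 64-bit concatenation of the first two operands shifted right by the third operand modulo 32. A small arithmetic sketch of that equivalence in plain C++ (not the intrinsics themselves):

    #include <cassert>
    #include <cstdint>

    // fshr-style funnel shift on 32-bit values: low word of (A:B) >> (C mod 32).
    static uint32_t fshr32(uint32_t A, uint32_t B, uint32_t C) {
      uint64_t Concat = (static_cast<uint64_t>(A) << 32) | B;
      return static_cast<uint32_t>(Concat >> (C & 31));
    }

    int main() {
      assert(fshr32(0x12345678u, 0x9ABCDEF0u, 8) == 0x789ABCDEu);
      assert(fshr32(0xFFFFFFFFu, 0x00000000u, 4) == 0xF0000000u);
      assert(fshr32(0x12345678u, 0x9ABCDEF0u, 0) == 0x9ABCDEF0u); // shift of zero
    }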
@@ -4488,7 +4495,7 @@ void llvm::UpgradeFunctionAttributes(Function &F) {
if (F.getCallingConv() == CallingConv::X86_INTR &&
!F.arg_empty() && !F.hasParamAttribute(0, Attribute::ByVal)) {
- Type *ByValTy = cast<PointerType>(F.getArg(0)->getType())->getElementType();
+ Type *ByValTy = F.getArg(0)->getType()->getPointerElementType();
Attribute NewAttr = Attribute::getWithByValType(F.getContext(), ByValTy);
F.addParamAttr(0, NewAttr);
}
@@ -4569,27 +4576,39 @@ std::string llvm::UpgradeDataLayoutString(StringRef DL, StringRef TT) {
return DL.empty() ? std::string("G1") : (DL + "-G1").str();
}
+ std::string Res = DL.str();
+ if (!T.isX86())
+ return Res;
+
+ // If the datalayout matches the expected format, add pointer size address
+ // spaces to the datalayout.
std::string AddrSpaces = "-p270:32:32-p271:32:32-p272:64:64";
- // If X86, and the datalayout matches the expected format, add pointer size
- // address spaces to the datalayout.
- if (!T.isX86() || DL.contains(AddrSpaces))
- return std::string(DL);
+ if (!DL.contains(AddrSpaces)) {
+ SmallVector<StringRef, 4> Groups;
+ Regex R("(e-m:[a-z](-p:32:32)?)(-[if]64:.*$)");
+ if (R.match(DL, &Groups))
+ Res = (Groups[1] + AddrSpaces + Groups[3]).str();
+ }
- SmallVector<StringRef, 4> Groups;
- Regex R("(e-m:[a-z](-p:32:32)?)(-[if]64:.*$)");
- if (!R.match(DL, &Groups))
- return std::string(DL);
+ // For 32-bit MSVC targets, raise the alignment of f80 values to 16 bytes.
+ // Raising the alignment is safe because Clang did not produce f80 values in
+ // the MSVC environment before this upgrade was added.
+ if (T.isWindowsMSVCEnvironment() && !T.isArch64Bit()) {
+ StringRef Ref = Res;
+ auto I = Ref.find("-f80:32-");
+ if (I != StringRef::npos)
+ Res = (Ref.take_front(I) + "-f80:128-" + Ref.drop_front(I + 8)).str();
+ }
- return (Groups[1] + AddrSpaces + Groups[3]).str();
+ return Res;
}
void llvm::UpgradeAttributes(AttrBuilder &B) {
StringRef FramePointer;
- if (B.contains("no-frame-pointer-elim")) {
+ Attribute A = B.getAttribute("no-frame-pointer-elim");
+ if (A.isValid()) {
// The value can be "true" or "false".
- for (const auto &I : B.td_attrs())
- if (I.first == "no-frame-pointer-elim")
- FramePointer = I.second == "true" ? "all" : "none";
+ FramePointer = A.getValueAsString() == "true" ? "all" : "none";
B.removeAttribute("no-frame-pointer-elim");
}
if (B.contains("no-frame-pointer-elim-non-leaf")) {
@@ -4601,12 +4620,10 @@ void llvm::UpgradeAttributes(AttrBuilder &B) {
if (!FramePointer.empty())
B.addAttribute("frame-pointer", FramePointer);
- if (B.contains("null-pointer-is-valid")) {
+ A = B.getAttribute("null-pointer-is-valid");
+ if (A.isValid()) {
// The value can be "true" or "false".
- bool NullPointerIsValid = false;
- for (const auto &I : B.td_attrs())
- if (I.first == "null-pointer-is-valid")
- NullPointerIsValid = I.second == "true";
+ bool NullPointerIsValid = A.getValueAsString() == "true";
B.removeAttribute("null-pointer-is-valid");
if (NullPointerIsValid)
B.addAttribute(Attribute::NullPointerIsValid);
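The second half of the UpgradeDataLayoutString change rewrites 32-bit MSVC datalayouts so x86_fp80 gets 16-byte alignment. A standalone sketch of that string surgery on an illustrative i686 MSVC-style layout string (the exact input below is an assumption, not taken from the commit):

    #include <cstdio>
    #include <string>

    int main() {
      // Illustrative 32-bit MSVC-style datalayout; only the "-f80:32-" piece
      // matters for the upgrade shown above.
      std::string Res = "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-"
                        "i64:64-f80:32-n8:16:32-a:0:32-S32";
      std::string::size_type I = Res.find("-f80:32-");
      if (I != std::string::npos)
        Res.replace(I, 8, "-f80:128-"); // raise f80 alignment to 16 bytes
      std::printf("%s\n", Res.c_str());
    }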
diff --git a/contrib/llvm-project/llvm/lib/IR/Comdat.cpp b/contrib/llvm-project/llvm/lib/IR/Comdat.cpp
index 1a5d38d17bc0..90d5c6e82e5c 100644
--- a/contrib/llvm-project/llvm/lib/IR/Comdat.cpp
+++ b/contrib/llvm-project/llvm/lib/IR/Comdat.cpp
@@ -25,6 +25,10 @@ Comdat::Comdat() = default;
StringRef Comdat::getName() const { return Name->first(); }
+void Comdat::addUser(GlobalObject *GO) { Users.insert(GO); }
+
+void Comdat::removeUser(GlobalObject *GO) { Users.erase(GO); }
+
LLVMComdatRef LLVMGetOrInsertComdat(LLVMModuleRef M, const char *Name) {
return wrap(unwrap(M)->getOrInsertComdat(Name));
}
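Comdat now keeps a set of the GlobalObjects that use it, maintained through the new addUser/removeUser hooks. A toy sketch of that back-reference bookkeeping, with stand-in types and a hypothetical setComdat helper showing where the hooks would be called from:

    #include <cstdio>
    #include <set>

    // Stand-ins for llvm::Comdat and llvm::GlobalObject; only the user-set
    // bookkeeping is the point here.
    struct GlobalObject;

    struct Comdat {
      std::set<GlobalObject *> Users;
      void addUser(GlobalObject *GO) { Users.insert(GO); }
      void removeUser(GlobalObject *GO) { Users.erase(GO); }
    };

    struct GlobalObject {
      Comdat *C = nullptr;
      // Hypothetical attach/detach helper, not the LLVM method.
      void setComdat(Comdat *NewC) {
        if (C)
          C->removeUser(this);
        C = NewC;
        if (C)
          C->addUser(this);
      }
    };

    int main() {
      Comdat C;
      GlobalObject F, G;
      F.setComdat(&C);
      G.setComdat(&C);
      F.setComdat(nullptr);
      std::printf("comdat users: %zu\n", C.Users.size()); // prints 1
    }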
diff --git a/contrib/llvm-project/llvm/lib/IR/ConstantFold.cpp b/contrib/llvm-project/llvm/lib/IR/ConstantFold.cpp
index 8668fe82601c..622a984be22c 100644
--- a/contrib/llvm-project/llvm/lib/IR/ConstantFold.cpp
+++ b/contrib/llvm-project/llvm/lib/IR/ConstantFold.cpp
@@ -119,21 +119,21 @@ static Constant *FoldBitCast(Constant *V, Type *DestTy) {
if (PointerType *DPTy = dyn_cast<PointerType>(DestTy))
if (PTy->getAddressSpace() == DPTy->getAddressSpace() &&
!PTy->isOpaque() && !DPTy->isOpaque() &&
- PTy->getElementType()->isSized()) {
+ PTy->getNonOpaquePointerElementType()->isSized()) {
SmallVector<Value*, 8> IdxList;
Value *Zero =
Constant::getNullValue(Type::getInt32Ty(DPTy->getContext()));
IdxList.push_back(Zero);
- Type *ElTy = PTy->getElementType();
- while (ElTy && ElTy != DPTy->getElementType()) {
+ Type *ElTy = PTy->getNonOpaquePointerElementType();
+ while (ElTy && ElTy != DPTy->getNonOpaquePointerElementType()) {
ElTy = GetElementPtrInst::getTypeAtIndex(ElTy, (uint64_t)0);
IdxList.push_back(Zero);
}
- if (ElTy == DPTy->getElementType())
+ if (ElTy == DPTy->getNonOpaquePointerElementType())
// This GEP is inbounds because all indices are zero.
- return ConstantExpr::getInBoundsGetElementPtr(PTy->getElementType(),
- V, IdxList);
+ return ConstantExpr::getInBoundsGetElementPtr(
+ PTy->getNonOpaquePointerElementType(), V, IdxList);
}
// Handle casts from one vector constant to another. We know that the src
@@ -1299,63 +1299,6 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode, Constant *C1,
return nullptr;
}
-/// This type is zero-sized if it's an array or structure of zero-sized types.
-/// The only leaf zero-sized type is an empty structure.
-static bool isMaybeZeroSizedType(Type *Ty) {
- if (StructType *STy = dyn_cast<StructType>(Ty)) {
- if (STy->isOpaque()) return true; // Can't say.
-
- // If all of elements have zero size, this does too.
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
- if (!isMaybeZeroSizedType(STy->getElementType(i))) return false;
- return true;
-
- } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
- return isMaybeZeroSizedType(ATy->getElementType());
- }
- return false;
-}
-
-/// Compare the two constants as though they were getelementptr indices.
-/// This allows coercion of the types to be the same thing.
-///
-/// If the two constants are the "same" (after coercion), return 0. If the
-/// first is less than the second, return -1, if the second is less than the
-/// first, return 1. If the constants are not integral, return -2.
-///
-static int IdxCompare(Constant *C1, Constant *C2, Type *ElTy) {
- if (C1 == C2) return 0;
-
- // Ok, we found a different index. If they are not ConstantInt, we can't do
- // anything with them.
- if (!isa<ConstantInt>(C1) || !isa<ConstantInt>(C2))
- return -2; // don't know!
-
- // We cannot compare the indices if they don't fit in an int64_t.
- if (cast<ConstantInt>(C1)->getValue().getActiveBits() > 64 ||
- cast<ConstantInt>(C2)->getValue().getActiveBits() > 64)
- return -2; // don't know!
-
- // Ok, we have two differing integer indices. Sign extend them to be the same
- // type.
- int64_t C1Val = cast<ConstantInt>(C1)->getSExtValue();
- int64_t C2Val = cast<ConstantInt>(C2)->getSExtValue();
-
- if (C1Val == C2Val) return 0; // They are equal
-
- // If the type being indexed over is really just a zero sized type, there is
- // no pointer difference being made here.
- if (isMaybeZeroSizedType(ElTy))
- return -2; // dunno.
-
- // If they are really different, now that they are the same type, then we
- // found a difference!
- if (C1Val < C2Val)
- return -1;
- else
- return 1;
-}
-
/// This function determines if there is anything we can decide about the two
/// constants provided. This doesn't need to handle simple things like
/// ConstantFP comparisons, but should instead handle ConstantExprs.
@@ -1594,103 +1537,28 @@ static ICmpInst::Predicate evaluateICmpRelation(Constant *V1, Constant *V2,
if (const GlobalValue *GV = dyn_cast<GlobalValue>(CE1Op0)) {
// If its not weak linkage, the GVal must have a non-zero address
// so the result is greater-than
- if (!GV->hasExternalWeakLinkage())
+ if (!GV->hasExternalWeakLinkage() && CE1GEP->isInBounds())
return ICmpInst::ICMP_UGT;
- } else if (isa<ConstantPointerNull>(CE1Op0)) {
- // If we are indexing from a null pointer, check to see if we have any
- // non-zero indices.
- for (unsigned i = 1, e = CE1->getNumOperands(); i != e; ++i)
- if (!CE1->getOperand(i)->isNullValue())
- // Offsetting from null, must not be equal.
- return ICmpInst::ICMP_UGT;
- // Only zero indexes from null, must still be zero.
- return ICmpInst::ICMP_EQ;
}
- // Otherwise, we can't really say if the first operand is null or not.
} else if (const GlobalValue *GV2 = dyn_cast<GlobalValue>(V2)) {
- if (isa<ConstantPointerNull>(CE1Op0)) {
- // If its not weak linkage, the GVal must have a non-zero address
- // so the result is less-than
- if (!GV2->hasExternalWeakLinkage())
- return ICmpInst::ICMP_ULT;
- } else if (const GlobalValue *GV = dyn_cast<GlobalValue>(CE1Op0)) {
- if (GV == GV2) {
- // If this is a getelementptr of the same global, then it must be
- // different. Because the types must match, the getelementptr could
- // only have at most one index, and because we fold getelementptr's
- // with a single zero index, it must be nonzero.
- assert(CE1->getNumOperands() == 2 &&
- !CE1->getOperand(1)->isNullValue() &&
- "Surprising getelementptr!");
- return ICmpInst::ICMP_UGT;
- } else {
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(CE1Op0)) {
+ if (GV != GV2) {
if (CE1GEP->hasAllZeroIndices())
return areGlobalsPotentiallyEqual(GV, GV2);
return ICmpInst::BAD_ICMP_PREDICATE;
}
}
- } else {
- ConstantExpr *CE2 = cast<ConstantExpr>(V2);
- Constant *CE2Op0 = CE2->getOperand(0);
-
- // There are MANY other foldings that we could perform here. They will
- // probably be added on demand, as they seem needed.
- switch (CE2->getOpcode()) {
- default: break;
- case Instruction::GetElementPtr:
- // By far the most common case to handle is when the base pointers are
- // obviously to the same global.
- if (isa<GlobalValue>(CE1Op0) && isa<GlobalValue>(CE2Op0)) {
- // Don't know relative ordering, but check for inequality.
- if (CE1Op0 != CE2Op0) {
- GEPOperator *CE2GEP = cast<GEPOperator>(CE2);
- if (CE1GEP->hasAllZeroIndices() && CE2GEP->hasAllZeroIndices())
- return areGlobalsPotentiallyEqual(cast<GlobalValue>(CE1Op0),
- cast<GlobalValue>(CE2Op0));
- return ICmpInst::BAD_ICMP_PREDICATE;
- }
- // Ok, we know that both getelementptr instructions are based on the
- // same global. From this, we can precisely determine the relative
- // ordering of the resultant pointers.
- unsigned i = 1;
-
- // The logic below assumes that the result of the comparison
- // can be determined by finding the first index that differs.
- // This doesn't work if there is over-indexing in any
- // subsequent indices, so check for that case first.
- if (!CE1->isGEPWithNoNotionalOverIndexing() ||
- !CE2->isGEPWithNoNotionalOverIndexing())
- return ICmpInst::BAD_ICMP_PREDICATE; // Might be equal.
-
- // Compare all of the operands the GEP's have in common.
- gep_type_iterator GTI = gep_type_begin(CE1);
- for (;i != CE1->getNumOperands() && i != CE2->getNumOperands();
- ++i, ++GTI)
- switch (IdxCompare(CE1->getOperand(i),
- CE2->getOperand(i), GTI.getIndexedType())) {
- case -1: return isSigned ? ICmpInst::ICMP_SLT:ICmpInst::ICMP_ULT;
- case 1: return isSigned ? ICmpInst::ICMP_SGT:ICmpInst::ICMP_UGT;
- case -2: return ICmpInst::BAD_ICMP_PREDICATE;
- }
-
- // Ok, we ran out of things they have in common. If any leftovers
- // are non-zero then we have a difference, otherwise we are equal.
- for (; i < CE1->getNumOperands(); ++i)
- if (!CE1->getOperand(i)->isNullValue()) {
- if (isa<ConstantInt>(CE1->getOperand(i)))
- return isSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
- else
- return ICmpInst::BAD_ICMP_PREDICATE; // Might be equal.
- }
-
- for (; i < CE2->getNumOperands(); ++i)
- if (!CE2->getOperand(i)->isNullValue()) {
- if (isa<ConstantInt>(CE2->getOperand(i)))
- return isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
- else
- return ICmpInst::BAD_ICMP_PREDICATE; // Might be equal.
- }
- return ICmpInst::ICMP_EQ;
+ } else if (const auto *CE2GEP = dyn_cast<GEPOperator>(V2)) {
+ // By far the most common case to handle is when the base pointers are
+ // obviously to the same global.
+ const Constant *CE2Op0 = cast<Constant>(CE2GEP->getPointerOperand());
+ if (isa<GlobalValue>(CE1Op0) && isa<GlobalValue>(CE2Op0)) {
+ // Don't know relative ordering, but check for inequality.
+ if (CE1Op0 != CE2Op0) {
+ if (CE1GEP->hasAllZeroIndices() && CE2GEP->hasAllZeroIndices())
+ return areGlobalsPotentiallyEqual(cast<GlobalValue>(CE1Op0),
+ cast<GlobalValue>(CE2Op0));
+ return ICmpInst::BAD_ICMP_PREDICATE;
}
}
}
@@ -1704,7 +1572,7 @@ static ICmpInst::Predicate evaluateICmpRelation(Constant *V1, Constant *V2,
return ICmpInst::BAD_ICMP_PREDICATE;
}
-Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
+Constant *llvm::ConstantFoldCompareInstruction(CmpInst::Predicate Predicate,
Constant *C1, Constant *C2) {
Type *ResultTy;
if (VectorType *VT = dyn_cast<VectorType>(C1->getType()))
@@ -1714,10 +1582,10 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
ResultTy = Type::getInt1Ty(C1->getContext());
// Fold FCMP_FALSE/FCMP_TRUE unconditionally.
- if (pred == FCmpInst::FCMP_FALSE)
+ if (Predicate == FCmpInst::FCMP_FALSE)
return Constant::getNullValue(ResultTy);
- if (pred == FCmpInst::FCMP_TRUE)
+ if (Predicate == FCmpInst::FCMP_TRUE)
return Constant::getAllOnesValue(ResultTy);
// Handle some degenerate cases first
@@ -1725,7 +1593,6 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
return PoisonValue::get(ResultTy);
if (isa<UndefValue>(C1) || isa<UndefValue>(C2)) {
- CmpInst::Predicate Predicate = CmpInst::Predicate(pred);
bool isIntegerPredicate = ICmpInst::isIntPredicate(Predicate);
// For EQ and NE, we can always pick a value for the undef to make the
// predicate pass or fail, so we can return undef.
@@ -1750,9 +1617,9 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
if (!isa<GlobalAlias>(GV) && !GV->hasExternalWeakLinkage() &&
!NullPointerIsDefined(nullptr /* F */,
GV->getType()->getAddressSpace())) {
- if (pred == ICmpInst::ICMP_EQ)
+ if (Predicate == ICmpInst::ICMP_EQ)
return ConstantInt::getFalse(C1->getContext());
- else if (pred == ICmpInst::ICMP_NE)
+ else if (Predicate == ICmpInst::ICMP_NE)
return ConstantInt::getTrue(C1->getContext());
}
// icmp eq/ne(GV,null) -> false/true
@@ -1762,9 +1629,9 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
if (!isa<GlobalAlias>(GV) && !GV->hasExternalWeakLinkage() &&
!NullPointerIsDefined(nullptr /* F */,
GV->getType()->getAddressSpace())) {
- if (pred == ICmpInst::ICMP_EQ)
+ if (Predicate == ICmpInst::ICMP_EQ)
return ConstantInt::getFalse(C1->getContext());
- else if (pred == ICmpInst::ICMP_NE)
+ else if (Predicate == ICmpInst::ICMP_NE)
return ConstantInt::getTrue(C1->getContext());
}
}
@@ -1772,16 +1639,16 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
// The caller is expected to commute the operands if the constant expression
// is C2.
// C1 >= 0 --> true
- if (pred == ICmpInst::ICMP_UGE)
+ if (Predicate == ICmpInst::ICMP_UGE)
return Constant::getAllOnesValue(ResultTy);
// C1 < 0 --> false
- if (pred == ICmpInst::ICMP_ULT)
+ if (Predicate == ICmpInst::ICMP_ULT)
return Constant::getNullValue(ResultTy);
}
// If the comparison is a comparison between two i1's, simplify it.
if (C1->getType()->isIntegerTy(1)) {
- switch(pred) {
+ switch (Predicate) {
case ICmpInst::ICMP_EQ:
if (isa<ConstantInt>(C2))
return ConstantExpr::getXor(C1, ConstantExpr::getNot(C2));
@@ -1796,12 +1663,10 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
if (isa<ConstantInt>(C1) && isa<ConstantInt>(C2)) {
const APInt &V1 = cast<ConstantInt>(C1)->getValue();
const APInt &V2 = cast<ConstantInt>(C2)->getValue();
- return ConstantInt::get(
- ResultTy, ICmpInst::compare(V1, V2, (ICmpInst::Predicate)pred));
+ return ConstantInt::get(ResultTy, ICmpInst::compare(V1, V2, Predicate));
} else if (isa<ConstantFP>(C1) && isa<ConstantFP>(C2)) {
const APFloat &C1V = cast<ConstantFP>(C1)->getValueAPF();
const APFloat &C2V = cast<ConstantFP>(C2)->getValueAPF();
- CmpInst::Predicate Predicate = CmpInst::Predicate(pred);
return ConstantInt::get(ResultTy, FCmpInst::compare(C1V, C2V, Predicate));
} else if (auto *C1VTy = dyn_cast<VectorType>(C1->getType())) {
@@ -1810,7 +1675,7 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
if (Constant *C2Splat = C2->getSplatValue())
return ConstantVector::getSplat(
C1VTy->getElementCount(),
- ConstantExpr::getCompare(pred, C1Splat, C2Splat));
+ ConstantExpr::getCompare(Predicate, C1Splat, C2Splat));
// Do not iterate on scalable vector. The number of elements is unknown at
// compile-time.
@@ -1829,7 +1694,7 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
Constant *C2E =
ConstantExpr::getExtractElement(C2, ConstantInt::get(Ty, I));
- ResElts.push_back(ConstantExpr::getCompare(pred, C1E, C2E));
+ ResElts.push_back(ConstantExpr::getCompare(Predicate, C1E, C2E));
}
return ConstantVector::get(ResElts);
@@ -1854,46 +1719,52 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
case FCmpInst::BAD_FCMP_PREDICATE:
break; // Couldn't determine anything about these constants.
case FCmpInst::FCMP_OEQ: // We know that C1 == C2
- Result = (pred == FCmpInst::FCMP_UEQ || pred == FCmpInst::FCMP_OEQ ||
- pred == FCmpInst::FCMP_ULE || pred == FCmpInst::FCMP_OLE ||
- pred == FCmpInst::FCMP_UGE || pred == FCmpInst::FCMP_OGE);
+ Result =
+ (Predicate == FCmpInst::FCMP_UEQ || Predicate == FCmpInst::FCMP_OEQ ||
+ Predicate == FCmpInst::FCMP_ULE || Predicate == FCmpInst::FCMP_OLE ||
+ Predicate == FCmpInst::FCMP_UGE || Predicate == FCmpInst::FCMP_OGE);
break;
case FCmpInst::FCMP_OLT: // We know that C1 < C2
- Result = (pred == FCmpInst::FCMP_UNE || pred == FCmpInst::FCMP_ONE ||
- pred == FCmpInst::FCMP_ULT || pred == FCmpInst::FCMP_OLT ||
- pred == FCmpInst::FCMP_ULE || pred == FCmpInst::FCMP_OLE);
+ Result =
+ (Predicate == FCmpInst::FCMP_UNE || Predicate == FCmpInst::FCMP_ONE ||
+ Predicate == FCmpInst::FCMP_ULT || Predicate == FCmpInst::FCMP_OLT ||
+ Predicate == FCmpInst::FCMP_ULE || Predicate == FCmpInst::FCMP_OLE);
break;
case FCmpInst::FCMP_OGT: // We know that C1 > C2
- Result = (pred == FCmpInst::FCMP_UNE || pred == FCmpInst::FCMP_ONE ||
- pred == FCmpInst::FCMP_UGT || pred == FCmpInst::FCMP_OGT ||
- pred == FCmpInst::FCMP_UGE || pred == FCmpInst::FCMP_OGE);
+ Result =
+ (Predicate == FCmpInst::FCMP_UNE || Predicate == FCmpInst::FCMP_ONE ||
+ Predicate == FCmpInst::FCMP_UGT || Predicate == FCmpInst::FCMP_OGT ||
+ Predicate == FCmpInst::FCMP_UGE || Predicate == FCmpInst::FCMP_OGE);
break;
case FCmpInst::FCMP_OLE: // We know that C1 <= C2
// We can only partially decide this relation.
- if (pred == FCmpInst::FCMP_UGT || pred == FCmpInst::FCMP_OGT)
+ if (Predicate == FCmpInst::FCMP_UGT || Predicate == FCmpInst::FCMP_OGT)
Result = 0;
- else if (pred == FCmpInst::FCMP_ULT || pred == FCmpInst::FCMP_OLT)
+ else if (Predicate == FCmpInst::FCMP_ULT ||
+ Predicate == FCmpInst::FCMP_OLT)
Result = 1;
break;
case FCmpInst::FCMP_OGE: // We known that C1 >= C2
// We can only partially decide this relation.
- if (pred == FCmpInst::FCMP_ULT || pred == FCmpInst::FCMP_OLT)
+ if (Predicate == FCmpInst::FCMP_ULT || Predicate == FCmpInst::FCMP_OLT)
Result = 0;
- else if (pred == FCmpInst::FCMP_UGT || pred == FCmpInst::FCMP_OGT)
+ else if (Predicate == FCmpInst::FCMP_UGT ||
+ Predicate == FCmpInst::FCMP_OGT)
Result = 1;
break;
case FCmpInst::FCMP_ONE: // We know that C1 != C2
// We can only partially decide this relation.
- if (pred == FCmpInst::FCMP_OEQ || pred == FCmpInst::FCMP_UEQ)
+ if (Predicate == FCmpInst::FCMP_OEQ || Predicate == FCmpInst::FCMP_UEQ)
Result = 0;
- else if (pred == FCmpInst::FCMP_ONE || pred == FCmpInst::FCMP_UNE)
+ else if (Predicate == FCmpInst::FCMP_ONE ||
+ Predicate == FCmpInst::FCMP_UNE)
Result = 1;
break;
case FCmpInst::FCMP_UEQ: // We know that C1 == C2 || isUnordered(C1, C2).
// We can only partially decide this relation.
- if (pred == FCmpInst::FCMP_ONE)
+ if (Predicate == FCmpInst::FCMP_ONE)
Result = 0;
- else if (pred == FCmpInst::FCMP_UEQ)
+ else if (Predicate == FCmpInst::FCMP_UEQ)
Result = 1;
break;
}
@@ -1905,67 +1776,84 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
} else {
// Evaluate the relation between the two constants, per the predicate.
int Result = -1; // -1 = unknown, 0 = known false, 1 = known true.
- switch (evaluateICmpRelation(C1, C2,
- CmpInst::isSigned((CmpInst::Predicate)pred))) {
+ switch (evaluateICmpRelation(C1, C2, CmpInst::isSigned(Predicate))) {
default: llvm_unreachable("Unknown relational!");
case ICmpInst::BAD_ICMP_PREDICATE:
break; // Couldn't determine anything about these constants.
case ICmpInst::ICMP_EQ: // We know the constants are equal!
// If we know the constants are equal, we can decide the result of this
// computation precisely.
- Result = ICmpInst::isTrueWhenEqual((ICmpInst::Predicate)pred);
+ Result = ICmpInst::isTrueWhenEqual(Predicate);
break;
case ICmpInst::ICMP_ULT:
- switch (pred) {
+ switch (Predicate) {
case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_NE: case ICmpInst::ICMP_ULE:
Result = 1; break;
case ICmpInst::ICMP_UGT: case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_UGE:
Result = 0; break;
+ default:
+ break;
}
break;
case ICmpInst::ICMP_SLT:
- switch (pred) {
+ switch (Predicate) {
case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_NE: case ICmpInst::ICMP_SLE:
Result = 1; break;
case ICmpInst::ICMP_SGT: case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_SGE:
Result = 0; break;
+ default:
+ break;
}
break;
case ICmpInst::ICMP_UGT:
- switch (pred) {
+ switch (Predicate) {
case ICmpInst::ICMP_UGT: case ICmpInst::ICMP_NE: case ICmpInst::ICMP_UGE:
Result = 1; break;
case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_ULE:
Result = 0; break;
+ default:
+ break;
}
break;
case ICmpInst::ICMP_SGT:
- switch (pred) {
+ switch (Predicate) {
case ICmpInst::ICMP_SGT: case ICmpInst::ICMP_NE: case ICmpInst::ICMP_SGE:
Result = 1; break;
case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_SLE:
Result = 0; break;
+ default:
+ break;
}
break;
case ICmpInst::ICMP_ULE:
- if (pred == ICmpInst::ICMP_UGT) Result = 0;
- if (pred == ICmpInst::ICMP_ULT || pred == ICmpInst::ICMP_ULE) Result = 1;
+ if (Predicate == ICmpInst::ICMP_UGT)
+ Result = 0;
+ if (Predicate == ICmpInst::ICMP_ULT || Predicate == ICmpInst::ICMP_ULE)
+ Result = 1;
break;
case ICmpInst::ICMP_SLE:
- if (pred == ICmpInst::ICMP_SGT) Result = 0;
- if (pred == ICmpInst::ICMP_SLT || pred == ICmpInst::ICMP_SLE) Result = 1;
+ if (Predicate == ICmpInst::ICMP_SGT)
+ Result = 0;
+ if (Predicate == ICmpInst::ICMP_SLT || Predicate == ICmpInst::ICMP_SLE)
+ Result = 1;
break;
case ICmpInst::ICMP_UGE:
- if (pred == ICmpInst::ICMP_ULT) Result = 0;
- if (pred == ICmpInst::ICMP_UGT || pred == ICmpInst::ICMP_UGE) Result = 1;
+ if (Predicate == ICmpInst::ICMP_ULT)
+ Result = 0;
+ if (Predicate == ICmpInst::ICMP_UGT || Predicate == ICmpInst::ICMP_UGE)
+ Result = 1;
break;
case ICmpInst::ICMP_SGE:
- if (pred == ICmpInst::ICMP_SLT) Result = 0;
- if (pred == ICmpInst::ICMP_SGT || pred == ICmpInst::ICMP_SGE) Result = 1;
+ if (Predicate == ICmpInst::ICMP_SLT)
+ Result = 0;
+ if (Predicate == ICmpInst::ICMP_SGT || Predicate == ICmpInst::ICMP_SGE)
+ Result = 1;
break;
case ICmpInst::ICMP_NE:
- if (pred == ICmpInst::ICMP_EQ) Result = 0;
- if (pred == ICmpInst::ICMP_NE) Result = 1;
+ if (Predicate == ICmpInst::ICMP_EQ)
+ Result = 0;
+ if (Predicate == ICmpInst::ICMP_NE)
+ Result = 1;
break;
}
@@ -1983,16 +1871,16 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
CE2->getType()->isVectorTy() == CE2Op0->getType()->isVectorTy() &&
!CE2Op0->getType()->isFPOrFPVectorTy()) {
Constant *Inverse = ConstantExpr::getBitCast(C1, CE2Op0->getType());
- return ConstantExpr::getICmp(pred, Inverse, CE2Op0);
+ return ConstantExpr::getICmp(Predicate, Inverse, CE2Op0);
}
}
// If the left hand side is an extension, try eliminating it.
if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) {
if ((CE1->getOpcode() == Instruction::SExt &&
- ICmpInst::isSigned((ICmpInst::Predicate)pred)) ||
+ ICmpInst::isSigned(Predicate)) ||
(CE1->getOpcode() == Instruction::ZExt &&
- !ICmpInst::isSigned((ICmpInst::Predicate)pred))){
+ !ICmpInst::isSigned(Predicate))) {
Constant *CE1Op0 = CE1->getOperand(0);
Constant *CE1Inverse = ConstantExpr::getTrunc(CE1, CE1Op0->getType());
if (CE1Inverse == CE1Op0) {
@@ -2000,7 +1888,7 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
Constant *C2Inverse = ConstantExpr::getTrunc(C2, CE1Op0->getType());
if (ConstantExpr::getCast(CE1->getOpcode(), C2Inverse,
C2->getType()) == C2)
- return ConstantExpr::getICmp(pred, CE1Inverse, C2Inverse);
+ return ConstantExpr::getICmp(Predicate, CE1Inverse, C2Inverse);
}
}
}
@@ -2010,8 +1898,8 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
// If C2 is a constant expr and C1 isn't, flip them around and fold the
// other way if possible.
// Also, if C1 is null and C2 isn't, flip them around.
- pred = ICmpInst::getSwappedPredicate((ICmpInst::Predicate)pred);
- return ConstantExpr::getICmp(pred, C2, C1);
+ Predicate = ICmpInst::getSwappedPredicate(Predicate);
+ return ConstantExpr::getICmp(Predicate, C2, C1);
}
}
return nullptr;
@@ -2086,32 +1974,14 @@ static Constant *foldGEPOfGEP(GEPOperator *GEP, Type *PointeeTy, bool InBounds,
I != E; ++I)
LastI = I;
- // We cannot combine indices if doing so would take us outside of an
- // array or vector. Doing otherwise could trick us if we evaluated such a
- // GEP as part of a load.
- //
- // e.g. Consider if the original GEP was:
- // i8* getelementptr ({ [2 x i8], i32, i8, [3 x i8] }* @main.c,
- // i32 0, i32 0, i64 0)
- //
- // If we then tried to offset it by '8' to get to the third element,
- // an i8, we should *not* get:
- // i8* getelementptr ({ [2 x i8], i32, i8, [3 x i8] }* @main.c,
- // i32 0, i32 0, i64 8)
- //
- // This GEP tries to index array element '8' which runs out-of-bounds.
- // Subsequent evaluation would get confused and produce erroneous results.
- //
- // The following prohibits such a GEP from being formed by checking to see
- // if the index is in-range with respect to an array.
+ // We can't combine GEPs if the last index is a struct type.
if (!LastI.isSequential())
return nullptr;
+ // We could perform the transform with non-constant index, but prefer leaving
+ // it as GEP of GEP rather than GEP of add for now.
ConstantInt *CI = dyn_cast<ConstantInt>(Idx0);
if (!CI)
return nullptr;
- if (LastI.isBoundedSequential() &&
- !isIndexInRangeOfArrayType(LastI.getSequentialNumElements(), CI))
- return nullptr;
// TODO: This code may be extended to handle vectors as well.
auto *LastIdx = cast<Constant>(GEP->getOperand(GEP->getNumOperands()-1));
@@ -2226,11 +2096,12 @@ Constant *llvm::ConstantFoldGetElementPtr(Type *PointeeTy, Constant *C,
PointerType *SrcPtrTy =
dyn_cast<PointerType>(CE->getOperand(0)->getType());
PointerType *DstPtrTy = dyn_cast<PointerType>(CE->getType());
- if (SrcPtrTy && DstPtrTy) {
+ if (SrcPtrTy && DstPtrTy && !SrcPtrTy->isOpaque() &&
+ !DstPtrTy->isOpaque()) {
ArrayType *SrcArrayTy =
- dyn_cast<ArrayType>(SrcPtrTy->getElementType());
+ dyn_cast<ArrayType>(SrcPtrTy->getNonOpaquePointerElementType());
ArrayType *DstArrayTy =
- dyn_cast<ArrayType>(DstPtrTy->getElementType());
+ dyn_cast<ArrayType>(DstPtrTy->getNonOpaquePointerElementType());
if (SrcArrayTy && DstArrayTy
&& SrcArrayTy->getElementType() == DstArrayTy->getElementType()
&& SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
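
Illustrative sketch (not part of the patch) of the constant-compare folding exercised in the hunks above: when both operands are plain integer constants, ConstantFoldCompareInstruction resolves the predicate outright, so the public ConstantExpr::getICmp entry point returns an i1 constant instead of an icmp constant expression. Minimal C++ example, assuming only an existing LLVMContext:

#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"

using namespace llvm;

Constant *foldedCompare(LLVMContext &Ctx) {
  Type *I32 = Type::getInt32Ty(Ctx);
  Constant *One = ConstantInt::get(I32, 1);
  Constant *Two = ConstantInt::get(I32, 2);
  // 1 u< 2 is known at compile time, so this folds to the i1 constant true.
  return ConstantExpr::getICmp(CmpInst::ICMP_ULT, One, Two);
}
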
diff --git a/contrib/llvm-project/llvm/lib/IR/ConstantFold.h b/contrib/llvm-project/llvm/lib/IR/ConstantFold.h
index 0cdd5cf3cbce..1aa44f4d21e5 100644
--- a/contrib/llvm-project/llvm/lib/IR/ConstantFold.h
+++ b/contrib/llvm-project/llvm/lib/IR/ConstantFold.h
@@ -19,6 +19,7 @@
#define LLVM_LIB_IR_CONSTANTFOLD_H
#include "llvm/ADT/Optional.h"
+#include "llvm/IR/InstrTypes.h"
namespace llvm {
template <typename T> class ArrayRef;
@@ -46,7 +47,7 @@ template <typename T> class ArrayRef;
Constant *ConstantFoldUnaryInstruction(unsigned Opcode, Constant *V);
Constant *ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1,
Constant *V2);
- Constant *ConstantFoldCompareInstruction(unsigned short predicate,
+ Constant *ConstantFoldCompareInstruction(CmpInst::Predicate Predicate,
Constant *C1, Constant *C2);
Constant *ConstantFoldGetElementPtr(Type *Ty, Constant *C, bool InBounds,
Optional<unsigned> InRangeIndex,
diff --git a/contrib/llvm-project/llvm/lib/IR/Constants.cpp b/contrib/llvm-project/llvm/lib/IR/Constants.cpp
index 837be910f6d8..c13990af360e 100644
--- a/contrib/llvm-project/llvm/lib/IR/Constants.cpp
+++ b/contrib/llvm-project/llvm/lib/IR/Constants.cpp
@@ -739,15 +739,8 @@ static bool constantIsDead(const Constant *C, bool RemoveDeadUsers) {
++I;
}
- if (RemoveDeadUsers) {
- // If C is only used by metadata, it should not be preserved but should
- // have its uses replaced.
- if (C->isUsedByMetadata()) {
- const_cast<Constant *>(C)->replaceAllUsesWith(
- UndefValue::get(C->getType()));
- }
+ if (RemoveDeadUsers)
const_cast<Constant *>(C)->destroyConstant();
- }
return true;
}
@@ -779,18 +772,22 @@ void Constant::removeDeadConstantUsers() const {
}
}
-bool Constant::hasOneLiveUse() const {
+bool Constant::hasOneLiveUse() const { return hasNLiveUses(1); }
+
+bool Constant::hasZeroLiveUses() const { return hasNLiveUses(0); }
+
+bool Constant::hasNLiveUses(unsigned N) const {
unsigned NumUses = 0;
- for (const Use &use : uses()) {
- const Constant *User = dyn_cast<Constant>(use.getUser());
+ for (const Use &U : uses()) {
+ const Constant *User = dyn_cast<Constant>(U.getUser());
if (!User || !constantIsDead(User, /* RemoveDeadUsers= */ false)) {
++NumUses;
- if (NumUses > 1)
+ if (NumUses > N)
return false;
}
}
- return NumUses == 1;
+ return NumUses == N;
}
Constant *Constant::replaceUndefsWith(Constant *C, Constant *Replacement) {
@@ -1491,28 +1488,6 @@ bool ConstantExpr::isCompare() const {
return getOpcode() == Instruction::ICmp || getOpcode() == Instruction::FCmp;
}
-bool ConstantExpr::isGEPWithNoNotionalOverIndexing() const {
- if (getOpcode() != Instruction::GetElementPtr) return false;
-
- gep_type_iterator GEPI = gep_type_begin(this), E = gep_type_end(this);
- User::const_op_iterator OI = std::next(this->op_begin());
-
- // The remaining indices may be compile-time known integers within the bounds
- // of the corresponding notional static array types.
- for (; GEPI != E; ++GEPI, ++OI) {
- if (isa<UndefValue>(*OI))
- continue;
- auto *CI = dyn_cast<ConstantInt>(*OI);
- if (!CI || (GEPI.isBoundedSequential() &&
- (CI->getValue().getActiveBits() > 64 ||
- CI->getZExtValue() >= GEPI.getSequentialNumElements())))
- return false;
- }
-
- // All the indices checked out.
- return true;
-}
-
bool ConstantExpr::hasIndices() const {
return getOpcode() == Instruction::ExtractValue ||
getOpcode() == Instruction::InsertValue;
@@ -2546,11 +2521,11 @@ Constant *ConstantExpr::getGetElementPtr(Type *Ty, Constant *C,
Constant *ConstantExpr::getICmp(unsigned short pred, Constant *LHS,
Constant *RHS, bool OnlyIfReduced) {
+ auto Predicate = static_cast<CmpInst::Predicate>(pred);
assert(LHS->getType() == RHS->getType());
- assert(CmpInst::isIntPredicate((CmpInst::Predicate)pred) &&
- "Invalid ICmp Predicate");
+ assert(CmpInst::isIntPredicate(Predicate) && "Invalid ICmp Predicate");
- if (Constant *FC = ConstantFoldCompareInstruction(pred, LHS, RHS))
+ if (Constant *FC = ConstantFoldCompareInstruction(Predicate, LHS, RHS))
return FC; // Fold a few common cases...
if (OnlyIfReduced)
@@ -2559,7 +2534,7 @@ Constant *ConstantExpr::getICmp(unsigned short pred, Constant *LHS,
// Look up the constant in the table first to ensure uniqueness
Constant *ArgVec[] = { LHS, RHS };
// Get the key type with both the opcode and predicate
- const ConstantExprKeyType Key(Instruction::ICmp, ArgVec, pred);
+ const ConstantExprKeyType Key(Instruction::ICmp, ArgVec, Predicate);
Type *ResultTy = Type::getInt1Ty(LHS->getContext());
if (VectorType *VT = dyn_cast<VectorType>(LHS->getType()))
@@ -2571,11 +2546,11 @@ Constant *ConstantExpr::getICmp(unsigned short pred, Constant *LHS,
Constant *ConstantExpr::getFCmp(unsigned short pred, Constant *LHS,
Constant *RHS, bool OnlyIfReduced) {
+ auto Predicate = static_cast<CmpInst::Predicate>(pred);
assert(LHS->getType() == RHS->getType());
- assert(CmpInst::isFPPredicate((CmpInst::Predicate)pred) &&
- "Invalid FCmp Predicate");
+ assert(CmpInst::isFPPredicate(Predicate) && "Invalid FCmp Predicate");
- if (Constant *FC = ConstantFoldCompareInstruction(pred, LHS, RHS))
+ if (Constant *FC = ConstantFoldCompareInstruction(Predicate, LHS, RHS))
return FC; // Fold a few common cases...
if (OnlyIfReduced)
@@ -2584,7 +2559,7 @@ Constant *ConstantExpr::getFCmp(unsigned short pred, Constant *LHS,
// Look up the constant in the table first to ensure uniqueness
Constant *ArgVec[] = { LHS, RHS };
// Get the key type with both the opcode and predicate
- const ConstantExprKeyType Key(Instruction::FCmp, ArgVec, pred);
+ const ConstantExprKeyType Key(Instruction::FCmp, ArgVec, Predicate);
Type *ResultTy = Type::getInt1Ty(LHS->getContext());
if (VectorType *VT = dyn_cast<VectorType>(LHS->getType()))
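
hasOneLiveUse and hasZeroLiveUses above are thin wrappers around hasNLiveUses, which skips users that are themselves dead constants. A hedged usage sketch; the global name is hypothetical:

#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Erase a global once nothing but dead constant expressions refer to it.
void dropIfDead(Module &M) {
  GlobalVariable *GV = M.getGlobalVariable("example_global"); // hypothetical
  if (!GV)
    return;
  GV->removeDeadConstantUsers();
  if (GV->hasZeroLiveUses())
    GV->eraseFromParent();
}
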
diff --git a/contrib/llvm-project/llvm/lib/IR/Core.cpp b/contrib/llvm-project/llvm/lib/IR/Core.cpp
index a263d2536541..43df15e4d932 100644
--- a/contrib/llvm-project/llvm/lib/IR/Core.cpp
+++ b/contrib/llvm-project/llvm/lib/IR/Core.cpp
@@ -142,12 +142,12 @@ LLVMAttributeRef LLVMCreateEnumAttribute(LLVMContextRef C, unsigned KindID,
if (AttrKind == Attribute::AttrKind::ByVal) {
// After r362128, byval attributes need to have a type attribute. Provide a
// NULL one until a proper API is added for this.
- return wrap(Attribute::getWithByValType(Ctx, NULL));
+ return wrap(Attribute::getWithByValType(Ctx, nullptr));
}
if (AttrKind == Attribute::AttrKind::StructRet) {
// Same as byval.
- return wrap(Attribute::getWithStructRetType(Ctx, NULL));
+ return wrap(Attribute::getWithStructRetType(Ctx, nullptr));
}
return wrap(Attribute::get(Ctx, AttrKind, Val));
@@ -796,7 +796,7 @@ LLVMTypeRef LLVMScalableVectorType(LLVMTypeRef ElementType,
LLVMTypeRef LLVMGetElementType(LLVMTypeRef WrappedTy) {
auto *Ty = unwrap<Type>(WrappedTy);
if (auto *PTy = dyn_cast<PointerType>(Ty))
- return wrap(PTy->getElementType());
+ return wrap(PTy->getPointerElementType());
if (auto *ATy = dyn_cast<ArrayType>(Ty))
return wrap(ATy->getElementType());
return wrap(cast<VectorType>(Ty)->getElementType());
@@ -1691,8 +1691,7 @@ LLVMValueRef LLVMConstGEP(LLVMValueRef ConstantVal,
ArrayRef<Constant *> IdxList(unwrap<Constant>(ConstantIndices, NumIndices),
NumIndices);
Constant *Val = unwrap<Constant>(ConstantVal);
- Type *Ty =
- cast<PointerType>(Val->getType()->getScalarType())->getElementType();
+ Type *Ty = Val->getType()->getScalarType()->getNonOpaquePointerElementType();
return wrap(ConstantExpr::getGetElementPtr(Ty, Val, IdxList));
}
@@ -1710,8 +1709,7 @@ LLVMValueRef LLVMConstInBoundsGEP(LLVMValueRef ConstantVal,
ArrayRef<Constant *> IdxList(unwrap<Constant>(ConstantIndices, NumIndices),
NumIndices);
Constant *Val = unwrap<Constant>(ConstantVal);
- Type *Ty =
- cast<PointerType>(Val->getType()->getScalarType())->getElementType();
+ Type *Ty = Val->getType()->getScalarType()->getNonOpaquePointerElementType();
return wrap(ConstantExpr::getInBoundsGetElementPtr(Ty, Val, IdxList));
}
@@ -2278,7 +2276,8 @@ void LLVMSetExternallyInitialized(LLVMValueRef GlobalVar, LLVMBool IsExtInit) {
LLVMValueRef LLVMAddAlias(LLVMModuleRef M, LLVMTypeRef Ty, LLVMValueRef Aliasee,
const char *Name) {
auto *PTy = cast<PointerType>(unwrap(Ty));
- return wrap(GlobalAlias::create(PTy->getElementType(), PTy->getAddressSpace(),
+ return wrap(GlobalAlias::create(PTy->getNonOpaquePointerElementType(),
+ PTy->getAddressSpace(),
GlobalValue::ExternalLinkage, Name,
unwrap<Constant>(Aliasee), unwrap(M)));
}
@@ -2293,7 +2292,7 @@ LLVMValueRef LLVMAddAlias2(LLVMModuleRef M, LLVMTypeRef ValueTy,
LLVMValueRef LLVMGetNamedGlobalAlias(LLVMModuleRef M,
const char *Name, size_t NameLen) {
- return wrap(unwrap(M)->getNamedAlias(Name));
+ return wrap(unwrap(M)->getNamedAlias(StringRef(Name, NameLen)));
}
LLVMValueRef LLVMGetFirstGlobalAlias(LLVMModuleRef M) {
@@ -3218,7 +3217,7 @@ LLVMValueRef LLVMBuildInvoke(LLVMBuilderRef B, LLVMValueRef Fn,
const char *Name) {
Value *V = unwrap(Fn);
FunctionType *FnT =
- cast<FunctionType>(cast<PointerType>(V->getType())->getElementType());
+ cast<FunctionType>(V->getType()->getNonOpaquePointerElementType());
return wrap(
unwrap(B)->CreateInvoke(FnT, unwrap(Fn), unwrap(Then), unwrap(Catch),
@@ -3590,7 +3589,8 @@ LLVMValueRef LLVMBuildLoad(LLVMBuilderRef B, LLVMValueRef PointerVal,
Value *V = unwrap(PointerVal);
PointerType *Ty = cast<PointerType>(V->getType());
- return wrap(unwrap(B)->CreateLoad(Ty->getElementType(), V, Name));
+ return wrap(
+ unwrap(B)->CreateLoad(Ty->getNonOpaquePointerElementType(), V, Name));
}
LLVMValueRef LLVMBuildLoad2(LLVMBuilderRef B, LLVMTypeRef Ty,
@@ -3692,8 +3692,7 @@ LLVMValueRef LLVMBuildGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
const char *Name) {
ArrayRef<Value *> IdxList(unwrap(Indices), NumIndices);
Value *Val = unwrap(Pointer);
- Type *Ty =
- cast<PointerType>(Val->getType()->getScalarType())->getElementType();
+ Type *Ty = Val->getType()->getScalarType()->getNonOpaquePointerElementType();
return wrap(unwrap(B)->CreateGEP(Ty, Val, IdxList, Name));
}
@@ -3709,8 +3708,7 @@ LLVMValueRef LLVMBuildInBoundsGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
const char *Name) {
ArrayRef<Value *> IdxList(unwrap(Indices), NumIndices);
Value *Val = unwrap(Pointer);
- Type *Ty =
- cast<PointerType>(Val->getType()->getScalarType())->getElementType();
+ Type *Ty = Val->getType()->getScalarType()->getNonOpaquePointerElementType();
return wrap(unwrap(B)->CreateInBoundsGEP(Ty, Val, IdxList, Name));
}
@@ -3725,8 +3723,7 @@ LLVMValueRef LLVMBuildInBoundsGEP2(LLVMBuilderRef B, LLVMTypeRef Ty,
LLVMValueRef LLVMBuildStructGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
unsigned Idx, const char *Name) {
Value *Val = unwrap(Pointer);
- Type *Ty =
- cast<PointerType>(Val->getType()->getScalarType())->getElementType();
+ Type *Ty = Val->getType()->getScalarType()->getNonOpaquePointerElementType();
return wrap(unwrap(B)->CreateStructGEP(Ty, Val, Idx, Name));
}
@@ -3947,7 +3944,7 @@ LLVMValueRef LLVMBuildCall(LLVMBuilderRef B, LLVMValueRef Fn,
const char *Name) {
Value *V = unwrap(Fn);
FunctionType *FnT =
- cast<FunctionType>(cast<PointerType>(V->getType())->getElementType());
+ cast<FunctionType>(V->getType()->getNonOpaquePointerElementType());
return wrap(unwrap(B)->CreateCall(FnT, unwrap(Fn),
makeArrayRef(unwrap(Args), NumArgs), Name));
@@ -4022,7 +4019,16 @@ LLVMValueRef LLVMBuildIsNotNull(LLVMBuilderRef B, LLVMValueRef Val,
LLVMValueRef LLVMBuildPtrDiff(LLVMBuilderRef B, LLVMValueRef LHS,
LLVMValueRef RHS, const char *Name) {
- return wrap(unwrap(B)->CreatePtrDiff(unwrap(LHS), unwrap(RHS), Name));
+ Value *L = unwrap(LHS);
+ Type *ElemTy = L->getType()->getNonOpaquePointerElementType();
+ return wrap(unwrap(B)->CreatePtrDiff(ElemTy, L, unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildPtrDiff2(LLVMBuilderRef B, LLVMTypeRef ElemTy,
+ LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreatePtrDiff(unwrap(ElemTy), unwrap(LHS),
+ unwrap(RHS), Name));
}
LLVMValueRef LLVMBuildAtomicRMW(LLVMBuilderRef B,LLVMAtomicRMWBinOp op,
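
LLVMBuildPtrDiff2 above is the opaque-pointer-ready form of LLVMBuildPtrDiff: the caller supplies the element type explicitly instead of having it read off the pointer's pointee type. A small sketch, with the builder and the two pointer values assumed to come from surrounding code:

#include "llvm-c/Core.h"

// Number of i32 elements between two i32* values LHS and RHS.
LLVMValueRef elementDistance(LLVMContextRef Ctx, LLVMBuilderRef B,
                             LLVMValueRef LHS, LLVMValueRef RHS) {
  LLVMTypeRef ElemTy = LLVMInt32TypeInContext(Ctx);
  return LLVMBuildPtrDiff2(B, ElemTy, LHS, RHS, "elem.dist");
}
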
diff --git a/contrib/llvm-project/llvm/lib/IR/DIBuilder.cpp b/contrib/llvm-project/llvm/lib/IR/DIBuilder.cpp
index 35af22034a12..a6e84dfbe1dd 100644
--- a/contrib/llvm-project/llvm/lib/IR/DIBuilder.cpp
+++ b/contrib/llvm-project/llvm/lib/IR/DIBuilder.cpp
@@ -33,7 +33,7 @@ static cl::opt<bool>
DIBuilder::DIBuilder(Module &m, bool AllowUnresolvedNodes, DICompileUnit *CU)
: M(m), VMContext(M.getContext()), CUNode(CU), DeclareFn(nullptr),
- ValueFn(nullptr), LabelFn(nullptr),
+ ValueFn(nullptr), LabelFn(nullptr), AddrFn(nullptr),
AllowUnresolvedNodes(AllowUnresolvedNodes) {
if (CUNode) {
if (const auto &ETs = CUNode->getEnumTypes())
@@ -821,12 +821,6 @@ DIExpression *DIBuilder::createExpression(ArrayRef<uint64_t> Addr) {
return DIExpression::get(VMContext, Addr);
}
-DIExpression *DIBuilder::createExpression(ArrayRef<int64_t> Signed) {
- // TODO: Remove the callers of this signed version and delete.
- SmallVector<uint64_t, 8> Addr(Signed.begin(), Signed.end());
- return createExpression(Addr);
-}
-
template <class... Ts>
static DISubprogram *getSubprogram(bool IsDistinct, Ts &&...Args) {
if (IsDistinct)
@@ -980,6 +974,24 @@ Instruction *DIBuilder::insertDbgValueIntrinsic(Value *V,
return insertDbgValueIntrinsic(V, VarInfo, Expr, DL, InsertAtEnd, nullptr);
}
+Instruction *DIBuilder::insertDbgAddrIntrinsic(Value *V,
+ DILocalVariable *VarInfo,
+ DIExpression *Expr,
+ const DILocation *DL,
+ Instruction *InsertBefore) {
+ return insertDbgAddrIntrinsic(
+ V, VarInfo, Expr, DL, InsertBefore ? InsertBefore->getParent() : nullptr,
+ InsertBefore);
+}
+
+Instruction *DIBuilder::insertDbgAddrIntrinsic(Value *V,
+ DILocalVariable *VarInfo,
+ DIExpression *Expr,
+ const DILocation *DL,
+ BasicBlock *InsertAtEnd) {
+ return insertDbgAddrIntrinsic(V, VarInfo, Expr, DL, InsertAtEnd, nullptr);
+}
+
/// Initialize IRBuilder for inserting dbg.declare and dbg.value intrinsics.
/// This abstracts over the various ways to specify an insert position.
static void initIRBuilder(IRBuilder<> &Builder, const DILocation *DL,
@@ -1001,6 +1013,24 @@ static Function *getDeclareIntrin(Module &M) {
: Intrinsic::dbg_declare);
}
+Instruction *DIBuilder::insertDbgValueIntrinsic(
+ llvm::Value *Val, DILocalVariable *VarInfo, DIExpression *Expr,
+ const DILocation *DL, BasicBlock *InsertBB, Instruction *InsertBefore) {
+ if (!ValueFn)
+ ValueFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_value);
+ return insertDbgIntrinsic(ValueFn, Val, VarInfo, Expr, DL, InsertBB,
+ InsertBefore);
+}
+
+Instruction *DIBuilder::insertDbgAddrIntrinsic(
+ llvm::Value *Val, DILocalVariable *VarInfo, DIExpression *Expr,
+ const DILocation *DL, BasicBlock *InsertBB, Instruction *InsertBefore) {
+ if (!AddrFn)
+ AddrFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_addr);
+ return insertDbgIntrinsic(AddrFn, Val, VarInfo, Expr, DL, InsertBB,
+ InsertBefore);
+}
+
Instruction *DIBuilder::insertDeclare(Value *Storage, DILocalVariable *VarInfo,
DIExpression *Expr, const DILocation *DL,
BasicBlock *InsertBB,
@@ -1024,17 +1054,20 @@ Instruction *DIBuilder::insertDeclare(Value *Storage, DILocalVariable *VarInfo,
return B.CreateCall(DeclareFn, Args);
}
-Instruction *DIBuilder::insertDbgValueIntrinsic(
- Value *V, DILocalVariable *VarInfo, DIExpression *Expr,
- const DILocation *DL, BasicBlock *InsertBB, Instruction *InsertBefore) {
- assert(V && "no value passed to dbg.value");
- assert(VarInfo && "empty or invalid DILocalVariable* passed to dbg.value");
+Instruction *DIBuilder::insertDbgIntrinsic(llvm::Function *IntrinsicFn,
+ Value *V, DILocalVariable *VarInfo,
+ DIExpression *Expr,
+ const DILocation *DL,
+ BasicBlock *InsertBB,
+ Instruction *InsertBefore) {
+ assert(IntrinsicFn && "must pass a non-null intrinsic function");
+ assert(V && "must pass a value to a dbg intrinsic");
+ assert(VarInfo &&
+ "empty or invalid DILocalVariable* passed to debug intrinsic");
assert(DL && "Expected debug loc");
assert(DL->getScope()->getSubprogram() ==
VarInfo->getScope()->getSubprogram() &&
"Expected matching subprograms");
- if (!ValueFn)
- ValueFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_value);
trackIfUnresolved(VarInfo);
trackIfUnresolved(Expr);
@@ -1044,7 +1077,7 @@ Instruction *DIBuilder::insertDbgValueIntrinsic(
IRBuilder<> B(DL->getContext());
initIRBuilder(B, DL, InsertBB, InsertBefore);
- return B.CreateCall(ValueFn, Args);
+ return B.CreateCall(IntrinsicFn, Args);
}
Instruction *DIBuilder::insertLabel(DILabel *LabelInfo, const DILocation *DL,
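
The new insertDbgAddrIntrinsic overloads mirror insertDbgValueIntrinsic but emit llvm.dbg.addr, lazily declaring the intrinsic through the AddrFn member initialized above. A hedged sketch of frontend usage; all arguments are assumed to be constructed elsewhere:

#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Record that 'Storage' holds the address of variable 'Var' at 'Loc',
// inserting the llvm.dbg.addr call right before 'InsertPt'.
Instruction *emitDbgAddr(DIBuilder &DIB, Value *Storage, DILocalVariable *Var,
                         const DILocation *Loc, Instruction *InsertPt) {
  return DIB.insertDbgAddrIntrinsic(Storage, Var, DIB.createExpression(), Loc,
                                    InsertPt);
}
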
diff --git a/contrib/llvm-project/llvm/lib/IR/DebugInfo.cpp b/contrib/llvm-project/llvm/lib/IR/DebugInfo.cpp
index 7c69fbf7085d..98f25b035157 100644
--- a/contrib/llvm-project/llvm/lib/IR/DebugInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/IR/DebugInfo.cpp
@@ -1436,14 +1436,14 @@ LLVMDIBuilderCreateSubroutineType(LLVMDIBuilderRef Builder,
}
LLVMMetadataRef LLVMDIBuilderCreateExpression(LLVMDIBuilderRef Builder,
- int64_t *Addr, size_t Length) {
- return wrap(unwrap(Builder)->createExpression(ArrayRef<int64_t>(Addr,
- Length)));
+ uint64_t *Addr, size_t Length) {
+ return wrap(
+ unwrap(Builder)->createExpression(ArrayRef<uint64_t>(Addr, Length)));
}
LLVMMetadataRef
LLVMDIBuilderCreateConstantValueExpression(LLVMDIBuilderRef Builder,
- int64_t Value) {
+ uint64_t Value) {
return wrap(unwrap(Builder)->createConstantValueExpression(Value));
}
diff --git a/contrib/llvm-project/llvm/lib/IR/DebugInfoMetadata.cpp b/contrib/llvm-project/llvm/lib/IR/DebugInfoMetadata.cpp
index b20e581d283a..59afb844eb89 100644
--- a/contrib/llvm-project/llvm/lib/IR/DebugInfoMetadata.cpp
+++ b/contrib/llvm-project/llvm/lib/IR/DebugInfoMetadata.cpp
@@ -567,13 +567,16 @@ Optional<DIBasicType::Signedness> DIBasicType::getSignedness() const {
DIStringType *DIStringType::getImpl(LLVMContext &Context, unsigned Tag,
MDString *Name, Metadata *StringLength,
Metadata *StringLengthExp,
+ Metadata *StringLocationExp,
uint64_t SizeInBits, uint32_t AlignInBits,
unsigned Encoding, StorageType Storage,
bool ShouldCreate) {
assert(isCanonical(Name) && "Expected canonical MDString");
- DEFINE_GETIMPL_LOOKUP(DIStringType, (Tag, Name, StringLength, StringLengthExp,
- SizeInBits, AlignInBits, Encoding));
- Metadata *Ops[] = {nullptr, nullptr, Name, StringLength, StringLengthExp};
+ DEFINE_GETIMPL_LOOKUP(DIStringType,
+ (Tag, Name, StringLength, StringLengthExp,
+ StringLocationExp, SizeInBits, AlignInBits, Encoding));
+ Metadata *Ops[] = {nullptr, nullptr, Name,
+ StringLength, StringLengthExp, StringLocationExp};
DEFINE_GETIMPL_STORE(DIStringType, (Tag, SizeInBits, AlignInBits, Encoding),
Ops);
}
diff --git a/contrib/llvm-project/llvm/lib/IR/Function.cpp b/contrib/llvm-project/llvm/lib/IR/Function.cpp
index f1a6402fb11b..1e874d7afa79 100644
--- a/contrib/llvm-project/llvm/lib/IR/Function.cpp
+++ b/contrib/llvm-project/llvm/lib/IR/Function.cpp
@@ -300,9 +300,9 @@ void Argument::removeAttr(Attribute::AttrKind Kind) {
getParent()->removeParamAttr(getArgNo(), Kind);
}
-void Argument::removeAttrs(const AttrBuilder &B) {
+void Argument::removeAttrs(const AttributeMask &AM) {
AttributeList AL = getParent()->getAttributes();
- AL = AL.removeParamAttributes(Parent->getContext(), getArgNo(), B);
+ AL = AL.removeParamAttributes(Parent->getContext(), getArgNo(), AM);
getParent()->setAttributes(AL);
}
@@ -340,7 +340,7 @@ Function *Function::createWithDefaultAttr(FunctionType *Ty,
unsigned AddrSpace, const Twine &N,
Module *M) {
auto *F = new Function(Ty, Linkage, AddrSpace, N, M);
- AttrBuilder B;
+ AttrBuilder B(F->getContext());
if (M->getUwtable())
B.addAttribute(Attribute::UWTable);
switch (M->getFramePointer()) {
@@ -589,8 +589,8 @@ void Function::removeFnAttr(StringRef Kind) {
AttributeSets = AttributeSets.removeFnAttribute(getContext(), Kind);
}
-void Function::removeFnAttrs(const AttrBuilder &Attrs) {
- AttributeSets = AttributeSets.removeFnAttributes(getContext(), Attrs);
+void Function::removeFnAttrs(const AttributeMask &AM) {
+ AttributeSets = AttributeSets.removeFnAttributes(getContext(), AM);
}
void Function::removeRetAttr(Attribute::AttrKind Kind) {
@@ -601,7 +601,7 @@ void Function::removeRetAttr(StringRef Kind) {
AttributeSets = AttributeSets.removeRetAttribute(getContext(), Kind);
}
-void Function::removeRetAttrs(const AttrBuilder &Attrs) {
+void Function::removeRetAttrs(const AttributeMask &Attrs) {
AttributeSets = AttributeSets.removeRetAttributes(getContext(), Attrs);
}
@@ -613,7 +613,7 @@ void Function::removeParamAttr(unsigned ArgNo, StringRef Kind) {
AttributeSets = AttributeSets.removeParamAttribute(getContext(), ArgNo, Kind);
}
-void Function::removeParamAttrs(unsigned ArgNo, const AttrBuilder &Attrs) {
+void Function::removeParamAttrs(unsigned ArgNo, const AttributeMask &Attrs) {
AttributeSets =
AttributeSets.removeParamAttributes(getContext(), ArgNo, Attrs);
}
@@ -817,7 +817,8 @@ static std::string getMangledTypeStr(Type *Ty, bool &HasUnnamedType) {
// Opaque pointer doesn't have pointee type information, so we just mangle
// address space for opaque pointer.
if (!PTyp->isOpaque())
- Result += getMangledTypeStr(PTyp->getElementType(), HasUnnamedType);
+ Result += getMangledTypeStr(PTyp->getNonOpaquePointerElementType(),
+ HasUnnamedType);
} else if (ArrayType *ATyp = dyn_cast<ArrayType>(Ty)) {
Result += "a" + utostr(ATyp->getNumElements()) +
getMangledTypeStr(ATyp->getElementType(), HasUnnamedType);
@@ -1465,8 +1466,8 @@ static bool matchIntrinsicType(
if (!PT || PT->getAddressSpace() != D.Pointer_AddressSpace)
return true;
if (!PT->isOpaque())
- return matchIntrinsicType(PT->getElementType(), Infos, ArgTys,
- DeferredChecks, IsDeferredCheck);
+ return matchIntrinsicType(PT->getNonOpaquePointerElementType(), Infos,
+ ArgTys, DeferredChecks, IsDeferredCheck);
// Consume IIT descriptors relating to the pointer element type.
while (Infos.front().Kind == IITDescriptor::Pointer)
Infos = Infos.slice(1);
@@ -1573,7 +1574,8 @@ static bool matchIntrinsicType(
return IsDeferredCheck || DeferCheck(Ty);
Type * ReferenceType = ArgTys[D.getArgumentNumber()];
PointerType *ThisArgType = dyn_cast<PointerType>(Ty);
- return (!ThisArgType || ThisArgType->getElementType() != ReferenceType);
+ return (!ThisArgType ||
+ !ThisArgType->isOpaqueOrPointeeTypeMatches(ReferenceType));
}
case IITDescriptor::PtrToElt: {
if (D.getArgumentNumber() >= ArgTys.size())
diff --git a/contrib/llvm-project/llvm/lib/IR/Globals.cpp b/contrib/llvm-project/llvm/lib/IR/Globals.cpp
index b6bd25aa1234..c832499dde06 100644
--- a/contrib/llvm-project/llvm/lib/IR/Globals.cpp
+++ b/contrib/llvm-project/llvm/lib/IR/Globals.cpp
@@ -95,6 +95,8 @@ void GlobalValue::eraseFromParent() {
llvm_unreachable("not a global");
}
+GlobalObject::~GlobalObject() { setComdat(nullptr); }
+
bool GlobalValue::isInterposable() const {
if (isInterposableLinkage(getLinkage()))
return true;
@@ -103,10 +105,15 @@ bool GlobalValue::isInterposable() const {
}
bool GlobalValue::canBenefitFromLocalAlias() const {
- // See AsmPrinter::getSymbolPreferLocal().
+ // See AsmPrinter::getSymbolPreferLocal(). For a deduplicate comdat kind,
+ // references to a discarded local symbol from outside the group are not
+ // allowed, so avoid the local alias.
+ auto isDeduplicateComdat = [](const Comdat *C) {
+ return C && C->getSelectionKind() != Comdat::NoDeduplicate;
+ };
return hasDefaultVisibility() &&
GlobalObject::isExternalLinkage(getLinkage()) && !isDeclaration() &&
- !isa<GlobalIFunc>(this) && !hasComdat();
+ !isa<GlobalIFunc>(this) && !isDeduplicateComdat(getComdat());
}
unsigned GlobalValue::getAddressSpace() const {
@@ -182,6 +189,14 @@ const Comdat *GlobalValue::getComdat() const {
return cast<GlobalObject>(this)->getComdat();
}
+void GlobalObject::setComdat(Comdat *C) {
+ if (ObjComdat)
+ ObjComdat->removeUser(this);
+ ObjComdat = C;
+ if (C)
+ C->addUser(this);
+}
+
StringRef GlobalValue::getPartition() const {
if (!hasPartition())
return "";
diff --git a/contrib/llvm-project/llvm/lib/IR/IRBuilder.cpp b/contrib/llvm-project/llvm/lib/IR/IRBuilder.cpp
index 98f6ccf81973..27528a69be21 100644
--- a/contrib/llvm-project/llvm/lib/IR/IRBuilder.cpp
+++ b/contrib/llvm-project/llvm/lib/IR/IRBuilder.cpp
@@ -679,7 +679,7 @@ static CallInst *CreateGCStatepointCallCommon(
const Twine &Name) {
// Extract out the type of the callee.
auto *FuncPtrType = cast<PointerType>(ActualCallee->getType());
- assert(isa<FunctionType>(FuncPtrType->getElementType()) &&
+ assert(isa<FunctionType>(FuncPtrType->getPointerElementType()) &&
"actual callee must be a callable value");
Module *M = Builder->GetInsertBlock()->getParent()->getParent();
@@ -736,7 +736,7 @@ static InvokeInst *CreateGCStatepointInvokeCommon(
ArrayRef<T3> GCArgs, const Twine &Name) {
// Extract out the type of the callee.
auto *FuncPtrType = cast<PointerType>(ActualInvokee->getType());
- assert(isa<FunctionType>(FuncPtrType->getElementType()) &&
+ assert(isa<FunctionType>(FuncPtrType->getPointerElementType()) &&
"actual callee must be a callable value");
Module *M = Builder->GetInsertBlock()->getParent()->getParent();
@@ -984,10 +984,8 @@ CallInst *IRBuilderBase::CreateConstrainedFPCall(
Value *IRBuilderBase::CreateSelect(Value *C, Value *True, Value *False,
const Twine &Name, Instruction *MDFrom) {
- if (auto *CC = dyn_cast<Constant>(C))
- if (auto *TC = dyn_cast<Constant>(True))
- if (auto *FC = dyn_cast<Constant>(False))
- return Insert(Folder.CreateSelect(CC, TC, FC), Name);
+ if (auto *V = Folder.FoldSelect(C, True, False))
+ return V;
SelectInst *Sel = SelectInst::Create(C, True, False);
if (MDFrom) {
@@ -1000,16 +998,17 @@ Value *IRBuilderBase::CreateSelect(Value *C, Value *True, Value *False,
return Insert(Sel, Name);
}
-Value *IRBuilderBase::CreatePtrDiff(Value *LHS, Value *RHS,
+Value *IRBuilderBase::CreatePtrDiff(Type *ElemTy, Value *LHS, Value *RHS,
const Twine &Name) {
assert(LHS->getType() == RHS->getType() &&
"Pointer subtraction operand types must match!");
- auto *ArgType = cast<PointerType>(LHS->getType());
+ assert(cast<PointerType>(LHS->getType())
+ ->isOpaqueOrPointeeTypeMatches(ElemTy) &&
+ "Pointer type must match element type");
Value *LHS_int = CreatePtrToInt(LHS, Type::getInt64Ty(Context));
Value *RHS_int = CreatePtrToInt(RHS, Type::getInt64Ty(Context));
Value *Difference = CreateSub(LHS_int, RHS_int);
- return CreateExactSDiv(Difference,
- ConstantExpr::getSizeOf(ArgType->getElementType()),
+ return CreateExactSDiv(Difference, ConstantExpr::getSizeOf(ElemTy),
Name);
}
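
CreatePtrDiff now takes the element type explicitly, but the lowering is unchanged: (ptrtoint LHS - ptrtoint RHS) divided exactly by sizeof(ElemTy). Sketch of a caller, assuming P and Q are pointers to i32:

#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Number of i32 slots between P and Q, i.e.
//   exact sdiv (sub (ptrtoint P), (ptrtoint Q)), sizeof(i32)
Value *elementsBetween(IRBuilder<> &B, Value *P, Value *Q) {
  return B.CreatePtrDiff(B.getInt32Ty(), P, Q, "elems");
}
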
diff --git a/contrib/llvm-project/llvm/lib/IR/Instruction.cpp b/contrib/llvm-project/llvm/lib/IR/Instruction.cpp
index 4480ec799c35..59b7221d1fa2 100644
--- a/contrib/llvm-project/llvm/lib/IR/Instruction.cpp
+++ b/contrib/llvm-project/llvm/lib/IR/Instruction.cpp
@@ -186,7 +186,8 @@ void Instruction::dropUndefImplyingAttrsAndUnknownMetadata(
AttributeList AL = CB->getAttributes();
if (AL.isEmpty())
return;
- AttrBuilder UBImplyingAttributes = AttributeFuncs::getUBImplyingAttributes();
+ AttributeMask UBImplyingAttributes =
+ AttributeFuncs::getUBImplyingAttributes();
for (unsigned ArgNo = 0; ArgNo < CB->arg_size(); ArgNo++)
CB->removeParamAttrs(ArgNo, UBImplyingAttributes);
CB->removeRetAttrs(UBImplyingAttributes);
@@ -584,7 +585,7 @@ bool Instruction::mayReadFromMemory() const {
case Instruction::Call:
case Instruction::Invoke:
case Instruction::CallBr:
- return !cast<CallBase>(this)->doesNotReadMemory();
+ return !cast<CallBase>(this)->onlyWritesMemory();
case Instruction::Store:
return !cast<StoreInst>(this)->isUnordered();
}
diff --git a/contrib/llvm-project/llvm/lib/IR/IntrinsicInst.cpp b/contrib/llvm-project/llvm/lib/IR/IntrinsicInst.cpp
index 8f7318665cfb..adea7abb75cf 100644
--- a/contrib/llvm-project/llvm/lib/IR/IntrinsicInst.cpp
+++ b/contrib/llvm-project/llvm/lib/IR/IntrinsicInst.cpp
@@ -178,6 +178,18 @@ int llvm::Intrinsic::lookupLLVMIntrinsicByName(ArrayRef<const char *> NameTable,
return -1;
}
+ConstantInt *InstrProfInstBase::getNumCounters() const {
+ if (InstrProfValueProfileInst::classof(this))
+ llvm_unreachable("InstrProfValueProfileInst does not have counters!");
+ return cast<ConstantInt>(const_cast<Value *>(getArgOperand(2)));
+}
+
+ConstantInt *InstrProfInstBase::getIndex() const {
+ if (InstrProfValueProfileInst::classof(this))
+ llvm_unreachable("Please use InstrProfValueProfileInst::getIndex()");
+ return cast<ConstantInt>(const_cast<Value *>(getArgOperand(3)));
+}
+
Value *InstrProfIncrementInst::getStep() const {
if (InstrProfIncrementInstStep::classof(this)) {
return const_cast<Value *>(getArgOperand(4));
@@ -482,6 +494,7 @@ Function *VPIntrinsic::getDeclarationForParams(Module *M, Intrinsic::ID VPID,
VPFunc = Intrinsic::getDeclaration(M, VPID, OverloadTy);
break;
}
+ case Intrinsic::vp_merge:
case Intrinsic::vp_select:
VPFunc = Intrinsic::getDeclaration(M, VPID, {Params[1]->getType()});
break;
diff --git a/contrib/llvm-project/llvm/lib/IR/LLVMContextImpl.h b/contrib/llvm-project/llvm/lib/IR/LLVMContextImpl.h
index 24c4a348f4da..0b5f928165e8 100644
--- a/contrib/llvm-project/llvm/lib/IR/LLVMContextImpl.h
+++ b/contrib/llvm-project/llvm/lib/IR/LLVMContextImpl.h
@@ -428,20 +428,22 @@ template <> struct MDNodeKeyImpl<DIStringType> {
MDString *Name;
Metadata *StringLength;
Metadata *StringLengthExp;
+ Metadata *StringLocationExp;
uint64_t SizeInBits;
uint32_t AlignInBits;
unsigned Encoding;
MDNodeKeyImpl(unsigned Tag, MDString *Name, Metadata *StringLength,
- Metadata *StringLengthExp, uint64_t SizeInBits,
- uint32_t AlignInBits, unsigned Encoding)
+ Metadata *StringLengthExp, Metadata *StringLocationExp,
+ uint64_t SizeInBits, uint32_t AlignInBits, unsigned Encoding)
: Tag(Tag), Name(Name), StringLength(StringLength),
- StringLengthExp(StringLengthExp), SizeInBits(SizeInBits),
- AlignInBits(AlignInBits), Encoding(Encoding) {}
+ StringLengthExp(StringLengthExp), StringLocationExp(StringLocationExp),
+ SizeInBits(SizeInBits), AlignInBits(AlignInBits), Encoding(Encoding) {}
MDNodeKeyImpl(const DIStringType *N)
: Tag(N->getTag()), Name(N->getRawName()),
StringLength(N->getRawStringLength()),
StringLengthExp(N->getRawStringLengthExp()),
+ StringLocationExp(N->getRawStringLocationExp()),
SizeInBits(N->getSizeInBits()), AlignInBits(N->getAlignInBits()),
Encoding(N->getEncoding()) {}
diff --git a/contrib/llvm-project/llvm/lib/IR/LegacyPassManager.cpp b/contrib/llvm-project/llvm/lib/IR/LegacyPassManager.cpp
index bb72bec93066..4357c95aa9f6 100644
--- a/contrib/llvm-project/llvm/lib/IR/LegacyPassManager.cpp
+++ b/contrib/llvm-project/llvm/lib/IR/LegacyPassManager.cpp
@@ -256,9 +256,9 @@ private:
bool wasRun;
public:
static char ID;
- explicit FunctionPassManagerImpl() :
- Pass(PT_PassManager, ID), PMDataManager(),
- PMTopLevelManager(new FPPassManager()), wasRun(false) {}
+ explicit FunctionPassManagerImpl()
+ : Pass(PT_PassManager, ID), PMTopLevelManager(new FPPassManager()),
+ wasRun(false) {}
/// \copydoc FunctionPassManager::add()
void add(Pass *P) {
@@ -387,8 +387,7 @@ namespace {
class MPPassManager : public Pass, public PMDataManager {
public:
static char ID;
- explicit MPPassManager() :
- Pass(PT_PassManager, ID), PMDataManager() { }
+ explicit MPPassManager() : Pass(PT_PassManager, ID) {}
// Delete on the fly managers.
~MPPassManager() override {
@@ -478,9 +477,8 @@ class PassManagerImpl : public Pass,
public:
static char ID;
- explicit PassManagerImpl() :
- Pass(PT_PassManager, ID), PMDataManager(),
- PMTopLevelManager(new MPPassManager()) {}
+ explicit PassManagerImpl()
+ : Pass(PT_PassManager, ID), PMTopLevelManager(new MPPassManager()) {}
/// \copydoc PassManager::add()
void add(Pass *P) {
diff --git a/contrib/llvm-project/llvm/lib/IR/TypeFinder.cpp b/contrib/llvm-project/llvm/lib/IR/TypeFinder.cpp
index 1f757d7dbf4e..904af7e737cc 100644
--- a/contrib/llvm-project/llvm/lib/IR/TypeFinder.cpp
+++ b/contrib/llvm-project/llvm/lib/IR/TypeFinder.cpp
@@ -18,8 +18,10 @@
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
@@ -34,22 +36,27 @@ void TypeFinder::run(const Module &M, bool onlyNamed) {
// Get types from global variables.
for (const auto &G : M.globals()) {
- incorporateType(G.getType());
+ incorporateType(G.getValueType());
if (G.hasInitializer())
incorporateValue(G.getInitializer());
}
// Get types from aliases.
for (const auto &A : M.aliases()) {
- incorporateType(A.getType());
+ incorporateType(A.getValueType());
if (const Value *Aliasee = A.getAliasee())
incorporateValue(Aliasee);
}
+ // Get types from ifuncs.
+ for (const auto &GI : M.ifuncs())
+ incorporateType(GI.getValueType());
+
// Get types from functions.
SmallVector<std::pair<unsigned, MDNode *>, 4> MDForInst;
for (const Function &FI : M) {
- incorporateType(FI.getType());
+ incorporateType(FI.getFunctionType());
+ incorporateAttributes(FI.getAttributes());
for (const Use &U : FI.operands())
incorporateValue(U.get());
@@ -69,6 +76,13 @@ void TypeFinder::run(const Module &M, bool onlyNamed) {
if (&*O && !isa<Instruction>(&*O))
incorporateValue(&*O);
+ if (auto *GEP = dyn_cast<GetElementPtrInst>(&I))
+ incorporateType(GEP->getSourceElementType());
+ if (auto *AI = dyn_cast<AllocaInst>(&I))
+ incorporateType(AI->getAllocatedType());
+ if (const auto *CB = dyn_cast<CallBase>(&I))
+ incorporateAttributes(CB->getAttributes());
+
// Incorporate types hiding in metadata.
I.getAllMetadataOtherThanDebugLoc(MDForInst);
for (const auto &MD : MDForInst)
@@ -138,6 +152,9 @@ void TypeFinder::incorporateValue(const Value *V) {
if (isa<Instruction>(V))
return;
+ if (auto *GEP = dyn_cast<GEPOperator>(V))
+ incorporateType(GEP->getSourceElementType());
+
// Look in operands for types.
const User *U = cast<User>(V);
for (const auto &I : U->operands())
@@ -173,3 +190,13 @@ void TypeFinder::incorporateMDNode(const MDNode *V) {
}
}
}
+
+void TypeFinder::incorporateAttributes(AttributeList AL) {
+ if (!VisitedAttributes.insert(AL).second)
+ return;
+
+ for (AttributeSet AS : AL)
+ for (Attribute A : AS)
+ if (A.isTypeAttribute())
+ incorporateType(A.getValueAsType());
+}
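
TypeFinder now also visits global value types, GEP source element types, alloca types and type attributes, so named structs reachable only through those edges are still discovered once pointee types go away. Typical use is unchanged; a brief sketch:

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/TypeFinder.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Print every named struct type referenced anywhere in the module.
void printNamedStructs(const Module &M) {
  TypeFinder Finder;
  Finder.run(M, /*onlyNamed=*/true);
  for (StructType *ST : Finder)
    errs() << ST->getName() << "\n";
}
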
diff --git a/contrib/llvm-project/llvm/lib/IR/Verifier.cpp b/contrib/llvm-project/llvm/lib/IR/Verifier.cpp
index fb7c423e54e2..b84edb789405 100644
--- a/contrib/llvm-project/llvm/lib/IR/Verifier.cpp
+++ b/contrib/llvm-project/llvm/lib/IR/Verifier.cpp
@@ -551,11 +551,12 @@ private:
void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
const Value *V);
void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
- const Value *V, bool IsIntrinsic);
+ const Value *V, bool IsIntrinsic, bool IsInlineAsm);
void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
void visitConstantExprsRecursively(const Constant *EntryC);
void visitConstantExpr(const ConstantExpr *CE);
+ void verifyInlineAsmCall(const CallBase &Call);
void verifyStatepoint(const CallBase &Call);
void verifyFrameRecoverIndices();
void verifySiblingFuncletUnwinds();
@@ -1058,6 +1059,7 @@ void Verifier::visitDIDerivedType(const DIDerivedType &N) {
N.getTag() == dwarf::DW_TAG_reference_type ||
N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
N.getTag() == dwarf::DW_TAG_const_type ||
+ N.getTag() == dwarf::DW_TAG_immutable_type ||
N.getTag() == dwarf::DW_TAG_volatile_type ||
N.getTag() == dwarf::DW_TAG_restrict_type ||
N.getTag() == dwarf::DW_TAG_atomic_type ||
@@ -1792,7 +1794,7 @@ void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
"'noinline and alwaysinline' are incompatible!",
V);
- AttrBuilder IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty);
+ AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty);
for (Attribute Attr : Attrs) {
if (!Attr.isStringAttribute() &&
IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
@@ -1824,33 +1826,34 @@ void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
"Attribute 'preallocated' does not support unsized types!", V);
}
if (!PTy->isOpaque()) {
- if (!isa<PointerType>(PTy->getElementType()))
+ if (!isa<PointerType>(PTy->getNonOpaquePointerElementType()))
Assert(!Attrs.hasAttribute(Attribute::SwiftError),
"Attribute 'swifterror' only applies to parameters "
"with pointer to pointer type!",
V);
if (Attrs.hasAttribute(Attribute::ByRef)) {
- Assert(Attrs.getByRefType() == PTy->getElementType(),
+ Assert(Attrs.getByRefType() == PTy->getNonOpaquePointerElementType(),
"Attribute 'byref' type does not match parameter!", V);
}
if (Attrs.hasAttribute(Attribute::ByVal) && Attrs.getByValType()) {
- Assert(Attrs.getByValType() == PTy->getElementType(),
+ Assert(Attrs.getByValType() == PTy->getNonOpaquePointerElementType(),
"Attribute 'byval' type does not match parameter!", V);
}
if (Attrs.hasAttribute(Attribute::Preallocated)) {
- Assert(Attrs.getPreallocatedType() == PTy->getElementType(),
+ Assert(Attrs.getPreallocatedType() ==
+ PTy->getNonOpaquePointerElementType(),
"Attribute 'preallocated' type does not match parameter!", V);
}
if (Attrs.hasAttribute(Attribute::InAlloca)) {
- Assert(Attrs.getInAllocaType() == PTy->getElementType(),
+ Assert(Attrs.getInAllocaType() == PTy->getNonOpaquePointerElementType(),
"Attribute 'inalloca' type does not match parameter!", V);
}
if (Attrs.hasAttribute(Attribute::ElementType)) {
- Assert(Attrs.getElementType() == PTy->getElementType(),
+ Assert(Attrs.getElementType() == PTy->getNonOpaquePointerElementType(),
"Attribute 'elementtype' type does not match parameter!", V);
}
}
@@ -1870,7 +1873,8 @@ void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
// Check parameter attributes against a function type.
// The value V is printed in error messages.
void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
- const Value *V, bool IsIntrinsic) {
+ const Value *V, bool IsIntrinsic,
+ bool IsInlineAsm) {
if (Attrs.isEmpty())
return;
@@ -1913,8 +1917,10 @@ void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
if (!IsIntrinsic) {
Assert(!ArgAttrs.hasAttribute(Attribute::ImmArg),
"immarg attribute only applies to intrinsics",V);
- Assert(!ArgAttrs.hasAttribute(Attribute::ElementType),
- "Attribute 'elementtype' can only be applied to intrinsics.", V);
+ if (!IsInlineAsm)
+ Assert(!ArgAttrs.hasAttribute(Attribute::ElementType),
+ "Attribute 'elementtype' can only be applied to intrinsics"
+ " and inline asm.", V);
}
verifyParameterAttrs(ArgAttrs, Ty, V);
@@ -2141,6 +2147,33 @@ bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
return Attrs.getNumAttrSets() <= Params + 2;
}
+void Verifier::verifyInlineAsmCall(const CallBase &Call) {
+ const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
+ unsigned ArgNo = 0;
+ for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
+ // Only deal with constraints that correspond to call arguments.
+ if (!CI.hasArg())
+ continue;
+
+ if (CI.isIndirect) {
+ const Value *Arg = Call.getArgOperand(ArgNo);
+ Assert(Arg->getType()->isPointerTy(),
+ "Operand for indirect constraint must have pointer type",
+ &Call);
+
+ Assert(Call.getAttributes().getParamElementType(ArgNo),
+ "Operand for indirect constraint must have elementtype attribute",
+ &Call);
+ } else {
+ Assert(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
+ "Elementtype attribute can only be applied for indirect "
+ "constraints", &Call);
+ }
+
+ ArgNo++;
+ }
+}
+
/// Verify that statepoint intrinsic is well formed.
void Verifier::verifyStatepoint(const CallBase &Call) {
assert(Call.getCalledFunction() &&
@@ -2163,9 +2196,10 @@ void Verifier::verifyStatepoint(const CallBase &Call) {
const Value *Target = Call.getArgOperand(2);
auto *PT = dyn_cast<PointerType>(Target->getType());
- Assert(PT && PT->getElementType()->isFunctionTy(),
+ Assert(PT && PT->getPointerElementType()->isFunctionTy(),
"gc.statepoint callee must be of function pointer type", Call, Target);
- FunctionType *TargetFuncType = cast<FunctionType>(PT->getElementType());
+ FunctionType *TargetFuncType =
+ cast<FunctionType>(PT->getPointerElementType());
const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
Assert(NumCallArgs >= 0,
@@ -2364,7 +2398,7 @@ void Verifier::visitFunction(const Function &F) {
bool IsIntrinsic = F.isIntrinsic();
// Check function attributes.
- verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic);
+ verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);
// On function declarations/definitions, we do not support the builtin
// attribute. We do not check this in VerifyFunctionAttrs since that is
@@ -2779,6 +2813,7 @@ void Verifier::visitCallBrInst(CallBrInst &CBI) {
Assert(ArgBBs.count(BB), "Indirect label missing from arglist.", &CBI);
}
+ verifyInlineAsmCall(CBI);
visitTerminator(CBI);
}
@@ -3123,7 +3158,7 @@ void Verifier::visitCallBase(CallBase &Call) {
}
// Verify call attributes.
- verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic);
+ verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());
// Conservatively check the inalloca argument.
// We have a bug if we can find that there is an underlying alloca without
@@ -3316,6 +3351,9 @@ void Verifier::visitCallBase(CallBase &Call) {
"debug info must have a !dbg location",
Call);
+ if (Call.isInlineAsm())
+ verifyInlineAsmCall(Call);
+
visitInstruction(Call);
}
@@ -3345,13 +3383,13 @@ static bool isTypeCongruent(Type *L, Type *R) {
return PL->getAddressSpace() == PR->getAddressSpace();
}
-static AttrBuilder getParameterABIAttributes(unsigned I, AttributeList Attrs) {
+static AttrBuilder getParameterABIAttributes(LLVMContext& C, unsigned I, AttributeList Attrs) {
static const Attribute::AttrKind ABIAttrs[] = {
Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
Attribute::ByRef};
- AttrBuilder Copy;
+ AttrBuilder Copy(C);
for (auto AK : ABIAttrs) {
Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
if (Attr.isValid())
@@ -3414,12 +3452,12 @@ void Verifier::verifyMustTailCall(CallInst &CI) {
// - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
// are allowed in swifttailcc call
for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
- AttrBuilder ABIAttrs = getParameterABIAttributes(I, CallerAttrs);
+ AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
SmallString<32> Context{CCName, StringRef(" musttail caller")};
verifyTailCCMustTailAttrs(ABIAttrs, Context);
}
for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
- AttrBuilder ABIAttrs = getParameterABIAttributes(I, CalleeAttrs);
+ AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
SmallString<32> Context{CCName, StringRef(" musttail callee")};
verifyTailCCMustTailAttrs(ABIAttrs, Context);
}
@@ -3446,8 +3484,8 @@ void Verifier::verifyMustTailCall(CallInst &CI) {
// - All ABI-impacting function attributes, such as sret, byval, inreg,
// returned, preallocated, and inalloca, must match.
for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
- AttrBuilder CallerABIAttrs = getParameterABIAttributes(I, CallerAttrs);
- AttrBuilder CalleeABIAttrs = getParameterABIAttributes(I, CalleeAttrs);
+ AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
+ AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
Assert(CallerABIAttrs == CalleeABIAttrs,
"cannot guarantee tail call due to mismatched ABI impacting "
"function attributes",
@@ -3963,6 +4001,11 @@ void Verifier::visitEHPadPredecessors(Instruction &I) {
"A single unwind edge may only enter one EH pad", TI);
Assert(Seen.insert(FromPad).second,
"EH pad jumps through a cycle of pads", FromPad);
+
+ // This will be diagnosed on the corresponding instruction already. We
+ // need the extra check here to make sure getParentPad() works.
+ Assert(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
+ "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
}
}
}
@@ -4964,7 +5007,7 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
// Assert that result type matches wrapped callee.
const Value *Target = StatepointCall->getArgOperand(2);
auto *PT = cast<PointerType>(Target->getType());
- auto *TargetFuncType = cast<FunctionType>(PT->getElementType());
+ auto *TargetFuncType = cast<FunctionType>(PT->getPointerElementType());
Assert(Call.getType() == TargetFuncType->getReturnType(),
"gc.result result type does not match wrapped callee", Call);
break;
@@ -5271,7 +5314,7 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
PointerType *Op0PtrTy =
cast<PointerType>(Call.getArgOperand(0)->getType());
if (!Op0PtrTy->isOpaque())
- Op0ElemTy = Op0PtrTy->getElementType();
+ Op0ElemTy = Op0PtrTy->getNonOpaquePointerElementType();
break;
}
case Intrinsic::matrix_column_major_store: {
@@ -5285,7 +5328,7 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
PointerType *Op1PtrTy =
cast<PointerType>(Call.getArgOperand(1)->getType());
if (!Op1PtrTy->isOpaque())
- Op1ElemTy = Op1PtrTy->getElementType();
+ Op1ElemTy = Op1PtrTy->getNonOpaquePointerElementType();
break;
}
default:
@@ -5316,6 +5359,24 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
break;
}
+ case Intrinsic::experimental_vector_splice: {
+ VectorType *VecTy = cast<VectorType>(Call.getType());
+ int64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getSExtValue();
+ int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
+ if (Call.getParent() && Call.getParent()->getParent()) {
+ AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
+ if (Attrs.hasFnAttr(Attribute::VScaleRange))
+ KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
+ }
+ Assert((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
+ (Idx >= 0 && Idx < KnownMinNumElements),
+ "The splice index exceeds the range [-VL, VL-1] where VL is the "
+ "known minimum number of elements in the vector. For scalable "
+ "vectors the minimum number of elements is determined from "
+ "vscale_range.",
+ &Call);
+ break;
+ }
case Intrinsic::experimental_stepvector: {
VectorType *VecTy = dyn_cast<VectorType>(Call.getType());
Assert(VecTy && VecTy->getScalarType()->isIntegerTy() &&
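
verifyInlineAsmCall above requires every indirect asm constraint to have a pointer argument carrying an elementtype attribute, and forbids the attribute on direct constraints. A hedged sketch of how code constructing such a call could attach the attribute; the argument index and element type are assumptions:

#include "llvm/IR/Attributes.h"
#include "llvm/IR/InstrTypes.h"

using namespace llvm;

// Tag argument ArgNo of an inline-asm CallBase as an indirect operand of
// type ElemTy, satisfying the verifier check above.
void tagIndirectAsmOperand(CallBase &Call, unsigned ArgNo, Type *ElemTy) {
  Call.addParamAttr(ArgNo, Attribute::get(Call.getContext(),
                                          Attribute::ElementType, ElemTy));
}
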
diff --git a/contrib/llvm-project/llvm/lib/InterfaceStub/ELFObjHandler.cpp b/contrib/llvm-project/llvm/lib/InterfaceStub/ELFObjHandler.cpp
index 0d1a864f31ac..cb72f57f7bde 100644
--- a/contrib/llvm-project/llvm/lib/InterfaceStub/ELFObjHandler.cpp
+++ b/contrib/llvm-project/llvm/lib/InterfaceStub/ELFObjHandler.cpp
@@ -19,7 +19,6 @@
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Process.h"
-using llvm::MemoryBufferRef;
using llvm::object::ELFObjectFile;
using namespace llvm;
diff --git a/contrib/llvm-project/llvm/lib/InterfaceStub/IFSHandler.cpp b/contrib/llvm-project/llvm/lib/InterfaceStub/IFSHandler.cpp
index e6bf09232ce2..4ccbb18ca04a 100644
--- a/contrib/llvm-project/llvm/lib/InterfaceStub/IFSHandler.cpp
+++ b/contrib/llvm-project/llvm/lib/InterfaceStub/IFSHandler.cpp
@@ -195,7 +195,7 @@ Expected<std::unique_ptr<IFSStub>> ifs::readIFSFromBuffer(StringRef Buf) {
}
Error ifs::writeIFSToOutputStream(raw_ostream &OS, const IFSStub &Stub) {
- yaml::Output YamlOut(OS, NULL, /*WrapColumn =*/0);
+ yaml::Output YamlOut(OS, nullptr, /*WrapColumn =*/0);
std::unique_ptr<IFSStubTriple> CopyStub(new IFSStubTriple(Stub));
if (Stub.Target.Arch) {
CopyStub->Target.ArchString = std::string(
diff --git a/contrib/llvm-project/llvm/lib/InterfaceStub/IFSStub.cpp b/contrib/llvm-project/llvm/lib/InterfaceStub/IFSStub.cpp
index 008263f8db9f..1ce7a66869b8 100644
--- a/contrib/llvm-project/llvm/lib/InterfaceStub/IFSStub.cpp
+++ b/contrib/llvm-project/llvm/lib/InterfaceStub/IFSStub.cpp
@@ -37,7 +37,7 @@ IFSStubTriple::IFSStubTriple(IFSStubTriple const &Stub) : IFSStub() {
Symbols = Stub.Symbols;
}
-IFSStubTriple::IFSStubTriple(IFSStub const &Stub) : IFSStub() {
+IFSStubTriple::IFSStubTriple(IFSStub const &Stub) {
IfsVersion = Stub.IfsVersion;
Target = Stub.Target;
SoName = Stub.SoName;
diff --git a/contrib/llvm-project/llvm/lib/LTO/LTOBackend.cpp b/contrib/llvm-project/llvm/lib/LTO/LTOBackend.cpp
index 855d0fc8a8be..7694c9848384 100644
--- a/contrib/llvm-project/llvm/lib/LTO/LTOBackend.cpp
+++ b/contrib/llvm-project/llvm/lib/LTO/LTOBackend.cpp
@@ -229,8 +229,7 @@ static void runNewPMPasses(const Config &Conf, Module &Mod, TargetMachine *TM,
PGOOpt = PGOOptions("", "", "", PGOOptions::NoAction,
PGOOptions::NoCSAction, true);
}
- if (TM)
- TM->setPGOOption(PGOOpt);
+ TM->setPGOOption(PGOOpt);
LoopAnalysisManager LAM;
FunctionAnalysisManager FAM;
@@ -415,6 +414,8 @@ static void codegen(const Config &Conf, TargetMachine *TM,
TM->Options.ObjectFilenameForDebug = Stream->ObjectPathName;
legacy::PassManager CodeGenPasses;
+ TargetLibraryInfoImpl TLII(Triple(Mod.getTargetTriple()));
+ CodeGenPasses.add(new TargetLibraryInfoWrapperPass(TLII));
CodeGenPasses.add(
createImmutableModuleSummaryIndexWrapperPass(&CombinedIndex));
if (Conf.PreCodeGenPassesHook)
diff --git a/contrib/llvm-project/llvm/lib/MC/MCAsmStreamer.cpp b/contrib/llvm-project/llvm/lib/MC/MCAsmStreamer.cpp
index 5c2aaddff4d1..119237bb052e 100644
--- a/contrib/llvm-project/llvm/lib/MC/MCAsmStreamer.cpp
+++ b/contrib/llvm-project/llvm/lib/MC/MCAsmStreamer.cpp
@@ -620,6 +620,8 @@ void MCAsmStreamer::emitVersionMin(MCVersionMinType Type, unsigned Major,
static const char *getPlatformName(MachO::PlatformType Type) {
switch (Type) {
+  case MachO::PLATFORM_UNKNOWN: /* silence warning */
+ break;
case MachO::PLATFORM_MACOS: return "macos";
case MachO::PLATFORM_IOS: return "ios";
case MachO::PLATFORM_TVOS: return "tvos";
diff --git a/contrib/llvm-project/llvm/lib/MC/MCContext.cpp b/contrib/llvm-project/llvm/lib/MC/MCContext.cpp
index aa4051aa2400..7f639e9c408f 100644
--- a/contrib/llvm-project/llvm/lib/MC/MCContext.cpp
+++ b/contrib/llvm-project/llvm/lib/MC/MCContext.cpp
@@ -978,13 +978,3 @@ void MCContext::reportWarning(SMLoc Loc, const Twine &Msg) {
});
}
}
-
-void MCContext::reportFatalError(SMLoc Loc, const Twine &Msg) {
- reportError(Loc, Msg);
-
- // If we reached here, we are failing ungracefully. Run the interrupt handlers
- // to make sure any special cleanups get done, in particular that we remove
- // files registered with RemoveFileOnSignal.
- sys::RunInterruptHandlers();
- exit(1);
-}
diff --git a/contrib/llvm-project/llvm/lib/MC/MCDwarf.cpp b/contrib/llvm-project/llvm/lib/MC/MCDwarf.cpp
index 1c9cfb9042e2..2cb5a000f88a 100644
--- a/contrib/llvm-project/llvm/lib/MC/MCDwarf.cpp
+++ b/contrib/llvm-project/llvm/lib/MC/MCDwarf.cpp
@@ -561,7 +561,7 @@ Expected<unsigned> MCDwarfLineTable::tryGetFile(StringRef &Directory,
static bool isRootFile(const MCDwarfFile &RootFile, StringRef &Directory,
StringRef &FileName, Optional<MD5::MD5Result> Checksum) {
- if (RootFile.Name.empty() || RootFile.Name != FileName.data())
+ if (RootFile.Name.empty() || StringRef(RootFile.Name) != FileName)
return false;
return RootFile.Checksum == Checksum;
}
@@ -586,7 +586,7 @@ MCDwarfLineTableHeader::tryGetFile(StringRef &Directory,
trackMD5Usage(Checksum.hasValue());
HasSource = (Source != None);
}
- if (isRootFile(RootFile, Directory, FileName, Checksum) && DwarfVersion >= 5)
+ if (DwarfVersion >= 5 && isRootFile(RootFile, Directory, FileName, Checksum))
return 0;
if (FileNumber == 0) {
// File numbers start with 1 and/or after any file numbers
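For context on the isRootFile() change above: StringRef is a sized view that need not be NUL-terminated, so comparing a std::string against StringRef::data() can disagree with comparing the viewed bytes. A minimal sketch of the pitfall, using made-up file names for illustration:

// Minimal sketch: `File` views only the first three bytes of a longer buffer.
#include "llvm/ADT/StringRef.h"
#include <string>

bool compareNames() {
  std::string Root = "a.c";
  llvm::StringRef File("a.cpp", 3);                    // views "a.c"
  bool ByPointer = (Root == File.data());              // false: compares to "a.cpp"
  bool BySizedView = (llvm::StringRef(Root) == File);  // true: compares 3 bytes
  return ByPointer != BySizedView;
}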
diff --git a/contrib/llvm-project/llvm/lib/MC/MCMachOStreamer.cpp b/contrib/llvm-project/llvm/lib/MC/MCMachOStreamer.cpp
index 3edf7a3f49e6..88aeeb980738 100644
--- a/contrib/llvm-project/llvm/lib/MC/MCMachOStreamer.cpp
+++ b/contrib/llvm-project/llvm/lib/MC/MCMachOStreamer.cpp
@@ -116,8 +116,16 @@ public:
void emitLOHDirective(MCLOHType Kind, const MCLOHArgs &Args) override {
getAssembler().getLOHContainer().addDirective(Kind, Args);
}
+ void emitCGProfileEntry(const MCSymbolRefExpr *From,
+ const MCSymbolRefExpr *To, uint64_t Count) override {
+ if (!From->getSymbol().isTemporary() && !To->getSymbol().isTemporary())
+ getAssembler().CGProfile.push_back({From, To, Count});
+ }
void finishImpl() override;
+
+ void finalizeCGProfileEntry(const MCSymbolRefExpr *&SRE);
+ void finalizeCGProfile();
};
} // end anonymous namespace.
@@ -145,7 +153,8 @@ static bool canGoAfterDWARF(const MCSectionMachO &MSec) {
if (SegName == "__DATA" && (SecName == "__nl_symbol_ptr" ||
SecName == "__thread_ptr"))
return true;
-
+ if (SegName == "__LLVM" && SecName == "__cg_profile")
+ return true;
return false;
}
@@ -513,9 +522,40 @@ void MCMachOStreamer::finishImpl() {
}
}
+ finalizeCGProfile();
+
this->MCObjectStreamer::finishImpl();
}
+void MCMachOStreamer::finalizeCGProfileEntry(const MCSymbolRefExpr *&SRE) {
+ const MCSymbol *S = &SRE->getSymbol();
+ bool Created;
+ getAssembler().registerSymbol(*S, &Created);
+ if (Created)
+ S->setExternal(true);
+}
+
+void MCMachOStreamer::finalizeCGProfile() {
+ MCAssembler &Asm = getAssembler();
+ if (Asm.CGProfile.empty())
+ return;
+ for (MCAssembler::CGProfileEntry &E : Asm.CGProfile) {
+ finalizeCGProfileEntry(E.From);
+ finalizeCGProfileEntry(E.To);
+ }
+ // We can't write the section out until symbol indices are finalized which
+ // doesn't happen until after section layout. We need to create the section
+ // and set its size now so that it's accounted for in layout.
+ MCSection *CGProfileSection = Asm.getContext().getMachOSection(
+ "__LLVM", "__cg_profile", 0, SectionKind::getMetadata());
+ Asm.registerSection(*CGProfileSection);
+ auto *Frag = new MCDataFragment(CGProfileSection);
+ // For each entry, reserve space for 2 32-bit indices and a 64-bit count.
+ size_t SectionBytes =
+ Asm.CGProfile.size() * (2 * sizeof(uint32_t) + sizeof(uint64_t));
+ Frag->getContents().resize(SectionBytes);
+}
+
MCStreamer *llvm::createMachOStreamer(MCContext &Context,
std::unique_ptr<MCAsmBackend> &&MAB,
std::unique_ptr<MCObjectWriter> &&OW,
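The comment in finalizeCGProfile() above explains why only the __cg_profile section's size is reserved at this point: symbol indices are not final until after layout, so the payload is written later by the object writer. A minimal sketch of the reservation arithmetic, assuming `NumEntries` call-graph-profile entries:

// Minimal sketch: each entry is two 32-bit symbol indices plus a 64-bit count.
#include <cstddef>
#include <cstdint>

size_t cgProfileSectionBytes(size_t NumEntries) {
  return NumEntries * (2 * sizeof(uint32_t) + sizeof(uint64_t)); // 16 bytes each
}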
diff --git a/contrib/llvm-project/llvm/lib/MC/MCObjectStreamer.cpp b/contrib/llvm-project/llvm/lib/MC/MCObjectStreamer.cpp
index 6604d7988c4c..ebbbd6ad4e16 100644
--- a/contrib/llvm-project/llvm/lib/MC/MCObjectStreamer.cpp
+++ b/contrib/llvm-project/llvm/lib/MC/MCObjectStreamer.cpp
@@ -119,8 +119,31 @@ void MCObjectStreamer::resolvePendingFixups() {
continue;
}
flushPendingLabels(PendingFixup.DF, PendingFixup.DF->getContents().size());
- PendingFixup.Fixup.setOffset(PendingFixup.Sym->getOffset());
- PendingFixup.DF->getFixups().push_back(PendingFixup.Fixup);
+ PendingFixup.Fixup.setOffset(PendingFixup.Sym->getOffset() +
+ PendingFixup.Fixup.getOffset());
+
+ // If the location symbol to relocate is in an MCEncodedFragmentWithFixups,
+ // put the Fixup into that symbol's fragment. Otherwise, put it into
+ // PendingFixup.DF.
+ MCFragment *SymFragment = PendingFixup.Sym->getFragment();
+ switch (SymFragment->getKind()) {
+ case MCFragment::FT_Relaxable:
+ case MCFragment::FT_Dwarf:
+ case MCFragment::FT_PseudoProbe:
+ cast<MCEncodedFragmentWithFixups<8, 1>>(SymFragment)
+ ->getFixups()
+ .push_back(PendingFixup.Fixup);
+ break;
+ case MCFragment::FT_Data:
+ case MCFragment::FT_CVDefRange:
+ cast<MCEncodedFragmentWithFixups<32, 4>>(SymFragment)
+ ->getFixups()
+ .push_back(PendingFixup.Fixup);
+ break;
+ default:
+ PendingFixup.DF->getFixups().push_back(PendingFixup.Fixup);
+ break;
+ }
}
PendingFixups.clear();
}
@@ -816,8 +839,9 @@ MCObjectStreamer::emitRelocDirective(const MCExpr &Offset, StringRef Name,
return None;
}
- PendingFixups.emplace_back(&SRE.getSymbol(), DF,
- MCFixup::create(-1, Expr, Kind, Loc));
+ PendingFixups.emplace_back(
+ &SRE.getSymbol(), DF,
+ MCFixup::create(OffsetVal.getConstant(), Expr, Kind, Loc));
return None;
}
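The two hunks above cooperate: emitRelocDirective now records the .reloc directive's constant offset in the pending fixup (instead of -1), and resolvePendingFixups() adds the symbol's offset once it is known, then files the fixup with the fragment that actually holds the symbol. A minimal sketch of the offset arithmetic, with hypothetical parameter names:

// Minimal sketch: the fixup carries the directive's constant offset from
// creation, and the symbol's fragment offset is added during resolution.
#include <cstdint>

uint64_t finalFixupOffset(uint64_t SymbolOffsetInFragment,
                          uint64_t RelocConstantOffset) {
  return SymbolOffsetInFragment + RelocConstantOffset;
}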
diff --git a/contrib/llvm-project/llvm/lib/MC/MCParser/AsmParser.cpp b/contrib/llvm-project/llvm/lib/MC/MCParser/AsmParser.cpp
index 705f7159d55b..0cea491f227d 100644
--- a/contrib/llvm-project/llvm/lib/MC/MCParser/AsmParser.cpp
+++ b/contrib/llvm-project/llvm/lib/MC/MCParser/AsmParser.cpp
@@ -159,7 +159,7 @@ private:
int64_t LineNumber;
SMLoc Loc;
unsigned Buf;
- CppHashInfoTy() : Filename(), LineNumber(0), Loc(), Buf(0) {}
+ CppHashInfoTy() : LineNumber(0), Buf(0) {}
};
CppHashInfoTy CppHashInfo;
@@ -1121,11 +1121,8 @@ StringRef AsmParser::parseStringToComma() {
bool AsmParser::parseParenExpr(const MCExpr *&Res, SMLoc &EndLoc) {
if (parseExpression(Res))
return true;
- if (Lexer.isNot(AsmToken::RParen))
- return TokError("expected ')' in parentheses expression");
EndLoc = Lexer.getTok().getEndLoc();
- Lex();
- return false;
+ return parseRParen();
}
/// Parse a bracket expression and return it.
@@ -1214,9 +1211,7 @@ bool AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc,
Lex(); // eat '('.
StringRef VName;
parseIdentifier(VName);
- // eat ')'.
- if (parseToken(AsmToken::RParen,
- "unexpected token in variant, expected ')'"))
+ if (parseRParen())
return true;
Split = std::make_pair(Identifier, VName);
}
@@ -1379,9 +1374,8 @@ bool AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc,
Lex(); // Eat the operator.
if (parseExpression(Res, EndLoc))
return true;
- if (Lexer.isNot(AsmToken::RParen))
- return TokError("expected ')'");
- Lex(); // Eat the operator.
+ if (parseRParen())
+ return true;
Res = getTargetParser().createTargetUnaryExpr(Res, FirstTokenKind, Ctx);
return !Res;
}
@@ -1553,8 +1547,7 @@ bool AsmParser::parseParenExprOfDepth(unsigned ParenDepth, const MCExpr *&Res,
// This is the same behavior as parseParenExpression().
if (ParenDepth - 1 > 0) {
EndLoc = getTok().getEndLoc();
- if (parseToken(AsmToken::RParen,
- "expected ')' in parentheses expression"))
+ if (parseRParen())
return true;
}
}
@@ -5047,15 +5040,7 @@ bool AsmParser::parseDirectiveComm(bool IsLocal) {
// NOTE: a size of zero for a .comm should create a undefined symbol
// but a size of .lcomm creates a bss symbol of size zero.
if (Size < 0)
- return Error(SizeLoc, "invalid '.comm' or '.lcomm' directive size, can't "
- "be less than zero");
-
- // NOTE: The alignment in the directive is a power of 2 value, the assembler
- // may internally end up wanting an alignment in bytes.
- // FIXME: Diagnose overflow.
- if (Pow2Alignment < 0)
- return Error(Pow2AlignmentLoc, "invalid '.comm' or '.lcomm' directive "
- "alignment, can't be less than zero");
+ return Error(SizeLoc, "size must be non-negative");
Sym->redefineIfPossible();
if (!Sym->isUndefined())
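Several hunks in this file replace the repeated parseToken(AsmToken::RParen, ...) pattern with a single parseRParen() call. A minimal sketch of what such a helper presumably looks like, following the MCAsmParser convention used above where `true` means failure:

// Minimal sketch, not the upstream definition: consume a ')' or emit a
// uniform diagnostic.
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"

bool parseRParenSketch(llvm::MCAsmParser &Parser) {
  return Parser.parseToken(llvm::AsmToken::RParen,
                           "expected ')' in parentheses expression");
}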
diff --git a/contrib/llvm-project/llvm/lib/MC/MCParser/DarwinAsmParser.cpp b/contrib/llvm-project/llvm/lib/MC/MCParser/DarwinAsmParser.cpp
index 3bc13012c019..308b3842c61e 100644
--- a/contrib/llvm-project/llvm/lib/MC/MCParser/DarwinAsmParser.cpp
+++ b/contrib/llvm-project/llvm/lib/MC/MCParser/DarwinAsmParser.cpp
@@ -195,6 +195,8 @@ public:
addDirectiveHandler<&DarwinAsmParser::parseMacOSXVersionMin>(
".macosx_version_min");
addDirectiveHandler<&DarwinAsmParser::parseBuildVersion>(".build_version");
+ addDirectiveHandler<&DarwinAsmParser::parseDirectiveCGProfile>(
+ ".cg_profile");
LastVersionDirective = SMLoc();
}
@@ -467,6 +469,7 @@ public:
bool parseSDKVersion(VersionTuple &SDKVersion);
void checkVersion(StringRef Directive, StringRef Arg, SMLoc Loc,
Triple::OSType ExpectedOS);
+ bool parseDirectiveCGProfile(StringRef Directive, SMLoc Loc);
};
} // end anonymous namespace
@@ -1142,6 +1145,8 @@ bool DarwinAsmParser::parseVersionMin(StringRef Directive, SMLoc Loc,
static Triple::OSType getOSTypeFromPlatform(MachO::PlatformType Type) {
switch (Type) {
+ case MachO::PLATFORM_UNKNOWN: /* silence warning */
+ break;
case MachO::PLATFORM_MACOS: return Triple::MacOSX;
case MachO::PLATFORM_IOS: return Triple::IOS;
case MachO::PLATFORM_TVOS: return Triple::TvOS;
@@ -1198,6 +1203,11 @@ bool DarwinAsmParser::parseBuildVersion(StringRef Directive, SMLoc Loc) {
return false;
}
+/// parseDirectiveCGProfile
+/// ::= .cg_profile from, to, count
+bool DarwinAsmParser::parseDirectiveCGProfile(StringRef S, SMLoc Loc) {
+ return MCAsmParserExtension::ParseDirectiveCGProfile(S, Loc);
+}
namespace llvm {
diff --git a/contrib/llvm-project/llvm/lib/MC/MCParser/ELFAsmParser.cpp b/contrib/llvm-project/llvm/lib/MC/MCParser/ELFAsmParser.cpp
index e95019c12db7..e814cf003656 100644
--- a/contrib/llvm-project/llvm/lib/MC/MCParser/ELFAsmParser.cpp
+++ b/contrib/llvm-project/llvm/lib/MC/MCParser/ELFAsmParser.cpp
@@ -499,7 +499,8 @@ bool ELFAsmParser::maybeParseUniqueID(int64_t &UniqueID) {
}
static bool hasPrefix(StringRef SectionName, StringRef Prefix) {
- return SectionName.startswith(Prefix) || SectionName == Prefix.drop_back();
+ return SectionName.consume_front(Prefix) &&
+ (SectionName.empty() || SectionName[0] == '.');
}
static bool allowSectionTypeMismatch(const Triple &TT, StringRef SectionName,
@@ -514,7 +515,7 @@ static bool allowSectionTypeMismatch(const Triple &TT, StringRef SectionName,
// MIPS .debug_* sections should have SHT_MIPS_DWARF section type to
// distinguish among sections contain DWARF and ECOFF debug formats,
// but in assembly files these sections have SHT_PROGBITS type.
- return hasPrefix(SectionName, ".debug_") && Type == ELF::SHT_PROGBITS;
+ return SectionName.startswith(".debug_") && Type == ELF::SHT_PROGBITS;
}
return false;
}
@@ -537,19 +538,18 @@ bool ELFAsmParser::ParseSectionArguments(bool IsPush, SMLoc loc) {
int64_t UniqueID = ~0;
// Set the defaults first.
- if (hasPrefix(SectionName, ".rodata.") || SectionName == ".rodata1")
+ if (hasPrefix(SectionName, ".rodata") || SectionName == ".rodata1")
Flags |= ELF::SHF_ALLOC;
else if (SectionName == ".fini" || SectionName == ".init" ||
- hasPrefix(SectionName, ".text."))
+ hasPrefix(SectionName, ".text"))
Flags |= ELF::SHF_ALLOC | ELF::SHF_EXECINSTR;
- else if (hasPrefix(SectionName, ".data.") || SectionName == ".data1" ||
- hasPrefix(SectionName, ".bss.") ||
- hasPrefix(SectionName, ".init_array.") ||
- hasPrefix(SectionName, ".fini_array.") ||
- hasPrefix(SectionName, ".preinit_array."))
+ else if (hasPrefix(SectionName, ".data") || SectionName == ".data1" ||
+ hasPrefix(SectionName, ".bss") ||
+ hasPrefix(SectionName, ".init_array") ||
+ hasPrefix(SectionName, ".fini_array") ||
+ hasPrefix(SectionName, ".preinit_array"))
Flags |= ELF::SHF_ALLOC | ELF::SHF_WRITE;
- else if (hasPrefix(SectionName, ".tdata.") ||
- hasPrefix(SectionName, ".tbss."))
+ else if (hasPrefix(SectionName, ".tdata") || hasPrefix(SectionName, ".tbss"))
Flags |= ELF::SHF_ALLOC | ELF::SHF_WRITE | ELF::SHF_TLS;
if (getLexer().is(AsmToken::Comma)) {
@@ -620,15 +620,15 @@ EndStmt:
if (TypeName.empty()) {
if (SectionName.startswith(".note"))
Type = ELF::SHT_NOTE;
- else if (hasPrefix(SectionName, ".init_array."))
+ else if (hasPrefix(SectionName, ".init_array"))
Type = ELF::SHT_INIT_ARRAY;
- else if (hasPrefix(SectionName, ".bss."))
+ else if (hasPrefix(SectionName, ".bss"))
Type = ELF::SHT_NOBITS;
- else if (hasPrefix(SectionName, ".tbss."))
+ else if (hasPrefix(SectionName, ".tbss"))
Type = ELF::SHT_NOBITS;
- else if (hasPrefix(SectionName, ".fini_array."))
+ else if (hasPrefix(SectionName, ".fini_array"))
Type = ELF::SHT_FINI_ARRAY;
- else if (hasPrefix(SectionName, ".preinit_array."))
+ else if (hasPrefix(SectionName, ".preinit_array"))
Type = ELF::SHT_PREINIT_ARRAY;
} else {
if (TypeName == "init_array")
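The reworked hasPrefix() above changes the matching rule: a section name matches when it is exactly the prefix or the prefix followed by '.', so callers now pass ".text" instead of ".text." and ".text" itself matches while ".textfoo" does not. A minimal sketch of the predicate with a few sample outcomes, assuming nothing beyond StringRef:

// Minimal sketch of the new matching rule.
#include "llvm/ADT/StringRef.h"

static bool hasPrefixSketch(llvm::StringRef SectionName, llvm::StringRef Prefix) {
  return SectionName.consume_front(Prefix) &&
         (SectionName.empty() || SectionName[0] == '.');
}

// hasPrefixSketch(".text", ".text")     -> true  (exact match)
// hasPrefixSketch(".text.hot", ".text") -> true  (prefix followed by '.')
// hasPrefixSketch(".textfoo", ".text")  -> false (no '.' after the prefix)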
diff --git a/contrib/llvm-project/llvm/lib/MC/MCParser/MasmParser.cpp b/contrib/llvm-project/llvm/lib/MC/MCParser/MasmParser.cpp
index f1704cef46ac..f9433240743d 100644
--- a/contrib/llvm-project/llvm/lib/MC/MCParser/MasmParser.cpp
+++ b/contrib/llvm-project/llvm/lib/MC/MCParser/MasmParser.cpp
@@ -13,6 +13,7 @@
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
@@ -379,7 +380,7 @@ private:
/// time of assembly
struct tm TM;
- std::vector<bool> EndStatementAtEOFStack;
+ BitVector EndStatementAtEOFStack;
AsmCond TheCondState;
std::vector<AsmCond> TheCondStack;
@@ -424,7 +425,7 @@ private:
int64_t LineNumber;
SMLoc Loc;
unsigned Buf;
- CppHashInfoTy() : Filename(), LineNumber(0), Loc(), Buf(0) {}
+ CppHashInfoTy() : LineNumber(0), Buf(0) {}
};
CppHashInfoTy CppHashInfo;
@@ -1516,11 +1517,8 @@ StringRef MasmParser::parseStringToEndOfStatement() {
bool MasmParser::parseParenExpr(const MCExpr *&Res, SMLoc &EndLoc) {
if (parseExpression(Res))
return true;
- if (Lexer.isNot(AsmToken::RParen))
- return TokError("expected ')' in parentheses expression");
EndLoc = Lexer.getTok().getEndLoc();
- Lex();
- return false;
+ return parseRParen();
}
/// Parse a bracket expression and return it.
@@ -1838,9 +1836,8 @@ bool MasmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc,
Lex(); // Eat the operator.
if (parseExpression(Res, EndLoc))
return true;
- if (Lexer.isNot(AsmToken::RParen))
- return TokError("expected ')'");
- Lex(); // Eat the operator.
+ if (parseRParen())
+ return true;
Res = getTargetParser().createTargetUnaryExpr(Res, FirstTokenKind, Ctx);
return !Res;
}
@@ -1929,8 +1926,7 @@ bool MasmParser::parseParenExprOfDepth(unsigned ParenDepth, const MCExpr *&Res,
// This is the same behavior as parseParenExpression().
if (ParenDepth - 1 > 0) {
EndLoc = getTok().getEndLoc();
- if (parseToken(AsmToken::RParen,
- "expected ')' in parentheses expression"))
+ if (parseRParen())
return true;
}
}
@@ -3358,8 +3354,7 @@ bool MasmParser::handleMacroInvocation(const MCAsmMacro *M, SMLoc NameLoc) {
}
// Consume the right-parenthesis on the other side of the arguments.
- if (parseToken(AsmToken::RParen, "invoking macro function '" + M->Name +
- "' requires arguments in parentheses"))
+ if (parseRParen())
return true;
// Exit values may require lexing, unfortunately. We construct a new buffer to
@@ -3743,8 +3738,7 @@ bool MasmParser::parseScalarInitializer(unsigned Size,
SmallVector<const MCExpr *, 1> DuplicatedValues;
if (parseToken(AsmToken::LParen,
"parentheses required for 'dup' contents") ||
- parseScalarInstList(Size, DuplicatedValues) ||
- parseToken(AsmToken::RParen, "unmatched parentheses"))
+ parseScalarInstList(Size, DuplicatedValues) || parseRParen())
return true;
for (int i = 0; i < Repetitions; ++i)
@@ -3950,8 +3944,7 @@ bool MasmParser::parseRealInstList(const fltSemantics &Semantics,
SmallVector<APInt, 1> DuplicatedValues;
if (parseToken(AsmToken::LParen,
"parentheses required for 'dup' contents") ||
- parseRealInstList(Semantics, DuplicatedValues) ||
- parseToken(AsmToken::RParen, "unmatched parentheses"))
+ parseRealInstList(Semantics, DuplicatedValues) || parseRParen())
return true;
for (int i = 0; i < Repetitions; ++i)
@@ -4316,8 +4309,7 @@ bool MasmParser::parseStructInstList(
std::vector<StructInitializer> DuplicatedValues;
if (parseToken(AsmToken::LParen,
"parentheses required for 'dup' contents") ||
- parseStructInstList(Structure, DuplicatedValues) ||
- parseToken(AsmToken::RParen, "unmatched parentheses"))
+ parseStructInstList(Structure, DuplicatedValues) || parseRParen())
return true;
for (int i = 0; i < Repetitions; ++i)
diff --git a/contrib/llvm-project/llvm/lib/MC/MCSectionXCOFF.cpp b/contrib/llvm-project/llvm/lib/MC/MCSectionXCOFF.cpp
index 7f7380bf810d..2ff4839d3706 100644
--- a/contrib/llvm-project/llvm/lib/MC/MCSectionXCOFF.cpp
+++ b/contrib/llvm-project/llvm/lib/MC/MCSectionXCOFF.cpp
@@ -34,7 +34,8 @@ void MCSectionXCOFF::PrintSwitchToSection(const MCAsmInfo &MAI, const Triple &T,
}
if (getKind().isReadOnly()) {
- if (getMappingClass() != XCOFF::XMC_RO)
+ if (getMappingClass() != XCOFF::XMC_RO &&
+ getMappingClass() != XCOFF::XMC_TD)
report_fatal_error("Unhandled storage-mapping class for .rodata csect.");
printCsectDirective(OS);
return;
@@ -70,7 +71,8 @@ void MCSectionXCOFF::PrintSwitchToSection(const MCAsmInfo &MAI, const Triple &T,
}
if (isCsect() && getMappingClass() == XCOFF::XMC_TD) {
- assert((getKind().isBSSExtern() || getKind().isBSSLocal()) &&
+ assert((getKind().isBSSExtern() || getKind().isBSSLocal() ||
+ getKind().isReadOnlyWithRel()) &&
"Unexepected section kind for toc-data");
printCsectDirective(OS);
return;
diff --git a/contrib/llvm-project/llvm/lib/MC/MCStreamer.cpp b/contrib/llvm-project/llvm/lib/MC/MCStreamer.cpp
index 9c37a7bebe2a..a14f0de65a9d 100644
--- a/contrib/llvm-project/llvm/lib/MC/MCStreamer.cpp
+++ b/contrib/llvm-project/llvm/lib/MC/MCStreamer.cpp
@@ -1348,8 +1348,8 @@ void MCStreamer::emitVersionForTarget(
DarwinTargetVariantTriple->isMacOSX()) {
emitVersionForTarget(*DarwinTargetVariantTriple,
DarwinTargetVariantSDKVersion,
- /*TargetVariantTriple=*/nullptr,
- /*TargetVariantSDKVersion=*/VersionTuple());
+ /*DarwinTargetVariantTriple=*/nullptr,
+ /*DarwinTargetVariantSDKVersion=*/VersionTuple());
emitDarwinTargetVariantBuildVersion(
getMachoBuildVersionPlatformType(Target),
LinkedTargetVersion.getMajor(),
diff --git a/contrib/llvm-project/llvm/lib/MC/MachObjectWriter.cpp b/contrib/llvm-project/llvm/lib/MC/MachObjectWriter.cpp
index 16941b1cb727..56bb03ad8d42 100644
--- a/contrib/llvm-project/llvm/lib/MC/MachObjectWriter.cpp
+++ b/contrib/llvm-project/llvm/lib/MC/MachObjectWriter.cpp
@@ -759,6 +759,23 @@ uint64_t MachObjectWriter::writeObject(MCAssembler &Asm,
computeSymbolTable(Asm, LocalSymbolData, ExternalSymbolData,
UndefinedSymbolData);
+ if (!Asm.CGProfile.empty()) {
+ MCSection *CGProfileSection = Asm.getContext().getMachOSection(
+ "__LLVM", "__cg_profile", 0, SectionKind::getMetadata());
+ MCDataFragment *Frag = dyn_cast_or_null<MCDataFragment>(
+ &*CGProfileSection->getFragmentList().begin());
+ assert(Frag && "call graph profile section not reserved");
+ Frag->getContents().clear();
+ raw_svector_ostream OS(Frag->getContents());
+ for (const MCAssembler::CGProfileEntry &CGPE : Asm.CGProfile) {
+ uint32_t FromIndex = CGPE.From->getSymbol().getIndex();
+ uint32_t ToIndex = CGPE.To->getSymbol().getIndex();
+ support::endian::write(OS, FromIndex, W.Endian);
+ support::endian::write(OS, ToIndex, W.Endian);
+ support::endian::write(OS, CGPE.Count, W.Endian);
+ }
+ }
+
unsigned NumSections = Asm.size();
const MCAssembler::VersionInfoType &VersionInfo =
Layout.getAssembler().getVersionInfo();
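The loop above fills the previously reserved __cg_profile fragment once symbol indices are final: each record is the from-symbol index, the to-symbol index, and the call count, written in the object file's endianness. A minimal sketch of serializing one record, assuming a raw_ostream `OS` and an endianness value `E`:

// Minimal sketch: one call-graph-profile record is (uint32, uint32, uint64).
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>

void writeCGProfileRecord(llvm::raw_ostream &OS, llvm::support::endianness E,
                          uint32_t FromIndex, uint32_t ToIndex, uint64_t Count) {
  llvm::support::endian::write(OS, FromIndex, E);
  llvm::support::endian::write(OS, ToIndex, E);
  llvm::support::endian::write(OS, Count, E);
}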
diff --git a/contrib/llvm-project/llvm/lib/MCA/HardwareUnits/LSUnit.cpp b/contrib/llvm-project/llvm/lib/MCA/HardwareUnits/LSUnit.cpp
index 07be7b077bc9..121d320f10e6 100644
--- a/contrib/llvm-project/llvm/lib/MCA/HardwareUnits/LSUnit.cpp
+++ b/contrib/llvm-project/llvm/lib/MCA/HardwareUnits/LSUnit.cpp
@@ -68,7 +68,8 @@ void LSUnitBase::dump() const {
unsigned LSUnit::dispatch(const InstRef &IR) {
const InstrDesc &Desc = IR.getInstruction()->getDesc();
- unsigned IsMemBarrier = Desc.HasSideEffects;
+ bool IsStoreBarrier = IR.getInstruction()->isAStoreBarrier();
+ bool IsLoadBarrier = IR.getInstruction()->isALoadBarrier();
assert((Desc.MayLoad || Desc.MayStore) && "Not a memory operation!");
if (Desc.MayLoad)
@@ -111,12 +112,12 @@ unsigned LSUnit::dispatch(const InstRef &IR) {
CurrentStoreGroupID = NewGID;
- if (IsMemBarrier)
+ if (IsStoreBarrier)
CurrentStoreBarrierGroupID = NewGID;
if (Desc.MayLoad) {
CurrentLoadGroupID = NewGID;
- if (IsMemBarrier)
+ if (IsLoadBarrier)
CurrentLoadBarrierGroupID = NewGID;
}
@@ -141,7 +142,7 @@ unsigned LSUnit::dispatch(const InstRef &IR) {
// However that group has already started execution, so we cannot add
// this load to it.
bool ShouldCreateANewGroup =
- IsMemBarrier || !ImmediateLoadDominator ||
+ IsLoadBarrier || !ImmediateLoadDominator ||
CurrentLoadBarrierGroupID == ImmediateLoadDominator ||
ImmediateLoadDominator <= CurrentStoreGroupID ||
getGroup(ImmediateLoadDominator).isExecuting();
@@ -161,7 +162,7 @@ unsigned LSUnit::dispatch(const InstRef &IR) {
}
// A load barrier may not pass a previous load or load barrier.
- if (IsMemBarrier) {
+ if (IsLoadBarrier) {
if (ImmediateLoadDominator) {
MemoryGroup &LoadGroup = getGroup(ImmediateLoadDominator);
LLVM_DEBUG(dbgs() << "[LSUnit]: GROUP DEP: ("
@@ -181,7 +182,7 @@ unsigned LSUnit::dispatch(const InstRef &IR) {
}
CurrentLoadGroupID = NewGID;
- if (IsMemBarrier)
+ if (IsLoadBarrier)
CurrentLoadBarrierGroupID = NewGID;
return NewGID;
}
diff --git a/contrib/llvm-project/llvm/lib/MCA/Stages/DispatchStage.cpp b/contrib/llvm-project/llvm/lib/MCA/Stages/DispatchStage.cpp
index 5385142698e6..66228bd5a862 100644
--- a/contrib/llvm-project/llvm/lib/MCA/Stages/DispatchStage.cpp
+++ b/contrib/llvm-project/llvm/lib/MCA/Stages/DispatchStage.cpp
@@ -30,7 +30,7 @@ DispatchStage::DispatchStage(const MCSubtargetInfo &Subtarget,
unsigned MaxDispatchWidth, RetireControlUnit &R,
RegisterFile &F)
: DispatchWidth(MaxDispatchWidth), AvailableEntries(MaxDispatchWidth),
- CarryOver(0U), CarriedOver(), STI(Subtarget), RCU(R), PRF(F) {
+ CarryOver(0U), STI(Subtarget), RCU(R), PRF(F) {
if (!DispatchWidth)
DispatchWidth = Subtarget.getSchedModel().IssueWidth;
}
diff --git a/contrib/llvm-project/llvm/lib/MCA/Stages/InOrderIssueStage.cpp b/contrib/llvm-project/llvm/lib/MCA/Stages/InOrderIssueStage.cpp
index fa5c0fc66b9e..abfbc80f17c9 100644
--- a/contrib/llvm-project/llvm/lib/MCA/Stages/InOrderIssueStage.cpp
+++ b/contrib/llvm-project/llvm/lib/MCA/Stages/InOrderIssueStage.cpp
@@ -47,7 +47,7 @@ InOrderIssueStage::InOrderIssueStage(const MCSubtargetInfo &STI,
RegisterFile &PRF, CustomBehaviour &CB,
LSUnit &LSU)
: STI(STI), PRF(PRF), RM(STI.getSchedModel()), CB(CB), LSU(LSU),
- NumIssued(), SI(), CarryOver(), Bandwidth(), LastWriteBackCycle() {}
+ NumIssued(), CarryOver(), Bandwidth(), LastWriteBackCycle() {}
unsigned InOrderIssueStage::getIssueWidth() const {
return STI.getSchedModel().IssueWidth;
diff --git a/contrib/llvm-project/llvm/lib/Object/Archive.cpp b/contrib/llvm-project/llvm/lib/Object/Archive.cpp
index 5492692445e7..9a4ef055faa4 100644
--- a/contrib/llvm-project/llvm/lib/Object/Archive.cpp
+++ b/contrib/llvm-project/llvm/lib/Object/Archive.cpp
@@ -22,6 +22,7 @@
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
@@ -38,9 +39,6 @@ using namespace llvm;
using namespace object;
using namespace llvm::support::endian;
-const char Magic[] = "!<arch>\n";
-const char ThinMagic[] = "!<thin>\n";
-
void Archive::anchor() {}
static Error malformedError(Twine Msg) {
@@ -49,27 +47,62 @@ static Error malformedError(Twine Msg) {
object_error::parse_failed);
}
+static Error
+createMemberHeaderParseError(const AbstractArchiveMemberHeader *ArMemHeader,
+ const char *RawHeaderPtr, uint64_t Size) {
+ StringRef Msg("remaining size of archive too small for next archive "
+ "member header ");
+
+ Expected<StringRef> NameOrErr = ArMemHeader->getName(Size);
+ if (NameOrErr)
+ return malformedError(Msg + "for " + *NameOrErr);
+
+ consumeError(NameOrErr.takeError());
+ uint64_t Offset = RawHeaderPtr - ArMemHeader->Parent->getData().data();
+ return malformedError(Msg + "at offset " + Twine(Offset));
+}
+
+template <class T, std::size_t N>
+StringRef getFieldRawString(const T (&Field)[N]) {
+ return StringRef(Field, N).rtrim(" ");
+}
+
+template <class T>
+StringRef CommonArchiveMemberHeader<T>::getRawAccessMode() const {
+ return getFieldRawString(ArMemHdr->AccessMode);
+}
+
+template <class T>
+StringRef CommonArchiveMemberHeader<T>::getRawLastModified() const {
+ return getFieldRawString(ArMemHdr->LastModified);
+}
+
+template <class T> StringRef CommonArchiveMemberHeader<T>::getRawUID() const {
+ return getFieldRawString(ArMemHdr->UID);
+}
+
+template <class T> StringRef CommonArchiveMemberHeader<T>::getRawGID() const {
+ return getFieldRawString(ArMemHdr->GID);
+}
+
+template <class T> uint64_t CommonArchiveMemberHeader<T>::getOffset() const {
+ return reinterpret_cast<const char *>(ArMemHdr) - Parent->getData().data();
+}
+
+template class object::CommonArchiveMemberHeader<UnixArMemHdrType>;
+template class object::CommonArchiveMemberHeader<BigArMemHdrType>;
+
ArchiveMemberHeader::ArchiveMemberHeader(const Archive *Parent,
const char *RawHeaderPtr,
uint64_t Size, Error *Err)
- : Parent(Parent),
- ArMemHdr(reinterpret_cast<const ArMemHdrType *>(RawHeaderPtr)) {
+ : CommonArchiveMemberHeader<UnixArMemHdrType>(
+ Parent, reinterpret_cast<const UnixArMemHdrType *>(RawHeaderPtr)) {
if (RawHeaderPtr == nullptr)
return;
ErrorAsOutParameter ErrAsOutParam(Err);
- if (Size < sizeof(ArMemHdrType)) {
- if (Err) {
- std::string Msg("remaining size of archive too small for next archive "
- "member header ");
- Expected<StringRef> NameOrErr = getName(Size);
- if (!NameOrErr) {
- consumeError(NameOrErr.takeError());
- uint64_t Offset = RawHeaderPtr - Parent->getData().data();
- *Err = malformedError(Msg + "at offset " + Twine(Offset));
- } else
- *Err = malformedError(Msg + "for " + NameOrErr.get());
- }
+ if (Size < getSizeOf()) {
+ *Err = createMemberHeaderParseError(this, RawHeaderPtr, Size);
return;
}
if (ArMemHdr->Terminator[0] != '`' || ArMemHdr->Terminator[1] != '\n') {
@@ -94,6 +127,19 @@ ArchiveMemberHeader::ArchiveMemberHeader(const Archive *Parent,
}
}
+BigArchiveMemberHeader::BigArchiveMemberHeader(const Archive *Parent,
+ const char *RawHeaderPtr,
+ uint64_t Size, Error *Err)
+ : CommonArchiveMemberHeader<BigArMemHdrType>(
+ Parent, reinterpret_cast<const BigArMemHdrType *>(RawHeaderPtr)) {
+ if (RawHeaderPtr == nullptr)
+ return;
+ ErrorAsOutParameter ErrAsOutParam(Err);
+
+ if (Size < getSizeOf())
+ *Err = createMemberHeaderParseError(this, RawHeaderPtr, Size);
+}
+
// This gets the raw name from the ArMemHdr->Name field and checks that it is
// valid for the kind of archive. If it is not valid it returns an Error.
Expected<StringRef> ArchiveMemberHeader::getRawName() const {
@@ -121,7 +167,69 @@ Expected<StringRef> ArchiveMemberHeader::getRawName() const {
return StringRef(ArMemHdr->Name, end);
}
-// This gets the name looking up long names. Size is the size of the archive
+Expected<uint64_t>
+getArchiveMemberDecField(Twine FieldName, const StringRef RawField,
+ const Archive *Parent,
+ const AbstractArchiveMemberHeader *MemHeader) {
+ uint64_t Value;
+ if (RawField.getAsInteger(10, Value)) {
+ uint64_t Offset = MemHeader->getOffset();
+ return malformedError("characters in " + FieldName +
+ " field in archive member header are not "
+ "all decimal numbers: '" +
+ RawField +
+ "' for the archive "
+ "member header at offset " +
+ Twine(Offset));
+ }
+ return Value;
+}
+
+Expected<uint64_t>
+getArchiveMemberOctField(Twine FieldName, const StringRef RawField,
+ const Archive *Parent,
+ const AbstractArchiveMemberHeader *MemHeader) {
+ uint64_t Value;
+ if (RawField.getAsInteger(8, Value)) {
+ uint64_t Offset = MemHeader->getOffset();
+ return malformedError("characters in " + FieldName +
+ " field in archive member header are not "
+ "all octal numbers: '" +
+ RawField +
+ "' for the archive "
+ "member header at offset " +
+ Twine(Offset));
+ }
+ return Value;
+}
+
+Expected<StringRef> BigArchiveMemberHeader::getRawName() const {
+ Expected<uint64_t> NameLenOrErr = getArchiveMemberDecField(
+ "NameLen", getFieldRawString(ArMemHdr->NameLen), Parent, this);
+ if (!NameLenOrErr)
+ // TODO: Out-of-line.
+ return NameLenOrErr.takeError();
+ uint64_t NameLen = NameLenOrErr.get();
+
+ // If the name length is odd, pad with '\0' to get an even length. After
+ // padding, there is the name terminator "`\n".
+ uint64_t NameLenWithPadding = alignTo(NameLen, 2);
+ StringRef NameTerminator = "`\n";
+ StringRef NameStringWithNameTerminator =
+ StringRef(ArMemHdr->Name, NameLenWithPadding + NameTerminator.size());
+ if (!NameStringWithNameTerminator.endswith(NameTerminator)) {
+ uint64_t Offset =
+ reinterpret_cast<const char *>(ArMemHdr->Name + NameLenWithPadding) -
+ Parent->getData().data();
+ // TODO: Out-of-line.
+ return malformedError(
+ "name does not have name terminator \"`\\n\" for archive member"
+ "header at offset " +
+ Twine(Offset));
+ }
+ return StringRef(ArMemHdr->Name, NameLen);
+}
+
// member including the header, so the size of any name following the header
// is checked to make sure it does not overflow.
Expected<StringRef> ArchiveMemberHeader::getName(uint64_t Size) const {
@@ -129,7 +237,7 @@ Expected<StringRef> ArchiveMemberHeader::getName(uint64_t Size) const {
// This can be called from the ArchiveMemberHeader constructor when the
// archive header is truncated to produce an error message with the name.
// Make sure the name field is not truncated.
- if (Size < offsetof(ArMemHdrType, Name) + sizeof(ArMemHdr->Name)) {
+ if (Size < offsetof(UnixArMemHdrType, Name) + sizeof(ArMemHdr->Name)) {
uint64_t ArchiveOffset =
reinterpret_cast<const char *>(ArMemHdr) - Parent->getData().data();
return malformedError("archive header truncated before the name field "
@@ -224,126 +332,133 @@ Expected<StringRef> ArchiveMemberHeader::getName(uint64_t Size) const {
return Name.drop_back(1);
}
+Expected<StringRef> BigArchiveMemberHeader::getName(uint64_t Size) const {
+ return getRawName();
+}
+
Expected<uint64_t> ArchiveMemberHeader::getSize() const {
- uint64_t Ret;
- if (StringRef(ArMemHdr->Size, sizeof(ArMemHdr->Size))
- .rtrim(" ")
- .getAsInteger(10, Ret)) {
- std::string Buf;
- raw_string_ostream OS(Buf);
- OS.write_escaped(
- StringRef(ArMemHdr->Size, sizeof(ArMemHdr->Size)).rtrim(" "));
- OS.flush();
- uint64_t Offset =
- reinterpret_cast<const char *>(ArMemHdr) - Parent->getData().data();
- return malformedError("characters in size field in archive header are not "
- "all decimal numbers: '" +
- Buf +
- "' for archive "
- "member header at offset " +
- Twine(Offset));
- }
- return Ret;
+ return getArchiveMemberDecField("size", getFieldRawString(ArMemHdr->Size),
+ Parent, this);
}
-Expected<sys::fs::perms> ArchiveMemberHeader::getAccessMode() const {
- unsigned Ret;
- if (StringRef(ArMemHdr->AccessMode, sizeof(ArMemHdr->AccessMode))
- .rtrim(' ')
- .getAsInteger(8, Ret)) {
- std::string Buf;
- raw_string_ostream OS(Buf);
- OS.write_escaped(
- StringRef(ArMemHdr->AccessMode, sizeof(ArMemHdr->AccessMode))
- .rtrim(" "));
- OS.flush();
- uint64_t Offset =
- reinterpret_cast<const char *>(ArMemHdr) - Parent->getData().data();
- return malformedError("characters in AccessMode field in archive header "
- "are not all decimal numbers: '" +
- Buf + "' for the archive member header at offset " +
- Twine(Offset));
- }
- return static_cast<sys::fs::perms>(Ret);
+Expected<uint64_t> BigArchiveMemberHeader::getSize() const {
+ Expected<uint64_t> SizeOrErr = getArchiveMemberDecField(
+ "size", getFieldRawString(ArMemHdr->Size), Parent, this);
+ if (!SizeOrErr)
+ return SizeOrErr.takeError();
+
+ Expected<uint64_t> NameLenOrErr = getRawNameSize();
+ if (!NameLenOrErr)
+ return NameLenOrErr.takeError();
+
+ return *SizeOrErr + alignTo(*NameLenOrErr, 2);
+}
+
+Expected<uint64_t> BigArchiveMemberHeader::getRawNameSize() const {
+ return getArchiveMemberDecField(
+ "NameLen", getFieldRawString(ArMemHdr->NameLen), Parent, this);
+}
+
+Expected<uint64_t> BigArchiveMemberHeader::getNextOffset() const {
+ return getArchiveMemberDecField(
+ "NextOffset", getFieldRawString(ArMemHdr->NextOffset), Parent, this);
+}
+
+Expected<sys::fs::perms> AbstractArchiveMemberHeader::getAccessMode() const {
+ Expected<uint64_t> AccessModeOrErr =
+ getArchiveMemberOctField("AccessMode", getRawAccessMode(), Parent, this);
+ if (!AccessModeOrErr)
+ return AccessModeOrErr.takeError();
+ return static_cast<sys::fs::perms>(*AccessModeOrErr);
}
Expected<sys::TimePoint<std::chrono::seconds>>
-ArchiveMemberHeader::getLastModified() const {
- unsigned Seconds;
- if (StringRef(ArMemHdr->LastModified, sizeof(ArMemHdr->LastModified))
- .rtrim(' ')
- .getAsInteger(10, Seconds)) {
- std::string Buf;
- raw_string_ostream OS(Buf);
- OS.write_escaped(
- StringRef(ArMemHdr->LastModified, sizeof(ArMemHdr->LastModified))
- .rtrim(" "));
- OS.flush();
- uint64_t Offset =
- reinterpret_cast<const char *>(ArMemHdr) - Parent->getData().data();
- return malformedError("characters in LastModified field in archive header "
- "are not all decimal numbers: '" +
- Buf + "' for the archive member header at offset " +
- Twine(Offset));
- }
+AbstractArchiveMemberHeader::getLastModified() const {
+ Expected<uint64_t> SecondsOrErr = getArchiveMemberDecField(
+ "LastModified", getRawLastModified(), Parent, this);
- return sys::toTimePoint(Seconds);
+ if (!SecondsOrErr)
+ return SecondsOrErr.takeError();
+
+ return sys::toTimePoint(*SecondsOrErr);
}
-Expected<unsigned> ArchiveMemberHeader::getUID() const {
- unsigned Ret;
- StringRef User = StringRef(ArMemHdr->UID, sizeof(ArMemHdr->UID)).rtrim(' ');
+Expected<unsigned> AbstractArchiveMemberHeader::getUID() const {
+ StringRef User = getRawUID();
if (User.empty())
return 0;
- if (User.getAsInteger(10, Ret)) {
- std::string Buf;
- raw_string_ostream OS(Buf);
- OS.write_escaped(User);
- OS.flush();
- uint64_t Offset =
- reinterpret_cast<const char *>(ArMemHdr) - Parent->getData().data();
- return malformedError("characters in UID field in archive header "
- "are not all decimal numbers: '" +
- Buf + "' for the archive member header at offset " +
- Twine(Offset));
- }
- return Ret;
+ return getArchiveMemberDecField("UID", User, Parent, this);
}
-Expected<unsigned> ArchiveMemberHeader::getGID() const {
- unsigned Ret;
- StringRef Group = StringRef(ArMemHdr->GID, sizeof(ArMemHdr->GID)).rtrim(' ');
+Expected<unsigned> AbstractArchiveMemberHeader::getGID() const {
+ StringRef Group = getRawGID();
if (Group.empty())
return 0;
- if (Group.getAsInteger(10, Ret)) {
- std::string Buf;
- raw_string_ostream OS(Buf);
- OS.write_escaped(Group);
- OS.flush();
- uint64_t Offset =
- reinterpret_cast<const char *>(ArMemHdr) - Parent->getData().data();
- return malformedError("characters in GID field in archive header "
- "are not all decimal numbers: '" +
- Buf + "' for the archive member header at offset " +
- Twine(Offset));
+ return getArchiveMemberDecField("GID", Group, Parent, this);
+}
+
+Expected<bool> ArchiveMemberHeader::isThin() const {
+ Expected<StringRef> NameOrErr = getRawName();
+ if (!NameOrErr)
+ return NameOrErr.takeError();
+ StringRef Name = NameOrErr.get();
+ return Parent->isThin() && Name != "/" && Name != "//" && Name != "/SYM64/";
+}
+
+Expected<const char *> ArchiveMemberHeader::getNextChildLoc() const {
+ uint64_t Size = getSizeOf();
+ Expected<bool> isThinOrErr = isThin();
+ if (!isThinOrErr)
+ return isThinOrErr.takeError();
+
+ bool isThin = isThinOrErr.get();
+ if (!isThin) {
+ Expected<uint64_t> MemberSize = getSize();
+ if (!MemberSize)
+ return MemberSize.takeError();
+
+ Size += MemberSize.get();
}
- return Ret;
+
+ // If Size is odd, add 1 to make it even.
+ const char *NextLoc =
+ reinterpret_cast<const char *>(ArMemHdr) + alignTo(Size, 2);
+
+ if (NextLoc == Parent->getMemoryBufferRef().getBufferEnd())
+ return nullptr;
+
+ return NextLoc;
+}
+
+Expected<const char *> BigArchiveMemberHeader::getNextChildLoc() const {
+ if (getOffset() ==
+ static_cast<const BigArchive *>(Parent)->getLastChildOffset())
+ return nullptr;
+
+ Expected<uint64_t> NextOffsetOrErr = getNextOffset();
+ if (!NextOffsetOrErr)
+ return NextOffsetOrErr.takeError();
+ return Parent->getData().data() + NextOffsetOrErr.get();
}
Archive::Child::Child(const Archive *Parent, StringRef Data,
uint16_t StartOfFile)
- : Parent(Parent), Header(Parent, Data.data(), Data.size(), nullptr),
- Data(Data), StartOfFile(StartOfFile) {}
+ : Parent(Parent), Data(Data), StartOfFile(StartOfFile) {
+ Header = Parent->createArchiveMemberHeader(Data.data(), Data.size(), nullptr);
+}
Archive::Child::Child(const Archive *Parent, const char *Start, Error *Err)
- : Parent(Parent),
- Header(Parent, Start,
- Parent
- ? Parent->getData().size() - (Start - Parent->getData().data())
- : 0,
- Err) {
- if (!Start)
+ : Parent(Parent) {
+ if (!Start) {
+ Header = nullptr;
return;
+ }
+
+ Header = Parent->createArchiveMemberHeader(
+ Start,
+ Parent ? Parent->getData().size() - (Start - Parent->getData().data())
+ : 0,
+ Err);
// If we are pointed to real data, Start is not a nullptr, then there must be
// a non-null Err pointer available to report malformed data on. Only in
@@ -358,7 +473,7 @@ Archive::Child::Child(const Archive *Parent, const char *Start, Error *Err)
if (*Err)
return;
- uint64_t Size = Header.getSizeOf();
+ uint64_t Size = Header->getSizeOf();
Data = StringRef(Start, Size);
Expected<bool> isThinOrErr = isThinMember();
if (!isThinOrErr) {
@@ -377,7 +492,7 @@ Archive::Child::Child(const Archive *Parent, const char *Start, Error *Err)
}
// Setup StartOfFile and PaddingBytes.
- StartOfFile = Header.getSizeOf();
+ StartOfFile = Header->getSizeOf();
// Don't include attached name.
Expected<StringRef> NameOrErr = getRawName();
if (!NameOrErr) {
@@ -385,17 +500,20 @@ Archive::Child::Child(const Archive *Parent, const char *Start, Error *Err)
return;
}
StringRef Name = NameOrErr.get();
- if (Name.startswith("#1/")) {
+
+ if (Parent->kind() == Archive::K_AIXBIG) {
+ // The actual start of the file is after the name and any necessary
+ // even-alignment padding.
+ StartOfFile += ((Name.size() + 1) >> 1) << 1;
+ } else if (Name.startswith("#1/")) {
uint64_t NameSize;
- if (Name.substr(3).rtrim(' ').getAsInteger(10, NameSize)) {
- std::string Buf;
- raw_string_ostream OS(Buf);
- OS.write_escaped(Name.substr(3).rtrim(' '));
- OS.flush();
+ StringRef RawNameSize = Name.substr(3).rtrim(' ');
+ if (RawNameSize.getAsInteger(10, NameSize)) {
uint64_t Offset = Start - Parent->getData().data();
*Err = malformedError("long name length characters after the #1/ are "
"not all decimal numbers: '" +
- Buf + "' for archive member header at offset " +
+ RawNameSize +
+ "' for archive member header at offset " +
Twine(Offset));
return;
}
@@ -405,21 +523,15 @@ Archive::Child::Child(const Archive *Parent, const char *Start, Error *Err)
Expected<uint64_t> Archive::Child::getSize() const {
if (Parent->IsThin)
- return Header.getSize();
+ return Header->getSize();
return Data.size() - StartOfFile;
}
Expected<uint64_t> Archive::Child::getRawSize() const {
- return Header.getSize();
+ return Header->getSize();
}
-Expected<bool> Archive::Child::isThinMember() const {
- Expected<StringRef> NameOrErr = Header.getRawName();
- if (!NameOrErr)
- return NameOrErr.takeError();
- StringRef Name = NameOrErr.get();
- return Parent->IsThin && Name != "/" && Name != "//" && Name != "/SYM64/";
-}
+Expected<bool> Archive::Child::isThinMember() const { return Header->isThin(); }
Expected<std::string> Archive::Child::getFullName() const {
Expected<bool> isThin = isThinMember();
@@ -462,15 +574,14 @@ Expected<StringRef> Archive::Child::getBuffer() const {
}
Expected<Archive::Child> Archive::Child::getNext() const {
- size_t SpaceToSkip = Data.size();
- // If it's odd, add 1 to make it even.
- if (SpaceToSkip & 1)
- ++SpaceToSkip;
+ Expected<const char *> NextLocOrErr = Header->getNextChildLoc();
+ if (!NextLocOrErr)
+ return NextLocOrErr.takeError();
- const char *NextLoc = Data.data() + SpaceToSkip;
+ const char *NextLoc = *NextLocOrErr;
// Check to see if this is at the end of the archive.
- if (NextLoc == Parent->Data.getBufferEnd())
+ if (NextLoc == nullptr)
return Child(nullptr, nullptr, nullptr);
// Check to see if this is past the end of the archive.
@@ -505,7 +616,8 @@ Expected<StringRef> Archive::Child::getName() const {
if (!RawSizeOrErr)
return RawSizeOrErr.takeError();
uint64_t RawSize = RawSizeOrErr.get();
- Expected<StringRef> NameOrErr = Header.getName(Header.getSizeOf() + RawSize);
+ Expected<StringRef> NameOrErr =
+ Header->getName(Header->getSizeOf() + RawSize);
if (!NameOrErr)
return NameOrErr.takeError();
StringRef Name = NameOrErr.get();
@@ -537,12 +649,39 @@ Archive::Child::getAsBinary(LLVMContext *Context) const {
Expected<std::unique_ptr<Archive>> Archive::create(MemoryBufferRef Source) {
Error Err = Error::success();
- std::unique_ptr<Archive> Ret(new Archive(Source, Err));
+ std::unique_ptr<Archive> Ret;
+ StringRef Buffer = Source.getBuffer();
+
+ if (Buffer.startswith(BigArchiveMagic))
+ Ret = std::make_unique<BigArchive>(Source, Err);
+ else
+ Ret = std::make_unique<Archive>(Source, Err);
+
if (Err)
return std::move(Err);
return std::move(Ret);
}
+std::unique_ptr<AbstractArchiveMemberHeader>
+Archive::createArchiveMemberHeader(const char *RawHeaderPtr, uint64_t Size,
+ Error *Err) const {
+ ErrorAsOutParameter ErrAsOutParam(Err);
+ if (kind() != K_AIXBIG)
+ return std::make_unique<ArchiveMemberHeader>(this, RawHeaderPtr, Size, Err);
+ return std::make_unique<BigArchiveMemberHeader>(this, RawHeaderPtr, Size,
+ Err);
+}
+
+uint64_t Archive::getArchiveMagicLen() const {
+ if (isThin())
+ return sizeof(ThinArchiveMagic) - 1;
+
+ if (Kind() == K_AIXBIG)
+ return sizeof(BigArchiveMagic) - 1;
+
+ return sizeof(ArchiveMagic) - 1;
+}
+
void Archive::setFirstRegular(const Child &C) {
FirstRegularData = C.Data;
FirstRegularStartOfFile = C.StartOfFile;
@@ -553,10 +692,14 @@ Archive::Archive(MemoryBufferRef Source, Error &Err)
ErrorAsOutParameter ErrAsOutParam(&Err);
StringRef Buffer = Data.getBuffer();
// Check for sufficient magic.
- if (Buffer.startswith(ThinMagic)) {
+ if (Buffer.startswith(ThinArchiveMagic)) {
IsThin = true;
- } else if (Buffer.startswith(Magic)) {
+ } else if (Buffer.startswith(ArchiveMagic)) {
+ IsThin = false;
+ } else if (Buffer.startswith(BigArchiveMagic)) {
+ Format = K_AIXBIG;
IsThin = false;
+ return;
} else {
Err = make_error<GenericBinaryError>("file too small to be an archive",
object_error::invalid_file_type);
@@ -788,7 +931,7 @@ Archive::child_iterator Archive::child_begin(Error &Err,
return child_iterator::itr(
Child(this, FirstRegularData, FirstRegularStartOfFile), Err);
- const char *Loc = Data.getBufferStart() + strlen(Magic);
+ const char *Loc = Data.getBufferStart() + getFirstChildOffset();
Child C(this, Loc, &Err);
if (Err)
return child_end();
@@ -997,6 +1140,38 @@ Expected<Optional<Archive::Child>> Archive::findSym(StringRef name) const {
}
// Returns true if archive file contains no member file.
-bool Archive::isEmpty() const { return Data.getBufferSize() == 8; }
+bool Archive::isEmpty() const {
+ return Data.getBufferSize() == getArchiveMagicLen();
+}
bool Archive::hasSymbolTable() const { return !SymbolTable.empty(); }
+
+BigArchive::BigArchive(MemoryBufferRef Source, Error &Err)
+ : Archive(Source, Err) {
+ ErrorAsOutParameter ErrAsOutParam(&Err);
+ StringRef Buffer = Data.getBuffer();
+ ArFixLenHdr = reinterpret_cast<const FixLenHdr *>(Buffer.data());
+
+ StringRef RawOffset = getFieldRawString(ArFixLenHdr->FirstChildOffset);
+ if (RawOffset.getAsInteger(10, FirstChildOffset))
+ // TODO: Out-of-line.
+ Err = malformedError("malformed AIX big archive: first member offset \"" +
+ RawOffset + "\" is not a number");
+
+ RawOffset = getFieldRawString(ArFixLenHdr->LastChildOffset);
+ if (RawOffset.getAsInteger(10, LastChildOffset))
+ // TODO: Out-of-line.
+ Err = malformedError("malformed AIX big archive: last member offset \"" +
+ RawOffset + "\" is not a number");
+
+ child_iterator I = child_begin(Err, false);
+ if (Err)
+ return;
+ child_iterator E = child_end();
+ if (I == E) {
+ Err = Error::success();
+ return;
+ }
+ setFirstRegular(*I);
+ Err = Error::success();
+}
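The BigArchive constructor above parses the AIX big-archive fixed-length header: the first and last member offsets are stored as space-padded decimal text, and members are chained through each header's NextOffset field rather than by advancing past the member data. A minimal sketch of decoding one such space-padded decimal field, assuming a raw character array `Field` like the ones in the fixed-length header:

// Minimal sketch: trim trailing blanks, then parse base-10. Returns true on
// success, mirroring the inverted getAsInteger convention used above.
#include "llvm/ADT/StringRef.h"
#include <cstdint>

template <std::size_t N>
bool parseDecField(const char (&Field)[N], uint64_t &Value) {
  return !llvm::StringRef(Field, N).rtrim(" ").getAsInteger(10, Value);
}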
diff --git a/contrib/llvm-project/llvm/lib/Object/ArchiveWriter.cpp b/contrib/llvm-project/llvm/lib/Object/ArchiveWriter.cpp
index da8bcec7f3d4..053b3dafed95 100644
--- a/contrib/llvm-project/llvm/lib/Object/ArchiveWriter.cpp
+++ b/contrib/llvm-project/llvm/lib/Object/ArchiveWriter.cpp
@@ -137,6 +137,7 @@ static bool isBSDLike(object::Archive::Kind Kind) {
case object::Archive::K_DARWIN:
case object::Archive::K_DARWIN64:
return true;
+ case object::Archive::K_AIXBIG:
case object::Archive::K_COFF:
break;
}
@@ -199,6 +200,7 @@ static bool is64BitKind(object::Archive::Kind Kind) {
case object::Archive::K_BSD:
case object::Archive::K_DARWIN:
case object::Archive::K_COFF:
+ case object::Archive::K_AIXBIG:
return false;
case object::Archive::K_DARWIN64:
case object::Archive::K_GNU64:
diff --git a/contrib/llvm-project/llvm/lib/Object/IRSymtab.cpp b/contrib/llvm-project/llvm/lib/Object/IRSymtab.cpp
index 093ae1bbc267..dea3d90d3560 100644
--- a/contrib/llvm-project/llvm/lib/Object/IRSymtab.cpp
+++ b/contrib/llvm-project/llvm/lib/Object/IRSymtab.cpp
@@ -14,6 +14,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Comdat.h"
#include "llvm/IR/DataLayout.h"
@@ -22,13 +23,13 @@
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
-#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/MC/StringTableBuilder.h"
#include "llvm/Object/IRObjectFile.h"
#include "llvm/Object/ModuleSymbolTable.h"
#include "llvm/Object/SymbolicFile.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/StringSaver.h"
#include "llvm/Support/VCSRevision.h"
@@ -41,6 +42,10 @@
using namespace llvm;
using namespace irsymtab;
+cl::opt<bool> DisableBitcodeVersionUpgrade(
+ "disable-bitcode-version-upgrade", cl::init(false), cl::Hidden,
+ cl::desc("Disable automatic bitcode upgrade for version mismatch"));
+
static const char *PreservedSymbols[] = {
#define HANDLE_LIBCALL(code, name) name,
#include "llvm/IR/RuntimeLibcalls.def"
@@ -402,20 +407,22 @@ Expected<FileContents> irsymtab::readBitcode(const BitcodeFileContents &BFC) {
return make_error<StringError>("Bitcode file does not contain any modules",
inconvertibleErrorCode());
- if (BFC.StrtabForSymtab.empty() ||
- BFC.Symtab.size() < sizeof(storage::Header))
- return upgrade(BFC.Mods);
-
- // We cannot use the regular reader to read the version and producer, because
- // it will expect the header to be in the current format. The only thing we
- // can rely on is that the version and producer will be present as the first
- // struct elements.
- auto *Hdr = reinterpret_cast<const storage::Header *>(BFC.Symtab.data());
- unsigned Version = Hdr->Version;
- StringRef Producer = Hdr->Producer.get(BFC.StrtabForSymtab);
- if (Version != storage::Header::kCurrentVersion ||
- Producer != kExpectedProducerName)
- return upgrade(BFC.Mods);
+ if (!DisableBitcodeVersionUpgrade) {
+ if (BFC.StrtabForSymtab.empty() ||
+ BFC.Symtab.size() < sizeof(storage::Header))
+ return upgrade(BFC.Mods);
+
+ // We cannot use the regular reader to read the version and producer,
+ // because it will expect the header to be in the current format. The only
+ // thing we can rely on is that the version and producer will be present as
+ // the first struct elements.
+ auto *Hdr = reinterpret_cast<const storage::Header *>(BFC.Symtab.data());
+ unsigned Version = Hdr->Version;
+ StringRef Producer = Hdr->Producer.get(BFC.StrtabForSymtab);
+ if (Version != storage::Header::kCurrentVersion ||
+ Producer != kExpectedProducerName)
+ return upgrade(BFC.Mods);
+ }
FileContents FC;
FC.TheReader = {{BFC.Symtab.data(), BFC.Symtab.size()},
diff --git a/contrib/llvm-project/llvm/lib/Object/Object.cpp b/contrib/llvm-project/llvm/lib/Object/Object.cpp
index 0659cf6a2d41..576eb8d069d6 100644
--- a/contrib/llvm-project/llvm/lib/Object/Object.cpp
+++ b/contrib/llvm-project/llvm/lib/Object/Object.cpp
@@ -16,6 +16,7 @@
#include "llvm/IR/LLVMContext.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Object/MachOUniversal.h"
+#include "llvm/Support/MemAlloc.h"
using namespace llvm;
using namespace object;
diff --git a/contrib/llvm-project/llvm/lib/Object/TapiFile.cpp b/contrib/llvm-project/llvm/lib/Object/TapiFile.cpp
index 6b576260bdb1..83568e8d823a 100644
--- a/contrib/llvm-project/llvm/lib/Object/TapiFile.cpp
+++ b/contrib/llvm-project/llvm/lib/Object/TapiFile.cpp
@@ -45,8 +45,7 @@ TapiFile::TapiFile(MemoryBufferRef Source, const InterfaceFile &interface,
Symbols.emplace_back(StringRef(), Symbol->getName(), getFlags(Symbol));
break;
case SymbolKind::ObjectiveCClass:
- if (interface.getPlatforms().count(PlatformKind::macOS) &&
- Arch == AK_i386) {
+ if (interface.getPlatforms().count(PLATFORM_MACOS) && Arch == AK_i386) {
Symbols.emplace_back(ObjC1ClassNamePrefix, Symbol->getName(),
getFlags(Symbol));
} else {
diff --git a/contrib/llvm-project/llvm/lib/Object/XCOFFObjectFile.cpp b/contrib/llvm-project/llvm/lib/Object/XCOFFObjectFile.cpp
index 9b0a5efacba7..f2f6d700ddd8 100644
--- a/contrib/llvm-project/llvm/lib/Object/XCOFFObjectFile.cpp
+++ b/contrib/llvm-project/llvm/lib/Object/XCOFFObjectFile.cpp
@@ -1112,8 +1112,12 @@ bool XCOFFSymbolRef::isFunction() const {
return true;
Expected<XCOFFCsectAuxRef> ExpCsectAuxEnt = getXCOFFCsectAuxRef();
- if (!ExpCsectAuxEnt)
+ if (!ExpCsectAuxEnt) {
+ // If we could not get the CSECT auxiliary entry, then treat this symbol as
+ // if it isn't a function. Consume the error and return `false` to move on.
+ consumeError(ExpCsectAuxEnt.takeError());
return false;
+ }
const XCOFFCsectAuxRef CsectAuxRef = ExpCsectAuxEnt.get();
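The added consumeError() call above matters because an Expected<T> holding an error asserts (with ABI-breaking checks enabled) if that error is destroyed without being handled. A minimal sketch of the handle-or-consume pattern, with a hypothetical mayFail() producer for illustration:

// Minimal sketch: every failed Expected must have its Error consumed or
// propagated before the Expected goes out of scope.
#include "llvm/Support/Error.h"

llvm::Expected<int> mayFail(bool Fail) {
  if (Fail)
    return llvm::createStringError(llvm::inconvertibleErrorCode(), "failed");
  return 42;
}

bool succeeded(bool Fail) {
  llvm::Expected<int> V = mayFail(Fail);
  if (!V) {
    llvm::consumeError(V.takeError()); // deliberately drop the error
    return false;
  }
  return *V > 0;
}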
diff --git a/contrib/llvm-project/llvm/lib/ObjectYAML/ELFYAML.cpp b/contrib/llvm-project/llvm/lib/ObjectYAML/ELFYAML.cpp
index 9b9266998ea6..ffe2599beaf8 100644
--- a/contrib/llvm-project/llvm/lib/ObjectYAML/ELFYAML.cpp
+++ b/contrib/llvm-project/llvm/lib/ObjectYAML/ELFYAML.cpp
@@ -518,6 +518,7 @@ void ScalarBitSetTraits<ELFYAML::ELF_EF>::bitset(IO &IO,
BCaseMask(EF_RISCV_FLOAT_ABI_DOUBLE, EF_RISCV_FLOAT_ABI);
BCaseMask(EF_RISCV_FLOAT_ABI_QUAD, EF_RISCV_FLOAT_ABI);
BCase(EF_RISCV_RVE);
+ BCase(EF_RISCV_TSO);
break;
case ELF::EM_AMDGPU:
BCaseMask(EF_AMDGPU_MACH_NONE, EF_AMDGPU_MACH);
diff --git a/contrib/llvm-project/llvm/lib/ObjectYAML/MachOEmitter.cpp b/contrib/llvm-project/llvm/lib/ObjectYAML/MachOEmitter.cpp
index e5ffb12df434..b9fad2982828 100644
--- a/contrib/llvm-project/llvm/lib/ObjectYAML/MachOEmitter.cpp
+++ b/contrib/llvm-project/llvm/lib/ObjectYAML/MachOEmitter.cpp
@@ -481,9 +481,9 @@ void MachOWriter::writeLinkEditData(raw_ostream &OS) {
typedef std::pair<uint64_t, writeHandler> writeOperation;
std::vector<writeOperation> WriteQueue;
- MachO::dyld_info_command *DyldInfoOnlyCmd = 0;
- MachO::symtab_command *SymtabCmd = 0;
- MachO::dysymtab_command *DSymtabCmd = 0;
+ MachO::dyld_info_command *DyldInfoOnlyCmd = nullptr;
+ MachO::symtab_command *SymtabCmd = nullptr;
+ MachO::dysymtab_command *DSymtabCmd = nullptr;
for (auto &LC : Obj.LoadCommands) {
switch (LC.Data.load_command_data.cmd) {
case MachO::LC_SYMTAB:
diff --git a/contrib/llvm-project/llvm/lib/ObjectYAML/XCOFFEmitter.cpp b/contrib/llvm-project/llvm/lib/ObjectYAML/XCOFFEmitter.cpp
index cf0d058c518c..2a7204d3f773 100644
--- a/contrib/llvm-project/llvm/lib/ObjectYAML/XCOFFEmitter.cpp
+++ b/contrib/llvm-project/llvm/lib/ObjectYAML/XCOFFEmitter.cpp
@@ -47,6 +47,7 @@ private:
bool initRelocations(uint64_t &CurrentOffset);
bool initStringTable();
bool assignAddressesAndIndices();
+
void writeFileHeader();
void writeAuxFileHeader();
void writeSectionHeader();
@@ -55,6 +56,15 @@ private:
bool writeSymbols();
void writeStringTable();
+ void writeAuxSymbol(const XCOFFYAML::CsectAuxEnt &AuxSym);
+ void writeAuxSymbol(const XCOFFYAML::FileAuxEnt &AuxSym);
+ void writeAuxSymbol(const XCOFFYAML::FunctionAuxEnt &AuxSym);
+ void writeAuxSymbol(const XCOFFYAML::ExcpetionAuxEnt &AuxSym);
+ void writeAuxSymbol(const XCOFFYAML::BlockAuxEnt &AuxSym);
+ void writeAuxSymbol(const XCOFFYAML::SectAuxEntForDWARF &AuxSym);
+ void writeAuxSymbol(const XCOFFYAML::SectAuxEntForStat &AuxSym);
+ void writeAuxSymbol(const std::unique_ptr<XCOFFYAML::AuxSymbolEnt> &AuxSym);
+
XCOFFYAML::Object &Obj;
bool Is64Bit = false;
support::endian::Writer W;
@@ -190,12 +200,23 @@ bool XCOFFWriter::initStringTable() {
}
}
} else {
- for (XCOFFYAML::Symbol &YamlSym : Obj.Symbols) {
+ for (const XCOFFYAML::Symbol &YamlSym : Obj.Symbols) {
if (nameShouldBeInStringTable(YamlSym.SymbolName))
StrTblBuilder.add(YamlSym.SymbolName);
}
}
+ // Check if the file name in the File Auxiliary Entry should be added to the
+ // string table.
+ for (const XCOFFYAML::Symbol &YamlSym : Obj.Symbols) {
+ for (const std::unique_ptr<XCOFFYAML::AuxSymbolEnt> &AuxSym :
+ YamlSym.AuxEntries) {
+ if (auto AS = dyn_cast<XCOFFYAML::FileAuxEnt>(AuxSym.get()))
+ if (nameShouldBeInStringTable(AS->FileNameOrString.getValueOr("")))
+ StrTblBuilder.add(AS->FileNameOrString.getValueOr(""));
+ }
+ }
+
StrTblBuilder.finalize();
size_t StrTblSize = StrTblBuilder.getSize();
@@ -216,9 +237,21 @@ bool XCOFFWriter::initFileHeader(uint64_t CurrentOffset) {
InitFileHdr.NumberOfSections = Obj.Sections.size();
InitFileHdr.NumberOfSymTableEntries = Obj.Symbols.size();
- for (const XCOFFYAML::Symbol &YamlSym : Obj.Symbols)
+ for (XCOFFYAML::Symbol &YamlSym : Obj.Symbols) {
+ uint32_t AuxCount = YamlSym.AuxEntries.size();
+ if (YamlSym.NumberOfAuxEntries && *YamlSym.NumberOfAuxEntries < AuxCount) {
+ ErrHandler("specified NumberOfAuxEntries " +
+ Twine(static_cast<uint32_t>(*YamlSym.NumberOfAuxEntries)) +
+ " is less than the actual number "
+ "of auxiliary entries " +
+ Twine(AuxCount));
+ return false;
+ }
+ YamlSym.NumberOfAuxEntries =
+ YamlSym.NumberOfAuxEntries.getValueOr(AuxCount);
// Add the number of auxiliary symbols to the total number.
- InitFileHdr.NumberOfSymTableEntries += YamlSym.NumberOfAuxEntries;
+ InitFileHdr.NumberOfSymTableEntries += *YamlSym.NumberOfAuxEntries;
+ }
// Calculate SymbolTableOffset for the file header.
if (InitFileHdr.NumberOfSymTableEntries) {
@@ -491,6 +524,125 @@ bool XCOFFWriter::writeRelocations() {
return true;
}
+void XCOFFWriter::writeAuxSymbol(const XCOFFYAML::CsectAuxEnt &AuxSym) {
+ if (Is64Bit) {
+ W.write<uint32_t>(AuxSym.SectionOrLengthLo.getValueOr(0));
+ W.write<uint32_t>(AuxSym.ParameterHashIndex.getValueOr(0));
+ W.write<uint16_t>(AuxSym.TypeChkSectNum.getValueOr(0));
+ W.write<uint8_t>(AuxSym.SymbolAlignmentAndType.getValueOr(0));
+ W.write<uint8_t>(AuxSym.StorageMappingClass.getValueOr(XCOFF::XMC_PR));
+ W.write<uint32_t>(AuxSym.SectionOrLengthHi.getValueOr(0));
+ W.write<uint8_t>(0);
+ W.write<uint8_t>(XCOFF::AUX_CSECT);
+ } else {
+ W.write<uint32_t>(AuxSym.SectionOrLength.getValueOr(0));
+ W.write<uint32_t>(AuxSym.ParameterHashIndex.getValueOr(0));
+ W.write<uint16_t>(AuxSym.TypeChkSectNum.getValueOr(0));
+ W.write<uint8_t>(AuxSym.SymbolAlignmentAndType.getValueOr(0));
+ W.write<uint8_t>(AuxSym.StorageMappingClass.getValueOr(XCOFF::XMC_PR));
+ W.write<uint32_t>(AuxSym.StabInfoIndex.getValueOr(0));
+ W.write<uint16_t>(AuxSym.StabSectNum.getValueOr(0));
+ }
+}
+
+void XCOFFWriter::writeAuxSymbol(const XCOFFYAML::ExcpetionAuxEnt &AuxSym) {
+ assert(Is64Bit && "can't write the exception auxiliary symbol for XCOFF32");
+ W.write<uint64_t>(AuxSym.OffsetToExceptionTbl.getValueOr(0));
+ W.write<uint32_t>(AuxSym.SizeOfFunction.getValueOr(0));
+ W.write<uint32_t>(AuxSym.SymIdxOfNextBeyond.getValueOr(0));
+ W.write<uint8_t>(0);
+ W.write<uint8_t>(XCOFF::AUX_EXCEPT);
+}
+
+void XCOFFWriter::writeAuxSymbol(const XCOFFYAML::FunctionAuxEnt &AuxSym) {
+ if (Is64Bit) {
+ W.write<uint64_t>(AuxSym.PtrToLineNum.getValueOr(0));
+ W.write<uint32_t>(AuxSym.SizeOfFunction.getValueOr(0));
+ W.write<uint32_t>(AuxSym.SymIdxOfNextBeyond.getValueOr(0));
+ W.write<uint8_t>(0);
+ W.write<uint8_t>(XCOFF::AUX_FCN);
+ } else {
+ W.write<uint32_t>(AuxSym.OffsetToExceptionTbl.getValueOr(0));
+ W.write<uint32_t>(AuxSym.SizeOfFunction.getValueOr(0));
+ W.write<uint32_t>(AuxSym.PtrToLineNum.getValueOr(0));
+ W.write<uint32_t>(AuxSym.SymIdxOfNextBeyond.getValueOr(0));
+ W.OS.write_zeros(2);
+ }
+}
+
+void XCOFFWriter::writeAuxSymbol(const XCOFFYAML::FileAuxEnt &AuxSym) {
+ StringRef FileName = AuxSym.FileNameOrString.getValueOr("");
+ if (nameShouldBeInStringTable(FileName)) {
+ W.write<int32_t>(0);
+ W.write<uint32_t>(StrTblBuilder.getOffset(FileName));
+ } else {
+ writeName(FileName, W);
+ }
+ W.OS.write_zeros(XCOFF::FileNamePadSize);
+ W.write<uint8_t>(AuxSym.FileStringType.getValueOr(XCOFF::XFT_FN));
+ if (Is64Bit) {
+ W.OS.write_zeros(2);
+ W.write<uint8_t>(XCOFF::AUX_FILE);
+ } else {
+ W.OS.write_zeros(3);
+ }
+}
+
+void XCOFFWriter::writeAuxSymbol(const XCOFFYAML::BlockAuxEnt &AuxSym) {
+ if (Is64Bit) {
+ W.write<uint32_t>(AuxSym.LineNum.getValueOr(0));
+ W.OS.write_zeros(13);
+ W.write<uint8_t>(XCOFF::AUX_SYM);
+ } else {
+ W.OS.write_zeros(2);
+ W.write<uint16_t>(AuxSym.LineNumHi.getValueOr(0));
+ W.write<uint16_t>(AuxSym.LineNumLo.getValueOr(0));
+ W.OS.write_zeros(12);
+ }
+}
+
+void XCOFFWriter::writeAuxSymbol(const XCOFFYAML::SectAuxEntForDWARF &AuxSym) {
+ if (Is64Bit) {
+ W.write<uint64_t>(AuxSym.LengthOfSectionPortion.getValueOr(0));
+ W.write<uint64_t>(AuxSym.NumberOfRelocEnt.getValueOr(0));
+ W.write<uint8_t>(0);
+ W.write<uint8_t>(XCOFF::AUX_SECT);
+ } else {
+ W.write<uint32_t>(AuxSym.LengthOfSectionPortion.getValueOr(0));
+ W.OS.write_zeros(4);
+ W.write<uint32_t>(AuxSym.NumberOfRelocEnt.getValueOr(0));
+ W.OS.write_zeros(6);
+ }
+}
+
+void XCOFFWriter::writeAuxSymbol(const XCOFFYAML::SectAuxEntForStat &AuxSym) {
+ assert(!Is64Bit && "can't write the stat auxiliary symbol for XCOFF64");
+ W.write<uint32_t>(AuxSym.SectionLength.getValueOr(0));
+ W.write<uint16_t>(AuxSym.NumberOfRelocEnt.getValueOr(0));
+ W.write<uint16_t>(AuxSym.NumberOfLineNum.getValueOr(0));
+ W.OS.write_zeros(10);
+}
+
+void XCOFFWriter::writeAuxSymbol(
+ const std::unique_ptr<XCOFFYAML::AuxSymbolEnt> &AuxSym) {
+ if (auto AS = dyn_cast<XCOFFYAML::CsectAuxEnt>(AuxSym.get()))
+ writeAuxSymbol(*AS);
+ else if (auto AS = dyn_cast<XCOFFYAML::FunctionAuxEnt>(AuxSym.get()))
+ writeAuxSymbol(*AS);
+ else if (auto AS = dyn_cast<XCOFFYAML::ExcpetionAuxEnt>(AuxSym.get()))
+ writeAuxSymbol(*AS);
+ else if (auto AS = dyn_cast<XCOFFYAML::FileAuxEnt>(AuxSym.get()))
+ writeAuxSymbol(*AS);
+ else if (auto AS = dyn_cast<XCOFFYAML::BlockAuxEnt>(AuxSym.get()))
+ writeAuxSymbol(*AS);
+ else if (auto AS = dyn_cast<XCOFFYAML::SectAuxEntForDWARF>(AuxSym.get()))
+ writeAuxSymbol(*AS);
+ else if (auto AS = dyn_cast<XCOFFYAML::SectAuxEntForStat>(AuxSym.get()))
+ writeAuxSymbol(*AS);
+ else
+ llvm_unreachable("unknown auxiliary symbol type");
+}
+
bool XCOFFWriter::writeSymbols() {
int64_t PaddingSize =
(uint64_t)InitFileHdr.SymbolTableOffset - (W.OS.tell() - StartOffset);
@@ -533,16 +685,25 @@ bool XCOFFWriter::writeSymbols() {
}
W.write<uint16_t>(YamlSym.Type);
W.write<uint8_t>(YamlSym.StorageClass);
- W.write<uint8_t>(YamlSym.NumberOfAuxEntries);
-
- // Now output the auxiliary entry.
- for (uint8_t I = 0, E = YamlSym.NumberOfAuxEntries; I < E; ++I) {
- // TODO: Auxiliary entry is not supported yet.
- // The auxiliary entries for a symbol follow its symbol table entry. The
- // length of each auxiliary entry is the same as a symbol table entry (18
- // bytes). The format and quantity of auxiliary entries depend on the
- // storage class (n_sclass) and type (n_type) of the symbol table entry.
- W.OS.write_zeros(XCOFF::SymbolTableEntrySize);
+
+ uint8_t NumOfAuxSym = YamlSym.NumberOfAuxEntries.getValueOr(0);
+ W.write<uint8_t>(NumOfAuxSym);
+
+ if (!NumOfAuxSym && !YamlSym.AuxEntries.size())
+ continue;
+
+ // Now write auxiliary entries.
+ if (!YamlSym.AuxEntries.size()) {
+ W.OS.write_zeros(XCOFF::SymbolTableEntrySize * NumOfAuxSym);
+ } else {
+ for (const std::unique_ptr<XCOFFYAML::AuxSymbolEnt> &AuxSym :
+ YamlSym.AuxEntries) {
+ writeAuxSymbol(AuxSym);
+ }
+ // Pad with zeros.
+ if (NumOfAuxSym > YamlSym.AuxEntries.size())
+ W.OS.write_zeros(XCOFF::SymbolTableEntrySize *
+ (NumOfAuxSym - YamlSym.AuxEntries.size()));
}
}
return true;
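For context on the string-table handling above: XCOFF embeds a symbol or file name inline only when it fits the fixed 8-byte field; longer names are placed in the string table and referenced through a zero word followed by an offset, which is what the writer emits for FileAuxEnt names. A minimal standalone sketch of that encoding follows; all names are invented, byte order is ignored, and it glosses over XCOFF64, where symbol names always live in the string table.

// Hedged sketch (not part of the merge): the n_zeroes/n_offset convention the
// writer above relies on when a name does not fit the fixed 8-byte field.
#include <cstdint>
#include <cstring>
#include <string>

namespace sketch {

constexpr size_t NameSize = 8; // assumed inline-name capacity

struct NameField {
  char Bytes[NameSize];
};

// True when the name must go to the string table instead of being embedded.
inline bool needsStringTable(const std::string &Name) {
  return Name.size() > NameSize;
}

// Encode either the inline name or the (0, offset) string-table reference.
// The real writer emits big-endian fields; this sketch ignores byte order.
inline NameField encode(const std::string &Name, uint32_t StrTblOffset) {
  NameField F;
  std::memset(F.Bytes, 0, NameSize);
  if (needsStringTable(Name)) {
    uint32_t Zero = 0;
    std::memcpy(F.Bytes, &Zero, sizeof(Zero));
    std::memcpy(F.Bytes + 4, &StrTblOffset, sizeof(StrTblOffset));
  } else {
    std::memcpy(F.Bytes, Name.data(), Name.size());
  }
  return F;
}

} // namespace sketch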
diff --git a/contrib/llvm-project/llvm/lib/ObjectYAML/XCOFFYAML.cpp b/contrib/llvm-project/llvm/lib/ObjectYAML/XCOFFYAML.cpp
index 221cf3b064c0..44ef33501b65 100644
--- a/contrib/llvm-project/llvm/lib/ObjectYAML/XCOFFYAML.cpp
+++ b/contrib/llvm-project/llvm/lib/ObjectYAML/XCOFFYAML.cpp
@@ -19,6 +19,8 @@ namespace XCOFFYAML {
Object::Object() { memset(&Header, 0, sizeof(Header)); }
+AuxSymbolEnt::~AuxSymbolEnt() = default;
+
} // namespace XCOFFYAML
namespace yaml {
@@ -98,6 +100,56 @@ void ScalarEnumerationTraits<XCOFF::StorageClass>::enumeration(
#undef ECase
}
+void ScalarEnumerationTraits<XCOFF::StorageMappingClass>::enumeration(
+ IO &IO, XCOFF::StorageMappingClass &Value) {
+#define ECase(X) IO.enumCase(Value, #X, XCOFF::X)
+ ECase(XMC_PR);
+ ECase(XMC_RO);
+ ECase(XMC_DB);
+ ECase(XMC_GL);
+ ECase(XMC_XO);
+ ECase(XMC_SV);
+ ECase(XMC_SV64);
+ ECase(XMC_SV3264);
+ ECase(XMC_TI);
+ ECase(XMC_TB);
+ ECase(XMC_RW);
+ ECase(XMC_TC0);
+ ECase(XMC_TC);
+ ECase(XMC_TD);
+ ECase(XMC_DS);
+ ECase(XMC_UA);
+ ECase(XMC_BS);
+ ECase(XMC_UC);
+ ECase(XMC_TL);
+ ECase(XMC_UL);
+ ECase(XMC_TE);
+#undef ECase
+}
+
+void ScalarEnumerationTraits<XCOFFYAML::AuxSymbolType>::enumeration(
+ IO &IO, XCOFFYAML::AuxSymbolType &Type) {
+#define ECase(X) IO.enumCase(Type, #X, XCOFFYAML::X)
+ ECase(AUX_EXCEPT);
+ ECase(AUX_FCN);
+ ECase(AUX_SYM);
+ ECase(AUX_FILE);
+ ECase(AUX_CSECT);
+ ECase(AUX_SECT);
+ ECase(AUX_STAT);
+#undef ECase
+}
+
+void ScalarEnumerationTraits<XCOFF::CFileStringType>::enumeration(
+ IO &IO, XCOFF::CFileStringType &Type) {
+#define ECase(X) IO.enumCase(Type, #X, XCOFF::X)
+ ECase(XFT_FN);
+ ECase(XFT_CT);
+ ECase(XFT_CV);
+ ECase(XFT_CD);
+#undef ECase
+}
+
struct NSectionFlags {
NSectionFlags(IO &) : Flags(XCOFF::SectionTypeFlags(0)) {}
NSectionFlags(IO &, uint32_t C) : Flags(XCOFF::SectionTypeFlags(C)) {}
@@ -173,6 +225,107 @@ void MappingTraits<XCOFFYAML::Section>::mapping(IO &IO,
IO.mapOptional("Relocations", Sec.Relocations);
}
+static void auxSymMapping(IO &IO, XCOFFYAML::CsectAuxEnt &AuxSym, bool Is64) {
+ IO.mapOptional("ParameterHashIndex", AuxSym.ParameterHashIndex);
+ IO.mapOptional("TypeChkSectNum", AuxSym.TypeChkSectNum);
+ IO.mapOptional("SymbolAlignmentAndType", AuxSym.SymbolAlignmentAndType);
+ IO.mapOptional("StorageMappingClass", AuxSym.StorageMappingClass);
+ if (Is64) {
+ IO.mapOptional("SectionOrLengthLo", AuxSym.SectionOrLengthLo);
+ IO.mapOptional("SectionOrLengthHi", AuxSym.SectionOrLengthHi);
+ } else {
+ IO.mapOptional("SectionOrLength", AuxSym.SectionOrLength);
+ IO.mapOptional("StabInfoIndex", AuxSym.StabInfoIndex);
+ IO.mapOptional("StabSectNum", AuxSym.StabSectNum);
+ }
+}
+
+static void auxSymMapping(IO &IO, XCOFFYAML::FileAuxEnt &AuxSym) {
+ IO.mapOptional("FileNameOrString", AuxSym.FileNameOrString);
+ IO.mapOptional("FileStringType", AuxSym.FileStringType);
+}
+
+static void auxSymMapping(IO &IO, XCOFFYAML::BlockAuxEnt &AuxSym, bool Is64) {
+ if (Is64) {
+ IO.mapOptional("LineNum", AuxSym.LineNum);
+ } else {
+ IO.mapOptional("LineNumHi", AuxSym.LineNumHi);
+ IO.mapOptional("LineNumLo", AuxSym.LineNumLo);
+ }
+}
+
+static void auxSymMapping(IO &IO, XCOFFYAML::FunctionAuxEnt &AuxSym,
+ bool Is64) {
+ if (!Is64)
+ IO.mapOptional("OffsetToExceptionTbl", AuxSym.OffsetToExceptionTbl);
+ IO.mapOptional("SizeOfFunction", AuxSym.SizeOfFunction);
+ IO.mapOptional("SymIdxOfNextBeyond", AuxSym.SymIdxOfNextBeyond);
+ IO.mapOptional("PtrToLineNum", AuxSym.PtrToLineNum);
+}
+
+static void auxSymMapping(IO &IO, XCOFFYAML::ExcpetionAuxEnt &AuxSym) {
+ IO.mapOptional("OffsetToExceptionTbl", AuxSym.OffsetToExceptionTbl);
+ IO.mapOptional("SizeOfFunction", AuxSym.SizeOfFunction);
+ IO.mapOptional("SymIdxOfNextBeyond", AuxSym.SymIdxOfNextBeyond);
+}
+
+static void auxSymMapping(IO &IO, XCOFFYAML::SectAuxEntForDWARF &AuxSym) {
+ IO.mapOptional("LengthOfSectionPortion", AuxSym.LengthOfSectionPortion);
+ IO.mapOptional("NumberOfRelocEnt", AuxSym.NumberOfRelocEnt);
+}
+
+static void auxSymMapping(IO &IO, XCOFFYAML::SectAuxEntForStat &AuxSym) {
+ IO.mapOptional("SectionLength", AuxSym.SectionLength);
+ IO.mapOptional("NumberOfRelocEnt", AuxSym.NumberOfRelocEnt);
+ IO.mapOptional("NumberOfLineNum", AuxSym.NumberOfLineNum);
+}
+
+void MappingTraits<std::unique_ptr<XCOFFYAML::AuxSymbolEnt>>::mapping(
+ IO &IO, std::unique_ptr<XCOFFYAML::AuxSymbolEnt> &AuxSym) {
+ assert(!IO.outputting() && "We don't dump aux symbols currently.");
+ const bool Is64 =
+ static_cast<XCOFFYAML::Object *>(IO.getContext())->Header.Magic ==
+ (llvm::yaml::Hex16)XCOFF::XCOFF64;
+ XCOFFYAML::AuxSymbolType AuxType;
+ IO.mapRequired("Type", AuxType);
+ switch (AuxType) {
+ case XCOFFYAML::AUX_EXCEPT:
+ if (!Is64)
+ IO.setError("an auxiliary symbol of type AUX_EXCEPT cannot be defined in "
+ "XCOFF32");
+ AuxSym.reset(new XCOFFYAML::ExcpetionAuxEnt());
+ auxSymMapping(IO, *cast<XCOFFYAML::ExcpetionAuxEnt>(AuxSym.get()));
+ break;
+ case XCOFFYAML::AUX_FCN:
+ AuxSym.reset(new XCOFFYAML::FunctionAuxEnt());
+ auxSymMapping(IO, *cast<XCOFFYAML::FunctionAuxEnt>(AuxSym.get()), Is64);
+ break;
+ case XCOFFYAML::AUX_SYM:
+ AuxSym.reset(new XCOFFYAML::BlockAuxEnt());
+ auxSymMapping(IO, *cast<XCOFFYAML::BlockAuxEnt>(AuxSym.get()), Is64);
+ break;
+ case XCOFFYAML::AUX_FILE:
+ AuxSym.reset(new XCOFFYAML::FileAuxEnt());
+ auxSymMapping(IO, *cast<XCOFFYAML::FileAuxEnt>(AuxSym.get()));
+ break;
+ case XCOFFYAML::AUX_CSECT:
+ AuxSym.reset(new XCOFFYAML::CsectAuxEnt());
+ auxSymMapping(IO, *cast<XCOFFYAML::CsectAuxEnt>(AuxSym.get()), Is64);
+ break;
+ case XCOFFYAML::AUX_SECT:
+ AuxSym.reset(new XCOFFYAML::SectAuxEntForDWARF());
+ auxSymMapping(IO, *cast<XCOFFYAML::SectAuxEntForDWARF>(AuxSym.get()));
+ break;
+ case XCOFFYAML::AUX_STAT:
+ if (Is64)
+ IO.setError(
+ "an auxiliary symbol of type AUX_STAT cannot be defined in XCOFF64");
+ AuxSym.reset(new XCOFFYAML::SectAuxEntForStat());
+ auxSymMapping(IO, *cast<XCOFFYAML::SectAuxEntForStat>(AuxSym.get()));
+ break;
+ }
+}
+
void MappingTraits<XCOFFYAML::Symbol>::mapping(IO &IO, XCOFFYAML::Symbol &S) {
IO.mapOptional("Name", S.SymbolName);
IO.mapOptional("Value", S.Value);
@@ -181,6 +334,8 @@ void MappingTraits<XCOFFYAML::Symbol>::mapping(IO &IO, XCOFFYAML::Symbol &S) {
IO.mapOptional("Type", S.Type);
IO.mapOptional("StorageClass", S.StorageClass);
IO.mapOptional("NumberOfAuxEntries", S.NumberOfAuxEntries);
+ if (!IO.outputting())
+ IO.mapOptional("AuxEntries", S.AuxEntries);
}
void MappingTraits<XCOFFYAML::StringTable>::mapping(IO &IO, XCOFFYAML::StringTable &Str) {
@@ -191,12 +346,14 @@ void MappingTraits<XCOFFYAML::StringTable>::mapping(IO &IO, XCOFFYAML::StringTab
}
void MappingTraits<XCOFFYAML::Object>::mapping(IO &IO, XCOFFYAML::Object &Obj) {
+ IO.setContext(&Obj);
IO.mapTag("!XCOFF", true);
IO.mapRequired("FileHeader", Obj.Header);
IO.mapOptional("AuxiliaryHeader", Obj.AuxHeader);
IO.mapOptional("Sections", Obj.Sections);
IO.mapOptional("Symbols", Obj.Symbols);
IO.mapOptional("StringTable", Obj.StrTbl);
+ IO.setContext(nullptr);
}
} // namespace yaml
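To make the new AuxEntries mapping concrete, here is the rough shape of input it is meant to accept, kept as a C++ string literal since this document's code is C++. The keys under Symbols and AuxEntries are the ones mapped above; the FileHeader key and all values are assumptions made for illustration, and a description of this shape would normally be handed to yaml2obj.

// Hedged sketch: a plausible XCOFF YAML description exercising the new
// AuxEntries mapping. Every value, and the FileHeader key name, is assumed.
static const char *const ExampleXCOFFYaml = R"yaml(--- !XCOFF
FileHeader:
  MagicNumber: 0x1DF        # XCOFF32 (assumed key name)
Symbols:
  - Name:               .foo
    StorageClass:       C_EXT
    NumberOfAuxEntries: 1
    AuxEntries:
      - Type:                   AUX_CSECT
        SymbolAlignmentAndType: 0x11
        StorageMappingClass:    XMC_PR
        SectionOrLength:        4
...
)yaml";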
diff --git a/contrib/llvm-project/llvm/lib/Passes/PassBuilder.cpp b/contrib/llvm-project/llvm/lib/Passes/PassBuilder.cpp
index d7615ef4e9bf..015ca1eec4df 100644
--- a/contrib/llvm-project/llvm/lib/Passes/PassBuilder.cpp
+++ b/contrib/llvm-project/llvm/lib/Passes/PassBuilder.cpp
@@ -35,6 +35,7 @@
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/DependenceAnalysis.h"
#include "llvm/Analysis/DivergenceAnalysis.h"
+#include "llvm/Analysis/DomPrinter.h"
#include "llvm/Analysis/DominanceFrontier.h"
#include "llvm/Analysis/FunctionPropertiesAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
@@ -218,6 +219,7 @@
#include "llvm/Transforms/Utils/BreakCriticalEdges.h"
#include "llvm/Transforms/Utils/CanonicalizeAliases.h"
#include "llvm/Transforms/Utils/CanonicalizeFreezeInLoops.h"
+#include "llvm/Transforms/Utils/Debugify.h"
#include "llvm/Transforms/Utils/EntryExitInstrumenter.h"
#include "llvm/Transforms/Utils/FixIrreducible.h"
#include "llvm/Transforms/Utils/HelloWorld.h"
@@ -655,6 +657,8 @@ Expected<MemorySanitizerOptions> parseMSanPassOptions(StringRef Params) {
ParamName)
.str(),
inconvertibleErrorCode());
+ } else if (ParamName == "eager-checks") {
+ Result.EagerChecks = true;
} else {
return make_error<StringError>(
formatv("invalid MemorySanitizer pass parameter '{0}' ", ParamName)
diff --git a/contrib/llvm-project/llvm/lib/Passes/PassBuilderPipelines.cpp b/contrib/llvm-project/llvm/lib/Passes/PassBuilderPipelines.cpp
index a6a36ff25402..6110bda02406 100644
--- a/contrib/llvm-project/llvm/lib/Passes/PassBuilderPipelines.cpp
+++ b/contrib/llvm-project/llvm/lib/Passes/PassBuilderPipelines.cpp
@@ -300,6 +300,8 @@ PassBuilder::buildO1FunctionSimplificationPipeline(OptimizationLevel Level,
// TODO: Investigate promotion cap for O1.
LPM1.addPass(LICMPass(PTO.LicmMssaOptCap, PTO.LicmMssaNoAccForPromotionCap));
LPM1.addPass(SimpleLoopUnswitchPass());
+ if (EnableLoopFlatten)
+ LPM1.addPass(LoopFlattenPass());
LPM2.addPass(LoopIdiomRecognizePass());
LPM2.addPass(IndVarSimplifyPass());
@@ -335,8 +337,6 @@ PassBuilder::buildO1FunctionSimplificationPipeline(OptimizationLevel Level,
/*UseBlockFrequencyInfo=*/true));
FPM.addPass(SimplifyCFGPass());
FPM.addPass(InstCombinePass());
- if (EnableLoopFlatten)
- FPM.addPass(createFunctionToLoopPassAdaptor(LoopFlattenPass()));
// The loop passes in LPM2 (LoopFullUnrollPass) do not preserve MemorySSA.
// *All* loop passes must preserve it, in order to be able to use it.
FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM2),
@@ -475,6 +475,9 @@ PassBuilder::buildFunctionSimplificationPipeline(OptimizationLevel Level,
LPM1.addPass(
SimpleLoopUnswitchPass(/* NonTrivial */ Level == OptimizationLevel::O3 &&
EnableO3NonTrivialUnswitching));
+ if (EnableLoopFlatten)
+ LPM1.addPass(LoopFlattenPass());
+
LPM2.addPass(LoopIdiomRecognizePass());
LPM2.addPass(IndVarSimplifyPass());
@@ -509,8 +512,6 @@ PassBuilder::buildFunctionSimplificationPipeline(OptimizationLevel Level,
/*UseBlockFrequencyInfo=*/true));
FPM.addPass(SimplifyCFGPass());
FPM.addPass(InstCombinePass());
- if (EnableLoopFlatten)
- FPM.addPass(createFunctionToLoopPassAdaptor(LoopFlattenPass()));
// The loop passes in LPM2 (LoopIdiomRecognizePass, IndVarSimplifyPass,
// LoopDeletionPass and LoopFullUnrollPass) do not preserve MemorySSA.
// *All* loop passes must preserve it, in order to be able to use it.
@@ -1623,14 +1624,13 @@ PassBuilder::buildLTODefaultPipeline(OptimizationLevel Level,
MainFPM.addPass(DSEPass());
MainFPM.addPass(MergedLoadStoreMotionPass());
- // More loops are countable; try to optimize them.
- if (EnableLoopFlatten && Level.getSpeedupLevel() > 1)
- MainFPM.addPass(createFunctionToLoopPassAdaptor(LoopFlattenPass()));
if (EnableConstraintElimination)
MainFPM.addPass(ConstraintEliminationPass());
LoopPassManager LPM;
+ if (EnableLoopFlatten && Level.getSpeedupLevel() > 1)
+ LPM.addPass(LoopFlattenPass());
LPM.addPass(IndVarSimplifyPass());
LPM.addPass(LoopDeletionPass());
// FIXME: Add loop interchange.
diff --git a/contrib/llvm-project/llvm/lib/Passes/PassRegistry.def b/contrib/llvm-project/llvm/lib/Passes/PassRegistry.def
index 74613a7fcce0..8e0af11b854d 100644
--- a/contrib/llvm-project/llvm/lib/Passes/PassRegistry.def
+++ b/contrib/llvm-project/llvm/lib/Passes/PassRegistry.def
@@ -48,9 +48,11 @@ MODULE_PASS("openmp-opt", OpenMPOptPass())
MODULE_PASS("called-value-propagation", CalledValuePropagationPass())
MODULE_PASS("canonicalize-aliases", CanonicalizeAliasesPass())
MODULE_PASS("cg-profile", CGProfilePass())
+MODULE_PASS("check-debugify", NewPMCheckDebugifyPass())
MODULE_PASS("constmerge", ConstantMergePass())
MODULE_PASS("cross-dso-cfi", CrossDSOCFIPass())
MODULE_PASS("deadargelim", DeadArgumentEliminationPass())
+MODULE_PASS("debugify", NewPMDebugifyPass())
MODULE_PASS("elim-avail-extern", EliminateAvailableExternallyPass())
MODULE_PASS("extract-blocks", BlockExtractorPass())
MODULE_PASS("forceattrs", ForceFunctionAttrsPass())
@@ -62,6 +64,7 @@ MODULE_PASS("globalsplit", GlobalSplitPass())
MODULE_PASS("hotcoldsplit", HotColdSplittingPass())
MODULE_PASS("inferattrs", InferFunctionAttrsPass())
MODULE_PASS("inliner-wrapper", ModuleInlinerWrapperPass())
+MODULE_PASS("print<inline-advisor>", InlineAdvisorAnalysisPrinterPass(dbgs()))
MODULE_PASS("inliner-wrapper-no-mandatory-first", ModuleInlinerWrapperPass(
getInlineParams(),
false))
@@ -254,6 +257,8 @@ FUNCTION_PASS("div-rem-pairs", DivRemPairsPass())
FUNCTION_PASS("dse", DSEPass())
FUNCTION_PASS("dot-cfg", CFGPrinterPass())
FUNCTION_PASS("dot-cfg-only", CFGOnlyPrinterPass())
+FUNCTION_PASS("dot-dom", DomTreePrinterPass())
+FUNCTION_PASS("dot-dom-only", DomTreeOnlyPrinterPass())
FUNCTION_PASS("fix-irreducible", FixIrreduciblePass())
FUNCTION_PASS("flattencfg", FlattenCFGPass())
FUNCTION_PASS("make-guards-explicit", MakeGuardsExplicitPass())
@@ -410,7 +415,7 @@ FUNCTION_PASS_WITH_PARAMS("msan",
return MemorySanitizerPass(Opts);
},
parseMSanPassOptions,
- "recover;kernel;track-origins=N")
+ "recover;kernel;eager-checks;track-origins=N")
FUNCTION_PASS_WITH_PARAMS("simplifycfg",
"SimplifyCFGPass",
[](SimplifyCFGOptions Opts) {
diff --git a/contrib/llvm-project/llvm/lib/Passes/StandardInstrumentations.cpp b/contrib/llvm-project/llvm/lib/Passes/StandardInstrumentations.cpp
index 23c825c78713..c42b1cb26f13 100644
--- a/contrib/llvm-project/llvm/lib/Passes/StandardInstrumentations.cpp
+++ b/contrib/llvm-project/llvm/lib/Passes/StandardInstrumentations.cpp
@@ -441,7 +441,7 @@ const Module *getModuleForComparison(Any IR) {
} // namespace
-template <typename T> ChangeReporter<T>::~ChangeReporter<T>() {
+template <typename T> ChangeReporter<T>::~ChangeReporter() {
assert(BeforeStack.empty() && "Problem with Change Printer stack.");
}
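The small fix above touches a C++ naming rule rather than behavior: out of line, a class template's destructor is defined with the plain class name, and repeating the template arguments after '~' is warned about or rejected by newer compilers. A minimal illustration, with an invented class name:

// Hedged sketch of the spelling the patch moves to.
template <typename T> struct Reporter {
  ~Reporter();
};

// Correct out-of-line definition: plain class name after '~'.
template <typename T> Reporter<T>::~Reporter() {}
// The form the patch removes from ChangeReporter would read
// Reporter<T>::~Reporter<T>() {} instead.

int main() {
  Reporter<int> R;
  (void)R;
}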
diff --git a/contrib/llvm-project/llvm/lib/ProfileData/GCOV.cpp b/contrib/llvm-project/llvm/lib/ProfileData/GCOV.cpp
index afef71f5b5ad..72d1addab01e 100644
--- a/contrib/llvm-project/llvm/lib/ProfileData/GCOV.cpp
+++ b/contrib/llvm-project/llvm/lib/ProfileData/GCOV.cpp
@@ -346,7 +346,7 @@ StringRef GCOVFunction::getName(bool demangle) const {
}
}
demangled = Name;
- } while (0);
+ } while (false);
}
return demangled;
}
diff --git a/contrib/llvm-project/llvm/lib/ProfileData/InstrProf.cpp b/contrib/llvm-project/llvm/lib/ProfileData/InstrProf.cpp
index 34e0c5ebcd58..051655e1fed6 100644
--- a/contrib/llvm-project/llvm/lib/ProfileData/InstrProf.cpp
+++ b/contrib/llvm-project/llvm/lib/ProfileData/InstrProf.cpp
@@ -119,9 +119,6 @@ static std::string getInstrProfErrString(instrprof_error Err,
case instrprof_error::unable_to_correlate_profile:
OS << "unable to correlate profile";
break;
- case instrprof_error::unsupported_debug_format:
- OS << "unsupported debug info format (only DWARF is supported)";
- break;
case instrprof_error::invalid_prof:
OS << "invalid profile created. Please file a bug "
"at: " BUG_REPORT_URL
diff --git a/contrib/llvm-project/llvm/lib/ProfileData/InstrProfCorrelator.cpp b/contrib/llvm-project/llvm/lib/ProfileData/InstrProfCorrelator.cpp
index f9c113027da2..8e38a6869d07 100644
--- a/contrib/llvm-project/llvm/lib/ProfileData/InstrProfCorrelator.cpp
+++ b/contrib/llvm-project/llvm/lib/ProfileData/InstrProfCorrelator.cpp
@@ -23,7 +23,8 @@ Expected<object::SectionRef> getCountersSection(const object::ObjectFile &Obj) {
if (SectionName.get() == INSTR_PROF_CNTS_SECT_NAME)
return Section;
return make_error<InstrProfError>(
- instrprof_error::unable_to_correlate_profile);
+ instrprof_error::unable_to_correlate_profile,
+ "could not find counter section (" INSTR_PROF_CNTS_SECT_NAME ")");
}
const char *InstrProfCorrelator::FunctionNameAttributeName = "Function Name";
@@ -54,9 +55,9 @@ InstrProfCorrelator::get(StringRef DebugInfoFilename) {
// TODO: Enable profile correlation when there are multiple objects in a
// dSYM bundle.
if (DsymObjectsOrErr->size() > 1)
- return createStringError(
- std::error_code(),
- "Profile correlation using multiple objects is not yet supported");
+ return make_error<InstrProfError>(
+ instrprof_error::unable_to_correlate_profile,
+ "using multiple objects is not yet supported");
DebugInfoFilename = *DsymObjectsOrErr->begin();
}
auto BufferOrErr =
@@ -84,7 +85,16 @@ InstrProfCorrelator::get(std::unique_ptr<MemoryBuffer> Buffer) {
return InstrProfCorrelatorImpl<uint32_t>::get(std::move(*CtxOrErr), *Obj);
}
return make_error<InstrProfError>(
- instrprof_error::unable_to_correlate_profile);
+ instrprof_error::unable_to_correlate_profile, "not an object file");
+}
+
+Optional<size_t> InstrProfCorrelator::getDataSize() const {
+ if (auto *C = dyn_cast<InstrProfCorrelatorImpl<uint32_t>>(this)) {
+ return C->getDataSize();
+ } else if (auto *C = dyn_cast<InstrProfCorrelatorImpl<uint64_t>>(this)) {
+ return C->getDataSize();
+ }
+ return {};
}
namespace llvm {
@@ -120,16 +130,23 @@ InstrProfCorrelatorImpl<IntPtrT>::get(
return std::make_unique<DwarfInstrProfCorrelator<IntPtrT>>(std::move(DICtx),
std::move(Ctx));
}
- return make_error<InstrProfError>(instrprof_error::unsupported_debug_format);
+ return make_error<InstrProfError>(
+ instrprof_error::unable_to_correlate_profile,
+ "unsupported debug info format (only DWARF is supported)");
}
template <class IntPtrT>
Error InstrProfCorrelatorImpl<IntPtrT>::correlateProfileData() {
- assert(Data.empty() && CompressedNames.empty() && Names.empty());
+ assert(Data.empty() && Names.empty() && NamesVec.empty());
correlateProfileDataImpl();
+ if (Data.empty() || NamesVec.empty())
+ return make_error<InstrProfError>(
+ instrprof_error::unable_to_correlate_profile,
+ "could not find any profile metadata in debug info");
auto Result =
- collectPGOFuncNameStrings(Names, /*doCompression=*/true, CompressedNames);
- Names.clear();
+ collectPGOFuncNameStrings(NamesVec, /*doCompression=*/false, Names);
+ CounterOffsets.clear();
+ NamesVec.clear();
return Result;
}
@@ -139,6 +156,9 @@ void InstrProfCorrelatorImpl<IntPtrT>::addProbe(StringRef FunctionName,
IntPtrT CounterOffset,
IntPtrT FunctionPtr,
uint32_t NumCounters) {
+ // Check if a probe was already added for this counter offset.
+ if (!CounterOffsets.insert(CounterOffset).second)
+ return;
Data.push_back({
maybeSwap<uint64_t>(IndexedInstrProf::ComputeHash(FunctionName)),
maybeSwap<uint64_t>(CFGHash),
@@ -151,7 +171,7 @@ void InstrProfCorrelatorImpl<IntPtrT>::addProbe(StringRef FunctionName,
maybeSwap<uint32_t>(NumCounters),
/*NumValueSites=*/{maybeSwap<uint16_t>(0), maybeSwap<uint16_t>(0)},
});
- Names.push_back(FunctionName.str());
+ NamesVec.push_back(FunctionName.str());
}
template <class IntPtrT>
@@ -163,13 +183,19 @@ DwarfInstrProfCorrelator<IntPtrT>::getLocation(const DWARFDie &Die) const {
return {};
}
auto &DU = *Die.getDwarfUnit();
+ auto AddressSize = DU.getAddressByteSize();
for (auto &Location : *Locations) {
- auto AddressSize = DU.getAddressByteSize();
DataExtractor Data(Location.Expr, DICtx->isLittleEndian(), AddressSize);
DWARFExpression Expr(Data, AddressSize);
- for (auto &Op : Expr)
- if (Op.getCode() == dwarf::DW_OP_addr)
+ for (auto &Op : Expr) {
+ if (Op.getCode() == dwarf::DW_OP_addr) {
return Op.getRawOperand(0);
+ } else if (Op.getCode() == dwarf::DW_OP_addrx) {
+ uint64_t Index = Op.getRawOperand(0);
+ if (auto SA = DU.getAddrOffsetSectionItem(Index))
+ return SA->Address;
+ }
+ }
}
return {};
}
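The CounterOffsets.insert(...).second guard added to addProbe above is the usual insert-and-check idiom for dropping duplicates on first sight. A minimal standalone illustration, with types and names invented for the example:

// Hedged sketch: skip work for a key that has already been seen, mirroring
// the early return added to addProbe().
#include <cstdint>
#include <set>
#include <vector>

namespace sketch {

struct Probe {
  uint64_t CounterOffset;
  uint64_t NameHash;
};

class ProbeCollector {
  std::set<uint64_t> SeenOffsets;
  std::vector<Probe> Probes;

public:
  // Returns false when the offset was already recorded; the probe is
  // silently dropped, as in the patched addProbe().
  bool add(uint64_t CounterOffset, uint64_t NameHash) {
    if (!SeenOffsets.insert(CounterOffset).second)
      return false;
    Probes.push_back({CounterOffset, NameHash});
    return true;
  }

  size_t size() const { return Probes.size(); }
};

} // namespace sketch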
diff --git a/contrib/llvm-project/llvm/lib/ProfileData/InstrProfReader.cpp b/contrib/llvm-project/llvm/lib/ProfileData/InstrProfReader.cpp
index 37cdf4dd1fe2..861ff61df510 100644
--- a/contrib/llvm-project/llvm/lib/ProfileData/InstrProfReader.cpp
+++ b/contrib/llvm-project/llvm/lib/ProfileData/InstrProfReader.cpp
@@ -383,22 +383,21 @@ Error RawInstrProfReader<IntPtrT>::readHeader(
CountersDelta = swap(Header.CountersDelta);
NamesDelta = swap(Header.NamesDelta);
- auto DataSize = swap(Header.DataSize);
+ auto NumData = swap(Header.DataSize);
auto PaddingBytesBeforeCounters = swap(Header.PaddingBytesBeforeCounters);
- auto CountersSize = swap(Header.CountersSize);
+ auto CountersSize = swap(Header.CountersSize) * getCounterTypeSize();
auto PaddingBytesAfterCounters = swap(Header.PaddingBytesAfterCounters);
auto NamesSize = swap(Header.NamesSize);
ValueKindLast = swap(Header.ValueKindLast);
- auto DataSizeInBytes = DataSize * sizeof(RawInstrProf::ProfileData<IntPtrT>);
+ auto DataSize = NumData * sizeof(RawInstrProf::ProfileData<IntPtrT>);
auto PaddingSize = getNumPaddingBytes(NamesSize);
// Profile data starts after profile header and binary ids if exist.
ptrdiff_t DataOffset = sizeof(RawInstrProf::Header) + BinaryIdsSize;
- ptrdiff_t CountersOffset =
- DataOffset + DataSizeInBytes + PaddingBytesBeforeCounters;
- ptrdiff_t NamesOffset = CountersOffset + (sizeof(uint64_t) * CountersSize) +
- PaddingBytesAfterCounters;
+ ptrdiff_t CountersOffset = DataOffset + DataSize + PaddingBytesBeforeCounters;
+ ptrdiff_t NamesOffset =
+ CountersOffset + CountersSize + PaddingBytesAfterCounters;
ptrdiff_t ValueDataOffset = NamesOffset + NamesSize + PaddingSize;
auto *Start = reinterpret_cast<const char *>(&Header);
@@ -412,12 +411,12 @@ Error RawInstrProfReader<IntPtrT>::readHeader(
assert(CountersDelta == 0 && NamesDelta == 0);
Data = Correlator->getDataPointer();
DataEnd = Data + Correlator->getDataSize();
- NamesStart = Correlator->getCompressedNamesPointer();
- NamesEnd = NamesStart + Correlator->getCompressedNamesSize();
+ NamesStart = Correlator->getNamesPointer();
+ NamesEnd = NamesStart + Correlator->getNamesSize();
} else {
Data = reinterpret_cast<const RawInstrProf::ProfileData<IntPtrT> *>(
Start + DataOffset);
- DataEnd = Data + DataSize;
+ DataEnd = Data + NumData;
NamesStart = Start + NamesOffset;
NamesEnd = NamesStart + NamesSize;
}
@@ -425,7 +424,8 @@ Error RawInstrProfReader<IntPtrT>::readHeader(
// Binary ids start just after the header.
BinaryIdsStart =
reinterpret_cast<const uint8_t *>(&Header) + sizeof(RawInstrProf::Header);
- CountersStart = reinterpret_cast<const uint64_t *>(Start + CountersOffset);
+ CountersStart = Start + CountersOffset;
+ CountersEnd = CountersStart + CountersSize;
ValueDataStart = reinterpret_cast<const uint8_t *>(Start + ValueDataOffset);
const uint8_t *BufferEnd = (const uint8_t *)DataBuffer->getBufferEnd();
@@ -459,58 +459,36 @@ Error RawInstrProfReader<IntPtrT>::readRawCounts(
if (NumCounters == 0)
return error(instrprof_error::malformed, "number of counters is zero");
- ArrayRef<uint64_t> RawCounts;
- if (Correlator) {
- uint64_t CounterOffset = swap<IntPtrT>(Data->CounterPtr) / sizeof(uint64_t);
- RawCounts =
- makeArrayRef<uint64_t>(CountersStart + CounterOffset, NumCounters);
- } else {
- IntPtrT CounterPtr = Data->CounterPtr;
- ptrdiff_t CounterOffset = getCounterOffset(CounterPtr);
- if (CounterOffset < 0)
- return error(
- instrprof_error::malformed,
- ("counter offset " + Twine(CounterOffset) + " is negative").str());
-
- // Check bounds. Note that the counter pointer embedded in the data record
- // may itself be corrupt.
- auto *NamesStartAsCounter = reinterpret_cast<const uint64_t *>(NamesStart);
- ptrdiff_t MaxNumCounters = NamesStartAsCounter - CountersStart;
- if (MaxNumCounters < 0 || NumCounters > (uint32_t)MaxNumCounters)
- return error(instrprof_error::malformed,
- "counter pointer is out of bounds");
- // We need to compute the in-buffer counter offset from the in-memory
- // address distance. The initial CountersDelta is the in-memory address
- // difference start(__llvm_prf_cnts)-start(__llvm_prf_data), so
- // SrcData->CounterPtr - CountersDelta computes the offset into the
- // in-buffer counter section.
- if (CounterOffset > MaxNumCounters)
- return error(instrprof_error::malformed,
- ("counter offset " + Twine(CounterOffset) +
- " is greater than the maximum number of counters " +
- Twine((uint32_t)MaxNumCounters))
- .str());
-
- if (((uint32_t)CounterOffset + NumCounters) > (uint32_t)MaxNumCounters)
- return error(instrprof_error::malformed,
- ("number of counters " +
- Twine(((uint32_t)CounterOffset + NumCounters)) +
- " is greater than the maximum number of counters " +
- Twine((uint32_t)MaxNumCounters))
- .str());
- // CountersDelta decreases as we advance to the next data record.
- CountersDelta -= sizeof(*Data);
-
- RawCounts = makeArrayRef(getCounter(CounterOffset), NumCounters);
- }
+ ptrdiff_t CounterBaseOffset = swap(Data->CounterPtr) - CountersDelta;
+ if (CounterBaseOffset < 0)
+ return error(
+ instrprof_error::malformed,
+ ("counter offset " + Twine(CounterBaseOffset) + " is negative").str());
- if (ShouldSwapBytes) {
- Record.Counts.clear();
- Record.Counts.reserve(RawCounts.size());
- for (uint64_t Count : RawCounts)
- Record.Counts.push_back(swap(Count));
- } else
- Record.Counts = RawCounts;
+ if (CounterBaseOffset >= CountersEnd - CountersStart)
+ return error(instrprof_error::malformed,
+ ("counter offset " + Twine(CounterBaseOffset) +
+ " is greater than the maximum counter offset " +
+ Twine(CountersEnd - CountersStart - 1))
+ .str());
+
+ uint64_t MaxNumCounters =
+ (CountersEnd - (CountersStart + CounterBaseOffset)) /
+ getCounterTypeSize();
+ if (NumCounters > MaxNumCounters)
+ return error(instrprof_error::malformed,
+ ("number of counters " + Twine(NumCounters) +
+ " is greater than the maximum number of counters " +
+ Twine(MaxNumCounters))
+ .str());
+
+ Record.Counts.clear();
+ Record.Counts.reserve(NumCounters);
+ for (uint32_t I = 0; I < NumCounters; I++) {
+ const auto *CounterValue = reinterpret_cast<const uint64_t *>(
+ CountersStart + CounterBaseOffset + I * getCounterTypeSize());
+ Record.Counts.push_back(swap(*CounterValue));
+ }
return success();
}
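The rewritten readRawCounts above replaces the old uint64_t-array view with raw byte arithmetic: derive a byte offset from the recorded counter pointer minus CountersDelta, bounds-check it against the counter section, then read NumCounters values. A simplified standalone sketch of that shape follows; it drops the error plumbing, byte swapping, and the variable counter width handled by getCounterTypeSize() in the real code, and all names are invented.

// Hedged sketch: bounds-checked counter read over a raw byte range.
#include <cstdint>
#include <cstring>
#include <optional>
#include <vector>

namespace sketch {

inline std::optional<std::vector<uint64_t>>
readCounters(const char *CountersStart, const char *CountersEnd,
             uint64_t CounterPtr, uint64_t CountersDelta,
             uint32_t NumCounters) {
  const int64_t BaseOffset =
      static_cast<int64_t>(CounterPtr) - static_cast<int64_t>(CountersDelta);
  const int64_t SectionSize = CountersEnd - CountersStart;
  if (BaseOffset < 0 || BaseOffset >= SectionSize)
    return std::nullopt; // offset points outside the counter section
  const uint64_t MaxCounters =
      static_cast<uint64_t>(SectionSize - BaseOffset) / sizeof(uint64_t);
  if (NumCounters == 0 || NumCounters > MaxCounters)
    return std::nullopt; // record claims more counters than fit
  std::vector<uint64_t> Counts(NumCounters);
  std::memcpy(Counts.data(), CountersStart + BaseOffset,
              NumCounters * sizeof(uint64_t));
  return Counts;
}

} // namespace sketch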
diff --git a/contrib/llvm-project/llvm/lib/ProfileData/SampleProfReader.cpp b/contrib/llvm-project/llvm/lib/ProfileData/SampleProfReader.cpp
index da16309fb82c..80c02faaba04 100644
--- a/contrib/llvm-project/llvm/lib/ProfileData/SampleProfReader.cpp
+++ b/contrib/llvm-project/llvm/lib/ProfileData/SampleProfReader.cpp
@@ -655,6 +655,8 @@ std::error_code SampleProfileReaderExtBinaryBase::readOneSection(
Summary->setPartialProfile(true);
if (hasSecFlag(Entry, SecProfSummaryFlags::SecFlagFullContext))
FunctionSamples::ProfileIsCSFlat = ProfileIsCSFlat = true;
+ if (hasSecFlag(Entry, SecProfSummaryFlags::SecFlagIsCSNested))
+ FunctionSamples::ProfileIsCSNested = ProfileIsCSNested;
if (hasSecFlag(Entry, SecProfSummaryFlags::SecFlagFSDiscriminator))
FunctionSamples::ProfileIsFS = ProfileIsFS = true;
break;
@@ -688,9 +690,6 @@ std::error_code SampleProfileReaderExtBinaryBase::readOneSection(
ProfileIsProbeBased =
hasSecFlag(Entry, SecFuncMetadataFlags::SecFlagIsProbeBased);
FunctionSamples::ProfileIsProbeBased = ProfileIsProbeBased;
- ProfileIsCSNested =
- hasSecFlag(Entry, SecFuncMetadataFlags::SecFlagIsCSNested);
- FunctionSamples::ProfileIsCSNested = ProfileIsCSNested;
bool HasAttribute =
hasSecFlag(Entry, SecFuncMetadataFlags::SecFlagHasAttribute);
if (std::error_code EC = readFuncMetadata(HasAttribute))
@@ -1276,6 +1275,8 @@ static std::string getSecFlagsStr(const SecHdrTableEntry &Entry) {
Flags.append("partial,");
if (hasSecFlag(Entry, SecProfSummaryFlags::SecFlagFullContext))
Flags.append("context,");
+ if (hasSecFlag(Entry, SecProfSummaryFlags::SecFlagIsCSNested))
+ Flags.append("context-nested,");
if (hasSecFlag(Entry, SecProfSummaryFlags::SecFlagFSDiscriminator))
Flags.append("fs-discriminator,");
break;
@@ -1288,8 +1289,6 @@ static std::string getSecFlagsStr(const SecHdrTableEntry &Entry) {
Flags.append("probe,");
if (hasSecFlag(Entry, SecFuncMetadataFlags::SecFlagHasAttribute))
Flags.append("attr,");
- if (hasSecFlag(Entry, SecFuncMetadataFlags::SecFlagIsCSNested))
- Flags.append("preinlined,");
break;
default:
break;
diff --git a/contrib/llvm-project/llvm/lib/ProfileData/SampleProfWriter.cpp b/contrib/llvm-project/llvm/lib/ProfileData/SampleProfWriter.cpp
index 6f02bd203a9f..b575425d4e94 100644
--- a/contrib/llvm-project/llvm/lib/ProfileData/SampleProfWriter.cpp
+++ b/contrib/llvm-project/llvm/lib/ProfileData/SampleProfWriter.cpp
@@ -323,13 +323,13 @@ std::error_code SampleProfileWriterExtBinaryBase::writeOneSection(
setToCompressSection(SecProfileSymbolList);
if (Type == SecFuncMetadata && FunctionSamples::ProfileIsProbeBased)
addSectionFlag(SecFuncMetadata, SecFuncMetadataFlags::SecFlagIsProbeBased);
- if (Type == SecFuncMetadata && FunctionSamples::ProfileIsCSNested)
- addSectionFlag(SecFuncMetadata, SecFuncMetadataFlags::SecFlagIsCSNested);
if (Type == SecFuncMetadata &&
(FunctionSamples::ProfileIsCSFlat || FunctionSamples::ProfileIsCSNested))
addSectionFlag(SecFuncMetadata, SecFuncMetadataFlags::SecFlagHasAttribute);
if (Type == SecProfSummary && FunctionSamples::ProfileIsCSFlat)
addSectionFlag(SecProfSummary, SecProfSummaryFlags::SecFlagFullContext);
+ if (Type == SecProfSummary && FunctionSamples::ProfileIsCSNested)
+ addSectionFlag(SecProfSummary, SecProfSummaryFlags::SecFlagIsCSNested);
if (Type == SecProfSummary && FunctionSamples::ProfileIsFS)
addSectionFlag(SecProfSummary, SecProfSummaryFlags::SecFlagFSDiscriminator);
diff --git a/contrib/llvm-project/llvm/lib/Remarks/BitstreamRemarkSerializer.cpp b/contrib/llvm-project/llvm/lib/Remarks/BitstreamRemarkSerializer.cpp
index 36ba93564771..0810bf531db8 100644
--- a/contrib/llvm-project/llvm/lib/Remarks/BitstreamRemarkSerializer.cpp
+++ b/contrib/llvm-project/llvm/lib/Remarks/BitstreamRemarkSerializer.cpp
@@ -18,7 +18,7 @@ using namespace llvm::remarks;
BitstreamRemarkSerializerHelper::BitstreamRemarkSerializerHelper(
BitstreamRemarkContainerType ContainerType)
- : Encoded(), R(), Bitstream(Encoded), ContainerType(ContainerType) {}
+ : Bitstream(Encoded), ContainerType(ContainerType) {}
static void push(SmallVectorImpl<uint64_t> &R, StringRef Str) {
append_range(R, Str);
diff --git a/contrib/llvm-project/llvm/lib/Remarks/Remark.cpp b/contrib/llvm-project/llvm/lib/Remarks/Remark.cpp
index 057d1a378599..e6b7de1a2cf5 100644
--- a/contrib/llvm-project/llvm/lib/Remarks/Remark.cpp
+++ b/contrib/llvm-project/llvm/lib/Remarks/Remark.cpp
@@ -111,7 +111,7 @@ LLVMRemarkEntryGetFirstArg(LLVMRemarkEntryRef Remark) {
ArrayRef<Argument> Args = unwrap(Remark)->Args;
// No arguments to iterate on.
if (Args.empty())
- return NULL;
+ return nullptr;
return reinterpret_cast<LLVMRemarkArgRef>(
const_cast<Argument *>(Args.begin()));
}
@@ -119,13 +119,13 @@ LLVMRemarkEntryGetFirstArg(LLVMRemarkEntryRef Remark) {
extern "C" LLVMRemarkArgRef
LLVMRemarkEntryGetNextArg(LLVMRemarkArgRef ArgIt, LLVMRemarkEntryRef Remark) {
// No more arguments to iterate on.
- if (ArgIt == NULL)
- return NULL;
+ if (ArgIt == nullptr)
+ return nullptr;
auto It = (ArrayRef<Argument>::const_iterator)ArgIt;
auto Next = std::next(It);
if (Next == unwrap(Remark)->Args.end())
- return NULL;
+ return nullptr;
return reinterpret_cast<LLVMRemarkArgRef>(const_cast<Argument *>(Next));
}
diff --git a/contrib/llvm-project/llvm/lib/Remarks/RemarkStreamer.cpp b/contrib/llvm-project/llvm/lib/Remarks/RemarkStreamer.cpp
index 2f00b8e73670..543b00723659 100644
--- a/contrib/llvm-project/llvm/lib/Remarks/RemarkStreamer.cpp
+++ b/contrib/llvm-project/llvm/lib/Remarks/RemarkStreamer.cpp
@@ -26,7 +26,7 @@ static cl::opt<cl::boolOrDefault> EnableRemarksSection(
RemarkStreamer::RemarkStreamer(
std::unique_ptr<remarks::RemarkSerializer> RemarkSerializer,
Optional<StringRef> FilenameIn)
- : PassFilter(), RemarkSerializer(std::move(RemarkSerializer)),
+ : RemarkSerializer(std::move(RemarkSerializer)),
Filename(FilenameIn ? Optional<std::string>(FilenameIn->str()) : None) {}
Error RemarkStreamer::setFilter(StringRef Filter) {
diff --git a/contrib/llvm-project/llvm/lib/Remarks/RemarkStringTable.cpp b/contrib/llvm-project/llvm/lib/Remarks/RemarkStringTable.cpp
index 5f462f01bb9a..03d93baba038 100644
--- a/contrib/llvm-project/llvm/lib/Remarks/RemarkStringTable.cpp
+++ b/contrib/llvm-project/llvm/lib/Remarks/RemarkStringTable.cpp
@@ -20,7 +20,7 @@
using namespace llvm;
using namespace llvm::remarks;
-StringTable::StringTable(const ParsedStringTable &Other) : StrTab() {
+StringTable::StringTable(const ParsedStringTable &Other) {
for (unsigned i = 0, e = Other.size(); i < e; ++i)
if (Expected<StringRef> MaybeStr = Other[i])
add(*MaybeStr);
diff --git a/contrib/llvm-project/llvm/lib/Remarks/YAMLRemarkParser.cpp b/contrib/llvm-project/llvm/lib/Remarks/YAMLRemarkParser.cpp
index 3d9996c931ae..a32629c9f557 100644
--- a/contrib/llvm-project/llvm/lib/Remarks/YAMLRemarkParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Remarks/YAMLRemarkParser.cpp
@@ -171,7 +171,7 @@ YAMLRemarkParser::YAMLRemarkParser(StringRef Buf)
YAMLRemarkParser::YAMLRemarkParser(StringRef Buf,
Optional<ParsedStringTable> StrTab)
- : RemarkParser{Format::YAML}, StrTab(std::move(StrTab)), LastErrorMessage(),
+ : RemarkParser{Format::YAML}, StrTab(std::move(StrTab)),
SM(setupSM(LastErrorMessage)), Stream(Buf, SM), YAMLIt(Stream.begin()) {}
Error YAMLRemarkParser::error(StringRef Message, yaml::Node &Node) {
diff --git a/contrib/llvm-project/llvm/lib/Support/AArch64TargetParser.cpp b/contrib/llvm-project/llvm/lib/Support/AArch64TargetParser.cpp
index 4bc9c8487131..cdf7c8ade9aa 100644
--- a/contrib/llvm-project/llvm/lib/Support/AArch64TargetParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/AArch64TargetParser.cpp
@@ -114,6 +114,12 @@ bool AArch64::getExtensionFeatures(uint64_t Extensions,
Features.push_back("+sme-f64");
if (Extensions & AArch64::AEK_SMEI64)
Features.push_back("+sme-i64");
+ if (Extensions & AArch64::AEK_HBC)
+ Features.push_back("+hbc");
+ if (Extensions & AArch64::AEK_MOPS)
+ Features.push_back("+mops");
+ if (Extensions & AArch64::AEK_PERFMON)
+ Features.push_back("+perfmon");
return true;
}
@@ -136,12 +142,16 @@ bool AArch64::getArchFeatures(AArch64::ArchKind AK,
Features.push_back("+v8.6a");
if (AK == AArch64::ArchKind::ARMV8_7A)
Features.push_back("+v8.7a");
+ if (AK == AArch64::ArchKind::ARMV8_8A)
+ Features.push_back("+v8.8a");
if (AK == AArch64::ArchKind::ARMV9A)
Features.push_back("+v9a");
if (AK == AArch64::ArchKind::ARMV9_1A)
Features.push_back("+v9.1a");
if (AK == AArch64::ArchKind::ARMV9_2A)
Features.push_back("+v9.2a");
+ if (AK == AArch64::ArchKind::ARMV9_3A)
+ Features.push_back("+v9.3a");
if(AK == AArch64::ArchKind::ARMV8R)
Features.push_back("+v8r");
diff --git a/contrib/llvm-project/llvm/lib/Support/APInt.cpp b/contrib/llvm-project/llvm/lib/Support/APInt.cpp
index 4940b61602d1..b536e9a9a6d0 100644
--- a/contrib/llvm-project/llvm/lib/Support/APInt.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/APInt.cpp
@@ -24,9 +24,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
-#include <climits>
#include <cmath>
-#include <cstdlib>
#include <cstring>
using namespace llvm;
diff --git a/contrib/llvm-project/llvm/lib/Support/ARMAttributeParser.cpp b/contrib/llvm-project/llvm/lib/Support/ARMAttributeParser.cpp
index 241cfb1eedbe..908e56319025 100644
--- a/contrib/llvm-project/llvm/lib/Support/ARMAttributeParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/ARMAttributeParser.cpp
@@ -9,8 +9,6 @@
#include "llvm/Support/ARMAttributeParser.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/Support/Errc.h"
-#include "llvm/Support/LEB128.h"
#include "llvm/Support/ScopedPrinter.h"
using namespace llvm;
@@ -70,7 +68,7 @@ const ARMAttributeParser::DisplayHandler ARMAttributeParser::displayRoutines[] =
Error ARMAttributeParser::stringAttribute(AttrType tag) {
StringRef tagName =
- ELFAttrs::attrTypeAsString(tag, tagToStringMap, /*TagPrefix=*/false);
+ ELFAttrs::attrTypeAsString(tag, tagToStringMap, /*hasTagPrefix=*/false);
StringRef desc = de.getCStrRef(cursor);
if (sw) {
diff --git a/contrib/llvm-project/llvm/lib/Support/ARMTargetParser.cpp b/contrib/llvm-project/llvm/lib/Support/ARMTargetParser.cpp
index 4405ed176fe2..d7294b5b1074 100644
--- a/contrib/llvm-project/llvm/lib/Support/ARMTargetParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/ARMTargetParser.cpp
@@ -77,6 +77,7 @@ unsigned ARM::parseArchVersion(StringRef Arch) {
case ArchKind::ARMV8_5A:
case ArchKind::ARMV8_6A:
case ArchKind::ARMV8_7A:
+ case ArchKind::ARMV8_8A:
case ArchKind::ARMV8R:
case ArchKind::ARMV8MBaseline:
case ArchKind::ARMV8MMainline:
@@ -85,6 +86,7 @@ unsigned ARM::parseArchVersion(StringRef Arch) {
case ArchKind::ARMV9A:
case ArchKind::ARMV9_1A:
case ArchKind::ARMV9_2A:
+ case ArchKind::ARMV9_3A:
return 9;
case ArchKind::INVALID:
return 0;
@@ -117,9 +119,11 @@ ARM::ProfileKind ARM::parseArchProfile(StringRef Arch) {
case ArchKind::ARMV8_5A:
case ArchKind::ARMV8_6A:
case ArchKind::ARMV8_7A:
+ case ArchKind::ARMV8_8A:
case ArchKind::ARMV9A:
case ArchKind::ARMV9_1A:
case ArchKind::ARMV9_2A:
+ case ArchKind::ARMV9_3A:
return ProfileKind::A;
case ArchKind::ARMV2:
case ArchKind::ARMV2A:
@@ -164,10 +168,12 @@ StringRef ARM::getArchSynonym(StringRef Arch) {
.Case("v8.5a", "v8.5-a")
.Case("v8.6a", "v8.6-a")
.Case("v8.7a", "v8.7-a")
+ .Case("v8.8a", "v8.8-a")
.Case("v8r", "v8-r")
.Cases("v9", "v9a", "v9-a")
.Case("v9.1a", "v9.1-a")
.Case("v9.2a", "v9.2-a")
+ .Case("v9.3a", "v9.3-a")
.Case("v8m.base", "v8-m.base")
.Case("v8m.main", "v8-m.main")
.Case("v8.1m.main", "v8.1-m.main")
diff --git a/contrib/llvm-project/llvm/lib/Support/ARMWinEH.cpp b/contrib/llvm-project/llvm/lib/Support/ARMWinEH.cpp
index 2e2fcf28451f..8e7fa1149082 100644
--- a/contrib/llvm-project/llvm/lib/Support/ARMWinEH.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/ARMWinEH.cpp
@@ -7,7 +7,6 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/ARMWinEH.h"
-#include "llvm/Support/raw_ostream.h"
namespace llvm {
namespace ARM {
diff --git a/contrib/llvm-project/llvm/lib/Support/BinaryStreamError.cpp b/contrib/llvm-project/llvm/lib/Support/BinaryStreamError.cpp
index f22523f09ac8..9b8f6862b65c 100644
--- a/contrib/llvm-project/llvm/lib/Support/BinaryStreamError.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/BinaryStreamError.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/BinaryStreamError.h"
-#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
using namespace llvm;
diff --git a/contrib/llvm-project/llvm/lib/Support/BlockFrequency.cpp b/contrib/llvm-project/llvm/lib/Support/BlockFrequency.cpp
index 2b63294f3789..702165ac480b 100644
--- a/contrib/llvm-project/llvm/lib/Support/BlockFrequency.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/BlockFrequency.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/BlockFrequency.h"
+#include "llvm/Support/BranchProbability.h"
#include <cassert>
using namespace llvm;
diff --git a/contrib/llvm-project/llvm/lib/Support/Caching.cpp b/contrib/llvm-project/llvm/lib/Support/Caching.cpp
index 8c685640f791..d6902f660e39 100644
--- a/contrib/llvm-project/llvm/lib/Support/Caching.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/Caching.cpp
@@ -30,8 +30,6 @@ Expected<FileCache> llvm::localCache(Twine CacheNameRef,
Twine TempFilePrefixRef,
Twine CacheDirectoryPathRef,
AddBufferFn AddBuffer) {
- if (std::error_code EC = sys::fs::create_directories(CacheDirectoryPathRef))
- return errorCodeToError(EC);
// Create local copies which are safely captured-by-copy in lambdas
SmallString<64> CacheName, TempFilePrefix, CacheDirectoryPath;
@@ -140,6 +138,12 @@ Expected<FileCache> llvm::localCache(Twine CacheNameRef,
};
return [=](size_t Task) -> Expected<std::unique_ptr<CachedFileStream>> {
+ // Create the cache directory if not already done. Doing this lazily
+ // ensures the filesystem isn't mutated until the cache is.
+ if (std::error_code EC = sys::fs::create_directories(
+ CacheDirectoryPath, /*IgnoreExisting=*/true))
+ return errorCodeToError(EC);
+
// Write to a temporary to avoid race condition
SmallString<64> TempFilenameModel;
sys::path::append(TempFilenameModel, CacheDirectoryPath,
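The Caching.cpp change above moves create_directories out of localCache() setup and into the returned callback, so the cache directory only appears once something is actually written. A hedged standalone sketch of that shape, using std::filesystem in place of llvm::sys::fs and invented names throughout:

// Hedged sketch: defer directory creation to the first use of the callback.
#include <filesystem>
#include <fstream>
#include <functional>
#include <string>
#include <system_error>
#include <utility>

namespace sketch {

using AddFileFn = std::function<bool(const std::string &Name,
                                     const std::string &Contents)>;

inline AddFileFn makeLazyCache(std::filesystem::path CacheDir) {
  // Note: the directory is *not* created here.
  return [CacheDir = std::move(CacheDir)](const std::string &Name,
                                          const std::string &Contents) {
    std::error_code EC;
    std::filesystem::create_directories(CacheDir, EC); // fine if it exists
    if (EC)
      return false;
    std::ofstream Out(CacheDir / Name, std::ios::binary);
    Out << Contents;
    return Out.good();
  };
}

} // namespace sketch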
diff --git a/contrib/llvm-project/llvm/lib/Support/CodeGenCoverage.cpp b/contrib/llvm-project/llvm/lib/Support/CodeGenCoverage.cpp
index 93f386b6e23d..73e0fb3edce8 100644
--- a/contrib/llvm-project/llvm/lib/Support/CodeGenCoverage.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/CodeGenCoverage.cpp
@@ -27,7 +27,7 @@ CodeGenCoverage::CodeGenCoverage() {}
void CodeGenCoverage::setCovered(uint64_t RuleID) {
if (RuleCoverage.size() <= RuleID)
- RuleCoverage.resize(RuleID + 1, 0);
+ RuleCoverage.resize(RuleID + 1, false);
RuleCoverage[RuleID] = true;
}
diff --git a/contrib/llvm-project/llvm/lib/Support/CommandLine.cpp b/contrib/llvm-project/llvm/lib/Support/CommandLine.cpp
index 4153a69abf5d..71a6ebf2a72e 100644
--- a/contrib/llvm-project/llvm/lib/Support/CommandLine.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/CommandLine.cpp
@@ -22,7 +22,7 @@
#include "llvm-c/Support.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
@@ -45,7 +45,6 @@
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdlib>
-#include <map>
#include <string>
using namespace llvm;
using namespace cl;
@@ -1078,11 +1077,45 @@ static bool hasUTF8ByteOrderMark(ArrayRef<char> S) {
return (S.size() >= 3 && S[0] == '\xef' && S[1] == '\xbb' && S[2] == '\xbf');
}
+// Substitute <CFGDIR> with the file's base path.
+static void ExpandBasePaths(StringRef BasePath, StringSaver &Saver,
+ const char *&Arg) {
+ assert(sys::path::is_absolute(BasePath));
+ constexpr StringLiteral Token("<CFGDIR>");
+ const StringRef ArgString(Arg);
+
+ SmallString<128> ResponseFile;
+ StringRef::size_type StartPos = 0;
+ for (StringRef::size_type TokenPos = ArgString.find(Token);
+ TokenPos != StringRef::npos;
+ TokenPos = ArgString.find(Token, StartPos)) {
+ // Token may appear more than once per arg (e.g. comma-separated linker
+ // args). Support by using path-append on any subsequent appearances.
+ const StringRef LHS = ArgString.substr(StartPos, TokenPos - StartPos);
+ if (ResponseFile.empty())
+ ResponseFile = LHS;
+ else
+ llvm::sys::path::append(ResponseFile, LHS);
+ ResponseFile.append(BasePath);
+ StartPos = TokenPos + Token.size();
+ }
+
+ if (!ResponseFile.empty()) {
+ // Path-append the remaining arg substring if at least one token appeared.
+ const StringRef Remaining = ArgString.substr(StartPos);
+ if (!Remaining.empty())
+ llvm::sys::path::append(ResponseFile, Remaining);
+ Arg = Saver.save(ResponseFile.str()).data();
+ }
+}
+
// FName must be an absolute path.
-static llvm::Error ExpandResponseFile(
- StringRef FName, StringSaver &Saver, TokenizerCallback Tokenizer,
- SmallVectorImpl<const char *> &NewArgv, bool MarkEOLs, bool RelativeNames,
- llvm::vfs::FileSystem &FS) {
+static llvm::Error ExpandResponseFile(StringRef FName, StringSaver &Saver,
+ TokenizerCallback Tokenizer,
+ SmallVectorImpl<const char *> &NewArgv,
+ bool MarkEOLs, bool RelativeNames,
+ bool ExpandBasePath,
+ llvm::vfs::FileSystem &FS) {
assert(sys::path::is_absolute(FName));
llvm::ErrorOr<std::unique_ptr<MemoryBuffer>> MemBufOrErr =
FS.getBufferForFile(FName);
@@ -1116,8 +1149,15 @@ static llvm::Error ExpandResponseFile(
// file, replace the included response file names with their full paths
// obtained by required resolution.
for (auto &Arg : NewArgv) {
+ if (!Arg)
+ continue;
+
+ // Substitute <CFGDIR> with the file's base path.
+ if (ExpandBasePath)
+ ExpandBasePaths(BasePath, Saver, Arg);
+
// Skip non-rsp file arguments.
- if (!Arg || Arg[0] != '@')
+ if (Arg[0] != '@')
continue;
StringRef FileName(Arg + 1);
@@ -1129,7 +1169,7 @@ static llvm::Error ExpandResponseFile(
ResponseFile.push_back('@');
ResponseFile.append(BasePath);
llvm::sys::path::append(ResponseFile, FileName);
- Arg = Saver.save(ResponseFile.c_str()).data();
+ Arg = Saver.save(ResponseFile.str()).data();
}
return Error::success();
}
@@ -1138,7 +1178,7 @@ static llvm::Error ExpandResponseFile(
/// StringSaver and tokenization strategy.
bool cl::ExpandResponseFiles(StringSaver &Saver, TokenizerCallback Tokenizer,
SmallVectorImpl<const char *> &Argv, bool MarkEOLs,
- bool RelativeNames,
+ bool RelativeNames, bool ExpandBasePath,
llvm::Optional<llvm::StringRef> CurrentDir,
llvm::vfs::FileSystem &FS) {
bool AllExpanded = true;
@@ -1218,7 +1258,7 @@ bool cl::ExpandResponseFiles(StringSaver &Saver, TokenizerCallback Tokenizer,
SmallVector<const char *, 0> ExpandedArgv;
if (llvm::Error Err =
ExpandResponseFile(FName, Saver, Tokenizer, ExpandedArgv, MarkEOLs,
- RelativeNames, FS)) {
+ RelativeNames, ExpandBasePath, FS)) {
// We couldn't read this file, so we leave it in the argument stream and
// move on.
// TODO: The error should be propagated up the stack.
@@ -1250,11 +1290,11 @@ bool cl::ExpandResponseFiles(StringSaver &Saver, TokenizerCallback Tokenizer,
bool cl::ExpandResponseFiles(StringSaver &Saver, TokenizerCallback Tokenizer,
SmallVectorImpl<const char *> &Argv, bool MarkEOLs,
- bool RelativeNames,
+ bool RelativeNames, bool ExpandBasePath,
llvm::Optional<StringRef> CurrentDir) {
return ExpandResponseFiles(Saver, std::move(Tokenizer), Argv, MarkEOLs,
- RelativeNames, std::move(CurrentDir),
- *vfs::getRealFileSystem());
+ RelativeNames, ExpandBasePath,
+ std::move(CurrentDir), *vfs::getRealFileSystem());
}
bool cl::expandResponseFiles(int Argc, const char *const *Argv,
@@ -1281,16 +1321,17 @@ bool cl::readConfigFile(StringRef CfgFile, StringSaver &Saver,
llvm::sys::path::append(AbsPath, CfgFile);
CfgFile = AbsPath.str();
}
- if (llvm::Error Err =
- ExpandResponseFile(CfgFile, Saver, cl::tokenizeConfigFile, Argv,
- /*MarkEOLs=*/false, /*RelativeNames=*/true,
- *llvm::vfs::getRealFileSystem())) {
+ if (llvm::Error Err = ExpandResponseFile(
+ CfgFile, Saver, cl::tokenizeConfigFile, Argv,
+ /*MarkEOLs=*/false, /*RelativeNames=*/true, /*ExpandBasePath=*/true,
+ *llvm::vfs::getRealFileSystem())) {
// TODO: The error should be propagated up the stack.
llvm::consumeError(std::move(Err));
return false;
}
return ExpandResponseFiles(Saver, cl::tokenizeConfigFile, Argv,
- /*MarkEOLs=*/false, /*RelativeNames=*/true);
+ /*MarkEOLs=*/false, /*RelativeNames=*/true,
+ /*ExpandBasePath=*/true, llvm::None);
}
static void initCommonOptions();
@@ -2297,7 +2338,7 @@ public:
protected:
void printOptions(StrOptionPairVector &Opts, size_t MaxArgLen) override {
std::vector<OptionCategory *> SortedCategories;
- std::map<OptionCategory *, std::vector<Option *>> CategorizedOptions;
+ DenseMap<OptionCategory *, std::vector<Option *>> CategorizedOptions;
// Collect registered option categories into vector in preparation for
// sorting.
@@ -2309,17 +2350,13 @@ protected:
array_pod_sort(SortedCategories.begin(), SortedCategories.end(),
OptionCategoryCompare);
- // Create map to empty vectors.
- for (OptionCategory *Category : SortedCategories)
- CategorizedOptions[Category] = std::vector<Option *>();
-
// Walk through pre-sorted options and assign into categories.
// Because the options are already alphabetically sorted the
// options within categories will also be alphabetically sorted.
for (size_t I = 0, E = Opts.size(); I != E; ++I) {
Option *Opt = Opts[I].second;
for (auto &Cat : Opt->Categories) {
- assert(CategorizedOptions.count(Cat) > 0 &&
+ assert(find(SortedCategories, Cat) != SortedCategories.end() &&
"Option has an unregistered category");
CategorizedOptions[Cat].push_back(Opt);
}
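The <CFGDIR> handling added to CommandLine.cpp above boils down to substituting a token with the config file's base path before response-file expansion. A deliberately naive standalone sketch of that substitution; the real ExpandBasePaths() additionally path-appends the surrounding pieces, which this version skips.

// Hedged sketch: plain textual <CFGDIR> expansion.
#include <string>

namespace sketch {

inline std::string expandCfgDir(std::string Arg, const std::string &BasePath) {
  const std::string Token = "<CFGDIR>";
  for (std::string::size_type Pos = Arg.find(Token);
       Pos != std::string::npos; Pos = Arg.find(Token, Pos + BasePath.size()))
    Arg.replace(Pos, Token.size(), BasePath);
  return Arg;
}

} // namespace sketch

// Example: expandCfgDir("-L<CFGDIR>/lib", "/etc/myproj") returns
// "-L/etc/myproj/lib".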
diff --git a/contrib/llvm-project/llvm/lib/Support/CrashRecoveryContext.cpp b/contrib/llvm-project/llvm/lib/Support/CrashRecoveryContext.cpp
index b6aaf373a522..2ee3074b840e 100644
--- a/contrib/llvm-project/llvm/lib/Support/CrashRecoveryContext.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/CrashRecoveryContext.cpp
@@ -9,7 +9,6 @@
#include "llvm/Support/CrashRecoveryContext.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/ExitCodes.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/Signals.h"
#include "llvm/Support/ThreadLocal.h"
@@ -17,6 +16,10 @@
#include <mutex>
#include <setjmp.h>
+#if !defined(_MSC_VER) && !defined(_WIN32)
+#include "llvm/Support/ExitCodes.h"
+#endif
+
using namespace llvm;
namespace {
diff --git a/contrib/llvm-project/llvm/lib/Support/DAGDeltaAlgorithm.cpp b/contrib/llvm-project/llvm/lib/Support/DAGDeltaAlgorithm.cpp
index a6daee00bd43..f1b730e2b58c 100644
--- a/contrib/llvm-project/llvm/lib/Support/DAGDeltaAlgorithm.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/DAGDeltaAlgorithm.cpp
@@ -37,7 +37,6 @@
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
-#include <iterator>
#include <map>
using namespace llvm;
diff --git a/contrib/llvm-project/llvm/lib/Support/DataExtractor.cpp b/contrib/llvm-project/llvm/lib/Support/DataExtractor.cpp
index 133d674275e8..8cf312191153 100644
--- a/contrib/llvm-project/llvm/lib/Support/DataExtractor.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/DataExtractor.cpp
@@ -9,7 +9,6 @@
#include "llvm/Support/DataExtractor.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/SwapByteOrder.h"
diff --git a/contrib/llvm-project/llvm/lib/Support/DivisionByConstantInfo.cpp b/contrib/llvm-project/llvm/lib/Support/DivisionByConstantInfo.cpp
index 077629670e40..69f39386798c 100644
--- a/contrib/llvm-project/llvm/lib/Support/DivisionByConstantInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/DivisionByConstantInfo.cpp
@@ -67,7 +67,7 @@ UnsignedDivisonByConstantInfo::get(const APInt &D, unsigned LeadingZeros) {
unsigned P;
APInt NC, Delta, Q1, R1, Q2, R2;
struct UnsignedDivisonByConstantInfo Retval;
- Retval.IsAdd = 0; // initialize "add" indicator
+ Retval.IsAdd = false; // initialize "add" indicator
APInt AllOnes = APInt::getAllOnes(D.getBitWidth()).lshr(LeadingZeros);
APInt SignedMin = APInt::getSignedMinValue(D.getBitWidth());
APInt SignedMax = APInt::getSignedMaxValue(D.getBitWidth());
@@ -89,12 +89,12 @@ UnsignedDivisonByConstantInfo::get(const APInt &D, unsigned LeadingZeros) {
}
if ((R2 + 1).uge(D - R2)) {
if (Q2.uge(SignedMax))
- Retval.IsAdd = 1;
+ Retval.IsAdd = true;
Q2 = Q2 + Q2 + 1; // update Q2
R2 = R2 + R2 + 1 - D; // update R2
} else {
if (Q2.uge(SignedMin))
- Retval.IsAdd = 1;
+ Retval.IsAdd = true;
Q2 = Q2 + Q2; // update Q2
R2 = R2 + R2 + 1; // update R2
}
diff --git a/contrib/llvm-project/llvm/lib/Support/ELFAttributeParser.cpp b/contrib/llvm-project/llvm/lib/Support/ELFAttributeParser.cpp
index 1206553343ef..cf8a666e92bc 100644
--- a/contrib/llvm-project/llvm/lib/Support/ELFAttributeParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/ELFAttributeParser.cpp
@@ -7,10 +7,8 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/ELFAttributeParser.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Errc.h"
-#include "llvm/Support/LEB128.h"
#include "llvm/Support/ScopedPrinter.h"
using namespace llvm;
diff --git a/contrib/llvm-project/llvm/lib/Support/FileOutputBuffer.cpp b/contrib/llvm-project/llvm/lib/Support/FileOutputBuffer.cpp
index 4b4406c4c9f4..c11ee59da0dd 100644
--- a/contrib/llvm-project/llvm/lib/Support/FileOutputBuffer.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/FileOutputBuffer.cpp
@@ -11,11 +11,9 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/FileOutputBuffer.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Memory.h"
-#include "llvm/Support/Path.h"
#include <system_error>
#if !defined(_MSC_VER) && !defined(__MINGW32__)
diff --git a/contrib/llvm-project/llvm/lib/Support/FileUtilities.cpp b/contrib/llvm-project/llvm/lib/Support/FileUtilities.cpp
index dbe28e56b2c3..489b8d119e6f 100644
--- a/contrib/llvm-project/llvm/lib/Support/FileUtilities.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/FileUtilities.cpp
@@ -12,16 +12,12 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/FileUtilities.h"
-#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
-#include <cctype>
-#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <cstring>
diff --git a/contrib/llvm-project/llvm/lib/Support/GraphWriter.cpp b/contrib/llvm-project/llvm/lib/Support/GraphWriter.cpp
index 696e6b7a99d8..e875e18a7e92 100644
--- a/contrib/llvm-project/llvm/lib/Support/GraphWriter.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/GraphWriter.cpp
@@ -18,7 +18,6 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Config/config.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ErrorOr.h"
@@ -26,7 +25,11 @@
#include "llvm/Support/Path.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/raw_ostream.h"
-#include <cassert>
+
+#ifdef __APPLE__
+#include "llvm/Support/CommandLine.h"
+#endif
+
#include <string>
#include <system_error>
#include <vector>
diff --git a/contrib/llvm-project/llvm/lib/Support/Host.cpp b/contrib/llvm-project/llvm/lib/Support/Host.cpp
index 7b14616f6fea..9a4470289bcf 100644
--- a/contrib/llvm-project/llvm/lib/Support/Host.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/Host.cpp
@@ -83,12 +83,12 @@ StringRef sys::detail::getHostCPUNameForPowerPC(StringRef ProcCpuinfoContent) {
StringRef::const_iterator CIP = CPUInfoStart;
- StringRef::const_iterator CPUStart = 0;
+ StringRef::const_iterator CPUStart = nullptr;
size_t CPULen = 0;
// We need to find the first line which starts with cpu, spaces, and a colon.
// After the colon, there may be some additional spaces and then the cpu type.
- while (CIP < CPUInfoEnd && CPUStart == 0) {
+ while (CIP < CPUInfoEnd && CPUStart == nullptr) {
if (CIP < CPUInfoEnd && *CIP == '\n')
++CIP;
@@ -118,12 +118,12 @@ StringRef sys::detail::getHostCPUNameForPowerPC(StringRef ProcCpuinfoContent) {
}
}
- if (CPUStart == 0)
+ if (CPUStart == nullptr)
while (CIP < CPUInfoEnd && *CIP != '\n')
++CIP;
}
- if (CPUStart == 0)
+ if (CPUStart == nullptr)
return generic;
return StringSwitch<const char *>(StringRef(CPUStart, CPULen))
@@ -213,6 +213,7 @@ StringRef sys::detail::getHostCPUNameForARM(StringRef ProcCpuinfoContent) {
.Case("0xd44", "cortex-x1")
.Case("0xd0c", "neoverse-n1")
.Case("0xd49", "neoverse-n2")
+ .Case("0xd40", "neoverse-v1")
.Default("generic");
}
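
The getHostCPUNameForPowerPC context above describes the scan being performed: find the first /proc/cpuinfo line that starts with "cpu", optional spaces, and a colon, then take the value after the colon. A minimal standalone sketch of that scan (plain C++, hypothetical helper name, not LLVM's implementation):

#include <sstream>
#include <string>

// Sketch: pull the value after the first "cpu ... :" line out of /proc/cpuinfo
// text. Hypothetical helper for illustration only.
std::string findCpuValue(const std::string &CpuInfo) {
  std::istringstream In(CpuInfo);
  std::string Line;
  while (std::getline(In, Line)) {
    if (Line.compare(0, 3, "cpu") != 0)
      continue;
    std::size_t Pos = 3;
    while (Pos < Line.size() && (Line[Pos] == ' ' || Line[Pos] == '\t'))
      ++Pos;                                  // Skip spaces before the colon.
    if (Pos == Line.size() || Line[Pos] != ':')
      continue;
    std::size_t Start = Line.find_first_not_of(" \t", Pos + 1);
    return Start == std::string::npos ? std::string() : Line.substr(Start);
  }
  return std::string();                       // Caller falls back to "generic".
}
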
diff --git a/contrib/llvm-project/llvm/lib/Support/InitLLVM.cpp b/contrib/llvm-project/llvm/lib/Support/InitLLVM.cpp
index 152de6ebae0a..2b7173b28940 100644
--- a/contrib/llvm-project/llvm/lib/Support/InitLLVM.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/InitLLVM.cpp
@@ -7,14 +7,15 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/InitLLVM.h"
-#include "llvm/Support/Error.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/PrettyStackTrace.h"
-#include "llvm/Support/Process.h"
#include "llvm/Support/Signals.h"
-#include <string>
+#include "llvm/Support/SwapByteOrder.h"
#ifdef _WIN32
+#include "llvm/Support/Error.h"
#include "llvm/Support/Windows/WindowsSupport.h"
#endif
diff --git a/contrib/llvm-project/llvm/lib/Support/JSON.cpp b/contrib/llvm-project/llvm/lib/Support/JSON.cpp
index 17b36ed51850..20babbe56d86 100644
--- a/contrib/llvm-project/llvm/lib/Support/JSON.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/JSON.cpp
@@ -12,6 +12,7 @@
#include "llvm/Support/Error.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/NativeFormatting.h"
#include <cctype>
namespace llvm {
diff --git a/contrib/llvm-project/llvm/lib/Support/LowLevelType.cpp b/contrib/llvm-project/llvm/lib/Support/LowLevelType.cpp
index ecf557997ad1..0282cd9bd79e 100644
--- a/contrib/llvm-project/llvm/lib/Support/LowLevelType.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/LowLevelType.cpp
@@ -17,7 +17,7 @@ using namespace llvm;
LLT::LLT(MVT VT) {
if (VT.isVector()) {
- bool asVector = VT.getVectorNumElements() > 1;
+ bool asVector = VT.getVectorMinNumElements() > 1;
init(/*IsPointer=*/false, asVector, /*IsScalar=*/!asVector,
VT.getVectorElementCount(), VT.getVectorElementType().getSizeInBits(),
/*AddressSpace=*/0);
diff --git a/contrib/llvm-project/llvm/lib/Support/MD5.cpp b/contrib/llvm-project/llvm/lib/Support/MD5.cpp
index 9dceb4d418cd..caadde389504 100644
--- a/contrib/llvm-project/llvm/lib/Support/MD5.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/MD5.cpp
@@ -40,10 +40,9 @@
#include "llvm/Support/MD5.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Endian.h"
-#include "llvm/Support/Format.h"
-#include "llvm/Support/raw_ostream.h"
#include <array>
#include <cstdint>
#include <cstring>
@@ -281,14 +280,12 @@ StringRef MD5::result() {
SmallString<32> MD5::MD5Result::digest() const {
SmallString<32> Str;
- raw_svector_ostream Res(Str);
- for (int i = 0; i < 16; ++i)
- Res << format("%.2x", Bytes[i]);
+ toHex(Bytes, /*LowerCase*/ true, Str);
return Str;
}
-void MD5::stringifyResult(MD5Result &Result, SmallString<32> &Str) {
- Str = Result.digest();
+void MD5::stringifyResult(MD5Result &Result, SmallVectorImpl<char> &Str) {
+ toHex(Result.Bytes, /*LowerCase*/ true, Str);
}
std::array<uint8_t, 16> MD5::hash(ArrayRef<uint8_t> Data) {
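
The digest() hunk above replaces a per-byte format("%.2x") loop with a single lowercase hex conversion of the 16 result bytes. Stripped of the LLVM helpers, that conversion amounts to the following (illustrative only; function name is made up):

#include <array>
#include <cstdint>
#include <string>

// Sketch: lowercase hex rendering of a 16-byte MD5 digest.
std::string digestToHex(const std::array<uint8_t, 16> &Bytes) {
  static const char HexDigits[] = "0123456789abcdef";
  std::string Out;
  Out.reserve(32);                      // Two hex characters per byte.
  for (uint8_t B : Bytes) {
    Out.push_back(HexDigits[B >> 4]);
    Out.push_back(HexDigits[B & 0xF]);
  }
  return Out;
}
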
diff --git a/contrib/llvm-project/llvm/lib/Support/MSP430AttributeParser.cpp b/contrib/llvm-project/llvm/lib/Support/MSP430AttributeParser.cpp
index a9948a158fc0..a230a3a70adb 100644
--- a/contrib/llvm-project/llvm/lib/Support/MSP430AttributeParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/MSP430AttributeParser.cpp
@@ -7,7 +7,8 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/MSP430AttributeParser.h"
-#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
using namespace llvm::MSP430Attrs;
diff --git a/contrib/llvm-project/llvm/lib/Support/MemAlloc.cpp b/contrib/llvm-project/llvm/lib/Support/MemAlloc.cpp
index 7aaa0dc6e205..07a26cf26480 100644
--- a/contrib/llvm-project/llvm/lib/Support/MemAlloc.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/MemAlloc.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/MemAlloc.h"
+#include <new>
// These are out of line to have __cpp_aligned_new not affect ABI.
diff --git a/contrib/llvm-project/llvm/lib/Support/MemoryBuffer.cpp b/contrib/llvm-project/llvm/lib/Support/MemoryBuffer.cpp
index d3fa3c6f065d..7816779cca1d 100644
--- a/contrib/llvm-project/llvm/lib/Support/MemoryBuffer.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/MemoryBuffer.cpp
@@ -14,16 +14,15 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/Config/config.h"
#include "llvm/Support/AutoConvert.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Errc.h"
-#include "llvm/Support/Errno.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/SmallVectorMemoryBuffer.h"
#include <cassert>
-#include <cerrno>
#include <cstring>
#include <new>
#include <sys/types.h>
@@ -220,28 +219,16 @@ public:
MemoryBuffer::BufferKind getBufferKind() const override {
return MemoryBuffer::MemoryBuffer_MMap;
}
+
+ void dontNeedIfMmap() override { MFR.dontNeed(); }
};
} // namespace
static ErrorOr<std::unique_ptr<WritableMemoryBuffer>>
getMemoryBufferForStream(sys::fs::file_t FD, const Twine &BufferName) {
- const ssize_t ChunkSize = 4096*4;
- SmallString<ChunkSize> Buffer;
-
- // Read into Buffer until we hit EOF.
- size_t Size = Buffer.size();
- for (;;) {
- Buffer.resize_for_overwrite(Size + ChunkSize);
- Expected<size_t> ReadBytes = sys::fs::readNativeFile(
- FD, makeMutableArrayRef(Buffer.begin() + Size, ChunkSize));
- if (!ReadBytes)
- return errorToErrorCode(ReadBytes.takeError());
- if (*ReadBytes == 0)
- break;
- Size += *ReadBytes;
- }
- Buffer.truncate(Size);
-
+ SmallString<sys::fs::DefaultReadChunkSize> Buffer;
+ if (Error E = sys::fs::readNativeFileToEOF(FD, Buffer))
+ return errorToErrorCode(std::move(E));
return getMemBufferCopyImpl(Buffer, BufferName);
}
diff --git a/contrib/llvm-project/llvm/lib/Support/NativeFormatting.cpp b/contrib/llvm-project/llvm/lib/Support/NativeFormatting.cpp
index 254d18d797b3..0a797046bb68 100644
--- a/contrib/llvm-project/llvm/lib/Support/NativeFormatting.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/NativeFormatting.cpp
@@ -13,7 +13,6 @@
#include "llvm/Support/Format.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
-#include <float.h>
using namespace llvm;
diff --git a/contrib/llvm-project/llvm/lib/Support/Parallel.cpp b/contrib/llvm-project/llvm/lib/Support/Parallel.cpp
index 71e3a1362f7e..4977c188f934 100644
--- a/contrib/llvm-project/llvm/lib/Support/Parallel.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/Parallel.cpp
@@ -174,3 +174,35 @@ void TaskGroup::spawn(std::function<void()> F) {
} // namespace parallel
} // namespace llvm
#endif // LLVM_ENABLE_THREADS
+
+void llvm::parallelForEachN(size_t Begin, size_t End,
+ llvm::function_ref<void(size_t)> Fn) {
+ // If we have zero or one items, then do not incur the overhead of spinning up
+ // a task group. They are surprisingly expensive, and because they do not
+ // support nested parallelism, a single entry task group can block parallel
+ // execution underneath them.
+#if LLVM_ENABLE_THREADS
+ auto NumItems = End - Begin;
+ if (NumItems > 1 && parallel::strategy.ThreadsRequested != 1) {
+ // Limit the number of tasks to MaxTasksPerGroup to limit job scheduling
+ // overhead on large inputs.
+ auto TaskSize = NumItems / parallel::detail::MaxTasksPerGroup;
+ if (TaskSize == 0)
+ TaskSize = 1;
+
+ parallel::detail::TaskGroup TG;
+ for (; Begin + TaskSize < End; Begin += TaskSize) {
+ TG.spawn([=, &Fn] {
+ for (size_t I = Begin, E = Begin + TaskSize; I != E; ++I)
+ Fn(I);
+ });
+ }
+ for (; Begin != End; ++Begin)
+ Fn(Begin);
+ return;
+ }
+#endif
+
+ for (; Begin != End; ++Begin)
+ Fn(Begin);
+}
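
The comments in the new parallelForEachN spell out the strategy: run tiny ranges inline to avoid task-group overhead, otherwise split the index range into at most MaxTasksPerGroup chunks and execute the final chunk on the calling thread. A simplified sketch of that chunking, using std::thread in place of LLVM's task group and an assumed MaxTasks constant:

#include <cstddef>
#include <functional>
#include <thread>
#include <vector>

// Sketch: chunked parallel-for over [Begin, End), assuming a MaxTasks limit.
void parallelForSketch(size_t Begin, size_t End,
                       const std::function<void(size_t)> &Fn,
                       size_t MaxTasks = 32) {
  size_t NumItems = End - Begin;
  if (NumItems <= 1) {                  // Too small to be worth spawning work.
    for (size_t I = Begin; I != End; ++I)
      Fn(I);
    return;
  }
  size_t TaskSize = NumItems / MaxTasks;
  if (TaskSize == 0)
    TaskSize = 1;
  std::vector<std::thread> Workers;
  for (; Begin + TaskSize < End; Begin += TaskSize)
    Workers.emplace_back([Begin, TaskSize, &Fn] {
      for (size_t I = Begin, E = Begin + TaskSize; I != E; ++I)
        Fn(I);
    });
  for (size_t I = Begin; I != End; ++I) // Remaining tail runs inline.
    Fn(I);
  for (std::thread &T : Workers)
    T.join();
}

Running the tail inline mirrors the hunk above, where the spawning thread handles the last partial chunk itself instead of scheduling one more task.
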
diff --git a/contrib/llvm-project/llvm/lib/Support/Path.cpp b/contrib/llvm-project/llvm/lib/Support/Path.cpp
index 7c99d088911c..63d8d4ee4648 100644
--- a/contrib/llvm-project/llvm/lib/Support/Path.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/Path.cpp
@@ -12,6 +12,7 @@
#include "llvm/Support/Path.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/ScopeExit.h"
#include "llvm/Config/config.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/Endian.h"
@@ -1167,6 +1168,25 @@ const char *mapped_file_region::const_data() const {
return reinterpret_cast<const char *>(Mapping);
}
+Error readNativeFileToEOF(file_t FileHandle, SmallVectorImpl<char> &Buffer,
+ ssize_t ChunkSize) {
+ // Install a handler to truncate the buffer to the correct size on exit.
+ size_t Size = Buffer.size();
+ auto TruncateOnExit = make_scope_exit([&]() { Buffer.truncate(Size); });
+
+ // Read into Buffer until we hit EOF.
+ for (;;) {
+ Buffer.resize_for_overwrite(Size + ChunkSize);
+ Expected<size_t> ReadBytes = readNativeFile(
+ FileHandle, makeMutableArrayRef(Buffer.begin() + Size, ChunkSize));
+ if (!ReadBytes)
+ return ReadBytes.takeError();
+ if (*ReadBytes == 0)
+ return Error::success();
+ Size += *ReadBytes;
+ }
+}
+
} // end namespace fs
} // end namespace sys
} // end namespace llvm
@@ -1234,7 +1254,8 @@ Error TempFile::keep(const Twine &Name) {
#ifdef _WIN32
// If we can't cancel the delete don't rename.
auto H = reinterpret_cast<HANDLE>(_get_osfhandle(FD));
- std::error_code RenameEC = setDeleteDisposition(H, false);
+ std::error_code RenameEC =
+ RemoveOnClose ? std::error_code() : setDeleteDisposition(H, false);
bool ShouldDelete = false;
if (!RenameEC) {
RenameEC = rename_handle(H, Name);
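
readNativeFileToEOF, added in the hunk above, grows the buffer one chunk at a time and uses make_scope_exit so the buffer is trimmed to the bytes actually read on every return path; getMemoryBufferForStream in MemoryBuffer.cpp now delegates to it instead of carrying its own copy of the loop. The same pattern in plain POSIX C++, with a small RAII guard standing in for the scope-exit helper (a sketch, not the LLVM code):

#include <unistd.h>
#include <cerrno>
#include <system_error>
#include <vector>

// Sketch: read a file descriptor to EOF, trimming the buffer on every exit.
// (std::vector zero-fills on resize; LLVM uses resize_for_overwrite to skip that.)
std::error_code readToEOF(int FD, std::vector<char> &Buffer,
                          size_t ChunkSize = 16384) {
  size_t Size = Buffer.size();
  struct Truncate {                     // Minimal stand-in for make_scope_exit.
    std::vector<char> &B;
    size_t &S;
    ~Truncate() { B.resize(S); }
  } Guard{Buffer, Size};
  for (;;) {
    Buffer.resize(Size + ChunkSize);              // Make room for the next chunk.
    ssize_t N = ::read(FD, Buffer.data() + Size, ChunkSize);
    if (N < 0) {
      if (errno == EINTR)
        continue;                                 // Retry interrupted reads.
      return std::error_code(errno, std::generic_category());
    }
    if (N == 0)
      return std::error_code();                   // EOF: guard trims the buffer.
    Size += static_cast<size_t>(N);
  }
}
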
diff --git a/contrib/llvm-project/llvm/lib/Support/PrettyStackTrace.cpp b/contrib/llvm-project/llvm/lib/Support/PrettyStackTrace.cpp
index 0d07057f1df0..fa91405fee10 100644
--- a/contrib/llvm-project/llvm/lib/Support/PrettyStackTrace.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/PrettyStackTrace.cpp
@@ -13,7 +13,6 @@
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm-c/ErrorHandling.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/Config/config.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/SaveAndRestore.h"
@@ -21,6 +20,10 @@
#include "llvm/Support/Watchdog.h"
#include "llvm/Support/raw_ostream.h"
+#ifdef __APPLE__
+#include "llvm/ADT/SmallString.h"
+#endif
+
#include <atomic>
#include <cassert>
#include <cstdarg>
diff --git a/contrib/llvm-project/llvm/lib/Support/RISCVISAInfo.cpp b/contrib/llvm-project/llvm/lib/Support/RISCVISAInfo.cpp
index e2e4340f44e9..6c59d8a7ef04 100644
--- a/contrib/llvm-project/llvm/lib/Support/RISCVISAInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/RISCVISAInfo.cpp
@@ -9,6 +9,7 @@
#include "llvm/Support/RISCVISAInfo.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Errc.h"
@@ -46,25 +47,56 @@ static const RISCVSupportedExtension SupportedExtensions[] = {
{"f", RISCVExtensionVersion{2, 0}},
{"d", RISCVExtensionVersion{2, 0}},
{"c", RISCVExtensionVersion{2, 0}},
-};
-static const RISCVSupportedExtension SupportedExperimentalExtensions[] = {
- {"v", RISCVExtensionVersion{0, 10}},
+ {"zfhmin", RISCVExtensionVersion{1, 0}},
+ {"zfh", RISCVExtensionVersion{1, 0}},
+
{"zba", RISCVExtensionVersion{1, 0}},
{"zbb", RISCVExtensionVersion{1, 0}},
{"zbc", RISCVExtensionVersion{1, 0}},
+ {"zbs", RISCVExtensionVersion{1, 0}},
+
+ {"zbkb", RISCVExtensionVersion{1, 0}},
+ {"zbkc", RISCVExtensionVersion{1, 0}},
+ {"zbkx", RISCVExtensionVersion{1, 0}},
+ {"zknd", RISCVExtensionVersion{1, 0}},
+ {"zkne", RISCVExtensionVersion{1, 0}},
+ {"zknh", RISCVExtensionVersion{1, 0}},
+ {"zksed", RISCVExtensionVersion{1, 0}},
+ {"zksh", RISCVExtensionVersion{1, 0}},
+ {"zkr", RISCVExtensionVersion{1, 0}},
+ {"zkn", RISCVExtensionVersion{1, 0}},
+ {"zks", RISCVExtensionVersion{1, 0}},
+ {"zkt", RISCVExtensionVersion{1, 0}},
+ {"zk", RISCVExtensionVersion{1, 0}},
+
+ {"v", RISCVExtensionVersion{1, 0}},
+ {"zvl32b", RISCVExtensionVersion{1, 0}},
+ {"zvl64b", RISCVExtensionVersion{1, 0}},
+ {"zvl128b", RISCVExtensionVersion{1, 0}},
+ {"zvl256b", RISCVExtensionVersion{1, 0}},
+ {"zvl512b", RISCVExtensionVersion{1, 0}},
+ {"zvl1024b", RISCVExtensionVersion{1, 0}},
+ {"zvl2048b", RISCVExtensionVersion{1, 0}},
+ {"zvl4096b", RISCVExtensionVersion{1, 0}},
+ {"zvl8192b", RISCVExtensionVersion{1, 0}},
+ {"zvl16384b", RISCVExtensionVersion{1, 0}},
+ {"zvl32768b", RISCVExtensionVersion{1, 0}},
+ {"zvl65536b", RISCVExtensionVersion{1, 0}},
+ {"zve32x", RISCVExtensionVersion{1, 0}},
+ {"zve32f", RISCVExtensionVersion{1, 0}},
+ {"zve64x", RISCVExtensionVersion{1, 0}},
+ {"zve64f", RISCVExtensionVersion{1, 0}},
+ {"zve64d", RISCVExtensionVersion{1, 0}},
+};
+
+static const RISCVSupportedExtension SupportedExperimentalExtensions[] = {
{"zbe", RISCVExtensionVersion{0, 93}},
{"zbf", RISCVExtensionVersion{0, 93}},
{"zbm", RISCVExtensionVersion{0, 93}},
{"zbp", RISCVExtensionVersion{0, 93}},
{"zbr", RISCVExtensionVersion{0, 93}},
- {"zbs", RISCVExtensionVersion{1, 0}},
{"zbt", RISCVExtensionVersion{0, 93}},
-
- {"zvlsseg", RISCVExtensionVersion{0, 10}},
-
- {"zfhmin", RISCVExtensionVersion{0, 1}},
- {"zfh", RISCVExtensionVersion{0, 1}},
};
static bool stripExperimentalPrefix(StringRef &Ext) {
@@ -78,9 +110,9 @@ static bool stripExperimentalPrefix(StringRef &Ext) {
// NOTE: This function is NOT able to take empty strings or strings that only
// have version numbers and no extension name. It assumes the extension name
// is more than one character long.
-static size_t findFirstNonVersionCharacter(const StringRef &Ext) {
- if (Ext.size() == 0)
- llvm_unreachable("Already guarded by if-statement in ::parseArchString");
+static size_t findFirstNonVersionCharacter(StringRef Ext) {
+ assert(!Ext.empty() &&
+ "Already guarded by if-statement in ::parseArchString");
int Pos = Ext.size() - 1;
while (Pos > 0 && isDigit(Ext[Pos]))
@@ -276,16 +308,13 @@ bool RISCVISAInfo::compareExtension(const std::string &LHS,
void RISCVISAInfo::toFeatures(
std::vector<StringRef> &Features,
std::function<StringRef(const Twine &)> StrAlloc) const {
- for (auto &Ext : Exts) {
+ for (auto const &Ext : Exts) {
StringRef ExtName = Ext.first;
if (ExtName == "i")
continue;
- if (ExtName == "zvlsseg") {
- Features.push_back("+experimental-v");
- Features.push_back("+experimental-zvlsseg");
- } else if (isExperimentalExtension(ExtName)) {
+ if (isExperimentalExtension(ExtName)) {
Features.push_back(StrAlloc("+experimental-" + ExtName));
} else {
Features.push_back(StrAlloc("+" + ExtName));
@@ -434,6 +463,8 @@ RISCVISAInfo::parseFeatures(unsigned XLen,
ISAInfo->updateImplication();
ISAInfo->updateFLen();
+ ISAInfo->updateMinVLen();
+ ISAInfo->updateMaxELen();
if (Error Result = ISAInfo->checkDependency())
return std::move(Result);
@@ -657,6 +688,8 @@ RISCVISAInfo::parseArchString(StringRef Arch, bool EnableExperimentalExtension,
ISAInfo->updateImplication();
ISAInfo->updateFLen();
+ ISAInfo->updateMinVLen();
+ ISAInfo->updateMaxELen();
if (Error Result = ISAInfo->checkDependency())
return std::move(Result);
@@ -669,6 +702,12 @@ Error RISCVISAInfo::checkDependency() {
bool HasE = Exts.count("e") == 1;
bool HasD = Exts.count("d") == 1;
bool HasF = Exts.count("f") == 1;
+ bool HasZve32x = Exts.count("zve32x") == 1;
+ bool HasZve32f = Exts.count("zve32f") == 1;
+ bool HasZve64d = Exts.count("zve64d") == 1;
+ bool HasV = Exts.count("v") == 1;
+ bool HasVector = HasZve32x || HasV;
+ bool HasZvl = MinVLen != 0;
if (HasE && !IsRv32)
return createStringError(
@@ -683,6 +722,29 @@ Error RISCVISAInfo::checkDependency() {
return createStringError(errc::invalid_argument,
"d requires f extension to also be specified");
+ // FIXME: Consider Zfinx in the future
+ if (HasZve32f && !HasF)
+ return createStringError(
+ errc::invalid_argument,
+ "zve32f requires f extension to also be specified");
+
+ // FIXME: Consider Zdinx in the future
+ if (HasZve64d && !HasD)
+ return createStringError(
+ errc::invalid_argument,
+ "zve64d requires d extension to also be specified");
+
+ if (HasZvl && !HasVector)
+ return createStringError(
+ errc::invalid_argument,
+ "zvl*b requires v or zve* extension to also be specified");
+
+  // The Zve* extensions cannot be specified together with the V extension.
+ if (HasZve32x && HasV)
+ return createStringError(
+ errc::invalid_argument,
+ "It is illegal to specify the v extension with zve* extensions");
+
// Additional dependency checks.
// TODO: The 'q' extension requires rv64.
// TODO: It is illegal to specify 'e' extensions with 'f' and 'd'.
@@ -690,8 +752,27 @@ Error RISCVISAInfo::checkDependency() {
return Error::success();
}
-static const char *ImpliedExtsV[] = {"zvlsseg"};
+static const char *ImpliedExtsV[] = {"zvl128b", "f", "d"};
static const char *ImpliedExtsZfh[] = {"zfhmin"};
+static const char *ImpliedExtsZve64d[] = {"zve64f"};
+static const char *ImpliedExtsZve64f[] = {"zve64x", "zve32f"};
+static const char *ImpliedExtsZve64x[] = {"zve32x", "zvl64b"};
+static const char *ImpliedExtsZve32f[] = {"zve32x"};
+static const char *ImpliedExtsZve32x[] = {"zvl32b"};
+static const char *ImpliedExtsZvl65536b[] = {"zvl32768b"};
+static const char *ImpliedExtsZvl32768b[] = {"zvl16384b"};
+static const char *ImpliedExtsZvl16384b[] = {"zvl8192b"};
+static const char *ImpliedExtsZvl8192b[] = {"zvl4096b"};
+static const char *ImpliedExtsZvl4096b[] = {"zvl2048b"};
+static const char *ImpliedExtsZvl2048b[] = {"zvl1024b"};
+static const char *ImpliedExtsZvl1024b[] = {"zvl512b"};
+static const char *ImpliedExtsZvl512b[] = {"zvl256b"};
+static const char *ImpliedExtsZvl256b[] = {"zvl128b"};
+static const char *ImpliedExtsZvl128b[] = {"zvl64b"};
+static const char *ImpliedExtsZvl64b[] = {"zvl32b"};
+static const char *ImpliedExtsZk[] = {"zkn", "zkt", "zkr"};
+static const char *ImpliedExtsZkn[] = {"zbkb", "zbkc", "zbkx", "zkne", "zknd", "zknh"};
+static const char *ImpliedExtsZks[] = {"zbkb", "zbkc", "zbkx", "zksed", "zksh"};
struct ImpliedExtsEntry {
StringLiteral Name;
@@ -707,6 +788,25 @@ struct ImpliedExtsEntry {
static constexpr ImpliedExtsEntry ImpliedExts[] = {
{{"v"}, {ImpliedExtsV}},
{{"zfh"}, {ImpliedExtsZfh}},
+ {{"zk"}, {ImpliedExtsZk}},
+ {{"zkn"}, {ImpliedExtsZkn}},
+ {{"zks"}, {ImpliedExtsZks}},
+ {{"zve32f"}, {ImpliedExtsZve32f}},
+ {{"zve32x"}, {ImpliedExtsZve32x}},
+ {{"zve64d"}, {ImpliedExtsZve64d}},
+ {{"zve64f"}, {ImpliedExtsZve64f}},
+ {{"zve64x"}, {ImpliedExtsZve64x}},
+ {{"zvl1024b"}, {ImpliedExtsZvl1024b}},
+ {{"zvl128b"}, {ImpliedExtsZvl128b}},
+ {{"zvl16384b"}, {ImpliedExtsZvl16384b}},
+ {{"zvl2048b"}, {ImpliedExtsZvl2048b}},
+ {{"zvl256b"}, {ImpliedExtsZvl256b}},
+ {{"zvl32768b"}, {ImpliedExtsZvl32768b}},
+ {{"zvl4096b"}, {ImpliedExtsZvl4096b}},
+ {{"zvl512b"}, {ImpliedExtsZvl512b}},
+ {{"zvl64b"}, {ImpliedExtsZvl64b}},
+ {{"zvl65536b"}, {ImpliedExtsZvl65536b}},
+ {{"zvl8192b"}, {ImpliedExtsZvl8192b}},
};
void RISCVISAInfo::updateImplication() {
@@ -721,12 +821,25 @@ void RISCVISAInfo::updateImplication() {
}
assert(llvm::is_sorted(ImpliedExts) && "Table not sorted by Name");
- for (auto &Ext : Exts) {
- auto I = llvm::lower_bound(ImpliedExts, Ext.first);
- if (I != std::end(ImpliedExts) && I->Name == Ext.first) {
- for (auto &ImpliedExt : I->Exts) {
+
+  // This loop may run more than one iteration, since implications can be
+  // layered; it exits once no further implications are applied.
+ SmallSetVector<StringRef, 16> WorkList;
+ for (auto const &Ext : Exts)
+ WorkList.insert(Ext.first);
+
+ while (!WorkList.empty()) {
+ StringRef ExtName = WorkList.pop_back_val();
+ auto I = llvm::lower_bound(ImpliedExts, ExtName);
+ if (I != std::end(ImpliedExts) && I->Name == ExtName) {
+ for (const char *ImpliedExt : I->Exts) {
+ if (WorkList.count(ImpliedExt))
+ continue;
+ if (Exts.count(ImpliedExt))
+ continue;
auto Version = findDefaultVersion(ImpliedExt);
addExtension(ImpliedExt, Version->Major, Version->Minor);
+ WorkList.insert(ImpliedExt);
}
}
}
@@ -741,6 +854,41 @@ void RISCVISAInfo::updateFLen() {
FLen = 32;
}
+void RISCVISAInfo::updateMinVLen() {
+ for (auto const &Ext : Exts) {
+ StringRef ExtName = Ext.first;
+ bool IsZvlExt = ExtName.consume_front("zvl") && ExtName.consume_back("b");
+ if (IsZvlExt) {
+ unsigned ZvlLen;
+ if (!ExtName.getAsInteger(10, ZvlLen))
+ MinVLen = std::max(MinVLen, ZvlLen);
+ }
+ }
+}
+
+void RISCVISAInfo::updateMaxELen() {
+  // Handle the EEW restriction imposed by the zve* sub-extensions.
+ for (auto const &Ext : Exts) {
+ StringRef ExtName = Ext.first;
+ bool IsZveExt = ExtName.consume_front("zve");
+ if (IsZveExt) {
+ if (ExtName.back() == 'f')
+ MaxELenFp = std::max(MaxELenFp, 32u);
+ if (ExtName.back() == 'd')
+ MaxELenFp = std::max(MaxELenFp, 64u);
+ ExtName = ExtName.drop_back();
+ unsigned ZveELen;
+ ExtName.getAsInteger(10, ZveELen);
+ MaxELen = std::max(MaxELen, ZveELen);
+ }
+ if (ExtName == "v") {
+ MaxELenFp = 64;
+ MaxELen = 64;
+ return;
+ }
+ }
+}
+
std::string RISCVISAInfo::toString() const {
std::string Buffer;
raw_string_ostream Arch(Buffer);
@@ -748,7 +896,7 @@ std::string RISCVISAInfo::toString() const {
Arch << "rv" << XLen;
ListSeparator LS("_");
- for (auto &Ext : Exts) {
+ for (auto const &Ext : Exts) {
StringRef ExtName = Ext.first;
auto ExtInfo = Ext.second;
Arch << LS << ExtName;
@@ -757,3 +905,17 @@ std::string RISCVISAInfo::toString() const {
return Arch.str();
}
+
+std::vector<std::string> RISCVISAInfo::toFeatureVector() const {
+ std::vector<std::string> FeatureVector;
+ for (auto const &Ext : Exts) {
+ std::string ExtName = Ext.first;
+ if (ExtName == "i") // i is not recognized in clang -cc1
+ continue;
+ std::string Feature = isExperimentalExtension(ExtName)
+ ? "+experimental-" + ExtName
+ : "+" + ExtName;
+ FeatureVector.push_back(Feature);
+ }
+ return FeatureVector;
+}
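
Several of the RISCVISAInfo hunks above are easier to read together: updateImplication now uses a SmallSetVector as a worklist so that layered implications (for example v -> zvl128b -> zvl64b -> ... -> zvl32b) are closed transitively, and updateMinVLen/updateMaxELen then recover the VLEN and ELEN limits by parsing the numeric part of the zvl<N>b and zve<N>{x,f,d} names. A compact sketch of the worklist closure over a plain map-based implication table (illustrative types and names, not the LLVM ones):

#include <map>
#include <set>
#include <string>
#include <vector>

// Sketch: transitively apply an extension-implication table to a set of
// enabled extensions. Table contents and names are illustrative only.
void applyImplications(
    std::set<std::string> &Enabled,
    const std::map<std::string, std::vector<std::string>> &Implies) {
  std::vector<std::string> WorkList(Enabled.begin(), Enabled.end());
  while (!WorkList.empty()) {
    std::string Ext = WorkList.back();
    WorkList.pop_back();
    auto It = Implies.find(Ext);
    if (It == Implies.end())
      continue;
    for (const std::string &Implied : It->second)
      if (Enabled.insert(Implied).second)   // Newly added: may imply more.
        WorkList.push_back(Implied);
  }
}

Only names that are newly inserted go back on the worklist, so each implication edge is visited once; that is the point of switching from the old single pass to the worklist loop in the hunk above.
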
diff --git a/contrib/llvm-project/llvm/lib/Support/ScopedPrinter.cpp b/contrib/llvm-project/llvm/lib/Support/ScopedPrinter.cpp
index ea90a24eaced..a434e50e8c1f 100644
--- a/contrib/llvm-project/llvm/lib/Support/ScopedPrinter.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/ScopedPrinter.cpp
@@ -1,7 +1,6 @@
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/Format.h"
-#include <cctype>
using namespace llvm::support;
diff --git a/contrib/llvm-project/llvm/lib/Support/Signals.cpp b/contrib/llvm-project/llvm/lib/Support/Signals.cpp
index c018dc92bf40..5ce41c987029 100644
--- a/contrib/llvm-project/llvm/lib/Support/Signals.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/Signals.cpp
@@ -28,6 +28,7 @@
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Mutex.h"
+#include "llvm/Support/Path.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/StringSaver.h"
#include "llvm/Support/raw_ostream.h"
diff --git a/contrib/llvm-project/llvm/lib/Support/Signposts.cpp b/contrib/llvm-project/llvm/lib/Support/Signposts.cpp
index 58fafb26cdf3..074dddc81c80 100644
--- a/contrib/llvm-project/llvm/lib/Support/Signposts.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/Signposts.cpp
@@ -7,7 +7,6 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/Signposts.h"
-#include "llvm/Support/Timer.h"
#include "llvm/Config/config.h"
#if LLVM_SUPPORT_XCODE_SIGNPOSTS
diff --git a/contrib/llvm-project/llvm/lib/Support/SmallPtrSet.cpp b/contrib/llvm-project/llvm/lib/Support/SmallPtrSet.cpp
index f6e2dfb8a6c9..cbb87ea8717c 100644
--- a/contrib/llvm-project/llvm/lib/Support/SmallPtrSet.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/SmallPtrSet.cpp
@@ -13,7 +13,6 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/DenseMapInfo.h"
-#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemAlloc.h"
#include <algorithm>
diff --git a/contrib/llvm-project/llvm/lib/Support/SmallVector.cpp b/contrib/llvm-project/llvm/lib/Support/SmallVector.cpp
index 2d7721e4e1fb..8cafbc7fad0d 100644
--- a/contrib/llvm-project/llvm/lib/Support/SmallVector.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/SmallVector.cpp
@@ -12,6 +12,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/Support/MemAlloc.h"
#include <cstdint>
#ifdef LLVM_ENABLE_EXCEPTIONS
#include <stdexcept>
diff --git a/contrib/llvm-project/llvm/lib/Support/SpecialCaseList.cpp b/contrib/llvm-project/llvm/lib/Support/SpecialCaseList.cpp
index 1939ed9e9547..137b37f2b1c3 100644
--- a/contrib/llvm-project/llvm/lib/Support/SpecialCaseList.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/SpecialCaseList.cpp
@@ -15,7 +15,6 @@
#include "llvm/Support/SpecialCaseList.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Regex.h"
#include "llvm/Support/VirtualFileSystem.h"
diff --git a/contrib/llvm-project/llvm/lib/Support/StringMap.cpp b/contrib/llvm-project/llvm/lib/Support/StringMap.cpp
index f65d3846623c..012c785b4351 100644
--- a/contrib/llvm-project/llvm/lib/Support/StringMap.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/StringMap.cpp
@@ -11,7 +11,6 @@
//===----------------------------------------------------------------------===//
#include "llvm/ADT/StringMap.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/DJB.h"
#include "llvm/Support/MathExtras.h"
diff --git a/contrib/llvm-project/llvm/lib/Support/StringRef.cpp b/contrib/llvm-project/llvm/lib/Support/StringRef.cpp
index 652303fdb6a0..3ed08ed38661 100644
--- a/contrib/llvm-project/llvm/lib/Support/StringRef.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/StringRef.cpp
@@ -597,3 +597,11 @@ bool StringRef::getAsDouble(double &Result, bool AllowInexact) const {
hash_code llvm::hash_value(StringRef S) {
return hash_combine_range(S.begin(), S.end());
}
+
+unsigned DenseMapInfo<StringRef, void>::getHashValue(StringRef Val) {
+ assert(Val.data() != getEmptyKey().data() &&
+ "Cannot hash the empty key!");
+ assert(Val.data() != getTombstoneKey().data() &&
+ "Cannot hash the tombstone key!");
+ return (unsigned)(hash_value(Val));
+}
diff --git a/contrib/llvm-project/llvm/lib/Support/SymbolRemappingReader.cpp b/contrib/llvm-project/llvm/lib/Support/SymbolRemappingReader.cpp
index 1caf0947216e..90997ab0a6ce 100644
--- a/contrib/llvm-project/llvm/lib/Support/SymbolRemappingReader.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/SymbolRemappingReader.cpp
@@ -15,6 +15,7 @@
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/LineIterator.h"
+#include "llvm/Support/MemoryBuffer.h"
using namespace llvm;
diff --git a/contrib/llvm-project/llvm/lib/Support/TargetParser.cpp b/contrib/llvm-project/llvm/lib/Support/TargetParser.cpp
index bc60bdea5f62..0105cd2e8153 100644
--- a/contrib/llvm-project/llvm/lib/Support/TargetParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/TargetParser.cpp
@@ -13,10 +13,8 @@
#include "llvm/Support/TargetParser.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/Twine.h"
-#include "llvm/Support/ARMBuildAttributes.h"
+#include "llvm/ADT/Triple.h"
using namespace llvm;
using namespace AMDGPU;
diff --git a/contrib/llvm-project/llvm/lib/Support/ThreadPool.cpp b/contrib/llvm-project/llvm/lib/Support/ThreadPool.cpp
index 54ea84d4bd6d..9f92ae1c7a7c 100644
--- a/contrib/llvm-project/llvm/lib/Support/ThreadPool.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/ThreadPool.cpp
@@ -13,8 +13,12 @@
#include "llvm/Support/ThreadPool.h"
#include "llvm/Config/llvm-config.h"
+
+#if LLVM_ENABLE_THREADS
#include "llvm/Support/Threading.h"
+#else
#include "llvm/Support/raw_ostream.h"
+#endif
using namespace llvm;
@@ -117,6 +121,10 @@ void ThreadPool::wait() {
}
}
+bool ThreadPool::isWorkerThread() const {
+ report_fatal_error("LLVM compiled without multithreading");
+}
+
ThreadPool::~ThreadPool() { wait(); }
#endif
diff --git a/contrib/llvm-project/llvm/lib/Support/TimeProfiler.cpp b/contrib/llvm-project/llvm/lib/Support/TimeProfiler.cpp
index 2b094a4983a0..9380fa01c84a 100644
--- a/contrib/llvm-project/llvm/lib/Support/TimeProfiler.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/TimeProfiler.cpp
@@ -11,10 +11,10 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/TimeProfiler.h"
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/JSON.h"
+#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Threading.h"
diff --git a/contrib/llvm-project/llvm/lib/Support/ToolOutputFile.cpp b/contrib/llvm-project/llvm/lib/Support/ToolOutputFile.cpp
index c192ce60f31c..c2ca97a59c62 100644
--- a/contrib/llvm-project/llvm/lib/Support/ToolOutputFile.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/ToolOutputFile.cpp
@@ -11,7 +11,6 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/ToolOutputFile.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Signals.h"
using namespace llvm;
diff --git a/contrib/llvm-project/llvm/lib/Support/Triple.cpp b/contrib/llvm-project/llvm/lib/Support/Triple.cpp
index 2819dc0c139a..20dea8c302a5 100644
--- a/contrib/llvm-project/llvm/lib/Support/Triple.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/Triple.cpp
@@ -14,7 +14,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/SwapByteOrder.h"
-#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/ARMTargetParser.h"
#include "llvm/Support/VersionTuple.h"
#include <cassert>
#include <cstring>
@@ -663,12 +663,16 @@ static Triple::SubArchType parseSubArch(StringRef SubArchName) {
return Triple::ARMSubArch_v8_6a;
case ARM::ArchKind::ARMV8_7A:
return Triple::ARMSubArch_v8_7a;
+ case ARM::ArchKind::ARMV8_8A:
+ return Triple::ARMSubArch_v8_8a;
case ARM::ArchKind::ARMV9A:
return Triple::ARMSubArch_v9;
case ARM::ArchKind::ARMV9_1A:
return Triple::ARMSubArch_v9_1a;
case ARM::ArchKind::ARMV9_2A:
return Triple::ARMSubArch_v9_2a;
+ case ARM::ArchKind::ARMV9_3A:
+ return Triple::ARMSubArch_v9_3a;
case ARM::ArchKind::ARMV8R:
return Triple::ARMSubArch_v8r;
case ARM::ArchKind::ARMV8MBaseline:
diff --git a/contrib/llvm-project/llvm/lib/Support/TypeSize.cpp b/contrib/llvm-project/llvm/lib/Support/TypeSize.cpp
index abb81016a0ba..a80fde83e3bc 100644
--- a/contrib/llvm-project/llvm/lib/Support/TypeSize.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/TypeSize.cpp
@@ -8,6 +8,7 @@
#include "llvm/Support/TypeSize.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/WithColor.h"
#include "DebugOptions.h"
diff --git a/contrib/llvm-project/llvm/lib/Support/Unix/Path.inc b/contrib/llvm-project/llvm/lib/Support/Unix/Path.inc
index 1b33e6a88c6a..4697c46d3848 100644
--- a/contrib/llvm-project/llvm/lib/Support/Unix/Path.inc
+++ b/contrib/llvm-project/llvm/lib/Support/Unix/Path.inc
@@ -273,7 +273,7 @@ std::string getMainExecutable(const char *argv0, void *MainAddr) {
// the program, and not the eventual binary file. Therefore, call realpath
// so this behaves the same on all platforms.
#if _POSIX_VERSION >= 200112 || defined(__GLIBC__)
- if (char *real_path = realpath(exe_path, NULL)) {
+ if (char *real_path = realpath(exe_path, nullptr)) {
std::string ret = std::string(real_path);
free(real_path);
return ret;
@@ -380,20 +380,22 @@ std::error_code current_path(SmallVectorImpl<char> &result) {
return std::error_code();
}
- result.reserve(PATH_MAX);
+ result.resize_for_overwrite(PATH_MAX);
while (true) {
- if (::getcwd(result.data(), result.capacity()) == nullptr) {
+ if (::getcwd(result.data(), result.size()) == nullptr) {
// See if there was a real error.
- if (errno != ENOMEM)
+ if (errno != ENOMEM) {
+ result.clear();
return std::error_code(errno, std::generic_category());
+ }
// Otherwise there just wasn't enough space.
- result.reserve(result.capacity() * 2);
+ result.resize_for_overwrite(result.capacity() * 2);
} else
break;
}
- result.set_size(strlen(result.data()));
+ result.truncate(strlen(result.data()));
return std::error_code();
}
@@ -870,6 +872,17 @@ void mapped_file_region::unmapImpl() {
::munmap(Mapping, Size);
}
+void mapped_file_region::dontNeedImpl() {
+ assert(Mode == mapped_file_region::readonly);
+#if defined(__MVS__) || defined(_AIX)
+ // If we don't have madvise, or it isn't beneficial, treat this as a no-op.
+ return;
+#else
+ if (Mapping)
+ ::madvise(Mapping, Size, MADV_DONTNEED);
+#endif
+}
+
int mapped_file_region::alignment() {
return Process::getPageSizeEstimate();
}
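
current_path above switches from reserve/set_size to resize_for_overwrite/truncate: size the buffer, let getcwd write into it, grow and retry if it reports insufficient space, then truncate to the actual string length. The same grow-and-retry idiom with a plain std::string (a sketch; it accepts either ERANGE or ENOMEM as the too-small signal, while the code above keys on ENOMEM):

#include <unistd.h>
#include <cerrno>
#include <cstring>
#include <limits.h>
#include <string>
#include <system_error>

// Sketch: getcwd into a growable buffer, doubling on a too-small error.
std::error_code currentPath(std::string &Result) {
  Result.resize(PATH_MAX);
  while (::getcwd(&Result[0], Result.size()) == nullptr) {
    if (errno != ERANGE && errno != ENOMEM) {
      Result.clear();
      return std::error_code(errno, std::generic_category());
    }
    Result.resize(Result.size() * 2);   // Not enough space: grow and retry.
  }
  Result.resize(std::strlen(Result.c_str()));
  return std::error_code();
}
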
diff --git a/contrib/llvm-project/llvm/lib/Support/VirtualFileSystem.cpp b/contrib/llvm-project/llvm/lib/Support/VirtualFileSystem.cpp
index bec4e8dbe06c..f15e301874c4 100644
--- a/contrib/llvm-project/llvm/lib/Support/VirtualFileSystem.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/VirtualFileSystem.cpp
@@ -35,7 +35,6 @@
#include "llvm/Support/FileSystem/UniqueID.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
-#include "llvm/Support/Process.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/YAMLParser.h"
@@ -46,9 +45,7 @@
#include <cstdint>
#include <iterator>
#include <limits>
-#include <map>
#include <memory>
-#include <mutex>
#include <string>
#include <system_error>
#include <utility>
@@ -574,6 +571,11 @@ public:
}
virtual ~InMemoryNode() = default;
+ /// Return the \p Status for this node. \p RequestedName should be the name
+ /// through which the caller referred to this node. It will override
+ /// \p Status::Name in the return value, to mimic the behavior of \p RealFile.
+ virtual Status getStatus(const Twine &RequestedName) const = 0;
+
/// Get the filename of this node (the name without the directory part).
StringRef getFileName() const { return FileName; }
InMemoryNodeKind getKind() const { return Kind; }
@@ -589,10 +591,7 @@ public:
: InMemoryNode(Stat.getName(), IME_File), Stat(std::move(Stat)),
Buffer(std::move(Buffer)) {}
- /// Return the \p Status for this node. \p RequestedName should be the name
- /// through which the caller referred to this node. It will override
- /// \p Status::Name in the return value, to mimic the behavior of \p RealFile.
- Status getStatus(const Twine &RequestedName) const {
+ Status getStatus(const Twine &RequestedName) const override {
return Status::copyWithNewName(Stat, RequestedName);
}
llvm::MemoryBuffer *getBuffer() const { return Buffer.get(); }
@@ -616,6 +615,10 @@ public:
: InMemoryNode(Path, IME_HardLink), ResolvedFile(ResolvedFile) {}
const InMemoryFile &getResolvedFile() const { return ResolvedFile; }
+ Status getStatus(const Twine &RequestedName) const override {
+ return ResolvedFile.getStatus(RequestedName);
+ }
+
std::string toString(unsigned Indent) const override {
return std::string(Indent, ' ') + "HardLink to -> " +
ResolvedFile.toString(0);
@@ -668,7 +671,7 @@ public:
/// Return the \p Status for this node. \p RequestedName should be the name
/// through which the caller referred to this node. It will override
/// \p Status::Name in the return value, to mimic the behavior of \p RealFile.
- Status getStatus(const Twine &RequestedName) const {
+ Status getStatus(const Twine &RequestedName) const override {
return Status::copyWithNewName(Stat, RequestedName);
}
@@ -704,17 +707,6 @@ public:
}
};
-namespace {
-Status getNodeStatus(const InMemoryNode *Node, const Twine &RequestedName) {
- if (auto Dir = dyn_cast<detail::InMemoryDirectory>(Node))
- return Dir->getStatus(RequestedName);
- if (auto File = dyn_cast<detail::InMemoryFile>(Node))
- return File->getStatus(RequestedName);
- if (auto Link = dyn_cast<detail::InMemoryHardLink>(Node))
- return Link->getResolvedFile().getStatus(RequestedName);
- llvm_unreachable("Unknown node type");
-}
-} // namespace
} // namespace detail
// The UniqueID of in-memory files is derived from path and content.
@@ -734,6 +726,16 @@ static sys::fs::UniqueID getDirectoryID(sys::fs::UniqueID Parent,
return getUniqueID(llvm::hash_combine(Parent.getFile(), Name));
}
+Status detail::NewInMemoryNodeInfo::makeStatus() const {
+ UniqueID UID =
+ (Type == sys::fs::file_type::directory_file)
+ ? getDirectoryID(DirUID, Name)
+ : getFileID(DirUID, Name, Buffer ? Buffer->getBuffer() : "");
+
+ return Status(Path, UID, llvm::sys::toTimePoint(ModificationTime), User,
+ Group, Buffer ? Buffer->getBufferSize() : 0, Type, Perms);
+}
+
InMemoryFileSystem::InMemoryFileSystem(bool UseNormalizedPaths)
: Root(new detail::InMemoryDirectory(
Status("", getDirectoryID(llvm::sys::fs::UniqueID(), ""),
@@ -754,7 +756,7 @@ bool InMemoryFileSystem::addFile(const Twine &P, time_t ModificationTime,
Optional<uint32_t> Group,
Optional<llvm::sys::fs::file_type> Type,
Optional<llvm::sys::fs::perms> Perms,
- const detail::InMemoryFile *HardLinkTarget) {
+ MakeNodeFn MakeNode) {
SmallString<128> Path;
P.toVector(Path);
@@ -775,7 +777,6 @@ bool InMemoryFileSystem::addFile(const Twine &P, time_t ModificationTime,
const auto ResolvedGroup = Group.getValueOr(0);
const auto ResolvedType = Type.getValueOr(sys::fs::file_type::regular_file);
const auto ResolvedPerms = Perms.getValueOr(sys::fs::all_all);
- assert(!(HardLinkTarget && Buffer) && "HardLink cannot have a buffer");
// Any intermediate directories we create should be accessible by
// the owner, even if Perms says otherwise for the final path.
const auto NewDirectoryPerms = ResolvedPerms | sys::fs::owner_all;
@@ -786,27 +787,10 @@ bool InMemoryFileSystem::addFile(const Twine &P, time_t ModificationTime,
if (!Node) {
if (I == E) {
// End of the path.
- std::unique_ptr<detail::InMemoryNode> Child;
- if (HardLinkTarget)
- Child.reset(new detail::InMemoryHardLink(P.str(), *HardLinkTarget));
- else {
- // Create a new file or directory.
- Status Stat(
- P.str(),
- (ResolvedType == sys::fs::file_type::directory_file)
- ? getDirectoryID(Dir->getUniqueID(), Name)
- : getFileID(Dir->getUniqueID(), Name, Buffer->getBuffer()),
- llvm::sys::toTimePoint(ModificationTime), ResolvedUser,
- ResolvedGroup, Buffer->getBufferSize(), ResolvedType,
- ResolvedPerms);
- if (ResolvedType == sys::fs::file_type::directory_file) {
- Child.reset(new detail::InMemoryDirectory(std::move(Stat)));
- } else {
- Child.reset(
- new detail::InMemoryFile(std::move(Stat), std::move(Buffer)));
- }
- }
- Dir->addChild(Name, std::move(Child));
+ Dir->addChild(
+ Name, MakeNode({Dir->getUniqueID(), Path, Name, ModificationTime,
+ std::move(Buffer), ResolvedUser, ResolvedGroup,
+ ResolvedType, ResolvedPerms}));
return true;
}
@@ -850,7 +834,15 @@ bool InMemoryFileSystem::addFile(const Twine &P, time_t ModificationTime,
Optional<llvm::sys::fs::file_type> Type,
Optional<llvm::sys::fs::perms> Perms) {
return addFile(P, ModificationTime, std::move(Buffer), User, Group, Type,
- Perms, /*HardLinkTarget=*/nullptr);
+ Perms,
+ [](detail::NewInMemoryNodeInfo NNI)
+ -> std::unique_ptr<detail::InMemoryNode> {
+ Status Stat = NNI.makeStatus();
+ if (Stat.getType() == sys::fs::file_type::directory_file)
+ return std::make_unique<detail::InMemoryDirectory>(Stat);
+ return std::make_unique<detail::InMemoryFile>(
+ Stat, std::move(NNI.Buffer));
+ });
}
bool InMemoryFileSystem::addFileNoOwn(const Twine &P, time_t ModificationTime,
@@ -861,7 +853,15 @@ bool InMemoryFileSystem::addFileNoOwn(const Twine &P, time_t ModificationTime,
Optional<llvm::sys::fs::perms> Perms) {
return addFile(P, ModificationTime, llvm::MemoryBuffer::getMemBuffer(Buffer),
std::move(User), std::move(Group), std::move(Type),
- std::move(Perms));
+ std::move(Perms),
+ [](detail::NewInMemoryNodeInfo NNI)
+ -> std::unique_ptr<detail::InMemoryNode> {
+ Status Stat = NNI.makeStatus();
+ if (Stat.getType() == sys::fs::file_type::directory_file)
+ return std::make_unique<detail::InMemoryDirectory>(Stat);
+ return std::make_unique<detail::InMemoryFile>(
+ Stat, std::move(NNI.Buffer));
+ });
}
static ErrorOr<const detail::InMemoryNode *>
@@ -916,14 +916,17 @@ bool InMemoryFileSystem::addHardLink(const Twine &FromPath,
// before. Resolved ToPath must be a File.
if (!ToNode || FromNode || !isa<detail::InMemoryFile>(*ToNode))
return false;
- return this->addFile(FromPath, 0, nullptr, None, None, None, None,
- cast<detail::InMemoryFile>(*ToNode));
+ return addFile(FromPath, 0, nullptr, None, None, None, None,
+ [&](detail::NewInMemoryNodeInfo NNI) {
+ return std::make_unique<detail::InMemoryHardLink>(
+ NNI.Path.str(), *cast<detail::InMemoryFile>(*ToNode));
+ });
}
llvm::ErrorOr<Status> InMemoryFileSystem::status(const Twine &Path) {
auto Node = lookupInMemoryNode(*this, Root.get(), Path);
if (Node)
- return detail::getNodeStatus(*Node, Path);
+ return (*Node)->getStatus(Path);
return Node.getError();
}
@@ -1649,10 +1652,19 @@ private:
sys::path::Style::windows_backslash)) {
path_style = sys::path::Style::windows_backslash;
} else {
- assert(NameValueNode && "Name presence should be checked earlier");
- error(NameValueNode,
+ // Relative VFS root entries are made absolute to the current working
+ // directory, then we can determine the path style from that.
+ auto EC = sys::fs::make_absolute(Name);
+ if (EC) {
+ assert(NameValueNode && "Name presence should be checked earlier");
+ error(
+ NameValueNode,
"entry with relative path at the root level is not discoverable");
- return nullptr;
+ return nullptr;
+ }
+ path_style = sys::path::is_absolute(Name, sys::path::Style::posix)
+ ? sys::path::Style::posix
+ : sys::path::Style::windows_backslash;
}
}
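
The InMemoryFileSystem changes above push node construction out of addFile into a MakeNodeFn callback, so addFile no longer special-cases hard links and each caller supplies a lambda that builds the right node type. The shape of that refactor, reduced to a toy example with made-up node types:

#include <functional>
#include <memory>
#include <string>

// Toy sketch of the factory-callback pattern used by the addFile refactor.
struct Node {
  virtual ~Node() = default;
};
struct FileNode : Node {
  std::string Contents;
  explicit FileNode(std::string C) : Contents(std::move(C)) {}
};
struct HardLinkNode : Node {
  const FileNode &Target;
  explicit HardLinkNode(const FileNode &T) : Target(T) {}
};

using MakeNodeFn = std::function<std::unique_ptr<Node>(std::string Name)>;

// The generic insertion path only walks/creates directories and then calls
// MakeNode for the leaf; it no longer needs to know every node kind.
bool addEntry(std::string Path, MakeNodeFn MakeNode) {
  std::unique_ptr<Node> Leaf = MakeNode(std::move(Path));
  // ... insert Leaf into the directory tree here ...
  return Leaf != nullptr;
}

// Callers choose the node type:
bool addRegularFile(std::string Path, std::string Contents) {
  return addEntry(std::move(Path), [&](std::string) {
    return std::make_unique<FileNode>(std::move(Contents));
  });
}
bool addHardLink(std::string Path, const FileNode &Target) {
  return addEntry(std::move(Path), [&](std::string) {
    return std::make_unique<HardLinkNode>(Target);
  });
}

With the callback in place, the hard-link special case and the getNodeStatus dispatcher removed above are no longer needed: each node kind answers getStatus virtually, and each caller builds its own node.
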
diff --git a/contrib/llvm-project/llvm/lib/Support/Windows/Path.inc b/contrib/llvm-project/llvm/lib/Support/Windows/Path.inc
index b15e71a9ce2a..5f1a364ea1a8 100644
--- a/contrib/llvm-project/llvm/lib/Support/Windows/Path.inc
+++ b/contrib/llvm-project/llvm/lib/Support/Windows/Path.inc
@@ -132,7 +132,8 @@ const file_t kInvalidFile = INVALID_HANDLE_VALUE;
std::string getMainExecutable(const char *argv0, void *MainExecAddr) {
SmallVector<wchar_t, MAX_PATH> PathName;
- DWORD Size = ::GetModuleFileNameW(NULL, PathName.data(), PathName.capacity());
+ PathName.resize_for_overwrite(PathName.capacity());
+ DWORD Size = ::GetModuleFileNameW(NULL, PathName.data(), PathName.size());
// A zero return value indicates a failure other than insufficient space.
if (Size == 0)
@@ -145,7 +146,7 @@ std::string getMainExecutable(const char *argv0, void *MainExecAddr) {
// On success, GetModuleFileNameW returns the number of characters written to
// the buffer not including the NULL terminator.
- PathName.set_size(Size);
+ PathName.truncate(Size);
// Convert the result from UTF-16 to UTF-8.
SmallVector<char, MAX_PATH> PathNameUTF8;
@@ -201,8 +202,8 @@ std::error_code current_path(SmallVectorImpl<char> &result) {
DWORD len = MAX_PATH;
do {
- cur_path.reserve(len);
- len = ::GetCurrentDirectoryW(cur_path.capacity(), cur_path.data());
+ cur_path.resize_for_overwrite(len);
+ len = ::GetCurrentDirectoryW(cur_path.size(), cur_path.data());
// A zero return value indicates a failure other than insufficient space.
if (len == 0)
@@ -210,11 +211,11 @@ std::error_code current_path(SmallVectorImpl<char> &result) {
// If there's insufficient space, the len returned is larger than the len
// given.
- } while (len > cur_path.capacity());
+ } while (len > cur_path.size());
// On success, GetCurrentDirectoryW returns the number of characters not
// including the null-terminator.
- cur_path.set_size(len);
+ cur_path.truncate(len);
if (std::error_code EC =
UTF16ToUTF8(cur_path.begin(), cur_path.size(), result))
@@ -328,7 +329,7 @@ static std::error_code is_local_internal(SmallVectorImpl<wchar_t> &Path,
// the null terminator, it will leave the output unterminated. Push a null
// terminator onto the end to ensure that this never happens.
VolumePath.push_back(L'\0');
- VolumePath.set_size(wcslen(VolumePath.data()));
+ VolumePath.truncate(wcslen(VolumePath.data()));
const wchar_t *P = VolumePath.data();
UINT Type = ::GetDriveTypeW(P);
@@ -364,18 +365,19 @@ std::error_code is_local(const Twine &path, bool &result) {
static std::error_code realPathFromHandle(HANDLE H,
SmallVectorImpl<wchar_t> &Buffer) {
+ Buffer.resize_for_overwrite(Buffer.capacity());
DWORD CountChars = ::GetFinalPathNameByHandleW(
H, Buffer.begin(), Buffer.capacity(), FILE_NAME_NORMALIZED);
if (CountChars && CountChars >= Buffer.capacity()) {
// The buffer wasn't big enough, try again. In this case the return value
// *does* indicate the size of the null terminator.
- Buffer.reserve(CountChars);
+ Buffer.resize_for_overwrite(CountChars);
CountChars = ::GetFinalPathNameByHandleW(
- H, Buffer.begin(), Buffer.capacity(), FILE_NAME_NORMALIZED);
+ H, Buffer.begin(), Buffer.size(), FILE_NAME_NORMALIZED);
}
+ Buffer.truncate(CountChars);
if (CountChars == 0)
return mapWindowsError(GetLastError());
- Buffer.set_size(CountChars);
return std::error_code();
}
@@ -959,6 +961,8 @@ void mapped_file_region::unmapImpl() {
}
}
+void mapped_file_region::dontNeedImpl() {}
+
int mapped_file_region::alignment() {
SYSTEM_INFO SysInfo;
::GetSystemInfo(&SysInfo);
@@ -1448,14 +1452,14 @@ static bool getTempDirEnvVar(const wchar_t *Var, SmallVectorImpl<char> &Res) {
SmallVector<wchar_t, 1024> Buf;
size_t Size = 1024;
do {
- Buf.reserve(Size);
- Size = GetEnvironmentVariableW(Var, Buf.data(), Buf.capacity());
+ Buf.resize_for_overwrite(Size);
+ Size = GetEnvironmentVariableW(Var, Buf.data(), Buf.size());
if (Size == 0)
return false;
// Try again with larger buffer.
- } while (Size > Buf.capacity());
- Buf.set_size(Size);
+ } while (Size > Buf.size());
+ Buf.truncate(Size);
return !windows::UTF16ToUTF8(Buf.data(), Size, Res);
}
@@ -1504,7 +1508,7 @@ std::error_code CodePageToUTF16(unsigned codepage,
}
utf16.reserve(len + 1);
- utf16.set_size(len);
+ utf16.resize_for_overwrite(len);
len = ::MultiByteToWideChar(codepage, MB_ERR_INVALID_CHARS, original.begin(),
original.size(), utf16.begin(), utf16.size());
@@ -1544,8 +1548,8 @@ std::error_code UTF16ToCodePage(unsigned codepage, const wchar_t *utf16,
return mapWindowsError(::GetLastError());
}
- converted.reserve(len);
- converted.set_size(len);
+ converted.reserve(len + 1);
+ converted.resize_for_overwrite(len);
// Now do the actual conversion.
len = ::WideCharToMultiByte(codepage, 0, utf16, utf16_len, converted.data(),
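
The Windows hunks above (realPathFromHandle, getTempDirEnvVar, and the Process/Program counterparts that follow) all apply the same change: size the wide-character buffer with resize_for_overwrite, pass size() rather than capacity() to the API, and retry when the call reports that a larger buffer is required. A standalone sketch of that call-query-retry idiom around GetEnvironmentVariableW (illustrative, not the LLVM helper):

#include <windows.h>
#include <string>

// Sketch: query a wide environment variable, resizing until it fits.
bool getEnvVarW(const wchar_t *Name, std::wstring &Value) {
  DWORD Size = MAX_PATH;                // Initial guess.
  for (;;) {
    Value.resize(Size);
    ::SetLastError(NO_ERROR);           // Distinguish "empty" from "missing".
    Size = ::GetEnvironmentVariableW(Name, &Value[0],
                                     static_cast<DWORD>(Value.size()));
    if (Size == 0 && ::GetLastError() != NO_ERROR)
      return false;                     // Variable missing or a real error.
    if (Size <= Value.size())
      break;                            // Fit: Size is chars written, no NUL.
    // Otherwise Size is the required length (including NUL): grow and retry.
  }
  Value.resize(Size);
  return true;
}
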
diff --git a/contrib/llvm-project/llvm/lib/Support/Windows/Process.inc b/contrib/llvm-project/llvm/lib/Support/Windows/Process.inc
index 6732063b562e..dfaab1613de1 100644
--- a/contrib/llvm-project/llvm/lib/Support/Windows/Process.inc
+++ b/contrib/llvm-project/llvm/lib/Support/Windows/Process.inc
@@ -129,16 +129,16 @@ Optional<std::string> Process::GetEnv(StringRef Name) {
SmallVector<wchar_t, MAX_PATH> Buf;
size_t Size = MAX_PATH;
do {
- Buf.reserve(Size);
+ Buf.resize_for_overwrite(Size);
SetLastError(NO_ERROR);
Size =
- GetEnvironmentVariableW(NameUTF16.data(), Buf.data(), Buf.capacity());
+ GetEnvironmentVariableW(NameUTF16.data(), Buf.data(), Buf.size());
if (Size == 0 && GetLastError() == ERROR_ENVVAR_NOT_FOUND)
return None;
// Try again with larger buffer.
- } while (Size > Buf.capacity());
- Buf.set_size(Size);
+ } while (Size > Buf.size());
+ Buf.truncate(Size);
// Convert the result from UTF-16 to UTF-8.
SmallVector<char, MAX_PATH> Res;
diff --git a/contrib/llvm-project/llvm/lib/Support/Windows/Program.inc b/contrib/llvm-project/llvm/lib/Support/Windows/Program.inc
index a9cf2db7ec72..ee633411584f 100644
--- a/contrib/llvm-project/llvm/lib/Support/Windows/Program.inc
+++ b/contrib/llvm-project/llvm/lib/Support/Windows/Program.inc
@@ -72,7 +72,7 @@ ErrorOr<std::string> sys::findProgramByName(StringRef Name,
SmallVector<wchar_t, MAX_PATH> U16Result;
DWORD Len = MAX_PATH;
do {
- U16Result.reserve(Len);
+ U16Result.resize_for_overwrite(Len);
// Let's attach the extension manually. That is needed for files
// with a dot in the name like aaa.bbb. SearchPathW will not add an extension
// from its argument to such files because it thinks they already have one.
@@ -82,13 +82,13 @@ ErrorOr<std::string> sys::findProgramByName(StringRef Name,
return EC;
Len = ::SearchPathW(Path, c_str(U16NameExt), nullptr,
- U16Result.capacity(), U16Result.data(), nullptr);
- } while (Len > U16Result.capacity());
+ U16Result.size(), U16Result.data(), nullptr);
+ } while (Len > U16Result.size());
if (Len == 0)
continue;
- U16Result.set_size(Len);
+ U16Result.truncate(Len);
if (std::error_code EC =
windows::UTF16ToUTF8(U16Result.data(), U16Result.size(), U8Result))
diff --git a/contrib/llvm-project/llvm/lib/Support/X86TargetParser.cpp b/contrib/llvm-project/llvm/lib/Support/X86TargetParser.cpp
index ab49ac548f89..10f9692d217e 100644
--- a/contrib/llvm-project/llvm/lib/Support/X86TargetParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/X86TargetParser.cpp
@@ -12,7 +12,6 @@
#include "llvm/Support/X86TargetParser.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/Triple.h"
#include <numeric>
using namespace llvm;
diff --git a/contrib/llvm-project/llvm/lib/Support/YAMLParser.cpp b/contrib/llvm-project/llvm/lib/Support/YAMLParser.cpp
index 2adf37a511d1..200261d3ed5c 100644
--- a/contrib/llvm-project/llvm/lib/Support/YAMLParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/YAMLParser.cpp
@@ -27,7 +27,6 @@
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/Unicode.h"
#include "llvm/Support/raw_ostream.h"
-#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
diff --git a/contrib/llvm-project/llvm/lib/Support/YAMLTraits.cpp b/contrib/llvm-project/llvm/lib/Support/YAMLTraits.cpp
index aa6163a76161..8cdd03149bcf 100644
--- a/contrib/llvm-project/llvm/lib/Support/YAMLTraits.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/YAMLTraits.cpp
@@ -18,13 +18,12 @@
#include "llvm/Support/Format.h"
#include "llvm/Support/LineIterator.h"
#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/Unicode.h"
+#include "llvm/Support/VersionTuple.h"
#include "llvm/Support/YAMLParser.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
-#include <cstdlib>
#include <cstring>
#include <string>
#include <vector>
@@ -300,7 +299,7 @@ void Input::endEnumScalar() {
bool Input::beginBitSetScalar(bool &DoClear) {
BitValuesUsed.clear();
if (SequenceHNode *SQ = dyn_cast<SequenceHNode>(CurrentNode)) {
- BitValuesUsed.insert(BitValuesUsed.begin(), SQ->Entries.size(), false);
+ BitValuesUsed.resize(SQ->Entries.size());
} else {
setError(CurrentNode, "expected sequence of bit values");
}
@@ -527,8 +526,9 @@ std::vector<StringRef> Output::keys() {
}
bool Output::preflightKey(const char *Key, bool Required, bool SameAsDefault,
- bool &UseDefault, void *&) {
+ bool &UseDefault, void *&SaveInfo) {
UseDefault = false;
+ SaveInfo = nullptr;
if (Required || !SameAsDefault || WriteDefaultValues) {
auto State = StateStack.back();
if (State == inFlowMapFirstKey || State == inFlowMapOtherKey) {
@@ -599,7 +599,8 @@ void Output::endSequence() {
StateStack.pop_back();
}
-bool Output::preflightElement(unsigned, void *&) {
+bool Output::preflightElement(unsigned, void *&SaveInfo) {
+ SaveInfo = nullptr;
return true;
}
@@ -627,7 +628,7 @@ void Output::endFlowSequence() {
outputUpToEndOfLine(" ]");
}
-bool Output::preflightFlowElement(unsigned, void *&) {
+bool Output::preflightFlowElement(unsigned, void *&SaveInfo) {
if (NeedFlowSequenceComma)
output(", ");
if (WrapColumn && Column > WrapColumn) {
@@ -637,6 +638,7 @@ bool Output::preflightFlowElement(unsigned, void *&) {
Column = ColumnAtFlowStart;
output(" ");
}
+ SaveInfo = nullptr;
return true;
}
diff --git a/contrib/llvm-project/llvm/lib/Support/raw_ostream.cpp b/contrib/llvm-project/llvm/lib/Support/raw_ostream.cpp
index 4590a3d19b0d..e4b747b68bea 100644
--- a/contrib/llvm-project/llvm/lib/Support/raw_ostream.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/raw_ostream.cpp
@@ -15,6 +15,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/Config/config.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Duration.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Format.h"
@@ -24,10 +25,8 @@
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
#include <algorithm>
-#include <cctype>
#include <cerrno>
#include <cstdio>
-#include <iterator>
#include <sys/stat.h>
// <fcntl.h> may provide O_BINARY.
@@ -643,13 +642,14 @@ raw_fd_ostream::raw_fd_ostream(int fd, bool shouldClose, bool unbuffered,
// Get the starting position.
off_t loc = ::lseek(FD, 0, SEEK_CUR);
-#ifdef _WIN32
- // MSVCRT's _lseek(SEEK_CUR) doesn't return -1 for pipes.
sys::fs::file_status Status;
std::error_code EC = status(FD, Status);
- SupportsSeeking = !EC && Status.type() == sys::fs::file_type::regular_file;
+ IsRegularFile = Status.type() == sys::fs::file_type::regular_file;
+#ifdef _WIN32
+ // MSVCRT's _lseek(SEEK_CUR) doesn't return -1 for pipes.
+ SupportsSeeking = !EC && IsRegularFile;
#else
- SupportsSeeking = loc != (off_t)-1;
+ SupportsSeeking = !EC && loc != (off_t)-1;
#endif
if (!SupportsSeeking)
pos = 0;
@@ -869,8 +869,8 @@ Expected<sys::fs::FileLocker> raw_fd_ostream::lock() {
}
Expected<sys::fs::FileLocker>
-raw_fd_ostream::tryLockFor(std::chrono::milliseconds Timeout) {
- std::error_code EC = sys::fs::tryLockFile(FD, Timeout);
+raw_fd_ostream::tryLockFor(Duration const& Timeout) {
+ std::error_code EC = sys::fs::tryLockFile(FD, Timeout.getDuration());
if (!EC)
return sys::fs::FileLocker(FD);
return errorCodeToError(EC);
@@ -914,8 +914,7 @@ raw_fd_stream::raw_fd_stream(StringRef Filename, std::error_code &EC)
if (EC)
return;
- // Do not support non-seekable files.
- if (!supportsSeeking())
+ if (!isRegularFile())
EC = std::make_error_code(std::errc::invalid_argument);
}
@@ -937,10 +936,6 @@ bool raw_fd_stream::classof(const raw_ostream *OS) {
// raw_string_ostream
//===----------------------------------------------------------------------===//
-raw_string_ostream::~raw_string_ostream() {
- flush();
-}
-
void raw_string_ostream::write_impl(const char *Ptr, size_t Size) {
OS.append(Ptr, Size);
}
diff --git a/contrib/llvm-project/llvm/lib/TableGen/TGParser.cpp b/contrib/llvm-project/llvm/lib/TableGen/TGParser.cpp
index 6ccca4d69f40..3709a375ed1b 100644
--- a/contrib/llvm-project/llvm/lib/TableGen/TGParser.cpp
+++ b/contrib/llvm-project/llvm/lib/TableGen/TGParser.cpp
@@ -3203,7 +3203,8 @@ bool TGParser::ParseIf(MultiClass *CurMultiClass) {
// iteration variable being assigned.
ListInit *EmptyList = ListInit::get({}, BitRecTy::get());
- ListInit *SingletonList = ListInit::get({BitInit::get(1)}, BitRecTy::get());
+ ListInit *SingletonList =
+ ListInit::get({BitInit::get(true)}, BitRecTy::get());
RecTy *BitListTy = ListRecTy::get(BitRecTy::get());
// The foreach containing the then-clause selects SingletonList if
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64.h b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64.h
index b0dd30c13137..4d1464901777 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64.h
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64.h
@@ -26,7 +26,6 @@ class AArch64Subtarget;
class AArch64TargetMachine;
class FunctionPass;
class InstructionSelector;
-class MachineFunctionPass;
FunctionPass *createAArch64DeadRegisterDefinitions();
FunctionPass *createAArch64RedundantCopyEliminationPass();
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64.td b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64.td
index cb17fd94c335..b87468d5c8de 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64.td
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64.td
@@ -416,6 +416,12 @@ def FeatureHCX : SubtargetFeature<
def FeatureLS64 : SubtargetFeature<"ls64", "HasLS64",
"true", "Enable Armv8.7-A LD64B/ST64B Accelerator Extension">;
+def FeatureHBC : SubtargetFeature<"hbc", "HasHBC",
+ "true", "Enable Armv8.8-A Hinted Conditional Branches Extension">;
+
+def FeatureMOPS : SubtargetFeature<"mops", "HasMOPS",
+ "true", "Enable Armv8.8-A memcpy and memset acceleration instructions">;
+
def FeatureBRBE : SubtargetFeature<"brbe", "HasBRBE",
"true", "Enable Branch Record Buffer Extension">;
@@ -497,6 +503,10 @@ def HasV8_7aOps : SubtargetFeature<
"v8.7a", "HasV8_7aOps", "true", "Support ARM v8.7a instructions",
[HasV8_6aOps, FeatureXS, FeatureWFxT, FeatureHCX]>;
+def HasV8_8aOps : SubtargetFeature<
+ "v8.8a", "HasV8_8aOps", "true", "Support ARM v8.8a instructions",
+ [HasV8_7aOps, FeatureHBC, FeatureMOPS]>;
+
def HasV9_0aOps : SubtargetFeature<
"v9a", "HasV9_0aOps", "true", "Support ARM v9a instructions",
[HasV8_5aOps, FeatureSVE2]>;
@@ -509,21 +519,22 @@ def HasV9_2aOps : SubtargetFeature<
"v9.2a", "HasV9_2aOps", "true", "Support ARM v9.2a instructions",
[HasV8_7aOps, HasV9_1aOps]>;
+def HasV9_3aOps : SubtargetFeature<
+ "v9.3a", "HasV9_3aOps", "true", "Support ARM v9.3a instructions",
+ [HasV8_8aOps, HasV9_2aOps]>;
+
def HasV8_0rOps : SubtargetFeature<
"v8r", "HasV8_0rOps", "true", "Support ARM v8r instructions",
[//v8.1
FeatureCRC, FeaturePAN, FeatureRDM, FeatureLSE, FeatureCONTEXTIDREL2,
//v8.2
- FeaturePerfMon, FeatureRAS, FeaturePsUAO, FeatureSM4,
- FeatureSHA3, FeatureCCPP, FeatureFullFP16, FeaturePAN_RWV,
+ FeatureRAS, FeaturePsUAO, FeatureCCPP, FeaturePAN_RWV,
//v8.3
FeatureComplxNum, FeatureCCIDX, FeatureJS,
FeaturePAuth, FeatureRCPC,
//v8.4
- FeatureDotProd, FeatureFP16FML, FeatureTRACEV8_4,
- FeatureTLB_RMI, FeatureFlagM, FeatureDIT, FeatureSEL2, FeatureRCPC_IMMO,
- //v8.5
- FeatureSSBS, FeaturePredRes, FeatureSB, FeatureSpecRestrict]>;
+ FeatureDotProd, FeatureTRACEV8_4, FeatureTLB_RMI,
+ FeatureFlagM, FeatureDIT, FeatureSEL2, FeatureRCPC_IMMO]>;
//===----------------------------------------------------------------------===//
// Register File Description
@@ -955,7 +966,9 @@ def ProcessorFeatures {
list<SubtargetFeature> A710 = [HasV9_0aOps, FeatureNEON, FeaturePerfMon,
FeatureETE, FeatureMTE, FeatureFP16FML,
FeatureSVE2BitPerm, FeatureBF16, FeatureMatMulInt8];
- list<SubtargetFeature> R82 = [HasV8_0rOps];
+ list<SubtargetFeature> R82 = [HasV8_0rOps, FeaturePerfMon, FeatureFullFP16,
+ FeatureFP16FML, FeatureSSBS, FeaturePredRes,
+ FeatureSB, FeatureSpecRestrict];
list<SubtargetFeature> X1 = [HasV8_2aOps, FeatureCrypto, FeatureFPARMv8,
FeatureNEON, FeatureRCPC, FeaturePerfMon,
FeatureSPE, FeatureFullFP16, FeatureDotProd];
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64CallingConvention.td b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64CallingConvention.td
index c90601443934..f26151536a58 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64CallingConvention.td
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64CallingConvention.td
@@ -468,7 +468,7 @@ def CSR_Darwin_AArch64_TLS
// CSR_Darwin_AArch64_CXX_TLS should be a subset of CSR_Darwin_AArch64_TLS.
def CSR_Darwin_AArch64_CXX_TLS
: CalleeSavedRegs<(add CSR_Darwin_AArch64_AAPCS,
- (sub (sequence "X%u", 1, 28), X15, X16, X17, X18),
+ (sub (sequence "X%u", 1, 28), X9, X15, X16, X17, X18, X19),
(sequence "D%u", 0, 31))>;
// CSRs that are handled by prologue, epilogue.
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
index ee6e670fe3cd..109b739528bf 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
@@ -443,7 +443,7 @@ bool AArch64ExpandPseudo::expand_DestructiveOp(
uint64_t FalseLanes = MI.getDesc().TSFlags & AArch64::FalseLanesMask;
bool FalseZero = FalseLanes == AArch64::FalseLanesZero;
- unsigned DstReg = MI.getOperand(0).getReg();
+ Register DstReg = MI.getOperand(0).getReg();
bool DstIsDead = MI.getOperand(0).isDead();
if (DType == AArch64::DestructiveBinary)
@@ -989,7 +989,7 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
.addReg(DstReg, RegState::Kill)
.addReg(DstReg, DstFlags | RegState::Implicit);
} else {
- unsigned DstReg = MI.getOperand(0).getReg();
+ Register DstReg = MI.getOperand(0).getReg();
MIB2 = BuildMI(MBB, MBBI, DL, TII->get(AArch64::LDRXui))
.add(MI.getOperand(0))
.addUse(DstReg, RegState::Kill);
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index 3dc694df509d..c67fa62c7a92 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -355,7 +355,7 @@ unsigned AArch64FastISel::fastMaterializeAlloca(const AllocaInst *AI) {
FuncInfo.StaticAllocaMap.find(AI);
if (SI != FuncInfo.StaticAllocaMap.end()) {
- unsigned ResultReg = createResultReg(&AArch64::GPR64spRegClass);
+ Register ResultReg = createResultReg(&AArch64::GPR64spRegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
ResultReg)
.addFrameIndex(SI->second)
@@ -378,7 +378,7 @@ unsigned AArch64FastISel::materializeInt(const ConstantInt *CI, MVT VT) {
const TargetRegisterClass *RC = (VT == MVT::i64) ? &AArch64::GPR64RegClass
: &AArch64::GPR32RegClass;
unsigned ZeroReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
ResultReg).addReg(ZeroReg, getKillRegState(true));
return ResultReg;
@@ -410,11 +410,11 @@ unsigned AArch64FastISel::materializeFP(const ConstantFP *CFP, MVT VT) {
const TargetRegisterClass *RC = Is64Bit ?
&AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
- unsigned TmpReg = createResultReg(RC);
+ Register TmpReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc1), TmpReg)
.addImm(CFP->getValueAPF().bitcastToAPInt().getZExtValue());
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
+ Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(TmpReg, getKillRegState(true));
@@ -427,12 +427,12 @@ unsigned AArch64FastISel::materializeFP(const ConstantFP *CFP, MVT VT) {
Align Alignment = DL.getPrefTypeAlign(CFP->getType());
unsigned CPI = MCP.getConstantPoolIndex(cast<Constant>(CFP), Alignment);
- unsigned ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
+ Register ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
ADRPReg).addConstantPoolIndex(CPI, 0, AArch64II::MO_PAGE);
unsigned Opc = Is64Bit ? AArch64::LDRDui : AArch64::LDRSui;
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
+ Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addReg(ADRPReg)
.addConstantPoolIndex(CPI, 0, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
@@ -455,7 +455,7 @@ unsigned AArch64FastISel::materializeGV(const GlobalValue *GV) {
if (!DestEVT.isSimple())
return 0;
- unsigned ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
+ Register ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
unsigned ResultReg;
if (OpFlags & AArch64II::MO_GOT) {
@@ -482,7 +482,7 @@ unsigned AArch64FastISel::materializeGV(const GlobalValue *GV) {
// LDRWui produces a 32-bit register, but pointers in-register are 64-bits
// so we must extend the result on ILP32.
- unsigned Result64 = createResultReg(&AArch64::GPR64RegClass);
+ Register Result64 = createResultReg(&AArch64::GPR64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::SUBREG_TO_REG))
.addDef(Result64)
@@ -751,7 +751,7 @@ bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr, Type *Ty)
if (const auto *C = dyn_cast<ConstantInt>(RHS))
if (C->getValue() == 0xffffffff) {
Addr.setExtendType(AArch64_AM::UXTW);
- unsigned Reg = getRegForValue(LHS);
+ Register Reg = getRegForValue(LHS);
if (!Reg)
return false;
Reg = fastEmitInst_extractsubreg(MVT::i32, Reg, AArch64::sub_32);
@@ -760,7 +760,7 @@ bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr, Type *Ty)
}
}
- unsigned Reg = getRegForValue(Src);
+ Register Reg = getRegForValue(Src);
if (!Reg)
return false;
Addr.setOffsetReg(Reg);
@@ -821,7 +821,7 @@ bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr, Type *Ty)
}
}
- unsigned Reg = getRegForValue(Src);
+ Register Reg = getRegForValue(Src);
if (!Reg)
return false;
Addr.setOffsetReg(Reg);
@@ -847,7 +847,7 @@ bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr, Type *Ty)
Addr.setExtendType(AArch64_AM::LSL);
Addr.setExtendType(AArch64_AM::UXTW);
- unsigned Reg = getRegForValue(LHS);
+ Register Reg = getRegForValue(LHS);
if (!Reg)
return false;
Reg = fastEmitInst_extractsubreg(MVT::i32, Reg, AArch64::sub_32);
@@ -879,7 +879,7 @@ bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr, Type *Ty)
break;
Addr.setShift(0);
- unsigned Reg = getRegForValue(Src);
+ Register Reg = getRegForValue(Src);
if (!Reg)
return false;
Addr.setOffsetReg(Reg);
@@ -888,7 +888,7 @@ bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr, Type *Ty)
} // end switch
if (Addr.isRegBase() && !Addr.getReg()) {
- unsigned Reg = getRegForValue(Obj);
+ Register Reg = getRegForValue(Obj);
if (!Reg)
return false;
Addr.setReg(Reg);
@@ -896,7 +896,7 @@ bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr, Type *Ty)
}
if (!Addr.getOffsetReg()) {
- unsigned Reg = getRegForValue(Obj);
+ Register Reg = getRegForValue(Obj);
if (!Reg)
return false;
Addr.setOffsetReg(Reg);
@@ -1034,7 +1034,7 @@ bool AArch64FastISel::simplifyAddress(Address &Addr, MVT VT) {
// continue. This should almost never happen.
if ((ImmediateOffsetNeedsLowering || Addr.getOffsetReg()) && Addr.isFIBase())
{
- unsigned ResultReg = createResultReg(&AArch64::GPR64spRegClass);
+ Register ResultReg = createResultReg(&AArch64::GPR64spRegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
ResultReg)
.addFrameIndex(Addr.getFI())
@@ -1178,7 +1178,7 @@ unsigned AArch64FastISel::emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
SI->getOpcode() == Instruction::AShr )
std::swap(LHS, RHS);
- unsigned LHSReg = getRegForValue(LHS);
+ Register LHSReg = getRegForValue(LHS);
if (!LHSReg)
return 0;
@@ -1207,13 +1207,13 @@ unsigned AArch64FastISel::emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
if (const auto *SI = dyn_cast<BinaryOperator>(RHS))
if (const auto *C = dyn_cast<ConstantInt>(SI->getOperand(1)))
if ((SI->getOpcode() == Instruction::Shl) && (C->getZExtValue() < 4)) {
- unsigned RHSReg = getRegForValue(SI->getOperand(0));
+ Register RHSReg = getRegForValue(SI->getOperand(0));
if (!RHSReg)
return 0;
return emitAddSub_rx(UseAdd, RetVT, LHSReg, RHSReg, ExtendType,
C->getZExtValue(), SetFlags, WantResult);
}
- unsigned RHSReg = getRegForValue(RHS);
+ Register RHSReg = getRegForValue(RHS);
if (!RHSReg)
return 0;
return emitAddSub_rx(UseAdd, RetVT, LHSReg, RHSReg, ExtendType, 0,
@@ -1232,7 +1232,7 @@ unsigned AArch64FastISel::emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
assert(isa<ConstantInt>(MulRHS) && "Expected a ConstantInt.");
uint64_t ShiftVal = cast<ConstantInt>(MulRHS)->getValue().logBase2();
- unsigned RHSReg = getRegForValue(MulLHS);
+ Register RHSReg = getRegForValue(MulLHS);
if (!RHSReg)
return 0;
ResultReg = emitAddSub_rs(UseAdd, RetVT, LHSReg, RHSReg, AArch64_AM::LSL,
@@ -1255,7 +1255,7 @@ unsigned AArch64FastISel::emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
}
uint64_t ShiftVal = C->getZExtValue();
if (ShiftType != AArch64_AM::InvalidShiftExtend) {
- unsigned RHSReg = getRegForValue(SI->getOperand(0));
+ Register RHSReg = getRegForValue(SI->getOperand(0));
if (!RHSReg)
return 0;
ResultReg = emitAddSub_rs(UseAdd, RetVT, LHSReg, RHSReg, ShiftType,
@@ -1267,7 +1267,7 @@ unsigned AArch64FastISel::emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
}
}
- unsigned RHSReg = getRegForValue(RHS);
+ Register RHSReg = getRegForValue(RHS);
if (!RHSReg)
return 0;
@@ -1489,7 +1489,7 @@ bool AArch64FastISel::emitFCmp(MVT RetVT, const Value *LHS, const Value *RHS) {
if (CFP->isZero() && !CFP->isNegative())
UseImm = true;
- unsigned LHSReg = getRegForValue(LHS);
+ Register LHSReg = getRegForValue(LHS);
if (!LHSReg)
return false;
@@ -1500,7 +1500,7 @@ bool AArch64FastISel::emitFCmp(MVT RetVT, const Value *LHS, const Value *RHS) {
return true;
}
- unsigned RHSReg = getRegForValue(RHS);
+ Register RHSReg = getRegForValue(RHS);
if (!RHSReg)
return false;
@@ -1577,7 +1577,7 @@ unsigned AArch64FastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
if (isa<ConstantInt>(SI->getOperand(1)))
std::swap(LHS, RHS);
- unsigned LHSReg = getRegForValue(LHS);
+ Register LHSReg = getRegForValue(LHS);
if (!LHSReg)
return 0;
@@ -1602,7 +1602,7 @@ unsigned AArch64FastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
assert(isa<ConstantInt>(MulRHS) && "Expected a ConstantInt.");
uint64_t ShiftVal = cast<ConstantInt>(MulRHS)->getValue().logBase2();
- unsigned RHSReg = getRegForValue(MulLHS);
+ Register RHSReg = getRegForValue(MulLHS);
if (!RHSReg)
return 0;
ResultReg = emitLogicalOp_rs(ISDOpc, RetVT, LHSReg, RHSReg, ShiftVal);
@@ -1616,7 +1616,7 @@ unsigned AArch64FastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
if (const auto *SI = dyn_cast<ShlOperator>(RHS))
if (const auto *C = dyn_cast<ConstantInt>(SI->getOperand(1))) {
uint64_t ShiftVal = C->getZExtValue();
- unsigned RHSReg = getRegForValue(SI->getOperand(0));
+ Register RHSReg = getRegForValue(SI->getOperand(0));
if (!RHSReg)
return 0;
ResultReg = emitLogicalOp_rs(ISDOpc, RetVT, LHSReg, RHSReg, ShiftVal);
@@ -1625,7 +1625,7 @@ unsigned AArch64FastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
}
}
- unsigned RHSReg = getRegForValue(RHS);
+ Register RHSReg = getRegForValue(RHS);
if (!RHSReg)
return 0;
@@ -1673,7 +1673,7 @@ unsigned AArch64FastISel::emitLogicalOp_ri(unsigned ISDOpc, MVT RetVT,
if (!AArch64_AM::isLogicalImmediate(Imm, RegSize))
return 0;
- unsigned ResultReg =
+ Register ResultReg =
fastEmitInst_ri(Opc, RC, LHSReg,
AArch64_AM::encodeLogicalImmediate(Imm, RegSize));
if (RetVT >= MVT::i8 && RetVT <= MVT::i16 && ISDOpc != ISD::AND) {
@@ -1715,7 +1715,7 @@ unsigned AArch64FastISel::emitLogicalOp_rs(unsigned ISDOpc, MVT RetVT,
RC = &AArch64::GPR64RegClass;
break;
}
- unsigned ResultReg =
+ Register ResultReg =
fastEmitInst_rri(Opc, RC, LHSReg, RHSReg,
AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftImm));
if (RetVT >= MVT::i8 && RetVT <= MVT::i16) {
@@ -1841,7 +1841,7 @@ unsigned AArch64FastISel::emitLoad(MVT VT, MVT RetVT, Address Addr,
}
// Create the base instruction, then add the operands.
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg);
addLoadStoreOperands(Addr, MIB, MachineMemOperand::MOLoad, ScaleFactor, MMO);
@@ -1856,7 +1856,7 @@ unsigned AArch64FastISel::emitLoad(MVT VT, MVT RetVT, Address Addr,
// For zero-extending loads to 64bit we emit a 32bit load and then convert
// the 32bit reg to a 64bit reg.
if (WantZExt && RetVT == MVT::i64 && VT <= MVT::i32) {
- unsigned Reg64 = createResultReg(&AArch64::GPR64RegClass);
+ Register Reg64 = createResultReg(&AArch64::GPR64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(AArch64::SUBREG_TO_REG), Reg64)
.addImm(0)
@@ -1991,7 +1991,7 @@ bool AArch64FastISel::selectLoad(const Instruction *I) {
// The integer extend hasn't been emitted yet. FastISel or SelectionDAG
// could select it. Emit a copy to subreg if necessary. FastISel will remove
// it when it selects the integer extend.
- unsigned Reg = lookUpRegForValue(IntExtVal);
+ Register Reg = lookUpRegForValue(IntExtVal);
auto *MI = MRI.getUniqueVRegDef(Reg);
if (!MI) {
if (RetVT == MVT::i64 && VT <= MVT::i32) {
@@ -2174,7 +2174,7 @@ bool AArch64FastISel::selectStore(const Instruction *I) {
// The non-atomic instructions are sufficient for relaxed stores.
if (isReleaseOrStronger(Ord)) {
// The STLR addressing mode only supports a base reg; pass that directly.
- unsigned AddrReg = getRegForValue(PtrV);
+ Register AddrReg = getRegForValue(PtrV);
return emitStoreRelease(VT, SrcReg, AddrReg,
createMachineMemOperandFor(I));
}
@@ -2339,7 +2339,7 @@ bool AArch64FastISel::emitCompareAndBranch(const BranchInst *BI) {
unsigned Opc = OpcTable[IsBitTest][IsCmpNE][Is64Bit];
const MCInstrDesc &II = TII.get(Opc);
- unsigned SrcReg = getRegForValue(LHS);
+ Register SrcReg = getRegForValue(LHS);
if (!SrcReg)
return false;
@@ -2454,7 +2454,7 @@ bool AArch64FastISel::selectBranch(const Instruction *I) {
if (foldXALUIntrinsic(CC, I, BI->getCondition())) {
// Fake request the condition, otherwise the intrinsic might be completely
// optimized away.
- unsigned CondReg = getRegForValue(BI->getCondition());
+ Register CondReg = getRegForValue(BI->getCondition());
if (!CondReg)
return false;
@@ -2468,7 +2468,7 @@ bool AArch64FastISel::selectBranch(const Instruction *I) {
}
}
- unsigned CondReg = getRegForValue(BI->getCondition());
+ Register CondReg = getRegForValue(BI->getCondition());
if (CondReg == 0)
return false;
@@ -2480,7 +2480,7 @@ bool AArch64FastISel::selectBranch(const Instruction *I) {
}
const MCInstrDesc &II = TII.get(Opcode);
- unsigned ConstrainedCondReg
+ Register ConstrainedCondReg
= constrainOperandRegClass(II, CondReg, II.getNumDefs());
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(ConstrainedCondReg)
@@ -2493,7 +2493,7 @@ bool AArch64FastISel::selectBranch(const Instruction *I) {
bool AArch64FastISel::selectIndirectBr(const Instruction *I) {
const IndirectBrInst *BI = cast<IndirectBrInst>(I);
- unsigned AddrReg = getRegForValue(BI->getOperand(0));
+ Register AddrReg = getRegForValue(BI->getOperand(0));
if (AddrReg == 0)
return false;
@@ -2563,7 +2563,7 @@ bool AArch64FastISel::selectCmp(const Instruction *I) {
}
if (CondCodes) {
- unsigned TmpReg1 = createResultReg(&AArch64::GPR32RegClass);
+ Register TmpReg1 = createResultReg(&AArch64::GPR32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr),
TmpReg1)
.addReg(AArch64::WZR, getKillRegState(true))
@@ -2630,18 +2630,18 @@ bool AArch64FastISel::optimizeSelect(const SelectInst *SI) {
if (!Opc)
return false;
- unsigned Src1Reg = getRegForValue(Src1Val);
+ Register Src1Reg = getRegForValue(Src1Val);
if (!Src1Reg)
return false;
- unsigned Src2Reg = getRegForValue(Src2Val);
+ Register Src2Reg = getRegForValue(Src2Val);
if (!Src2Reg)
return false;
if (NeedExtraOp)
Src1Reg = emitLogicalOp_ri(ISD::XOR, MVT::i32, Src1Reg, 1);
- unsigned ResultReg = fastEmitInst_rr(Opc, &AArch64::GPR32RegClass, Src1Reg,
+ Register ResultReg = fastEmitInst_rr(Opc, &AArch64::GPR32RegClass, Src1Reg,
Src2Reg);
updateValueMap(SI, ResultReg);
return true;
@@ -2690,7 +2690,7 @@ bool AArch64FastISel::selectSelect(const Instruction *I) {
// Try to pickup the flags, so we don't have to emit another compare.
if (foldXALUIntrinsic(CC, I, Cond)) {
// Fake request the condition to force emission of the XALU intrinsic.
- unsigned CondReg = getRegForValue(Cond);
+ Register CondReg = getRegForValue(Cond);
if (!CondReg)
return false;
} else if (isa<CmpInst>(Cond) && cast<CmpInst>(Cond)->hasOneUse() &&
@@ -2711,7 +2711,7 @@ bool AArch64FastISel::selectSelect(const Instruction *I) {
}
if (FoldSelect) {
- unsigned SrcReg = getRegForValue(FoldSelect);
+ Register SrcReg = getRegForValue(FoldSelect);
if (!SrcReg)
return false;
@@ -2739,7 +2739,7 @@ bool AArch64FastISel::selectSelect(const Instruction *I) {
}
assert((CC != AArch64CC::AL) && "Unexpected condition code.");
} else {
- unsigned CondReg = getRegForValue(Cond);
+ Register CondReg = getRegForValue(Cond);
if (!CondReg)
return false;
@@ -2753,8 +2753,8 @@ bool AArch64FastISel::selectSelect(const Instruction *I) {
.addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
}
- unsigned Src1Reg = getRegForValue(SI->getTrueValue());
- unsigned Src2Reg = getRegForValue(SI->getFalseValue());
+ Register Src1Reg = getRegForValue(SI->getTrueValue());
+ Register Src2Reg = getRegForValue(SI->getFalseValue());
if (!Src1Reg || !Src2Reg)
return false;
@@ -2762,7 +2762,7 @@ bool AArch64FastISel::selectSelect(const Instruction *I) {
if (ExtraCC != AArch64CC::AL)
Src2Reg = fastEmitInst_rri(Opc, RC, Src1Reg, Src2Reg, ExtraCC);
- unsigned ResultReg = fastEmitInst_rri(Opc, RC, Src1Reg, Src2Reg, CC);
+ Register ResultReg = fastEmitInst_rri(Opc, RC, Src1Reg, Src2Reg, CC);
updateValueMap(I, ResultReg);
return true;
}
@@ -2772,11 +2772,11 @@ bool AArch64FastISel::selectFPExt(const Instruction *I) {
if (!I->getType()->isDoubleTy() || !V->getType()->isFloatTy())
return false;
- unsigned Op = getRegForValue(V);
+ Register Op = getRegForValue(V);
if (Op == 0)
return false;
- unsigned ResultReg = createResultReg(&AArch64::FPR64RegClass);
+ Register ResultReg = createResultReg(&AArch64::FPR64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::FCVTDSr),
ResultReg).addReg(Op);
updateValueMap(I, ResultReg);
@@ -2788,11 +2788,11 @@ bool AArch64FastISel::selectFPTrunc(const Instruction *I) {
if (!I->getType()->isFloatTy() || !V->getType()->isDoubleTy())
return false;
- unsigned Op = getRegForValue(V);
+ Register Op = getRegForValue(V);
if (Op == 0)
return false;
- unsigned ResultReg = createResultReg(&AArch64::FPR32RegClass);
+ Register ResultReg = createResultReg(&AArch64::FPR32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::FCVTSDr),
ResultReg).addReg(Op);
updateValueMap(I, ResultReg);
@@ -2805,7 +2805,7 @@ bool AArch64FastISel::selectFPToInt(const Instruction *I, bool Signed) {
if (!isTypeLegal(I->getType(), DestVT) || DestVT.isVector())
return false;
- unsigned SrcReg = getRegForValue(I->getOperand(0));
+ Register SrcReg = getRegForValue(I->getOperand(0));
if (SrcReg == 0)
return false;
@@ -2825,7 +2825,7 @@ bool AArch64FastISel::selectFPToInt(const Instruction *I, bool Signed) {
else
Opc = (DestVT == MVT::i32) ? AArch64::FCVTZUUWSr : AArch64::FCVTZUUXSr;
}
- unsigned ResultReg = createResultReg(
+ Register ResultReg = createResultReg(
DestVT == MVT::i32 ? &AArch64::GPR32RegClass : &AArch64::GPR64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addReg(SrcReg);
@@ -2844,7 +2844,7 @@ bool AArch64FastISel::selectIntToFP(const Instruction *I, bool Signed) {
assert((DestVT == MVT::f32 || DestVT == MVT::f64) &&
"Unexpected value type.");
- unsigned SrcReg = getRegForValue(I->getOperand(0));
+ Register SrcReg = getRegForValue(I->getOperand(0));
if (!SrcReg)
return false;
@@ -2871,7 +2871,7 @@ bool AArch64FastISel::selectIntToFP(const Instruction *I, bool Signed) {
Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUWSri : AArch64::UCVTFUWDri;
}
- unsigned ResultReg = fastEmitInst_r(Opc, TLI.getRegClassFor(DestVT), SrcReg);
+ Register ResultReg = fastEmitInst_r(Opc, TLI.getRegClassFor(DestVT), SrcReg);
updateValueMap(I, ResultReg);
return true;
}
@@ -2975,11 +2975,11 @@ bool AArch64FastISel::fastLowerArguments() {
} else
llvm_unreachable("Unexpected value type.");
- unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
+ Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
// FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
// Without this, EmitLiveInCopies may eliminate the livein if its only
// use is a bitcast (which isn't turned into an instruction).
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(DstReg, getKillRegState(true));
@@ -3009,7 +3009,7 @@ bool AArch64FastISel::processCallArgs(CallLoweringInfo &CLI,
const Value *ArgVal = CLI.OutVals[VA.getValNo()];
MVT ArgVT = OutVTs[VA.getValNo()];
- unsigned ArgReg = getRegForValue(ArgVal);
+ Register ArgReg = getRegForValue(ArgVal);
if (!ArgReg)
return false;
@@ -3104,7 +3104,7 @@ bool AArch64FastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT,
if (CopyVT.isVector() && !Subtarget->isLittleEndian())
return false;
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
+ Register ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(RVLocs[0].getLocReg());
@@ -3209,14 +3209,14 @@ bool AArch64FastISel::fastLowerCall(CallLoweringInfo &CLI) {
else if (Addr.getGlobalValue())
MIB.addGlobalAddress(Addr.getGlobalValue(), 0, 0);
else if (Addr.getReg()) {
- unsigned Reg = constrainOperandRegClass(II, Addr.getReg(), 0);
+ Register Reg = constrainOperandRegClass(II, Addr.getReg(), 0);
MIB.addReg(Reg);
} else
return false;
} else {
unsigned CallReg = 0;
if (Symbol) {
- unsigned ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
+ Register ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
ADRPReg)
.addSym(Symbol, AArch64II::MO_GOT | AArch64II::MO_PAGE);
@@ -3438,7 +3438,7 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
// SP = FP + Fixed Object + 16
int FI = MFI.CreateFixedObject(4, 0, false);
- unsigned ResultReg = createResultReg(&AArch64::GPR64spRegClass);
+ Register ResultReg = createResultReg(&AArch64::GPR64spRegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(AArch64::ADDXri), ResultReg)
.addFrameIndex(FI)
@@ -3568,10 +3568,10 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
Opc = AArch64::FABSDr;
break;
}
- unsigned SrcReg = getRegForValue(II->getOperand(0));
+ Register SrcReg = getRegForValue(II->getOperand(0));
if (!SrcReg)
return false;
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
+ Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addReg(SrcReg);
updateValueMap(II, ResultReg);
@@ -3593,7 +3593,7 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
if (!isTypeLegal(RetTy, VT))
return false;
- unsigned Op0Reg = getRegForValue(II->getOperand(0));
+ Register Op0Reg = getRegForValue(II->getOperand(0));
if (!Op0Reg)
return false;
@@ -3671,17 +3671,17 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
break;
case Intrinsic::smul_with_overflow: {
CC = AArch64CC::NE;
- unsigned LHSReg = getRegForValue(LHS);
+ Register LHSReg = getRegForValue(LHS);
if (!LHSReg)
return false;
- unsigned RHSReg = getRegForValue(RHS);
+ Register RHSReg = getRegForValue(RHS);
if (!RHSReg)
return false;
if (VT == MVT::i32) {
MulReg = emitSMULL_rr(MVT::i64, LHSReg, RHSReg);
- unsigned MulSubReg =
+ Register MulSubReg =
fastEmitInst_extractsubreg(VT, MulReg, AArch64::sub_32);
// cmp xreg, wreg, sxtw
emitAddSub_rx(/*UseAdd=*/false, MVT::i64, MulReg, MulSubReg,
@@ -3701,11 +3701,11 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
}
case Intrinsic::umul_with_overflow: {
CC = AArch64CC::NE;
- unsigned LHSReg = getRegForValue(LHS);
+ Register LHSReg = getRegForValue(LHS);
if (!LHSReg)
return false;
- unsigned RHSReg = getRegForValue(RHS);
+ Register RHSReg = getRegForValue(RHS);
if (!RHSReg)
return false;
@@ -3799,7 +3799,7 @@ bool AArch64FastISel::selectRet(const Instruction *I) {
if (!VA.isRegLoc())
return false;
- unsigned Reg = getRegForValue(RV);
+ Register Reg = getRegForValue(RV);
if (Reg == 0)
return false;
@@ -3879,7 +3879,7 @@ bool AArch64FastISel::selectTrunc(const Instruction *I) {
DestVT != MVT::i1)
return false;
- unsigned SrcReg = getRegForValue(Op);
+ Register SrcReg = getRegForValue(Op);
if (!SrcReg)
return false;
@@ -3906,7 +3906,7 @@ bool AArch64FastISel::selectTrunc(const Instruction *I) {
break;
}
// Issue an extract_subreg to get the lower 32-bits.
- unsigned Reg32 = fastEmitInst_extractsubreg(MVT::i32, SrcReg,
+ Register Reg32 = fastEmitInst_extractsubreg(MVT::i32, SrcReg,
AArch64::sub_32);
// Create the AND instruction which performs the actual truncation.
ResultReg = emitAnd_ri(MVT::i32, Reg32, Mask);
@@ -4007,7 +4007,7 @@ unsigned AArch64FastISel::emitLSL_rr(MVT RetVT, unsigned Op0Reg,
if (NeedTrunc)
Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Mask);
- unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op1Reg);
+ Register ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op1Reg);
if (NeedTrunc)
ResultReg = emitAnd_ri(MVT::i32, ResultReg, Mask);
return ResultReg;
@@ -4033,7 +4033,7 @@ unsigned AArch64FastISel::emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
// Just emit a copy for "zero" shifts.
if (Shift == 0) {
if (RetVT == SrcVT) {
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(Op0);
@@ -4110,7 +4110,7 @@ unsigned AArch64FastISel::emitLSR_rr(MVT RetVT, unsigned Op0Reg,
Op0Reg = emitAnd_ri(MVT::i32, Op0Reg, Mask);
Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Mask);
}
- unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op1Reg);
+ Register ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op1Reg);
if (NeedTrunc)
ResultReg = emitAnd_ri(MVT::i32, ResultReg, Mask);
return ResultReg;
@@ -4136,7 +4136,7 @@ unsigned AArch64FastISel::emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
// Just emit a copy for "zero" shifts.
if (Shift == 0) {
if (RetVT == SrcVT) {
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(Op0);
@@ -4226,7 +4226,7 @@ unsigned AArch64FastISel::emitASR_rr(MVT RetVT, unsigned Op0Reg,
Op0Reg = emitIntExt(RetVT, Op0Reg, MVT::i32, /*isZExt=*/false);
Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Mask);
}
- unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op1Reg);
+ Register ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op1Reg);
if (NeedTrunc)
ResultReg = emitAnd_ri(MVT::i32, ResultReg, Mask);
return ResultReg;
@@ -4252,7 +4252,7 @@ unsigned AArch64FastISel::emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
// Just emit a copy for "zero" shifts.
if (Shift == 0) {
if (RetVT == SrcVT) {
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(Op0);
@@ -4428,7 +4428,7 @@ bool AArch64FastISel::optimizeIntExtLoad(const Instruction *I, MVT RetVT,
return false;
// Check if the load instruction has already been selected.
- unsigned Reg = lookUpRegForValue(LI);
+ Register Reg = lookUpRegForValue(LI);
if (!Reg)
return false;
@@ -4456,7 +4456,7 @@ bool AArch64FastISel::optimizeIntExtLoad(const Instruction *I, MVT RetVT,
}
if (IsZExt) {
- unsigned Reg64 = createResultReg(&AArch64::GPR64RegClass);
+ Register Reg64 = createResultReg(&AArch64::GPR64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(AArch64::SUBREG_TO_REG), Reg64)
.addImm(0)
@@ -4490,7 +4490,7 @@ bool AArch64FastISel::selectIntExt(const Instruction *I) {
if (optimizeIntExtLoad(I, RetVT, SrcVT))
return true;
- unsigned SrcReg = getRegForValue(I->getOperand(0));
+ Register SrcReg = getRegForValue(I->getOperand(0));
if (!SrcReg)
return false;
@@ -4499,7 +4499,7 @@ bool AArch64FastISel::selectIntExt(const Instruction *I) {
if (const auto *Arg = dyn_cast<Argument>(I->getOperand(0))) {
if ((IsZExt && Arg->hasZExtAttr()) || (!IsZExt && Arg->hasSExtAttr())) {
if (RetVT == MVT::i64 && SrcVT != MVT::i64) {
- unsigned ResultReg = createResultReg(&AArch64::GPR64RegClass);
+ Register ResultReg = createResultReg(&AArch64::GPR64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(AArch64::SUBREG_TO_REG), ResultReg)
.addImm(0)
@@ -4543,21 +4543,21 @@ bool AArch64FastISel::selectRem(const Instruction *I, unsigned ISDOpcode) {
break;
}
unsigned MSubOpc = Is64bit ? AArch64::MSUBXrrr : AArch64::MSUBWrrr;
- unsigned Src0Reg = getRegForValue(I->getOperand(0));
+ Register Src0Reg = getRegForValue(I->getOperand(0));
if (!Src0Reg)
return false;
- unsigned Src1Reg = getRegForValue(I->getOperand(1));
+ Register Src1Reg = getRegForValue(I->getOperand(1));
if (!Src1Reg)
return false;
const TargetRegisterClass *RC =
(DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
- unsigned QuotReg = fastEmitInst_rr(DivOpc, RC, Src0Reg, Src1Reg);
+ Register QuotReg = fastEmitInst_rr(DivOpc, RC, Src0Reg, Src1Reg);
assert(QuotReg && "Unexpected DIV instruction emission failure.");
// The remainder is computed as numerator - (quotient * denominator) using the
// MSUB instruction.
- unsigned ResultReg = fastEmitInst_rrr(MSubOpc, RC, QuotReg, Src1Reg, Src0Reg);
+ Register ResultReg = fastEmitInst_rrr(MSubOpc, RC, QuotReg, Src1Reg, Src0Reg);
updateValueMap(I, ResultReg);
return true;
}
@@ -4602,7 +4602,7 @@ bool AArch64FastISel::selectMul(const Instruction *I) {
}
}
- unsigned Src0Reg = getRegForValue(Src0);
+ Register Src0Reg = getRegForValue(Src0);
if (!Src0Reg)
return false;
@@ -4615,11 +4615,11 @@ bool AArch64FastISel::selectMul(const Instruction *I) {
}
}
- unsigned Src0Reg = getRegForValue(I->getOperand(0));
+ Register Src0Reg = getRegForValue(I->getOperand(0));
if (!Src0Reg)
return false;
- unsigned Src1Reg = getRegForValue(I->getOperand(1));
+ Register Src1Reg = getRegForValue(I->getOperand(1));
if (!Src1Reg)
return false;
@@ -4666,7 +4666,7 @@ bool AArch64FastISel::selectShift(const Instruction *I) {
}
}
- unsigned Op0Reg = getRegForValue(Op0);
+ Register Op0Reg = getRegForValue(Op0);
if (!Op0Reg)
return false;
@@ -4689,11 +4689,11 @@ bool AArch64FastISel::selectShift(const Instruction *I) {
return true;
}
- unsigned Op0Reg = getRegForValue(I->getOperand(0));
+ Register Op0Reg = getRegForValue(I->getOperand(0));
if (!Op0Reg)
return false;
- unsigned Op1Reg = getRegForValue(I->getOperand(1));
+ Register Op1Reg = getRegForValue(I->getOperand(1));
if (!Op1Reg)
return false;
@@ -4746,11 +4746,11 @@ bool AArch64FastISel::selectBitCast(const Instruction *I) {
case MVT::f32: RC = &AArch64::FPR32RegClass; break;
case MVT::f64: RC = &AArch64::FPR64RegClass; break;
}
- unsigned Op0Reg = getRegForValue(I->getOperand(0));
+ Register Op0Reg = getRegForValue(I->getOperand(0));
if (!Op0Reg)
return false;
- unsigned ResultReg = fastEmitInst_r(Opc, RC, Op0Reg);
+ Register ResultReg = fastEmitInst_r(Opc, RC, Op0Reg);
if (!ResultReg)
return false;
@@ -4810,7 +4810,7 @@ bool AArch64FastISel::selectSDiv(const Instruction *I) {
return selectBinaryOp(I, ISD::SDIV);
unsigned Lg2 = C.countTrailingZeros();
- unsigned Src0Reg = getRegForValue(I->getOperand(0));
+ Register Src0Reg = getRegForValue(I->getOperand(0));
if (!Src0Reg)
return false;
@@ -4840,7 +4840,7 @@ bool AArch64FastISel::selectSDiv(const Instruction *I) {
SelectOpc = AArch64::CSELWr;
RC = &AArch64::GPR32RegClass;
}
- unsigned SelectReg = fastEmitInst_rri(SelectOpc, RC, AddReg, Src0Reg,
+ Register SelectReg = fastEmitInst_rri(SelectOpc, RC, AddReg, Src0Reg,
AArch64CC::LT);
if (!SelectReg)
return false;
@@ -4866,7 +4866,7 @@ bool AArch64FastISel::selectSDiv(const Instruction *I) {
/// have to duplicate it for AArch64, because otherwise we would fail during the
/// sign-extend emission.
unsigned AArch64FastISel::getRegForGEPIndex(const Value *Idx) {
- unsigned IdxN = getRegForValue(Idx);
+ Register IdxN = getRegForValue(Idx);
if (IdxN == 0)
// Unhandled operand. Halt "fast" selection and bail.
return 0;
@@ -4889,7 +4889,7 @@ bool AArch64FastISel::selectGetElementPtr(const Instruction *I) {
if (Subtarget->isTargetILP32())
return false;
- unsigned N = getRegForValue(I->getOperand(0));
+ Register N = getRegForValue(I->getOperand(0));
if (!N)
return false;
@@ -4983,16 +4983,16 @@ bool AArch64FastISel::selectAtomicCmpXchg(const AtomicCmpXchgInst *I) {
const MCInstrDesc &II = TII.get(Opc);
- const unsigned AddrReg = constrainOperandRegClass(
+ const Register AddrReg = constrainOperandRegClass(
II, getRegForValue(I->getPointerOperand()), II.getNumDefs());
- const unsigned DesiredReg = constrainOperandRegClass(
+ const Register DesiredReg = constrainOperandRegClass(
II, getRegForValue(I->getCompareOperand()), II.getNumDefs() + 1);
- const unsigned NewReg = constrainOperandRegClass(
+ const Register NewReg = constrainOperandRegClass(
II, getRegForValue(I->getNewValOperand()), II.getNumDefs() + 2);
- const unsigned ResultReg1 = createResultReg(ResRC);
- const unsigned ResultReg2 = createResultReg(&AArch64::GPR32RegClass);
- const unsigned ScratchReg = createResultReg(&AArch64::GPR32RegClass);
+ const Register ResultReg1 = createResultReg(ResRC);
+ const Register ResultReg2 = createResultReg(&AArch64::GPR32RegClass);
+ const Register ScratchReg = createResultReg(&AArch64::GPR32RegClass);
// FIXME: MachineMemOperand doesn't support cmpxchg yet.
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 638e45b30d99..a4d20735e2b1 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -547,7 +547,7 @@ void AArch64FrameLowering::emitCalleeSavedFrameMoves(
return;
for (const auto &Info : CSI) {
- unsigned Reg = Info.getReg();
+ Register Reg = Info.getReg();
// Not all unwinders may know about SVE registers, so assume the lowest
// common denominator.
@@ -1653,8 +1653,7 @@ static void InsertReturnAddressAuth(MachineFunction &MF,
// The AUTIASP instruction assembles to a hint instruction before v8.3a so
// this instruction can safely be used for any v8a architecture.
// From v8.3a onwards there are optimised authenticate LR and return
- // instructions, namely RETA{A,B}, that can be used instead. In this case the
- // DW_CFA_AARCH64_negate_ra_state can't be emitted.
+ // instructions, namely RETA{A,B}, that can be used instead.
if (Subtarget.hasPAuth() && MBBI != MBB.end() &&
MBBI->getOpcode() == AArch64::RET_ReallyLR) {
BuildMI(MBB, MBBI, DL,
@@ -1666,12 +1665,6 @@ static void InsertReturnAddressAuth(MachineFunction &MF,
MBB, MBBI, DL,
TII->get(MFI.shouldSignWithBKey() ? AArch64::AUTIBSP : AArch64::AUTIASP))
.setMIFlag(MachineInstr::FrameDestroy);
-
- unsigned CFIIndex =
- MF.addFrameInst(MCCFIInstruction::createNegateRAState(nullptr));
- BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
- .addCFIIndex(CFIIndex)
- .setMIFlags(MachineInstr::FrameDestroy);
}
}
@@ -2292,7 +2285,7 @@ static void computeCalleeSaveRegisterPairs(
// MachO's compact unwind format relies on all registers being stored in
// pairs.
assert((!produceCompactUnwindFrame(MF) ||
- CC == CallingConv::PreserveMost ||
+ CC == CallingConv::PreserveMost || CC == CallingConv::CXX_FAST_TLS ||
(Count & 1) == 0) &&
"Odd number of callee-saved regs to spill!");
int ByteOffset = AFI->getCalleeSavedStackSize();
@@ -2331,7 +2324,7 @@ static void computeCalleeSaveRegisterPairs(
// Add the next reg to the pair if it is in the same register class.
if (unsigned(i + RegInc) < Count) {
- unsigned NextReg = CSI[i + RegInc].getReg();
+ Register NextReg = CSI[i + RegInc].getReg();
bool IsFirst = i == FirstReg;
switch (RPI.Type) {
case RegPairInfo::GPR:
@@ -2387,7 +2380,7 @@ static void computeCalleeSaveRegisterPairs(
// MachO's compact unwind format relies on all registers being stored in
// adjacent register pairs.
assert((!produceCompactUnwindFrame(MF) ||
- CC == CallingConv::PreserveMost ||
+ CC == CallingConv::PreserveMost || CC == CallingConv::CXX_FAST_TLS ||
(RPI.isPaired() &&
((RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) ||
RPI.Reg1 + 1 == RPI.Reg2))) &&
@@ -3135,7 +3128,7 @@ void AArch64FrameLowering::processFunctionBeforeFrameFinalized(
DebugLoc DL;
RS->enterBasicBlockEnd(MBB);
RS->backward(std::prev(MBBI));
- unsigned DstReg = RS->FindUnusedReg(&AArch64::GPR64commonRegClass);
+ Register DstReg = RS->FindUnusedReg(&AArch64::GPR64commonRegClass);
assert(DstReg && "There must be a free register after frame setup");
BuildMI(MBB, MBBI, DL, TII.get(AArch64::MOVi64imm), DstReg).addImm(-2);
BuildMI(MBB, MBBI, DL, TII.get(AArch64::STURXi))
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64FrameLowering.h b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64FrameLowering.h
index e6d997f91b47..31f57cbc49f2 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64FrameLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64FrameLowering.h
@@ -26,9 +26,8 @@ public:
: TargetFrameLowering(StackGrowsDown, Align(16), 0, Align(16),
true /*StackRealignable*/) {}
- void
- emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI) const override;
+ void emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI) const;
MachineBasicBlock::iterator
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index fe9b2f8883b9..899f069abdd4 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -5147,5 +5147,5 @@ bool AArch64DAGToDAGISel::SelectAllActivePredicate(SDValue N) {
const AArch64TargetLowering *TLI =
static_cast<const AArch64TargetLowering *>(getTargetLowering());
- return TLI->isAllActivePredicate(N);
+ return TLI->isAllActivePredicate(*CurDAG, N);
}
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index e141179fb5c8..a26bbc77f248 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -962,6 +962,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setMinFunctionAlignment(Align(4));
// Set preferred alignments.
setPrefLoopAlignment(Align(1ULL << STI.getPrefLoopLogAlignment()));
+ setMaxBytesForAlignment(STI.getMaxBytesForLoopAlignment());
setPrefFunctionAlignment(Align(1ULL << STI.getPrefFunctionLogAlignment()));
// Only change the limit for entries in a jump table if specified by
@@ -1205,6 +1206,8 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SRL, VT, Custom);
setOperationAction(ISD::SRA, VT, Custom);
setOperationAction(ISD::ABS, VT, Custom);
+ setOperationAction(ISD::ABDS, VT, Custom);
+ setOperationAction(ISD::ABDU, VT, Custom);
setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
@@ -1245,6 +1248,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SELECT_CC, VT, Expand);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
+ setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
// There are no legal MVT::nxv16f## based types.
if (VT != MVT::nxv16i1) {
@@ -1831,6 +1835,28 @@ void AArch64TargetLowering::computeKnownBitsForTargetNode(
Known = KnownBits::commonBits(Known, Known2);
break;
}
+ case AArch64ISD::BICi: {
+ // Compute the bit cleared value.
+ uint64_t Mask =
+ ~(Op->getConstantOperandVal(1) << Op->getConstantOperandVal(2));
+ Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
+ Known &= KnownBits::makeConstant(APInt(Known.getBitWidth(), Mask));
+ break;
+ }
+ case AArch64ISD::VLSHR: {
+ KnownBits Known2;
+ Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
+ Known2 = DAG.computeKnownBits(Op->getOperand(1), Depth + 1);
+ Known = KnownBits::lshr(Known, Known2);
+ break;
+ }
+ case AArch64ISD::VASHR: {
+ KnownBits Known2;
+ Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
+ Known2 = DAG.computeKnownBits(Op->getOperand(1), Depth + 1);
+ Known = KnownBits::ashr(Known, Known2);
+ break;
+ }
case AArch64ISD::LOADgot:
case AArch64ISD::ADDlow: {
if (!Subtarget->isTargetILP32())
@@ -1971,6 +1997,8 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
MAKE_CASE(AArch64ISD::CSINC)
MAKE_CASE(AArch64ISD::THREAD_POINTER)
MAKE_CASE(AArch64ISD::TLSDESC_CALLSEQ)
+ MAKE_CASE(AArch64ISD::ABDS_PRED)
+ MAKE_CASE(AArch64ISD::ABDU_PRED)
MAKE_CASE(AArch64ISD::ADD_PRED)
MAKE_CASE(AArch64ISD::MUL_PRED)
MAKE_CASE(AArch64ISD::MULHS_PRED)
@@ -2173,6 +2201,7 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
MAKE_CASE(AArch64ISD::INSR)
MAKE_CASE(AArch64ISD::PTEST)
MAKE_CASE(AArch64ISD::PTRUE)
+ MAKE_CASE(AArch64ISD::PFALSE)
MAKE_CASE(AArch64ISD::LD1_MERGE_ZERO)
MAKE_CASE(AArch64ISD::LD1S_MERGE_ZERO)
MAKE_CASE(AArch64ISD::LDNF1_MERGE_ZERO)
@@ -5173,6 +5202,10 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
return LowerFixedLengthVectorSelectToSVE(Op, DAG);
case ISD::ABS:
return LowerABS(Op, DAG);
+ case ISD::ABDS:
+ return LowerToPredicatedOp(Op, DAG, AArch64ISD::ABDS_PRED);
+ case ISD::ABDU:
+ return LowerToPredicatedOp(Op, DAG, AArch64ISD::ABDU_PRED);
case ISD::BITREVERSE:
return LowerBitreverse(Op, DAG);
case ISD::BSWAP:
@@ -5380,7 +5413,7 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
// Transform the arguments in physical registers into virtual ones.
- unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
+ Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);
// If this is an 8, 16 or 32-bit value, it is really passed promoted
@@ -5542,7 +5575,7 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
// Conservatively forward X8, since it might be used for aggregate return.
if (!CCInfo.isAllocated(AArch64::X8)) {
- unsigned X8VReg = MF.addLiveIn(AArch64::X8, &AArch64::GPR64RegClass);
+ Register X8VReg = MF.addLiveIn(AArch64::X8, &AArch64::GPR64RegClass);
Forwards.push_back(ForwardedRegister(X8VReg, AArch64::X8, MVT::i64));
}
}
@@ -5626,7 +5659,7 @@ void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
SDValue FIN = DAG.getFrameIndex(GPRIdx, PtrVT);
for (unsigned i = FirstVariadicGPR; i < NumGPRArgRegs; ++i) {
- unsigned VReg = MF.addLiveIn(GPRArgRegs[i], &AArch64::GPR64RegClass);
+ Register VReg = MF.addLiveIn(GPRArgRegs[i], &AArch64::GPR64RegClass);
SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
SDValue Store =
DAG.getStore(Val.getValue(1), DL, Val, FIN,
@@ -5656,7 +5689,7 @@ void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
SDValue FIN = DAG.getFrameIndex(FPRIdx, PtrVT);
for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) {
- unsigned VReg = MF.addLiveIn(FPRArgRegs[i], &AArch64::FPR128RegClass);
+ Register VReg = MF.addLiveIn(FPRArgRegs[i], &AArch64::FPR128RegClass);
SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128);
SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
@@ -7256,6 +7289,9 @@ SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op,
return getSVESafeBitCast(VT, IntResult, DAG);
}
+ if (!Subtarget->hasNEON())
+ return SDValue();
+
if (SrcVT.bitsLT(VT))
In2 = DAG.getNode(ISD::FP_EXTEND, DL, VT, In2);
else if (SrcVT.bitsGT(VT))
@@ -7795,10 +7831,37 @@ SDValue AArch64TargetLowering::LowerVECTOR_SPLICE(SDValue Op,
SelectionDAG &DAG) const {
EVT Ty = Op.getValueType();
auto Idx = Op.getConstantOperandAPInt(2);
+ int64_t IdxVal = Idx.getSExtValue();
+ assert(Ty.isScalableVector() &&
+ "Only expect scalable vectors for custom lowering of VECTOR_SPLICE");
+
+ // We can use the splice instruction for certain index values where we are
+ // able to efficiently generate the correct predicate. The index will be
+ // inverted and used directly as the input to the ptrue instruction, i.e.
+ // -1 -> vl1, -2 -> vl2, etc. The predicate will then be reversed to get the
+ // splice predicate. However, we can only do this if we can guarantee that
+ // there are enough elements in the vector, hence we check the index <= min
+ // number of elements.
+ Optional<unsigned> PredPattern;
+ if (Ty.isScalableVector() && IdxVal < 0 &&
+ (PredPattern = getSVEPredPatternFromNumElements(std::abs(IdxVal))) !=
+ None) {
+ SDLoc DL(Op);
+
+ // Create a predicate where all but the last -IdxVal elements are false.
+ EVT PredVT = Ty.changeVectorElementType(MVT::i1);
+ SDValue Pred = getPTrue(DAG, DL, PredVT, *PredPattern);
+ Pred = DAG.getNode(ISD::VECTOR_REVERSE, DL, PredVT, Pred);
+
+ // Now splice the two inputs together using the predicate.
+ return DAG.getNode(AArch64ISD::SPLICE, DL, Ty, Pred, Op.getOperand(0),
+ Op.getOperand(1));
+ }
// This will select to an EXT instruction, which has a maximum immediate
// value of 255, hence 2048-bits is the maximum value we can lower.
- if (Idx.sge(-1) && Idx.slt(2048 / Ty.getVectorElementType().getSizeInBits()))
+ if (IdxVal >= 0 &&
+ IdxVal < int64_t(2048 / Ty.getVectorElementType().getSizeInBits()))
return Op;
return SDValue();
@@ -8227,7 +8290,7 @@ SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op,
} else {
// Return LR, which contains the return address. Mark it an implicit
// live-in.
- unsigned Reg = MF.addLiveIn(AArch64::LR, &AArch64::GPR64RegClass);
+ Register Reg = MF.addLiveIn(AArch64::LR, &AArch64::GPR64RegClass);
ReturnAddress = DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT);
}
@@ -9631,14 +9694,12 @@ static SDValue constructDup(SDValue V, int Lane, SDLoc dl, EVT VT,
MVT CastVT;
if (getScaledOffsetDup(V, Lane, CastVT)) {
V = DAG.getBitcast(CastVT, V.getOperand(0).getOperand(0));
- } else if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
+ } else if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
+ V.getOperand(0).getValueType().is128BitVector()) {
// The lane is incremented by the index of the extract.
// Example: dup v2f32 (extract v4f32 X, 2), 1 --> dup v4f32 X, 3
- auto VecVT = V.getOperand(0).getValueType();
- if (VecVT.isFixedLengthVector() && VecVT.getFixedSizeInBits() <= 128) {
- Lane += V.getConstantOperandVal(1);
- V = V.getOperand(0);
- }
+ Lane += V.getConstantOperandVal(1);
+ V = V.getOperand(0);
} else if (V.getOpcode() == ISD::CONCAT_VECTORS) {
// The lane is decremented if we are splatting from the 2nd operand.
// Example: dup v4i32 (concat v2i32 X, v2i32 Y), 3 --> dup v4i32 Y, 1
@@ -9925,7 +9986,7 @@ SDValue AArch64TargetLowering::LowerSPLAT_VECTOR(SDValue Op,
// lowering code.
if (auto *ConstVal = dyn_cast<ConstantSDNode>(SplatVal)) {
if (ConstVal->isZero())
- return SDValue(DAG.getMachineNode(AArch64::PFALSE, dl, VT), 0);
+ return DAG.getNode(AArch64ISD::PFALSE, dl, VT);
if (ConstVal->isOne())
return getPTrue(DAG, dl, VT, AArch64SVEPredPattern::all);
}
@@ -10978,6 +11039,28 @@ SDValue AArch64TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
if (!isTypeLegal(VT))
return SDValue();
+ // Break down insert_subvector into simpler parts.
+ if (VT.getVectorElementType() == MVT::i1) {
+ unsigned NumElts = VT.getVectorMinNumElements();
+ EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
+
+ SDValue Lo, Hi;
+ Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, Vec0,
+ DAG.getVectorIdxConstant(0, DL));
+ Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, Vec0,
+ DAG.getVectorIdxConstant(NumElts / 2, DL));
+ if (Idx < (NumElts / 2)) {
+ SDValue NewLo = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, HalfVT, Lo, Vec1,
+ DAG.getVectorIdxConstant(Idx, DL));
+ return DAG.getNode(AArch64ISD::UZP1, DL, VT, NewLo, Hi);
+ } else {
+ SDValue NewHi =
+ DAG.getNode(ISD::INSERT_SUBVECTOR, DL, HalfVT, Hi, Vec1,
+ DAG.getVectorIdxConstant(Idx - (NumElts / 2), DL));
+ return DAG.getNode(AArch64ISD::UZP1, DL, VT, Lo, NewHi);
+ }
+ }
+
// Ensure the subvector is half the size of the main vector.
if (VT.getVectorElementCount() != (InVT.getVectorElementCount() * 2))
return SDValue();
@@ -11012,10 +11095,10 @@ SDValue AArch64TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
if (Vec0.isUndef())
return Op;
- unsigned int PredPattern =
+ Optional<unsigned> PredPattern =
getSVEPredPatternFromNumElements(InVT.getVectorNumElements());
auto PredTy = VT.changeVectorElementType(MVT::i1);
- SDValue PTrue = getPTrue(DAG, DL, PredTy, PredPattern);
+ SDValue PTrue = getPTrue(DAG, DL, PredTy, *PredPattern);
SDValue ScalableVec1 = convertToScalableVector(DAG, VT, Vec1);
return DAG.getNode(ISD::VSELECT, DL, VT, PTrue, ScalableVec1, Vec0);
}
@@ -11730,10 +11813,10 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
case Intrinsic::aarch64_ldxr: {
PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
Info.opc = ISD::INTRINSIC_W_CHAIN;
- Info.memVT = MVT::getVT(PtrTy->getElementType());
+ Info.memVT = MVT::getVT(PtrTy->getPointerElementType());
Info.ptrVal = I.getArgOperand(0);
Info.offset = 0;
- Info.align = DL.getABITypeAlign(PtrTy->getElementType());
+ Info.align = DL.getABITypeAlign(PtrTy->getPointerElementType());
Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
return true;
}
@@ -11741,10 +11824,10 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
case Intrinsic::aarch64_stxr: {
PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
Info.opc = ISD::INTRINSIC_W_CHAIN;
- Info.memVT = MVT::getVT(PtrTy->getElementType());
+ Info.memVT = MVT::getVT(PtrTy->getPointerElementType());
Info.ptrVal = I.getArgOperand(1);
Info.offset = 0;
- Info.align = DL.getABITypeAlign(PtrTy->getElementType());
+ Info.align = DL.getABITypeAlign(PtrTy->getPointerElementType());
Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
return true;
}
@@ -11772,7 +11855,7 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.memVT = MVT::getVT(I.getType());
Info.ptrVal = I.getArgOperand(1);
Info.offset = 0;
- Info.align = DL.getABITypeAlign(PtrTy->getElementType());
+ Info.align = DL.getABITypeAlign(PtrTy->getPointerElementType());
Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MONonTemporal;
return true;
}
@@ -11782,7 +11865,7 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.memVT = MVT::getVT(I.getOperand(0)->getType());
Info.ptrVal = I.getArgOperand(2);
Info.offset = 0;
- Info.align = DL.getABITypeAlign(PtrTy->getElementType());
+ Info.align = DL.getABITypeAlign(PtrTy->getPointerElementType());
Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MONonTemporal;
return true;
}
@@ -12320,7 +12403,7 @@ bool AArch64TargetLowering::lowerInterleavedLoad(
Value *PTrue = nullptr;
if (UseScalable) {
- unsigned PgPattern =
+ Optional<unsigned> PgPattern =
getSVEPredPatternFromNumElements(FVTy->getNumElements());
if (Subtarget->getMinSVEVectorSizeInBits() ==
Subtarget->getMaxSVEVectorSizeInBits() &&
@@ -12328,7 +12411,7 @@ bool AArch64TargetLowering::lowerInterleavedLoad(
PgPattern = AArch64SVEPredPattern::all;
auto *PTruePat =
- ConstantInt::get(Type::getInt32Ty(LDVTy->getContext()), PgPattern);
+ ConstantInt::get(Type::getInt32Ty(LDVTy->getContext()), *PgPattern);
PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue, {PredTy},
{PTruePat});
}
@@ -12500,7 +12583,7 @@ bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
Value *PTrue = nullptr;
if (UseScalable) {
- unsigned PgPattern =
+ Optional<unsigned> PgPattern =
getSVEPredPatternFromNumElements(SubVecTy->getNumElements());
if (Subtarget->getMinSVEVectorSizeInBits() ==
Subtarget->getMaxSVEVectorSizeInBits() &&
@@ -12509,7 +12592,7 @@ bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
PgPattern = AArch64SVEPredPattern::all;
auto *PTruePat =
- ConstantInt::get(Type::getInt32Ty(STVTy->getContext()), PgPattern);
+ ConstantInt::get(Type::getInt32Ty(STVTy->getContext()), *PgPattern);
PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue, {PredTy},
{PTruePat});
}
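
getSVEPredPatternFromNumElements now returns an Optional, so both interleaved-access paths must handle the lookup failing before they may override the pattern with "all". Below is a minimal standalone sketch of that shape, not the LLVM API: predPatternFromNumElements, the pattern encodings and the 32-bit element width are illustrative assumptions.

#include <cassert>
#include <iostream>
#include <optional>

// Hypothetical stand-in for getSVEPredPatternFromNumElements. Only a few
// element counts have a dedicated VLn pattern; everything else fails, which
// is why the callers above now receive an Optional.
std::optional<unsigned> predPatternFromNumElements(unsigned NumElts) {
  if (NumElts >= 1 && NumElts <= 8)
    return NumElts; // vl1..vl8
  switch (NumElts) {
  case 16:  return 9;  // vl16
  case 32:  return 10; // vl32
  case 64:  return 11; // vl64
  case 128: return 12; // vl128
  case 256: return 13; // vl256
  default:  return std::nullopt;
  }
}

int main() {
  const unsigned PatternAll = 31;            // "all elements" pattern
  unsigned NumElts = 8;                      // fixed-length vector, 8 lanes
  unsigned MinVLBits = 256, MaxVLBits = 256; // exact SVE register size known

  std::optional<unsigned> PgPattern = predPatternFromNumElements(NumElts);
  // When min and max vector lengths agree and the fixed-length vector fills
  // the register, prefer the cheaper "all" pattern, as the lowering code does.
  unsigned ElementBits = 32; // assumed element width for this example
  if (MinVLBits == MaxVLBits && NumElts * ElementBits == MaxVLBits)
    PgPattern = PatternAll;
  assert(PgPattern && "unexpected element count for SVE predicate");
  std::cout << "ptrue pattern encoding: " << *PgPattern << "\n";
  return 0;
}
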
@@ -12901,7 +12984,7 @@ bool AArch64TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
return false;
- return (Index == 0 || Index == ResVT.getVectorNumElements());
+ return (Index == 0 || Index == ResVT.getVectorMinNumElements());
}
/// Turn vector tests of the signbit in the form of:
@@ -14261,6 +14344,7 @@ static SDValue performConcatVectorsCombine(SDNode *N,
static SDValue
performInsertSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG) {
+ SDLoc DL(N);
SDValue Vec = N->getOperand(0);
SDValue SubVec = N->getOperand(1);
uint64_t IdxVal = N->getConstantOperandVal(2);
@@ -14286,7 +14370,6 @@ performInsertSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
// Fold insert_subvector -> concat_vectors
// insert_subvector(Vec,Sub,lo) -> concat_vectors(Sub,extract(Vec,hi))
// insert_subvector(Vec,Sub,hi) -> concat_vectors(extract(Vec,lo),Sub)
- SDLoc DL(N);
SDValue Lo, Hi;
if (IdxVal == 0) {
Lo = SubVec;
@@ -15004,7 +15087,15 @@ static SDValue combineSVEReductionOrderedFP(SDNode *N, unsigned Opc,
Zero);
}
-static bool isAllActivePredicate(SDValue N) {
+static bool isAllInactivePredicate(SDValue N) {
+ // Look through cast.
+ while (N.getOpcode() == AArch64ISD::REINTERPRET_CAST)
+ N = N.getOperand(0);
+
+ return N.getOpcode() == AArch64ISD::PFALSE;
+}
+
+static bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) {
unsigned NumElts = N.getValueType().getVectorMinNumElements();
// Look through cast.
@@ -15023,6 +15114,21 @@ static bool isAllActivePredicate(SDValue N) {
N.getConstantOperandVal(0) == AArch64SVEPredPattern::all)
return N.getValueType().getVectorMinNumElements() >= NumElts;
+ // If we're compiling for a specific vector-length, we can check if the
+ // pattern's VL equals that of the scalable vector at runtime.
+ if (N.getOpcode() == AArch64ISD::PTRUE) {
+ const auto &Subtarget =
+ static_cast<const AArch64Subtarget &>(DAG.getSubtarget());
+ unsigned MinSVESize = Subtarget.getMinSVEVectorSizeInBits();
+ unsigned MaxSVESize = Subtarget.getMaxSVEVectorSizeInBits();
+ if (MaxSVESize && MinSVESize == MaxSVESize) {
+ unsigned VScale = MaxSVESize / AArch64::SVEBitsPerBlock;
+ unsigned PatNumElts =
+ getNumElementsFromSVEPredPattern(N.getConstantOperandVal(0));
+ return PatNumElts == (NumElts * VScale);
+ }
+ }
+
return false;
}
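
The new PTRUE case reduces to simple arithmetic once the SVE register size is pinned by -msve-vector-bits: compare the pattern's element count with NumElts scaled by vscale. A standalone sketch of that check, with numElementsFromPredPattern as an assumed stand-in for the real helper:

#include <iostream>

constexpr unsigned SVEBitsPerBlock = 128;

// Assumed stand-in for getNumElementsFromSVEPredPattern: how many elements a
// VLn pattern enables, or 0 for patterns without a fixed count.
unsigned numElementsFromPredPattern(unsigned Pattern) {
  if (Pattern >= 1 && Pattern <= 8)
    return Pattern; // vl1..vl8
  switch (Pattern) {
  case 9:  return 16;  // vl16
  case 10: return 32;  // vl32
  case 11: return 64;  // vl64
  case 12: return 128; // vl128
  case 13: return 256; // vl256
  default: return 0;
  }
}

// Is ptrue(Pattern) all-active for a predicate with NumElts elements per
// 128-bit block, given the vector length is known to be exactly MaxSVESize?
bool ptrueIsAllActive(unsigned Pattern, unsigned NumElts, unsigned MinSVESize,
                      unsigned MaxSVESize) {
  if (!MaxSVESize || MinSVESize != MaxSVESize)
    return false; // vector length not known exactly
  unsigned VScale = MaxSVESize / SVEBitsPerBlock;
  return numElementsFromPredPattern(Pattern) == NumElts * VScale;
}

int main() {
  // With -msve-vector-bits=256 (vscale == 2), ptrue.s vl8 covers all eight
  // 32-bit lanes of an nxv4i1 predicate, so it counts as all-active.
  std::cout << std::boolalpha
            << ptrueIsAllActive(/*vl8=*/8, /*NumElts=*/4, 256, 256) << "\n";
  return 0;
}
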
@@ -15039,7 +15145,7 @@ static SDValue convertMergedOpToPredOp(SDNode *N, unsigned Opc,
SDValue Op2 = N->getOperand(SwapOperands ? 2 : 3);
// ISD way to specify an all active predicate.
- if (isAllActivePredicate(Pg)) {
+ if (isAllActivePredicate(DAG, Pg)) {
if (UnpredOp)
return DAG.getNode(Opc, SDLoc(N), N->getValueType(0), Op1, Op2);
@@ -15870,7 +15976,7 @@ static SDValue performPostLD1Combine(SDNode *N,
SelectionDAG &DAG = DCI.DAG;
EVT VT = N->getValueType(0);
- if (VT.isScalableVector())
+ if (!VT.is128BitVector() && !VT.is64BitVector())
return SDValue();
unsigned LoadIdx = IsLaneOp ? 1 : 0;
@@ -16710,6 +16816,12 @@ static SDValue performVSelectCombine(SDNode *N, SelectionDAG &DAG) {
SDValue N0 = N->getOperand(0);
EVT CCVT = N0.getValueType();
+ if (isAllActivePredicate(DAG, N0))
+ return N->getOperand(1);
+
+ if (isAllInactivePredicate(N0))
+ return N->getOperand(2);
+
// Check for sign pattern (VSELECT setgt, iN lhs, -1, 1, -1) and transform
// into (OR (ASR lhs, N-1), 1), which requires fewer instructions for the
// supported types.
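
The two early-outs added above are the usual select folds lifted to predicated vectors: an all-active governing predicate selects the first operand, an all-inactive one the second. A scalar sketch of the fold, purely illustrative:

#include <array>
#include <cstddef>
#include <iostream>

// vselect(Pred, A, B) picks A[i] where Pred[i] is set, otherwise B[i]. When
// the predicate is known all-active or all-inactive the select folds away,
// mirroring the two early returns above.
template <std::size_t N>
std::array<int, N> vselect(const std::array<bool, N> &Pred,
                           const std::array<int, N> &A,
                           const std::array<int, N> &B) {
  bool AllActive = true, AllInactive = true;
  for (bool P : Pred) {
    AllActive = AllActive && P;
    AllInactive = AllInactive && !P;
  }
  if (AllActive)
    return A; // corresponds to: return N->getOperand(1)
  if (AllInactive)
    return B; // corresponds to: return N->getOperand(2)
  std::array<int, N> R{};
  for (std::size_t I = 0; I < N; ++I)
    R[I] = Pred[I] ? A[I] : B[I];
  return R;
}

int main() {
  std::array<bool, 4> AllActive{true, true, true, true};
  std::array<int, 4> A{1, 2, 3, 4}, B{5, 6, 7, 8};
  std::cout << vselect(AllActive, A, B)[2] << "\n"; // prints 3, i.e. A wins
  return 0;
}
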
@@ -18753,7 +18865,7 @@ static SDValue getPredicateForFixedLengthVector(SelectionDAG &DAG, SDLoc &DL,
DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
"Expected legal fixed length vector!");
- unsigned PgPattern =
+ Optional<unsigned> PgPattern =
getSVEPredPatternFromNumElements(VT.getVectorNumElements());
assert(PgPattern && "Unexpected element count for SVE predicate");
@@ -18789,7 +18901,7 @@ static SDValue getPredicateForFixedLengthVector(SelectionDAG &DAG, SDLoc &DL,
break;
}
- return getPTrue(DAG, DL, MaskVT, PgPattern);
+ return getPTrue(DAG, DL, MaskVT, *PgPattern);
}
static SDValue getPredicateForScalableVector(SelectionDAG &DAG, SDLoc &DL,
@@ -19281,7 +19393,12 @@ SDValue AArch64TargetLowering::LowerPredReductionToSVE(SDValue ReduceOp,
default:
return SDValue();
case ISD::VECREDUCE_OR:
- return getPTest(DAG, VT, Pg, Op, AArch64CC::ANY_ACTIVE);
+ if (isAllActivePredicate(DAG, Pg))
+ // The predicate can be 'Op' because
+ // vecreduce_or(Op & <all true>) <=> vecreduce_or(Op).
+ return getPTest(DAG, VT, Op, Op, AArch64CC::ANY_ACTIVE);
+ else
+ return getPTest(DAG, VT, Pg, Op, AArch64CC::ANY_ACTIVE);
case ISD::VECREDUCE_AND: {
Op = DAG.getNode(ISD::XOR, DL, OpVT, Op, Pg);
return getPTest(DAG, VT, Pg, Op, AArch64CC::NONE_ACTIVE);
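
The identity quoted in the comment, vecreduce_or(Op & <all true>) <=> vecreduce_or(Op), is what makes it legal to pass Op as its own governing predicate to PTEST. A small exhaustive check over 4-lane inputs, as a plain C++ sketch:

#include <array>
#include <cassert>
#include <cstddef>
#include <iostream>

// OR-reduction of boolean lanes under a governing predicate Pg: is any
// selected lane set?
template <std::size_t N>
bool vecreduceOr(const std::array<bool, N> &Pg, const std::array<bool, N> &Op) {
  bool Any = false;
  for (std::size_t I = 0; I < N; ++I)
    Any = Any || (Pg[I] && Op[I]);
  return Any;
}

int main() {
  const std::array<bool, 4> AllTrue{true, true, true, true};
  // Check vecreduce_or(<all true> & Op) == vecreduce_or(Op & Op) for every
  // 4-lane input, which is why PTEST may use Op as its own predicate.
  for (unsigned Bits = 0; Bits < 16; ++Bits) {
    std::array<bool, 4> Op{};
    for (std::size_t I = 0; I < 4; ++I)
      Op[I] = (Bits >> I) & 1;
    assert(vecreduceOr(AllTrue, Op) == vecreduceOr(Op, Op));
  }
  std::cout << "identity holds for all 4-lane inputs\n";
  return 0;
}
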
@@ -19725,8 +19842,9 @@ SDValue AArch64TargetLowering::getSVESafeBitCast(EVT VT, SDValue Op,
return Op;
}
-bool AArch64TargetLowering::isAllActivePredicate(SDValue N) const {
- return ::isAllActivePredicate(N);
+bool AArch64TargetLowering::isAllActivePredicate(SelectionDAG &DAG,
+ SDValue N) const {
+ return ::isAllActivePredicate(DAG, N);
}
EVT AArch64TargetLowering::getPromotedVTForPredicate(EVT VT) const {
@@ -19777,7 +19895,7 @@ bool AArch64TargetLowering::SimplifyDemandedBitsForTargetNode(
Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
}
-bool AArch64TargetLowering::isConstantUnsignedBitfieldExtactLegal(
+bool AArch64TargetLowering::isConstantUnsignedBitfieldExtractLegal(
unsigned Opc, LLT Ty1, LLT Ty2) const {
return Ty1 == Ty2 && (Ty1 == LLT::scalar(32) || Ty1 == LLT::scalar(64));
}
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 367ba3039a0c..ca6c70297c0b 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -77,14 +77,16 @@ enum NodeType : unsigned {
SBC, // adc, sbc instructions
// Predicated instructions where inactive lanes produce undefined results.
+ ABDS_PRED,
+ ABDU_PRED,
ADD_PRED,
FADD_PRED,
FDIV_PRED,
FMA_PRED,
- FMAXNM_PRED,
- FMINNM_PRED,
FMAX_PRED,
+ FMAXNM_PRED,
FMIN_PRED,
+ FMINNM_PRED,
FMUL_PRED,
FSUB_PRED,
MUL_PRED,
@@ -321,6 +323,7 @@ enum NodeType : unsigned {
INSR,
PTEST,
PTRUE,
+ PFALSE,
BITREVERSE_MERGE_PASSTHRU,
BSWAP_MERGE_PASSTHRU,
@@ -487,7 +490,6 @@ const unsigned RoundingBitsPos = 22;
} // namespace AArch64
class AArch64Subtarget;
-class AArch64TargetMachine;
class AArch64TargetLowering : public TargetLowering {
public:
@@ -842,7 +844,7 @@ public:
return 128;
}
- bool isAllActivePredicate(SDValue N) const;
+ bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) const;
EVT getPromotedVTForPredicate(EVT VT) const;
EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty,
@@ -1137,8 +1139,8 @@ private:
// with BITCAST used otherwise.
SDValue getSVESafeBitCast(EVT VT, SDValue Op, SelectionDAG &DAG) const;
- bool isConstantUnsignedBitfieldExtactLegal(unsigned Opc, LLT Ty1,
- LLT Ty2) const override;
+ bool isConstantUnsignedBitfieldExtractLegal(unsigned Opc, LLT Ty1,
+ LLT Ty2) const override;
};
namespace AArch64 {
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrAtomics.td b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrAtomics.td
index 84573dac7e41..b220929514f9 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrAtomics.td
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrAtomics.td
@@ -102,6 +102,34 @@ def : Pat<(relaxed_load<atomic_load_64>
(am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
(LDURXi GPR64sp:$Rn, simm9:$offset)>;
+// FP 32-bit loads
+def : Pat<(f32 (bitconvert (i32 (relaxed_load<atomic_load_32> (ro_Windexed32 GPR64sp:$Rn, GPR32:$Rm,
+ ro_Wextend32:$extend))))),
+ (LDRSroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend32:$extend)>;
+def : Pat<(f32 (bitconvert (i32 (relaxed_load<atomic_load_32> (ro_Xindexed32 GPR64sp:$Rn, GPR64:$Rm,
+ ro_Xextend32:$extend))))),
+ (LDRSroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend32:$extend)>;
+def : Pat<(f32 (bitconvert (i32 (relaxed_load<atomic_load_32> (am_indexed32 GPR64sp:$Rn,
+ uimm12s8:$offset))))),
+ (LDRSui GPR64sp:$Rn, uimm12s8:$offset)>;
+def : Pat<(f32 (bitconvert (i32 (relaxed_load<atomic_load_32>
+ (am_unscaled32 GPR64sp:$Rn, simm9:$offset))))),
+ (LDURSi GPR64sp:$Rn, simm9:$offset)>;
+
+// FP 64-bit loads
+def : Pat<(f64 (bitconvert (i64 (relaxed_load<atomic_load_64> (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
+ ro_Wextend64:$extend))))),
+ (LDRDroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;
+def : Pat<(f64 (bitconvert (i64 (relaxed_load<atomic_load_64> (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
+ ro_Xextend64:$extend))))),
+ (LDRDroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
+def : Pat<(f64 (bitconvert (i64 (relaxed_load<atomic_load_64> (am_indexed64 GPR64sp:$Rn,
+ uimm12s8:$offset))))),
+ (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
+def : Pat<(f64 (bitconvert (i64 (relaxed_load<atomic_load_64>
+ (am_unscaled64 GPR64sp:$Rn, simm9:$offset))))),
+ (LDURDi GPR64sp:$Rn, simm9:$offset)>;
+
//===----------------------------------
// Atomic stores
//===----------------------------------
@@ -196,6 +224,38 @@ def : Pat<(relaxed_store<atomic_store_64>
(am_unscaled64 GPR64sp:$Rn, simm9:$offset), GPR64:$val),
(STURXi GPR64:$val, GPR64sp:$Rn, simm9:$offset)>;
+// FP 32-bit stores
+def : Pat<(relaxed_store<atomic_store_32> (ro_Windexed32 GPR64sp:$Rn, GPR32:$Rm,
+ ro_Wextend32:$extend),
+ (i32 (bitconvert (f32 FPR32Op:$val)))),
+ (STRSroW FPR32Op:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend32:$extend)>;
+def : Pat<(relaxed_store<atomic_store_32> (ro_Xindexed32 GPR64sp:$Rn, GPR64:$Rm,
+ ro_Xextend32:$extend),
+ (i32 (bitconvert (f32 FPR32Op:$val)))),
+ (STRSroX FPR32Op:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend32:$extend)>;
+def : Pat<(relaxed_store<atomic_store_32>
+ (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset), (i32 (bitconvert (f32 FPR32Op:$val)))),
+ (STRSui FPR32Op:$val, GPR64sp:$Rn, uimm12s4:$offset)>;
+def : Pat<(relaxed_store<atomic_store_32>
+ (am_unscaled32 GPR64sp:$Rn, simm9:$offset), (i32 (bitconvert (f32 FPR32Op:$val)))),
+ (STURSi FPR32Op:$val, GPR64sp:$Rn, simm9:$offset)>;
+
+// FP 64-bit stores
+def : Pat<(relaxed_store<atomic_store_64> (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
+ ro_Wextend64:$extend),
+ (i64 (bitconvert (f64 FPR64Op:$val)))),
+ (STRDroW FPR64Op:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;
+def : Pat<(relaxed_store<atomic_store_64> (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
+ ro_Xextend64:$extend),
+ (i64 (bitconvert (f64 FPR64Op:$val)))),
+ (STRDroX FPR64Op:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
+def : Pat<(relaxed_store<atomic_store_64>
+ (am_indexed64 GPR64sp:$Rn, uimm12s4:$offset), (i64 (bitconvert (f64 FPR64Op:$val)))),
+ (STRDui FPR64Op:$val, GPR64sp:$Rn, uimm12s4:$offset)>;
+def : Pat<(relaxed_store<atomic_store_64>
+ (am_unscaled64 GPR64sp:$Rn, simm9:$offset), (i64 (bitconvert (f64 FPR64Op:$val)))),
+ (STURDi FPR64Op:$val, GPR64sp:$Rn, simm9:$offset)>;
+
//===----------------------------------
// Low-level exclusive operations
//===----------------------------------
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrFormats.td b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrFormats.td
index f8d492188744..4c1e41b7efee 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrFormats.td
@@ -1816,10 +1816,10 @@ def am_brcond : Operand<OtherVT> {
let OperandType = "OPERAND_PCREL";
}
-class BranchCond : I<(outs), (ins ccode:$cond, am_brcond:$target),
- "b", ".$cond\t$target", "",
- [(AArch64brcond bb:$target, imm:$cond, NZCV)]>,
- Sched<[WriteBr]> {
+class BranchCond<bit bit4, string mnemonic>
+ : I<(outs), (ins ccode:$cond, am_brcond:$target),
+ mnemonic, ".$cond\t$target", "",
+ [(AArch64brcond bb:$target, imm:$cond, NZCV)]>, Sched<[WriteBr]> {
let isBranch = 1;
let isTerminator = 1;
let Uses = [NZCV];
@@ -1828,7 +1828,7 @@ class BranchCond : I<(outs), (ins ccode:$cond, am_brcond:$target),
bits<19> target;
let Inst{31-24} = 0b01010100;
let Inst{23-5} = target;
- let Inst{4} = 0;
+ let Inst{4} = bit4;
let Inst{3-0} = cond;
}
@@ -7700,10 +7700,10 @@ multiclass SIMDTableLookupTied<bit op, string asm> {
//----------------------------------------------------------------------------
-// AdvSIMD scalar CPY
+// AdvSIMD scalar DUP
//----------------------------------------------------------------------------
let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
-class BaseSIMDScalarCPY<RegisterClass regtype, RegisterOperand vectype,
+class BaseSIMDScalarDUP<RegisterClass regtype, RegisterOperand vectype,
string asm, string kind, Operand idxtype>
: I<(outs regtype:$dst), (ins vectype:$src, idxtype:$idx), asm,
"{\t$dst, $src" # kind # "$idx" #
@@ -7717,30 +7717,30 @@ class BaseSIMDScalarCPY<RegisterClass regtype, RegisterOperand vectype,
let Inst{4-0} = dst;
}
-class SIMDScalarCPYAlias<string asm, string size, Instruction inst,
+class SIMDScalarDUPAlias<string asm, string size, Instruction inst,
RegisterClass regtype, RegisterOperand vectype, Operand idxtype>
: InstAlias<asm # "{\t$dst, $src" # size # "$index"
# "|\t$dst, $src$index}",
(inst regtype:$dst, vectype:$src, idxtype:$index), 0>;
-multiclass SIMDScalarCPY<string asm> {
- def i8 : BaseSIMDScalarCPY<FPR8, V128, asm, ".b", VectorIndexB> {
+multiclass SIMDScalarDUP<string asm> {
+ def i8 : BaseSIMDScalarDUP<FPR8, V128, asm, ".b", VectorIndexB> {
bits<4> idx;
let Inst{20-17} = idx;
let Inst{16} = 1;
}
- def i16 : BaseSIMDScalarCPY<FPR16, V128, asm, ".h", VectorIndexH> {
+ def i16 : BaseSIMDScalarDUP<FPR16, V128, asm, ".h", VectorIndexH> {
bits<3> idx;
let Inst{20-18} = idx;
let Inst{17-16} = 0b10;
}
- def i32 : BaseSIMDScalarCPY<FPR32, V128, asm, ".s", VectorIndexS> {
+ def i32 : BaseSIMDScalarDUP<FPR32, V128, asm, ".s", VectorIndexS> {
bits<2> idx;
let Inst{20-19} = idx;
let Inst{18-16} = 0b100;
}
- def i64 : BaseSIMDScalarCPY<FPR64, V128, asm, ".d", VectorIndexD> {
+ def i64 : BaseSIMDScalarDUP<FPR64, V128, asm, ".d", VectorIndexD> {
bits<1> idx;
let Inst{20} = idx;
let Inst{19-16} = 0b1000;
@@ -7751,16 +7751,16 @@ multiclass SIMDScalarCPY<string asm> {
(!cast<Instruction>(NAME # i64) V128:$src, VectorIndexD:$idx)>;
// 'DUP' mnemonic aliases.
- def : SIMDScalarCPYAlias<"dup", ".b",
+ def : SIMDScalarDUPAlias<"dup", ".b",
!cast<Instruction>(NAME#"i8"),
FPR8, V128, VectorIndexB>;
- def : SIMDScalarCPYAlias<"dup", ".h",
+ def : SIMDScalarDUPAlias<"dup", ".h",
!cast<Instruction>(NAME#"i16"),
FPR16, V128, VectorIndexH>;
- def : SIMDScalarCPYAlias<"dup", ".s",
+ def : SIMDScalarDUPAlias<"dup", ".s",
!cast<Instruction>(NAME#"i32"),
FPR32, V128, VectorIndexS>;
- def : SIMDScalarCPYAlias<"dup", ".d",
+ def : SIMDScalarDUPAlias<"dup", ".d",
!cast<Instruction>(NAME#"i64"),
FPR64, V128, VectorIndexD>;
}
@@ -10556,40 +10556,30 @@ class BaseSIMDThreeSameVectorTiedR0<bit Q, bit U, bits<2> size, bits<5> opcode,
pattern> {
}
multiclass SIMDThreeSameVectorSQRDMLxHTiedHS<bit U, bits<5> opc, string asm,
- SDPatternOperator Accum> {
+ SDPatternOperator op> {
def v4i16 : BaseSIMDThreeSameVectorTiedR0<0, U, 0b01, opc, V64, asm, ".4h",
[(set (v4i16 V64:$dst),
- (Accum (v4i16 V64:$Rd),
- (v4i16 (int_aarch64_neon_sqrdmulh (v4i16 V64:$Rn),
- (v4i16 V64:$Rm)))))]>;
+ (v4i16 (op (v4i16 V64:$Rd), (v4i16 V64:$Rn), (v4i16 V64:$Rm))))]>;
def v8i16 : BaseSIMDThreeSameVectorTiedR0<1, U, 0b01, opc, V128, asm, ".8h",
[(set (v8i16 V128:$dst),
- (Accum (v8i16 V128:$Rd),
- (v8i16 (int_aarch64_neon_sqrdmulh (v8i16 V128:$Rn),
- (v8i16 V128:$Rm)))))]>;
+ (v8i16 (op (v8i16 V128:$Rd), (v8i16 V128:$Rn), (v8i16 V128:$Rm))))]>;
def v2i32 : BaseSIMDThreeSameVectorTiedR0<0, U, 0b10, opc, V64, asm, ".2s",
[(set (v2i32 V64:$dst),
- (Accum (v2i32 V64:$Rd),
- (v2i32 (int_aarch64_neon_sqrdmulh (v2i32 V64:$Rn),
- (v2i32 V64:$Rm)))))]>;
+ (v2i32 (op (v2i32 V64:$Rd), (v2i32 V64:$Rn), (v2i32 V64:$Rm))))]>;
def v4i32 : BaseSIMDThreeSameVectorTiedR0<1, U, 0b10, opc, V128, asm, ".4s",
[(set (v4i32 V128:$dst),
- (Accum (v4i32 V128:$Rd),
- (v4i32 (int_aarch64_neon_sqrdmulh (v4i32 V128:$Rn),
- (v4i32 V128:$Rm)))))]>;
+ (v4i32 (op (v4i32 V128:$Rd), (v4i32 V128:$Rn), (v4i32 V128:$Rm))))]>;
}
multiclass SIMDIndexedSQRDMLxHSDTied<bit U, bits<4> opc, string asm,
- SDPatternOperator Accum> {
+ SDPatternOperator op> {
def v4i16_indexed : BaseSIMDIndexedTied<0, U, 0, 0b01, opc,
V64, V64, V128_lo, VectorIndexH,
asm, ".4h", ".4h", ".4h", ".h",
[(set (v4i16 V64:$dst),
- (Accum (v4i16 V64:$Rd),
- (v4i16 (int_aarch64_neon_sqrdmulh
- (v4i16 V64:$Rn),
- (v4i16 (AArch64duplane16 (v8i16 V128_lo:$Rm),
- VectorIndexH:$idx))))))]> {
+ (v4i16 (op (v4i16 V64:$Rd), (v4i16 V64:$Rn),
+ (v4i16 (AArch64duplane16 (v8i16 V128_lo:$Rm),
+ VectorIndexH:$idx)))))]> {
bits<3> idx;
let Inst{11} = idx{2};
let Inst{21} = idx{1};
@@ -10600,11 +10590,9 @@ multiclass SIMDIndexedSQRDMLxHSDTied<bit U, bits<4> opc, string asm,
V128, V128, V128_lo, VectorIndexH,
asm, ".8h", ".8h", ".8h", ".h",
[(set (v8i16 V128:$dst),
- (Accum (v8i16 V128:$Rd),
- (v8i16 (int_aarch64_neon_sqrdmulh
- (v8i16 V128:$Rn),
- (v8i16 (AArch64duplane16 (v8i16 V128_lo:$Rm),
- VectorIndexH:$idx))))))]> {
+ (v8i16 (op (v8i16 V128:$Rd), (v8i16 V128:$Rn),
+ (v8i16 (AArch64duplane16 (v8i16 V128_lo:$Rm),
+ VectorIndexH:$idx)))))]> {
bits<3> idx;
let Inst{11} = idx{2};
let Inst{21} = idx{1};
@@ -10615,75 +10603,26 @@ multiclass SIMDIndexedSQRDMLxHSDTied<bit U, bits<4> opc, string asm,
V64, V64, V128, VectorIndexS,
asm, ".2s", ".2s", ".2s", ".s",
[(set (v2i32 V64:$dst),
- (Accum (v2i32 V64:$Rd),
- (v2i32 (int_aarch64_neon_sqrdmulh
- (v2i32 V64:$Rn),
- (v2i32 (AArch64duplane32 (v4i32 V128:$Rm),
- VectorIndexS:$idx))))))]> {
+ (v2i32 (op (v2i32 V64:$Rd), (v2i32 V64:$Rn),
+ (v2i32 (AArch64duplane32 (v4i32 V128:$Rm),
+ VectorIndexS:$idx)))))]> {
bits<2> idx;
let Inst{11} = idx{1};
let Inst{21} = idx{0};
}
- // FIXME: it would be nice to use the scalar (v1i32) instruction here, but
- // an intermediate EXTRACT_SUBREG would be untyped.
- // FIXME: direct EXTRACT_SUBREG from v2i32 to i32 is illegal, that's why we
- // got it lowered here as (i32 vector_extract (v4i32 insert_subvector(..)))
- def : Pat<(i32 (Accum (i32 FPR32Op:$Rd),
- (i32 (vector_extract
- (v4i32 (insert_subvector
- (undef),
- (v2i32 (int_aarch64_neon_sqrdmulh
- (v2i32 V64:$Rn),
- (v2i32 (AArch64duplane32
- (v4i32 V128:$Rm),
- VectorIndexS:$idx)))),
- (i64 0))),
- (i64 0))))),
- (EXTRACT_SUBREG
- (v2i32 (!cast<Instruction>(NAME # v2i32_indexed)
- (v2i32 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
- FPR32Op:$Rd,
- ssub)),
- V64:$Rn,
- V128:$Rm,
- VectorIndexS:$idx)),
- ssub)>;
-
def v4i32_indexed : BaseSIMDIndexedTied<1, U, 0, 0b10, opc,
V128, V128, V128, VectorIndexS,
asm, ".4s", ".4s", ".4s", ".s",
[(set (v4i32 V128:$dst),
- (Accum (v4i32 V128:$Rd),
- (v4i32 (int_aarch64_neon_sqrdmulh
- (v4i32 V128:$Rn),
- (v4i32 (AArch64duplane32 (v4i32 V128:$Rm),
- VectorIndexS:$idx))))))]> {
+ (v4i32 (op (v4i32 V128:$Rd), (v4i32 V128:$Rn),
+ (v4i32 (AArch64duplane32 (v4i32 V128:$Rm),
+ VectorIndexS:$idx)))))]> {
bits<2> idx;
let Inst{11} = idx{1};
let Inst{21} = idx{0};
}
- // FIXME: it would be nice to use the scalar (v1i32) instruction here, but
- // an intermediate EXTRACT_SUBREG would be untyped.
- def : Pat<(i32 (Accum (i32 FPR32Op:$Rd),
- (i32 (vector_extract
- (v4i32 (int_aarch64_neon_sqrdmulh
- (v4i32 V128:$Rn),
- (v4i32 (AArch64duplane32
- (v4i32 V128:$Rm),
- VectorIndexS:$idx)))),
- (i64 0))))),
- (EXTRACT_SUBREG
- (v4i32 (!cast<Instruction>(NAME # v4i32_indexed)
- (v4i32 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
- FPR32Op:$Rd,
- ssub)),
- V128:$Rn,
- V128:$Rm,
- VectorIndexS:$idx)),
- ssub)>;
-
def i16_indexed : BaseSIMDIndexedTied<1, U, 1, 0b01, opc,
FPR16Op, FPR16Op, V128_lo,
VectorIndexH, asm, ".h", "", "", ".h",
@@ -10698,11 +10637,9 @@ multiclass SIMDIndexedSQRDMLxHSDTied<bit U, bits<4> opc, string asm,
FPR32Op, FPR32Op, V128, VectorIndexS,
asm, ".s", "", "", ".s",
[(set (i32 FPR32Op:$dst),
- (Accum (i32 FPR32Op:$Rd),
- (i32 (int_aarch64_neon_sqrdmulh
- (i32 FPR32Op:$Rn),
- (i32 (vector_extract (v4i32 V128:$Rm),
- VectorIndexS:$idx))))))]> {
+ (i32 (op (i32 FPR32Op:$Rd), (i32 FPR32Op:$Rn),
+ (i32 (vector_extract (v4i32 V128:$Rm),
+ VectorIndexS:$idx)))))]> {
bits<2> idx;
let Inst{11} = idx{1};
let Inst{21} = idx{0};
@@ -11430,6 +11367,123 @@ class Store64BV<bits<3> opc, string asm_inst, list<dag> pat = []>
let Inst{20-16} = Rs;
}
+class MOPSMemoryCopyMoveBase<bit isMove, bits<2> opcode, bits<2> op1,
+ bits<2> op2, string asm>
+ : I<(outs GPR64common:$Rd_wb, GPR64common:$Rs_wb, GPR64:$Rn_wb),
+ (ins GPR64common:$Rd, GPR64common:$Rs, GPR64:$Rn),
+ asm, "\t[$Rd]!, [$Rs]!, $Rn!",
+ "$Rd = $Rd_wb,$Rs = $Rs_wb,$Rn = $Rn_wb", []>,
+ Sched<[]> {
+ bits<5> Rd;
+ bits<5> Rs;
+ bits<5> Rn;
+ let Inst{31-27} = 0b00011;
+ let Inst{26} = isMove;
+ let Inst{25-24} = 0b01;
+ let Inst{23-22} = opcode;
+ let Inst{21} = 0b0;
+ let Inst{20-16} = Rs;
+ let Inst{15-14} = op2;
+ let Inst{13-12} = op1;
+ let Inst{11-10} = 0b01;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+
+ let DecoderMethod = "DecodeCPYMemOpInstruction";
+ let mayLoad = 1;
+ let mayStore = 1;
+}
+
+class MOPSMemoryCopy<bits<2> opcode, bits<2> op1, bits<2> op2, string asm>
+ : MOPSMemoryCopyMoveBase<0, opcode, op1, op2, asm>;
+
+class MOPSMemoryMove<bits<2> opcode, bits<2> op1, bits<2> op2, string asm>
+ : MOPSMemoryCopyMoveBase<1, opcode, op1, op2, asm>;
+
+class MOPSMemorySetBase<bit isTagging, bits<2> opcode, bit op1, bit op2,
+ string asm>
+ : I<(outs GPR64common:$Rd_wb, GPR64:$Rn_wb),
+ (ins GPR64common:$Rd, GPR64:$Rn, GPR64:$Rm),
+ asm, "\t[$Rd]!, $Rn!, $Rm",
+ "$Rd = $Rd_wb,$Rn = $Rn_wb", []>,
+ Sched<[]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<5> Rm;
+ let Inst{31-27} = 0b00011;
+ let Inst{26} = isTagging;
+ let Inst{25-21} = 0b01110;
+ let Inst{20-16} = Rm;
+ let Inst{15-14} = opcode;
+ let Inst{13} = op2;
+ let Inst{12} = op1;
+ let Inst{11-10} = 0b01;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+
+ let DecoderMethod = "DecodeSETMemOpInstruction";
+ let mayLoad = 0;
+ let mayStore = 1;
+}
+
+class MOPSMemorySet<bits<2> opcode, bit op1, bit op2, string asm>
+ : MOPSMemorySetBase<0, opcode, op1, op2, asm>;
+
+class MOPSMemorySetTagging<bits<2> opcode, bit op1, bit op2, string asm>
+ : MOPSMemorySetBase<1, opcode, op1, op2, asm>;
+
+multiclass MOPSMemoryCopyInsns<bits<2> opcode, string asm> {
+ def "" : MOPSMemoryCopy<opcode, 0b00, 0b00, asm>;
+ def WN : MOPSMemoryCopy<opcode, 0b00, 0b01, asm # "wn">;
+ def RN : MOPSMemoryCopy<opcode, 0b00, 0b10, asm # "rn">;
+ def N : MOPSMemoryCopy<opcode, 0b00, 0b11, asm # "n">;
+ def WT : MOPSMemoryCopy<opcode, 0b01, 0b00, asm # "wt">;
+ def WTWN : MOPSMemoryCopy<opcode, 0b01, 0b01, asm # "wtwn">;
+ def WTRN : MOPSMemoryCopy<opcode, 0b01, 0b10, asm # "wtrn">;
+ def WTN : MOPSMemoryCopy<opcode, 0b01, 0b11, asm # "wtn">;
+ def RT : MOPSMemoryCopy<opcode, 0b10, 0b00, asm # "rt">;
+ def RTWN : MOPSMemoryCopy<opcode, 0b10, 0b01, asm # "rtwn">;
+ def RTRN : MOPSMemoryCopy<opcode, 0b10, 0b10, asm # "rtrn">;
+ def RTN : MOPSMemoryCopy<opcode, 0b10, 0b11, asm # "rtn">;
+ def T : MOPSMemoryCopy<opcode, 0b11, 0b00, asm # "t">;
+ def TWN : MOPSMemoryCopy<opcode, 0b11, 0b01, asm # "twn">;
+ def TRN : MOPSMemoryCopy<opcode, 0b11, 0b10, asm # "trn">;
+ def TN : MOPSMemoryCopy<opcode, 0b11, 0b11, asm # "tn">;
+}
+
+multiclass MOPSMemoryMoveInsns<bits<2> opcode, string asm> {
+ def "" : MOPSMemoryMove<opcode, 0b00, 0b00, asm>;
+ def WN : MOPSMemoryMove<opcode, 0b00, 0b01, asm # "wn">;
+ def RN : MOPSMemoryMove<opcode, 0b00, 0b10, asm # "rn">;
+ def N : MOPSMemoryMove<opcode, 0b00, 0b11, asm # "n">;
+ def WT : MOPSMemoryMove<opcode, 0b01, 0b00, asm # "wt">;
+ def WTWN : MOPSMemoryMove<opcode, 0b01, 0b01, asm # "wtwn">;
+ def WTRN : MOPSMemoryMove<opcode, 0b01, 0b10, asm # "wtrn">;
+ def WTN : MOPSMemoryMove<opcode, 0b01, 0b11, asm # "wtn">;
+ def RT : MOPSMemoryMove<opcode, 0b10, 0b00, asm # "rt">;
+ def RTWN : MOPSMemoryMove<opcode, 0b10, 0b01, asm # "rtwn">;
+ def RTRN : MOPSMemoryMove<opcode, 0b10, 0b10, asm # "rtrn">;
+ def RTN : MOPSMemoryMove<opcode, 0b10, 0b11, asm # "rtn">;
+ def T : MOPSMemoryMove<opcode, 0b11, 0b00, asm # "t">;
+ def TWN : MOPSMemoryMove<opcode, 0b11, 0b01, asm # "twn">;
+ def TRN : MOPSMemoryMove<opcode, 0b11, 0b10, asm # "trn">;
+ def TN : MOPSMemoryMove<opcode, 0b11, 0b11, asm # "tn">;
+}
+
+multiclass MOPSMemorySetInsns<bits<2> opcode, string asm> {
+ def "" : MOPSMemorySet<opcode, 0, 0, asm>;
+ def T : MOPSMemorySet<opcode, 1, 0, asm # "t">;
+ def N : MOPSMemorySet<opcode, 0, 1, asm # "n">;
+ def TN : MOPSMemorySet<opcode, 1, 1, asm # "tn">;
+}
+
+multiclass MOPSMemorySetTaggingInsns<bits<2> opcode, string asm> {
+ def "" : MOPSMemorySetTagging<opcode, 0, 0, asm>;
+ def T : MOPSMemorySetTagging<opcode, 1, 0, asm # "t">;
+ def N : MOPSMemorySetTagging<opcode, 0, 1, asm # "n">;
+ def TN : MOPSMemorySetTagging<opcode, 1, 1, asm # "tn">;
+}
+
//----------------------------------------------------------------------------
// Allow the size specifier tokens to be upper case, not just lower.
def : TokenAlias<".4B", ".4b">; // Add dot product
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 5fc5e4e5eb35..93c17133c845 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -2574,6 +2574,7 @@ AArch64InstrInfo::getAddrModeFromMemoryOp(const MachineInstr &MemI,
AM.BaseReg = Base->getReg();
AM.Displacement = Offset;
AM.ScaledReg = 0;
+ AM.Scale = 0;
return AM;
}
@@ -7350,8 +7351,7 @@ static void signOutlinedFunction(MachineFunction &MF, MachineBasicBlock &MBB,
.setMIFlags(MachineInstr::FrameSetup);
// If v8.3a features are available we can replace a RET instruction by
- // RETAA or RETAB and omit the AUT instructions. In this case the
- // DW_CFA_AARCH64_negate_ra_state can't be emitted.
+ // RETAA or RETAB and omit the AUT instructions
if (Subtarget.hasPAuth() && MBBAUT != MBB.end() &&
MBBAUT->getOpcode() == AArch64::RET) {
BuildMI(MBB, MBBAUT, DL,
@@ -7364,11 +7364,6 @@ static void signOutlinedFunction(MachineFunction &MF, MachineBasicBlock &MBB,
TII->get(ShouldSignReturnAddrWithAKey ? AArch64::AUTIASP
: AArch64::AUTIBSP))
.setMIFlag(MachineInstr::FrameDestroy);
- unsigned CFIIndexAuth =
- MF.addFrameInst(MCCFIInstruction::createNegateRAState(nullptr));
- BuildMI(MBB, MBBAUT, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
- .addCFIIndex(CFIIndexAuth)
- .setMIFlags(MachineInstr::FrameDestroy);
}
}
}
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrInfo.h
index b2f9e82a7e8b..1054bea40e68 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrInfo.h
@@ -26,7 +26,6 @@
namespace llvm {
class AArch64Subtarget;
-class AArch64TargetMachine;
static const MachineMemOperand::Flags MOSuppressPair =
MachineMemOperand::MOTargetFlag1;
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index ebccc07edc7a..c8a697c8b82f 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -33,6 +33,8 @@ def HasV9_1a : Predicate<"Subtarget->hasV9_1aOps()">,
AssemblerPredicate<(all_of HasV9_1aOps), "armv9.1a">;
def HasV9_2a : Predicate<"Subtarget->hasV9_2aOps()">,
AssemblerPredicate<(all_of HasV9_2aOps), "armv9.2a">;
+def HasV9_3a : Predicate<"Subtarget->hasV9_3aOps()">,
+ AssemblerPredicate<(all_of HasV9_3aOps), "armv9.3a">;
def HasV8_0r : Predicate<"Subtarget->hasV8_0rOps()">,
AssemblerPredicate<(all_of HasV8_0rOps), "armv8-r">;
@@ -198,6 +200,10 @@ def HasBRBE : Predicate<"Subtarget->hasBRBE()">,
AssemblerPredicate<(all_of FeatureBRBE), "brbe">;
def HasSPE_EEF : Predicate<"Subtarget->hasSPE_EEF()">,
AssemblerPredicate<(all_of FeatureSPE_EEF), "spe-eef">;
+def HasHBC : Predicate<"Subtarget->hasHBC()">,
+ AssemblerPredicate<(all_of FeatureHBC), "hbc">;
+def HasMOPS : Predicate<"Subtarget->hasMOPS()">,
+ AssemblerPredicate<(all_of FeatureMOPS), "mops">;
def IsLE : Predicate<"Subtarget->isLittleEndian()">;
def IsBE : Predicate<"!Subtarget->isLittleEndian()">;
def IsWindows : Predicate<"Subtarget->isTargetWindows()">;
@@ -2362,7 +2368,12 @@ def : Pat<(AArch64tlsdesc_callseq texternalsym:$sym),
//===----------------------------------------------------------------------===//
// Conditional branch (immediate) instruction.
//===----------------------------------------------------------------------===//
-def Bcc : BranchCond;
+def Bcc : BranchCond<0, "b">;
+
+// Armv8.8-A variant form which hints to the branch predictor that
+// this branch is very likely to go the same way nearly all the time
+// (even though it is not known at compile time _which_ way that is).
+def BCcc : BranchCond<1, "bc">, Requires<[HasHBC]>;
//===----------------------------------------------------------------------===//
// Compare-and-branch instructions.
@@ -4500,9 +4511,9 @@ defm URHADD : SIMDThreeSameVectorBHS<1,0b00010,"urhadd", AArch64urhadd>;
defm URSHL : SIMDThreeSameVector<1,0b01010,"urshl", int_aarch64_neon_urshl>;
defm USHL : SIMDThreeSameVector<1,0b01000,"ushl", int_aarch64_neon_ushl>;
defm SQRDMLAH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10000,"sqrdmlah",
- int_aarch64_neon_sqadd>;
+ int_aarch64_neon_sqrdmlah>;
defm SQRDMLSH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10001,"sqrdmlsh",
- int_aarch64_neon_sqsub>;
+ int_aarch64_neon_sqrdmlsh>;
// Extra saturate patterns, other than the intrinsics matches above
defm : SIMDThreeSameVectorExtraPatterns<"SQADD", saddsat>;
@@ -4769,15 +4780,11 @@ defm USHL : SIMDThreeScalarD< 1, 0b01000, "ushl", int_aarch64_neon_ushl>;
let Predicates = [HasRDM] in {
defm SQRDMLAH : SIMDThreeScalarHSTied<1, 0, 0b10000, "sqrdmlah">;
defm SQRDMLSH : SIMDThreeScalarHSTied<1, 0, 0b10001, "sqrdmlsh">;
- def : Pat<(i32 (int_aarch64_neon_sqadd
- (i32 FPR32:$Rd),
- (i32 (int_aarch64_neon_sqrdmulh (i32 FPR32:$Rn),
- (i32 FPR32:$Rm))))),
+ def : Pat<(i32 (int_aarch64_neon_sqrdmlah (i32 FPR32:$Rd), (i32 FPR32:$Rn),
+ (i32 FPR32:$Rm))),
(SQRDMLAHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
- def : Pat<(i32 (int_aarch64_neon_sqsub
- (i32 FPR32:$Rd),
- (i32 (int_aarch64_neon_sqrdmulh (i32 FPR32:$Rn),
- (i32 FPR32:$Rm))))),
+ def : Pat<(i32 (int_aarch64_neon_sqrdmlsh (i32 FPR32:$Rd), (i32 FPR32:$Rn),
+ (i32 FPR32:$Rm))),
(SQRDMLSHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
}
@@ -5342,19 +5349,6 @@ def : Pat<(v4i32 (concat_vectors (v2i32 (trunc (v2i64 V128:$Vn))),
(v2i32 (trunc (v2i64 V128:$Vm))))),
(UZP1v4i32 V128:$Vn, V128:$Vm)>;
-def : Pat<(v16i8 (concat_vectors
- (v8i8 (trunc (AArch64vlshr (v8i16 V128:$Vn), (i32 8)))),
- (v8i8 (trunc (AArch64vlshr (v8i16 V128:$Vm), (i32 8)))))),
- (UZP2v16i8 V128:$Vn, V128:$Vm)>;
-def : Pat<(v8i16 (concat_vectors
- (v4i16 (trunc (AArch64vlshr (v4i32 V128:$Vn), (i32 16)))),
- (v4i16 (trunc (AArch64vlshr (v4i32 V128:$Vm), (i32 16)))))),
- (UZP2v8i16 V128:$Vn, V128:$Vm)>;
-def : Pat<(v4i32 (concat_vectors
- (v2i32 (trunc (AArch64vlshr (v2i64 V128:$Vn), (i32 32)))),
- (v2i32 (trunc (AArch64vlshr (v2i64 V128:$Vm), (i32 32)))))),
- (UZP2v4i32 V128:$Vn, V128:$Vm)>;
-
//----------------------------------------------------------------------------
// AdvSIMD TBL/TBX instructions
//----------------------------------------------------------------------------
@@ -5376,10 +5370,10 @@ def : Pat<(v16i8 (int_aarch64_neon_tbx1 (v16i8 V128:$Rd),
//----------------------------------------------------------------------------
-// AdvSIMD scalar CPY instruction
+// AdvSIMD scalar DUP instruction
//----------------------------------------------------------------------------
-defm CPY : SIMDScalarCPY<"mov">;
+defm DUP : SIMDScalarDUP<"mov">;
//----------------------------------------------------------------------------
// AdvSIMD scalar pairwise instructions
@@ -5790,7 +5784,7 @@ defm : Neon_INS_elt_pattern<v2f64, v1f64, f64, INSvi64lane>;
// Floating point vector extractions are codegen'd as either a sequence of
-// subregister extractions, or a MOV (aka CPY here, alias for DUP) if
+// subregister extractions, or a MOV (aka DUP here) if
// the lane number is anything other than zero.
def : Pat<(vector_extract (v2f64 V128:$Rn), 0),
(f64 (EXTRACT_SUBREG V128:$Rn, dsub))>;
@@ -5803,13 +5797,13 @@ def : Pat<(vector_extract (v8bf16 V128:$Rn), 0),
def : Pat<(vector_extract (v2f64 V128:$Rn), VectorIndexD:$idx),
- (f64 (CPYi64 V128:$Rn, VectorIndexD:$idx))>;
+ (f64 (DUPi64 V128:$Rn, VectorIndexD:$idx))>;
def : Pat<(vector_extract (v4f32 V128:$Rn), VectorIndexS:$idx),
- (f32 (CPYi32 V128:$Rn, VectorIndexS:$idx))>;
+ (f32 (DUPi32 V128:$Rn, VectorIndexS:$idx))>;
def : Pat<(vector_extract (v8f16 V128:$Rn), VectorIndexH:$idx),
- (f16 (CPYi16 V128:$Rn, VectorIndexH:$idx))>;
+ (f16 (DUPi16 V128:$Rn, VectorIndexH:$idx))>;
def : Pat<(vector_extract (v8bf16 V128:$Rn), VectorIndexH:$idx),
- (bf16 (CPYi16 V128:$Rn, VectorIndexH:$idx))>;
+ (bf16 (DUPi16 V128:$Rn, VectorIndexH:$idx))>;
// All concat_vectors operations are canonicalised to act on i64 vectors for
// AArch64. In the general case we need an instruction, which had just as well be
@@ -6407,9 +6401,9 @@ defm SQDMLAL : SIMDIndexedLongSQDMLXSDTied<0, 0b0011, "sqdmlal",
defm SQDMLSL : SIMDIndexedLongSQDMLXSDTied<0, 0b0111, "sqdmlsl",
int_aarch64_neon_sqsub>;
defm SQRDMLAH : SIMDIndexedSQRDMLxHSDTied<1, 0b1101, "sqrdmlah",
- int_aarch64_neon_sqadd>;
+ int_aarch64_neon_sqrdmlah>;
defm SQRDMLSH : SIMDIndexedSQRDMLxHSDTied<1, 0b1111, "sqrdmlsh",
- int_aarch64_neon_sqsub>;
+ int_aarch64_neon_sqrdmlsh>;
defm SQDMULL : SIMDIndexedLongSD<0, 0b1011, "sqdmull", int_aarch64_neon_sqdmull>;
defm UMLAL : SIMDVectorIndexedLongSDTied<1, 0b0010, "umlal",
TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
@@ -6425,6 +6419,22 @@ def : Pat<(int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
VectorIndexS:$idx)),
(SQDMULLv1i64_indexed FPR32:$Rn, V128:$Vm, VectorIndexS:$idx)>;
+// Match add node and also treat an 'or' node as an 'add' if the or'ed operands
+// have no common bits.
+def add_and_or_is_add : PatFrags<(ops node:$lhs, node:$rhs),
+ [(add node:$lhs, node:$rhs), (or node:$lhs, node:$rhs)],[{
+ if (N->getOpcode() == ISD::ADD)
+ return true;
+ return CurDAG->haveNoCommonBitsSet(N->getOperand(0), N->getOperand(1));
+}]> {
+ let GISelPredicateCode = [{
+ // Only handle G_ADD for now. FIXME: build capability to compute whether
+ // operands of G_OR have common bits set or not.
+ return MI.getOpcode() == TargetOpcode::G_ADD;
+ }];
+}
+
+
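
The add_and_or_is_add fragment rests on the fact that OR behaves like ADD whenever the operands have no set bits in common, since no bit position can carry. A standalone check of that identity (the values are arbitrary):

#include <cassert>
#include <cstdint>
#include <iostream>

// When (A & B) == 0 no bit position can produce a carry, so A | B == A + B
// and the 'or' may safely be matched as if it were an 'add'.
bool orIsAdd(uint64_t A, uint64_t B) { return (A & B) == 0; }

int main() {
  uint64_t High = 0xabcd0000, Low = 0x00001234; // disjoint bit ranges
  assert(orIsAdd(High, Low));
  assert((High | Low) == (High + Low));
  // Overlapping bits break the identity, so such nodes must not be rewritten:
  assert((0xcu | 0x4u) != (0xcu + 0x4u));
  std::cout << std::hex << (High | Low) << "\n"; // abcd1234
  return 0;
}
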
//----------------------------------------------------------------------------
// AdvSIMD scalar shift instructions
//----------------------------------------------------------------------------
@@ -6530,7 +6540,7 @@ defm SRSRA : SIMDScalarRShiftDTied< 0, 0b00110, "srsra",
(AArch64srshri node:$MHS, node:$RHS))>>;
defm SSHR : SIMDScalarRShiftD< 0, 0b00000, "sshr", AArch64vashr>;
defm SSRA : SIMDScalarRShiftDTied< 0, 0b00010, "ssra",
- TriOpFrag<(add node:$LHS,
+ TriOpFrag<(add_and_or_is_add node:$LHS,
(AArch64vashr node:$MHS, node:$RHS))>>;
defm UQRSHRN : SIMDScalarRShiftBHS< 1, 0b10011, "uqrshrn",
int_aarch64_neon_uqrshrn>;
@@ -6543,7 +6553,7 @@ defm URSRA : SIMDScalarRShiftDTied< 1, 0b00110, "ursra",
(AArch64urshri node:$MHS, node:$RHS))>>;
defm USHR : SIMDScalarRShiftD< 1, 0b00000, "ushr", AArch64vlshr>;
defm USRA : SIMDScalarRShiftDTied< 1, 0b00010, "usra",
- TriOpFrag<(add node:$LHS,
+ TriOpFrag<(add_and_or_is_add node:$LHS,
(AArch64vlshr node:$MHS, node:$RHS))>>;
//----------------------------------------------------------------------------
@@ -6585,7 +6595,7 @@ defm SSHLL : SIMDVectorLShiftLongBHSD<0, 0b10100, "sshll",
defm SSHR : SIMDVectorRShiftBHSD<0, 0b00000, "sshr", AArch64vashr>;
defm SSRA : SIMDVectorRShiftBHSDTied<0, 0b00010, "ssra",
- TriOpFrag<(add node:$LHS, (AArch64vashr node:$MHS, node:$RHS))>>;
+ TriOpFrag<(add_and_or_is_add node:$LHS, (AArch64vashr node:$MHS, node:$RHS))>>;
defm UCVTF : SIMDVectorRShiftToFP<1, 0b11100, "ucvtf",
int_aarch64_neon_vcvtfxu2fp>;
defm UQRSHRN : SIMDVectorRShiftNarrowBHS<1, 0b10011, "uqrshrn",
@@ -6601,7 +6611,7 @@ defm USHLL : SIMDVectorLShiftLongBHSD<1, 0b10100, "ushll",
BinOpFrag<(AArch64vshl (zext node:$LHS), node:$RHS)>>;
defm USHR : SIMDVectorRShiftBHSD<1, 0b00000, "ushr", AArch64vlshr>;
defm USRA : SIMDVectorRShiftBHSDTied<1, 0b00010, "usra",
- TriOpFrag<(add node:$LHS, (AArch64vlshr node:$MHS, node:$RHS))> >;
+ TriOpFrag<(add_and_or_is_add node:$LHS, (AArch64vlshr node:$MHS, node:$RHS))> >;
// RADDHN patterns for when RSHRN shifts by half the size of the vector element
def : Pat<(v8i8 (int_aarch64_neon_rshrn (v8i16 V128:$Vn), (i32 8))),
@@ -8106,7 +8116,7 @@ class NTStore128Pat<ValueType VT> :
Pat<(nontemporalstore (VT FPR128:$Rt),
(am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
(STNPDi (EXTRACT_SUBREG FPR128:$Rt, dsub),
- (CPYi64 FPR128:$Rt, (i64 1)),
+ (DUPi64 FPR128:$Rt, (i64 1)),
GPR64sp:$Rn, simm7s8:$offset)>;
def : NTStore128Pat<v2i64>;
@@ -8118,7 +8128,7 @@ class NTStore64Pat<ValueType VT> :
Pat<(nontemporalstore (VT FPR64:$Rt),
(am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
(STNPSi (EXTRACT_SUBREG FPR64:$Rt, ssub),
- (CPYi32 (SUBREG_TO_REG (i64 0), FPR64:$Rt, dsub), (i64 1)),
+ (DUPi32 (SUBREG_TO_REG (i64 0), FPR64:$Rt, dsub), (i64 1)),
GPR64sp:$Rn, simm7s4:$offset)>;
// FIXME: Shouldn't v1f64 loads/stores be promoted to v1i64?
@@ -8319,6 +8329,26 @@ let Predicates = [HasLS64] in {
def : ST64BPattern<int_aarch64_st64bv0, ST64BV0>;
}
+let Predicates = [HasMOPS] in {
+ defm CPYFP : MOPSMemoryCopyInsns<0b00, "cpyfp">;
+ defm CPYFM : MOPSMemoryCopyInsns<0b01, "cpyfm">;
+ defm CPYFE : MOPSMemoryCopyInsns<0b10, "cpyfe">;
+
+ defm CPYP : MOPSMemoryMoveInsns<0b00, "cpyp">;
+ defm CPYM : MOPSMemoryMoveInsns<0b01, "cpym">;
+ defm CPYE : MOPSMemoryMoveInsns<0b10, "cpye">;
+
+ defm SETP : MOPSMemorySetInsns<0b00, "setp">;
+ defm SETM : MOPSMemorySetInsns<0b01, "setm">;
+ defm SETE : MOPSMemorySetInsns<0b10, "sete">;
+}
+let Predicates = [HasMOPS, HasMTE] in {
+ defm SETGP : MOPSMemorySetTaggingInsns<0b00, "setgp">;
+ defm SETGM : MOPSMemorySetTaggingInsns<0b01, "setgm">;
+ // Can't use SETGE because it's a reserved name in TargetSelectionDAG.td
+ defm MOPSSETGE : MOPSMemorySetTaggingInsns<0b10, "setge">;
+}
+
let Defs = [X16, X17], mayStore = 1, isCodeGenOnly = 1 in
def StoreSwiftAsyncContext
: Pseudo<(outs), (ins GPR64:$ctx, GPR64sp:$base, simm9:$offset),
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index 3a836ac33064..6aefc1fdb599 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -1139,7 +1139,7 @@ AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
? getLdStOffsetOp(*StoreI).getImm()
: getLdStOffsetOp(*StoreI).getImm() * StoreSize;
int Width = LoadSize * 8;
- unsigned DestReg =
+ Register DestReg =
IsStoreXReg ? Register(TRI->getMatchingSuperReg(
LdRt, AArch64::sub_32, &AArch64::GPR64RegClass))
: LdRt;
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64MCInstLower.h b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64MCInstLower.h
index 8f3148a98410..b008e49d52dd 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64MCInstLower.h
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64MCInstLower.h
@@ -14,15 +14,12 @@
namespace llvm {
class AsmPrinter;
-class MCAsmInfo;
class MCContext;
class MCInst;
class MCOperand;
class MCSymbol;
class MachineInstr;
-class MachineModuleInfoMachO;
class MachineOperand;
-class Mangler;
/// AArch64MCInstLower - This class is used to lower an MachineInstr
/// into an MCInst.
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
index 42db18332f1c..1fc5617b49f6 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
@@ -11,12 +11,19 @@
// 1. MOVi32imm + ANDWrr ==> ANDWri + ANDWri
// MOVi64imm + ANDXrr ==> ANDXri + ANDXri
//
+// 2. MOVi32imm + ADDWrr ==> ADDWri + ADDWri
+// MOVi64imm + ADDXrr ==> ADDXri + ADDXri
+//
+// 3. MOVi32imm + SUBWrr ==> SUBWri + SUBWri
+// MOVi64imm + SUBXrr ==> SUBXri + SUBXri
+//
// The mov pseudo instruction could be expanded to multiple mov instructions
// later. In this case, we could try to split the constant operand of mov
-// instruction into two bitmask immediates. It makes two AND instructions
-// intead of multiple `mov` + `and` instructions.
+// instruction into two immediates which can be directly encoded into
+// *Wri/*Xri instructions. It makes two AND/ADD/SUB instructions instead of
+// multiple `mov` + `and/add/sub` instructions.
//
-// 2. Remove redundant ORRWrs which is generated by zero-extend.
+// 4. Remove redundant ORRWrs which is generated by zero-extend.
//
// %3:gpr32 = ORRWrs $wzr, %2, 0
// %4:gpr64 = SUBREG_TO_REG 0, %3, %subreg.sub_32
@@ -30,6 +37,7 @@
#include "AArch64ExpandImm.h"
#include "AArch64InstrInfo.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
@@ -48,11 +56,44 @@ struct AArch64MIPeepholeOpt : public MachineFunctionPass {
}
const AArch64InstrInfo *TII;
+ const AArch64RegisterInfo *TRI;
MachineLoopInfo *MLI;
MachineRegisterInfo *MRI;
template <typename T>
- bool visitAND(MachineInstr &MI,
+ using SplitAndOpcFunc =
+ std::function<Optional<unsigned>(T, unsigned, T &, T &)>;
+ using BuildMIFunc =
+ std::function<void(MachineInstr &, unsigned, unsigned, unsigned, Register,
+ Register, Register)>;
+
+ /// For instructions where an immediate operand could be split into two
+ /// separate immediate instructions, use splitTwoPartImm to handle the
+ /// optimization.
+ ///
+ /// To implement, the following function types must be passed to
+ /// splitTwoPartImm. A SplitAndOpcFunc must be implemented that determines if
+ /// splitting the immediate is valid and returns the associated new opcode. A
+ /// BuildMIFunc must be implemented to build the two immediate instructions.
+ ///
+ /// Example Pattern (where IMM would require 2+ MOV instructions):
+ /// %dst = <Instr>rr %src IMM [...]
+ /// becomes:
+ /// %tmp = <Instr>ri %src (encode half IMM) [...]
+ /// %dst = <Instr>ri %tmp (encode half IMM) [...]
+ template <typename T>
+ bool splitTwoPartImm(MachineInstr &MI,
+ SmallSetVector<MachineInstr *, 8> &ToBeRemoved,
+ SplitAndOpcFunc<T> SplitAndOpc, BuildMIFunc BuildInstr);
+
+ bool checkMovImmInstr(MachineInstr &MI, MachineInstr *&MovMI,
+ MachineInstr *&SubregToRegMI);
+
+ template <typename T>
+ bool visitADDSUB(unsigned PosOpc, unsigned NegOpc, MachineInstr &MI,
+ SmallSetVector<MachineInstr *, 8> &ToBeRemoved);
+ template <typename T>
+ bool visitAND(unsigned Opc, MachineInstr &MI,
SmallSetVector<MachineInstr *, 8> &ToBeRemoved);
bool visitORR(MachineInstr &MI,
SmallSetVector<MachineInstr *, 8> &ToBeRemoved);
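
The two std::function aliases split each peephole into a question ("can this immediate be split, and into which opcode?") and an action ("emit the two immediate-form instructions"). A simplified standalone sketch of that driver shape, using plain data instead of MachineInstrs and an ADD-style splitter; the single-MOV materialization check of the real pass is omitted:

#include <cstdint>
#include <functional>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

// Toy "instruction" record; the real pass builds MachineInstrs instead.
struct Inst {
  std::string Opc;
  uint64_t Imm;
};

// Analogues of the two callback types: decide whether/how to split and which
// opcode to use, and build the two resulting immediate-form instructions.
using SplitAndOpcFunc = std::function<std::optional<std::string>(
    uint64_t Imm, unsigned RegSize, uint64_t &Imm0, uint64_t &Imm1)>;
using BuildFunc = std::function<void(std::vector<Inst> &Out,
                                     const std::string &Opc, uint64_t Imm0,
                                     uint64_t Imm1)>;

// Generic driver: if the splitter accepts the immediate, emit two
// instructions; otherwise leave the original code alone.
bool splitTwoPartImm(uint64_t Imm, unsigned RegSize, std::vector<Inst> &Out,
                     const SplitAndOpcFunc &Split, const BuildFunc &Build) {
  uint64_t Imm0 = 0, Imm1 = 0;
  std::optional<std::string> Opc = Split(Imm, RegSize, Imm0, Imm1);
  if (!Opc)
    return false;
  Build(Out, *Opc, Imm0, Imm1);
  return true;
}

int main() {
  // ADD-style splitter: accept (imm0 << 12) + imm1 with two non-zero 12-bit
  // chunks, so two add-immediate instructions can fold the constant inline.
  SplitAndOpcFunc SplitAdd = [](uint64_t Imm, unsigned, uint64_t &Imm0,
                                uint64_t &Imm1) -> std::optional<std::string> {
    if ((Imm & 0xfffULL) == 0 || (Imm & 0xfff000ULL) == 0 || (Imm >> 24) != 0)
      return std::nullopt;
    Imm0 = (Imm >> 12) & 0xfff;
    Imm1 = Imm & 0xfff;
    return std::string("ADDXri");
  };
  BuildFunc BuildAdd = [](std::vector<Inst> &Out, const std::string &Opc,
                          uint64_t Imm0, uint64_t Imm1) {
    Out.push_back({Opc + ", lsl #12", Imm0}); // high 12 bits, shifted
    Out.push_back({Opc, Imm1});               // low 12 bits
  };

  std::vector<Inst> Out;
  if (splitTwoPartImm(0x123456, 64, Out, SplitAdd, BuildAdd))
    for (const Inst &I : Out)
      std::cout << I.Opc << "  #0x" << std::hex << I.Imm << "\n";
  return 0;
}
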
@@ -116,7 +157,8 @@ static bool splitBitmaskImm(T Imm, unsigned RegSize, T &Imm1Enc, T &Imm2Enc) {
template <typename T>
bool AArch64MIPeepholeOpt::visitAND(
- MachineInstr &MI, SmallSetVector<MachineInstr *, 8> &ToBeRemoved) {
+ unsigned Opc, MachineInstr &MI,
+ SmallSetVector<MachineInstr *, 8> &ToBeRemoved) {
// Try below transformation.
//
// MOVi32imm + ANDWrr ==> ANDWri + ANDWri
@@ -127,23 +169,151 @@ bool AArch64MIPeepholeOpt::visitAND(
// bitmask immediates. It makes only two AND instructions instead of multiple
// mov + and instructions.
- unsigned RegSize = sizeof(T) * 8;
- assert((RegSize == 32 || RegSize == 64) &&
- "Invalid RegSize for AND bitmask peephole optimization");
+ return splitTwoPartImm<T>(
+ MI, ToBeRemoved,
+ [Opc](T Imm, unsigned RegSize, T &Imm0, T &Imm1) -> Optional<unsigned> {
+ if (splitBitmaskImm(Imm, RegSize, Imm0, Imm1))
+ return Opc;
+ return None;
+ },
+ [&TII = TII](MachineInstr &MI, unsigned Opcode, unsigned Imm0,
+ unsigned Imm1, Register SrcReg, Register NewTmpReg,
+ Register NewDstReg) {
+ DebugLoc DL = MI.getDebugLoc();
+ MachineBasicBlock *MBB = MI.getParent();
+ BuildMI(*MBB, MI, DL, TII->get(Opcode), NewTmpReg)
+ .addReg(SrcReg)
+ .addImm(Imm0);
+ BuildMI(*MBB, MI, DL, TII->get(Opcode), NewDstReg)
+ .addReg(NewTmpReg)
+ .addImm(Imm1);
+ });
+}
+
+bool AArch64MIPeepholeOpt::visitORR(
+ MachineInstr &MI, SmallSetVector<MachineInstr *, 8> &ToBeRemoved) {
+ // Check this ORR comes from below zero-extend pattern.
+ //
+ // def : Pat<(i64 (zext GPR32:$src)),
+ // (SUBREG_TO_REG (i32 0), (ORRWrs WZR, GPR32:$src, 0), sub_32)>;
+ if (MI.getOperand(3).getImm() != 0)
+ return false;
+
+ if (MI.getOperand(1).getReg() != AArch64::WZR)
+ return false;
+
+ MachineInstr *SrcMI = MRI->getUniqueVRegDef(MI.getOperand(2).getReg());
+ if (!SrcMI)
+ return false;
+
+ // From https://developer.arm.com/documentation/dui0801/b/BABBGCAC
+ //
+ // When you use the 32-bit form of an instruction, the upper 32 bits of the
+ // source registers are ignored and the upper 32 bits of the destination
+ // register are set to zero.
+ //
+ // If a 32-bit form of an AArch64 instruction defines the source operand of
+ // the zero-extend, the zero-extend is not needed. Check that the defining
+ // opcode is a real AArch64 instruction; if it is not, conservatively do not
+ // process it.
+ if (SrcMI->getOpcode() <= TargetOpcode::GENERIC_OP_END)
+ return false;
+
+ Register DefReg = MI.getOperand(0).getReg();
+ Register SrcReg = MI.getOperand(2).getReg();
+ MRI->replaceRegWith(DefReg, SrcReg);
+ MRI->clearKillFlags(SrcReg);
+ // replaceRegWith changes MI's definition register. Keep it for SSA form until
+ // deleting MI.
+ MI.getOperand(0).setReg(DefReg);
+ ToBeRemoved.insert(&MI);
+
+ LLVM_DEBUG(dbgs() << "Removed: " << MI << "\n");
+
+ return true;
+}
+
+template <typename T>
+static bool splitAddSubImm(T Imm, unsigned RegSize, T &Imm0, T &Imm1) {
+ // The immediate must be in the form of ((imm0 << 12) + imm1), in which both
+ // imm0 and imm1 are non-zero 12-bit unsigned integers.
+ if ((Imm & 0xfff000) == 0 || (Imm & 0xfff) == 0 ||
+ (Imm & ~static_cast<T>(0xffffff)) != 0)
+ return false;
+
+ // The immediate cannot be composed via a single instruction.
+ SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
+ AArch64_IMM::expandMOVImm(Imm, RegSize, Insn);
+ if (Insn.size() == 1)
+ return false;
+
+ // Split Imm into (Imm0 << 12) + Imm1;
+ Imm0 = (Imm >> 12) & 0xfff;
+ Imm1 = Imm & 0xfff;
+ return true;
+}
+
+template <typename T>
+bool AArch64MIPeepholeOpt::visitADDSUB(
+ unsigned PosOpc, unsigned NegOpc, MachineInstr &MI,
+ SmallSetVector<MachineInstr *, 8> &ToBeRemoved) {
+ // Try below transformation.
+ //
+ // MOVi32imm + ADDWrr ==> ADDWri + ADDWri
+ // MOVi64imm + ADDXrr ==> ADDXri + ADDXri
+ //
+ // MOVi32imm + SUBWrr ==> SUBWri + SUBWri
+ // MOVi64imm + SUBXrr ==> SUBXri + SUBXri
+ //
+ // The mov pseudo instruction could be expanded to multiple mov instructions
+ // later. Let's try to split the constant operand of mov instruction into two
+ // legal add/sub immediates. It makes only two ADD/SUB instructions instead of
+ // multiple `mov` + `add/sub` instructions.
+
+ return splitTwoPartImm<T>(
+ MI, ToBeRemoved,
+ [PosOpc, NegOpc](T Imm, unsigned RegSize, T &Imm0,
+ T &Imm1) -> Optional<unsigned> {
+ if (splitAddSubImm(Imm, RegSize, Imm0, Imm1))
+ return PosOpc;
+ if (splitAddSubImm(-Imm, RegSize, Imm0, Imm1))
+ return NegOpc;
+ return None;
+ },
+ [&TII = TII](MachineInstr &MI, unsigned Opcode, unsigned Imm0,
+ unsigned Imm1, Register SrcReg, Register NewTmpReg,
+ Register NewDstReg) {
+ DebugLoc DL = MI.getDebugLoc();
+ MachineBasicBlock *MBB = MI.getParent();
+ BuildMI(*MBB, MI, DL, TII->get(Opcode), NewTmpReg)
+ .addReg(SrcReg)
+ .addImm(Imm0)
+ .addImm(12);
+ BuildMI(*MBB, MI, DL, TII->get(Opcode), NewDstReg)
+ .addReg(NewTmpReg)
+ .addImm(Imm1)
+ .addImm(0);
+ });
+}
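
visitADDSUB probes the immediate twice: as-is with the positive opcode, then negated with the opposite opcode, so an ADDXrr with a negative constant can still become two SUBXri. A standalone sketch of that arithmetic (splitAddSubImm here is simplified and skips the single-instruction check):

#include <cassert>
#include <cstdint>
#include <iostream>
#include <optional>

// Simplified splitAddSubImm: Imm must be (Imm0 << 12) + Imm1 with two
// non-zero 12-bit chunks.
bool splitAddSubImm(uint64_t Imm, uint64_t &Imm0, uint64_t &Imm1) {
  if ((Imm & 0xfff000ULL) == 0 || (Imm & 0xfffULL) == 0 || (Imm >> 24) != 0)
    return false;
  Imm0 = (Imm >> 12) & 0xfff;
  Imm1 = Imm & 0xfff;
  return true;
}

// Mirrors the PosOpc/NegOpc probe: try Imm as-is (two ADDs), then -Imm (two
// SUBs). Returns the value the two-instruction sequence would compute.
std::optional<uint64_t> addImmViaTwoInsns(uint64_t Src, uint64_t Imm) {
  uint64_t Imm0, Imm1;
  if (splitAddSubImm(Imm, Imm0, Imm1))  // e.g. ADDXri, ADDXri
    return (Src + (Imm0 << 12)) + Imm1;
  if (splitAddSubImm(-Imm, Imm0, Imm1)) // e.g. SUBXri, SUBXri
    return (Src - (Imm0 << 12)) - Imm1;
  return std::nullopt;
}

int main() {
  uint64_t Src = 1000000;
  assert(*addImmViaTwoInsns(Src, 0x123456) == Src + 0x123456);
  // A negative constant: the ADD with -0x123456 is rewritten as two SUBs.
  assert(*addImmViaTwoInsns(Src, -0x123456ULL) == Src - 0x123456);
  std::cout << "two-instruction expansions match\n";
  return 0;
}
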
- // Check whether AND's MBB is in loop and the AND is loop invariant.
+// Checks if the corresponding MOV immediate instruction is applicable for
+// this peephole optimization.
+bool AArch64MIPeepholeOpt::checkMovImmInstr(MachineInstr &MI,
+ MachineInstr *&MovMI,
+ MachineInstr *&SubregToRegMI) {
+ // Check whether the current MBB is in a loop and the instruction is loop invariant.
MachineBasicBlock *MBB = MI.getParent();
MachineLoop *L = MLI->getLoopFor(MBB);
if (L && !L->isLoopInvariant(MI))
return false;
- // Check whether AND's operand is MOV with immediate.
- MachineInstr *MovMI = MRI->getUniqueVRegDef(MI.getOperand(2).getReg());
+ // Check whether current MI's operand is MOV with immediate.
+ MovMI = MRI->getUniqueVRegDef(MI.getOperand(2).getReg());
if (!MovMI)
return false;
- MachineInstr *SubregToRegMI = nullptr;
// If it is SUBREG_TO_REG, check its operand.
+ SubregToRegMI = nullptr;
if (MovMI->getOpcode() == TargetOpcode::SUBREG_TO_REG) {
SubregToRegMI = MovMI;
MovMI = MRI->getUniqueVRegDef(MovMI->getOperand(2).getReg());
@@ -159,47 +329,63 @@ bool AArch64MIPeepholeOpt::visitAND(
// more instructions.
if (!MRI->hasOneUse(MovMI->getOperand(0).getReg()))
return false;
-
if (SubregToRegMI && !MRI->hasOneUse(SubregToRegMI->getOperand(0).getReg()))
return false;
- // Split the bitmask immediate into two.
- T UImm = static_cast<T>(MovMI->getOperand(1).getImm());
+ // It is OK to perform this peephole optimization.
+ return true;
+}
+
+template <typename T>
+bool AArch64MIPeepholeOpt::splitTwoPartImm(
+ MachineInstr &MI, SmallSetVector<MachineInstr *, 8> &ToBeRemoved,
+ SplitAndOpcFunc<T> SplitAndOpc, BuildMIFunc BuildInstr) {
+ unsigned RegSize = sizeof(T) * 8;
+ assert((RegSize == 32 || RegSize == 64) &&
+ "Invalid RegSize for legal immediate peephole optimization");
+
+ // Perform several essential checks against current MI.
+ MachineInstr *MovMI, *SubregToRegMI;
+ if (!checkMovImmInstr(MI, MovMI, SubregToRegMI))
+ return false;
+
+ // Split the immediate to Imm0 and Imm1, and calculate the Opcode.
+ T Imm = static_cast<T>(MovMI->getOperand(1).getImm()), Imm0, Imm1;
// For the 32 bit form of instruction, the upper 32 bits of the destination
// register are set to zero. If there is SUBREG_TO_REG, set the upper 32 bits
- // of UImm to zero.
+ // of Imm to zero. This is essential if the immediate value was a negative
+ // number, since it was sign extended when assigned to the 64-bit Imm.
if (SubregToRegMI)
- UImm &= 0xFFFFFFFF;
- T Imm1Enc;
- T Imm2Enc;
- if (!splitBitmaskImm(UImm, RegSize, Imm1Enc, Imm2Enc))
+ Imm &= 0xFFFFFFFF;
+ unsigned Opcode;
+ if (auto R = SplitAndOpc(Imm, RegSize, Imm0, Imm1))
+ Opcode = R.getValue();
+ else
return false;
- // Create new AND MIs.
- DebugLoc DL = MI.getDebugLoc();
- const TargetRegisterClass *ANDImmRC =
- (RegSize == 32) ? &AArch64::GPR32spRegClass : &AArch64::GPR64spRegClass;
+ // Create new ADD/SUB MIs.
+ MachineFunction *MF = MI.getMF();
+ const TargetRegisterClass *RC =
+ TII->getRegClass(TII->get(Opcode), 0, TRI, *MF);
+ const TargetRegisterClass *ORC =
+ TII->getRegClass(TII->get(Opcode), 1, TRI, *MF);
Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = MI.getOperand(1).getReg();
- Register NewTmpReg = MRI->createVirtualRegister(ANDImmRC);
- Register NewDstReg = MRI->createVirtualRegister(ANDImmRC);
- unsigned Opcode = (RegSize == 32) ? AArch64::ANDWri : AArch64::ANDXri;
-
- MRI->constrainRegClass(NewTmpReg, MRI->getRegClass(SrcReg));
- BuildMI(*MBB, MI, DL, TII->get(Opcode), NewTmpReg)
- .addReg(SrcReg)
- .addImm(Imm1Enc);
+ Register NewTmpReg = MRI->createVirtualRegister(RC);
+ Register NewDstReg = MRI->createVirtualRegister(RC);
+ MRI->constrainRegClass(SrcReg, RC);
+ MRI->constrainRegClass(NewTmpReg, ORC);
MRI->constrainRegClass(NewDstReg, MRI->getRegClass(DstReg));
- BuildMI(*MBB, MI, DL, TII->get(Opcode), NewDstReg)
- .addReg(NewTmpReg)
- .addImm(Imm2Enc);
+
+ BuildInstr(MI, Opcode, Imm0, Imm1, SrcReg, NewTmpReg, NewDstReg);
MRI->replaceRegWith(DstReg, NewDstReg);
// replaceRegWith changes MI's definition register. Keep it for SSA form until
// deleting MI.
MI.getOperand(0).setReg(DstReg);
+ // Record the MIs that need to be removed.
ToBeRemoved.insert(&MI);
if (SubregToRegMI)
ToBeRemoved.insert(SubregToRegMI);
@@ -208,59 +394,17 @@ bool AArch64MIPeepholeOpt::visitAND(
return true;
}
-bool AArch64MIPeepholeOpt::visitORR(
- MachineInstr &MI, SmallSetVector<MachineInstr *, 8> &ToBeRemoved) {
- // Check this ORR comes from below zero-extend pattern.
- //
- // def : Pat<(i64 (zext GPR32:$src)),
- // (SUBREG_TO_REG (i32 0), (ORRWrs WZR, GPR32:$src, 0), sub_32)>;
- if (MI.getOperand(3).getImm() != 0)
- return false;
-
- if (MI.getOperand(1).getReg() != AArch64::WZR)
- return false;
-
- MachineInstr *SrcMI = MRI->getUniqueVRegDef(MI.getOperand(2).getReg());
- if (!SrcMI)
- return false;
-
- // From https://developer.arm.com/documentation/dui0801/b/BABBGCAC
- //
- // When you use the 32-bit form of an instruction, the upper 32 bits of the
- // source registers are ignored and the upper 32 bits of the destination
- // register are set to zero.
- //
- // If AArch64's 32-bit form of instruction defines the source operand of
- // zero-extend, we do not need the zero-extend. Let's check the MI's opcode is
- // real AArch64 instruction and if it is not, do not process the opcode
- // conservatively.
- if (SrcMI->getOpcode() <= TargetOpcode::GENERIC_OP_END)
- return false;
-
- Register DefReg = MI.getOperand(0).getReg();
- Register SrcReg = MI.getOperand(2).getReg();
- MRI->replaceRegWith(DefReg, SrcReg);
- MRI->clearKillFlags(SrcReg);
- // replaceRegWith changes MI's definition register. Keep it for SSA form until
- // deleting MI.
- MI.getOperand(0).setReg(DefReg);
- ToBeRemoved.insert(&MI);
-
- LLVM_DEBUG({ dbgs() << "Removed: " << MI << "\n"; });
-
- return true;
-}
-
bool AArch64MIPeepholeOpt::runOnMachineFunction(MachineFunction &MF) {
if (skipFunction(MF.getFunction()))
return false;
TII = static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo());
+ TRI = static_cast<const AArch64RegisterInfo *>(
+ MF.getSubtarget().getRegisterInfo());
MLI = &getAnalysis<MachineLoopInfo>();
MRI = &MF.getRegInfo();
- if (!MRI->isSSA())
- return false;
+ assert(MRI->isSSA() && "Expected to be run on SSA form!");
bool Changed = false;
SmallSetVector<MachineInstr *, 8> ToBeRemoved;
@@ -271,13 +415,30 @@ bool AArch64MIPeepholeOpt::runOnMachineFunction(MachineFunction &MF) {
default:
break;
case AArch64::ANDWrr:
- Changed = visitAND<uint32_t>(MI, ToBeRemoved);
+ Changed = visitAND<uint32_t>(AArch64::ANDWri, MI, ToBeRemoved);
break;
case AArch64::ANDXrr:
- Changed = visitAND<uint64_t>(MI, ToBeRemoved);
+ Changed = visitAND<uint64_t>(AArch64::ANDXri, MI, ToBeRemoved);
break;
case AArch64::ORRWrs:
Changed = visitORR(MI, ToBeRemoved);
+ break;
+ case AArch64::ADDWrr:
+ Changed = visitADDSUB<uint32_t>(AArch64::ADDWri, AArch64::SUBWri, MI,
+ ToBeRemoved);
+ break;
+ case AArch64::SUBWrr:
+ Changed = visitADDSUB<uint32_t>(AArch64::SUBWri, AArch64::ADDWri, MI,
+ ToBeRemoved);
+ break;
+ case AArch64::ADDXrr:
+ Changed = visitADDSUB<uint64_t>(AArch64::ADDXri, AArch64::SUBXri, MI,
+ ToBeRemoved);
+ break;
+ case AArch64::SUBXrr:
+ Changed = visitADDSUB<uint64_t>(AArch64::SUBXri, AArch64::ADDXri, MI,
+ ToBeRemoved);
+ break;
}
}
}
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp
index 80d98d17e1d6..2ef7bc83003a 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp
@@ -633,7 +633,7 @@ bool AArch64SIMDInstrOpt::optimizeLdStInterleave(MachineInstr &MI) {
/// Return true when the instruction is processed successfully.
bool AArch64SIMDInstrOpt::processSeqRegInst(MachineInstr *DefiningMI,
unsigned* StReg, unsigned* StRegKill, unsigned NumArg) const {
- assert (DefiningMI != NULL);
+ assert(DefiningMI != nullptr);
if (DefiningMI->getOpcode() != AArch64::REG_SEQUENCE)
return false;
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index eb55a472a69a..73a680465f6f 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -180,20 +180,22 @@ def AArch64asr_p : SDNode<"AArch64ISD::SRA_PRED", SDT_AArch64Arith>;
def AArch64fadd_p : SDNode<"AArch64ISD::FADD_PRED", SDT_AArch64Arith>;
def AArch64fdiv_p : SDNode<"AArch64ISD::FDIV_PRED", SDT_AArch64Arith>;
def AArch64fma_p : SDNode<"AArch64ISD::FMA_PRED", SDT_AArch64FMA>;
-def AArch64fmaxnm_p : SDNode<"AArch64ISD::FMAXNM_PRED", SDT_AArch64Arith>;
-def AArch64fminnm_p : SDNode<"AArch64ISD::FMINNM_PRED", SDT_AArch64Arith>;
def AArch64fmax_p : SDNode<"AArch64ISD::FMAX_PRED", SDT_AArch64Arith>;
+def AArch64fmaxnm_p : SDNode<"AArch64ISD::FMAXNM_PRED", SDT_AArch64Arith>;
def AArch64fmin_p : SDNode<"AArch64ISD::FMIN_PRED", SDT_AArch64Arith>;
+def AArch64fminnm_p : SDNode<"AArch64ISD::FMINNM_PRED", SDT_AArch64Arith>;
def AArch64fmul_p : SDNode<"AArch64ISD::FMUL_PRED", SDT_AArch64Arith>;
def AArch64fsub_p : SDNode<"AArch64ISD::FSUB_PRED", SDT_AArch64Arith>;
def AArch64lsl_p : SDNode<"AArch64ISD::SHL_PRED", SDT_AArch64Arith>;
def AArch64lsr_p : SDNode<"AArch64ISD::SRL_PRED", SDT_AArch64Arith>;
def AArch64mul_p : SDNode<"AArch64ISD::MUL_PRED", SDT_AArch64Arith>;
+def AArch64sabd_p : SDNode<"AArch64ISD::ABDS_PRED", SDT_AArch64Arith>;
def AArch64sdiv_p : SDNode<"AArch64ISD::SDIV_PRED", SDT_AArch64Arith>;
def AArch64smax_p : SDNode<"AArch64ISD::SMAX_PRED", SDT_AArch64Arith>;
def AArch64smin_p : SDNode<"AArch64ISD::SMIN_PRED", SDT_AArch64Arith>;
def AArch64smulh_p : SDNode<"AArch64ISD::MULHS_PRED", SDT_AArch64Arith>;
def AArch64sub_p : SDNode<"AArch64ISD::SUB_PRED", SDT_AArch64Arith>;
+def AArch64uabd_p : SDNode<"AArch64ISD::ABDU_PRED", SDT_AArch64Arith>;
def AArch64udiv_p : SDNode<"AArch64ISD::UDIV_PRED", SDT_AArch64Arith>;
def AArch64umax_p : SDNode<"AArch64ISD::UMAX_PRED", SDT_AArch64Arith>;
def AArch64umin_p : SDNode<"AArch64ISD::UMIN_PRED", SDT_AArch64Arith>;
@@ -277,8 +279,11 @@ def AArch64mul_p_oneuse : PatFrag<(ops node:$pred, node:$src1, node:$src2),
return N->hasOneUse();
}]>;
+def AArch64fabd_p : PatFrag<(ops node:$pg, node:$op1, node:$op2),
+ (AArch64fabs_mt node:$pg, (AArch64fsub_p node:$pg, node:$op1, node:$op2), undef)>;
+
def AArch64fneg_mt_nsz : PatFrag<(ops node:$pred, node:$op, node:$pt),
- (AArch64fneg_mt node:$pred, node:$op, node:$pt), [{
+ (AArch64fneg_mt node:$pred, node:$op, node:$pt), [{
return N->getFlags().hasNoSignedZeros();
}]>;
@@ -415,6 +420,8 @@ let Predicates = [HasSVEorStreamingSVE] in {
defm UMAX_ZPZZ : sve_int_bin_pred_bhsd<AArch64umax_p>;
defm SMIN_ZPZZ : sve_int_bin_pred_bhsd<AArch64smin_p>;
defm UMIN_ZPZZ : sve_int_bin_pred_bhsd<AArch64umin_p>;
+ defm SABD_ZPZZ : sve_int_bin_pred_bhsd<AArch64sabd_p>;
+ defm UABD_ZPZZ : sve_int_bin_pred_bhsd<AArch64uabd_p>;
defm FRECPE_ZZ : sve_fp_2op_u_zd<0b110, "frecpe", AArch64frecpe>;
defm FRSQRTE_ZZ : sve_fp_2op_u_zd<0b111, "frsqrte", AArch64frsqrte>;
@@ -469,6 +476,7 @@ let Predicates = [HasSVEorStreamingSVE] in {
defm FMINNM_ZPZZ : sve_fp_bin_pred_hfd<AArch64fminnm_p>;
defm FMAX_ZPZZ : sve_fp_bin_pred_hfd<AArch64fmax_p>;
defm FMIN_ZPZZ : sve_fp_bin_pred_hfd<AArch64fmin_p>;
+ defm FABD_ZPZZ : sve_fp_bin_pred_hfd<AArch64fabd_p>;
defm FDIV_ZPZZ : sve_fp_bin_pred_hfd<AArch64fdiv_p>;
} // End HasSVEorStreamingSVE
@@ -642,11 +650,11 @@ let Predicates = [HasSVEorStreamingSVE] in {
(DUP_ZI_D $a, $b)>;
// Duplicate immediate FP into all vector elements.
- def : Pat<(nxv2f32 (AArch64dup (f32 fpimm:$val))),
+ def : Pat<(nxv2f32 (AArch64dup (f32 fpimm:$val))),
(DUP_ZR_S (MOVi32imm (bitcast_fpimm_to_i32 f32:$val)))>;
- def : Pat<(nxv4f32 (AArch64dup (f32 fpimm:$val))),
+ def : Pat<(nxv4f32 (AArch64dup (f32 fpimm:$val))),
(DUP_ZR_S (MOVi32imm (bitcast_fpimm_to_i32 f32:$val)))>;
- def : Pat<(nxv2f64 (AArch64dup (f64 fpimm:$val))),
+ def : Pat<(nxv2f64 (AArch64dup (f64 fpimm:$val))),
(DUP_ZR_D (MOVi64imm (bitcast_fpimm_to_i64 f64:$val)))>;
// Duplicate FP immediate into all vector elements
@@ -722,11 +730,11 @@ let Predicates = [HasSVEorStreamingSVE] in {
defm BRKBS_PPzP : sve_int_break_z<0b110, "brkbs", null_frag>;
def PTEST_PP : sve_int_ptest<0b010000, "ptest">;
- def PFALSE : sve_int_pfalse<0b000000, "pfalse">;
+ defm PFALSE : sve_int_pfalse<0b000000, "pfalse">;
defm PFIRST : sve_int_pfirst<0b00000, "pfirst", int_aarch64_sve_pfirst>;
defm PNEXT : sve_int_pnext<0b00110, "pnext", int_aarch64_sve_pnext>;
- defm AND_PPzPP : sve_int_pred_log<0b0000, "and", int_aarch64_sve_and_z, and>;
+ defm AND_PPzPP : sve_int_pred_log_and<0b0000, "and", int_aarch64_sve_and_z>;
defm BIC_PPzPP : sve_int_pred_log<0b0001, "bic", int_aarch64_sve_bic_z>;
defm EOR_PPzPP : sve_int_pred_log<0b0010, "eor", int_aarch64_sve_eor_z, xor>;
defm SEL_PPPP : sve_int_pred_log<0b0011, "sel", vselect>;
@@ -1419,6 +1427,16 @@ let Predicates = [HasSVEorStreamingSVE] in {
(INSR_ZV_D ZPR:$Z2, (INSERT_SUBREG (IMPLICIT_DEF),
(LASTB_VPZ_D (PTRUE_D 31), ZPR:$Z1), dsub))>;
+  // Splice with a lane index greater than or equal to 0
+ def : Pat<(nxv16i8 (vector_splice (nxv16i8 ZPR:$Z1), (nxv16i8 ZPR:$Z2), (i64 (sve_ext_imm_0_255 i32:$index)))),
+ (EXT_ZZI ZPR:$Z1, ZPR:$Z2, imm0_255:$index)>;
+ def : Pat<(nxv8i16 (vector_splice (nxv8i16 ZPR:$Z1), (nxv8i16 ZPR:$Z2), (i64 (sve_ext_imm_0_127 i32:$index)))),
+ (EXT_ZZI ZPR:$Z1, ZPR:$Z2, imm0_255:$index)>;
+ def : Pat<(nxv4i32 (vector_splice (nxv4i32 ZPR:$Z1), (nxv4i32 ZPR:$Z2), (i64 (sve_ext_imm_0_63 i32:$index)))),
+ (EXT_ZZI ZPR:$Z1, ZPR:$Z2, imm0_255:$index)>;
+ def : Pat<(nxv2i64 (vector_splice (nxv2i64 ZPR:$Z1), (nxv2i64 ZPR:$Z2), (i64 (sve_ext_imm_0_31 i32:$index)))),
+ (EXT_ZZI ZPR:$Z1, ZPR:$Z2, imm0_255:$index)>;
+
defm CMPHS_PPzZZ : sve_int_cmp_0<0b000, "cmphs", SETUGE, SETULE>;
defm CMPHI_PPzZZ : sve_int_cmp_0<0b001, "cmphi", SETUGT, SETULT>;
defm CMPGE_PPzZZ : sve_int_cmp_0<0b100, "cmpge", SETGE, SETLE>;
@@ -2496,6 +2514,7 @@ let Predicates = [HasSVEorStreamingSVE] in {
// 16-element contiguous store
defm : st1<ST1B, ST1B_IMM, nxv16i8, AArch64st1, nxv16i1, nxv16i8, am_sve_regreg_lsl0>;
+ // Insert scalar into undef[0]
def : Pat<(nxv16i8 (vector_insert (nxv16i8 (undef)), (i32 FPR32:$src), 0)),
(INSERT_SUBREG (nxv16i8 (IMPLICIT_DEF)), FPR32:$src, ssub)>;
def : Pat<(nxv8i16 (vector_insert (nxv8i16 (undef)), (i32 FPR32:$src), 0)),
@@ -2691,17 +2710,6 @@ let Predicates = [HasSVEorStreamingSVE] in {
def : Pat<(vector_extract (nxv2f64 ZPR:$Zs), (i64 0)),
(f64 (EXTRACT_SUBREG ZPR:$Zs, dsub))>;
}
-
-  // Splice with a lane index greater than or equal to 0
- def : Pat<(nxv16i8 (vector_splice (nxv16i8 ZPR:$Z1), (nxv16i8 ZPR:$Z2), (i64 (sve_ext_imm_0_255 i32:$index)))),
- (EXT_ZZI ZPR:$Z1, ZPR:$Z2, imm0_255:$index)>;
- def : Pat<(nxv8i16 (vector_splice (nxv8i16 ZPR:$Z1), (nxv8i16 ZPR:$Z2), (i64 (sve_ext_imm_0_127 i32:$index)))),
- (EXT_ZZI ZPR:$Z1, ZPR:$Z2, imm0_255:$index)>;
- def : Pat<(nxv4i32 (vector_splice (nxv4i32 ZPR:$Z1), (nxv4i32 ZPR:$Z2), (i64 (sve_ext_imm_0_63 i32:$index)))),
- (EXT_ZZI ZPR:$Z1, ZPR:$Z2, imm0_255:$index)>;
- def : Pat<(nxv2i64 (vector_splice (nxv2i64 ZPR:$Z1), (nxv2i64 ZPR:$Z2), (i64 (sve_ext_imm_0_31 i32:$index)))),
- (EXT_ZZI ZPR:$Z1, ZPR:$Z2, imm0_255:$index)>;
-
} // End HasSVEorStreamingSVE
let Predicates = [HasSVE, HasMatMulInt8] in {
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedA55.td b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedA55.td
index 877c4d2ced41..009219ce3c54 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedA55.td
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedA55.td
@@ -235,10 +235,14 @@ def : ReadAdvance<ReadID, 1, [WriteImm,WriteI,
//---
// Miscellaneous
//---
-def : InstRW<[CortexA55WriteVLD1SI,CortexA55WriteLDP1], (instregex "LDPS?W")>;
-def : InstRW<[CortexA55WriteVLD1,CortexA55WriteLDP1], (instregex "LDPS[^W]")>;
-def : InstRW<[CortexA55WriteVLD1,CortexA55WriteLDP2], (instregex "LDP(X|D)")>;
-def : InstRW<[CortexA55WriteVLD1,CortexA55WriteLDP4], (instregex "LDPQ")>;
+def : InstRW<[CortexA55WriteVLD1SI,CortexA55WriteLDP1], (instregex "LDPS?Wi")>;
+def : InstRW<[CortexA55WriteVLD1,CortexA55WriteLDP1], (instregex "LDPSi")>;
+def : InstRW<[CortexA55WriteVLD1,CortexA55WriteLDP2], (instregex "LDP(X|D)i")>;
+def : InstRW<[CortexA55WriteVLD1,CortexA55WriteLDP4], (instregex "LDPQi")>;
+def : InstRW<[WriteAdr, CortexA55WriteVLD1SI,CortexA55WriteLDP1], (instregex "LDPS?W(pre|post)")>;
+def : InstRW<[WriteAdr, CortexA55WriteVLD1,CortexA55WriteLDP1], (instregex "LDPS(pre|post)")>;
+def : InstRW<[WriteAdr, CortexA55WriteVLD1,CortexA55WriteLDP2], (instregex "LDP(X|D)(pre|post)")>;
+def : InstRW<[WriteAdr, CortexA55WriteVLD1,CortexA55WriteLDP4], (instregex "LDPQ(pre|post)")>;
def : InstRW<[WriteI], (instrs COPY)>;
//---
// Vector Loads - 64-bit per cycle
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedA57.td b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedA57.td
index 168a762241ca..a860aa907fd1 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedA57.td
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedA57.td
@@ -526,7 +526,7 @@ def : InstRW<[A57Write_5cyc_2V], (instregex "^FRINT[AIMNPXZ](v4f32|v2f64)")>;
def : InstRW<[A57Write_3cyc_2V], (instregex "^(BIF|BIT|BSL|BSP)v16i8")>;
// ASIMD duplicate, gen reg, D-form and Q-form
-def : InstRW<[A57Write_8cyc_1L_1V], (instregex "^CPY")>;
+def : InstRW<[A57Write_8cyc_1L_1V], (instregex "^DUP(i8|i16|i32|i64)$")>;
def : InstRW<[A57Write_8cyc_1L_1V], (instregex "^DUPv.+gpr")>;
// ASIMD move, saturating
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedA64FX.td b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedA64FX.td
index 1d25a6c00f95..fa10d056b7f7 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedA64FX.td
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedA64FX.td
@@ -1891,7 +1891,7 @@ def : InstRW<[A64FXWrite_4Cyc_GI0],
// ASIMD duplicate, gen reg
// ASIMD duplicate, element
def : InstRW<[A64FXWrite_DUPGENERAL], (instregex "^DUPv")>;
-def : InstRW<[A64FXWrite_6Cyc_GI0], (instregex "^CPY")>;
+def : InstRW<[A64FXWrite_6Cyc_GI0], (instregex "^DUP(i8|i16|i32|i64)$")>;
def : InstRW<[A64FXWrite_6Cyc_GI0], (instregex "^DUPv.+gpr")>;
// ASIMD extract
@@ -2512,16 +2512,16 @@ def : InstRW<[A64FXWrite_1Cyc_GI24], (instrs CNTW_XPiI)>;
def : InstRW<[A64FXWrite_6Cyc_GI0], (instrs COMPACT_ZPZ_D, COMPACT_ZPZ_S)>;
// [72] "cpy $Zd, $Pg/m, $Rn";
-//@@@ def : InstRW<[XXXXXX], (instrs CPY_ZPmR_B, CPY_ZPmR_D, CPY_ZPmR_H, CPY_ZPmR_S)>;
+def : InstRW<[A64FXWrite_6Cyc_GI0], (instrs CPY_ZPmR_B, CPY_ZPmR_D, CPY_ZPmR_H, CPY_ZPmR_S)>;
// [73] "cpy $Zd, $Pg/m, $Vn";
-//@@@ def : InstRW<[XXXXXX], (instrs CPY_ZPmV_B, CPY_ZPmV_D, CPY_ZPmV_H, CPY_ZPmV_S)>;
+def : InstRW<[A64FXWrite_6Cyc_GI0], (instrs CPY_ZPmV_B, CPY_ZPmV_D, CPY_ZPmV_H, CPY_ZPmV_S)>;
// [74] "cpy $Zd, $Pg/m, $imm";
-//@@@ def : InstRW<[XXXXXX], (instrs CPY_ZPmI_B, CPY_ZPmI_D, CPY_ZPmI_H, CPY_ZPmI_S)>;
+def : InstRW<[A64FXWrite_6Cyc_GI0], (instrs CPY_ZPmI_B, CPY_ZPmI_D, CPY_ZPmI_H, CPY_ZPmI_S)>;
// [75] "cpy $Zd, $Pg/z, $imm";
-//@@@ def : InstRW<[XXXXXX], (instrs CPY_ZPzI_B, CPY_ZPzI_D, CPY_ZPzI_H, CPY_ZPzI_S)>;
+def : InstRW<[A64FXWrite_6Cyc_GI0], (instrs CPY_ZPzI_B, CPY_ZPzI_D, CPY_ZPzI_H, CPY_ZPzI_S)>;
// [76] "ctermeq $Rn, $Rm";
def : InstRW<[A64FXWrite_2Cyc_GI24], (instrs CTERMEQ_WW, CTERMEQ_XX)>;
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedExynosM3.td b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedExynosM3.td
index 14df8236504b..d66efb82fccc 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedExynosM3.td
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedExynosM3.td
@@ -669,7 +669,7 @@ def : InstRW<[M3WriteNEONB], (instregex "^DUPv.+gpr")>;
def : InstRW<[M3WriteNSHF1], (instregex "^DUPv.+lane")>;
def : InstRW<[M3WriteNSHF1], (instregex "^EXTv")>;
def : InstRW<[M3WriteNSHF1], (instregex "^[SU]?Q?XTU?Nv")>;
-def : InstRW<[M3WriteNSHF1], (instregex "^CPY")>;
+def : InstRW<[M3WriteNSHF1], (instregex "^DUP(i8|i16|i32|i64)$")>;
def : InstRW<[M3WriteNSHF1], (instregex "^INSv.+lane")>;
def : InstRW<[M3WriteMOVI], (instregex "^MOVI")>;
def : InstRW<[M3WriteNALU1], (instregex "^FMOVv")>;
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedExynosM4.td b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedExynosM4.td
index 8f740a9a0d35..94e70793e855 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedExynosM4.td
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedExynosM4.td
@@ -810,7 +810,7 @@ def : InstRW<[M4WriteNALU1], (instregex "^RBITv")>;
def : InstRW<[M4WriteNALU1], (instregex "^(BIF|BIT|BSL|BSP)v")>;
def : InstRW<[M4WriteNALU1], (instregex "^CL[STZ]v")>;
def : InstRW<[M4WriteNEONB], (instregex "^DUPv.+gpr")>;
-def : InstRW<[M4WriteNSHF1], (instregex "^CPY")>;
+def : InstRW<[M4WriteNSHF1], (instregex "^DUP(i8|i16|i32|i64)$")>;
def : InstRW<[M4WriteNSHF1], (instregex "^DUPv.+lane")>;
def : InstRW<[M4WriteNSHF1], (instregex "^EXTv")>;
def : InstRW<[M4WriteNSHT4A], (instregex "^XTNv")>;
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedExynosM5.td b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedExynosM5.td
index 93e1b66bea03..1db5f5322a64 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedExynosM5.td
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedExynosM5.td
@@ -848,7 +848,7 @@ def : InstRW<[M5WriteNALU2], (instregex "^RBITv")>;
def : InstRW<[M5WriteNALU2], (instregex "^(BIF|BIT|BSL|BSP)v")>;
def : InstRW<[M5WriteNALU2], (instregex "^CL[STZ]v")>;
def : InstRW<[M5WriteNEONB], (instregex "^DUPv.+gpr")>;
-def : InstRW<[M5WriteNSHF2], (instregex "^CPY")>;
+def : InstRW<[M5WriteNSHF2], (instregex "^DUP(i8|i16|i32|i64)$")>;
def : InstRW<[M5WriteNSHF2], (instregex "^DUPv.+lane")>;
def : InstRW<[M5WriteNSHF2], (instregex "^EXTv")>;
def : InstRW<[M5WriteNSHT4A], (instregex "^XTNv")>;
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedFalkorDetails.td b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedFalkorDetails.td
index f2cd83caffa2..a3a038f869fb 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedFalkorDetails.td
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedFalkorDetails.td
@@ -908,7 +908,7 @@ def : InstRW<[FalkorWr_ADDSUBsx], (instregex "^SUB(S)?(W|X)r(s|x|x64)$")>;
// -----------------------------------------------------------------------------
def : InstRW<[FalkorWr_1GTOV_1cyc], (instregex "^DUP(v8i8|v4i16|v2i32)(gpr|lane)$")>;
def : InstRW<[FalkorWr_1VXVY_1cyc], (instregex "^DUP(v16i8|v8i16)(gpr|lane)$")>;
-def : InstRW<[FalkorWr_1VXVY_1cyc], (instregex "^CPY(i8|i16|i32|i64)$")>;
+def : InstRW<[FalkorWr_1VXVY_1cyc], (instregex "^DUP(i8|i16|i32|i64)$")>;
def : InstRW<[FalkorWr_1GTOV_1cyc], (instregex "^INSv(i8|i16)(gpr|lane)$")>;
def : InstRW<[FalkorWr_1VTOG_1cyc], (instregex "^(S|U)MOVv.*$")>;
def : InstRW<[FalkorWr_1VXVY_1cyc], (instregex "^(BIF|BIT|BSL|BSP)v8i8$")>;
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedThunderX2T99.td b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedThunderX2T99.td
index e4cae97b5524..ffa0a5e7d91a 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedThunderX2T99.td
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedThunderX2T99.td
@@ -1499,7 +1499,7 @@ def : InstRW<[THX2T99Write_5Cyc_F01],
// ASIMD duplicate, gen reg
// ASIMD duplicate, element
def : InstRW<[THX2T99Write_5Cyc_F01], (instregex "^DUPv")>;
-def : InstRW<[THX2T99Write_5Cyc_F01], (instregex "^CPY")>;
+def : InstRW<[THX2T99Write_5Cyc_F01], (instregex "^DUP(i8|i16|i32|i64)$")>;
def : InstRW<[THX2T99Write_5Cyc_F01], (instregex "^DUPv.+gpr")>;
// ASIMD extract
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedThunderX3T110.td b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedThunderX3T110.td
index 08be2b3a55b3..46a1c217f984 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedThunderX3T110.td
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SchedThunderX3T110.td
@@ -1608,7 +1608,7 @@ def : InstRW<[THX3T110Write_3_4Cyc_F23_F0123],
// ASIMD duplicate, gen reg
// ASIMD duplicate, element
def : InstRW<[THX3T110Write_5Cyc_F0123], (instregex "^DUPv")>;
-def : InstRW<[THX3T110Write_5Cyc_F0123], (instregex "^CPY")>;
+def : InstRW<[THX3T110Write_5Cyc_F0123], (instregex "^DUP(i8|i16|i32|i64)$")>;
def : InstRW<[THX3T110Write_5Cyc_F0123], (instregex "^DUPv.+gpr")>;
// ASIMD extract
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp
index 7307961ddb5f..87be7bb6d113 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp
@@ -304,7 +304,7 @@ bool AArch64SpeculationHardening::instrumentControlFlow(
// sure if that would actually result in a big performance difference
// though. Maybe RegisterScavenger::findSurvivorBackwards has some logic
// already to do this - but it's unclear if that could easily be used here.
- unsigned TmpReg = RS.FindUnusedReg(&AArch64::GPR64commonRegClass);
+ Register TmpReg = RS.FindUnusedReg(&AArch64::GPR64commonRegClass);
LLVM_DEBUG(dbgs() << "RS finds "
<< ((TmpReg == 0) ? "no register " : "register ");
if (TmpReg != 0) dbgs() << printReg(TmpReg, TRI) << " ";
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64StackTaggingPreRA.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64StackTaggingPreRA.cpp
index d2488f61eb4b..cae6d65bed2d 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64StackTaggingPreRA.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64StackTaggingPreRA.cpp
@@ -195,7 +195,7 @@ void AArch64StackTaggingPreRA::uncheckUsesOf(unsigned TaggedReg, int FI) {
void AArch64StackTaggingPreRA::uncheckLoadsAndStores() {
for (auto *I : ReTags) {
- unsigned TaggedReg = I->getOperand(0).getReg();
+ Register TaggedReg = I->getOperand(0).getReg();
int FI = I->getOperand(1).getIndex();
uncheckUsesOf(TaggedReg, FI);
}
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64Subtarget.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
index f7d3dd0bc222..a4f4b8582182 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
@@ -23,6 +23,7 @@
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/IR/GlobalValue.h"
+#include "llvm/Support/AArch64TargetParser.h"
#include "llvm/Support/TargetParser.h"
using namespace llvm;
@@ -157,13 +158,19 @@ void AArch64Subtarget::initializeProperties() {
break;
case NeoverseN1:
PrefFunctionLogAlignment = 4;
+ PrefLoopLogAlignment = 5;
+ MaxBytesForLoopAlignment = 16;
break;
case NeoverseN2:
PrefFunctionLogAlignment = 4;
+ PrefLoopLogAlignment = 5;
+ MaxBytesForLoopAlignment = 16;
VScaleForTuning = 1;
break;
case NeoverseV1:
PrefFunctionLogAlignment = 4;
+ PrefLoopLogAlignment = 5;
+ MaxBytesForLoopAlignment = 16;
VScaleForTuning = 2;
break;
case Neoverse512TVB:
@@ -228,8 +235,7 @@ AArch64Subtarget::AArch64Subtarget(const Triple &TT, const std::string &CPU,
IsLittle(LittleEndian),
MinSVEVectorSizeInBits(MinSVEVectorSizeInBitsOverride),
MaxSVEVectorSizeInBits(MaxSVEVectorSizeInBitsOverride), TargetTriple(TT),
- FrameLowering(),
- InstrInfo(initializeSubtargetDependencies(FS, CPU, TuneCPU)), TSInfo(),
+ InstrInfo(initializeSubtargetDependencies(FS, CPU, TuneCPU)),
TLInfo(TM, *this) {
if (AArch64::isX18ReservedByDefault(TT))
ReserveXRegister.set(18);
@@ -367,9 +373,4 @@ void AArch64Subtarget::mirFileLoaded(MachineFunction &MF) const {
MFI.computeMaxCallFrameSize(MF);
}
-bool AArch64Subtarget::useSVEForFixedLengthVectors() const {
- // Prefer NEON unless larger SVE registers are available.
- return hasSVE() && getMinSVEVectorSizeInBits() >= 256;
-}
-
bool AArch64Subtarget::useAA() const { return UseAA; }
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64Subtarget.h b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64Subtarget.h
index b3cd5ebd5f65..3e3c0f6aba15 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64Subtarget.h
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64Subtarget.h
@@ -94,9 +94,11 @@ protected:
bool HasV8_5aOps = false;
bool HasV8_6aOps = false;
bool HasV8_7aOps = false;
+ bool HasV8_8aOps = false;
bool HasV9_0aOps = false;
bool HasV9_1aOps = false;
bool HasV9_2aOps = false;
+ bool HasV9_3aOps = false;
bool HasV8_0rOps = false;
bool HasCONTEXTIDREL2 = false;
@@ -188,6 +190,10 @@ protected:
bool HasHCX = false;
bool HasLS64 = false;
+ // Armv8.8-A Extensions
+ bool HasHBC = false;
+ bool HasMOPS = false;
+
// Arm SVE2 extensions
bool HasSVE2 = false;
bool HasSVE2AES = false;
@@ -274,6 +280,7 @@ protected:
unsigned MaxPrefetchIterationsAhead = UINT_MAX;
unsigned PrefFunctionLogAlignment = 0;
unsigned PrefLoopLogAlignment = 0;
+ unsigned MaxBytesForLoopAlignment = 0;
unsigned MaxJumpTableSize = 0;
unsigned WideningBaseCost = 0;
@@ -365,6 +372,7 @@ public:
bool hasV9_0aOps() const { return HasV9_0aOps; }
bool hasV9_1aOps() const { return HasV9_1aOps; }
bool hasV9_2aOps() const { return HasV9_2aOps; }
+ bool hasV9_3aOps() const { return HasV9_3aOps; }
bool hasV8_0rOps() const { return HasV8_0rOps; }
bool hasZeroCycleRegMove() const { return HasZeroCycleRegMove; }
@@ -464,6 +472,10 @@ public:
}
unsigned getPrefLoopLogAlignment() const { return PrefLoopLogAlignment; }
+ unsigned getMaxBytesForLoopAlignment() const {
+ return MaxBytesForLoopAlignment;
+ }
+
unsigned getMaximumJumpTableSize() const { return MaxJumpTableSize; }
unsigned getWideningBaseCost() const { return WideningBaseCost; }
@@ -572,6 +584,8 @@ public:
bool hasRCPC_IMMO() const { return HasRCPC_IMMO; }
bool hasEL2VMSA() const { return HasEL2VMSA; }
bool hasEL3() const { return HasEL3; }
+ bool hasHBC() const { return HasHBC; }
+ bool hasMOPS() const { return HasMOPS; }
bool fixCortexA53_835769() const { return FixCortexA53_835769; }
@@ -666,7 +680,10 @@ public:
return MinSVEVectorSizeInBits;
}
- bool useSVEForFixedLengthVectors() const;
+ bool useSVEForFixedLengthVectors() const {
+ // Prefer NEON unless larger SVE registers are available.
+ return hasSVE() && getMinSVEVectorSizeInBits() >= 256;
+ }
unsigned getVScaleForTuning() const { return VScaleForTuning; }
};
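As a small worked example of the tuning hooks added above (arithmetic only, not code from the patch): PrefLoopLogAlignment is a log2 value, so the Neoverse N1/N2/V1 settings request 1 << 5 = 32-byte loop alignment, while getMaxBytesForLoopAlignment() caps the padding the layout code may spend to reach that boundary at 16 bytes. A hypothetical consumer might look like this, assuming an AArch64Subtarget reference is available:

    #include "AArch64Subtarget.h"   // target-internal header, assumed on the include path

    // Illustrative only: derive the requested loop alignment in bytes.
    unsigned prefLoopAlignBytes(const llvm::AArch64Subtarget &ST) {
      return 1u << ST.getPrefLoopLogAlignment();   // 32 for the cores tuned above
    }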
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SystemOperands.td b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SystemOperands.td
index f9fe804865a5..cce5813fe6e9 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SystemOperands.td
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SystemOperands.td
@@ -1333,7 +1333,7 @@ def : RWSysReg<"PRBAR_EL2", 0b11, 0b100, 0b0110, 0b1000, 0b000>;
def : RWSysReg<"PRLAR_EL1", 0b11, 0b000, 0b0110, 0b1000, 0b001>;
def : RWSysReg<"PRLAR_EL2", 0b11, 0b100, 0b0110, 0b1000, 0b001>;
-foreach n = 0-15 in {
+foreach n = 1-15 in {
foreach x = 1-2 in {
// Direct access to the Protection Region Base Address Register for the nth MPU region
def : RWSysReg<!strconcat("PRBAR"#n, "_EL"#x),
@@ -1348,7 +1348,7 @@ foreach x = 1-2 in {
let Encoding{13} = !add(x,-1);
}
} //foreach x = 1-2 in
-} //foreach n = 0-15 in
+} //foreach n = 1-15 in
} //let Requires = [{ {AArch64::HasV8_0rOps} }] in
// v8.1a "Privileged Access Never" extension-specific system registers
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64TargetMachine.h b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64TargetMachine.h
index 25e626134317..7d314bce99b1 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64TargetMachine.h
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64TargetMachine.h
@@ -20,8 +20,6 @@
namespace llvm {
-class AArch64RegisterBankInfo;
-
class AArch64TargetMachine : public LLVMTargetMachine {
protected:
std::unique_ptr<TargetLoweringObjectFile> TLOF;
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64TargetObjectFile.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64TargetObjectFile.cpp
index dfc66f0cb4c1..7ed934cfabc0 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64TargetObjectFile.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64TargetObjectFile.cpp
@@ -25,8 +25,7 @@ void AArch64_ELFTargetObjectFile::Initialize(MCContext &Ctx,
SupportDebugThreadLocalLocation = false;
}
-AArch64_MachoTargetObjectFile::AArch64_MachoTargetObjectFile()
- : TargetLoweringObjectFileMachO() {
+AArch64_MachoTargetObjectFile::AArch64_MachoTargetObjectFile() {
SupportGOTPCRelWithOffset = false;
}
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64TargetObjectFile.h b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64TargetObjectFile.h
index 28324c2ae608..9f098230bbd7 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64TargetObjectFile.h
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64TargetObjectFile.h
@@ -13,7 +13,6 @@
#include "llvm/Target/TargetLoweringObjectFile.h"
namespace llvm {
-class AArch64TargetMachine;
/// This implementation is used for AArch64 ELF targets (Linux in particular).
class AArch64_ELFTargetObjectFile : public TargetLoweringObjectFileELF {
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index d21854e38f5a..a4d666a0a3c2 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -331,6 +331,45 @@ AArch64TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
}
break;
}
+ case Intrinsic::sadd_with_overflow:
+ case Intrinsic::uadd_with_overflow:
+ case Intrinsic::ssub_with_overflow:
+ case Intrinsic::usub_with_overflow:
+ case Intrinsic::smul_with_overflow:
+ case Intrinsic::umul_with_overflow: {
+ static const CostTblEntry WithOverflowCostTbl[] = {
+ {Intrinsic::sadd_with_overflow, MVT::i8, 3},
+ {Intrinsic::uadd_with_overflow, MVT::i8, 3},
+ {Intrinsic::sadd_with_overflow, MVT::i16, 3},
+ {Intrinsic::uadd_with_overflow, MVT::i16, 3},
+ {Intrinsic::sadd_with_overflow, MVT::i32, 1},
+ {Intrinsic::uadd_with_overflow, MVT::i32, 1},
+ {Intrinsic::sadd_with_overflow, MVT::i64, 1},
+ {Intrinsic::uadd_with_overflow, MVT::i64, 1},
+ {Intrinsic::ssub_with_overflow, MVT::i8, 3},
+ {Intrinsic::usub_with_overflow, MVT::i8, 3},
+ {Intrinsic::ssub_with_overflow, MVT::i16, 3},
+ {Intrinsic::usub_with_overflow, MVT::i16, 3},
+ {Intrinsic::ssub_with_overflow, MVT::i32, 1},
+ {Intrinsic::usub_with_overflow, MVT::i32, 1},
+ {Intrinsic::ssub_with_overflow, MVT::i64, 1},
+ {Intrinsic::usub_with_overflow, MVT::i64, 1},
+ {Intrinsic::smul_with_overflow, MVT::i8, 5},
+ {Intrinsic::umul_with_overflow, MVT::i8, 4},
+ {Intrinsic::smul_with_overflow, MVT::i16, 5},
+ {Intrinsic::umul_with_overflow, MVT::i16, 4},
+ {Intrinsic::smul_with_overflow, MVT::i32, 2}, // eg umull;tst
+ {Intrinsic::umul_with_overflow, MVT::i32, 2}, // eg umull;cmp sxtw
+ {Intrinsic::smul_with_overflow, MVT::i64, 3}, // eg mul;smulh;cmp
+ {Intrinsic::umul_with_overflow, MVT::i64, 3}, // eg mul;umulh;cmp asr
+ };
+ EVT MTy = TLI->getValueType(DL, RetTy->getContainedType(0), true);
+ if (MTy.isSimple())
+ if (const auto *Entry = CostTableLookup(WithOverflowCostTbl, ICA.getID(),
+ MTy.getSimpleVT()))
+ return Entry->Cost;
+ break;
+ }
default:
break;
}
@@ -377,12 +416,76 @@ static Optional<Instruction *> processPhiNode(InstCombiner &IC,
return IC.replaceInstUsesWith(II, NPN);
}
+// (from_svbool (binop (to_svbool pred) (svbool_t _) (svbool_t _)))
+// => (binop (pred) (from_svbool _) (from_svbool _))
+//
+// The above transformation eliminates a `to_svbool` in the predicate
+// operand of bitwise operation `binop` by narrowing the vector width of
+// the operation. For example, it would convert a `<vscale x 16 x i1>
+// and` into a `<vscale x 4 x i1> and`. This is profitable because
+// to_svbool must zero the new lanes during widening, whereas
+// from_svbool is free.
+static Optional<Instruction *> tryCombineFromSVBoolBinOp(InstCombiner &IC,
+ IntrinsicInst &II) {
+ auto BinOp = dyn_cast<IntrinsicInst>(II.getOperand(0));
+ if (!BinOp)
+ return None;
+
+ auto IntrinsicID = BinOp->getIntrinsicID();
+ switch (IntrinsicID) {
+ case Intrinsic::aarch64_sve_and_z:
+ case Intrinsic::aarch64_sve_bic_z:
+ case Intrinsic::aarch64_sve_eor_z:
+ case Intrinsic::aarch64_sve_nand_z:
+ case Intrinsic::aarch64_sve_nor_z:
+ case Intrinsic::aarch64_sve_orn_z:
+ case Intrinsic::aarch64_sve_orr_z:
+ break;
+ default:
+ return None;
+ }
+
+ auto BinOpPred = BinOp->getOperand(0);
+ auto BinOpOp1 = BinOp->getOperand(1);
+ auto BinOpOp2 = BinOp->getOperand(2);
+
+ auto PredIntr = dyn_cast<IntrinsicInst>(BinOpPred);
+ if (!PredIntr ||
+ PredIntr->getIntrinsicID() != Intrinsic::aarch64_sve_convert_to_svbool)
+ return None;
+
+ auto PredOp = PredIntr->getOperand(0);
+ auto PredOpTy = cast<VectorType>(PredOp->getType());
+ if (PredOpTy != II.getType())
+ return None;
+
+ IRBuilder<> Builder(II.getContext());
+ Builder.SetInsertPoint(&II);
+
+ SmallVector<Value *> NarrowedBinOpArgs = {PredOp};
+ auto NarrowBinOpOp1 = Builder.CreateIntrinsic(
+ Intrinsic::aarch64_sve_convert_from_svbool, {PredOpTy}, {BinOpOp1});
+ NarrowedBinOpArgs.push_back(NarrowBinOpOp1);
+ if (BinOpOp1 == BinOpOp2)
+ NarrowedBinOpArgs.push_back(NarrowBinOpOp1);
+ else
+ NarrowedBinOpArgs.push_back(Builder.CreateIntrinsic(
+ Intrinsic::aarch64_sve_convert_from_svbool, {PredOpTy}, {BinOpOp2}));
+
+ auto NarrowedBinOp =
+ Builder.CreateIntrinsic(IntrinsicID, {PredOpTy}, NarrowedBinOpArgs);
+ return IC.replaceInstUsesWith(II, NarrowedBinOp);
+}
+
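Illustrative sketch, not part of the patch: the IR shape that tryCombineFromSVBoolBinOp matches can be built with IRBuilder as below, where Pg4 is assumed to be a <vscale x 4 x i1> predicate and A16/B16 are <vscale x 16 x i1> values; after the combine, the and_z operates directly on <vscale x 4 x i1> operands and the to_svbool call disappears.

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/IntrinsicsAArch64.h"
    using namespace llvm;

    // Builds from_svbool(and_z(to_svbool(Pg4), A16, B16)), the pre-combine form.
    Value *buildWidenedPredAnd(IRBuilder<> &B, Value *Pg4, Value *A16, Value *B16) {
      Type *NarrowTy = Pg4->getType();   // e.g. <vscale x 4 x i1>
      Value *WidePg = B.CreateIntrinsic(
          Intrinsic::aarch64_sve_convert_to_svbool, {NarrowTy}, {Pg4});
      Value *WideAnd = B.CreateIntrinsic(
          Intrinsic::aarch64_sve_and_z, {WidePg->getType()}, {WidePg, A16, B16});
      return B.CreateIntrinsic(
          Intrinsic::aarch64_sve_convert_from_svbool, {NarrowTy}, {WideAnd});
    }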
static Optional<Instruction *> instCombineConvertFromSVBool(InstCombiner &IC,
IntrinsicInst &II) {
// If the reinterpret instruction operand is a PHI Node
if (isa<PHINode>(II.getArgOperand(0)))
return processPhiNode(IC, II);
+ if (auto BinOpCombine = tryCombineFromSVBoolBinOp(IC, II))
+ return BinOpCombine;
+
SmallVector<Instruction *, 32> CandidatesForRemoval;
Value *Cursor = II.getOperand(0), *EarliestReplacement = nullptr;
@@ -1129,6 +1232,32 @@ AArch64TTIImpl::instCombineIntrinsic(InstCombiner &IC,
return None;
}
+Optional<Value *> AArch64TTIImpl::simplifyDemandedVectorEltsIntrinsic(
+ InstCombiner &IC, IntrinsicInst &II, APInt OrigDemandedElts,
+ APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3,
+ std::function<void(Instruction *, unsigned, APInt, APInt &)>
+ SimplifyAndSetOp) const {
+ switch (II.getIntrinsicID()) {
+ default:
+ break;
+ case Intrinsic::aarch64_neon_fcvtxn:
+ case Intrinsic::aarch64_neon_rshrn:
+ case Intrinsic::aarch64_neon_sqrshrn:
+ case Intrinsic::aarch64_neon_sqrshrun:
+ case Intrinsic::aarch64_neon_sqshrn:
+ case Intrinsic::aarch64_neon_sqshrun:
+ case Intrinsic::aarch64_neon_sqxtn:
+ case Intrinsic::aarch64_neon_sqxtun:
+ case Intrinsic::aarch64_neon_uqrshrn:
+ case Intrinsic::aarch64_neon_uqshrn:
+ case Intrinsic::aarch64_neon_uqxtn:
+ SimplifyAndSetOp(&II, 0, OrigDemandedElts, UndefElts);
+ break;
+ }
+
+ return None;
+}
+
bool AArch64TTIImpl::isWideningInstruction(Type *DstTy, unsigned Opcode,
ArrayRef<const Value *> Args) {
@@ -1461,6 +1590,15 @@ InstructionCost AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
{ ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f32, 2},
{ ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f32, 6},
+ // Bitcasts from float to integer
+ { ISD::BITCAST, MVT::nxv2f16, MVT::nxv2i16, 0 },
+ { ISD::BITCAST, MVT::nxv4f16, MVT::nxv4i16, 0 },
+ { ISD::BITCAST, MVT::nxv2f32, MVT::nxv2i32, 0 },
+
+ // Bitcasts from integer to float
+ { ISD::BITCAST, MVT::nxv2i16, MVT::nxv2f16, 0 },
+ { ISD::BITCAST, MVT::nxv4i16, MVT::nxv4f16, 0 },
+ { ISD::BITCAST, MVT::nxv2i32, MVT::nxv2f32, 0 },
};
if (const auto *Entry = ConvertCostTableLookup(ConversionTbl, ISD,
@@ -1555,9 +1693,12 @@ InstructionCost AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
if (!LT.second.isVector())
return 0;
- // The type may be split. Normalize the index to the new type.
- unsigned Width = LT.second.getVectorNumElements();
- Index = Index % Width;
+ // The type may be split. For fixed-width vectors we can normalize the
+ // index to the new type.
+ if (LT.second.isFixedLengthVector()) {
+ unsigned Width = LT.second.getVectorNumElements();
+ Index = Index % Width;
+ }
// The element at index zero is already inside the vector.
if (Index == 0)
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
index c3e1735cd4cd..a6029b9f2445 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -106,6 +106,12 @@ public:
Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
IntrinsicInst &II) const;
+ Optional<Value *> simplifyDemandedVectorEltsIntrinsic(
+ InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
+ APInt &UndefElts2, APInt &UndefElts3,
+ std::function<void(Instruction *, unsigned, APInt, APInt &)>
+ SimplifyAndSetOp) const;
+
TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
switch (K) {
case TargetTransformInfo::RGK_Scalar:
@@ -307,6 +313,10 @@ public:
return 2;
}
+ bool emitGetActiveLaneMask() const {
+ return ST->hasSVE();
+ }
+
bool supportsScalableVectors() const { return ST->hasSVE(); }
bool enableScalableVectorization() const { return ST->hasSVE(); }
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index 62038b10fccd..33ed7ae9780e 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -48,6 +48,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SMLoc.h"
+#include "llvm/Support/AArch64TargetParser.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
@@ -3284,6 +3285,8 @@ static const struct Extension {
{"sme", {AArch64::FeatureSME}},
{"sme-f64", {AArch64::FeatureSMEF64}},
{"sme-i64", {AArch64::FeatureSMEI64}},
+ {"hbc", {AArch64::FeatureHBC}},
+ {"mops", {AArch64::FeatureMOPS}},
// FIXME: Unsupported extensions
{"lor", {}},
{"rdma", {}},
@@ -3307,12 +3310,16 @@ static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
Str += "ARMv8.6a";
else if (FBS[AArch64::HasV8_7aOps])
Str += "ARMv8.7a";
+ else if (FBS[AArch64::HasV8_8aOps])
+ Str += "ARMv8.8a";
else if (FBS[AArch64::HasV9_0aOps])
Str += "ARMv9-a";
else if (FBS[AArch64::HasV9_1aOps])
Str += "ARMv9.1a";
else if (FBS[AArch64::HasV9_2aOps])
Str += "ARMv9.2a";
+ else if (FBS[AArch64::HasV9_3aOps])
+ Str += "ARMv9.3a";
else if (FBS[AArch64::HasV8_0rOps])
Str += "ARMv8r";
else {
@@ -4531,7 +4538,7 @@ bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
Mnemonic = Head;
// Handle condition codes for a branch mnemonic
- if (Head == "b" && Next != StringRef::npos) {
+ if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
Start = Next;
Next = Name.find('.', Start + 1);
Head = Name.slice(Start + 1, Next);
@@ -4862,6 +4869,177 @@ bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
}
}
+ // Check v8.8-A memops instructions.
+ switch (Inst.getOpcode()) {
+ case AArch64::CPYFP:
+ case AArch64::CPYFPWN:
+ case AArch64::CPYFPRN:
+ case AArch64::CPYFPN:
+ case AArch64::CPYFPWT:
+ case AArch64::CPYFPWTWN:
+ case AArch64::CPYFPWTRN:
+ case AArch64::CPYFPWTN:
+ case AArch64::CPYFPRT:
+ case AArch64::CPYFPRTWN:
+ case AArch64::CPYFPRTRN:
+ case AArch64::CPYFPRTN:
+ case AArch64::CPYFPT:
+ case AArch64::CPYFPTWN:
+ case AArch64::CPYFPTRN:
+ case AArch64::CPYFPTN:
+ case AArch64::CPYFM:
+ case AArch64::CPYFMWN:
+ case AArch64::CPYFMRN:
+ case AArch64::CPYFMN:
+ case AArch64::CPYFMWT:
+ case AArch64::CPYFMWTWN:
+ case AArch64::CPYFMWTRN:
+ case AArch64::CPYFMWTN:
+ case AArch64::CPYFMRT:
+ case AArch64::CPYFMRTWN:
+ case AArch64::CPYFMRTRN:
+ case AArch64::CPYFMRTN:
+ case AArch64::CPYFMT:
+ case AArch64::CPYFMTWN:
+ case AArch64::CPYFMTRN:
+ case AArch64::CPYFMTN:
+ case AArch64::CPYFE:
+ case AArch64::CPYFEWN:
+ case AArch64::CPYFERN:
+ case AArch64::CPYFEN:
+ case AArch64::CPYFEWT:
+ case AArch64::CPYFEWTWN:
+ case AArch64::CPYFEWTRN:
+ case AArch64::CPYFEWTN:
+ case AArch64::CPYFERT:
+ case AArch64::CPYFERTWN:
+ case AArch64::CPYFERTRN:
+ case AArch64::CPYFERTN:
+ case AArch64::CPYFET:
+ case AArch64::CPYFETWN:
+ case AArch64::CPYFETRN:
+ case AArch64::CPYFETN:
+ case AArch64::CPYP:
+ case AArch64::CPYPWN:
+ case AArch64::CPYPRN:
+ case AArch64::CPYPN:
+ case AArch64::CPYPWT:
+ case AArch64::CPYPWTWN:
+ case AArch64::CPYPWTRN:
+ case AArch64::CPYPWTN:
+ case AArch64::CPYPRT:
+ case AArch64::CPYPRTWN:
+ case AArch64::CPYPRTRN:
+ case AArch64::CPYPRTN:
+ case AArch64::CPYPT:
+ case AArch64::CPYPTWN:
+ case AArch64::CPYPTRN:
+ case AArch64::CPYPTN:
+ case AArch64::CPYM:
+ case AArch64::CPYMWN:
+ case AArch64::CPYMRN:
+ case AArch64::CPYMN:
+ case AArch64::CPYMWT:
+ case AArch64::CPYMWTWN:
+ case AArch64::CPYMWTRN:
+ case AArch64::CPYMWTN:
+ case AArch64::CPYMRT:
+ case AArch64::CPYMRTWN:
+ case AArch64::CPYMRTRN:
+ case AArch64::CPYMRTN:
+ case AArch64::CPYMT:
+ case AArch64::CPYMTWN:
+ case AArch64::CPYMTRN:
+ case AArch64::CPYMTN:
+ case AArch64::CPYE:
+ case AArch64::CPYEWN:
+ case AArch64::CPYERN:
+ case AArch64::CPYEN:
+ case AArch64::CPYEWT:
+ case AArch64::CPYEWTWN:
+ case AArch64::CPYEWTRN:
+ case AArch64::CPYEWTN:
+ case AArch64::CPYERT:
+ case AArch64::CPYERTWN:
+ case AArch64::CPYERTRN:
+ case AArch64::CPYERTN:
+ case AArch64::CPYET:
+ case AArch64::CPYETWN:
+ case AArch64::CPYETRN:
+ case AArch64::CPYETN: {
+ unsigned Xd_wb = Inst.getOperand(0).getReg();
+ unsigned Xs_wb = Inst.getOperand(1).getReg();
+ unsigned Xn_wb = Inst.getOperand(2).getReg();
+ unsigned Xd = Inst.getOperand(3).getReg();
+ unsigned Xs = Inst.getOperand(4).getReg();
+ unsigned Xn = Inst.getOperand(5).getReg();
+ if (Xd_wb != Xd)
+ return Error(Loc[0],
+ "invalid CPY instruction, Xd_wb and Xd do not match");
+ if (Xs_wb != Xs)
+ return Error(Loc[0],
+ "invalid CPY instruction, Xs_wb and Xs do not match");
+ if (Xn_wb != Xn)
+ return Error(Loc[0],
+ "invalid CPY instruction, Xn_wb and Xn do not match");
+ if (Xd == Xs)
+ return Error(Loc[0], "invalid CPY instruction, destination and source"
+ " registers are the same");
+ if (Xd == Xn)
+ return Error(Loc[0], "invalid CPY instruction, destination and size"
+ " registers are the same");
+ if (Xs == Xn)
+ return Error(Loc[0], "invalid CPY instruction, source and size"
+ " registers are the same");
+ break;
+ }
+ case AArch64::SETP:
+ case AArch64::SETPT:
+ case AArch64::SETPN:
+ case AArch64::SETPTN:
+ case AArch64::SETM:
+ case AArch64::SETMT:
+ case AArch64::SETMN:
+ case AArch64::SETMTN:
+ case AArch64::SETE:
+ case AArch64::SETET:
+ case AArch64::SETEN:
+ case AArch64::SETETN:
+ case AArch64::SETGP:
+ case AArch64::SETGPT:
+ case AArch64::SETGPN:
+ case AArch64::SETGPTN:
+ case AArch64::SETGM:
+ case AArch64::SETGMT:
+ case AArch64::SETGMN:
+ case AArch64::SETGMTN:
+ case AArch64::MOPSSETGE:
+ case AArch64::MOPSSETGET:
+ case AArch64::MOPSSETGEN:
+ case AArch64::MOPSSETGETN: {
+ unsigned Xd_wb = Inst.getOperand(0).getReg();
+ unsigned Xn_wb = Inst.getOperand(1).getReg();
+ unsigned Xd = Inst.getOperand(2).getReg();
+ unsigned Xn = Inst.getOperand(3).getReg();
+ unsigned Xm = Inst.getOperand(4).getReg();
+ if (Xd_wb != Xd)
+ return Error(Loc[0],
+ "invalid SET instruction, Xd_wb and Xd do not match");
+ if (Xn_wb != Xn)
+ return Error(Loc[0],
+ "invalid SET instruction, Xn_wb and Xn do not match");
+ if (Xd == Xn)
+ return Error(Loc[0], "invalid SET instruction, destination and size"
+ " registers are the same");
+ if (Xd == Xm)
+ return Error(Loc[0], "invalid SET instruction, destination and source"
+ " registers are the same");
+ if (Xn == Xm)
+ return Error(Loc[0], "invalid SET instruction, source and size"
+ " registers are the same");
+ break;
+ }
+ }
// Now check immediate ranges. Separate from the above as there is overlap
// in the instructions being checked and this keeps the nested conditionals
@@ -5931,9 +6109,11 @@ static void ExpandCryptoAEK(AArch64::ArchKind ArchKind,
case AArch64::ArchKind::ARMV8_5A:
case AArch64::ArchKind::ARMV8_6A:
case AArch64::ArchKind::ARMV8_7A:
+ case AArch64::ArchKind::ARMV8_8A:
case AArch64::ArchKind::ARMV9A:
case AArch64::ArchKind::ARMV9_1A:
case AArch64::ArchKind::ARMV9_2A:
+ case AArch64::ArchKind::ARMV9_3A:
case AArch64::ArchKind::ARMV8R:
RequestedExtensions.push_back("sm4");
RequestedExtensions.push_back("sha3");
@@ -5956,6 +6136,7 @@ static void ExpandCryptoAEK(AArch64::ArchKind ArchKind,
case AArch64::ArchKind::ARMV8_5A:
case AArch64::ArchKind::ARMV8_6A:
case AArch64::ArchKind::ARMV8_7A:
+ case AArch64::ArchKind::ARMV8_8A:
case AArch64::ArchKind::ARMV9A:
case AArch64::ArchKind::ARMV9_1A:
case AArch64::ArchKind::ARMV9_2A:
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
index 96d410e42be2..9ce00f76d9c7 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
@@ -238,6 +238,12 @@ static DecodeStatus DecodeSVEIncDecImm(MCInst &Inst, unsigned Imm,
uint64_t Addr, const void *Decoder);
static DecodeStatus DecodeSVCROp(MCInst &Inst, unsigned Imm, uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeCPYMemOpInstruction(MCInst &Inst, uint32_t insn,
+ uint64_t Addr,
+ const void *Decoder);
+static DecodeStatus DecodeSETMemOpInstruction(MCInst &Inst, uint32_t insn,
+ uint64_t Addr,
+ const void *Decoder);
static bool Check(DecodeStatus &Out, DecodeStatus In) {
switch (In) {
@@ -1842,3 +1848,52 @@ static DecodeStatus DecodeSVCROp(MCInst &Inst, unsigned Imm, uint64_t Address,
}
return Fail;
}
+
+static DecodeStatus DecodeCPYMemOpInstruction(MCInst &Inst, uint32_t insn,
+ uint64_t Addr,
+ const void *Decoder) {
+ unsigned Rd = fieldFromInstruction(insn, 0, 5);
+ unsigned Rs = fieldFromInstruction(insn, 16, 5);
+ unsigned Rn = fieldFromInstruction(insn, 5, 5);
+
+ // None of the registers may alias: if they do, then the instruction is not
+ // merely unpredictable but actually entirely unallocated.
+ if (Rd == Rs || Rs == Rn || Rd == Rn)
+ return MCDisassembler::Fail;
+
+ // All three register operands are written back, so they all appear
+ // twice in the operand list, once as outputs and once as inputs.
+ if (!DecodeGPR64commonRegisterClass(Inst, Rd, Addr, Decoder) ||
+ !DecodeGPR64commonRegisterClass(Inst, Rs, Addr, Decoder) ||
+ !DecodeGPR64RegisterClass(Inst, Rn, Addr, Decoder) ||
+ !DecodeGPR64commonRegisterClass(Inst, Rd, Addr, Decoder) ||
+ !DecodeGPR64commonRegisterClass(Inst, Rs, Addr, Decoder) ||
+ !DecodeGPR64RegisterClass(Inst, Rn, Addr, Decoder))
+ return MCDisassembler::Fail;
+
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeSETMemOpInstruction(MCInst &Inst, uint32_t insn,
+ uint64_t Addr,
+ const void *Decoder) {
+ unsigned Rd = fieldFromInstruction(insn, 0, 5);
+ unsigned Rm = fieldFromInstruction(insn, 16, 5);
+ unsigned Rn = fieldFromInstruction(insn, 5, 5);
+
+ // None of the registers may alias: if they do, then the instruction is not
+ // merely unpredictable but actually entirely unallocated.
+ if (Rd == Rm || Rm == Rn || Rd == Rn)
+ return MCDisassembler::Fail;
+
+ // Rd and Rn (not Rm) register operands are written back, so they appear
+ // twice in the operand list, once as outputs and once as inputs.
+ if (!DecodeGPR64commonRegisterClass(Inst, Rd, Addr, Decoder) ||
+ !DecodeGPR64RegisterClass(Inst, Rn, Addr, Decoder) ||
+ !DecodeGPR64commonRegisterClass(Inst, Rd, Addr, Decoder) ||
+ !DecodeGPR64RegisterClass(Inst, Rn, Addr, Decoder) ||
+ !DecodeGPR64RegisterClass(Inst, Rm, Addr, Decoder))
+ return MCDisassembler::Fail;
+
+ return MCDisassembler::Success;
+}
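For readers unfamiliar with the decoder helpers used above, fieldFromInstruction extracts a contiguous bit-field from the 32-bit encoding; the real helper is a template shared by the generated disassembler, but a rough stand-in looks like this:

    #include <cstdint>

    // Extract Width bits of Insn starting at bit Start (assumes Width < 32).
    static unsigned fieldFrom(uint32_t Insn, unsigned Start, unsigned Width) {
      return (Insn >> Start) & ((1u << Width) - 1u);
    }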
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
index ac08ee8ae8dd..097b93e4fcca 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
@@ -1112,6 +1112,7 @@ bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
return false;
}
+ Info.IsTailCall = CanTailCallOpt;
if (CanTailCallOpt)
return lowerTailCall(MIRBuilder, Info, OutArgs);
@@ -1179,7 +1180,7 @@ bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
if (!determineAndHandleAssignments(
UsingReturnedArg ? ReturnedArgHandler : Handler, Assigner, InArgs,
MIRBuilder, Info.CallConv, Info.IsVarArg,
- UsingReturnedArg ? OutArgs[0].Regs[0] : Register()))
+ UsingReturnedArg ? makeArrayRef(OutArgs[0].Regs) : None))
return false;
}
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.h b/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.h
index add0342c90fd..aafb1d19640a 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.h
@@ -24,9 +24,7 @@ namespace llvm {
class AArch64TargetLowering;
class CCValAssign;
-class DataLayout;
class MachineIRBuilder;
-class MachineRegisterInfo;
class Type;
class AArch64CallLowering: public CallLowering {
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index 3d9a626d3ac3..1f546ad50d57 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -18,7 +18,6 @@
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
-#include "AArch64GlobalISelUtils.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "llvm/ADT/Optional.h"
@@ -472,8 +471,8 @@ private:
AArch64InstructionSelector::AArch64InstructionSelector(
const AArch64TargetMachine &TM, const AArch64Subtarget &STI,
const AArch64RegisterBankInfo &RBI)
- : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
- TRI(*STI.getRegisterInfo()), RBI(RBI),
+ : TM(TM), STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()),
+ RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
@@ -3937,19 +3936,19 @@ static bool getLaneCopyOpcode(unsigned &CopyOpc, unsigned &ExtractSubReg,
// vector's elements.
switch (EltSize) {
case 8:
- CopyOpc = AArch64::CPYi8;
+ CopyOpc = AArch64::DUPi8;
ExtractSubReg = AArch64::bsub;
break;
case 16:
- CopyOpc = AArch64::CPYi16;
+ CopyOpc = AArch64::DUPi16;
ExtractSubReg = AArch64::hsub;
break;
case 32:
- CopyOpc = AArch64::CPYi32;
+ CopyOpc = AArch64::DUPi32;
ExtractSubReg = AArch64::ssub;
break;
case 64:
- CopyOpc = AArch64::CPYi64;
+ CopyOpc = AArch64::DUPi64;
ExtractSubReg = AArch64::dsub;
break;
default:
@@ -5469,8 +5468,8 @@ bool AArch64InstructionSelector::selectIntrinsic(MachineInstr &I,
// Insert the copy from LR/X30 into the entry block, before it can be
// clobbered by anything.
MFI.setReturnAddressIsTaken(true);
- MFReturnAddr = getFunctionLiveInPhysReg(MF, TII, AArch64::LR,
- AArch64::GPR64RegClass);
+ MFReturnAddr = getFunctionLiveInPhysReg(
+ MF, TII, AArch64::LR, AArch64::GPR64RegClass, I.getDebugLoc());
}
if (STI.hasPAuth()) {
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.h b/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.h
index 35456d95dc2b..e2c46f4b4c1f 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.h
@@ -21,7 +21,6 @@
namespace llvm {
-class LLVMContext;
class AArch64Subtarget;
/// This class provides the information for the target register banks.
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h
index 7274ae79f74a..225e0c8e55fc 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h
@@ -19,7 +19,6 @@
namespace llvm {
class MCStreamer;
-class Target;
class Triple;
struct AArch64MCAsmInfoDarwin : public MCAsmInfoDarwin {
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h
index 941226b83e44..66cb7a37a958 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h
@@ -30,11 +30,7 @@ class MCStreamer;
class MCSubtargetInfo;
class MCTargetOptions;
class MCTargetStreamer;
-class StringRef;
class Target;
-class Triple;
-class raw_ostream;
-class raw_pwrite_stream;
MCCodeEmitter *createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
const MCRegisterInfo &MRI,
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/SVEInstrFormats.td b/contrib/llvm-project/llvm/lib/Target/AArch64/SVEInstrFormats.td
index bb488cd7da32..574b22124957 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -334,6 +334,8 @@ multiclass sve_int_ptrue<bits<3> opc, string asm, SDPatternOperator op> {
def SDT_AArch64PTrue : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVT<1, i32>]>;
def AArch64ptrue : SDNode<"AArch64ISD::PTRUE", SDT_AArch64PTrue>;
+def SDT_AArch64PFalse : SDTypeProfile<1, 0, [SDTCisVec<0>, SDTCVecEltisVT<0, i1>]>;
+def AArch64pfalse : SDNode<"AArch64ISD::PFALSE", SDT_AArch64PFalse>;
let Predicates = [HasSVEorStreamingSVE] in {
defm PTRUE : sve_int_ptrue<0b000, "ptrue", AArch64ptrue>;
@@ -609,6 +611,15 @@ class sve_int_pfalse<bits<6> opc, string asm>
let isReMaterializable = 1;
}
+multiclass sve_int_pfalse<bits<6> opc, string asm> {
+ def NAME : sve_int_pfalse<opc, asm>;
+
+ def : Pat<(nxv16i1 (AArch64pfalse)), (!cast<Instruction>(NAME))>;
+ def : Pat<(nxv8i1 (AArch64pfalse)), (!cast<Instruction>(NAME))>;
+ def : Pat<(nxv4i1 (AArch64pfalse)), (!cast<Instruction>(NAME))>;
+ def : Pat<(nxv2i1 (AArch64pfalse)), (!cast<Instruction>(NAME))>;
+}
+
class sve_int_ptest<bits<6> opc, string asm>
: I<(outs), (ins PPRAny:$Pg, PPR8:$Pn),
asm, "\t$Pg, $Pn",
@@ -1622,6 +1633,18 @@ multiclass sve_int_pred_log<bits<4> opc, string asm, SDPatternOperator op,
!cast<Instruction>(NAME), PTRUE_D>;
}
+multiclass sve_int_pred_log_and<bits<4> opc, string asm, SDPatternOperator op> :
+ sve_int_pred_log<opc, asm, op> {
+ def : Pat<(nxv16i1 (and nxv16i1:$Op1, nxv16i1:$Op2)),
+ (!cast<Instruction>(NAME) $Op1, $Op1, $Op2)>;
+ def : Pat<(nxv8i1 (and nxv8i1:$Op1, nxv8i1:$Op2)),
+ (!cast<Instruction>(NAME) $Op1, $Op1, $Op2)>;
+ def : Pat<(nxv4i1 (and nxv4i1:$Op1, nxv4i1:$Op2)),
+ (!cast<Instruction>(NAME) $Op1, $Op1, $Op2)>;
+ def : Pat<(nxv2i1 (and nxv2i1:$Op1, nxv2i1:$Op2)),
+ (!cast<Instruction>(NAME) $Op1, $Op1, $Op2)>;
+}
+
//===----------------------------------------------------------------------===//
// SVE Logical Mask Immediate Group
//===----------------------------------------------------------------------===//
@@ -1708,6 +1731,9 @@ multiclass sve_int_dup_mask_imm<string asm> {
(!cast<Instruction>(NAME) ZPR32:$Zd, sve_preferred_logical_imm32:$imm), 6>;
def : InstAlias<"mov $Zd, $imm",
(!cast<Instruction>(NAME) ZPR64:$Zd, sve_preferred_logical_imm64:$imm), 5>;
+
+ def : Pat<(nxv2i64 (AArch64dup (i64 logical_imm64:$imm))),
+ (!cast<Instruction>(NAME) logical_imm64:$imm)>;
}
//===----------------------------------------------------------------------===//
@@ -4641,6 +4667,10 @@ multiclass SVE_SETCC_Pat<CondCode cc, CondCode invcc, ValueType predvt,
(cmp $Op1, $Op2, $Op3)>;
def : Pat<(predvt (AArch64setcc_z predvt:$Op1, intvt:$Op2, intvt:$Op3, invcc)),
(cmp $Op1, $Op3, $Op2)>;
+ def : Pat<(predvt (and predvt:$Pg, (AArch64setcc_z (predvt (AArch64ptrue 31)), intvt:$Op2, intvt:$Op3, cc))),
+ (cmp $Pg, $Op2, $Op3)>;
+ def : Pat<(predvt (and predvt:$Pg, (AArch64setcc_z (predvt (AArch64ptrue 31)), intvt:$Op2, intvt:$Op3, invcc))),
+ (cmp $Pg, $Op3, $Op2)>;
}
multiclass SVE_SETCC_Pat_With_Zero<CondCode cc, CondCode invcc, ValueType predvt,
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp
index 642080a0d40d..4a24162540a5 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp
@@ -40,10 +40,6 @@ using namespace llvm::PatternMatch;
#define DEBUG_TYPE "aarch64-sve-intrinsic-opts"
-namespace llvm {
-void initializeSVEIntrinsicOptsPass(PassRegistry &);
-}
-
namespace {
struct SVEIntrinsicOpts : public ModulePass {
static char ID; // Pass identification, replacement for typeid
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h b/contrib/llvm-project/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h
index caee2acd2606..5906a5d6b50b 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h
@@ -483,18 +483,20 @@ inline unsigned getNumElementsFromSVEPredPattern(unsigned Pattern) {
}
/// Return specific VL predicate pattern based on the number of elements.
-inline unsigned getSVEPredPatternFromNumElements(unsigned MinNumElts) {
+inline Optional<unsigned>
+getSVEPredPatternFromNumElements(unsigned MinNumElts) {
switch (MinNumElts) {
default:
- llvm_unreachable("unexpected element count for SVE predicate");
+ return None;
case 1:
- return AArch64SVEPredPattern::vl1;
case 2:
- return AArch64SVEPredPattern::vl2;
+ case 3:
case 4:
- return AArch64SVEPredPattern::vl4;
+ case 5:
+ case 6:
+ case 7:
case 8:
- return AArch64SVEPredPattern::vl8;
+ return MinNumElts;
case 16:
return AArch64SVEPredPattern::vl16;
case 32:
@@ -757,7 +759,6 @@ namespace AArch64 {
// <n x (M*P) x t> vector (such as index 1) are undefined.
static constexpr unsigned SVEBitsPerBlock = 128;
static constexpr unsigned SVEMaxBitsPerVector = 2048;
-const unsigned NeonBitsPerVector = 128;
} // end namespace AArch64
} // end namespace llvm
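With the change above, getSVEPredPatternFromNumElements no longer asserts on element counts that have no exact VL pattern, so callers must provide a fallback. A hypothetical caller sketch follows, assumed to be compiled inside the llvm namespace with AArch64BaseInfo.h included; the choice of fallback is an assumption, not something the patch prescribes.

    unsigned pickPredPattern(unsigned MinNumElts) {
      if (Optional<unsigned> Pat = getSVEPredPatternFromNumElements(MinNumElts))
        return *Pat;                       // exact vl1..vl256 pattern
      return AArch64SVEPredPattern::all;   // assumed fallback: all lanes active
    }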
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPU.td b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPU.td
index e606f0e8fc3c..806c0b18637a 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -610,12 +610,6 @@ def FeatureDsSrc2Insts : SubtargetFeature<"ds-src2-insts",
"Has ds_*_src2 instructions"
>;
-def FeatureRegisterBanking : SubtargetFeature<"register-banking",
- "HasRegisterBanking",
- "true",
- "Has register banking"
->;
-
def FeatureVOP3Literal : SubtargetFeature<"vop3-literal",
"HasVOP3Literal",
"true",
@@ -826,7 +820,7 @@ def FeatureGFX10 : GCNSubtargetFeatureGeneration<"GFX10",
FeatureSDWA, FeatureSDWAOmod, FeatureSDWAScalar, FeatureSDWASdst,
FeatureFlatInstOffsets, FeatureFlatGlobalInsts, FeatureFlatScratchInsts,
FeatureAddNoCarryInsts, FeatureFmaMixInsts, FeatureGFX8Insts,
- FeatureNoSdstCMPX, FeatureVscnt, FeatureRegisterBanking,
+ FeatureNoSdstCMPX, FeatureVscnt,
FeatureVOP3Literal, FeatureDPP8, FeatureExtendedImageInsts,
FeatureNoDataDepHazard, FeaturePkFmacF16Inst,
FeatureGFX10A16, FeatureSMemTimeInst, FeatureFastDenormalF32, FeatureG16,
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUAliasAnalysis.h b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUAliasAnalysis.h
index 22be014813b0..5ba9b2cd187e 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUAliasAnalysis.h
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUAliasAnalysis.h
@@ -26,7 +26,7 @@ class AMDGPUAAResult : public AAResultBase<AMDGPUAAResult> {
const DataLayout &DL;
public:
- explicit AMDGPUAAResult(const DataLayout &DL) : AAResultBase(), DL(DL) {}
+ explicit AMDGPUAAResult(const DataLayout &DL) : DL(DL) {}
AMDGPUAAResult(AMDGPUAAResult &&Arg)
: AAResultBase(std::move(Arg)), DL(Arg.DL) {}
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
index 2f1e7823f65c..cd084fd5440a 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -192,8 +192,20 @@ struct AMDGPUOutgoingArgHandler : public AMDGPUOutgoingValueHandler {
const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
- if (!SPReg)
- SPReg = MIRBuilder.buildCopy(PtrTy, MFI->getStackPtrOffsetReg()).getReg(0);
+ if (!SPReg) {
+ const GCNSubtarget &ST = MIRBuilder.getMF().getSubtarget<GCNSubtarget>();
+ if (ST.enableFlatScratch()) {
+ // The stack is accessed unswizzled, so we can use a regular copy.
+ SPReg = MIRBuilder.buildCopy(PtrTy,
+ MFI->getStackPtrOffsetReg()).getReg(0);
+ } else {
+ // The address we produce here, without knowing the use context, is going
+ // to be interpreted as a vector address, so we need to convert to a
+ // swizzled address.
+ SPReg = MIRBuilder.buildInstr(AMDGPU::G_AMDGPU_WAVE_ADDRESS, {PtrTy},
+ {MFI->getStackPtrOffsetReg()}).getReg(0);
+ }
+ }
auto OffsetReg = MIRBuilder.buildConstant(S32, Offset);
@@ -615,6 +627,13 @@ bool AMDGPUCallLowering::lowerFormalArguments(
CCInfo.AllocateReg(ImplicitBufferPtrReg);
}
+ // FIXME: This probably isn't defined for mesa
+ if (Info->hasFlatScratchInit() && !Subtarget.isAmdPalOS()) {
+ Register FlatScratchInitReg = Info->addFlatScratchInit(*TRI);
+ MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
+ CCInfo.AllocateReg(FlatScratchInitReg);
+ }
+
SmallVector<ArgInfo, 32> SplitArgs;
unsigned Idx = 0;
unsigned PSInputNum = 0;
@@ -879,13 +898,17 @@ bool AMDGPUCallLowering::passSpecialInputs(MachineIRBuilder &MIRBuilder,
Register InputReg;
if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo->WorkItemIDX &&
NeedWorkItemIDX) {
- InputReg = MRI.createGenericVirtualRegister(S32);
- LI->loadInputValue(InputReg, MIRBuilder, IncomingArgX,
- std::get<1>(WorkitemIDX), std::get<2>(WorkitemIDX));
+ if (ST.getMaxWorkitemID(MF.getFunction(), 0) != 0) {
+ InputReg = MRI.createGenericVirtualRegister(S32);
+ LI->loadInputValue(InputReg, MIRBuilder, IncomingArgX,
+ std::get<1>(WorkitemIDX), std::get<2>(WorkitemIDX));
+ } else {
+ InputReg = MIRBuilder.buildConstant(S32, 0).getReg(0);
+ }
}
if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo->WorkItemIDY &&
- NeedWorkItemIDY) {
+ NeedWorkItemIDY && ST.getMaxWorkitemID(MF.getFunction(), 1) != 0) {
Register Y = MRI.createGenericVirtualRegister(S32);
LI->loadInputValue(Y, MIRBuilder, IncomingArgY, std::get<1>(WorkitemIDY),
std::get<2>(WorkitemIDY));
@@ -895,7 +918,7 @@ bool AMDGPUCallLowering::passSpecialInputs(MachineIRBuilder &MIRBuilder,
}
if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo->WorkItemIDZ &&
- NeedWorkItemIDZ) {
+ NeedWorkItemIDZ && ST.getMaxWorkitemID(MF.getFunction(), 2) != 0) {
Register Z = MRI.createGenericVirtualRegister(S32);
LI->loadInputValue(Z, MIRBuilder, IncomingArgZ, std::get<1>(WorkitemIDZ),
std::get<2>(WorkitemIDZ));
@@ -904,16 +927,24 @@ bool AMDGPUCallLowering::passSpecialInputs(MachineIRBuilder &MIRBuilder,
InputReg = InputReg ? MIRBuilder.buildOr(S32, InputReg, Z).getReg(0) : Z;
}
- if (!InputReg && (NeedWorkItemIDX || NeedWorkItemIDY || NeedWorkItemIDZ)) {
+ if (!InputReg &&
+ (NeedWorkItemIDX || NeedWorkItemIDY || NeedWorkItemIDZ)) {
InputReg = MRI.createGenericVirtualRegister(S32);
-
- // Workitem ids are already packed, any of present incoming arguments will
- // carry all required fields.
- ArgDescriptor IncomingArg = ArgDescriptor::createArg(
- IncomingArgX ? *IncomingArgX :
+ if (!IncomingArgX && !IncomingArgY && !IncomingArgZ) {
+ // We're in a situation where the outgoing function requires the workitem
+ // ID, but the calling function does not have it (e.g a graphics function
+ // calling a C calling convention function). This is illegal, but we need
+ // to produce something.
+ MIRBuilder.buildUndef(InputReg);
+ } else {
+ // Workitem ids are already packed, any of present incoming arguments will
+ // carry all required fields.
+ ArgDescriptor IncomingArg = ArgDescriptor::createArg(
+ IncomingArgX ? *IncomingArgX :
IncomingArgY ? *IncomingArgY : *IncomingArgZ, ~0u);
- LI->loadInputValue(InputReg, MIRBuilder, &IncomingArg,
- &AMDGPU::VGPR_32RegClass, S32);
+ LI->loadInputValue(InputReg, MIRBuilder, &IncomingArg,
+ &AMDGPU::VGPR_32RegClass, S32);
+ }
}
if (OutgoingArg->isRegister()) {
@@ -1314,6 +1345,7 @@ bool AMDGPUCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
return false;
}
+ Info.IsTailCall = CanTailCallOpt;
if (CanTailCallOpt)
return lowerTailCall(MIRBuilder, Info, OutArgs);
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index a55729586b8d..1920684d8f1f 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -150,13 +150,13 @@ class AMDGPUCodeGenPrepare : public FunctionPass,
/// \returns The minimum number of bits needed to store the value of \Op as an
/// unsigned integer. Truncating to this size and then zero-extending to
- /// ScalarSize will not change the value.
- unsigned numBitsUnsigned(Value *Op, unsigned ScalarSize) const;
+ /// the original will not change the value.
+ unsigned numBitsUnsigned(Value *Op) const;
/// \returns The minimum number of bits needed to store the value of \Op as a
/// signed integer. Truncating to this size and then sign-extending to
- /// ScalarSize will not change the value.
- unsigned numBitsSigned(Value *Op, unsigned ScalarSize) const;
+ /// the original size will not change the value.
+ unsigned numBitsSigned(Value *Op) const;
/// Replace mul instructions with llvm.amdgcn.mul.u24 or llvm.amdgcn.mul.s24.
/// SelectionDAG has an issue where an and asserting the bits are known
@@ -445,17 +445,12 @@ bool AMDGPUCodeGenPrepare::promoteUniformBitreverseToI32(
return true;
}
-unsigned AMDGPUCodeGenPrepare::numBitsUnsigned(Value *Op,
- unsigned ScalarSize) const {
- KnownBits Known = computeKnownBits(Op, *DL, 0, AC);
- return ScalarSize - Known.countMinLeadingZeros();
+unsigned AMDGPUCodeGenPrepare::numBitsUnsigned(Value *Op) const {
+ return computeKnownBits(Op, *DL, 0, AC).countMaxActiveBits();
}
-unsigned AMDGPUCodeGenPrepare::numBitsSigned(Value *Op,
- unsigned ScalarSize) const {
- // In order for this to be a signed 24-bit value, bit 23, must
- // be a sign bit.
- return ScalarSize - ComputeNumSignBits(Op, *DL, 0, AC) + 1;
+unsigned AMDGPUCodeGenPrepare::numBitsSigned(Value *Op) const {
+ return ComputeMaxSignificantBits(Op, *DL, 0, AC);
}
static void extractValues(IRBuilder<> &Builder,
@@ -532,12 +527,12 @@ bool AMDGPUCodeGenPrepare::replaceMulWithMul24(BinaryOperator &I) const {
unsigned LHSBits = 0, RHSBits = 0;
bool IsSigned = false;
- if (ST->hasMulU24() && (LHSBits = numBitsUnsigned(LHS, Size)) <= 24 &&
- (RHSBits = numBitsUnsigned(RHS, Size)) <= 24) {
+ if (ST->hasMulU24() && (LHSBits = numBitsUnsigned(LHS)) <= 24 &&
+ (RHSBits = numBitsUnsigned(RHS)) <= 24) {
IsSigned = false;
- } else if (ST->hasMulI24() && (LHSBits = numBitsSigned(LHS, Size)) <= 24 &&
- (RHSBits = numBitsSigned(RHS, Size)) <= 24) {
+ } else if (ST->hasMulI24() && (LHSBits = numBitsSigned(LHS)) <= 24 &&
+ (RHSBits = numBitsSigned(RHS)) <= 24) {
IsSigned = true;
} else
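Context for the AMDGPUCodeGenPrepare.cpp hunks above: numBitsUnsigned/numBitsSigned now delegate to KnownBits::countMaxActiveBits() and ComputeMaxSignificantBits(), which fold the old "scalar size minus leading zeros / sign bits" arithmetic into one query. A rough standalone illustration of what "max active bits" means for the unsigned case, assuming only that some high bits are proven zero:

```cpp
#include <iostream>

// For an unsigned value whose high KnownLeadingZeros bits are proven zero, the
// number of bits that can actually carry data ("active bits") is the width
// minus those zeros -- the quantity the mul24 combine compares against 24.
unsigned maxActiveBits(unsigned BitWidth, unsigned KnownLeadingZeros) {
  return BitWidth - KnownLeadingZeros;
}

int main() {
  // A 32-bit value masked with 0xFFFF has at least 16 leading zero bits,
  // so at most 16 active bits -> eligible for the 24-bit multiply path.
  std::cout << maxActiveBits(32, 16) << '\n'; // 16
  std::cout << maxActiveBits(32, 4) << '\n';  // 28, too wide for mul24
}
```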
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
index 699c6c479455..3ac7c45b3275 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
@@ -331,8 +331,7 @@ void MetadataStreamerV2::emitKernelArg(const Argument &Arg) {
if (auto PtrTy = dyn_cast<PointerType>(Arg.getType())) {
if (PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
// FIXME: Should report this for all address spaces
- PointeeAlign = DL.getValueOrABITypeAlignment(Arg.getParamAlign(),
- PtrTy->getElementType());
+ PointeeAlign = Arg.getParamAlign().valueOrOne();
}
}
@@ -731,10 +730,8 @@ void MetadataStreamerV3::emitKernelArg(const Argument &Arg, unsigned &Offset,
// FIXME: Need to distinguish in memory alignment from pointer alignment.
if (auto PtrTy = dyn_cast<PointerType>(Ty)) {
- if (PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
- PointeeAlign = DL.getValueOrABITypeAlignment(Arg.getParamAlign(),
- PtrTy->getElementType());
- }
+ if (PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS)
+ PointeeAlign = Arg.getParamAlign().valueOrOne();
}
// There's no distinction between byval aggregates and raw aggregates.
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 54177564afbc..b9d0655feef7 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -51,7 +51,7 @@ unsigned AMDGPUTargetLowering::numBitsUnsigned(SDValue Op, SelectionDAG &DAG) {
unsigned AMDGPUTargetLowering::numBitsSigned(SDValue Op, SelectionDAG &DAG) {
// In order for this to be a signed 24-bit value, bit 23, must
// be a sign bit.
- return DAG.ComputeMinSignedBits(Op);
+ return DAG.ComputeMaxSignificantBits(Op);
}
AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
@@ -360,6 +360,8 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f16, Custom);
setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i16, Custom);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f16, Custom);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i16, Custom);
setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v3f32, Custom);
@@ -1408,6 +1410,11 @@ SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
Start != 1)
return Op;
+ if (((SrcVT == MVT::v8f16 && VT == MVT::v4f16) ||
+ (SrcVT == MVT::v8i16 && VT == MVT::v4i16)) &&
+ (Start == 0 || Start == 4))
+ return Op;
+
DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
VT.getVectorNumElements());
@@ -4626,11 +4633,12 @@ void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
RHSKnown = RHSKnown.trunc(24);
if (Opc == AMDGPUISD::MUL_I24) {
- unsigned LHSValBits = 24 - LHSKnown.countMinSignBits();
- unsigned RHSValBits = 24 - RHSKnown.countMinSignBits();
- unsigned MaxValBits = std::min(LHSValBits + RHSValBits, 32u);
- if (MaxValBits >= 32)
+ unsigned LHSValBits = LHSKnown.countMaxSignificantBits();
+ unsigned RHSValBits = RHSKnown.countMaxSignificantBits();
+ unsigned MaxValBits = LHSValBits + RHSValBits;
+ if (MaxValBits > 32)
break;
+ unsigned SignBits = 32 - MaxValBits + 1;
bool LHSNegative = LHSKnown.isNegative();
bool LHSNonNegative = LHSKnown.isNonNegative();
bool LHSPositive = LHSKnown.isStrictlyPositive();
@@ -4639,16 +4647,16 @@ void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
bool RHSPositive = RHSKnown.isStrictlyPositive();
if ((LHSNonNegative && RHSNonNegative) || (LHSNegative && RHSNegative))
- Known.Zero.setHighBits(32 - MaxValBits);
+ Known.Zero.setHighBits(SignBits);
else if ((LHSNegative && RHSPositive) || (LHSPositive && RHSNegative))
- Known.One.setHighBits(32 - MaxValBits);
+ Known.One.setHighBits(SignBits);
} else {
- unsigned LHSValBits = 24 - LHSKnown.countMinLeadingZeros();
- unsigned RHSValBits = 24 - RHSKnown.countMinLeadingZeros();
- unsigned MaxValBits = std::min(LHSValBits + RHSValBits, 32u);
+ unsigned LHSValBits = LHSKnown.countMaxActiveBits();
+ unsigned RHSValBits = RHSKnown.countMaxActiveBits();
+ unsigned MaxValBits = LHSValBits + RHSValBits;
if (MaxValBits >= 32)
break;
- Known.Zero.setHighBits(32 - MaxValBits);
+ Known.Zero.setBitsFrom(MaxValBits);
}
break;
}
@@ -4904,7 +4912,8 @@ AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
}
}
-bool AMDGPUTargetLowering::isConstantUnsignedBitfieldExtactLegal(
+bool AMDGPUTargetLowering::isConstantUnsignedBitfieldExtractLegal(
unsigned Opc, LLT Ty1, LLT Ty2) const {
- return Ty1 == Ty2 && (Ty1 == LLT::scalar(32) || Ty1 == LLT::scalar(64));
+ return (Ty1 == LLT::scalar(32) || Ty1 == LLT::scalar(64)) &&
+ Ty2 == LLT::scalar(32);
}
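The computeKnownBitsForTargetNode hunk above replaces the clamped "24 minus leading zeros" arithmetic with countMaxActiveBits/countMaxSignificantBits and then sets the high bits from the summed widths. A small self-contained check of the unsigned bound it relies on (the product of an a-active-bit and a b-active-bit value fits in a + b bits):

```cpp
#include <cstdint>
#include <iostream>

int main() {
  const uint64_t MaxA = (1u << 10) - 1;  // widest value with 10 active bits
  const uint64_t MaxB = (1u << 14) - 1;  // widest value with 14 active bits
  const uint64_t Prod = MaxA * MaxB;     // worst-case product
  // 10 + 14 = 24, so the product must stay below 2^24; prints 1.
  std::cout << Prod << " < 2^24 ? " << (Prod < (1u << 24)) << '\n';
}
```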
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
index daaca8737c5d..b41506157b68 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -335,8 +335,8 @@ public:
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;
- bool isConstantUnsignedBitfieldExtactLegal(unsigned Opc, LLT Ty1,
- LLT Ty2) const override;
+ bool isConstantUnsignedBitfieldExtractLegal(unsigned Opc, LLT Ty1,
+ LLT Ty2) const override;
};
namespace AMDGPUISD {
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index db84b8766924..4f1d700bcd84 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -58,24 +58,37 @@ static APFloat fmed3AMDGCN(const APFloat &Src0, const APFloat &Src1,
// Check if a value can be converted to a 16-bit value without losing
// precision.
-static bool canSafelyConvertTo16Bit(Value &V) {
+// The value is expected to be either a float (IsFloat = true) or an unsigned
+// integer (IsFloat = false).
+static bool canSafelyConvertTo16Bit(Value &V, bool IsFloat) {
Type *VTy = V.getType();
if (VTy->isHalfTy() || VTy->isIntegerTy(16)) {
// The value is already 16-bit, so we don't want to convert to 16-bit again!
return false;
}
- if (ConstantFP *ConstFloat = dyn_cast<ConstantFP>(&V)) {
- // We need to check that if we cast the index down to a half, we do not lose
- // precision.
- APFloat FloatValue(ConstFloat->getValueAPF());
- bool LosesInfo = true;
- FloatValue.convert(APFloat::IEEEhalf(), APFloat::rmTowardZero, &LosesInfo);
- return !LosesInfo;
+ if (IsFloat) {
+ if (ConstantFP *ConstFloat = dyn_cast<ConstantFP>(&V)) {
+ // We need to check that if we cast the index down to a half, we do not
+ // lose precision.
+ APFloat FloatValue(ConstFloat->getValueAPF());
+ bool LosesInfo = true;
+ FloatValue.convert(APFloat::IEEEhalf(), APFloat::rmTowardZero,
+ &LosesInfo);
+ return !LosesInfo;
+ }
+ } else {
+ if (ConstantInt *ConstInt = dyn_cast<ConstantInt>(&V)) {
+ // We need to check that if we cast the index down to an i16, we do not
+ // lose precision.
+ APInt IntValue(ConstInt->getValue());
+ return IntValue.getActiveBits() <= 16;
+ }
}
+
Value *CastSrc;
- if (match(&V, m_FPExt(PatternMatch::m_Value(CastSrc))) ||
- match(&V, m_SExt(PatternMatch::m_Value(CastSrc))) ||
- match(&V, m_ZExt(PatternMatch::m_Value(CastSrc)))) {
+ bool IsExt = IsFloat ? match(&V, m_FPExt(PatternMatch::m_Value(CastSrc)))
+ : match(&V, m_ZExt(PatternMatch::m_Value(CastSrc)));
+ if (IsExt) {
Type *CastSrcTy = CastSrc->getType();
if (CastSrcTy->isHalfTy() || CastSrcTy->isIntegerTy(16))
return true;
@@ -97,13 +110,116 @@ static Value *convertTo16Bit(Value &V, InstCombiner::BuilderTy &Builder) {
llvm_unreachable("Should never be called!");
}
+/// Applies Function(II.Args, II.ArgTys) and replaces the intrinsic call with
+/// the modified arguments.
+static Optional<Instruction *> modifyIntrinsicCall(
+ IntrinsicInst &II, unsigned NewIntr, InstCombiner &IC,
+ std::function<void(SmallVectorImpl<Value *> &, SmallVectorImpl<Type *> &)>
+ Func) {
+ SmallVector<Type *, 4> ArgTys;
+ if (!Intrinsic::getIntrinsicSignature(II.getCalledFunction(), ArgTys))
+ return None;
+
+ SmallVector<Value *, 8> Args(II.args());
+
+ // Modify arguments and types
+ Func(Args, ArgTys);
+
+ Function *I = Intrinsic::getDeclaration(II.getModule(), NewIntr, ArgTys);
+
+ CallInst *NewCall = IC.Builder.CreateCall(I, Args);
+ NewCall->takeName(&II);
+ NewCall->copyMetadata(II);
+ if (isa<FPMathOperator>(NewCall))
+ NewCall->copyFastMathFlags(&II);
+
+ // Erase and replace uses
+ if (!II.getType()->isVoidTy())
+ IC.replaceInstUsesWith(II, NewCall);
+ return IC.eraseInstFromFunction(II);
+}
+
static Optional<Instruction *>
simplifyAMDGCNImageIntrinsic(const GCNSubtarget *ST,
const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr,
IntrinsicInst &II, InstCombiner &IC) {
+ // Optimize _L to _LZ when _L is zero
+ if (const auto *LZMappingInfo =
+ AMDGPU::getMIMGLZMappingInfo(ImageDimIntr->BaseOpcode)) {
+ if (auto *ConstantLod =
+ dyn_cast<ConstantFP>(II.getOperand(ImageDimIntr->LodIndex))) {
+ if (ConstantLod->isZero() || ConstantLod->isNegative()) {
+ const AMDGPU::ImageDimIntrinsicInfo *NewImageDimIntr =
+ AMDGPU::getImageDimIntrinsicByBaseOpcode(LZMappingInfo->LZ,
+ ImageDimIntr->Dim);
+ return modifyIntrinsicCall(
+ II, NewImageDimIntr->Intr, IC, [&](auto &Args, auto &ArgTys) {
+ Args.erase(Args.begin() + ImageDimIntr->LodIndex);
+ });
+ }
+ }
+ }
+
+ // Optimize _mip away, when 'lod' is zero
+ if (const auto *MIPMappingInfo =
+ AMDGPU::getMIMGMIPMappingInfo(ImageDimIntr->BaseOpcode)) {
+ if (auto *ConstantMip =
+ dyn_cast<ConstantInt>(II.getOperand(ImageDimIntr->MipIndex))) {
+ if (ConstantMip->isZero()) {
+ const AMDGPU::ImageDimIntrinsicInfo *NewImageDimIntr =
+ AMDGPU::getImageDimIntrinsicByBaseOpcode(MIPMappingInfo->NONMIP,
+ ImageDimIntr->Dim);
+ return modifyIntrinsicCall(
+ II, NewImageDimIntr->Intr, IC, [&](auto &Args, auto &ArgTys) {
+ Args.erase(Args.begin() + ImageDimIntr->MipIndex);
+ });
+ }
+ }
+ }
+
+ // Optimize _bias away when 'bias' is zero
+ if (const auto *BiasMappingInfo =
+ AMDGPU::getMIMGBiasMappingInfo(ImageDimIntr->BaseOpcode)) {
+ if (auto *ConstantBias =
+ dyn_cast<ConstantFP>(II.getOperand(ImageDimIntr->BiasIndex))) {
+ if (ConstantBias->isZero()) {
+ const AMDGPU::ImageDimIntrinsicInfo *NewImageDimIntr =
+ AMDGPU::getImageDimIntrinsicByBaseOpcode(BiasMappingInfo->NoBias,
+ ImageDimIntr->Dim);
+ return modifyIntrinsicCall(
+ II, NewImageDimIntr->Intr, IC, [&](auto &Args, auto &ArgTys) {
+ Args.erase(Args.begin() + ImageDimIntr->BiasIndex);
+ ArgTys.erase(ArgTys.begin() + ImageDimIntr->BiasTyArg);
+ });
+ }
+ }
+ }
+
+ // Optimize _offset away when 'offset' is zero
+ if (const auto *OffsetMappingInfo =
+ AMDGPU::getMIMGOffsetMappingInfo(ImageDimIntr->BaseOpcode)) {
+ if (auto *ConstantOffset =
+ dyn_cast<ConstantInt>(II.getOperand(ImageDimIntr->OffsetIndex))) {
+ if (ConstantOffset->isZero()) {
+ const AMDGPU::ImageDimIntrinsicInfo *NewImageDimIntr =
+ AMDGPU::getImageDimIntrinsicByBaseOpcode(
+ OffsetMappingInfo->NoOffset, ImageDimIntr->Dim);
+ return modifyIntrinsicCall(
+ II, NewImageDimIntr->Intr, IC, [&](auto &Args, auto &ArgTys) {
+ Args.erase(Args.begin() + ImageDimIntr->OffsetIndex);
+ });
+ }
+ }
+ }
+
+ // Try to use A16 or G16
if (!ST->hasA16() && !ST->hasG16())
return None;
+ // Address is interpreted as float if the instruction has a sampler or as
+ // unsigned int if there is no sampler.
+ bool HasSampler =
+ AMDGPU::getMIMGBaseOpcodeInfo(ImageDimIntr->BaseOpcode)->Sampler;
bool FloatCoord = false;
// true means derivatives can be converted to 16 bit, coordinates not
bool OnlyDerivatives = false;
@@ -112,7 +228,7 @@ simplifyAMDGCNImageIntrinsic(const GCNSubtarget *ST,
OperandIndex < ImageDimIntr->VAddrEnd; OperandIndex++) {
Value *Coord = II.getOperand(OperandIndex);
// If the values are not derived from 16-bit values, we cannot optimize.
- if (!canSafelyConvertTo16Bit(*Coord)) {
+ if (!canSafelyConvertTo16Bit(*Coord, HasSampler)) {
if (OperandIndex < ImageDimIntr->CoordStart ||
ImageDimIntr->GradientStart == ImageDimIntr->CoordStart) {
return None;
@@ -127,43 +243,50 @@ simplifyAMDGCNImageIntrinsic(const GCNSubtarget *ST,
FloatCoord = Coord->getType()->isFloatingPointTy();
}
- if (OnlyDerivatives) {
- if (!ST->hasG16())
- return None;
- } else {
- if (!ST->hasA16())
- OnlyDerivatives = true; // Only supports G16
+ if (!OnlyDerivatives && !ST->hasA16())
+ OnlyDerivatives = true; // Only supports G16
+
+ // Check if there is a bias parameter and if it can be converted to f16
+ if (!OnlyDerivatives && ImageDimIntr->NumBiasArgs != 0) {
+ Value *Bias = II.getOperand(ImageDimIntr->BiasIndex);
+ assert(HasSampler &&
+ "Only image instructions with a sampler can have a bias");
+ if (!canSafelyConvertTo16Bit(*Bias, HasSampler))
+ OnlyDerivatives = true;
}
+ if (OnlyDerivatives && (!ST->hasG16() || ImageDimIntr->GradientStart ==
+ ImageDimIntr->CoordStart))
+ return None;
+
Type *CoordType = FloatCoord ? Type::getHalfTy(II.getContext())
: Type::getInt16Ty(II.getContext());
- SmallVector<Type *, 4> ArgTys;
- if (!Intrinsic::getIntrinsicSignature(II.getCalledFunction(), ArgTys))
- return None;
-
- ArgTys[ImageDimIntr->GradientTyArg] = CoordType;
- if (!OnlyDerivatives)
- ArgTys[ImageDimIntr->CoordTyArg] = CoordType;
- Function *I =
- Intrinsic::getDeclaration(II.getModule(), II.getIntrinsicID(), ArgTys);
+ return modifyIntrinsicCall(
+ II, II.getIntrinsicID(), IC, [&](auto &Args, auto &ArgTys) {
+ ArgTys[ImageDimIntr->GradientTyArg] = CoordType;
+ if (!OnlyDerivatives) {
+ ArgTys[ImageDimIntr->CoordTyArg] = CoordType;
- SmallVector<Value *, 8> Args(II.args());
+ // Change the bias type
+ if (ImageDimIntr->NumBiasArgs != 0)
+ ArgTys[ImageDimIntr->BiasTyArg] = Type::getHalfTy(II.getContext());
+ }
- unsigned EndIndex =
- OnlyDerivatives ? ImageDimIntr->CoordStart : ImageDimIntr->VAddrEnd;
- for (unsigned OperandIndex = ImageDimIntr->GradientStart;
- OperandIndex < EndIndex; OperandIndex++) {
- Args[OperandIndex] =
- convertTo16Bit(*II.getOperand(OperandIndex), IC.Builder);
- }
+ unsigned EndIndex =
+ OnlyDerivatives ? ImageDimIntr->CoordStart : ImageDimIntr->VAddrEnd;
+ for (unsigned OperandIndex = ImageDimIntr->GradientStart;
+ OperandIndex < EndIndex; OperandIndex++) {
+ Args[OperandIndex] =
+ convertTo16Bit(*II.getOperand(OperandIndex), IC.Builder);
+ }
- CallInst *NewCall = IC.Builder.CreateCall(I, Args);
- NewCall->takeName(&II);
- NewCall->copyMetadata(II);
- if (isa<FPMathOperator>(NewCall))
- NewCall->copyFastMathFlags(&II);
- return IC.replaceInstUsesWith(II, NewCall);
+ // Convert the bias
+ if (!OnlyDerivatives && ImageDimIntr->NumBiasArgs != 0) {
+ Value *Bias = II.getOperand(ImageDimIntr->BiasIndex);
+ Args[ImageDimIntr->BiasIndex] = convertTo16Bit(*Bias, IC.Builder);
+ }
+ });
}
bool GCNTTIImpl::canSimplifyLegacyMulToMul(const Value *Op0, const Value *Op1,
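The modifyIntrinsicCall helper introduced above centralizes the "clone the image intrinsic with edited arguments and argument types" boilerplate behind a callback, so each _L/_mip/_bias/_offset fold only states which operand to drop. A standalone sketch of that shape, using plain std::function and made-up types rather than the LLVM IR classes:

```cpp
#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Sketch of the callback-based rewrite: the shared helper owns the mechanics
// (copy args, rebuild the call) while each optimisation supplies a lambda
// that edits the argument list.
struct Call { std::string Name; std::vector<int> Args; };

Call rewriteCall(const Call &Old, std::string NewName,
                 const std::function<void(std::vector<int> &)> &Edit) {
  std::vector<int> Args = Old.Args; // copy, as the helper does with II.args()
  Edit(Args);                       // optimisation-specific tweak
  return Call{std::move(NewName), std::move(Args)};
}

int main() {
  Call Sample{"image_sample_l", {7, 8, 0 /*lod*/}};
  // "_L with zero lod -> _LZ": drop the trailing lod operand.
  Call LZ = rewriteCall(Sample, "image_sample_lz",
                        [](std::vector<int> &A) { A.pop_back(); });
  std::cout << LZ.Name << " has " << LZ.Args.size() << " args\n"; // 2
}
```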
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h
index b1263618c5db..e7ee36447682 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h
@@ -20,9 +20,6 @@
namespace llvm {
class GCNSubtarget;
-class MachineFunction;
-class MachineInstr;
-class MachineInstrBuilder;
class MachineMemOperand;
class AMDGPUInstrInfo {
@@ -52,6 +49,9 @@ struct ImageDimIntrinsicInfo {
unsigned BaseOpcode;
MIMGDim Dim;
+ uint8_t NumOffsetArgs;
+ uint8_t NumBiasArgs;
+ uint8_t NumZCompareArgs;
uint8_t NumGradients;
uint8_t NumDmask;
uint8_t NumData;
@@ -60,6 +60,9 @@ struct ImageDimIntrinsicInfo {
uint8_t DMaskIndex;
uint8_t VAddrStart;
+ uint8_t OffsetIndex;
+ uint8_t BiasIndex;
+ uint8_t ZCompareIndex;
uint8_t GradientStart;
uint8_t CoordStart;
uint8_t LodIndex;
@@ -71,6 +74,7 @@ struct ImageDimIntrinsicInfo {
uint8_t TexFailCtrlIndex;
uint8_t CachePolicyIndex;
+ uint8_t BiasTyArg;
uint8_t GradientTyArg;
uint8_t CoordTyArg;
};
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index e16bead81b65..b7d0f0580cda 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -46,8 +46,7 @@ static cl::opt<bool> AllowRiskySelect(
AMDGPUInstructionSelector::AMDGPUInstructionSelector(
const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
const AMDGPUTargetMachine &TM)
- : InstructionSelector(), TII(*STI.getInstrInfo()),
- TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
+ : TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
STI(STI),
EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
#define GET_GLOBALISEL_PREDICATES_INIT
@@ -1103,7 +1102,18 @@ bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const {
const DebugLoc &DL = I.getDebugLoc();
Register SrcReg = I.getOperand(2).getReg();
unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
+
auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());
+ if (!ICmpInst::isIntPredicate(static_cast<ICmpInst::Predicate>(Pred))) {
+ MachineInstr *ICmp =
+ BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Dst);
+
+ if (!RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
+ *TRI.getBoolRC(), *MRI))
+ return false;
+ I.eraseFromParent();
+ return true;
+ }
int Opcode = getV_CMPOpcode(Pred, Size);
if (Opcode == -1)
@@ -1234,7 +1244,7 @@ bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {
// Get the return address reg and mark it as an implicit live-in
Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
- AMDGPU::SReg_64RegClass);
+ AMDGPU::SReg_64RegClass, DL);
BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
.addReg(LiveIn);
I.eraseFromParent();
@@ -1494,9 +1504,9 @@ static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
if (TexFailCtrl)
IsTexFail = true;
- TFE = (TexFailCtrl & 0x1) ? 1 : 0;
+ TFE = (TexFailCtrl & 0x1) ? true : false;
TexFailCtrl &= ~(uint64_t)0x1;
- LWE = (TexFailCtrl & 0x2) ? 1 : 0;
+ LWE = (TexFailCtrl & 0x2) ? true : false;
TexFailCtrl &= ~(uint64_t)0x2;
return TexFailCtrl == 0;
@@ -1511,10 +1521,6 @@ bool AMDGPUInstructionSelector::selectImageIntrinsic(
AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
- const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
- AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
- const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
- AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
unsigned IntrOpcode = Intr->BaseOpcode;
const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI);
@@ -1523,7 +1529,8 @@ bool AMDGPUInstructionSelector::selectImageIntrinsic(
Register VDataIn, VDataOut;
LLT VDataTy;
int NumVDataDwords = -1;
- bool IsD16 = false;
+ bool IsD16 = MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16 ||
+ MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16;
bool Unorm;
if (!BaseOpcode->Sampler)
@@ -1572,16 +1579,6 @@ bool AMDGPUInstructionSelector::selectImageIntrinsic(
DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm();
DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
- // One memoperand is mandatory, except for getresinfo.
- // FIXME: Check this in verifier.
- if (!MI.memoperands_empty()) {
- const MachineMemOperand *MMO = *MI.memoperands_begin();
-
- // Infer d16 from the memory size, as the register type will be mangled by
- // unpacked subtargets, or by TFE.
- IsD16 = ((8 * MMO->getSize()) / DMaskLanes) < 32;
- }
-
if (BaseOpcode->Store) {
VDataIn = MI.getOperand(1).getReg();
VDataTy = MRI->getType(VDataIn);
@@ -1596,26 +1593,6 @@ bool AMDGPUInstructionSelector::selectImageIntrinsic(
}
}
- // Optimize _L to _LZ when _L is zero
- if (LZMappingInfo) {
- // The legalizer replaced the register with an immediate 0 if we need to
- // change the opcode.
- const MachineOperand &Lod = MI.getOperand(ArgOffset + Intr->LodIndex);
- if (Lod.isImm()) {
- assert(Lod.getImm() == 0);
- IntrOpcode = LZMappingInfo->LZ; // set new opcode to _lz variant of _l
- }
- }
-
- // Optimize _mip away, when 'lod' is zero
- if (MIPMappingInfo) {
- const MachineOperand &Lod = MI.getOperand(ArgOffset + Intr->MipIndex);
- if (Lod.isImm()) {
- assert(Lod.getImm() == 0);
- IntrOpcode = MIPMappingInfo->NONMIP; // set new opcode to variant without _mip
- }
- }
-
// Set G16 opcode
if (IsG16 && !IsA16) {
const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
@@ -2562,6 +2539,8 @@ bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
Register MaskReg = I.getOperand(2).getReg();
LLT Ty = MRI->getType(DstReg);
LLT MaskTy = MRI->getType(MaskReg);
+ MachineBasicBlock *BB = I.getParent();
+ const DebugLoc &DL = I.getDebugLoc();
const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
@@ -2570,6 +2549,24 @@ bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
if (DstRB != SrcRB) // Should only happen for hand written MIR.
return false;
+ // Try to avoid emitting a bit operation when we only need to touch half of
+ // the 64-bit pointer.
+ APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zextOrSelf(64);
+ const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
+ const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
+
+ const bool CanCopyLow32 = (MaskOnes & MaskLo32) == MaskLo32;
+ const bool CanCopyHi32 = (MaskOnes & MaskHi32) == MaskHi32;
+
+ if (!IsVGPR && Ty.getSizeInBits() == 64 &&
+ !CanCopyLow32 && !CanCopyHi32) {
+ auto MIB = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_AND_B64), DstReg)
+ .addReg(SrcReg)
+ .addReg(MaskReg);
+ I.eraseFromParent();
+ return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
+ }
+
unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
const TargetRegisterClass &RegRC
= IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
@@ -2586,8 +2583,6 @@ bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
!RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI))
return false;
- MachineBasicBlock *BB = I.getParent();
- const DebugLoc &DL = I.getDebugLoc();
if (Ty.getSizeInBits() == 32) {
assert(MaskTy.getSizeInBits() == 32 &&
"ptrmask should have been narrowed during legalize");
@@ -2610,13 +2605,7 @@ bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
Register MaskedLo, MaskedHi;
- // Try to avoid emitting a bit operation when we only need to touch half of
- // the 64-bit pointer.
- APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zextOrSelf(64);
-
- const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
- const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
- if ((MaskOnes & MaskLo32) == MaskLo32) {
+ if (CanCopyLow32) {
// If all the bits in the low half are 1, we only need a copy for it.
MaskedLo = LoReg;
} else {
@@ -2631,7 +2620,7 @@ bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
.addReg(MaskLo);
}
- if ((MaskOnes & MaskHi32) == MaskHi32) {
+ if (CanCopyHi32) {
// If all the bits in the high half are 1, we only need a copy for it.
MaskedHi = HiReg;
} else {
@@ -3123,6 +3112,33 @@ bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const{
return true;
}
+bool AMDGPUInstructionSelector::selectWaveAddress(MachineInstr &MI) const {
+ Register DstReg = MI.getOperand(0).getReg();
+ Register SrcReg = MI.getOperand(1).getReg();
+ const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
+ const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
+ MachineBasicBlock *MBB = MI.getParent();
+ const DebugLoc &DL = MI.getDebugLoc();
+
+ if (IsVALU) {
+ BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
+ .addImm(Subtarget->getWavefrontSizeLog2())
+ .addReg(SrcReg);
+ } else {
+ BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
+ .addReg(SrcReg)
+ .addImm(Subtarget->getWavefrontSizeLog2());
+ }
+
+ const TargetRegisterClass &RC =
+ IsVALU ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
+ if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
+ return false;
+
+ MI.eraseFromParent();
+ return true;
+}
+
bool AMDGPUInstructionSelector::select(MachineInstr &I) {
if (I.isPHI())
return selectPHI(I);
@@ -3236,7 +3252,9 @@ bool AMDGPUInstructionSelector::select(MachineInstr &I) {
case TargetOpcode::G_SHUFFLE_VECTOR:
return selectG_SHUFFLE_VECTOR(I);
case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
- case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE: {
+ case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16:
+ case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE:
+ case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: {
const AMDGPU::ImageDimIntrinsicInfo *Intr
= AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
assert(Intr && "not an image intrinsic with image pseudo");
@@ -3252,6 +3270,8 @@ bool AMDGPUInstructionSelector::select(MachineInstr &I) {
case AMDGPU::G_SI_CALL:
I.setDesc(TII.get(AMDGPU::SI_CALL));
return true;
+ case AMDGPU::G_AMDGPU_WAVE_ADDRESS:
+ return selectWaveAddress(I);
default:
return selectImpl(I, *CoverageInfo);
}
@@ -3896,20 +3916,59 @@ bool AMDGPUInstructionSelector::isUnneededShiftMask(const MachineInstr &MI,
return (LHSKnownZeros | *RHS).countTrailingOnes() >= ShAmtBits;
}
+// Return the wave level SGPR base address if this is a wave address.
+static Register getWaveAddress(const MachineInstr *Def) {
+ return Def->getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS
+ ? Def->getOperand(1).getReg()
+ : Register();
+}
+
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFScratchOffset(
MachineOperand &Root) const {
- MachineInstr *MI = Root.getParent();
- MachineBasicBlock *MBB = MI->getParent();
+ Register Reg = Root.getReg();
+ const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
+
+ const MachineInstr *Def = MRI->getVRegDef(Reg);
+ if (Register WaveBase = getWaveAddress(Def)) {
+ return {{
+ [=](MachineInstrBuilder &MIB) { // rsrc
+ MIB.addReg(Info->getScratchRSrcReg());
+ },
+ [=](MachineInstrBuilder &MIB) { // soffset
+ MIB.addReg(WaveBase);
+ },
+ [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // offset
+ }};
+ }
int64_t Offset = 0;
+
+ // FIXME: Copy check is a hack
+ Register BasePtr;
+ if (mi_match(Reg, *MRI, m_GPtrAdd(m_Reg(BasePtr), m_Copy(m_ICst(Offset))))) {
+ if (!SIInstrInfo::isLegalMUBUFImmOffset(Offset))
+ return {};
+ const MachineInstr *BasePtrDef = MRI->getVRegDef(BasePtr);
+ Register WaveBase = getWaveAddress(BasePtrDef);
+ if (!WaveBase)
+ return {};
+
+ return {{
+ [=](MachineInstrBuilder &MIB) { // rsrc
+ MIB.addReg(Info->getScratchRSrcReg());
+ },
+ [=](MachineInstrBuilder &MIB) { // soffset
+ MIB.addReg(WaveBase);
+ },
+ [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
+ }};
+ }
+
if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
!SIInstrInfo::isLegalMUBUFImmOffset(Offset))
return {};
- const MachineFunction *MF = MBB->getParent();
- const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
-
return {{
[=](MachineInstrBuilder &MIB) { // rsrc
MIB.addReg(Info->getScratchRSrcReg());
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
index 26996e42af53..42095332d11a 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
@@ -30,7 +30,6 @@ namespace AMDGPU {
struct ImageDimIntrinsicInfo;
}
-class AMDGPUInstrInfo;
class AMDGPURegisterBankInfo;
class AMDGPUTargetMachine;
class BlockFrequencyInfo;
@@ -42,7 +41,6 @@ class MachineOperand;
class MachineRegisterInfo;
class RegisterBank;
class SIInstrInfo;
-class SIMachineFunctionInfo;
class SIRegisterInfo;
class TargetRegisterClass;
@@ -147,6 +145,7 @@ private:
bool selectGlobalAtomicFadd(MachineInstr &I, MachineOperand &AddrOp,
MachineOperand &DataOp) const;
bool selectBVHIntrinsic(MachineInstr &I) const;
+ bool selectWaveAddress(MachineInstr &I) const;
std::pair<Register, unsigned> selectVOP3ModsImpl(MachineOperand &Root,
bool AllowAbs = true) const;
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
index 0528b552f475..7d3dbfd7e851 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
@@ -18,6 +18,7 @@ class AddressSpacesImpl {
int Local = 3;
int Constant = 4;
int Private = 5;
+ int Constant32Bit = 6;
}
def AddrSpaces : AddressSpacesImpl;
@@ -405,18 +406,23 @@ class Aligned<int Bytes> {
int MinAlignment = Bytes;
}
-class StoreHi16<SDPatternOperator op> : PatFrag <
+class StoreHi16<SDPatternOperator op, ValueType vt> : PatFrag <
(ops node:$value, node:$ptr), (op (srl node:$value, (i32 16)), node:$ptr)> {
let IsStore = 1;
+ let MemoryVT = vt;
}
-def LoadAddress_constant : AddressSpaceList<[ AddrSpaces.Constant ]>;
-def LoadAddress_global : AddressSpaceList<[ AddrSpaces.Global, AddrSpaces.Constant ]>;
+def LoadAddress_constant : AddressSpaceList<[ AddrSpaces.Constant,
+ AddrSpaces.Constant32Bit ]>;
+def LoadAddress_global : AddressSpaceList<[ AddrSpaces.Global,
+ AddrSpaces.Constant,
+ AddrSpaces.Constant32Bit ]>;
def StoreAddress_global : AddressSpaceList<[ AddrSpaces.Global ]>;
-def LoadAddress_flat : AddressSpaceList<[ AddrSpaces.Flat,
- AddrSpaces.Global,
- AddrSpaces.Constant ]>;
+def LoadAddress_flat : AddressSpaceList<[ AddrSpaces.Flat,
+ AddrSpaces.Global,
+ AddrSpaces.Constant,
+ AddrSpaces.Constant32Bit ]>;
def StoreAddress_flat : AddressSpaceList<[ AddrSpaces.Flat, AddrSpaces.Global ]>;
def LoadAddress_private : AddressSpaceList<[ AddrSpaces.Private ]>;
@@ -522,9 +528,9 @@ def truncstorei16_#as : PatFrag<(ops node:$val, node:$ptr),
let MemoryVT = i16;
}
-def store_hi16_#as : StoreHi16 <truncstorei16>;
-def truncstorei8_hi16_#as : StoreHi16<truncstorei8>;
-def truncstorei16_hi16_#as : StoreHi16<truncstorei16>;
+def store_hi16_#as : StoreHi16 <truncstorei16, i16>;
+def truncstorei8_hi16_#as : StoreHi16<truncstorei8, i8>;
+def truncstorei16_hi16_#as : StoreHi16<truncstorei16, i16>;
defm atomic_store_#as : binary_atomic_op<atomic_store>;
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 5046daaed977..04c6f67ed339 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -272,8 +272,8 @@ static bool isLoadStoreSizeLegal(const GCNSubtarget &ST,
const bool IsLoad = Query.Opcode != AMDGPU::G_STORE;
unsigned RegSize = Ty.getSizeInBits();
- unsigned MemSize = Query.MMODescrs[0].MemoryTy.getSizeInBits();
- unsigned AlignBits = Query.MMODescrs[0].AlignInBits;
+ uint64_t MemSize = Query.MMODescrs[0].MemoryTy.getSizeInBits();
+ uint64_t AlignBits = Query.MMODescrs[0].AlignInBits;
unsigned AS = Query.Types[1].getAddressSpace();
// All of these need to be custom lowered to cast the pointer operand.
@@ -380,7 +380,7 @@ static bool shouldBitcastLoadStoreType(const GCNSubtarget &ST, const LLT Ty,
/// access up to the alignment. Note this case when the memory access itself
/// changes, not the size of the result register.
static bool shouldWidenLoad(const GCNSubtarget &ST, LLT MemoryTy,
- unsigned AlignInBits, unsigned AddrSpace,
+ uint64_t AlignInBits, unsigned AddrSpace,
unsigned Opcode) {
unsigned SizeInBits = MemoryTy.getSizeInBits();
// We don't want to widen cases that are naturally legal.
@@ -929,10 +929,11 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
getActionDefinitionsBuilder(G_CTPOP)
.legalFor({{S32, S32}, {S32, S64}})
.clampScalar(0, S32, S32)
+ .widenScalarToNextPow2(1, 32)
.clampScalar(1, S32, S64)
.scalarize(0)
- .widenScalarToNextPow2(0, 32)
- .widenScalarToNextPow2(1, 32);
+ .widenScalarToNextPow2(0, 32);
+
// The hardware instructions return a different result on 0 than the generic
// instructions expect. The hardware produces -1, but these produce the
@@ -1172,7 +1173,7 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
if (MemSize > MaxSize)
return std::make_pair(0, LLT::scalar(MaxSize));
- unsigned Align = Query.MMODescrs[0].AlignInBits;
+ uint64_t Align = Query.MMODescrs[0].AlignInBits;
return std::make_pair(0, LLT::scalar(Align));
})
.fewerElementsIf(
@@ -1295,6 +1296,18 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
if (ST.hasAtomicFaddInsts())
Atomic.legalFor({{S32, GlobalPtr}});
+ if (ST.hasGFX90AInsts()) {
+ // These are legal with some caveats, and should have undergone expansion in
+ // the IR in most situations
+ // TODO: Move atomic expansion into legalizer
+ // TODO: Also supports <2 x f16>
+ Atomic.legalFor({
+ {S32, GlobalPtr},
+ {S64, GlobalPtr},
+ {S64, FlatPtr}
+ });
+ }
+
// BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling, and output
// demarshalling
getActionDefinitionsBuilder(G_ATOMIC_CMPXCHG)
@@ -1345,8 +1358,8 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
}, changeTo(1, S16));
Shifts.maxScalarIf(typeIs(0, S16), 1, S16);
Shifts.clampScalar(1, S32, S32);
- Shifts.clampScalar(0, S16, S64);
Shifts.widenScalarToNextPow2(0, 16);
+ Shifts.clampScalar(0, S16, S64);
getActionDefinitionsBuilder({G_SSHLSAT, G_USHLSAT})
.minScalar(0, S16)
@@ -1357,8 +1370,8 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
// expansion for the shifted type will produce much worse code if it hasn't
// been truncated already.
Shifts.clampScalar(1, S32, S32);
- Shifts.clampScalar(0, S32, S64);
Shifts.widenScalarToNextPow2(0, 32);
+ Shifts.clampScalar(0, S32, S64);
getActionDefinitionsBuilder({G_SSHLSAT, G_USHLSAT})
.minScalar(0, S32)
@@ -1812,6 +1825,27 @@ Register AMDGPULegalizerInfo::getSegmentAperture(
return B.buildLoad(S32, LoadAddr, *MMO).getReg(0);
}
+/// Return true if the value is a known valid address, such that a null check is
+/// not necessary.
+static bool isKnownNonNull(Register Val, MachineRegisterInfo &MRI,
+ const AMDGPUTargetMachine &TM, unsigned AddrSpace) {
+ MachineInstr *Def = MRI.getVRegDef(Val);
+ switch (Def->getOpcode()) {
+ case AMDGPU::G_FRAME_INDEX:
+ case AMDGPU::G_GLOBAL_VALUE:
+ case AMDGPU::G_BLOCK_ADDR:
+ return true;
+ case AMDGPU::G_CONSTANT: {
+ const ConstantInt *CI = Def->getOperand(1).getCImm();
+ return CI->getSExtValue() != TM.getNullPointerValue(AddrSpace);
+ }
+ default:
+ return false;
+ }
+
+ return false;
+}
+
bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
MachineInstr &MI, MachineRegisterInfo &MRI,
MachineIRBuilder &B) const {
@@ -1862,6 +1896,14 @@ bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
if (SrcAS == AMDGPUAS::FLAT_ADDRESS) {
assert(DestAS == AMDGPUAS::LOCAL_ADDRESS ||
DestAS == AMDGPUAS::PRIVATE_ADDRESS);
+
+ if (isKnownNonNull(Src, MRI, TM, SrcAS)) {
+ // Extract low 32-bits of the pointer.
+ B.buildExtract(Dst, Src, 0);
+ MI.eraseFromParent();
+ return true;
+ }
+
unsigned NullVal = TM.getNullPointerValue(DestAS);
auto SegmentNull = B.buildConstant(DstTy, NullVal);
@@ -1884,24 +1926,29 @@ bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
if (!ST.hasFlatAddressSpace())
return false;
- auto SegmentNull =
- B.buildConstant(SrcTy, TM.getNullPointerValue(SrcAS));
- auto FlatNull =
- B.buildConstant(DstTy, TM.getNullPointerValue(DestAS));
-
Register ApertureReg = getSegmentAperture(SrcAS, MRI, B);
if (!ApertureReg.isValid())
return false;
- auto CmpRes =
- B.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Src, SegmentNull.getReg(0));
-
// Coerce the type of the low half of the result so we can use merge_values.
Register SrcAsInt = B.buildPtrToInt(S32, Src).getReg(0);
// TODO: Should we allow mismatched types but matching sizes in merges to
// avoid the ptrtoint?
auto BuildPtr = B.buildMerge(DstTy, {SrcAsInt, ApertureReg});
+
+ if (isKnownNonNull(Src, MRI, TM, SrcAS)) {
+ B.buildCopy(Dst, BuildPtr);
+ MI.eraseFromParent();
+ return true;
+ }
+
+ auto SegmentNull = B.buildConstant(SrcTy, TM.getNullPointerValue(SrcAS));
+ auto FlatNull = B.buildConstant(DstTy, TM.getNullPointerValue(DestAS));
+
+ auto CmpRes =
+ B.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Src, SegmentNull.getReg(0));
+
B.buildSelect(Dst, CmpRes, BuildPtr, FlatNull);
MI.eraseFromParent();
@@ -1959,6 +2006,7 @@ bool AMDGPULegalizerInfo::legalizeFceil(
// TODO: Should this propagate fast-math-flags?
B.buildFAdd(MI.getOperand(0).getReg(), Trunc, Add);
+ MI.eraseFromParent();
return true;
}
@@ -2213,10 +2261,12 @@ bool AMDGPULegalizerInfo::legalizeExtractVectorElt(
LLT EltTy = VecTy.getElementType();
assert(EltTy == MRI.getType(Dst));
- if (IdxVal < VecTy.getNumElements())
- B.buildExtract(Dst, Vec, IdxVal * EltTy.getSizeInBits());
- else
+ if (IdxVal < VecTy.getNumElements()) {
+ auto Unmerge = B.buildUnmerge(EltTy, Vec);
+ B.buildCopy(Dst, Unmerge.getReg(IdxVal));
+ } else {
B.buildUndef(Dst);
+ }
MI.eraseFromParent();
return true;
@@ -2245,11 +2295,20 @@ bool AMDGPULegalizerInfo::legalizeInsertVectorElt(
LLT VecTy = MRI.getType(Vec);
LLT EltTy = VecTy.getElementType();
assert(EltTy == MRI.getType(Ins));
+ (void)Ins;
- if (IdxVal < VecTy.getNumElements())
- B.buildInsert(Dst, Vec, Ins, IdxVal * EltTy.getSizeInBits());
- else
+ unsigned NumElts = VecTy.getNumElements();
+ if (IdxVal < NumElts) {
+ SmallVector<Register, 8> SrcRegs;
+ for (unsigned i = 0; i < NumElts; ++i)
+ SrcRegs.push_back(MRI.createGenericVirtualRegister(EltTy));
+ B.buildUnmerge(SrcRegs, Vec);
+
+ SrcRegs[IdxVal] = MI.getOperand(2).getReg();
+ B.buildMerge(Dst, SrcRegs);
+ } else {
B.buildUndef(Dst);
+ }
MI.eraseFromParent();
return true;
@@ -2502,7 +2561,7 @@ bool AMDGPULegalizerInfo::legalizeLoad(LegalizerHelper &Helper,
const LLT MemTy = MMO->getMemoryType();
const Align MemAlign = MMO->getAlign();
const unsigned MemSize = MemTy.getSizeInBits();
- const unsigned AlignInBits = 8 * MemAlign.value();
+ const uint64_t AlignInBits = 8 * MemAlign.value();
// Widen non-power-of-2 loads to the alignment if needed
if (shouldWidenLoad(ST, MemTy, AlignInBits, AddrSpace, MI.getOpcode())) {
@@ -2832,8 +2891,8 @@ bool AMDGPULegalizerInfo::loadInputValue(Register DstReg, MachineIRBuilder &B,
assert(Register::isPhysicalRegister(SrcReg) && "Physical register expected");
assert(DstReg.isVirtual() && "Virtual register expected");
- Register LiveIn = getFunctionLiveInPhysReg(B.getMF(), B.getTII(), SrcReg, *ArgRC,
- ArgTy);
+ Register LiveIn = getFunctionLiveInPhysReg(B.getMF(), B.getTII(), SrcReg,
+ *ArgRC, B.getDebugLoc(), ArgTy);
if (Arg->isMasked()) {
// TODO: Should we try to emit this once in the entry block?
const LLT S32 = LLT::scalar(32);
@@ -2842,6 +2901,8 @@ bool AMDGPULegalizerInfo::loadInputValue(Register DstReg, MachineIRBuilder &B,
Register AndMaskSrc = LiveIn;
+ // TODO: Avoid clearing the high bits if we know workitem id y/z are always
+ // 0.
if (Shift != 0) {
auto ShiftAmt = B.buildConstant(S32, Shift);
AndMaskSrc = B.buildLShr(S32, LiveIn, ShiftAmt).getReg(0);
@@ -4106,7 +4167,6 @@ static unsigned getBufferAtomicPseudo(Intrinsic::ID IntrID) {
case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap:
case Intrinsic::amdgcn_struct_buffer_atomic_cmpswap:
return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_CMPSWAP;
- case Intrinsic::amdgcn_buffer_atomic_fadd:
case Intrinsic::amdgcn_raw_buffer_atomic_fadd:
case Intrinsic::amdgcn_struct_buffer_atomic_fadd:
return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD;
@@ -4213,15 +4273,18 @@ static void packImage16bitOpsToDwords(MachineIRBuilder &B, MachineInstr &MI,
if ((I < Intr->GradientStart) ||
(I >= Intr->GradientStart && I < Intr->CoordStart && !IsG16) ||
(I >= Intr->CoordStart && !IsA16)) {
- // Handle any gradient or coordinate operands that should not be packed
if ((I < Intr->GradientStart) && IsA16 &&
(B.getMRI()->getType(AddrReg) == S16)) {
+ assert(I == Intr->BiasIndex && "Got unexpected 16-bit extra argument");
// Special handling of bias when A16 is on. Bias is of type half but
// occupies full 32-bit.
PackedAddrs.push_back(
B.buildBuildVector(V2S16, {AddrReg, B.buildUndef(S16).getReg(0)})
.getReg(0));
} else {
+ assert((!IsA16 || Intr->NumBiasArgs == 0 || I != Intr->BiasIndex) &&
+ "Bias needs to be converted to 16 bit in A16 mode");
+ // Handle any gradient or coordinate operands that should not be packed
AddrReg = B.buildBitcast(V2S16, AddrReg).getReg(0);
PackedAddrs.push_back(AddrReg);
}
@@ -4320,6 +4383,8 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
const LLT V2S16 = LLT::fixed_vector(2, 16);
unsigned DMask = 0;
+ Register VData = MI.getOperand(NumDefs == 0 ? 1 : 0).getReg();
+ LLT Ty = MRI->getType(VData);
// Check for 16 bit addresses and pack if true.
LLT GradTy =
@@ -4328,6 +4393,7 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
MRI->getType(MI.getOperand(ArgOffset + Intr->CoordStart).getReg());
const bool IsG16 = GradTy == S16;
const bool IsA16 = AddrTy == S16;
+ const bool IsD16 = Ty.getScalarType() == S16;
int DMaskLanes = 0;
if (!BaseOpcode->Atomic) {
@@ -4347,8 +4413,11 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
Observer.changingInstr(MI);
auto ChangedInstr = make_scope_exit([&] { Observer.changedInstr(MI); });
- unsigned NewOpcode = NumDefs == 0 ?
- AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE : AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD;
+ const unsigned StoreOpcode = IsD16 ? AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16
+ : AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE;
+ const unsigned LoadOpcode = IsD16 ? AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16
+ : AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD;
+ unsigned NewOpcode = NumDefs == 0 ? StoreOpcode : LoadOpcode;
// Track that we legalized this
MI.setDesc(B.getTII().get(NewOpcode));
@@ -4381,44 +4450,6 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
unsigned CorrectedNumVAddrs = Intr->NumVAddrs;
- // Optimize _L to _LZ when _L is zero
- if (const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
- AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode)) {
- const ConstantFP *ConstantLod;
-
- if (mi_match(MI.getOperand(ArgOffset + Intr->LodIndex).getReg(), *MRI,
- m_GFCst(ConstantLod))) {
- if (ConstantLod->isZero() || ConstantLod->isNegative()) {
- // Set new opcode to _lz variant of _l, and change the intrinsic ID.
- const AMDGPU::ImageDimIntrinsicInfo *NewImageDimIntr =
- AMDGPU::getImageDimIntrinsicByBaseOpcode(LZMappingInfo->LZ,
- Intr->Dim);
-
- // The starting indexes should remain in the same place.
- --CorrectedNumVAddrs;
-
- MI.getOperand(MI.getNumExplicitDefs())
- .setIntrinsicID(static_cast<Intrinsic::ID>(NewImageDimIntr->Intr));
- MI.RemoveOperand(ArgOffset + Intr->LodIndex);
- Intr = NewImageDimIntr;
- }
- }
- }
-
- // Optimize _mip away, when 'lod' is zero
- if (AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode)) {
- int64_t ConstantLod;
- if (mi_match(MI.getOperand(ArgOffset + Intr->MipIndex).getReg(), *MRI,
- m_ICst(ConstantLod))) {
- if (ConstantLod == 0) {
- // TODO: Change intrinsic opcode and remove operand instead or replacing
- // it with 0, as the _L to _LZ handling is done above.
- MI.getOperand(ArgOffset + Intr->MipIndex).ChangeToImmediate(0);
- --CorrectedNumVAddrs;
- }
- }
- }
-
// Rewrite the addressing register layout before doing anything else.
if (BaseOpcode->Gradients && !ST.hasG16() && (IsA16 != IsG16)) {
// 16 bit gradients are supported, but are tied to the A16 control
@@ -4494,9 +4525,7 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
if (BaseOpcode->Store) { // No TFE for stores?
// TODO: Handle dmask trim
- Register VData = MI.getOperand(1).getReg();
- LLT Ty = MRI->getType(VData);
- if (!Ty.isVector() || Ty.getElementType() != S16)
+ if (!Ty.isVector() || !IsD16)
return true;
Register RepackedReg = handleD16VData(B, *MRI, VData, true);
@@ -4508,9 +4537,7 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
}
Register DstReg = MI.getOperand(0).getReg();
- LLT Ty = MRI->getType(DstReg);
const LLT EltTy = Ty.getScalarType();
- const bool IsD16 = Ty.getScalarType() == S16;
const int NumElts = Ty.isVector() ? Ty.getNumElements() : 1;
// Confirm that the return type is large enough for the dmask specified
@@ -4918,6 +4945,12 @@ bool AMDGPULegalizerInfo::legalizeBVHIntrinsic(MachineInstr &MI,
return true;
}
+static bool replaceWithConstant(MachineIRBuilder &B, MachineInstr &MI, int64_t C) {
+ B.buildConstant(MI.getOperand(0).getReg(), C);
+ MI.eraseFromParent();
+ return true;
+}
+
bool AMDGPULegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
MachineInstr &MI) const {
MachineIRBuilder &B = Helper.MIRBuilder;
@@ -5021,12 +5054,20 @@ bool AMDGPULegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
case Intrinsic::amdgcn_implicitarg_ptr:
return legalizeImplicitArgPtr(MI, MRI, B);
case Intrinsic::amdgcn_workitem_id_x:
+ if (ST.getMaxWorkitemID(B.getMF().getFunction(), 0) == 0)
+ return replaceWithConstant(B, MI, 0);
return legalizePreloadedArgIntrin(MI, MRI, B,
AMDGPUFunctionArgInfo::WORKITEM_ID_X);
case Intrinsic::amdgcn_workitem_id_y:
+ if (ST.getMaxWorkitemID(B.getMF().getFunction(), 1) == 0)
+ return replaceWithConstant(B, MI, 0);
+
return legalizePreloadedArgIntrin(MI, MRI, B,
AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
case Intrinsic::amdgcn_workitem_id_z:
+ if (ST.getMaxWorkitemID(B.getMF().getFunction(), 2) == 0)
+ return replaceWithConstant(B, MI, 0);
+
return legalizePreloadedArgIntrin(MI, MRI, B,
AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
case Intrinsic::amdgcn_workgroup_id_x:
@@ -5105,16 +5146,29 @@ bool AMDGPULegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
case Intrinsic::amdgcn_struct_buffer_atomic_inc:
case Intrinsic::amdgcn_raw_buffer_atomic_dec:
case Intrinsic::amdgcn_struct_buffer_atomic_dec:
- case Intrinsic::amdgcn_raw_buffer_atomic_fadd:
- case Intrinsic::amdgcn_struct_buffer_atomic_fadd:
case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap:
case Intrinsic::amdgcn_struct_buffer_atomic_cmpswap:
- case Intrinsic::amdgcn_buffer_atomic_fadd:
case Intrinsic::amdgcn_raw_buffer_atomic_fmin:
case Intrinsic::amdgcn_struct_buffer_atomic_fmin:
case Intrinsic::amdgcn_raw_buffer_atomic_fmax:
case Intrinsic::amdgcn_struct_buffer_atomic_fmax:
return legalizeBufferAtomic(MI, B, IntrID);
+ case Intrinsic::amdgcn_raw_buffer_atomic_fadd:
+ case Intrinsic::amdgcn_struct_buffer_atomic_fadd: {
+ Register DstReg = MI.getOperand(0).getReg();
+ if (!MRI.use_empty(DstReg) && !ST.hasGFX90AInsts()) {
+ Function &F = B.getMF().getFunction();
+ DiagnosticInfoUnsupported NoFpRet(
+ F, "return versions of fp atomics not supported", B.getDebugLoc(),
+ DS_Error);
+ F.getContext().diagnose(NoFpRet);
+ B.buildUndef(DstReg);
+ MI.eraseFromParent();
+ return true;
+ }
+
+ return legalizeBufferAtomic(MI, B, IntrID);
+ }
case Intrinsic::amdgcn_atomic_inc:
return legalizeAtomicIncDec(MI, B, true);
case Intrinsic::amdgcn_atomic_dec:
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
index 7faf0436f995..964a41d3d740 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
@@ -21,7 +21,6 @@
namespace llvm {
class GCNTargetMachine;
-class LLVMContext;
class GCNSubtarget;
class MachineIRBuilder;
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
index 49cf6db5197f..c28427758ac7 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
@@ -58,9 +58,6 @@ private:
// "FuncName" exists. It may create a new function prototype in pre-link mode.
FunctionCallee getFunction(Module *M, const FuncInfo &fInfo);
- // Replace a normal function with its native version.
- bool replaceWithNative(CallInst *CI, const FuncInfo &FInfo);
-
bool parseFunctionName(const StringRef &FMangledName, FuncInfo &FInfo);
bool TDOFold(CallInst *CI, const FuncInfo &FInfo);
@@ -90,24 +87,6 @@ private:
double& Res1, Constant *copr0, Constant *copr1, Constant *copr2);
bool evaluateCall(CallInst *aCI, const FuncInfo &FInfo);
- // exp
- bool fold_exp(CallInst *CI, IRBuilder<> &B, const FuncInfo &FInfo);
-
- // exp2
- bool fold_exp2(CallInst *CI, IRBuilder<> &B, const FuncInfo &FInfo);
-
- // exp10
- bool fold_exp10(CallInst *CI, IRBuilder<> &B, const FuncInfo &FInfo);
-
- // log
- bool fold_log(CallInst *CI, IRBuilder<> &B, const FuncInfo &FInfo);
-
- // log2
- bool fold_log2(CallInst *CI, IRBuilder<> &B, const FuncInfo &FInfo);
-
- // log10
- bool fold_log10(CallInst *CI, IRBuilder<> &B, const FuncInfo &FInfo);
-
// sqrt
bool fold_sqrt(CallInst *CI, IRBuilder<> &B, const FuncInfo &FInfo);
@@ -623,7 +602,8 @@ bool AMDGPULibCalls::fold(CallInst *CI, AliasAnalysis *AA) {
Function *Callee = CI->getCalledFunction();
// Ignore indirect calls.
- if (Callee == 0) return false;
+ if (Callee == nullptr)
+ return false;
BasicBlock *BB = CI->getParent();
LLVMContext &Context = CI->getParent()->getContext();
@@ -778,27 +758,6 @@ bool AMDGPULibCalls::TDOFold(CallInst *CI, const FuncInfo &FInfo) {
return false;
}
-bool AMDGPULibCalls::replaceWithNative(CallInst *CI, const FuncInfo &FInfo) {
- Module *M = CI->getModule();
- if (getArgType(FInfo) != AMDGPULibFunc::F32 ||
- FInfo.getPrefix() != AMDGPULibFunc::NOPFX ||
- !HasNative(FInfo.getId()))
- return false;
-
- AMDGPULibFunc nf = FInfo;
- nf.setPrefix(AMDGPULibFunc::NATIVE);
- if (FunctionCallee FPExpr = getFunction(M, nf)) {
- LLVM_DEBUG(dbgs() << "AMDIC: " << *CI << " ---> ");
-
- CI->setCalledFunction(FPExpr);
-
- LLVM_DEBUG(dbgs() << *CI << '\n');
-
- return true;
- }
- return false;
-}
-
// [native_]half_recip(c) ==> 1.0/c
bool AMDGPULibCalls::fold_recip(CallInst *CI, IRBuilder<> &B,
const FuncInfo &FInfo) {
@@ -1402,8 +1361,8 @@ AllocaInst* AMDGPULibCalls::insertAlloca(CallInst *UI, IRBuilder<> &B,
Function *UCallee = UI->getCalledFunction();
Type *RetType = UCallee->getReturnType();
B.SetInsertPoint(&*ItNew);
- AllocaInst *Alloc = B.CreateAlloca(RetType, 0,
- std::string(prefix) + UI->getName());
+ AllocaInst *Alloc =
+ B.CreateAlloca(RetType, nullptr, std::string(prefix) + UI->getName());
Alloc->setAlignment(
Align(UCallee->getParent()->getDataLayout().getTypeAllocSize(RetType)));
return Alloc;
@@ -1724,7 +1683,8 @@ bool AMDGPUSimplifyLibCalls::runOnFunction(Function &F) {
// Ignore indirect calls.
Function *Callee = CI->getCalledFunction();
- if (Callee == 0) continue;
+ if (Callee == nullptr)
+ continue;
LLVM_DEBUG(dbgs() << "AMDIC: try folding " << *CI << "\n";
dbgs().flush());
@@ -1757,7 +1717,7 @@ PreservedAnalyses AMDGPUSimplifyLibCallsPass::run(Function &F,
// Ignore indirect calls.
Function *Callee = CI->getCalledFunction();
- if (Callee == 0)
+ if (Callee == nullptr)
continue;
LLVM_DEBUG(dbgs() << "AMDIC: try folding " << *CI << "\n";
@@ -1783,9 +1743,10 @@ bool AMDGPUUseNativeCalls::runOnFunction(Function &F) {
// Ignore indirect calls.
Function *Callee = CI->getCalledFunction();
- if (Callee == 0) continue;
+ if (Callee == nullptr)
+ continue;
- if(Simplifier.useNative(CI))
+ if (Simplifier.useNative(CI))
Changed = true;
}
}
@@ -1811,7 +1772,7 @@ PreservedAnalyses AMDGPUUseNativeCallsPass::run(Function &F,
// Ignore indirect calls.
Function *Callee = CI->getCalledFunction();
- if (Callee == 0)
+ if (Callee == nullptr)
continue;
if (Simplifier.useNative(CI))
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPULibFunc.h b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPULibFunc.h
index c97223b047e8..dc0ac72016f3 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPULibFunc.h
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPULibFunc.h
@@ -10,6 +10,7 @@
#define _AMDGPU_LIBFUNC_H_
#include "llvm/ADT/StringRef.h"
+#include <memory>
namespace llvm {
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
index 0c743a77092c..593388a4d819 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
@@ -15,9 +15,8 @@
using namespace llvm;
AMDGPUMachineFunction::AMDGPUMachineFunction(const MachineFunction &MF)
- : MachineFunctionInfo(), Mode(MF.getFunction()),
- IsEntryFunction(
- AMDGPU::isEntryFunctionCC(MF.getFunction().getCallingConv())),
+ : Mode(MF.getFunction()), IsEntryFunction(AMDGPU::isEntryFunctionCC(
+ MF.getFunction().getCallingConv())),
IsModuleEntryFunction(
AMDGPU::isModuleEntryFunctionCC(MF.getFunction().getCallingConv())),
NoSignedZerosFPMath(MF.getTarget().Options.NoSignedZerosFPMath) {
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.h b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.h
index 10ff50040c6a..48cf46b5f871 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.h
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.h
@@ -15,8 +15,6 @@
namespace llvm {
-class GCNSubtarget;
-
class AMDGPUMachineFunction : public MachineFunctionInfo {
/// A map to keep track of local memory objects and their offsets within the
/// local memory space.
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUPTNote.h b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUPTNote.h
index 8af7979dba8b..5cefc83e25e0 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUPTNote.h
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUPTNote.h
@@ -29,4 +29,4 @@ const char NoteNameV3[] = "AMDGPU";
} // End namespace ElfNote
} // End namespace AMDGPU
} // End namespace llvm
-#endif // LLVM_LIB_TARGET_AMDGPU_AMDGPUNOTETYPE_H
+#endif // LLVM_LIB_TARGET_AMDGPU_AMDGPUPTNOTE_H
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUPrintfRuntimeBinding.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUPrintfRuntimeBinding.cpp
index 7c4eb71882c7..f91f31508ad2 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUPrintfRuntimeBinding.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUPrintfRuntimeBinding.cpp
@@ -463,7 +463,7 @@ bool AMDGPUPrintfRuntimeBindingImpl::lowerPrintfForGpu(Module &M) {
WhatToStore.push_back(Arg);
}
} else if (isa<FixedVectorType>(ArgType)) {
- Type *IType = NULL;
+ Type *IType = nullptr;
uint32_t EleCount = cast<FixedVectorType>(ArgType)->getNumElements();
uint32_t EleSize = ArgType->getScalarSizeInBits();
uint32_t TotalSize = EleCount * EleSize;
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
index f9a9fe403ff6..2d8126a49327 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -789,6 +789,17 @@ bool AMDGPUPromoteAllocaImpl::hasSufficientLocalMem(const Function &F) {
Align Alignment =
DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
uint64_t AllocSize = DL.getTypeAllocSize(GV->getValueType());
+
+ // HIP uses an extern unsized array in local address space for dynamically
+ // allocated shared memory. In that case, we have to disable the promotion.
+ if (GV->hasExternalLinkage() && AllocSize == 0) {
+ LocalMemLimit = 0;
+ LLVM_DEBUG(dbgs() << "Function has a reference to externally allocated "
+ "local memory. Promoting to local memory "
+ "disabled.\n");
+ return false;
+ }
+
AllocatedSizes.emplace_back(AllocSize, Alignment);
}
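
The hunk above makes AMDGPUPromoteAlloca give up on LDS promotion as soon as it sees an external, zero-sized local-memory global, which is how HIP's dynamically sized extern __shared__ arrays appear to the compiler. As a rough, self-contained sketch of that accounting (not the pass itself; the struct and names below are illustrative):

    #include <cstdint>
    #include <vector>

    // Standalone model of the check added above: a HIP-style "extern __shared__"
    // array shows up as an external, zero-sized LDS global, which makes the
    // remaining LDS budget unknowable at compile time.
    struct LdsGlobal {
      bool HasExternalLinkage;
      uint64_t AllocSize; // 0 for an unsized extern array
    };

    // Returns false (and zeroes the budget) when promotion to LDS must be disabled.
    bool canPromoteToLds(const std::vector<LdsGlobal> &Globals,
                         uint64_t &LocalMemLimit, uint64_t &UsedBytes) {
      UsedBytes = 0;
      for (const LdsGlobal &GV : Globals) {
        if (GV.HasExternalLinkage && GV.AllocSize == 0) {
          LocalMemLimit = 0; // dynamically allocated shared memory: give up
          return false;
        }
        UsedBytes += GV.AllocSize; // the real pass also records alignment
      }
      return UsedBytes < LocalMemLimit; // the real pass does a finer budget check
    }
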
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp
index 3ce67a733c10..0df6f4d45b06 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp
@@ -36,6 +36,7 @@ protected:
MachineIRBuilder &B;
MachineFunction &MF;
MachineRegisterInfo &MRI;
+ const GCNSubtarget &Subtarget;
const RegisterBankInfo &RBI;
const TargetRegisterInfo &TRI;
const SIInstrInfo &TII;
@@ -44,9 +45,9 @@ protected:
public:
AMDGPURegBankCombinerHelper(MachineIRBuilder &B, CombinerHelper &Helper)
: B(B), MF(B.getMF()), MRI(*B.getMRI()),
- RBI(*MF.getSubtarget().getRegBankInfo()),
- TRI(*MF.getSubtarget().getRegisterInfo()),
- TII(*MF.getSubtarget<GCNSubtarget>().getInstrInfo()), Helper(Helper){};
+ Subtarget(MF.getSubtarget<GCNSubtarget>()),
+ RBI(*Subtarget.getRegBankInfo()), TRI(*Subtarget.getRegisterInfo()),
+ TII(*Subtarget.getInstrInfo()), Helper(Helper){};
bool isVgprRegBank(Register Reg);
Register getAsVgpr(Register Reg);
@@ -193,7 +194,10 @@ bool AMDGPURegBankCombinerHelper::matchFPMinMaxToMed3(
MachineInstr &MI, Med3MatchInfo &MatchInfo) {
Register Dst = MI.getOperand(0).getReg();
LLT Ty = MRI.getType(Dst);
- if (Ty != LLT::scalar(16) && Ty != LLT::scalar(32))
+
+ // med3 for f16 is only available on gfx9+, and not available for v2f16.
+ if ((Ty != LLT::scalar(16) || !Subtarget.hasMed3_16()) &&
+ Ty != LLT::scalar(32))
return false;
auto OpcodeTriple = getMinMaxPair(MI.getOpcode());
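
The added check above narrows the min/max-to-med3 combine: 32-bit scalars always qualify, 16-bit scalars only on subtargets that actually have a 16-bit med3 instruction (gfx9+), and packed vector types such as v2f16 never do. A minimal standalone predicate capturing that gate (parameter names are assumptions, not the combiner's API):

    // Illustrative predicate only: mirrors the type gate on the
    // min/max -> med3 fold added in the hunk above.
    bool med3FoldApplies(unsigned ScalarSizeInBits, bool IsVector, bool HasMed3_16) {
      if (IsVector)
        return false;                              // no med3 for v2f16
      if (ScalarSizeInBits == 32)
        return true;                               // always available
      return ScalarSizeInBits == 16 && HasMed3_16; // 16-bit med3 is gfx9+
    }
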
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index c60012bcfe2e..de2dccef804a 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -718,8 +718,11 @@ bool AMDGPURegisterBankInfo::executeInWaterfallLoop(
const TargetRegisterClass *WaveRC = TRI->getWaveMaskRegClass();
const unsigned WaveAndOpc = Subtarget.isWave32() ?
AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
- const unsigned MovTermOpc = Subtarget.isWave32() ?
- AMDGPU::S_MOV_B32_term : AMDGPU::S_MOV_B64_term;
+ const unsigned MovExecOpc =
+ Subtarget.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
+ const unsigned MovExecTermOpc =
+ Subtarget.isWave32() ? AMDGPU::S_MOV_B32_term : AMDGPU::S_MOV_B64_term;
+
const unsigned XorTermOpc = Subtarget.isWave32() ?
AMDGPU::S_XOR_B32_term : AMDGPU::S_XOR_B64_term;
const unsigned AndSaveExecOpc = Subtarget.isWave32() ?
@@ -996,12 +999,12 @@ bool AMDGPURegisterBankInfo::executeInWaterfallLoop(
B.buildInstr(AMDGPU::SI_WATERFALL_LOOP).addMBB(LoopBB);
// Save the EXEC mask before the loop.
- BuildMI(MBB, MBB.end(), DL, TII->get(MovTermOpc), SaveExecReg)
+ BuildMI(MBB, MBB.end(), DL, TII->get(MovExecOpc), SaveExecReg)
.addReg(ExecReg);
// Restore the EXEC mask after the loop.
B.setMBB(*RestoreExecBB);
- B.buildInstr(MovTermOpc)
+ B.buildInstr(MovExecTermOpc)
.addDef(ExecReg)
.addReg(SaveExecReg);
@@ -2953,7 +2956,9 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
break;
}
case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
- case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE: {
+ case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16:
+ case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE:
+ case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: {
const AMDGPU::RsrcIntrinsic *RSrcIntrin
= AMDGPU::lookupRsrcIntrinsic(MI.getIntrinsicID());
assert(RSrcIntrin && RSrcIntrin->IsImage);
@@ -3691,6 +3696,16 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
OpdsMapping[1] = AMDGPU::getValueMapping(SrcBankID, 32);
break;
}
+ case AMDGPU::G_AMDGPU_WAVE_ADDRESS: {
+ // This case is weird because we expect a physical register in the source,
+ // but need to set a bank anyway.
+ //
+ // We could select the result to SGPR or VGPR, but for the one current use
+ // it's more practical to always use VGPR.
+ OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32);
+ OpdsMapping[1] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 32);
+ break;
+ }
case AMDGPU::G_INSERT: {
unsigned BankID = getMappingType(MRI, MI);
unsigned DstSize = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
@@ -4078,7 +4093,6 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case Intrinsic::amdgcn_mqsad_pk_u16_u8:
case Intrinsic::amdgcn_mqsad_u32_u8:
case Intrinsic::amdgcn_cvt_pk_u8_f32:
- case Intrinsic::amdgcn_alignbit:
case Intrinsic::amdgcn_alignbyte:
case Intrinsic::amdgcn_perm:
case Intrinsic::amdgcn_fdot2:
@@ -4276,7 +4290,9 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
break;
}
case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
- case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE: {
+ case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16:
+ case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE:
+ case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: {
auto IntrID = MI.getIntrinsicID();
const AMDGPU::RsrcIntrinsic *RSrcIntrin = AMDGPU::lookupRsrcIntrinsic(IntrID);
assert(RSrcIntrin && "missing RsrcIntrinsic for image intrinsic");
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp
index 45f7c2f369bd..1c6c63dd5b25 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp
@@ -353,7 +353,7 @@ bool AMDGPURewriteOutArguments::runOnFunction(Function &F) {
// off any return attributes, e.g. zeroext doesn't make sense with a struct.
NewFunc->stealArgumentListFrom(F);
- AttrBuilder RetAttrs;
+ AttributeMask RetAttrs;
RetAttrs.addAttribute(Attribute::SExt);
RetAttrs.addAttribute(Attribute::ZExt);
RetAttrs.addAttribute(Attribute::NoAlias);
@@ -433,7 +433,7 @@ bool AMDGPURewriteOutArguments::runOnFunction(Function &F) {
PointerType *ArgType = cast<PointerType>(Arg.getType());
- auto *EltTy = ArgType->getElementType();
+ auto *EltTy = ArgType->getPointerElementType();
const auto Align =
DL->getValueOrABITypeAlignment(Arg.getParamAlign(), EltTy);
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
index cd05797fdbdb..e82f9232b114 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
@@ -269,7 +269,6 @@ GCNSubtarget::GCNSubtarget(const Triple &TT, StringRef GPU, StringRef FS,
HasGetWaveIdInst(false),
HasSMemTimeInst(false),
HasShaderCyclesRegister(false),
- HasRegisterBanking(false),
HasVOP3Literal(false),
HasNoDataDepHazard(false),
FlatAddressSpace(false),
@@ -772,11 +771,11 @@ unsigned GCNSubtarget::getOccupancyWithNumVGPRs(unsigned VGPRs) const {
}
unsigned
-GCNSubtarget::getBaseReservedNumSGPRs(const bool HasFlatScratchInit) const {
+GCNSubtarget::getBaseReservedNumSGPRs(const bool HasFlatScratch) const {
if (getGeneration() >= AMDGPUSubtarget::GFX10)
return 2; // VCC. FLAT_SCRATCH and XNACK are no longer in SGPRs.
- if (HasFlatScratchInit || HasArchitectedFlatScratch) {
+ if (HasFlatScratch || HasArchitectedFlatScratch) {
if (getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
return 6; // FLAT_SCRATCH, XNACK, VCC (in that order).
if (getGeneration() == AMDGPUSubtarget::SEA_ISLANDS)
@@ -794,20 +793,11 @@ unsigned GCNSubtarget::getReservedNumSGPRs(const MachineFunction &MF) const {
}
unsigned GCNSubtarget::getReservedNumSGPRs(const Function &F) const {
- // The logic to detect if the function has
- // flat scratch init is slightly different than how
- // SIMachineFunctionInfo constructor derives.
- // We don't use amdgpu-calls, amdgpu-stack-objects
- // attributes and isAmdHsaOrMesa here as it doesn't really matter.
- // TODO: Outline this derivation logic and have just
- // one common function in the backend to avoid duplication.
- bool isEntry = AMDGPU::isEntryFunctionCC(F.getCallingConv());
- bool FunctionHasFlatScratchInit = false;
- if (hasFlatAddressSpace() && isEntry && !flatScratchIsArchitected() &&
- enableFlatScratch()) {
- FunctionHasFlatScratchInit = true;
- }
- return getBaseReservedNumSGPRs(FunctionHasFlatScratchInit);
+ // In principle we do not need to reserve the SGPR pair used for flat_scratch if
+ // we know flat instructions do not access the stack anywhere in the
+ // program. For now assume it's needed if we have flat instructions.
+ const bool KernelUsesFlatScratch = hasFlatAddressSpace();
+ return getBaseReservedNumSGPRs(KernelUsesFlatScratch);
}
unsigned GCNSubtarget::computeOccupancy(const Function &F, unsigned LDSSize,
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h
index 88ed4b2b7a24..7f1b94be4ffe 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h
@@ -212,7 +212,19 @@ public:
/// Returns the offset in bytes from the start of the input buffer
/// of the first explicit kernel argument.
unsigned getExplicitKernelArgOffset(const Function &F) const {
- return isAmdHsaOrMesa(F) ? 0 : 36;
+ switch (TargetTriple.getOS()) {
+ case Triple::AMDHSA:
+ case Triple::AMDPAL:
+ case Triple::Mesa3D:
+ return 0;
+ case Triple::UnknownOS:
+ default:
+ // For legacy reasons unknown/other is treated as a different version of
+ // mesa.
+ return 36;
+ }
+
+ llvm_unreachable("invalid triple OS");
}
/// \returns Maximum number of work groups per compute unit supported by the
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h
index 226646a96953..dd3676f3b707 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h
@@ -21,8 +21,6 @@
namespace llvm {
-class ScheduleDAGMILive;
-
//===----------------------------------------------------------------------===//
// AMDGPU Target Machine (R600+)
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index 09c5eb192e1f..a8df7789c8a1 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -844,15 +844,8 @@ bool GCNTTIImpl::isInlineAsmSourceOfDivergence(
TLI->ComputeConstraintToUse(TC, SDValue());
- Register AssignedReg;
- const TargetRegisterClass *RC;
- std::tie(AssignedReg, RC) = TLI->getRegForInlineAsmConstraint(
- TRI, TC.ConstraintCode, TC.ConstraintVT);
- if (AssignedReg) {
- // FIXME: This is a workaround for getRegForInlineAsmConstraint
- // returning VS_32
- RC = TRI->getPhysRegClass(AssignedReg);
- }
+ const TargetRegisterClass *RC = TLI->getRegForInlineAsmConstraint(
+ TRI, TC.ConstraintCode, TC.ConstraintVT).second;
// For AGPR constraints null is returned on subtargets without AGPRs, so
// assume divergent for null.
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index 2bb59086f391..c1c88d9a7462 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -62,7 +62,7 @@ class AMDGPUOperand : public MCParsedAsmOperand {
public:
AMDGPUOperand(KindTy Kind_, const AMDGPUAsmParser *AsmParser_)
- : MCParsedAsmOperand(), Kind(Kind_), AsmParser(AsmParser_) {}
+ : Kind(Kind_), AsmParser(AsmParser_) {}
using Ptr = std::unique_ptr<AMDGPUOperand>;
@@ -1548,6 +1548,7 @@ private:
bool validateVccOperand(unsigned Reg) const;
bool validateVOPLiteral(const MCInst &Inst, const OperandVector &Operands);
bool validateMAIAccWrite(const MCInst &Inst, const OperandVector &Operands);
+ bool validateMFMA(const MCInst &Inst, const OperandVector &Operands);
bool validateAGPRLdSt(const MCInst &Inst) const;
bool validateVGPRAlign(const MCInst &Inst) const;
bool validateGWS(const MCInst &Inst, const OperandVector &Operands);
@@ -3613,6 +3614,40 @@ bool AMDGPUAsmParser::validateMAIAccWrite(const MCInst &Inst,
return true;
}
+bool AMDGPUAsmParser::validateMFMA(const MCInst &Inst,
+ const OperandVector &Operands) {
+ const unsigned Opc = Inst.getOpcode();
+ const MCInstrDesc &Desc = MII.get(Opc);
+
+ if ((Desc.TSFlags & SIInstrFlags::IsMAI) == 0)
+ return true;
+
+ const int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
+ if (Src2Idx == -1)
+ return true;
+
+ const MCOperand &Src2 = Inst.getOperand(Src2Idx);
+ if (!Src2.isReg())
+ return true;
+
+ MCRegister Src2Reg = Src2.getReg();
+ MCRegister DstReg = Inst.getOperand(0).getReg();
+ if (Src2Reg == DstReg)
+ return true;
+
+ const MCRegisterInfo *TRI = getContext().getRegisterInfo();
+ if (TRI->getRegClass(Desc.OpInfo[0].RegClass).getSizeInBits() <= 128)
+ return true;
+
+ if (isRegIntersect(Src2Reg, DstReg, TRI)) {
+ Error(getRegLoc(mc2PseudoReg(Src2Reg), Operands),
+ "source 2 operand must not partially overlap with dst");
+ return false;
+ }
+
+ return true;
+}
+
bool AMDGPUAsmParser::validateDivScale(const MCInst &Inst) {
switch (Inst.getOpcode()) {
default:
@@ -4297,6 +4332,9 @@ bool AMDGPUAsmParser::validateInstruction(const MCInst &Inst,
if (!validateMAIAccWrite(Inst, Operands)) {
return false;
}
+ if (!validateMFMA(Inst, Operands)) {
+ return false;
+ }
if (!validateCoherencyBits(Inst, Operands, IDLoc)) {
return false;
}
@@ -4568,7 +4606,13 @@ bool AMDGPUAsmParser::ParseDirectiveAMDHSAKernel() {
uint64_t AccumOffset = 0;
SMRange SGPRRange;
uint64_t NextFreeSGPR = 0;
- unsigned UserSGPRCount = 0;
+
+ // Count the number of user SGPRs implied from the enabled feature bits.
+ unsigned ImpliedUserSGPRCount = 0;
+
+ // Track if the asm explicitly contains the directive for the user SGPR
+ // count.
+ Optional<unsigned> ExplicitUserSGPRCount;
bool ReserveVCC = true;
bool ReserveFlatScr = true;
Optional<bool> EnableWavefrontSize32;
@@ -4617,6 +4661,8 @@ bool AMDGPUAsmParser::ParseDirectiveAMDHSAKernel() {
if (!isUInt<sizeof(KD.kernarg_size) * CHAR_BIT>(Val))
return OutOfRangeError(ValRange);
KD.kernarg_size = Val;
+ } else if (ID == ".amdhsa_user_sgpr_count") {
+ ExplicitUserSGPRCount = Val;
} else if (ID == ".amdhsa_user_sgpr_private_segment_buffer") {
if (hasArchitectedFlatScratch())
return Error(IDRange.Start,
@@ -4626,31 +4672,31 @@ bool AMDGPUAsmParser::ParseDirectiveAMDHSAKernel() {
KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER,
Val, ValRange);
if (Val)
- UserSGPRCount += 4;
+ ImpliedUserSGPRCount += 4;
} else if (ID == ".amdhsa_user_sgpr_dispatch_ptr") {
PARSE_BITS_ENTRY(KD.kernel_code_properties,
KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR, Val,
ValRange);
if (Val)
- UserSGPRCount += 2;
+ ImpliedUserSGPRCount += 2;
} else if (ID == ".amdhsa_user_sgpr_queue_ptr") {
PARSE_BITS_ENTRY(KD.kernel_code_properties,
KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR, Val,
ValRange);
if (Val)
- UserSGPRCount += 2;
+ ImpliedUserSGPRCount += 2;
} else if (ID == ".amdhsa_user_sgpr_kernarg_segment_ptr") {
PARSE_BITS_ENTRY(KD.kernel_code_properties,
KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR,
Val, ValRange);
if (Val)
- UserSGPRCount += 2;
+ ImpliedUserSGPRCount += 2;
} else if (ID == ".amdhsa_user_sgpr_dispatch_id") {
PARSE_BITS_ENTRY(KD.kernel_code_properties,
KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID, Val,
ValRange);
if (Val)
- UserSGPRCount += 2;
+ ImpliedUserSGPRCount += 2;
} else if (ID == ".amdhsa_user_sgpr_flat_scratch_init") {
if (hasArchitectedFlatScratch())
return Error(IDRange.Start,
@@ -4660,13 +4706,13 @@ bool AMDGPUAsmParser::ParseDirectiveAMDHSAKernel() {
KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT, Val,
ValRange);
if (Val)
- UserSGPRCount += 2;
+ ImpliedUserSGPRCount += 2;
} else if (ID == ".amdhsa_user_sgpr_private_segment_size") {
PARSE_BITS_ENTRY(KD.kernel_code_properties,
KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE,
Val, ValRange);
if (Val)
- UserSGPRCount += 1;
+ ImpliedUserSGPRCount += 1;
} else if (ID == ".amdhsa_wavefront_size32") {
if (IVersion.Major < 10)
return Error(IDRange.Start, "directive requires gfx10+", IDRange);
@@ -4850,6 +4896,13 @@ bool AMDGPUAsmParser::ParseDirectiveAMDHSAKernel() {
COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT,
SGPRBlocks);
+ if (ExplicitUserSGPRCount && ImpliedUserSGPRCount > *ExplicitUserSGPRCount)
+ return TokError("amdgpu_user_sgpr_count smaller than than implied by "
+ "enabled user SGPRs");
+
+ unsigned UserSGPRCount =
+ ExplicitUserSGPRCount ? *ExplicitUserSGPRCount : ImpliedUserSGPRCount;
+
if (!isUInt<COMPUTE_PGM_RSRC2_USER_SGPR_COUNT_WIDTH>(UserSGPRCount))
return TokError("too many user SGPRs enabled");
AMDHSA_BITS_SET(KD.compute_pgm_rsrc2, COMPUTE_PGM_RSRC2_USER_SGPR_COUNT,
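
With the new .amdhsa_user_sgpr_count directive, the assembler keeps two figures: the count implied by the individual user-SGPR enable directives and an optional explicit override. An explicit value below the implied one is rejected, otherwise the explicit value wins, and the final count must still fit the descriptor field. A simplified standalone model of that reconciliation (the error strings and the MaxEncodable parameter are illustrative, not the assembler's exact diagnostics or field width):

    #include <optional>
    #include <stdexcept>

    // Simplified model of the user-SGPR count reconciliation shown above.
    unsigned resolveUserSgprCount(unsigned ImpliedCount,
                                  std::optional<unsigned> ExplicitCount,
                                  unsigned MaxEncodable) {
      if (ExplicitCount && ImpliedCount > *ExplicitCount)
        throw std::runtime_error(
            "user SGPR count smaller than implied by enabled user SGPRs");
      unsigned Count = ExplicitCount ? *ExplicitCount : ImpliedCount;
      if (Count > MaxEncodable)
        throw std::runtime_error("too many user SGPRs enabled");
      return Count;
    }
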
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/DSInstructions.td b/contrib/llvm-project/llvm/lib/Target/AMDGPU/DSInstructions.td
index 104b5160b985..c4043177b618 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/DSInstructions.td
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/DSInstructions.td
@@ -89,7 +89,6 @@ class DS_Real <DS_Pseudo ps> :
!if(!or(ps.has_data0, ps.has_gws_data0), data0{9}, 0));
}
-
// DS Pseudo instructions
class DS_0A1D_NORET<string opName, RegisterClass rc = VGPR_32>
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/FLATInstructions.td b/contrib/llvm-project/llvm/lib/Target/AMDGPU/FLATInstructions.td
index c7ec5308e6d0..c530d3cb49f0 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/FLATInstructions.td
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/FLATInstructions.td
@@ -915,7 +915,7 @@ class FlatSignedAtomicPatNoRtn <FLAT_Pseudo inst, SDPatternOperator node, ValueT
class FlatSignedAtomicPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt,
ValueType data_vt = vt> : GCNPat <
(vt (node (GlobalOffset i64:$vaddr, i16:$offset), data_vt:$data)),
- (inst $vaddr, $data, $offset)
+ (inst VReg_64:$vaddr, getVregSrcForVT<data_vt>.ret:$data, $offset)
>;
class ScratchLoadSignedPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
index 0f8dd0b3bf58..c0592f6f3c7a 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
@@ -95,7 +95,9 @@ static bool isDGEMM(unsigned Opcode) {
return Opcode == AMDGPU::V_MFMA_F64_4X4X4F64_e64 ||
Opcode == AMDGPU::V_MFMA_F64_4X4X4F64_vgprcd_e64 ||
Opcode == AMDGPU::V_MFMA_F64_16X16X4F64_e64 ||
- Opcode == AMDGPU::V_MFMA_F64_16X16X4F64_vgprcd_e64;
+ Opcode == AMDGPU::V_MFMA_F64_16X16X4F64_vgprcd_e64 ||
+ Opcode == AMDGPU::V_MFMA_F64_16X16X4F64_mac_e64 ||
+ Opcode == AMDGPU::V_MFMA_F64_16X16X4F64_mac_vgprcd_e64;
}
static bool isXDL(const GCNSubtarget &ST, const MachineInstr &MI) {
@@ -1438,7 +1440,7 @@ int GCNHazardRecognizer::checkMAIHazards90A(MachineInstr *MI) {
if (!Use.isReg())
continue;
- unsigned Reg = Use.getReg();
+ Register Reg = Use.getReg();
bool FullReg;
const MachineInstr *MI1;
@@ -1477,6 +1479,8 @@ int GCNHazardRecognizer::checkMAIHazards90A(MachineInstr *MI) {
switch (Opc1) {
case AMDGPU::V_MFMA_F64_16X16X4F64_e64:
case AMDGPU::V_MFMA_F64_16X16X4F64_vgprcd_e64:
+ case AMDGPU::V_MFMA_F64_16X16X4F64_mac_e64:
+ case AMDGPU::V_MFMA_F64_16X16X4F64_mac_vgprcd_e64:
if (!isXDL(ST, *MI))
NeedWaitStates = DMFMA16x16WritesVGPROverlappedSrcCWaitStates;
break;
@@ -1509,6 +1513,8 @@ int GCNHazardRecognizer::checkMAIHazards90A(MachineInstr *MI) {
switch (Opc1) {
case AMDGPU::V_MFMA_F64_16X16X4F64_e64:
case AMDGPU::V_MFMA_F64_16X16X4F64_vgprcd_e64:
+ case AMDGPU::V_MFMA_F64_16X16X4F64_mac_e64:
+ case AMDGPU::V_MFMA_F64_16X16X4F64_mac_vgprcd_e64:
NeedWaitStates = DMFMA16x16WritesVGPROverlappedMFMASrcABWaitStates;
break;
case AMDGPU::V_MFMA_F64_4X4X4F64_e64:
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.h b/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.h
index 162121c2c525..716bc027a894 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.h
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.h
@@ -25,7 +25,6 @@ class MachineFunction;
class MachineInstr;
class MachineOperand;
class MachineRegisterInfo;
-class ScheduleDAG;
class SIInstrInfo;
class SIRegisterInfo;
class GCNSubtarget;
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
index 82c09378acac..fb106d98c162 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
@@ -27,7 +27,7 @@ void llvm::printLivesAt(SlotIndex SI,
<< *LIS.getInstructionFromIndex(SI);
unsigned Num = 0;
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
- const unsigned Reg = Register::index2VirtReg(I);
+ const Register Reg = Register::index2VirtReg(I);
if (!LIS.hasInterval(Reg))
continue;
const auto &LI = LIS.getInterval(Reg);
@@ -487,7 +487,7 @@ void GCNRPTracker::printLiveRegs(raw_ostream &OS, const LiveRegSet& LiveRegs,
const MachineRegisterInfo &MRI) {
const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo();
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
- unsigned Reg = Register::index2VirtReg(I);
+ Register Reg = Register::index2VirtReg(I);
auto It = LiveRegs.find(Reg);
if (It != LiveRegs.end() && It->second.any())
OS << ' ' << printVRegOrUnit(Reg, TRI) << ':'
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h b/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
index 53d6ff0aa731..a6e42ad3dfca 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
@@ -140,4 +140,4 @@ public:
} // End namespace llvm
-#endif // GCNSCHEDSTRATEGY_H
+#endif // LLVM_LIB_TARGET_AMDGPU_GCNSCHEDSTRATEGY_H
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index d8bc0b2df2bd..0cd2cfa2f0e7 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -153,7 +153,6 @@ protected:
bool HasGetWaveIdInst;
bool HasSMemTimeInst;
bool HasShaderCyclesRegister;
- bool HasRegisterBanking;
bool HasVOP3Literal;
bool HasNoDataDepHazard;
bool FlatAddressSpace;
@@ -723,10 +722,6 @@ public:
return HasShaderCyclesRegister;
}
- bool hasRegisterBanking() const {
- return HasRegisterBanking;
- }
-
bool hasVOP3Literal() const {
return HasVOP3Literal;
}
@@ -1029,7 +1024,7 @@ public:
/// \returns Reserved number of SGPRs. This is common
/// utility function called by MachineFunction and
/// Function variants of getReservedNumSGPRs.
- unsigned getBaseReservedNumSGPRs(const bool HasFlatScratchInit) const;
+ unsigned getBaseReservedNumSGPRs(const bool HasFlatScratch) const;
/// \returns Reserved number of SGPRs for given machine function \p MF.
unsigned getReservedNumSGPRs(const MachineFunction &MF) const;
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
index b68b4b12e750..76663b563150 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
@@ -1397,21 +1397,26 @@ void AMDGPUInstPrinter::printWaitFlag(const MCInst *MI, unsigned OpNo,
unsigned Vmcnt, Expcnt, Lgkmcnt;
decodeWaitcnt(ISA, SImm16, Vmcnt, Expcnt, Lgkmcnt);
+ bool IsDefaultVmcnt = Vmcnt == getVmcntBitMask(ISA);
+ bool IsDefaultExpcnt = Expcnt == getExpcntBitMask(ISA);
+ bool IsDefaultLgkmcnt = Lgkmcnt == getLgkmcntBitMask(ISA);
+ bool PrintAll = IsDefaultVmcnt && IsDefaultExpcnt && IsDefaultLgkmcnt;
+
bool NeedSpace = false;
- if (Vmcnt != getVmcntBitMask(ISA)) {
+ if (!IsDefaultVmcnt || PrintAll) {
O << "vmcnt(" << Vmcnt << ')';
NeedSpace = true;
}
- if (Expcnt != getExpcntBitMask(ISA)) {
+ if (!IsDefaultExpcnt || PrintAll) {
if (NeedSpace)
O << ' ';
O << "expcnt(" << Expcnt << ')';
NeedSpace = true;
}
- if (Lgkmcnt != getLgkmcntBitMask(ISA)) {
+ if (!IsDefaultLgkmcnt || PrintAll) {
if (NeedSpace)
O << ' ';
O << "lgkmcnt(" << Lgkmcnt << ')';
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp
index 7708579a4491..ded3fb7ab8d9 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp
@@ -15,8 +15,7 @@
using namespace llvm;
AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(const Triple &TT,
- const MCTargetOptions &Options)
- : MCAsmInfoELF() {
+ const MCTargetOptions &Options) {
CodePointerSize = (TT.getArch() == Triple::amdgcn) ? 8 : 4;
StackGrowsUp = true;
HasSingleParameterDotFile = false;
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp
index 9a9a2c973f44..9578bdb0bad0 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp
@@ -319,6 +319,10 @@ void AMDGPUTargetAsmStreamer::EmitAmdhsaKernelDescriptor(
<< KD.private_segment_fixed_size << '\n';
OS << "\t\t.amdhsa_kernarg_size " << KD.kernarg_size << '\n';
+ PRINT_FIELD(OS, ".amdhsa_user_sgpr_count", KD,
+ compute_pgm_rsrc2,
+ amdhsa::COMPUTE_PGM_RSRC2_USER_SGPR_COUNT);
+
if (!hasArchitectedFlatScratch(STI))
PRINT_FIELD(
OS, ".amdhsa_user_sgpr_private_segment_buffer", KD,
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/MIMGInstructions.td b/contrib/llvm-project/llvm/lib/Target/AMDGPU/MIMGInstructions.td
index 6dd886367302..cf03fd682143 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/MIMGInstructions.td
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/MIMGInstructions.td
@@ -131,6 +131,38 @@ def MIMGMIPMappingTable : GenericTable {
let PrimaryKeyName = "getMIMGMIPMappingInfo";
}
+class MIMGBiasMapping<MIMGBaseOpcode bias, MIMGBaseOpcode nobias> {
+ MIMGBaseOpcode Bias = bias;
+ MIMGBaseOpcode NoBias = nobias;
+}
+
+def MIMGBiasMappingTable : GenericTable {
+ let FilterClass = "MIMGBiasMapping";
+ let CppTypeName = "MIMGBiasMappingInfo";
+ let Fields = ["Bias", "NoBias"];
+ string TypeOf_Bias = "MIMGBaseOpcode";
+ string TypeOf_NoBias = "MIMGBaseOpcode";
+
+ let PrimaryKey = ["Bias"];
+ let PrimaryKeyName = "getMIMGBiasMappingInfo";
+}
+
+class MIMGOffsetMapping<MIMGBaseOpcode offset, MIMGBaseOpcode nooffset> {
+ MIMGBaseOpcode Offset = offset;
+ MIMGBaseOpcode NoOffset = nooffset;
+}
+
+def MIMGOffsetMappingTable : GenericTable {
+ let FilterClass = "MIMGOffsetMapping";
+ let CppTypeName = "MIMGOffsetMappingInfo";
+ let Fields = ["Offset", "NoOffset"];
+ string TypeOf_Offset = "MIMGBaseOpcode";
+ string TypeOf_NoOffset = "MIMGBaseOpcode";
+
+ let PrimaryKey = ["Offset"];
+ let PrimaryKeyName = "getMIMGOffsetMappingInfo";
+}
+
class MIMGG16Mapping<MIMGBaseOpcode g, MIMGBaseOpcode g16> {
MIMGBaseOpcode G = g;
MIMGBaseOpcode G16 = g16;
@@ -1070,6 +1102,9 @@ class ImageDimIntrinsicInfo<AMDGPUImageDimIntrinsic I> {
AMDGPUDimProps Dim = I.P.Dim;
AMDGPUImageDimIntrinsicEval DimEval = AMDGPUImageDimIntrinsicEval<I.P>;
+ bits<8> NumOffsetArgs = DimEval.NumOffsetArgs;
+ bits<8> NumBiasArgs = DimEval.NumBiasArgs;
+ bits<8> NumZCompareArgs = DimEval.NumZCompareArgs;
bits<8> NumGradients = DimEval.NumGradientArgs;
bits<8> NumDmask = DimEval.NumDmaskArgs;
bits<8> NumData = DimEval.NumDataArgs;
@@ -1078,6 +1113,9 @@ class ImageDimIntrinsicInfo<AMDGPUImageDimIntrinsic I> {
bits<8> DMaskIndex = DimEval.DmaskArgIndex;
bits<8> VAddrStart = DimEval.VAddrArgIndex;
+ bits<8> OffsetIndex = DimEval.OffsetArgIndex;
+ bits<8> BiasIndex = DimEval.BiasArgIndex;
+ bits<8> ZCompareIndex = DimEval.ZCompareArgIndex;
bits<8> GradientStart = DimEval.GradientArgIndex;
bits<8> CoordStart = DimEval.CoordArgIndex;
bits<8> LodIndex = DimEval.LodArgIndex;
@@ -1089,6 +1127,8 @@ class ImageDimIntrinsicInfo<AMDGPUImageDimIntrinsic I> {
bits<8> TexFailCtrlIndex = DimEval.TexFailCtrlArgIndex;
bits<8> CachePolicyIndex = DimEval.CachePolicyArgIndex;
+ bits<8> BiasTyArg = !add(I.P.NumRetAndDataAnyTypes,
+ !if(!eq(NumOffsetArgs, 0), 0, I.P.ExtraAddrArgs[0].Type.isAny));
bits<8> GradientTyArg = !add(I.P.NumRetAndDataAnyTypes,
!foldl(0, I.P.ExtraAddrArgs, cnt, arg, !add(cnt, arg.Type.isAny)));
bits<8> CoordTyArg = !add(GradientTyArg, !if(I.P.Gradients, 1, 0));
@@ -1096,10 +1136,10 @@ class ImageDimIntrinsicInfo<AMDGPUImageDimIntrinsic I> {
def ImageDimIntrinsicTable : GenericTable {
let FilterClass = "ImageDimIntrinsicInfo";
- let Fields = ["Intr", "BaseOpcode", "Dim", "NumGradients", "NumDmask", "NumData", "NumVAddrs", "NumArgs",
- "DMaskIndex", "VAddrStart", "GradientStart", "CoordStart", "LodIndex", "MipIndex", "VAddrEnd",
+ let Fields = ["Intr", "BaseOpcode", "Dim", "NumOffsetArgs", "NumBiasArgs", "NumZCompareArgs", "NumGradients", "NumDmask", "NumData", "NumVAddrs", "NumArgs",
+ "DMaskIndex", "VAddrStart", "OffsetIndex", "BiasIndex", "ZCompareIndex", "GradientStart", "CoordStart", "LodIndex", "MipIndex", "VAddrEnd",
"RsrcIndex", "SampIndex", "UnormIndex", "TexFailCtrlIndex", "CachePolicyIndex",
- "GradientTyArg", "CoordTyArg"];
+ "BiasTyArg", "GradientTyArg", "CoordTyArg"];
string TypeOf_BaseOpcode = "MIMGBaseOpcode";
string TypeOf_Dim = "MIMGDim";
@@ -1132,6 +1172,66 @@ def : MIMGLZMapping<IMAGE_GATHER4_C_L_O, IMAGE_GATHER4_C_LZ_O>;
def : MIMGMIPMapping<IMAGE_LOAD_MIP, IMAGE_LOAD>;
def : MIMGMIPMapping<IMAGE_STORE_MIP, IMAGE_STORE>;
+// Bias to NoBias Optimization Mapping
+def : MIMGBiasMapping<IMAGE_SAMPLE_B, IMAGE_SAMPLE>;
+def : MIMGBiasMapping<IMAGE_SAMPLE_B_CL, IMAGE_SAMPLE_CL>;
+def : MIMGBiasMapping<IMAGE_SAMPLE_C_B, IMAGE_SAMPLE_C>;
+def : MIMGBiasMapping<IMAGE_SAMPLE_C_B_CL, IMAGE_SAMPLE_C_CL>;
+def : MIMGBiasMapping<IMAGE_SAMPLE_B_O, IMAGE_SAMPLE_O>;
+def : MIMGBiasMapping<IMAGE_SAMPLE_B_CL_O, IMAGE_SAMPLE_CL_O>;
+def : MIMGBiasMapping<IMAGE_SAMPLE_C_B_O, IMAGE_SAMPLE_C_O>;
+def : MIMGBiasMapping<IMAGE_SAMPLE_C_B_CL_O, IMAGE_SAMPLE_C_CL_O>;
+def : MIMGBiasMapping<IMAGE_GATHER4_B, IMAGE_GATHER4>;
+def : MIMGBiasMapping<IMAGE_GATHER4_B_CL, IMAGE_GATHER4_CL>;
+def : MIMGBiasMapping<IMAGE_GATHER4_C_B, IMAGE_GATHER4_C>;
+def : MIMGBiasMapping<IMAGE_GATHER4_C_B_CL, IMAGE_GATHER4_C_CL>;
+def : MIMGBiasMapping<IMAGE_GATHER4_B_O, IMAGE_GATHER4_O>;
+def : MIMGBiasMapping<IMAGE_GATHER4_B_CL_O, IMAGE_GATHER4_CL_O>;
+def : MIMGBiasMapping<IMAGE_GATHER4_C_B_O, IMAGE_GATHER4_C_O>;
+def : MIMGBiasMapping<IMAGE_GATHER4_C_B_CL_O, IMAGE_GATHER4_C_CL_O>;
+
+// Offset to NoOffset Optimization Mapping
+def : MIMGOffsetMapping<IMAGE_SAMPLE_O, IMAGE_SAMPLE>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_CL_O, IMAGE_SAMPLE_CL>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_D_O, IMAGE_SAMPLE_D>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_D_CL_O, IMAGE_SAMPLE_D_CL>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_D_O_G16, IMAGE_SAMPLE_D_G16>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_D_CL_O_G16, IMAGE_SAMPLE_D_CL_G16>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_L_O, IMAGE_SAMPLE_L>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_B_O, IMAGE_SAMPLE_B>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_B_CL_O, IMAGE_SAMPLE_B_CL>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_LZ_O, IMAGE_SAMPLE_LZ>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_C_O, IMAGE_SAMPLE_C>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_C_CL_O, IMAGE_SAMPLE_C_CL>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_C_D_O, IMAGE_SAMPLE_C_D>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_C_D_CL_O, IMAGE_SAMPLE_C_D_CL>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_C_D_O_G16, IMAGE_SAMPLE_C_D_G16>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_C_D_CL_O_G16, IMAGE_SAMPLE_C_D_CL_G16>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_C_L_O, IMAGE_SAMPLE_C_L>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_C_B_CL_O, IMAGE_SAMPLE_C_B_CL>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_C_B_O, IMAGE_SAMPLE_C_B>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_C_LZ_O, IMAGE_SAMPLE_C_LZ>;
+def : MIMGOffsetMapping<IMAGE_GATHER4_O, IMAGE_GATHER4>;
+def : MIMGOffsetMapping<IMAGE_GATHER4_CL_O, IMAGE_GATHER4_CL>;
+def : MIMGOffsetMapping<IMAGE_GATHER4_L_O, IMAGE_GATHER4_L>;
+def : MIMGOffsetMapping<IMAGE_GATHER4_B_O, IMAGE_GATHER4_B>;
+def : MIMGOffsetMapping<IMAGE_GATHER4_B_CL_O, IMAGE_GATHER4_B_CL>;
+def : MIMGOffsetMapping<IMAGE_GATHER4_LZ_O, IMAGE_GATHER4_LZ>;
+def : MIMGOffsetMapping<IMAGE_GATHER4_C_O, IMAGE_GATHER4_C>;
+def : MIMGOffsetMapping<IMAGE_GATHER4_C_CL_O, IMAGE_GATHER4_C_CL>;
+def : MIMGOffsetMapping<IMAGE_GATHER4_C_L_O, IMAGE_GATHER4_C_L>;
+def : MIMGOffsetMapping<IMAGE_GATHER4_C_B_O, IMAGE_GATHER4_C_B>;
+def : MIMGOffsetMapping<IMAGE_GATHER4_C_B_CL_O, IMAGE_GATHER4_C_B_CL>;
+def : MIMGOffsetMapping<IMAGE_GATHER4_C_LZ_O, IMAGE_GATHER4_C_LZ>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_CD_O, IMAGE_SAMPLE_CD>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_CD_CL_O, IMAGE_SAMPLE_CD_CL>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_C_CD_O, IMAGE_SAMPLE_C_CD>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_C_CD_CL_O, IMAGE_SAMPLE_C_CD_CL>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_CD_O_G16, IMAGE_SAMPLE_CD_G16>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_CD_CL_O_G16, IMAGE_SAMPLE_CD_CL_G16>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_C_CD_O_G16, IMAGE_SAMPLE_C_CD_G16>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_C_CD_CL_O_G16, IMAGE_SAMPLE_C_CD_CL_G16>;
+
// G to G16 Optimization Mapping
def : MIMGG16Mapping<IMAGE_SAMPLE_D, IMAGE_SAMPLE_D_G16>;
def : MIMGG16Mapping<IMAGE_SAMPLE_D_CL, IMAGE_SAMPLE_D_CL_G16>;
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/R600ISelLowering.h b/contrib/llvm-project/llvm/lib/Target/AMDGPU/R600ISelLowering.h
index f9a9a6127322..1e75a0432ec3 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/R600ISelLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/R600ISelLowering.h
@@ -19,7 +19,6 @@
namespace llvm {
-class R600InstrInfo;
class R600Subtarget;
class R600TargetLowering final : public AMDGPUTargetLowering {
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/R600InstrInfo.h b/contrib/llvm-project/llvm/lib/Target/AMDGPU/R600InstrInfo.h
index fc567f1a1fca..bc8a4786df77 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/R600InstrInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/R600InstrInfo.h
@@ -29,7 +29,6 @@ enum : uint64_t {
};
}
-class AMDGPUTargetMachine;
class DFAPacketizer;
class MachineFunction;
class MachineInstr;
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/R600Subtarget.h b/contrib/llvm-project/llvm/lib/Target/AMDGPU/R600Subtarget.h
index 94403b88f21a..92d559b1f8e6 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/R600Subtarget.h
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/R600Subtarget.h
@@ -21,12 +21,6 @@
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
-namespace llvm {
-
-class MCInstrInfo;
-
-} // namespace llvm
-
#define GET_SUBTARGETINFO_HEADER
#include "R600GenSubtargetInfo.inc"
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
index 397b2f873515..b81fac36fc95 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
@@ -245,6 +245,12 @@ Value *SIAnnotateControlFlow::handleLoopCondition(
return CallInst::Create(IfBreak, Args, "", Insert);
}
+ if (isa<Argument>(Cond)) {
+ Instruction *Insert = L->getHeader()->getFirstNonPHIOrDbgOrLifetime();
+ Value *Args[] = { Cond, Broken };
+ return CallInst::Create(IfBreak, Args, "", Insert);
+ }
+
llvm_unreachable("Unhandled loop condition!");
}
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIDefines.h b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIDefines.h
index 580e4bc417a4..107ee5ed5532 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIDefines.h
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIDefines.h
@@ -379,6 +379,8 @@ enum Id { // HwRegCode, (6) [5:0]
ID_FLAT_SCR_LO = 20,
ID_FLAT_SCR_HI = 21,
ID_XNACK_MASK = 22,
+ ID_HW_ID1 = 23,
+ ID_HW_ID2 = 24,
ID_POPS_PACKER = 25,
ID_SHADER_CYCLES = 29,
ID_SYMBOLIC_FIRST_GFX1030_ = ID_SHADER_CYCLES,
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 1f93284fc7ee..33954e11d6c6 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -300,6 +300,13 @@ static bool updateOperand(FoldCandidate &Fold,
assert(!Fold.needsShrink() && "not handled");
if (Fold.isImm()) {
+ if (Old.isTied()) {
+ int NewMFMAOpc = AMDGPU::getMFMAEarlyClobberOp(MI->getOpcode());
+ if (NewMFMAOpc == -1)
+ return false;
+ MI->setDesc(TII.get(NewMFMAOpc));
+ MI->untieRegOperand(0);
+ }
Old.ChangeToImmediate(Fold.ImmToFold);
return true;
}
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
index d4fe74ecb96e..6078f4a0577a 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
@@ -1195,7 +1195,8 @@ void SIFrameLowering::processFunctionBeforeFrameFinalized(
}
} else if (TII->isStoreToStackSlot(MI, FrameIndex) ||
TII->isLoadFromStackSlot(MI, FrameIndex))
- NonVGPRSpillFIs.set(FrameIndex);
+ if (!MFI.isFixedObjectIndex(FrameIndex))
+ NonVGPRSpillFIs.set(FrameIndex);
}
}
@@ -1320,16 +1321,14 @@ void SIFrameLowering::determineCalleeSavesSGPR(MachineFunction &MF,
const BitVector AllSavedRegs = SavedRegs;
SavedRegs.clearBitsInMask(TRI->getAllVectorRegMask());
- // If clearing VGPRs changed the mask, we will have some CSR VGPR spills.
- const bool HaveAnyCSRVGPR = SavedRegs != AllSavedRegs;
-
// We have to anticipate introducing CSR VGPR spills or spill of caller
// save VGPR reserved for SGPR spills as we now always create stack entry
- // for it, if we don't have any stack objects already, since we require
- // an FP if there is a call and stack.
+ // for it, if we don't have any stack objects already, since we require an FP
+ // if there is a call and stack. We will allocate a VGPR for SGPR spills if
+ // there are any SGPR spills, whether or not they are CSR spills.
MachineFrameInfo &FrameInfo = MF.getFrameInfo();
const bool WillHaveFP =
- FrameInfo.hasCalls() && (HaveAnyCSRVGPR || MFI->VGPRReservedForSGPRSpill);
+ FrameInfo.hasCalls() && (AllSavedRegs.any() || MFI->hasSpilledSGPRs());
// FP will be specially managed like SP.
if (WillHaveFP || hasFP(MF))
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIFrameLowering.h b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIFrameLowering.h
index 56fbb875ffd9..7949dcfa6632 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIFrameLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIFrameLowering.h
@@ -13,11 +13,6 @@
namespace llvm {
-class SIInstrInfo;
-class SIMachineFunctionInfo;
-class SIRegisterInfo;
-class GCNSubtarget;
-
class SIFrameLowering final : public AMDGPUFrameLowering {
public:
SIFrameLowering(StackDirection D, Align StackAl, int LAO,
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 9f138136e6e9..561866b5a398 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -45,10 +45,6 @@ static cl::opt<bool> DisableLoopAlignment(
cl::desc("Do not align and prefetch loops"),
cl::init(false));
-static cl::opt<bool> VGPRReserveforSGPRSpill(
- "amdgpu-reserve-vgpr-for-sgpr-spill",
- cl::desc("Allocates one VGPR for future SGPR Spill"), cl::init(true));
-
static cl::opt<bool> UseDivergentRegisterIndexing(
"amdgpu-use-divergent-register-indexing",
cl::Hidden,
@@ -138,6 +134,8 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32RegClass);
addRegisterClass(MVT::v4i16, &AMDGPU::SReg_64RegClass);
addRegisterClass(MVT::v4f16, &AMDGPU::SReg_64RegClass);
+ addRegisterClass(MVT::v8i16, &AMDGPU::SGPR_128RegClass);
+ addRegisterClass(MVT::v8f16, &AMDGPU::SGPR_128RegClass);
}
addRegisterClass(MVT::v32i32, &AMDGPU::VReg_1024RegClass);
@@ -273,7 +271,8 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
MVT::v2i64, MVT::v2f64, MVT::v4i16, MVT::v4f16,
MVT::v3i64, MVT::v3f64, MVT::v6i32, MVT::v6f32,
MVT::v4i64, MVT::v4f64, MVT::v8i64, MVT::v8f64,
- MVT::v16i64, MVT::v16f64, MVT::v32i32, MVT::v32f32 }) {
+ MVT::v8i16, MVT::v8f16, MVT::v16i64, MVT::v16f64,
+ MVT::v32i32, MVT::v32f32 }) {
for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
switch (Op) {
case ISD::LOAD:
@@ -615,7 +614,8 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
if (STI.hasMadF16())
setOperationAction(ISD::FMAD, MVT::f16, Legal);
- for (MVT VT : {MVT::v2i16, MVT::v2f16, MVT::v4i16, MVT::v4f16}) {
+ for (MVT VT : {MVT::v2i16, MVT::v2f16, MVT::v4i16, MVT::v4f16, MVT::v8i16,
+ MVT::v8f16}) {
for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
switch (Op) {
case ISD::LOAD:
@@ -677,6 +677,21 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
setOperationAction(ISD::STORE, MVT::v4f16, Promote);
AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32);
+ setOperationAction(ISD::LOAD, MVT::v8i16, Promote);
+ AddPromotedToType(ISD::LOAD, MVT::v8i16, MVT::v4i32);
+ setOperationAction(ISD::LOAD, MVT::v8f16, Promote);
+ AddPromotedToType(ISD::LOAD, MVT::v8f16, MVT::v4i32);
+
+ setOperationAction(ISD::STORE, MVT::v4i16, Promote);
+ AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32);
+ setOperationAction(ISD::STORE, MVT::v4f16, Promote);
+ AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32);
+
+ setOperationAction(ISD::STORE, MVT::v8i16, Promote);
+ AddPromotedToType(ISD::STORE, MVT::v8i16, MVT::v4i32);
+ setOperationAction(ISD::STORE, MVT::v8f16, Promote);
+ AddPromotedToType(ISD::STORE, MVT::v8f16, MVT::v4i32);
+
setOperationAction(ISD::ANY_EXTEND, MVT::v2i32, Expand);
setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand);
setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand);
@@ -686,6 +701,10 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
setOperationAction(ISD::ZERO_EXTEND, MVT::v4i32, Expand);
setOperationAction(ISD::SIGN_EXTEND, MVT::v4i32, Expand);
+ setOperationAction(ISD::ANY_EXTEND, MVT::v8i32, Expand);
+ setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Expand);
+ setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Expand);
+
if (!Subtarget->hasVOP3PInsts()) {
setOperationAction(ISD::BUILD_VECTOR, MVT::v2i16, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
@@ -703,9 +722,20 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
setOperationAction(ISD::FMINNUM_IEEE, MVT::v4f16, Custom);
setOperationAction(ISD::FMAXNUM_IEEE, MVT::v4f16, Custom);
+ setOperationAction(ISD::FMINNUM_IEEE, MVT::v8f16, Custom);
+ setOperationAction(ISD::FMAXNUM_IEEE, MVT::v8f16, Custom);
setOperationAction(ISD::FMINNUM, MVT::v4f16, Expand);
setOperationAction(ISD::FMAXNUM, MVT::v4f16, Expand);
+ setOperationAction(ISD::FMINNUM, MVT::v8f16, Expand);
+ setOperationAction(ISD::FMAXNUM, MVT::v8f16, Expand);
+
+ for (MVT Vec16 : { MVT::v8i16, MVT::v8f16 }) {
+ setOperationAction(ISD::BUILD_VECTOR, Vec16, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec16, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, Vec16, Expand);
+ setOperationAction(ISD::SCALAR_TO_VECTOR, Vec16, Expand);
+ }
}
if (Subtarget->hasVOP3PInsts()) {
@@ -739,34 +769,42 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f16, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f16, Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i16, Custom);
- setOperationAction(ISD::SHL, MVT::v4i16, Custom);
- setOperationAction(ISD::SRA, MVT::v4i16, Custom);
- setOperationAction(ISD::SRL, MVT::v4i16, Custom);
- setOperationAction(ISD::ADD, MVT::v4i16, Custom);
- setOperationAction(ISD::SUB, MVT::v4i16, Custom);
- setOperationAction(ISD::MUL, MVT::v4i16, Custom);
+ for (MVT VT : { MVT::v4i16, MVT::v8i16 }) {
+ // Split vector operations.
+ setOperationAction(ISD::SHL, VT, Custom);
+ setOperationAction(ISD::SRA, VT, Custom);
+ setOperationAction(ISD::SRL, VT, Custom);
+ setOperationAction(ISD::ADD, VT, Custom);
+ setOperationAction(ISD::SUB, VT, Custom);
+ setOperationAction(ISD::MUL, VT, Custom);
- setOperationAction(ISD::SMIN, MVT::v4i16, Custom);
- setOperationAction(ISD::SMAX, MVT::v4i16, Custom);
- setOperationAction(ISD::UMIN, MVT::v4i16, Custom);
- setOperationAction(ISD::UMAX, MVT::v4i16, Custom);
+ setOperationAction(ISD::SMIN, VT, Custom);
+ setOperationAction(ISD::SMAX, VT, Custom);
+ setOperationAction(ISD::UMIN, VT, Custom);
+ setOperationAction(ISD::UMAX, VT, Custom);
- setOperationAction(ISD::UADDSAT, MVT::v4i16, Custom);
- setOperationAction(ISD::SADDSAT, MVT::v4i16, Custom);
- setOperationAction(ISD::USUBSAT, MVT::v4i16, Custom);
- setOperationAction(ISD::SSUBSAT, MVT::v4i16, Custom);
+ setOperationAction(ISD::UADDSAT, VT, Custom);
+ setOperationAction(ISD::SADDSAT, VT, Custom);
+ setOperationAction(ISD::USUBSAT, VT, Custom);
+ setOperationAction(ISD::SSUBSAT, VT, Custom);
+ }
- setOperationAction(ISD::FADD, MVT::v4f16, Custom);
- setOperationAction(ISD::FMUL, MVT::v4f16, Custom);
- setOperationAction(ISD::FMA, MVT::v4f16, Custom);
+ for (MVT VT : { MVT::v4f16, MVT::v8f16 }) {
+ // Split vector operations.
+ setOperationAction(ISD::FADD, VT, Custom);
+ setOperationAction(ISD::FMUL, VT, Custom);
+ setOperationAction(ISD::FMA, VT, Custom);
+ setOperationAction(ISD::FCANONICALIZE, VT, Custom);
+ }
setOperationAction(ISD::FMAXNUM, MVT::v2f16, Custom);
setOperationAction(ISD::FMINNUM, MVT::v2f16, Custom);
setOperationAction(ISD::FMINNUM, MVT::v4f16, Custom);
setOperationAction(ISD::FMAXNUM, MVT::v4f16, Custom);
- setOperationAction(ISD::FCANONICALIZE, MVT::v4f16, Custom);
setOperationAction(ISD::FEXP, MVT::v2f16, Custom);
setOperationAction(ISD::SELECT, MVT::v4i16, Custom);
@@ -803,7 +841,8 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
setOperationAction(ISD::FABS, MVT::v2f16, Custom);
}
- for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) {
+ for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8,
+ MVT::v8i16, MVT::v8f16 }) {
setOperationAction(ISD::SELECT, VT, Custom);
}
@@ -2776,6 +2815,7 @@ void SITargetLowering::passSpecialInputs(
SelectionDAG &DAG = CLI.DAG;
const SDLoc &DL = CLI.DL;
+ const Function &F = DAG.getMachineFunction().getFunction();
const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo();
@@ -2887,11 +2927,16 @@ void SITargetLowering::passSpecialInputs(
// If incoming ids are not packed we need to pack them.
if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo->WorkItemIDX &&
- NeedWorkItemIDX)
- InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgX);
+ NeedWorkItemIDX) {
+ if (Subtarget->getMaxWorkitemID(F, 0) != 0) {
+ InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgX);
+ } else {
+ InputReg = DAG.getConstant(0, DL, MVT::i32);
+ }
+ }
if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo->WorkItemIDY &&
- NeedWorkItemIDY) {
+ NeedWorkItemIDY && Subtarget->getMaxWorkitemID(F, 1) != 0) {
SDValue Y = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgY);
Y = DAG.getNode(ISD::SHL, SL, MVT::i32, Y,
DAG.getShiftAmountConstant(10, MVT::i32, SL));
@@ -2900,7 +2945,7 @@ void SITargetLowering::passSpecialInputs(
}
if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo->WorkItemIDZ &&
- NeedWorkItemIDZ) {
+ NeedWorkItemIDZ && Subtarget->getMaxWorkitemID(F, 2) != 0) {
SDValue Z = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgZ);
Z = DAG.getNode(ISD::SHL, SL, MVT::i32, Z,
DAG.getShiftAmountConstant(20, MVT::i32, SL));
@@ -2909,13 +2954,21 @@ void SITargetLowering::passSpecialInputs(
}
if (!InputReg && (NeedWorkItemIDX || NeedWorkItemIDY || NeedWorkItemIDZ)) {
- // Workitem ids are already packed, any of present incoming arguments
- // will carry all required fields.
- ArgDescriptor IncomingArg = ArgDescriptor::createArg(
- IncomingArgX ? *IncomingArgX :
- IncomingArgY ? *IncomingArgY :
- *IncomingArgZ, ~0u);
- InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, IncomingArg);
+ if (!IncomingArgX && !IncomingArgY && !IncomingArgZ) {
+ // We're in a situation where the outgoing function requires the workitem
+ // ID, but the calling function does not have it (e.g. a graphics function
+ // calling a C calling convention function). This is illegal, but we need
+ // to produce something.
+ InputReg = DAG.getUNDEF(MVT::i32);
+ } else {
+ // Workitem ids are already packed, any of present incoming arguments
+ // will carry all required fields.
+ ArgDescriptor IncomingArg = ArgDescriptor::createArg(
+ IncomingArgX ? *IncomingArgX :
+ IncomingArgY ? *IncomingArgY :
+ *IncomingArgZ, ~0u);
+ InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, IncomingArg);
+ }
}
if (OutgoingArg->isRegister()) {
@@ -4600,7 +4653,8 @@ SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op,
unsigned Opc = Op.getOpcode();
EVT VT = Op.getValueType();
assert(VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4f32 ||
- VT == MVT::v8f32 || VT == MVT::v16f32 || VT == MVT::v32f32);
+ VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8f32 ||
+ VT == MVT::v16f32 || VT == MVT::v32f32);
SDValue Lo0, Hi0;
std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
@@ -4621,21 +4675,26 @@ SDValue SITargetLowering::splitTernaryVectorOp(SDValue Op,
SelectionDAG &DAG) const {
unsigned Opc = Op.getOpcode();
EVT VT = Op.getValueType();
- assert(VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4f32 ||
- VT == MVT::v8f32 || VT == MVT::v16f32 || VT == MVT::v32f32);
+ assert(VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v8i16 ||
+ VT == MVT::v8f16 || VT == MVT::v4f32 || VT == MVT::v8f32 ||
+ VT == MVT::v16f32 || VT == MVT::v32f32);
SDValue Lo0, Hi0;
- std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
+ SDValue Op0 = Op.getOperand(0);
+ std::tie(Lo0, Hi0) = Op0.getValueType().isVector()
+ ? DAG.SplitVectorOperand(Op.getNode(), 0)
+ : std::make_pair(Op0, Op0);
SDValue Lo1, Hi1;
std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
SDValue Lo2, Hi2;
std::tie(Lo2, Hi2) = DAG.SplitVectorOperand(Op.getNode(), 2);
SDLoc SL(Op);
+ auto ResVT = DAG.GetSplitDestVTs(VT);
- SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1, Lo2,
+ SDValue OpLo = DAG.getNode(Opc, SL, ResVT.first, Lo0, Lo1, Lo2,
Op->getFlags());
- SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1, Hi2,
+ SDValue OpHi = DAG.getNode(Opc, SL, ResVT.second, Hi0, Hi1, Hi2,
Op->getFlags());
return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
@@ -5297,7 +5356,7 @@ SDValue SITargetLowering::lowerFMINNUM_FMAXNUM(SDValue Op,
if (IsIEEEMode)
return expandFMINNUM_FMAXNUM(Op.getNode(), DAG);
- if (VT == MVT::v4f16)
+ if (VT == MVT::v4f16 || VT == MVT::v8f16)
return splitBinaryVectorOp(Op, DAG);
return Op;
}
@@ -5501,6 +5560,22 @@ SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
MachineMemOperand::MOInvariant);
}
+/// Return true if the value is a known valid address, such that a null check is
+/// not necessary.
+static bool isKnownNonNull(SDValue Val, SelectionDAG &DAG,
+ const AMDGPUTargetMachine &TM, unsigned AddrSpace) {
+ if (isa<FrameIndexSDNode>(Val) || isa<GlobalAddressSDNode>(Val) ||
+ isa<BasicBlockSDNode>(Val))
+ return true;
+
+ if (auto *ConstVal = dyn_cast<ConstantSDNode>(Val))
+ return ConstVal->getSExtValue() != TM.getNullPointerValue(AddrSpace);
+
+ // TODO: Search through arithmetic, handle arguments and loads
+ // marked nonnull.
+ return false;
+}
+
SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
SelectionDAG &DAG) const {
SDLoc SL(Op);
@@ -5508,48 +5583,64 @@ SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
SDValue Src = ASC->getOperand(0);
SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
+ unsigned SrcAS = ASC->getSrcAddressSpace();
const AMDGPUTargetMachine &TM =
static_cast<const AMDGPUTargetMachine &>(getTargetMachine());
// flat -> local/private
- if (ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
+ if (SrcAS == AMDGPUAS::FLAT_ADDRESS) {
unsigned DestAS = ASC->getDestAddressSpace();
if (DestAS == AMDGPUAS::LOCAL_ADDRESS ||
DestAS == AMDGPUAS::PRIVATE_ADDRESS) {
+ SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
+
+ if (isKnownNonNull(Src, DAG, TM, SrcAS))
+ return Ptr;
+
unsigned NullVal = TM.getNullPointerValue(DestAS);
SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
- SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
- return DAG.getNode(ISD::SELECT, SL, MVT::i32,
- NonNull, Ptr, SegmentNullPtr);
+ return DAG.getNode(ISD::SELECT, SL, MVT::i32, NonNull, Ptr,
+ SegmentNullPtr);
}
}
// local/private -> flat
if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
- unsigned SrcAS = ASC->getSrcAddressSpace();
-
if (SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
SrcAS == AMDGPUAS::PRIVATE_ADDRESS) {
+
+ SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
+ SDValue CvtPtr =
+ DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);
+ CvtPtr = DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr);
+
+ if (isKnownNonNull(Src, DAG, TM, SrcAS))
+ return CvtPtr;
+
unsigned NullVal = TM.getNullPointerValue(SrcAS);
SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
SDValue NonNull
= DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);
- SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
- SDValue CvtPtr
- = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);
-
- return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull,
- DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr),
+ return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull, CvtPtr,
FlatNullPtr);
}
}
+ if (SrcAS == AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
+ Op.getValueType() == MVT::i64) {
+ const SIMachineFunctionInfo *Info =
+ DAG.getMachineFunction().getInfo<SIMachineFunctionInfo>();
+ SDValue Hi = DAG.getConstant(Info->get32BitAddressHighBits(), SL, MVT::i32);
+ SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Hi);
+ return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
+ }
+
if (ASC->getDestAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
Src.getValueType() == MVT::i64)
return DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
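With the new isKnownNonNull helper, lowerADDRSPACECAST can skip the compare-and-select against the segment null value whenever the cast source is already known valid (a frame index, global address, or non-null constant). A hedged, HIP-flavored illustration of when that fires; consume() and the surrounding code are invented for this example and are not part of the patch:

// Sketch under stated assumptions: the address of `local` is a frame index,
// which isKnownNonNull accepts, so the private->flat cast implied by the call
// can return the converted pointer directly instead of selecting against the
// private segment's null value.
#include <hip/hip_runtime.h>

__device__ __noinline__ int consume(int *p) { return *p; } // flat pointer param

__device__ int example() {
  int local = 42;                 // private (scratch) object
  return consume(&local);         // addrspacecast of a known non-null address
}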
@@ -5676,7 +5767,6 @@ SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
EVT VecVT = Vec.getValueType();
unsigned VecSize = VecVT.getSizeInBits();
EVT EltVT = VecVT.getVectorElementType();
- assert(VecSize <= 64);
DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);
@@ -5687,6 +5777,28 @@ SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI))
return Combined;
+ if (VecSize == 128) {
+ SDValue Lo, Hi;
+ EVT LoVT, HiVT;
+ SDValue V2 = DAG.getBitcast(MVT::v2i64, Vec);
+ std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
+ Lo =
+ DAG.getBitcast(LoVT, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i64,
+ V2, DAG.getConstant(0, SL, MVT::i32)));
+ Hi =
+ DAG.getBitcast(HiVT, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i64,
+ V2, DAG.getConstant(1, SL, MVT::i32)));
+ EVT IdxVT = Idx.getValueType();
+ unsigned NElem = VecVT.getVectorNumElements();
+ assert(isPowerOf2_32(NElem));
+ SDValue IdxMask = DAG.getConstant(NElem / 2 - 1, SL, IdxVT);
+ SDValue NewIdx = DAG.getNode(ISD::AND, SL, IdxVT, Idx, IdxMask);
+ SDValue Half = DAG.getSelectCC(SL, Idx, IdxMask, Hi, Lo, ISD::SETUGT);
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Half, NewIdx);
+ }
+
+ assert(VecSize <= 64);
+
unsigned EltSize = EltVT.getSizeInBits();
assert(isPowerOf2_32(EltSize));
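For the new 128-bit case, lowerEXTRACT_VECTOR_ELT splits the vector into two 64-bit halves, picks a half with an unsigned compare of the index against NElem/2 - 1, and re-indexes into it with the masked index. A standalone sketch of that index arithmetic, with an ordinary array standing in for the DAG nodes (names and values are illustrative only):

// Minimal model of the 128-bit extract path added above.
#include <array>
#include <cassert>
#include <cstdio>

int main() {
  std::array<short, 8> Vec = {0, 1, 2, 3, 4, 5, 6, 7}; // stands in for v8i16
  unsigned Idx = 6;                                    // dynamic element index
  unsigned NElem = Vec.size();
  assert((NElem & (NElem - 1)) == 0 && "element count must be a power of two");
  unsigned IdxMask = NElem / 2 - 1;
  unsigned NewIdx = Idx & IdxMask;          // index within the chosen half
  bool UseHi = Idx > IdxMask;               // the SETUGT select in the DAG
  short Elt = Vec[(UseHi ? NElem / 2 : 0) + NewIdx];
  std::printf("%d\n", Elt);                 // prints 6
}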
@@ -5769,20 +5881,27 @@ SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op,
SDLoc SL(Op);
EVT VT = Op.getValueType();
- if (VT == MVT::v4i16 || VT == MVT::v4f16) {
- EVT HalfVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(), 2);
+ if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
+ VT == MVT::v8i16 || VT == MVT::v8f16) {
+ EVT HalfVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(),
+ VT.getVectorNumElements() / 2);
+ MVT HalfIntVT = MVT::getIntegerVT(HalfVT.getSizeInBits());
// Turn into pair of packed build_vectors.
// TODO: Special case for constants that can be materialized with s_mov_b64.
- SDValue Lo = DAG.getBuildVector(HalfVT, SL,
- { Op.getOperand(0), Op.getOperand(1) });
- SDValue Hi = DAG.getBuildVector(HalfVT, SL,
- { Op.getOperand(2), Op.getOperand(3) });
+ SmallVector<SDValue, 4> LoOps, HiOps;
+ for (unsigned I = 0, E = VT.getVectorNumElements() / 2; I != E; ++I) {
+ LoOps.push_back(Op.getOperand(I));
+ HiOps.push_back(Op.getOperand(I + E));
+ }
+ SDValue Lo = DAG.getBuildVector(HalfVT, SL, LoOps);
+ SDValue Hi = DAG.getBuildVector(HalfVT, SL, HiOps);
- SDValue CastLo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Lo);
- SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Hi);
+ SDValue CastLo = DAG.getNode(ISD::BITCAST, SL, HalfIntVT, Lo);
+ SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, HalfIntVT, Hi);
- SDValue Blend = DAG.getBuildVector(MVT::v2i32, SL, { CastLo, CastHi });
+ SDValue Blend = DAG.getBuildVector(MVT::getVectorVT(HalfIntVT, 2), SL,
+ { CastLo, CastHi });
return DAG.getNode(ISD::BITCAST, SL, VT, Blend);
}
@@ -6155,10 +6274,6 @@ SDValue SITargetLowering::lowerImage(SDValue Op,
const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
- const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
- AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
- const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
- AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
unsigned IntrOpcode = Intr->BaseOpcode;
bool IsGFX10Plus = AMDGPU::isGFX10Plus(*Subtarget);
@@ -6246,28 +6361,6 @@ SDValue SITargetLowering::lowerImage(SDValue Op,
unsigned VAddrEnd = ArgOffset + Intr->VAddrEnd;
SmallVector<SDValue, 4> VAddrs;
- // Optimize _L to _LZ when _L is zero
- if (LZMappingInfo) {
- if (auto *ConstantLod = dyn_cast<ConstantFPSDNode>(
- Op.getOperand(ArgOffset + Intr->LodIndex))) {
- if (ConstantLod->isZero() || ConstantLod->isNegative()) {
- IntrOpcode = LZMappingInfo->LZ; // set new opcode to _lz variant of _l
- VAddrEnd--; // remove 'lod'
- }
- }
- }
-
- // Optimize _mip away, when 'lod' is zero
- if (MIPMappingInfo) {
- if (auto *ConstantLod = dyn_cast<ConstantSDNode>(
- Op.getOperand(ArgOffset + Intr->MipIndex))) {
- if (ConstantLod->isZero()) {
- IntrOpcode = MIPMappingInfo->NONMIP; // set new opcode to variant without _mip
- VAddrEnd--; // remove 'mip'
- }
- }
- }
-
// Check for 16 bit addresses or derivatives and pack if true.
MVT VAddrVT =
Op.getOperand(ArgOffset + Intr->GradientStart).getSimpleValueType();
@@ -6283,12 +6376,18 @@ SDValue SITargetLowering::lowerImage(SDValue Op,
// Push back extra arguments.
for (unsigned I = Intr->VAddrStart; I < Intr->GradientStart; I++) {
if (IsA16 && (Op.getOperand(ArgOffset + I).getValueType() == MVT::f16)) {
+ assert(I == Intr->BiasIndex && "Got unexpected 16-bit extra argument");
// Special handling of bias when A16 is on. Bias is of type half but
// occupies full 32-bit.
- SDValue bias = DAG.getBuildVector( MVT::v2f16, DL, {Op.getOperand(ArgOffset + I), DAG.getUNDEF(MVT::f16)});
- VAddrs.push_back(bias);
- } else
+ SDValue Bias = DAG.getBuildVector(
+ MVT::v2f16, DL,
+ {Op.getOperand(ArgOffset + I), DAG.getUNDEF(MVT::f16)});
+ VAddrs.push_back(Bias);
+ } else {
+ assert((!IsA16 || Intr->NumBiasArgs == 0 || I != Intr->BiasIndex) &&
+ "Bias needs to be converted to 16 bit in A16 mode");
VAddrs.push_back(Op.getOperand(ArgOffset + I));
+ }
}
if (BaseOpcode->Gradients && !ST->hasG16() && (IsA16 != IsG16)) {
@@ -6731,14 +6830,23 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
return getPreloadedValue(DAG, *MFI, VT,
AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
case Intrinsic::amdgcn_workitem_id_x:
+ if (Subtarget->getMaxWorkitemID(MF.getFunction(), 0) == 0)
+ return DAG.getConstant(0, DL, MVT::i32);
+
return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
SDLoc(DAG.getEntryNode()),
MFI->getArgInfo().WorkItemIDX);
case Intrinsic::amdgcn_workitem_id_y:
+ if (Subtarget->getMaxWorkitemID(MF.getFunction(), 1) == 0)
+ return DAG.getConstant(0, DL, MVT::i32);
+
return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
SDLoc(DAG.getEntryNode()),
MFI->getArgInfo().WorkItemIDY);
case Intrinsic::amdgcn_workitem_id_z:
+ if (Subtarget->getMaxWorkitemID(MF.getFunction(), 2) == 0)
+ return DAG.getConstant(0, DL, MVT::i32);
+
return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
SDLoc(DAG.getEntryNode()),
MFI->getArgInfo().WorkItemIDZ);
@@ -6899,9 +7007,6 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
DAG.getConstant(1, SL, MVT::i32));
return DAG.getSetCC(SL, MVT::i1, SrcHi, Aperture, ISD::SETEQ);
}
- case Intrinsic::amdgcn_alignbit:
- return DAG.getNode(ISD::FSHR, DL, VT,
- Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
case Intrinsic::amdgcn_perm:
return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, Op.getOperand(1),
Op.getOperand(2), Op.getOperand(3));
@@ -8408,21 +8513,14 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
+ if (VT.getSizeInBits() == 128)
+ return splitTernaryVectorOp(Op, DAG);
+
assert(VT.getSizeInBits() == 64);
SDLoc DL(Op);
SDValue Cond = Op.getOperand(0);
- if (Subtarget->hasScalarCompareEq64() && Op->getOperand(0)->hasOneUse() &&
- !Op->isDivergent()) {
- if (VT == MVT::i64)
- return Op;
- SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Op.getOperand(1));
- SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Op.getOperand(2));
- return DAG.getNode(ISD::BITCAST, DL, VT,
- DAG.getSelect(DL, MVT::i64, Cond, LHS, RHS));
- }
-
SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
SDValue One = DAG.getConstant(1, DL, MVT::i32);
@@ -9550,6 +9648,9 @@ SDValue SITargetLowering::performOrCombine(SDNode *N,
SDValue SITargetLowering::performXorCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
+ if (SDValue RV = reassociateScalarOps(N, DCI.DAG))
+ return RV;
+
EVT VT = N->getValueType(0);
if (VT != MVT::i64)
return SDValue();
@@ -10462,6 +10563,9 @@ SDValue SITargetLowering::reassociateScalarOps(SDNode *N,
if (VT != MVT::i32 && VT != MVT::i64)
return SDValue();
+ if (DAG.isBaseWithConstantOffset(SDValue(N, 0)))
+ return SDValue();
+
unsigned Opc = N->getOpcode();
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
@@ -10483,12 +10587,6 @@ SDValue SITargetLowering::reassociateScalarOps(SDNode *N,
if (Op1->isDivergent())
std::swap(Op1, Op2);
- // If either operand is constant this will conflict with
- // DAGCombiner::ReassociateOps().
- if (DAG.isConstantIntBuildVectorOrConstantInt(Op0) ||
- DAG.isConstantIntBuildVectorOrConstantInt(Op1))
- return SDValue();
-
SDLoc SL(N);
SDValue Add1 = DAG.getNode(Opc, SL, VT, Op0, Op1);
return DAG.getNode(Opc, SL, VT, Add1, Op2);
@@ -11130,7 +11228,9 @@ SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node,
unsigned TFEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::tfe) - 1;
unsigned LWEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::lwe) - 1;
bool UsesTFC = ((int(TFEIdx) >= 0 && Node->getConstantOperandVal(TFEIdx)) ||
- Node->getConstantOperandVal(LWEIdx)) ? 1 : 0;
+ Node->getConstantOperandVal(LWEIdx))
+ ? true
+ : false;
unsigned TFCLane = 0;
bool HasChain = Node->getNumValues() > 1;
@@ -11719,25 +11819,51 @@ SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI_,
return std::make_pair(0U, RC);
}
- if (Constraint.size() > 1) {
- if (Constraint[1] == 'v') {
+ if (Constraint.startswith("{") && Constraint.endswith("}")) {
+ StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
+ if (RegName.consume_front("v")) {
RC = &AMDGPU::VGPR_32RegClass;
- } else if (Constraint[1] == 's') {
+ } else if (RegName.consume_front("s")) {
RC = &AMDGPU::SGPR_32RegClass;
- } else if (Constraint[1] == 'a') {
+ } else if (RegName.consume_front("a")) {
RC = &AMDGPU::AGPR_32RegClass;
}
if (RC) {
uint32_t Idx;
- bool Failed = Constraint.substr(2).getAsInteger(10, Idx);
- if (!Failed && Idx < RC->getNumRegs())
- return std::make_pair(RC->getRegister(Idx), RC);
+ if (RegName.consume_front("[")) {
+ uint32_t End;
+ bool Failed = RegName.consumeInteger(10, Idx);
+ Failed |= !RegName.consume_front(":");
+ Failed |= RegName.consumeInteger(10, End);
+ Failed |= !RegName.consume_back("]");
+ if (!Failed) {
+ uint32_t Width = (End - Idx + 1) * 32;
+ MCRegister Reg = RC->getRegister(Idx);
+ if (SIRegisterInfo::isVGPRClass(RC))
+ RC = TRI->getVGPRClassForBitWidth(Width);
+ else if (SIRegisterInfo::isSGPRClass(RC))
+ RC = TRI->getSGPRClassForBitWidth(Width);
+ else if (SIRegisterInfo::isAGPRClass(RC))
+ RC = TRI->getAGPRClassForBitWidth(Width);
+ if (RC) {
+ Reg = TRI->getMatchingSuperReg(Reg, AMDGPU::sub0, RC);
+ return std::make_pair(Reg, RC);
+ }
+ }
+ } else {
+ bool Failed = RegName.getAsInteger(10, Idx);
+ if (!Failed && Idx < RC->getNumRegs())
+ return std::make_pair(RC->getRegister(Idx), RC);
+ }
}
}
- // FIXME: Returns VS_32 for physical SGPR constraints
- return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
+ auto Ret = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
+ if (Ret.first)
+ Ret.second = TRI->getPhysRegClass(Ret.first);
+
+ return Ret;
}
static bool isImmConstraint(StringRef Constraint) {
@@ -11975,13 +12101,6 @@ void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
}
TargetLoweringBase::finalizeLowering(MF);
-
- // Allocate a VGPR for future SGPR Spill if
- // "amdgpu-reserve-vgpr-for-sgpr-spill" option is used
- // FIXME: We won't need this hack if we split SGPR allocation from VGPR
- if (VGPRReserveforSGPRSpill && TRI->spillSGPRToVGPR() &&
- !Info->VGPRReservedForSGPRSpill && !Info->isEntryFunction())
- Info->reserveVGPRforSGPRSpills(MF);
}
void SITargetLowering::computeKnownBitsForFrameIndex(
@@ -12441,17 +12560,10 @@ bool SITargetLowering::requiresUniformRegister(MachineFunction &MF,
for (auto &TC : TargetConstraints) {
if (TC.Type == InlineAsm::isOutput) {
ComputeConstraintToUse(TC, SDValue());
- unsigned AssignedReg;
- const TargetRegisterClass *RC;
- std::tie(AssignedReg, RC) = getRegForInlineAsmConstraint(
- SIRI, TC.ConstraintCode, TC.ConstraintVT);
- if (RC) {
- MachineRegisterInfo &MRI = MF.getRegInfo();
- if (AssignedReg != 0 && SIRI->isSGPRReg(MRI, AssignedReg))
- return true;
- else if (SIRI->isSGPRClass(RC))
- return true;
- }
+ const TargetRegisterClass *RC = getRegForInlineAsmConstraint(
+ SIRI, TC.ConstraintCode, TC.ConstraintVT).second;
+ if (RC && SIRI->isSGPRClass(RC))
+ return true;
}
}
}
@@ -12475,3 +12587,27 @@ SITargetLowering::getTypeLegalizationCost(const DataLayout &DL,
Cost.first += (Size + 255) / 256;
return Cost;
}
+
+bool SITargetLowering::hasMemSDNodeUser(SDNode *N) const {
+ SDNode::use_iterator I = N->use_begin(), E = N->use_end();
+ for (; I != E; ++I) {
+ if (MemSDNode *M = dyn_cast<MemSDNode>(*I)) {
+ if (getBasePtrIndex(M) == I.getOperandNo())
+ return true;
+ }
+ }
+ return false;
+}
+
+bool SITargetLowering::isReassocProfitable(SelectionDAG &DAG, SDValue N0,
+ SDValue N1) const {
+ if (!N0.hasOneUse())
+ return false;
+ // Take care of the opportunity to keep N0 uniform
+ if (N0->isDivergent() || !N1->isDivergent())
+ return true;
+ // Check if we have a good chance to form the memory access pattern with the
+ // base and offset
+ return (DAG.isBaseWithConstantOffset(N0) &&
+ hasMemSDNodeUser(*N0->use_begin()));
+}
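The new isReassocProfitable hook only permits reassociation when it cannot spoil the uniformity of N0, or when N0 already looks like a base-plus-constant address feeding a memory node. A minimal standalone model of that decision, with plain flags standing in for the SelectionDAG queries (all names here are illustrative, not part of the patch):

#include <cstdio>

// Simplified stand-ins for the one-use/divergence/addressing queries the real
// hook performs on SDValues.
struct NodeInfo {
  bool HasOneUse;
  bool Divergent;
  bool IsBasePlusConstWithMemUser; // only meaningful for N0
};

static bool isReassocProfitableModel(const NodeInfo &N0, const NodeInfo &N1) {
  if (!N0.HasOneUse)
    return false;
  // If N0 is already divergent, or N1 is uniform, folding N1 into N0 cannot
  // pull a divergent value into a uniform (SALU-friendly) expression.
  if (N0.Divergent || !N1.Divergent)
    return true;
  // Otherwise only reassociate when N0 is a base+constant address consumed by
  // a memory node, so the constant stays foldable into the access.
  return N0.IsBasePlusConstWithMemUser;
}

int main() {
  NodeInfo N0{/*HasOneUse=*/true, /*Divergent=*/false, /*BasePlusConst=*/true};
  NodeInfo N1{/*HasOneUse=*/true, /*Divergent=*/true, /*BasePlusConst=*/false};
  std::printf("%d\n", isReassocProfitableModel(N0, N1)); // prints 1
}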
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIISelLowering.h b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIISelLowering.h
index 1315cc15dd02..bf81e082b478 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -449,6 +449,11 @@ public:
bool isSDNodeSourceOfDivergence(const SDNode *N,
FunctionLoweringInfo *FLI, LegacyDivergenceAnalysis *DA) const override;
+ bool hasMemSDNodeUser(SDNode *N) const;
+
+ bool isReassocProfitable(SelectionDAG &DAG, SDValue N0,
+ SDValue N1) const override;
+
bool isCanonicalized(SelectionDAG &DAG, SDValue Op,
unsigned MaxDepth = 5) const;
bool isCanonicalized(Register Reg, MachineFunction &MF,
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
index 6fbe5d45ce0a..f8a10bc8ef6f 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -863,7 +863,7 @@ bool SIInsertWaitcnts::applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets,
Wait.ExpCnt = ~0u;
LLVM_DEBUG(dbgs() << "generateWaitcntInstBefore\n"
- << "Old Instr: " << MI << "New Instr: " << *WaitcntInstr
+ << "Old Instr: " << *MI << "New Instr: " << *WaitcntInstr
<< '\n');
} else {
WaitcntInstr->eraseFromParent();
@@ -886,7 +886,7 @@ bool SIInsertWaitcnts::applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets,
Wait.VsCnt = ~0u;
LLVM_DEBUG(dbgs() << "generateWaitcntInstBefore\n"
- << "Old Instr: " << MI
+ << "Old Instr: " << *MI
<< "New Instr: " << *WaitcntVsCntInstr << '\n');
} else {
WaitcntVsCntInstr->eraseFromParent();
@@ -1382,7 +1382,6 @@ bool WaitcntBrackets::merge(const WaitcntBrackets &Other) {
for (auto T : inst_counter_types()) {
// Merge event flags for this counter
- const bool OldOutOfOrder = counterOutOfOrder(T);
const unsigned OldEvents = PendingEvents & WaitEventMaskForInst[T];
const unsigned OtherEvents = Other.PendingEvents & WaitEventMaskForInst[T];
if (OtherEvents & ~OldEvents)
@@ -1425,7 +1424,7 @@ bool WaitcntBrackets::merge(const WaitcntBrackets &Other) {
}
}
- if (RegStrictDom && !OldOutOfOrder)
+ if (RegStrictDom)
StrictDom = true;
}
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 1755b93538ce..0a2f9381e71f 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -130,10 +130,24 @@ bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
return false;
}
+static bool readsExecAsData(const MachineInstr &MI) {
+ if (MI.isCompare())
+ return true;
+
+ switch (MI.getOpcode()) {
+ default:
+ break;
+ case AMDGPU::V_READFIRSTLANE_B32:
+ return true;
+ }
+
+ return false;
+}
+
bool SIInstrInfo::isIgnorableUse(const MachineOperand &MO) const {
// Any implicit use of exec by VALU is not a real register read.
return MO.getReg() == AMDGPU::EXEC && MO.isImplicit() &&
- isVALU(*MO.getParent());
+ isVALU(*MO.getParent()) && !readsExecAsData(*MO.getParent());
}
bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
@@ -3184,10 +3198,14 @@ MachineInstr *SIInstrInfo::convertToThreeAddress(MachineInstr &MI,
Opc == AMDGPU::V_FMAC_F16_e32 || Opc == AMDGPU::V_FMAC_F16_e64 ||
Opc == AMDGPU::V_FMAC_F64_e32 || Opc == AMDGPU::V_FMAC_F64_e64;
bool IsF64 = Opc == AMDGPU::V_FMAC_F64_e32 || Opc == AMDGPU::V_FMAC_F64_e64;
+ int NewMFMAOpc = -1;
switch (Opc) {
default:
- return nullptr;
+ NewMFMAOpc = AMDGPU::getMFMAEarlyClobberOp(Opc);
+ if (NewMFMAOpc == -1)
+ return nullptr;
+ break;
case AMDGPU::V_MAC_F16_e64:
case AMDGPU::V_FMAC_F16_e64:
IsF16 = true;
@@ -3216,6 +3234,19 @@ MachineInstr *SIInstrInfo::convertToThreeAddress(MachineInstr &MI,
}
}
+ MachineInstrBuilder MIB;
+ MachineBasicBlock &MBB = *MI.getParent();
+
+ if (NewMFMAOpc != -1) {
+ MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewMFMAOpc));
+ for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I)
+ MIB.add(MI.getOperand(I));
+ updateLiveVariables(LV, MI, *MIB);
+ if (LIS)
+ LIS->ReplaceMachineInstrInMaps(MI, *MIB);
+ return MIB;
+ }
+
const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0);
const MachineOperand *Src0Mods =
@@ -3226,8 +3257,6 @@ MachineInstr *SIInstrInfo::convertToThreeAddress(MachineInstr &MI,
const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp);
const MachineOperand *Omod = getNamedOperand(MI, AMDGPU::OpName::omod);
- MachineInstrBuilder MIB;
- MachineBasicBlock &MBB = *MI.getParent();
if (!Src0Mods && !Src1Mods && !Clamp && !Omod && !IsF64 &&
// If we have an SGPR input, we will violate the constant bus restriction.
@@ -4520,6 +4549,14 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
}
}
+ if (Desc.getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS) {
+ const MachineOperand &SrcOp = MI.getOperand(1);
+ if (!SrcOp.isReg() || SrcOp.getReg().isVirtual()) {
+ ErrInfo = "pseudo expects only physical SGPRs";
+ return false;
+ }
+ }
+
return true;
}
@@ -6122,11 +6159,8 @@ MachineBasicBlock *SIInstrInfo::moveToVALU(MachineInstr &TopInst,
continue;
case AMDGPU::S_CSELECT_B32:
- lowerSelect32(Worklist, Inst, MDT);
- Inst.eraseFromParent();
- continue;
case AMDGPU::S_CSELECT_B64:
- splitSelect64(Worklist, Inst, MDT);
+ lowerSelect(Worklist, Inst, MDT);
Inst.eraseFromParent();
continue;
case AMDGPU::S_CMP_EQ_I32:
@@ -6304,8 +6338,8 @@ SIInstrInfo::moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst,
return std::make_pair(false, nullptr);
}
-void SIInstrInfo::lowerSelect32(SetVectorType &Worklist, MachineInstr &Inst,
- MachineDominatorTree *MDT) const {
+void SIInstrInfo::lowerSelect(SetVectorType &Worklist, MachineInstr &Inst,
+ MachineDominatorTree *MDT) const {
MachineBasicBlock &MBB = *Inst.getParent();
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
@@ -6380,95 +6414,6 @@ void SIInstrInfo::lowerSelect32(SetVectorType &Worklist, MachineInstr &Inst,
addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
}
-void SIInstrInfo::splitSelect64(SetVectorType &Worklist, MachineInstr &Inst,
- MachineDominatorTree *MDT) const {
- // Split S_CSELECT_B64 into a pair of S_CSELECT_B32 and lower them
- // further.
- const DebugLoc &DL = Inst.getDebugLoc();
- MachineBasicBlock::iterator MII = Inst;
- MachineBasicBlock &MBB = *Inst.getParent();
- MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
-
- // Get the original operands.
- MachineOperand &Dest = Inst.getOperand(0);
- MachineOperand &Src0 = Inst.getOperand(1);
- MachineOperand &Src1 = Inst.getOperand(2);
- MachineOperand &Cond = Inst.getOperand(3);
-
- Register SCCSource = Cond.getReg();
- bool IsSCC = (SCCSource == AMDGPU::SCC);
-
- // If this is a trivial select where the condition is effectively not SCC
- // (SCCSource is a source of copy to SCC), then the select is semantically
- // equivalent to copying SCCSource. Hence, there is no need to create
- // V_CNDMASK, we can just use that and bail out.
- if (!IsSCC && (Src0.isImm() && Src0.getImm() == -1) &&
- (Src1.isImm() && Src1.getImm() == 0)) {
- MRI.replaceRegWith(Dest.getReg(), SCCSource);
- return;
- }
-
- // Prepare the split destination.
- Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
- Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
- Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
-
- // Split the source operands.
- const TargetRegisterClass *Src0RC = nullptr;
- const TargetRegisterClass *Src0SubRC = nullptr;
- if (Src0.isReg()) {
- Src0RC = MRI.getRegClass(Src0.getReg());
- Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
- }
- const TargetRegisterClass *Src1RC = nullptr;
- const TargetRegisterClass *Src1SubRC = nullptr;
- if (Src1.isReg()) {
- Src1RC = MRI.getRegClass(Src1.getReg());
- Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
- }
- // Split lo.
- MachineOperand SrcReg0Sub0 =
- buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC);
- MachineOperand SrcReg1Sub0 =
- buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, AMDGPU::sub0, Src1SubRC);
- // Split hi.
- MachineOperand SrcReg0Sub1 =
- buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC);
- MachineOperand SrcReg1Sub1 =
- buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, AMDGPU::sub1, Src1SubRC);
- // Select the lo part.
- MachineInstr *LoHalf =
- BuildMI(MBB, MII, DL, get(AMDGPU::S_CSELECT_B32), DestSub0)
- .add(SrcReg0Sub0)
- .add(SrcReg1Sub0);
- // Replace the condition operand with the original one.
- LoHalf->getOperand(3).setReg(SCCSource);
- Worklist.insert(LoHalf);
- // Select the hi part.
- MachineInstr *HiHalf =
- BuildMI(MBB, MII, DL, get(AMDGPU::S_CSELECT_B32), DestSub1)
- .add(SrcReg0Sub1)
- .add(SrcReg1Sub1);
- // Replace the condition operand with the original one.
- HiHalf->getOperand(3).setReg(SCCSource);
- Worklist.insert(HiHalf);
- // Merge them back to the original 64-bit one.
- BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
- .addReg(DestSub0)
- .addImm(AMDGPU::sub0)
- .addReg(DestSub1)
- .addImm(AMDGPU::sub1);
- MRI.replaceRegWith(Dest.getReg(), FullDestReg);
-
- // Try to legalize the operands in case we need to swap the order to keep
- // it valid.
- legalizeOperands(*LoHalf, MDT);
- legalizeOperands(*HiHalf, MDT);
-
- // Move all users of this moved value.
- addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
-}
-
void SIInstrInfo::lowerScalarAbs(SetVectorType &Worklist,
MachineInstr &Inst) const {
MachineBasicBlock &MBB = *Inst.getParent();
@@ -7820,6 +7765,12 @@ int SIInstrInfo::pseudoToMCOpcode(int Opcode) const {
}
}
+ if (isMAI(Opcode)) {
+ int MFMAOp = AMDGPU::getMFMAEarlyClobberOp(Opcode);
+ if (MFMAOp != -1)
+ Opcode = MFMAOp;
+ }
+
int MCOp = AMDGPU::getMCOpcode(Opcode, Gen);
// -1 means that Opcode is already a native instruction.
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index dd9ea2b53ca2..e551d6c7223f 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -78,11 +78,8 @@ private:
moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst,
MachineDominatorTree *MDT = nullptr) const;
- void lowerSelect32(SetVectorType &Worklist, MachineInstr &Inst,
- MachineDominatorTree *MDT = nullptr) const;
-
- void splitSelect64(SetVectorType &Worklist, MachineInstr &Inst,
- MachineDominatorTree *MDT = nullptr) const;
+ void lowerSelect(SetVectorType &Worklist, MachineInstr &Inst,
+ MachineDominatorTree *MDT = nullptr) const;
void lowerScalarAbs(SetVectorType &Worklist,
MachineInstr &Inst) const;
@@ -1249,6 +1246,10 @@ namespace AMDGPU {
LLVM_READONLY
int getFlatScratchInstSVfromSS(uint16_t Opcode);
+ /// \returns the earlyclobber version of a MAC MFMA if it exists.
+ LLVM_READONLY
+ int getMFMAEarlyClobberOp(uint16_t Opcode);
+
const uint64_t RSRC_DATA_FORMAT = 0xf00000000000LL;
const uint64_t RSRC_ELEMENT_SIZE_SHIFT = (32 + 19);
const uint64_t RSRC_INDEX_STRIDE_SHIFT = (32 + 21);
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index dda92d3d25ff..713a08907e99 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -2588,6 +2588,14 @@ def getFlatScratchInstSVfromSS : InstrMapping {
let ValueCols = [["SV"]];
}
+def getMFMAEarlyClobberOp : InstrMapping {
+ let FilterClass = "MFMATable";
+ let RowFields = ["FMAOp"];
+ let ColFields = ["IsMac"];
+ let KeyCol = ["1"];
+ let ValueCols = [["0"]];
+}
+
include "SIInstructions.td"
include "DSInstructions.td"
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIInstructions.td b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIInstructions.td
index 636337ede000..7be63ae6964b 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -1011,7 +1011,7 @@ def : GCNPat <
}
def : GCNPat <
- (i32 (ctpop i32:$popcnt)),
+ (i32 (DivergentUnaryFrag<ctpop> i32:$popcnt)),
(V_BCNT_U32_B32_e64 VSrc_b32:$popcnt, (i32 0))
>;
@@ -1020,6 +1020,14 @@ def : GCNPat <
(V_BCNT_U32_B32_e64 $popcnt, $val)
>;
+def : GCNPat <
+ (i64 (DivergentUnaryFrag<ctpop> i64:$src)),
+ (REG_SEQUENCE VReg_64,
+ (V_BCNT_U32_B32_e64 (i32 (EXTRACT_SUBREG i64:$src, sub1)),
+ (i32 (V_BCNT_U32_B32_e64 (i32 (EXTRACT_SUBREG i64:$src, sub0)), (i32 0)))), sub0,
+ (i32 (V_MOV_B32_e32 (i32 0))), sub1)
+>;
+
/********** ============================================ **********/
/********** Extraction, Insertion, Building and Casting **********/
/********** ============================================ **********/
@@ -1184,6 +1192,26 @@ def : Pat <
(v2f16 (EXTRACT_SUBREG v4f16:$vec, sub1))
>;
+def : Pat <
+ (extract_subvector v8i16:$vec, (i32 0)),
+ (v4i16 (EXTRACT_SUBREG v8i16:$vec, sub0_sub1))
+>;
+
+def : Pat <
+ (extract_subvector v8i16:$vec, (i32 4)),
+ (v4i16 (EXTRACT_SUBREG v8i16:$vec, sub2_sub3))
+>;
+
+def : Pat <
+ (extract_subvector v8f16:$vec, (i32 0)),
+ (v4f16 (EXTRACT_SUBREG v8f16:$vec, sub0_sub1))
+>;
+
+def : Pat <
+ (extract_subvector v8f16:$vec, (i32 4)),
+ (v4f16 (EXTRACT_SUBREG v8f16:$vec, sub2_sub3))
+>;
+
foreach Index = 0-31 in {
def Extract_Element_v32i32_#Index : Extract_Element <
i32, v32i32, Index, !cast<SubRegIndex>(sub#Index)
@@ -1279,6 +1307,26 @@ def : BitConvert <v2i64, v2f64, VReg_128>;
def : BitConvert <v2f64, v2i64, VReg_128>;
def : BitConvert <v4f32, v2i64, VReg_128>;
def : BitConvert <v2i64, v4f32, VReg_128>;
+def : BitConvert <v8i16, v4i32, SReg_128>;
+def : BitConvert <v4i32, v8i16, SReg_128>;
+def : BitConvert <v8f16, v4f32, VReg_128>;
+def : BitConvert <v8f16, v4i32, VReg_128>;
+def : BitConvert <v4f32, v8f16, VReg_128>;
+def : BitConvert <v4i32, v8f16, VReg_128>;
+def : BitConvert <v8i16, v8f16, VReg_128>;
+def : BitConvert <v8f16, v8i16, VReg_128>;
+def : BitConvert <v4f32, v8i16, VReg_128>;
+def : BitConvert <v8i16, v4f32, VReg_128>;
+def : BitConvert <v8i16, v8f16, SReg_128>;
+def : BitConvert <v8i16, v2i64, SReg_128>;
+def : BitConvert <v8i16, v2f64, SReg_128>;
+def : BitConvert <v8f16, v2i64, SReg_128>;
+def : BitConvert <v8f16, v2f64, SReg_128>;
+def : BitConvert <v8f16, v8i16, SReg_128>;
+def : BitConvert <v2i64, v8i16, SReg_128>;
+def : BitConvert <v2f64, v8i16, SReg_128>;
+def : BitConvert <v2i64, v8f16, SReg_128>;
+def : BitConvert <v2f64, v8f16, SReg_128>;
// 160-bit bitcast
def : BitConvert <v5i32, v5f32, SReg_160>;
@@ -1762,44 +1810,44 @@ def BFIImm32 : PatFrag<
// (y & x) | (z & ~x)
def : AMDGPUPat <
(DivergentBinFrag<or> (and i32:$y, i32:$x), (and i32:$z, (not i32:$x))),
- (V_BFI_B32_e64 $x, $y, $z)
+ (V_BFI_B32_e64 VSrc_b32:$x, VSrc_b32:$y, VSrc_b32:$z)
>;
// (y & C) | (z & ~C)
def : AMDGPUPat <
(BFIImm32 i32:$x, i32:$y, i32:$z),
- (V_BFI_B32_e64 $x, $y, $z)
+ (V_BFI_B32_e64 VSrc_b32:$x, VSrc_b32:$y, VSrc_b32:$z)
>;
// 64-bit version
def : AMDGPUPat <
(DivergentBinFrag<or> (and i64:$y, i64:$x), (and i64:$z, (not i64:$x))),
- (REG_SEQUENCE SReg_64,
- (V_BFI_B32_e64 (i32 (EXTRACT_SUBREG SReg_64:$x, sub0)),
- (i32 (EXTRACT_SUBREG SReg_64:$y, sub0)),
- (i32 (EXTRACT_SUBREG SReg_64:$z, sub0))), sub0,
- (V_BFI_B32_e64 (i32 (EXTRACT_SUBREG SReg_64:$x, sub1)),
- (i32 (EXTRACT_SUBREG SReg_64:$y, sub1)),
- (i32 (EXTRACT_SUBREG SReg_64:$z, sub1))), sub1)
+ (REG_SEQUENCE VReg_64,
+ (V_BFI_B32_e64 (i32 (EXTRACT_SUBREG VReg_64:$x, sub0)),
+ (i32 (EXTRACT_SUBREG VReg_64:$y, sub0)),
+ (i32 (EXTRACT_SUBREG VReg_64:$z, sub0))), sub0,
+ (V_BFI_B32_e64 (i32 (EXTRACT_SUBREG VReg_64:$x, sub1)),
+ (i32 (EXTRACT_SUBREG VReg_64:$y, sub1)),
+ (i32 (EXTRACT_SUBREG VReg_64:$z, sub1))), sub1)
>;
// SHA-256 Ch function
// z ^ (x & (y ^ z))
def : AMDGPUPat <
(DivergentBinFrag<xor> i32:$z, (and i32:$x, (xor i32:$y, i32:$z))),
- (V_BFI_B32_e64 $x, $y, $z)
+ (V_BFI_B32_e64 VSrc_b32:$x, VSrc_b32:$y, VSrc_b32:$z)
>;
// 64-bit version
def : AMDGPUPat <
(DivergentBinFrag<xor> i64:$z, (and i64:$x, (xor i64:$y, i64:$z))),
- (REG_SEQUENCE SReg_64,
- (V_BFI_B32_e64 (i32 (EXTRACT_SUBREG SReg_64:$x, sub0)),
- (i32 (EXTRACT_SUBREG SReg_64:$y, sub0)),
- (i32 (EXTRACT_SUBREG SReg_64:$z, sub0))), sub0,
- (V_BFI_B32_e64 (i32 (EXTRACT_SUBREG SReg_64:$x, sub1)),
- (i32 (EXTRACT_SUBREG SReg_64:$y, sub1)),
- (i32 (EXTRACT_SUBREG SReg_64:$z, sub1))), sub1)
+ (REG_SEQUENCE VReg_64,
+ (V_BFI_B32_e64 (i32 (EXTRACT_SUBREG VReg_64:$x, sub0)),
+ (i32 (EXTRACT_SUBREG VReg_64:$y, sub0)),
+ (i32 (EXTRACT_SUBREG VReg_64:$z, sub0))), sub0,
+ (V_BFI_B32_e64 (i32 (EXTRACT_SUBREG VReg_64:$x, sub1)),
+ (i32 (EXTRACT_SUBREG VReg_64:$y, sub1)),
+ (i32 (EXTRACT_SUBREG VReg_64:$z, sub1))), sub1)
>;
def : AMDGPUPat <
@@ -2725,21 +2773,21 @@ def : AMDGPUPat <
def : AMDGPUPat <
(DivergentBinFrag<or> (and i32:$x, i32:$z),
(and i32:$y, (or i32:$x, i32:$z))),
- (V_BFI_B32_e64 (V_XOR_B32_e64 i32:$x, i32:$y), i32:$z, i32:$y)
+ (V_BFI_B32_e64 (V_XOR_B32_e64 VSrc_b32:$x, VSrc_b32:$y), VSrc_b32:$z, VSrc_b32:$y)
>;
def : AMDGPUPat <
(DivergentBinFrag<or> (and i64:$x, i64:$z),
(and i64:$y, (or i64:$x, i64:$z))),
- (REG_SEQUENCE SReg_64,
- (V_BFI_B32_e64 (V_XOR_B32_e64 (i32 (EXTRACT_SUBREG SReg_64:$x, sub0)),
- (i32 (EXTRACT_SUBREG SReg_64:$y, sub0))),
- (i32 (EXTRACT_SUBREG SReg_64:$z, sub0)),
- (i32 (EXTRACT_SUBREG SReg_64:$y, sub0))), sub0,
- (V_BFI_B32_e64 (V_XOR_B32_e64 (i32 (EXTRACT_SUBREG SReg_64:$x, sub1)),
- (i32 (EXTRACT_SUBREG SReg_64:$y, sub1))),
- (i32 (EXTRACT_SUBREG SReg_64:$z, sub1)),
- (i32 (EXTRACT_SUBREG SReg_64:$y, sub1))), sub1)
+ (REG_SEQUENCE VReg_64,
+ (V_BFI_B32_e64 (V_XOR_B32_e64 (i32 (EXTRACT_SUBREG VReg_64:$x, sub0)),
+ (i32 (EXTRACT_SUBREG VReg_64:$y, sub0))),
+ (i32 (EXTRACT_SUBREG VReg_64:$z, sub0)),
+ (i32 (EXTRACT_SUBREG VReg_64:$y, sub0))), sub0,
+ (V_BFI_B32_e64 (V_XOR_B32_e64 (i32 (EXTRACT_SUBREG VReg_64:$x, sub1)),
+ (i32 (EXTRACT_SUBREG VReg_64:$y, sub1))),
+ (i32 (EXTRACT_SUBREG VReg_64:$z, sub1)),
+ (i32 (EXTRACT_SUBREG VReg_64:$y, sub1))), sub1)
>;
multiclass IntMed3Pat<Instruction med3Inst,
@@ -2825,6 +2873,15 @@ class AMDGPUGenericInstruction : GenericInstruction {
let Namespace = "AMDGPU";
}
+// Convert a wave address to a swizzled vector address (i.e. this is
+// for copying the stack pointer to a vector address appropriate to
+// use in the offset field of mubuf instructions).
+def G_AMDGPU_WAVE_ADDRESS : AMDGPUGenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src);
+ let hasSideEffects = 0;
+}
+
// Returns -1 if the input is zero.
def G_AMDGPU_FFBH_U32 : AMDGPUGenericInstruction {
let OutOperandList = (outs type0:$dst);
@@ -3027,6 +3084,16 @@ def G_AMDGPU_INTRIN_IMAGE_LOAD : AMDGPUGenericInstruction {
let mayStore = 1;
}
+def G_AMDGPU_INTRIN_IMAGE_LOAD_D16 : AMDGPUGenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins unknown:$intrin, variable_ops);
+ let hasSideEffects = 0;
+ let mayLoad = 1;
+
+ // FIXME: Use separate opcode for atomics.
+ let mayStore = 1;
+}
+
// This is equivalent to the G_INTRINSIC*, but the operands may have
// been legalized depending on the subtarget requirements.
def G_AMDGPU_INTRIN_IMAGE_STORE : AMDGPUGenericInstruction {
@@ -3036,6 +3103,13 @@ def G_AMDGPU_INTRIN_IMAGE_STORE : AMDGPUGenericInstruction {
let mayStore = 1;
}
+def G_AMDGPU_INTRIN_IMAGE_STORE_D16 : AMDGPUGenericInstruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins unknown:$intrin, variable_ops);
+ let hasSideEffects = 0;
+ let mayStore = 1;
+}
+
def G_AMDGPU_INTRIN_BVH_INTERSECT_RAY : AMDGPUGenericInstruction {
let OutOperandList = (outs type0:$dst);
let InOperandList = (ins unknown:$intrin, variable_ops);
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
index f4d9002e930e..c18637bdbc43 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -105,6 +105,7 @@ class SILoadStoreOptimizer : public MachineFunctionPass {
unsigned DMask;
InstClassEnum InstClass;
unsigned CPol = 0;
+ bool IsAGPR;
bool UseST64;
int AddrIdx[MaxAddressRegs];
const MachineOperand *AddrReg[MaxAddressRegs];
@@ -158,8 +159,7 @@ class SILoadStoreOptimizer : public MachineFunctionPass {
return true;
}
- void setMI(MachineBasicBlock::iterator MI, const SIInstrInfo &TII,
- const GCNSubtarget &STM);
+ void setMI(MachineBasicBlock::iterator MI, const SILoadStoreOptimizer &LSO);
};
struct BaseRegisters {
@@ -484,15 +484,16 @@ static AddressRegs getRegs(unsigned Opc, const SIInstrInfo &TII) {
}
void SILoadStoreOptimizer::CombineInfo::setMI(MachineBasicBlock::iterator MI,
- const SIInstrInfo &TII,
- const GCNSubtarget &STM) {
+ const SILoadStoreOptimizer &LSO) {
I = MI;
unsigned Opc = MI->getOpcode();
- InstClass = getInstClass(Opc, TII);
+ InstClass = getInstClass(Opc, *LSO.TII);
if (InstClass == UNKNOWN)
return;
+ IsAGPR = LSO.TRI->hasAGPRs(LSO.getDataRegClass(*MI));
+
switch (InstClass) {
case DS_READ:
EltSize =
@@ -505,7 +506,7 @@ void SILoadStoreOptimizer::CombineInfo::setMI(MachineBasicBlock::iterator MI,
: 4;
break;
case S_BUFFER_LOAD_IMM:
- EltSize = AMDGPU::convertSMRDOffsetUnits(STM, 4);
+ EltSize = AMDGPU::convertSMRDOffsetUnits(*LSO.STM, 4);
break;
default:
EltSize = 4;
@@ -513,7 +514,7 @@ void SILoadStoreOptimizer::CombineInfo::setMI(MachineBasicBlock::iterator MI,
}
if (InstClass == MIMG) {
- DMask = TII.getNamedOperand(*I, AMDGPU::OpName::dmask)->getImm();
+ DMask = LSO.TII->getNamedOperand(*I, AMDGPU::OpName::dmask)->getImm();
// Offset is not considered for MIMG instructions.
Offset = 0;
} else {
@@ -522,17 +523,17 @@ void SILoadStoreOptimizer::CombineInfo::setMI(MachineBasicBlock::iterator MI,
}
if (InstClass == TBUFFER_LOAD || InstClass == TBUFFER_STORE)
- Format = TII.getNamedOperand(*I, AMDGPU::OpName::format)->getImm();
+ Format = LSO.TII->getNamedOperand(*I, AMDGPU::OpName::format)->getImm();
- Width = getOpcodeWidth(*I, TII);
+ Width = getOpcodeWidth(*I, *LSO.TII);
if ((InstClass == DS_READ) || (InstClass == DS_WRITE)) {
Offset &= 0xffff;
} else if (InstClass != MIMG) {
- CPol = TII.getNamedOperand(*I, AMDGPU::OpName::cpol)->getImm();
+ CPol = LSO.TII->getNamedOperand(*I, AMDGPU::OpName::cpol)->getImm();
}
- AddressRegs Regs = getRegs(Opc, TII);
+ AddressRegs Regs = getRegs(Opc, *LSO.TII);
NumAddresses = 0;
for (unsigned J = 0; J < Regs.NumVAddrs; J++)
@@ -910,19 +911,10 @@ bool SILoadStoreOptimizer::checkAndPrepareMerge(
}
const unsigned InstSubclass = getInstSubclass(Opc, *TII);
- // Do not merge VMEM buffer instructions with "swizzled" bit set.
- int Swizzled =
- AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AMDGPU::OpName::swz);
- if (Swizzled != -1 && CI.I->getOperand(Swizzled).getImm())
- return false;
-
DenseSet<Register> RegDefsToMove;
DenseSet<Register> PhysRegUsesToMove;
addDefsUsesToList(*CI.I, RegDefsToMove, PhysRegUsesToMove);
- const TargetRegisterClass *DataRC = getDataRegClass(*CI.I);
- bool IsAGPR = TRI->hasAGPRs(DataRC);
-
MachineBasicBlock::iterator E = std::next(Paired.I);
MachineBasicBlock::iterator MBBI = std::next(CI.I);
MachineBasicBlock::iterator MBBE = CI.I->getParent()->end();
@@ -971,15 +963,6 @@ bool SILoadStoreOptimizer::checkAndPrepareMerge(
continue;
}
- // Don't merge volatiles.
- if (MBBI->hasOrderedMemoryRef())
- return false;
-
- int Swizzled =
- AMDGPU::getNamedOperandIdx(MBBI->getOpcode(), AMDGPU::OpName::swz);
- if (Swizzled != -1 && MBBI->getOperand(Swizzled).getImm())
- return false;
-
// Handle a case like
// DS_WRITE_B32 addr, v, idx0
// w = DS_READ_B32 addr, idx0
@@ -991,17 +974,6 @@ bool SILoadStoreOptimizer::checkAndPrepareMerge(
continue;
if (&*MBBI == &*Paired.I) {
- if (TRI->hasAGPRs(getDataRegClass(*MBBI)) != IsAGPR)
- return false;
- // FIXME: nothing is illegal in a ds_write2 opcode with two AGPR data
- // operands. However we are reporting that ds_write2 shall have
- // only VGPR data so that machine copy propagation does not
- // create an illegal instruction with a VGPR and AGPR sources.
- // Consequenctially if we create such instruction the verifier
- // will complain.
- if (IsAGPR && CI.InstClass == DS_WRITE)
- return false;
-
// We need to go through the list of instructions that we plan to
// move and make sure they are all safe to move down past the merged
// instruction.
@@ -1542,49 +1514,36 @@ unsigned SILoadStoreOptimizer::getNewOpcode(const CombineInfo &CI,
std::pair<unsigned, unsigned>
SILoadStoreOptimizer::getSubRegIdxs(const CombineInfo &CI,
const CombineInfo &Paired) {
-
- assert(CI.Width != 0 && Paired.Width != 0 && "Width cannot be zero");
-
bool ReverseOrder;
if (CI.InstClass == MIMG) {
assert(
(countPopulation(CI.DMask | Paired.DMask) == CI.Width + Paired.Width) &&
"No overlaps");
ReverseOrder = CI.DMask > Paired.DMask;
- } else
+ } else {
ReverseOrder = CI.Offset > Paired.Offset;
+ }
unsigned Idx0;
unsigned Idx1;
- if (CI.Width + Paired.Width > 4) {
- assert(CI.Width == 4 && Paired.Width == 4);
+ static const unsigned Idxs[5][4] = {
+ {AMDGPU::sub0, AMDGPU::sub0_sub1, AMDGPU::sub0_sub1_sub2, AMDGPU::sub0_sub1_sub2_sub3},
+ {AMDGPU::sub1, AMDGPU::sub1_sub2, AMDGPU::sub1_sub2_sub3, AMDGPU::sub1_sub2_sub3_sub4},
+ {AMDGPU::sub2, AMDGPU::sub2_sub3, AMDGPU::sub2_sub3_sub4, AMDGPU::sub2_sub3_sub4_sub5},
+ {AMDGPU::sub3, AMDGPU::sub3_sub4, AMDGPU::sub3_sub4_sub5, AMDGPU::sub3_sub4_sub5_sub6},
+ {AMDGPU::sub4, AMDGPU::sub4_sub5, AMDGPU::sub4_sub5_sub6, AMDGPU::sub4_sub5_sub6_sub7},
+ };
- if (ReverseOrder) {
- Idx1 = AMDGPU::sub0_sub1_sub2_sub3;
- Idx0 = AMDGPU::sub4_sub5_sub6_sub7;
- } else {
- Idx0 = AMDGPU::sub0_sub1_sub2_sub3;
- Idx1 = AMDGPU::sub4_sub5_sub6_sub7;
- }
+ assert(CI.Width >= 1 && CI.Width <= 4);
+ assert(Paired.Width >= 1 && Paired.Width <= 4);
+
+ if (ReverseOrder) {
+ Idx1 = Idxs[0][Paired.Width - 1];
+ Idx0 = Idxs[Paired.Width][CI.Width - 1];
} else {
- static const unsigned Idxs[4][4] = {
- {AMDGPU::sub0, AMDGPU::sub0_sub1, AMDGPU::sub0_sub1_sub2, AMDGPU::sub0_sub1_sub2_sub3},
- {AMDGPU::sub1, AMDGPU::sub1_sub2, AMDGPU::sub1_sub2_sub3, 0},
- {AMDGPU::sub2, AMDGPU::sub2_sub3, 0, 0},
- {AMDGPU::sub3, 0, 0, 0},
- };
-
- assert(CI.Width >= 1 && CI.Width <= 3);
- assert(Paired.Width >= 1 && Paired.Width <= 3);
-
- if (ReverseOrder) {
- Idx1 = Idxs[0][Paired.Width - 1];
- Idx0 = Idxs[Paired.Width][CI.Width - 1];
- } else {
- Idx0 = Idxs[0][CI.Width - 1];
- Idx1 = Idxs[CI.Width][Paired.Width - 1];
- }
+ Idx0 = Idxs[0][CI.Width - 1];
+ Idx1 = Idxs[CI.Width][Paired.Width - 1];
}
return std::make_pair(Idx0, Idx1);
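The rewritten getSubRegIdxs collapses the old 4+4 special case into a single table: the row selects the first 32-bit lane of an operand, the column its width in dwords, and the extra rows and columns let merged pairs span up to eight dwords (sub0 through sub7). A small self-contained sketch of the lookup, with strings standing in for the AMDGPU::sub* enumerators and the ReverseOrder swap omitted:

#include <cassert>
#include <cstdio>

static const char *Idxs[5][4] = {
    {"sub0", "sub0_sub1", "sub0_sub1_sub2", "sub0_sub1_sub2_sub3"},
    {"sub1", "sub1_sub2", "sub1_sub2_sub3", "sub1_sub2_sub3_sub4"},
    {"sub2", "sub2_sub3", "sub2_sub3_sub4", "sub2_sub3_sub4_sub5"},
    {"sub3", "sub3_sub4", "sub3_sub4_sub5", "sub3_sub4_sub5_sub6"},
    {"sub4", "sub4_sub5", "sub4_sub5_sub6", "sub4_sub5_sub6_sub7"},
};

int main() {
  unsigned CIWidth = 2, PairedWidth = 4; // e.g. a 2-dword and a 4-dword access
  assert(CIWidth >= 1 && CIWidth <= 4 && PairedWidth >= 1 && PairedWidth <= 4);
  const char *Idx0 = Idxs[0][CIWidth - 1];           // first operand at lane 0
  const char *Idx1 = Idxs[CIWidth][PairedWidth - 1]; // second starts after it
  std::printf("%s %s\n", Idx0, Idx1); // sub0_sub1 sub2_sub3_sub4_sub5
}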
@@ -1847,7 +1806,8 @@ bool SILoadStoreOptimizer::promoteConstantOffsetToImm(
if (AMDGPU::getGlobalSaddrOp(MI.getOpcode()) < 0)
return false;
- if (MI.mayLoad() && TII->getNamedOperand(MI, AMDGPU::OpName::vdata) != NULL)
+ if (MI.mayLoad() &&
+ TII->getNamedOperand(MI, AMDGPU::OpName::vdata) != nullptr)
return false;
if (AnchorList.count(&MI))
@@ -1988,6 +1948,7 @@ void SILoadStoreOptimizer::addInstToMergeableList(const CombineInfo &CI,
std::list<std::list<CombineInfo> > &MergeableInsts) const {
for (std::list<CombineInfo> &AddrList : MergeableInsts) {
if (AddrList.front().InstClass == CI.InstClass &&
+ AddrList.front().IsAGPR == CI.IsAGPR &&
AddrList.front().hasSameBaseAddress(*CI.I)) {
AddrList.emplace_back(CI);
return;
@@ -2030,13 +1991,29 @@ SILoadStoreOptimizer::collectMergeableInsts(
if (InstClass == UNKNOWN)
continue;
+ // Do not merge VMEM buffer instructions with "swizzled" bit set.
+ int Swizzled =
+ AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::swz);
+ if (Swizzled != -1 && MI.getOperand(Swizzled).getImm())
+ continue;
+
CombineInfo CI;
- CI.setMI(MI, *TII, *STM);
+ CI.setMI(MI, *this);
CI.Order = Order++;
if (!CI.hasMergeableAddress(*MRI))
continue;
+ if (CI.InstClass == DS_WRITE && CI.IsAGPR) {
+ // FIXME: nothing is illegal in a ds_write2 opcode with two AGPR data
+ // operands. However we are reporting that ds_write2 shall have
+ // only VGPR data so that machine copy propagation does not
+ // create an illegal instruction with a VGPR and AGPR sources.
+ // Consequenctially if we create such instruction the verifier
+ // will complain.
+ continue;
+ }
+
LLVM_DEBUG(dbgs() << "Mergeable: " << MI);
addInstToMergeableList(CI, MergeableInsts);
@@ -2144,54 +2121,54 @@ SILoadStoreOptimizer::optimizeInstsWithSameBaseAddr(
case DS_READ: {
MachineBasicBlock::iterator NewMI =
mergeRead2Pair(CI, Paired, InstsToMove);
- CI.setMI(NewMI, *TII, *STM);
+ CI.setMI(NewMI, *this);
break;
}
case DS_WRITE: {
MachineBasicBlock::iterator NewMI =
mergeWrite2Pair(CI, Paired, InstsToMove);
- CI.setMI(NewMI, *TII, *STM);
+ CI.setMI(NewMI, *this);
break;
}
case S_BUFFER_LOAD_IMM: {
MachineBasicBlock::iterator NewMI =
mergeSBufferLoadImmPair(CI, Paired, InstsToMove);
- CI.setMI(NewMI, *TII, *STM);
+ CI.setMI(NewMI, *this);
OptimizeListAgain |= (CI.Width + Paired.Width) < 8;
break;
}
case BUFFER_LOAD: {
MachineBasicBlock::iterator NewMI =
mergeBufferLoadPair(CI, Paired, InstsToMove);
- CI.setMI(NewMI, *TII, *STM);
+ CI.setMI(NewMI, *this);
OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
break;
}
case BUFFER_STORE: {
MachineBasicBlock::iterator NewMI =
mergeBufferStorePair(CI, Paired, InstsToMove);
- CI.setMI(NewMI, *TII, *STM);
+ CI.setMI(NewMI, *this);
OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
break;
}
case MIMG: {
MachineBasicBlock::iterator NewMI =
mergeImagePair(CI, Paired, InstsToMove);
- CI.setMI(NewMI, *TII, *STM);
+ CI.setMI(NewMI, *this);
OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
break;
}
case TBUFFER_LOAD: {
MachineBasicBlock::iterator NewMI =
mergeTBufferLoadPair(CI, Paired, InstsToMove);
- CI.setMI(NewMI, *TII, *STM);
+ CI.setMI(NewMI, *this);
OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
break;
}
case TBUFFER_STORE: {
MachineBasicBlock::iterator NewMI =
mergeTBufferStorePair(CI, Paired, InstsToMove);
- CI.setMI(NewMI, *TII, *STM);
+ CI.setMI(NewMI, *this);
OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
break;
}
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
index 3168bcd53eda..e1018bdfde46 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
@@ -56,6 +56,7 @@
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/Target/TargetMachine.h"
using namespace llvm;
@@ -90,6 +91,8 @@ private:
unsigned OrSaveExecOpc;
unsigned Exec;
+ bool EnableOptimizeEndCf = false;
+
bool hasKill(const MachineBasicBlock *Begin, const MachineBasicBlock *End);
void emitIf(MachineInstr &MI);
@@ -579,10 +582,10 @@ void SILowerControlFlow::combineMasks(MachineInstr &MI) {
void SILowerControlFlow::optimizeEndCf() {
// If the only instruction immediately following this END_CF is another
// END_CF in the only successor, we can avoid emitting the exec mask restore
// here.
- if (!RemoveRedundantEndcf)
+ if (!EnableOptimizeEndCf)
return;
- for (MachineInstr *MI : LoweredEndCf) {
+ for (MachineInstr *MI : reverse(LoweredEndCf)) {
MachineBasicBlock &MBB = *MI->getParent();
auto Next =
skipIgnoreExecInstsTrivialSucc(MBB, std::next(MI->getIterator()));
@@ -807,6 +810,8 @@ bool SILowerControlFlow::runOnMachineFunction(MachineFunction &MF) {
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
TII = ST.getInstrInfo();
TRI = &TII->getRegisterInfo();
+ EnableOptimizeEndCf =
+ RemoveRedundantEndcf && MF.getTarget().getOptLevel() > CodeGenOpt::None;
// This doesn't actually need LiveIntervals, but we can preserve them.
LIS = getAnalysisIfAvailable<LiveIntervals>();
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
index 55196fe334e6..0fbdbef6fcce 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
@@ -127,7 +127,7 @@ static void insertCSRRestores(MachineBasicBlock &RestoreBlock,
// FIXME: Just emit the readlane/writelane directly
if (!TFI->restoreCalleeSavedRegisters(RestoreBlock, I, CSI, TRI)) {
for (const CalleeSavedInfo &CI : reverse(CSI)) {
- unsigned Reg = CI.getReg();
+ Register Reg = CI.getReg();
const TargetRegisterClass *RC =
TRI->getMinimalPhysRegClass(Reg, MVT::i32);
@@ -239,50 +239,6 @@ bool SILowerSGPRSpills::spillCalleeSavedRegs(MachineFunction &MF) {
return false;
}
-// Find lowest available VGPR and use it as VGPR reserved for SGPR spills.
-static bool lowerShiftReservedVGPR(MachineFunction &MF,
- const GCNSubtarget &ST) {
- SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
- const Register PreReservedVGPR = FuncInfo->VGPRReservedForSGPRSpill;
- // Early out if pre-reservation of a VGPR for SGPR spilling is disabled.
- if (!PreReservedVGPR)
- return false;
-
- // If there are no free lower VGPRs available, default to using the
- // pre-reserved register instead.
- const SIRegisterInfo *TRI = ST.getRegisterInfo();
- Register LowestAvailableVGPR =
- TRI->findUnusedRegister(MF.getRegInfo(), &AMDGPU::VGPR_32RegClass, MF);
- if (!LowestAvailableVGPR)
- LowestAvailableVGPR = PreReservedVGPR;
-
- MachineFrameInfo &FrameInfo = MF.getFrameInfo();
- // Create a stack object for a possible spill in the function prologue.
- // Note Non-CSR VGPR also need this as we may overwrite inactive lanes.
- Optional<int> FI = FrameInfo.CreateSpillStackObject(4, Align(4));
-
- // Find saved info about the pre-reserved register.
- const auto *ReservedVGPRInfoItr =
- llvm::find_if(FuncInfo->getSGPRSpillVGPRs(),
- [PreReservedVGPR](const auto &SpillRegInfo) {
- return SpillRegInfo.VGPR == PreReservedVGPR;
- });
-
- assert(ReservedVGPRInfoItr != FuncInfo->getSGPRSpillVGPRs().end());
- auto Index =
- std::distance(FuncInfo->getSGPRSpillVGPRs().begin(), ReservedVGPRInfoItr);
-
- FuncInfo->setSGPRSpillVGPRs(LowestAvailableVGPR, FI, Index);
-
- for (MachineBasicBlock &MBB : MF) {
- assert(LowestAvailableVGPR.isValid() && "Did not find an available VGPR");
- MBB.addLiveIn(LowestAvailableVGPR);
- MBB.sortUniqueLiveIns();
- }
-
- return true;
-}
-
bool SILowerSGPRSpills::runOnMachineFunction(MachineFunction &MF) {
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
TII = ST.getInstrInfo();
@@ -304,11 +260,6 @@ bool SILowerSGPRSpills::runOnMachineFunction(MachineFunction &MF) {
if (!MFI.hasStackObjects() && !HasCSRs) {
SaveBlocks.clear();
RestoreBlocks.clear();
- if (FuncInfo->VGPRReservedForSGPRSpill) {
- // Free the reserved VGPR for later possible use by frame lowering.
- FuncInfo->removeVGPRForSGPRSpill(FuncInfo->VGPRReservedForSGPRSpill, MF);
- MRI.freezeReservedRegs(MF);
- }
return false;
}
@@ -326,8 +277,6 @@ bool SILowerSGPRSpills::runOnMachineFunction(MachineFunction &MF) {
// This operates under the assumption that only other SGPR spills are users
// of the frame index.
- lowerShiftReservedVGPR(MF, ST);
-
// To track the spill frame indices handled in this pass.
BitVector SpillFIs(MFI.getObjectIndexEnd(), false);
@@ -375,8 +324,6 @@ bool SILowerSGPRSpills::runOnMachineFunction(MachineFunction &MF) {
FuncInfo->removeDeadFrameIndices(MFI);
MadeChange = true;
- } else if (FuncInfo->VGPRReservedForSGPRSpill) {
- FuncInfo->removeVGPRForSGPRSpill(FuncInfo->VGPRReservedForSGPRSpill, MF);
}
SaveBlocks.clear();
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
index 3ce368ef4db9..cca8565c9ff9 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
@@ -118,10 +118,12 @@ SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
if (IsKernel || !F.hasFnAttribute("amdgpu-no-workitem-id-x"))
WorkItemIDX = true;
- if (!F.hasFnAttribute("amdgpu-no-workitem-id-y"))
+ if (!F.hasFnAttribute("amdgpu-no-workitem-id-y") &&
+ ST.getMaxWorkitemID(F, 1) != 0)
WorkItemIDY = true;
- if (!F.hasFnAttribute("amdgpu-no-workitem-id-z"))
+ if (!F.hasFnAttribute("amdgpu-no-workitem-id-z") &&
+ ST.getMaxWorkitemID(F, 2) != 0)
WorkItemIDZ = true;
if (!F.hasFnAttribute("amdgpu-no-dispatch-ptr"))
@@ -274,7 +276,6 @@ bool SIMachineFunctionInfo::allocateSGPRSpillToVGPR(MachineFunction &MF,
MachineFrameInfo &FrameInfo = MF.getFrameInfo();
MachineRegisterInfo &MRI = MF.getRegInfo();
unsigned WaveSize = ST.getWavefrontSize();
- SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
unsigned Size = FrameInfo.getObjectSize(FI);
unsigned NumLanes = Size / 4;
@@ -291,16 +292,7 @@ bool SIMachineFunctionInfo::allocateSGPRSpillToVGPR(MachineFunction &MF,
Register LaneVGPR;
unsigned VGPRIndex = (NumVGPRSpillLanes % WaveSize);
- // Reserve a VGPR (when NumVGPRSpillLanes = 0, WaveSize, 2*WaveSize, ..) and
- // when one of the two conditions is true:
- // 1. One reserved VGPR being tracked by VGPRReservedForSGPRSpill is not yet
- // reserved.
- // 2. All spill lanes of reserved VGPR(s) are full and another spill lane is
- // required.
- if (FuncInfo->VGPRReservedForSGPRSpill && NumVGPRSpillLanes < WaveSize) {
- assert(FuncInfo->VGPRReservedForSGPRSpill == SpillVGPRs.back().VGPR);
- LaneVGPR = FuncInfo->VGPRReservedForSGPRSpill;
- } else if (VGPRIndex == 0) {
+ if (VGPRIndex == 0) {
LaneVGPR = TRI->findUnusedRegister(MRI, &AMDGPU::VGPR_32RegClass, MF);
if (LaneVGPR == AMDGPU::NoRegister) {
// We have no VGPRs left for spilling SGPRs. Reset because we will not
@@ -308,6 +300,8 @@ bool SIMachineFunctionInfo::allocateSGPRSpillToVGPR(MachineFunction &MF,
SGPRToVGPRSpills.erase(FI);
NumVGPRSpillLanes -= I;
+ // FIXME: We can run out of free registers with split allocation if
+ // IPRA is enabled and a called function already uses every VGPR.
#if 0
DiagnosticInfoResourceLimit DiagOutOfRegs(MF.getFunction(),
"VGPRs for SGPR spilling",
@@ -340,21 +334,6 @@ bool SIMachineFunctionInfo::allocateSGPRSpillToVGPR(MachineFunction &MF,
return true;
}
-/// Reserve a VGPR for spilling of SGPRs
-bool SIMachineFunctionInfo::reserveVGPRforSGPRSpills(MachineFunction &MF) {
- const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
- const SIRegisterInfo *TRI = ST.getRegisterInfo();
- SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
-
- Register LaneVGPR = TRI->findUnusedRegister(
- MF.getRegInfo(), &AMDGPU::VGPR_32RegClass, MF, true);
- if (LaneVGPR == Register())
- return false;
- SpillVGPRs.push_back(SGPRSpillVGPR(LaneVGPR, None));
- FuncInfo->VGPRReservedForSGPRSpill = LaneVGPR;
- return true;
-}
-
/// Reserve AGPRs or VGPRs to support spilling for FrameIndex \p FI.
/// Either AGPR is spilled to VGPR or vice versa.
/// Returns true if a \p FI can be eliminated completely.
@@ -616,24 +595,6 @@ bool SIMachineFunctionInfo::initializeBaseYamlFields(
return false;
}
-// Remove VGPR which was reserved for SGPR spills if there are no spilled SGPRs
-bool SIMachineFunctionInfo::removeVGPRForSGPRSpill(Register ReservedVGPR,
- MachineFunction &MF) {
- for (auto *i = SpillVGPRs.begin(); i < SpillVGPRs.end(); i++) {
- if (i->VGPR == ReservedVGPR) {
- SpillVGPRs.erase(i);
-
- for (MachineBasicBlock &MBB : MF) {
- MBB.removeLiveIn(ReservedVGPR);
- MBB.sortUniqueLiveIns();
- }
- this->VGPRReservedForSGPRSpill = AMDGPU::NoRegister;
- return true;
- }
- }
- return false;
-}
-
bool SIMachineFunctionInfo::usesAGPRs(const MachineFunction &MF) const {
if (UsesAGPRs)
return *UsesAGPRs;
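
Editor's note: the first SIMachineFunctionInfo.cpp hunk above now enables WorkItemIDY and WorkItemIDZ only when the corresponding "amdgpu-no-workitem-id-*" attribute is absent and the subtarget reports a non-zero maximum workitem ID for that dimension. The following is a minimal standalone sketch of that gating idea, not the LLVM code itself; FunctionInfoSketch, hasNoWorkitemAttr, and the arrays are illustrative placeholders for the Function-attribute and GCNSubtarget::getMaxWorkitemID queries in the diff.

#include <array>

// Simplified stand-in for the queries done in the diff above.
struct FunctionInfoSketch {
  bool WorkItemIDX = false, WorkItemIDY = false, WorkItemIDZ = false;
};

static bool hasNoWorkitemAttr(const std::array<bool, 3> &NoIDAttr, unsigned Dim) {
  return NoIDAttr[Dim]; // stands in for F.hasFnAttribute("amdgpu-no-workitem-id-<dim>")
}

FunctionInfoSketch computeWorkItemIDs(bool IsKernel,
                                      const std::array<bool, 3> &NoIDAttr,
                                      const std::array<unsigned, 3> &MaxID) {
  FunctionInfoSketch Info;
  // X is enabled for kernels or whenever the attribute is missing.
  Info.WorkItemIDX = IsKernel || !hasNoWorkitemAttr(NoIDAttr, 0);
  // Y and Z are additionally gated on a non-zero maximum workitem ID,
  // mirroring the new ST.getMaxWorkitemID(F, Dim) != 0 checks.
  Info.WorkItemIDY = !hasNoWorkitemAttr(NoIDAttr, 1) && MaxID[1] != 0;
  Info.WorkItemIDZ = !hasNoWorkitemAttr(NoIDAttr, 2) && MaxID[2] != 0;
  return Info;
}
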
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
index 8accbf611c5f..8e821274bb77 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
@@ -502,7 +502,6 @@ public: // FIXME
Register SGPRForBPSaveRestoreCopy;
Optional<int> BasePointerSaveIndex;
- Register VGPRReservedForSGPRSpill;
bool isCalleeSavedReg(const MCPhysReg *CSRegs, MCPhysReg Reg);
public:
@@ -528,7 +527,6 @@ public:
void setSGPRSpillVGPRs(Register NewVGPR, Optional<int> newFI, int Index) {
SpillVGPRs[Index].VGPR = NewVGPR;
SpillVGPRs[Index].FI = newFI;
- VGPRReservedForSGPRSpill = NewVGPR;
}
bool removeVGPRForSGPRSpill(Register ReservedVGPR, MachineFunction &MF);
@@ -556,7 +554,6 @@ public:
bool haveFreeLanesForSGPRSpill(const MachineFunction &MF,
unsigned NumLane) const;
bool allocateSGPRSpillToVGPR(MachineFunction &MF, int FI);
- bool reserveVGPRforSGPRSpills(MachineFunction &MF);
bool allocateVGPRSpillToAGPR(MachineFunction &MF, int FI, bool isAGPRtoVGPR);
void removeDeadFrameIndices(MachineFrameInfo &MFI);
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIModeRegister.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIModeRegister.cpp
index 69eab762f05c..24a8879b5684 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIModeRegister.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIModeRegister.cpp
@@ -188,7 +188,7 @@ void SIModeRegister::insertSetreg(MachineBasicBlock &MBB, MachineInstr *MI,
unsigned Offset = countTrailingZeros<unsigned>(InstrMode.Mask);
unsigned Width = countTrailingOnes<unsigned>(InstrMode.Mask >> Offset);
unsigned Value = (InstrMode.Mode >> Offset) & ((1 << Width) - 1);
- BuildMI(MBB, MI, 0, TII->get(AMDGPU::S_SETREG_IMM32_B32))
+ BuildMI(MBB, MI, nullptr, TII->get(AMDGPU::S_SETREG_IMM32_B32))
.addImm(Value)
.addImm(((Width - 1) << AMDGPU::Hwreg::WIDTH_M1_SHIFT_) |
(Offset << AMDGPU::Hwreg::OFFSET_SHIFT_) |
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIOptimizeVGPRLiveRange.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIOptimizeVGPRLiveRange.cpp
index 6bf6c45d8cf6..e13e33ed5457 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIOptimizeVGPRLiveRange.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIOptimizeVGPRLiveRange.cpp
@@ -155,6 +155,11 @@ public:
return MachineFunctionProperties().set(
MachineFunctionProperties::Property::IsSSA);
}
+
+ MachineFunctionProperties getClearedProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::NoPHIs);
+ }
};
} // end anonymous namespace
@@ -366,47 +371,42 @@ void SIOptimizeVGPRLiveRange::collectWaterfallCandidateRegisters(
// Re-calculate the liveness of \p Reg in the THEN-region
void SIOptimizeVGPRLiveRange::updateLiveRangeInThenRegion(
Register Reg, MachineBasicBlock *If, MachineBasicBlock *Flow) const {
-
- SmallPtrSet<MachineBasicBlock *, 16> PHIIncoming;
-
- MachineBasicBlock *ThenEntry = nullptr;
- for (auto *Succ : If->successors()) {
- if (Succ != Flow) {
- ThenEntry = Succ;
- break;
+ SetVector<MachineBasicBlock *> Blocks;
+ SmallVector<MachineBasicBlock *> WorkList({If});
+
+ // Collect all successors until we see the flow block, where we should
+ // reconverge.
+ while (!WorkList.empty()) {
+ auto *MBB = WorkList.pop_back_val();
+ for (auto *Succ : MBB->successors()) {
+ if (Succ != Flow && !Blocks.contains(Succ)) {
+ WorkList.push_back(Succ);
+ Blocks.insert(Succ);
+ }
}
}
- assert(ThenEntry && "No successor in Then region?");
LiveVariables::VarInfo &OldVarInfo = LV->getVarInfo(Reg);
- df_iterator_default_set<MachineBasicBlock *, 16> Visited;
-
- for (MachineBasicBlock *MBB : depth_first_ext(ThenEntry, Visited)) {
- if (MBB == Flow)
- break;
-
+ for (MachineBasicBlock *MBB : Blocks) {
// Clear Live bit, as we will recalculate afterwards
LLVM_DEBUG(dbgs() << "Clear AliveBlock " << printMBBReference(*MBB)
<< '\n');
OldVarInfo.AliveBlocks.reset(MBB->getNumber());
}
+ SmallPtrSet<MachineBasicBlock *, 4> PHIIncoming;
+
// Get the blocks the Reg should be alive through
for (auto I = MRI->use_nodbg_begin(Reg), E = MRI->use_nodbg_end(); I != E;
++I) {
auto *UseMI = I->getParent();
if (UseMI->isPHI() && I->readsReg()) {
- if (Visited.contains(UseMI->getParent()))
+ if (Blocks.contains(UseMI->getParent()))
PHIIncoming.insert(UseMI->getOperand(I.getOperandNo() + 1).getMBB());
}
}
- Visited.clear();
-
- for (MachineBasicBlock *MBB : depth_first_ext(ThenEntry, Visited)) {
- if (MBB == Flow)
- break;
-
+ for (MachineBasicBlock *MBB : Blocks) {
SmallVector<MachineInstr *> Uses;
// PHI instructions have been processed before.
findNonPHIUsesInBlock(Reg, MBB, Uses);
@@ -433,7 +433,7 @@ void SIOptimizeVGPRLiveRange::updateLiveRangeInThenRegion(
// Set the isKilled flag if we get new Kills in the THEN region.
for (auto *MI : OldVarInfo.Kills) {
- if (Visited.contains(MI->getParent()))
+ if (Blocks.contains(MI->getParent()))
MI->addRegisterKilled(Reg, TRI);
}
}
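
Editor's note: the rewritten updateLiveRangeInThenRegion above replaces the old "find the single THEN entry, then walk depth-first until the Flow block" scheme with a SetVector-backed worklist that gathers every block reachable from the If block's successors, stopping at the Flow block. Below is a standalone sketch of that traversal pattern over a toy CFG; the Node type and collectUntil name are invented for illustration and do not mirror MachineBasicBlock.

#include <vector>

struct Node {
  int ID;
  std::vector<Node *> Succs;
};

// Collect all nodes reachable from Entry's successors, stopping at (and
// excluding) the Stop node, in the same worklist style as the diff above.
std::vector<Node *> collectUntil(Node *Entry, Node *Stop) {
  std::vector<Node *> Blocks;           // stands in for SetVector
  std::vector<Node *> WorkList{Entry};
  auto Seen = [&Blocks](Node *N) {
    for (Node *B : Blocks)
      if (B == N)
        return true;
    return false;
  };
  while (!WorkList.empty()) {
    Node *N = WorkList.back();
    WorkList.pop_back();
    for (Node *Succ : N->Succs) {
      if (Succ != Stop && !Seen(Succ)) {
        WorkList.push_back(Succ);
        Blocks.push_back(Succ);
      }
    }
  }
  return Blocks;
}
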
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIRegisterInfo.td b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
index 340e2b48e5cd..eb9452f4b85e 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
@@ -617,7 +617,7 @@ def Pseudo_SReg_32 : SIRegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16
let HasSGPR = 1;
}
-def Pseudo_SReg_128 : SIRegisterClass<"AMDGPU", [v4i32, v2i64, v2f64], 32,
+def Pseudo_SReg_128 : SIRegisterClass<"AMDGPU", [v4i32, v2i64, v2f64, v8i16, v8f16], 32,
(add PRIVATE_RSRC_REG)> {
let isAllocatable = 0;
let CopyCost = -1;
@@ -784,7 +784,7 @@ multiclass SRegClass<int numRegs, int priority,
}
defm "" : SRegClass<3, 14, [v3i32, v3f32], SGPR_96Regs, TTMP_96Regs>;
-defm "" : SRegClass<4, 15, [v4i32, v4f32, v2i64], SGPR_128Regs, TTMP_128Regs>;
+defm "" : SRegClass<4, 15, [v4i32, v4f32, v2i64, v2f64, v8i16, v8f16], SGPR_128Regs, TTMP_128Regs>;
defm "" : SRegClass<5, 16, [v5i32, v5f32], SGPR_160Regs, TTMP_160Regs>;
defm "" : SRegClass<6, 17, [v6i32, v6f32, v3i64, v3f64], SGPR_192Regs, TTMP_192Regs>;
defm "" : SRegClass<7, 18, [v7i32, v7f32], SGPR_224Regs, TTMP_224Regs>;
@@ -824,7 +824,7 @@ multiclass VRegClass<int numRegs, list<ValueType> regTypes, dag regList> {
defm VReg_64 : VRegClass<2, [i64, f64, v2i32, v2f32, v4f16, v4i16, p0, p1, p4],
(add VGPR_64)>;
defm VReg_96 : VRegClass<3, [v3i32, v3f32], (add VGPR_96)>;
-defm VReg_128 : VRegClass<4, [v4i32, v4f32, v2i64, v2f64], (add VGPR_128)>;
+defm VReg_128 : VRegClass<4, [v4i32, v4f32, v2i64, v2f64, v8i16, v8f16], (add VGPR_128)>;
defm VReg_160 : VRegClass<5, [v5i32, v5f32], (add VGPR_160)>;
defm VReg_192 : VRegClass<6, [v6i32, v6f32, v3i64, v3f64], (add VGPR_192)>;
@@ -846,7 +846,7 @@ multiclass ARegClass<int numRegs, list<ValueType> regTypes, dag regList> {
defm AReg_64 : ARegClass<2, [i64, f64, v2i32, v2f32, v4f16, v4i16],
(add AGPR_64)>;
defm AReg_96 : ARegClass<3, [v3i32, v3f32], (add AGPR_96)>;
-defm AReg_128 : ARegClass<4, [v4i32, v4f32, v2i64, v2f64], (add AGPR_128)>;
+defm AReg_128 : ARegClass<4, [v4i32, v4f32, v2i64, v2f64, v8i16, v8f16], (add AGPR_128)>;
defm AReg_160 : ARegClass<5, [v5i32, v5f32], (add AGPR_160)>;
defm AReg_192 : ARegClass<6, [v6i32, v6f32, v3i64, v3f64], (add AGPR_192)>;
defm AReg_224 : ARegClass<7, [v7i32, v7f32], (add AGPR_224)>;
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp
index 77ee3c0ff0e4..46efb3c605c6 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp
@@ -861,12 +861,16 @@ MachineInstr *SIWholeQuadMode::lowerKillF32(MachineBasicBlock &MBB,
MachineInstr *VcmpMI;
const MachineOperand &Op0 = MI.getOperand(0);
const MachineOperand &Op1 = MI.getOperand(1);
+
+ // VCC represents lanes killed.
+ Register VCC = ST->isWave32() ? AMDGPU::VCC_LO : AMDGPU::VCC;
+
if (TRI->isVGPR(*MRI, Op0.getReg())) {
Opcode = AMDGPU::getVOPe32(Opcode);
VcmpMI = BuildMI(MBB, &MI, DL, TII->get(Opcode)).add(Op1).add(Op0);
} else {
VcmpMI = BuildMI(MBB, &MI, DL, TII->get(Opcode))
- .addReg(AMDGPU::VCC, RegState::Define)
+ .addReg(VCC, RegState::Define)
.addImm(0) // src0 modifiers
.add(Op1)
.addImm(0) // src1 modifiers
@@ -874,9 +878,6 @@ MachineInstr *SIWholeQuadMode::lowerKillF32(MachineBasicBlock &MBB,
.addImm(0); // omod
}
- // VCC represents lanes killed.
- Register VCC = ST->isWave32() ? AMDGPU::VCC_LO : AMDGPU::VCC;
-
MachineInstr *MaskUpdateMI =
BuildMI(MBB, MI, DL, TII->get(AndN2Opc), LiveMaskReg)
.addReg(LiveMaskReg)
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SOPInstructions.td b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SOPInstructions.td
index 1713586dcf5b..3f7837f7dbf1 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SOPInstructions.td
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SOPInstructions.td
@@ -246,10 +246,10 @@ let Defs = [SCC] in {
def S_BCNT0_I32_B32 : SOP1_32 <"s_bcnt0_i32_b32">;
def S_BCNT0_I32_B64 : SOP1_32_64 <"s_bcnt0_i32_b64">;
def S_BCNT1_I32_B32 : SOP1_32 <"s_bcnt1_i32_b32",
- [(set i32:$sdst, (ctpop i32:$src0))]
+ [(set i32:$sdst, (UniformUnaryFrag<ctpop> i32:$src0))]
>;
def S_BCNT1_I32_B64 : SOP1_32_64 <"s_bcnt1_i32_b64",
- [(set i32:$sdst, (ctpop i64:$src0))]
+ [(set i32:$sdst, (UniformUnaryFrag<ctpop> i64:$src0))]
>;
} // End Defs = [SCC]
@@ -518,10 +518,9 @@ let Uses = [SCC] in {
def S_CSELECT_B32 : SOP2_32 <"s_cselect_b32",
[(set i32:$sdst, (SelectPat<select> i32:$src0, i32:$src1))]
>;
- def S_CSELECT_B64 : SOP2_64 <"s_cselect_b64",
- [(set i64:$sdst, (SelectPat<select> i64:$src0, i64:$src1))]
- >;
}
+
+ def S_CSELECT_B64 : SOP2_64 <"s_cselect_b64">;
} // End Uses = [SCC]
let Defs = [SCC] in {
@@ -551,11 +550,11 @@ def S_XOR_B64 : SOP2_64 <"s_xor_b64",
>;
def S_XNOR_B32 : SOP2_32 <"s_xnor_b32",
- [(set i32:$sdst, (not (xor_oneuse i32:$src0, i32:$src1)))]
+ [(set i32:$sdst, (UniformUnaryFrag<not> (xor_oneuse i32:$src0, i32:$src1)))]
>;
def S_XNOR_B64 : SOP2_64 <"s_xnor_b64",
- [(set i64:$sdst, (not (xor_oneuse i64:$src0, i64:$src1)))]
+ [(set i64:$sdst, (UniformUnaryFrag<not> (xor_oneuse i64:$src0, i64:$src1)))]
>;
def S_NAND_B32 : SOP2_32 <"s_nand_b32",
@@ -1371,7 +1370,7 @@ def : GCNPat <
>;
def : GCNPat <
- (i64 (ctpop i64:$src)),
+ (i64 (UniformUnaryFrag<ctpop> i64:$src)),
(i64 (REG_SEQUENCE SReg_64,
(i32 (COPY_TO_REGCLASS (S_BCNT1_I32_B64 $src), SReg_32)), sub0,
(S_MOV_B32 (i32 0)), sub1))
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/Utils/AMDGPUAsmUtils.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/Utils/AMDGPUAsmUtils.cpp
index 0bee9022975e..18c348d1cf89 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/Utils/AMDGPUAsmUtils.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/Utils/AMDGPUAsmUtils.cpp
@@ -79,8 +79,8 @@ const char* const IdSymbolic[] = {
"HW_REG_FLAT_SCR_LO",
"HW_REG_FLAT_SCR_HI",
"HW_REG_XNACK_MASK",
- nullptr, // HW_ID1, no predictable values
- nullptr, // HW_ID2, no predictable values
+ "HW_REG_HW_ID1",
+ "HW_REG_HW_ID2",
"HW_REG_POPS_PACKER",
nullptr,
nullptr,
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
index d20eaaaa65e8..1e96266eb06c 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -132,6 +132,8 @@ bool isHsaAbiVersion3Or4(const MCSubtargetInfo *STI) {
#define GET_MIMGInfoTable_IMPL
#define GET_MIMGLZMappingTable_IMPL
#define GET_MIMGMIPMappingTable_IMPL
+#define GET_MIMGBiasMappingTable_IMPL
+#define GET_MIMGOffsetMappingTable_IMPL
#define GET_MIMGG16MappingTable_IMPL
#include "AMDGPUGenSearchableTables.inc"
@@ -410,7 +412,7 @@ void AMDGPUTargetID::setTargetIDFromTargetIDStream(StringRef TargetID) {
}
std::string AMDGPUTargetID::toString() const {
- std::string StringRep = "";
+ std::string StringRep;
raw_string_ostream StreamRep(StringRep);
auto TargetTriple = STI.getTargetTriple();
@@ -421,7 +423,7 @@ std::string AMDGPUTargetID::toString() const {
<< TargetTriple.getOSName() << '-'
<< TargetTriple.getEnvironmentName() << '-';
- std::string Processor = "";
+ std::string Processor;
// TODO: The following else statement is present here because we used various
// alias names for GPUs up until GFX9 (e.g. 'fiji' is the same as 'gfx803').
// Remove once all aliases are removed from GCNProcessors.td.
@@ -432,7 +434,7 @@ std::string AMDGPUTargetID::toString() const {
Twine(Version.Stepping))
.str();
- std::string Features = "";
+ std::string Features;
if (Optional<uint8_t> HsaAbiVersion = getHsaAbiVersion(&STI)) {
switch (*HsaAbiVersion) {
case ELF::ELFABIVERSION_AMDGPU_HSA_V2:
@@ -1018,9 +1020,18 @@ static unsigned getLastSymbolicHwreg(const MCSubtargetInfo &STI) {
}
bool isValidHwreg(int64_t Id, const MCSubtargetInfo &STI) {
- return
- ID_SYMBOLIC_FIRST_ <= Id && Id < getLastSymbolicHwreg(STI) &&
- IdSymbolic[Id] && (Id != ID_XNACK_MASK || !AMDGPU::isGFX10_BEncoding(STI));
+ switch (Id) {
+ case ID_HW_ID:
+ return isSI(STI) || isCI(STI) || isVI(STI) || isGFX9(STI);
+ case ID_HW_ID1:
+ case ID_HW_ID2:
+ return isGFX10Plus(STI);
+ case ID_XNACK_MASK:
+ return isGFX10(STI) && !AMDGPU::isGFX10_BEncoding(STI);
+ default:
+ return ID_SYMBOLIC_FIRST_ <= Id && Id < getLastSymbolicHwreg(STI) &&
+ IdSymbolic[Id];
+ }
}
bool isValidHwreg(int64_t Id) {
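
Editor's note: the reworked isValidHwreg above turns hardware-register legality into an explicit per-generation decision: HW_ID is accepted only on pre-GFX10 targets, HW_ID1/HW_ID2 only on GFX10+, and XNACK_MASK only on GFX10 without the B-encoding. The following standalone restatement uses a simplified generation enum and illustrative placeholder ID values in place of the MCSubtargetInfo queries and real encodings.

enum class Gen { SI, CI, VI, GFX9, GFX10, GFX10B };

// Illustrative placeholder values, not the real hwreg encodings.
enum HwRegIdSketch { ID_HW_ID_S = 4, ID_XNACK_MASK_S = 22, ID_HW_ID1_S = 23, ID_HW_ID2_S = 24 };

bool isValidHwregSketch(int Id, Gen G) {
  switch (Id) {
  case ID_HW_ID_S:       // readable only on the older generations
    return G <= Gen::GFX9;
  case ID_HW_ID1_S:
  case ID_HW_ID2_S:      // split replacements introduced with GFX10
    return G >= Gen::GFX10;
  case ID_XNACK_MASK_S:  // not available with the GFX10 B-encoding
    return G == Gen::GFX10;
  default:
    return false;        // the real code falls back to the symbolic-name table
  }
}
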
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/contrib/llvm-project/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
index 061c74c0ace6..89f928eb8b92 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
@@ -64,6 +64,7 @@ struct GcnBufferFormatInfo {
#define GET_MIMGEncoding_DECL
#define GET_MIMGLZMapping_DECL
#define GET_MIMGMIPMapping_DECL
+#define GET_MIMGBiasMapping_DECL
#include "AMDGPUGenSearchableTables.inc"
namespace IsaInfo {
@@ -330,6 +331,16 @@ struct MIMGMIPMappingInfo {
MIMGBaseOpcode NONMIP;
};
+struct MIMGBiasMappingInfo {
+ MIMGBaseOpcode Bias;
+ MIMGBaseOpcode NoBias;
+};
+
+struct MIMGOffsetMappingInfo {
+ MIMGBaseOpcode Offset;
+ MIMGBaseOpcode NoOffset;
+};
+
struct MIMGG16MappingInfo {
MIMGBaseOpcode G;
MIMGBaseOpcode G16;
@@ -342,6 +353,12 @@ LLVM_READONLY
const MIMGMIPMappingInfo *getMIMGMIPMappingInfo(unsigned MIP);
LLVM_READONLY
+const MIMGBiasMappingInfo *getMIMGBiasMappingInfo(unsigned Bias);
+
+LLVM_READONLY
+const MIMGOffsetMappingInfo *getMIMGOffsetMappingInfo(unsigned Offset);
+
+LLVM_READONLY
const MIMGG16MappingInfo *getMIMGG16MappingInfo(unsigned G);
LLVM_READONLY
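
Editor's note: the new MIMGBiasMappingInfo/MIMGOffsetMappingInfo structs and their lookup helpers pair each biased/offset image opcode with its plain counterpart, so a later fold can drop a known-zero bias or offset operand by switching opcodes. Below is a self-contained sketch of that table-lookup shape; the struct name, opcode numbers, and table contents are made up for illustration and do not come from the generated tables.

#include <cstdint>

struct BiasMappingSketch {
  uint16_t Bias;    // opcode that carries an explicit bias operand
  uint16_t NoBias;  // equivalent opcode without the bias operand
};

// Tiny stand-in for the TableGen-generated searchable table.
static const BiasMappingSketch BiasTable[] = {
    {100, 101},
    {102, 103},
};

const BiasMappingSketch *getBiasMappingSketch(uint16_t BiasOpcode) {
  for (const BiasMappingSketch &Entry : BiasTable)
    if (Entry.Bias == BiasOpcode)
      return &Entry;
  return nullptr; // opcode has no bias-less counterpart
}

// Usage: if the bias operand is provably zero, rewrite to Entry->NoBias.
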
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/VOP2Instructions.td b/contrib/llvm-project/llvm/lib/Target/AMDGPU/VOP2Instructions.td
index 8d232ffe4114..b9ff814a4dc5 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/VOP2Instructions.td
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/VOP2Instructions.td
@@ -637,9 +637,9 @@ class divergent_i64_BinOp <SDPatternOperator Op, Instruction Inst> :
)
>;
-def : divergent_i64_BinOp <and, V_AND_B32_e32>;
-def : divergent_i64_BinOp <or, V_OR_B32_e32>;
-def : divergent_i64_BinOp <xor, V_XOR_B32_e32>;
+def : divergent_i64_BinOp <and, V_AND_B32_e64>;
+def : divergent_i64_BinOp <or, V_OR_B32_e64>;
+def : divergent_i64_BinOp <xor, V_XOR_B32_e64>;
let SubtargetPredicate = Has16BitInsts in {
@@ -688,6 +688,36 @@ let SubtargetPredicate = HasDLInsts in {
let isReMaterializable = 1 in
defm V_XNOR_B32 : VOP2Inst <"v_xnor_b32", VOP_I32_I32_I32, xnor>;
+def : GCNPat<
+ (i32 (DivergentUnaryFrag<not> (xor_oneuse i32:$src0, i32:$src1))),
+ (i32 (V_XNOR_B32_e64 $src0, $src1))
+>;
+
+def : GCNPat<
+ (i32 (DivergentBinFrag<xor_oneuse> (not i32:$src0), i32:$src1)),
+ (i32 (V_XNOR_B32_e64 $src0, $src1))
+>;
+
+def : GCNPat<
+ (i64 (DivergentUnaryFrag<not> (xor_oneuse i64:$src0, i64:$src1))),
+ (REG_SEQUENCE VReg_64, (i32 (V_XNOR_B32_e64
+ (i32 (EXTRACT_SUBREG $src0, sub0)),
+ (i32 (EXTRACT_SUBREG $src1, sub0)))), sub0,
+ (i32 (V_XNOR_B32_e64
+ (i32 (EXTRACT_SUBREG $src0, sub1)),
+ (i32 (EXTRACT_SUBREG $src1, sub1)))), sub1)
+>;
+
+def : GCNPat<
+ (i64 (DivergentBinFrag<xor_oneuse> (not i64:$src0), i64:$src1)),
+ (REG_SEQUENCE VReg_64, (i32 (V_XNOR_B32_e64
+ (i32 (EXTRACT_SUBREG $src0, sub0)),
+ (i32 (EXTRACT_SUBREG $src1, sub0)))), sub0,
+ (i32 (V_XNOR_B32_e64
+ (i32 (EXTRACT_SUBREG $src0, sub1)),
+ (i32 (EXTRACT_SUBREG $src1, sub1)))), sub1)
+>;
+
let Constraints = "$vdst = $src2",
DisableEncoding = "$src2",
isConvertibleToThreeAddress = 1,
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/VOP3PInstructions.td b/contrib/llvm-project/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
index 32222b3eb93c..707475ceccee 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
@@ -388,6 +388,12 @@ class VOPProfileMAI<VOPProfile P, RegisterOperand _SrcRC, RegisterOperand _DstRC
let HasModifiers = 0;
let Asm64 = "$vdst, $src0, $src1, $src2$cbsz$abid$blgp";
let Ins64 = (ins Src0RC64:$src0, Src1RC64:$src1, Src2RC64:$src2, cbsz:$cbsz, abid:$abid, blgp:$blgp);
+ // Dst and SrcC cannot partially overlap if SrcC/Dst is bigger than 4 VGPRs.
+ // We then create two versions of the instruction: with tied dst and src2
+ // and with the earlyclobber flag on the dst. This is stricter than the
+ // actual HW restriction. In particular earlyclobber also affects src0 and
+ // src1 allocation which is not required.
+ bit NoDstOverlap = !gt(DstVT.Size, 128);
}
def VOPProfileMAI_F32_F32_X4 : VOPProfileMAI<VOP_V4F32_F32_F32_V4F32, AISrc_128_f32, ADst_128>;
@@ -426,6 +432,11 @@ def VOPProfileMAI_F32_V4I16_X32_VCD : VOPProfileMAI<VOP_V32F32_V4I16_V4I16_V32F
def VOPProfileMAI_F64_16X16X4F64_VCD : VOPProfileMAI<VOP_V4F64_F64_F64_V4F64, VISrc_256_f64, VDst_256, AVSrc_64>;
def VOPProfileMAI_F64_4X4X4F64_VCD : VOPProfileMAI<VOP_F64_F64_F64_F64, VISrc_64_f64, VDst_64, AVSrc_64>;
+class MFMATable <bit is_mac, string Name> {
+ bit IsMac = is_mac;
+ string FMAOp = Name;
+}
+
let Predicates = [HasMAIInsts] in {
let isAsCheapAsAMove = 1, isReMaterializable = 1 in {
@@ -435,13 +446,31 @@ let isAsCheapAsAMove = 1, isReMaterializable = 1 in {
} // End isMoveImm = 1
} // End isAsCheapAsAMove = 1, isReMaterializable = 1
-multiclass MAIInst<string OpName, string P, SDPatternOperator node> {
+multiclass MAIInst<string OpName, string P, SDPatternOperator node,
+ bit NoDstOverlap = !cast<VOPProfileMAI>("VOPProfileMAI_" # P).NoDstOverlap> {
let isConvergent = 1, mayRaiseFPException = 0, ReadsModeReg = 1 in {
// FP32 denorm mode is respected, rounding mode is not. Exceptions are not supported.
- defm "" : VOP3Inst<OpName, !cast<VOPProfileMAI>("VOPProfileMAI_" # P), node>;
-
- let SubtargetPredicate = isGFX90APlus, Mnemonic = OpName in
- defm _vgprcd : VOP3Inst<OpName # "_vgprcd", !cast<VOPProfileMAI>("VOPProfileMAI_" # P # "_VCD")>;
+ let Constraints = !if(NoDstOverlap, "@earlyclobber $vdst", "") in {
+ defm "" : VOP3Inst<OpName, !cast<VOPProfileMAI>("VOPProfileMAI_" # P), !if(NoDstOverlap, null_frag, node)>,
+ MFMATable<0, NAME # "_e64">;
+
+ let SubtargetPredicate = isGFX90APlus, Mnemonic = OpName in
+ defm _vgprcd : VOP3Inst<OpName # "_vgprcd", !cast<VOPProfileMAI>("VOPProfileMAI_" # P # "_VCD")>,
+ MFMATable<0, NAME # "_vgprcd_e64">;
+ }
+
+ foreach _ = BoolToList<NoDstOverlap>.ret in {
+ let Constraints = !if(NoDstOverlap, "$vdst = $src2", ""),
+ isConvertibleToThreeAddress = NoDstOverlap,
+ Mnemonic = OpName in {
+ defm "_mac" : VOP3Inst<OpName # "_mac", !cast<VOPProfileMAI>("VOPProfileMAI_" # P), node>,
+ MFMATable<1, NAME # "_e64">;
+
+ let SubtargetPredicate = isGFX90APlus in
+ defm _mac_vgprcd : VOP3Inst<OpName # "_mac_vgprcd", !cast<VOPProfileMAI>("VOPProfileMAI_" # P # "_VCD")>,
+ MFMATable<1, NAME # "_vgprcd_e64">;
+ }
+ }
} // End isConvergent = 1, mayRaiseFPException = 0, ReadsModeReg = 1
}
@@ -517,6 +546,7 @@ multiclass VOP3P_Real_MAI<bits<7> op> {
}
}
+let Constraints = "" in {
multiclass VOP3P_Real_MFMA_gfx90a<bits<7> op> {
let SubtargetPredicate = isGFX90AOnly,
AssemblerPredicate = isGFX90AOnly, DecoderNamespace = "GFX90A" in {
@@ -536,6 +566,7 @@ multiclass VOP3P_Real_MFMA<bits<7> op> :
let DecoderNamespace = "GFX8";
}
}
+}
defm V_PK_MAD_I16 : VOP3P_Real_vi <0x00>;
defm V_PK_MUL_LO_U16 : VOP3P_Real_vi <0x01>;
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/ARM.h b/contrib/llvm-project/llvm/lib/Target/ARM/ARM.h
index 1d5e45aec06c..979371bf7cf6 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/ARM.h
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/ARM.h
@@ -25,12 +25,9 @@ class ARMAsmPrinter;
class ARMBaseTargetMachine;
class ARMRegisterBankInfo;
class ARMSubtarget;
-struct BasicBlockInfo;
class Function;
class FunctionPass;
class InstructionSelector;
-class MachineBasicBlock;
-class MachineFunction;
class MachineInstr;
class MCInst;
class PassRegistry;
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/ARM.td b/contrib/llvm-project/llvm/lib/Target/ARM/ARM.td
index 8173fe4036a8..4efbdbb2abc8 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/ARM.td
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/ARM.td
@@ -512,8 +512,7 @@ def HasV6T2Ops : SubtargetFeature<"v6t2", "HasV6T2Ops", "true",
def HasV7Ops : SubtargetFeature<"v7", "HasV7Ops", "true",
"Support ARM v7 instructions",
- [HasV6T2Ops, FeaturePerfMon,
- FeatureV7Clrex]>;
+ [HasV6T2Ops, FeatureV7Clrex]>;
def HasV8MMainlineOps :
SubtargetFeature<"v8m.main", "HasV8MMainlineOps", "true",
@@ -522,7 +521,7 @@ def HasV8MMainlineOps :
def HasV8Ops : SubtargetFeature<"v8", "HasV8Ops", "true",
"Support ARM v8 instructions",
- [HasV7Ops, FeatureAcquireRelease]>;
+ [HasV7Ops, FeaturePerfMon, FeatureAcquireRelease]>;
def HasV8_1aOps : SubtargetFeature<"v8.1a", "HasV8_1aOps", "true",
"Support ARM v8.1a instructions",
@@ -553,6 +552,10 @@ def HasV8_7aOps : SubtargetFeature<"v8.7a", "HasV8_7aOps", "true",
"Support ARM v8.7a instructions",
[HasV8_6aOps]>;
+def HasV8_8aOps : SubtargetFeature<"v8.8a", "HasV8_8aOps", "true",
+ "Support ARM v8.8a instructions",
+ [HasV8_7aOps]>;
+
def HasV9_0aOps : SubtargetFeature<"v9a", "HasV9_0aOps", "true",
"Support ARM v9a instructions",
[HasV8_5aOps]>;
@@ -565,6 +568,10 @@ def HasV9_2aOps : SubtargetFeature<"v9.2a", "HasV9_2aOps", "true",
"Support ARM v9.2a instructions",
[HasV8_7aOps, HasV9_1aOps]>;
+def HasV9_3aOps : SubtargetFeature<"v9.3a", "HasV9_3aOps", "true",
+ "Support ARM v9.3a instructions",
+ [HasV8_8aOps, HasV9_2aOps]>;
+
def HasV8_1MMainlineOps : SubtargetFeature<
"v8.1m.main", "HasV8_1MMainlineOps", "true",
"Support ARM v8-1M Mainline instructions",
@@ -757,7 +764,8 @@ def ARMv7a : Architecture<"armv7-a", "ARMv7a", [HasV7Ops,
FeatureNEON,
FeatureDB,
FeatureDSP,
- FeatureAClass]>;
+ FeatureAClass,
+ FeaturePerfMon]>;
def ARMv7ve : Architecture<"armv7ve", "ARMv7ve", [HasV7Ops,
FeatureNEON,
@@ -766,13 +774,15 @@ def ARMv7ve : Architecture<"armv7ve", "ARMv7ve", [HasV7Ops,
FeatureTrustZone,
FeatureMP,
FeatureVirtualization,
- FeatureAClass]>;
+ FeatureAClass,
+ FeaturePerfMon]>;
def ARMv7r : Architecture<"armv7-r", "ARMv7r", [HasV7Ops,
FeatureDB,
FeatureDSP,
FeatureHWDivThumb,
- FeatureRClass]>;
+ FeatureRClass,
+ FeaturePerfMon]>;
def ARMv7m : Architecture<"armv7-m", "ARMv7m", [HasV7Ops,
FeatureThumb2,
@@ -894,6 +904,19 @@ def ARMv87a : Architecture<"armv8.7-a", "ARMv87a", [HasV8_7aOps,
FeatureCRC,
FeatureRAS,
FeatureDotProd]>;
+def ARMv88a : Architecture<"armv8.8-a", "ARMv88a", [HasV8_8aOps,
+ FeatureAClass,
+ FeatureDB,
+ FeatureFPARMv8,
+ FeatureNEON,
+ FeatureDSP,
+ FeatureTrustZone,
+ FeatureMP,
+ FeatureVirtualization,
+ FeatureCrypto,
+ FeatureCRC,
+ FeatureRAS,
+ FeatureDotProd]>;
def ARMv9a : Architecture<"armv9-a", "ARMv9a", [HasV9_0aOps,
FeatureAClass,
@@ -931,6 +954,19 @@ def ARMv92a : Architecture<"armv9.2-a", "ARMv92a", [HasV9_2aOps,
FeatureCRC,
FeatureRAS,
FeatureDotProd]>;
+def ARMv93a : Architecture<"armv9.3-a", "ARMv93a", [HasV9_3aOps,
+ FeatureAClass,
+ FeatureDB,
+ FeatureFPARMv8,
+ FeatureNEON,
+ FeatureDSP,
+ FeatureTrustZone,
+ FeatureMP,
+ FeatureVirtualization,
+ FeatureCrypto,
+ FeatureCRC,
+ FeatureRAS,
+ FeatureDotProd]>;
def ARMv8r : Architecture<"armv8-r", "ARMv8r", [HasV8Ops,
FeatureRClass,
@@ -1425,8 +1461,7 @@ def : ProcNoItin<"neoverse-n1", [ARMv82a,
def : ProcNoItin<"neoverse-n2", [ARMv85a,
FeatureBF16,
- FeatureMatMulInt8,
- FeaturePerfMon]>;
+ FeatureMatMulInt8]>;
def : ProcessorModel<"cyclone", SwiftModel, [ARMv8a, ProcSwift,
FeatureHasRetAddrStack,
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/contrib/llvm-project/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 884f38ff6c58..cde715880376 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -4868,6 +4868,36 @@ bool ARMBaseInstrInfo::verifyInstruction(const MachineInstr &MI,
return false;
}
}
+
+ // Check the addressing mode by taking the first Imm operand and checking it is
+ // legal for that addressing mode.
+ ARMII::AddrMode AddrMode =
+ (ARMII::AddrMode)(MI.getDesc().TSFlags & ARMII::AddrModeMask);
+ switch (AddrMode) {
+ default:
+ break;
+ case ARMII::AddrModeT2_i7:
+ case ARMII::AddrModeT2_i7s2:
+ case ARMII::AddrModeT2_i7s4:
+ case ARMII::AddrModeT2_i8:
+ case ARMII::AddrModeT2_i8pos:
+ case ARMII::AddrModeT2_i8neg:
+ case ARMII::AddrModeT2_i8s4:
+ case ARMII::AddrModeT2_i12: {
+ uint32_t Imm = 0;
+ for (auto Op : MI.operands()) {
+ if (Op.isImm()) {
+ Imm = Op.getImm();
+ break;
+ }
+ }
+ if (!isLegalAddressImm(MI.getOpcode(), Imm, this)) {
+ ErrInfo = "Incorrect AddrMode Imm for instruction";
+ return false;
+ }
+ break;
+ }
+ }
return true;
}
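
Editor's note: the new verifier hunk above pulls the first immediate operand out of the instruction and rejects it if it cannot be encoded by the instruction's Thumb-2 addressing mode. The following standalone sketch shows that style of range check; the enum and the hard-coded ranges only approximate what isLegalAddressImm computes and ignore opcode-specific quirks.

#include <cstdint>
#include <cstdlib>

enum class AddrModeSketch { T2_i7, T2_i8, T2_i8s4, T2_i12 };

// Approximate encodable ranges; the real check also accounts for scaling,
// sign conventions, and per-opcode restrictions.
bool isLegalAddrImmSketch(AddrModeSketch Mode, int64_t Imm) {
  switch (Mode) {
  case AddrModeSketch::T2_i7:   // 7-bit offset
    return std::abs(Imm) < (1 << 7);
  case AddrModeSketch::T2_i8:   // 8-bit offset
    return std::abs(Imm) < (1 << 8);
  case AddrModeSketch::T2_i8s4: // 8-bit offset scaled by 4
    return Imm % 4 == 0 && std::abs(Imm / 4) < (1 << 8);
  case AddrModeSketch::T2_i12:  // 12-bit unsigned offset
    return Imm >= 0 && Imm < (1 << 12);
  }
  return false;
}
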
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/ARMCallLowering.cpp b/contrib/llvm-project/llvm/lib/Target/ARM/ARMCallLowering.cpp
index 81ec4d09a408..b15ef094d9d2 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/ARMCallLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/ARMCallLowering.cpp
@@ -534,7 +534,7 @@ bool ARMCallLowering::lowerCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &
MIRBuilder.buildInstr(ARM::ADJCALLSTACKUP)
.addImm(ArgAssigner.StackOffset)
- .addImm(0)
+ .addImm(-1ULL)
.add(predOps(ARMCC::AL));
return true;
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/ARMCallLowering.h b/contrib/llvm-project/llvm/lib/Target/ARM/ARMCallLowering.h
index 87b18f811747..38095617fb4f 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/ARMCallLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/ARMCallLowering.h
@@ -23,7 +23,6 @@
namespace llvm {
class ARMTargetLowering;
-class MachineFunction;
class MachineInstrBuilder;
class MachineIRBuilder;
class Value;
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/contrib/llvm-project/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
index fa244786a80d..2f083561bbd4 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
@@ -1144,7 +1144,7 @@ static bool determineFPRegsToClear(const MachineInstr &MI,
if (!Op.isReg())
continue;
- unsigned Reg = Op.getReg();
+ Register Reg = Op.getReg();
if (Op.isDef()) {
if ((Reg >= ARM::Q0 && Reg <= ARM::Q7) ||
(Reg >= ARM::D0 && Reg <= ARM::D15) ||
@@ -1356,7 +1356,7 @@ void ARMExpandPseudo::CMSESaveClearFPRegsV8(
std::vector<unsigned> NonclearedFPRegs;
for (const MachineOperand &Op : MBBI->operands()) {
if (Op.isReg() && Op.isUse()) {
- unsigned Reg = Op.getReg();
+ Register Reg = Op.getReg();
assert(!ARM::DPRRegClass.contains(Reg) ||
ARM::DPR_VFP2RegClass.contains(Reg));
assert(!ARM::QPRRegClass.contains(Reg));
@@ -1451,9 +1451,9 @@ void ARMExpandPseudo::CMSESaveClearFPRegsV8(
// restore FPSCR from stack and clear bits 0-4, 7, 28-31
// The other bits are program global according to the AAPCS
if (passesFPReg) {
- BuildMI(MBB, MBBI, DL, TII->get(ARM::t2LDRi8), SpareReg)
+ BuildMI(MBB, MBBI, DL, TII->get(ARM::tLDRspi), SpareReg)
.addReg(ARM::SP)
- .addImm(0x40)
+ .addImm(0x10)
.add(predOps(ARMCC::AL));
BuildMI(MBB, MBBI, DL, TII->get(ARM::t2BICri), SpareReg)
.addReg(SpareReg)
@@ -1543,7 +1543,7 @@ void ARMExpandPseudo::CMSERestoreFPRegsV8(
std::vector<unsigned> NonclearedFPRegs;
for (const MachineOperand &Op : MBBI->operands()) {
if (Op.isReg() && Op.isDef()) {
- unsigned Reg = Op.getReg();
+ Register Reg = Op.getReg();
assert(!ARM::DPRRegClass.contains(Reg) ||
ARM::DPR_VFP2RegClass.contains(Reg));
assert(!ARM::QPRRegClass.contains(Reg));
@@ -1663,7 +1663,7 @@ static bool definesOrUsesFPReg(const MachineInstr &MI) {
for (const MachineOperand &Op : MI.operands()) {
if (!Op.isReg())
continue;
- unsigned Reg = Op.getReg();
+ Register Reg = Op.getReg();
if ((Reg >= ARM::Q0 && Reg <= ARM::Q7) ||
(Reg >= ARM::D0 && Reg <= ARM::D15) ||
(Reg >= ARM::S0 && Reg <= ARM::S31))
@@ -2201,7 +2201,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
}
case ARM::tBLXNS_CALL: {
DebugLoc DL = MBBI->getDebugLoc();
- unsigned JumpReg = MBBI->getOperand(0).getReg();
+ Register JumpReg = MBBI->getOperand(0).getReg();
// Figure out which registers are live at the point immediately before the
// call. When we indiscriminately push a set of registers, the live
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/ARMFastISel.cpp b/contrib/llvm-project/llvm/lib/Target/ARM/ARMFastISel.cpp
index 28a076edd6dc..5d94b99d4c5d 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/ARMFastISel.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/ARMFastISel.cpp
@@ -319,7 +319,7 @@ unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, unsigned Op1) {
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
// Make sure the input operands are sufficiently constrained to be legal
@@ -346,7 +346,7 @@ unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, uint64_t Imm) {
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
// Make sure the input operand is sufficiently constrained to be legal
@@ -371,7 +371,7 @@ unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
unsigned ARMFastISel::fastEmitInst_i(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
uint64_t Imm) {
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1) {
@@ -392,7 +392,7 @@ unsigned ARMFastISel::fastEmitInst_i(unsigned MachineInstOpcode,
unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {
if (VT == MVT::f64) return 0;
- unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
+ Register MoveReg = createResultReg(TLI.getRegClassFor(VT));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::VMOVSR), MoveReg)
.addReg(SrcReg));
@@ -402,7 +402,7 @@ unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {
unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) {
if (VT == MVT::i64) return 0;
- unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
+ Register MoveReg = createResultReg(TLI.getRegClassFor(VT));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::VMOVRS), MoveReg)
.addReg(SrcReg));
@@ -428,7 +428,7 @@ unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
Imm = ARM_AM::getFP32Imm(Val);
Opc = ARM::FCONSTS;
}
- unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
+ Register DestReg = createResultReg(TLI.getRegClassFor(VT));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), DestReg).addImm(Imm));
return DestReg;
@@ -440,7 +440,7 @@ unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
// MachineConstantPool wants an explicit alignment.
Align Alignment = DL.getPrefTypeAlign(CFP->getType());
unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Alignment);
- unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
+ Register DestReg = createResultReg(TLI.getRegClassFor(VT));
unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;
// The extra reg is for addrmode5.
@@ -462,7 +462,7 @@ unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
&ARM::GPRRegClass;
- unsigned ImmReg = createResultReg(RC);
+ Register ImmReg = createResultReg(RC);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ImmReg)
.addImm(CI->getZExtValue()));
@@ -478,7 +478,7 @@ unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
&ARM::GPRRegClass;
- unsigned ImmReg = createResultReg(RC);
+ Register ImmReg = createResultReg(RC);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ImmReg)
.addImm(Imm));
@@ -531,7 +531,7 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
bool IsIndirect = Subtarget->isGVIndirectSymbol(GV);
const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
: &ARM::GPRRegClass;
- unsigned DestReg = createResultReg(RC);
+ Register DestReg = createResultReg(RC);
// FastISel TLS support on non-MachO is broken, punt to SelectionDAG.
const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
@@ -589,7 +589,7 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
if (IsPositionIndependent) {
unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;
- unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
+ Register NewDestReg = createResultReg(TLI.getRegClassFor(VT));
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
DbgLoc, TII.get(Opc), NewDestReg)
@@ -605,7 +605,7 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
(Subtarget->isTargetMachO() && IsIndirect) ||
Subtarget->genLongCalls()) {
MachineInstrBuilder MIB;
- unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
+ Register NewDestReg = createResultReg(TLI.getRegClassFor(VT));
if (isThumb2)
MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::t2LDRi12), NewDestReg)
@@ -657,7 +657,7 @@ unsigned ARMFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
if (SI != FuncInfo.StaticAllocaMap.end()) {
unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
ResultReg = constrainOperandRegClass(TII.get(Opc), ResultReg, 0);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
@@ -832,7 +832,7 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {
if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
: &ARM::GPRRegClass;
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg)
@@ -991,7 +991,7 @@ bool ARMFastISel::ARMEmitLoad(MVT VT, Register &ResultReg, Address &Addr,
// If we had an unaligned load of a float we've converted it to an regular
// load. Now we must move from the GRP to the FP register.
if (needVMOV) {
- unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
+ Register MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::VMOVSR), MoveReg)
.addReg(ResultReg));
@@ -1044,7 +1044,7 @@ bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
// This is mostly going to be Neon/vector support.
default: return false;
case MVT::i1: {
- unsigned Res = createResultReg(isThumb2 ? &ARM::tGPRRegClass
+ Register Res = createResultReg(isThumb2 ? &ARM::tGPRRegClass
: &ARM::GPRRegClass);
unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
SrcReg = constrainOperandRegClass(TII.get(Opc), SrcReg, 1);
@@ -1095,7 +1095,7 @@ bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
if (!Subtarget->hasVFP2Base()) return false;
// Unaligned stores need special handling. Floats require word-alignment.
if (Alignment && Alignment < 4) {
- unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
+ Register MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::VMOVRS), MoveReg)
.addReg(SrcReg));
@@ -1257,7 +1257,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
(isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
- unsigned OpReg = getRegForValue(TI->getOperand(0));
+ Register OpReg = getRegForValue(TI->getOperand(0));
OpReg = constrainOperandRegClass(TII.get(TstOpc), OpReg, 0);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TstOpc))
@@ -1284,7 +1284,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
return true;
}
- unsigned CmpReg = getRegForValue(BI->getCondition());
+ Register CmpReg = getRegForValue(BI->getCondition());
if (CmpReg == 0) return false;
// We've been divorced from our compare! Our block was split, and
@@ -1315,7 +1315,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
}
bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
- unsigned AddrReg = getRegForValue(I->getOperand(0));
+ Register AddrReg = getRegForValue(I->getOperand(0));
if (AddrReg == 0) return false;
unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
@@ -1406,7 +1406,7 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
break;
}
- unsigned SrcReg1 = getRegForValue(Src1Value);
+ Register SrcReg1 = getRegForValue(Src1Value);
if (SrcReg1 == 0) return false;
unsigned SrcReg2 = 0;
@@ -1468,7 +1468,7 @@ bool ARMFastISel::SelectCmp(const Instruction *I) {
unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
: &ARM::GPRRegClass;
- unsigned DestReg = createResultReg(RC);
+ Register DestReg = createResultReg(RC);
Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
unsigned ZeroReg = fastMaterializeConstant(Zero);
// ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR.
@@ -1488,10 +1488,10 @@ bool ARMFastISel::SelectFPExt(const Instruction *I) {
if (!I->getType()->isDoubleTy() ||
!V->getType()->isFloatTy()) return false;
- unsigned Op = getRegForValue(V);
+ Register Op = getRegForValue(V);
if (Op == 0) return false;
- unsigned Result = createResultReg(&ARM::DPRRegClass);
+ Register Result = createResultReg(&ARM::DPRRegClass);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::VCVTDS), Result)
.addReg(Op));
@@ -1507,10 +1507,10 @@ bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
if (!(I->getType()->isFloatTy() &&
V->getType()->isDoubleTy())) return false;
- unsigned Op = getRegForValue(V);
+ Register Op = getRegForValue(V);
if (Op == 0) return false;
- unsigned Result = createResultReg(&ARM::SPRRegClass);
+ Register Result = createResultReg(&ARM::SPRRegClass);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::VCVTSD), Result)
.addReg(Op));
@@ -1535,7 +1535,7 @@ bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
return false;
- unsigned SrcReg = getRegForValue(Src);
+ Register SrcReg = getRegForValue(Src);
if (SrcReg == 0) return false;
// Handle sign-extension.
@@ -1556,7 +1556,7 @@ bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;
else return false;
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
+ Register ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg).addReg(FP));
updateValueMap(I, ResultReg);
@@ -1572,7 +1572,7 @@ bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
if (!isTypeLegal(RetTy, DstVT))
return false;
- unsigned Op = getRegForValue(I->getOperand(0));
+ Register Op = getRegForValue(I->getOperand(0));
if (Op == 0) return false;
unsigned Opc;
@@ -1583,7 +1583,7 @@ bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
else return false;
// f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg.
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
+ Register ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg).addReg(Op));
@@ -1604,9 +1604,9 @@ bool ARMFastISel::SelectSelect(const Instruction *I) {
// Things need to be register sized for register moves.
if (VT != MVT::i32) return false;
- unsigned CondReg = getRegForValue(I->getOperand(0));
+ Register CondReg = getRegForValue(I->getOperand(0));
if (CondReg == 0) return false;
- unsigned Op1Reg = getRegForValue(I->getOperand(1));
+ Register Op1Reg = getRegForValue(I->getOperand(1));
if (Op1Reg == 0) return false;
// Check to see if we can use an immediate in the conditional move.
@@ -1649,7 +1649,7 @@ bool ARMFastISel::SelectSelect(const Instruction *I) {
else
MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
}
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
if (!UseImm) {
Op2Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op2Reg, 1);
Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 2);
@@ -1752,15 +1752,15 @@ bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
break;
}
- unsigned SrcReg1 = getRegForValue(I->getOperand(0));
+ Register SrcReg1 = getRegForValue(I->getOperand(0));
if (SrcReg1 == 0) return false;
// TODO: Often the 2nd operand is an immediate, which can be encoded directly
// in the instruction, rather than materializing the value in a register.
- unsigned SrcReg2 = getRegForValue(I->getOperand(1));
+ Register SrcReg2 = getRegForValue(I->getOperand(1));
if (SrcReg2 == 0) return false;
- unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
+ Register ResultReg = createResultReg(&ARM::GPRnopcRegClass);
SrcReg1 = constrainOperandRegClass(TII.get(Opc), SrcReg1, 1);
SrcReg2 = constrainOperandRegClass(TII.get(Opc), SrcReg2, 2);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
@@ -1803,13 +1803,13 @@ bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
Opc = is64bit ? ARM::VMULD : ARM::VMULS;
break;
}
- unsigned Op1 = getRegForValue(I->getOperand(0));
+ Register Op1 = getRegForValue(I->getOperand(0));
if (Op1 == 0) return false;
- unsigned Op2 = getRegForValue(I->getOperand(1));
+ Register Op2 = getRegForValue(I->getOperand(1));
if (Op2 == 0) return false;
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy));
+ Register ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg)
.addReg(Op1).addReg(Op2));
@@ -2022,7 +2022,7 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<Register> &UsedRegs,
unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(AdjStackUp))
- .addImm(NumBytes).addImm(0));
+ .addImm(NumBytes).addImm(-1ULL));
// Now the return value.
if (RetVT != MVT::isVoid) {
@@ -2101,7 +2101,7 @@ bool ARMFastISel::SelectRet(const Instruction *I) {
F.isVarArg()));
const Value *RV = Ret->getOperand(0);
- unsigned Reg = getRegForValue(RV);
+ Register Reg = getRegForValue(RV);
if (Reg == 0)
return false;
@@ -2226,7 +2226,7 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
ArgVTs.reserve(I->getNumOperands());
ArgFlags.reserve(I->getNumOperands());
for (Value *Op : I->operands()) {
- unsigned Arg = getRegForValue(Op);
+ Register Arg = getRegForValue(Op);
if (Arg == 0) return false;
Type *ArgTy = Op->getType();
@@ -2588,7 +2588,7 @@ bool ARMFastISel::SelectTrunc(const Instruction *I) {
if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
return false;
- unsigned SrcReg = getRegForValue(Op);
+ Register SrcReg = getRegForValue(Op);
if (!SrcReg) return false;
// Because the high bits are undefined, a truncate doesn't generate
@@ -2744,7 +2744,7 @@ bool ARMFastISel::SelectIntExt(const Instruction *I) {
Type *SrcTy = Src->getType();
bool isZExt = isa<ZExtInst>(I);
- unsigned SrcReg = getRegForValue(Src);
+ Register SrcReg = getRegForValue(Src);
if (!SrcReg) return false;
EVT SrcEVT, DestEVT;
@@ -2788,7 +2788,7 @@ bool ARMFastISel::SelectShift(const Instruction *I,
}
Value *Src1Value = I->getOperand(0);
- unsigned Reg1 = getRegForValue(Src1Value);
+ Register Reg1 = getRegForValue(Src1Value);
if (Reg1 == 0) return false;
unsigned Reg2 = 0;
@@ -2797,7 +2797,7 @@ bool ARMFastISel::SelectShift(const Instruction *I,
if (Reg2 == 0) return false;
}
- unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
+ Register ResultReg = createResultReg(&ARM::GPRnopcRegClass);
if(ResultReg == 0) return false;
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
@@ -2975,7 +2975,7 @@ unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, MVT VT) {
MIB.add(predOps(ARMCC::AL));
// Fix the address by adding pc.
- unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
+ Register DestReg = createResultReg(TLI.getRegClassFor(VT));
Opc = Subtarget->isThumb() ? ARM::tPICADD : UseGOT_PREL ? ARM::PICLDR
: ARM::PICADD;
DestReg = constrainOperandRegClass(TII.get(Opc), DestReg, 0);
@@ -2987,7 +2987,7 @@ unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, MVT VT) {
MIB.add(predOps(ARMCC::AL));
if (UseGOT_PREL && Subtarget->isThumb()) {
- unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
+ Register NewDestReg = createResultReg(TLI.getRegClassFor(VT));
MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::t2LDRi12), NewDestReg)
.addReg(DestReg)
@@ -3057,11 +3057,11 @@ bool ARMFastISel::fastLowerArguments() {
for (const Argument &Arg : F->args()) {
unsigned ArgNo = Arg.getArgNo();
unsigned SrcReg = GPRArgRegs[ArgNo];
- unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
+ Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
// FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
// Without this, EmitLiveInCopies may eliminate the livein if its only
// use is a bitcast (which isn't turned into an instruction).
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY),
ResultReg).addReg(DstReg, getKillRegState(true));
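
Editor's note: most of the ARMFastISel hunks above are a mechanical migration from raw unsigned register numbers to llvm::Register, a thin value type that keeps the physical/virtual distinction out of caller code. The class below is a simplified imitation for illustration only; the boundary constant and method set are assumptions, and the real class lives in llvm/CodeGen/Register.h.

#include <cstdint>

class RegisterSketch {
  unsigned Reg = 0;
  static constexpr unsigned FirstVirtual = 1u << 31; // simplified boundary

public:
  constexpr RegisterSketch() = default;
  constexpr RegisterSketch(unsigned R) : Reg(R) {}

  constexpr bool isValid() const { return Reg != 0; }
  constexpr bool isVirtual() const { return Reg >= FirstVirtual; }
  constexpr bool isPhysical() const { return isValid() && !isVirtual(); }
  constexpr operator unsigned() const { return Reg; } // interoperates with old unsigned code
};

static_assert(!RegisterSketch().isValid(), "default register is the null register");
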
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/ARMFrameLowering.cpp b/contrib/llvm-project/llvm/lib/Target/ARM/ARMFrameLowering.cpp
index 4b59f9cb94ce..1f2f6f7497e0 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/ARMFrameLowering.cpp
@@ -516,7 +516,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
// Determine spill area sizes.
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
int FI = I.getFrameIdx();
switch (Reg) {
case ARM::R8:
@@ -751,7 +751,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
MachineBasicBlock::iterator Pos = std::next(GPRCS1Push);
int CFIIndex;
for (const auto &Entry : CSI) {
- unsigned Reg = Entry.getReg();
+ Register Reg = Entry.getReg();
int FI = Entry.getFrameIdx();
switch (Reg) {
case ARM::R8:
@@ -784,7 +784,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
if (GPRCS2Size > 0) {
MachineBasicBlock::iterator Pos = std::next(GPRCS2Push);
for (const auto &Entry : CSI) {
- unsigned Reg = Entry.getReg();
+ Register Reg = Entry.getReg();
int FI = Entry.getFrameIdx();
switch (Reg) {
case ARM::R8:
@@ -794,7 +794,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
case ARM::R12:
if (STI.splitFramePushPop(MF)) {
unsigned DwarfReg = MRI->getDwarfRegNum(
- Reg == ARM::R12 ? (unsigned)ARM::RA_AUTH_CODE : Reg, true);
+ Reg == ARM::R12 ? ARM::RA_AUTH_CODE : Reg, true);
unsigned Offset = MFI.getObjectOffset(FI);
unsigned CFIIndex = MF.addFrameInst(
MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
@@ -812,7 +812,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
// instructions in the prologue.
MachineBasicBlock::iterator Pos = std::next(LastPush);
for (const auto &Entry : CSI) {
- unsigned Reg = Entry.getReg();
+ Register Reg = Entry.getReg();
int FI = Entry.getFrameIdx();
if ((Reg >= ARM::D0 && Reg <= ARM::D31) &&
(Reg < ARM::D8 || Reg >= ARM::D8 + AFI->getNumAlignedDPRCS2Regs())) {
@@ -1144,7 +1144,7 @@ void ARMFrameLowering::emitPushInst(MachineBasicBlock &MBB,
while (i != 0) {
unsigned LastReg = 0;
for (; i != 0; --i) {
- unsigned Reg = CSI[i-1].getReg();
+ Register Reg = CSI[i-1].getReg();
if (!(Func)(Reg, STI.splitFramePushPop(MF))) continue;
// D-registers in the aligned area DPRCS2 are NOT spilled here.
@@ -1237,7 +1237,7 @@ void ARMFrameLowering::emitPopInst(MachineBasicBlock &MBB,
bool DeleteRet = false;
for (; i != 0; --i) {
CalleeSavedInfo &Info = CSI[i-1];
- unsigned Reg = Info.getReg();
+ Register Reg = Info.getReg();
if (!(Func)(Reg, STI.splitFramePushPop(MF))) continue;
// The aligned reloads from area DPRCS2 are not inserted here.
@@ -1812,7 +1812,7 @@ bool ARMFrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
// shrinkwrapping can cause clobbering of r12 when the PAC code is
// generated. A follow-up patch will fix this in a more performant manner.
if (MF.getInfo<ARMFunctionInfo>()->shouldSignReturnAddress(
- false /*SpillsLR */))
+ true /* SpillsLR */))
return false;
return true;
@@ -2353,7 +2353,7 @@ bool ARMFrameLowering::assignCalleeSavedSpillSlots(
// LR, R7, R6, R5, R4, <R12>, R11, R10, R9, R8, D15-D8
CSI.insert(find_if(CSI,
[=](const auto &CS) {
- unsigned Reg = CS.getReg();
+ Register Reg = CS.getReg();
return Reg == ARM::R10 || Reg == ARM::R11 ||
Reg == ARM::R8 || Reg == ARM::R9 ||
ARM::DPRRegClass.contains(Reg);
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp b/contrib/llvm-project/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp
index f083fa6662e9..0d201a67af46 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp
@@ -164,7 +164,7 @@ static bool getBaseOffset(const MachineInstr &MI, const MachineOperand *&BaseOp,
ARMBankConflictHazardRecognizer::ARMBankConflictHazardRecognizer(
const ScheduleDAG *DAG, int64_t CPUBankMask, bool CPUAssumeITCMConflict)
- : ScheduleHazardRecognizer(), MF(DAG->MF), DL(DAG->MF.getDataLayout()),
+ : MF(DAG->MF), DL(DAG->MF.getDataLayout()),
DataMask(DataBankMask.getNumOccurrences() ? int64_t(DataBankMask)
: CPUBankMask),
AssumeITCMBankConflict(AssumeITCMConflict.getNumOccurrences()
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/ARMHazardRecognizer.h b/contrib/llvm-project/llvm/lib/Target/ARM/ARMHazardRecognizer.h
index c1f1bcd0a629..66a1477e5e08 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/ARMHazardRecognizer.h
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/ARMHazardRecognizer.h
@@ -34,7 +34,7 @@ class ARMHazardRecognizerFPMLx : public ScheduleHazardRecognizer {
unsigned FpMLxStalls = 0;
public:
- ARMHazardRecognizerFPMLx() : ScheduleHazardRecognizer() { MaxLookAhead = 1; }
+ ARMHazardRecognizerFPMLx() { MaxLookAhead = 1; }
HazardType getHazardType(SUnit *SU, int Stalls) override;
void Reset() override;
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/contrib/llvm-project/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
index bb2859c766c2..98c8133282a2 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -3227,7 +3227,7 @@ bool ARMDAGToDAGISel::transformFixedFloatingPointConversion(SDNode *N,
if (!ImmAPF.getExactInverse(&ToConvert))
return false;
}
- APSInt Converted(64, 0);
+ APSInt Converted(64, false);
bool IsExact;
ToConvert.convertToInteger(Converted, llvm::RoundingMode::NearestTiesToEven,
&IsExact);
@@ -5737,8 +5737,7 @@ bool ARMDAGToDAGISel::tryInlineAsm(SDNode *N){
// them into a GPRPair.
SDLoc dl(N);
- SDValue Glue = N->getGluedNode() ? N->getOperand(NumOps-1)
- : SDValue(nullptr,0);
+ SDValue Glue = N->getGluedNode() ? N->getOperand(NumOps - 1) : SDValue();
SmallVector<bool, 8> OpChanged;
// Glue node will be appended late.
@@ -5801,8 +5800,8 @@ bool ARMDAGToDAGISel::tryInlineAsm(SDNode *N){
assert((i+2 < NumOps) && "Invalid number of operands in inline asm");
SDValue V0 = N->getOperand(i+1);
SDValue V1 = N->getOperand(i+2);
- unsigned Reg0 = cast<RegisterSDNode>(V0)->getReg();
- unsigned Reg1 = cast<RegisterSDNode>(V1)->getReg();
+ Register Reg0 = cast<RegisterSDNode>(V0)->getReg();
+ Register Reg1 = cast<RegisterSDNode>(V1)->getReg();
SDValue PairedReg;
MachineRegisterInfo &MRI = MF->getRegInfo();
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/ARMISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 3d45db349644..fe4e6b24367a 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -2899,7 +2899,7 @@ bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
unsigned Bytes = Arg.getValueSizeInBits() / 8;
int FI = std::numeric_limits<int>::max();
if (Arg.getOpcode() == ISD::CopyFromReg) {
- unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
+ Register VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
if (!Register::isVirtualRegister(VR))
return false;
MachineInstr *Def = MRI->getVRegDef(VR);
@@ -4018,7 +4018,7 @@ SDValue ARMTargetLowering::LowerINTRINSIC_VOID(
ARI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C);
assert(Mask && "Missing call preserved mask for calling convention");
// Mark LR an implicit live-in.
- unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
+ Register Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
SDValue ReturnAddress =
DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, PtrVT);
constexpr EVT ResultTys[] = {MVT::Other, MVT::Glue};
@@ -4272,7 +4272,7 @@ SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA,
RC = &ARM::GPRRegClass;
// Transform the arguments stored in physical registers into virtual ones.
- unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
+ Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
SDValue ArgValue2;
@@ -4342,7 +4342,7 @@ int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) {
- unsigned VReg = MF.addLiveIn(Reg, RC);
+ Register VReg = MF.addLiveIn(Reg, RC);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
MachinePointerInfo(OrigArg, 4 * i));
@@ -4527,7 +4527,7 @@ SDValue ARMTargetLowering::LowerFormalArguments(
llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
// Transform the arguments in physical registers into virtual ones.
- unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
+ Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
// If this value is passed in r0 and has the returned attribute (e.g.
@@ -6065,7 +6065,7 @@ SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
}
// Return LR, which contains the return address. Mark it an implicit live-in.
- unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
+ Register Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
}
@@ -14682,7 +14682,9 @@ static SDValue PerformBFICombine(SDNode *N, SelectionDAG &DAG) {
return SDValue();
}
-// Check that N is CMPZ(CSINC(0, 0, CC, X)), return X if valid.
+// Check that N is CMPZ(CSINC(0, 0, CC, X)),
+// or CMPZ(CMOV(1, 0, CC, $cpsr, X))
+// return X if valid.
static SDValue IsCMPZCSINC(SDNode *Cmp, ARMCC::CondCodes &CC) {
if (Cmp->getOpcode() != ARMISD::CMPZ || !isNullConstant(Cmp->getOperand(1)))
return SDValue();
@@ -14696,12 +14698,24 @@ static SDValue IsCMPZCSINC(SDNode *Cmp, ARMCC::CondCodes &CC) {
CSInc.getConstantOperandVal(1) == 1 && CSInc->hasOneUse())
CSInc = CSInc.getOperand(0);
- if (CSInc.getOpcode() != ARMISD::CSINC ||
- !isNullConstant(CSInc.getOperand(0)) ||
- !isNullConstant(CSInc.getOperand(1)) || !CSInc->hasOneUse())
- return SDValue();
- CC = (ARMCC::CondCodes)CSInc.getConstantOperandVal(2);
- return CSInc.getOperand(3);
+ if (CSInc.getOpcode() == ARMISD::CSINC &&
+ isNullConstant(CSInc.getOperand(0)) &&
+ isNullConstant(CSInc.getOperand(1)) && CSInc->hasOneUse()) {
+ CC = (ARMCC::CondCodes)CSInc.getConstantOperandVal(2);
+ return CSInc.getOperand(3);
+ }
+ if (CSInc.getOpcode() == ARMISD::CMOV && isOneConstant(CSInc.getOperand(0)) &&
+ isNullConstant(CSInc.getOperand(1)) && CSInc->hasOneUse()) {
+ CC = (ARMCC::CondCodes)CSInc.getConstantOperandVal(2);
+ return CSInc.getOperand(4);
+ }
+ if (CSInc.getOpcode() == ARMISD::CMOV && isOneConstant(CSInc.getOperand(1)) &&
+ isNullConstant(CSInc.getOperand(0)) && CSInc->hasOneUse()) {
+ CC = ARMCC::getOppositeCondition(
+ (ARMCC::CondCodes)CSInc.getConstantOperandVal(2));
+ return CSInc.getOperand(4);
+ }
+ return SDValue();
}
static SDValue PerformCMPZCombine(SDNode *N, SelectionDAG &DAG) {
@@ -15412,13 +15426,13 @@ static SDValue PerformShuffleVMOVNCombine(ShuffleVectorSDNode *N,
return SDValue();
SDLoc DL(Trunc);
- if (isVMOVNTruncMask(N->getMask(), VT, 0))
+ if (isVMOVNTruncMask(N->getMask(), VT, false))
return DAG.getNode(
ARMISD::VMOVN, DL, VT,
DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, Trunc.getOperand(0)),
DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, Trunc.getOperand(1)),
DAG.getConstant(1, DL, MVT::i32));
- else if (isVMOVNTruncMask(N->getMask(), VT, 1))
+ else if (isVMOVNTruncMask(N->getMask(), VT, true))
return DAG.getNode(
ARMISD::VMOVN, DL, VT,
DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, Trunc.getOperand(1)),
@@ -18218,13 +18232,13 @@ SDValue ARMTargetLowering::PerformMVETruncCombine(
SmallVector<int, 8> Mask(S0->getMask().begin(), S0->getMask().end());
Mask.append(S1->getMask().begin(), S1->getMask().end());
- if (isVMOVNTruncMask(Mask, VT, 0))
+ if (isVMOVNTruncMask(Mask, VT, false))
return DAG.getNode(
ARMISD::VMOVN, DL, VT,
DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, S0->getOperand(0)),
DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, S0->getOperand(1)),
DAG.getConstant(1, DL, MVT::i32));
- if (isVMOVNTruncMask(Mask, VT, 1))
+ if (isVMOVNTruncMask(Mask, VT, true))
return DAG.getNode(
ARMISD::VMOVN, DL, VT,
DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, S0->getOperand(1)),
@@ -20775,10 +20789,10 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
Info.opc = ISD::INTRINSIC_W_CHAIN;
- Info.memVT = MVT::getVT(PtrTy->getElementType());
+ Info.memVT = MVT::getVT(PtrTy->getPointerElementType());
Info.ptrVal = I.getArgOperand(0);
Info.offset = 0;
- Info.align = DL.getABITypeAlign(PtrTy->getElementType());
+ Info.align = DL.getABITypeAlign(PtrTy->getPointerElementType());
Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
return true;
}
@@ -20787,10 +20801,10 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
Info.opc = ISD::INTRINSIC_W_CHAIN;
- Info.memVT = MVT::getVT(PtrTy->getElementType());
+ Info.memVT = MVT::getVT(PtrTy->getPointerElementType());
Info.ptrVal = I.getArgOperand(1);
Info.offset = 0;
- Info.align = DL.getABITypeAlign(PtrTy->getElementType());
+ Info.align = DL.getABITypeAlign(PtrTy->getPointerElementType());
Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
return true;
}
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/ARMInstrInfo.cpp b/contrib/llvm-project/llvm/lib/Target/ARM/ARMInstrInfo.cpp
index 5dee5e04af81..00db13f2eb52 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/ARMInstrInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/ARMInstrInfo.cpp
@@ -28,8 +28,7 @@
#include "llvm/MC/MCInst.h"
using namespace llvm;
-ARMInstrInfo::ARMInstrInfo(const ARMSubtarget &STI)
- : ARMBaseInstrInfo(STI), RI() {}
+ARMInstrInfo::ARMInstrInfo(const ARMSubtarget &STI) : ARMBaseInstrInfo(STI) {}
/// Return the noop instruction to use for a noop.
MCInst ARMInstrInfo::getNop() const {
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/ARMInstrNEON.td b/contrib/llvm-project/llvm/lib/Target/ARM/ARMInstrNEON.td
index aaf3280ea150..357aa6d062e9 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/ARMInstrNEON.td
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/ARMInstrNEON.td
@@ -4526,64 +4526,48 @@ let Predicates = [HasNEON, HasV8_1a] in {
defm VQRDMLAH : N3VInt3_HS<1, 0, 0b1011, 1, IIC_VMACi16D, IIC_VMACi32D,
IIC_VMACi16Q, IIC_VMACi32Q, "vqrdmlah", "s",
null_frag>;
- def : Pat<(v4i16 (saddsat
- (v4i16 DPR:$src1),
- (v4i16 (int_arm_neon_vqrdmulh (v4i16 DPR:$Vn),
- (v4i16 DPR:$Vm))))),
+ def : Pat<(v4i16 (int_arm_neon_vqrdmlah (v4i16 DPR:$src1), (v4i16 DPR:$Vn),
+ (v4i16 DPR:$Vm))),
(v4i16 (VQRDMLAHv4i16 DPR:$src1, DPR:$Vn, DPR:$Vm))>;
- def : Pat<(v2i32 (saddsat
- (v2i32 DPR:$src1),
- (v2i32 (int_arm_neon_vqrdmulh (v2i32 DPR:$Vn),
- (v2i32 DPR:$Vm))))),
+ def : Pat<(v2i32 (int_arm_neon_vqrdmlah (v2i32 DPR:$src1), (v2i32 DPR:$Vn),
+ (v2i32 DPR:$Vm))),
(v2i32 (VQRDMLAHv2i32 DPR:$src1, DPR:$Vn, DPR:$Vm))>;
- def : Pat<(v8i16 (saddsat
- (v8i16 QPR:$src1),
- (v8i16 (int_arm_neon_vqrdmulh (v8i16 QPR:$Vn),
- (v8i16 QPR:$Vm))))),
+ def : Pat<(v8i16 (int_arm_neon_vqrdmlah (v8i16 QPR:$src1), (v8i16 QPR:$Vn),
+ (v8i16 QPR:$Vm))),
(v8i16 (VQRDMLAHv8i16 QPR:$src1, QPR:$Vn, QPR:$Vm))>;
- def : Pat<(v4i32 (saddsat
- (v4i32 QPR:$src1),
- (v4i32 (int_arm_neon_vqrdmulh (v4i32 QPR:$Vn),
- (v4i32 QPR:$Vm))))),
+ def : Pat<(v4i32 (int_arm_neon_vqrdmlah (v4i32 QPR:$src1), (v4i32 QPR:$Vn),
+ (v4i32 QPR:$Vm))),
(v4i32 (VQRDMLAHv4i32 QPR:$src1, QPR:$Vn, QPR:$Vm))>;
defm VQRDMLAHsl : N3VMulOpSL_HS<0b1110, IIC_VMACi16D, IIC_VMACi32D,
IIC_VMACi16Q, IIC_VMACi32Q, "vqrdmlah", "s",
null_frag>;
- def : Pat<(v4i16 (saddsat
- (v4i16 DPR:$src1),
- (v4i16 (int_arm_neon_vqrdmulh
+ def : Pat<(v4i16 (int_arm_neon_vqrdmlah (v4i16 DPR:$src1),
(v4i16 DPR:$Vn),
(v4i16 (ARMvduplane (v4i16 DPR_8:$Vm),
- imm:$lane)))))),
+ imm:$lane)))),
(v4i16 (VQRDMLAHslv4i16 DPR:$src1, DPR:$Vn, DPR_8:$Vm,
imm:$lane))>;
- def : Pat<(v2i32 (saddsat
- (v2i32 DPR:$src1),
- (v2i32 (int_arm_neon_vqrdmulh
+ def : Pat<(v2i32 (int_arm_neon_vqrdmlah (v2i32 DPR:$src1),
(v2i32 DPR:$Vn),
(v2i32 (ARMvduplane (v2i32 DPR_VFP2:$Vm),
- imm:$lane)))))),
+ imm:$lane)))),
(v2i32 (VQRDMLAHslv2i32 DPR:$src1, DPR:$Vn, DPR_VFP2:$Vm,
imm:$lane))>;
- def : Pat<(v8i16 (saddsat
- (v8i16 QPR:$src1),
- (v8i16 (int_arm_neon_vqrdmulh
+ def : Pat<(v8i16 (int_arm_neon_vqrdmlah (v8i16 QPR:$src1),
(v8i16 QPR:$src2),
(v8i16 (ARMvduplane (v8i16 QPR:$src3),
- imm:$lane)))))),
+ imm:$lane)))),
(v8i16 (VQRDMLAHslv8i16 (v8i16 QPR:$src1),
(v8i16 QPR:$src2),
(v4i16 (EXTRACT_SUBREG
QPR:$src3,
(DSubReg_i16_reg imm:$lane))),
(SubReg_i16_lane imm:$lane)))>;
- def : Pat<(v4i32 (saddsat
- (v4i32 QPR:$src1),
- (v4i32 (int_arm_neon_vqrdmulh
+ def : Pat<(v4i32 (int_arm_neon_vqrdmlah (v4i32 QPR:$src1),
(v4i32 QPR:$src2),
(v4i32 (ARMvduplane (v4i32 QPR:$src3),
- imm:$lane)))))),
+ imm:$lane)))),
(v4i32 (VQRDMLAHslv4i32 (v4i32 QPR:$src1),
(v4i32 QPR:$src2),
(v2i32 (EXTRACT_SUBREG
@@ -4596,63 +4580,47 @@ let Predicates = [HasNEON, HasV8_1a] in {
defm VQRDMLSH : N3VInt3_HS<1, 0, 0b1100, 1, IIC_VMACi16D, IIC_VMACi32D,
IIC_VMACi16Q, IIC_VMACi32Q, "vqrdmlsh", "s",
null_frag>;
- def : Pat<(v4i16 (ssubsat
- (v4i16 DPR:$src1),
- (v4i16 (int_arm_neon_vqrdmulh (v4i16 DPR:$Vn),
- (v4i16 DPR:$Vm))))),
+ def : Pat<(v4i16 (int_arm_neon_vqrdmlsh (v4i16 DPR:$src1), (v4i16 DPR:$Vn),
+ (v4i16 DPR:$Vm))),
(v4i16 (VQRDMLSHv4i16 DPR:$src1, DPR:$Vn, DPR:$Vm))>;
- def : Pat<(v2i32 (ssubsat
- (v2i32 DPR:$src1),
- (v2i32 (int_arm_neon_vqrdmulh (v2i32 DPR:$Vn),
- (v2i32 DPR:$Vm))))),
+ def : Pat<(v2i32 (int_arm_neon_vqrdmlsh (v2i32 DPR:$src1), (v2i32 DPR:$Vn),
+ (v2i32 DPR:$Vm))),
(v2i32 (VQRDMLSHv2i32 DPR:$src1, DPR:$Vn, DPR:$Vm))>;
- def : Pat<(v8i16 (ssubsat
- (v8i16 QPR:$src1),
- (v8i16 (int_arm_neon_vqrdmulh (v8i16 QPR:$Vn),
- (v8i16 QPR:$Vm))))),
+ def : Pat<(v8i16 (int_arm_neon_vqrdmlsh (v8i16 QPR:$src1), (v8i16 QPR:$Vn),
+ (v8i16 QPR:$Vm))),
(v8i16 (VQRDMLSHv8i16 QPR:$src1, QPR:$Vn, QPR:$Vm))>;
- def : Pat<(v4i32 (ssubsat
- (v4i32 QPR:$src1),
- (v4i32 (int_arm_neon_vqrdmulh (v4i32 QPR:$Vn),
- (v4i32 QPR:$Vm))))),
+ def : Pat<(v4i32 (int_arm_neon_vqrdmlsh (v4i32 QPR:$src1), (v4i32 QPR:$Vn),
+ (v4i32 QPR:$Vm))),
(v4i32 (VQRDMLSHv4i32 QPR:$src1, QPR:$Vn, QPR:$Vm))>;
defm VQRDMLSHsl : N3VMulOpSL_HS<0b1111, IIC_VMACi16D, IIC_VMACi32D,
IIC_VMACi16Q, IIC_VMACi32Q, "vqrdmlsh", "s",
null_frag>;
- def : Pat<(v4i16 (ssubsat
- (v4i16 DPR:$src1),
- (v4i16 (int_arm_neon_vqrdmulh
+ def : Pat<(v4i16 (int_arm_neon_vqrdmlsh (v4i16 DPR:$src1),
(v4i16 DPR:$Vn),
(v4i16 (ARMvduplane (v4i16 DPR_8:$Vm),
- imm:$lane)))))),
+ imm:$lane)))),
(v4i16 (VQRDMLSHslv4i16 DPR:$src1, DPR:$Vn, DPR_8:$Vm, imm:$lane))>;
- def : Pat<(v2i32 (ssubsat
- (v2i32 DPR:$src1),
- (v2i32 (int_arm_neon_vqrdmulh
+ def : Pat<(v2i32 (int_arm_neon_vqrdmlsh (v2i32 DPR:$src1),
(v2i32 DPR:$Vn),
(v2i32 (ARMvduplane (v2i32 DPR_VFP2:$Vm),
- imm:$lane)))))),
+ imm:$lane)))),
(v2i32 (VQRDMLSHslv2i32 DPR:$src1, DPR:$Vn, DPR_VFP2:$Vm,
imm:$lane))>;
- def : Pat<(v8i16 (ssubsat
- (v8i16 QPR:$src1),
- (v8i16 (int_arm_neon_vqrdmulh
+ def : Pat<(v8i16 (int_arm_neon_vqrdmlsh (v8i16 QPR:$src1),
(v8i16 QPR:$src2),
(v8i16 (ARMvduplane (v8i16 QPR:$src3),
- imm:$lane)))))),
+ imm:$lane)))),
(v8i16 (VQRDMLSHslv8i16 (v8i16 QPR:$src1),
(v8i16 QPR:$src2),
(v4i16 (EXTRACT_SUBREG
QPR:$src3,
(DSubReg_i16_reg imm:$lane))),
(SubReg_i16_lane imm:$lane)))>;
- def : Pat<(v4i32 (ssubsat
- (v4i32 QPR:$src1),
- (v4i32 (int_arm_neon_vqrdmulh
+ def : Pat<(v4i32 (int_arm_neon_vqrdmlsh (v4i32 QPR:$src1),
(v4i32 QPR:$src2),
(v4i32 (ARMvduplane (v4i32 QPR:$src3),
- imm:$lane)))))),
+ imm:$lane)))),
(v4i32 (VQRDMLSHslv4i32 (v4i32 QPR:$src1),
(v4i32 QPR:$src2),
(v2i32 (EXTRACT_SUBREG
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/ARMInstructionSelector.cpp b/contrib/llvm-project/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
index 8be4e3f160e3..188b5562cac9 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
@@ -171,8 +171,8 @@ createARMInstructionSelector(const ARMBaseTargetMachine &TM,
ARMInstructionSelector::ARMInstructionSelector(const ARMBaseTargetMachine &TM,
const ARMSubtarget &STI,
const ARMRegisterBankInfo &RBI)
- : InstructionSelector(), TII(*STI.getInstrInfo()),
- TRI(*STI.getRegisterInfo()), TM(TM), RBI(RBI), STI(STI), Opcodes(STI),
+ : TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), TM(TM), RBI(RBI),
+ STI(STI), Opcodes(STI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/ARMRegisterInfo.cpp b/contrib/llvm-project/llvm/lib/Target/ARM/ARMRegisterInfo.cpp
index 6649750bb388..ff4647dd46fd 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/ARMRegisterInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/ARMRegisterInfo.cpp
@@ -15,4 +15,4 @@ using namespace llvm;
void ARMRegisterInfo::anchor() { }
-ARMRegisterInfo::ARMRegisterInfo() : ARMBaseRegisterInfo() {}
+ARMRegisterInfo::ARMRegisterInfo() {}
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/ARMRegisterInfo.h b/contrib/llvm-project/llvm/lib/Target/ARM/ARMRegisterInfo.h
index 87c0f322d3b3..2971b765a6fc 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/ARMRegisterInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/ARMRegisterInfo.h
@@ -17,8 +17,6 @@
namespace llvm {
-class ARMSubtarget;
-
struct ARMRegisterInfo : public ARMBaseRegisterInfo {
virtual void anchor();
public:
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/ARMSubtarget.cpp b/contrib/llvm-project/llvm/lib/Target/ARM/ARMSubtarget.cpp
index 36c4bbaafcbf..2dd25234dc50 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/ARMSubtarget.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/ARMSubtarget.cpp
@@ -15,7 +15,6 @@
#include "ARMCallLowering.h"
#include "ARMLegalizerInfo.h"
#include "ARMRegisterBankInfo.h"
-#include "ARMSubtarget.h"
#include "ARMFrameLowering.h"
#include "ARMInstrInfo.h"
#include "ARMSubtarget.h"
@@ -35,6 +34,7 @@
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ARMTargetParser.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Target/TargetOptions.h"
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/ARMSubtarget.h b/contrib/llvm-project/llvm/lib/Target/ARM/ARMSubtarget.h
index e61b90af31b0..1c2b7ee6ba35 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/ARMSubtarget.h
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/ARMSubtarget.h
@@ -121,6 +121,7 @@ protected:
ARMv85a,
ARMv86a,
ARMv87a,
+ ARMv88a,
ARMv8a,
ARMv8mBaseline,
ARMv8mMainline,
@@ -129,6 +130,7 @@ protected:
ARMv9a,
ARMv91a,
ARMv92a,
+ ARMv93a,
};
public:
@@ -174,10 +176,12 @@ protected:
bool HasV8_4aOps = false;
bool HasV8_5aOps = false;
bool HasV8_6aOps = false;
+ bool HasV8_8aOps = false;
bool HasV8_7aOps = false;
bool HasV9_0aOps = false;
bool HasV9_1aOps = false;
bool HasV9_2aOps = false;
+ bool HasV9_3aOps = false;
bool HasV8MBaselineOps = false;
bool HasV8MMainlineOps = false;
bool HasV8_1MMainlineOps = false;
@@ -635,9 +639,11 @@ public:
bool hasV8_5aOps() const { return HasV8_5aOps; }
bool hasV8_6aOps() const { return HasV8_6aOps; }
bool hasV8_7aOps() const { return HasV8_7aOps; }
+ bool hasV8_8aOps() const { return HasV8_8aOps; }
bool hasV9_0aOps() const { return HasV9_0aOps; }
bool hasV9_1aOps() const { return HasV9_1aOps; }
bool hasV9_2aOps() const { return HasV9_2aOps; }
+ bool hasV9_3aOps() const { return HasV9_3aOps; }
bool hasV8MBaselineOps() const { return HasV8MBaselineOps; }
bool hasV8MMainlineOps() const { return HasV8MMainlineOps; }
bool hasV8_1MMainlineOps() const { return HasV8_1MMainlineOps; }
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetMachine.cpp b/contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetMachine.cpp
index 0b314ac2a41e..c38970f8e341 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetMachine.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetMachine.cpp
@@ -43,6 +43,7 @@
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ARMTargetParser.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetOptions.h"
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetObjectFile.cpp b/contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetObjectFile.cpp
index 8c5438f7093b..936cae17f004 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetObjectFile.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetObjectFile.cpp
@@ -54,9 +54,7 @@ void ARMElfTargetObjectFile::Initialize(MCContext &Ctx,
}
}
-const MCRegister ARMElfTargetObjectFile::getStaticBase() const {
- return ARM::R9;
-}
+MCRegister ARMElfTargetObjectFile::getStaticBase() const { return ARM::R9; }
const MCExpr *ARMElfTargetObjectFile::
getIndirectSymViaRWPI(const MCSymbol *Sym) const {
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetObjectFile.h b/contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetObjectFile.h
index 8b13198fe144..47334b9a8a45 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetObjectFile.h
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetObjectFile.h
@@ -17,14 +17,13 @@ namespace llvm {
class ARMElfTargetObjectFile : public TargetLoweringObjectFileELF {
public:
- ARMElfTargetObjectFile()
- : TargetLoweringObjectFileELF() {
+ ARMElfTargetObjectFile() {
PLTRelativeVariantKind = MCSymbolRefExpr::VK_ARM_PREL31;
}
void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
- const MCRegister getStaticBase() const override;
+ MCRegister getStaticBase() const override;
const MCExpr *getIndirectSymViaRWPI(const MCSymbol *Sym) const override;
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
index 602c6745d310..e0750a9945d2 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -1116,18 +1116,6 @@ bool ARMTTIImpl::isLegalMaskedGather(Type *Ty, Align Alignment) {
if (!EnableMaskedGatherScatters || !ST->hasMVEIntegerOps())
return false;
- // This method is called in 2 places:
- // - from the vectorizer with a scalar type, in which case we need to get
- // this as good as we can with the limited info we have (and rely on the cost
- // model for the rest).
- // - from the masked intrinsic lowering pass with the actual vector type.
- // For MVE, we have a custom lowering pass that will already have custom
- // legalised any gathers that we can to MVE intrinsics, and want to expand all
- // the rest. The pass runs before the masked intrinsic lowering pass, so if we
- // are here, we know we want to expand.
- if (isa<VectorType>(Ty))
- return false;
-
unsigned EltWidth = Ty->getScalarSizeInBits();
return ((EltWidth == 32 && Alignment >= 4) ||
(EltWidth == 16 && Alignment >= 2) || EltWidth == 8);
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetTransformInfo.h b/contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
index a56886d4fc11..5bb84899e5ef 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
@@ -189,6 +189,18 @@ public:
return isLegalMaskedLoad(DataTy, Alignment);
}
+ bool forceScalarizeMaskedGather(VectorType *VTy, Align Alignment) {
+ // For MVE, we have a custom lowering pass that will already have custom
+ // legalised any gathers that we can lower to MVE intrinsics, and want to
+ // expand all the rest. The pass runs before the masked intrinsic lowering
+ // pass.
+ return true;
+ }
+
+ bool forceScalarizeMaskedScatter(VectorType *VTy, Align Alignment) {
+ return forceScalarizeMaskedGather(VTy, Alignment);
+ }
+
bool isLegalMaskedGather(Type *Ty, Align Alignment);
bool isLegalMaskedScatter(Type *Ty, Align Alignment) {
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/contrib/llvm-project/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index bfe078b06861..c7734cc2cf11 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -921,7 +921,7 @@ class ARMOperand : public MCParsedAsmOperand {
};
public:
- ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
+ ARMOperand(KindTy K) : Kind(K) {}
/// getStartLoc - Get the location of the first token of this operand.
SMLoc getStartLoc() const override { return StartLoc; }
@@ -1870,7 +1870,7 @@ public:
}
template <int shift> bool isMemRegRQOffset() const {
- if (!isMVEMem() || Memory.OffsetImm != 0 || Memory.Alignment != 0)
+ if (!isMVEMem() || Memory.OffsetImm != nullptr || Memory.Alignment != 0)
return false;
if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp b/contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
index 851acea94022..23430dfc017a 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
@@ -1049,11 +1049,11 @@ void ARMAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
unsigned Kind = Fixup.getKind();
if (Kind >= FirstLiteralRelocationKind)
return;
- unsigned NumBytes = getFixupKindNumBytes(Kind);
MCContext &Ctx = Asm.getContext();
Value = adjustFixupValue(Asm, Fixup, Target, Value, IsResolved, Ctx, STI);
if (!Value)
return; // Doesn't change encoding.
+ const unsigned NumBytes = getFixupKindNumBytes(Kind);
unsigned Offset = Fixup.getOffset();
assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
@@ -1123,9 +1123,8 @@ uint32_t ARMAsmBackendDarwin::generateCompactUnwindEncoding(
DenseMap<unsigned, int> RegOffsets;
int FloatRegCount = 0;
// Process each .cfi directive and build up compact unwind info.
- for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
+ for (const MCCFIInstruction &Inst : Instrs) {
unsigned Reg;
- const MCCFIInstruction &Inst = Instrs[i];
switch (Inst.getOperation()) {
case MCCFIInstruction::OpDefCfa: // DW_CFA_def_cfa
CFARegisterOffset = Inst.getOffset();
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp b/contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
index 37d81e4b0af1..df8f54d14a86 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
@@ -87,7 +87,7 @@ unsigned ARMELFObjectWriter::GetRelocTypeInner(const MCValue &Target,
if (IsPCRel) {
switch (Fixup.getTargetKind()) {
default:
- Ctx.reportFatalError(Fixup.getLoc(), "unsupported relocation on symbol");
+ Ctx.reportError(Fixup.getLoc(), "unsupported relocation on symbol");
return ELF::R_ARM_NONE;
case FK_Data_4:
switch (Modifier) {
@@ -159,7 +159,7 @@ unsigned ARMELFObjectWriter::GetRelocTypeInner(const MCValue &Target,
}
switch (Kind) {
default:
- Ctx.reportFatalError(Fixup.getLoc(), "unsupported relocation on symbol");
+ Ctx.reportError(Fixup.getLoc(), "unsupported relocation on symbol");
return ELF::R_ARM_NONE;
case FK_Data_1:
switch (Modifier) {
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp b/contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
index e060e59e3759..16bc0ca179a7 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
@@ -264,10 +264,8 @@ void ARMTargetAsmStreamer::emitInst(uint32_t Inst, char Suffix) {
void ARMTargetAsmStreamer::emitUnwindRaw(int64_t Offset,
const SmallVectorImpl<uint8_t> &Opcodes) {
OS << "\t.unwind_raw " << Offset;
- for (SmallVectorImpl<uint8_t>::const_iterator OCI = Opcodes.begin(),
- OCE = Opcodes.end();
- OCI != OCE; ++OCI)
- OS << ", 0x" << Twine::utohexstr(*OCI);
+ for (uint8_t Opcode : Opcodes)
+ OS << ", 0x" << Twine::utohexstr(Opcode);
OS << '\n';
}
@@ -788,6 +786,7 @@ void ARMTargetELFStreamer::emitArchDefaultAttributes() {
case ARM::ArchKind::ARMV9A:
case ARM::ArchKind::ARMV9_1A:
case ARM::ArchKind::ARMV9_2A:
+ case ARM::ArchKind::ARMV9_3A:
S.setAttributeItem(CPU_arch_profile, ApplicationProfile, false);
S.setAttributeItem(ARM_ISA_use, Allowed, false);
S.setAttributeItem(THUMB_ISA_use, AllowThumb32, false);
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp b/contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
index 05e5a473a3c6..17ca1866cf95 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
@@ -338,8 +338,8 @@ void ARM_MC::initLLVMToCVRegMapping(MCRegisterInfo *MRI) {
{codeview::RegisterId::ARM_NQ14, ARM::Q14},
{codeview::RegisterId::ARM_NQ15, ARM::Q15},
};
- for (unsigned I = 0; I < array_lengthof(RegMap); ++I)
- MRI->mapLLVMRegToCVReg(RegMap[I].Reg, static_cast<int>(RegMap[I].CVReg));
+ for (const auto &I : RegMap)
+ MRI->mapLLVMRegToCVReg(I.Reg, static_cast<int>(I.CVReg));
}
static MCRegisterInfo *createARMMCRegisterInfo(const Triple &Triple) {
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h b/contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h
index 7ccdc6f85500..5c8f9bfdca08 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h
@@ -36,8 +36,6 @@ class MCTargetStreamer;
class StringRef;
class Target;
class Triple;
-class raw_ostream;
-class raw_pwrite_stream;
namespace ARM_MC {
std::string ParseARMTriple(const Triple &TT, StringRef CPU);
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp b/contrib/llvm-project/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
index 54e80a095dd4..71a82a1e3271 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
@@ -167,7 +167,7 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF,
DebugLoc dl;
Register FramePtr = RegInfo->getFrameRegister(MF);
- unsigned BasePtr = RegInfo->getBaseRegister();
+ Register BasePtr = RegInfo->getBaseRegister();
int CFAOffset = 0;
// Thumb add/sub sp, imm8 instructions implicitly multiply the offset by 4.
@@ -206,7 +206,7 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF,
}
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
int FI = I.getFrameIdx();
switch (Reg) {
case ARM::R8:
@@ -267,7 +267,7 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF,
.setMIFlags(MachineInstr::FrameSetup);
}
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
int FI = I.getFrameIdx();
switch (Reg) {
case ARM::R8:
@@ -348,7 +348,7 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF,
// Emit call frame information for the callee-saved high registers.
for (auto &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
int FI = I.getFrameIdx();
switch (Reg) {
case ARM::R8:
@@ -376,7 +376,7 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF,
// at this point in the prologue, so pick one.
unsigned ScratchRegister = ARM::NoRegister;
for (auto &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (isARMLowRegister(Reg) && !(HasFP && Reg == FramePtr)) {
ScratchRegister = Reg;
break;
@@ -531,7 +531,7 @@ void Thumb1FrameLowering::emitEpilogue(MachineFunction &MF,
unsigned ScratchRegister = ARM::NoRegister;
bool HasFP = hasFP(MF);
for (auto &I : MFI.getCalleeSavedInfo()) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (isARMLowRegister(Reg) && !(HasFP && Reg == FramePtr)) {
ScratchRegister = Reg;
break;
@@ -825,7 +825,7 @@ bool Thumb1FrameLowering::spillCalleeSavedRegisters(
// LoRegs for saving HiRegs.
for (const CalleeSavedInfo &I : llvm::reverse(CSI)) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (ARM::tGPRRegClass.contains(Reg) || Reg == ARM::LR) {
LoRegsToSave[Reg] = true;
@@ -949,7 +949,7 @@ bool Thumb1FrameLowering::restoreCalleeSavedRegisters(
ARMRegSet CopyRegs;
for (CalleeSavedInfo I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (ARM::tGPRRegClass.contains(Reg) || Reg == ARM::LR) {
LoRegsToRestore[Reg] = true;
@@ -1022,7 +1022,7 @@ bool Thumb1FrameLowering::restoreCalleeSavedRegisters(
bool NeedsPop = false;
for (CalleeSavedInfo &Info : llvm::reverse(CSI)) {
- unsigned Reg = Info.getReg();
+ Register Reg = Info.getReg();
// High registers (excluding lr) have already been dealt with
if (!(ARM::tGPRRegClass.contains(Reg) || Reg == ARM::LR))
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp b/contrib/llvm-project/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
index 4b18f5e20d40..1a36c2ca9152 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
@@ -21,7 +21,7 @@
using namespace llvm;
Thumb1InstrInfo::Thumb1InstrInfo(const ARMSubtarget &STI)
- : ARMBaseInstrInfo(STI), RI() {}
+ : ARMBaseInstrInfo(STI) {}
/// Return the noop instruction to use for a noop.
MCInst Thumb1InstrInfo::getNop() const {
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/Thumb2InstrInfo.h b/contrib/llvm-project/llvm/lib/Target/ARM/Thumb2InstrInfo.h
index e6d51796ba4d..a83ff5e51004 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/Thumb2InstrInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/Thumb2InstrInfo.h
@@ -18,7 +18,6 @@
namespace llvm {
class ARMSubtarget;
-class ScheduleHazardRecognizer;
class Thumb2InstrInfo : public ARMBaseInstrInfo {
ThumbRegisterInfo RI;
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp b/contrib/llvm-project/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
index 1164b6ebbac3..1cc5422523f1 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -1147,9 +1147,8 @@ bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) {
// predecessors.
ReversePostOrderTraversal<MachineFunction*> RPOT(&MF);
bool Modified = false;
- for (ReversePostOrderTraversal<MachineFunction*>::rpo_iterator
- I = RPOT.begin(), E = RPOT.end(); I != E; ++I)
- Modified |= ReduceMBB(**I);
+ for (MachineBasicBlock *MBB : RPOT)
+ Modified |= ReduceMBB(*MBB);
return Modified;
}
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/ThumbRegisterInfo.cpp b/contrib/llvm-project/llvm/lib/Target/ARM/ThumbRegisterInfo.cpp
index 4da6f6ab6994..5d2bc4ebe191 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/ThumbRegisterInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/ThumbRegisterInfo.cpp
@@ -37,7 +37,7 @@ extern cl::opt<bool> ReuseFrameIndexVals;
using namespace llvm;
-ThumbRegisterInfo::ThumbRegisterInfo() : ARMBaseRegisterInfo() {}
+ThumbRegisterInfo::ThumbRegisterInfo() {}
const TargetRegisterClass *
ThumbRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVR.h b/contrib/llvm-project/llvm/lib/Target/AVR/AVR.h
index 143c339c0664..0b512172ba10 100644
--- a/contrib/llvm-project/llvm/lib/Target/AVR/AVR.h
+++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVR.h
@@ -28,7 +28,6 @@ FunctionPass *createAVRISelDag(AVRTargetMachine &TM,
FunctionPass *createAVRExpandPseudoPass();
FunctionPass *createAVRFrameAnalyzerPass();
FunctionPass *createAVRRelaxMemPass();
-FunctionPass *createAVRDynAllocaSRPass();
FunctionPass *createAVRBranchSelectionPass();
void initializeAVRShiftExpandPass(PassRegistry &);
@@ -39,17 +38,56 @@ void initializeAVRRelaxMemPass(PassRegistry &);
namespace AVR {
/// An integer that identifies all of the supported AVR address spaces.
-enum AddressSpace { DataMemory, ProgramMemory };
+enum AddressSpace {
+ DataMemory,
+ ProgramMemory,
+ ProgramMemory1,
+ ProgramMemory2,
+ ProgramMemory3,
+ ProgramMemory4,
+ ProgramMemory5,
+ NumAddrSpaces,
+};
/// Checks if a given type is a pointer to program memory.
template <typename T> bool isProgramMemoryAddress(T *V) {
- return cast<PointerType>(V->getType())->getAddressSpace() == ProgramMemory;
+ auto *PT = cast<PointerType>(V->getType());
+ assert(PT != nullptr && "unexpected MemSDNode");
+ return PT->getAddressSpace() == ProgramMemory ||
+ PT->getAddressSpace() == ProgramMemory1 ||
+ PT->getAddressSpace() == ProgramMemory2 ||
+ PT->getAddressSpace() == ProgramMemory3 ||
+ PT->getAddressSpace() == ProgramMemory4 ||
+ PT->getAddressSpace() == ProgramMemory5;
+}
+
+template <typename T> AddressSpace getAddressSpace(T *V) {
+ auto *PT = cast<PointerType>(V->getType());
+ assert(PT != nullptr && "unexpected MemSDNode");
+ unsigned AS = PT->getAddressSpace();
+ if (AS < NumAddrSpaces)
+ return static_cast<AddressSpace>(AS);
+ return NumAddrSpaces;
}
inline bool isProgramMemoryAccess(MemSDNode const *N) {
- auto V = N->getMemOperand()->getValue();
+ auto *V = N->getMemOperand()->getValue();
+ if (V != nullptr && isProgramMemoryAddress(V))
+ return true;
+ return false;
+}
- return (V != nullptr) ? isProgramMemoryAddress(V) : false;
+// Get the index of the program memory bank.
+// -1: not program memory
+// 0: ordinary program memory
+// 1~5: extended program memory
+inline int getProgramMemoryBank(MemSDNode const *N) {
+ auto *V = N->getMemOperand()->getValue();
+ if (V == nullptr || !isProgramMemoryAddress(V))
+ return -1;
+ AddressSpace AS = getAddressSpace(V);
+ assert(ProgramMemory <= AS && AS <= ProgramMemory5);
+ return static_cast<int>(AS - ProgramMemory);
}
} // end of namespace AVR
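The AVR.h hunk above widens the address-space model from two spaces to one data space plus six program-memory spaces, and the new getProgramMemoryBank helper folds those spaces down to a bank index: -1 for anything that is not program memory, 0 for ordinary program memory, and 1-5 for the extended banks that the later ELPM expansions reach by writing RAMPZ. A minimal standalone sketch of that mapping, with a plain address-space value standing in for the pointer type and SelectionDAG plumbing; only the enum values are taken from the diff, everything else is illustrative:

#include <cassert>

// Mirrors the AddressSpace enum added above: 0 is data memory,
// 1..6 are the ordinary and extended program-memory spaces.
enum AddressSpace {
  DataMemory,
  ProgramMemory,
  ProgramMemory1,
  ProgramMemory2,
  ProgramMemory3,
  ProgramMemory4,
  ProgramMemory5,
  NumAddrSpaces,
};

// Illustrative stand-in for the bank lookup: -1 for non-program memory,
// 0 for ordinary program memory, 1..5 for the extended banks selected
// through RAMPZ before an ELPM.
static int programMemoryBank(AddressSpace AS) {
  if (AS < ProgramMemory || AS > ProgramMemory5)
    return -1;
  return static_cast<int>(AS) - static_cast<int>(ProgramMemory);
}

int main() {
  assert(programMemoryBank(DataMemory) == -1);
  assert(programMemoryBank(ProgramMemory) == 0);
  assert(programMemoryBank(ProgramMemory3) == 3);
  assert(programMemoryBank(ProgramMemory5) == 5);
  return 0;
}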
diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRCallingConv.td b/contrib/llvm-project/llvm/lib/Target/AVR/AVRCallingConv.td
index 87874c5c50b2..b4bc35e191c0 100644
--- a/contrib/llvm-project/llvm/lib/Target/AVR/AVRCallingConv.td
+++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRCallingConv.td
@@ -36,4 +36,4 @@ def ArgCC_AVR_Vararg : CallingConv<[
//===----------------------------------------------------------------------===//
def CSR_Normal : CalleeSavedRegs<(add R29, R28, (sequence "R%u", 17, 2))>;
-def CSR_Interrupts : CalleeSavedRegs<(add(sequence "R%u", 31, 0))>;
+def CSR_Interrupts : CalleeSavedRegs<(add(sequence "R%u", 31, 2))>;
diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRExpandPseudoInsts.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/AVRExpandPseudoInsts.cpp
index cb85d73772c5..144ae2b320f9 100644
--- a/contrib/llvm-project/llvm/lib/Target/AVR/AVRExpandPseudoInsts.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRExpandPseudoInsts.cpp
@@ -92,6 +92,7 @@ private:
/// Specific shift implementation.
bool expandLSLB7Rd(Block &MBB, BlockIt MBBI);
bool expandLSRB7Rd(Block &MBB, BlockIt MBBI);
+ bool expandASRB6Rd(Block &MBB, BlockIt MBBI);
bool expandASRB7Rd(Block &MBB, BlockIt MBBI);
bool expandLSLW4Rd(Block &MBB, BlockIt MBBI);
bool expandLSRW4Rd(Block &MBB, BlockIt MBBI);
@@ -101,6 +102,9 @@ private:
bool expandLSLW12Rd(Block &MBB, BlockIt MBBI);
bool expandLSRW12Rd(Block &MBB, BlockIt MBBI);
+ // Common implementation of LPMWRdZ and ELPMWRdZ.
+ bool expandLPMWELPMW(Block &MBB, BlockIt MBBI, bool IsExt);
+
/// Scavenges a free GPR8 register for use.
Register scavengeGPR8(MachineInstr &MI);
};
@@ -808,18 +812,25 @@ bool AVRExpandPseudo::expand<AVR::LDDWRdPtrQ>(Block &MBB, BlockIt MBBI) {
return true;
}
-template <>
-bool AVRExpandPseudo::expand<AVR::LPMWRdZ>(Block &MBB, BlockIt MBBI) {
+bool AVRExpandPseudo::expandLPMWELPMW(Block &MBB, BlockIt MBBI, bool IsExt) {
MachineInstr &MI = *MBBI;
Register DstLoReg, DstHiReg;
Register DstReg = MI.getOperand(0).getReg();
Register TmpReg = 0; // 0 for no temporary register
Register SrcReg = MI.getOperand(1).getReg();
bool SrcIsKill = MI.getOperand(1).isKill();
- unsigned OpLo = AVR::LPMRdZPi;
- unsigned OpHi = AVR::LPMRdZ;
+ unsigned OpLo = IsExt ? AVR::ELPMRdZPi : AVR::LPMRdZPi;
+ unsigned OpHi = IsExt ? AVR::ELPMRdZ : AVR::LPMRdZ;
TRI->splitReg(DstReg, DstLoReg, DstHiReg);
+ // Set the I/O register RAMPZ for ELPM.
+ if (IsExt) {
+ const AVRSubtarget &STI = MBB.getParent()->getSubtarget<AVRSubtarget>();
+ Register Bank = MI.getOperand(2).getReg();
+ // out RAMPZ, rtmp
+ buildMI(MBB, MBBI, AVR::OUTARr).addImm(STI.getIORegRAMPZ()).addReg(Bank);
+ }
+
// Use a temporary register if src and dst registers are the same.
if (DstReg == SrcReg)
TmpReg = scavengeGPR8(MI);
@@ -857,8 +868,51 @@ bool AVRExpandPseudo::expand<AVR::LPMWRdZ>(Block &MBB, BlockIt MBBI) {
}
template <>
+bool AVRExpandPseudo::expand<AVR::LPMWRdZ>(Block &MBB, BlockIt MBBI) {
+ return expandLPMWELPMW(MBB, MBBI, false);
+}
+
+template <>
+bool AVRExpandPseudo::expand<AVR::ELPMWRdZ>(Block &MBB, BlockIt MBBI) {
+ return expandLPMWELPMW(MBB, MBBI, true);
+}
+
+template <>
+bool AVRExpandPseudo::expand<AVR::ELPMBRdZ>(Block &MBB, BlockIt MBBI) {
+ MachineInstr &MI = *MBBI;
+ Register DstReg = MI.getOperand(0).getReg();
+ Register SrcReg = MI.getOperand(1).getReg();
+ Register BankReg = MI.getOperand(2).getReg();
+ bool SrcIsKill = MI.getOperand(1).isKill();
+ const AVRSubtarget &STI = MBB.getParent()->getSubtarget<AVRSubtarget>();
+
+ // Set the I/O register RAMPZ for ELPM (out RAMPZ, rtmp).
+ buildMI(MBB, MBBI, AVR::OUTARr).addImm(STI.getIORegRAMPZ()).addReg(BankReg);
+
+ // Load byte.
+ auto MILB = buildMI(MBB, MBBI, AVR::ELPMRdZ)
+ .addReg(DstReg, RegState::Define)
+ .addReg(SrcReg, getKillRegState(SrcIsKill));
+
+ MILB.setMemRefs(MI.memoperands());
+
+ MI.eraseFromParent();
+ return true;
+}
+
+template <>
bool AVRExpandPseudo::expand<AVR::LPMWRdZPi>(Block &MBB, BlockIt MBBI) {
- llvm_unreachable("wide LPMPi is unimplemented");
+ llvm_unreachable("16-bit LPMPi is unimplemented");
+}
+
+template <>
+bool AVRExpandPseudo::expand<AVR::ELPMBRdZPi>(Block &MBB, BlockIt MBBI) {
+ llvm_unreachable("byte ELPMPi is unimplemented");
+}
+
+template <>
+bool AVRExpandPseudo::expand<AVR::ELPMWRdZPi>(Block &MBB, BlockIt MBBI) {
+ llvm_unreachable("16-bit ELPMPi is unimplemented");
}
template <typename Func>
@@ -1411,6 +1465,30 @@ bool AVRExpandPseudo::expand<AVR::LSLWRd>(Block &MBB, BlockIt MBBI) {
return true;
}
+template <>
+bool AVRExpandPseudo::expand<AVR::LSLWHiRd>(Block &MBB, BlockIt MBBI) {
+ MachineInstr &MI = *MBBI;
+ Register DstLoReg, DstHiReg;
+ Register DstReg = MI.getOperand(0).getReg();
+ bool DstIsDead = MI.getOperand(0).isDead();
+ bool DstIsKill = MI.getOperand(1).isKill();
+ bool ImpIsDead = MI.getOperand(2).isDead();
+ TRI->splitReg(DstReg, DstLoReg, DstHiReg);
+
+ // add hireg, hireg <==> lsl hireg
+ auto MILSL =
+ buildMI(MBB, MBBI, AVR::ADDRdRr)
+ .addReg(DstHiReg, RegState::Define, getDeadRegState(DstIsDead))
+ .addReg(DstHiReg, getKillRegState(DstIsKill))
+ .addReg(DstHiReg, getKillRegState(DstIsKill));
+
+ if (ImpIsDead)
+ MILSL->getOperand(3).setIsDead();
+
+ MI.eraseFromParent();
+ return true;
+}
+
bool AVRExpandPseudo::expandLSLW4Rd(Block &MBB, BlockIt MBBI) {
MachineInstr &MI = *MBBI;
Register DstLoReg, DstHiReg;
@@ -1586,6 +1664,29 @@ bool AVRExpandPseudo::expand<AVR::LSRWRd>(Block &MBB, BlockIt MBBI) {
return true;
}
+template <>
+bool AVRExpandPseudo::expand<AVR::LSRWLoRd>(Block &MBB, BlockIt MBBI) {
+ MachineInstr &MI = *MBBI;
+ Register DstLoReg, DstHiReg;
+ Register DstReg = MI.getOperand(0).getReg();
+ bool DstIsDead = MI.getOperand(0).isDead();
+ bool DstIsKill = MI.getOperand(1).isKill();
+ bool ImpIsDead = MI.getOperand(2).isDead();
+ TRI->splitReg(DstReg, DstLoReg, DstHiReg);
+
+ // lsr loreg
+ auto MILSR =
+ buildMI(MBB, MBBI, AVR::LSRRd)
+ .addReg(DstLoReg, RegState::Define | getDeadRegState(DstIsDead))
+ .addReg(DstLoReg, getKillRegState(DstIsKill));
+
+ if (ImpIsDead)
+ MILSR->getOperand(2).setIsDead();
+
+ MI.eraseFromParent();
+ return true;
+}
+
bool AVRExpandPseudo::expandLSRW4Rd(Block &MBB, BlockIt MBBI) {
MachineInstr &MI = *MBBI;
Register DstLoReg, DstHiReg;
@@ -1773,6 +1874,29 @@ bool AVRExpandPseudo::expand<AVR::ASRWRd>(Block &MBB, BlockIt MBBI) {
return true;
}
+template <>
+bool AVRExpandPseudo::expand<AVR::ASRWLoRd>(Block &MBB, BlockIt MBBI) {
+ MachineInstr &MI = *MBBI;
+ Register DstLoReg, DstHiReg;
+ Register DstReg = MI.getOperand(0).getReg();
+ bool DstIsDead = MI.getOperand(0).isDead();
+ bool DstIsKill = MI.getOperand(1).isKill();
+ bool ImpIsDead = MI.getOperand(2).isDead();
+ TRI->splitReg(DstReg, DstLoReg, DstHiReg);
+
+ // asr loreg
+ auto MIASR =
+ buildMI(MBB, MBBI, AVR::ASRRd)
+ .addReg(DstLoReg, RegState::Define | getDeadRegState(DstIsDead))
+ .addReg(DstLoReg, getKillRegState(DstIsKill));
+
+ if (ImpIsDead)
+ MIASR->getOperand(2).setIsDead();
+
+ MI.eraseFromParent();
+ return true;
+}
+
bool AVRExpandPseudo::expandASRW8Rd(Block &MBB, BlockIt MBBI) {
MachineInstr &MI = *MBBI;
Register DstLoReg, DstHiReg;
@@ -1921,6 +2045,44 @@ bool AVRExpandPseudo::expand<AVR::LSRBNRd>(Block &MBB, BlockIt MBBI) {
}
}
+bool AVRExpandPseudo::expandASRB6Rd(Block &MBB, BlockIt MBBI) {
+ MachineInstr &MI = *MBBI;
+ Register DstReg = MI.getOperand(0).getReg();
+ bool DstIsDead = MI.getOperand(0).isDead();
+ bool DstIsKill = MI.getOperand(1).isKill();
+
+ // bst r24, 6
+ // lsl r24
+ // sbc r24, r24
+ // bld r24, 0
+
+ buildMI(MBB, MBBI, AVR::BST)
+ .addReg(DstReg)
+ .addImm(6)
+ ->getOperand(2)
+ .setIsUndef(true);
+
+ buildMI(MBB, MBBI, AVR::ADDRdRr) // LSL Rd <==> ADD Rd, Rd
+ .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
+ .addReg(DstReg, getKillRegState(DstIsKill))
+ .addReg(DstReg, getKillRegState(DstIsKill));
+
+ buildMI(MBB, MBBI, AVR::SBCRdRr)
+ .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
+ .addReg(DstReg, getKillRegState(DstIsKill))
+ .addReg(DstReg, getKillRegState(DstIsKill));
+
+ buildMI(MBB, MBBI, AVR::BLD)
+ .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
+ .addReg(DstReg, getKillRegState(DstIsKill))
+ .addImm(0)
+ ->getOperand(3)
+ .setIsKill();
+
+ MI.eraseFromParent();
+ return true;
+}
+
bool AVRExpandPseudo::expandASRB7Rd(Block &MBB, BlockIt MBBI) {
MachineInstr &MI = *MBBI;
Register DstReg = MI.getOperand(0).getReg();
@@ -1957,6 +2119,8 @@ bool AVRExpandPseudo::expand<AVR::ASRBNRd>(Block &MBB, BlockIt MBBI) {
MachineInstr &MI = *MBBI;
unsigned Imm = MI.getOperand(2).getImm();
switch (Imm) {
+ case 6:
+ return expandASRB6Rd(MBB, MBBI);
case 7:
return expandASRB7Rd(MBB, MBBI);
default:
@@ -2158,6 +2322,10 @@ bool AVRExpandPseudo::expandMI(Block &MBB, BlockIt MBBI) {
EXPAND(AVR::LDDWRdPtrQ);
EXPAND(AVR::LPMWRdZ);
EXPAND(AVR::LPMWRdZPi);
+ EXPAND(AVR::ELPMBRdZ);
+ EXPAND(AVR::ELPMWRdZ);
+ EXPAND(AVR::ELPMBRdZPi);
+ EXPAND(AVR::ELPMWRdZPi);
EXPAND(AVR::AtomicLoad8);
EXPAND(AVR::AtomicLoad16);
EXPAND(AVR::AtomicStore8);
@@ -2189,6 +2357,9 @@ bool AVRExpandPseudo::expandMI(Block &MBB, BlockIt MBBI) {
EXPAND(AVR::RORWRd);
EXPAND(AVR::ROLWRd);
EXPAND(AVR::ASRWRd);
+ EXPAND(AVR::LSLWHiRd);
+ EXPAND(AVR::LSRWLoRd);
+ EXPAND(AVR::ASRWLoRd);
EXPAND(AVR::LSLWNRd);
EXPAND(AVR::LSRWNRd);
EXPAND(AVR::ASRWNRd);
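Among the expansions added above, expandASRB6Rd open-codes an 8-bit arithmetic shift right by 6 as BST Rd,6 / LSL Rd / SBC Rd,Rd / BLD Rd,0: LSL (encoded as ADD Rd,Rd) moves the old sign bit into carry, SBC of a register with itself then yields 0x00 or 0xFF (the sign spread across the whole byte), and BLD drops the saved bit 6 back in as bit 0. A small self-contained check of that identity, emulating the four instructions on plain bytes; it assumes the usual arithmetic behaviour of >> on signed values, which is what AVR's ASR provides:

#include <cassert>
#include <cstdint>

// Emulate the BST r,6 / LSL r / SBC r,r / BLD r,0 sequence used by
// expandASRB6Rd and compare it with an 8-bit arithmetic shift right by 6.
static uint8_t asrb6(uint8_t r) {
  bool t = ((r >> 6) & 1) != 0;        // bst r, 6 : save bit 6 in T
  bool c = ((r >> 7) & 1) != 0;        // lsl r    : carry gets the old sign bit
  r = static_cast<uint8_t>(r << 1);    //            (byte value is overwritten next)
  r = static_cast<uint8_t>(c ? 0xFF : 0x00); // sbc r, r : r - r - C, i.e. 0x00 or 0xFF
  r = static_cast<uint8_t>((r & 0xFE) | (t ? 1 : 0)); // bld r, 0 : restore bit 6 as bit 0
  return r;
}

int main() {
  for (int v = 0; v < 256; ++v) {
    // Arithmetic >> on signed values assumed (true on common compilers,
    // guaranteed since C++20).
    int8_t expected = static_cast<int8_t>(static_cast<int8_t>(v) >> 6);
    assert(asrb6(static_cast<uint8_t>(v)) == static_cast<uint8_t>(expected));
  }
  return 0;
}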
diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRFrameLowering.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/AVRFrameLowering.cpp
index 543d94875037..b3bc9ede205e 100644
--- a/contrib/llvm-project/llvm/lib/Target/AVR/AVRFrameLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRFrameLowering.cpp
@@ -79,11 +79,6 @@ void AVRFrameLowering::emitPrologue(MachineFunction &MF,
.addReg(AVR::R0, RegState::Kill)
.setMIFlag(MachineInstr::FrameSetup);
BuildMI(MBB, MBBI, DL, TII.get(AVR::EORRdRr))
- .addReg(AVR::R0, RegState::Define)
- .addReg(AVR::R0, RegState::Kill)
- .addReg(AVR::R0, RegState::Kill)
- .setMIFlag(MachineInstr::FrameSetup);
- BuildMI(MBB, MBBI, DL, TII.get(AVR::EORRdRr))
.addReg(AVR::R1, RegState::Define)
.addReg(AVR::R1, RegState::Kill)
.addReg(AVR::R1, RegState::Kill)
@@ -176,7 +171,7 @@ void AVRFrameLowering::emitEpilogue(MachineFunction &MF,
const AVRInstrInfo &TII = *STI.getInstrInfo();
// Early exit if there is no need to restore the frame pointer.
- if (!FrameSize) {
+ if (!FrameSize && !MF.getFrameInfo().hasVarSizedObjects()) {
restoreStatusRegister(MF, MBB);
return;
}
@@ -193,22 +188,24 @@ void AVRFrameLowering::emitEpilogue(MachineFunction &MF,
--MBBI;
}
- unsigned Opcode;
+ if (FrameSize) {
+ unsigned Opcode;
- // Select the optimal opcode depending on how big it is.
- if (isUInt<6>(FrameSize)) {
- Opcode = AVR::ADIWRdK;
- } else {
- Opcode = AVR::SUBIWRdK;
- FrameSize = -FrameSize;
- }
+ // Select the optimal opcode depending on how big it is.
+ if (isUInt<6>(FrameSize)) {
+ Opcode = AVR::ADIWRdK;
+ } else {
+ Opcode = AVR::SUBIWRdK;
+ FrameSize = -FrameSize;
+ }
- // Restore the frame pointer by doing FP += <size>.
- MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opcode), AVR::R29R28)
- .addReg(AVR::R29R28, RegState::Kill)
- .addImm(FrameSize);
- // The SREG implicit def is dead.
- MI->getOperand(3).setIsDead();
+ // Restore the frame pointer by doing FP += <size>.
+ MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opcode), AVR::R29R28)
+ .addReg(AVR::R29R28, RegState::Kill)
+ .addImm(FrameSize);
+ // The SREG implicit def is dead.
+ MI->getOperand(3).setIsDead();
+ }
// Write back R29R28 to SP and temporarily disable interrupts.
BuildMI(MBB, MBBI, DL, TII.get(AVR::SPWRITE), AVR::SP)
@@ -230,7 +227,8 @@ bool AVRFrameLowering::hasFP(const MachineFunction &MF) const {
const AVRMachineFunctionInfo *FuncInfo = MF.getInfo<AVRMachineFunctionInfo>();
return (FuncInfo->getHasSpills() || FuncInfo->getHasAllocas() ||
- FuncInfo->getHasStackArgs());
+ FuncInfo->getHasStackArgs() ||
+ MF.getFrameInfo().hasVarSizedObjects());
}
bool AVRFrameLowering::spillCalleeSavedRegisters(
@@ -248,7 +246,7 @@ bool AVRFrameLowering::spillCalleeSavedRegisters(
AVRMachineFunctionInfo *AVRFI = MF.getInfo<AVRMachineFunctionInfo>();
for (const CalleeSavedInfo &I : llvm::reverse(CSI)) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
bool IsNotLiveIn = !MBB.isLiveIn(Reg);
assert(TRI->getRegSizeInBits(*TRI->getMinimalPhysRegClass(Reg)) == 8 &&
@@ -286,7 +284,7 @@ bool AVRFrameLowering::restoreCalleeSavedRegisters(
const TargetInstrInfo &TII = *STI.getInstrInfo();
for (const CalleeSavedInfo &CCSI : CSI) {
- unsigned Reg = CCSI.getReg();
+ Register Reg = CCSI.getReg();
assert(TRI->getRegSizeInBits(*TRI->getMinimalPhysRegClass(Reg)) == 8 &&
"Invalid register size");
@@ -480,56 +478,4 @@ char AVRFrameAnalyzer::ID = 0;
/// Creates instance of the frame analyzer pass.
FunctionPass *createAVRFrameAnalyzerPass() { return new AVRFrameAnalyzer(); }
-/// Create the Dynalloca Stack Pointer Save/Restore pass.
-/// Insert a copy of SP before allocating the dynamic stack memory and restore
-/// it in function exit to restore the original SP state. This avoids the need
-/// of reserving a register pair for a frame pointer.
-struct AVRDynAllocaSR : public MachineFunctionPass {
- static char ID;
- AVRDynAllocaSR() : MachineFunctionPass(ID) {}
-
- bool runOnMachineFunction(MachineFunction &MF) override {
- // Early exit when there are no variable sized objects in the function.
- if (!MF.getFrameInfo().hasVarSizedObjects()) {
- return false;
- }
-
- const AVRSubtarget &STI = MF.getSubtarget<AVRSubtarget>();
- const TargetInstrInfo &TII = *STI.getInstrInfo();
- MachineBasicBlock &EntryMBB = MF.front();
- MachineBasicBlock::iterator MBBI = EntryMBB.begin();
- DebugLoc DL = EntryMBB.findDebugLoc(MBBI);
-
- Register SPCopy =
- MF.getRegInfo().createVirtualRegister(&AVR::DREGSRegClass);
-
- // Create a copy of SP in function entry before any dynallocas are
- // inserted.
- BuildMI(EntryMBB, MBBI, DL, TII.get(AVR::COPY), SPCopy).addReg(AVR::SP);
-
- // Restore SP in all exit basic blocks.
- for (MachineBasicBlock &MBB : MF) {
- // If last instruction is a return instruction, add a restore copy.
- if (!MBB.empty() && MBB.back().isReturn()) {
- MBBI = MBB.getLastNonDebugInstr();
- DL = MBBI->getDebugLoc();
- BuildMI(MBB, MBBI, DL, TII.get(AVR::COPY), AVR::SP)
- .addReg(SPCopy, RegState::Kill);
- }
- }
-
- return true;
- }
-
- StringRef getPassName() const override {
- return "AVR dynalloca stack pointer save/restore";
- }
-};
-
-char AVRDynAllocaSR::ID = 0;
-
-/// createAVRDynAllocaSRPass - returns an instance of the dynalloca stack
-/// pointer save/restore pass.
-FunctionPass *createAVRDynAllocaSRPass() { return new AVRDynAllocaSR(); }
-
} // end of namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRISelDAGToDAG.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/AVRISelDAGToDAG.cpp
index 7ec2629ab45d..df364cae671c 100644
--- a/contrib/llvm-project/llvm/lib/Target/AVR/AVRISelDAGToDAG.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRISelDAGToDAG.cpp
@@ -38,7 +38,7 @@ public:
bool SelectAddr(SDNode *Op, SDValue N, SDValue &Base, SDValue &Disp);
bool selectIndexedLoad(SDNode *N);
- unsigned selectIndexedProgMemLoad(const LoadSDNode *LD, MVT VT);
+ unsigned selectIndexedProgMemLoad(const LoadSDNode *LD, MVT VT, int Bank);
bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintCode,
std::vector<SDValue> &OutOps) override;
@@ -165,35 +165,31 @@ bool AVRDAGToDAGISel::selectIndexedLoad(SDNode *N) {
return true;
}
-unsigned AVRDAGToDAGISel::selectIndexedProgMemLoad(const LoadSDNode *LD,
- MVT VT) {
- ISD::MemIndexedMode AM = LD->getAddressingMode();
-
+unsigned AVRDAGToDAGISel::selectIndexedProgMemLoad(const LoadSDNode *LD, MVT VT,
+ int Bank) {
// Progmem indexed loads only work in POSTINC mode.
- if (LD->getExtensionType() != ISD::NON_EXTLOAD || AM != ISD::POST_INC) {
+ if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
+ LD->getAddressingMode() != ISD::POST_INC)
return 0;
- }
+
+ // Feature ELPM is needed for loading from extended program memory.
+ assert((Bank == 0 || Subtarget->hasELPM()) &&
+ "cannot load from extended program memory on this mcu");
unsigned Opcode = 0;
int Offs = cast<ConstantSDNode>(LD->getOffset())->getSExtValue();
switch (VT.SimpleTy) {
- case MVT::i8: {
- if (Offs != 1) {
- return 0;
- }
- Opcode = AVR::LPMRdZPi;
+ case MVT::i8:
+ if (Offs == 1)
+ Opcode = Bank > 0 ? AVR::ELPMBRdZPi : AVR::LPMRdZPi;
break;
- }
- case MVT::i16: {
- if (Offs != 2) {
- return 0;
- }
- Opcode = AVR::LPMWRdZPi;
+ case MVT::i16:
+ if (Offs == 2)
+ Opcode = Bank > 0 ? AVR::ELPMWRdZPi : AVR::LPMWRdZPi;
break;
- }
default:
- return 0;
+ break;
}
return Opcode;
@@ -360,7 +356,12 @@ template <> bool AVRDAGToDAGISel::select<ISD::LOAD>(SDNode *N) {
return selectIndexedLoad(N);
}
- assert(Subtarget->hasLPM() && "cannot load from program memory on this mcu");
+ if (!Subtarget->hasLPM())
+ report_fatal_error("cannot load from program memory on this mcu");
+
+ int ProgMemBank = AVR::getProgramMemoryBank(LD);
+ if (ProgMemBank < 0 || ProgMemBank > 5)
+ report_fatal_error("unexpected program memory bank");
// This is a flash memory load, move the pointer into R31R30 and emit
// the lpm instruction.
@@ -374,25 +375,48 @@ template <> bool AVRDAGToDAGISel::select<ISD::LOAD>(SDNode *N) {
Ptr = CurDAG->getCopyFromReg(Chain, DL, AVR::R31R30, MVT::i16,
Chain.getValue(1));
- SDValue RegZ = CurDAG->getRegister(AVR::R31R30, MVT::i16);
-
// Check if the opcode can be converted into an indexed load.
- if (unsigned LPMOpc = selectIndexedProgMemLoad(LD, VT)) {
+ if (unsigned LPMOpc = selectIndexedProgMemLoad(LD, VT, ProgMemBank)) {
// It is legal to fold the load into an indexed load.
- ResNode =
- CurDAG->getMachineNode(LPMOpc, DL, VT, MVT::i16, MVT::Other, Ptr, RegZ);
- ReplaceUses(SDValue(N, 1), SDValue(ResNode, 1));
+ if (ProgMemBank == 0) {
+ ResNode =
+ CurDAG->getMachineNode(LPMOpc, DL, VT, MVT::i16, MVT::Other, Ptr);
+ } else {
+ // Do not combine the LDI instruction into the ELPM pseudo instruction,
+ // since it may be reused by other ELPM pseudo instructions.
+ SDValue NC = CurDAG->getTargetConstant(ProgMemBank, DL, MVT::i8);
+ auto *NP = CurDAG->getMachineNode(AVR::LDIRdK, DL, MVT::i8, NC);
+ ResNode = CurDAG->getMachineNode(LPMOpc, DL, VT, MVT::i16, MVT::Other,
+ Ptr, SDValue(NP, 0));
+ }
} else {
// Selecting an indexed load is not legal, fallback to a normal load.
switch (VT.SimpleTy) {
case MVT::i8:
- ResNode = CurDAG->getMachineNode(AVR::LPMRdZ, DL, MVT::i8, MVT::Other,
- Ptr, RegZ);
+ if (ProgMemBank == 0) {
+ ResNode =
+ CurDAG->getMachineNode(AVR::LPMRdZ, DL, MVT::i8, MVT::Other, Ptr);
+ } else {
+ // Do not combine the LDI instruction into the ELPM pseudo instruction,
+ // since it may be reused by other ELPM pseudo instructions.
+ SDValue NC = CurDAG->getTargetConstant(ProgMemBank, DL, MVT::i8);
+ auto *NP = CurDAG->getMachineNode(AVR::LDIRdK, DL, MVT::i8, NC);
+ ResNode = CurDAG->getMachineNode(AVR::ELPMBRdZ, DL, MVT::i8, MVT::Other,
+ Ptr, SDValue(NP, 0));
+ }
break;
case MVT::i16:
- ResNode = CurDAG->getMachineNode(AVR::LPMWRdZ, DL, MVT::i16, MVT::Other,
- Ptr, RegZ);
- ReplaceUses(SDValue(N, 1), SDValue(ResNode, 1));
+ if (ProgMemBank == 0) {
+ ResNode =
+ CurDAG->getMachineNode(AVR::LPMWRdZ, DL, MVT::i16, MVT::Other, Ptr);
+ } else {
+ // Do not combine the LDI instruction into the ELPM pseudo instruction,
+ // since LDI requires the destination register in range R16~R31.
+ SDValue NC = CurDAG->getTargetConstant(ProgMemBank, DL, MVT::i8);
+ auto *NP = CurDAG->getMachineNode(AVR::LDIRdK, DL, MVT::i8, NC);
+ ResNode = CurDAG->getMachineNode(AVR::ELPMWRdZ, DL, MVT::i16,
+ MVT::Other, Ptr, SDValue(NP, 0));
+ }
break;
default:
llvm_unreachable("Unsupported VT!");
diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/AVRISelLowering.cpp
index a6f2afb87102..a58fedf6cd36 100644
--- a/contrib/llvm-project/llvm/lib/Target/AVR/AVRISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRISelLowering.cpp
@@ -359,6 +359,11 @@ SDValue AVRTargetLowering::LowerShifts(SDValue Op, SelectionDAG &DAG) const {
Victim = DAG.getNode(AVRISD::LSRBN, dl, VT, Victim,
DAG.getConstant(7, dl, VT));
ShiftAmount = 0;
+ } else if (Op.getOpcode() == ISD::SRA && ShiftAmount == 6) {
+ // Optimize ASR when ShiftAmount == 6.
+ Victim = DAG.getNode(AVRISD::ASRBN, dl, VT, Victim,
+ DAG.getConstant(6, dl, VT));
+ ShiftAmount = 0;
} else if (Op.getOpcode() == ISD::SRA && ShiftAmount == 7) {
// Optimize ASR when ShiftAmount == 7.
Victim = DAG.getNode(AVRISD::ASRBN, dl, VT, Victim,
@@ -387,16 +392,22 @@ SDValue AVRTargetLowering::LowerShifts(SDValue Op, SelectionDAG &DAG) const {
Victim = DAG.getNode(AVRISD::LSLWN, dl, VT, Victim,
DAG.getConstant(8, dl, VT));
ShiftAmount -= 8;
+ // Only operate on the higher byte for remaining shift bits.
+ Opc8 = AVRISD::LSLHI;
break;
case ISD::SRL:
Victim = DAG.getNode(AVRISD::LSRWN, dl, VT, Victim,
DAG.getConstant(8, dl, VT));
ShiftAmount -= 8;
+ // Only operate on the lower byte for remaining shift bits.
+ Opc8 = AVRISD::LSRLO;
break;
case ISD::SRA:
Victim = DAG.getNode(AVRISD::ASRWN, dl, VT, Victim,
DAG.getConstant(8, dl, VT));
ShiftAmount -= 8;
+ // Only operate on the lower byte for remaining shift bits.
+ Opc8 = AVRISD::ASRLO;
break;
default:
break;
@@ -407,11 +418,22 @@ SDValue AVRTargetLowering::LowerShifts(SDValue Op, SelectionDAG &DAG) const {
Victim = DAG.getNode(AVRISD::LSLWN, dl, VT, Victim,
DAG.getConstant(12, dl, VT));
ShiftAmount -= 12;
+ // Only operate on the higher byte for remaining shift bits.
+ Opc8 = AVRISD::LSLHI;
break;
case ISD::SRL:
Victim = DAG.getNode(AVRISD::LSRWN, dl, VT, Victim,
DAG.getConstant(12, dl, VT));
ShiftAmount -= 12;
+ // Only operate on the lower byte for remaining shift bits.
+ Opc8 = AVRISD::LSRLO;
+ break;
+ case ISD::SRA:
+ Victim = DAG.getNode(AVRISD::ASRWN, dl, VT, Victim,
+ DAG.getConstant(8, dl, VT));
+ ShiftAmount -= 8;
+ // Only operate on the lower byte for remaining shift bits.
+ Opc8 = AVRISD::ASRLO;
break;
default:
break;
@@ -874,7 +896,8 @@ bool AVRTargetLowering::isLegalAddressingMode(const DataLayout &DL,
// Allow reg+<6bit> offset.
if (Offs < 0)
Offs = -Offs;
- if (AM.BaseGV == 0 && AM.HasBaseReg && AM.Scale == 0 && isUInt<6>(Offs)) {
+ if (AM.BaseGV == nullptr && AM.HasBaseReg && AM.Scale == 0 &&
+ isUInt<6>(Offs)) {
return true;
}
@@ -1169,7 +1192,7 @@ SDValue AVRTargetLowering::LowerFormalArguments(
llvm_unreachable("Unknown argument type!");
}
- unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
+ Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
// :NOTE: Clang should not promote any i8 into i16 but for safety the
@@ -1672,6 +1695,18 @@ MachineBasicBlock *AVRTargetLowering::insertMul(MachineInstr &MI,
return BB;
}
+// Insert a read from R1, which almost always contains the value 0.
+MachineBasicBlock *
+AVRTargetLowering::insertCopyR1(MachineInstr &MI, MachineBasicBlock *BB) const {
+ const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
+ MachineBasicBlock::iterator I(MI);
+ BuildMI(*BB, I, MI.getDebugLoc(), TII.get(AVR::COPY))
+ .add(MI.getOperand(0))
+ .addReg(AVR::R1);
+ MI.eraseFromParent();
+ return BB;
+}
+
MachineBasicBlock *
AVRTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *MBB) const {
@@ -1694,6 +1729,8 @@ AVRTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
case AVR::MULRdRr:
case AVR::MULSRdRr:
return insertMul(MI, MBB);
+ case AVR::CopyR1:
+ return insertCopyR1(MI, MBB);
}
assert((Opc == AVR::Select16 || Opc == AVR::Select8) &&
@@ -2012,7 +2049,7 @@ void AVRTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
std::string &Constraint,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const {
- SDValue Result(0, 0);
+ SDValue Result;
SDLoc DL(Op);
EVT Ty = Op.getValueType();
diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRISelLowering.h b/contrib/llvm-project/llvm/lib/Target/AVR/AVRISelLowering.h
index 3ae036b66bcb..116417b61566 100644
--- a/contrib/llvm-project/llvm/lib/Target/AVR/AVRISelLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRISelLowering.h
@@ -38,12 +38,15 @@ enum NodeType {
LSL, ///< Logical shift left.
LSLBN, ///< Byte logical shift left N bits.
LSLWN, ///< Word logical shift left N bits.
+ LSLHI, ///< Higher 8-bit of word logical shift left.
LSR, ///< Logical shift right.
LSRBN, ///< Byte logical shift right N bits.
LSRWN, ///< Word logical shift right N bits.
+ LSRLO, ///< Lower 8-bit of word logical shift right.
ASR, ///< Arithmetic shift right.
ASRBN, ///< Byte arithmetic shift right N bits.
ASRWN, ///< Word arithmetic shift right N bits.
+ ASRLO, ///< Lower 8-bit of word arithmetic shift right.
ROR, ///< Bit rotate right.
ROL, ///< Bit rotate left.
LSLLOOP, ///< A loop of single logical shift left instructions.
@@ -184,6 +187,8 @@ protected:
private:
MachineBasicBlock *insertShift(MachineInstr &MI, MachineBasicBlock *BB) const;
MachineBasicBlock *insertMul(MachineInstr &MI, MachineBasicBlock *BB) const;
+ MachineBasicBlock *insertCopyR1(MachineInstr &MI,
+ MachineBasicBlock *BB) const;
};
} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRInstrInfo.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/AVRInstrInfo.cpp
index 51060018a5ca..ac52c47f93d5 100644
--- a/contrib/llvm-project/llvm/lib/Target/AVR/AVRInstrInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRInstrInfo.cpp
@@ -304,11 +304,11 @@ bool AVRInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
}
Cond.clear();
- FBB = 0;
+ FBB = nullptr;
// Delete the JMP if it's equivalent to a fall-through.
if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
- TBB = 0;
+ TBB = nullptr;
I->eraseFromParent();
I = MBB.end();
UnCondBrIter = MBB.end();
diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRInstrInfo.td b/contrib/llvm-project/llvm/lib/Target/AVR/AVRInstrInfo.td
index c7f423292da0..2b96dc0b833a 100644
--- a/contrib/llvm-project/llvm/lib/Target/AVR/AVRInstrInfo.td
+++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRInstrInfo.td
@@ -60,6 +60,9 @@ def AVRlsr : SDNode<"AVRISD::LSR", SDTIntUnaryOp>;
def AVRrol : SDNode<"AVRISD::ROL", SDTIntUnaryOp>;
def AVRror : SDNode<"AVRISD::ROR", SDTIntUnaryOp>;
def AVRasr : SDNode<"AVRISD::ASR", SDTIntUnaryOp>;
+def AVRlslhi : SDNode<"AVRISD::LSLHI", SDTIntUnaryOp>;
+def AVRlsrlo : SDNode<"AVRISD::LSRLO", SDTIntUnaryOp>;
+def AVRasrlo : SDNode<"AVRISD::ASRLO", SDTIntUnaryOp>;
def AVRlslbn : SDNode<"AVRISD::LSLBN", SDTIntBinOp>;
def AVRlsrbn : SDNode<"AVRISD::LSRBN", SDTIntBinOp>;
def AVRasrbn : SDNode<"AVRISD::ASRBN", SDTIntBinOp>;
@@ -1391,7 +1394,7 @@ let canFoldAsLoad = 1, isReMaterializable = 1 in {
// ldd Rd, P+q
// ldd Rd+1, P+q+1
let Constraints = "@earlyclobber $dst" in def LDDWRdPtrQ
- : Pseudo<(outs DREGS_WITHOUT_YZ_WORKAROUND
+ : Pseudo<(outs DREGS
: $dst),
(ins memri
: $memri),
@@ -1699,21 +1702,34 @@ let mayLoad = 1, hasSideEffects = 0 in {
: F16<0b1001010111011000, (outs), (ins), "elpm", []>,
Requires<[HasELPM]>;
- def ELPMRdZ : FLPMX<1, 0,
- (outs GPR8
- : $dst),
- (ins ZREG
- : $z),
+ def ELPMRdZ : FLPMX<1, 0, (outs GPR8:$dst), (ins ZREG:$z),
"elpm\t$dst, $z", []>,
Requires<[HasELPMX]>;
- let Defs = [R31R30] in def ELPMRdZPi : FLPMX<1, 1,
- (outs GPR8
- : $dst),
- (ins ZREG
- : $z),
- "elpm\t$dst, $z+", []>,
- Requires<[HasELPMX]>;
+ let Defs = [R31R30] in {
+ def ELPMRdZPi : FLPMX<1, 1, (outs GPR8:$dst), (ins ZREG:$z),
+ "elpm\t$dst, $z+", []>,
+ Requires<[HasELPMX]>;
+ }
+
+  // These pseudos are a combination of the OUT and ELPM instructions.
+ let Defs = [R31R30], hasSideEffects = 1 in {
+ def ELPMBRdZ : Pseudo<(outs GPR8:$dst), (ins ZREG:$z, LD8:$p),
+ "elpmb\t$dst, $z, $p", []>,
+ Requires<[HasELPMX]>;
+
+ def ELPMWRdZ : Pseudo<(outs DREGS:$dst), (ins ZREG:$z, LD8:$p),
+ "elpmw\t$dst, $z, $p", []>,
+ Requires<[HasELPMX]>;
+
+ def ELPMBRdZPi : Pseudo<(outs GPR8:$dst), (ins ZREG:$z, LD8:$p),
+ "elpmb\t$dst, $z+, $p", []>,
+ Requires<[HasELPMX]>;
+
+ def ELPMWRdZPi : Pseudo<(outs DREGS:$dst), (ins ZREG:$z, LD8:$p),
+ "elpmw\t$dst, $z+, $p", []>,
+ Requires<[HasELPMX]>;
+ }
}
// Store program memory operations.
@@ -1848,6 +1864,9 @@ let Constraints = "$src = $rd", Defs = [SREG] in {
: $src)),
(implicit SREG)]>;
+ def LSLWHiRd : Pseudo<(outs DREGS:$rd), (ins DREGS:$src), "lslwhi\t$rd",
+ [(set i16:$rd, (AVRlslhi i16:$src)), (implicit SREG)]>;
+
def LSLWNRd : Pseudo<(outs DLDREGS
: $rd),
(ins DREGS
@@ -1895,6 +1914,9 @@ let Constraints = "$src = $rd", Defs = [SREG] in {
: $src)),
(implicit SREG)]>;
+ def LSRWLoRd : Pseudo<(outs DREGS:$rd), (ins DREGS:$src), "lsrwlo\t$rd",
+ [(set i16:$rd, (AVRlsrlo i16:$src)), (implicit SREG)]>;
+
def LSRWNRd : Pseudo<(outs DLDREGS
: $rd),
(ins DREGS
@@ -1968,6 +1990,9 @@ let Constraints = "$src = $rd", Defs = [SREG] in {
: $src)),
(implicit SREG)]>;
+ def ASRWLoRd : Pseudo<(outs DREGS:$rd), (ins DREGS:$src), "asrwlo\t$rd",
+ [(set i16:$rd, (AVRasrlo i16:$src)), (implicit SREG)]>;
+
def ROLBRd : Pseudo<(outs GPR8
: $rd),
(ins GPR8
@@ -2365,6 +2390,10 @@ def Asr16 : ShiftPseudo<(outs DREGS
: $src, i8
: $cnt))]>;
+// Lowered to a copy from R1, which contains the value zero.
+let usesCustomInserter=1 in
+def CopyR1 : Pseudo<(outs GPR8:$rd), (ins), "clrz\t$rd", [(set i8:$rd, 0)]>;
+
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//
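A trivial illustration of when the new CopyR1 pseudo is selected (hypothetical source; it relies on the avr-gcc convention, followed here, that R1 holds zero between instructions):

  // Returning the i8 constant 0 matches the CopyR1 pattern; the custom
  // inserter added in AVRISelLowering.cpp then rewrites it into a plain
  // COPY from the physical register R1.
  extern "C" char zero_byte(void) { return 0; }
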
diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRRegisterInfo.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/AVRRegisterInfo.cpp
index 1886debaf492..5dd7f5c55695 100644
--- a/contrib/llvm-project/llvm/lib/Target/AVR/AVRRegisterInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRRegisterInfo.cpp
@@ -44,10 +44,7 @@ AVRRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
const uint32_t *
AVRRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
CallingConv::ID CC) const {
- const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
-
- return AFI->isInterruptOrSignalHandler() ? CSR_Interrupts_RegMask
- : CSR_Normal_RegMask;
+ return CSR_Normal_RegMask;
}
BitVector AVRRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRRegisterInfo.h b/contrib/llvm-project/llvm/lib/Target/AVR/AVRRegisterInfo.h
index fa27d9283209..2c5647b52c1c 100644
--- a/contrib/llvm-project/llvm/lib/Target/AVR/AVRRegisterInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRRegisterInfo.h
@@ -27,7 +27,7 @@ public:
public:
const uint16_t *
- getCalleeSavedRegs(const MachineFunction *MF = 0) const override;
+ getCalleeSavedRegs(const MachineFunction *MF = nullptr) const override;
const uint32_t *getCallPreservedMask(const MachineFunction &MF,
CallingConv::ID CC) const override;
BitVector getReservedRegs(const MachineFunction &MF) const override;
@@ -39,7 +39,7 @@ public:
/// Stack Frame Processing Methods
void eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj,
unsigned FIOperandNum,
- RegScavenger *RS = NULL) const override;
+ RegScavenger *RS = nullptr) const override;
Register getFrameRegister(const MachineFunction &MF) const override;
diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRRegisterInfo.td b/contrib/llvm-project/llvm/lib/Target/AVR/AVRRegisterInfo.td
index bb4e86ca0536..c5fda788fe4d 100644
--- a/contrib/llvm-project/llvm/lib/Target/AVR/AVRRegisterInfo.td
+++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRRegisterInfo.td
@@ -178,26 +178,6 @@ def DREGSMOVW : RegisterClass<"AVR", [i16], 8,
R29R28, R17R16, R15R14, R13R12, R11R10, R9R8,
R7R6, R5R4, R3R2, R1R0)>;
-// The 16-bit DREGS register class, excluding the Z pointer register.
-//
-// This is used by instructions which cause high pointer register
-// contention which leads to an assertion in the register allocator.
-//
-// There is no technical reason why instructions that use this class
-// cannot use Z; it's simply a workaround a regalloc bug.
-//
-// More information can be found in PR39553.
-def DREGS_WITHOUT_YZ_WORKAROUND
- : RegisterClass<"AVR", [i16], 8,
- (
- // Return value and arguments.
- add R25R24, R19R18, R21R20, R23R22,
- // Scratch registers.
- R27R26,
- // Callee saved registers.
- R17R16, R15R14, R13R12, R11R10, R9R8, R7R6, R5R4, R3R2,
- R1R0)>;
-
// 16-bit register class for immediate instructions.
def DLDREGS : RegisterClass<"AVR", [i16], 8,
(
diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRSubtarget.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/AVRSubtarget.cpp
index 990e1c57e63f..8a5481423e9f 100644
--- a/contrib/llvm-project/llvm/lib/Target/AVR/AVRSubtarget.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRSubtarget.cpp
@@ -40,8 +40,7 @@ AVRSubtarget::AVRSubtarget(const Triple &TT, const std::string &CPU,
m_hasTinyEncoding(false), m_hasMemMappedGPR(false),
m_FeatureSetDummy(false),
- InstrInfo(), FrameLowering(),
- TLInfo(TM, initializeSubtargetDependencies(CPU, FS, TM)), TSInfo() {
+ TLInfo(TM, initializeSubtargetDependencies(CPU, FS, TM)) {
// Parse features string.
ParseSubtargetFeatures(CPU, /*TuneCPU*/ CPU, FS);
}
diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRSubtarget.h b/contrib/llvm-project/llvm/lib/Target/AVR/AVRSubtarget.h
index 90b9cd4da7c1..f8ca191b1868 100644
--- a/contrib/llvm-project/llvm/lib/Target/AVR/AVRSubtarget.h
+++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRSubtarget.h
@@ -91,6 +91,9 @@ public:
return ELFArch;
}
+  /// Get the I/O register address of RAMPZ.
+ int getIORegRAMPZ(void) const { return 0x3b; }
+
private:
/// The ELF e_flags architecture.
unsigned ELFArch;
diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRTargetMachine.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/AVRTargetMachine.cpp
index 65740f7c2306..22b9ba3ece07 100644
--- a/contrib/llvm-project/llvm/lib/Target/AVR/AVRTargetMachine.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRTargetMachine.cpp
@@ -70,7 +70,6 @@ public:
bool addInstSelector() override;
void addPreSched2() override;
void addPreEmitPass() override;
- void addPreRegAlloc() override;
};
} // namespace
@@ -118,11 +117,6 @@ bool AVRPassConfig::addInstSelector() {
return false;
}
-void AVRPassConfig::addPreRegAlloc() {
- // Create the dynalloc SP save/restore pass to handle variable sized allocas.
- addPass(createAVRDynAllocaSRPass());
-}
-
void AVRPassConfig::addPreSched2() {
addPass(createAVRRelaxMemPass());
addPass(createAVRExpandPseudoPass());
diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRTargetObjectFile.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/AVRTargetObjectFile.cpp
index c7715ca1f51b..fe8e863be1a3 100644
--- a/contrib/llvm-project/llvm/lib/Target/AVR/AVRTargetObjectFile.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRTargetObjectFile.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "AVRTargetObjectFile.h"
+#include "AVRTargetMachine.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/IR/DerivedTypes.h"
@@ -22,14 +23,60 @@ void AVRTargetObjectFile::Initialize(MCContext &Ctx, const TargetMachine &TM) {
Base::Initialize(Ctx, TM);
ProgmemDataSection =
Ctx.getELFSection(".progmem.data", ELF::SHT_PROGBITS, ELF::SHF_ALLOC);
+ Progmem1DataSection =
+ Ctx.getELFSection(".progmem1.data", ELF::SHT_PROGBITS, ELF::SHF_ALLOC);
+ Progmem2DataSection =
+ Ctx.getELFSection(".progmem2.data", ELF::SHT_PROGBITS, ELF::SHF_ALLOC);
+ Progmem3DataSection =
+ Ctx.getELFSection(".progmem3.data", ELF::SHT_PROGBITS, ELF::SHF_ALLOC);
+ Progmem4DataSection =
+ Ctx.getELFSection(".progmem4.data", ELF::SHT_PROGBITS, ELF::SHF_ALLOC);
+ Progmem5DataSection =
+ Ctx.getELFSection(".progmem5.data", ELF::SHT_PROGBITS, ELF::SHF_ALLOC);
}
MCSection *AVRTargetObjectFile::SelectSectionForGlobal(
const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const {
- // Global values in flash memory are placed in the progmem.data section
+ // Global values in flash memory are placed in the progmem*.data section
// unless they already have a user assigned section.
- if (AVR::isProgramMemoryAddress(GO) && !GO->hasSection() && Kind.isReadOnly())
- return ProgmemDataSection;
+ const auto &AVRTM = static_cast<const AVRTargetMachine &>(TM);
+ if (AVR::isProgramMemoryAddress(GO) && !GO->hasSection() &&
+ Kind.isReadOnly()) {
+ // The AVR subtarget should support LPM to access section '.progmem*.data'.
+ if (!AVRTM.getSubtargetImpl()->hasLPM()) {
+ // TODO: Get the global object's location in source file.
+ getContext().reportError(
+ SMLoc(),
+ "Current AVR subtarget does not support accessing program memory");
+ return Base::SelectSectionForGlobal(GO, Kind, TM);
+ }
+ // The AVR subtarget should support ELPM to access section
+ // '.progmem[1|2|3|4|5].data'.
+ if (!AVRTM.getSubtargetImpl()->hasELPM() &&
+ AVR::getAddressSpace(GO) != AVR::ProgramMemory) {
+ // TODO: Get the global object's location in source file.
+ getContext().reportError(SMLoc(),
+ "Current AVR subtarget does not support "
+ "accessing extended program memory");
+ return ProgmemDataSection;
+ }
+ switch (AVR::getAddressSpace(GO)) {
+ case AVR::ProgramMemory: // address space 1
+ return ProgmemDataSection;
+ case AVR::ProgramMemory1: // address space 2
+ return Progmem1DataSection;
+ case AVR::ProgramMemory2: // address space 3
+ return Progmem2DataSection;
+ case AVR::ProgramMemory3: // address space 4
+ return Progmem3DataSection;
+ case AVR::ProgramMemory4: // address space 5
+ return Progmem4DataSection;
+ case AVR::ProgramMemory5: // address space 6
+ return Progmem5DataSection;
+ default:
+ llvm_unreachable("unexpected program memory index");
+ }
+ }
// Otherwise, we work the same way as ELF.
return Base::SelectSectionForGlobal(GO, Kind, TM);
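The mapping above can be exercised from C/C++ with clang's address_space extension; a hypothetical global shown only to illustrate the selection (address space 2 corresponds to AVR::ProgramMemory1, so it should land in .progmem1.data on a subtarget with ELPM):

  __attribute__((address_space(2))) const int lookup_table[4] = {1, 2, 3, 4};
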
diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AVRTargetObjectFile.h b/contrib/llvm-project/llvm/lib/Target/AVR/AVRTargetObjectFile.h
index 53d8510d9a21..609849b44029 100644
--- a/contrib/llvm-project/llvm/lib/Target/AVR/AVRTargetObjectFile.h
+++ b/contrib/llvm-project/llvm/lib/Target/AVR/AVRTargetObjectFile.h
@@ -25,6 +25,11 @@ public:
private:
MCSection *ProgmemDataSection;
+ MCSection *Progmem1DataSection;
+ MCSection *Progmem2DataSection;
+ MCSection *Progmem3DataSection;
+ MCSection *Progmem4DataSection;
+ MCSection *Progmem5DataSection;
};
} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/AsmParser/AVRAsmParser.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/AsmParser/AVRAsmParser.cpp
index 95ecd28200ba..f19e7840eb31 100644
--- a/contrib/llvm-project/llvm/lib/Target/AVR/AsmParser/AVRAsmParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AVR/AsmParser/AVRAsmParser.cpp
@@ -107,13 +107,13 @@ class AVROperand : public MCParsedAsmOperand {
public:
AVROperand(StringRef Tok, SMLoc const &S)
- : Base(), Kind(k_Token), Tok(Tok), Start(S), End(S) {}
+ : Kind(k_Token), Tok(Tok), Start(S), End(S) {}
AVROperand(unsigned Reg, SMLoc const &S, SMLoc const &E)
- : Base(), Kind(k_Register), RegImm({Reg, nullptr}), Start(S), End(E) {}
+ : Kind(k_Register), RegImm({Reg, nullptr}), Start(S), End(E) {}
AVROperand(MCExpr const *Imm, SMLoc const &S, SMLoc const &E)
- : Base(), Kind(k_Immediate), RegImm({0, Imm}), Start(S), End(E) {}
+ : Kind(k_Immediate), RegImm({0, Imm}), Start(S), End(E) {}
AVROperand(unsigned Reg, MCExpr const *Imm, SMLoc const &S, SMLoc const &E)
- : Base(), Kind(k_Memri), RegImm({Reg, Imm}), Start(S), End(E) {}
+ : Kind(k_Memri), RegImm({Reg, Imm}), Start(S), End(E) {}
struct RegisterImmediate {
unsigned Reg;
@@ -281,7 +281,7 @@ bool AVRAsmParser::invalidOperand(SMLoc const &Loc,
OperandVector const &Operands,
uint64_t const &ErrorInfo) {
SMLoc ErrorLoc = Loc;
- char const *Diag = 0;
+ char const *Diag = nullptr;
if (ErrorInfo != ~0U) {
if (ErrorInfo >= Operands.size()) {
diff --git a/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRAsmBackend.cpp b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRAsmBackend.cpp
index a3a4d63932c0..3624ade854c0 100644
--- a/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRAsmBackend.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AVR/MCTargetDesc/AVRAsmBackend.cpp
@@ -47,7 +47,7 @@ static void signed_width(unsigned Width, uint64_t Value,
" to " + std::to_string(Max) + ")";
if (Ctx) {
- Ctx->reportFatalError(Fixup.getLoc(), Diagnostic);
+ Ctx->reportError(Fixup.getLoc(), Diagnostic);
} else {
llvm_unreachable(Diagnostic.c_str());
}
@@ -66,7 +66,7 @@ static void unsigned_width(unsigned Width, uint64_t Value,
" (expected an integer in the range 0 to " + std::to_string(Max) + ")";
if (Ctx) {
- Ctx->reportFatalError(Fixup.getLoc(), Diagnostic);
+ Ctx->reportError(Fixup.getLoc(), Diagnostic);
} else {
llvm_unreachable(Diagnostic.c_str());
}
diff --git a/contrib/llvm-project/llvm/lib/Target/BPF/AsmParser/BPFAsmParser.cpp b/contrib/llvm-project/llvm/lib/Target/BPF/AsmParser/BPFAsmParser.cpp
index 50298bf5e943..697deb117bcb 100644
--- a/contrib/llvm-project/llvm/lib/Target/BPF/AsmParser/BPFAsmParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/BPF/AsmParser/BPFAsmParser.cpp
@@ -101,7 +101,7 @@ struct BPFOperand : public MCParsedAsmOperand {
ImmOp Imm;
};
- BPFOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
+ BPFOperand(KindTy K) : Kind(K) {}
public:
BPFOperand(const BPFOperand &o) : MCParsedAsmOperand() {
diff --git a/contrib/llvm-project/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp b/contrib/llvm-project/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp
index ab7e848409d9..46141e69d9d4 100644
--- a/contrib/llvm-project/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp
@@ -1002,7 +1002,7 @@ bool BPFAbstractMemberAccess::transformGEPChain(CallInst *Call,
VarType = Type::getInt64Ty(BB->getContext()); // 64bit ptr or enum value
GV = new GlobalVariable(*M, VarType, false, GlobalVariable::ExternalLinkage,
- NULL, AccessKey);
+ nullptr, AccessKey);
GV->addAttribute(BPFCoreSharedInfo::AmaAttr);
GV->setMetadata(LLVMContext::MD_preserve_access_index, TypeMeta);
GEPGlobals[AccessKey] = GV;
diff --git a/contrib/llvm-project/llvm/lib/Target/BPF/BPFISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/BPF/BPFISelLowering.cpp
index 90723ac04f64..0587cb0e16e3 100644
--- a/contrib/llvm-project/llvm/lib/Target/BPF/BPFISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/BPF/BPFISelLowering.cpp
@@ -325,7 +325,7 @@ SDValue BPFTargetLowering::LowerFormalArguments(
default: {
errs() << "LowerFormalArguments Unhandled argument type: "
<< RegVT.getEVTString() << '\n';
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
case MVT::i32:
case MVT::i64:
diff --git a/contrib/llvm-project/llvm/lib/Target/BPF/BPFMIChecking.cpp b/contrib/llvm-project/llvm/lib/Target/BPF/BPFMIChecking.cpp
index eb8c48ac49de..2bc2302cf55c 100644
--- a/contrib/llvm-project/llvm/lib/Target/BPF/BPFMIChecking.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/BPF/BPFMIChecking.cpp
@@ -41,7 +41,7 @@ private:
// Initialize class variables.
void initialize(MachineFunction &MFParm);
- bool processAtomicInsts(void);
+ bool processAtomicInsts();
public:
@@ -151,7 +151,7 @@ static bool hasLiveDefs(const MachineInstr &MI, const TargetRegisterInfo *TRI) {
return false;
}
-bool BPFMIPreEmitChecking::processAtomicInsts(void) {
+bool BPFMIPreEmitChecking::processAtomicInsts() {
for (MachineBasicBlock &MBB : *MF) {
for (MachineInstr &MI : MBB) {
if (MI.getOpcode() != BPF::XADDW &&
diff --git a/contrib/llvm-project/llvm/lib/Target/BPF/BPFMIPeephole.cpp b/contrib/llvm-project/llvm/lib/Target/BPF/BPFMIPeephole.cpp
index 354980e4bf3c..7f69c8a63443 100644
--- a/contrib/llvm-project/llvm/lib/Target/BPF/BPFMIPeephole.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/BPF/BPFMIPeephole.cpp
@@ -56,8 +56,8 @@ private:
bool isInsnFrom32Def(MachineInstr *DefInsn);
bool isPhiFrom32Def(MachineInstr *MovMI);
bool isMovFrom32Def(MachineInstr *MovMI);
- bool eliminateZExtSeq(void);
- bool eliminateZExt(void);
+ bool eliminateZExtSeq();
+ bool eliminateZExt();
std::set<MachineInstr *> PhiInsns;
@@ -172,7 +172,7 @@ bool BPFMIPeephole::isMovFrom32Def(MachineInstr *MovMI)
return true;
}
-bool BPFMIPeephole::eliminateZExtSeq(void) {
+bool BPFMIPeephole::eliminateZExtSeq() {
MachineInstr* ToErase = nullptr;
bool Eliminated = false;
@@ -240,7 +240,7 @@ bool BPFMIPeephole::eliminateZExtSeq(void) {
return Eliminated;
}
-bool BPFMIPeephole::eliminateZExt(void) {
+bool BPFMIPeephole::eliminateZExt() {
MachineInstr* ToErase = nullptr;
bool Eliminated = false;
@@ -312,7 +312,7 @@ private:
// Initialize class variables.
void initialize(MachineFunction &MFParm);
- bool eliminateRedundantMov(void);
+ bool eliminateRedundantMov();
public:
@@ -334,7 +334,7 @@ void BPFMIPreEmitPeephole::initialize(MachineFunction &MFParm) {
LLVM_DEBUG(dbgs() << "*** BPF PreEmit peephole pass ***\n\n");
}
-bool BPFMIPreEmitPeephole::eliminateRedundantMov(void) {
+bool BPFMIPreEmitPeephole::eliminateRedundantMov() {
MachineInstr* ToErase = nullptr;
bool Eliminated = false;
@@ -405,7 +405,7 @@ private:
// Initialize class variables.
void initialize(MachineFunction &MFParm);
- bool eliminateTruncSeq(void);
+ bool eliminateTruncSeq();
public:
@@ -452,7 +452,7 @@ void BPFMIPeepholeTruncElim::initialize(MachineFunction &MFParm) {
// are 32-bit registers, but later on, kernel verifier will rewrite
// it with 64-bit value. Therefore, truncating the value after the
// load will result in incorrect code.
-bool BPFMIPeepholeTruncElim::eliminateTruncSeq(void) {
+bool BPFMIPeepholeTruncElim::eliminateTruncSeq() {
MachineInstr* ToErase = nullptr;
bool Eliminated = false;
diff --git a/contrib/llvm-project/llvm/lib/Target/BPF/BPFMISimplifyPatchable.cpp b/contrib/llvm-project/llvm/lib/Target/BPF/BPFMISimplifyPatchable.cpp
index 7e829ea43e89..b4232875383c 100644
--- a/contrib/llvm-project/llvm/lib/Target/BPF/BPFMISimplifyPatchable.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/BPF/BPFMISimplifyPatchable.cpp
@@ -55,7 +55,7 @@ private:
// Initialize class variables.
void initialize(MachineFunction &MFParm);
- bool removeLD(void);
+ bool removeLD();
void processCandidate(MachineRegisterInfo *MRI, MachineBasicBlock &MBB,
MachineInstr &MI, Register &SrcReg, Register &DstReg,
const GlobalValue *GVal, bool IsAma);
diff --git a/contrib/llvm-project/llvm/lib/Target/BPF/BPFPreserveDIType.cpp b/contrib/llvm-project/llvm/lib/Target/BPF/BPFPreserveDIType.cpp
index 36237b2fc4fd..6dfb7dc39922 100644
--- a/contrib/llvm-project/llvm/lib/Target/BPF/BPFPreserveDIType.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/BPF/BPFPreserveDIType.cpp
@@ -105,10 +105,10 @@ static bool BPFPreserveDITypeImpl(Function &F) {
BasicBlock *BB = Call->getParent();
IntegerType *VarType = Type::getInt64Ty(BB->getContext());
- std::string GVName = BaseName + std::to_string(Count) + "$" +
- std::to_string(Reloc);
+ std::string GVName =
+ BaseName + std::to_string(Count) + "$" + std::to_string(Reloc);
GlobalVariable *GV = new GlobalVariable(
- *M, VarType, false, GlobalVariable::ExternalLinkage, NULL, GVName);
+ *M, VarType, false, GlobalVariable::ExternalLinkage, nullptr, GVName);
GV->addAttribute(BPFCoreSharedInfo::TypeIdAttr);
GV->setMetadata(LLVMContext::MD_preserve_access_index, MD);
diff --git a/contrib/llvm-project/llvm/lib/Target/BPF/BPFSubtarget.cpp b/contrib/llvm-project/llvm/lib/Target/BPF/BPFSubtarget.cpp
index 77e3cd393f87..e4d98b85e58b 100644
--- a/contrib/llvm-project/llvm/lib/Target/BPF/BPFSubtarget.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/BPF/BPFSubtarget.cpp
@@ -59,6 +59,6 @@ void BPFSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
BPFSubtarget::BPFSubtarget(const Triple &TT, const std::string &CPU,
const std::string &FS, const TargetMachine &TM)
- : BPFGenSubtargetInfo(TT, CPU, /*TuneCPU*/ CPU, FS), InstrInfo(),
+ : BPFGenSubtargetInfo(TT, CPU, /*TuneCPU*/ CPU, FS),
FrameLowering(initializeSubtargetDependencies(CPU, FS)),
TLInfo(TM, *this) {}
diff --git a/contrib/llvm-project/llvm/lib/Target/BPF/BTFDebug.cpp b/contrib/llvm-project/llvm/lib/Target/BPF/BTFDebug.cpp
index 0c510686a13b..d536aed1d211 100644
--- a/contrib/llvm-project/llvm/lib/Target/BPF/BTFDebug.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/BPF/BTFDebug.cpp
@@ -1366,7 +1366,7 @@ void BTFDebug::processGlobals(bool ProcessingMapDef) {
// Calculate symbol size
const DataLayout &DL = Global.getParent()->getDataLayout();
- uint32_t Size = DL.getTypeAllocSize(Global.getType()->getElementType());
+ uint32_t Size = DL.getTypeAllocSize(Global.getValueType());
DataSecEntries[std::string(SecName)]->addDataSecEntry(VarId,
Asm->getSymbol(&Global), Size);
diff --git a/contrib/llvm-project/llvm/lib/Target/BPF/MCTargetDesc/BPFInstPrinter.cpp b/contrib/llvm-project/llvm/lib/Target/BPF/MCTargetDesc/BPFInstPrinter.cpp
index e0aeec989879..200c72a07ed6 100644
--- a/contrib/llvm-project/llvm/lib/Target/BPF/MCTargetDesc/BPFInstPrinter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/BPF/MCTargetDesc/BPFInstPrinter.cpp
@@ -50,7 +50,7 @@ static void printExpr(const MCExpr *Expr, raw_ostream &O) {
void BPFInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O, const char *Modifier) {
- assert((Modifier == 0 || Modifier[0] == 0) && "No modifiers supported");
+ assert((Modifier == nullptr || Modifier[0] == 0) && "No modifiers supported");
const MCOperand &Op = MI->getOperand(OpNo);
if (Op.isReg()) {
O << getRegisterName(Op.getReg());
diff --git a/contrib/llvm-project/llvm/lib/Target/CSKY/AsmParser/CSKYAsmParser.cpp b/contrib/llvm-project/llvm/lib/Target/CSKY/AsmParser/CSKYAsmParser.cpp
index 29b99a84a6cd..a62bd111cba9 100644
--- a/contrib/llvm-project/llvm/lib/Target/CSKY/AsmParser/CSKYAsmParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/CSKY/AsmParser/CSKYAsmParser.cpp
@@ -303,6 +303,14 @@ public:
bool isRegSeq() const { return isRegSeqTemplate<CSKY::R0, CSKY::R31>(); }
+ bool isRegSeqV1() const {
+ return isRegSeqTemplate<CSKY::F0_32, CSKY::F15_32>();
+ }
+
+ bool isRegSeqV2() const {
+ return isRegSeqTemplate<CSKY::F0_32, CSKY::F31_32>();
+ }
+
static bool isLegalRegList(unsigned from, unsigned to) {
if (from == 0 && to == 0)
return true;
diff --git a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKY.h b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKY.h
index 357b1e96e606..401d6fa1a0a5 100644
--- a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKY.h
+++ b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKY.h
@@ -21,6 +21,9 @@ class CSKYTargetMachine;
class FunctionPass;
FunctionPass *createCSKYISelDag(CSKYTargetMachine &TM);
+FunctionPass *createCSKYConstantIslandPass();
+
+void initializeCSKYConstantIslandsPass(PassRegistry &);
} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKY.td b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKY.td
index e26781ca6aa1..ddb7fe93706e 100644
--- a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKY.td
+++ b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKY.td
@@ -11,6 +11,40 @@ include "llvm/Target/Target.td"
//===----------------------------------------------------------------------===//
// CSKY subtarget features and instruction predicates.
//===----------------------------------------------------------------------===//
+def ModeHardFloat :
+ SubtargetFeature<"hard-float", "UseHardFloat",
+ "true", "Use hard floating point features">;
+def ModeHardFloatABI :
+ SubtargetFeature<"hard-float-abi", "UseHardFloatABI",
+ "true", "Use hard floating point ABI to pass args">;
+
+def FeatureFPUV2_SF
+ : SubtargetFeature<"fpuv2_sf", "HasFPUv2SingleFloat", "true",
+ "Enable FPUv2 single float instructions">;
+def HasFPUv2_SF : Predicate<"Subtarget->hasFPUv2SingleFloat()">,
+ AssemblerPredicate<(all_of FeatureFPUV2_SF),
+ "Enable FPUv2 single float instructions">;
+
+def FeatureFPUV2_DF
+ : SubtargetFeature<"fpuv2_df", "HasFPUv2DoubleFloat", "true",
+ "Enable FPUv2 double float instructions">;
+def HasFPUv2_DF : Predicate<"Subtarget->hasFPUv2DoubleFloat()">,
+ AssemblerPredicate<(all_of FeatureFPUV2_DF),
+ "Enable FPUv2 double float instructions">;
+
+def FeatureFPUV3_SF
+ : SubtargetFeature<"fpuv3_sf", "HasFPUv3SingleFloat", "true",
+ "Enable FPUv3 single float instructions">;
+def HasFPUv3_SF : Predicate<"Subtarget->hasFPUv3SingleFloat()">,
+ AssemblerPredicate<(all_of FeatureFPUV3_SF),
+ "Enable FPUv3 single float instructions">;
+
+def FeatureFPUV3_DF
+ : SubtargetFeature<"fpuv3_df", "HasFPUv3DoubleFloat", "true",
+ "Enable FPUv3 double float instructions">;
+def HasFPUv3_DF : Predicate<"Subtarget->hasFPUv3DoubleFloat()">,
+ AssemblerPredicate<(all_of FeatureFPUV3_DF),
+ "Enable FPUv3 double float instructions">;
def FeatureBTST16 : SubtargetFeature<"btst16", "HasBTST16", "true",
"Use the 16-bit btsti instruction">;
diff --git a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYAsmPrinter.cpp b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYAsmPrinter.cpp
index 85129f78e726..c8269eeacfdb 100644
--- a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYAsmPrinter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYAsmPrinter.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "CSKYAsmPrinter.h"
#include "CSKY.h"
+#include "CSKYConstantPoolValue.h"
#include "CSKYTargetMachine.h"
#include "MCTargetDesc/CSKYInstPrinter.h"
#include "MCTargetDesc/CSKYMCExpr.h"
@@ -38,6 +39,7 @@ CSKYAsmPrinter::CSKYAsmPrinter(llvm::TargetMachine &TM,
: AsmPrinter(TM, std::move(Streamer)), MCInstLowering(OutContext, *this) {}
bool CSKYAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
+ MCP = MF.getConstantPool();
Subtarget = &MF.getSubtarget<CSKYSubtarget>();
return AsmPrinter::runOnMachineFunction(MF);
}
@@ -56,16 +58,166 @@ void CSKYAsmPrinter::EmitToStreamer(MCStreamer &S, const MCInst &Inst) {
// instructions) auto-generated.
#include "CSKYGenMCPseudoLowering.inc"
+void CSKYAsmPrinter::expandTLSLA(const MachineInstr *MI) {
+ const CSKYInstrInfo *TII = Subtarget->getInstrInfo();
+
+ DebugLoc DL = MI->getDebugLoc();
+
+ MCSymbol *PCLabel = OutContext.getOrCreateSymbol(
+ Twine(MAI->getPrivateGlobalPrefix()) + "PC" + Twine(getFunctionNumber()) +
+ "_" + Twine(MI->getOperand(3).getImm()));
+
+ OutStreamer->emitLabel(PCLabel);
+
+ auto Instr = BuildMI(*MF, DL, TII->get(CSKY::LRW32))
+ .add(MI->getOperand(0))
+ .add(MI->getOperand(2));
+ MCInst LRWInst;
+ MCInstLowering.Lower(Instr, LRWInst);
+ EmitToStreamer(*OutStreamer, LRWInst);
+
+ Instr = BuildMI(*MF, DL, TII->get(CSKY::GRS32))
+ .add(MI->getOperand(1))
+ .addSym(PCLabel);
+ MCInst GRSInst;
+ MCInstLowering.Lower(Instr, GRSInst);
+ EmitToStreamer(*OutStreamer, GRSInst);
+ return;
+}
+
+void CSKYAsmPrinter::emitCustomConstantPool(const MachineInstr *MI) {
+
+ // This instruction represents a floating constant pool in the function.
+ // The first operand is the ID# for this instruction, the second is the
+ // index into the MachineConstantPool that this is, the third is the size
+ // in bytes of this constant pool entry.
+ // The required alignment is specified on the basic block holding this MI.
+ unsigned LabelId = (unsigned)MI->getOperand(0).getImm();
+ unsigned CPIdx = (unsigned)MI->getOperand(1).getIndex();
+
+ // If this is the first entry of the pool, mark it.
+ if (!InConstantPool) {
+ OutStreamer->emitValueToAlignment(4);
+ InConstantPool = true;
+ }
+
+ OutStreamer->emitLabel(GetCPISymbol(LabelId));
+
+ const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPIdx];
+ if (MCPE.isMachineConstantPoolEntry())
+ emitMachineConstantPoolValue(MCPE.Val.MachineCPVal);
+ else
+ emitGlobalConstant(MF->getDataLayout(), MCPE.Val.ConstVal);
+ return;
+}
+
+void CSKYAsmPrinter::emitFunctionBodyEnd() {
+ // Make sure to terminate any constant pools that were at the end
+ // of the function.
+ if (!InConstantPool)
+ return;
+ InConstantPool = false;
+}
+
void CSKYAsmPrinter::emitInstruction(const MachineInstr *MI) {
// Do any auto-generated pseudo lowerings.
if (emitPseudoExpansionLowering(*OutStreamer, MI))
return;
+ // If we just ended a constant pool, mark it as such.
+ if (InConstantPool && MI->getOpcode() != CSKY::CONSTPOOL_ENTRY) {
+ InConstantPool = false;
+ }
+
+ if (MI->getOpcode() == CSKY::PseudoTLSLA32)
+ return expandTLSLA(MI);
+
+ if (MI->getOpcode() == CSKY::CONSTPOOL_ENTRY)
+ return emitCustomConstantPool(MI);
+
MCInst TmpInst;
MCInstLowering.Lower(MI, TmpInst);
EmitToStreamer(*OutStreamer, TmpInst);
}
+// Convert a CSKY-specific constant pool modifier into the associated
+// CSKYMCExpr variant kind.
+static CSKYMCExpr::VariantKind
+getModifierVariantKind(CSKYCP::CSKYCPModifier Modifier) {
+ switch (Modifier) {
+ case CSKYCP::NO_MOD:
+ return CSKYMCExpr::VK_CSKY_None;
+ case CSKYCP::ADDR:
+ return CSKYMCExpr::VK_CSKY_ADDR;
+ case CSKYCP::GOT:
+ return CSKYMCExpr::VK_CSKY_GOT;
+ case CSKYCP::GOTOFF:
+ return CSKYMCExpr::VK_CSKY_GOTOFF;
+ case CSKYCP::PLT:
+ return CSKYMCExpr::VK_CSKY_PLT;
+ case CSKYCP::TLSGD:
+ return CSKYMCExpr::VK_CSKY_TLSGD;
+ case CSKYCP::TLSLE:
+ return CSKYMCExpr::VK_CSKY_TLSLE;
+ case CSKYCP::TLSIE:
+ return CSKYMCExpr::VK_CSKY_TLSIE;
+ }
+ llvm_unreachable("Invalid CSKYCPModifier!");
+}
+
+void CSKYAsmPrinter::emitMachineConstantPoolValue(
+ MachineConstantPoolValue *MCPV) {
+ int Size = getDataLayout().getTypeAllocSize(MCPV->getType());
+ CSKYConstantPoolValue *CCPV = static_cast<CSKYConstantPoolValue *>(MCPV);
+ MCSymbol *MCSym;
+
+ if (CCPV->isBlockAddress()) {
+ const BlockAddress *BA =
+ cast<CSKYConstantPoolConstant>(CCPV)->getBlockAddress();
+ MCSym = GetBlockAddressSymbol(BA);
+ } else if (CCPV->isGlobalValue()) {
+ const GlobalValue *GV = cast<CSKYConstantPoolConstant>(CCPV)->getGV();
+ MCSym = getSymbol(GV);
+ } else if (CCPV->isMachineBasicBlock()) {
+ const MachineBasicBlock *MBB = cast<CSKYConstantPoolMBB>(CCPV)->getMBB();
+ MCSym = MBB->getSymbol();
+ } else if (CCPV->isJT()) {
+ signed JTI = cast<CSKYConstantPoolJT>(CCPV)->getJTI();
+ MCSym = GetJTISymbol(JTI);
+ } else {
+ assert(CCPV->isExtSymbol() && "unrecognized constant pool value");
+ StringRef Sym = cast<CSKYConstantPoolSymbol>(CCPV)->getSymbol();
+ MCSym = GetExternalSymbolSymbol(Sym);
+ }
+ // Create an MCSymbol for the reference.
+ const MCExpr *Expr =
+ MCSymbolRefExpr::create(MCSym, MCSymbolRefExpr::VK_None, OutContext);
+
+ if (CCPV->getPCAdjustment()) {
+
+ MCSymbol *PCLabel = OutContext.getOrCreateSymbol(
+ Twine(MAI->getPrivateGlobalPrefix()) + "PC" +
+ Twine(getFunctionNumber()) + "_" + Twine(CCPV->getLabelID()));
+
+ const MCExpr *PCRelExpr = MCSymbolRefExpr::create(PCLabel, OutContext);
+ if (CCPV->mustAddCurrentAddress()) {
+ // We want "(<expr> - .)", but MC doesn't have a concept of the '.'
+      // label, so just emit a temporary local label and reference that instead.
+ MCSymbol *DotSym = OutContext.createTempSymbol();
+ OutStreamer->emitLabel(DotSym);
+ const MCExpr *DotExpr = MCSymbolRefExpr::create(DotSym, OutContext);
+ PCRelExpr = MCBinaryExpr::createSub(PCRelExpr, DotExpr, OutContext);
+ }
+ Expr = MCBinaryExpr::createSub(Expr, PCRelExpr, OutContext);
+ }
+
+ // Create an MCSymbol for the reference.
+ Expr = CSKYMCExpr::create(Expr, getModifierVariantKind(CCPV->getModifier()),
+ OutContext);
+
+ OutStreamer->emitValue(Expr, Size);
+}
+
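To make the expression algebra in emitMachineConstantPoolValue easier to follow, a host-side sketch of the value it encodes before the CSKYMCExpr modifier wrapper is applied (symbol addresses are hypothetical placeholders resolved at link time):

  #include <cstdint>

  int64_t cpeValue(int64_t sym, int64_t pcLabel, int64_t dot,
                   bool mustAddCurrentAddress) {
    int64_t pcRel = mustAddCurrentAddress ? (pcLabel - dot) : pcLabel;
    return sym - pcRel;   // MCBinaryExpr::createSub(Expr, PCRelExpr, ...)
  }
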
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeCSKYAsmPrinter() {
RegisterAsmPrinter<CSKYAsmPrinter> X(getTheCSKYTarget());
}
diff --git a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYAsmPrinter.h b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYAsmPrinter.h
index b30311e0ca64..04a253d349c8 100644
--- a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYAsmPrinter.h
+++ b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYAsmPrinter.h
@@ -20,6 +20,15 @@ class LLVM_LIBRARY_VISIBILITY CSKYAsmPrinter : public AsmPrinter {
const CSKYSubtarget *Subtarget;
+ bool InConstantPool = false;
+
+ /// Keep a pointer to constantpool entries of the current
+ /// MachineFunction.
+ MachineConstantPool *MCP;
+
+ void expandTLSLA(const MachineInstr *MI);
+ void emitCustomConstantPool(const MachineInstr *MI);
+
public:
explicit CSKYAsmPrinter(TargetMachine &TM,
std::unique_ptr<MCStreamer> Streamer);
@@ -33,9 +42,16 @@ public:
bool emitPseudoExpansionLowering(MCStreamer &OutStreamer,
const MachineInstr *MI);
+ void emitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) override;
+
+ void emitFunctionBodyEnd() override;
+
void emitInstruction(const MachineInstr *MI) override;
bool runOnMachineFunction(MachineFunction &MF) override;
+
+  // We emit constant pools ourselves, so the default emission is disabled.
+ void emitConstantPool() override{};
};
} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYConstantIslandPass.cpp b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYConstantIslandPass.cpp
new file mode 100644
index 000000000000..3ac335e2ad9d
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYConstantIslandPass.cpp
@@ -0,0 +1,1376 @@
+//===- CSKYConstantIslandPass.cpp - Emit PC Relative loads ----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+//
+// Loading constants inline is expensive on CSKY and it's in general better
+// to place the constant nearby in code space and then it can be loaded with a
+// simple 16/32 bit load instruction like lrw.
+//
+// The constants can be not just numbers but addresses of functions and labels.
+// This can be particularly helpful in static relocation mode for embedded
+// non-linux targets.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CSKY.h"
+#include "CSKYConstantPoolValue.h"
+#include "CSKYMachineFunctionInfo.h"
+#include "CSKYSubtarget.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <iterator>
+#include <vector>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "CSKY-constant-islands"
+
+STATISTIC(NumCPEs, "Number of constpool entries");
+STATISTIC(NumSplit, "Number of uncond branches inserted");
+STATISTIC(NumCBrFixed, "Number of cond branches fixed");
+STATISTIC(NumUBrFixed, "Number of uncond branches fixed");
+
+namespace {
+
+using Iter = MachineBasicBlock::iterator;
+using ReverseIter = MachineBasicBlock::reverse_iterator;
+
+/// CSKYConstantIslands - Due to limited PC-relative displacements, CSKY
+/// requires constant pool entries to be scattered among the instructions
+/// inside a function. To do this, it completely ignores the normal LLVM
+/// constant pool; instead, it places constants wherever it feels like with
+/// special instructions.
+///
+/// The terminology used in this pass includes:
+/// Islands - Clumps of constants placed in the function.
+/// Water - Potential places where an island could be formed.
+/// CPE - A constant pool entry that has been placed somewhere, which
+/// tracks a list of users.
+
+class CSKYConstantIslands : public MachineFunctionPass {
+ /// BasicBlockInfo - Information about the offset and size of a single
+ /// basic block.
+ struct BasicBlockInfo {
+ /// Offset - Distance from the beginning of the function to the beginning
+ /// of this basic block.
+ ///
+ /// Offsets are computed assuming worst case padding before an aligned
+ /// block. This means that subtracting basic block offsets always gives a
+ /// conservative estimate of the real distance which may be smaller.
+ ///
+ /// Because worst case padding is used, the computed offset of an aligned
+ /// block may not actually be aligned.
+ unsigned Offset = 0;
+
+ /// Size - Size of the basic block in bytes. If the block contains
+ /// inline assembly, this is a worst case estimate.
+ ///
+ /// The size does not include any alignment padding whether from the
+ /// beginning of the block, or from an aligned jump table at the end.
+ unsigned Size = 0;
+
+ BasicBlockInfo() = default;
+
+ unsigned postOffset() const { return Offset + Size; }
+ };
+
+ std::vector<BasicBlockInfo> BBInfo;
+
+ /// WaterList - A sorted list of basic blocks where islands could be placed
+ /// (i.e. blocks that don't fall through to the following block, due
+ /// to a return, unreachable, or unconditional branch).
+ std::vector<MachineBasicBlock *> WaterList;
+
+ /// NewWaterList - The subset of WaterList that was created since the
+ /// previous iteration by inserting unconditional branches.
+ SmallSet<MachineBasicBlock *, 4> NewWaterList;
+
+ using water_iterator = std::vector<MachineBasicBlock *>::iterator;
+
+ /// CPUser - One user of a constant pool, keeping the machine instruction
+ /// pointer, the constant pool being referenced, and the max displacement
+ /// allowed from the instruction to the CP. The HighWaterMark records the
+ /// highest basic block where a new CPEntry can be placed. To ensure this
+ /// pass terminates, the CP entries are initially placed at the end of the
+ /// function and then move monotonically to lower addresses. The
+ /// exception to this rule is when the current CP entry for a particular
+ /// CPUser is out of range, but there is another CP entry for the same
+ /// constant value in range. We want to use the existing in-range CP
+ /// entry, but if it later moves out of range, the search for new water
+ /// should resume where it left off. The HighWaterMark is used to record
+ /// that point.
+ struct CPUser {
+ MachineInstr *MI;
+ MachineInstr *CPEMI;
+ MachineBasicBlock *HighWaterMark;
+
+ private:
+ unsigned MaxDisp;
+
+ public:
+ bool NegOk;
+
+ CPUser(MachineInstr *Mi, MachineInstr *Cpemi, unsigned Maxdisp, bool Neg)
+ : MI(Mi), CPEMI(Cpemi), MaxDisp(Maxdisp), NegOk(Neg) {
+ HighWaterMark = CPEMI->getParent();
+ }
+
+ /// getMaxDisp - Returns the maximum displacement supported by MI.
+ unsigned getMaxDisp() const { return MaxDisp - 16; }
+
+ void setMaxDisp(unsigned Val) { MaxDisp = Val; }
+ };
+
+ /// CPUsers - Keep track of all of the machine instructions that use various
+ /// constant pools and their max displacement.
+ std::vector<CPUser> CPUsers;
+
+ /// CPEntry - One per constant pool entry, keeping the machine instruction
+ /// pointer, the constpool index, and the number of CPUser's which
+ /// reference this entry.
+ struct CPEntry {
+ MachineInstr *CPEMI;
+ unsigned CPI;
+ unsigned RefCount;
+
+ CPEntry(MachineInstr *Cpemi, unsigned Cpi, unsigned Rc = 0)
+ : CPEMI(Cpemi), CPI(Cpi), RefCount(Rc) {}
+ };
+
+ /// CPEntries - Keep track of all of the constant pool entry machine
+ /// instructions. For each original constpool index (i.e. those that
+ /// existed upon entry to this pass), it keeps a vector of entries.
+ /// Original elements are cloned as we go along; the clones are
+ /// put in the vector of the original element, but have distinct CPIs.
+ std::vector<std::vector<CPEntry>> CPEntries;
+
+ /// ImmBranch - One per immediate branch, keeping the machine instruction
+ /// pointer, conditional or unconditional, the max displacement,
+ /// and (if isCond is true) the corresponding unconditional branch
+ /// opcode.
+ struct ImmBranch {
+ MachineInstr *MI;
+ unsigned MaxDisp : 31;
+ bool IsCond : 1;
+ int UncondBr;
+
+ ImmBranch(MachineInstr *Mi, unsigned Maxdisp, bool Cond, int Ubr)
+ : MI(Mi), MaxDisp(Maxdisp), IsCond(Cond), UncondBr(Ubr) {}
+ };
+
+ /// ImmBranches - Keep track of all the immediate branch instructions.
+ ///
+ std::vector<ImmBranch> ImmBranches;
+
+ const CSKYSubtarget *STI = nullptr;
+ const CSKYInstrInfo *TII;
+ CSKYMachineFunctionInfo *MFI;
+ MachineFunction *MF = nullptr;
+ MachineConstantPool *MCP = nullptr;
+
+ unsigned PICLabelUId;
+
+ void initPICLabelUId(unsigned UId) { PICLabelUId = UId; }
+
+ unsigned createPICLabelUId() { return PICLabelUId++; }
+
+public:
+ static char ID;
+
+ CSKYConstantIslands() : MachineFunctionPass(ID) {}
+
+ StringRef getPassName() const override { return "CSKY Constant Islands"; }
+
+ bool runOnMachineFunction(MachineFunction &F) override;
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<MachineDominatorTree>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::NoVRegs);
+ }
+
+ void doInitialPlacement(std::vector<MachineInstr *> &CPEMIs);
+ CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
+ Align getCPEAlign(const MachineInstr &CPEMI);
+ void initializeFunctionInfo(const std::vector<MachineInstr *> &CPEMIs);
+ unsigned getOffsetOf(MachineInstr *MI) const;
+ unsigned getUserOffset(CPUser &) const;
+ void dumpBBs();
+
+ bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset, unsigned Disp,
+ bool NegativeOK);
+ bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
+ const CPUser &U);
+
+ void computeBlockSize(MachineBasicBlock *MBB);
+ MachineBasicBlock *splitBlockBeforeInstr(MachineInstr &MI);
+ void updateForInsertedWaterBlock(MachineBasicBlock *NewBB);
+ void adjustBBOffsetsAfter(MachineBasicBlock *BB);
+ bool decrementCPEReferenceCount(unsigned CPI, MachineInstr *CPEMI);
+ int findInRangeCPEntry(CPUser &U, unsigned UserOffset);
+ bool findAvailableWater(CPUser &U, unsigned UserOffset,
+ water_iterator &WaterIter);
+ void createNewWater(unsigned CPUserIndex, unsigned UserOffset,
+ MachineBasicBlock *&NewMBB);
+ bool handleConstantPoolUser(unsigned CPUserIndex);
+ void removeDeadCPEMI(MachineInstr *CPEMI);
+ bool removeUnusedCPEntries();
+ bool isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
+ MachineInstr *CPEMI, unsigned Disp, bool NegOk,
+ bool DoDump = false);
+ bool isWaterInRange(unsigned UserOffset, MachineBasicBlock *Water, CPUser &U,
+ unsigned &Growth);
+ bool isBBInRange(MachineInstr *MI, MachineBasicBlock *BB, unsigned Disp);
+ bool fixupImmediateBr(ImmBranch &Br);
+ bool fixupConditionalBr(ImmBranch &Br);
+ bool fixupUnconditionalBr(ImmBranch &Br);
+};
+} // end anonymous namespace
+
+char CSKYConstantIslands::ID = 0;
+
+bool CSKYConstantIslands::isOffsetInRange(unsigned UserOffset,
+ unsigned TrialOffset,
+ const CPUser &U) {
+ return isOffsetInRange(UserOffset, TrialOffset, U.getMaxDisp(), U.NegOk);
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+/// print block size and offset information - debugging
+LLVM_DUMP_METHOD void CSKYConstantIslands::dumpBBs() {
+ for (unsigned J = 0, E = BBInfo.size(); J != E; ++J) {
+ const BasicBlockInfo &BBI = BBInfo[J];
+ dbgs() << format("%08x %bb.%u\t", BBI.Offset, J)
+ << format(" size=%#x\n", BBInfo[J].Size);
+ }
+}
+#endif
+
+bool CSKYConstantIslands::runOnMachineFunction(MachineFunction &Mf) {
+ MF = &Mf;
+ MCP = Mf.getConstantPool();
+ STI = &static_cast<const CSKYSubtarget &>(Mf.getSubtarget());
+
+ LLVM_DEBUG(dbgs() << "***** CSKYConstantIslands: "
+ << MCP->getConstants().size() << " CP entries, aligned to "
+ << MCP->getConstantPoolAlign().value() << " bytes *****\n");
+
+ TII = STI->getInstrInfo();
+ MFI = MF->getInfo<CSKYMachineFunctionInfo>();
+
+ // This pass invalidates liveness information when it splits basic blocks.
+ MF->getRegInfo().invalidateLiveness();
+
+ // Renumber all of the machine basic blocks in the function, guaranteeing that
+ // the numbers agree with the position of the block in the function.
+ MF->RenumberBlocks();
+
+ bool MadeChange = false;
+
+ // Perform the initial placement of the constant pool entries. To start with,
+ // we put them all at the end of the function.
+ std::vector<MachineInstr *> CPEMIs;
+ if (!MCP->isEmpty())
+ doInitialPlacement(CPEMIs);
+
+ /// The next UID to take is the first unused one.
+ initPICLabelUId(CPEMIs.size());
+
+ // Do the initial scan of the function, building up information about the
+ // sizes of each block, the location of all the water, and finding all of the
+ // constant pool users.
+ initializeFunctionInfo(CPEMIs);
+ CPEMIs.clear();
+ LLVM_DEBUG(dumpBBs());
+
+ /// Remove dead constant pool entries.
+ MadeChange |= removeUnusedCPEntries();
+
+ // Iteratively place constant pool entries and fix up branches until there
+ // is no change.
+ unsigned NoCPIters = 0, NoBRIters = 0;
+ (void)NoBRIters;
+ while (true) {
+ LLVM_DEBUG(dbgs() << "Beginning CP iteration #" << NoCPIters << '\n');
+ bool CPChange = false;
+ for (unsigned I = 0, E = CPUsers.size(); I != E; ++I)
+ CPChange |= handleConstantPoolUser(I);
+ if (CPChange && ++NoCPIters > 30)
+ report_fatal_error("Constant Island pass failed to converge!");
+ LLVM_DEBUG(dumpBBs());
+
+ // Clear NewWaterList now. If we split a block for branches, it should
+ // appear as "new water" for the next iteration of constant pool placement.
+ NewWaterList.clear();
+
+ LLVM_DEBUG(dbgs() << "Beginning BR iteration #" << NoBRIters << '\n');
+ bool BRChange = false;
+ for (unsigned I = 0, E = ImmBranches.size(); I != E; ++I)
+ BRChange |= fixupImmediateBr(ImmBranches[I]);
+ if (BRChange && ++NoBRIters > 30)
+ report_fatal_error("Branch Fix Up pass failed to converge!");
+ LLVM_DEBUG(dumpBBs());
+ if (!CPChange && !BRChange)
+ break;
+ MadeChange = true;
+ }
+
+ LLVM_DEBUG(dbgs() << '\n'; dumpBBs());
+
+ BBInfo.clear();
+ WaterList.clear();
+ CPUsers.clear();
+ CPEntries.clear();
+ ImmBranches.clear();
+ return MadeChange;
+}
+
+/// doInitialPlacement - Perform the initial placement of the constant pool
+/// entries. To start with, we put them all at the end of the function.
+void CSKYConstantIslands::doInitialPlacement(
+ std::vector<MachineInstr *> &CPEMIs) {
+ // Create the basic block to hold the CPE's.
+ MachineBasicBlock *BB = MF->CreateMachineBasicBlock();
+ MF->push_back(BB);
+
+ // MachineConstantPool measures alignment in bytes. We measure in log2(bytes).
+ const Align MaxAlign = MCP->getConstantPoolAlign();
+
+ // Mark the basic block as required by the const-pool.
+ BB->setAlignment(Align(2));
+
+ // The function needs to be as aligned as the basic blocks. The linker may
+ // move functions around based on their alignment.
+ MF->ensureAlignment(BB->getAlignment());
+
+ // Order the entries in BB by descending alignment. That ensures correct
+ // alignment of all entries as long as BB is sufficiently aligned. Keep
+ // track of the insertion point for each alignment. We are going to bucket
+ // sort the entries as they are created.
+ SmallVector<MachineBasicBlock::iterator, 8> InsPoint(Log2(MaxAlign) + 1,
+ BB->end());
+
+ // Add all of the constants from the constant pool to the end block, use an
+ // identity mapping of CPI's to CPE's.
+ const std::vector<MachineConstantPoolEntry> &CPs = MCP->getConstants();
+
+ const DataLayout &TD = MF->getDataLayout();
+ for (unsigned I = 0, E = CPs.size(); I != E; ++I) {
+ unsigned Size = CPs[I].getSizeInBytes(TD);
+ assert(Size >= 4 && "Too small constant pool entry");
+ Align Alignment = CPs[I].getAlign();
+ // Verify that all constant pool entries are a multiple of their alignment.
+ // If not, we would have to pad them out so that instructions stay aligned.
+ assert(isAligned(Alignment, Size) && "CP Entry not multiple of 4 bytes!");
+
+ // Insert CONSTPOOL_ENTRY before entries with a smaller alignment.
+ unsigned LogAlign = Log2(Alignment);
+ MachineBasicBlock::iterator InsAt = InsPoint[LogAlign];
+
+ MachineInstr *CPEMI =
+ BuildMI(*BB, InsAt, DebugLoc(), TII->get(CSKY::CONSTPOOL_ENTRY))
+ .addImm(I)
+ .addConstantPoolIndex(I)
+ .addImm(Size);
+
+ CPEMIs.push_back(CPEMI);
+
+ // Ensure that future entries with higher alignment get inserted before
+ // CPEMI. This is bucket sort with iterators.
+ for (unsigned A = LogAlign + 1; A <= Log2(MaxAlign); ++A)
+ if (InsPoint[A] == InsAt)
+ InsPoint[A] = CPEMI;
+ // Add a new CPEntry, but no corresponding CPUser yet.
+ CPEntries.emplace_back(1, CPEntry(CPEMI, I));
+ ++NumCPEs;
+ LLVM_DEBUG(dbgs() << "Moved CPI#" << I << " to end of function, size = "
+ << Size << ", align = " << Alignment.value() << '\n');
+ }
+ LLVM_DEBUG(BB->dump());
+}
+
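A stand-alone model of the bucket insertion performed in doInitialPlacement (a sketch with simplified types): entries arrive in constant-pool order and end up sorted by descending alignment, with one remembered insertion point per log2 alignment.

  #include <list>
  #include <vector>

  struct Entry { unsigned CPI; unsigned LogAlign; };

  std::list<Entry> placeByAlignment(const std::vector<Entry> &CPs,
                                    unsigned MaxLogAlign) {
    std::list<Entry> Block;   // stands in for the trailing CPE basic block
    std::vector<std::list<Entry>::iterator> InsPoint(MaxLogAlign + 1,
                                                     Block.end());
    for (const Entry &E : CPs) {           // assumes E.LogAlign <= MaxLogAlign
      auto InsAt = InsPoint[E.LogAlign];
      auto NewIt = Block.insert(InsAt, E); // before smaller alignments
      for (unsigned A = E.LogAlign + 1; A <= MaxLogAlign; ++A)
        if (InsPoint[A] == InsAt)
          InsPoint[A] = NewIt;             // higher alignments go before this
    }
    return Block;
  }
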
+/// BBHasFallthrough - Return true if the specified basic block can fallthrough
+/// into the block immediately after it.
+static bool bbHasFallthrough(MachineBasicBlock *MBB) {
+ // Get the next machine basic block in the function.
+ MachineFunction::iterator MBBI = MBB->getIterator();
+ // Can't fall off end of function.
+ if (std::next(MBBI) == MBB->getParent()->end())
+ return false;
+
+ MachineBasicBlock *NextBB = &*std::next(MBBI);
+ for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
+ E = MBB->succ_end();
+ I != E; ++I)
+ if (*I == NextBB)
+ return true;
+
+ return false;
+}
+
+/// findConstPoolEntry - Given the constpool index and CONSTPOOL_ENTRY MI,
+/// look up the corresponding CPEntry.
+CSKYConstantIslands::CPEntry *
+CSKYConstantIslands::findConstPoolEntry(unsigned CPI,
+ const MachineInstr *CPEMI) {
+ std::vector<CPEntry> &CPEs = CPEntries[CPI];
+ // Number of entries per constpool index should be small, just do a
+ // linear search.
+ for (unsigned I = 0, E = CPEs.size(); I != E; ++I) {
+ if (CPEs[I].CPEMI == CPEMI)
+ return &CPEs[I];
+ }
+ return nullptr;
+}
+
+/// getCPEAlign - Returns the required alignment of the constant pool entry
+/// represented by CPEMI.
+Align CSKYConstantIslands::getCPEAlign(const MachineInstr &CPEMI) {
+ assert(CPEMI.getOpcode() == CSKY::CONSTPOOL_ENTRY);
+
+ unsigned CPI = CPEMI.getOperand(1).getIndex();
+ assert(CPI < MCP->getConstants().size() && "Invalid constant pool index.");
+ return MCP->getConstants()[CPI].getAlign();
+}
+
+/// initializeFunctionInfo - Do the initial scan of the function, building up
+/// information about the sizes of each block, the location of all the water,
+/// and finding all of the constant pool users.
+void CSKYConstantIslands::initializeFunctionInfo(
+ const std::vector<MachineInstr *> &CPEMIs) {
+ BBInfo.clear();
+ BBInfo.resize(MF->getNumBlockIDs());
+
+ // First thing, compute the size of all basic blocks, and see if the function
+ // has any inline assembly in it. If so, we have to be conservative about
+ // alignment assumptions, as we don't know for sure the size of any
+ // instructions in the inline assembly.
+ for (MachineFunction::iterator I = MF->begin(), E = MF->end(); I != E; ++I)
+ computeBlockSize(&*I);
+
+ // Compute block offsets.
+ adjustBBOffsetsAfter(&MF->front());
+
+ // Now go back through the instructions and build up our data structures.
+ for (MachineBasicBlock &MBB : *MF) {
+ // If this block doesn't fall through into the next MBB, then this is
+ // 'water' that a constant pool island could be placed.
+ if (!bbHasFallthrough(&MBB))
+ WaterList.push_back(&MBB);
+ for (MachineInstr &MI : MBB) {
+ if (MI.isDebugInstr())
+ continue;
+
+ int Opc = MI.getOpcode();
+ if (MI.isBranch() && !MI.isIndirectBranch()) {
+ bool IsCond = MI.isConditionalBranch();
+ unsigned Bits = 0;
+ unsigned Scale = 1;
+ int UOpc = CSKY::BR32;
+
+ switch (MI.getOpcode()) {
+ case CSKY::BR16:
+ case CSKY::BF16:
+ case CSKY::BT16:
+ Bits = 10;
+ Scale = 2;
+ break;
+ default:
+ Bits = 16;
+ Scale = 2;
+ break;
+ }
+
+ // Record this immediate branch.
+ unsigned MaxOffs = ((1 << (Bits - 1)) - 1) * Scale;
+ ImmBranches.push_back(ImmBranch(&MI, MaxOffs, IsCond, UOpc));
+ }
+
+ if (Opc == CSKY::CONSTPOOL_ENTRY)
+ continue;
+
+ // Scan the instructions for constant pool operands.
+ for (unsigned Op = 0, E = MI.getNumOperands(); Op != E; ++Op)
+ if (MI.getOperand(Op).isCPI()) {
+ // We found one. The addressing mode tells us the max displacement
+ // from the PC that this instruction permits.
+
+ // Basic size info comes from the TSFlags field.
+ unsigned Bits = 0;
+ unsigned Scale = 1;
+ bool NegOk = false;
+
+ switch (Opc) {
+ default:
+ llvm_unreachable("Unknown addressing mode for CP reference!");
+ case CSKY::MOVIH32:
+ case CSKY::ORI32:
+ continue;
+ case CSKY::PseudoTLSLA32:
+ case CSKY::JSRI32:
+ case CSKY::JMPI32:
+ case CSKY::LRW32:
+ case CSKY::LRW32_Gen:
+ Bits = 16;
+ Scale = 4;
+ break;
+ case CSKY::f2FLRW_S:
+ case CSKY::f2FLRW_D:
+ Bits = 8;
+ Scale = 4;
+ break;
+ case CSKY::GRS32:
+ Bits = 17;
+ Scale = 2;
+ NegOk = true;
+ break;
+ }
+ // Remember that this is a user of a CP entry.
+ unsigned CPI = MI.getOperand(Op).getIndex();
+ MachineInstr *CPEMI = CPEMIs[CPI];
+ unsigned MaxOffs = ((1 << Bits) - 1) * Scale;
+ CPUsers.push_back(CPUser(&MI, CPEMI, MaxOffs, NegOk));
+
+ // Increment corresponding CPEntry reference count.
+ CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
+ assert(CPE && "Cannot find a corresponding CPEntry!");
+ CPE->RefCount++;
+
+ // Instructions can only use one CP entry, don't bother scanning the
+ // rest of the operands.
+ break;
+ }
+ }
+ }
+}
+
+/// computeBlockSize - Compute the size and some alignment information for MBB.
+/// This function updates BBInfo directly.
+void CSKYConstantIslands::computeBlockSize(MachineBasicBlock *MBB) {
+ BasicBlockInfo &BBI = BBInfo[MBB->getNumber()];
+ BBI.Size = 0;
+
+ for (const MachineInstr &MI : *MBB)
+ BBI.Size += TII->getInstSizeInBytes(MI);
+}
+
+/// getOffsetOf - Return the current offset of the specified machine instruction
+/// from the start of the function. This offset changes as stuff is moved
+/// around inside the function.
+unsigned CSKYConstantIslands::getOffsetOf(MachineInstr *MI) const {
+ MachineBasicBlock *MBB = MI->getParent();
+
+ // The offset is composed of two things: the sum of the sizes of all MBB's
+ // before this instruction's block, and the offset from the start of the block
+ // it is in.
+ unsigned Offset = BBInfo[MBB->getNumber()].Offset;
+
+ // Sum instructions before MI in MBB.
+ for (MachineBasicBlock::iterator I = MBB->begin(); &*I != MI; ++I) {
+ assert(I != MBB->end() && "Didn't find MI in its own basic block?");
+ Offset += TII->getInstSizeInBytes(*I);
+ }
+ return Offset;
+}
+
+/// compareMbbNumbers - Little predicate function to sort the WaterList by MBB
+/// ID.
+static bool compareMbbNumbers(const MachineBasicBlock *LHS,
+ const MachineBasicBlock *RHS) {
+ return LHS->getNumber() < RHS->getNumber();
+}
+
+/// updateForInsertedWaterBlock - When a block is newly inserted into the
+/// machine function, it upsets all of the block numbers. Renumber the blocks
+/// and update the arrays that parallel this numbering.
+void CSKYConstantIslands::updateForInsertedWaterBlock(
+ MachineBasicBlock *NewBB) {
+ // Renumber the MBB's to keep them consecutive.
+ NewBB->getParent()->RenumberBlocks(NewBB);
+
+ // Insert an entry into BBInfo to align it properly with the (newly
+ // renumbered) block numbers.
+ BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo());
+
+  // Next, update WaterList. Specifically, we need to add NewBB as having
+  // available water after it.
+ water_iterator IP = llvm::lower_bound(WaterList, NewBB, compareMbbNumbers);
+ WaterList.insert(IP, NewBB);
+}
+
+unsigned CSKYConstantIslands::getUserOffset(CPUser &U) const {
+ unsigned UserOffset = getOffsetOf(U.MI);
+
+ UserOffset &= ~3u;
+
+ return UserOffset;
+}
+
+/// Split the basic block containing MI into two blocks, which are joined by
+/// an unconditional branch. Update data structures and renumber blocks to
+/// account for this change and returns the newly created block.
+MachineBasicBlock *
+CSKYConstantIslands::splitBlockBeforeInstr(MachineInstr &MI) {
+ MachineBasicBlock *OrigBB = MI.getParent();
+
+ // Create a new MBB for the code after the OrigBB.
+ MachineBasicBlock *NewBB =
+ MF->CreateMachineBasicBlock(OrigBB->getBasicBlock());
+ MachineFunction::iterator MBBI = ++OrigBB->getIterator();
+ MF->insert(MBBI, NewBB);
+
+ // Splice the instructions starting with MI over to NewBB.
+ NewBB->splice(NewBB->end(), OrigBB, MI, OrigBB->end());
+
+ // Add an unconditional branch from OrigBB to NewBB.
+ // Note the new unconditional branch is not being recorded.
+ // There doesn't seem to be meaningful DebugInfo available; this doesn't
+ // correspond to anything in the source.
+
+ // TODO: Add support for 16bit instr.
+ BuildMI(OrigBB, DebugLoc(), TII->get(CSKY::BR32)).addMBB(NewBB);
+ ++NumSplit;
+
+ // Update the CFG. All succs of OrigBB are now succs of NewBB.
+ NewBB->transferSuccessors(OrigBB);
+
+ // OrigBB branches to NewBB.
+ OrigBB->addSuccessor(NewBB);
+
+ // Update internal data structures to account for the newly inserted MBB.
+ // This is almost the same as updateForInsertedWaterBlock, except that
+ // the Water goes after OrigBB, not NewBB.
+ MF->RenumberBlocks(NewBB);
+
+ // Insert an entry into BBInfo to align it properly with the (newly
+ // renumbered) block numbers.
+ BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo());
+
+  // Next, update WaterList. Specifically, we need to add OrigBB as having
+ // available water after it (but not if it's already there, which happens
+ // when splitting before a conditional branch that is followed by an
+ // unconditional branch - in that case we want to insert NewBB).
+ water_iterator IP = llvm::lower_bound(WaterList, OrigBB, compareMbbNumbers);
+ MachineBasicBlock *WaterBB = *IP;
+ if (WaterBB == OrigBB)
+ WaterList.insert(std::next(IP), NewBB);
+ else
+ WaterList.insert(IP, OrigBB);
+ NewWaterList.insert(OrigBB);
+
+ // Figure out how large the OrigBB is. As the first half of the original
+ // block, it cannot contain a tablejump. The size includes
+ // the new jump we added. (It should be possible to do this without
+ // recounting everything, but it's very confusing, and this is rarely
+ // executed.)
+ computeBlockSize(OrigBB);
+
+  // Figure out how large the NewBB is. As the second half of the original
+ // block, it may contain a tablejump.
+ computeBlockSize(NewBB);
+
+ // All BBOffsets following these blocks must be modified.
+ adjustBBOffsetsAfter(OrigBB);
+
+ return NewBB;
+}
+
+/// isOffsetInRange - Checks whether UserOffset (the location of a constant pool
+/// reference) is within MaxDisp of TrialOffset (a proposed location of a
+/// constant pool entry).
+bool CSKYConstantIslands::isOffsetInRange(unsigned UserOffset,
+ unsigned TrialOffset,
+ unsigned MaxDisp, bool NegativeOK) {
+ if (UserOffset <= TrialOffset) {
+ // User before the Trial.
+ if (TrialOffset - UserOffset <= MaxDisp)
+ return true;
+ } else if (NegativeOK) {
+ if (UserOffset - TrialOffset <= MaxDisp)
+ return true;
+ }
+ return false;
+}
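
For a quick sanity check of the rule above, here is the same comparison restated over plain unsigned offsets with a few made-up values (a sketch, not part of the patch):

#include <cassert>

// Same check as isOffsetInRange above, over plain unsigned offsets.
static bool inRange(unsigned User, unsigned Trial, unsigned MaxDisp,
                    bool NegOk) {
  if (User <= Trial)
    return Trial - User <= MaxDisp;
  return NegOk && User - Trial <= MaxDisp;
}

int main() {
  assert(inRange(0x100, 0x4F8, 0x3FC, false));  // forward, 0x3F8 <= 0x3FC
  assert(!inRange(0x100, 0x500, 0x3FC, false)); // forward, 0x400 is too far
  assert(!inRange(0x100, 0x0F0, 0x3FC, false)); // backward reach needs NegOk
  assert(inRange(0x100, 0x0F0, 0x3FC, true));
  return 0;
}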
+
+/// isWaterInRange - Returns true if a CPE placed after the specified
+/// Water (a basic block) will be in range for the specific MI.
+///
+/// Compute how much the function will grow by inserting a CPE after Water.
+bool CSKYConstantIslands::isWaterInRange(unsigned UserOffset,
+ MachineBasicBlock *Water, CPUser &U,
+ unsigned &Growth) {
+ unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset();
+ unsigned NextBlockOffset;
+ Align NextBlockAlignment;
+ MachineFunction::const_iterator NextBlock = ++Water->getIterator();
+ if (NextBlock == MF->end()) {
+ NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
+ NextBlockAlignment = Align(4);
+ } else {
+ NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset;
+ NextBlockAlignment = NextBlock->getAlignment();
+ }
+ unsigned Size = U.CPEMI->getOperand(2).getImm();
+ unsigned CPEEnd = CPEOffset + Size;
+
+ // The CPE may be able to hide in the alignment padding before the next
+ // block. It may also cause more padding to be required if it is more aligned
+  // than the next block.
+ if (CPEEnd > NextBlockOffset) {
+ Growth = CPEEnd - NextBlockOffset;
+ // Compute the padding that would go at the end of the CPE to align the next
+ // block.
+ Growth += offsetToAlignment(CPEEnd, NextBlockAlignment);
+
+ // If the CPE is to be inserted before the instruction, that will raise
+ // the offset of the instruction. Also account for unknown alignment padding
+ // in blocks between CPE and the user.
+ if (CPEOffset < UserOffset)
+ UserOffset += Growth;
+ } else
+ // CPE fits in existing padding.
+ Growth = 0;
+
+ return isOffsetInRange(UserOffset, CPEOffset, U);
+}
+
+/// isCPEntryInRange - Returns true if the distance between the specified MI
+/// and the specified constant pool entry instruction fits in MI's
+/// displacement field.
+bool CSKYConstantIslands::isCPEntryInRange(MachineInstr *MI,
+ unsigned UserOffset,
+ MachineInstr *CPEMI,
+ unsigned MaxDisp, bool NegOk,
+ bool DoDump) {
+ unsigned CPEOffset = getOffsetOf(CPEMI);
+
+ if (DoDump) {
+ LLVM_DEBUG({
+ unsigned Block = MI->getParent()->getNumber();
+ const BasicBlockInfo &BBI = BBInfo[Block];
+ dbgs() << "User of CPE#" << CPEMI->getOperand(0).getImm()
+ << " max delta=" << MaxDisp
+ << format(" insn address=%#x", UserOffset) << " in "
+ << printMBBReference(*MI->getParent()) << ": "
+ << format("%#x-%x\t", BBI.Offset, BBI.postOffset()) << *MI
+ << format("CPE address=%#x offset=%+d: ", CPEOffset,
+ int(CPEOffset - UserOffset));
+ });
+ }
+
+ return isOffsetInRange(UserOffset, CPEOffset, MaxDisp, NegOk);
+}
+
+#ifndef NDEBUG
+/// bbIsJumpedOver - Return true if the specified basic block's only predecessor
+/// unconditionally branches to its only successor.
+static bool bbIsJumpedOver(MachineBasicBlock *MBB) {
+ if (MBB->pred_size() != 1 || MBB->succ_size() != 1)
+ return false;
+ MachineBasicBlock *Succ = *MBB->succ_begin();
+ MachineBasicBlock *Pred = *MBB->pred_begin();
+ MachineInstr *PredMI = &Pred->back();
+ if (PredMI->getOpcode() == CSKY::BR32 /*TODO: change to 16bit instr. */)
+ return PredMI->getOperand(0).getMBB() == Succ;
+ return false;
+}
+#endif
+
+void CSKYConstantIslands::adjustBBOffsetsAfter(MachineBasicBlock *BB) {
+ unsigned BBNum = BB->getNumber();
+ for (unsigned I = BBNum + 1, E = MF->getNumBlockIDs(); I < E; ++I) {
+    // The new offset is the end of the layout predecessor: its offset plus
+    // its size.
+ unsigned Offset = BBInfo[I - 1].Offset + BBInfo[I - 1].Size;
+ BBInfo[I].Offset = Offset;
+ }
+}
+
+/// decrementCPEReferenceCount - find the constant pool entry with index CPI
+/// and instruction CPEMI, and decrement its refcount. If the refcount
+/// becomes 0 remove the entry and instruction. Returns true if we removed
+/// the entry, false if we didn't.
+bool CSKYConstantIslands::decrementCPEReferenceCount(unsigned CPI,
+ MachineInstr *CPEMI) {
+ // Find the old entry. Eliminate it if it is no longer used.
+ CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
+ assert(CPE && "Unexpected!");
+ if (--CPE->RefCount == 0) {
+ removeDeadCPEMI(CPEMI);
+ CPE->CPEMI = nullptr;
+ --NumCPEs;
+ return true;
+ }
+ return false;
+}
+
+/// findInRangeCPEntry - see if the currently referenced CPE is in range;
+/// if not, see if an in-range clone of the CPE is in range, and if so,
+/// change the data structures so the user references the clone. Returns:
+/// 0 = no existing entry found
+/// 1 = entry found, and there were no code insertions or deletions
+/// 2 = entry found, and there were code insertions or deletions
+int CSKYConstantIslands::findInRangeCPEntry(CPUser &U, unsigned UserOffset) {
+ MachineInstr *UserMI = U.MI;
+ MachineInstr *CPEMI = U.CPEMI;
+
+ // Check to see if the CPE is already in-range.
+ if (isCPEntryInRange(UserMI, UserOffset, CPEMI, U.getMaxDisp(), U.NegOk,
+ true)) {
+ LLVM_DEBUG(dbgs() << "In range\n");
+ return 1;
+ }
+
+ // No. Look for previously created clones of the CPE that are in range.
+ unsigned CPI = CPEMI->getOperand(1).getIndex();
+ std::vector<CPEntry> &CPEs = CPEntries[CPI];
+ for (unsigned I = 0, E = CPEs.size(); I != E; ++I) {
+ // We already tried this one
+ if (CPEs[I].CPEMI == CPEMI)
+ continue;
+ // Removing CPEs can leave empty entries, skip
+ if (CPEs[I].CPEMI == nullptr)
+ continue;
+ if (isCPEntryInRange(UserMI, UserOffset, CPEs[I].CPEMI, U.getMaxDisp(),
+ U.NegOk)) {
+ LLVM_DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#"
+ << CPEs[I].CPI << "\n");
+ // Point the CPUser node to the replacement
+ U.CPEMI = CPEs[I].CPEMI;
+ // Change the CPI in the instruction operand to refer to the clone.
+ for (unsigned J = 0, E = UserMI->getNumOperands(); J != E; ++J)
+ if (UserMI->getOperand(J).isCPI()) {
+ UserMI->getOperand(J).setIndex(CPEs[I].CPI);
+ break;
+ }
+ // Adjust the refcount of the clone...
+ CPEs[I].RefCount++;
+ // ...and the original. If we didn't remove the old entry, none of the
+ // addresses changed, so we don't need another pass.
+ return decrementCPEReferenceCount(CPI, CPEMI) ? 2 : 1;
+ }
+ }
+ return 0;
+}
+
+/// getUnconditionalBrDisp - Returns the maximum displacement that can fit in
+/// the specified unconditional branch instruction.
+static inline unsigned getUnconditionalBrDisp(int Opc) {
+ unsigned Bits, Scale;
+
+ switch (Opc) {
+ case CSKY::BR16:
+ Bits = 10;
+ Scale = 2;
+ break;
+ case CSKY::BR32:
+ Bits = 16;
+ Scale = 2;
+ break;
+  default:
+    llvm_unreachable("Unknown unconditional branch opcode!");
+ }
+
+ unsigned MaxOffs = ((1 << (Bits - 1)) - 1) * Scale;
+ return MaxOffs;
+}
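
The formula is the usual reach of a signed, scaled PC-relative field: with Bits bits and a Scale-byte unit, the largest forward displacement is (2^(Bits-1) - 1) * Scale. A tiny standalone check of the two encodings handled above (illustrative sketch, not part of the patch):

#include <cassert>

// Largest forward reach of a signed immediate of Bits bits scaled by Scale.
static unsigned maxBranchDisp(unsigned Bits, unsigned Scale) {
  return ((1u << (Bits - 1)) - 1) * Scale;
}

int main() {
  assert(maxBranchDisp(10, 2) == 1022);  // BR16-style 16-bit encodings
  assert(maxBranchDisp(16, 2) == 65534); // BR32
  return 0;
}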
+
+/// findAvailableWater - Look for an existing entry in the WaterList in which
+/// we can place the CPE referenced from U so it's within range of U's MI.
+/// Returns true if found, false if not. If it returns true, WaterIter is set
+/// to the WaterList entry. To ensure that this pass terminates, the CPE
+/// location for a particular CPUser is only allowed to move to a lower
+/// address, so search backward from the end of the list and prefer the first
+/// water that is in range.
+bool CSKYConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset,
+ water_iterator &WaterIter) {
+ if (WaterList.empty())
+ return false;
+
+ unsigned BestGrowth = ~0u;
+ for (water_iterator IP = std::prev(WaterList.end()), B = WaterList.begin();;
+ --IP) {
+ MachineBasicBlock *WaterBB = *IP;
+ // Check if water is in range and is either at a lower address than the
+ // current "high water mark" or a new water block that was created since
+ // the previous iteration by inserting an unconditional branch. In the
+ // latter case, we want to allow resetting the high water mark back to
+ // this new water since we haven't seen it before. Inserting branches
+ // should be relatively uncommon and when it does happen, we want to be
+ // sure to take advantage of it for all the CPEs near that block, so that
+ // we don't insert more branches than necessary.
+ unsigned Growth;
+ if (isWaterInRange(UserOffset, WaterBB, U, Growth) &&
+ (WaterBB->getNumber() < U.HighWaterMark->getNumber() ||
+ NewWaterList.count(WaterBB)) &&
+ Growth < BestGrowth) {
+ // This is the least amount of required padding seen so far.
+ BestGrowth = Growth;
+ WaterIter = IP;
+ LLVM_DEBUG(dbgs() << "Found water after " << printMBBReference(*WaterBB)
+ << " Growth=" << Growth << '\n');
+
+ // Keep looking unless it is perfect.
+ if (BestGrowth == 0)
+ return true;
+ }
+ if (IP == B)
+ break;
+ }
+ return BestGrowth != ~0u;
+}
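
Stripped of the LLVM data structures, the loop above is a backward scan that remembers the candidate needing the least growth and exits early on a perfect fit; scanning from the end is what keeps CPEs moving only toward lower addresses. A schematic version over a plain vector, with Candidate and pickBest as illustrative stand-ins (not code from this patch):

#include <cstddef>
#include <vector>

struct Candidate {
  bool Fits;       // stand-in for the isWaterInRange() check
  unsigned Growth; // bytes of padding this placement would add
};

// Backward scan keeping the least-growth candidate, with an early exit on a
// perfect (zero-growth) fit: the same control flow as findAvailableWater.
static bool pickBest(const std::vector<Candidate> &Water, size_t &BestIdx) {
  unsigned BestGrowth = ~0u;
  for (size_t I = Water.size(); I-- > 0;) {
    if (Water[I].Fits && Water[I].Growth < BestGrowth) {
      BestGrowth = Water[I].Growth;
      BestIdx = I;
      if (BestGrowth == 0)
        return true;
    }
  }
  return BestGrowth != ~0u;
}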
+
+/// createNewWater - No existing WaterList entry will work for
+/// CPUsers[CPUserIndex], so create a place to put the CPE. The end of the
+/// block is used if in range, and the conditional branch munged so control
+/// flow is correct. Otherwise the block is split to create a hole with an
+/// unconditional branch around it. In either case NewMBB is set to a
+/// block following which the new island can be inserted (the WaterList
+/// is not adjusted).
+void CSKYConstantIslands::createNewWater(unsigned CPUserIndex,
+ unsigned UserOffset,
+ MachineBasicBlock *&NewMBB) {
+ CPUser &U = CPUsers[CPUserIndex];
+ MachineInstr *UserMI = U.MI;
+ MachineInstr *CPEMI = U.CPEMI;
+ MachineBasicBlock *UserMBB = UserMI->getParent();
+ const BasicBlockInfo &UserBBI = BBInfo[UserMBB->getNumber()];
+
+ // If the block does not end in an unconditional branch already, and if the
+ // end of the block is within range, make new water there.
+ if (bbHasFallthrough(UserMBB)) {
+ // Size of branch to insert.
+ unsigned Delta = 4;
+ // Compute the offset where the CPE will begin.
+ unsigned CPEOffset = UserBBI.postOffset() + Delta;
+
+ if (isOffsetInRange(UserOffset, CPEOffset, U)) {
+ LLVM_DEBUG(dbgs() << "Split at end of " << printMBBReference(*UserMBB)
+ << format(", expected CPE offset %#x\n", CPEOffset));
+ NewMBB = &*++UserMBB->getIterator();
+ // Add an unconditional branch from UserMBB to fallthrough block. Record
+ // it for branch lengthening; this new branch will not get out of range,
+ // but if the preceding conditional branch is out of range, the targets
+ // will be exchanged, and the altered branch may be out of range, so the
+ // machinery has to know about it.
+
+ // TODO: Add support for 16bit instr.
+ int UncondBr = CSKY::BR32;
+ auto *NewMI = BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr))
+ .addMBB(NewMBB)
+ .getInstr();
+ unsigned MaxDisp = getUnconditionalBrDisp(UncondBr);
+ ImmBranches.push_back(
+ ImmBranch(&UserMBB->back(), MaxDisp, false, UncondBr));
+ BBInfo[UserMBB->getNumber()].Size += TII->getInstSizeInBytes(*NewMI);
+ adjustBBOffsetsAfter(UserMBB);
+ return;
+ }
+ }
+
+ // What a big block. Find a place within the block to split it.
+
+ // Try to split the block so it's fully aligned. Compute the latest split
+ // point where we can add a 4-byte branch instruction, and then align to
+ // Align which is the largest possible alignment in the function.
+ const Align Align = MF->getAlignment();
+ unsigned BaseInsertOffset = UserOffset + U.getMaxDisp();
+ LLVM_DEBUG(dbgs() << format("Split in middle of big block before %#x",
+ BaseInsertOffset));
+
+  // The 4 in the following is for the unconditional branch we'll be inserting.
+  // Alignment of the island is handled inside isOffsetInRange.
+ BaseInsertOffset -= 4;
+
+ LLVM_DEBUG(dbgs() << format(", adjusted to %#x", BaseInsertOffset)
+ << " la=" << Log2(Align) << '\n');
+
+ // This could point off the end of the block if we've already got constant
+ // pool entries following this block; only the last one is in the water list.
+ // Back past any possible branches (allow for a conditional and a maximally
+ // long unconditional).
+ if (BaseInsertOffset + 8 >= UserBBI.postOffset()) {
+ BaseInsertOffset = UserBBI.postOffset() - 8;
+ LLVM_DEBUG(dbgs() << format("Move inside block: %#x\n", BaseInsertOffset));
+ }
+ unsigned EndInsertOffset =
+ BaseInsertOffset + 4 + CPEMI->getOperand(2).getImm();
+ MachineBasicBlock::iterator MI = UserMI;
+ ++MI;
+ unsigned CPUIndex = CPUserIndex + 1;
+ unsigned NumCPUsers = CPUsers.size();
+ for (unsigned Offset = UserOffset + TII->getInstSizeInBytes(*UserMI);
+ Offset < BaseInsertOffset;
+ Offset += TII->getInstSizeInBytes(*MI), MI = std::next(MI)) {
+ assert(MI != UserMBB->end() && "Fell off end of block");
+ if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == MI) {
+ CPUser &U = CPUsers[CPUIndex];
+ if (!isOffsetInRange(Offset, EndInsertOffset, U)) {
+        // Shift the insertion point by one unit of alignment so it is within
+        // reach.
+ BaseInsertOffset -= Align.value();
+ EndInsertOffset -= Align.value();
+ }
+ // This is overly conservative, as we don't account for CPEMIs being
+ // reused within the block, but it doesn't matter much. Also assume CPEs
+ // are added in order with alignment padding. We may eventually be able
+ // to pack the aligned CPEs better.
+ EndInsertOffset += U.CPEMI->getOperand(2).getImm();
+ CPUIndex++;
+ }
+ }
+
+ NewMBB = splitBlockBeforeInstr(*--MI);
+}
+
+/// handleConstantPoolUser - Analyze the specified user, checking to see if it
+/// is out-of-range. If so, pick up the constant pool value and move it some
+/// place in-range. Return true if we changed any addresses (thus must run
+/// another pass of branch lengthening), false otherwise.
+bool CSKYConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
+ CPUser &U = CPUsers[CPUserIndex];
+ MachineInstr *UserMI = U.MI;
+ MachineInstr *CPEMI = U.CPEMI;
+ unsigned CPI = CPEMI->getOperand(1).getIndex();
+ unsigned Size = CPEMI->getOperand(2).getImm();
+ // Compute this only once, it's expensive.
+ unsigned UserOffset = getUserOffset(U);
+
+ // See if the current entry is within range, or there is a clone of it
+ // in range.
+ int result = findInRangeCPEntry(U, UserOffset);
+ if (result == 1)
+ return false;
+ if (result == 2)
+ return true;
+
+ // Look for water where we can place this CPE.
+ MachineBasicBlock *NewIsland = MF->CreateMachineBasicBlock();
+ MachineBasicBlock *NewMBB;
+ water_iterator IP;
+ if (findAvailableWater(U, UserOffset, IP)) {
+ LLVM_DEBUG(dbgs() << "Found water in range\n");
+ MachineBasicBlock *WaterBB = *IP;
+
+ // If the original WaterList entry was "new water" on this iteration,
+ // propagate that to the new island. This is just keeping NewWaterList
+ // updated to match the WaterList, which will be updated below.
+ if (NewWaterList.erase(WaterBB))
+ NewWaterList.insert(NewIsland);
+
+ // The new CPE goes before the following block (NewMBB).
+ NewMBB = &*++WaterBB->getIterator();
+ } else {
+ LLVM_DEBUG(dbgs() << "No water found\n");
+ createNewWater(CPUserIndex, UserOffset, NewMBB);
+
+ // splitBlockBeforeInstr adds to WaterList, which is important when it is
+ // called while handling branches so that the water will be seen on the
+ // next iteration for constant pools, but in this context, we don't want
+ // it. Check for this so it will be removed from the WaterList.
+ // Also remove any entry from NewWaterList.
+ MachineBasicBlock *WaterBB = &*--NewMBB->getIterator();
+ IP = llvm::find(WaterList, WaterBB);
+ if (IP != WaterList.end())
+ NewWaterList.erase(WaterBB);
+
+ // We are adding new water. Update NewWaterList.
+ NewWaterList.insert(NewIsland);
+ }
+
+ // Remove the original WaterList entry; we want subsequent insertions in
+ // this vicinity to go after the one we're about to insert. This
+ // considerably reduces the number of times we have to move the same CPE
+ // more than once and is also important to ensure the algorithm terminates.
+ if (IP != WaterList.end())
+ WaterList.erase(IP);
+
+ // Okay, we know we can put an island before NewMBB now, do it!
+ MF->insert(NewMBB->getIterator(), NewIsland);
+
+ // Update internal data structures to account for the newly inserted MBB.
+ updateForInsertedWaterBlock(NewIsland);
+
+ // Decrement the old entry, and remove it if refcount becomes 0.
+ decrementCPEReferenceCount(CPI, CPEMI);
+
+ // No existing clone of this CPE is within range.
+ // We will be generating a new clone. Get a UID for it.
+ unsigned ID = createPICLabelUId();
+
+ // Now that we have an island to add the CPE to, clone the original CPE and
+ // add it to the island.
+ U.HighWaterMark = NewIsland;
+ U.CPEMI = BuildMI(NewIsland, DebugLoc(), TII->get(CSKY::CONSTPOOL_ENTRY))
+ .addImm(ID)
+ .addConstantPoolIndex(CPI)
+ .addImm(Size);
+ CPEntries[CPI].push_back(CPEntry(U.CPEMI, ID, 1));
+ ++NumCPEs;
+
+ // Mark the basic block as aligned as required by the const-pool entry.
+ NewIsland->setAlignment(getCPEAlign(*U.CPEMI));
+
+ // Increase the size of the island block to account for the new entry.
+ BBInfo[NewIsland->getNumber()].Size += Size;
+ adjustBBOffsetsAfter(&*--NewIsland->getIterator());
+
+ // Finally, change the CPI in the instruction operand to be ID.
+ for (unsigned I = 0, E = UserMI->getNumOperands(); I != E; ++I)
+ if (UserMI->getOperand(I).isCPI()) {
+ UserMI->getOperand(I).setIndex(ID);
+ break;
+ }
+
+ LLVM_DEBUG(
+ dbgs() << " Moved CPE to #" << ID << " CPI=" << CPI
+ << format(" offset=%#x\n", BBInfo[NewIsland->getNumber()].Offset));
+
+ return true;
+}
+
+/// removeDeadCPEMI - Remove a dead constant pool entry instruction. Update
+/// sizes and offsets of impacted basic blocks.
+void CSKYConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
+ MachineBasicBlock *CPEBB = CPEMI->getParent();
+ unsigned Size = CPEMI->getOperand(2).getImm();
+ CPEMI->eraseFromParent();
+ BBInfo[CPEBB->getNumber()].Size -= Size;
+ // All succeeding offsets have the current size value added in, fix this.
+ if (CPEBB->empty()) {
+ BBInfo[CPEBB->getNumber()].Size = 0;
+
+ // This block no longer needs to be aligned.
+ CPEBB->setAlignment(Align(4));
+ } else {
+ // Entries are sorted by descending alignment, so realign from the front.
+ CPEBB->setAlignment(getCPEAlign(*CPEBB->begin()));
+ }
+
+ adjustBBOffsetsAfter(CPEBB);
+ // An island has only one predecessor BB and one successor BB. Check if
+ // this BB's predecessor jumps directly to this BB's successor. This
+ // shouldn't happen currently.
+ assert(!bbIsJumpedOver(CPEBB) && "How did this happen?");
+ // FIXME: remove the empty blocks after all the work is done?
+}
+
+/// removeUnusedCPEntries - Remove constant pool entries whose refcounts
+/// are zero.
+bool CSKYConstantIslands::removeUnusedCPEntries() {
+  bool MadeChange = false;
+ for (unsigned I = 0, E = CPEntries.size(); I != E; ++I) {
+ std::vector<CPEntry> &CPEs = CPEntries[I];
+ for (unsigned J = 0, Ee = CPEs.size(); J != Ee; ++J) {
+ if (CPEs[J].RefCount == 0 && CPEs[J].CPEMI) {
+ removeDeadCPEMI(CPEs[J].CPEMI);
+ CPEs[J].CPEMI = nullptr;
+ MadeChange = true;
+ }
+ }
+ }
+ return MadeChange;
+}
+
+/// isBBInRange - Returns true if the distance between the specified MI and
+/// the specified BB fits in MI's displacement field.
+bool CSKYConstantIslands::isBBInRange(MachineInstr *MI,
+ MachineBasicBlock *DestBB,
+ unsigned MaxDisp) {
+ unsigned BrOffset = getOffsetOf(MI);
+ unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset;
+
+ LLVM_DEBUG(dbgs() << "Branch of destination " << printMBBReference(*DestBB)
+ << " from " << printMBBReference(*MI->getParent())
+ << " max delta=" << MaxDisp << " from " << getOffsetOf(MI)
+ << " to " << DestOffset << " offset "
+ << int(DestOffset - BrOffset) << "\t" << *MI);
+
+ if (BrOffset <= DestOffset) {
+ // Branch before the Dest.
+ if (DestOffset - BrOffset <= MaxDisp)
+ return true;
+ } else {
+ if (BrOffset - DestOffset <= MaxDisp)
+ return true;
+ }
+ return false;
+}
+
+/// fixupImmediateBr - Fix up an immediate branch whose destination is too far
+/// away to fit in its displacement field.
+bool CSKYConstantIslands::fixupImmediateBr(ImmBranch &Br) {
+ MachineInstr *MI = Br.MI;
+ MachineBasicBlock *DestBB = TII->getBranchDestBlock(*MI);
+
+ // Check to see if the DestBB is already in-range.
+ if (isBBInRange(MI, DestBB, Br.MaxDisp))
+ return false;
+
+ if (!Br.IsCond)
+ return fixupUnconditionalBr(Br);
+ return fixupConditionalBr(Br);
+}
+
+/// fixupUnconditionalBr - Fix up an unconditional branch whose destination is
+/// too far away to fit in its displacement field. If the LR register has been
+/// spilled in the epilogue, then we can use BSR to implement a far jump.
+/// Otherwise, add an intermediate branch instruction to a branch.
+bool CSKYConstantIslands::fixupUnconditionalBr(ImmBranch &Br) {
+ MachineInstr *MI = Br.MI;
+ MachineBasicBlock *MBB = MI->getParent();
+
+ if (!MFI->isLRSpilled())
+ report_fatal_error("underestimated function size");
+
+ // Use BSR to implement far jump.
+ Br.MaxDisp = ((1 << (26 - 1)) - 1) * 2;
+ MI->setDesc(TII->get(CSKY::BSR32_BR));
+ BBInfo[MBB->getNumber()].Size += 4;
+ adjustBBOffsetsAfter(MBB);
+ ++NumUBrFixed;
+
+ LLVM_DEBUG(dbgs() << " Changed B to long jump " << *MI);
+
+ return true;
+}
+
+/// fixupConditionalBr - Fix up a conditional branch whose destination is too
+/// far away to fit in its displacement field. It is converted to an inverse
+/// conditional branch + an unconditional branch to the destination.
+bool CSKYConstantIslands::fixupConditionalBr(ImmBranch &Br) {
+ MachineInstr *MI = Br.MI;
+ MachineBasicBlock *DestBB = TII->getBranchDestBlock(*MI);
+
+ SmallVector<MachineOperand, 4> Cond;
+ Cond.push_back(MachineOperand::CreateImm(MI->getOpcode()));
+ Cond.push_back(MI->getOperand(0));
+ TII->reverseBranchCondition(Cond);
+
+ // Add an unconditional branch to the destination and invert the branch
+ // condition to jump over it:
+ // bteqz L1
+ // =>
+ // bnez L2
+ // b L1
+ // L2:
+
+ // If the branch is at the end of its MBB and that has a fall-through block,
+ // direct the updated conditional branch to the fall-through block. Otherwise,
+ // split the MBB before the next instruction.
+ MachineBasicBlock *MBB = MI->getParent();
+ MachineInstr *BMI = &MBB->back();
+ bool NeedSplit = (BMI != MI) || !bbHasFallthrough(MBB);
+
+ ++NumCBrFixed;
+ if (BMI != MI) {
+ if (std::next(MachineBasicBlock::iterator(MI)) == std::prev(MBB->end()) &&
+ BMI->isUnconditionalBranch()) {
+ // Last MI in the BB is an unconditional branch. Can we simply invert the
+ // condition and swap destinations:
+ // beqz L1
+ // b L2
+ // =>
+ // bnez L2
+ // b L1
+ MachineBasicBlock *NewDest = TII->getBranchDestBlock(*BMI);
+ if (isBBInRange(MI, NewDest, Br.MaxDisp)) {
+ LLVM_DEBUG(
+ dbgs() << " Invert Bcc condition and swap its destination with "
+ << *BMI);
+ BMI->getOperand(BMI->getNumExplicitOperands() - 1).setMBB(DestBB);
+ MI->getOperand(MI->getNumExplicitOperands() - 1).setMBB(NewDest);
+
+ MI->setDesc(TII->get(Cond[0].getImm()));
+ return true;
+ }
+ }
+ }
+
+ if (NeedSplit) {
+ splitBlockBeforeInstr(*MI);
+ // No need for the branch to the next block. We're adding an unconditional
+ // branch to the destination.
+ int Delta = TII->getInstSizeInBytes(MBB->back());
+ BBInfo[MBB->getNumber()].Size -= Delta;
+ MBB->back().eraseFromParent();
+ // BBInfo[SplitBB].Offset is wrong temporarily, fixed below
+
+ // The conditional successor will be swapped between the BBs after this, so
+ // update CFG.
+ MBB->addSuccessor(DestBB);
+ std::next(MBB->getIterator())->removeSuccessor(DestBB);
+ }
+ MachineBasicBlock *NextBB = &*++MBB->getIterator();
+
+ LLVM_DEBUG(dbgs() << " Insert B to " << printMBBReference(*DestBB)
+ << " also invert condition and change dest. to "
+ << printMBBReference(*NextBB) << "\n");
+
+ // Insert a new conditional branch and a new unconditional branch.
+ // Also update the ImmBranch as well as adding a new entry for the new branch.
+
+ BuildMI(MBB, DebugLoc(), TII->get(Cond[0].getImm()))
+ .addReg(MI->getOperand(0).getReg())
+ .addMBB(NextBB);
+
+ Br.MI = &MBB->back();
+ BBInfo[MBB->getNumber()].Size += TII->getInstSizeInBytes(MBB->back());
+ BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB);
+ BBInfo[MBB->getNumber()].Size += TII->getInstSizeInBytes(MBB->back());
+ unsigned MaxDisp = getUnconditionalBrDisp(Br.UncondBr);
+ ImmBranches.push_back(ImmBranch(&MBB->back(), MaxDisp, false, Br.UncondBr));
+
+ // Remove the old conditional branch. It may or may not still be in MBB.
+ BBInfo[MI->getParent()->getNumber()].Size -= TII->getInstSizeInBytes(*MI);
+ MI->eraseFromParent();
+ adjustBBOffsetsAfter(MBB);
+ return true;
+}
+
+/// Returns a pass that places constant pool islands and shortens branches.
+FunctionPass *llvm::createCSKYConstantIslandPass() {
+ return new CSKYConstantIslands();
+}
+
+INITIALIZE_PASS(CSKYConstantIslands, DEBUG_TYPE,
+ "CSKY constant island placement and branch shortening pass",
+ false, false)
diff --git a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYConstantPoolValue.cpp b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYConstantPoolValue.cpp
new file mode 100644
index 000000000000..d4c4bb847237
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYConstantPoolValue.cpp
@@ -0,0 +1,216 @@
+//===-- CSKYConstantPoolValue.cpp - CSKY constantpool value ---------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the CSKY specific constantpool value class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CSKYConstantPoolValue.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// CSKYConstantPoolValue
+//===----------------------------------------------------------------------===//
+
+CSKYConstantPoolValue::CSKYConstantPoolValue(Type *Ty, CSKYCP::CSKYCPKind Kind,
+ unsigned PCAdjust,
+ CSKYCP::CSKYCPModifier Modifier,
+ bool AddCurrentAddress,
+ unsigned ID)
+ : MachineConstantPoolValue(Ty), Kind(Kind), PCAdjust(PCAdjust),
+ Modifier(Modifier), AddCurrentAddress(AddCurrentAddress), LabelId(ID) {}
+
+const char *CSKYConstantPoolValue::getModifierText() const {
+ switch (Modifier) {
+ case CSKYCP::ADDR:
+ return "ADDR";
+ case CSKYCP::GOT:
+ return "GOT";
+ case CSKYCP::GOTOFF:
+ return "GOTOFF";
+ case CSKYCP::PLT:
+ return "PLT";
+ case CSKYCP::TLSIE:
+ return "TLSIE";
+ case CSKYCP::TLSLE:
+ return "TLSLE";
+ case CSKYCP::TLSGD:
+ return "TLSGD";
+ case CSKYCP::NO_MOD:
+ return "";
+ }
+ llvm_unreachable("Unknown modifier!");
+}
+
+int CSKYConstantPoolValue::getExistingMachineCPValue(MachineConstantPool *CP,
+ Align Alignment) {
+ llvm_unreachable("Shouldn't be calling this directly!");
+}
+
+void CSKYConstantPoolValue::addSelectionDAGCSEId(FoldingSetNodeID &ID) {
+ ID.AddInteger(LabelId);
+ ID.AddInteger(PCAdjust);
+ ID.AddInteger(Modifier);
+}
+
+void CSKYConstantPoolValue::print(raw_ostream &O) const {
+ if (Modifier)
+ O << "(" << getModifierText() << ")";
+ if (PCAdjust)
+ O << " + " << PCAdjust;
+}
+
+//===----------------------------------------------------------------------===//
+// CSKYConstantPoolConstant
+//===----------------------------------------------------------------------===//
+
+CSKYConstantPoolConstant::CSKYConstantPoolConstant(
+ const Constant *C, CSKYCP::CSKYCPKind Kind, unsigned PCAdjust,
+ CSKYCP::CSKYCPModifier Modifier, bool AddCurrentAddress, unsigned ID)
+ : CSKYConstantPoolValue(C->getType(), Kind, PCAdjust, Modifier,
+ AddCurrentAddress, ID),
+ CVal(C) {}
+
+CSKYConstantPoolConstant *CSKYConstantPoolConstant::Create(
+ const Constant *C, CSKYCP::CSKYCPKind Kind, unsigned PCAdjust,
+ CSKYCP::CSKYCPModifier Modifier, bool AddCurrentAddress, unsigned ID) {
+ return new CSKYConstantPoolConstant(C, Kind, PCAdjust, Modifier,
+ AddCurrentAddress, ID);
+}
+
+const GlobalValue *CSKYConstantPoolConstant::getGV() const {
+ assert(isa<GlobalValue>(CVal) && "CVal should be GlobalValue");
+ return cast<GlobalValue>(CVal);
+}
+
+const BlockAddress *CSKYConstantPoolConstant::getBlockAddress() const {
+ assert(isa<BlockAddress>(CVal) && "CVal should be BlockAddress");
+ return cast<BlockAddress>(CVal);
+}
+
+int CSKYConstantPoolConstant::getExistingMachineCPValue(MachineConstantPool *CP,
+ Align Alignment) {
+ return getExistingMachineCPValueImpl<CSKYConstantPoolConstant>(CP, Alignment);
+}
+
+void CSKYConstantPoolConstant::addSelectionDAGCSEId(FoldingSetNodeID &ID) {
+ ID.AddPointer(CVal);
+
+ CSKYConstantPoolValue::addSelectionDAGCSEId(ID);
+}
+
+void CSKYConstantPoolConstant::print(raw_ostream &O) const {
+ O << CVal->getName();
+ CSKYConstantPoolValue::print(O);
+}
+
+//===----------------------------------------------------------------------===//
+// CSKYConstantPoolSymbol
+//===----------------------------------------------------------------------===//
+
+CSKYConstantPoolSymbol::CSKYConstantPoolSymbol(Type *Ty, const char *S,
+ unsigned PCAdjust,
+ CSKYCP::CSKYCPModifier Modifier,
+ bool AddCurrentAddress)
+ : CSKYConstantPoolValue(Ty, CSKYCP::CPExtSymbol, PCAdjust, Modifier,
+ AddCurrentAddress),
+      S(S) {}
+
+CSKYConstantPoolSymbol *
+CSKYConstantPoolSymbol::Create(Type *Ty, const char *S, unsigned PCAdjust,
+ CSKYCP::CSKYCPModifier Modifier) {
+ return new CSKYConstantPoolSymbol(Ty, S, PCAdjust, Modifier, false);
+}
+
+int CSKYConstantPoolSymbol::getExistingMachineCPValue(MachineConstantPool *CP,
+ Align Alignment) {
+
+ return getExistingMachineCPValueImpl<CSKYConstantPoolSymbol>(CP, Alignment);
+}
+
+void CSKYConstantPoolSymbol::addSelectionDAGCSEId(FoldingSetNodeID &ID) {
+ ID.AddString(S);
+ CSKYConstantPoolValue::addSelectionDAGCSEId(ID);
+}
+
+void CSKYConstantPoolSymbol::print(raw_ostream &O) const {
+ O << S;
+ CSKYConstantPoolValue::print(O);
+}
+
+//===----------------------------------------------------------------------===//
+// CSKYConstantPoolMBB
+//===----------------------------------------------------------------------===//
+
+CSKYConstantPoolMBB::CSKYConstantPoolMBB(Type *Ty, const MachineBasicBlock *Mbb,
+ unsigned PCAdjust,
+ CSKYCP::CSKYCPModifier Modifier,
+ bool AddCurrentAddress)
+ : CSKYConstantPoolValue(Ty, CSKYCP::CPMachineBasicBlock, PCAdjust, Modifier,
+ AddCurrentAddress),
+ MBB(Mbb) {}
+
+CSKYConstantPoolMBB *CSKYConstantPoolMBB::Create(Type *Ty,
+ const MachineBasicBlock *Mbb,
+ unsigned PCAdjust) {
+ return new CSKYConstantPoolMBB(Ty, Mbb, PCAdjust, CSKYCP::ADDR, false);
+}
+
+int CSKYConstantPoolMBB::getExistingMachineCPValue(MachineConstantPool *CP,
+ Align Alignment) {
+ return getExistingMachineCPValueImpl<CSKYConstantPoolMBB>(CP, Alignment);
+}
+
+void CSKYConstantPoolMBB::addSelectionDAGCSEId(FoldingSetNodeID &ID) {
+ ID.AddPointer(MBB);
+ CSKYConstantPoolValue::addSelectionDAGCSEId(ID);
+}
+
+void CSKYConstantPoolMBB::print(raw_ostream &O) const {
+ O << "BB#" << MBB->getNumber();
+ CSKYConstantPoolValue::print(O);
+}
+
+//===----------------------------------------------------------------------===//
+// CSKYConstantPoolJT
+//===----------------------------------------------------------------------===//
+
+CSKYConstantPoolJT::CSKYConstantPoolJT(Type *Ty, int JTIndex, unsigned PCAdj,
+ CSKYCP::CSKYCPModifier Modifier,
+ bool AddCurrentAddress)
+ : CSKYConstantPoolValue(Ty, CSKYCP::CPJT, PCAdj, Modifier,
+ AddCurrentAddress),
+ JTI(JTIndex) {}
+
+CSKYConstantPoolJT *
+CSKYConstantPoolJT::Create(Type *Ty, int JTI, unsigned PCAdj,
+ CSKYCP::CSKYCPModifier Modifier) {
+ return new CSKYConstantPoolJT(Ty, JTI, PCAdj, Modifier, false);
+}
+
+int CSKYConstantPoolJT::getExistingMachineCPValue(MachineConstantPool *CP,
+ Align Alignment) {
+ return getExistingMachineCPValueImpl<CSKYConstantPoolJT>(CP, Alignment);
+}
+
+void CSKYConstantPoolJT::addSelectionDAGCSEId(FoldingSetNodeID &ID) {
+ ID.AddInteger(JTI);
+ CSKYConstantPoolValue::addSelectionDAGCSEId(ID);
+}
+
+void CSKYConstantPoolJT::print(raw_ostream &O) const {
+ O << "JTI#" << JTI;
+ CSKYConstantPoolValue::print(O);
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYConstantPoolValue.h b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYConstantPoolValue.h
new file mode 100644
index 000000000000..2eff9404a34c
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYConstantPoolValue.h
@@ -0,0 +1,221 @@
+//===-- CSKYConstantPoolValue.h - CSKY constantpool value -----*- C++ -*---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the CSKY specific constantpool value class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_CSKY_CONSTANTPOOLVALUE_H
+#define LLVM_TARGET_CSKY_CONSTANTPOOLVALUE_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cstddef>
+
+namespace llvm {
+
+class BlockAddress;
+class Constant;
+class GlobalValue;
+class LLVMContext;
+class MachineBasicBlock;
+
+namespace CSKYCP {
+enum CSKYCPKind {
+ CPValue,
+ CPExtSymbol,
+ CPBlockAddress,
+ CPMachineBasicBlock,
+ CPJT
+};
+
+enum CSKYCPModifier { NO_MOD, ADDR, GOT, GOTOFF, PLT, TLSLE, TLSIE, TLSGD };
+} // namespace CSKYCP
+
+/// CSKYConstantPoolValue - CSKY specific constantpool value. This is used to
+/// represent PC-relative displacement between the address of the load
+/// instruction and the constant being loaded, i.e. (&GV-(LPIC+8)).
+class CSKYConstantPoolValue : public MachineConstantPoolValue {
+protected:
+ CSKYCP::CSKYCPKind Kind; // Kind of constant.
+ unsigned PCAdjust; // Extra adjustment if constantpool is pc-relative.
+ CSKYCP::CSKYCPModifier Modifier; // GV modifier
+ bool AddCurrentAddress;
+
+ unsigned LabelId = 0;
+
+ CSKYConstantPoolValue(Type *Ty, CSKYCP::CSKYCPKind Kind, unsigned PCAdjust,
+ CSKYCP::CSKYCPModifier Modifier, bool AddCurrentAddress,
+ unsigned ID = 0);
+
+public:
+ const char *getModifierText() const;
+ unsigned getPCAdjustment() const { return PCAdjust; }
+ bool mustAddCurrentAddress() const { return AddCurrentAddress; }
+ CSKYCP::CSKYCPModifier getModifier() const { return Modifier; }
+ unsigned getLabelID() const { return LabelId; }
+
+ bool isGlobalValue() const { return Kind == CSKYCP::CPValue; }
+ bool isExtSymbol() const { return Kind == CSKYCP::CPExtSymbol; }
+ bool isBlockAddress() const { return Kind == CSKYCP::CPBlockAddress; }
+ bool isMachineBasicBlock() const {
+ return Kind == CSKYCP::CPMachineBasicBlock;
+ }
+ bool isJT() const { return Kind == CSKYCP::CPJT; }
+
+ int getExistingMachineCPValue(MachineConstantPool *CP,
+ Align Alignment) override;
+
+ void addSelectionDAGCSEId(FoldingSetNodeID &ID) override;
+
+ void print(raw_ostream &O) const override;
+
+ bool equals(const CSKYConstantPoolValue *A) const {
+ return this->LabelId == A->LabelId && this->PCAdjust == A->PCAdjust &&
+ this->Modifier == A->Modifier;
+ }
+
+ template <typename Derived>
+ int getExistingMachineCPValueImpl(MachineConstantPool *CP, Align Alignment) {
+ const std::vector<MachineConstantPoolEntry> &Constants = CP->getConstants();
+ for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
+ if (Constants[i].isMachineConstantPoolEntry() &&
+ Constants[i].getAlign() >= Alignment) {
+ auto *CPV =
+ static_cast<CSKYConstantPoolValue *>(Constants[i].Val.MachineCPVal);
+ if (Derived *APC = dyn_cast<Derived>(CPV))
+ if (cast<Derived>(this)->equals(APC))
+ return i;
+ }
+ }
+
+ return -1;
+ }
+};
+
+/// CSKY-specific constant pool values for Constants,
+/// Functions, and BlockAddresses.
+class CSKYConstantPoolConstant : public CSKYConstantPoolValue {
+ const Constant *CVal; // Constant being loaded.
+
+ CSKYConstantPoolConstant(const Constant *C, CSKYCP::CSKYCPKind Kind,
+ unsigned PCAdjust, CSKYCP::CSKYCPModifier Modifier,
+ bool AddCurrentAddress, unsigned ID);
+
+public:
+ static CSKYConstantPoolConstant *
+ Create(const Constant *C, CSKYCP::CSKYCPKind Kind, unsigned PCAdjust,
+ CSKYCP::CSKYCPModifier Modifier, bool AddCurrentAddress,
+ unsigned ID = 0);
+ const GlobalValue *getGV() const;
+ const BlockAddress *getBlockAddress() const;
+
+ int getExistingMachineCPValue(MachineConstantPool *CP,
+ Align Alignment) override;
+ void addSelectionDAGCSEId(FoldingSetNodeID &ID) override;
+ void print(raw_ostream &O) const override;
+
+ bool equals(const CSKYConstantPoolConstant *A) const {
+ return CVal == A->CVal && CSKYConstantPoolValue::equals(A);
+ }
+
+ static bool classof(const CSKYConstantPoolValue *APV) {
+ return APV->isGlobalValue() || APV->isBlockAddress();
+ }
+};
+
+/// CSKYConstantPoolSymbol - CSKY-specific constantpool values for external
+/// symbols.
+class CSKYConstantPoolSymbol : public CSKYConstantPoolValue {
+ const std::string S; // ExtSymbol being loaded.
+
+ CSKYConstantPoolSymbol(Type *Ty, const char *S, unsigned PCAdjust,
+ CSKYCP::CSKYCPModifier Modifier,
+ bool AddCurrentAddress);
+
+public:
+ static CSKYConstantPoolSymbol *Create(Type *Ty, const char *S,
+ unsigned PCAdjust,
+ CSKYCP::CSKYCPModifier Modifier);
+
+ StringRef getSymbol() const { return S; }
+
+ int getExistingMachineCPValue(MachineConstantPool *CP,
+ Align Alignment) override;
+ void addSelectionDAGCSEId(FoldingSetNodeID &ID) override;
+ void print(raw_ostream &O) const override;
+
+ bool equals(const CSKYConstantPoolSymbol *A) const {
+ return S == A->S && CSKYConstantPoolValue::equals(A);
+ }
+
+ static bool classof(const CSKYConstantPoolValue *ACPV) {
+ return ACPV->isExtSymbol();
+ }
+};
+
+/// CSKYConstantPoolMBB - CSKY-specific constantpool value of a machine basic
+/// block.
+class CSKYConstantPoolMBB : public CSKYConstantPoolValue {
+ const MachineBasicBlock *MBB; // Machine basic block.
+
+ CSKYConstantPoolMBB(Type *Ty, const MachineBasicBlock *Mbb, unsigned PCAdjust,
+ CSKYCP::CSKYCPModifier Modifier, bool AddCurrentAddress);
+
+public:
+ static CSKYConstantPoolMBB *Create(Type *Ty, const MachineBasicBlock *Mbb,
+ unsigned PCAdjust);
+
+ const MachineBasicBlock *getMBB() const { return MBB; }
+
+ int getExistingMachineCPValue(MachineConstantPool *CP,
+ Align Alignment) override;
+ void addSelectionDAGCSEId(FoldingSetNodeID &ID) override;
+ void print(raw_ostream &O) const override;
+
+ bool equals(const CSKYConstantPoolMBB *A) const {
+ return MBB == A->MBB && CSKYConstantPoolValue::equals(A);
+ }
+
+ static bool classof(const CSKYConstantPoolValue *ACPV) {
+ return ACPV->isMachineBasicBlock();
+ }
+};
+
+/// CSKY-specific constantpool value of a jump table.
+class CSKYConstantPoolJT : public CSKYConstantPoolValue {
+  signed JTI; // Jump table index.
+
+ CSKYConstantPoolJT(Type *Ty, int JTIndex, unsigned PCAdj,
+ CSKYCP::CSKYCPModifier Modifier, bool AddCurrentAddress);
+
+public:
+ static CSKYConstantPoolJT *Create(Type *Ty, int JTI, unsigned PCAdj,
+ CSKYCP::CSKYCPModifier Modifier);
+
+ signed getJTI() { return JTI; }
+
+ int getExistingMachineCPValue(MachineConstantPool *CP,
+ Align Alignment) override;
+ void addSelectionDAGCSEId(FoldingSetNodeID &ID) override;
+ void print(raw_ostream &O) const override;
+
+ bool equals(const CSKYConstantPoolJT *A) const {
+ return JTI == A->JTI && CSKYConstantPoolValue::equals(A);
+ }
+
+ static bool classof(const CSKYConstantPoolValue *ACPV) {
+ return ACPV->isJT();
+ }
+};
+
+} // namespace llvm
+
+#endif
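
As a usage sketch only (the helper below is hypothetical, not part of the patch): a value such as CSKYConstantPoolConstant is built through its Create() factory and handed to MachineConstantPool::getConstantPoolIndex(), which consults getExistingMachineCPValue() so that equal values interned twice resolve to the same index. The GOT modifier, PC adjustment and 4-byte alignment below are arbitrary example choices.

#include "CSKYConstantPoolValue.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/Support/Alignment.h"
using namespace llvm;

// Hypothetical helper: intern a GOT-relative reference to GV and return its
// constant pool index. Equal CSKYConstantPoolConstants collapse to one entry.
static unsigned getOrCreateCPIndex(MachineFunction &MF, const GlobalValue *GV,
                                   unsigned PCAdjust) {
  CSKYConstantPoolConstant *CPV = CSKYConstantPoolConstant::Create(
      GV, CSKYCP::CPValue, PCAdjust, CSKYCP::GOT, /*AddCurrentAddress=*/false);
  return MF.getConstantPool()->getConstantPoolIndex(CPV, Align(4));
}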
diff --git a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYFrameLowering.cpp b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYFrameLowering.cpp
index 3a8ee5713584..3bf001c2cee7 100644
--- a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYFrameLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYFrameLowering.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "CSKYFrameLowering.h"
+#include "CSKYMachineFunctionInfo.h"
#include "CSKYSubtarget.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -46,12 +47,555 @@ bool CSKYFrameLowering::hasBP(const MachineFunction &MF) const {
return MFI.hasVarSizedObjects();
}
+// Determines the size of the frame and maximum call frame size.
+void CSKYFrameLowering::determineFrameLayout(MachineFunction &MF) const {
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ const CSKYRegisterInfo *RI = STI.getRegisterInfo();
+
+ // Get the number of bytes to allocate from the FrameInfo.
+ uint64_t FrameSize = MFI.getStackSize();
+
+ // Get the alignment.
+ Align StackAlign = getStackAlign();
+ if (RI->hasStackRealignment(MF)) {
+ Align MaxStackAlign = std::max(StackAlign, MFI.getMaxAlign());
+ FrameSize += (MaxStackAlign.value() - StackAlign.value());
+ StackAlign = MaxStackAlign;
+ }
+
+ // Set Max Call Frame Size
+ uint64_t MaxCallSize = alignTo(MFI.getMaxCallFrameSize(), StackAlign);
+ MFI.setMaxCallFrameSize(MaxCallSize);
+
+ // Make sure the frame is aligned.
+ FrameSize = alignTo(FrameSize, StackAlign);
+
+ // Update frame info.
+ MFI.setStackSize(FrameSize);
+}
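
Both roundings above are plain power-of-two align-up, i.e. alignTo(x, a) = (x + a - 1) & ~(a - 1) when a is a power of two. A standalone check with illustrative sizes (sketch, not part of the patch):

#include <cassert>
#include <cstdint>

// Power-of-two round-up, matching what alignTo does for these inputs.
static uint64_t alignUp(uint64_t Value, uint64_t Alignment) {
  return (Value + Alignment - 1) & ~(Alignment - 1);
}

int main() {
  assert(alignUp(20, 8) == 24);    // 20-byte frame, 8-byte stack alignment
  assert(alignUp(24, 8) == 24);    // already aligned
  assert(alignUp(100, 16) == 112); // realigned frame with 16-byte max align
  return 0;
}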
+
void CSKYFrameLowering::emitPrologue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
- // FIXME: Implement this when we have function calls
+ CSKYMachineFunctionInfo *CFI = MF.getInfo<CSKYMachineFunctionInfo>();
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ const CSKYRegisterInfo *RI = STI.getRegisterInfo();
+ const CSKYInstrInfo *TII = STI.getInstrInfo();
+ MachineBasicBlock::iterator MBBI = MBB.begin();
+ const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+
+ Register FPReg = getFPReg(STI);
+ Register SPReg = CSKY::R14;
+ Register BPReg = getBPReg(STI);
+
+ // Debug location must be unknown since the first debug location is used
+ // to determine the end of the prologue.
+ DebugLoc DL;
+
+ if (MF.getFunction().hasFnAttribute("interrupt"))
+ BuildMI(MBB, MBBI, DL, TII->get(CSKY::NIE));
+
+ // Determine the correct frame layout
+ determineFrameLayout(MF);
+
+ // FIXME (note copied from Lanai): This appears to be overallocating. Needs
+ // investigation. Get the number of bytes to allocate from the FrameInfo.
+ uint64_t StackSize = MFI.getStackSize();
+
+ // Early exit if there is no need to allocate on the stack
+ if (StackSize == 0 && !MFI.adjustsStack())
+ return;
+
+ const auto &CSI = MFI.getCalleeSavedInfo();
+
+ unsigned spillAreaSize = CFI->getCalleeSaveAreaSize();
+
+ uint64_t ActualSize = spillAreaSize + CFI->getVarArgsSaveSize();
+
+ // First part stack allocation.
+ adjustReg(MBB, MBBI, DL, SPReg, SPReg, -(static_cast<int64_t>(ActualSize)),
+ MachineInstr::NoFlags);
+
+ // Emit ".cfi_def_cfa_offset FirstSPAdjustAmount"
+ unsigned CFIIndex =
+ MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(nullptr, ActualSize));
+ BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+
+ // The frame pointer is callee-saved, and code has been generated for us to
+ // save it to the stack. We need to skip over the storing of callee-saved
+ // registers as the frame pointer must be modified after it has been saved
+ // to the stack, not before.
+ // FIXME: assumes exactly one instruction is used to save each callee-saved
+ // register.
+ std::advance(MBBI, CSI.size());
+
+ // Iterate over list of callee-saved registers and emit .cfi_offset
+ // directives.
+ for (const auto &Entry : CSI) {
+ int64_t Offset = MFI.getObjectOffset(Entry.getFrameIdx());
+ Register Reg = Entry.getReg();
+
+ unsigned Num = TRI->getRegSizeInBits(Reg, MRI) / 32;
+ for (unsigned i = 0; i < Num; i++) {
+ unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
+ nullptr, RI->getDwarfRegNum(Reg, true) + i, Offset + i * 4));
+ BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
+ }
+
+ // Generate new FP.
+ if (hasFP(MF)) {
+ BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::COPY), FPReg)
+ .addReg(SPReg)
+ .setMIFlag(MachineInstr::FrameSetup);
+
+ // Emit ".cfi_def_cfa_register $fp"
+ unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createDefCfaRegister(
+ nullptr, RI->getDwarfRegNum(FPReg, true)));
+ BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+
+ // Second part stack allocation.
+ adjustReg(MBB, MBBI, DL, SPReg, SPReg,
+ -(static_cast<int64_t>(StackSize - ActualSize)),
+ MachineInstr::NoFlags);
+
+ // Realign Stack
+ const CSKYRegisterInfo *RI = STI.getRegisterInfo();
+ if (RI->hasStackRealignment(MF)) {
+ Align MaxAlignment = MFI.getMaxAlign();
+
+ const CSKYInstrInfo *TII = STI.getInstrInfo();
+ if (STI.hasE2() && isUInt<12>(~(-(int)MaxAlignment.value()))) {
+ BuildMI(MBB, MBBI, DL, TII->get(CSKY::ANDNI32), SPReg)
+ .addReg(SPReg)
+ .addImm(~(-(int)MaxAlignment.value()));
+ } else {
+ unsigned ShiftAmount = Log2(MaxAlignment);
+
+ if (STI.hasE2()) {
+ Register VR =
+ MF.getRegInfo().createVirtualRegister(&CSKY::GPRRegClass);
+ BuildMI(MBB, MBBI, DL, TII->get(CSKY::LSRI32), VR)
+ .addReg(SPReg)
+ .addImm(ShiftAmount);
+ BuildMI(MBB, MBBI, DL, TII->get(CSKY::LSLI32), SPReg)
+ .addReg(VR)
+ .addImm(ShiftAmount);
+ } else {
+ Register VR =
+ MF.getRegInfo().createVirtualRegister(&CSKY::mGPRRegClass);
+ BuildMI(MBB, MBBI, DL, TII->get(CSKY::MOV16), VR).addReg(SPReg);
+ BuildMI(MBB, MBBI, DL, TII->get(CSKY::LSRI16), VR)
+ .addReg(VR)
+ .addImm(ShiftAmount);
+ BuildMI(MBB, MBBI, DL, TII->get(CSKY::LSLI16), VR)
+ .addReg(VR)
+ .addImm(ShiftAmount);
+ BuildMI(MBB, MBBI, DL, TII->get(CSKY::MOV16), SPReg).addReg(VR);
+ }
+ }
+ }
+
+ // FP will be used to restore the frame in the epilogue, so we need
+ // another base register BP to record SP after re-alignment. SP will
+ // track the current stack after allocating variable sized objects.
+ if (hasBP(MF)) {
+ // move BP, SP
+ BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::COPY), BPReg).addReg(SPReg);
+ }
+
+ } else {
+ adjustReg(MBB, MBBI, DL, SPReg, SPReg,
+ -(static_cast<int64_t>(StackSize - ActualSize)),
+ MachineInstr::NoFlags);
+ // Emit ".cfi_def_cfa_offset StackSize"
+ unsigned CFIIndex = MF.addFrameInst(
+ MCCFIInstruction::cfiDefCfaOffset(nullptr, MFI.getStackSize()));
+ BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
}
void CSKYFrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
- // FIXME: Implement this when we have function calls
+ CSKYMachineFunctionInfo *CFI = MF.getInfo<CSKYMachineFunctionInfo>();
+
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ Register FPReg = getFPReg(STI);
+ Register SPReg = CSKY::R14;
+
+ // Get the insert location for the epilogue. If there were no terminators in
+ // the block, get the last instruction.
+ MachineBasicBlock::iterator MBBI = MBB.end();
+ DebugLoc DL;
+ if (!MBB.empty()) {
+ MBBI = MBB.getFirstTerminator();
+ if (MBBI == MBB.end())
+ MBBI = MBB.getLastNonDebugInstr();
+ DL = MBBI->getDebugLoc();
+
+ // If this is not a terminator, the actual insert location should be after
+ // the last instruction.
+ if (!MBBI->isTerminator())
+ MBBI = std::next(MBBI);
+ }
+
+ const auto &CSI = MFI.getCalleeSavedInfo();
+ uint64_t StackSize = MFI.getStackSize();
+
+ uint64_t ActualSize =
+ CFI->getCalleeSaveAreaSize() + CFI->getVarArgsSaveSize();
+
+ // Skip to before the restores of callee-saved registers
+ // FIXME: assumes exactly one instruction is used to restore each
+ // callee-saved register.
+ auto LastFrameDestroy = MBBI;
+ if (!CSI.empty())
+ LastFrameDestroy = std::prev(MBBI, CSI.size());
+
+ if (hasFP(MF)) {
+ const CSKYInstrInfo *TII = STI.getInstrInfo();
+ BuildMI(MBB, LastFrameDestroy, DL, TII->get(TargetOpcode::COPY), SPReg)
+ .addReg(FPReg)
+ .setMIFlag(MachineInstr::NoFlags);
+ } else {
+ adjustReg(MBB, LastFrameDestroy, DL, SPReg, SPReg, (StackSize - ActualSize),
+ MachineInstr::FrameDestroy);
+ }
+
+ adjustReg(MBB, MBBI, DL, SPReg, SPReg, ActualSize,
+ MachineInstr::FrameDestroy);
+}
+
+static unsigned estimateRSStackSizeLimit(MachineFunction &MF,
+ const CSKYSubtarget &STI) {
+ unsigned Limit = (1 << 12) - 1;
+
+ for (auto &MBB : MF) {
+ for (auto &MI : MBB) {
+ if (MI.isDebugInstr())
+ continue;
+
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ if (!MI.getOperand(i).isFI())
+ continue;
+
+ if (MI.getOpcode() == CSKY::SPILL_CARRY ||
+ MI.getOpcode() == CSKY::RESTORE_CARRY ||
+ MI.getOpcode() == CSKY::STORE_PAIR ||
+ MI.getOpcode() == CSKY::LOAD_PAIR) {
+ Limit = std::min(Limit, ((1U << 12) - 1) * 4);
+ break;
+ }
+
+ if (MI.getOpcode() == CSKY::ADDI32) {
+ Limit = std::min(Limit, (1U << 12));
+ break;
+ }
+
+ if (MI.getOpcode() == CSKY::ADDI16XZ) {
+ Limit = std::min(Limit, (1U << 3));
+ break;
+ }
+
+        // ADDI16 will not require an extra register;
+        // it can reuse the destination.
+ if (MI.getOpcode() == CSKY::ADDI16)
+ break;
+
+ // Otherwise check the addressing mode.
+ switch (MI.getDesc().TSFlags & CSKYII::AddrModeMask) {
+ default:
+ LLVM_DEBUG(MI.dump());
+ llvm_unreachable(
+ "Unhandled addressing mode in stack size limit calculation");
+ case CSKYII::AddrMode32B:
+ Limit = std::min(Limit, (1U << 12) - 1);
+ break;
+ case CSKYII::AddrMode32H:
+ Limit = std::min(Limit, ((1U << 12) - 1) * 2);
+ break;
+ case CSKYII::AddrMode32WD:
+ Limit = std::min(Limit, ((1U << 12) - 1) * 4);
+ break;
+ case CSKYII::AddrMode16B:
+ Limit = std::min(Limit, (1U << 5) - 1);
+ break;
+ case CSKYII::AddrMode16H:
+ Limit = std::min(Limit, ((1U << 5) - 1) * 2);
+ break;
+ case CSKYII::AddrMode16W:
+ Limit = std::min(Limit, ((1U << 5) - 1) * 4);
+ break;
+ case CSKYII::AddrMode32SDF:
+ Limit = std::min(Limit, ((1U << 8) - 1) * 4);
+ break;
+ }
+ break; // At most one FI per instruction
+ }
+ }
+ }
+
+ return Limit;
+}
+
+void CSKYFrameLowering::determineCalleeSaves(MachineFunction &MF,
+ BitVector &SavedRegs,
+ RegScavenger *RS) const {
+ TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
+
+ CSKYMachineFunctionInfo *CFI = MF.getInfo<CSKYMachineFunctionInfo>();
+ const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+
+ if (hasFP(MF))
+ SavedRegs.set(CSKY::R8);
+
+  // Mark BP as used if the function has a dedicated base pointer.
+ if (hasBP(MF))
+ SavedRegs.set(CSKY::R7);
+
+  // If the interrupt attribute is present and there are calls in the handler,
+  // unconditionally save all caller-saved registers and all FP registers,
+  // regardless of whether they are used.
+ if (MF.getFunction().hasFnAttribute("interrupt") && MFI.hasCalls()) {
+
+ static const MCPhysReg CSRegs[] = {CSKY::R0, CSKY::R1, CSKY::R2, CSKY::R3,
+ CSKY::R12, CSKY::R13, 0};
+
+ for (unsigned i = 0; CSRegs[i]; ++i)
+ SavedRegs.set(CSRegs[i]);
+
+ if (STI.hasHighRegisters()) {
+
+ static const MCPhysReg CSHRegs[] = {CSKY::R18, CSKY::R19, CSKY::R20,
+ CSKY::R21, CSKY::R22, CSKY::R23,
+ CSKY::R24, CSKY::R25, 0};
+
+ for (unsigned i = 0; CSHRegs[i]; ++i)
+ SavedRegs.set(CSHRegs[i]);
+ }
+
+ static const MCPhysReg CSF32Regs[] = {
+ CSKY::F8_32, CSKY::F9_32, CSKY::F10_32,
+ CSKY::F11_32, CSKY::F12_32, CSKY::F13_32,
+ CSKY::F14_32, CSKY::F15_32, 0};
+ static const MCPhysReg CSF64Regs[] = {
+ CSKY::F8_64, CSKY::F9_64, CSKY::F10_64,
+ CSKY::F11_64, CSKY::F12_64, CSKY::F13_64,
+ CSKY::F14_64, CSKY::F15_64, 0};
+
+    const MCPhysReg *FRegs = nullptr;
+ if (STI.hasFPUv2DoubleFloat() || STI.hasFPUv3DoubleFloat())
+ FRegs = CSF64Regs;
+ else if (STI.hasFPUv2SingleFloat() || STI.hasFPUv3SingleFloat())
+ FRegs = CSF32Regs;
+
+    if (FRegs != nullptr) {
+ const MCPhysReg *Regs = MF.getRegInfo().getCalleeSavedRegs();
+
+ for (unsigned i = 0; Regs[i]; ++i)
+ if (CSKY::FPR32RegClass.contains(Regs[i]) ||
+ CSKY::FPR64RegClass.contains(Regs[i])) {
+ unsigned x = 0;
+ for (; FRegs[x]; ++x)
+ if (FRegs[x] == Regs[i])
+ break;
+ if (FRegs[x] == 0)
+ SavedRegs.set(Regs[i]);
+ }
+ }
+ }
+
+ CFI->setLRIsSpilled(SavedRegs.test(CSKY::R15));
+
+ unsigned CSStackSize = 0;
+ for (unsigned Reg : SavedRegs.set_bits()) {
+ auto RegSize = TRI->getRegSizeInBits(Reg, MRI) / 8;
+ CSStackSize += RegSize;
+ }
+
+ CFI->setCalleeSaveAreaSize(CSStackSize);
+
+ uint64_t Limit = estimateRSStackSizeLimit(MF, STI);
+
+ bool BigFrame = (MFI.estimateStackSize(MF) + CSStackSize >= Limit);
+
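+  // Reserve an emergency spill slot for the register scavenger when frame
+  // offsets may not fit in the available immediates, when the carry register
+  // is spilled, or when E2 instructions are unavailable.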
+ if (BigFrame || CFI->isCRSpilled() || !STI.hasE2()) {
+ const TargetRegisterClass *RC = &CSKY::GPRRegClass;
+ unsigned size = TRI->getSpillSize(*RC);
+ Align align = TRI->getSpillAlign(*RC);
+
+ RS->addScavengingFrameIndex(MFI.CreateStackObject(size, align, false));
+ }
+}
+
+// Do not reserve stack space in the prologue for outgoing arguments when the
+// function contains variable-sized objects; in that case
+// eliminateCallFramePseudoInstr reserves the space instead.
+bool CSKYFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
+ return !MF.getFrameInfo().hasVarSizedObjects();
+}
+
+bool CSKYFrameLowering::spillCalleeSavedRegisters(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+ ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
+ if (CSI.empty())
+ return true;
+
+ MachineFunction *MF = MBB.getParent();
+ const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();
+ DebugLoc DL;
+ if (MI != MBB.end() && !MI->isDebugInstr())
+ DL = MI->getDebugLoc();
+
+ for (auto &CS : CSI) {
+ // Insert the spill to the stack frame.
+ Register Reg = CS.getReg();
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.storeRegToStackSlot(MBB, MI, Reg, true, CS.getFrameIdx(), RC, TRI);
+ }
+
+ return true;
+}
+
+bool CSKYFrameLowering::restoreCalleeSavedRegisters(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+ MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
+ if (CSI.empty())
+ return true;
+
+ MachineFunction *MF = MBB.getParent();
+ const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();
+ DebugLoc DL;
+ if (MI != MBB.end() && !MI->isDebugInstr())
+ DL = MI->getDebugLoc();
+
+ for (auto &CS : reverse(CSI)) {
+ Register Reg = CS.getReg();
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.loadRegFromStackSlot(MBB, MI, Reg, CS.getFrameIdx(), RC, TRI);
+ assert(MI != MBB.begin() && "loadRegFromStackSlot didn't insert any code!");
+ }
+
+ return true;
+}
+
+// Eliminate ADJCALLSTACKDOWN, ADJCALLSTACKUP pseudo instructions.
+MachineBasicBlock::iterator CSKYFrameLowering::eliminateCallFramePseudoInstr(
+ MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const {
+ Register SPReg = CSKY::R14;
+ DebugLoc DL = MI->getDebugLoc();
+
+ if (!hasReservedCallFrame(MF)) {
+ // If space has not been reserved for a call frame, ADJCALLSTACKDOWN and
+ // ADJCALLSTACKUP must be converted to instructions manipulating the stack
+ // pointer. This is necessary when there is a variable length stack
+ // allocation (e.g. alloca), which means it's not possible to allocate
+ // space for outgoing arguments from within the function prologue.
+ int64_t Amount = MI->getOperand(0).getImm();
+
+ if (Amount != 0) {
+ // Ensure the stack remains aligned after adjustment.
+ Amount = alignSPAdjust(Amount);
+
+ if (MI->getOpcode() == CSKY::ADJCALLSTACKDOWN)
+ Amount = -Amount;
+
+ adjustReg(MBB, MI, DL, SPReg, SPReg, Amount, MachineInstr::NoFlags);
+ }
+ }
+
+ return MBB.erase(MI);
+}
+
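+// Emit DestReg = SrcReg + Val, using ADDI32/SUBI32, the 16-bit SP-relative
+// forms, or a scratch register materialised with movImm, depending on the
+// subtarget and the magnitude of Val.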
+void CSKYFrameLowering::adjustReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ const DebugLoc &DL, Register DestReg,
+ Register SrcReg, int64_t Val,
+ MachineInstr::MIFlag Flag) const {
+ const CSKYInstrInfo *TII = STI.getInstrInfo();
+
+ if (DestReg == SrcReg && Val == 0)
+ return;
+
+  // TODO: Add support for 16-bit instructions with an immediate operand.
+ if (STI.hasE2() && isUInt<12>(std::abs(Val) - 1)) {
+ BuildMI(MBB, MBBI, DL, TII->get(Val < 0 ? CSKY::SUBI32 : CSKY::ADDI32),
+ DestReg)
+ .addReg(SrcReg)
+ .addImm(std::abs(Val))
+ .setMIFlag(Flag);
+ } else if (!STI.hasE2() && isShiftedUInt<7, 2>(std::abs(Val))) {
+ BuildMI(MBB, MBBI, DL,
+ TII->get(Val < 0 ? CSKY::SUBI16SPSP : CSKY::ADDI16SPSP), CSKY::R14)
+ .addReg(CSKY::R14, RegState::Kill)
+ .addImm(std::abs(Val))
+ .setMIFlag(Flag);
+ } else {
+
+ unsigned Op = 0;
+
+ if (STI.hasE2()) {
+ Op = Val < 0 ? CSKY::SUBU32 : CSKY::ADDU32;
+ } else {
+ assert(SrcReg == DestReg);
+ Op = Val < 0 ? CSKY::SUBU16XZ : CSKY::ADDU16XZ;
+ }
+
+ Register ScratchReg = TII->movImm(MBB, MBBI, DL, std::abs(Val), Flag);
+
+ BuildMI(MBB, MBBI, DL, TII->get(Op), DestReg)
+ .addReg(SrcReg)
+ .addReg(ScratchReg, RegState::Kill)
+ .setMIFlag(Flag);
+ }
+}
+
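+// Resolve a frame index to a base register (SP, FP or BP) plus a fixed
+// offset, accounting for the callee-save and var-args save areas.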
+StackOffset
+CSKYFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
+ Register &FrameReg) const {
+ const CSKYMachineFunctionInfo *CFI = MF.getInfo<CSKYMachineFunctionInfo>();
+ const MachineFrameInfo &MFI = MF.getFrameInfo();
+ const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
+ const auto &CSI = MFI.getCalleeSavedInfo();
+
+ int MinCSFI = 0;
+ int MaxCSFI = -1;
+
+ int Offset = MFI.getObjectOffset(FI) + MFI.getOffsetAdjustment();
+
+ if (CSI.size()) {
+ MinCSFI = CSI[0].getFrameIdx();
+ MaxCSFI = CSI[CSI.size() - 1].getFrameIdx();
+ }
+
+ if (FI >= MinCSFI && FI <= MaxCSFI) {
+ FrameReg = CSKY::R14;
+ Offset += CFI->getVarArgsSaveSize() + CFI->getCalleeSaveAreaSize();
+ } else if (RI->hasStackRealignment(MF)) {
+ assert(hasFP(MF));
+ if (!MFI.isFixedObjectIndex(FI)) {
+ FrameReg = hasBP(MF) ? getBPReg(STI) : CSKY::R14;
+ Offset += MFI.getStackSize();
+ } else {
+ FrameReg = getFPReg(STI);
+ Offset += CFI->getVarArgsSaveSize() + CFI->getCalleeSaveAreaSize();
+ }
+ } else {
+ if (MFI.isFixedObjectIndex(FI) && hasFP(MF)) {
+ FrameReg = getFPReg(STI);
+ Offset += CFI->getVarArgsSaveSize() + CFI->getCalleeSaveAreaSize();
+ } else {
+ FrameReg = hasBP(MF) ? getBPReg(STI) : CSKY::R14;
+ Offset += MFI.getStackSize();
+ }
+ }
+
+ return StackOffset::getFixed(Offset);
}
diff --git a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYFrameLowering.h b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYFrameLowering.h
index 49921a1866bc..69bf01cf1801 100644
--- a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYFrameLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYFrameLowering.h
@@ -21,6 +21,11 @@ class CSKYSubtarget;
class CSKYFrameLowering : public TargetFrameLowering {
const CSKYSubtarget &STI;
+ void determineFrameLayout(MachineFunction &MF) const;
+ void adjustReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ const DebugLoc &DL, Register DestReg, Register SrcReg,
+ int64_t Val, MachineInstr::MIFlag Flag) const;
+
public:
explicit CSKYFrameLowering(const CSKYSubtarget &STI)
: TargetFrameLowering(StackGrowsDown,
@@ -31,8 +36,39 @@ public:
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+ StackOffset getFrameIndexReference(const MachineFunction &MF, int FI,
+ Register &FrameReg) const override;
+
+ void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs,
+ RegScavenger *RS) const override;
+
+ bool assignCalleeSavedSpillSlots(
+ MachineFunction &MF, const TargetRegisterInfo *TRI,
+ std::vector<CalleeSavedInfo> &CSI) const override {
+
+ std::reverse(CSI.begin(), CSI.end());
+
+ return false;
+ }
+
+ bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ ArrayRef<CalleeSavedInfo> CSI,
+ const TargetRegisterInfo *TRI) const override;
+ bool
+ restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ MutableArrayRef<CalleeSavedInfo> CSI,
+ const TargetRegisterInfo *TRI) const override;
+
bool hasFP(const MachineFunction &MF) const override;
bool hasBP(const MachineFunction &MF) const;
+
+ bool hasReservedCallFrame(const MachineFunction &MF) const override;
+
+ MachineBasicBlock::iterator
+ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const override;
};
} // namespace llvm
#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYISelDAGToDAG.cpp b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYISelDAGToDAG.cpp
index 8dc91904b8cc..d58f9095aa0d 100644
--- a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYISelDAGToDAG.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYISelDAGToDAG.cpp
@@ -68,6 +68,24 @@ void CSKYDAGToDAGISel::Select(SDNode *N) {
case ISD::SUBCARRY:
IsSelected = selectSubCarry(N);
break;
+ case ISD::GLOBAL_OFFSET_TABLE: {
+ Register GP = Subtarget->getInstrInfo()->getGlobalBaseReg(*MF);
+ ReplaceNode(N, CurDAG->getRegister(GP, N->getValueType(0)).getNode());
+
+ IsSelected = true;
+ break;
+ }
+ case ISD::FrameIndex: {
+ SDValue Imm = CurDAG->getTargetConstant(0, Dl, MVT::i32);
+ int FI = cast<FrameIndexSDNode>(N)->getIndex();
+ SDValue TFI = CurDAG->getTargetFrameIndex(FI, MVT::i32);
+ ReplaceNode(N, CurDAG->getMachineNode(Subtarget->hasE2() ? CSKY::ADDI32
+ : CSKY::ADDI16XZ,
+ Dl, MVT::i32, TFI, Imm));
+
+ IsSelected = true;
+ break;
+ }
}
if (IsSelected)
diff --git a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYISelLowering.cpp
index a1f7cc685d4c..0b589e3d3e4f 100644
--- a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYISelLowering.cpp
@@ -13,6 +13,7 @@
#include "CSKYISelLowering.h"
#include "CSKYCallingConv.h"
+#include "CSKYConstantPoolValue.h"
#include "CSKYMachineFunctionInfo.h"
#include "CSKYRegisterInfo.h"
#include "CSKYSubtarget.h"
@@ -37,6 +38,18 @@ CSKYTargetLowering::CSKYTargetLowering(const TargetMachine &TM,
// Register Class
addRegisterClass(MVT::i32, &CSKY::GPRRegClass);
+ if (STI.useHardFloat()) {
+ if (STI.hasFPUv2SingleFloat())
+ addRegisterClass(MVT::f32, &CSKY::sFPR32RegClass);
+ else if (STI.hasFPUv3SingleFloat())
+ addRegisterClass(MVT::f32, &CSKY::FPR32RegClass);
+
+ if (STI.hasFPUv2DoubleFloat())
+ addRegisterClass(MVT::f64, &CSKY::sFPR64RegClass);
+ else if (STI.hasFPUv3DoubleFloat())
+ addRegisterClass(MVT::f64, &CSKY::FPR64RegClass);
+ }
+
setOperationAction(ISD::ADDCARRY, MVT::i32, Legal);
setOperationAction(ISD::SUBCARRY, MVT::i32, Legal);
setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
@@ -53,16 +66,29 @@ CSKYTargetLowering::CSKYTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i32, Expand);
+ setOperationAction(ISD::BR_JT, MVT::Other, Expand);
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
setOperationAction(ISD::MULHS, MVT::i32, Expand);
setOperationAction(ISD::MULHU, MVT::i32, Expand);
+ setOperationAction(ISD::VAARG, MVT::Other, Expand);
+ setOperationAction(ISD::VACOPY, MVT::Other, Expand);
+ setOperationAction(ISD::VAEND, MVT::Other, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::i32, MVT::i1, Promote);
setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i1, Promote);
setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, MVT::i1, Promote);
+ setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
+ setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
+ setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
+ setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
+ setOperationAction(ISD::JumpTable, MVT::i32, Custom);
+ setOperationAction(ISD::VASTART, MVT::Other, Custom);
+
if (!Subtarget.hasE2()) {
setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i8, Expand);
setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i16, Expand);
@@ -77,6 +103,44 @@ CSKYTargetLowering::CSKYTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::UDIV, MVT::i32, Expand);
}
+ if (!Subtarget.has3r2E3r3()) {
+ setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
+ }
+
+ // Float
+
+ ISD::CondCode FPCCToExtend[] = {
+ ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
+ ISD::SETUGE, ISD::SETULT, ISD::SETULE,
+ };
+
+ ISD::NodeType FPOpToExpand[] = {ISD::FSIN, ISD::FCOS, ISD::FSINCOS,
+ ISD::FPOW, ISD::FREM, ISD::FCOPYSIGN};
+
+ if (STI.useHardFloat()) {
+
+ MVT AllVTy[] = {MVT::f32, MVT::f64};
+
+ for (auto VT : AllVTy) {
+ setOperationAction(ISD::FREM, VT, Expand);
+ setOperationAction(ISD::SELECT_CC, VT, Expand);
+ setOperationAction(ISD::BR_CC, VT, Expand);
+
+ for (auto CC : FPCCToExtend)
+ setCondCodeAction(CC, VT, Expand);
+ for (auto Op : FPOpToExpand)
+ setOperationAction(Op, VT, Expand);
+ }
+
+ if (STI.hasFPUv2SingleFloat() || STI.hasFPUv3SingleFloat()) {
+ setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
+ }
+ if (STI.hasFPUv2DoubleFloat() || STI.hasFPUv3DoubleFloat()) {
+ setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
+ setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+ }
+ }
+
// Compute derived properties from the register classes.
computeRegisterProperties(STI.getRegisterInfo());
@@ -92,6 +156,30 @@ CSKYTargetLowering::CSKYTargetLowering(const TargetMachine &TM,
setSchedulingPreference(Sched::Source);
}
+SDValue CSKYTargetLowering::LowerOperation(SDValue Op,
+ SelectionDAG &DAG) const {
+ switch (Op.getOpcode()) {
+ default:
+ llvm_unreachable("unimplemented op");
+ case ISD::GlobalAddress:
+ return LowerGlobalAddress(Op, DAG);
+ case ISD::ExternalSymbol:
+ return LowerExternalSymbol(Op, DAG);
+ case ISD::GlobalTLSAddress:
+ return LowerGlobalTLSAddress(Op, DAG);
+ case ISD::JumpTable:
+ return LowerJumpTable(Op, DAG);
+ case ISD::BlockAddress:
+ return LowerBlockAddress(Op, DAG);
+ case ISD::VASTART:
+ return LowerVASTART(Op, DAG);
+ case ISD::FRAMEADDR:
+ return LowerFRAMEADDR(Op, DAG);
+ case ISD::RETURNADDR:
+ return LowerRETURNADDR(Op, DAG);
+ }
+}
+
EVT CSKYTargetLowering::getSetCCResultType(const DataLayout &DL,
LLVMContext &Context, EVT VT) const {
if (!VT.isVector())
@@ -145,6 +233,14 @@ static SDValue unpackFromRegLoc(const CSKYSubtarget &Subtarget,
case MVT::i32:
RC = &CSKY::GPRRegClass;
break;
+ case MVT::f32:
+ RC = Subtarget.hasFPUv2SingleFloat() ? &CSKY::sFPR32RegClass
+ : &CSKY::FPR32RegClass;
+ break;
+ case MVT::f64:
+ RC = Subtarget.hasFPUv2DoubleFloat() ? &CSKY::sFPR64RegClass
+ : &CSKY::FPR64RegClass;
+ break;
}
Register VReg = RegInfo.createVirtualRegister(RC);
@@ -181,6 +277,44 @@ static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
return Val;
}
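+// Reassemble an f64/i64 argument that the calling convention splits into two
+// i32 halves: either entirely on the stack, in a GPR pair, or with the high
+// half on the stack when the low half is assigned to R3.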
+static SDValue unpack64(SelectionDAG &DAG, SDValue Chain, const CCValAssign &VA,
+ const SDLoc &DL) {
+ assert(VA.getLocVT() == MVT::i32 &&
+ (VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::i64) &&
+ "Unexpected VA");
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ MachineRegisterInfo &RegInfo = MF.getRegInfo();
+
+ if (VA.isMemLoc()) {
+ // f64/i64 is passed on the stack.
+ int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
+ SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
+ return DAG.getLoad(VA.getValVT(), DL, Chain, FIN,
+ MachinePointerInfo::getFixedStack(MF, FI));
+ }
+
+ assert(VA.isRegLoc() && "Expected register VA assignment");
+
+ Register LoVReg = RegInfo.createVirtualRegister(&CSKY::GPRRegClass);
+ RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
+ SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
+ SDValue Hi;
+ if (VA.getLocReg() == CSKY::R3) {
+ // Second half of f64/i64 is passed on the stack.
+ int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
+ SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
+ Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
+ MachinePointerInfo::getFixedStack(MF, FI));
+ } else {
+ // Second half of f64/i64 is passed in another GPR.
+ Register HiVReg = RegInfo.createVirtualRegister(&CSKY::GPRRegClass);
+ RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
+ Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
+ }
+ return DAG.getNode(CSKYISD::BITCAST_FROM_LOHI, DL, VA.getValVT(), Lo, Hi);
+}
+
// Transform physical registers into virtual registers.
SDValue CSKYTargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
@@ -210,7 +344,11 @@ SDValue CSKYTargetLowering::LowerFormalArguments(
CCValAssign &VA = ArgLocs[i];
SDValue ArgValue;
- if (VA.isRegLoc())
+ bool IsF64OnCSKY = VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
+
+ if (IsF64OnCSKY)
+ ArgValue = unpack64(DAG, Chain, VA, DL);
+ else if (VA.isRegLoc())
ArgValue = unpackFromRegLoc(Subtarget, DAG, Chain, VA, DL);
else
ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
@@ -354,6 +492,255 @@ CSKYTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
return DAG.getNode(CSKYISD::RET, DL, MVT::Other, RetOps);
}
+// Lower a call to a callseq_start + CALL + callseq_end chain, and add input
+// and output parameter nodes.
+SDValue CSKYTargetLowering::LowerCall(CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const {
+ SelectionDAG &DAG = CLI.DAG;
+ SDLoc &DL = CLI.DL;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
+ SDValue Chain = CLI.Chain;
+ SDValue Callee = CLI.Callee;
+ bool &IsTailCall = CLI.IsTailCall;
+ CallingConv::ID CallConv = CLI.CallConv;
+ bool IsVarArg = CLI.IsVarArg;
+ EVT PtrVT = getPointerTy(DAG.getDataLayout());
+ MVT XLenVT = MVT::i32;
+
+ MachineFunction &MF = DAG.getMachineFunction();
+
+ // Analyze the operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
+
+ ArgCCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CallConv, IsVarArg));
+
+ // Check if it's really possible to do a tail call.
+ if (IsTailCall)
+ IsTailCall = false; // TODO: TailCallOptimization;
+
+ if (IsTailCall)
+ ++NumTailCalls;
+ else if (CLI.CB && CLI.CB->isMustTailCall())
+ report_fatal_error("failed to perform tail call elimination on a call "
+ "site marked musttail");
+
+ // Get a count of how many bytes are to be pushed on the stack.
+ unsigned NumBytes = ArgCCInfo.getNextStackOffset();
+
+ // Create local copies for byval args
+ SmallVector<SDValue, 8> ByValArgs;
+ for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
+ ISD::ArgFlagsTy Flags = Outs[i].Flags;
+ if (!Flags.isByVal())
+ continue;
+
+ SDValue Arg = OutVals[i];
+ unsigned Size = Flags.getByValSize();
+ Align Alignment = Flags.getNonZeroByValAlign();
+
+ int FI =
+ MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
+ SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
+ SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
+
+ Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
+ /*IsVolatile=*/false,
+ /*AlwaysInline=*/false, IsTailCall,
+ MachinePointerInfo(), MachinePointerInfo());
+ ByValArgs.push_back(FIPtr);
+ }
+
+ if (!IsTailCall)
+ Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
+
+ // Copy argument values to their designated locations.
+ SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
+ SmallVector<SDValue, 8> MemOpChains;
+ SDValue StackPtr;
+ for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+ SDValue ArgValue = OutVals[i];
+ ISD::ArgFlagsTy Flags = Outs[i].Flags;
+
+ bool IsF64OnCSKY = VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
+
+ if (IsF64OnCSKY && VA.isRegLoc()) {
+ SDValue Split64 =
+ DAG.getNode(CSKYISD::BITCAST_TO_LOHI, DL,
+ DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
+ SDValue Lo = Split64.getValue(0);
+ SDValue Hi = Split64.getValue(1);
+
+ Register RegLo = VA.getLocReg();
+ RegsToPass.push_back(std::make_pair(RegLo, Lo));
+
+ if (RegLo == CSKY::R3) {
+ // Second half of f64/i64 is passed on the stack.
+ // Work out the address of the stack slot.
+ if (!StackPtr.getNode())
+ StackPtr = DAG.getCopyFromReg(Chain, DL, CSKY::R14, PtrVT);
+ // Emit the store.
+ MemOpChains.push_back(
+ DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
+ } else {
+ // Second half of f64/i64 is passed in another GPR.
+ assert(RegLo < CSKY::R31 && "Invalid register pair");
+ Register RegHigh = RegLo + 1;
+ RegsToPass.push_back(std::make_pair(RegHigh, Hi));
+ }
+ continue;
+ }
+
+ ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL);
+
+ // Use local copy if it is a byval arg.
+ if (Flags.isByVal())
+ ArgValue = ByValArgs[j++];
+
+ if (VA.isRegLoc()) {
+ // Queue up the argument copies and emit them at the end.
+ RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
+ } else {
+ assert(VA.isMemLoc() && "Argument not register or memory");
+ assert(!IsTailCall && "Tail call not allowed if stack is used "
+ "for passing parameters");
+
+ // Work out the address of the stack slot.
+ if (!StackPtr.getNode())
+ StackPtr = DAG.getCopyFromReg(Chain, DL, CSKY::R14, PtrVT);
+ SDValue Address =
+ DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
+ DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
+
+ // Emit the store.
+ MemOpChains.push_back(
+ DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
+ }
+ }
+
+ // Join the stores, which are independent of one another.
+ if (!MemOpChains.empty())
+ Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
+
+ SDValue Glue;
+
+ // Build a sequence of copy-to-reg nodes, chained and glued together.
+ for (auto &Reg : RegsToPass) {
+ Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
+ Glue = Chain.getValue(1);
+ }
+
+ SmallVector<SDValue, 8> Ops;
+ EVT Ty = getPointerTy(DAG.getDataLayout());
+ bool IsRegCall = false;
+
+ Ops.push_back(Chain);
+
+ if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ const GlobalValue *GV = S->getGlobal();
+ bool IsLocal =
+ getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
+
+ if (isPositionIndependent() || !Subtarget.has2E3()) {
+ IsRegCall = true;
+ Ops.push_back(getAddr<GlobalAddressSDNode, true>(S, DAG, IsLocal));
+ } else {
+ Ops.push_back(getTargetNode(cast<GlobalAddressSDNode>(Callee), DL, Ty,
+ DAG, CSKYII::MO_None));
+ Ops.push_back(getTargetConstantPoolValue(
+ cast<GlobalAddressSDNode>(Callee), Ty, DAG, CSKYII::MO_None));
+ }
+ } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
+ bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(
+ *MF.getFunction().getParent(), nullptr);
+
+ if (isPositionIndependent() || !Subtarget.has2E3()) {
+ IsRegCall = true;
+ Ops.push_back(getAddr<ExternalSymbolSDNode, true>(S, DAG, IsLocal));
+ } else {
+ Ops.push_back(getTargetNode(cast<ExternalSymbolSDNode>(Callee), DL, Ty,
+ DAG, CSKYII::MO_None));
+ Ops.push_back(getTargetConstantPoolValue(
+ cast<ExternalSymbolSDNode>(Callee), Ty, DAG, CSKYII::MO_None));
+ }
+ } else {
+ IsRegCall = true;
+ Ops.push_back(Callee);
+ }
+
+ // Add argument registers to the end of the list so that they are
+ // known live into the call.
+ for (auto &Reg : RegsToPass)
+ Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
+
+ if (!IsTailCall) {
+ // Add a register mask operand representing the call-preserved registers.
+ const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
+ const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
+ assert(Mask && "Missing call preserved mask for calling convention");
+ Ops.push_back(DAG.getRegisterMask(Mask));
+ }
+
+ // Glue the call to the argument copies, if any.
+ if (Glue.getNode())
+ Ops.push_back(Glue);
+
+ // Emit the call.
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+
+ if (IsTailCall) {
+ MF.getFrameInfo().setHasTailCall();
+ return DAG.getNode(IsRegCall ? CSKYISD::TAILReg : CSKYISD::TAIL, DL,
+ NodeTys, Ops);
+ }
+
+ Chain = DAG.getNode(IsRegCall ? CSKYISD::CALLReg : CSKYISD::CALL, DL, NodeTys,
+ Ops);
+ DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
+ Glue = Chain.getValue(1);
+
+ // Mark the end of the call, which is glued to the call itself.
+ Chain = DAG.getCALLSEQ_END(Chain, DAG.getConstant(NumBytes, DL, PtrVT, true),
+ DAG.getConstant(0, DL, PtrVT, true), Glue, DL);
+ Glue = Chain.getValue(1);
+
+ // Assign locations to each value returned by this call.
+ SmallVector<CCValAssign, 16> CSKYLocs;
+ CCState RetCCInfo(CallConv, IsVarArg, MF, CSKYLocs, *DAG.getContext());
+ RetCCInfo.AnalyzeCallResult(Ins, CCAssignFnForReturn(CallConv, IsVarArg));
+
+ // Copy all of the result registers out of their specified physreg.
+ for (auto &VA : CSKYLocs) {
+ // Copy the value out
+ SDValue RetValue =
+ DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
+ // Glue the RetValue to the end of the call sequence
+ Chain = RetValue.getValue(1);
+ Glue = RetValue.getValue(2);
+
+ bool IsF64OnCSKY = VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
+
+ if (IsF64OnCSKY) {
+ assert(VA.getLocReg() == GPRArgRegs[0] && "Unexpected reg assignment");
+ SDValue RetValue2 =
+ DAG.getCopyFromReg(Chain, DL, GPRArgRegs[1], MVT::i32, Glue);
+ Chain = RetValue2.getValue(1);
+ Glue = RetValue2.getValue(2);
+ RetValue = DAG.getNode(CSKYISD::BITCAST_FROM_LOHI, DL, VA.getValVT(),
+ RetValue, RetValue2);
+ }
+
+ RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL);
+
+ InVals.push_back(RetValue);
+ }
+
+ return Chain;
+}
+
CCAssignFn *CSKYTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
bool IsVarArg) const {
if (IsVarArg || !Subtarget.useHardFloatABI())
@@ -370,6 +757,165 @@ CCAssignFn *CSKYTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
return CC_CSKY_ABIV2_FP;
}
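+// Map a CSKYII::MO_* operand flag to the corresponding constant-pool modifier.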
+static CSKYCP::CSKYCPModifier getModifier(unsigned Flags) {
+
+ if (Flags == CSKYII::MO_ADDR32)
+ return CSKYCP::ADDR;
+ else if (Flags == CSKYII::MO_GOT32)
+ return CSKYCP::GOT;
+ else if (Flags == CSKYII::MO_GOTOFF)
+ return CSKYCP::GOTOFF;
+ else if (Flags == CSKYII::MO_PLT32)
+ return CSKYCP::PLT;
+ else if (Flags == CSKYII::MO_None)
+ return CSKYCP::NO_MOD;
+ else
+ assert(0 && "unknown CSKYII Modifier");
+ return CSKYCP::NO_MOD;
+}
+
+SDValue CSKYTargetLowering::getTargetConstantPoolValue(GlobalAddressSDNode *N,
+ EVT Ty,
+ SelectionDAG &DAG,
+ unsigned Flags) const {
+ CSKYConstantPoolValue *CPV = CSKYConstantPoolConstant::Create(
+ N->getGlobal(), CSKYCP::CPValue, 0, getModifier(Flags), false);
+
+ return DAG.getTargetConstantPool(CPV, Ty);
+}
+
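+// Expand an ISEL pseudo into control flow: BT32/BT16 branches to the sink
+// block when the condition is true, otherwise execution falls through to the
+// copy block, and a PHI in the sink block merges the two values.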
+static MachineBasicBlock *
+emitSelectPseudo(MachineInstr &MI, MachineBasicBlock *BB, unsigned Opcode) {
+
+ const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
+ DebugLoc DL = MI.getDebugLoc();
+
+ // To "insert" a SELECT instruction, we actually have to insert the
+ // diamond control-flow pattern. The incoming instruction knows the
+ // destination vreg to set, the condition code register to branch on, the
+ // true/false values to select between, and a branch opcode to use.
+ const BasicBlock *LLVM_BB = BB->getBasicBlock();
+ MachineFunction::iterator It = ++BB->getIterator();
+
+ // thisMBB:
+ // ...
+ // TrueVal = ...
+ // bt32 c, sinkMBB
+ // fallthrough --> copyMBB
+ MachineBasicBlock *thisMBB = BB;
+ MachineFunction *F = BB->getParent();
+ MachineBasicBlock *copyMBB = F->CreateMachineBasicBlock(LLVM_BB);
+ MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
+ F->insert(It, copyMBB);
+ F->insert(It, sinkMBB);
+
+ // Transfer the remainder of BB and its successor edges to sinkMBB.
+ sinkMBB->splice(sinkMBB->begin(), BB,
+ std::next(MachineBasicBlock::iterator(MI)), BB->end());
+ sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
+
+ // Next, add the true and fallthrough blocks as its successors.
+ BB->addSuccessor(copyMBB);
+ BB->addSuccessor(sinkMBB);
+
+ // bt32 condition, sinkMBB
+ BuildMI(BB, DL, TII.get(Opcode))
+ .addReg(MI.getOperand(1).getReg())
+ .addMBB(sinkMBB);
+
+ // copyMBB:
+ // %FalseValue = ...
+ // # fallthrough to sinkMBB
+ BB = copyMBB;
+
+ // Update machine-CFG edges
+ BB->addSuccessor(sinkMBB);
+
+ // sinkMBB:
+ // %Result = phi [ %TrueValue, thisMBB ], [ %FalseValue, copyMBB ]
+ // ...
+ BB = sinkMBB;
+
+ BuildMI(*BB, BB->begin(), DL, TII.get(CSKY::PHI), MI.getOperand(0).getReg())
+ .addReg(MI.getOperand(2).getReg())
+ .addMBB(thisMBB)
+ .addReg(MI.getOperand(3).getReg())
+ .addMBB(copyMBB);
+
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
+
+ return BB;
+}
+
+MachineBasicBlock *
+CSKYTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
+ MachineBasicBlock *BB) const {
+ switch (MI.getOpcode()) {
+ default:
+ llvm_unreachable("Unexpected instr type to insert");
+ case CSKY::ISEL32:
+ return emitSelectPseudo(MI, BB, CSKY::BT32);
+ case CSKY::ISEL16:
+ return emitSelectPseudo(MI, BB, CSKY::BT16);
+ }
+}
+
+SDValue CSKYTargetLowering::getTargetConstantPoolValue(ExternalSymbolSDNode *N,
+ EVT Ty,
+ SelectionDAG &DAG,
+ unsigned Flags) const {
+ CSKYConstantPoolValue *CPV =
+ CSKYConstantPoolSymbol::Create(Type::getInt32Ty(*DAG.getContext()),
+ N->getSymbol(), 0, getModifier(Flags));
+
+ return DAG.getTargetConstantPool(CPV, Ty);
+}
+
+SDValue CSKYTargetLowering::getTargetConstantPoolValue(JumpTableSDNode *N,
+ EVT Ty,
+ SelectionDAG &DAG,
+ unsigned Flags) const {
+ CSKYConstantPoolValue *CPV =
+ CSKYConstantPoolJT::Create(Type::getInt32Ty(*DAG.getContext()),
+ N->getIndex(), 0, getModifier(Flags));
+ return DAG.getTargetConstantPool(CPV, Ty);
+}
+
+SDValue CSKYTargetLowering::getTargetConstantPoolValue(BlockAddressSDNode *N,
+ EVT Ty,
+ SelectionDAG &DAG,
+ unsigned Flags) const {
+ CSKYConstantPoolValue *CPV = CSKYConstantPoolConstant::Create(
+ N->getBlockAddress(), CSKYCP::CPBlockAddress, 0, getModifier(Flags),
+ false);
+ return DAG.getTargetConstantPool(CPV, Ty);
+}
+
+SDValue CSKYTargetLowering::getTargetNode(GlobalAddressSDNode *N, SDLoc DL,
+ EVT Ty, SelectionDAG &DAG,
+ unsigned Flags) const {
+ return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
+}
+
+SDValue CSKYTargetLowering::getTargetNode(ExternalSymbolSDNode *N, SDLoc DL,
+ EVT Ty, SelectionDAG &DAG,
+ unsigned Flags) const {
+ return DAG.getTargetExternalSymbol(N->getSymbol(), Ty, Flags);
+}
+
+SDValue CSKYTargetLowering::getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
+ SelectionDAG &DAG,
+ unsigned Flags) const {
+ return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
+}
+
+SDValue CSKYTargetLowering::getTargetNode(BlockAddressSDNode *N, SDLoc DL,
+ EVT Ty, SelectionDAG &DAG,
+ unsigned Flags) const {
+ return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
+ Flags);
+}
+
const char *CSKYTargetLowering::getTargetNodeName(unsigned Opcode) const {
switch (Opcode) {
default:
@@ -380,7 +926,243 @@ const char *CSKYTargetLowering::getTargetNodeName(unsigned Opcode) const {
return "CSKYISD::NIR";
case CSKYISD::RET:
return "CSKYISD::RET";
+ case CSKYISD::CALL:
+ return "CSKYISD::CALL";
+ case CSKYISD::CALLReg:
+ return "CSKYISD::CALLReg";
+ case CSKYISD::TAIL:
+ return "CSKYISD::TAIL";
+ case CSKYISD::TAILReg:
+ return "CSKYISD::TAILReg";
+ case CSKYISD::LOAD_ADDR:
+ return "CSKYISD::LOAD_ADDR";
case CSKYISD::BITCAST_TO_LOHI:
return "CSKYISD::BITCAST_TO_LOHI";
+ case CSKYISD::BITCAST_FROM_LOHI:
+ return "CSKYISD::BITCAST_FROM_LOHI";
}
}
+
+SDValue CSKYTargetLowering::LowerGlobalAddress(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ EVT Ty = Op.getValueType();
+ GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
+ int64_t Offset = N->getOffset();
+
+ const GlobalValue *GV = N->getGlobal();
+ bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
+ SDValue Addr = getAddr<GlobalAddressSDNode, false>(N, DAG, IsLocal);
+
+ // In order to maximise the opportunity for common subexpression elimination,
+ // emit a separate ADD node for the global address offset instead of folding
+ // it in the global address node. Later peephole optimisations may choose to
+ // fold it back in when profitable.
+ if (Offset != 0)
+ return DAG.getNode(ISD::ADD, DL, Ty, Addr,
+ DAG.getConstant(Offset, DL, MVT::i32));
+ return Addr;
+}
+
+SDValue CSKYTargetLowering::LowerExternalSymbol(SDValue Op,
+ SelectionDAG &DAG) const {
+ ExternalSymbolSDNode *N = cast<ExternalSymbolSDNode>(Op);
+
+ return getAddr(N, DAG, false);
+}
+
+SDValue CSKYTargetLowering::LowerJumpTable(SDValue Op,
+ SelectionDAG &DAG) const {
+ JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
+
+ return getAddr<JumpTableSDNode, false>(N, DAG);
+}
+
+SDValue CSKYTargetLowering::LowerBlockAddress(SDValue Op,
+ SelectionDAG &DAG) const {
+ BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
+
+ return getAddr(N, DAG);
+}
+
+SDValue CSKYTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ CSKYMachineFunctionInfo *FuncInfo = MF.getInfo<CSKYMachineFunctionInfo>();
+
+ SDLoc DL(Op);
+ SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
+ getPointerTy(MF.getDataLayout()));
+
+ // vastart just stores the address of the VarArgsFrameIndex slot into the
+ // memory location argument.
+ const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
+ return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
+ MachinePointerInfo(SV));
+}
+
+SDValue CSKYTargetLowering::LowerFRAMEADDR(SDValue Op,
+ SelectionDAG &DAG) const {
+ const CSKYRegisterInfo &RI = *Subtarget.getRegisterInfo();
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ MFI.setFrameAddressIsTaken(true);
+
+ EVT VT = Op.getValueType();
+ SDLoc dl(Op);
+ unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ Register FrameReg = RI.getFrameRegister(MF);
+ SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
+ while (Depth--)
+ FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
+ MachinePointerInfo());
+ return FrameAddr;
+}
+
+SDValue CSKYTargetLowering::LowerRETURNADDR(SDValue Op,
+ SelectionDAG &DAG) const {
+ const CSKYRegisterInfo &RI = *Subtarget.getRegisterInfo();
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ MFI.setReturnAddressIsTaken(true);
+
+ if (verifyReturnAddressArgumentIsConstant(Op, DAG))
+ return SDValue();
+
+ EVT VT = Op.getValueType();
+ SDLoc dl(Op);
+ unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ if (Depth) {
+ SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
+ SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
+ return DAG.getLoad(VT, dl, DAG.getEntryNode(),
+ DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
+ MachinePointerInfo());
+ }
+ // Return the value of the return address register, marking it an implicit
+ // live-in.
+ unsigned Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(MVT::i32));
+ return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
+}
+
+Register CSKYTargetLowering::getExceptionPointerRegister(
+ const Constant *PersonalityFn) const {
+ return CSKY::R0;
+}
+
+Register CSKYTargetLowering::getExceptionSelectorRegister(
+ const Constant *PersonalityFn) const {
+ return CSKY::R1;
+}
+
+SDValue CSKYTargetLowering::LowerGlobalTLSAddress(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ EVT Ty = Op.getValueType();
+ GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
+ int64_t Offset = N->getOffset();
+ MVT XLenVT = MVT::i32;
+
+ TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
+ SDValue Addr;
+ switch (Model) {
+ case TLSModel::LocalExec:
+ Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
+ break;
+ case TLSModel::InitialExec:
+ Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
+ break;
+ case TLSModel::LocalDynamic:
+ case TLSModel::GeneralDynamic:
+ Addr = getDynamicTLSAddr(N, DAG);
+ break;
+ }
+
+ // In order to maximise the opportunity for common subexpression elimination,
+ // emit a separate ADD node for the global address offset instead of folding
+ // it in the global address node. Later peephole optimisations may choose to
+ // fold it back in when profitable.
+ if (Offset != 0)
+ return DAG.getNode(ISD::ADD, DL, Ty, Addr,
+ DAG.getConstant(Offset, DL, XLenVT));
+ return Addr;
+}
+
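+// Lower a LocalExec/InitialExec TLS access: load the thread-local offset from
+// the constant pool (through the GOT for InitialExec) and add the thread
+// pointer in R31.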
+SDValue CSKYTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
+ SelectionDAG &DAG,
+ bool UseGOT) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ CSKYMachineFunctionInfo *CFI = MF.getInfo<CSKYMachineFunctionInfo>();
+
+ unsigned CSKYPCLabelIndex = CFI->createPICLabelUId();
+
+ SDLoc DL(N);
+ EVT Ty = getPointerTy(DAG.getDataLayout());
+
+ CSKYCP::CSKYCPModifier Flag = UseGOT ? CSKYCP::TLSIE : CSKYCP::TLSLE;
+  bool AddCurrentAddr = UseGOT;
+  unsigned char PCAdjust = UseGOT ? 4 : 0;
+
+  CSKYConstantPoolValue *CPV = CSKYConstantPoolConstant::Create(
+      N->getGlobal(), CSKYCP::CPValue, PCAdjust, Flag, AddCurrentAddr,
+      CSKYPCLabelIndex);
+ SDValue CAddr = DAG.getTargetConstantPool(CPV, Ty);
+
+ SDValue Load;
+ if (UseGOT) {
+ SDValue PICLabel = DAG.getTargetConstant(CSKYPCLabelIndex, DL, MVT::i32);
+ auto *LRWGRS = DAG.getMachineNode(CSKY::PseudoTLSLA32, DL, {Ty, Ty},
+ {CAddr, PICLabel});
+ auto LRWADDGRS =
+ DAG.getNode(ISD::ADD, DL, Ty, SDValue(LRWGRS, 0), SDValue(LRWGRS, 1));
+ Load = DAG.getLoad(Ty, DL, DAG.getEntryNode(), LRWADDGRS,
+ MachinePointerInfo(N->getGlobal()));
+ } else {
+ Load = SDValue(DAG.getMachineNode(CSKY::LRW32, DL, Ty, CAddr), 0);
+ }
+
+ // Add the thread pointer.
+ SDValue TPReg = DAG.getRegister(CSKY::R31, MVT::i32);
+ return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
+}
+
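+// Lower a GeneralDynamic/LocalDynamic TLS access: form the argument address
+// with PseudoTLSLA32 and call __tls_get_addr to obtain the variable's address.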
+SDValue CSKYTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
+ SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ CSKYMachineFunctionInfo *CFI = MF.getInfo<CSKYMachineFunctionInfo>();
+
+ unsigned CSKYPCLabelIndex = CFI->createPICLabelUId();
+
+ SDLoc DL(N);
+ EVT Ty = getPointerTy(DAG.getDataLayout());
+ IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
+
+ CSKYConstantPoolValue *CPV =
+ CSKYConstantPoolConstant::Create(N->getGlobal(), CSKYCP::CPValue, 4,
+ CSKYCP::TLSGD, true, CSKYPCLabelIndex);
+ SDValue Addr = DAG.getTargetConstantPool(CPV, Ty);
+ SDValue PICLabel = DAG.getTargetConstant(CSKYPCLabelIndex, DL, MVT::i32);
+
+ auto *LRWGRS =
+ DAG.getMachineNode(CSKY::PseudoTLSLA32, DL, {Ty, Ty}, {Addr, PICLabel});
+
+ auto Load =
+ DAG.getNode(ISD::ADD, DL, Ty, SDValue(LRWGRS, 0), SDValue(LRWGRS, 1));
+
+ // Prepare argument list to generate call.
+ ArgListTy Args;
+ ArgListEntry Entry;
+ Entry.Node = Load;
+ Entry.Ty = CallTy;
+ Args.push_back(Entry);
+
+  // Set up the call to __tls_get_addr.
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(DL)
+ .setChain(DAG.getEntryNode())
+ .setLibCallee(CallingConv::C, CallTy,
+ DAG.getExternalSymbol("__tls_get_addr", Ty),
+ std::move(Args));
+ SDValue V = LowerCallTo(CLI).first;
+
+ return V;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYISelLowering.h b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYISelLowering.h
index 7557c11f50a8..e1744d5ce220 100644
--- a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYISelLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYISelLowering.h
@@ -27,7 +27,15 @@ enum NodeType : unsigned {
NIE,
NIR,
RET,
- BITCAST_TO_LOHI
+ CALL,
+ CALLReg,
+ TAIL,
+ TAILReg,
+ LOAD_ADDR,
+ // i32, i32 <-- f64
+ BITCAST_TO_LOHI,
+  // f64 <-- i32, i32
+ BITCAST_FROM_LOHI,
};
}
@@ -38,6 +46,8 @@ public:
explicit CSKYTargetLowering(const TargetMachine &TM,
const CSKYSubtarget &STI);
+ SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
+
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
EVT VT) const override;
@@ -58,8 +68,96 @@ private:
const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
SelectionDAG &DAG) const override;
+ SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const override;
+
const char *getTargetNodeName(unsigned Opcode) const override;
+ /// If a physical register, this returns the register that receives the
+ /// exception address on entry to an EH pad.
+ Register
+ getExceptionPointerRegister(const Constant *PersonalityFn) const override;
+
+ /// If a physical register, this returns the register that receives the
+ /// exception typeid on entry to a landing pad.
+ Register
+ getExceptionSelectorRegister(const Constant *PersonalityFn) const override;
+
+ bool isSelectSupported(SelectSupportKind Kind) const override {
+ // CSKY does not support scalar condition selects on vectors.
+ return (Kind != ScalarCondVectorVal);
+ }
+
+ MachineBasicBlock *
+ EmitInstrWithCustomInserter(MachineInstr &MI,
+ MachineBasicBlock *BB) const override;
+
+ SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
+ SelectionDAG &DAG, unsigned Flags) const;
+
+ SDValue getTargetNode(ExternalSymbolSDNode *N, SDLoc DL, EVT Ty,
+ SelectionDAG &DAG, unsigned Flags) const;
+
+ SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty, SelectionDAG &DAG,
+ unsigned Flags) const;
+
+ SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
+ SelectionDAG &DAG, unsigned Flags) const;
+
+ SDValue getTargetConstantPoolValue(GlobalAddressSDNode *N, EVT Ty,
+ SelectionDAG &DAG, unsigned Flags) const;
+
+ SDValue getTargetConstantPoolValue(ExternalSymbolSDNode *N, EVT Ty,
+ SelectionDAG &DAG, unsigned Flags) const;
+
+ SDValue getTargetConstantPoolValue(JumpTableSDNode *N, EVT Ty,
+ SelectionDAG &DAG, unsigned Flags) const;
+
+ SDValue getTargetConstantPoolValue(BlockAddressSDNode *N, EVT Ty,
+ SelectionDAG &DAG, unsigned Flags) const;
+
+ template <class NodeTy, bool IsCall = false>
+ SDValue getAddr(NodeTy *N, SelectionDAG &DAG, bool IsLocal = true) const {
+ SDLoc DL(N);
+ EVT Ty = getPointerTy(DAG.getDataLayout());
+
+ unsigned Flag = CSKYII::MO_None;
+ bool IsPIC = isPositionIndependent();
+
+ if (IsPIC)
+ Flag = IsLocal ? CSKYII::MO_GOTOFF
+ : IsCall ? CSKYII::MO_PLT32
+ : CSKYII::MO_GOT32;
+
+ SDValue TCPV = getTargetConstantPoolValue(N, Ty, DAG, Flag);
+ SDValue TV = getTargetNode(N, DL, Ty, DAG, Flag);
+ SDValue Addr = DAG.getNode(CSKYISD::LOAD_ADDR, DL, Ty, {TV, TCPV});
+
+ if (!IsPIC)
+ return Addr;
+
+ SDValue Result =
+ DAG.getNode(ISD::ADD, DL, Ty, {DAG.getGLOBAL_OFFSET_TABLE(Ty), Addr});
+ if (IsLocal)
+ return Result;
+
+ return DAG.getLoad(Ty, DL, DAG.getEntryNode(), Result,
+ MachinePointerInfo::getGOT(DAG.getMachineFunction()));
+ }
+
+ SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
+
+ SDValue getStaticTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG,
+ bool UseGOT) const;
+ SDValue getDynamicTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG) const;
+
CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;
CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool IsVarArg) const;
};
diff --git a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrFormatsF1.td b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrFormatsF1.td
new file mode 100644
index 000000000000..446670a4d0a9
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrFormatsF1.td
@@ -0,0 +1,274 @@
+//===- CSKYInstrFormatsF1.td - CSKY Float1.0 Instr Format --*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// CSKY Instruction Format Float1.0 Definitions.
+//
+//===----------------------------------------------------------------------===//
+
+class CSKYFP1Inst<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : CSKY32Inst<AddrModeNone, 0x3d, outs, ins, asmstr, pattern>, Requires<[HasFPUv2_SF]> {
+}
+
+class F_XYZ_BASE<bits<5> datatype, bits<6> sop, dag outs, dag ins, string opcodestr, list<dag> pattern>
+ : CSKYFP1Inst<outs, ins, opcodestr, pattern> {
+ bits<4> vrx;
+ bits<4> vry;
+ bits<4> vrz;
+ let Inst{25 - 21} = {0, vry};
+ let Inst{20 - 16} = {0, vrx};
+ let Inst{15 - 11} = datatype;
+ let Inst{10 - 5} = sop;
+ let Inst{4 - 0} = {0, vrz};
+}
+
+class F_XZ_GF<bits<5> datatype, bits<6> sop, dag outs, dag ins, string opcodestr, list<dag> pattern>
+ : CSKYFP1Inst<outs, ins, opcodestr, pattern> {
+ bits<4> vrx;
+ bits<5> rz;
+ let Inst{25 - 21} = 0;
+ let Inst{20 - 16} = {0, vrx};
+ let Inst{15 - 11} = datatype;
+ let Inst{10 - 5} = sop;
+ let Inst{4 - 0} = {rz};
+}
+
+class F_XZ_FG<bits<5> datatype, bits<6> sop, dag outs, dag ins, string opcodestr, list<dag> pattern>
+ : CSKYFP1Inst<outs, ins, opcodestr, pattern> {
+ bits<5> rx;
+ bits<4> vrz;
+ let Inst{25 - 21} = 0;
+ let Inst{20 - 16} = {rx};
+ let Inst{15 - 11} = datatype;
+ let Inst{10 - 5} = sop;
+ let Inst{4 - 0} = {0, vrz};
+}
+
+class F_XZ_TRANS_FROM<bits<6> sop, string op, RegisterOperand regtype1, RegisterOperand regtype2>
+ : F_XZ_GF<3, sop, (outs regtype1:$rz), (ins regtype2:$vrx), !strconcat(op, "\t$rz, $vrx"),
+ []>;
+
+class F_XZ_TRANS_TO<bits<6> sop, string op, RegisterOperand regtype1, RegisterOperand regtype2>
+ : F_XZ_FG<3, sop, (outs regtype1:$vrz), (ins regtype2:$rx), !strconcat(op, "\t$vrz, $rx"),
+ []>;
+
+let vry = 0 in {
+class F_XZ<bits<5> datatype, bits<6> sop, string op, string op_su, PatFrag opnode, RegisterOperand regtype>
+ : F_XYZ_BASE<datatype, sop, (outs regtype:$vrz), (ins regtype:$vrx), !strconcat(op#op_su, "\t$vrz, $vrx"),
+ [(set regtype:$vrz, (opnode regtype:$vrx))]>;
+
+class F_MOV<bits<5> datatype, bits<6> sop, string op, string op_su, RegisterOperand regtype>
+ : F_XYZ_BASE<datatype, sop, (outs regtype:$vrz), (ins regtype:$vrx), !strconcat(op#op_su, "\t$vrz, $vrx"),
+ []>;
+
+class F_XZ_TRANS<bits<6> sop, string op, RegisterOperand regtype1, RegisterOperand regtype2>
+ : F_XYZ_BASE<3, sop, (outs regtype1:$vrz), (ins regtype2:$vrx), !strconcat(op, "\t$vrz, $vrx"),
+ []>;
+
+class F_XZ_TRANS_DS<bits<6> sop, string op, PatFrag opnode>
+ : F_XYZ_BASE<3, sop, (outs sFPR32Op:$vrz), (ins sFPR64Op:$vrx), !strconcat(op, "\t$vrz, $vrx"),
+ [(set sFPR32Op:$vrz, (opnode sFPR64Op:$vrx))]>;
+
+class F_XZ_TRANS_SD<bits<6> sop, string op, PatFrag opnode>
+ : F_XYZ_BASE<3, sop, (outs sFPR64Op:$vrz), (ins sFPR32Op:$vrx), !strconcat(op, "\t$vrz, $vrx"),
+ [(set sFPR64Op:$vrz, (opnode sFPR32Op:$vrx))]>;
+}
+
+multiclass FT_MOV<bits<6> sop, string op> {
+ def _S : F_MOV<0, sop, op, "s", sFPR32Op>;
+ let Predicates = [HasFPUv2_DF] in
+ def _D : F_MOV<1, sop, op, "d", sFPR64Op>;
+}
+
+multiclass FT_XZ<bits<6> sop, string op, PatFrag opnode> {
+ def _S : F_XZ<0, sop, op, "s", opnode, sFPR32Op>;
+ let Predicates = [HasFPUv2_DF] in
+ def _D : F_XZ<1, sop, op, "d", opnode, sFPR64Op>;
+}
+
+let vrz = 0, isCompare = 1 in {
+class F_CMPXY<bits<5> datatype, bits<6> sop, string op, string op_su, RegisterOperand regtype>
+ : F_XYZ_BASE<datatype, sop, (outs CARRY:$ca), (ins regtype:$vrx, regtype:$vry), !strconcat(op#op_su, "\t$vrx, $vry"),
+ []>;
+
+let vry = 0 in {
+class F_CMPZX<bits<5> datatype, bits<6> sop, string op, string op_su, RegisterOperand regtype>
+ : F_XYZ_BASE<datatype, sop, (outs CARRY:$ca), (ins regtype:$vrx), !strconcat(op#op_su, "\t$vrx"),
+ []>;
+}
+}
+
+class F_XYZ<bits<5> datatype, bits<6> sop, string op, string op_su, PatFrag opnode, RegisterOperand regtype>
+ : F_XYZ_BASE<datatype, sop, (outs regtype:$vrz), (ins regtype:$vrx, regtype:$vry),
+ !strconcat(op#op_su, "\t$vrz, $vrx, $vry"),
+ [(set regtype:$vrz, (opnode regtype:$vrx, regtype:$vry))]>;
+
+multiclass FT_XYZ<bits<6> sop, string op, PatFrag opnode> {
+ def _S : F_XYZ<0, sop, op, "s", opnode, sFPR32Op>;
+ let Predicates = [HasFPUv2_DF] in
+ def _D : F_XYZ<1, sop, op, "d", opnode, sFPR64Op>;
+}
+
+let Constraints = "$vrt = $vrz" in {
+class F_ACCUM_XYZ<bits<5> datatype, bits<6> sop, string op, string op_su, PatFrag opnode, RegisterOperand regtype>
+ : F_XYZ_BASE<datatype, sop, (outs regtype:$vrz), (ins regtype:$vrt, regtype:$vrx, regtype:$vry),
+ !strconcat(op#op_su, "\t$vrz, $vrx, $vry"),
+ [(set regtype:$vrz, (opnode regtype:$vrt, regtype:$vrx, regtype:$vry))]>;
+}
+
+multiclass FT_ACCUM_XYZ<bits<6> sop, string op, PatFrag opnode> {
+ def _S : F_ACCUM_XYZ<0, sop, op, "s", opnode, sFPR32Op>;
+ let Predicates = [HasFPUv2_DF] in
+ def _D : F_ACCUM_XYZ<1, sop, op, "d", opnode, sFPR64Op>;
+}
+
+multiclass FT_CMPXY<bits<6> sop, string op> {
+ def _S : F_CMPXY<0, sop, op, "s", sFPR32Op>;
+ let Predicates = [HasFPUv2_DF] in
+ def _D : F_CMPXY<1, sop, op, "d", sFPR64Op>;
+}
+
+
+multiclass FT_CMPZX<bits<6> sop, string op> {
+ def _S : F_CMPZX<0, sop, op, "s", sFPR32Op>;
+ let Predicates = [HasFPUv2_DF] in
+ def _D : F_CMPZX<1, sop, op, "d", sFPR64Op>;
+}
+
+class F_I8_XY_MEM<bits<7> sop, bits<1> sop_su, dag outs, dag ins, string opcodestr, list<dag> pattern>
+ : CSKY32Inst<AddrMode32SDF, 0x3d, outs, ins, opcodestr, pattern> {
+ bits<5> rx;
+ bits<4> vrz;
+ bits<8> imm8;
+ let Inst{25} = 0;
+ let Inst{24 - 21} = imm8{7 - 4}; //imm4h
+ let Inst{20 - 16} = rx; //rx
+ let Inst{15 - 9} = sop;
+ let Inst{8} = sop_su;
+ let Inst{7 - 4} = imm8{3 - 0}; // imm4l
+ let Inst{3 - 0} = vrz;
+}
+
+class F_I4_XY_MEM<bits<7> sop, bits<1> sop_su, dag outs, dag ins, string opcodestr, list<dag> pattern>
+ : CSKY32Inst<AddrMode32SDF, 0x3d, outs, ins, opcodestr, pattern> {
+ bits<10> regs;
+ bits<5> rx;
+
+ let Inst{25} = 0;
+ let Inst{24 - 21} = regs{3-0}; //imm4
+ let Inst{20 - 16} = rx; //rx
+ let Inst{15 - 9} = sop;
+ let Inst{8} = sop_su;
+ let Inst{7 - 4} = 0;
+ let Inst{3 - 0} = regs{8-5};
+}
+
+class F_I8_Z_MEM<bits<7> sop, bits<1> sop_su, dag outs, dag ins, string opcodestr, list<dag> pattern>
+ : CSKY32Inst<AddrModeNone, 0x3d, outs, ins, opcodestr, pattern> {
+ bits<4> vrz;
+ bits<8> imm8;
+ let Inst{25} = 0;
+ let Inst{24 - 21} = imm8{7 - 4}; //imm4h
+ let Inst{20 - 16} = 0; //rx
+ let Inst{15 - 9} = sop;
+ let Inst{8} = sop_su;
+ let Inst{7 - 4} = imm8{3 - 0}; // imm4l
+ let Inst{3 - 0} = vrz;
+}
+
+class F_XYZ_MEM<bits<7> sop, bits<1> sop_su, dag outs, dag ins, string opcodestr, list<dag> pattern>
+ : CSKY32Inst<AddrModeNone, 0x3d, outs, ins, opcodestr, pattern> {
+ bits<5> rx;
+ bits<5> ry;
+ bits<4> vrz;
+ bits<2> imm;
+
+ let Inst{25 - 21} = ry; // ry;
+ let Inst{20 - 16} = rx; // rx;
+ let Inst{15 - 9} = sop;
+ let Inst{8} = sop_su;
+ let Inst{7} = 0;
+ let Inst{6,5} = imm; // shift;
+ let Inst{4} = 0;
+ let Inst{3 - 0} = vrz;
+}
+
+class F_XYAI_LD<bits<7> sop, bits<1> sop_su, string op, string op_su,
+ RegisterOperand regtype, Operand operand>
+ : F_I8_XY_MEM<sop, sop_su, (outs regtype:$vrz), (ins GPR:$rx, operand:$imm8),
+ !strconcat(op#op_su, "\t$vrz, ($rx, ${imm8})"), []>;
+
+class F_XYAR_LD<bits<7> sop, bits<1> sop_su, string op, string op_su,
+ RegisterOperand regtype>
+ : F_XYZ_MEM<sop, sop_su, (outs regtype:$vrz), (ins GPR:$rx, GPR:$ry, uimm2:$imm),
+ op#op_su#"\t$vrz, ($rx, $ry << ${imm})", []>;
+
+class F_XYAI_ST<bits<7> sop, bits<1> sop_su, string op, string op_su,
+ RegisterOperand regtype, Operand operand>
+ : F_I8_XY_MEM<sop, sop_su, (outs), (ins regtype:$vrz, GPR:$rx, operand:$imm8),
+ !strconcat(op#op_su, "\t$vrz, ($rx, ${imm8})"), []>;
+
+class F_XYAR_ST<bits<7> sop, bits<1> sop_su, string op, string op_su,
+ RegisterOperand regtype>
+ : F_XYZ_MEM<sop, sop_su, (outs), (ins regtype:$vrz, GPR:$rx, GPR:$ry, uimm2:$imm),
+ op#op_su#"\t$vrz, ($rx, $ry << ${imm})", []>;
+
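+// Memory operands for the Float1.0 loads and stores: base register plus a
+// scaled 8-bit immediate, and base register plus an index register with a
+// 2-bit shift amount.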
+def Mem8SL2 : Operand<iPTR>, ComplexPattern<iPTR, 2, "SelectAddrRegImm8", []> {
+ let MIOperandInfo = (ops GPR, i32imm);
+ let PrintMethod = "printAddrModeRegImmOperand";
+ let EncoderMethod = "getAddrModeFloatImm8_sl2OpValue";
+}
+
+def FRRS : Operand<iPTR>, ComplexPattern<iPTR, 3, "SelectAddrRegReg", []> {
+ let MIOperandInfo = (ops GPR, GPR, i32imm);
+ let PrintMethod = "printAddrModeRegRegSLOperand";
+ let EncoderMethod = "getAddrModeFloatRegRegSLOpValue";
+}
+
+multiclass FT_XYAI_LD<bits<7> sop, string op> {
+ def _S : F_XYAI_LD<sop, 0, op, "s", sFPR32Op, uimm8_2>;
+ let Predicates = [HasFPUv2_DF] in
+ def _D : F_XYAI_LD<sop, 1, op, "d", sFPR64Op, uimm8_2>;
+}
+
+multiclass FT_XYAR_LD<bits<7> sop, string op> {
+ def _S : F_XYAR_LD<sop, 0, op, "s", sFPR32Op>;
+ let Predicates = [HasFPUv2_DF] in
+ def _D : F_XYAR_LD<sop, 1, op, "d", sFPR64Op>;
+}
+
+multiclass FT_XYAI_ST<bits<7> sop, string op> {
+ def _S : F_XYAI_ST<sop, 0, op, "s", sFPR32Op, uimm8_2>;
+ let Predicates = [HasFPUv2_DF] in
+ def _D : F_XYAI_ST<sop, 1, op, "d", sFPR64Op, uimm8_2>;
+}
+
+multiclass FT_XYAR_ST<bits<7> sop, string op> {
+ def _S : F_XYAR_ST<sop, 0, op, "s", sFPR32Op>;
+ let Predicates = [HasFPUv2_DF] in
+ def _D : F_XYAR_ST<sop, 1, op, "d", sFPR64Op>;
+}
+
+multiclass FT_XYAR_STM<bits<7> sop, string op> {
+ def _S : F_I4_XY_MEM<sop, 0, (outs),
+ (ins GPR:$rx, regseq_f1:$regs, variable_ops),
+ !strconcat(op#"s", "\t$regs, (${rx})"), []>;
+ let Predicates = [HasFPUv2_DF] in
+ def _D : F_I4_XY_MEM<sop, 1, (outs),
+ (ins GPR:$rx, regseq_d1:$regs, variable_ops),
+ !strconcat(op#"d", "\t$regs, (${rx})"), []>;
+}
+
+multiclass FT_XYAR_LDM<bits<7> sop, string op> {
+ def _S : F_I4_XY_MEM<sop, 0, (outs),
+ (ins GPR:$rx, regseq_f1:$regs, variable_ops),
+ !strconcat(op#"s", "\t$regs, (${rx})"), []>;
+ let Predicates = [HasFPUv2_DF] in
+ def _D : F_I4_XY_MEM<sop, 1, (outs),
+ (ins GPR:$rx, regseq_d1:$regs, variable_ops),
+ !strconcat(op#"d", "\t$regs, (${rx})"), []>;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrFormatsF2.td b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrFormatsF2.td
new file mode 100644
index 000000000000..641ad623f140
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrFormatsF2.td
@@ -0,0 +1,208 @@
+//===- CSKYInstrFormatsF2.td - CSKY Float2.0 Instr Format --*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// CSKY Instruction Format Float2.0 Definitions.
+//
+//===----------------------------------------------------------------------===//
+
+class CSKYInstF2<AddrMode am, dag outs, dag ins, string opcodestr,
+ list<dag> pattern>
+ : CSKY32Inst<am, 0x3d, outs, ins, opcodestr, pattern> {
+ let Predicates = [HasFPUv3_SF];
+ let DecoderNamespace = "FPUV3";
+}
+
+class F2_XYZ<bits<5> datatype, bits<6> sop, string opcodestr, dag outs, dag ins,
+ list<dag> pattern>
+ : CSKYInstF2<AddrModeNone, outs, ins, opcodestr, pattern> {
+ bits<5> vry;
+ bits<5> vrx;
+ bits<5> vrz;
+
+ let Inst{25-21} = vry;
+ let Inst{20-16} = vrx;
+ let Inst{15-11} = datatype;
+ let Inst{10-5} = sop;
+ let Inst{4-0} = vrz;
+}
+
+multiclass F2_XYZ_T<bits<6> sop, string op, PatFrag opnode> {
+ def _S : F2_XYZ<0b00000, sop, op#".32"#"\t$vrz, $vrx, $vry",
+ (outs FPR32Op:$vrz), (ins FPR32Op:$vrx, FPR32Op:$vry),
+ [(set FPR32Op:$vrz, (opnode FPR32Op:$vrx, FPR32Op:$vry))]>;
+ let Predicates = [HasFPUv3_DF] in
+ def _D : F2_XYZ<0b00001, sop, op#".64"#"\t$vrz, $vrx, $vry",
+ (outs FPR64Op:$vrz), (ins FPR64Op:$vrx, FPR64Op:$vry),
+ [(set FPR64Op:$vrz, (opnode FPR64Op:$vrx, FPR64Op:$vry))]>;
+}
+
+let Constraints = "$vrZ = $vrz" in
+multiclass F2_XYZZ_T<bits<6> sop, string op, PatFrag opnode> {
+ def _S : F2_XYZ<0b00000, sop, op#".32"#"\t$vrz, $vrx, $vry",
+ (outs FPR32Op:$vrz), (ins FPR32Op:$vrZ, FPR32Op:$vrx, FPR32Op:$vry),
+ [(set FPR32Op:$vrz, (opnode FPR32Op:$vrx, FPR32Op:$vry, FPR32Op:$vrZ))]>;
+ let Predicates = [HasFPUv3_DF] in
+ def _D : F2_XYZ<0b00001, sop, op#".64"#"\t$vrz, $vrx, $vry",
+ (outs FPR64Op:$vrz), (ins FPR64Op:$vrZ, FPR64Op:$vrx, FPR64Op:$vry),
+ [(set FPR64Op:$vrz, (opnode FPR64Op:$vrx, FPR64Op:$vry, FPR64Op:$vrZ))]>;
+}
+
+let vry = 0 in {
+class F2_XZ<bits<5> datatype, RegisterOperand regtype, bits<6> sop, string op, SDNode opnode>
+ : F2_XYZ<datatype, sop, !strconcat(op, "\t$vrz, $vrx"),
+ (outs regtype:$vrz), (ins regtype:$vrx),
+ [(set regtype:$vrz, (opnode regtype:$vrx))]>;
+
+class F2_XZ_SET<bits<5> datatype, RegisterOperand regtype, bits<6> sop, string op>
+ : F2_XYZ<datatype, sop, !strconcat(op, "\t$vrz, $vrx"),
+ (outs regtype:$vrz), (ins regtype:$vrx),
+ []>;
+
+class F2_XZ_P<bits<5> datatype, bits<6> sop, string op, list<dag> pattern = [],
+ dag outs, dag ins>
+ : F2_XYZ<datatype, sop, op#"\t$vrz, $vrx", outs, ins, pattern>;
+}
+
+multiclass F2_XZ_RM<bits<5> datatype, bits<4> sop, string op, dag outs, dag ins> {
+ def _RN : F2_XZ_P<datatype, {sop, 0b00}, op#".rn", [], outs, ins>;
+ def _RZ : F2_XZ_P<datatype, {sop, 0b01}, op#".rz", [], outs, ins>;
+ def _RPI : F2_XZ_P<datatype, {sop, 0b10}, op#".rpi", [], outs, ins>;
+ def _RNI : F2_XZ_P<datatype, {sop, 0b11}, op#".rni", [], outs, ins>;
+}
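+// F2_XZ_RM expands one conversion into its four rounding-mode variants by
+// appending two bits to the 4-bit sop: 00 -> .rn (to nearest), 01 -> .rz
+// (toward zero), 10 -> .rpi (toward +inf), 11 -> .rni (toward -inf).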
+
+multiclass F2_XZ_T<bits<6> sop, string op, SDNode opnode> {
+ def _S : F2_XZ<0b00000, FPR32Op, sop, op#".32", opnode>;
+ let Predicates = [HasFPUv3_DF] in
+ def _D : F2_XZ<0b00001, FPR64Op, sop, op#".64", opnode>;
+}
+
+multiclass F2_XZ_SET_T<bits<6> sop, string op, string suffix = ""> {
+ def _S : F2_XZ_SET<0b00000, FPR32Op, sop, op#".32"#suffix>;
+ let Predicates = [HasFPUv3_DF] in
+ def _D : F2_XZ_SET<0b00001, FPR64Op, sop, op#".64"#suffix>;
+}
+
+
+let vrz = 0, isCompare = 1 in
+class F2_CXY<bits<5> datatype, RegisterOperand regtype, bits<6> sop, string op>
+ : F2_XYZ<datatype, sop, !strconcat(op, "\t$vrx, $vry"),
+ (outs CARRY:$ca), (ins regtype:$vrx, regtype:$vry),
+ []>;
+
+multiclass F2_CXY_T<bits<6> sop, string op> {
+ def _S : F2_CXY<0b00000, FPR32Op, sop, op#".32">;
+ let Predicates = [HasFPUv3_DF] in
+ def _D : F2_CXY<0b00001, FPR64Op, sop, op#".64">;
+}
+
+
+let vrz = 0, vry = 0, isCompare = 1 in
+class F2_CX<bits<5> datatype, RegisterOperand regtype, bits<6> sop, string op>
+ : F2_XYZ<datatype, sop, !strconcat(op, "\t$vrx"),
+ (outs CARRY:$ca), (ins regtype:$vrx),
+ []>;
+
+multiclass F2_CX_T<bits<6> sop, string op> {
+ def _S : F2_CX<0b00000, FPR32Op, sop, op#".32">;
+ let Predicates = [HasFPUv3_DF] in
+ def _D : F2_CX<0b00001, FPR64Op, sop, op#".64">;
+}
+
+
+class F2_LDST<bits<2> datatype, bits<1> sop, string op, dag outs, dag ins>
+ : CSKYInstF2<AddrMode32SDF, outs, ins,
+ !strconcat(op, "\t$vrz, ($rx, ${imm8})"), []> {
+ bits<10> imm8;
+ bits<5> rx;
+ bits<5> vrz;
+
+ let Inst{25} = vrz{4};
+ let Inst{24-21} = imm8{7-4};
+ let Inst{20-16} = rx;
+ let Inst{15-11} = 0b00100;
+ let Inst{10} = sop;
+ let Inst{9-8} = datatype;
+ let Inst{7-4} = imm8{3-0};
+ let Inst{3-0} = vrz{3-0};
+}
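+// The 8-bit offset and the 5-bit FPU register number do not occupy contiguous
+// fields: imm8 is split across Inst{24-21} (high nibble) and Inst{7-4} (low
+// nibble), while vrz places its low four bits in Inst{3-0} and bit 4 in
+// Inst{25}.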
+
+class F2_LDST_S<bits<1> sop, string op, dag outs, dag ins>
+ : F2_LDST<0b00, sop, op#".32", outs, ins>;
+class F2_LDST_D<bits<1> sop, string op, dag outs, dag ins>
+ : F2_LDST<0b01, sop, op#".64", outs, ins>;
+
+class F2_LDSTM<bits<2> datatype, bits<1> sop, bits<3> sop2, string op, dag outs, dag ins>
+ : CSKYInstF2<AddrMode32SDF, outs, ins,
+ !strconcat(op, "\t$regs, (${rx})"), []> {
+ bits<10> regs;
+ bits<5> rx;
+
+ let Inst{25-21} = regs{4-0};
+ let Inst{20-16} = rx;
+ let Inst{15-11} = 0b00110;
+ let Inst{10} = sop;
+ let Inst{9-8} = datatype;
+ let Inst{7-5} = sop2;
+ let Inst{4-0} = regs{9-5};
+}
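+// For the multi-register forms, the 10-bit regs operand (a register/count
+// pair, see the regseq_* operands) is likewise split, with regs{4-0} in
+// Inst{25-21} and regs{9-5} in Inst{4-0}.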
+
+class F2_LDSTM_S<bits<1> sop, bits<3> sop2, string op, dag outs, dag ins>
+ : F2_LDSTM<0b00, sop, sop2, op#".32", outs, ins>;
+class F2_LDSTM_D<bits<1> sop, bits<3> sop2, string op, dag outs, dag ins>
+ : F2_LDSTM<0b01, sop, sop2, op#".64", outs, ins>;
+
+
+class F2_LDSTR<bits<2> datatype, bits<1> sop, string op, dag outs, dag ins>
+ : CSKYInstF2<AddrModeNone, outs, ins,
+ op#"\t$rz, ($rx, $ry << ${imm})", []> {
+ bits<5> rx;
+ bits<5> ry;
+ bits<5> rz;
+ bits<2> imm;
+
+ let Inst{25-21} = ry;
+ let Inst{20-16} = rx;
+ let Inst{15-11} = 0b00101;
+ let Inst{10} = sop;
+ let Inst{9-8} = datatype;
+ let Inst{7} = 0;
+ let Inst{6-5} = imm;
+ let Inst{4-0} = rz;
+}
+
+class F2_LDSTR_S<bits<1> sop, string op, dag outs, dag ins>
+ : F2_LDSTR<0b00, sop, op#".32", outs, ins>;
+class F2_LDSTR_D<bits<1> sop, string op, dag outs, dag ins>
+ : F2_LDSTR<0b01, sop, op#".64", outs, ins>;
+
+class F2_CXYZ<bits<5> datatype, RegisterOperand regtype, bits<6> sop, string op>
+ : F2_XYZ<datatype, sop, !strconcat(op, "\t$vrz, $vrx, $vry"),
+ (outs regtype:$vrz), (ins CARRY:$ca, regtype:$vrx, regtype:$vry),
+ []>;
+multiclass F2_CXYZ_T<bits<6> sop, string op> {
+ def _S : F2_CXYZ<0b00000, FPR32Op, sop, op#".32">;
+ let Predicates = [HasFPUv3_DF] in
+ def _D : F2_CXYZ<0b00001, FPR64Op, sop, op#".64">;
+}
+
+class F2_LRW<bits<2> datatype, bits<1> sop, string op, dag outs, dag ins>
+ : CSKYInstF2<AddrModeNone, outs, ins,
+ !strconcat(op, "\t$vrz, ${imm8}"), []> {
+ bits<10> imm8;
+ bits<5> rx;
+ bits<5> vrz;
+
+ let Inst{25} = vrz{4};
+ let Inst{24-21} = imm8{7-4};
+ let Inst{20-16} = 0;
+ let Inst{15-11} = 0b00111;
+ let Inst{10} = sop;
+ let Inst{9-8} = datatype;
+ let Inst{7-4} = imm8{3-0};
+ let Inst{3-0} = vrz{3-0};
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrInfo.cpp b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrInfo.cpp
index 6fcb136cd99b..c57ccb9d6eea 100644
--- a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrInfo.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "CSKYInstrInfo.h"
+#include "CSKYConstantPoolValue.h"
#include "CSKYMachineFunctionInfo.h"
#include "CSKYTargetMachine.h"
#include "llvm/MC/MCContext.h"
@@ -24,6 +25,199 @@ using namespace llvm;
CSKYInstrInfo::CSKYInstrInfo(CSKYSubtarget &STI)
: CSKYGenInstrInfo(CSKY::ADJCALLSTACKDOWN, CSKY::ADJCALLSTACKUP), STI(STI) {
+ v2sf = STI.hasFPUv2SingleFloat();
+ v2df = STI.hasFPUv2DoubleFloat();
+ v3sf = STI.hasFPUv3SingleFloat();
+ v3df = STI.hasFPUv3DoubleFloat();
+}
+
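+// The branch "condition" is stored as two operands: Cond[0] holds the opcode
+// of the conditional branch as an immediate and Cond[1] holds its condition
+// operand. insertBranch and reverseBranchCondition below consume the same
+// encoding.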
+static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target,
+ SmallVectorImpl<MachineOperand> &Cond) {
+ // Block ends with fall-through condbranch.
+ assert(LastInst.getDesc().isConditionalBranch() &&
+ "Unknown conditional branch");
+ Target = LastInst.getOperand(1).getMBB();
+ Cond.push_back(MachineOperand::CreateImm(LastInst.getOpcode()));
+ Cond.push_back(LastInst.getOperand(0));
+}
+
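+// analyzeBranch follows the generic TargetInstrInfo contract: it returns
+// false when the terminators match one of the supported shapes (fallthrough,
+// a single unconditional branch, a single conditional branch, or a
+// conditional branch followed by an unconditional one) and true when the
+// block cannot be analyzed (an indirect branch or more than two terminators).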
+bool CSKYInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const {
+ TBB = FBB = nullptr;
+ Cond.clear();
+
+ // If the block has no terminators, it just falls into the block after it.
+ MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
+ if (I == MBB.end() || !isUnpredicatedTerminator(*I))
+ return false;
+
+ // Count the number of terminators and find the first unconditional or
+ // indirect branch.
+ MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
+ int NumTerminators = 0;
+ for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
+ J++) {
+ NumTerminators++;
+ if (J->getDesc().isUnconditionalBranch() ||
+ J->getDesc().isIndirectBranch()) {
+ FirstUncondOrIndirectBr = J.getReverse();
+ }
+ }
+
+ // If AllowModify is true, we can erase any terminators after
+  // FirstUncondOrIndirectBr.
+ if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
+ while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
+ std::next(FirstUncondOrIndirectBr)->eraseFromParent();
+ NumTerminators--;
+ }
+ I = FirstUncondOrIndirectBr;
+ }
+
+ // We can't handle blocks that end in an indirect branch.
+ if (I->getDesc().isIndirectBranch())
+ return true;
+
+ // We can't handle blocks with more than 2 terminators.
+ if (NumTerminators > 2)
+ return true;
+
+ // Handle a single unconditional branch.
+ if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
+ TBB = getBranchDestBlock(*I);
+ return false;
+ }
+
+ // Handle a single conditional branch.
+ if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
+ parseCondBranch(*I, TBB, Cond);
+ return false;
+ }
+
+ // Handle a conditional branch followed by an unconditional branch.
+ if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
+ I->getDesc().isUnconditionalBranch()) {
+ parseCondBranch(*std::prev(I), TBB, Cond);
+ FBB = getBranchDestBlock(*I);
+ return false;
+ }
+
+ // Otherwise, we can't handle this.
+ return true;
+}
+
+unsigned CSKYInstrInfo::removeBranch(MachineBasicBlock &MBB,
+ int *BytesRemoved) const {
+ if (BytesRemoved)
+ *BytesRemoved = 0;
+ MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
+ if (I == MBB.end())
+ return 0;
+
+ if (!I->getDesc().isUnconditionalBranch() &&
+ !I->getDesc().isConditionalBranch())
+ return 0;
+
+ // Remove the branch.
+ if (BytesRemoved)
+ *BytesRemoved += getInstSizeInBytes(*I);
+ I->eraseFromParent();
+
+ I = MBB.end();
+
+ if (I == MBB.begin())
+ return 1;
+ --I;
+ if (!I->getDesc().isConditionalBranch())
+ return 1;
+
+ // Remove the branch.
+ if (BytesRemoved)
+ *BytesRemoved += getInstSizeInBytes(*I);
+ I->eraseFromParent();
+ return 2;
+}
+
+MachineBasicBlock *
+CSKYInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
+ assert(MI.getDesc().isBranch() && "Unexpected opcode!");
+ // The branch target is always the last operand.
+ int NumOp = MI.getNumExplicitOperands();
+ assert(MI.getOperand(NumOp - 1).isMBB() && "Expected MBB!");
+ return MI.getOperand(NumOp - 1).getMBB();
+}
+
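+// insertBranch returns the number of branch instructions it emitted (1 or 2)
+// and, when BytesAdded is non-null, accumulates their encoded sizes so that
+// callers can track block sizes.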
+unsigned CSKYInstrInfo::insertBranch(
+ MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
+ ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
+ if (BytesAdded)
+ *BytesAdded = 0;
+
+ // Shouldn't be a fall through.
+ assert(TBB && "insertBranch must not be told to insert a fallthrough");
+ assert((Cond.size() == 2 || Cond.size() == 0) &&
+ "CSKY branch conditions have two components!");
+
+ // Unconditional branch.
+ if (Cond.empty()) {
+ MachineInstr &MI = *BuildMI(&MBB, DL, get(CSKY::BR32)).addMBB(TBB);
+ if (BytesAdded)
+ *BytesAdded += getInstSizeInBytes(MI);
+ return 1;
+ }
+
+ // Either a one or two-way conditional branch.
+ unsigned Opc = Cond[0].getImm();
+ MachineInstr &CondMI = *BuildMI(&MBB, DL, get(Opc)).add(Cond[1]).addMBB(TBB);
+ if (BytesAdded)
+ *BytesAdded += getInstSizeInBytes(CondMI);
+
+ // One-way conditional branch.
+ if (!FBB)
+ return 1;
+
+ // Two-way conditional branch.
+ MachineInstr &MI = *BuildMI(&MBB, DL, get(CSKY::BR32)).addMBB(FBB);
+ if (BytesAdded)
+ *BytesAdded += getInstSizeInBytes(MI);
+ return 2;
+}
+
+static unsigned getOppositeBranchOpc(unsigned Opcode) {
+ switch (Opcode) {
+ default:
+ llvm_unreachable("Unknown conditional branch!");
+ case CSKY::BT32:
+ return CSKY::BF32;
+ case CSKY::BT16:
+ return CSKY::BF16;
+ case CSKY::BF32:
+ return CSKY::BT32;
+ case CSKY::BF16:
+ return CSKY::BT16;
+ case CSKY::BHZ32:
+ return CSKY::BLSZ32;
+ case CSKY::BHSZ32:
+ return CSKY::BLZ32;
+ case CSKY::BLZ32:
+ return CSKY::BHSZ32;
+ case CSKY::BLSZ32:
+ return CSKY::BHZ32;
+ case CSKY::BNEZ32:
+ return CSKY::BEZ32;
+ case CSKY::BEZ32:
+ return CSKY::BNEZ32;
+ }
+}
+
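+// reverseBranchCondition flips the stored opcode in place, e.g. a (BT32, $c)
+// condition becomes (BF32, $c); getOppositeBranchOpc above also pairs the
+// compare-with-zero branches (BEZ32 <-> BNEZ32, BHZ32 <-> BLSZ32,
+// BHSZ32 <-> BLZ32).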
+bool CSKYInstrInfo::reverseBranchCondition(
+ SmallVectorImpl<MachineOperand> &Cond) const {
+ assert((Cond.size() == 2) && "Invalid branch condition!");
+ Cond[0].setImm(getOppositeBranchOpc(Cond[0].getImm()));
+ return false;
}
Register CSKYInstrInfo::movImm(MachineBasicBlock &MBB,
@@ -147,6 +341,10 @@ unsigned CSKYInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
case CSKY::LD32H:
case CSKY::LD32HS:
case CSKY::LD32W:
+ case CSKY::FLD_S:
+ case CSKY::FLD_D:
+ case CSKY::f2FLD_S:
+ case CSKY::f2FLD_D:
case CSKY::RESTORE_CARRY:
break;
}
@@ -171,6 +369,10 @@ unsigned CSKYInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
case CSKY::ST32B:
case CSKY::ST32H:
case CSKY::ST32W:
+ case CSKY::FST_S:
+ case CSKY::FST_D:
+ case CSKY::f2FST_S:
+ case CSKY::f2FST_D:
case CSKY::SPILL_CARRY:
break;
}
@@ -204,7 +406,15 @@ void CSKYInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
} else if (CSKY::CARRYRegClass.hasSubClassEq(RC)) {
Opcode = CSKY::SPILL_CARRY;
CFI->setSpillsCR();
- } else {
+ } else if (v2sf && CSKY::sFPR32RegClass.hasSubClassEq(RC))
+ Opcode = CSKY::FST_S;
+ else if (v2df && CSKY::sFPR64RegClass.hasSubClassEq(RC))
+ Opcode = CSKY::FST_D;
+ else if (v3sf && CSKY::FPR32RegClass.hasSubClassEq(RC))
+ Opcode = CSKY::f2FST_S;
+ else if (v3df && CSKY::FPR64RegClass.hasSubClassEq(RC))
+ Opcode = CSKY::f2FST_D;
+ else {
llvm_unreachable("Unknown RegisterClass");
}
@@ -239,7 +449,15 @@ void CSKYInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
} else if (CSKY::CARRYRegClass.hasSubClassEq(RC)) {
Opcode = CSKY::RESTORE_CARRY;
CFI->setSpillsCR();
- } else {
+ } else if (v2sf && CSKY::sFPR32RegClass.hasSubClassEq(RC))
+ Opcode = CSKY::FLD_S;
+ else if (v2df && CSKY::sFPR64RegClass.hasSubClassEq(RC))
+ Opcode = CSKY::FLD_D;
+ else if (v3sf && CSKY::FPR32RegClass.hasSubClassEq(RC))
+ Opcode = CSKY::f2FLD_S;
+ else if (v3df && CSKY::FPR64RegClass.hasSubClassEq(RC))
+ Opcode = CSKY::f2FLD_D;
+ else {
llvm_unreachable("Unknown RegisterClass");
}
@@ -302,6 +520,38 @@ void CSKYInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
unsigned Opcode = 0;
if (CSKY::GPRRegClass.contains(DestReg, SrcReg))
Opcode = CSKY::MOV32;
+ else if (v2sf && CSKY::sFPR32RegClass.contains(DestReg, SrcReg))
+ Opcode = CSKY::FMOV_S;
+ else if (v3sf && CSKY::FPR32RegClass.contains(DestReg, SrcReg))
+ Opcode = CSKY::f2FMOV_S;
+ else if (v2df && CSKY::sFPR64RegClass.contains(DestReg, SrcReg))
+ Opcode = CSKY::FMOV_D;
+ else if (v3df && CSKY::FPR64RegClass.contains(DestReg, SrcReg))
+ Opcode = CSKY::f2FMOV_D;
+ else if (v2sf && CSKY::sFPR32RegClass.contains(SrcReg) &&
+ CSKY::GPRRegClass.contains(DestReg))
+ Opcode = CSKY::FMFVRL;
+ else if (v3sf && CSKY::FPR32RegClass.contains(SrcReg) &&
+ CSKY::GPRRegClass.contains(DestReg))
+ Opcode = CSKY::f2FMFVRL;
+ else if (v2df && CSKY::sFPR64RegClass.contains(SrcReg) &&
+ CSKY::GPRRegClass.contains(DestReg))
+ Opcode = CSKY::FMFVRL_D;
+ else if (v3df && CSKY::FPR64RegClass.contains(SrcReg) &&
+ CSKY::GPRRegClass.contains(DestReg))
+ Opcode = CSKY::f2FMFVRL_D;
+ else if (v2sf && CSKY::GPRRegClass.contains(SrcReg) &&
+ CSKY::sFPR32RegClass.contains(DestReg))
+ Opcode = CSKY::FMTVRL;
+ else if (v3sf && CSKY::GPRRegClass.contains(SrcReg) &&
+ CSKY::FPR32RegClass.contains(DestReg))
+ Opcode = CSKY::f2FMTVRL;
+ else if (v2df && CSKY::GPRRegClass.contains(SrcReg) &&
+ CSKY::sFPR64RegClass.contains(DestReg))
+ Opcode = CSKY::FMTVRL_D;
+ else if (v3df && CSKY::GPRRegClass.contains(SrcReg) &&
+ CSKY::FPR64RegClass.contains(DestReg))
+ Opcode = CSKY::f2FMTVRL_D;
else {
LLVM_DEBUG(dbgs() << "src = " << SrcReg << ", dst = " << DestReg);
LLVM_DEBUG(I->dump());
@@ -311,3 +561,58 @@ void CSKYInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
BuildMI(MBB, I, DL, get(Opcode), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc));
}
+
+Register CSKYInstrInfo::getGlobalBaseReg(MachineFunction &MF) const {
+ CSKYMachineFunctionInfo *CFI = MF.getInfo<CSKYMachineFunctionInfo>();
+ MachineConstantPool *MCP = MF.getConstantPool();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+
+ Register GlobalBaseReg = CFI->getGlobalBaseReg();
+ if (GlobalBaseReg != 0)
+ return GlobalBaseReg;
+
+  // Insert instructions into the first MBB of the function to materialize
+  // the GOT address and copy it into the GlobalBaseReg.
+ MachineBasicBlock &FirstMBB = MF.front();
+ MachineBasicBlock::iterator MBBI = FirstMBB.begin();
+ DebugLoc DL;
+
+ CSKYConstantPoolValue *CPV = CSKYConstantPoolSymbol::Create(
+ Type::getInt32Ty(MF.getFunction().getContext()), "_GLOBAL_OFFSET_TABLE_",
+ 0, CSKYCP::ADDR);
+
+ unsigned CPI = MCP->getConstantPoolIndex(CPV, Align(4));
+
+ MachineMemOperand *MO =
+ MF.getMachineMemOperand(MachinePointerInfo::getConstantPool(MF),
+ MachineMemOperand::MOLoad, 4, Align(4));
+ BuildMI(FirstMBB, MBBI, DL, get(CSKY::LRW32), CSKY::R28)
+ .addConstantPoolIndex(CPI)
+ .addMemOperand(MO);
+
+ GlobalBaseReg = MRI.createVirtualRegister(&CSKY::GPRRegClass);
+ BuildMI(FirstMBB, MBBI, DL, get(TargetOpcode::COPY), GlobalBaseReg)
+ .addReg(CSKY::R28);
+
+ CFI->setGlobalBaseReg(GlobalBaseReg);
+ return GlobalBaseReg;
+}
+
+unsigned CSKYInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
+ default:
+ return MI.getDesc().getSize();
+ case CSKY::CONSTPOOL_ENTRY:
+ return MI.getOperand(2).getImm();
+ case CSKY::SPILL_CARRY:
+ case CSKY::RESTORE_CARRY:
+ case CSKY::PseudoTLSLA32:
+ return 8;
+ case TargetOpcode::INLINEASM_BR:
+ case TargetOpcode::INLINEASM: {
+ const MachineFunction *MF = MI.getParent()->getParent();
+ const char *AsmStr = MI.getOperand(0).getSymbolName();
+ return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
+ }
+ }
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrInfo.h b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrInfo.h
index 450641d96b74..1a1bbbf9154f 100644
--- a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrInfo.h
@@ -24,6 +24,11 @@ namespace llvm {
class CSKYSubtarget;
class CSKYInstrInfo : public CSKYGenInstrInfo {
+ bool v2sf;
+ bool v2df;
+ bool v3sf;
+ bool v3df;
+
protected:
const CSKYSubtarget &STI;
@@ -50,6 +55,28 @@ public:
const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
bool KillSrc) const override;
+ unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
+ const DebugLoc &DL,
+ int *BytesAdded = nullptr) const override;
+
+ bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify = false) const override;
+
+ unsigned removeBranch(MachineBasicBlock &MBB,
+ int *BytesRemoved = nullptr) const override;
+
+ bool
+ reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
+
+ MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const override;
+
+ unsigned getInstSizeInBytes(const MachineInstr &MI) const override;
+
+ Register getGlobalBaseReg(MachineFunction &MF) const;
+
// Materializes the given integer Val into DstReg.
Register movImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, int64_t Val,
diff --git a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrInfo.td b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrInfo.td
index 30d9206eec68..a782efe7f4f4 100644
--- a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrInfo.td
+++ b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrInfo.td
@@ -15,22 +15,42 @@
// CSKY specific DAG Nodes.
//===----------------------------------------------------------------------===//
+// Target-independent type requirements, but with target-specific formats.
def SDT_CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>,
SDTCisVT<1, i32>]>;
def SDT_CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>,
SDTCisVT<1, i32>]>;
+def SDT_CSKYCall : SDTypeProfile<0, 2, [SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;
+
+def SDT_CSKYCallReg : SDTypeProfile<0, 1, [SDTCisVT<0, iPTR>]>;
+
+def SDT_CSKY_LOADADDR : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
+ SDTCisVT<1, iPTR>, SDTCisVT<2, iPTR>]>;
+
def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_CallSeqStart,
[SDNPHasChain, SDNPOutGlue]>;
-
def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_CallSeqEnd,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
-// Target-dependent nodes.
def CSKY_RET : SDNode<"CSKYISD::RET", SDTNone,
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
+def CSKY_CALL : SDNode<"CSKYISD::CALL", SDT_CSKYCall,
+ [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue, SDNPVariadic]>;
+
+def CSKY_CALLReg : SDNode<"CSKYISD::CALLReg", SDT_CSKYCallReg,
+ [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue, SDNPVariadic]>;
+
+def CSKY_TAIL : SDNode<"CSKYISD::TAIL", SDT_CSKYCall,
+ [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue, SDNPVariadic]>;
+
+def CSKY_TAILReg : SDNode<"CSKYISD::TAILReg", SDT_CSKYCallReg,
+ [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue, SDNPVariadic]>;
+
+def CSKY_LOAD_ADDR : SDNode<"CSKYISD::LOAD_ADDR", SDT_CSKY_LOADADDR>;
+
//===----------------------------------------------------------------------===//
// Operand and SDNode transformation definitions.
//===----------------------------------------------------------------------===//
@@ -57,6 +77,24 @@ def to_tframeindex : SDNodeXForm<frameindex, [{
return CurDAG->getTargetFrameIndex(FI->getIndex(), TLI->getPointerTy(CurDAG->getDataLayout()));
}]>;
+def to_tconstpool : SDNodeXForm<constpool, [{
+ auto CP = cast<ConstantPoolSDNode>(N);
+ return CurDAG->getTargetConstantPool(CP->getConstVal(), TLI->getPointerTy(CurDAG->getDataLayout()),
+ CP->getAlign(), CP->getOffset(), CSKYII::MO_None);
+}]>;
+
+def to_tconstpool_hi16 : SDNodeXForm<constpool, [{
+ auto CP = cast<ConstantPoolSDNode>(N);
+ return CurDAG->getTargetConstantPool(CP->getConstVal(), TLI->getPointerTy(CurDAG->getDataLayout()),
+ CP->getAlign(), CP->getOffset(), CSKYII::MO_ADDR_HI16);
+}]>;
+
+def to_tconstpool_lo16 : SDNodeXForm<constpool, [{
+ auto CP = cast<ConstantPoolSDNode>(N);
+ return CurDAG->getTargetConstantPool(CP->getConstVal(), TLI->getPointerTy(CurDAG->getDataLayout()),
+ CP->getAlign(), CP->getOffset(), CSKYII::MO_ADDR_LO16);
+}]>;
+
class oimm<int num> : Operand<i32>,
ImmLeaf<i32, "return isUInt<"#num#">(Imm - 1);"> {
let EncoderMethod = "getOImmOpValue";
@@ -1055,6 +1093,178 @@ let Predicates = [iHas2E3] in {
def : Pat<(sext_inreg GPR:$src, i1), (SEXT32 GPR:$src, 0, 0)>;
def : Pat<(and GPR:$src, 255), (ZEXT32 GPR:$src, 7, 0)>;
def : Pat<(and GPR:$src, 65535), (ZEXT32 GPR:$src, 15, 0)>;
+
+ // Call Patterns
+ def : Pat<(CSKY_CALL tglobaladdr, tconstpool:$src2), (JSRI32 tconstpool:$src2)>;
+ def : Pat<(CSKY_CALL texternalsym, tconstpool:$src2), (JSRI32 tconstpool:$src2)>;
+ def : Pat<(CSKY_TAIL tglobaladdr, tconstpool:$src2), (JMPI32 tconstpool:$src2)>;
+ def : Pat<(CSKY_TAIL texternalsym, tconstpool:$src2), (JMPI32 tconstpool:$src2)>;
+
+ def : Pat<(CSKY_CALLReg GPR:$src), (JSR32 GPR:$src)>;
+ def : Pat<(CSKY_TAILReg GPR:$src), (JMP32 GPR:$src)>;
+}
+
+// Symbol address Patterns
+def : Pat<(CSKY_LOAD_ADDR tglobaladdr, tconstpool:$src2), (LRW32 tconstpool:$src2)>;
+def : Pat<(CSKY_LOAD_ADDR tblockaddress, tconstpool:$src2), (LRW32 tconstpool:$src2)>;
+def : Pat<(CSKY_LOAD_ADDR tjumptable:$src1, tconstpool:$src2), (LRW32_Gen tjumptable:$src1, tconstpool:$src2)>;
+def : Pat<(CSKY_LOAD_ADDR texternalsym, tconstpool:$src2), (LRW32 tconstpool:$src2)>;
+
+let Predicates = [iHas2E3] in
+ def : Pat<(i32 constpool:$src), (GRS32 (to_tconstpool tconstpool:$src))>;
+
+let Predicates = [iHasE2] in
+ def : Pat<(i32 constpool:$src),
+ (ORI32 (MOVIH32 (to_tconstpool_hi16 tconstpool:$src)),
+ (to_tconstpool_lo16 tconstpool:$src))>;
+
+def : Pat<(i32 (load constpool:$src)), (LRW32 (to_tconstpool tconstpool:$src))>;
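+// The address of a constant-pool entry is materialized with GRS32 under
+// iHas2E3 and with a MOVIH32/ORI32 pair of the address halves under iHasE2;
+// loading the pooled value itself goes through LRW32.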
+
+// Branch Patterns.
+let Predicates = [iHasE2] in {
+ def : Pat<(brcond CARRY:$ca, bb:$imm16),
+ (BT32 CARRY:$ca, bb:$imm16)>;
+
+ def : Pat<(brcond (i32 (setne GPR:$rs1, uimm16:$rs2)), bb:$imm16),
+ (BT32 (CMPNEI32 GPR:$rs1, uimm16:$rs2), bb:$imm16)>;
+ def : Pat<(brcond (i32 (seteq GPR:$rs1, uimm16:$rs2)), bb:$imm16),
+ (BF32 (CMPNEI32 GPR:$rs1, uimm16:$rs2), bb:$imm16)>;
+ def : Pat<(brcond (i32 (setuge GPR:$rs1, oimm16:$rs2)), bb:$imm16),
+ (BT32 (CMPHSI32 GPR:$rs1, oimm16:$rs2), bb:$imm16)>;
+ def : Pat<(brcond (i32 (setult GPR:$rs1, oimm16:$rs2)), bb:$imm16),
+ (BF32 (CMPHSI32 GPR:$rs1, oimm16:$rs2), bb:$imm16)>;
+ def : Pat<(brcond (i32 (setlt GPR:$rs1, oimm16:$rs2)), bb:$imm16),
+ (BT32 (CMPLTI32 GPR:$rs1, oimm16:$rs2), bb:$imm16)>;
+ def : Pat<(brcond (i32 (setge GPR:$rs1, oimm16:$rs2)), bb:$imm16),
+ (BF32 (CMPLTI32 GPR:$rs1, oimm16:$rs2), bb:$imm16)>;
+
+}
+
+let Predicates = [iHas2E3] in {
+
+def : Pat<(brcond (i32 (setne GPR:$rs1, GPR:$rs2)), bb:$imm16),
+ (BT32 (CMPNE32 GPR:$rs1, GPR:$rs2), bb:$imm16)>;
+def : Pat<(brcond (i32 (seteq GPR:$rs1, GPR:$rs2)), bb:$imm16),
+ (BF32 (CMPNE32 GPR:$rs1, GPR:$rs2), bb:$imm16)>;
+def : Pat<(brcond (i32 (setuge GPR:$rs1, GPR:$rs2)), bb:$imm16),
+ (BT32 (CMPHS32 GPR:$rs1, GPR:$rs2), bb:$imm16)>;
+def : Pat<(brcond (i32 (setule GPR:$rs1, GPR:$rs2)), bb:$imm16),
+ (BT32 (CMPHS32 GPR:$rs2, GPR:$rs1), bb:$imm16)>;
+def : Pat<(brcond (i32 (setult GPR:$rs1, GPR:$rs2)), bb:$imm16),
+ (BF32 (CMPHS32 GPR:$rs1, GPR:$rs2), bb:$imm16)>;
+def : Pat<(brcond (i32 (setugt GPR:$rs1, GPR:$rs2)), bb:$imm16),
+ (BF32 (CMPHS32 GPR:$rs2, GPR:$rs1), bb:$imm16)>;
+def : Pat<(brcond (i32 (setlt GPR:$rs1, GPR:$rs2)), bb:$imm16),
+ (BT32 (CMPLT32 GPR:$rs1, GPR:$rs2), bb:$imm16)>;
+def : Pat<(brcond (i32 (setgt GPR:$rs1, GPR:$rs2)), bb:$imm16),
+ (BT32 (CMPLT32 GPR:$rs2, GPR:$rs1), bb:$imm16)>;
+def : Pat<(brcond (i32 (setge GPR:$rs1, GPR:$rs2)), bb:$imm16),
+ (BF32 (CMPLT32 GPR:$rs1, GPR:$rs2), bb:$imm16)>;
+def : Pat<(brcond (i32 (setle GPR:$rs1, GPR:$rs2)), bb:$imm16),
+ (BF32 (CMPLT32 GPR:$rs2, GPR:$rs1), bb:$imm16)>;
+
+def : Pat<(brcond (i32 (seteq GPR:$rs1, (i32 0))), bb:$imm16),
+ (BEZ32 GPR:$rs1, bb:$imm16)>;
+def : Pat<(brcond (i32 (setne GPR:$rs1, (i32 0))), bb:$imm16),
+ (BNEZ32 GPR:$rs1, bb:$imm16)>;
+def : Pat<(brcond (i32 (setlt GPR:$rs1, (i32 0))), bb:$imm16),
+ (BLZ32 GPR:$rs1, bb:$imm16)>;
+def : Pat<(brcond (i32 (setge GPR:$rs1, (i32 0))), bb:$imm16),
+ (BHSZ32 GPR:$rs1, bb:$imm16)>;
+def : Pat<(brcond (i32 (setgt GPR:$rs1, (i32 0))), bb:$imm16),
+ (BHZ32 GPR:$rs1, bb:$imm16)>;
+def : Pat<(brcond (i32 (setle GPR:$rs1, (i32 0))), bb:$imm16),
+ (BLSZ32 GPR:$rs1, bb:$imm16)>;
+}
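+// The only register-register compares are CMPNE32/CMPHS32/CMPLT32, so the
+// remaining orderings above are obtained by swapping the operands (e.g.
+// setule a, b branches on CMPHS32 b, a) or by using the BF32
+// (condition-false) form of the branch; compares against zero map onto the
+// dedicated BEZ32/BNEZ32/BLZ32/BHSZ32/BHZ32/BLSZ32 branches.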
+
+// Compare Patterns.
+let Predicates = [iHas2E3] in {
+ def : Pat<(setne GPR:$rs1, GPR:$rs2),
+ (CMPNE32 GPR:$rs1, GPR:$rs2)>;
+ def : Pat<(i32 (seteq GPR:$rs1, GPR:$rs2)),
+ (MVCV32 (CMPNE32 GPR:$rs1, GPR:$rs2))>;
+ def : Pat<(setuge GPR:$rs1, GPR:$rs2),
+ (CMPHS32 GPR:$rs1, GPR:$rs2)>;
+ def : Pat<(setule GPR:$rs1, GPR:$rs2),
+ (CMPHS32 GPR:$rs2, GPR:$rs1)>;
+ def : Pat<(i32 (setult GPR:$rs1, GPR:$rs2)),
+ (MVCV32 (CMPHS32 GPR:$rs1, GPR:$rs2))>;
+ def : Pat<(i32 (setugt GPR:$rs1, GPR:$rs2)),
+ (MVCV32 (CMPHS32 GPR:$rs2, GPR:$rs1))>;
+ def : Pat<(setlt GPR:$rs1, GPR:$rs2),
+ (CMPLT32 GPR:$rs1, GPR:$rs2)>;
+ def : Pat<(setgt GPR:$rs1, GPR:$rs2),
+ (CMPLT32 GPR:$rs2, GPR:$rs1)>;
+ def : Pat<(i32 (setge GPR:$rs1, GPR:$rs2)),
+ (MVCV32 (CMPLT32 GPR:$rs1, GPR:$rs2))>;
+ def : Pat<(i32 (setle GPR:$rs1, GPR:$rs2)),
+ (MVCV32 (CMPLT32 GPR:$rs2, GPR:$rs1))>;
+}
+
+let Predicates = [iHasE2] in {
+ def : Pat<(setne GPR:$rs1, uimm16:$rs2),
+ (CMPNEI32 GPR:$rs1, uimm16:$rs2)>;
+ let Predicates = [iHas2E3] in
+ def : Pat<(i32 (seteq GPR:$rs1, uimm16:$rs2)),
+ (MVCV32 (CMPNEI32 GPR:$rs1, uimm16:$rs2))>;
+ def : Pat<(setuge GPR:$rs1, oimm16:$rs2),
+ (CMPHSI32 GPR:$rs1, oimm16:$rs2)>;
+ let Predicates = [iHas2E3] in
+ def : Pat<(i32 (setult GPR:$rs1, oimm16:$rs2)),
+ (MVCV32 (CMPHSI32 GPR:$rs1, oimm16:$rs2))>;
+ def : Pat<(setlt GPR:$rs1, oimm16:$rs2),
+ (CMPLTI32 GPR:$rs1, oimm16:$rs2)>;
+ let Predicates = [iHas2E3] in
+ def : Pat<(i32 (setge GPR:$rs1, oimm16:$rs2)),
+ (MVCV32 (CMPLTI32 GPR:$rs1, oimm16:$rs2))>;
+}
+
+// Select Patterns.
+let Predicates = [iHasE2] in {
+def : Pat<(select CARRY:$ca, GPR:$rx, GPR:$false),
+ (MOVT32 CARRY:$ca, GPR:$rx, GPR:$false)>;
+def : Pat<(select (and CARRY:$ca, 1), GPR:$rx, GPR:$false),
+ (MOVT32 CARRY:$ca, GPR:$rx, GPR:$false)>;
+
+def : Pat<(select (i32 (setne GPR:$rs1, GPR:$rs2)), GPR:$rx, GPR:$false),
+ (MOVT32 (CMPNE32 GPR:$rs1, GPR:$rs2), GPR:$rx, GPR:$false)>;
+def : Pat<(select (i32 (setne GPR:$rs1, uimm16:$rs2)), GPR:$rx, GPR:$false),
+ (MOVT32 (CMPNEI32 GPR:$rs1, uimm16:$rs2), GPR:$rx, GPR:$false)>;
+def : Pat<(select (i32 (seteq GPR:$rs1, GPR:$rs2)), GPR:$rx, GPR:$false),
+ (MOVF32 (CMPNE32 GPR:$rs1, GPR:$rs2), GPR:$rx, GPR:$false)>;
+def : Pat<(select (i32 (seteq GPR:$rs1, uimm16:$rs2)), GPR:$rx, GPR:$false),
+ (MOVF32 (CMPNEI32 GPR:$rs1, uimm16:$rs2), GPR:$rx, GPR:$false)>;
+
+def : Pat<(select (i32 (setuge GPR:$rs1, GPR:$rs2)), GPR:$rx, GPR:$false),
+ (MOVT32 (CMPHS32 GPR:$rs1, GPR:$rs2), GPR:$rx, GPR:$false)>;
+def : Pat<(select (i32 (setuge GPR:$rs1, oimm16:$rs2)), GPR:$rx, GPR:$false),
+ (MOVT32 (CMPHSI32 GPR:$rs1, oimm16:$rs2), GPR:$rx, GPR:$false)>;
+def : Pat<(select (i32 (setule GPR:$rs1, GPR:$rs2)), GPR:$rx, GPR:$false),
+ (MOVT32 (CMPHS32 GPR:$rs2, GPR:$rs1), GPR:$rx, GPR:$false)>;
+def : Pat<(select (i32 (setult GPR:$rs1, GPR:$rs2)), GPR:$rx, GPR:$false),
+ (MOVF32 (CMPHS32 GPR:$rs1, GPR:$rs2), GPR:$rx, GPR:$false)>;
+def : Pat<(select (i32 (setult GPR:$rs1, oimm16:$rs2)), GPR:$rx, GPR:$false),
+ (MOVF32 (CMPHSI32 GPR:$rs1, oimm16:$rs2), GPR:$rx, GPR:$false)>;
+def : Pat<(select (i32 (setugt GPR:$rs1, GPR:$rs2)), GPR:$rx, GPR:$false),
+ (MOVF32 (CMPHS32 GPR:$rs2, GPR:$rs1), GPR:$rx, GPR:$false)>;
+
+def : Pat<(select (i32 (setlt GPR:$rs1, GPR:$rs2)), GPR:$rx, GPR:$false),
+ (MOVT32 (CMPLT32 GPR:$rs1, GPR:$rs2), GPR:$rx, GPR:$false)>;
+def : Pat<(select (i32 (setlt GPR:$rs1, oimm16:$rs2)), GPR:$rx, GPR:$false),
+ (MOVT32 (CMPLTI32 GPR:$rs1, oimm16:$rs2), GPR:$rx, GPR:$false)>;
+def : Pat<(select (i32 (setgt GPR:$rs1, GPR:$rs2)), GPR:$rx, GPR:$false),
+ (MOVT32 (CMPLT32 GPR:$rs2, GPR:$rs1), GPR:$rx, GPR:$false)>;
+def : Pat<(select (i32 (setge GPR:$rs1, GPR:$rs2)), GPR:$rx, GPR:$false),
+ (MOVF32 (CMPLT32 GPR:$rs1, GPR:$rs2), GPR:$rx, GPR:$false)>;
+def : Pat<(select (i32 (setge GPR:$rs1, oimm16:$rs2)), GPR:$rx, GPR:$false),
+ (MOVF32 (CMPLTI32 GPR:$rs1, oimm16:$rs2), GPR:$rx, GPR:$false)>;
+def : Pat<(select (i32 (setle GPR:$rs1, GPR:$rs2)), GPR:$rx, GPR:$false),
+ (MOVF32 (CMPLT32 GPR:$rs2, GPR:$rs1), GPR:$rx, GPR:$false)>;
+
+def : Pat<(select CARRY:$ca, GPR:$rx, GPR:$false),
+ (ISEL32 CARRY:$ca, GPR:$rx, GPR:$false)>;
+def : Pat<(select (and CARRY:$ca, 1), GPR:$rx, GPR:$false),
+ (ISEL32 CARRY:$ca, GPR:$rx, GPR:$false)>;
}
// Constant materialize patterns.
@@ -1150,3 +1360,5 @@ def CONSTPOOL_ENTRY : CSKYPseudo<(outs),
(ins i32imm:$instid, i32imm:$cpidx, i32imm:$size), "", []>;
include "CSKYInstrInfo16Instr.td"
+include "CSKYInstrInfoF1.td"
+include "CSKYInstrInfoF2.td"
diff --git a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrInfoF1.td b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrInfoF1.td
new file mode 100644
index 000000000000..30cef024f35a
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrInfoF1.td
@@ -0,0 +1,420 @@
+//===- CSKYInstrInfoF1.td - CSKY Instruction Float1.0 ------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the CSKY instructions in TableGen format.
+//
+//===----------------------------------------------------------------------===//
+
+def regseq_f1 : Operand<iPTR> {
+ let EncoderMethod = "getRegisterSeqOpValue";
+ let ParserMatchClass = RegSeqAsmOperand<"V1">;
+ let PrintMethod = "printRegisterSeq";
+ let DecoderMethod = "DecodeRegSeqOperandF1";
+ let MIOperandInfo = (ops sFPR32, uimm5);
+}
+
+def regseq_d1 : Operand<iPTR> {
+ let EncoderMethod = "getRegisterSeqOpValue";
+ let ParserMatchClass = RegSeqAsmOperand<"V1">;
+ let PrintMethod = "printRegisterSeq";
+ let DecoderMethod = "DecodeRegSeqOperandD1";
+ let MIOperandInfo = (ops sFPR64, uimm5);
+}
+
+def sFPR32Op : RegisterOperand<sFPR32, "printFPR">;
+def sFPR64Op : RegisterOperand<sFPR64, "printFPR">;
+def sFPR64_V_OP : RegisterOperand<sFPR64_V, "printFPR">;
+
+include "CSKYInstrFormatsF1.td"
+
+//===----------------------------------------------------------------------===//
+// CSKY specific DAG Nodes.
+//===----------------------------------------------------------------------===//
+
+def SDT_BITCAST_TO_LOHI : SDTypeProfile<2, 1, [SDTCisSameAs<0, 1>]>;
+def CSKY_BITCAST_TO_LOHI : SDNode<"CSKYISD::BITCAST_TO_LOHI", SDT_BITCAST_TO_LOHI>;
+def SDT_BITCAST_FROM_LOHI : SDTypeProfile<1, 2, [SDTCisSameAs<1, 2>]>;
+def CSKY_BITCAST_FROM_LOHI : SDNode<"CSKYISD::BITCAST_FROM_LOHI", SDT_BITCAST_FROM_LOHI>;
+//===----------------------------------------------------------------------===//
+// Operand and SDNode transformation definitions.
+//===----------------------------------------------------------------------===//
+
+def fpimm0 : PatLeaf<(fpimm), [{ return N->isExactlyValue(+0.0); }]>;
+
+def fpimm32_hi16 : SDNodeXForm<fpimm, [{
+ return CurDAG->getTargetConstant(
+ (N->getValueAPF().bitcastToAPInt().getZExtValue() >> 16) & 0xFFFF,
+ SDLoc(N), MVT::i32);
+}]>;
+
+def fpimm32_lo16 : SDNodeXForm<fpimm, [{
+ return CurDAG->getTargetConstant(
+ N->getValueAPF().bitcastToAPInt().getZExtValue() & 0xFFFF,
+ SDLoc(N), MVT::i32);
+}]>;
+
+class fpimm_xform<int width, int shift = 0> : SDNodeXForm<fpimm,
+ "return CurDAG->getTargetConstant(N->getValueAPF().bitcastToAPInt().lshr("#shift#").getLoBits("#width#"), SDLoc(N), MVT::i32);">;
+
+class fpimm_xform_i16<int width, int shift = 0> : SDNodeXForm<fpimm,
+ "return CurDAG->getTargetConstant(N->getValueAPF().bitcastToAPInt().lshr("#shift#").getLoBits("#width#"), SDLoc(N), MVT::i16);">;
+
+class fpimm_t<int width, int shift = 0> : PatLeaf<(fpimm),
+ "return isShiftedUInt<"#width#", "#shift#">(N->getValueAPF().bitcastToAPInt().getZExtValue());">;
+
+def fpimm8 : fpimm_t<8>;
+def fpimm8_8 : fpimm_t<8, 8>;
+def fpimm8_16 : fpimm_t<8, 16>;
+def fpimm8_24 : fpimm_t<8, 24>;
+def fpimm16 : fpimm_t<16>;
+def fpimm16_8 : fpimm_t<16, 8>;
+def fpimm16_16 : fpimm_t<16, 16>;
+def fpimm24 : fpimm_t<24>;
+def fpimm24_8 : fpimm_t<24, 8>;
+def fpimm32 : fpimm_t<32>;
+
+def fpimm8_sr0_XFORM : fpimm_xform<8>;
+def fpimm8_sr8_XFORM : fpimm_xform<8, 8>;
+def fpimm8_sr16_XFORM : fpimm_xform<8, 16>;
+def fpimm8_sr24_XFORM : fpimm_xform<8, 24>;
+
+def fpimm8_sr0_i16_XFORM : fpimm_xform_i16<8>;
+def fpimm8_sr8_i16_XFORM : fpimm_xform_i16<8, 8>;
+
+def fconstpool_symbol : Operand<iPTR> {
+ let ParserMatchClass = Constpool;
+ let EncoderMethod =
+ "getConstpoolSymbolOpValue<CSKY::fixup_csky_pcrel_uimm8_scale4>";
+ let DecoderMethod = "decodeUImmOperand<8, 2>";
+ let PrintMethod = "printConstpool";
+ let OperandType = "OPERAND_PCREL";
+}
+
+
+
+//===----------------------------------------------------------------------===//
+// Instructions
+//===----------------------------------------------------------------------===//
+
+//arithmetic
+
+def FABSM : F_XZ<0x2, 0b000110, "fabsm", "", UnOpFrag<(fabs node:$Src)>, sFPR64_V_OP>;
+def FNEGM : F_XZ<0x2, 0b000111, "fnegm", "", UnOpFrag<(fneg node:$Src)>, sFPR64_V_OP>;
+def FADDM : F_XYZ<0x2, 0b000000, "faddm", "", BinOpFrag<(fadd node:$LHS, node:$RHS)>, sFPR64_V_OP>;
+def FSUBM : F_XYZ<0x2, 0b000001, "fsubm", "", BinOpFrag<(fsub node:$LHS, node:$RHS)>, sFPR64_V_OP>;
+def FMULM : F_XYZ<0x2, 0b010000, "fmulm", "", BinOpFrag<(fmul node:$LHS, node:$RHS)>, sFPR64_V_OP>;
+def FNMULM : F_XYZ<0x2, 0b010001, "fnmulm", "", BinOpFrag<(fneg (fmul node:$LHS, node:$RHS))>, sFPR64_V_OP>;
+def FMACM : F_ACCUM_XYZ<0x2, 0b010100, "fmacm", "", TriOpFrag<(fadd node:$LHS, (fmul node:$MHS, node:$RHS))>, sFPR64_V_OP>;
+def FMSCM : F_ACCUM_XYZ<0x2, 0b010101, "fmscm", "", TriOpFrag<(fsub (fmul node:$MHS, node:$RHS), node:$LHS)>, sFPR64_V_OP>;
+def FNMACM : F_ACCUM_XYZ<0x2, 0b010110, "fnmacm", "", TriOpFrag<(fsub node:$LHS, (fmul node:$MHS, node:$RHS))>, sFPR64_V_OP>;
+def FNMSCM : F_ACCUM_XYZ<0x2, 0b010111, "fnmscm", "", TriOpFrag<(fneg (fadd node:$LHS, (fmul node:$MHS, node:$RHS)))>, sFPR64_V_OP>;
+
+def FMOVM : F_MOV<0x2, 0b000100, "fmovm", "", sFPR64_V_OP>;
+
+defm FABS : FT_XZ<0b000110, "fabs", UnOpFrag<(fabs node:$Src)>>;
+defm FNEG : FT_XZ<0b000111, "fneg", UnOpFrag<(fneg node:$Src)>>;
+defm FSQRT : FT_XZ<0b011010, "fsqrt", UnOpFrag<(fsqrt node:$Src)>>;
+
+defm FADD : FT_XYZ<0b000000, "fadd", BinOpFrag<(fadd node:$LHS, node:$RHS)>>;
+defm FSUB : FT_XYZ<0b000001, "fsub", BinOpFrag<(fsub node:$LHS, node:$RHS)>>;
+defm FDIV : FT_XYZ<0b011000, "fdiv", BinOpFrag<(fdiv node:$LHS, node:$RHS)>>;
+defm FMUL : FT_XYZ<0b010000, "fmul", BinOpFrag<(fmul node:$LHS, node:$RHS)>>;
+defm FNMUL : FT_XYZ<0b010001, "fnmul", BinOpFrag<(fneg (fmul node:$LHS, node:$RHS))>>;
+defm FMAC : FT_ACCUM_XYZ<0b010100, "fmac", TriOpFrag<(fadd node:$LHS, (fmul node:$MHS, node:$RHS))>>;
+defm FMSC : FT_ACCUM_XYZ<0b010101, "fmsc", TriOpFrag<(fsub (fmul node:$MHS, node:$RHS), node:$LHS)>>;
+defm FNMAC : FT_ACCUM_XYZ<0b010110, "fnmac", TriOpFrag<(fsub node:$LHS, (fmul node:$MHS, node:$RHS))>>;
+defm FNMSC : FT_ACCUM_XYZ<0b010111, "fnmsc", TriOpFrag<(fneg (fadd node:$LHS, (fmul node:$MHS, node:$RHS)))>>;
+
+defm FCMPHS : FT_CMPXY<0b001100, "fcmphs">;
+defm FCMPLT : FT_CMPXY<0b001101, "fcmplt">;
+defm FCMPNE : FT_CMPXY<0b001110, "fcmpne">;
+defm FCMPUO : FT_CMPXY<0b001111, "fcmpuo">;
+defm FCMPZHS : FT_CMPZX<0b001000, "fcmpzhs">;
+defm FCMPZLS : FT_CMPZX<0b001001, "fcmpzls">;
+defm FCMPZNE : FT_CMPZX<0b001010, "fcmpzne">;
+defm FCMPZUO : FT_CMPZX<0b001011, "fcmpzuo">;
+
+defm FRECIP : FT_MOV<0b011001, "frecip">;
+
+//fmov, fmtvr, fmfvr
+defm FMOV : FT_MOV<0b000100, "fmov">;
+def FMFVRL : F_XZ_GF<3, 0b011001, (outs GPR:$rz), (ins sFPR32Op:$vrx),
+ "fmfvrl\t$rz, $vrx", [(set GPR:$rz, (bitconvert sFPR32Op:$vrx))]>;
+def FMTVRL : F_XZ_FG<3, 0b011011, (outs sFPR32Op:$vrz), (ins GPR:$rx),
+ "fmtvrl\t$vrz, $rx", [(set sFPR32Op:$vrz, (bitconvert GPR:$rx))]>;
+
+let Predicates = [HasFPUv2_DF] in {
+ let isCodeGenOnly = 1 in
+ def FMFVRL_D : F_XZ_GF<3, 0b011001, (outs GPR:$rz), (ins sFPR64Op:$vrx),
+ "fmfvrl\t$rz, $vrx", []>;
+ def FMFVRH_D : F_XZ_GF<3, 0b011000, (outs GPR:$rz), (ins sFPR64Op:$vrx),
+ "fmfvrh\t$rz, $vrx", []>;
+ let isCodeGenOnly = 1 in
+ def FMTVRL_D : F_XZ_FG<3, 0b011011, (outs sFPR64Op:$vrz), (ins GPR:$rx),
+ "fmtvrl\t$vrz, $rx", []>;
+let Constraints = "$vrZ = $vrz" in
+ def FMTVRH_D : F_XZ_FG<3, 0b011010, (outs sFPR64Op:$vrz), (ins sFPR64Op:$vrZ, GPR:$rx),
+ "fmtvrh\t$vrz, $rx", []>;
+}
+
+//fcvt
+
+def FSITOS : F_XZ_TRANS<0b010000, "fsitos", sFPR32Op, sFPR32Op>;
+def : Pat<(f32 (sint_to_fp GPR:$a)),
+ (FSITOS (COPY_TO_REGCLASS GPR:$a, sFPR32))>,
+ Requires<[HasFPUv2_SF]>;
+
+def FUITOS : F_XZ_TRANS<0b010001, "fuitos", sFPR32Op, sFPR32Op>;
+def : Pat<(f32 (uint_to_fp GPR:$a)),
+ (FUITOS (COPY_TO_REGCLASS GPR:$a, sFPR32))>,
+ Requires<[HasFPUv2_SF]>;
+
+def FSITOD : F_XZ_TRANS<0b010100, "fsitod", sFPR64Op, sFPR64Op>;
+def : Pat<(f64 (sint_to_fp GPR:$a)),
+ (FSITOD (COPY_TO_REGCLASS GPR:$a, sFPR64))>,
+ Requires<[HasFPUv2_DF]>;
+
+def FUITOD : F_XZ_TRANS<0b010101, "fuitod", sFPR64Op, sFPR64Op>;
+def : Pat<(f64 (uint_to_fp GPR:$a)),
+ (FUITOD (COPY_TO_REGCLASS GPR:$a, sFPR64))>,
+ Requires<[HasFPUv2_DF]>;
+
+let Predicates = [HasFPUv2_DF] in {
+def FDTOS : F_XZ_TRANS_DS<0b010110,"fdtos", UnOpFrag<(fpround node:$Src)>>;
+def FSTOD : F_XZ_TRANS_SD<0b010111,"fstod", UnOpFrag<(fpextend node:$Src)>>;
+}
+
+def rpiFSTOSI : F_XZ_TRANS<0b000010, "fstosi.rpi", sFPR32Op, sFPR32Op>;
+def rpiFSTOUI : F_XZ_TRANS<0b000110, "fstoui.rpi", sFPR32Op, sFPR32Op>;
+def rzFSTOSI : F_XZ_TRANS<0b000001, "fstosi.rz", sFPR32Op, sFPR32Op>;
+def rzFSTOUI : F_XZ_TRANS<0b000101, "fstoui.rz", sFPR32Op, sFPR32Op>;
+def rnFSTOSI : F_XZ_TRANS<0b000000, "fstosi.rn", sFPR32Op, sFPR32Op>;
+def rnFSTOUI : F_XZ_TRANS<0b000100, "fstoui.rn", sFPR32Op, sFPR32Op>;
+def rniFSTOSI : F_XZ_TRANS<0b000011, "fstosi.rni", sFPR32Op, sFPR32Op>;
+def rniFSTOUI : F_XZ_TRANS<0b000111, "fstoui.rni", sFPR32Op, sFPR32Op>;
+
+let Predicates = [HasFPUv2_DF] in {
+def rpiFDTOSI : F_XZ_TRANS<0b001010, "fdtosi.rpi", sFPR64Op, sFPR64Op>;
+def rpiFDTOUI : F_XZ_TRANS<0b001110, "fdtoui.rpi", sFPR64Op, sFPR64Op>;
+def rzFDTOSI : F_XZ_TRANS<0b001001, "fdtosi.rz", sFPR64Op, sFPR64Op>;
+def rzFDTOUI : F_XZ_TRANS<0b001101, "fdtoui.rz", sFPR64Op, sFPR64Op>;
+def rnFDTOSI : F_XZ_TRANS<0b001000, "fdtosi.rn", sFPR64Op, sFPR64Op>;
+def rnFDTOUI : F_XZ_TRANS<0b001100, "fdtoui.rn", sFPR64Op, sFPR64Op>;
+def rniFDTOSI : F_XZ_TRANS<0b001011, "fdtosi.rni", sFPR64Op, sFPR64Op>;
+def rniFDTOUI : F_XZ_TRANS<0b001111, "fdtoui.rni", sFPR64Op, sFPR64Op>;
+}
+
+multiclass FPToIntegerPats<SDNode round, string SUFFIX> {
+ def : Pat<(i32 (fp_to_sint (round sFPR32Op:$Rn))),
+ (COPY_TO_REGCLASS (!cast<Instruction>(SUFFIX # FSTOSI) sFPR32Op:$Rn), GPR)>,
+ Requires<[HasFPUv2_SF]>;
+ def : Pat<(i32 (fp_to_uint (round sFPR32Op:$Rn))),
+ (COPY_TO_REGCLASS (!cast<Instruction>(SUFFIX # FSTOUI) sFPR32Op:$Rn), GPR)>,
+ Requires<[HasFPUv2_SF]>;
+ def : Pat<(i32 (fp_to_sint (round sFPR64Op:$Rn))),
+ (COPY_TO_REGCLASS (!cast<Instruction>(SUFFIX # FDTOSI) sFPR64Op:$Rn), GPR)>,
+ Requires<[HasFPUv2_DF]>;
+ def : Pat<(i32 (fp_to_uint (round sFPR64Op:$Rn))),
+ (COPY_TO_REGCLASS (!cast<Instruction>(SUFFIX # FDTOUI) sFPR64Op:$Rn), GPR)>,
+ Requires<[HasFPUv2_DF]>;
+}
+
+defm: FPToIntegerPats<fceil, "rpi">;
+defm: FPToIntegerPats<fround, "rn">;
+defm: FPToIntegerPats<ffloor, "rni">;
+
+multiclass FPToIntegerTowardszeroPats<string SUFFIX> {
+ def : Pat<(i32 (fp_to_sint sFPR32Op:$Rn)),
+ (COPY_TO_REGCLASS (!cast<Instruction>(SUFFIX # FSTOSI) sFPR32Op:$Rn), GPR)>,
+ Requires<[HasFPUv2_SF]>;
+ def : Pat<(i32 (fp_to_uint sFPR32Op:$Rn)),
+ (COPY_TO_REGCLASS (!cast<Instruction>(SUFFIX # FSTOUI) sFPR32Op:$Rn), GPR)>,
+ Requires<[HasFPUv2_SF]>;
+ def : Pat<(i32 (fp_to_sint sFPR64Op:$Rn)),
+ (COPY_TO_REGCLASS (!cast<Instruction>(SUFFIX # FDTOSI) sFPR64Op:$Rn), GPR)>,
+ Requires<[HasFPUv2_DF]>;
+ def : Pat<(i32 (fp_to_uint sFPR64Op:$Rn)),
+ (COPY_TO_REGCLASS (!cast<Instruction>(SUFFIX # FDTOUI) sFPR64Op:$Rn), GPR)>,
+ Requires<[HasFPUv2_DF]>;
+}
+
+defm: FPToIntegerTowardszeroPats<"rz">;
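+// The rounding-mode prefix selects which fstosi/fstoui (or fdtosi/fdtoui)
+// variant a conversion maps to: fceil -> .rpi, fround -> .rn, ffloor -> .rni,
+// and a plain (truncating) fp_to_sint/fp_to_uint -> .rz.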
+
+
+//fld, fst
+let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
+ defm FLD : FT_XYAI_LD<0b0010000, "fld">;
+ defm FLDR : FT_XYAR_LD<0b0010100, "fldr">;
+ defm FLDM : FT_XYAR_LDM<0b0011000, "fldm">;
+
+ let Predicates = [HasFPUv2_DF] in
+ def FLDRM : F_XYAR_LD<0b0010101, 0, "fldrm", "", sFPR64Op>;
+ let Predicates = [HasFPUv2_DF] in
+ def FLDMM : F_I4_XY_MEM<0b0011001, 0,
+ (outs), (ins GPR:$rx, regseq_d1:$regs, variable_ops), "fldmm\t$regs, (${rx})", []>;
+ let Predicates = [HasFPUv2_DF] in
+ def FLDM : F_XYAI_LD<0b0010001, 0, "fldm", "", sFPR64Op, uimm8_3>;
+}
+
+
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
+ defm FST : FT_XYAI_ST<0b0010010, "fst">;
+ defm FSTR : FT_XYAR_ST<0b0010110, "fstr">;
+ defm FSTM : FT_XYAR_STM<0b0011010, "fstm">;
+
+ let Predicates = [HasFPUv2_DF] in
+ def FSTRM : F_XYAR_ST<0b0010111, 0, "fstrm", "", sFPR64Op>;
+ let Predicates = [HasFPUv2_DF] in
+ def FSTMM : F_I4_XY_MEM<0b0011011, 0,
+ (outs), (ins GPR:$rx, regseq_d1:$regs, variable_ops), "fstmm\t$regs, (${rx})", []>;
+ let Predicates = [HasFPUv2_DF] in
+ def FSTM : F_XYAI_ST<0b0010011, 0, "fstm", "", sFPR64Op, uimm8_3>;
+}
+
+defm : LdPat<load, uimm8_2, FLD_S, f32>, Requires<[HasFPUv2_SF]>;
+defm : LdPat<load, uimm8_2, FLD_D, f64>, Requires<[HasFPUv2_DF]>;
+defm : LdrPat<load, FLDR_S, f32>, Requires<[HasFPUv2_SF]>;
+defm : LdrPat<load, FLDR_D, f64>, Requires<[HasFPUv2_DF]>;
+
+defm : StPat<store, f32, uimm8_2, FST_S>, Requires<[HasFPUv2_SF]>;
+defm : StPat<store, f64, uimm8_2, FST_D>, Requires<[HasFPUv2_DF]>;
+defm : StrPat<store, f32, FSTR_S>, Requires<[HasFPUv2_SF]>;
+defm : StrPat<store, f64, FSTR_D>, Requires<[HasFPUv2_DF]>;
+
+
+def : Pat<(f32 fpimm16:$imm), (COPY_TO_REGCLASS (MOVI32 (fpimm32_lo16 fpimm16:$imm)), sFPR32)>,
+ Requires<[HasFPUv2_SF]>;
+def : Pat<(f32 fpimm16_16:$imm), (f32 (COPY_TO_REGCLASS (MOVIH32 (fpimm32_hi16 fpimm16_16:$imm)), sFPR32))>,
+ Requires<[HasFPUv2_SF]>;
+def : Pat<(f32 fpimm:$imm), (COPY_TO_REGCLASS (ORI32 (MOVIH32 (fpimm32_hi16 fpimm:$imm)), (fpimm32_lo16 fpimm:$imm)), sFPR32)>,
+ Requires<[HasFPUv2_SF]>;
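+// The three f32 immediate patterns above build the bit pattern in a GPR and
+// copy it into the FPU register file: a value whose high half is zero needs a
+// single MOVI32, one whose low half is zero needs a single MOVIH32 (e.g.
+// 1.0f = 0x3F800000 becomes MOVIH32 0x3f80), and anything else takes a
+// MOVIH32/ORI32 pair.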
+
+def : Pat<(f64(CSKY_BITCAST_FROM_LOHI GPR:$rs1, GPR:$rs2)), (FMTVRH_D(FMTVRL_D GPR:$rs1), GPR:$rs2)>,
+ Requires<[HasFPUv2_DF]>;
+
+multiclass BRCond_Bin<CondCode CC, string Instr, Instruction Br, Instruction MV> {
+ let Predicates = [HasFPUv2_SF] in
+ def : Pat<(brcond (i32 (setcc sFPR32Op:$rs1, sFPR32Op:$rs2, CC)), bb:$imm16),
+ (Br (!cast<Instruction>(Instr#_S) sFPR32Op:$rs1, sFPR32Op:$rs2), bb:$imm16)>;
+ let Predicates = [HasFPUv2_DF] in
+ def : Pat<(brcond (i32 (setcc sFPR64Op:$rs1, sFPR64Op:$rs2, CC)), bb:$imm16),
+ (Br (!cast<Instruction>(Instr#_D) sFPR64Op:$rs1, sFPR64Op:$rs2), bb:$imm16)>;
+
+ let Predicates = [HasFPUv2_SF] in
+ def : Pat<(i32 (setcc sFPR32Op:$rs1, sFPR32Op:$rs2, CC)),
+ (MV (!cast<Instruction>(Instr#_S) sFPR32Op:$rs1, sFPR32Op:$rs2))>;
+ let Predicates = [HasFPUv2_DF] in
+ def : Pat<(i32 (setcc sFPR64Op:$rs1, sFPR64Op:$rs2, CC)),
+ (MV (!cast<Instruction>(Instr#_D) sFPR64Op:$rs1, sFPR64Op:$rs2))>;
+}
+
+multiclass BRCond_Bin_SWAP<CondCode CC, string Instr, Instruction Br, Instruction MV> {
+ let Predicates = [HasFPUv2_SF] in
+ def : Pat<(brcond (i32 (setcc sFPR32Op:$rs1, sFPR32Op:$rs2, CC)), bb:$imm16),
+ (Br (!cast<Instruction>(Instr#_S) sFPR32Op:$rs2, sFPR32Op:$rs1), bb:$imm16)>;
+ let Predicates = [HasFPUv2_DF] in
+ def : Pat<(brcond (i32 (setcc sFPR64Op:$rs1, sFPR64Op:$rs2, CC)), bb:$imm16),
+ (Br (!cast<Instruction>(Instr#_D) sFPR64Op:$rs2, sFPR64Op:$rs1), bb:$imm16)>;
+
+ let Predicates = [HasFPUv2_SF] in
+ def : Pat<(i32 (setcc sFPR32Op:$rs1, sFPR32Op:$rs2, CC)),
+ (MV (!cast<Instruction>(Instr#_S) sFPR32Op:$rs2, sFPR32Op:$rs1))>;
+ let Predicates = [HasFPUv2_DF] in
+ def : Pat<(i32 (setcc sFPR64Op:$rs1, sFPR64Op:$rs2, CC)),
+ (MV (!cast<Instruction>(Instr#_D) sFPR64Op:$rs2, sFPR64Op:$rs1))>;
+}
+
+// The inverse of (ordered && compare) is (unordered || inverse(compare)).
+
+defm : BRCond_Bin<SETUNE, "FCMPNE", BT32, MVC32>;
+defm : BRCond_Bin<SETOEQ, "FCMPNE", BF32, MVCV32>;
+defm : BRCond_Bin<SETOGE, "FCMPHS", BT32, MVC32>;
+defm : BRCond_Bin<SETOLT, "FCMPLT", BT32, MVC32>;
+defm : BRCond_Bin<SETUO, "FCMPUO", BT32, MVC32>;
+defm : BRCond_Bin<SETO, "FCMPUO", BF32, MVCV32>;
+defm : BRCond_Bin_SWAP<SETOGT, "FCMPLT", BT32, MVC32>;
+defm : BRCond_Bin_SWAP<SETOLE, "FCMPHS", BT32, MVC32>;
+
+defm : BRCond_Bin<SETNE, "FCMPNE", BT32, MVC32>;
+defm : BRCond_Bin<SETEQ, "FCMPNE", BF32, MVCV32>;
+defm : BRCond_Bin<SETGE, "FCMPHS", BT32, MVC32>;
+defm : BRCond_Bin<SETLT, "FCMPLT", BT32, MVC32>;
+defm : BRCond_Bin_SWAP<SETGT, "FCMPLT", BT32, MVC32>;
+defm : BRCond_Bin_SWAP<SETLE, "FCMPHS", BT32, MVC32>;
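+// Example of the swap trick used above: there is no "less than or equal"
+// compare, so SETOLE/SETLE go through BRCond_Bin_SWAP, which emits FCMPHS
+// with the operands reversed ("b >= a") and branches on the result.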
+
+// -----------
+
+let Predicates = [HasFPUv2_SF] in {
+ def : Pat<(brcond (i32 (setcc sFPR32Op:$rs1, fpimm0, SETOGE)), bb:$imm16),
+ (BT32 (FCMPZHS_S sFPR32Op:$rs1), bb:$imm16)>;
+ def : Pat<(i32 (setcc sFPR32Op:$rs1, fpimm0, SETOGE)),
+ (MVC32 (FCMPZHS_S sFPR32Op:$rs1))>;
+ def : Pat<(brcond (i32 (setcc sFPR32Op:$rs1, fpimm0, SETOLT)), bb:$imm16),
+ (BF32 (FCMPZHS_S sFPR32Op:$rs1), bb:$imm16)>;
+ def : Pat<(i32 (setcc sFPR32Op:$rs1, fpimm0, SETOLT)),
+ (MVCV32 (FCMPZHS_S sFPR32Op:$rs1))>;
+ def : Pat<(brcond (i32 (setcc sFPR32Op:$rs1, fpimm0, SETOLE)), bb:$imm16),
+ (BT32 (FCMPZLS_S sFPR32Op:$rs1), bb:$imm16)>;
+ def : Pat<(i32 (setcc sFPR32Op:$rs1, fpimm0, SETOLE)),
+ (MVC32 (FCMPZLS_S sFPR32Op:$rs1))>;
+ def : Pat<(brcond (i32 (setcc sFPR32Op:$rs1, fpimm0, SETOGT)), bb:$imm16),
+ (BF32 (FCMPZLS_S sFPR32Op:$rs1), bb:$imm16)>;
+ def : Pat<(i32 (setcc sFPR32Op:$rs1, fpimm0, SETOGT)),
+ (MVCV32 (FCMPZLS_S sFPR32Op:$rs1))>;
+ def : Pat<(brcond (i32 (setcc sFPR32Op:$rs1, fpimm0, SETUNE)), bb:$imm16),
+ (BT32 (FCMPZNE_S sFPR32Op:$rs1), bb:$imm16)>;
+ def : Pat<(i32 (setcc sFPR32Op:$rs1, fpimm0, SETUNE)),
+ (MVC32 (FCMPZNE_S sFPR32Op:$rs1))>;
+ def : Pat<(brcond (i32 (setcc sFPR32Op:$rs1, fpimm0, SETOEQ)), bb:$imm16),
+ (BF32 (FCMPZNE_S sFPR32Op:$rs1), bb:$imm16)>;
+ def : Pat<(i32 (setcc sFPR32Op:$rs1, fpimm0, SETOEQ)),
+ (MVCV32 (FCMPZNE_S sFPR32Op:$rs1))>;
+ def : Pat<(brcond (i32 (setcc sFPR32Op:$rs1, fpimm, SETUO)), bb:$imm16),
+ (BT32 (FCMPZUO_S sFPR32Op:$rs1), bb:$imm16)>;
+ def : Pat<(i32 (setcc sFPR32Op:$rs1, fpimm, SETUO)),
+ (MVC32 (FCMPZUO_S sFPR32Op:$rs1))>;
+ def : Pat<(brcond (i32 (setcc sFPR32Op:$rs1, fpimm, SETO)), bb:$imm16),
+ (BF32 (FCMPZUO_S sFPR32Op:$rs1), bb:$imm16)>;
+ def : Pat<(i32 (setcc sFPR32Op:$rs1, fpimm, SETO)),
+ (MVCV32 (FCMPZUO_S sFPR32Op:$rs1))>;
+ def : Pat<(brcond (i32 (setcc sFPR32Op:$rs1, fpimm0, SETGE)), bb:$imm16),
+ (BT32 (FCMPZHS_S sFPR32Op:$rs1), bb:$imm16)>;
+ def : Pat<(i32 (setcc sFPR32Op:$rs1, fpimm0, SETGE)),
+ (MVC32 (FCMPZHS_S sFPR32Op:$rs1))>;
+ def : Pat<(brcond (i32 (setcc sFPR32Op:$rs1, fpimm0, SETLT)), bb:$imm16),
+ (BF32 (FCMPZHS_S sFPR32Op:$rs1), bb:$imm16)>;
+ def : Pat<(i32 (setcc sFPR32Op:$rs1, fpimm0, SETLT)),
+ (MVCV32 (FCMPZHS_S sFPR32Op:$rs1))>;
+ def : Pat<(brcond (i32 (setcc sFPR32Op:$rs1, fpimm0, SETLE)), bb:$imm16),
+ (BT32 (FCMPZLS_S sFPR32Op:$rs1), bb:$imm16)>;
+ def : Pat<(i32 (setcc sFPR32Op:$rs1, fpimm0, SETLE)),
+ (MVC32 (FCMPZLS_S sFPR32Op:$rs1))>;
+ def : Pat<(brcond (i32 (setcc sFPR32Op:$rs1, fpimm0, SETGT)), bb:$imm16),
+ (BF32 (FCMPZLS_S sFPR32Op:$rs1), bb:$imm16)>;
+ def : Pat<(i32 (setcc sFPR32Op:$rs1, fpimm0, SETGT)),
+ (MVCV32 (FCMPZLS_S sFPR32Op:$rs1))>;
+ def : Pat<(brcond (i32 (setcc sFPR32Op:$rs1, fpimm0, SETNE)), bb:$imm16),
+ (BT32 (FCMPZNE_S sFPR32Op:$rs1), bb:$imm16)>;
+ def : Pat<(i32 (setcc sFPR32Op:$rs1, fpimm0, SETNE)),
+ (MVC32 (FCMPZNE_S sFPR32Op:$rs1))>;
+ def : Pat<(brcond (i32 (setcc sFPR32Op:$rs1, fpimm0, SETEQ)), bb:$imm16),
+ (BF32 (FCMPZNE_S sFPR32Op:$rs1), bb:$imm16)>;
+ def : Pat<(i32 (setcc sFPR32Op:$rs1, fpimm0, SETEQ)),
+ (MVCV32 (FCMPZNE_S sFPR32Op:$rs1))>;
+}
+
+let usesCustomInserter = 1 in {
+ let Predicates = [HasFPUv2_SF] in
+ def FSELS : CSKYPseudo<(outs sFPR32Op:$dst), (ins CARRY:$cond, sFPR32Op:$src1, sFPR32Op:$src2),
+ "!fsels\t$dst, $src1, src2", [(set sFPR32Op:$dst, (select CARRY:$cond, sFPR32Op:$src1, sFPR32Op:$src2))]>;
+
+ let Predicates = [HasFPUv2_DF] in
+ def FSELD : CSKYPseudo<(outs sFPR64Op:$dst), (ins CARRY:$cond, sFPR64Op:$src1, sFPR64Op:$src2),
+ "!fseld\t$dst, $src1, src2", [(set sFPR64Op:$dst, (select CARRY:$cond, sFPR64Op:$src1, sFPR64Op:$src2))]>;
+}
\ No newline at end of file
diff --git a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrInfoF2.td b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrInfoF2.td
new file mode 100644
index 000000000000..8a00e7d9af3a
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYInstrInfoF2.td
@@ -0,0 +1,462 @@
+//===- CSKYInstrInfoF2.td - CSKY Instruction Float2.0 ------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the CSKY instructions in TableGen format.
+//
+//===----------------------------------------------------------------------===//
+
+def regseq_f2 : Operand<i32> {
+ let EncoderMethod = "getRegisterSeqOpValue";
+ let ParserMatchClass = RegSeqAsmOperand<"V2">;
+ let PrintMethod = "printRegisterSeq";
+ let DecoderMethod = "DecodeRegSeqOperandF2";
+ let MIOperandInfo = (ops FPR32, uimm5);
+}
+
+def regseq_d2 : Operand<i32> {
+ let EncoderMethod = "getRegisterSeqOpValue";
+ let ParserMatchClass = RegSeqAsmOperand<"V2">;
+ let PrintMethod = "printRegisterSeq";
+ let DecoderMethod = "DecodeRegSeqOperandD2";
+ let MIOperandInfo = (ops FPR64, uimm5);
+}
+
+def FPR32Op : RegisterOperand<FPR32, "printFPR">;
+def FPR64Op : RegisterOperand<FPR64, "printFPR">;
+
+include "CSKYInstrFormatsF2.td"
+
+// Predicates
+def IsOrAdd: PatFrag<(ops node:$A, node:$B), (or node:$A, node:$B), [{
+ return isOrEquivalentToAdd(N);
+}]>;
+
+//===----------------------------------------------------------------------===//
+// Instructions
+//===----------------------------------------------------------------------===//
+
+defm f2FADD : F2_XYZ_T<0b000000, "fadd", BinOpFrag<(fadd node:$LHS, node:$RHS)>>;
+defm f2FSUB : F2_XYZ_T<0b000001, "fsub", BinOpFrag<(fsub node:$LHS, node:$RHS)>>;
+defm f2FDIV : F2_XYZ_T<0b011000, "fdiv", BinOpFrag<(fdiv node:$LHS, node:$RHS)>>;
+defm f2FMUL : F2_XYZ_T<0b010000, "fmul", BinOpFrag<(fmul node:$LHS, node:$RHS)>>;
+
+defm f2FMAXNM : F2_XYZ_T<0b101000, "fmaxnm", BinOpFrag<(fmaxnum node:$LHS, node:$RHS)>>;
+defm f2FMINNM : F2_XYZ_T<0b101001, "fminnm", BinOpFrag<(fminnum node:$LHS, node:$RHS)>>;
+
+defm f2FABS : F2_XZ_T<0b000110, "fabs", fabs>;
+defm f2FNEG : F2_XZ_T<0b000111, "fneg", fneg>;
+defm f2FSQRT : F2_XZ_T<0b011010, "fsqrt", fsqrt>;
+defm f2FMOV : F2_XZ_SET_T<0b000100, "fmov">;
+def f2FMOVX : F2_XZ_SET<0b00001, FPR32Op, 0b000101, "fmovx.32">;
+
+defm f2RECIP : F2_XZ_SET_T<0b011001, "frecip">;
+
+// fld/fst
+let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
+ def f2FLD_S : F2_LDST_S<0b0, "fld", (outs FPR32Op:$vrz), (ins GPR:$rx, uimm8_2:$imm8)>;
+ let Predicates = [HasFPUv3_DF] in
+ def f2FLD_D : F2_LDST_D<0b0, "fld", (outs FPR64Op:$vrz), (ins GPR:$rx, uimm8_2:$imm8)>;
+}
+let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
+ def f2FST_S : F2_LDST_S<0b1, "fst", (outs), (ins FPR32Op:$vrz, GPR:$rx, uimm8_2:$imm8)>;
+ let Predicates = [HasFPUv3_DF] in
+ def f2FST_D : F2_LDST_D<0b1, "fst", (outs), (ins FPR64Op:$vrz, GPR:$rx, uimm8_2:$imm8)>;
+}
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
+ def f2FSTM_S : F2_LDSTM_S<0b1, 0, "fstm", (outs), (ins GPR:$rx, regseq_f2:$regs, variable_ops)>;
+ let Predicates = [HasFPUv3_DF] in
+ def f2FSTM_D : F2_LDSTM_D<0b1, 0, "fstm", (outs), (ins GPR:$rx, regseq_d2:$regs, variable_ops)>;
+
+ def f2FSTMU_S : F2_LDSTM_S<0b1, 0b100, "fstmu", (outs), (ins GPR:$rx, regseq_f2:$regs, variable_ops)>;
+ let Predicates = [HasFPUv3_DF] in
+ def f2FSTMU_D : F2_LDSTM_D<0b1, 0b100, "fstmu", (outs), (ins GPR:$rx, regseq_d2:$regs, variable_ops)>;
+}
+
+let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
+ def f2FLDM_S : F2_LDSTM_S<0b0, 0, "fldm", (outs), (ins GPR:$rx, regseq_f2:$regs, variable_ops)>;
+ let Predicates = [HasFPUv3_DF] in
+ def f2FLDM_D : F2_LDSTM_D<0b0, 0, "fldm", (outs), (ins GPR:$rx, regseq_d2:$regs, variable_ops)>;
+
+ def f2FLDMU_S : F2_LDSTM_S<0b0, 0b100, "fldmu", (outs), (ins GPR:$rx, regseq_f2:$regs, variable_ops)>;
+ let Predicates = [HasFPUv3_DF] in
+ def f2FLDMU_D : F2_LDSTM_D<0b0, 0b100, "fldmu", (outs), (ins GPR:$rx, regseq_d2:$regs, variable_ops)>;
+}
+
+multiclass FLSR {
+ let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
+ def FLDR_S : F2_LDSTR_S<0b0, "fldr", (outs FPR32Op:$rz), (ins GPR:$rx, GPR:$ry, uimm2:$imm)>;
+ let Predicates = [HasFPUv3_DF] in
+ def FLDR_D : F2_LDSTR_D<0b0, "fldr", (outs FPR64Op:$rz), (ins GPR:$rx, GPR:$ry, uimm2:$imm)>;
+ }
+ let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
+ def FSTR_S : F2_LDSTR_S<0b1, "fstr", (outs), (ins FPR32Op:$rz, GPR:$rx, GPR:$ry, uimm2:$imm)>;
+ let Predicates = [HasFPUv3_DF] in
+ def FSTR_D : F2_LDSTR_D<0b1, "fstr", (outs), (ins FPR64Op:$rz, GPR:$rx, GPR:$ry, uimm2:$imm)>;
+ }
+}
+
+defm f2: FLSR;
+
+def f2FLRW_S : F2_LRW<0b00, 0b0, "flrw.32", (outs FPR32Op:$vrz), (ins fconstpool_symbol:$imm8)>;
+def f2FLRW_D : F2_LRW<0b01, 0b0, "flrw.64", (outs FPR64Op:$vrz), (ins fconstpool_symbol:$imm8)>;
+
+def : Pat<(f32 (load constpool:$src)), (f2FLRW_S (to_tconstpool tconstpool:$src))>, Requires<[HasFPUv3_SF]>;
+def : Pat<(f64 (load constpool:$src)), (f2FLRW_D (to_tconstpool tconstpool:$src))>, Requires<[HasFPUv3_DF]>;
+
+defm : LdPat<load, uimm8_2, f2FLD_S, f32>, Requires<[HasFPUv3_SF]>;
+defm : LdPat<load, uimm8_2, f2FLD_D, f64>, Requires<[HasFPUv3_DF]>;
+defm : LdrPat<load, f2FLDR_S, f32>, Requires<[HasFPUv3_SF]>;
+defm : LdrPat<load, f2FLDR_D, f64>, Requires<[HasFPUv3_DF]>;
+
+defm : StPat<store, f32, uimm8_2, f2FST_S>, Requires<[HasFPUv3_SF]>;
+defm : StPat<store, f64, uimm8_2, f2FST_D>, Requires<[HasFPUv3_DF]>;
+defm : StrPat<store, f32, f2FSTR_S>, Requires<[HasFPUv3_SF]>;
+defm : StrPat<store, f64, f2FSTR_D>, Requires<[HasFPUv3_DF]>;
+
+// fmfvr
+let vry = 0 in
+def f2FMFVRL : F2_XYZ<0b00011, 0b011001, "fmfvr.32.1\t$vrz, $vrx",
+ (outs GPR:$vrz), (ins FPR32Op:$vrx),
+ [(set GPR:$vrz, (bitconvert FPR32Op:$vrx))]>;
+// TODO: vrz and vrz+1
+def f2FMFVRL_2 : F2_XYZ<0b00011, 0b111010, "fmfvr.32.2\t$vrz, $vry, $vrx",
+ (outs GPR:$vrz, GPR:$vry), (ins FPR64Op:$vrx),
+ []>;
+
+let Predicates = [HasFPUv3_DF] in {
+let vry = 0 in {
+let isCodeGenOnly = 1 in
+def f2FMFVRL_D : F2_XYZ<0b00011, 0b011001, "fmfvr.32.1\t$vrz, $vrx",
+ (outs GPR:$vrz), (ins FPR64Op:$vrx),
+ []>;
+def f2FMFVRH_D : F2_XYZ<0b00011, 0b011000, "fmfvrh\t$vrz, $vrx",
+ (outs GPR:$vrz), (ins FPR64Op:$vrx),
+ []>;
+}
+def f2FMFVR_D : F2_XYZ<0b00011, 0b111000, "fmfvr.64\t$vrz, $vry, $vrx",
+ (outs GPR:$vrz, GPR:$vry), (ins FPR64Op:$vrx),
+ [(set GPR:$vrz, GPR:$vry, (CSKY_BITCAST_TO_LOHI FPR64Op:$vrx))]>;
+}
+
+// fmtvr
+def f2FMTVRL : F2_XZ_P<0b00011, 0b011011, "fmtvr.32.1",
+ [(set FPR32Op:$vrz, (bitconvert GPR:$vrx))],
+ (outs FPR32Op:$vrz), (ins GPR:$vrx)>;
+// TODO: vrz and vrz+1
+def f2FMTVRL_2 : F2_XYZ<0b00011, 0b111110, "fmtvr.32.2\t$vrz, $vrx, $vry",
+ (outs FPR32Op:$vrz), (ins GPR:$vrx, GPR:$vry),
+ []>;
+
+let Predicates = [HasFPUv3_DF] in {
+let isCodeGenOnly = 1 in
+def f2FMTVRL_D : F2_XZ_P<0b00011, 0b011011, "fmtvr.32.1",
+ [],
+ (outs FPR64Op:$vrz), (ins GPR:$vrx)>;
+let Constraints = "$vrZ = $vrz" in
+def f2FMTVRH_D : F2_XZ_P<0b00011, 0b011010, "fmtvrh",
+ [],
+ (outs FPR64Op:$vrz), (ins FPR64Op:$vrZ, GPR:$vrx)>;
+def f2FMTVR_D : F2_XYZ<0b00011, 0b111100, "fmtvr.64\t$vrz, $vrx, $vry",
+ (outs FPR64Op:$vrz), (ins GPR:$vrx, GPR:$vry),
+ [(set FPR64Op:$vrz, (CSKY_BITCAST_FROM_LOHI GPR:$vrx, GPR:$vry))]>;
+}
+
+// fcmp
+
+defm f2FCMPHS: F2_CXY_T<0b001100, "fcmphs">;
+defm f2FCMPLT: F2_CXY_T<0b001101, "fcmplt">;
+defm f2FCMPNE: F2_CXY_T<0b001110, "fcmpne">;
+defm f2FCMPUO: F2_CXY_T<0b001111, "fcmpuo">;
+
+defm f2FCMPHSZ: F2_CX_T<0b001000, "fcmphsz">;
+defm f2FCMPHZ : F2_CX_T<0b101010, "fcmphz">;
+defm f2FCMPLSZ: F2_CX_T<0b101011, "fcmplsz">;
+defm f2FCMPLTZ: F2_CX_T<0b001001, "fcmpltz">;
+defm f2FCMPNEZ: F2_CX_T<0b001010, "fcmpnez">;
+defm f2FCMPUOZ: F2_CX_T<0b001011, "fcmpuoz">;
+
+defm f2FMULA : F2_XYZZ_T<0b010100, "fmula",
+ TriOpFrag<(fadd (fmul node:$LHS, node:$MHS), node:$RHS)>>;
+
+defm f2FMULS : F2_XYZZ_T<0b010110, "fmuls",
+ TriOpFrag<(fsub node:$RHS, (fmul node:$LHS, node:$MHS))>>;
+
+defm f2FFMULA : F2_XYZZ_T<0b110000, "ffmula",
+ TriOpFrag<(fma node:$LHS, node:$MHS, node:$RHS)>>;
+
+defm f2FFMULS : F2_XYZZ_T<0b110001, "ffmuls",
+ TriOpFrag<(fma (fneg node:$LHS), node:$MHS, node:$RHS)>>;
+
+defm f2FFNMULA : F2_XYZZ_T<0b110010, "ffnmula",
+ TriOpFrag<(fneg (fma node:$LHS, node:$MHS, node:$RHS))>>;
+
+defm f2FFNMULS : F2_XYZZ_T<0b110011, "ffnmuls",
+ TriOpFrag<(fma node:$LHS, node:$MHS, (fneg node:$RHS))>>;
+
+defm f2FNMULA : F2_XYZZ_T<0b010111, "fnmula",
+ TriOpFrag<(fneg (fadd (fmul node:$LHS, node:$MHS), node:$RHS))>>;
+
+defm f2FNMULS : F2_XYZZ_T<0b010101, "fnmuls",
+ TriOpFrag<(fneg (fsub node:$RHS, (fmul node:$LHS, node:$MHS)))>>;
+
+defm f2FNMUL : F2_XYZ_T<0b010001, "fnmul",
+ BinOpFrag<(fneg (fmul node:$LHS, node:$RHS))>>;
+
+// fcvt
+def f2FFTOS32_S : F2_XZ_P<0b01000, 0b011011, "fftoi.f32.s32", [], (outs FPR32Op:$vrz), (ins FPR32Op:$vrx)>;
+def f2FFTOU32_S : F2_XZ_P<0b01000, 0b011010, "fftoi.f32.u32", [], (outs FPR32Op:$vrz), (ins FPR32Op:$vrx)>;
+def f2FS32TOF_S : F2_XZ_P<0b01001, 0b011011, "fitof.s32.f32", [], (outs FPR32Op:$vrz), (ins FPR32Op:$vrx)>;
+def f2FU32TOF_S : F2_XZ_P<0b01001, 0b011010, "fitof.u32.f32", [], (outs FPR32Op:$vrz), (ins FPR32Op:$vrx)>;
+def f2FFTOXU32_S : F2_XZ_P<0b01000, 0b001010, "fftox.f32.u32", [], (outs FPR32Op:$vrz), (ins FPR32Op:$vrx)>;
+def f2FFTOXS32_S : F2_XZ_P<0b01000, 0b001011, "fftox.f32.s32", [], (outs FPR32Op:$vrz), (ins FPR32Op:$vrx)>;
+def f2FXTOFU32_S : F2_XZ_P<0b01001, 0b001010, "fxtof.u32.f32", [], (outs FPR32Op:$vrz), (ins FPR32Op:$vrx)>;
+def f2FXTOFS32_S : F2_XZ_P<0b01001, 0b001011, "fxtof.s32.f32", [], (outs FPR32Op:$vrz), (ins FPR32Op:$vrx)>;
+let Predicates = [HasFPUv3_DF] in {
+def f2FFTOS32_D : F2_XZ_P<0b01000, 0b011101, "fftoi.f64.s32", [], (outs FPR32Op:$vrz), (ins FPR64Op:$vrx)>;
+def f2FFTOU32_D : F2_XZ_P<0b01000, 0b011100, "fftoi.f64.u32", [], (outs FPR32Op:$vrz), (ins FPR64Op:$vrx)>;
+def f2FS32TOF_D : F2_XZ_P<0b01001, 0b011101, "fitof.s32.f64", [], (outs FPR64Op:$vrz), (ins FPR32Op:$vrx)>;
+def f2FU32TOF_D : F2_XZ_P<0b01001, 0b011100, "fitof.u32.f64", [], (outs FPR64Op:$vrz), (ins FPR32Op:$vrx)>;
+def f2FFTOXU32_D : F2_XZ_P<0b01000, 0b001100, "fftox.f64.u32", [], (outs FPR32Op:$vrz), (ins FPR32Op:$vrx)>;
+def f2FFTOXS32_D : F2_XZ_P<0b01000, 0b001101, "fftox.f64.s32", [], (outs FPR32Op:$vrz), (ins FPR32Op:$vrx)>;
+def f2FXTOFU32_D : F2_XZ_P<0b01001, 0b001100, "fxtof.u32.f64", [], (outs FPR32Op:$vrz), (ins FPR32Op:$vrx)>;
+def f2FXTOFS32_D : F2_XZ_P<0b01001, 0b001101, "fxtof.s32.f64", [], (outs FPR32Op:$vrz), (ins FPR32Op:$vrx)>;
+}
+
+defm f2FF32TOSI32 : F2_XZ_RM<0b00011, 0b0000, "fftoi.f32.s32", (outs FPR32Op:$vrz), (ins FPR32Op:$vrx)>;
+defm f2FF32TOUI32 : F2_XZ_RM<0b00011, 0b0001, "fftoi.f32.u32", (outs FPR32Op:$vrz), (ins FPR32Op:$vrx)>;
+defm f2FF32TOFI32 : F2_XZ_RM<0b01000, 0b1001, "fftofi.f32", (outs FPR32Op:$vrz), (ins FPR32Op:$vrx)>;
+let Predicates = [HasFPUv3_DF] in {
+defm f2FF64TOSI32 : F2_XZ_RM<0b00011, 0b0010, "fftoi.f64.s32", (outs FPR32Op:$vrz), (ins FPR64Op:$vrx)>;
+defm f2FF64TOUI32 : F2_XZ_RM<0b00011, 0b0011, "fftoi.f64.u32", (outs FPR32Op:$vrz), (ins FPR64Op:$vrx)>;
+defm f2FF64TOFI32 : F2_XZ_RM<0b01000, 0b1010, "fftofi.f64", (outs FPR32Op:$vrz), (ins FPR32Op:$vrx)>;
+}
+
+def : Pat<(i32 (fp_to_sint (fround FPR32Op:$vrx))), (COPY_TO_REGCLASS (f2FF32TOSI32_RN $vrx), GPR)>, Requires<[HasFPUv3_SF]>;
+def : Pat<(i32 (fp_to_uint (fround FPR32Op:$vrx))), (COPY_TO_REGCLASS (f2FF32TOUI32_RN $vrx), GPR)>, Requires<[HasFPUv3_SF]>;
+def : Pat<(i32 (fp_to_sint (fceil FPR32Op:$vrx))), (COPY_TO_REGCLASS (f2FF32TOSI32_RPI $vrx), GPR)>, Requires<[HasFPUv3_SF]>;
+def : Pat<(i32 (fp_to_uint (fceil FPR32Op:$vrx))), (COPY_TO_REGCLASS (f2FF32TOUI32_RPI $vrx), GPR)>, Requires<[HasFPUv3_SF]>;
+def : Pat<(i32 (fp_to_sint (ffloor FPR32Op:$vrx))), (COPY_TO_REGCLASS (f2FF32TOSI32_RNI $vrx), GPR)>, Requires<[HasFPUv3_SF]>;
+def : Pat<(i32 (fp_to_uint (ffloor FPR32Op:$vrx))), (COPY_TO_REGCLASS (f2FF32TOUI32_RNI $vrx), GPR)>, Requires<[HasFPUv3_SF]>;
+def : Pat<(i32 (fp_to_sint (ftrunc FPR32Op:$vrx))), (COPY_TO_REGCLASS (f2FF32TOSI32_RZ $vrx), GPR)>, Requires<[HasFPUv3_SF]>;
+def : Pat<(i32 (fp_to_uint (ftrunc FPR32Op:$vrx))), (COPY_TO_REGCLASS (f2FF32TOUI32_RZ $vrx), GPR)>, Requires<[HasFPUv3_SF]>;
+def : Pat<(i32 (fp_to_sint FPR32Op:$vrx)), (COPY_TO_REGCLASS (f2FF32TOSI32_RZ $vrx), GPR)>, Requires<[HasFPUv3_SF]>;
+def : Pat<(i32 (fp_to_uint FPR32Op:$vrx)), (COPY_TO_REGCLASS (f2FF32TOUI32_RZ $vrx), GPR)>, Requires<[HasFPUv3_SF]>;
+
+def : Pat<(i32 (fp_to_sint (fround FPR64Op:$vrx))), (COPY_TO_REGCLASS (f2FF64TOSI32_RN $vrx), GPR)>, Requires<[HasFPUv3_DF]>;
+def : Pat<(i32 (fp_to_uint (fround FPR64Op:$vrx))), (COPY_TO_REGCLASS (f2FF64TOUI32_RN $vrx), GPR)>, Requires<[HasFPUv3_DF]>;
+def : Pat<(i32 (fp_to_sint (fceil FPR64Op:$vrx))), (COPY_TO_REGCLASS (f2FF64TOSI32_RPI $vrx), GPR)>, Requires<[HasFPUv3_DF]>;
+def : Pat<(i32 (fp_to_uint (fceil FPR64Op:$vrx))), (COPY_TO_REGCLASS (f2FF64TOUI32_RPI $vrx), GPR)>, Requires<[HasFPUv3_DF]>;
+def : Pat<(i32 (fp_to_sint (ffloor FPR64Op:$vrx))), (COPY_TO_REGCLASS (f2FF64TOSI32_RNI $vrx), GPR)>, Requires<[HasFPUv3_DF]>;
+def : Pat<(i32 (fp_to_uint (ffloor FPR64Op:$vrx))), (COPY_TO_REGCLASS (f2FF64TOUI32_RNI $vrx), GPR)>, Requires<[HasFPUv3_DF]>;
+def : Pat<(i32 (fp_to_sint (ftrunc FPR64Op:$vrx))), (COPY_TO_REGCLASS (f2FF64TOSI32_RZ $vrx), GPR)>, Requires<[HasFPUv3_DF]>;
+def : Pat<(i32 (fp_to_uint (ftrunc FPR64Op:$vrx))), (COPY_TO_REGCLASS (f2FF64TOUI32_RZ $vrx), GPR)>, Requires<[HasFPUv3_DF]>;
+def : Pat<(i32 (fp_to_sint FPR64Op:$vrx)), (COPY_TO_REGCLASS (f2FF64TOSI32_RZ $vrx), GPR)>, Requires<[HasFPUv3_DF]>;
+def : Pat<(i32 (fp_to_uint FPR64Op:$vrx)), (COPY_TO_REGCLASS (f2FF64TOUI32_RZ $vrx), GPR)>, Requires<[HasFPUv3_DF]>;
+
+def : Pat<(sint_to_fp GPR:$vrx), (f2FS32TOF_S (COPY_TO_REGCLASS $vrx, FPR32))>, Requires<[HasFPUv3_SF]>;
+def : Pat<(uint_to_fp GPR:$vrx), (f2FU32TOF_S (COPY_TO_REGCLASS $vrx, FPR32))>, Requires<[HasFPUv3_SF]>;
+def : Pat<(sint_to_fp GPR:$vrx), (f2FS32TOF_D (COPY_TO_REGCLASS $vrx, FPR32))>, Requires<[HasFPUv3_DF]>;
+def : Pat<(uint_to_fp GPR:$vrx), (f2FU32TOF_D (COPY_TO_REGCLASS $vrx, FPR32))>, Requires<[HasFPUv3_DF]>;
+
+let Predicates = [HasFPUv3_DF] in {
+def f2FDTOS : F2_XZ_P<0b00011, 0b010110, "fdtos", [(set FPR32Op:$vrz, (fpround FPR64Op:$vrx))], (outs FPR32Op:$vrz),
+ (ins FPR64Op:$vrx)>;
+def f2FSTOD : F2_XZ_P<0b00011, 0b010111, "fstod", [(set FPR64Op:$vrz, (fpextend FPR32Op:$vrx))], (outs FPR64Op:$vrz),
+ (ins FPR32Op:$vrx)>;
+}
+
+// fsel
+defm f2FSEL: F2_CXYZ_T<0b111001, "fsel">;
+
+def f2FINS: F2_XZ_SET<0b00000, FPR32Op, 0b011011, "fins.32">;
+
+def : Pat<(f32 fpimm16:$imm),(COPY_TO_REGCLASS (MOVI32 (fpimm32_lo16 fpimm16:$imm)), FPR32)>,
+ Requires<[HasFPUv3_SF]>;
+def : Pat<(f32 fpimm16_16:$imm), (COPY_TO_REGCLASS (MOVIH32 (fpimm32_hi16 fpimm16_16:$imm)), FPR32)>,
+ Requires<[HasFPUv3_SF]>;
+def : Pat<(f32 fpimm:$imm),(COPY_TO_REGCLASS (ORI32 (MOVIH32 (fpimm32_hi16 fpimm:$imm)), (fpimm32_lo16 fpimm:$imm)), FPR32)>,
+ Requires<[HasFPUv3_SF]>;
+
+
+multiclass BRCond_Bin_F2<CondCode CC, string Instr, Instruction Br, Instruction MV, bit IsSelectSwap = 0> {
+ let Predicates = [HasFPUv3_SF] in
+ def : Pat<(brcond (i32 (setcc FPR32Op:$rs1, FPR32Op:$rs2, CC)), bb:$imm16),
+ (Br (!cast<Instruction>(Instr#_S) FPR32Op:$rs1, FPR32Op:$rs2), bb:$imm16)>;
+ let Predicates = [HasFPUv3_DF] in
+ def : Pat<(brcond (i32 (setcc FPR64Op:$rs1, FPR64Op:$rs2, CC)), bb:$imm16),
+ (Br (!cast<Instruction>(Instr#_D) FPR64Op:$rs1, FPR64Op:$rs2), bb:$imm16)>;
+
+ let Predicates = [HasFPUv3_SF] in
+ def : Pat<(i32 (setcc FPR32Op:$rs1, FPR32Op:$rs2, CC)),
+ (MV (!cast<Instruction>(Instr#_S) FPR32Op:$rs1, FPR32Op:$rs2))>;
+ let Predicates = [HasFPUv3_DF] in
+ def : Pat<(i32 (setcc FPR64Op:$rs1, FPR64Op:$rs2, CC)),
+ (MV (!cast<Instruction>(Instr#_D) FPR64Op:$rs1, FPR64Op:$rs2))>;
+
+ let Predicates = [HasFPUv3_SF] in {
+ def : Pat<(select (i32 (setcc FPR32Op:$rs1, FPR32Op:$rs2, CC)), FPR32Op:$rx, FPR32Op:$false),
+ !if(
+ !eq(IsSelectSwap, 0),
+ (f2FSEL_S (!cast<Instruction>(Instr#_S) FPR32Op:$rs1, FPR32Op:$rs2), FPR32Op:$rx, FPR32Op:$false),
+ (f2FSEL_S (!cast<Instruction>(Instr#_S) FPR32Op:$rs1, FPR32Op:$rs2), FPR32Op:$false, FPR32Op:$rx)
+ )>;
+ }
+ let Predicates = [HasFPUv3_DF] in {
+ def : Pat<(select (i32 (setcc FPR64Op:$rs1, FPR64Op:$rs2, CC)), FPR64Op:$rx, FPR64Op:$false),
+ !if(
+ !eq(IsSelectSwap, 0),
+ (f2FSEL_D (!cast<Instruction>(Instr#_D) FPR64Op:$rs1, FPR64Op:$rs2), FPR64Op:$rx, FPR64Op:$false),
+ (f2FSEL_D (!cast<Instruction>(Instr#_D) FPR64Op:$rs1, FPR64Op:$rs2), FPR64Op:$false, FPR64Op:$rx)
+ )>;
+ }
+}
+
+multiclass BRCond_Bin_SWAP_F2<CondCode CC, string Instr, Instruction Br, Instruction MV, bit IsSelectSwap = 0> {
+ let Predicates = [HasFPUv3_SF] in
+ def : Pat<(brcond (i32 (setcc FPR32Op:$rs1, FPR32Op:$rs2, CC)), bb:$imm16),
+ (Br (!cast<Instruction>(Instr#_S) FPR32Op:$rs2, FPR32Op:$rs1), bb:$imm16)>;
+ let Predicates = [HasFPUv3_DF] in
+ def : Pat<(brcond (i32 (setcc FPR64Op:$rs1, FPR64Op:$rs2, CC)), bb:$imm16),
+ (Br (!cast<Instruction>(Instr#_D) FPR64Op:$rs2, FPR64Op:$rs1), bb:$imm16)>;
+
+ let Predicates = [HasFPUv3_SF] in
+ def : Pat<(i32 (setcc FPR32Op:$rs1, FPR32Op:$rs2, CC)),
+ (MV (!cast<Instruction>(Instr#_S) FPR32Op:$rs2, FPR32Op:$rs1))>;
+ let Predicates = [HasFPUv3_DF] in
+ def : Pat<(i32 (setcc FPR64Op:$rs1, FPR64Op:$rs2, CC)),
+ (MV (!cast<Instruction>(Instr#_D) FPR64Op:$rs2, FPR64Op:$rs1))>;
+
+ let Predicates = [HasFPUv3_SF] in {
+ def : Pat<(select (i32 (setcc FPR32Op:$rs1, FPR32Op:$rs2, CC)), FPR32Op:$rx, FPR32Op:$false),
+ !if(
+ !eq(IsSelectSwap, 0),
+ (f2FSEL_S (!cast<Instruction>(Instr#_S) FPR32Op:$rs2, FPR32Op:$rs1), FPR32Op:$rx, FPR32Op:$false),
+ (f2FSEL_S (!cast<Instruction>(Instr#_S) FPR32Op:$rs2, FPR32Op:$rs1), FPR32Op:$false, FPR32Op:$rx)
+ )>;
+ }
+ let Predicates = [HasFPUv3_DF] in {
+ def : Pat<(select (i32 (setcc FPR64Op:$rs1, FPR64Op:$rs2, CC)), FPR64Op:$rx, FPR64Op:$false),
+ !if(
+ !eq(IsSelectSwap, 0),
+ (f2FSEL_D (!cast<Instruction>(Instr#_D) FPR64Op:$rs2, FPR64Op:$rs1), FPR64Op:$rx, FPR64Op:$false),
+ (f2FSEL_D (!cast<Instruction>(Instr#_D) FPR64Op:$rs2, FPR64Op:$rs1), FPR64Op:$false, FPR64Op:$rx)
+ )>;
+ }
+}
+
+// inverse (order && compare) to (unorder || inverse(compare))
+
+defm : BRCond_Bin_F2<SETUNE, "f2FCMPNE", BT32, MVC32>;
+defm : BRCond_Bin_F2<SETOEQ, "f2FCMPNE", BF32, MVCV32, 1>;
+defm : BRCond_Bin_F2<SETOGE, "f2FCMPHS", BT32, MVC32>;
+defm : BRCond_Bin_F2<SETOLT, "f2FCMPLT", BT32, MVC32>;
+defm : BRCond_Bin_F2<SETUO, "f2FCMPUO", BT32, MVC32>;
+defm : BRCond_Bin_F2<SETO, "f2FCMPUO", BF32, MVCV32, 1>;
+defm : BRCond_Bin_SWAP_F2<SETOGT, "f2FCMPLT", BT32, MVC32>;
+defm : BRCond_Bin_SWAP_F2<SETOLE, "f2FCMPHS", BT32, MVC32>;
+
+defm : BRCond_Bin_F2<SETNE, "f2FCMPNE", BT32, MVC32>;
+defm : BRCond_Bin_F2<SETEQ, "f2FCMPNE", BF32, MVCV32, 1>;
+defm : BRCond_Bin_F2<SETGE, "f2FCMPHS", BT32, MVC32>;
+defm : BRCond_Bin_F2<SETLT, "f2FCMPLT", BT32, MVC32>;
+defm : BRCond_Bin_SWAP_F2<SETGT, "f2FCMPLT", BT32, MVC32>;
+defm : BRCond_Bin_SWAP_F2<SETLE, "f2FCMPHS", BT32, MVC32>;
+
+// ------
+
+let Predicates = [HasFPUv3_SF] in {
+ def : Pat<(brcond (i32 (setcc FPR32Op:$rs1, fpimm0, SETOGE)), bb:$imm16),
+ (BT32 (f2FCMPHSZ_S FPR32Op:$rs1), bb:$imm16)>;
+ def : Pat<(i32 (setcc FPR32Op:$rs1, fpimm0, SETOGE)),
+ (MVC32 (f2FCMPHSZ_S FPR32Op:$rs1))>;
+ def : Pat<(select (i32 (setcc FPR32Op:$rs1, fpimm0, SETOGE)), FPR32Op:$rx, FPR32Op:$false),
+ (f2FSEL_S (f2FCMPHSZ_S FPR32Op:$rs1), FPR32Op:$rx, FPR32Op:$false)>;
+ def : Pat<(brcond (i32 (setcc FPR32Op:$rs1, fpimm0, SETOLT)), bb:$imm16),
+ (BT32 (f2FCMPLTZ_S FPR32Op:$rs1), bb:$imm16)>;
+ def : Pat<(i32 (setcc FPR32Op:$rs1, fpimm0, SETOLT)),
+ (MVC32 (f2FCMPLTZ_S FPR32Op:$rs1))>;
+ def : Pat<(select (i32 (setcc FPR32Op:$rs1, fpimm0, SETOLT)), FPR32Op:$rx, FPR32Op:$false),
+ (f2FSEL_S (f2FCMPLTZ_S FPR32Op:$rs1), FPR32Op:$rx, FPR32Op:$false)>;
+ def : Pat<(brcond (i32 (setcc FPR32Op:$rs1, fpimm0, SETOLE)), bb:$imm16),
+ (BT32 (f2FCMPLSZ_S FPR32Op:$rs1), bb:$imm16)>;
+ def : Pat<(i32 (setcc FPR32Op:$rs1, fpimm0, SETOLE)),
+ (MVC32 (f2FCMPLSZ_S FPR32Op:$rs1))>;
+ def : Pat<(select (i32 (setcc FPR32Op:$rs1, fpimm0, SETOLE)), FPR32Op:$rx, FPR32Op:$false),
+ (f2FSEL_S (f2FCMPLSZ_S FPR32Op:$rs1), FPR32Op:$rx, FPR32Op:$false)>;
+ def : Pat<(brcond (i32 (setcc FPR32Op:$rs1, fpimm0, SETOGT)), bb:$imm16),
+ (BT32 (f2FCMPHZ_S FPR32Op:$rs1), bb:$imm16)>;
+ def : Pat<(i32 (setcc FPR32Op:$rs1, fpimm0, SETOGT)),
+ (MVC32 (f2FCMPHZ_S FPR32Op:$rs1))>;
+ def : Pat<(select (i32 (setcc FPR32Op:$rs1, fpimm0, SETOGT)), FPR32Op:$rx, FPR32Op:$false),
+ (f2FSEL_S (f2FCMPHZ_S FPR32Op:$rs1), FPR32Op:$rx, FPR32Op:$false)>;
+ def : Pat<(brcond (i32 (setcc FPR32Op:$rs1, fpimm0, SETUNE)), bb:$imm16),
+ (BT32 (f2FCMPNEZ_S FPR32Op:$rs1), bb:$imm16)>;
+ def : Pat<(i32 (setcc FPR32Op:$rs1, fpimm0, SETUNE)),
+ (MVC32 (f2FCMPNEZ_S FPR32Op:$rs1))>;
+ def : Pat<(select (i32 (setcc FPR32Op:$rs1, fpimm0, SETUNE)), FPR32Op:$rx, FPR32Op:$false),
+ (f2FSEL_S (f2FCMPNEZ_S FPR32Op:$rs1), FPR32Op:$rx, FPR32Op:$false)>;
+ def : Pat<(brcond (i32 (setcc FPR32Op:$rs1, fpimm, SETUO)), bb:$imm16),
+ (BT32 (f2FCMPUOZ_S FPR32Op:$rs1), bb:$imm16)>;
+ def : Pat<(i32 (setcc FPR32Op:$rs1, fpimm, SETUO)),
+ (MVC32 (f2FCMPUOZ_S FPR32Op:$rs1))>;
+ def : Pat<(select (i32 (setcc FPR32Op:$rs1, fpimm, SETUO)), FPR32Op:$rx, FPR32Op:$false),
+ (f2FSEL_S (f2FCMPUOZ_S FPR32Op:$rs1), FPR32Op:$rx, FPR32Op:$false)>;
+ def : Pat<(brcond (i32 (setcc FPR32Op:$rs1, fpimm0, SETGE)), bb:$imm16),
+ (BT32 (f2FCMPHSZ_S FPR32Op:$rs1), bb:$imm16)>;
+ def : Pat<(i32 (setcc FPR32Op:$rs1, fpimm0, SETGE)),
+ (MVC32 (f2FCMPHSZ_S FPR32Op:$rs1))>;
+ def : Pat<(select (i32 (setcc FPR32Op:$rs1, fpimm0, SETGE)), FPR32Op:$rx, FPR32Op:$false),
+ (f2FSEL_S (f2FCMPHSZ_S FPR32Op:$rs1), FPR32Op:$rx, FPR32Op:$false)>;
+ def : Pat<(brcond (i32 (setcc FPR32Op:$rs1, fpimm0, SETLT)), bb:$imm16),
+ (BT32 (f2FCMPLTZ_S FPR32Op:$rs1), bb:$imm16)>;
+ def : Pat<(i32 (setcc FPR32Op:$rs1, fpimm0, SETLT)),
+ (MVC32 (f2FCMPLTZ_S FPR32Op:$rs1))>;
+ def : Pat<(select (i32 (setcc FPR32Op:$rs1, fpimm0, SETLT)), FPR32Op:$rx, FPR32Op:$false),
+ (f2FSEL_S (f2FCMPLTZ_S FPR32Op:$rs1), FPR32Op:$rx, FPR32Op:$false)>;
+ def : Pat<(brcond (i32 (setcc FPR32Op:$rs1, fpimm0, SETLE)), bb:$imm16),
+ (BT32 (f2FCMPLSZ_S FPR32Op:$rs1), bb:$imm16)>;
+ def : Pat<(i32 (setcc FPR32Op:$rs1, fpimm0, SETLE)),
+ (MVC32 (f2FCMPLSZ_S FPR32Op:$rs1))>;
+ def : Pat<(select (i32 (setcc FPR32Op:$rs1, fpimm0, SETLE)), FPR32Op:$rx, FPR32Op:$false),
+ (f2FSEL_S (f2FCMPLSZ_S FPR32Op:$rs1), FPR32Op:$rx, FPR32Op:$false)>;
+ def : Pat<(brcond (i32 (setcc FPR32Op:$rs1, fpimm0, SETGT)), bb:$imm16),
+ (BT32 (f2FCMPHZ_S FPR32Op:$rs1), bb:$imm16)>;
+ def : Pat<(i32 (setcc FPR32Op:$rs1, fpimm0, SETGT)),
+ (MVC32 (f2FCMPHZ_S FPR32Op:$rs1))>;
+ def : Pat<(select (i32 (setcc FPR32Op:$rs1, fpimm0, SETGT)), FPR32Op:$rx, FPR32Op:$false),
+ (f2FSEL_S (f2FCMPHZ_S FPR32Op:$rs1), FPR32Op:$rx, FPR32Op:$false)>;
+
+
+ def : Pat<(brcond (i32 (setcc FPR32Op:$rs1, fpimm, SETO)), bb:$imm16),
+ (BF32 (f2FCMPUOZ_S FPR32Op:$rs1), bb:$imm16)>;
+ def : Pat<(i32 (setcc FPR32Op:$rs1, fpimm, SETO)),
+ (MVCV32 (f2FCMPUOZ_S FPR32Op:$rs1))>;
+ def : Pat<(select (i32 (setcc FPR32Op:$rs1, fpimm, SETO)), FPR32Op:$rx, FPR32Op:$false),
+ (f2FSEL_S (f2FCMPUOZ_S FPR32Op:$rs1), FPR32Op:$false, FPR32Op:$rx)>;
+ def : Pat<(brcond (i32 (setcc FPR32Op:$rs1, fpimm0, SETOEQ)), bb:$imm16),
+ (BF32 (f2FCMPNEZ_S FPR32Op:$rs1), bb:$imm16)>;
+ def : Pat<(i32 (setcc FPR32Op:$rs1, fpimm0, SETOEQ)),
+ (MVCV32 (f2FCMPNEZ_S FPR32Op:$rs1))>;
+ def : Pat<(select (i32 (setcc FPR32Op:$rs1, fpimm0, SETOEQ)), FPR32Op:$rx, FPR32Op:$false),
+ (f2FSEL_S (f2FCMPNEZ_S FPR32Op:$rs1), FPR32Op:$false, FPR32Op:$rx)>;
+ def : Pat<(brcond (i32 (setcc FPR32Op:$rs1, fpimm0, SETEQ)), bb:$imm16),
+ (BF32 (f2FCMPNEZ_S FPR32Op:$rs1), bb:$imm16)>;
+ def : Pat<(i32 (setcc FPR32Op:$rs1, fpimm0, SETEQ)),
+ (MVCV32 (f2FCMPNEZ_S FPR32Op:$rs1))>;
+ def : Pat<(select (i32 (setcc FPR32Op:$rs1, fpimm0, SETEQ)), FPR32Op:$rx, FPR32Op:$false),
+ (f2FSEL_S (f2FCMPNEZ_S FPR32Op:$rs1), FPR32Op:$false, FPR32Op:$rx)>;
+}
+
+
+let Predicates = [HasFPUv3_SF] in
+def : Pat<(select CARRY:$ca, FPR32Op:$rx, FPR32Op:$false),
+ (f2FSEL_S CARRY:$ca, FPR32Op:$rx, FPR32Op:$false)>;
+let Predicates = [HasFPUv3_DF] in
+def : Pat<(select CARRY:$ca, FPR64Op:$rx, FPR64Op:$false),
+  (f2FSEL_D CARRY:$ca, FPR64Op:$rx, FPR64Op:$false)>;
\ No newline at end of file
diff --git a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYRegisterInfo.td b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYRegisterInfo.td
index ade5c7f795af..b7f4fc17166b 100644
--- a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYRegisterInfo.td
+++ b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYRegisterInfo.td
@@ -194,6 +194,8 @@ def FPR64 : RegisterClass<"CSKY", [f64], 64,
def sFPR64 : RegisterClass<"CSKY", [f64], 64,
(add (sequence "F%u_64", 0, 15))>;
+def sFPR64_V : RegisterClass<"CSKY", [v2f32], 32, (add sFPR64)>;
+
def FPR128 : RegisterClass<"CSKY",
[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, v8f16], 128,
(add (sequence "F%u_128", 0, 31))>;
diff --git a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYTargetMachine.cpp b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYTargetMachine.cpp
index 8f61feb6506d..94b24044c27d 100644
--- a/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYTargetMachine.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/CSKY/CSKYTargetMachine.cpp
@@ -23,6 +23,9 @@ using namespace llvm;
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeCSKYTarget() {
RegisterTargetMachine<CSKYTargetMachine> X(getTheCSKYTarget());
+
+ PassRegistry *Registry = PassRegistry::getPassRegistry();
+ initializeCSKYConstantIslandsPass(*Registry);
}
static std::string computeDataLayout(const Triple &TT) {
@@ -92,6 +95,7 @@ public:
}
bool addInstSelector() override;
+ void addPreEmitPass() override;
};
} // namespace
@@ -105,3 +109,7 @@ bool CSKYPassConfig::addInstSelector() {
return false;
}
+
+void CSKYPassConfig::addPreEmitPass() {
+ addPass(createCSKYConstantIslandPass());
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/CSKY/MCTargetDesc/CSKYInstPrinter.cpp b/contrib/llvm-project/llvm/lib/Target/CSKY/MCTargetDesc/CSKYInstPrinter.cpp
index 7001de999a51..07757f03c258 100644
--- a/contrib/llvm-project/llvm/lib/Target/CSKY/MCTargetDesc/CSKYInstPrinter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/CSKY/MCTargetDesc/CSKYInstPrinter.cpp
@@ -73,6 +73,13 @@ void CSKYInstPrinter::printRegName(raw_ostream &O, unsigned RegNo) const {
O << getRegisterName(RegNo);
}
+void CSKYInstPrinter::printFPRRegName(raw_ostream &O, unsigned RegNo) const {
+ if (PrintBranchImmAsAddress)
+ O << getRegisterName(RegNo, CSKY::NoRegAltName);
+ else
+ O << getRegisterName(RegNo);
+}
+
void CSKYInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
const MCSubtargetInfo &STI, raw_ostream &O,
const char *Modifier) {
@@ -201,3 +208,11 @@ const char *CSKYInstPrinter::getRegisterName(unsigned RegNo) {
return getRegisterName(RegNo, ArchRegNames ? CSKY::NoRegAltName
: CSKY::ABIRegAltName);
}
+
+void CSKYInstPrinter::printFPR(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI, raw_ostream &O) {
+ const MCOperand &MO = MI->getOperand(OpNo);
+ assert(MO.isReg());
+
+ printFPRRegName(O, MO.getReg());
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/CSKY/MCTargetDesc/CSKYInstPrinter.h b/contrib/llvm-project/llvm/lib/Target/CSKY/MCTargetDesc/CSKYInstPrinter.h
index f93a342ec6a3..52a1b9276762 100644
--- a/contrib/llvm-project/llvm/lib/Target/CSKY/MCTargetDesc/CSKYInstPrinter.h
+++ b/contrib/llvm-project/llvm/lib/Target/CSKY/MCTargetDesc/CSKYInstPrinter.h
@@ -36,6 +36,8 @@ public:
void printOperand(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI,
raw_ostream &O, const char *Modifier = nullptr);
+ void printFPRRegName(raw_ostream &O, unsigned RegNo) const;
+
// Autogenerated by tblgen.
std::pair<const char *, uint64_t> getMnemonic(const MCInst *MI) override;
void printInstruction(const MCInst *MI, uint64_t Address,
@@ -60,6 +62,8 @@ public:
const MCSubtargetInfo &STI, raw_ostream &O);
void printSPAddr(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI,
raw_ostream &O);
+ void printFPR(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI,
+ raw_ostream &O);
static const char *getRegisterName(unsigned RegNo);
static const char *getRegisterName(unsigned RegNo, unsigned AltIdx);
};
diff --git a/contrib/llvm-project/llvm/lib/Target/CSKY/MCTargetDesc/CSKYMCAsmInfo.cpp b/contrib/llvm-project/llvm/lib/Target/CSKY/MCTargetDesc/CSKYMCAsmInfo.cpp
index 668247bbbd87..543f2e3d43d4 100644
--- a/contrib/llvm-project/llvm/lib/Target/CSKY/MCTargetDesc/CSKYMCAsmInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/CSKY/MCTargetDesc/CSKYMCAsmInfo.cpp
@@ -22,4 +22,6 @@ CSKYMCAsmInfo::CSKYMCAsmInfo(const Triple &TargetTriple) {
AlignmentIsInBytes = false;
SupportsDebugInformation = true;
CommentString = "#";
+
+ ExceptionsType = ExceptionHandling::DwarfCFI;
}
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp
index d131cf896834..15eba89eeb55 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp
@@ -211,8 +211,7 @@ struct HexagonOperand : public MCParsedAsmOperand {
struct ImmTy Imm;
};
- HexagonOperand(KindTy K, MCContext &Context)
- : MCParsedAsmOperand(), Kind(K), Context(Context) {}
+ HexagonOperand(KindTy K, MCContext &Context) : Kind(K), Context(Context) {}
public:
HexagonOperand(const HexagonOperand &o)
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/BitTracker.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/BitTracker.cpp
index 685bafd785df..17adf32750db 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/BitTracker.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/BitTracker.cpp
@@ -940,8 +940,8 @@ void BT::visitBranchesFrom(const MachineInstr &BI) {
// If evaluated successfully add the targets to the cumulative list.
if (Trace) {
dbgs() << " adding targets:";
- for (unsigned i = 0, n = BTs.size(); i < n; ++i)
- dbgs() << " " << printMBBReference(*BTs[i]);
+ for (const MachineBasicBlock *BT : BTs)
+ dbgs() << " " << printMBBReference(*BT);
if (FallsThrough)
dbgs() << "\n falls through\n";
else
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp
index 428d25da6dbc..b2a842233bb8 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp
@@ -3260,13 +3260,12 @@ bool HexagonLoopRescheduling::processLoop(LoopCand &C) {
dbgs() << "Group[" << i << "] inp: "
<< printReg(G.Inp.Reg, HRI, G.Inp.Sub)
<< " out: " << printReg(G.Out.Reg, HRI, G.Out.Sub) << "\n";
- for (unsigned j = 0, m = G.Ins.size(); j < m; ++j)
- dbgs() << " " << *G.Ins[j];
+ for (const MachineInstr *MI : G.Ins)
+ dbgs() << " " << MI;
}
});
- for (unsigned i = 0, n = Groups.size(); i < n; ++i) {
- InstrGroup &G = Groups[i];
+ for (InstrGroup &G : Groups) {
if (!isShuffleOf(G.Out.Reg, G.Inp.Reg))
continue;
auto LoopInpEq = [G] (const PhiInfo &P) -> bool {
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonBitTracker.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonBitTracker.cpp
index 1938a5c259da..8e014b395286 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonBitTracker.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonBitTracker.cpp
@@ -493,6 +493,11 @@ bool HexagonEvaluator::evaluate(const MachineInstr &MI,
RegisterCell RC = eADD(rc(1), lo(M, W0));
return rr0(RC, Outputs);
}
+ case M2_mnaci: {
+ RegisterCell M = eMLS(rc(2), rc(3));
+ RegisterCell RC = eSUB(rc(1), lo(M, W0));
+ return rr0(RC, Outputs);
+ }
case M2_mpysmi: {
RegisterCell M = eMLS(rc(1), eIMM(im(2), W0));
return rr0(lo(M, 32), Outputs);
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonCFGOptimizer.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonCFGOptimizer.cpp
index b456cf139c55..a31ad45f4bb0 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonCFGOptimizer.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonCFGOptimizer.cpp
@@ -118,13 +118,10 @@ bool HexagonCFGOptimizer::runOnMachineFunction(MachineFunction &Fn) {
return false;
// Loop over all of the basic blocks.
- for (MachineFunction::iterator MBBb = Fn.begin(), MBBe = Fn.end();
- MBBb != MBBe; ++MBBb) {
- MachineBasicBlock *MBB = &*MBBb;
-
+ for (MachineBasicBlock &MBB : Fn) {
// Traverse the basic block.
- MachineBasicBlock::iterator MII = MBB->getFirstTerminator();
- if (MII != MBB->end()) {
+ MachineBasicBlock::iterator MII = MBB.getFirstTerminator();
+ if (MII != MBB.end()) {
MachineInstr &MI = *MII;
int Opc = MI.getOpcode();
if (IsConditionalBranch(Opc)) {
@@ -155,17 +152,17 @@ bool HexagonCFGOptimizer::runOnMachineFunction(MachineFunction &Fn) {
// Remove BB2
// BB3: ...
// BB4: ...
- unsigned NumSuccs = MBB->succ_size();
- MachineBasicBlock::succ_iterator SI = MBB->succ_begin();
+ unsigned NumSuccs = MBB.succ_size();
+ MachineBasicBlock::succ_iterator SI = MBB.succ_begin();
MachineBasicBlock* FirstSucc = *SI;
MachineBasicBlock* SecondSucc = *(++SI);
MachineBasicBlock* LayoutSucc = nullptr;
MachineBasicBlock* JumpAroundTarget = nullptr;
- if (MBB->isLayoutSuccessor(FirstSucc)) {
+ if (MBB.isLayoutSuccessor(FirstSucc)) {
LayoutSucc = FirstSucc;
JumpAroundTarget = SecondSucc;
- } else if (MBB->isLayoutSuccessor(SecondSucc)) {
+ } else if (MBB.isLayoutSuccessor(SecondSucc)) {
LayoutSucc = SecondSucc;
JumpAroundTarget = FirstSucc;
} else {
@@ -201,7 +198,7 @@ bool HexagonCFGOptimizer::runOnMachineFunction(MachineFunction &Fn) {
if (case1 || case2) {
InvertAndChangeJumpTarget(MI, UncondTarget);
- MBB->replaceSuccessor(JumpAroundTarget, UncondTarget);
+ MBB.replaceSuccessor(JumpAroundTarget, UncondTarget);
// Remove the unconditional branch in LayoutSucc.
LayoutSucc->erase(LayoutSucc->begin());
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonCallingConv.td b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonCallingConv.td
index 93e17e608dd1..cc41b569e490 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonCallingConv.td
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonCallingConv.td
@@ -126,16 +126,16 @@ def CC_Hexagon_HVX: CallingConv<[
// HVX 128-byte mode
CCIfHvx128<
- CCIfType<[v32i32,v64i16,v128i8],
+ CCIfType<[v32i32,v64i16,v128i8,v32f32,v64f16],
CCAssignToReg<[V0,V1,V2,V3,V4,V5,V6,V7,V8,V9,V10,V11,V12,V13,V14,V15]>>>,
CCIfHvx128<
- CCIfType<[v64i32,v128i16,v256i8],
+ CCIfType<[v64i32,v128i16,v256i8,v64f32,v128f16],
CCAssignToReg<[W0,W1,W2,W3,W4,W5,W6,W7]>>>,
CCIfHvx128<
- CCIfType<[v32i32,v64i16,v128i8],
+ CCIfType<[v32i32,v64i16,v128i8,v32f32,v64f16],
CCAssignToStack<128,128>>>,
CCIfHvx128<
- CCIfType<[v64i32,v128i16,v256i8],
+ CCIfType<[v64i32,v128i16,v256i8,v64f32,v128f16],
CCAssignToStack<256,128>>>,
CCDelegateTo<CC_Hexagon>
@@ -152,10 +152,10 @@ def RetCC_Hexagon_HVX: CallingConv<[
// HVX 128-byte mode
CCIfHvx128<
- CCIfType<[v32i32,v64i16,v128i8],
+ CCIfType<[v32i32,v64i16,v128i8,v32f32,v64f16],
CCAssignToReg<[V0]>>>,
CCIfHvx128<
- CCIfType<[v64i32,v128i16,v256i8],
+ CCIfType<[v64i32,v128i16,v256i8,v64f32,v128f16],
CCAssignToReg<[W0]>>>,
CCDelegateTo<RetCC_Hexagon>
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonCommonGEP.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonCommonGEP.cpp
index a53efeb96961..fc5e05d8c9a0 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonCommonGEP.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonCommonGEP.cpp
@@ -290,13 +290,11 @@ namespace {
raw_ostream &operator<< (raw_ostream &OS,
const NodeToUsesMap &M) LLVM_ATTRIBUTE_UNUSED;
raw_ostream &operator<< (raw_ostream &OS, const NodeToUsesMap &M){
- using const_iterator = NodeToUsesMap::const_iterator;
-
- for (const_iterator I = M.begin(), E = M.end(); I != E; ++I) {
- const UseSet &Us = I->second;
- OS << I->first << " -> #" << Us.size() << '{';
- for (UseSet::const_iterator J = Us.begin(), F = Us.end(); J != F; ++J) {
- User *R = (*J)->getUser();
+ for (const auto &I : M) {
+ const UseSet &Us = I.second;
+ OS << I.first << " -> #" << Us.size() << '{';
+ for (const Use *U : Us) {
+ User *R = U->getUser();
if (R->hasName())
OS << ' ' << R->getName();
else
@@ -420,15 +418,12 @@ void HexagonCommonGEP::collect() {
// instruction that uses another GEP instruction as the base pointer, the
// gep node for the base pointer should already exist.
ValueToNodeMap NM;
- for (ValueVect::iterator I = BO.begin(), E = BO.end(); I != E; ++I) {
- BasicBlock *B = cast<BasicBlock>(*I);
- for (BasicBlock::iterator J = B->begin(), F = B->end(); J != F; ++J) {
- if (!isa<GetElementPtrInst>(J))
- continue;
- GetElementPtrInst *GepI = cast<GetElementPtrInst>(J);
- if (isHandledGepForm(GepI))
- processGepInst(GepI, NM);
- }
+ for (Value *I : BO) {
+ BasicBlock *B = cast<BasicBlock>(I);
+ for (Instruction &J : *B)
+ if (auto *GepI = dyn_cast<GetElementPtrInst>(&J))
+ if (isHandledGepForm(GepI))
+ processGepInst(GepI, NM);
}
LLVM_DEBUG(dbgs() << "Gep nodes after initial collection:\n" << Nodes);
@@ -436,17 +431,14 @@ void HexagonCommonGEP::collect() {
static void invert_find_roots(const NodeVect &Nodes, NodeChildrenMap &NCM,
NodeVect &Roots) {
- using const_iterator = NodeVect::const_iterator;
-
- for (const_iterator I = Nodes.begin(), E = Nodes.end(); I != E; ++I) {
- GepNode *N = *I;
- if (N->Flags & GepNode::Root) {
- Roots.push_back(N);
- continue;
- }
- GepNode *PN = N->Parent;
- NCM[PN].push_back(N);
+ for (GepNode *N : Nodes) {
+ if (N->Flags & GepNode::Root) {
+ Roots.push_back(N);
+ continue;
}
+ GepNode *PN = N->Parent;
+ NCM[PN].push_back(N);
+ }
}
static void nodes_for_root(GepNode *Root, NodeChildrenMap &NCM,
@@ -546,8 +538,7 @@ void HexagonCommonGEP::common() {
using NodeSetMap = std::map<unsigned, NodeSet>;
NodeSetMap MaybeEq;
- for (NodeVect::iterator I = Nodes.begin(), E = Nodes.end(); I != E; ++I) {
- GepNode *N = *I;
+ for (GepNode *N : Nodes) {
unsigned H = node_hash(N);
MaybeEq[H].insert(N);
}
@@ -556,9 +547,8 @@ void HexagonCommonGEP::common() {
// one for equality and the other for non-equality.
NodeSymRel EqRel; // Equality relation (as set of equivalence classes).
NodePairSet Eq, Ne; // Caches.
- for (NodeSetMap::iterator I = MaybeEq.begin(), E = MaybeEq.end();
- I != E; ++I) {
- NodeSet &S = I->second;
+ for (auto &I : MaybeEq) {
+ NodeSet &S = I.second;
for (NodeSet::iterator NI = S.begin(), NE = S.end(); NI != NE; ++NI) {
GepNode *N = *NI;
// If node already has a class, then the class must have been created
@@ -612,8 +602,7 @@ void HexagonCommonGEP::common() {
// Update the min element's flags, and user list.
uint32_t Flags = 0;
UseSet &MinUs = Uses[Min];
- for (NodeSet::iterator J = S.begin(), F = S.end(); J != F; ++J) {
- GepNode *N = *J;
+ for (GepNode *N : S) {
uint32_t NF = N->Flags;
// If N is used, append all original values of N to the list of
// original values of Min.
@@ -633,8 +622,7 @@ void HexagonCommonGEP::common() {
// selected (minimum) node from the corresponding equivalence class.
// If a given parent does not have an equivalence class, leave it
// unchanged (it means that it's the only element in its class).
- for (NodeVect::iterator I = Nodes.begin(), E = Nodes.end(); I != E; ++I) {
- GepNode *N = *I;
+ for (GepNode *N : Nodes) {
if (N->Flags & GepNode::Root)
continue;
const NodeSet *PC = node_class(N->Parent, EqRel);
@@ -652,8 +640,7 @@ void HexagonCommonGEP::common() {
// Finally, erase the nodes that are no longer used.
NodeSet Erase;
- for (NodeVect::iterator I = Nodes.begin(), E = Nodes.end(); I != E; ++I) {
- GepNode *N = *I;
+ for (GepNode *N : Nodes) {
const NodeSet *PC = node_class(N, EqRel);
if (!PC)
continue;
@@ -663,7 +650,7 @@ void HexagonCommonGEP::common() {
if (N == F->second)
continue;
// Node for removal.
- Erase.insert(*I);
+ Erase.insert(N);
}
erase_if(Nodes, in_set(Erase));
@@ -775,8 +762,7 @@ BasicBlock *HexagonCommonGEP::recalculatePlacement(GepNode *Node,
NodeToUsesMap::iterator UF = Uses.find(Node);
assert(UF != Uses.end() && "Used node with no use information");
UseSet &Us = UF->second;
- for (UseSet::iterator I = Us.begin(), E = Us.end(); I != E; ++I) {
- Use *U = *I;
+ for (Use *U : Us) {
User *R = U->getUser();
if (!isa<Instruction>(R))
continue;
@@ -790,8 +776,7 @@ BasicBlock *HexagonCommonGEP::recalculatePlacement(GepNode *Node,
NodeChildrenMap::iterator CF = NCM.find(Node);
if (CF != NCM.end()) {
NodeVect &Cs = CF->second;
- for (NodeVect::iterator I = Cs.begin(), E = Cs.end(); I != E; ++I) {
- GepNode *CN = *I;
+ for (GepNode *CN : Cs) {
NodeToValueMap::iterator LF = Loc.find(CN);
// If the child is only used in GEP instructions (i.e. is not used in
// non-GEP instructions), the nearest dominator computed for it may
@@ -831,8 +816,8 @@ BasicBlock *HexagonCommonGEP::recalculatePlacementRec(GepNode *Node,
NodeChildrenMap::iterator CF = NCM.find(Node);
if (CF != NCM.end()) {
NodeVect &Cs = CF->second;
- for (NodeVect::iterator I = Cs.begin(), E = Cs.end(); I != E; ++I)
- recalculatePlacementRec(*I, NCM, Loc);
+ for (GepNode *C : Cs)
+ recalculatePlacementRec(C, NCM, Loc);
}
BasicBlock *LB = recalculatePlacement(Node, NCM, Loc);
LLVM_DEBUG(dbgs() << "LocRec end for node:" << Node << '\n');
@@ -921,8 +906,8 @@ BasicBlock *HexagonCommonGEP::adjustForInvariance(GepNode *Node,
NodeChildrenMap::iterator CF = NCM.find(Node);
if (CF != NCM.end()) {
NodeVect &Cs = CF->second;
- for (NodeVect::iterator I = Cs.begin(), E = Cs.end(); I != E; ++I)
- adjustForInvariance(*I, NCM, Loc);
+ for (GepNode *C : Cs)
+ adjustForInvariance(C, NCM, Loc);
}
return LocB;
}
@@ -938,10 +923,9 @@ namespace {
raw_ostream &operator<< (raw_ostream &OS,
const LocationAsBlock &Loc) LLVM_ATTRIBUTE_UNUSED ;
raw_ostream &operator<< (raw_ostream &OS, const LocationAsBlock &Loc) {
- for (NodeToValueMap::const_iterator I = Loc.Map.begin(), E = Loc.Map.end();
- I != E; ++I) {
- OS << I->first << " -> ";
- if (BasicBlock *B = cast_or_null<BasicBlock>(I->second))
+ for (const auto &I : Loc.Map) {
+ OS << I.first << " -> ";
+ if (BasicBlock *B = cast_or_null<BasicBlock>(I.second))
OS << B->getName() << '(' << B << ')';
else
OS << "<null-block>";
@@ -1016,8 +1000,7 @@ void HexagonCommonGEP::separateConstantChains(GepNode *Node,
// Collect all used nodes together with the uses from loads and stores,
// where the GEP node could be folded into the load/store instruction.
NodeToUsesMap FNs; // Foldable nodes.
- for (NodeSet::iterator I = Ns.begin(), E = Ns.end(); I != E; ++I) {
- GepNode *N = *I;
+ for (GepNode *N : Ns) {
if (!(N->Flags & GepNode::Used))
continue;
NodeToUsesMap::iterator UF = Uses.find(N);
@@ -1025,8 +1008,7 @@ void HexagonCommonGEP::separateConstantChains(GepNode *Node,
UseSet &Us = UF->second;
// Loads/stores that use the node N.
UseSet LSs;
- for (UseSet::iterator J = Us.begin(), F = Us.end(); J != F; ++J) {
- Use *U = *J;
+ for (Use *U : Us) {
User *R = U->getUser();
// We're interested in uses that provide the address. It can happen
// that the value may also be provided via GEP, but we won't handle
@@ -1051,11 +1033,11 @@ void HexagonCommonGEP::separateConstantChains(GepNode *Node,
LLVM_DEBUG(dbgs() << "Nodes with foldable users:\n" << FNs);
- for (NodeToUsesMap::iterator I = FNs.begin(), E = FNs.end(); I != E; ++I) {
- GepNode *N = I->first;
- UseSet &Us = I->second;
- for (UseSet::iterator J = Us.begin(), F = Us.end(); J != F; ++J)
- separateChainForNode(N, *J, Loc);
+ for (auto &FN : FNs) {
+ GepNode *N = FN.first;
+ UseSet &Us = FN.second;
+ for (Use *U : Us)
+ separateChainForNode(N, U, Loc);
}
}
@@ -1068,21 +1050,21 @@ void HexagonCommonGEP::computeNodePlacement(NodeToValueMap &Loc) {
// Compute the initial placement determined by the users' locations, and
// the locations of the child nodes.
- for (NodeVect::iterator I = Roots.begin(), E = Roots.end(); I != E; ++I)
- recalculatePlacementRec(*I, NCM, Loc);
+ for (GepNode *Root : Roots)
+ recalculatePlacementRec(Root, NCM, Loc);
LLVM_DEBUG(dbgs() << "Initial node placement:\n" << LocationAsBlock(Loc));
if (OptEnableInv) {
- for (NodeVect::iterator I = Roots.begin(), E = Roots.end(); I != E; ++I)
- adjustForInvariance(*I, NCM, Loc);
+ for (GepNode *Root : Roots)
+ adjustForInvariance(Root, NCM, Loc);
LLVM_DEBUG(dbgs() << "Node placement after adjustment for invariance:\n"
<< LocationAsBlock(Loc));
}
if (OptEnableConst) {
- for (NodeVect::iterator I = Roots.begin(), E = Roots.end(); I != E; ++I)
- separateConstantChains(*I, NCM, Loc);
+ for (GepNode *Root : Roots)
+ separateConstantChains(Root, NCM, Loc);
}
LLVM_DEBUG(dbgs() << "Node use information:\n" << Uses);
@@ -1153,8 +1135,8 @@ void HexagonCommonGEP::getAllUsersForNode(GepNode *Node, ValueVect &Values,
NodeToUsesMap::iterator UF = Uses.find(N);
assert(UF != Uses.end() && "No use information for used node");
UseSet &Us = UF->second;
- for (UseSet::iterator I = Us.begin(), E = Us.end(); I != E; ++I)
- Values.push_back((*I)->getUser());
+ for (const auto &U : Us)
+ Values.push_back(U->getUser());
}
NodeChildrenMap::iterator CF = NCM.find(N);
if (CF != NCM.end()) {
@@ -1223,8 +1205,7 @@ void HexagonCommonGEP::materialize(NodeToValueMap &Loc) {
// to the Roots list.
if (LastCN > 0) {
NodeVect &Cs = NCM[Last];
- for (NodeVect::iterator I = Cs.begin(), E = Cs.end(); I != E; ++I) {
- GepNode *CN = *I;
+ for (GepNode *CN : Cs) {
CN->Flags &= ~GepNode::Internal;
CN->Flags |= GepNode::Root;
CN->BaseVal = NewInst;
@@ -1238,10 +1219,8 @@ void HexagonCommonGEP::materialize(NodeToValueMap &Loc) {
NodeToUsesMap::iterator UF = Uses.find(Last);
assert(UF != Uses.end() && "No use information found");
UseSet &Us = UF->second;
- for (UseSet::iterator I = Us.begin(), E = Us.end(); I != E; ++I) {
- Use *U = *I;
+ for (Use *U : Us)
U->set(NewInst);
- }
}
}
}
@@ -1261,8 +1240,8 @@ void HexagonCommonGEP::removeDeadCode() {
ValueVect Ins;
for (Instruction &I : llvm::reverse(*B))
Ins.push_back(&I);
- for (ValueVect::iterator I = Ins.begin(), E = Ins.end(); I != E; ++I) {
- Instruction *In = cast<Instruction>(*I);
+ for (Value *I : Ins) {
+ Instruction *In = cast<Instruction>(I);
if (isInstructionTriviallyDead(In))
In->eraseFromParent();
}
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp
index d3fcdb6ae9a8..d8af35cbf3a8 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp
@@ -229,7 +229,7 @@ namespace {
private:
struct Register {
Register() = default;
- Register(unsigned R, unsigned S) : Reg(R), Sub(S) {}
+ Register(llvm::Register R, unsigned S) : Reg(R), Sub(S) {}
Register(const MachineOperand &Op)
: Reg(Op.getReg()), Sub(Op.getSubReg()) {}
Register &operator=(const MachineOperand &Op) {
@@ -1573,7 +1573,7 @@ HCE::Register HCE::insertInitializer(Loc DefL, const ExtenderInit &ExtI) {
// No compounds are available. It is not clear whether we should
// even process such extenders where the initializer cannot be
// a single instruction, but do it for now.
- unsigned TmpR = MRI->createVirtualRegister(&Hexagon::IntRegsRegClass);
+ llvm::Register TmpR = MRI->createVirtualRegister(&Hexagon::IntRegsRegClass);
BuildMI(MBB, At, dl, HII->get(Hexagon::S2_asl_i_r), TmpR)
.add(MachineOperand(Ex.Rs))
.addImm(Ex.S);
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
index daf311fc49d4..105bf2811a20 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
@@ -125,8 +125,8 @@ namespace {
};
LatticeCell() : Kind(Top), Size(0), IsSpecial(false) {
- for (unsigned i = 0; i < MaxCellSize; ++i)
- Values[i] = nullptr;
+ for (const Constant *&Value : Values)
+ Value = nullptr;
}
bool meet(const LatticeCell &L);
@@ -1029,8 +1029,8 @@ bool MachineConstPropagator::rewrite(MachineFunction &MF) {
ToRemove.push_back(const_cast<MachineBasicBlock*>(SB));
Targets.remove(SB);
}
- for (unsigned i = 0, n = ToRemove.size(); i < n; ++i)
- removeCFGEdge(B, ToRemove[i]);
+ for (MachineBasicBlock *MBB : ToRemove)
+ removeCFGEdge(B, MBB);
// If there are any blocks left in the computed targets, it means that
// we think that the block could go somewhere, but the CFG does not.
// This could legitimately happen in blocks that have non-returning
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp
index 03b0f75b2dc1..2ee7f1325df9 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp
@@ -70,9 +70,7 @@ class HexagonCopyToCombine : public MachineFunctionPass {
public:
static char ID;
- HexagonCopyToCombine() : MachineFunctionPass(ID) {
- initializeHexagonCopyToCombinePass(*PassRegistry::getPassRegistry());
- }
+ HexagonCopyToCombine() : MachineFunctionPass(ID) {}
void getAnalysisUsage(AnalysisUsage &AU) const override {
MachineFunctionPass::getAnalysisUsage(AU);
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
index 9a3feb5b6af1..2207925ceeba 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
@@ -612,8 +612,8 @@ bool HexagonEarlyIfConversion::visitBlock(MachineBasicBlock *B,
// Simply keep a list of children of B, and traverse that list.
using DTNodeVectType = SmallVector<MachineDomTreeNode *, 4>;
DTNodeVectType Cn(GTN::child_begin(N), GTN::child_end(N));
- for (DTNodeVectType::iterator I = Cn.begin(), E = Cn.end(); I != E; ++I) {
- MachineBasicBlock *SB = (*I)->getBlock();
+ for (auto &I : Cn) {
+ MachineBasicBlock *SB = I->getBlock();
if (!Deleted.count(SB))
Changed |= visitBlock(SB, L);
}
@@ -648,8 +648,8 @@ bool HexagonEarlyIfConversion::visitLoop(MachineLoop *L) {
<< "\n");
bool Changed = false;
if (L) {
- for (MachineLoop::iterator I = L->begin(), E = L->end(); I != E; ++I)
- Changed |= visitLoop(*I);
+ for (MachineLoop *I : *L)
+ Changed |= visitLoop(I);
}
MachineBasicBlock *EntryB = GraphTraits<MachineFunction*>::getEntryNode(MFN);
@@ -964,8 +964,8 @@ void HexagonEarlyIfConversion::removeBlock(MachineBasicBlock *B) {
using DTNodeVectType = SmallVector<MachineDomTreeNode *, 4>;
DTNodeVectType Cn(GTN::child_begin(N), GTN::child_end(N));
- for (DTNodeVectType::iterator I = Cn.begin(), E = Cn.end(); I != E; ++I) {
- MachineBasicBlock *SB = (*I)->getBlock();
+ for (auto &I : Cn) {
+ MachineBasicBlock *SB = I->getBlock();
MDT->changeImmediateDominator(SB, IDB);
}
}
@@ -973,8 +973,8 @@ void HexagonEarlyIfConversion::removeBlock(MachineBasicBlock *B) {
while (!B->succ_empty())
B->removeSuccessor(B->succ_begin());
- for (auto I = B->pred_begin(), E = B->pred_end(); I != E; ++I)
- (*I)->removeSuccessor(B, true);
+ for (MachineBasicBlock *Pred : B->predecessors())
+ Pred->removeSuccessor(B, true);
Deleted.insert(B);
MDT->eraseNode(B);
@@ -1064,8 +1064,8 @@ bool HexagonEarlyIfConversion::runOnMachineFunction(MachineFunction &MF) {
Deleted.clear();
bool Changed = false;
- for (MachineLoopInfo::iterator I = MLI->begin(), E = MLI->end(); I != E; ++I)
- Changed |= visitLoop(*I);
+ for (MachineLoop *L : *MLI)
+ Changed |= visitLoop(L);
Changed |= visitLoop(nullptr);
return Changed;
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
index c444cf557c21..2693940bb1e9 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
@@ -1106,8 +1106,7 @@ bool HexagonExpandCondsets::isIntReg(RegisterRef RR, unsigned &BW) {
}
bool HexagonExpandCondsets::isIntraBlocks(LiveInterval &LI) {
- for (LiveInterval::iterator I = LI.begin(), E = LI.end(); I != E; ++I) {
- LiveRange::Segment &LR = *I;
+ for (LiveRange::Segment &LR : LI) {
// Range must start at a register...
if (!LR.start.isRegister())
return false;
@@ -1160,16 +1159,16 @@ bool HexagonExpandCondsets::coalesceRegisters(RegisterRef R1, RegisterRef R2) {
// Move all live segments from L2 to L1.
using ValueInfoMap = DenseMap<VNInfo *, VNInfo *>;
ValueInfoMap VM;
- for (LiveInterval::iterator I = L2.begin(), E = L2.end(); I != E; ++I) {
- VNInfo *NewVN, *OldVN = I->valno;
+ for (LiveRange::Segment &I : L2) {
+ VNInfo *NewVN, *OldVN = I.valno;
ValueInfoMap::iterator F = VM.find(OldVN);
if (F == VM.end()) {
- NewVN = L1.getNextValue(I->valno->def, LIS->getVNInfoAllocator());
+ NewVN = L1.getNextValue(I.valno->def, LIS->getVNInfoAllocator());
VM.insert(std::make_pair(OldVN, NewVN));
} else {
NewVN = F->second;
}
- L1.addSegment(LiveRange::Segment(I->start, I->end, NewVN));
+ L1.addSegment(LiveRange::Segment(I.start, I.end, NewVN));
}
while (!L2.empty())
L2.removeSegment(*L2.begin());
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
index 12ceac545e9d..989a98571434 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -416,8 +416,8 @@ void HexagonFrameLowering::findShrunkPrologEpilog(MachineFunction &MF,
UnsignedMap RPO;
RPOTType RPOT(&MF);
unsigned RPON = 0;
- for (RPOTType::rpo_iterator I = RPOT.begin(), E = RPOT.end(); I != E; ++I)
- RPO[(*I)->getNumber()] = RPON++;
+ for (auto &I : RPOT)
+ RPO[I->getNumber()] = RPON++;
// Don't process functions that have loops, at least for now. Placement
// of prolog and epilog must take loop structure into account. For simpli-
@@ -1410,7 +1410,7 @@ bool HexagonFrameLowering::insertCSRSpillsInBlock(MachineBasicBlock &MBB,
}
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
// Add live in registers. We treat eh_return callee saved register r0 - r3
// specially. They are not really callee saved registers as they are not
// supposed to be killed.
@@ -1479,7 +1479,7 @@ bool HexagonFrameLowering::insertCSRRestoresInBlock(MachineBasicBlock &MBB,
}
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
const TargetRegisterClass *RC = HRI.getMinimalPhysRegClass(Reg);
int FI = I.getFrameIdx();
HII.loadRegFromStackSlot(MBB, MI, Reg, FI, RC, &HRI);
@@ -1620,7 +1620,7 @@ bool HexagonFrameLowering::assignCalleeSavedSpillSlots(MachineFunction &MF,
// sub-registers to SRegs.
LLVM_DEBUG(dbgs() << "Initial CS registers: {");
for (const CalleeSavedInfo &I : CSI) {
- unsigned R = I.getReg();
+ Register R = I.getReg();
LLVM_DEBUG(dbgs() << ' ' << printReg(R, TRI));
for (MCSubRegIterator SR(R, TRI, true); SR.isValid(); ++SR)
SRegs[*SR] = true;
@@ -2635,7 +2635,7 @@ bool HexagonFrameLowering::shouldInlineCSR(const MachineFunction &MF,
// a contiguous block starting from D8.
BitVector Regs(Hexagon::NUM_TARGET_REGS);
for (const CalleeSavedInfo &I : CSI) {
- unsigned R = I.getReg();
+ Register R = I.getReg();
if (!Hexagon::DoubleRegsRegClass.contains(R))
return true;
Regs[R] = true;
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp
index 85230cac9d7c..0bb1658e7698 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp
@@ -583,14 +583,12 @@ namespace {
char HexagonGenInsert::ID = 0;
void HexagonGenInsert::dump_map() const {
- using iterator = IFMapType::const_iterator;
-
- for (iterator I = IFMap.begin(), E = IFMap.end(); I != E; ++I) {
- dbgs() << " " << printReg(I->first, HRI) << ":\n";
- const IFListType &LL = I->second;
- for (unsigned i = 0, n = LL.size(); i < n; ++i)
- dbgs() << " " << PrintIFR(LL[i].first, HRI) << ", "
- << PrintRegSet(LL[i].second, HRI) << '\n';
+ for (const auto &I : IFMap) {
+ dbgs() << " " << printReg(I.first, HRI) << ":\n";
+ const IFListType &LL = I.second;
+ for (const auto &J : LL)
+ dbgs() << " " << PrintIFR(J.first, HRI) << ", "
+ << PrintRegSet(J.second, HRI) << '\n';
}
}
@@ -627,8 +625,8 @@ void HexagonGenInsert::buildOrderingBT(RegisterOrdering &RB,
using SortableVectorType = std::vector<unsigned>;
SortableVectorType VRs;
- for (RegisterOrdering::iterator I = RB.begin(), E = RB.end(); I != E; ++I)
- VRs.push_back(I->first);
+ for (auto &I : RB)
+ VRs.push_back(I.first);
llvm::sort(VRs, LexCmp);
// Transfer the results to the outgoing register ordering.
for (unsigned i = 0, n = VRs.size(); i < n; ++i)
@@ -853,20 +851,18 @@ bool HexagonGenInsert::findRecordInsertForms(unsigned VR,
if (isDebug()) {
dbgs() << "Prefixes matching register " << printReg(VR, HRI) << "\n";
- for (LRSMapType::iterator I = LM.begin(), E = LM.end(); I != E; ++I) {
- dbgs() << " L=" << I->first << ':';
- const RSListType &LL = I->second;
- for (unsigned i = 0, n = LL.size(); i < n; ++i)
- dbgs() << " (" << printReg(LL[i].first, HRI) << ",@"
- << LL[i].second << ')';
+ for (const auto &I : LM) {
+ dbgs() << " L=" << I.first << ':';
+ const RSListType &LL = I.second;
+ for (const auto &J : LL)
+ dbgs() << " (" << printReg(J.first, HRI) << ",@" << J.second << ')';
dbgs() << '\n';
}
}
bool Recorded = false;
- for (iterator I = AVs.begin(), E = AVs.end(); I != E; ++I) {
- unsigned SrcR = *I;
+ for (unsigned SrcR : AVs) {
int FDi = -1, LDi = -1; // First/last different bit.
const BitTracker::RegisterCell &AC = CMS->lookup(SrcR);
uint16_t AW = AC.width();
@@ -888,8 +884,8 @@ bool HexagonGenInsert::findRecordInsertForms(unsigned VR,
if (F == LM.end())
continue;
RSListType &LL = F->second;
- for (unsigned i = 0, n = LL.size(); i < n; ++i) {
- uint16_t S = LL[i].second;
+ for (const auto &I : LL) {
+ uint16_t S = I.second;
// MinL is the minimum length of the prefix. Any length above MinL
// allows some flexibility as to where the prefix can start:
// given the extra length EL=L-MinL, the prefix must start between
@@ -900,7 +896,7 @@ bool HexagonGenInsert::findRecordInsertForms(unsigned VR,
uint16_t LowS = (EL < FD) ? FD-EL : 0;
if (S < LowS) // Starts too early.
continue;
- unsigned InsR = LL[i].first;
+ unsigned InsR = I.first;
if (!isValidInsertForm(VR, SrcR, InsR, L, S))
continue;
if (isDebug()) {
@@ -1029,10 +1025,10 @@ void HexagonGenInsert::findRemovableRegisters(unsigned VR, IFRecord IF,
}
void HexagonGenInsert::computeRemovableRegisters() {
- for (IFMapType::iterator I = IFMap.begin(), E = IFMap.end(); I != E; ++I) {
- IFListType &LL = I->second;
- for (unsigned i = 0, n = LL.size(); i < n; ++i)
- findRemovableRegisters(I->first, LL[i].first, LL[i].second);
+ for (auto &I : IFMap) {
+ IFListType &LL = I.second;
+ for (auto &J : LL)
+ findRemovableRegisters(I.first, J.first, J.second);
}
}
@@ -1064,8 +1060,8 @@ void HexagonGenInsert::pruneCoveredSets(unsigned VR) {
MachineInstr *DefVR = MRI->getVRegDef(VR);
bool DefEx = HII->isConstExtended(*DefVR);
bool HasNE = false;
- for (unsigned i = 0, n = LL.size(); i < n; ++i) {
- if (LL[i].second.empty())
+ for (const auto &I : LL) {
+ if (I.second.empty())
continue;
HasNE = true;
break;
@@ -1172,8 +1168,8 @@ void HexagonGenInsert::pruneCandidates() {
// selection method.
// First, remove candidates whose potentially removable set is a subset
// of another candidate's set.
- for (IFMapType::iterator I = IFMap.begin(), E = IFMap.end(); I != E; ++I)
- pruneCoveredSets(I->first);
+ for (const auto &I : IFMap)
+ pruneCoveredSets(I.first);
UnsignedMap RPO;
@@ -1181,18 +1177,18 @@ void HexagonGenInsert::pruneCandidates() {
RPOTType RPOT(MFN);
unsigned RPON = 0;
- for (RPOTType::rpo_iterator I = RPOT.begin(), E = RPOT.end(); I != E; ++I)
- RPO[(*I)->getNumber()] = RPON++;
+ for (const auto &I : RPOT)
+ RPO[I->getNumber()] = RPON++;
PairMapType Memo; // Memoization map for distance calculation.
// Remove candidates that would use registers defined too far away.
- for (IFMapType::iterator I = IFMap.begin(), E = IFMap.end(); I != E; ++I)
- pruneUsesTooFar(I->first, RPO, Memo);
+ for (const auto &I : IFMap)
+ pruneUsesTooFar(I.first, RPO, Memo);
pruneEmptyLists();
- for (IFMapType::iterator I = IFMap.begin(), E = IFMap.end(); I != E; ++I)
- pruneRegCopies(I->first);
+ for (const auto &I : IFMap)
+ pruneRegCopies(I.first);
}
namespace {
@@ -1277,8 +1273,8 @@ void HexagonGenInsert::selectCandidates() {
for (IFMapType::iterator I = IFMap.begin(); I != End; ++I) {
const IFListType &LL = I->second;
RegisterSet TT;
- for (unsigned i = 0, n = LL.size(); i < n; ++i)
- TT.insert(LL[i].second);
+ for (const auto &J : LL)
+ TT.insert(J.second);
for (unsigned R = TT.find_first(); R; R = TT.find_next(R))
RemC[R]++;
AllRMs.insert(TT);
@@ -1384,8 +1380,8 @@ bool HexagonGenInsert::generateInserts() {
// Create a new register for each one from IFMap, and store them in the
// map.
UnsignedMap RegMap;
- for (IFMapType::iterator I = IFMap.begin(), E = IFMap.end(); I != E; ++I) {
- unsigned VR = I->first;
+ for (auto &I : IFMap) {
+ unsigned VR = I.first;
const TargetRegisterClass *RC = MRI->getRegClass(VR);
Register NewVR = MRI->createVirtualRegister(RC);
RegMap[VR] = NewVR;
@@ -1394,15 +1390,15 @@ bool HexagonGenInsert::generateInserts() {
// We can generate the "insert" instructions using potentially stale re-
// gisters: SrcR and InsR for a given VR may be among other registers that
// are also replaced. This is fine, we will do the mass "rauw" a bit later.
- for (IFMapType::iterator I = IFMap.begin(), E = IFMap.end(); I != E; ++I) {
- MachineInstr *MI = MRI->getVRegDef(I->first);
+ for (auto &I : IFMap) {
+ MachineInstr *MI = MRI->getVRegDef(I.first);
MachineBasicBlock &B = *MI->getParent();
DebugLoc DL = MI->getDebugLoc();
- unsigned NewR = RegMap[I->first];
+ unsigned NewR = RegMap[I.first];
bool R32 = MRI->getRegClass(NewR) == &Hexagon::IntRegsRegClass;
const MCInstrDesc &D = R32 ? HII->get(Hexagon::S2_insert)
: HII->get(Hexagon::S2_insertp);
- IFRecord IF = I->second[0].first;
+ IFRecord IF = I.second[0].first;
unsigned Wdh = IF.Wdh, Off = IF.Off;
unsigned InsS = 0;
if (R32 && MRI->getRegClass(IF.InsR) == &Hexagon::DoubleRegsRegClass) {
@@ -1428,9 +1424,9 @@ bool HexagonGenInsert::generateInserts() {
MRI->clearKillFlags(IF.InsR);
}
- for (IFMapType::iterator I = IFMap.begin(), E = IFMap.end(); I != E; ++I) {
- MachineInstr *DefI = MRI->getVRegDef(I->first);
- MRI->replaceRegWith(I->first, RegMap[I->first]);
+ for (const auto &I : IFMap) {
+ MachineInstr *DefI = MRI->getVRegDef(I.first);
+ MRI->replaceRegWith(I.first, RegMap[I.first]);
DefI->eraseFromParent();
}
@@ -1523,9 +1519,8 @@ bool HexagonGenInsert::runOnMachineFunction(MachineFunction &MF) {
if (isDebug()) {
dbgs() << "Cell ordering:\n";
- for (RegisterOrdering::iterator I = CellOrd.begin(), E = CellOrd.end();
- I != E; ++I) {
- unsigned VR = I->first, Pos = I->second;
+ for (const auto &I : CellOrd) {
+ unsigned VR = I.first, Pos = I.second;
dbgs() << printReg(VR, HRI) << " -> " << Pos << "\n";
}
}
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp
index 1a66394e9757..00615f355146 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp
@@ -505,8 +505,8 @@ bool HexagonGenPredicate::runOnMachineFunction(MachineFunction &MF) {
bool Changed = false;
collectPredicateGPR(MF);
- for (SetOfReg::iterator I = PredGPRs.begin(), E = PredGPRs.end(); I != E; ++I)
- processPredicateGPR(*I);
+ for (const RegisterSubReg &R : PredGPRs)
+ processPredicateGPR(R);
bool Again;
do {
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
index 5d2e1b259449..43afae441457 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
@@ -1127,8 +1127,8 @@ bool HexagonHardwareLoops::convertToHardwareLoop(MachineLoop *L,
bool L1Used = false;
// Process nested loops first.
- for (MachineLoop::iterator I = L->begin(), E = L->end(); I != E; ++I) {
- Changed |= convertToHardwareLoop(*I, RecL0used, RecL1used);
+ for (MachineLoop *I : *L) {
+ Changed |= convertToHardwareLoop(I, RecL0used, RecL1used);
L0Used |= RecL0used;
L1Used |= RecL1used;
}
@@ -1587,16 +1587,6 @@ void HexagonHardwareLoops::setImmediate(MachineOperand &MO, int64_t Val) {
MO.setReg(NewR);
}
-static bool isImmValidForOpcode(unsigned CmpOpc, int64_t Imm) {
- // These two instructions are not extendable.
- if (CmpOpc == Hexagon::A4_cmpbeqi)
- return isUInt<8>(Imm);
- if (CmpOpc == Hexagon::A4_cmpbgti)
- return isInt<8>(Imm);
- // The rest of the comparison-with-immediate instructions are extendable.
- return true;
-}
-
bool HexagonHardwareLoops::fixupInductionVariable(MachineLoop *L) {
MachineBasicBlock *Header = L->getHeader();
MachineBasicBlock *Latch = L->getLoopLatch();
@@ -1812,9 +1802,9 @@ bool HexagonHardwareLoops::fixupInductionVariable(MachineLoop *L) {
// Most comparisons of register against an immediate value allow
// the immediate to be constant-extended. There are some exceptions
// though. Make sure the new combination will work.
- if (CmpImmOp->isImm())
- if (!isImmValidForOpcode(PredDef->getOpcode(), CmpImm))
- return false;
+ if (CmpImmOp->isImm() && !TII->isExtendable(*PredDef) &&
+ !TII->isValidOffset(PredDef->getOpcode(), CmpImm, TRI, false))
+ return false;
// Make sure that the compare happens after the bump. Otherwise,
// after the fixup, the compare would use a yet-undefined register.
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonHazardRecognizer.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonHazardRecognizer.cpp
index 44679d429de5..e2215c9900d0 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonHazardRecognizer.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonHazardRecognizer.cpp
@@ -44,12 +44,7 @@ HexagonHazardRecognizer::getHazardType(SUnit *SU, int stalls) {
if (!Resources->canReserveResources(*MI)) {
LLVM_DEBUG(dbgs() << "*** Hazard in cycle " << PacketNum << ", " << *MI);
HazardType RetVal = Hazard;
- if (TII->mayBeNewStore(*MI)) {
- // Make sure the register to be stored is defined by an instruction in the
- // packet.
- MachineOperand &MO = MI->getOperand(MI->getNumOperands() - 1);
- if (!MO.isReg() || RegDefs.count(MO.getReg()) == 0)
- return Hazard;
+ if (isNewStore(*MI)) {
// The .new store version uses different resources so check if it
// causes a hazard.
MachineFunction *MF = MI->getParent()->getParent();
@@ -105,6 +100,15 @@ bool HexagonHazardRecognizer::ShouldPreferAnother(SUnit *SU) {
return UsesDotCur && ((SU == UsesDotCur) ^ (DotCurPNum == (int)PacketNum));
}
+/// Return true if the instruction would be converted to a new value store when
+/// packetized.
+bool HexagonHazardRecognizer::isNewStore(MachineInstr &MI) {
+ if (!TII->mayBeNewStore(MI))
+ return false;
+ MachineOperand &MO = MI.getOperand(MI.getNumOperands() - 1);
+ return (MO.isReg() && RegDefs.count(MO.getReg()) != 0);
+}
+
void HexagonHazardRecognizer::EmitInstruction(SUnit *SU) {
MachineInstr *MI = SU->getInstr();
if (!MI)
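A note on the isNewStore() helper introduced above: a store is treated as a .new store only when the register it stores (its last operand) is defined earlier in the same packet. A minimal standalone sketch of that check, using a plain std::unordered_set in place of the recognizer's RegDefs set (the names here are illustrative, not the LLVM API):

#include <cstdio>
#include <unordered_set>
#include <vector>

// Illustrative stand-in for a machine instruction: the last operand is the
// register holding the stored value.
struct FakeStore {
  std::vector<unsigned> Operands;
};

// Mirror of the check above: the store can become a .new store only if the
// stored register was defined by an instruction already in the packet.
static bool wouldBeNewStore(const FakeStore &MI,
                            const std::unordered_set<unsigned> &RegDefs) {
  if (MI.Operands.empty())
    return false;
  return RegDefs.count(MI.Operands.back()) != 0;
}

int main() {
  std::unordered_set<unsigned> RegDefs = {5, 9};
  std::printf("%d %d\n", wouldBeNewStore({{1, 2, 9}}, RegDefs),  // 1
              wouldBeNewStore({{1, 2, 7}}, RegDefs));            // 0
}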
@@ -119,7 +123,7 @@ void HexagonHazardRecognizer::EmitInstruction(SUnit *SU) {
if (TII->isZeroCost(MI->getOpcode()))
return;
- if (!Resources->canReserveResources(*MI)) {
+ if (!Resources->canReserveResources(*MI) || isNewStore(*MI)) {
// It must be a .new store since other instructions must be able to be
// reserved at this point.
assert(TII->mayBeNewStore(*MI) && "Expecting .new store");
@@ -127,11 +131,12 @@ void HexagonHazardRecognizer::EmitInstruction(SUnit *SU) {
MachineInstr *NewMI =
MF->CreateMachineInstr(TII->get(TII->getDotNewOp(*MI)),
MI->getDebugLoc());
- assert(Resources->canReserveResources(*NewMI));
- Resources->reserveResources(*NewMI);
+ if (Resources->canReserveResources(*NewMI))
+ Resources->reserveResources(*NewMI);
+ else
+ Resources->reserveResources(*MI);
MF->deleteMachineInstr(NewMI);
- }
- else
+ } else
Resources->reserveResources(*MI);
LLVM_DEBUG(dbgs() << " Add instruction " << *MI);
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonHazardRecognizer.h b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonHazardRecognizer.h
index 53b9cb43b4b6..0528cbd1f15f 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonHazardRecognizer.h
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonHazardRecognizer.h
@@ -40,6 +40,10 @@ class HexagonHazardRecognizer : public ScheduleHazardRecognizer {
// The set of registers defined by instructions in the current packet.
SmallSet<unsigned, 8> RegDefs;
+ // Return true if the instruction is a store that is converted to a new value
+ // store because its value is defined in the same packet.
+ bool isNewStore(MachineInstr &MI);
+
public:
HexagonHazardRecognizer(const InstrItineraryData *II,
const HexagonInstrInfo *HII,
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
index 2679e399852f..161768b8dc22 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
@@ -1176,6 +1176,9 @@ void HexagonDAGToDAGISel::ppHoistZextI1(std::vector<SDNode*> &&Nodes) {
EVT UVT = U->getValueType(0);
if (!UVT.isSimple() || !UVT.isInteger() || UVT.getSimpleVT() == MVT::i1)
continue;
+ // Do not generate select for i1 vector types.
+ if (UVT.isVector() && UVT.getVectorElementType() == MVT::i1)
+ continue;
if (isMemOPCandidate(N, U))
continue;
@@ -1282,7 +1285,7 @@ void HexagonDAGToDAGISel::emitFunctionEntryCode() {
MachineFrameInfo &MFI = MF->getFrameInfo();
MachineBasicBlock *EntryBB = &MF->front();
- unsigned AR = FuncInfo->CreateReg(MVT::i32);
+ Register AR = FuncInfo->CreateReg(MVT::i32);
Align EntryMaxA = MFI.getMaxAlign();
BuildMI(EntryBB, DebugLoc(), HII->get(Hexagon::PS_aligna), AR)
.addImm(EntryMaxA.value());
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp
index ed4874baf7c8..0a6dd727eb82 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp
@@ -230,8 +230,7 @@ bool Coloring::color() {
WorkQ.push_back(N);
}
- for (unsigned I = 0; I < WorkQ.size(); ++I) {
- Node N = WorkQ[I];
+ for (Node N : WorkQ) {
NodeSet &Ns = Edges[N];
auto P = getUniqueColor(Ns);
if (P.first) {
@@ -270,8 +269,7 @@ bool Coloring::color() {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void Coloring::dump() const {
dbgs() << "{ Order: {";
- for (unsigned I = 0; I != Order.size(); ++I) {
- Node P = Order[I];
+ for (Node P : Order) {
if (P != Ignore)
dbgs() << ' ' << P;
else
@@ -761,8 +759,7 @@ void ResultStack::print(raw_ostream &OS, const SelectionDAG &G) const {
namespace {
struct ShuffleMask {
ShuffleMask(ArrayRef<int> M) : Mask(M) {
- for (unsigned I = 0, E = Mask.size(); I != E; ++I) {
- int M = Mask[I];
+ for (int M : Mask) {
if (M == -1)
continue;
MinSrc = (MinSrc == -1) ? M : std::min(MinSrc, M);
@@ -935,8 +932,7 @@ static SmallVector<unsigned, 4> getInputSegmentList(ShuffleMask SM,
unsigned Shift = Log2_32(SegLen);
BitVector Segs(alignTo(SM.MaxSrc + 1, SegLen) >> Shift);
- for (int I = 0, E = SM.Mask.size(); I != E; ++I) {
- int M = SM.Mask[I];
+ for (int M : SM.Mask) {
if (M >= 0)
Segs.set(M >> Shift);
}
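A small aside on the segment bookkeeping in getInputSegmentList(): with SegLen a power of two, a mask element M >= 0 refers to input segment M >> Log2(SegLen). A minimal sketch of that mapping (plain C++, not the LLVM BitVector code):

#include <cstdio>
#include <set>
#include <vector>

int main() {
  const unsigned SegLen = 32;                       // must be a power of two
  const unsigned Shift = 5;                         // Log2_32(SegLen)
  std::vector<int> Mask = {0, 3, 40, 70, -1, 100};  // -1 marks an undef lane
  std::set<unsigned> Segs;
  for (int M : Mask)
    if (M >= 0)
      Segs.insert(unsigned(M) >> Shift);            // segment index of lane M
  for (unsigned S : Segs)
    std::printf("segment %u\n", S);                 // prints 0, 1, 2, 3
}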
@@ -2397,6 +2393,7 @@ void HexagonDAGToDAGISel::SelectV65GatherPred(SDNode *N) {
SDValue Base = N->getOperand(4);
SDValue Modifier = N->getOperand(5);
SDValue Offset = N->getOperand(6);
+ SDValue ImmOperand = CurDAG->getTargetConstant(0, dl, MVT::i32);
unsigned Opcode;
unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
@@ -2418,7 +2415,8 @@ void HexagonDAGToDAGISel::SelectV65GatherPred(SDNode *N) {
}
SDVTList VTs = CurDAG->getVTList(MVT::Other);
- SDValue Ops[] = { Address, Predicate, Base, Modifier, Offset, Chain };
+ SDValue Ops[] = { Address, ImmOperand,
+ Predicate, Base, Modifier, Offset, Chain };
SDNode *Result = CurDAG->getMachineNode(Opcode, dl, VTs, Ops);
MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
@@ -2434,6 +2432,7 @@ void HexagonDAGToDAGISel::SelectV65Gather(SDNode *N) {
SDValue Base = N->getOperand(3);
SDValue Modifier = N->getOperand(4);
SDValue Offset = N->getOperand(5);
+ SDValue ImmOperand = CurDAG->getTargetConstant(0, dl, MVT::i32);
unsigned Opcode;
unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
@@ -2455,7 +2454,7 @@ void HexagonDAGToDAGISel::SelectV65Gather(SDNode *N) {
}
SDVTList VTs = CurDAG->getVTList(MVT::Other);
- SDValue Ops[] = { Address, Base, Modifier, Offset, Chain };
+ SDValue Ops[] = { Address, ImmOperand, Base, Modifier, Offset, Chain };
SDNode *Result = CurDAG->getMachineNode(Opcode, dl, VTs, Ops);
MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index 88effed9f076..d7ca934a23e6 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -543,9 +543,8 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// The Glue is necessary since all emitted instructions must be
// stuck together.
if (!CLI.IsTailCall) {
- for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
- Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
- RegsToPass[i].second, Glue);
+ for (const auto &R : RegsToPass) {
+ Chain = DAG.getCopyToReg(Chain, dl, R.first, R.second, Glue);
Glue = Chain.getValue(1);
}
} else {
@@ -560,9 +559,8 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
//
// Do not flag preceding copytoreg stuff together with the following stuff.
Glue = SDValue();
- for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
- Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
- RegsToPass[i].second, Glue);
+ for (const auto &R : RegsToPass) {
+ Chain = DAG.getCopyToReg(Chain, dl, R.first, R.second, Glue);
Glue = Chain.getValue(1);
}
Glue = SDValue();
@@ -589,10 +587,8 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Add argument registers to the end of the list so that they are
// known live into the call.
- for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
- Ops.push_back(DAG.getRegister(RegsToPass[i].first,
- RegsToPass[i].second.getValueType()));
- }
+ for (const auto &R : RegsToPass)
+ Ops.push_back(DAG.getRegister(R.first, R.second.getValueType()));
const uint32_t *Mask = HRI.getCallPreservedMask(MF, CallConv);
assert(Mask && "Missing call preserved mask for calling convention");
@@ -690,7 +686,7 @@ HexagonTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
case InlineAsm::Kind_RegDef:
case InlineAsm::Kind_RegDefEarlyClobber: {
for (; NumVals; --NumVals, ++i) {
- unsigned Reg = cast<RegisterSDNode>(Op.getOperand(i))->getReg();
+ Register Reg = cast<RegisterSDNode>(Op.getOperand(i))->getReg();
if (Reg != LR)
continue;
HMFI.setHasClobberLR(true);
@@ -1190,7 +1186,7 @@ HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
}
// Return LR, which contains the return address. Mark it an implicit live-in.
- unsigned Reg = MF.addLiveIn(HRI.getRARegister(), getRegClassFor(MVT::i32));
+ Register Reg = MF.addLiveIn(HRI.getRARegister(), getRegClassFor(MVT::i32));
return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
}
@@ -1776,6 +1772,18 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
+ // Special handling for half-precision floating point conversions.
+ // Lower half float conversions into library calls.
+ setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
+ setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
+ setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
+ setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
+
+ setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
+ setTruncStoreAction(MVT::f32, MVT::f16, Expand);
+ setTruncStoreAction(MVT::f64, MVT::f16, Expand);
+
// Handling of indexed loads/stores: default is "expand".
//
for (MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64, MVT::f32, MVT::f64,
@@ -1856,6 +1864,11 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
else
setLibcallName(RTLIB::SQRT_F32, "__hexagon_sqrtf");
+ // Routines to handle fp16 storage type.
+ setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
+ setLibcallName(RTLIB::FPROUND_F64_F16, "__truncdfhf2");
+ setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
+
// These cause problems when the shift amount is non-constant.
setLibcallName(RTLIB::SHL_I128, nullptr);
setLibcallName(RTLIB::SRL_I128, nullptr);
@@ -2204,8 +2217,7 @@ HexagonTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG)
// Express the shuffle mask in terms of bytes.
SmallVector<int,8> ByteMask;
unsigned ElemBytes = VecTy.getVectorElementType().getSizeInBits() / 8;
- for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
- int M = Mask[i];
+ for (int M : Mask) {
if (M < 0) {
for (unsigned j = 0; j != ElemBytes; ++j)
ByteMask.push_back(-1);
@@ -2428,8 +2440,8 @@ HexagonTargetLowering::buildVector32(ArrayRef<SDValue> Elem, const SDLoc &dl,
if (AllConst) {
int32_t V = (Consts[0]->getZExtValue() & 0xFF) |
(Consts[1]->getZExtValue() & 0xFF) << 8 |
- (Consts[1]->getZExtValue() & 0xFF) << 16 |
- Consts[2]->getZExtValue() << 24;
+ (Consts[2]->getZExtValue() & 0xFF) << 16 |
+ Consts[3]->getZExtValue() << 24;
return DAG.getBitcast(MVT::v4i8, DAG.getConstant(V, dl, MVT::i32));
}
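The hunk above is a genuine bug fix: the old code packed Consts[1] into lane 2 and Consts[2] into lane 3. A minimal sketch of the corrected byte packing (ordinary C++, independent of the SelectionDAG code):

#include <cstdint>
#include <cstdio>

// Pack four bytes into one 32-bit word, one byte per 8-bit lane, matching the
// corrected expression above.
static uint32_t packV4i8(uint8_t B0, uint8_t B1, uint8_t B2, uint8_t B3) {
  return uint32_t(B0) | uint32_t(B1) << 8 | uint32_t(B2) << 16 |
         uint32_t(B3) << 24;
}

int main() {
  std::printf("0x%08x\n", (unsigned)packV4i8(0x11, 0x22, 0x33, 0x44));
  // prints 0x44332211
}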
@@ -2720,7 +2732,6 @@ SDValue
HexagonTargetLowering::getZero(const SDLoc &dl, MVT Ty, SelectionDAG &DAG)
const {
if (Ty.isVector()) {
- assert(Ty.isInteger() && "Only integer vectors are supported here");
unsigned W = Ty.getSizeInBits();
if (W <= 64)
return DAG.getBitcast(Ty, DAG.getConstant(0, dl, MVT::getIntegerVT(W)));
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelLowering.h b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelLowering.h
index d518c036f125..f9ce7a9407aa 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelLowering.h
@@ -458,6 +458,7 @@ private:
SelectionDAG &DAG) const;
SDValue LowerHvxBuildVector(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerHvxSplatVector(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerHvxConcatVectors(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerHvxExtractElement(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerHvxInsertElement(SDValue Op, SelectionDAG &DAG) const;
@@ -468,7 +469,6 @@ private:
SDValue LowerHvxSignExt(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerHvxZeroExt(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerHvxCttz(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerHvxMul(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerHvxMulh(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerHvxSetCC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerHvxExtend(SDValue Op, SelectionDAG &DAG) const;
@@ -476,6 +476,8 @@ private:
SDValue LowerHvxShift(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerHvxIntrinsic(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerHvxMaskedOp(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerHvxFpExtend(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerHvxConvertFpInt(SDValue Op, SelectionDAG &DAG) const;
SDValue SplitHvxPairOp(SDValue Op, SelectionDAG &DAG) const;
SDValue SplitHvxMemOp(SDValue Op, SelectionDAG &DAG) const;
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp
index f7237f496aee..0ba75a544c04 100644..100755
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp
@@ -55,6 +55,12 @@ HexagonTargetLowering::initializeHVXLowering() {
addRegisterClass(MVT::v32i1, &Hexagon::HvxQRRegClass);
addRegisterClass(MVT::v64i1, &Hexagon::HvxQRRegClass);
addRegisterClass(MVT::v128i1, &Hexagon::HvxQRRegClass);
+ if (Subtarget.useHVXV68Ops() && Subtarget.useHVXFloatingPoint()) {
+ addRegisterClass(MVT::v32f32, &Hexagon::HvxVRRegClass);
+ addRegisterClass(MVT::v64f16, &Hexagon::HvxVRRegClass);
+ addRegisterClass(MVT::v64f32, &Hexagon::HvxWRRegClass);
+ addRegisterClass(MVT::v128f16, &Hexagon::HvxWRRegClass);
+ }
}
// Set up operation actions.
@@ -83,6 +89,72 @@ HexagonTargetLowering::initializeHVXLowering() {
setOperationAction(ISD::VECTOR_SHUFFLE, ByteW, Legal);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
+ if (Subtarget.useHVX128BOps() && Subtarget.useHVXV68Ops() &&
+ Subtarget.useHVXFloatingPoint()) {
+ setOperationAction(ISD::FMINNUM, MVT::v64f16, Legal);
+ setOperationAction(ISD::FMAXNUM, MVT::v64f16, Legal);
+ setOperationAction(ISD::FADD, MVT::v64f16, Legal);
+ setOperationAction(ISD::FSUB, MVT::v64f16, Legal);
+ setOperationAction(ISD::FMUL, MVT::v64f16, Legal);
+ setOperationAction(ISD::FADD, MVT::v32f32, Legal);
+ setOperationAction(ISD::FSUB, MVT::v32f32, Legal);
+ setOperationAction(ISD::FMUL, MVT::v32f32, Legal);
+ setOperationAction(ISD::FMINNUM, MVT::v32f32, Legal);
+ setOperationAction(ISD::FMAXNUM, MVT::v32f32, Legal);
+ setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64f16, Custom);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v64f16, Custom);
+ setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32f32, Custom);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v32f32, Custom);
+
+ // Handle ISD::BUILD_VECTOR for v32f32 in a custom way to generate vsplat
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v32f32, Custom);
+
+ // BUILD_VECTOR with f16 operands cannot be promoted without
+ // promoting the result, so lower the node to vsplat or constant pool
+ setOperationAction(ISD::BUILD_VECTOR, MVT::f16, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::f16, Custom);
+ setOperationAction(ISD::SPLAT_VECTOR, MVT::f16, Custom);
+ setOperationAction(ISD::SPLAT_VECTOR, MVT::v64f16, Legal);
+ setOperationAction(ISD::SPLAT_VECTOR, MVT::v32f32, Legal);
+ // Vector shuffle is always promoted to ByteV and a bitcast to f16 is
+ // generated.
+ setPromoteTo(ISD::VECTOR_SHUFFLE, MVT::v64f16, ByteV);
+ setPromoteTo(ISD::VECTOR_SHUFFLE, MVT::v64f32, ByteW);
+ setPromoteTo(ISD::VECTOR_SHUFFLE, MVT::v32f32, ByteV);
+
+ // Custom-lower BUILD_VECTOR for vector pairs. The standard (target-
+ // independent) handling of it would convert it to a load, which is
+ // not always the optimal choice.
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v64f32, Custom);
+ // Make concat-vectors custom to handle concats of more than 2 vectors.
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v128f16, Custom);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v64f32, Custom);
+
+ setOperationAction(ISD::LOAD, MVT::v64f32, Custom);
+ setOperationAction(ISD::STORE, MVT::v64f32, Custom);
+ setOperationAction(ISD::FADD, MVT::v64f32, Custom);
+ setOperationAction(ISD::FSUB, MVT::v64f32, Custom);
+ setOperationAction(ISD::FMUL, MVT::v64f32, Custom);
+ setOperationAction(ISD::FMINNUM, MVT::v64f32, Custom);
+ setOperationAction(ISD::FMAXNUM, MVT::v64f32, Custom);
+ setOperationAction(ISD::VSELECT, MVT::v64f32, Custom);
+
+ if (Subtarget.useHVXQFloatOps()) {
+ setOperationAction(ISD::FP_EXTEND, MVT::v64f32, Custom);
+ setOperationAction(ISD::FP_ROUND, MVT::v64f16, Legal);
+ } else if (Subtarget.useHVXIEEEFPOps()) {
+ setOperationAction(ISD::FP_EXTEND, MVT::v64f32, Legal);
+ setOperationAction(ISD::FP_ROUND, MVT::v64f16, Legal);
+ }
+
+ setOperationAction(ISD::MLOAD, MVT::v32f32, Custom);
+ setOperationAction(ISD::MSTORE, MVT::v32f32, Custom);
+ setOperationAction(ISD::MLOAD, MVT::v64f16, Custom);
+ setOperationAction(ISD::MSTORE, MVT::v64f16, Custom);
+ setOperationAction(ISD::MLOAD, MVT::v64f32, Custom);
+ setOperationAction(ISD::MSTORE, MVT::v64f32, Custom);
+ }
+
for (MVT T : LegalV) {
setIndexedLoadAction(ISD::POST_INC, T, Legal);
setIndexedStoreAction(ISD::POST_INC, T, Legal);
@@ -137,6 +209,18 @@ HexagonTargetLowering::initializeHVXLowering() {
setPromoteTo(ISD::VECTOR_SHUFFLE, T, ByteV);
}
+ if (Subtarget.useHVXQFloatOps()) {
+ setOperationAction(ISD::SINT_TO_FP, T, Expand);
+ setOperationAction(ISD::UINT_TO_FP, T, Expand);
+ setOperationAction(ISD::FP_TO_SINT, T, Expand);
+ setOperationAction(ISD::FP_TO_UINT, T, Expand);
+ } else if (Subtarget.useHVXIEEEFPOps()) {
+ setOperationAction(ISD::SINT_TO_FP, T, Custom);
+ setOperationAction(ISD::UINT_TO_FP, T, Custom);
+ setOperationAction(ISD::FP_TO_SINT, T, Custom);
+ setOperationAction(ISD::FP_TO_UINT, T, Custom);
+ }
+
setCondCodeAction(ISD::SETNE, T, Expand);
setCondCodeAction(ISD::SETLE, T, Expand);
setCondCodeAction(ISD::SETGE, T, Expand);
@@ -198,8 +282,39 @@ HexagonTargetLowering::initializeHVXLowering() {
setOperationAction(ISD::UMIN, T, Custom);
setOperationAction(ISD::UMAX, T, Custom);
}
+
+ setOperationAction(ISD::SINT_TO_FP, T, Custom);
+ setOperationAction(ISD::UINT_TO_FP, T, Custom);
+ setOperationAction(ISD::FP_TO_SINT, T, Custom);
+ setOperationAction(ISD::FP_TO_UINT, T, Custom);
}
+ setCondCodeAction(ISD::SETNE, MVT::v64f16, Expand);
+ setCondCodeAction(ISD::SETLE, MVT::v64f16, Expand);
+ setCondCodeAction(ISD::SETGE, MVT::v64f16, Expand);
+ setCondCodeAction(ISD::SETLT, MVT::v64f16, Expand);
+ setCondCodeAction(ISD::SETONE, MVT::v64f16, Expand);
+ setCondCodeAction(ISD::SETOLE, MVT::v64f16, Expand);
+ setCondCodeAction(ISD::SETOGE, MVT::v64f16, Expand);
+ setCondCodeAction(ISD::SETOLT, MVT::v64f16, Expand);
+ setCondCodeAction(ISD::SETUNE, MVT::v64f16, Expand);
+ setCondCodeAction(ISD::SETULE, MVT::v64f16, Expand);
+ setCondCodeAction(ISD::SETUGE, MVT::v64f16, Expand);
+ setCondCodeAction(ISD::SETULT, MVT::v64f16, Expand);
+
+ setCondCodeAction(ISD::SETNE, MVT::v32f32, Expand);
+ setCondCodeAction(ISD::SETLE, MVT::v32f32, Expand);
+ setCondCodeAction(ISD::SETGE, MVT::v32f32, Expand);
+ setCondCodeAction(ISD::SETLT, MVT::v32f32, Expand);
+ setCondCodeAction(ISD::SETONE, MVT::v32f32, Expand);
+ setCondCodeAction(ISD::SETOLE, MVT::v32f32, Expand);
+ setCondCodeAction(ISD::SETOGE, MVT::v32f32, Expand);
+ setCondCodeAction(ISD::SETOLT, MVT::v32f32, Expand);
+ setCondCodeAction(ISD::SETUNE, MVT::v32f32, Expand);
+ setCondCodeAction(ISD::SETULE, MVT::v32f32, Expand);
+ setCondCodeAction(ISD::SETUGE, MVT::v32f32, Expand);
+ setCondCodeAction(ISD::SETULT, MVT::v32f32, Expand);
+
// Boolean vectors.
for (MVT T : LegalW) {
@@ -497,7 +612,9 @@ HexagonTargetLowering::buildHvxVectorReg(ArrayRef<SDValue> Values,
assert(ElemSize*VecLen == HwLen);
SmallVector<SDValue,32> Words;
- if (VecTy.getVectorElementType() != MVT::i32) {
+ if (VecTy.getVectorElementType() != MVT::i32 &&
+ !(Subtarget.useHVXFloatingPoint() &&
+ VecTy.getVectorElementType() == MVT::f32)) {
assert((ElemSize == 1 || ElemSize == 2) && "Invalid element size");
unsigned OpsPerWord = (ElemSize == 1) ? 4 : 2;
MVT PartVT = MVT::getVectorVT(VecTy.getVectorElementType(), OpsPerWord);
@@ -506,22 +623,31 @@ HexagonTargetLowering::buildHvxVectorReg(ArrayRef<SDValue> Values,
Words.push_back(DAG.getBitcast(MVT::i32, W));
}
} else {
- Words.assign(Values.begin(), Values.end());
+ for (SDValue V : Values)
+ Words.push_back(DAG.getBitcast(MVT::i32, V));
}
+ auto isSplat = [] (ArrayRef<SDValue> Values, SDValue &SplatV) {
+ unsigned NumValues = Values.size();
+ assert(NumValues > 0);
+ bool IsUndef = true;
+ for (unsigned i = 0; i != NumValues; ++i) {
+ if (Values[i].isUndef())
+ continue;
+ IsUndef = false;
+ if (!SplatV.getNode())
+ SplatV = Values[i];
+ else if (SplatV != Values[i])
+ return false;
+ }
+ if (IsUndef)
+ SplatV = Values[0];
+ return true;
+ };
unsigned NumWords = Words.size();
- bool IsSplat = true, IsUndef = true;
SDValue SplatV;
- for (unsigned i = 0; i != NumWords && IsSplat; ++i) {
- if (isUndef(Words[i]))
- continue;
- IsUndef = false;
- if (!SplatV.getNode())
- SplatV = Words[i];
- else if (SplatV != Words[i])
- IsSplat = false;
- }
- if (IsUndef)
+ bool IsSplat = isSplat(Words, SplatV);
+ if (IsSplat && isUndef(SplatV))
return DAG.getUNDEF(VecTy);
if (IsSplat) {
assert(SplatV.getNode());
@@ -618,24 +744,75 @@ HexagonTargetLowering::buildHvxVectorReg(ArrayRef<SDValue> Values,
}
}
- // Construct two halves in parallel, then or them together.
+ // Find the most common element to initialize the vector with. This avoids
+ // unnecessary vinsert/valign for cases where the same value is present
+ // many times. A histogram of the vector's elements identifies the most
+ // common element n.
assert(4*Words.size() == Subtarget.getVectorLength());
- SDValue HalfV0 = getInstr(Hexagon::V6_vd0, dl, VecTy, {}, DAG);
- SDValue HalfV1 = getInstr(Hexagon::V6_vd0, dl, VecTy, {}, DAG);
- SDValue S = DAG.getConstant(4, dl, MVT::i32);
+ int VecHist[32];
+ int n = 0;
+ for (unsigned i = 0; i != NumWords; ++i) {
+ VecHist[i] = 0;
+ if (Words[i].isUndef())
+ continue;
+ for (unsigned j = i; j != NumWords; ++j)
+ if (Words[i] == Words[j])
+ VecHist[i]++;
+
+ if (VecHist[i] > VecHist[n])
+ n = i;
+ }
+
+ SDValue HalfV = getZero(dl, VecTy, DAG);
+ if (VecHist[n] > 1) {
+ SDValue SplatV = DAG.getNode(ISD::SPLAT_VECTOR, dl, VecTy, Words[n]);
+ HalfV = DAG.getNode(HexagonISD::VALIGN, dl, VecTy,
+ {HalfV, SplatV, DAG.getConstant(HwLen/2, dl, MVT::i32)});
+ }
+ SDValue HalfV0 = HalfV;
+ SDValue HalfV1 = HalfV;
+
+ // Construct two halves in parallel, then or them together. Rn and Rm count
+ // the number of rotations needed before the next element. One last rotation
+ // is performed post-loop to position the last element.
+ int Rn = 0, Rm = 0;
+ SDValue Sn, Sm;
+ SDValue N = HalfV0;
+ SDValue M = HalfV1;
for (unsigned i = 0; i != NumWords/2; ++i) {
- SDValue N = DAG.getNode(HexagonISD::VINSERTW0, dl, VecTy,
- {HalfV0, Words[i]});
- SDValue M = DAG.getNode(HexagonISD::VINSERTW0, dl, VecTy,
- {HalfV1, Words[i+NumWords/2]});
- HalfV0 = DAG.getNode(HexagonISD::VROR, dl, VecTy, {N, S});
- HalfV1 = DAG.getNode(HexagonISD::VROR, dl, VecTy, {M, S});
+
+ // Rotate by element count since last insertion.
+ if (Words[i] != Words[n] || VecHist[n] <= 1) {
+ Sn = DAG.getConstant(Rn, dl, MVT::i32);
+ HalfV0 = DAG.getNode(HexagonISD::VROR, dl, VecTy, {N, Sn});
+ N = DAG.getNode(HexagonISD::VINSERTW0, dl, VecTy,
+ {HalfV0, Words[i]});
+ Rn = 0;
+ }
+ if (Words[i+NumWords/2] != Words[n] || VecHist[n] <= 1) {
+ Sm = DAG.getConstant(Rm, dl, MVT::i32);
+ HalfV1 = DAG.getNode(HexagonISD::VROR, dl, VecTy, {M, Sm});
+ M = DAG.getNode(HexagonISD::VINSERTW0, dl, VecTy,
+ {HalfV1, Words[i+NumWords/2]});
+ Rm = 0;
+ }
+ Rn += 4;
+ Rm += 4;
}
+ // Perform last rotation.
+ Sn = DAG.getConstant(Rn+HwLen/2, dl, MVT::i32);
+ Sm = DAG.getConstant(Rm, dl, MVT::i32);
+ HalfV0 = DAG.getNode(HexagonISD::VROR, dl, VecTy, {N, Sn});
+ HalfV1 = DAG.getNode(HexagonISD::VROR, dl, VecTy, {M, Sm});
+
+ SDValue T0 = DAG.getBitcast(tyVector(VecTy, MVT::i32), HalfV0);
+ SDValue T1 = DAG.getBitcast(tyVector(VecTy, MVT::i32), HalfV1);
+
+ SDValue DstV = DAG.getNode(ISD::OR, dl, ty(T0), {T0, T1});
- HalfV0 = DAG.getNode(HexagonISD::VROR, dl, VecTy,
- {HalfV0, DAG.getConstant(HwLen/2, dl, MVT::i32)});
- SDValue DstV = DAG.getNode(ISD::OR, dl, VecTy, {HalfV0, HalfV1});
- return DstV;
+ SDValue OutV =
+ DAG.getBitcast(tyVector(ty(DstV), VecTy.getVectorElementType()), DstV);
+ return OutV;
}
SDValue
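The rewritten buildHvxVectorReg() above seeds both halves with the most frequently occurring word, so only the remaining words need vinsert/vror steps. A minimal standalone sketch of the "most common word" selection, assuming plain 32-bit words in a std::vector (undef handling and the SDValue plumbing are omitted):

#include <cstdint>
#include <cstdio>
#include <vector>

// For each position, count how many elements from that position onward are
// equal to it; the position with the highest count identifies the most common
// word (its first occurrence receives the full count).
static size_t mostCommonIndex(const std::vector<uint32_t> &Words) {
  std::vector<int> Hist(Words.size(), 0);
  size_t Best = 0;
  for (size_t I = 0; I != Words.size(); ++I) {
    for (size_t J = I; J != Words.size(); ++J)
      if (Words[I] == Words[J])
        ++Hist[I];
    if (Hist[I] > Hist[Best])
      Best = I;
  }
  return Best;
}

int main() {
  std::vector<uint32_t> Words = {7, 3, 7, 7, 1, 3, 7, 7};
  size_t N = mostCommonIndex(Words);
  std::printf("seed with index %zu, value %u\n", N, (unsigned)Words[N]);
  // seed with index 0, value 7
}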
@@ -1237,6 +1414,19 @@ HexagonTargetLowering::LowerHvxBuildVector(SDValue Op, SelectionDAG &DAG)
if (VecTy.getVectorElementType() == MVT::i1)
return buildHvxVectorPred(Ops, dl, VecTy, DAG);
+ // For an MVT::f16 BUILD_VECTOR, since MVT::f16 is not a legal type,
+ // build the vector with the operands bitcast to i16 and bitcast the
+ // result back to f16.
+ if (VecTy.getVectorElementType() == MVT::f16) {
+ SmallVector<SDValue,64> NewOps;
+ for (unsigned i = 0; i != Size; i++)
+ NewOps.push_back(DAG.getBitcast(MVT::i16, Ops[i]));
+
+ SDValue T0 = DAG.getNode(ISD::BUILD_VECTOR, dl,
+ tyVector(VecTy, MVT::i16), NewOps);
+ return DAG.getBitcast(tyVector(VecTy, MVT::f16), T0);
+ }
+
if (VecTy.getSizeInBits() == 16*Subtarget.getVectorLength()) {
ArrayRef<SDValue> A(Ops);
MVT SingleTy = typeSplit(VecTy).first;
@@ -1249,6 +1439,24 @@ HexagonTargetLowering::LowerHvxBuildVector(SDValue Op, SelectionDAG &DAG)
}
SDValue
+HexagonTargetLowering::LowerHvxSplatVector(SDValue Op, SelectionDAG &DAG)
+ const {
+ const SDLoc &dl(Op);
+ MVT VecTy = ty(Op);
+ MVT ArgTy = ty(Op.getOperand(0));
+
+ if (ArgTy == MVT::f16) {
+ MVT SplatTy = MVT::getVectorVT(MVT::i16, VecTy.getVectorNumElements());
+ SDValue ToInt16 = DAG.getBitcast(MVT::i16, Op.getOperand(0));
+ SDValue ToInt32 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, ToInt16);
+ SDValue Splat = DAG.getNode(ISD::SPLAT_VECTOR, dl, SplatTy, ToInt32);
+ return DAG.getBitcast(VecTy, Splat);
+ }
+
+ return SDValue();
+}
+
+SDValue
HexagonTargetLowering::LowerHvxConcatVectors(SDValue Op, SelectionDAG &DAG)
const {
// Vector concatenation of two integer (non-bool) vectors does not need
@@ -1363,6 +1571,7 @@ SDValue
HexagonTargetLowering::LowerHvxInsertElement(SDValue Op, SelectionDAG &DAG)
const {
const SDLoc &dl(Op);
+ MVT VecTy = ty(Op);
SDValue VecV = Op.getOperand(0);
SDValue ValV = Op.getOperand(1);
SDValue IdxV = Op.getOperand(2);
@@ -1370,6 +1579,14 @@ HexagonTargetLowering::LowerHvxInsertElement(SDValue Op, SelectionDAG &DAG)
if (ElemTy == MVT::i1)
return insertHvxElementPred(VecV, IdxV, ValV, dl, DAG);
+ if (ElemTy == MVT::f16) {
+ SDValue T0 = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
+ tyVector(VecTy, MVT::i16),
+ DAG.getBitcast(tyVector(VecTy, MVT::i16), VecV),
+ DAG.getBitcast(MVT::i16, ValV), IdxV);
+ return DAG.getBitcast(tyVector(VecTy, MVT::f16), T0);
+ }
+
return insertHvxElementReg(VecV, IdxV, ValV, dl, DAG);
}
@@ -1800,6 +2017,80 @@ HexagonTargetLowering::LowerHvxMaskedOp(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, {StoreLo, StoreHi});
}
+SDValue HexagonTargetLowering::LowerHvxFpExtend(SDValue Op,
+ SelectionDAG &DAG) const {
+ // This conversion only applies to QFloat.
+ assert(Subtarget.useHVXQFloatOps());
+
+ assert(Op->getOpcode() == ISD::FP_EXTEND);
+
+ MVT VecTy = ty(Op);
+ MVT ArgTy = ty(Op.getOperand(0));
+ const SDLoc &dl(Op);
+ assert(VecTy == MVT::v64f32 && ArgTy == MVT::v64f16);
+
+ SDValue F16Vec = Op.getOperand(0);
+
+ APFloat FloatVal = APFloat(1.0f);
+ bool Ignored;
+ FloatVal.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
+ SDValue Fp16Ones = DAG.getConstantFP(FloatVal, dl, ArgTy);
+ SDValue VmpyVec =
+ getInstr(Hexagon::V6_vmpy_qf32_hf, dl, VecTy, {F16Vec, Fp16Ones}, DAG);
+
+ MVT HalfTy = typeSplit(VecTy).first;
+ VectorPair Pair = opSplit(VmpyVec, dl, DAG);
+ SDValue LoVec =
+ getInstr(Hexagon::V6_vconv_sf_qf32, dl, HalfTy, {Pair.first}, DAG);
+ SDValue HiVec =
+ getInstr(Hexagon::V6_vconv_sf_qf32, dl, HalfTy, {Pair.second}, DAG);
+
+ SDValue ShuffVec =
+ getInstr(Hexagon::V6_vshuffvdd, dl, VecTy,
+ {HiVec, LoVec, DAG.getConstant(-4, dl, MVT::i32)}, DAG);
+
+ return ShuffVec;
+}
+
+SDValue
+HexagonTargetLowering::LowerHvxConvertFpInt(SDValue Op, SelectionDAG &DAG)
+ const {
+ // This conversion only applies to IEEE.
+ assert(Subtarget.useHVXIEEEFPOps());
+
+ unsigned Opc = Op.getOpcode();
+ // Catch invalid conversion ops (just in case).
+ assert(Opc == ISD::FP_TO_SINT || Opc == ISD::FP_TO_UINT ||
+ Opc == ISD::SINT_TO_FP || Opc == ISD::UINT_TO_FP);
+ MVT ResTy = ty(Op);
+
+ if (Opc == ISD::FP_TO_SINT || Opc == ISD::FP_TO_UINT) {
+ MVT FpTy = ty(Op.getOperand(0)).getVectorElementType();
+ // Only conversions from f16 are handled here.
+ if (FpTy != MVT::f16)
+ return SDValue();
+
+ MVT IntTy = ResTy.getVectorElementType();
+ // Other int types aren't legal in HVX, so we shouldn't see them here.
+ assert(IntTy == MVT::i8 || IntTy == MVT::i16 || IntTy == MVT::i32);
+ // Conversions to i8 and i16 are legal.
+ if (IntTy == MVT::i8 || IntTy == MVT::i16)
+ return Op;
+ } else {
+ // Converting int -> fp.
+ if (ResTy.getVectorElementType() != MVT::f16)
+ return SDValue();
+ MVT IntTy = ty(Op.getOperand(0)).getVectorElementType();
+ // Other int types aren't legal in HVX, so we shouldn't see them here.
+ assert(IntTy == MVT::i8 || IntTy == MVT::i16 || IntTy == MVT::i32);
+ // i8, i16 -> f16 is legal.
+ if (IntTy == MVT::i8 || IntTy == MVT::i16)
+ return Op;
+ }
+
+ return SDValue();
+}
+
SDValue
HexagonTargetLowering::SplitHvxPairOp(SDValue Op, SelectionDAG &DAG) const {
assert(!Op.isMachineOpcode());
@@ -2104,10 +2395,22 @@ HexagonTargetLowering::LowerHvxOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::MLOAD:
case ISD::MSTORE:
return SplitHvxMemOp(Op, DAG);
+ case ISD::SINT_TO_FP:
+ case ISD::UINT_TO_FP:
+ case ISD::FP_TO_SINT:
+ case ISD::FP_TO_UINT:
+ if (ty(Op).getSizeInBits() == ty(Op.getOperand(0)).getSizeInBits())
+ return SplitHvxPairOp(Op, DAG);
+ break;
case ISD::CTPOP:
case ISD::CTLZ:
case ISD::CTTZ:
case ISD::MUL:
+ case ISD::FADD:
+ case ISD::FSUB:
+ case ISD::FMUL:
+ case ISD::FMINNUM:
+ case ISD::FMAXNUM:
case ISD::MULHS:
case ISD::MULHU:
case ISD::AND:
@@ -2134,6 +2437,7 @@ HexagonTargetLowering::LowerHvxOperation(SDValue Op, SelectionDAG &DAG) const {
default:
break;
case ISD::BUILD_VECTOR: return LowerHvxBuildVector(Op, DAG);
+ case ISD::SPLAT_VECTOR: return LowerHvxSplatVector(Op, DAG);
case ISD::CONCAT_VECTORS: return LowerHvxConcatVectors(Op, DAG);
case ISD::INSERT_SUBVECTOR: return LowerHvxInsertSubvector(Op, DAG);
case ISD::INSERT_VECTOR_ELT: return LowerHvxInsertElement(Op, DAG);
@@ -2158,6 +2462,11 @@ HexagonTargetLowering::LowerHvxOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::MSTORE: return LowerHvxMaskedOp(Op, DAG);
// Unaligned loads will be handled by the default lowering.
case ISD::LOAD: return SDValue();
+ case ISD::FP_EXTEND: return LowerHvxFpExtend(Op, DAG);
+ case ISD::FP_TO_SINT:
+ case ISD::FP_TO_UINT:
+ case ISD::SINT_TO_FP:
+ case ISD::UINT_TO_FP: return LowerHvxConvertFpInt(Op, DAG);
}
#ifndef NDEBUG
Op.dumpr(&DAG);
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
index 931b0c0e0090..9b4e92a16663 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -146,6 +146,48 @@ static unsigned nonDbgMICount(MachineBasicBlock::const_instr_iterator MIB,
return Count;
}
+// Check if the A2_tfrsi instruction is cheap or not. If the operand has
+// to be constant-extended, it is not cheap since it occupies two slots
+// in a packet.
+bool HexagonInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
+ // Enable the following steps only at Os/Oz
+ if (!(MI.getMF()->getFunction().hasOptSize()))
+ return MI.isAsCheapAsAMove();
+
+ if (MI.getOpcode() == Hexagon::A2_tfrsi) {
+ auto Op = MI.getOperand(1);
+ // If the instruction has a global address as operand, it is not cheap
+ // since the operand will be constant extended.
+ if (Op.getType() == MachineOperand::MO_GlobalAddress)
+ return false;
+ // If the instruction has an immediate operand wider than 16 bits, it
+ // will be constant-extended and hence is not cheap.
+ if (Op.isImm()) {
+ int64_t Imm = Op.getImm();
+ if (!isInt<16>(Imm))
+ return false;
+ }
+ }
+ return MI.isAsCheapAsAMove();
+}
+
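For reference, the isInt<16>(Imm) test above is what decides whether an A2_tfrsi immediate fits in a single slot or needs a constant extender. A minimal sketch of that signed-width check (generic C++, not the LLVM MathExtras helper):

#include <cstdint>
#include <cstdio>

// True if Value is representable in Bits signed bits (assumes 1 < Bits < 64).
static bool fitsInSignedBits(int64_t Value, unsigned Bits) {
  const int64_t Lo = -(int64_t(1) << (Bits - 1));
  const int64_t Hi = (int64_t(1) << (Bits - 1)) - 1;
  return Value >= Lo && Value <= Hi;
}

int main() {
  std::printf("%d %d\n",
              fitsInSignedBits(32767, 16),   // 1: single-slot immediate
              fitsInSignedBits(65536, 16));  // 0: needs a constant extender
}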
+// Do not sink floating point instructions that update the USR register.
+// Example:
+// feclearexcept
+// F2_conv_w2sf
+// fetestexcept
+// MachineSink sinks F2_conv_w2sf and we are not able to catch exceptions.
+// TODO: On some of these floating point instructions, USR is marked as Use.
+// In reality, these instructions also Def the USR. If USR is marked as Def,
+// some of the assumptions in assembler packetization are broken.
+bool HexagonInstrInfo::shouldSink(const MachineInstr &MI) const {
+ // Assumption: A floating point instruction that reads the USR will write
+ // the USR as well.
+ if (isFloat(MI) && MI.hasRegisterImplicitUseOperand(Hexagon::USR))
+ return false;
+ return true;
+}
+
/// Find the hardware loop instruction used to set-up the specified loop.
/// On Hexagon, we have two instructions used to set-up the hardware loop
/// (LOOP0, LOOP1) with corresponding endloop (ENDLOOP0, ENDLOOP1) instructions
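To illustrate why shouldSink() above refuses to sink USR-reading float instructions: the conversion has to stay between the "clear" and "test" of the FP exception flags, or the raised flag is missed. A small sketch of that pattern using the portable <cfenv> facilities (this is the analogous C++ construct, not the Hexagon F2_conv_w2sf sequence itself):

#include <cfenv>
#include <cmath>
#include <cstdio>

int main() {
  std::feclearexcept(FE_ALL_EXCEPT);
  volatile float Big = 1.0e20f;   // too large for any long
  // The conversion below is the operation that must not be moved (sunk) past
  // fetestexcept(): an out-of-range conversion raises FE_INVALID, and the
  // test only observes it if the original ordering is preserved.
  long Out = std::lrintf(Big);
  if (std::fetestexcept(FE_INVALID))
    std::puts("conversion raised FE_INVALID");
  (void)Out;
  return 0;
}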
@@ -1464,75 +1506,75 @@ HexagonInstrInfo::expandVGatherPseudo(MachineInstr &MI) const {
switch (Opc) {
case Hexagon::V6_vgathermh_pseudo:
First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermh))
- .add(MI.getOperand(1))
.add(MI.getOperand(2))
- .add(MI.getOperand(3));
+ .add(MI.getOperand(3))
+ .add(MI.getOperand(4));
BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
.add(MI.getOperand(0))
- .addImm(0)
+ .addImm(MI.getOperand(1).getImm())
.addReg(Hexagon::VTMP);
MBB.erase(MI);
return First.getInstrIterator();
case Hexagon::V6_vgathermw_pseudo:
First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermw))
- .add(MI.getOperand(1))
.add(MI.getOperand(2))
- .add(MI.getOperand(3));
+ .add(MI.getOperand(3))
+ .add(MI.getOperand(4));
BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
.add(MI.getOperand(0))
- .addImm(0)
+ .addImm(MI.getOperand(1).getImm())
.addReg(Hexagon::VTMP);
MBB.erase(MI);
return First.getInstrIterator();
case Hexagon::V6_vgathermhw_pseudo:
First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermhw))
- .add(MI.getOperand(1))
.add(MI.getOperand(2))
- .add(MI.getOperand(3));
+ .add(MI.getOperand(3))
+ .add(MI.getOperand(4));
BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
.add(MI.getOperand(0))
- .addImm(0)
+ .addImm(MI.getOperand(1).getImm())
.addReg(Hexagon::VTMP);
MBB.erase(MI);
return First.getInstrIterator();
case Hexagon::V6_vgathermhq_pseudo:
First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermhq))
- .add(MI.getOperand(1))
.add(MI.getOperand(2))
.add(MI.getOperand(3))
- .add(MI.getOperand(4));
+ .add(MI.getOperand(4))
+ .add(MI.getOperand(5));
BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
.add(MI.getOperand(0))
- .addImm(0)
+ .addImm(MI.getOperand(1).getImm())
.addReg(Hexagon::VTMP);
MBB.erase(MI);
return First.getInstrIterator();
case Hexagon::V6_vgathermwq_pseudo:
First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermwq))
- .add(MI.getOperand(1))
.add(MI.getOperand(2))
.add(MI.getOperand(3))
- .add(MI.getOperand(4));
+ .add(MI.getOperand(4))
+ .add(MI.getOperand(5));
BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
.add(MI.getOperand(0))
- .addImm(0)
+ .addImm(MI.getOperand(1).getImm())
.addReg(Hexagon::VTMP);
MBB.erase(MI);
return First.getInstrIterator();
case Hexagon::V6_vgathermhwq_pseudo:
First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermhwq))
- .add(MI.getOperand(1))
.add(MI.getOperand(2))
.add(MI.getOperand(3))
- .add(MI.getOperand(4));
+ .add(MI.getOperand(4))
+ .add(MI.getOperand(5));
BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
.add(MI.getOperand(0))
- .addImm(0)
+ .addImm(MI.getOperand(1).getImm())
.addReg(Hexagon::VTMP);
MBB.erase(MI);
return First.getInstrIterator();
@@ -1851,6 +1893,7 @@ bool HexagonInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
case Hexagon::C4_cmplte:
case Hexagon::C4_cmplteu:
SrcReg2 = MI.getOperand(2).getReg();
+ Value = 0;
return true;
case Hexagon::C2_cmpeqi:
@@ -2725,7 +2768,13 @@ bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
case Hexagon::V6_vL32b_nt_ai:
case Hexagon::V6_vS32b_nt_ai:
case Hexagon::V6_vL32Ub_ai:
- case Hexagon::V6_vS32Ub_ai: {
+ case Hexagon::V6_vS32Ub_ai:
+ case Hexagon::V6_vgathermh_pseudo:
+ case Hexagon::V6_vgathermw_pseudo:
+ case Hexagon::V6_vgathermhw_pseudo:
+ case Hexagon::V6_vgathermhq_pseudo:
+ case Hexagon::V6_vgathermwq_pseudo:
+ case Hexagon::V6_vgathermhwq_pseudo: {
unsigned VectorSize = TRI->getSpillSize(Hexagon::HvxVRRegClass);
assert(isPowerOf2_32(VectorSize));
if (Offset & (VectorSize-1))
@@ -2751,6 +2800,11 @@ bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
case Hexagon::S4_storeirit_io:
case Hexagon::S4_storeirif_io:
return isShiftedUInt<6,2>(Offset);
+ // Handle these two compare instructions that are not extendable.
+ case Hexagon::A4_cmpbeqi:
+ return isUInt<8>(Offset);
+ case Hexagon::A4_cmpbgti:
+ return isInt<8>(Offset);
}
if (Extend)
@@ -2788,6 +2842,8 @@ bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
case Hexagon::L4_isub_memopw_io:
case Hexagon::L4_add_memopw_io:
case Hexagon::L4_sub_memopw_io:
+ case Hexagon::L4_iand_memopw_io:
+ case Hexagon::L4_ior_memopw_io:
case Hexagon::L4_and_memopw_io:
case Hexagon::L4_or_memopw_io:
return (0 <= Offset && Offset <= 255);
@@ -2796,6 +2852,8 @@ bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
case Hexagon::L4_isub_memoph_io:
case Hexagon::L4_add_memoph_io:
case Hexagon::L4_sub_memoph_io:
+ case Hexagon::L4_iand_memoph_io:
+ case Hexagon::L4_ior_memoph_io:
case Hexagon::L4_and_memoph_io:
case Hexagon::L4_or_memoph_io:
return (0 <= Offset && Offset <= 127);
@@ -2804,6 +2862,8 @@ bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
case Hexagon::L4_isub_memopb_io:
case Hexagon::L4_add_memopb_io:
case Hexagon::L4_sub_memopb_io:
+ case Hexagon::L4_iand_memopb_io:
+ case Hexagon::L4_ior_memopb_io:
case Hexagon::L4_and_memopb_io:
case Hexagon::L4_or_memopb_io:
return (0 <= Offset && Offset <= 63);
@@ -2848,8 +2908,18 @@ bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
case Hexagon::S2_pstorerdt_io:
case Hexagon::S2_pstorerdf_io:
return isShiftedUInt<6,3>(Offset);
+
+ case Hexagon::L2_loadbsw2_io:
+ case Hexagon::L2_loadbzw2_io:
+ return isShiftedInt<11,1>(Offset);
+
+ case Hexagon::L2_loadbsw4_io:
+ case Hexagon::L2_loadbzw4_io:
+ return isShiftedInt<11,2>(Offset);
} // switch
+ dbgs() << "Failed Opcode is : " << Opcode << " (" << getName(Opcode)
+ << ")\n";
llvm_unreachable("No offset range is defined for this opcode. "
"Please define it in the above switch statement!");
}
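For the new offset ranges above, isShiftedInt<N,S>(Offset) means: Offset is a multiple of 2^S and, once scaled down by 2^S, fits in N signed bits (isShiftedUInt is the unsigned analogue). A minimal sketch of the signed variant, not the LLVM helper itself:

#include <cstdint>
#include <cstdio>

// Offset must be 2^S-aligned and, once scaled down, fit in N signed bits.
static bool shiftedIntFits(int64_t Offset, unsigned N, unsigned S) {
  const int64_t Scale = int64_t(1) << S;
  if (Offset % Scale != 0)
    return false;
  const int64_t Scaled = Offset / Scale;
  return Scaled >= -(int64_t(1) << (N - 1)) && Scaled < (int64_t(1) << (N - 1));
}

int main() {
  // L2_loadbsw2_io uses isShiftedInt<11,1>: even offsets in [-2048, 2046].
  std::printf("%d %d %d\n",
              shiftedIntFits(2046, 11, 1),   // 1
              shiftedIntFits(2047, 11, 1),   // 0: not 2-byte aligned
              shiftedIntFits(4096, 11, 1));  // 0: out of range
}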
@@ -3486,9 +3556,9 @@ int HexagonInstrInfo::getDuplexOpcode(const MachineInstr &MI,
if (Iter != DupMap.end())
return Iter->second;
} else { // Conversion to Tiny core.
- for (auto Iter = DupMap.begin(), End = DupMap.end(); Iter != End; ++Iter)
- if (Iter->second == OpNum)
- return Iter->first;
+ for (const auto &Iter : DupMap)
+ if (Iter.second == OpNum)
+ return Iter.first;
}
return -1;
}
@@ -3516,6 +3586,10 @@ int HexagonInstrInfo::getDotCurOp(const MachineInstr &MI) const {
return Hexagon::V6_vL32b_nt_cur_pi;
case Hexagon::V6_vL32b_nt_ai:
return Hexagon::V6_vL32b_nt_cur_ai;
+ case Hexagon::V6_vL32b_ppu:
+ return Hexagon::V6_vL32b_cur_ppu;
+ case Hexagon::V6_vL32b_nt_ppu:
+ return Hexagon::V6_vL32b_nt_cur_ppu;
}
return 0;
}
@@ -3532,6 +3606,10 @@ int HexagonInstrInfo::getNonDotCurOp(const MachineInstr &MI) const {
return Hexagon::V6_vL32b_nt_pi;
case Hexagon::V6_vL32b_nt_cur_ai:
return Hexagon::V6_vL32b_nt_ai;
+ case Hexagon::V6_vL32b_cur_ppu:
+ return Hexagon::V6_vL32b_ppu;
+ case Hexagon::V6_vL32b_nt_cur_ppu:
+ return Hexagon::V6_vL32b_nt_ppu;
}
return 0;
}
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonInstrInfo.h b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
index 830f04d9eac3..2af09c857d86 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
@@ -335,6 +335,13 @@ public:
getSerializableBitmaskMachineOperandTargetFlags() const override;
bool isTailCall(const MachineInstr &MI) const override;
+ bool isAsCheapAsAMove(const MachineInstr &MI) const override;
+
+ // Return true if the instruction should be sunk by MachineSink.
+ // MachineSink determines on its own whether the instruction is safe to sink;
+ // this gives the target a hook to override the default behavior with regards
+ // to which instructions should be sunk.
+ bool shouldSink(const MachineInstr &MI) const override;
/// HexagonInstrInfo specifics.
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonOptAddrMode.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonOptAddrMode.cpp
index 2cdfbe7845b6..ea6a7498e27f 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonOptAddrMode.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonOptAddrMode.cpp
@@ -110,6 +110,8 @@ private:
bool changeAddAsl(NodeAddr<UseNode *> AddAslUN, MachineInstr *AddAslMI,
const MachineOperand &ImmOp, unsigned ImmOpNum);
bool isValidOffset(MachineInstr *MI, int Offset);
+ unsigned getBaseOpPosition(MachineInstr *MI);
+ unsigned getOffsetOpPosition(MachineInstr *MI);
};
} // end anonymous namespace
@@ -322,6 +324,25 @@ bool HexagonOptAddrMode::isSafeToExtLR(NodeAddr<StmtNode *> SN,
}
bool HexagonOptAddrMode::isValidOffset(MachineInstr *MI, int Offset) {
+ if (HII->isHVXVec(*MI)) {
+ // Only HVX vgather instructions are handled here.
+ // TODO: extend the pass to other vector load/store operations
+ switch (MI->getOpcode()) {
+ case Hexagon::V6_vgathermh_pseudo:
+ case Hexagon::V6_vgathermw_pseudo:
+ case Hexagon::V6_vgathermhw_pseudo:
+ case Hexagon::V6_vgathermhq_pseudo:
+ case Hexagon::V6_vgathermwq_pseudo:
+ case Hexagon::V6_vgathermhwq_pseudo:
+ return HII->isValidOffset(MI->getOpcode(), Offset, HRI, false);
+ default:
+ return false;
+ }
+ }
+
+ if (HII->getAddrMode(*MI) != HexagonII::BaseImmOffset)
+ return false;
+
unsigned AlignMask = 0;
switch (HII->getMemAccessSize(*MI)) {
case HexagonII::MemAccessSize::DoubleWordAccess:
@@ -345,29 +366,67 @@ bool HexagonOptAddrMode::isValidOffset(MachineInstr *MI, int Offset) {
return HII->isValidOffset(MI->getOpcode(), Offset, HRI, false);
}
+unsigned HexagonOptAddrMode::getBaseOpPosition(MachineInstr *MI) {
+ const MCInstrDesc &MID = MI->getDesc();
+ switch (MI->getOpcode()) {
+ // vgather pseudos are both mayLoad and mayStore, so the
+ // Base and Offset operand positions must be specified
+ // explicitly.
+ case Hexagon::V6_vgathermh_pseudo:
+ case Hexagon::V6_vgathermw_pseudo:
+ case Hexagon::V6_vgathermhw_pseudo:
+ case Hexagon::V6_vgathermhq_pseudo:
+ case Hexagon::V6_vgathermwq_pseudo:
+ case Hexagon::V6_vgathermhwq_pseudo:
+ return 0;
+ default:
+ return MID.mayLoad() ? 1 : 0;
+ }
+}
+
+unsigned HexagonOptAddrMode::getOffsetOpPosition(MachineInstr *MI) {
+ assert(
+ (HII->getAddrMode(*MI) == HexagonII::BaseImmOffset) &&
+ "Looking for an offset in non-BaseImmOffset addressing mode instruction");
+
+ const MCInstrDesc &MID = MI->getDesc();
+ switch (MI->getOpcode()) {
+ // vgather pseudos are both mayLoad and mayStore, so the
+ // Base and Offset operand positions must be specified
+ // explicitly.
+ case Hexagon::V6_vgathermh_pseudo:
+ case Hexagon::V6_vgathermw_pseudo:
+ case Hexagon::V6_vgathermhw_pseudo:
+ case Hexagon::V6_vgathermhq_pseudo:
+ case Hexagon::V6_vgathermwq_pseudo:
+ case Hexagon::V6_vgathermhwq_pseudo:
+ return 1;
+ default:
+ return MID.mayLoad() ? 2 : 1;
+ }
+}
+
bool HexagonOptAddrMode::processAddUses(NodeAddr<StmtNode *> AddSN,
MachineInstr *AddMI,
const NodeList &UNodeList) {
Register AddDefR = AddMI->getOperand(0).getReg();
+ Register BaseReg = AddMI->getOperand(1).getReg();
for (auto I = UNodeList.rbegin(), E = UNodeList.rend(); I != E; ++I) {
NodeAddr<UseNode *> UN = *I;
NodeAddr<StmtNode *> SN = UN.Addr->getOwner(*DFG);
MachineInstr *MI = SN.Addr->getCode();
const MCInstrDesc &MID = MI->getDesc();
if ((!MID.mayLoad() && !MID.mayStore()) ||
- HII->getAddrMode(*MI) != HexagonII::BaseImmOffset ||
- HII->isHVXVec(*MI))
+ HII->getAddrMode(*MI) != HexagonII::BaseImmOffset)
return false;
- MachineOperand BaseOp = MID.mayLoad() ? MI->getOperand(1)
- : MI->getOperand(0);
+ MachineOperand BaseOp = MI->getOperand(getBaseOpPosition(MI));
if (!BaseOp.isReg() || BaseOp.getReg() != AddDefR)
return false;
- MachineOperand OffsetOp = MID.mayLoad() ? MI->getOperand(2)
- : MI->getOperand(1);
+ MachineOperand OffsetOp = MI->getOperand(getOffsetOpPosition(MI));
if (!OffsetOp.isImm())
return false;
@@ -382,11 +441,19 @@ bool HexagonOptAddrMode::processAddUses(NodeAddr<StmtNode *> AddSN,
// Ex: Rx= add(Rt,#10)
// memw(Rx+#0) = Rs
// will be replaced with => memw(Rt+#10) = Rs
- Register BaseReg = AddMI->getOperand(1).getReg();
if (!isSafeToExtLR(AddSN, AddMI, BaseReg, UNodeList))
return false;
}
+ NodeId LRExtRegRD = 0;
+ // Iterate through all the UseNodes in SN and find the reaching def
+ // for the LRExtReg.
+ for (NodeAddr<UseNode *> UA : AddSN.Addr->members_if(DFG->IsUse, *DFG)) {
+ RegisterRef RR = UA.Addr->getRegRef(*DFG);
+ if (BaseReg == RR.Reg)
+ LRExtRegRD = UA.Addr->getReachingDef();
+ }
+
// Update all the uses of 'add' with the appropriate base and offset
// values.
bool Changed = false;
@@ -400,6 +467,12 @@ bool HexagonOptAddrMode::processAddUses(NodeAddr<StmtNode *> AddSN,
LLVM_DEBUG(dbgs() << "\t\t[MI <BB#" << UseMI->getParent()->getNumber()
<< ">]: " << *UseMI << "\n");
Changed |= updateAddUses(AddMI, UseMI);
+
+ // Set the reaching def for the UseNode under consideration after
+ // updating the Add use. This local change avoids rebuilding the
+ // RDF graph after the update.
+ NodeAddr<DefNode *> LRExtRegDN = DFG->addr<DefNode *>(LRExtRegRD);
+ UseN.Addr->linkToDef(UseN.Id, LRExtRegDN);
}
if (Changed)
@@ -409,21 +482,18 @@ bool HexagonOptAddrMode::processAddUses(NodeAddr<StmtNode *> AddSN,
}
bool HexagonOptAddrMode::updateAddUses(MachineInstr *AddMI,
- MachineInstr *UseMI) {
+ MachineInstr *UseMI) {
const MachineOperand ImmOp = AddMI->getOperand(2);
const MachineOperand AddRegOp = AddMI->getOperand(1);
- Register newReg = AddRegOp.getReg();
- const MCInstrDesc &MID = UseMI->getDesc();
+ Register NewReg = AddRegOp.getReg();
- MachineOperand &BaseOp = MID.mayLoad() ? UseMI->getOperand(1)
- : UseMI->getOperand(0);
- MachineOperand &OffsetOp = MID.mayLoad() ? UseMI->getOperand(2)
- : UseMI->getOperand(1);
- BaseOp.setReg(newReg);
+ MachineOperand &BaseOp = UseMI->getOperand(getBaseOpPosition(UseMI));
+ MachineOperand &OffsetOp = UseMI->getOperand(getOffsetOpPosition(UseMI));
+ BaseOp.setReg(NewReg);
BaseOp.setIsUndef(AddRegOp.isUndef());
BaseOp.setImplicit(AddRegOp.isImplicit());
OffsetOp.setImm(ImmOp.getImm() + OffsetOp.getImm());
- MRI->clearKillFlags(newReg);
+ MRI->clearKillFlags(NewReg);
return true;
}
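As a compact model of what processAddUses()/updateAddUses() above accomplish: a base+immediate access through the result of "Rx = add(Rt, #Imm)" is rewritten to use Rt directly with the offsets summed. A hypothetical sketch with plain structs standing in for the machine operands:

#include <cstdio>

// Stand-ins for the relevant operands of the two instructions involved.
struct AddImm    { unsigned DefReg, SrcReg; int Imm; };   // Rx = add(Rt, #Imm)
struct MemAccess { unsigned BaseReg; int Offset; };       // memw(Base + #Offset)

// Fold the add into the access: memw(Rx + #0) becomes memw(Rt + #Imm).
static bool foldAddIntoAccess(const AddImm &Add, MemAccess &Use) {
  if (Use.BaseReg != Add.DefReg)
    return false;                 // the access does not use the add's result
  Use.BaseReg = Add.SrcReg;
  Use.Offset += Add.Imm;
  return true;                    // the caller must still check isValidOffset()
}

int main() {
  AddImm Add = {/*Rx=*/1, /*Rt=*/2, /*#*/10};
  MemAccess Use = {/*Rx*/ 1, /*#*/0};
  if (foldAddIntoAccess(Add, Use))
    std::printf("memw(r%u + #%d)\n", Use.BaseReg, Use.Offset); // memw(r2 + #10)
}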
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonPatterns.td b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonPatterns.td
index cad5ca8ab92e..3abbd896c519 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonPatterns.td
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonPatterns.td
@@ -87,18 +87,6 @@ def V8I8: PatLeaf<(v8i8 DoubleRegs:$R)>;
def V4I16: PatLeaf<(v4i16 DoubleRegs:$R)>;
def V2I32: PatLeaf<(v2i32 DoubleRegs:$R)>;
-def HQ8: PatLeaf<(VecQ8 HvxQR:$R)>;
-def HQ16: PatLeaf<(VecQ16 HvxQR:$R)>;
-def HQ32: PatLeaf<(VecQ32 HvxQR:$R)>;
-
-def HVI8: PatLeaf<(VecI8 HvxVR:$R)>;
-def HVI16: PatLeaf<(VecI16 HvxVR:$R)>;
-def HVI32: PatLeaf<(VecI32 HvxVR:$R)>;
-
-def HWI8: PatLeaf<(VecPI8 HvxWR:$R)>;
-def HWI16: PatLeaf<(VecPI16 HvxWR:$R)>;
-def HWI32: PatLeaf<(VecPI32 HvxWR:$R)>;
-
def SDTVecLeaf:
SDTypeProfile<1, 0, [SDTCisVec<0>]>;
def SDTVecVecIntOp:
@@ -269,6 +257,9 @@ def anyimm3: PatLeaf<(i32 AnyImm3:$Addr)>;
def f32ImmPred : PatLeaf<(f32 fpimm:$F)>;
def f64ImmPred : PatLeaf<(f64 fpimm:$F)>;
+def f32zero: PatLeaf<(f32 fpimm:$F), [{
+ return N->isExactlyValue(APFloat::getZero(APFloat::IEEEsingle(), false));
+}]>;
// This complex pattern is really only to detect various forms of
// sign-extension i32->i64. The selected value will be of type i64
@@ -378,6 +369,12 @@ def Umin: pf2<umin>; def Umax: pf2<umax>;
def Rol: pf2<rotl>;
+def Fptosi: pf1<fp_to_sint>;
+def Fptoui: pf1<fp_to_uint>;
+def Sitofp: pf1<sint_to_fp>;
+def Uitofp: pf1<uint_to_fp>;
+
+
// --(1) Immediate -------------------------------------------------------
//
@@ -2083,7 +2080,7 @@ let AddedComplexity = 20 in {
defm: Loadxi_pat<sextloadi8, i32, anyimm0, L2_loadrb_io>;
defm: Loadxi_pat<sextloadi16, i32, anyimm1, L2_loadrh_io>;
defm: Loadxi_pat<sextloadv2i8, v2i16, anyimm1, L2_loadbsw2_io>;
- defm: Loadxi_pat<sextloadv4i8, v4i16, anyimm2, L2_loadbzw4_io>;
+ defm: Loadxi_pat<sextloadv4i8, v4i16, anyimm2, L2_loadbsw4_io>;
defm: Loadxi_pat<zextloadi1, i32, anyimm0, L2_loadrub_io>;
defm: Loadxi_pat<zextloadi8, i32, anyimm0, L2_loadrub_io>;
defm: Loadxi_pat<zextloadi16, i32, anyimm1, L2_loadruh_io>;
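The pattern fix above (and the matching one in the next hunk) replaces a zero-extending byte load with the sign-extending one that sextloadv4i8 actually requires. A short C++ illustration of the difference, independent of the TableGen patterns:

#include <cstdint>
#include <cstdio>

int main() {
  const int8_t Bytes[4] = {-1, 2, -3, 4};
  int16_t Sext[4], Zext[4];
  for (int I = 0; I != 4; ++I) {
    Sext[I] = (int16_t)Bytes[I];           // sign-extend: -1 stays -1
    Zext[I] = (int16_t)(uint8_t)Bytes[I];  // zero-extend: -1 becomes 255
  }
  std::printf("sext: %d %d, zext: %d %d\n", Sext[0], Sext[2], Zext[0], Zext[2]);
  // sext: -1 -3, zext: 255 253
}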
@@ -2135,7 +2132,7 @@ let AddedComplexity = 60 in {
def: Loadxu_pat<sextloadi8, i32, anyimm0, L4_loadrb_ur>;
def: Loadxu_pat<sextloadi16, i32, anyimm1, L4_loadrh_ur>;
def: Loadxu_pat<sextloadv2i8, v2i16, anyimm1, L4_loadbsw2_ur>;
- def: Loadxu_pat<sextloadv4i8, v4i16, anyimm2, L4_loadbzw4_ur>;
+ def: Loadxu_pat<sextloadv4i8, v4i16, anyimm2, L4_loadbsw4_ur>;
def: Loadxu_pat<zextloadi1, i32, anyimm0, L4_loadrub_ur>;
def: Loadxu_pat<zextloadi8, i32, anyimm0, L4_loadrub_ur>;
def: Loadxu_pat<zextloadi16, i32, anyimm1, L4_loadruh_ur>;
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonPatternsHVX.td b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonPatternsHVX.td
index a22a3f8ec0ca..0a3dff057ccd 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonPatternsHVX.td
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonPatternsHVX.td
@@ -6,6 +6,21 @@
//
//===----------------------------------------------------------------------===//
+def HQ8: PatLeaf<(VecQ8 HvxQR:$R)>;
+def HQ16: PatLeaf<(VecQ16 HvxQR:$R)>;
+def HQ32: PatLeaf<(VecQ32 HvxQR:$R)>;
+
+def HVI8: PatLeaf<(VecI8 HvxVR:$R)>;
+def HVI16: PatLeaf<(VecI16 HvxVR:$R)>;
+def HVI32: PatLeaf<(VecI32 HvxVR:$R)>;
+def HVF16: PatLeaf<(VecF16 HvxVR:$R)>;
+def HVF32: PatLeaf<(VecF32 HvxVR:$R)>;
+
+def HWI8: PatLeaf<(VecPI8 HvxWR:$R)>;
+def HWI16: PatLeaf<(VecPI16 HvxWR:$R)>;
+def HWI32: PatLeaf<(VecPI32 HvxWR:$R)>;
+def HWF16: PatLeaf<(VecPF16 HvxWR:$R)>;
+def HWF32: PatLeaf<(VecPF32 HvxWR:$R)>;
def SDTVecUnaryOp:
SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>]>;
@@ -49,7 +64,7 @@ def HexagonVPACKL: SDNode<"HexagonISD::VPACKL", SDTVecUnaryOp>;
def HexagonVUNPACK: SDNode<"HexagonISD::VUNPACK", SDTVecUnaryOp>;
def HexagonVUNPACKU: SDNode<"HexagonISD::VUNPACKU", SDTVecUnaryOp>;
-def vzero: PatFrag<(ops), (splat_vector (i32 0))>;
+def vzero: PatFrags<(ops), [(splat_vector (i32 0)), (splat_vector (f32zero))]>;
def qtrue: PatFrag<(ops), (HexagonQTRUE)>;
def qfalse: PatFrag<(ops), (HexagonQFALSE)>;
def qcat: PatFrag<(ops node:$Qs, node:$Qt),
@@ -150,12 +165,19 @@ let Predicates = [UseHVX] in {
defm: HvxLda_pat<V6_vL32b_ai, alignedload, VecI8, IsVecOff>;
defm: HvxLda_pat<V6_vL32b_ai, alignedload, VecI16, IsVecOff>;
defm: HvxLda_pat<V6_vL32b_ai, alignedload, VecI32, IsVecOff>;
-
defm: HvxLd_pat<V6_vL32Ub_ai, unalignedload, VecI8, IsVecOff>;
defm: HvxLd_pat<V6_vL32Ub_ai, unalignedload, VecI16, IsVecOff>;
defm: HvxLd_pat<V6_vL32Ub_ai, unalignedload, VecI32, IsVecOff>;
}
+let Predicates = [UseHVXV68] in {
+ defm: HvxLda_pat<V6_vL32b_nt_ai, alignednontemporalload, VecF16, IsVecOff>;
+ defm: HvxLda_pat<V6_vL32b_nt_ai, alignednontemporalload, VecF32, IsVecOff>;
+ defm: HvxLda_pat<V6_vL32b_ai, alignedload, VecF16, IsVecOff>;
+ defm: HvxLda_pat<V6_vL32b_ai, alignedload, VecF32, IsVecOff>;
+ defm: HvxLd_pat<V6_vL32Ub_ai, unalignedload, VecF16, IsVecOff>;
+ defm: HvxLd_pat<V6_vL32Ub_ai, unalignedload, VecF32, IsVecOff>;
+}
// HVX stores
@@ -199,6 +221,15 @@ let Predicates = [UseHVX] in {
defm: HvxSt_pat<V6_vS32Ub_ai, unalignedstore, HVI32, IsVecOff>;
}
+let Predicates = [UseHVXV68] in {
+ defm: HvxSt_pat<V6_vS32b_nt_ai, alignednontemporalstore, HVF16, IsVecOff>;
+ defm: HvxSt_pat<V6_vS32b_nt_ai, alignednontemporalstore, HVF32, IsVecOff>;
+ defm: HvxSt_pat<V6_vS32b_ai, alignedstore, HVF16, IsVecOff>;
+ defm: HvxSt_pat<V6_vS32b_ai, alignedstore, HVF32, IsVecOff>;
+ defm: HvxSt_pat<V6_vS32Ub_ai, unalignedstore, HVF16, IsVecOff>;
+ defm: HvxSt_pat<V6_vS32Ub_ai, unalignedstore, HVF32, IsVecOff>;
+}
+
// Bitcasts between same-size vector types are no-ops, except for the
// actual type change.
let Predicates = [UseHVX] in {
@@ -211,6 +242,24 @@ let Predicates = [UseHVX] in {
defm: NopCast_pat<VecPI16, VecPI32, HvxWR>;
}
+let Predicates = [UseHVX, UseHVXFloatingPoint] in {
+ defm: NopCast_pat<VecI8, VecF16, HvxVR>;
+ defm: NopCast_pat<VecI8, VecF32, HvxVR>;
+ defm: NopCast_pat<VecI16, VecF16, HvxVR>;
+ defm: NopCast_pat<VecI16, VecF32, HvxVR>;
+ defm: NopCast_pat<VecI32, VecF16, HvxVR>;
+ defm: NopCast_pat<VecI32, VecF32, HvxVR>;
+ defm: NopCast_pat<VecF16, VecF32, HvxVR>;
+
+ defm: NopCast_pat<VecPI8, VecPF16, HvxWR>;
+ defm: NopCast_pat<VecPI8, VecPF32, HvxWR>;
+ defm: NopCast_pat<VecPI16, VecPF16, HvxWR>;
+ defm: NopCast_pat<VecPI16, VecPF32, HvxWR>;
+ defm: NopCast_pat<VecPI32, VecPF16, HvxWR>;
+ defm: NopCast_pat<VecPI32, VecPF32, HvxWR>;
+ defm: NopCast_pat<VecPF16, VecPF32, HvxWR>;
+}
+
let Predicates = [UseHVX] in {
let AddedComplexity = 100 in {
// These should be preferred over a vsplat of 0.
@@ -220,6 +269,7 @@ let Predicates = [UseHVX] in {
def: Pat<(VecPI8 vzero), (PS_vdd0)>;
def: Pat<(VecPI16 vzero), (PS_vdd0)>;
def: Pat<(VecPI32 vzero), (PS_vdd0)>;
+ def: Pat<(VecPF32 vzero), (PS_vdd0)>;
def: Pat<(concat_vectors (VecI8 vzero), (VecI8 vzero)), (PS_vdd0)>;
def: Pat<(concat_vectors (VecI16 vzero), (VecI16 vzero)), (PS_vdd0)>;
@@ -251,6 +301,28 @@ let Predicates = [UseHVX] in {
(V6_vinsertwr HvxVR:$Vu, I32:$Rt)>;
}
+let Predicates = [UseHVX, UseHVXFloatingPoint] in {
+ let AddedComplexity = 100 in {
+ def: Pat<(VecF16 vzero), (V6_vd0)>;
+ def: Pat<(VecF32 vzero), (V6_vd0)>;
+ def: Pat<(VecPF16 vzero), (PS_vdd0)>;
+ def: Pat<(VecPF32 vzero), (PS_vdd0)>;
+
+ def: Pat<(concat_vectors (VecF16 vzero), (VecF16 vzero)), (PS_vdd0)>;
+ def: Pat<(concat_vectors (VecF32 vzero), (VecF32 vzero)), (PS_vdd0)>;
+ }
+
+ def: Pat<(VecPF16 (concat_vectors HVF16:$Vs, HVF16:$Vt)),
+ (Combinev HvxVR:$Vt, HvxVR:$Vs)>;
+ def: Pat<(VecPF32 (concat_vectors HVF32:$Vs, HVF32:$Vt)),
+ (Combinev HvxVR:$Vt, HvxVR:$Vs)>;
+
+ def: Pat<(HexagonVINSERTW0 HVF16:$Vu, I32:$Rt),
+ (V6_vinsertwr HvxVR:$Vu, I32:$Rt)>;
+ def: Pat<(HexagonVINSERTW0 HVF32:$Vu, I32:$Rt),
+ (V6_vinsertwr HvxVR:$Vu, I32:$Rt)>;
+}
+
// Splats for HvxV60
def V60splatib: OutPatFrag<(ops node:$V), (V6_lvsplatw (ToI32 (SplatB $V)))>;
def V60splatih: OutPatFrag<(ops node:$V), (V6_lvsplatw (ToI32 (SplatH $V)))>;
@@ -307,6 +379,18 @@ let Predicates = [UseHVX,UseHVXV62] in {
def: Pat<(VecPI32 (splat_vector I32:$Rs)), (Rep (V62splatrw $Rs))>;
}
}
+let Predicates = [UseHVXV68, UseHVXFloatingPoint] in {
+ let AddedComplexity = 30 in {
+ def: Pat<(VecF16 (splat_vector u16_0ImmPred:$V)), (V62splatih imm:$V)>;
+ def: Pat<(VecF32 (splat_vector anyint:$V)), (V62splatiw imm:$V)>;
+ def: Pat<(VecF32 (splat_vector f32ImmPred:$V)), (V62splatiw (ftoi $V))>;
+ }
+ let AddedComplexity = 20 in {
+ def: Pat<(VecF16 (splat_vector I32:$Rs)), (V62splatrh $Rs)>;
+ def: Pat<(VecF32 (splat_vector I32:$Rs)), (V62splatrw $Rs)>;
+ def: Pat<(VecF32 (splat_vector F32:$Rs)), (V62splatrw $Rs)>;
+ }
+}
class Vneg1<ValueType VecTy>
: PatFrag<(ops), (VecTy (splat_vector (i32 -1)))>;
@@ -369,6 +453,107 @@ let Predicates = [UseHVX] in {
(V6_vmux HvxQR:$Qu, HvxVR:$Vt, HvxVR:$Vs)>;
}
+// For now, we always deal with vector floating point in SF mode.
+class OpR_RR_pat_conv<InstHexagon MI, PatFrag Op, ValueType ResType,
+ PatFrag RsPred, PatFrag RtPred = RsPred>
+ : Pat<(ResType (Op RsPred:$Rs, RtPred:$Rt)),
+ (V6_vconv_sf_qf32 (VecF32 (MI RsPred:$Rs, RtPred:$Rt)))>;
+
+class OpR_RR_pat_conv_hf<InstHexagon MI, PatFrag Op, ValueType ResType,
+ PatFrag RsPred, PatFrag RtPred = RsPred>
+ : Pat<(ResType (Op RsPred:$Rs, RtPred:$Rt)),
+ (V6_vconv_hf_qf16 (VecF16 (MI RsPred:$Rs, RtPred:$Rt)))>;
+
+let Predicates = [UseHVXV68, UseHVXQFloat] in {
+ def: OpR_RR_pat_conv_hf<V6_vsub_hf, pf2<fsub>, VecF16, HVF16>;
+ def: OpR_RR_pat_conv_hf<V6_vadd_hf, pf2<fadd>, VecF16, HVF16>;
+ def: OpR_RR_pat_conv_hf<V6_vmpy_qf16_hf, pf2<fmul>, VecF16, HVF16>;
+ def: OpR_RR_pat_conv<V6_vsub_sf, pf2<fsub>, VecF32, HVF32>;
+ def: OpR_RR_pat_conv<V6_vadd_sf, pf2<fadd>, VecF32, HVF32>;
+ def: OpR_RR_pat_conv<V6_vmpy_qf32_sf, pf2<fmul>, VecF32, HVF32>;
+
+ // For now we assume that the fp32 register is always coming in as IEEE float
+ // since the qfloat arithmetic instructions above always generate the
+ // accompanying conversions as part of their pattern
+ def: Pat<(VecF16 (pf1<fpround> HWF32:$Vuu)),
+ (V6_vdealh (V6_vconv_hf_qf32
+ (VecPF32 (Combinev (V6_vadd_sf (HiVec HvxWR:$Vuu), (V6_vd0)),
+ (V6_vadd_sf (LoVec HvxWR:$Vuu), (V6_vd0))
+ ))))>;
+ // fpextend for QFloat is handled manually in HexagonISelLoweringHVX.cpp.
+}
+
+// HVX IEEE arithmetic Instructions
+let Predicates = [UseHVXV68, UseHVXIEEEFP] in {
+ def: Pat<(fadd HVF16:$Rs, HVF16:$Rt),
+ (V6_vadd_hf_hf HVF16:$Rs, HVF16:$Rt)>;
+ def: Pat<(fadd HVF32:$Rs, HVF32:$Rt),
+ (V6_vadd_sf_sf HVF32:$Rs, HVF32:$Rt)>;
+ def: Pat<(fsub HVF16:$Rs, HVF16:$Rt),
+ (V6_vsub_hf_hf HVF16:$Rs, HVF16:$Rt)>;
+ def: Pat<(fsub HVF32:$Rs, HVF32:$Rt),
+ (V6_vsub_sf_sf HVF32:$Rs, HVF32:$Rt)>;
+ def: Pat<(fmul HVF16:$Rs, HVF16:$Rt),
+ (V6_vmpy_hf_hf HVF16:$Rs, HVF16:$Rt)>;
+ def: Pat<(fmul HVF32:$Rs, HVF32:$Rt),
+ (V6_vmpy_sf_sf HVF32:$Rs, HVF32:$Rt)>;
+
+ def: Pat<(VecF16 (pf1<fpround> HWF32:$Vuu)),
+ (V6_vdealh (V6_vcvt_hf_sf (HiVec HvxWR:$Vuu), (LoVec HvxWR:$Vuu)))>;
+ def: Pat<(VecPF32 (pf1<fpextend> HVF16:$Vu)),
+ (V6_vcvt_sf_hf (V6_vshuffh HvxVR:$Vu))>;
+
+ def: OpR_R_pat<V6_vcvt_h_hf, Fptosi, VecI16, HVF16>;
+ def: OpR_R_pat<V6_vcvt_uh_hf, Fptoui, VecI16, HVF16>;
+ def: OpR_R_pat<V6_vcvt_hf_h, Sitofp, VecF16, HVI16>;
+ def: OpR_R_pat<V6_vcvt_hf_uh, Uitofp, VecF16, HVI16>;
+
+ def: Pat<(VecI8 (Fptosi HWF16:$Vu)),
+ (V6_vcvt_b_hf (HiVec $Vu), (LoVec $Vu))>;
+ def: Pat<(VecI8 (Fptoui HWF16:$Vu)),
+ (V6_vcvt_ub_hf (HiVec $Vu), (LoVec $Vu))>;
+ def: Pat<(VecPF16 (Sitofp HVI8:$Vu)), (V6_vcvt_hf_b HvxVR:$Vu)>;
+ def: Pat<(VecPF16 (Uitofp HVI8:$Vu)), (V6_vcvt_hf_ub HvxVR:$Vu)>;
+}
+
+let Predicates = [UseHVXV68, UseHVXFloatingPoint] in {
+ def: Pat<(vselect HQ16:$Qu, HVF16:$Vs, HVF16:$Vt),
+ (V6_vmux HvxQR:$Qu, HvxVR:$Vs, HvxVR:$Vt)>;
+ def: Pat<(vselect (qnot HQ16:$Qu), HVF16:$Vs, HVF16:$Vt),
+ (V6_vmux HvxQR:$Qu, HvxVR:$Vt, HvxVR:$Vs)>;
+
+ def: Pat<(vselect HQ32:$Qu, HVF32:$Vs, HVF32:$Vt),
+ (V6_vmux HvxQR:$Qu, HvxVR:$Vs, HvxVR:$Vt)>;
+ def: Pat<(vselect (qnot HQ32:$Qu), HVF32:$Vs, HVF32:$Vt),
+ (V6_vmux HvxQR:$Qu, HvxVR:$Vt, HvxVR:$Vs)>;
+}
+
+let Predicates = [UseHVXV68, UseHVX128B, UseHVXQFloat] in {
+ let AddedComplexity = 220 in {
+ defm: MinMax_pats<V6_vmin_hf, V6_vmax_hf, vselect, setgt, VecQ16, HVF16>;
+ defm: MinMax_pats<V6_vmin_hf, V6_vmax_hf, vselect, setogt, VecQ16, HVF16>;
+ defm: MinMax_pats<V6_vmin_sf, V6_vmax_sf, vselect, setgt, VecQ32, HVF32>;
+ defm: MinMax_pats<V6_vmin_sf, V6_vmax_sf, vselect, setogt, VecQ32, HVF32>;
+ }
+ def: OpR_RR_pat<V6_vmin_hf, pf2<fminnum>, VecF16, HVF16>;
+ def: OpR_RR_pat<V6_vmax_hf, pf2<fmaxnum>, VecF16, HVF16>;
+ def: OpR_RR_pat<V6_vmin_sf, pf2<fminnum>, VecF32, HVF32>;
+ def: OpR_RR_pat<V6_vmax_sf, pf2<fmaxnum>, VecF32, HVF32>;
+}
+
+let Predicates = [UseHVXV68, UseHVX128B, UseHVXIEEEFP] in {
+ let AddedComplexity = 220 in {
+ defm: MinMax_pats<V6_vfmin_hf, V6_vfmax_hf, vselect, setgt, VecQ16, HVF16>;
+ defm: MinMax_pats<V6_vfmin_hf, V6_vfmax_hf, vselect, setogt, VecQ16, HVF16>;
+ defm: MinMax_pats<V6_vfmin_sf, V6_vfmax_sf, vselect, setgt, VecQ32, HVF32>;
+ defm: MinMax_pats<V6_vfmin_sf, V6_vfmax_sf, vselect, setogt, VecQ32, HVF32>;
+ }
+ def: OpR_RR_pat<V6_vfmin_hf, pf2<fminnum>, VecF16, HVF16>;
+ def: OpR_RR_pat<V6_vfmax_hf, pf2<fmaxnum>, VecF16, HVF16>;
+ def: OpR_RR_pat<V6_vfmin_sf, pf2<fminnum>, VecF32, HVF32>;
+ def: OpR_RR_pat<V6_vfmax_sf, pf2<fmaxnum>, VecF32, HVF32>;
+}
+
let Predicates = [UseHVX] in {
// For i8 vectors Vs = (a0, a1, ...), Vt = (b0, b1, ...),
// V6_vmpybv Vs, Vt produces a pair of i16 vectors Hi:Lo,
@@ -551,6 +736,12 @@ let Predicates = [UseHVX] in {
def: HvxSel_pat<PS_wselect, HWI32>;
}
+def V2Q: OutPatFrag<(ops node:$Vs), (V6_vandvrt $Vs, (A2_tfrsi -1))>;
+
+let Predicates = [UseHVX] in
+ def: Pat<(select I1:$Pu, VecI1:$Qs, VecI1:$Qt),
+ (V2Q (PS_vselect $Pu, (Q2V $Qs), (Q2V $Qt)))>;
+
let Predicates = [UseHVX] in {
def: Pat<(VecQ8 (qtrue)), (PS_qtrue)>;
def: Pat<(VecQ16 (qtrue)), (PS_qtrue)>;
@@ -623,3 +814,63 @@ let Predicates = [UseHVX] in {
def: AccRRR_pat<V6_vgtuw_or, Or, setugt, HQ32, HVI32, HVI32>;
def: AccRRR_pat<V6_vgtuw_xor, Xor, setugt, HQ32, HVI32, HVI32>;
}
+
+let Predicates = [UseHVXV68, UseHVXFloatingPoint] in {
+ def: OpR_RR_pat<V6_veqh, seteq, VecQ16, HVF16>;
+ def: OpR_RR_pat<V6_veqh, setoeq, VecQ16, HVF16>;
+ def: OpR_RR_pat<V6_veqh, setueq, VecQ16, HVF16>;
+ def: OpR_RR_pat<V6_vgthf, setgt, VecQ16, HVF16>;
+ def: OpR_RR_pat<V6_vgthf, setogt, VecQ16, HVF16>;
+ def: OpR_RR_pat<V6_vgthf, setugt, VecQ16, HVF16>;
+
+ def: OpR_RR_pat<V6_veqw, seteq, VecQ32, HVF32>;
+ def: OpR_RR_pat<V6_veqw, setoeq, VecQ32, HVF32>;
+ def: OpR_RR_pat<V6_veqw, setueq, VecQ32, HVF32>;
+ def: OpR_RR_pat<V6_vgtsf, setgt, VecQ32, HVF32>;
+ def: OpR_RR_pat<V6_vgtsf, setogt, VecQ32, HVF32>;
+ def: OpR_RR_pat<V6_vgtsf, setugt, VecQ32, HVF32>;
+
+ def: AccRRR_pat<V6_veqh_and, And, seteq, HQ16, HVF16, HVF16>;
+ def: AccRRR_pat<V6_veqh_or, Or, seteq, HQ16, HVF16, HVF16>;
+ def: AccRRR_pat<V6_veqh_xor, Xor, seteq, HQ16, HVF16, HVF16>;
+ def: AccRRR_pat<V6_veqh_and, And, setoeq, HQ16, HVF16, HVF16>;
+ def: AccRRR_pat<V6_veqh_or, Or, setoeq, HQ16, HVF16, HVF16>;
+ def: AccRRR_pat<V6_veqh_xor, Xor, setoeq, HQ16, HVF16, HVF16>;
+ def: AccRRR_pat<V6_veqh_and, And, setueq, HQ16, HVF16, HVF16>;
+ def: AccRRR_pat<V6_veqh_or, Or, setueq, HQ16, HVF16, HVF16>;
+ def: AccRRR_pat<V6_veqh_xor, Xor, setueq, HQ16, HVF16, HVF16>;
+ def: AccRRR_pat<V6_vgthf_and, And, setgt, HQ16, HVF16, HVF16>;
+ def: AccRRR_pat<V6_vgthf_or, Or, setgt, HQ16, HVF16, HVF16>;
+ def: AccRRR_pat<V6_vgthf_xor, Xor, setgt, HQ16, HVF16, HVF16>;
+ def: AccRRR_pat<V6_vgthf_and, And, setogt, HQ16, HVF16, HVF16>;
+ def: AccRRR_pat<V6_vgthf_or, Or, setogt, HQ16, HVF16, HVF16>;
+ def: AccRRR_pat<V6_vgthf_xor, Xor, setogt, HQ16, HVF16, HVF16>;
+ def: AccRRR_pat<V6_vgthf_and, And, setugt, HQ16, HVF16, HVF16>;
+ def: AccRRR_pat<V6_vgthf_or, Or, setugt, HQ16, HVF16, HVF16>;
+ def: AccRRR_pat<V6_vgthf_xor, Xor, setugt, HQ16, HVF16, HVF16>;
+
+ def: AccRRR_pat<V6_veqw_and, And, seteq, HQ32, HVF32, HVF32>;
+ def: AccRRR_pat<V6_veqw_or, Or, seteq, HQ32, HVF32, HVF32>;
+ def: AccRRR_pat<V6_veqw_xor, Xor, seteq, HQ32, HVF32, HVF32>;
+ def: AccRRR_pat<V6_veqw_and, And, setoeq, HQ32, HVF32, HVF32>;
+ def: AccRRR_pat<V6_veqw_or, Or, setoeq, HQ32, HVF32, HVF32>;
+ def: AccRRR_pat<V6_veqw_xor, Xor, setoeq, HQ32, HVF32, HVF32>;
+ def: AccRRR_pat<V6_veqw_and, And, setueq, HQ32, HVF32, HVF32>;
+ def: AccRRR_pat<V6_veqw_or, Or, setueq, HQ32, HVF32, HVF32>;
+ def: AccRRR_pat<V6_veqw_xor, Xor, setueq, HQ32, HVF32, HVF32>;
+ def: AccRRR_pat<V6_vgtsf_and, And, setgt, HQ32, HVF32, HVF32>;
+ def: AccRRR_pat<V6_vgtsf_or, Or, setgt, HQ32, HVF32, HVF32>;
+ def: AccRRR_pat<V6_vgtsf_xor, Xor, setgt, HQ32, HVF32, HVF32>;
+ def: AccRRR_pat<V6_vgtsf_and, And, setogt, HQ32, HVF32, HVF32>;
+ def: AccRRR_pat<V6_vgtsf_or, Or, setogt, HQ32, HVF32, HVF32>;
+ def: AccRRR_pat<V6_vgtsf_xor, Xor, setogt, HQ32, HVF32, HVF32>;
+ def: AccRRR_pat<V6_vgtsf_and, And, setugt, HQ32, HVF32, HVF32>;
+ def: AccRRR_pat<V6_vgtsf_or, Or, setugt, HQ32, HVF32, HVF32>;
+ def: AccRRR_pat<V6_vgtsf_xor, Xor, setugt, HQ32, HVF32, HVF32>;
+
+ def: Pat<(VecQ16 (setone HVF16:$Vt, HVF16:$Vu)),
+ (V6_pred_not (V6_veqh HvxVR:$Vt, HvxVR:$Vu))>;
+
+ def: Pat<(VecQ32 (setone HVF32:$Vt, HVF32:$Vu)),
+ (V6_pred_not (V6_veqw HvxVR:$Vt, HvxVR:$Vu))>;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonPatternsV65.td b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonPatternsV65.td
index 4cd45ecbe1a1..f927f9b9e7c3 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonPatternsV65.td
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonPatternsV65.td
@@ -7,28 +7,31 @@
//===----------------------------------------------------------------------===//
multiclass vgathermh<RegisterClass RC> {
- let isCodeGenOnly = 1, isPseudo = 1, mayLoad = 1, mayStore = 1 in
+ let isCodeGenOnly = 1, isPseudo = 1, mayLoad = 1,
+ mayStore = 1, addrMode = BaseImmOffset, accessSize = HalfWordAccess in
def NAME : CVI_GATHER_TMP_LD_Resource_NoOpcode<(outs ),
- (ins IntRegs:$_dst_, IntRegs:$Rt,
- ModRegs:$Mu, RC:$Vv),
+ (ins IntRegs:$_dst_, s4_0Imm:$Ii,
+ IntRegs:$Rt, ModRegs:$Mu, RC:$Vv),
".error \"should not emit\" ",
[]>;
}
multiclass vgathermw<RegisterClass RC> {
- let isCodeGenOnly = 1, isPseudo = 1, mayLoad = 1, mayStore = 1 in
+ let isCodeGenOnly = 1, isPseudo = 1, mayLoad = 1,
+ mayStore = 1, addrMode = BaseImmOffset, accessSize = WordAccess in
def NAME : CVI_GATHER_TMP_LD_Resource_NoOpcode<(outs ),
- (ins IntRegs:$_dst_, IntRegs:$Rt,
- ModRegs:$Mu, RC:$Vv),
+ (ins IntRegs:$_dst_, s4_0Imm:$Ii,
+ IntRegs:$Rt, ModRegs:$Mu, RC:$Vv),
".error \"should not emit\" ",
[]>;
}
multiclass vgathermhw<RegisterClass RC> {
- let isCodeGenOnly = 1, isPseudo = 1, mayLoad = 1, mayStore = 1 in
+ let isCodeGenOnly = 1, isPseudo = 1, mayLoad = 1,
+ mayStore = 1, addrMode = BaseImmOffset, accessSize = HalfWordAccess in
def NAME : CVI_GATHER_TMP_LD_Resource_NoOpcode<(outs ),
- (ins IntRegs:$_dst_, IntRegs:$Rt,
- ModRegs:$Mu, RC:$Vv),
+ (ins IntRegs:$_dst_, s4_0Imm:$Ii,
+ IntRegs:$Rt, ModRegs:$Mu, RC:$Vv),
".error \"should not emit\" ",
[]>;
}
@@ -38,28 +41,34 @@ defm V6_vgathermw_pseudo : vgathermw<HvxVR>;
defm V6_vgathermhw_pseudo : vgathermhw<HvxWR>;
multiclass vgathermhq<RegisterClass RC1, RegisterClass RC2> {
- let isCodeGenOnly = 1, isPseudo = 1, mayLoad = 1, mayStore = 1 in
+ let isCodeGenOnly = 1, isPseudo = 1, mayLoad = 1,
+ mayStore = 1, addrMode = BaseImmOffset, accessSize = HalfWordAccess in
def NAME : CVI_GATHER_TMP_LD_Resource_NoOpcode<(outs ),
- (ins IntRegs:$_dst_, RC2:$Vq, IntRegs:$Rt,
- ModRegs:$Mu, RC1:$Vv),
+ (ins IntRegs:$_dst_, s4_0Imm:$Ii,
+ RC2:$Vq, IntRegs:$Rt, ModRegs:$Mu,
+ RC1:$Vv),
".error \"should not emit\" ",
[]>;
}
multiclass vgathermwq<RegisterClass RC1, RegisterClass RC2> {
- let isCodeGenOnly = 1, isPseudo = 1, mayLoad = 1, mayStore = 1 in
+ let isCodeGenOnly = 1, isPseudo = 1, mayLoad = 1,
+ mayStore = 1, addrMode = BaseImmOffset, accessSize = WordAccess in
def NAME : CVI_GATHER_TMP_LD_Resource_NoOpcode<(outs ),
- (ins IntRegs:$_dst_, RC2:$Vq, IntRegs:$Rt,
- ModRegs:$Mu, RC1:$Vv),
+ (ins IntRegs:$_dst_, s4_0Imm:$Ii,
+ RC2:$Vq, IntRegs:$Rt, ModRegs:$Mu,
+ RC1:$Vv),
".error \"should not emit\" ",
[]>;
}
multiclass vgathermhwq<RegisterClass RC1, RegisterClass RC2> {
- let isCodeGenOnly = 1, isPseudo = 1, mayLoad = 1, mayStore = 1 in
+ let isCodeGenOnly = 1, isPseudo = 1, mayLoad = 1,
+ mayStore = 1, addrMode = BaseImmOffset, accessSize = HalfWordAccess in
def NAME : CVI_GATHER_TMP_LD_Resource_NoOpcode<(outs ),
- (ins IntRegs:$_dst_, RC2:$Vq, IntRegs:$Rt,
- ModRegs:$Mu, RC1:$Vv),
+ (ins IntRegs:$_dst_, s4_0Imm:$Ii,
+ RC2:$Vq, IntRegs:$Rt, ModRegs:$Mu,
+ RC1:$Vv),
".error \"should not emit\" ",
[]>;
}
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td
index 8b7138d3c809..4c387c8ba638 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td
@@ -479,6 +479,10 @@ def VecI16: ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
[v32i16, v64i16, v32i16]>;
def VecI32: ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
[v16i32, v32i32, v16i32]>;
+def VecF16: ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
+ [v32f16, v64f16, v32f16]>;
+def VecF32: ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
+ [v16f32, v32f32, v16f32]>;
def VecPI8: ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
[v128i8, v256i8, v128i8]>;
@@ -486,6 +490,10 @@ def VecPI16: ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
[v64i16, v128i16, v64i16]>;
def VecPI32: ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
[v32i32, v64i32, v32i32]>;
+def VecPF16: ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
+ [v64f16, v128f16, v64f16]>;
+def VecPF32: ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
+ [v32f32, v64f32, v32f32]>;
def VecQ8: ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
[v64i1, v128i1, v64i1]>;
@@ -496,13 +504,13 @@ def VecQ32: ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
// HVX register classes
-def HvxVR : RegisterClass<"Hexagon", [VecI8, VecI16, VecI32], 512,
+def HvxVR : RegisterClass<"Hexagon", [VecI8, VecI16, VecI32, VecF16, VecF32], 512,
(add (sequence "V%u", 0, 31), VTMP)> {
let RegInfos = RegInfoByHwMode<[Hvx64, Hvx128, DefaultMode],
[RegInfo<512,512,512>, RegInfo<1024,1024,1024>, RegInfo<512,512,512>]>;
}
-def HvxWR : RegisterClass<"Hexagon", [VecPI8, VecPI16, VecPI32], 1024,
+def HvxWR : RegisterClass<"Hexagon", [VecPI8, VecPI16, VecPI32, VecPF16, VecPF32], 1024,
(add (sequence "W%u", 0, 15), (sequence "WR%u", 0, 15))> {
let RegInfos = RegInfoByHwMode<[Hvx64, Hvx128, DefaultMode],
[RegInfo<1024,1024,1024>, RegInfo<2048,2048,2048>, RegInfo<1024,1024,1024>]>;
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
index 9a0f57fce97d..ada78ca70559 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
@@ -224,14 +224,14 @@ void HexagonSplitDoubleRegs::partitionRegisters(UUSetMap &P2Rs) {
unsigned NumRegs = MRI->getNumVirtRegs();
BitVector DoubleRegs(NumRegs);
for (unsigned i = 0; i < NumRegs; ++i) {
- unsigned R = Register::index2VirtReg(i);
+ Register R = Register::index2VirtReg(i);
if (MRI->getRegClass(R) == DoubleRC)
DoubleRegs.set(i);
}
BitVector FixedRegs(NumRegs);
for (int x = DoubleRegs.find_first(); x >= 0; x = DoubleRegs.find_next(x)) {
- unsigned R = Register::index2VirtReg(x);
+ Register R = Register::index2VirtReg(x);
MachineInstr *DefI = MRI->getVRegDef(R);
// In some cases a register may exist, but never be defined or used.
// It should never appear anywhere, but mark it as "fixed", just to be
@@ -244,7 +244,7 @@ void HexagonSplitDoubleRegs::partitionRegisters(UUSetMap &P2Rs) {
for (int x = DoubleRegs.find_first(); x >= 0; x = DoubleRegs.find_next(x)) {
if (FixedRegs[x])
continue;
- unsigned R = Register::index2VirtReg(x);
+ Register R = Register::index2VirtReg(x);
LLVM_DEBUG(dbgs() << printReg(R, TRI) << " ~~");
USet &Asc = AssocMap[R];
for (auto U = MRI->use_nodbg_begin(R), Z = MRI->use_nodbg_end();
@@ -281,7 +281,7 @@ void HexagonSplitDoubleRegs::partitionRegisters(UUSetMap &P2Rs) {
unsigned NextP = 1;
USet Visited;
for (int x = DoubleRegs.find_first(); x >= 0; x = DoubleRegs.find_next(x)) {
- unsigned R = Register::index2VirtReg(x);
+ Register R = Register::index2VirtReg(x);
if (Visited.count(R))
continue;
// Create a new partition for R.
@@ -578,8 +578,7 @@ void HexagonSplitDoubleRegs::collectIndRegs(LoopRegMap &IRM) {
append_range(WorkQ, *WorkQ[i]);
USet Rs;
- for (unsigned i = 0, n = WorkQ.size(); i < n; ++i) {
- MachineLoop *L = WorkQ[i];
+ for (MachineLoop *L : WorkQ) {
Rs.clear();
collectIndRegsForLoop(L, Rs);
if (!Rs.empty())
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
index 08bb4580b585..bdd2a2cfc5fa 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
@@ -228,7 +228,9 @@ bool HexagonSubtarget::isTypeForHVX(Type *VecTy, bool IncludeBool) const {
if (!VecTy->isVectorTy() || isa<ScalableVectorType>(VecTy))
return false;
// Avoid types like <2 x i32*>.
- if (!cast<VectorType>(VecTy)->getElementType()->isIntegerTy())
+ Type *ScalTy = VecTy->getScalarType();
+ if (!ScalTy->isIntegerTy() &&
+ !(ScalTy->isFloatingPointTy() && useHVXFloatingPoint()))
return false;
// The given type may be something like <17 x i32>, which is not MVT,
// but can be represented as (non-simple) EVT.
@@ -466,28 +468,46 @@ void HexagonSubtarget::adjustSchedDependency(SUnit *Src, int SrcOpIdx,
return;
}
- if (!hasV60Ops())
- return;
-
- // Set the latency for a copy to zero since we hope that is will get removed.
+  // Set the latency for a copy to zero since we hope that it will get
+  // removed.
if (DstInst->isCopy())
Dep.setLatency(0);
// If it's a REG_SEQUENCE/COPY, use its destination instruction to determine
// the correct latency.
- if ((DstInst->isRegSequence() || DstInst->isCopy()) && Dst->NumSuccs == 1) {
+  // If the def of the COPY/REG_SEQUENCE has multiple uses, set the latency
+  // only if the latencies on all the uses are equal; otherwise fall back to
+  // the default.
+ if ((DstInst->isRegSequence() || DstInst->isCopy())) {
Register DReg = DstInst->getOperand(0).getReg();
- MachineInstr *DDst = Dst->Succs[0].getSUnit()->getInstr();
- unsigned UseIdx = -1;
- for (unsigned OpNum = 0; OpNum < DDst->getNumOperands(); OpNum++) {
- const MachineOperand &MO = DDst->getOperand(OpNum);
- if (MO.isReg() && MO.getReg() && MO.isUse() && MO.getReg() == DReg) {
- UseIdx = OpNum;
+ int DLatency = -1;
+ for (const auto &DDep : Dst->Succs) {
+ MachineInstr *DDst = DDep.getSUnit()->getInstr();
+ int UseIdx = -1;
+ for (unsigned OpNum = 0; OpNum < DDst->getNumOperands(); OpNum++) {
+ const MachineOperand &MO = DDst->getOperand(OpNum);
+ if (MO.isReg() && MO.getReg() && MO.isUse() && MO.getReg() == DReg) {
+ UseIdx = OpNum;
+ break;
+ }
+ }
+
+ if (UseIdx == -1)
+ continue;
+
+ int Latency = (InstrInfo.getOperandLatency(&InstrItins, *SrcInst, 0,
+ *DDst, UseIdx));
+ // Set DLatency for the first time.
+ DLatency = (DLatency == -1) ? Latency : DLatency;
+
+ // For multiple uses, if the Latency is different across uses, reset
+ // DLatency.
+ if (DLatency != Latency) {
+ DLatency = -1;
break;
}
}
- int DLatency = (InstrInfo.getOperandLatency(&InstrItins, *SrcInst,
- 0, *DDst, UseIdx));
+
DLatency = std::max(DLatency, 0);
Dep.setLatency((unsigned)DLatency);
}
@@ -500,8 +520,10 @@ void HexagonSubtarget::adjustSchedDependency(SUnit *Src, int SrcOpIdx,
Dep.setLatency(0);
return;
}
-
- updateLatency(*SrcInst, *DstInst, Dep);
+ int Latency = Dep.getLatency();
+ bool IsArtificial = Dep.isArtificial();
+ Latency = updateLatency(*SrcInst, *DstInst, IsArtificial, Latency);
+ Dep.setLatency(Latency);
}
void HexagonSubtarget::getPostRAMutations(
@@ -530,21 +552,19 @@ bool HexagonSubtarget::usePredicatedCalls() const {
return EnablePredicatedCalls;
}
-void HexagonSubtarget::updateLatency(MachineInstr &SrcInst,
- MachineInstr &DstInst, SDep &Dep) const {
- if (Dep.isArtificial()) {
- Dep.setLatency(1);
- return;
- }
-
+int HexagonSubtarget::updateLatency(MachineInstr &SrcInst,
+ MachineInstr &DstInst, bool IsArtificial,
+ int Latency) const {
+ if (IsArtificial)
+ return 1;
if (!hasV60Ops())
- return;
-
- auto &QII = static_cast<const HexagonInstrInfo&>(*getInstrInfo());
+ return Latency;
+ auto &QII = static_cast<const HexagonInstrInfo &>(*getInstrInfo());
// BSB scheduling.
if (QII.isHVXVec(SrcInst) || useBSBScheduling())
- Dep.setLatency((Dep.getLatency() + 1) >> 1);
+ Latency = (Latency + 1) >> 1;
+ return Latency;
}
void HexagonSubtarget::restoreLatency(SUnit *Src, SUnit *Dst) const {
@@ -580,9 +600,9 @@ void HexagonSubtarget::restoreLatency(SUnit *Src, SUnit *Dst) const {
// For some instructions (ex: COPY), we might end up with < 0 latency
// as they don't have any Itinerary class associated with them.
Latency = std::max(Latency, 0);
-
+ bool IsArtificial = I.isArtificial();
+ Latency = updateLatency(*SrcI, *DstI, IsArtificial, Latency);
I.setLatency(Latency);
- updateLatency(*SrcI, *DstI, I);
}
}
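A minimal standalone C++ sketch of the uniform-latency rule introduced in the adjustSchedDependency hunk above. UseDep and commonCopyLatency are hypothetical names for illustration only; the real code walks Dst->Succs and queries the instruction itineraries per use operand.

#include <algorithm>
#include <vector>

// Hypothetical stand-in for one scheduling successor that may read the
// copied register.
struct UseDep { bool UsesCopiedReg; int Latency; };

// Returns the latency to put on the copy's edge: the common per-use latency
// when every use agrees, clamped to 0 when there is no use or no agreement
// (mirroring the std::max(DLatency, 0) in the hunk above).
int commonCopyLatency(const std::vector<UseDep> &Uses) {
  int DLatency = -1;
  for (const UseDep &U : Uses) {
    if (!U.UsesCopiedReg)
      continue;
    if (DLatency == -1) {          // first matching use
      DLatency = U.Latency;
      continue;
    }
    if (DLatency != U.Latency) {   // uses disagree: give up
      DLatency = -1;
      break;
    }
  }
  return std::max(DLatency, 0);
}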
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonSubtarget.h b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonSubtarget.h
index e4f375440be1..db682676cf12 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonSubtarget.h
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonSubtarget.h
@@ -325,8 +325,8 @@ public:
private:
// Helper function responsible for increasing the latency only.
- void updateLatency(MachineInstr &SrcInst, MachineInstr &DstInst, SDep &Dep)
- const;
+ int updateLatency(MachineInstr &SrcInst, MachineInstr &DstInst,
+ bool IsArtificial, int Latency) const;
void restoreLatency(SUnit *Src, SUnit *Dst) const;
void changeLatency(SUnit *Src, SUnit *Dst, unsigned Lat) const;
bool isBestZeroLatency(SUnit *Src, SUnit *Dst, const HexagonInstrInfo *TII,
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
index fcf829b522cc..c6703bb8a62a 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
@@ -139,6 +139,7 @@ namespace llvm {
void initializeHexagonBitSimplifyPass(PassRegistry&);
void initializeHexagonConstExtendersPass(PassRegistry&);
void initializeHexagonConstPropagationPass(PassRegistry&);
+ void initializeHexagonCopyToCombinePass(PassRegistry&);
void initializeHexagonEarlyIfConversionPass(PassRegistry&);
void initializeHexagonExpandCondsetsPass(PassRegistry&);
void initializeHexagonGenMuxPass(PassRegistry&);
@@ -199,6 +200,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeHexagonTarget() {
initializeHexagonBitSimplifyPass(PR);
initializeHexagonConstExtendersPass(PR);
initializeHexagonConstPropagationPass(PR);
+ initializeHexagonCopyToCombinePass(PR);
initializeHexagonEarlyIfConversionPass(PR);
initializeHexagonGenMuxPass(PR);
initializeHexagonHardwareLoopsPass(PR);
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
index 85ec0cdcd8f0..e9b658d18175 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
@@ -886,7 +886,8 @@ bool HexagonPacketizerList::canPromoteToDotNew(const MachineInstr &MI,
// Create a dot new machine instruction to see if resources can be
// allocated. If not, bail out now.
- int NewOpcode = HII->getDotNewOp(MI);
+ int NewOpcode = (RC != &Hexagon::PredRegsRegClass) ? HII->getDotNewOp(MI) :
+ HII->getDotNewPredOp(MI, MBPI);
const MCInstrDesc &D = HII->get(NewOpcode);
MachineInstr *NewMI = MF.CreateMachineInstr(D, DebugLoc());
bool ResourcesAvailable = ResourceTracker->canReserveResources(*NewMI);
@@ -1107,6 +1108,11 @@ static bool cannotCoexistAsymm(const MachineInstr &MI, const MachineInstr &MJ,
HII.isHVXMemWithAIndirect(MI, MJ))
return true;
+ // Don't allow a store and an instruction that must be in slot0 and
+ // doesn't allow a slot1 instruction.
+ if (MI.mayStore() && HII.isRestrictNoSlot1Store(MJ) && HII.isPureSlot0(MJ))
+ return true;
+
// An inline asm cannot be together with a branch, because we may not be
// able to remove the asm out after packetizing (i.e. if the asm must be
// moved past the bundle). Similarly, two asms cannot be together to avoid
@@ -1526,6 +1532,13 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
bool IsVecJ = HII->isHVXVec(J);
bool IsVecI = HII->isHVXVec(I);
+ // Don't reorder the loads if there is an order dependence. This would
+ // occur if the first instruction must go in slot0.
+ if (LoadJ && LoadI && HII->isPureSlot0(J)) {
+ FoundSequentialDependence = true;
+ break;
+ }
+
if (Slot1Store && MF.getSubtarget<HexagonSubtarget>().hasV65Ops() &&
((LoadJ && StoreI && !NVStoreI) ||
(StoreJ && LoadI && !NVStoreJ)) &&
@@ -1696,9 +1709,12 @@ HexagonPacketizerList::addToPacket(MachineInstr &MI) {
MachineBasicBlock::iterator MII = MI.getIterator();
MachineBasicBlock *MBB = MI.getParent();
- if (CurrentPacketMIs.empty())
+ if (CurrentPacketMIs.empty()) {
PacketStalls = false;
+ PacketStallCycles = 0;
+ }
PacketStalls |= producesStall(MI);
+ PacketStallCycles = std::max(PacketStallCycles, calcStall(MI));
if (MI.isImplicitDef()) {
// Add to the packet to allow subsequent instructions to be checked
@@ -1818,14 +1834,6 @@ bool HexagonPacketizerList::shouldAddToPacket(const MachineInstr &MI) {
if (Minimal)
return false;
- // Constrainst for not packetizing this MI with existing instructions in a
- // packet.
- // MI is a store instruction.
- // CurrentPacketMIs has a SLOT0 only instruction with constraint
- // A_RESTRICT_NOSLOT1_STORE/isRestrictNoSlot1Store.
- if (MI.mayStore() && isPureSlot0InsnWithNoSlot1Store(MI))
- return false;
-
if (producesStall(MI))
return false;
@@ -1865,25 +1873,8 @@ bool HexagonPacketizerList::shouldAddToPacket(const MachineInstr &MI) {
return true;
}
-bool HexagonPacketizerList::isPureSlot0InsnWithNoSlot1Store(
- const MachineInstr &MI) {
- bool noSlot1Store = false;
- bool isSlot0Only = false;
- for (auto J : CurrentPacketMIs) {
- noSlot1Store |= HII->isRestrictNoSlot1Store(*J);
- isSlot0Only |= HII->isPureSlot0(*J);
- }
-
- return (noSlot1Store && isSlot0Only);
-}
-
// V60 forward scheduling.
-bool HexagonPacketizerList::producesStall(const MachineInstr &I) {
- // If the packet already stalls, then ignore the stall from a subsequent
- // instruction in the same packet.
- if (PacketStalls)
- return false;
-
+unsigned int HexagonPacketizerList::calcStall(const MachineInstr &I) {
// Check whether the previous packet is in a different loop. If this is the
// case, there is little point in trying to avoid a stall because that would
// favor the rare case (loop entry) over the common case (loop iteration).
@@ -1895,10 +1886,12 @@ bool HexagonPacketizerList::producesStall(const MachineInstr &I) {
auto *OldBB = OldPacketMIs.front()->getParent();
auto *ThisBB = I.getParent();
if (MLI->getLoopFor(OldBB) != MLI->getLoopFor(ThisBB))
- return false;
+ return 0;
}
SUnit *SUI = MIToSUnit[const_cast<MachineInstr *>(&I)];
+ if (!SUI)
+ return 0;
// If the latency is 0 and there is a data dependence between this
// instruction and any instruction in the current packet, we disregard any
@@ -1927,7 +1920,7 @@ bool HexagonPacketizerList::producesStall(const MachineInstr &I) {
if (Pred.getSUnit() == SUJ)
if ((Pred.getLatency() == 0 && Pred.isAssignedRegDep()) ||
HII->isNewValueJump(I) || HII->isToBeScheduledASAP(*J, I))
- return false;
+ return 0;
}
// Check if the latency is greater than one between this instruction and any
@@ -1936,10 +1929,20 @@ bool HexagonPacketizerList::producesStall(const MachineInstr &I) {
SUnit *SUJ = MIToSUnit[J];
for (auto &Pred : SUI->Preds)
if (Pred.getSUnit() == SUJ && Pred.getLatency() > 1)
- return true;
+ return Pred.getLatency();
}
- return false;
+ return 0;
+}
+
+bool HexagonPacketizerList::producesStall(const MachineInstr &I) {
+ unsigned int Latency = calcStall(I);
+ if (Latency == 0)
+ return false;
+  // Ignore the stall unless it is larger than the worst stall already in the packet.
+ if (PacketStalls)
+ return Latency > PacketStallCycles;
+ return true;
}
//===----------------------------------------------------------------------===//
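The packetizer change above splits stall handling into calcStall(), which returns the number of stall cycles against the previous packet, and producesStall(), which only reports stalls worse than what the packet already pays. A simplified, self-contained C++ model of that bookkeeping; PacketState is a hypothetical stand-in for the packetizer's own members, and StallCycles stands in for the value calcStall() would return.

#include <algorithm>

struct PacketState {
  bool PacketStalls = false;        // packet already pays a stall
  unsigned PacketStallCycles = 0;   // worst stall seen in this packet

  bool producesStall(unsigned StallCycles) const {
    if (StallCycles == 0)
      return false;
    // Only report a stall that is worse than what the packet already pays.
    if (PacketStalls)
      return StallCycles > PacketStallCycles;
    return true;
  }

  void addToPacket(unsigned StallCycles) {
    PacketStalls |= producesStall(StallCycles);
    PacketStallCycles = std::max(PacketStallCycles, StallCycles);
  }
};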
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.h b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.h
index 27a47220570a..6a709e566f86 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.h
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.h
@@ -56,6 +56,9 @@ class HexagonPacketizerList : public VLIWPacketizerList {
// Set to true if the packet contains an instruction that stalls with an
// instruction from the previous packet.
bool PacketStalls = false;
+  // Set to the number of stall cycles a given instruction will incur
+  // because of a dependence on an instruction in the previous packet.
+ unsigned int PacketStallCycles = 0;
// Set to true if the packet has a duplex pair of sub-instructions.
bool PacketHasDuplex = false;
@@ -156,7 +159,7 @@ protected:
bool hasRegMaskDependence(const MachineInstr &I, const MachineInstr &J);
bool hasDualStoreDependence(const MachineInstr &I, const MachineInstr &J);
bool producesStall(const MachineInstr &MI);
- bool isPureSlot0InsnWithNoSlot1Store(const MachineInstr &MI);
+ unsigned int calcStall(const MachineInstr &MI);
};
} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
index 21386a91c7b3..6aca8d807872 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
@@ -443,7 +443,7 @@ auto AlignVectors::createAdjustedPointer(IRBuilder<> &Builder, Value *Ptr,
// we don't need to do pointer casts.
auto *PtrTy = cast<PointerType>(Ptr->getType());
if (!PtrTy->isOpaque()) {
- Type *ElemTy = PtrTy->getElementType();
+ Type *ElemTy = PtrTy->getNonOpaquePointerElementType();
int ElemSize = HVC.getAllocSizeOf(ElemTy);
if (Adjust % ElemSize == 0 && Adjust != 0) {
Value *Tmp0 =
@@ -718,7 +718,7 @@ auto AlignVectors::realignGroup(const MoveGroup &Move) const -> bool {
// Maximum alignment present in the whole address group.
const AddrInfo &WithMaxAlign =
- getMaxOf(BaseInfos, [](const AddrInfo &AI) { return AI.HaveAlign; });
+ getMaxOf(MoveInfos, [](const AddrInfo &AI) { return AI.HaveAlign; });
Align MaxGiven = WithMaxAlign.HaveAlign;
// Minimum alignment present in the move address group.
@@ -1181,12 +1181,15 @@ auto HexagonVectorCombine::rescale(IRBuilder<> &Builder, Value *Mask,
int ToCount = (FromCount * FromSize) / ToSize;
assert((FromCount * FromSize) % ToSize == 0);
+ auto *FromITy = IntegerType::get(F.getContext(), FromSize * 8);
+ auto *ToITy = IntegerType::get(F.getContext(), ToSize * 8);
+
// Mask <N x i1> -> sext to <N x FromTy> -> bitcast to <M x ToTy> ->
// -> trunc to <M x i1>.
Value *Ext = Builder.CreateSExt(
- Mask, VectorType::get(FromSTy, FromCount, /*Scalable*/ false));
+ Mask, VectorType::get(FromITy, FromCount, /*Scalable*/ false));
Value *Cast = Builder.CreateBitCast(
- Ext, VectorType::get(ToSTy, ToCount, /*Scalable*/ false));
+ Ext, VectorType::get(ToITy, ToCount, /*Scalable*/ false));
return Builder.CreateTrunc(
Cast, VectorType::get(getBoolTy(), ToCount, /*Scalable*/ false));
}
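The rescale() fix above routes the sext/bitcast through integer element types of the matching byte widths instead of the original scalar types. A byte-level C++ model of what that mask-rescaling sequence computes, purely illustrative (rescaleMask is not an LLVM API):

#include <cassert>
#include <cstdint>
#include <vector>

// sext <N x i1> to N lanes of FromSize bytes, reinterpret the same bytes as
// M lanes of ToSize bytes, then truncate each lane back to one bit.
std::vector<bool> rescaleMask(const std::vector<bool> &Mask,
                              int FromSize, int ToSize) {
  int FromCount = Mask.size();
  assert((FromCount * FromSize) % ToSize == 0);
  int ToCount = (FromCount * FromSize) / ToSize;

  std::vector<uint8_t> Bytes;               // sext: 0xFF / 0x00 per byte
  for (bool B : Mask)
    Bytes.insert(Bytes.end(), FromSize, B ? 0xFF : 0x00);

  std::vector<bool> Out(ToCount);           // trunc: low bit of each lane
  for (int I = 0; I < ToCount; ++I)
    Out[I] = Bytes[I * ToSize] & 1;         // little-endian low byte
  return Out;
}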
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp
index f973862a0c9b..94b878e21f4d 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp
@@ -659,8 +659,7 @@ void HexagonVectorLoopCarriedReuse::findLoopCarriedDeps() {
delete D;
}
LLVM_DEBUG(dbgs() << "Found " << Dependences.size() << " dependences\n");
- LLVM_DEBUG(for (size_t i = 0; i < Dependences.size();
- ++i) { dbgs() << *Dependences[i] << "\n"; });
+ LLVM_DEBUG(for (const DepChain *D : Dependences) dbgs() << *D << "\n";);
}
Pass *llvm::createHexagonVectorLoopCarriedReuseLegacyPass() {
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.cpp
index 96c2965296ca..8a866cfe9161 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.cpp
@@ -16,6 +16,7 @@
#include "MCTargetDesc/HexagonMCInstrInfo.h"
#include "MCTargetDesc/HexagonMCShuffler.h"
#include "MCTargetDesc/HexagonMCTargetDesc.h"
+
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInst.h"
@@ -65,7 +66,8 @@ void HexagonMCChecker::init() {
void HexagonMCChecker::initReg(MCInst const &MCI, unsigned R, unsigned &PredReg,
bool &isTrue) {
- if (HexagonMCInstrInfo::isPredicated(MCII, MCI) && isPredicateRegister(R)) {
+ if (HexagonMCInstrInfo::isPredicated(MCII, MCI) &&
+ HexagonMCInstrInfo::isPredReg(RI, R)) {
    // Note a used predicate register.
PredReg = R;
isTrue = HexagonMCInstrInfo::isPredicatedTrue(MCII, MCI);
@@ -123,7 +125,7 @@ void HexagonMCChecker::init(MCInst const &MCI) {
// same packet with an instruction that modifies is explicitly. Deal
// with such situations individually.
SoftDefs.insert(R);
- else if (isPredicateRegister(R) &&
+ else if (HexagonMCInstrInfo::isPredReg(RI, R) &&
HexagonMCInstrInfo::isPredicateLate(MCII, MCI))
// Include implicit late predicates.
LatePreds.insert(R);
@@ -167,7 +169,7 @@ void HexagonMCChecker::init(MCInst const &MCI) {
// side-effect, then note as a soft definition.
SoftDefs.insert(*SRI);
else if (HexagonMCInstrInfo::isPredicateLate(MCII, MCI) &&
- isPredicateRegister(*SRI))
+ HexagonMCInstrInfo::isPredReg(RI, *SRI))
// Some insns produce predicates too late to be used in the same packet.
LatePreds.insert(*SRI);
else if (i == 0 && HexagonMCInstrInfo::getType(MCII, MCI) ==
@@ -193,7 +195,7 @@ void HexagonMCChecker::init(MCInst const &MCI) {
if (MCI.getOperand(i).isReg()) {
unsigned P = MCI.getOperand(i).getReg();
- if (isPredicateRegister(P))
+ if (HexagonMCInstrInfo::isPredReg(RI, P))
NewPreds.insert(P);
}
}
@@ -202,7 +204,7 @@ HexagonMCChecker::HexagonMCChecker(MCContext &Context, MCInstrInfo const &MCII,
MCSubtargetInfo const &STI, MCInst &mcb,
MCRegisterInfo const &ri, bool ReportErrors)
: Context(Context), MCB(mcb), RI(ri), MCII(MCII), STI(STI),
- ReportErrors(ReportErrors), ReversePairs() {
+ ReportErrors(ReportErrors) {
init();
}
@@ -210,8 +212,7 @@ HexagonMCChecker::HexagonMCChecker(HexagonMCChecker const &Other,
MCSubtargetInfo const &STI,
bool CopyReportErrors)
: Context(Other.Context), MCB(Other.MCB), RI(Other.RI), MCII(Other.MCII),
- STI(STI), ReportErrors(CopyReportErrors ? Other.ReportErrors : false),
- ReversePairs() {
+ STI(STI), ReportErrors(CopyReportErrors ? Other.ReportErrors : false) {
init();
}
@@ -233,9 +234,10 @@ bool HexagonMCChecker::check(bool FullCheck) {
bool chkHWLoop = checkHWLoop();
bool chkValidTmpDst = FullCheck ? checkValidTmpDst() : true;
bool chkLegalVecRegPair = checkLegalVecRegPair();
+ bool ChkHVXAccum = checkHVXAccum();
bool chk = chkP && chkNV && chkR && chkRRO && chkS && chkSh && chkSl &&
chkAXOK && chkCofMax1 && chkHWLoop && chkValidTmpDst &&
- chkLegalVecRegPair;
+ chkLegalVecRegPair && ChkHVXAccum;
return chk;
}
@@ -274,20 +276,27 @@ static bool isDuplexAGroup(unsigned Opcode) {
}
static bool isNeitherAnorX(MCInstrInfo const &MCII, MCInst const &ID) {
- unsigned Result = 0;
+ if (HexagonMCInstrInfo::isFloat(MCII, ID))
+ return true;
unsigned Type = HexagonMCInstrInfo::getType(MCII, ID);
- if (Type == HexagonII::TypeDUPLEX) {
- unsigned subInst0Opcode = ID.getOperand(0).getInst()->getOpcode();
- unsigned subInst1Opcode = ID.getOperand(1).getInst()->getOpcode();
- Result += !isDuplexAGroup(subInst0Opcode);
- Result += !isDuplexAGroup(subInst1Opcode);
- } else
- Result +=
- Type != HexagonII::TypeALU32_2op && Type != HexagonII::TypeALU32_3op &&
- Type != HexagonII::TypeALU32_ADDI && Type != HexagonII::TypeS_2op &&
- Type != HexagonII::TypeS_3op &&
- (Type != HexagonII::TypeALU64 || HexagonMCInstrInfo::isFloat(MCII, ID));
- return Result != 0;
+ switch (Type) {
+ case HexagonII::TypeALU32_2op:
+ case HexagonII::TypeALU32_3op:
+ case HexagonII::TypeALU32_ADDI:
+ case HexagonII::TypeS_2op:
+ case HexagonII::TypeS_3op:
+ case HexagonII::TypeEXTENDER:
+ case HexagonII::TypeM:
+ case HexagonII::TypeALU64:
+ return false;
+ case HexagonII::TypeSUBINSN: {
+ return !isDuplexAGroup(ID.getOpcode());
+ }
+ case HexagonII::TypeDUPLEX:
+ llvm_unreachable("unexpected duplex instruction");
+ default:
+ return true;
+ }
}
bool HexagonMCChecker::checkAXOK() {
@@ -315,8 +324,7 @@ bool HexagonMCChecker::checkAXOK() {
void HexagonMCChecker::reportBranchErrors() {
for (auto const &I : HexagonMCInstrInfo::bundleInstructions(MCII, MCB)) {
- MCInstrDesc const &Desc = HexagonMCInstrInfo::getDesc(MCII, I);
- if (Desc.isBranch() || Desc.isCall() || Desc.isReturn())
+ if (HexagonMCInstrInfo::IsABranchingInst(MCII, STI, I))
reportNote(I.getLoc(), "Branching instruction");
}
}
@@ -326,8 +334,7 @@ bool HexagonMCChecker::checkHWLoop() {
!HexagonMCInstrInfo::isOuterLoop(MCB))
return true;
for (auto const &I : HexagonMCInstrInfo::bundleInstructions(MCII, MCB)) {
- MCInstrDesc const &Desc = HexagonMCInstrInfo::getDesc(MCII, I);
- if (Desc.isBranch() || Desc.isCall() || Desc.isReturn()) {
+ if (HexagonMCInstrInfo::IsABranchingInst(MCII, STI, I)) {
reportError(MCB.getLoc(),
"Branches cannot be in a packet with hardware loops");
reportBranchErrors();
@@ -340,8 +347,7 @@ bool HexagonMCChecker::checkHWLoop() {
bool HexagonMCChecker::checkCOFMax1() {
SmallVector<MCInst const *, 2> BranchLocations;
for (auto const &I : HexagonMCInstrInfo::bundleInstructions(MCII, MCB)) {
- MCInstrDesc const &Desc = HexagonMCInstrInfo::getDesc(MCII, I);
- if (Desc.isBranch() || Desc.isCall() || Desc.isReturn())
+ if (HexagonMCInstrInfo::IsABranchingInst(MCII, STI, I))
BranchLocations.push_back(&I);
}
for (unsigned J = 0, N = BranchLocations.size(); J < N; ++J) {
@@ -373,18 +379,8 @@ bool HexagonMCChecker::checkCOFMax1() {
}
bool HexagonMCChecker::checkSlots() {
- unsigned slotsUsed = 0;
- for (auto HMI : HexagonMCInstrInfo::bundleInstructions(MCB)) {
- MCInst const &MCI = *HMI.getInst();
- if (HexagonMCInstrInfo::isImmext(MCI))
- continue;
- if (HexagonMCInstrInfo::isDuplex(MCII, MCI))
- slotsUsed += 2;
- else
- ++slotsUsed;
- }
-
- if (slotsUsed > HEXAGON_PACKET_SIZE) {
+ if (HexagonMCInstrInfo::slotsConsumed(MCII, STI, MCB) >
+ HexagonMCInstrInfo::packetSizeSlots(STI)) {
reportError("invalid instruction packet: out of slots");
return false;
}
@@ -424,81 +420,109 @@ bool HexagonMCChecker::checkPredicates() {
// Check legal use of new values.
bool HexagonMCChecker::checkNewValues() {
- for (auto const &I : HexagonMCInstrInfo::bundleInstructions(MCII, MCB)) {
- if (!HexagonMCInstrInfo::isNewValue(MCII, I))
+ for (auto const &ConsumerInst :
+ HexagonMCInstrInfo::bundleInstructions(MCII, MCB)) {
+ if (!HexagonMCInstrInfo::isNewValue(MCII, ConsumerInst))
continue;
- auto Consumer = HexagonMCInstrInfo::predicateInfo(MCII, I);
- bool Branch = HexagonMCInstrInfo::getDesc(MCII, I).isBranch();
- MCOperand const &Op = HexagonMCInstrInfo::getNewValueOperand(MCII, I);
+
+ const HexagonMCInstrInfo::PredicateInfo ConsumerPredInfo =
+ HexagonMCInstrInfo::predicateInfo(MCII, ConsumerInst);
+
+ bool Branch = HexagonMCInstrInfo::getDesc(MCII, ConsumerInst).isBranch();
+ MCOperand const &Op =
+ HexagonMCInstrInfo::getNewValueOperand(MCII, ConsumerInst);
assert(Op.isReg());
- auto Producer = registerProducer(Op.getReg(), Consumer);
- if (std::get<0>(Producer) == nullptr) {
- reportError(I.getLoc(), "New value register consumer has no producer");
+
+ auto Producer = registerProducer(Op.getReg(), ConsumerPredInfo);
+ const MCInst *const ProducerInst = std::get<0>(Producer);
+ const HexagonMCInstrInfo::PredicateInfo ProducerPredInfo =
+ std::get<2>(Producer);
+
+ if (ProducerInst == nullptr) {
+ reportError(ConsumerInst.getLoc(),
+ "New value register consumer has no producer");
return false;
}
if (!RelaxNVChecks) {
// Checks that statically prove correct new value consumption
- if (std::get<2>(Producer).isPredicated() &&
- (!Consumer.isPredicated() ||
- llvm::HexagonMCInstrInfo::getType(MCII, I) == HexagonII::TypeNCJ)) {
+ if (ProducerPredInfo.isPredicated() &&
+ (!ConsumerPredInfo.isPredicated() ||
+ llvm::HexagonMCInstrInfo::getType(MCII, ConsumerInst) ==
+ HexagonII::TypeNCJ)) {
reportNote(
- std::get<0>(Producer)->getLoc(),
+ ProducerInst->getLoc(),
"Register producer is predicated and consumer is unconditional");
- reportError(I.getLoc(),
+ reportError(ConsumerInst.getLoc(),
"Instruction does not have a valid new register producer");
return false;
}
- if (std::get<2>(Producer).Register != Hexagon::NoRegister &&
- std::get<2>(Producer).Register != Consumer.Register) {
- reportNote(std::get<0>(Producer)->getLoc(),
+ if (ProducerPredInfo.Register != Hexagon::NoRegister &&
+ ProducerPredInfo.Register != ConsumerPredInfo.Register) {
+ reportNote(ProducerInst->getLoc(),
"Register producer does not use the same predicate "
"register as the consumer");
- reportError(I.getLoc(),
+ reportError(ConsumerInst.getLoc(),
"Instruction does not have a valid new register producer");
return false;
}
}
- if (std::get<2>(Producer).Register == Consumer.Register &&
- Consumer.PredicatedTrue != std::get<2>(Producer).PredicatedTrue) {
+ if (ProducerPredInfo.Register == ConsumerPredInfo.Register &&
+ ConsumerPredInfo.PredicatedTrue != ProducerPredInfo.PredicatedTrue) {
reportNote(
- std::get<0>(Producer)->getLoc(),
+ ProducerInst->getLoc(),
"Register producer has the opposite predicate sense as consumer");
- reportError(I.getLoc(),
+ reportError(ConsumerInst.getLoc(),
"Instruction does not have a valid new register producer");
return false;
}
- MCInstrDesc const &Desc =
- HexagonMCInstrInfo::getDesc(MCII, *std::get<0>(Producer));
- if (Desc.OpInfo[std::get<1>(Producer)].RegClass ==
+
+ MCInstrDesc const &Desc = HexagonMCInstrInfo::getDesc(MCII, *ProducerInst);
+ const unsigned ProducerOpIndex = std::get<1>(Producer);
+
+ if (Desc.OpInfo[ProducerOpIndex].RegClass ==
Hexagon::DoubleRegsRegClassID) {
- reportNote(std::get<0>(Producer)->getLoc(),
+ reportNote(ProducerInst->getLoc(),
"Double registers cannot be new-value producers");
- reportError(I.getLoc(),
+ reportError(ConsumerInst.getLoc(),
"Instruction does not have a valid new register producer");
return false;
}
- if ((Desc.mayLoad() && std::get<1>(Producer) == 1) ||
- (Desc.mayStore() && std::get<1>(Producer) == 0)) {
- unsigned Mode =
- HexagonMCInstrInfo::getAddrMode(MCII, *std::get<0>(Producer));
+
+ // The ProducerOpIsMemIndex logic checks for the index of the producer
+ // register operand. Z-reg load instructions have an implicit operand
+ // that's not encoded, so the producer won't appear as the 1-th def, it
+ // will be at the 0-th.
+ const unsigned ProducerOpSearchIndex =
+ (HexagonMCInstrInfo::getType(MCII, *ProducerInst) ==
+ HexagonII::TypeCVI_ZW)
+ ? 0
+ : 1;
+
+ const bool ProducerOpIsMemIndex =
+ ((Desc.mayLoad() && ProducerOpIndex == ProducerOpSearchIndex) ||
+ (Desc.mayStore() && ProducerOpIndex == 0));
+
+ if (ProducerOpIsMemIndex) {
+ unsigned Mode = HexagonMCInstrInfo::getAddrMode(MCII, *ProducerInst);
+
StringRef ModeError;
if (Mode == HexagonII::AbsoluteSet)
ModeError = "Absolute-set";
if (Mode == HexagonII::PostInc)
ModeError = "Auto-increment";
if (!ModeError.empty()) {
- reportNote(std::get<0>(Producer)->getLoc(),
+ reportNote(ProducerInst->getLoc(),
ModeError + " registers cannot be a new-value "
"producer");
- reportError(I.getLoc(),
+ reportError(ConsumerInst.getLoc(),
"Instruction does not have a valid new register producer");
return false;
}
}
- if (Branch && HexagonMCInstrInfo::isFloat(MCII, *std::get<0>(Producer))) {
- reportNote(std::get<0>(Producer)->getLoc(),
+ if (Branch && HexagonMCInstrInfo::isFloat(MCII, *ProducerInst)) {
+ reportNote(ProducerInst->getLoc(),
"FPU instructions cannot be new-value producers for jumps");
- reportError(I.getLoc(),
+ reportError(ConsumerInst.getLoc(),
"Instruction does not have a valid new register producer");
return false;
}
@@ -541,9 +565,11 @@ HexagonMCChecker::registerProducer(
unsigned Register, HexagonMCInstrInfo::PredicateInfo ConsumerPredicate) {
std::tuple<MCInst const *, unsigned, HexagonMCInstrInfo::PredicateInfo>
WrongSense;
+
for (auto const &I : HexagonMCInstrInfo::bundleInstructions(MCII, MCB)) {
MCInstrDesc const &Desc = HexagonMCInstrInfo::getDesc(MCII, I);
auto ProducerPredicate = HexagonMCInstrInfo::predicateInfo(MCII, I);
+
for (unsigned J = 0, N = Desc.getNumDefs(); J < N; ++J)
for (auto K = MCRegAliasIterator(I.getOperand(J).getReg(), &RI, true);
K.isValid(); ++K)
@@ -568,9 +594,15 @@ void HexagonMCChecker::checkRegisterCurDefs() {
for (auto const &I : HexagonMCInstrInfo::bundleInstructions(MCII, MCB)) {
if (HexagonMCInstrInfo::isCVINew(MCII, I) &&
HexagonMCInstrInfo::getDesc(MCII, I).mayLoad()) {
- unsigned Register = I.getOperand(0).getReg();
- if (!registerUsed(Register))
- reportWarning("Register `" + Twine(RI.getName(Register)) +
+ const unsigned RegDef = I.getOperand(0).getReg();
+
+ bool HasRegDefUse = false;
+ for (MCRegAliasIterator Alias(RegDef, &RI, true); Alias.isValid();
+ ++Alias)
+ HasRegDefUse = HasRegDefUse || registerUsed(*Alias);
+
+ if (!HasRegDefUse)
+ reportWarning("Register `" + Twine(RI.getName(RegDef)) +
"' used with `.cur' "
"but not used in the same packet");
}
@@ -599,7 +631,7 @@ bool HexagonMCChecker::checkRegisters() {
reportErrorRegisters(BadR);
return false;
}
- if (!isPredicateRegister(R) && Defs[R].size() > 1) {
+ if (!HexagonMCInstrInfo::isPredReg(RI, R) && Defs[R].size() > 1) {
// Check for multiple register definitions.
PredSet &PM = Defs[R];
@@ -784,3 +816,22 @@ bool HexagonMCChecker::checkLegalVecRegPair() {
}
return true;
}
+
+// Vd.tmp can't be accumulated
+bool HexagonMCChecker::checkHVXAccum()
+{
+ for (const auto &I : HexagonMCInstrInfo::bundleInstructions(MCII, MCB)) {
+ bool IsTarget =
+ HexagonMCInstrInfo::isAccumulator(MCII, I) && I.getOperand(0).isReg();
+ if (!IsTarget)
+ continue;
+ unsigned int R = I.getOperand(0).getReg();
+ TmpDefsIterator It = TmpDefs.find(R);
+ if (It != TmpDefs.end()) {
+ reportError("register `" + Twine(RI.getName(R)) + ".tmp" +
+ "' is accumulated in this packet");
+ return false;
+ }
+ }
+ return true;
+}
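A simplified sketch of the rule the new checkHVXAccum() enforces: an HVX accumulator may not target a register that is defined as ".tmp" in the same packet. PacketInst and the plain unsigned register IDs are stand-ins for the MCInst bundle and the checker's TmpDefs bookkeeping.

#include <set>
#include <vector>

struct PacketInst { bool IsAccumulator; unsigned DefReg; };

bool checkHVXAccum(const std::vector<PacketInst> &Bundle,
                   const std::set<unsigned> &TmpDefs) {
  for (const PacketInst &I : Bundle) {
    if (!I.IsAccumulator)
      continue;
    if (TmpDefs.count(I.DefReg))
      return false;   // the real checker reports "register Vn.tmp is accumulated"
  }
  return true;
}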
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.h b/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.h
index dbd3d8ae45e6..b83931eb88ac 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.h
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.h
@@ -81,6 +81,10 @@ class HexagonMCChecker {
void initReg(MCInst const &, unsigned, unsigned &PredReg, bool &isTrue);
bool registerUsed(unsigned Register);
+
+ /// \return a tuple of: pointer to the producer instruction or nullptr if
+ /// none was found, the operand index, and the PredicateInfo for the
+ /// producer.
std::tuple<MCInst const *, unsigned, HexagonMCInstrInfo::PredicateInfo>
registerProducer(unsigned Register,
HexagonMCInstrInfo::PredicateInfo Predicated);
@@ -100,14 +104,10 @@ class HexagonMCChecker {
bool checkCOFMax1();
bool checkLegalVecRegPair();
bool checkValidTmpDst();
+ bool checkHVXAccum();
static void compoundRegisterMap(unsigned &);
- bool isPredicateRegister(unsigned R) const {
- return (Hexagon::P0 == R || Hexagon::P1 == R || Hexagon::P2 == R ||
- Hexagon::P3 == R);
- }
-
bool isLoopRegister(unsigned R) const {
return (Hexagon::SA0 == R || Hexagon::LC0 == R || Hexagon::SA1 == R ||
Hexagon::LC1 == R);
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp
index 33b2e9a9e302..f8ac35aed7c0 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp
@@ -712,7 +712,6 @@ unsigned
HexagonMCCodeEmitter::getMachineOpValue(MCInst const &MI, MCOperand const &MO,
SmallVectorImpl<MCFixup> &Fixups,
MCSubtargetInfo const &STI) const {
-#ifndef NDEBUG
size_t OperandNumber = ~0U;
for (unsigned i = 0, n = MI.getNumOperands(); i < n; ++i)
if (&MI.getOperand(i) == &MO) {
@@ -720,7 +719,6 @@ HexagonMCCodeEmitter::getMachineOpValue(MCInst const &MI, MCOperand const &MO,
break;
}
assert((OperandNumber != ~0U) && "Operand not found");
-#endif
if (HexagonMCInstrInfo::isNewValue(MCII, MI) &&
&MO == &HexagonMCInstrInfo::getNewValueOperand(MCII, MI)) {
@@ -777,9 +775,13 @@ HexagonMCCodeEmitter::getMachineOpValue(MCInst const &MI, MCOperand const &MO,
assert(!MO.isImm());
if (MO.isReg()) {
unsigned Reg = MO.getReg();
- if (HexagonMCInstrInfo::isSubInstruction(MI) ||
- HexagonMCInstrInfo::getType(MCII, MI) == HexagonII::TypeCJ)
+ switch (HexagonMCInstrInfo::getDesc(MCII, MI).OpInfo[OperandNumber].RegClass) {
+ case GeneralSubRegsRegClassID:
+ case GeneralDoubleLow8RegsRegClassID:
return HexagonMCInstrInfo::getDuplexRegisterNumbering(Reg);
+ default:
+ break;
+ }
return MCT.getRegisterInfo()->getEncodingValue(Reg);
}
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCompound.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCompound.cpp
index e7ade7834a9f..3deef95df324 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCompound.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCompound.cpp
@@ -365,8 +365,10 @@ static bool lookForCompound(MCInstrInfo const &MCII, MCContext &Context,
MCI.begin() + HexagonMCInstrInfo::bundleInstructionsOffset;
B != MCI.end(); ++B) {
MCInst const *Inst = B->getInst();
- if (JumpInst == Inst)
+ if (JumpInst == Inst) {
+ BExtended = false;
continue;
+ }
if (HexagonMCInstrInfo::isImmext(*Inst)) {
BExtended = true;
continue;
@@ -405,24 +407,27 @@ void HexagonMCInstrInfo::tryCompound(MCInstrInfo const &MCII, MCSubtargetInfo co
if (MCI.size() < 2)
return;
- bool StartedValid = llvm::HexagonMCShuffle(Context, false, MCII, STI, MCI);
-
// Create a vector, needed to keep the order of jump instructions.
MCInst CheckList(MCI);
+ // Keep the last known good bundle around in case the shuffle fails.
+ MCInst LastValidBundle(MCI);
+
+ bool PreviouslyValid = llvm::HexagonMCShuffle(Context, false, MCII, STI, MCI);
+
// Look for compounds until none are found, only update the bundle when
// a compound is found.
while (lookForCompound(MCII, Context, CheckList)) {
- // Keep the original bundle around in case the shuffle fails.
- MCInst OriginalBundle(MCI);
-
// Need to update the bundle.
MCI = CheckList;
- if (StartedValid &&
- !llvm::HexagonMCShuffle(Context, false, MCII, STI, MCI)) {
+ const bool IsValid = llvm::HexagonMCShuffle(Context, false, MCII, STI, MCI);
+ if (PreviouslyValid && !IsValid) {
LLVM_DEBUG(dbgs() << "Found ERROR\n");
- MCI = OriginalBundle;
+ MCI = LastValidBundle;
+ } else if (IsValid) {
+ LastValidBundle = MCI;
+ PreviouslyValid = true;
}
}
}
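The tryCompound change above replaces the single "original bundle" fallback with a rolling last-known-good snapshot. A generic C++ sketch of that pattern, illustrative only; the real loop additionally keeps compounding on a separate CheckList rather than on the rolled-back state.

// Keep the most recent state that validated and restore it whenever a later
// step invalidates a previously valid state.
template <typename State, typename TransformFn, typename ValidFn>
void iterateWithRollback(State &S, TransformFn TryTransform, ValidFn IsValid) {
  State LastValid = S;
  bool PreviouslyValid = IsValid(S);
  while (TryTransform(S)) {
    bool Valid = IsValid(S);
    if (PreviouslyValid && !Valid) {
      S = LastValid;              // drop the step that broke validity
    } else if (Valid) {
      LastValid = S;              // advance the known-good snapshot
      PreviouslyValid = true;
    }
  }
}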
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp
index e1c95f1cc920..36d6c8c9f84b 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp
@@ -284,8 +284,6 @@ unsigned HexagonMCInstrInfo::getDuplexCandidateGroup(MCInst const &MCI) {
case Hexagon::J2_jumprf:
case Hexagon::J2_jumprtnew:
case Hexagon::J2_jumprfnew:
- case Hexagon::J2_jumprtnewpt:
- case Hexagon::J2_jumprfnewpt:
case Hexagon::PS_jmprett:
case Hexagon::PS_jmpretf:
case Hexagon::PS_jmprettnew:
@@ -303,8 +301,6 @@ unsigned HexagonMCInstrInfo::getDuplexCandidateGroup(MCInst const &MCI) {
case Hexagon::L4_return_f:
case Hexagon::L4_return_tnew_pnt:
case Hexagon::L4_return_fnew_pnt:
- case Hexagon::L4_return_tnew_pt:
- case Hexagon::L4_return_fnew_pt:
// [if ([!]p0[.new])] dealloc_return
SrcReg = MCI.getOperand(1).getReg();
if (Hexagon::P0 == SrcReg) {
@@ -699,6 +695,7 @@ inline static void addOps(MCInst &subInstPtr, MCInst const &Inst,
MCInst HexagonMCInstrInfo::deriveSubInst(MCInst const &Inst) {
MCInst Result;
+ Result.setLoc(Inst.getLoc());
bool Absolute;
int64_t Value;
switch (Inst.getOpcode()) {
@@ -830,7 +827,6 @@ MCInst HexagonMCInstrInfo::deriveSubInst(MCInst const &Inst) {
Result.setOpcode(Hexagon::SL2_jumpr31_f);
break; // none SUBInst if (!p0) jumpr r31
case Hexagon::J2_jumprfnew:
- case Hexagon::J2_jumprfnewpt:
case Hexagon::PS_jmpretfnewpt:
case Hexagon::PS_jmpretfnew:
Result.setOpcode(Hexagon::SL2_jumpr31_fnew);
@@ -840,7 +836,6 @@ MCInst HexagonMCInstrInfo::deriveSubInst(MCInst const &Inst) {
Result.setOpcode(Hexagon::SL2_jumpr31_t);
break; // none SUBInst if (p0) jumpr r31
case Hexagon::J2_jumprtnew:
- case Hexagon::J2_jumprtnewpt:
case Hexagon::PS_jmprettnewpt:
case Hexagon::PS_jmprettnew:
Result.setOpcode(Hexagon::SL2_jumpr31_tnew);
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp
index 68ccb20f4f15..494b0e6cbac6 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp
@@ -128,23 +128,28 @@ bool canonicalizePacketImpl(MCInstrInfo const &MCII, MCSubtargetInfo const &STI,
bool CheckOk = Check ? Check->check(false) : true;
if (!CheckOk)
return false;
+
+ MCInst OrigMCB = MCB;
+
// Examine the packet and convert pairs of instructions to compound
// instructions when possible.
if (!HexagonDisableCompound)
HexagonMCInstrInfo::tryCompound(MCII, STI, Context, MCB);
HexagonMCShuffle(Context, false, MCII, STI, MCB);
+ const SmallVector<DuplexCandidate, 8> possibleDuplexes =
+ (STI.getFeatureBits()[Hexagon::FeatureDuplex])
+ ? HexagonMCInstrInfo::getDuplexPossibilties(MCII, STI, MCB)
+ : SmallVector<DuplexCandidate, 8>();
+
// Examine the packet and convert pairs of instructions to duplex
// instructions when possible.
- if (STI.getFeatureBits() [Hexagon::FeatureDuplex]) {
- SmallVector<DuplexCandidate, 8> possibleDuplexes;
- possibleDuplexes =
- HexagonMCInstrInfo::getDuplexPossibilties(MCII, STI, MCB);
- HexagonMCShuffle(Context, MCII, STI, MCB, possibleDuplexes);
- }
+ HexagonMCShuffle(Context, MCII, STI, MCB, possibleDuplexes);
+
// Examines packet and pad the packet, if needed, when an
// end-loop is in the bundle.
HexagonMCInstrInfo::padEndloop(MCB, Context);
+
// If compounding and duplexing didn't reduce the size below
// 4 or less we have a packet that is too big.
if (HexagonMCInstrInfo::bundleSize(MCB) > HEXAGON_PACKET_SIZE) {
@@ -156,7 +161,9 @@ bool canonicalizePacketImpl(MCInstrInfo const &MCII, MCSubtargetInfo const &STI,
CheckOk = Check ? Check->check(true) : true;
if (!CheckOk)
return false;
+
HexagonMCShuffle(Context, true, MCII, STI, MCB);
+
return true;
}
} // namespace
@@ -857,16 +864,16 @@ bool HexagonMCInstrInfo::isVector(MCInstrInfo const &MCII, MCInst const &MCI) {
}
int64_t HexagonMCInstrInfo::minConstant(MCInst const &MCI, size_t Index) {
- auto Sentinal = static_cast<int64_t>(std::numeric_limits<uint32_t>::max())
+ auto Sentinel = static_cast<int64_t>(std::numeric_limits<uint32_t>::max())
<< 8;
if (MCI.size() <= Index)
- return Sentinal;
+ return Sentinel;
MCOperand const &MCO = MCI.getOperand(Index);
if (!MCO.isExpr())
- return Sentinal;
+ return Sentinel;
int64_t Value;
if (!MCO.getExpr()->evaluateAsAbsolute(Value))
- return Sentinal;
+ return Sentinel;
return Value;
}
@@ -915,10 +922,7 @@ void HexagonMCInstrInfo::padEndloop(MCInst &MCB, MCContext &Context) {
MCInst Nop;
Nop.setOpcode(Hexagon::A2_nop);
assert(isBundle(MCB));
- while ((HexagonMCInstrInfo::isInnerLoop(MCB) &&
- (HexagonMCInstrInfo::bundleSize(MCB) < HEXAGON_PACKET_INNER_SIZE)) ||
- ((HexagonMCInstrInfo::isOuterLoop(MCB) &&
- (HexagonMCInstrInfo::bundleSize(MCB) < HEXAGON_PACKET_OUTER_SIZE))))
+ while (LoopNeedsPadding(MCB))
MCB.addOperand(MCOperand::createInst(new (Context) MCInst(Nop)));
}
@@ -1030,3 +1034,19 @@ unsigned HexagonMCInstrInfo::SubregisterBit(unsigned Consumer,
return Consumer == Producer;
return 0;
}
+
+bool HexagonMCInstrInfo::LoopNeedsPadding(MCInst const &MCB) {
+ return (
+ (HexagonMCInstrInfo::isInnerLoop(MCB) &&
+ (HexagonMCInstrInfo::bundleSize(MCB) < HEXAGON_PACKET_INNER_SIZE)) ||
+ ((HexagonMCInstrInfo::isOuterLoop(MCB) &&
+ (HexagonMCInstrInfo::bundleSize(MCB) < HEXAGON_PACKET_OUTER_SIZE))));
+}
+
+bool HexagonMCInstrInfo::IsABranchingInst(MCInstrInfo const &MCII,
+ MCSubtargetInfo const &STI,
+ MCInst const &I) {
+ assert(!HexagonMCInstrInfo::isBundle(I));
+ MCInstrDesc const &Desc = HexagonMCInstrInfo::getDesc(MCII, I);
+ return (Desc.isBranch() || Desc.isCall() || Desc.isReturn());
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.h b/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.h
index 5c56db14798f..f0c4a86fde78 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.h
@@ -65,18 +65,24 @@ public:
namespace HexagonMCInstrInfo {
-size_t const innerLoopOffset = 0;
-int64_t const innerLoopMask = 1 << innerLoopOffset;
+constexpr size_t innerLoopOffset = 0;
+constexpr int64_t innerLoopMask = 1 << innerLoopOffset;
-size_t const outerLoopOffset = 1;
-int64_t const outerLoopMask = 1 << outerLoopOffset;
+constexpr size_t outerLoopOffset = 1;
+constexpr int64_t outerLoopMask = 1 << outerLoopOffset;
// do not reorder memory load/stores by default load/stores are re-ordered
// and by default loads can be re-ordered
-size_t const memReorderDisabledOffset = 2;
-int64_t const memReorderDisabledMask = 1 << memReorderDisabledOffset;
+constexpr size_t memReorderDisabledOffset = 2;
+constexpr int64_t memReorderDisabledMask = 1 << memReorderDisabledOffset;
-size_t const bundleInstructionsOffset = 1;
+constexpr size_t splitNoMemOrderOffset = 3;
+constexpr int64_t splitNoMemorderMask = 1 << splitNoMemOrderOffset;
+
+constexpr size_t noShuffleOffset = 4;
+constexpr int64_t noShuffleMask = 1 << noShuffleOffset;
+
+constexpr size_t bundleInstructionsOffset = 1;
void addConstant(MCInst &MI, uint64_t Value, MCContext &Context);
void addConstExtender(MCContext &Context, MCInstrInfo const &MCII, MCInst &MCB,
@@ -95,6 +101,8 @@ bool canonicalizePacket(MCInstrInfo const &MCII, MCSubtargetInfo const &STI,
MCContext &Context, MCInst &MCB,
HexagonMCChecker *Checker,
bool AttemptCompatibility = false);
+bool IsABranchingInst(MCInstrInfo const &MCII, MCSubtargetInfo const &STI,
+ MCInst const &I);
// Create a duplex instruction given the two subinsts
MCInst *deriveDuplex(MCContext &Context, unsigned iClass, MCInst const &inst0,
@@ -307,6 +315,10 @@ bool mustNotExtend(MCExpr const &Expr);
// Returns true if this instruction requires a slot to execute.
bool requiresSlot(MCSubtargetInfo const &STI, MCInst const &MCI);
+
+// Returns true if \a MCB would require endloop padding.
+bool LoopNeedsPadding(MCInst const &MCB);
+
unsigned packetSize(StringRef CPU);
// Returns the maximum number of slots available in the given
@@ -318,8 +330,7 @@ unsigned packetSizeSlots(MCSubtargetInfo const &STI);
unsigned slotsConsumed(MCInstrInfo const &MCII, MCSubtargetInfo const &STI,
MCInst const &MCI);
-
-// Pad the bundle with nops to satisfy endloop requirements
+// Pad the bundle with nops to satisfy endloop requirements.
void padEndloop(MCInst &MCI, MCContext &Context);
class PredicateInfo {
public:
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp
index d38b77b42fbc..d96fade71a84 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp
@@ -81,10 +81,9 @@ void HexagonMCShuffler::copyTo(MCInst &MCB) {
MCB.addOperand(MCOperand::createImm(BundleFlags));
MCB.setLoc(Loc);
// Copy the results into the bundle.
- for (HexagonShuffler::iterator I = begin(); I != end(); ++I) {
-
- MCInst const &MI = I->getDesc();
- MCInst const *Extender = I->getExtender();
+ for (auto &I : *this) {
+ MCInst const &MI = I.getDesc();
+ MCInst const *Extender = I.getExtender();
if (Extender)
MCB.addOperand(MCOperand::createInst(Extender));
MCB.addOperand(MCOperand::createInst(&MI));
@@ -101,10 +100,10 @@ bool HexagonMCShuffler::reshuffleTo(MCInst &MCB) {
return false;
}
-bool llvm::HexagonMCShuffle(MCContext &Context, bool Fatal,
+bool llvm::HexagonMCShuffle(MCContext &Context, bool ReportErrors,
MCInstrInfo const &MCII, MCSubtargetInfo const &STI,
MCInst &MCB) {
- HexagonMCShuffler MCS(Context, Fatal, MCII, STI, MCB);
+ HexagonMCShuffler MCS(Context, ReportErrors, MCII, STI, MCB);
if (DisableShuffle)
// Ignore if user chose so.
@@ -128,11 +127,11 @@ bool llvm::HexagonMCShuffle(MCContext &Context, bool Fatal,
return MCS.reshuffleTo(MCB);
}
-bool
-llvm::HexagonMCShuffle(MCContext &Context, MCInstrInfo const &MCII,
- MCSubtargetInfo const &STI, MCInst &MCB,
- SmallVector<DuplexCandidate, 8> possibleDuplexes) {
- if (DisableShuffle)
+bool llvm::HexagonMCShuffle(MCContext &Context, MCInstrInfo const &MCII,
+ MCSubtargetInfo const &STI, MCInst &MCB,
+ SmallVector<DuplexCandidate, 8> possibleDuplexes) {
+
+ if (DisableShuffle || possibleDuplexes.size() == 0)
return false;
if (!HexagonMCInstrInfo::bundleSize(MCB)) {
@@ -173,10 +172,8 @@ llvm::HexagonMCShuffle(MCContext &Context, MCInstrInfo const &MCII,
HexagonMCShuffler MCS(Context, false, MCII, STI, MCB);
doneShuffling = MCS.reshuffleTo(MCB); // shuffle
}
- if (!doneShuffling)
- return true;
- return false;
+ return doneShuffling;
}
bool llvm::HexagonMCShuffle(MCContext &Context, MCInstrInfo const &MCII,
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.h b/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.h
index 3410c0ddbd84..4fc8addb27bc 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.h
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.h
@@ -28,16 +28,17 @@ class MCSubtargetInfo;
// Insn bundle shuffler.
class HexagonMCShuffler : public HexagonShuffler {
public:
- HexagonMCShuffler(MCContext &Context, bool Fatal, MCInstrInfo const &MCII,
- MCSubtargetInfo const &STI, MCInst &MCB)
- : HexagonShuffler(Context, Fatal, MCII, STI) {
+ HexagonMCShuffler(MCContext &Context, bool ReportErrors,
+ MCInstrInfo const &MCII, MCSubtargetInfo const &STI,
+ MCInst &MCB)
+ : HexagonShuffler(Context, ReportErrors, MCII, STI) {
init(MCB);
}
- HexagonMCShuffler(MCContext &Context, bool Fatal, MCInstrInfo const &MCII,
- MCSubtargetInfo const &STI, MCInst &MCB,
- MCInst const &AddMI, bool InsertAtFront)
- : HexagonShuffler(Context, Fatal, MCII, STI) {
+ HexagonMCShuffler(MCContext &Context, bool ReportErrors,
+ MCInstrInfo const &MCII, MCSubtargetInfo const &STI,
+ MCInst &MCB, MCInst const &AddMI, bool InsertAtFront)
+ : HexagonShuffler(Context, ReportErrors, MCII, STI) {
init(MCB, AddMI, InsertAtFront);
}
@@ -52,9 +53,11 @@ private:
void init(MCInst &MCB, MCInst const &AddMI, bool InsertAtFront);
};
-// Invocation of the shuffler.
-bool HexagonMCShuffle(MCContext &Context, bool Fatal, MCInstrInfo const &MCII,
- MCSubtargetInfo const &STI, MCInst &MCB);
+// Invocation of the shuffler. Returns true if the shuffle succeeded. If
+// true, MCB will contain the newly-shuffled packet.
+bool HexagonMCShuffle(MCContext &Context, bool ReportErrors,
+ MCInstrInfo const &MCII, MCSubtargetInfo const &STI,
+ MCInst &MCB);
bool HexagonMCShuffle(MCContext &Context, MCInstrInfo const &MCII,
MCSubtargetInfo const &STI, MCInst &MCB,
MCInst const &AddMI, int fixupCount);
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
index dfdddb50657c..6a08d7503bac 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
@@ -517,6 +517,14 @@ MCSubtargetInfo *Hexagon_MC::createHexagonMCSubtargetInfo(const Triple &TT,
return nullptr;
}
+ // Add qfloat subtarget feature by default to v68 and above
+ // unless explicitly disabled
+ if (checkFeature(X, Hexagon::ExtensionHVXV68) &&
+ ArchFS.find("-hvx-qfloat", 0) == std::string::npos) {
+ llvm::FeatureBitset Features = X->getFeatureBits();
+ X->setFeatureBits(Features.set(Hexagon::ExtensionHVXQFloat));
+ }
+
if (HexagonDisableDuplex) {
llvm::FeatureBitset Features = X->getFeatureBits();
X->setFeatureBits(Features.reset(Hexagon::FeatureDuplex));
@@ -551,21 +559,11 @@ void Hexagon_MC::addArchSubtarget(MCSubtargetInfo const *STI,
}
unsigned Hexagon_MC::GetELFFlags(const MCSubtargetInfo &STI) {
- static std::map<StringRef,unsigned> ElfFlags = {
- {"hexagonv5", ELF::EF_HEXAGON_MACH_V5},
- {"hexagonv55", ELF::EF_HEXAGON_MACH_V55},
- {"hexagonv60", ELF::EF_HEXAGON_MACH_V60},
- {"hexagonv62", ELF::EF_HEXAGON_MACH_V62},
- {"hexagonv65", ELF::EF_HEXAGON_MACH_V65},
- {"hexagonv66", ELF::EF_HEXAGON_MACH_V66},
- {"hexagonv67", ELF::EF_HEXAGON_MACH_V67},
- {"hexagonv67t", ELF::EF_HEXAGON_MACH_V67T},
- {"hexagonv68", ELF::EF_HEXAGON_MACH_V68},
- {"hexagonv69", ELF::EF_HEXAGON_MACH_V69},
- };
+ using llvm::Hexagon::ElfFlagsByCpuStr;
- auto F = ElfFlags.find(STI.getCPU());
- assert(F != ElfFlags.end() && "Unrecognized Architecture");
+ const std::string CPU(STI.getCPU().str());
+ auto F = ElfFlagsByCpuStr.find(CPU);
+ assert(F != ElfFlagsByCpuStr.end() && "Unrecognized Architecture");
return F->second;
}
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
index 1fce90b82864..d82731e153fe 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
@@ -167,7 +167,8 @@ static bool checkHVXPipes(const HVXInstsT &hvxInsts, unsigned startIdx,
HexagonShuffler::HexagonShuffler(MCContext &Context, bool ReportErrors,
MCInstrInfo const &MCII,
MCSubtargetInfo const &STI)
- : Context(Context), MCII(MCII), STI(STI), ReportErrors(ReportErrors) {
+ : Context(Context), BundleFlags(), MCII(MCII), STI(STI),
+ ReportErrors(ReportErrors), CheckFailure() {
reset();
}
@@ -244,8 +245,8 @@ void HexagonShuffler::restrictNoSlot1Store(
"Instruction does not allow a store in slot 1"));
}
-bool HexagonShuffler::applySlotRestrictions(
- HexagonPacketSummary const &Summary) {
+bool HexagonShuffler::applySlotRestrictions(HexagonPacketSummary const &Summary,
+ const bool DoShuffle) {
// These restrictions can modify the slot masks in the instructions
// in the Packet member. They should run unconditionally and their
// order does not matter.
@@ -262,7 +263,7 @@ bool HexagonShuffler::applySlotRestrictions(
if (!CheckFailure)
restrictBranchOrder(Summary);
if (!CheckFailure)
- restrictPreferSlot3(Summary);
+ restrictPreferSlot3(Summary, DoShuffle);
return !CheckFailure;
}
@@ -303,10 +304,9 @@ void HexagonShuffler::restrictBranchOrder(HexagonPacketSummary const &Summary) {
Packet = PacketSave;
}
- reportError("invalid instruction packet: out of slots");
+ reportResourceError(Summary, "out of slots");
}
-
void HexagonShuffler::permitNonSlot() {
for (HexagonInstr &ISJ : insts()) {
const bool RequiresSlot = HexagonMCInstrInfo::requiresSlot(STI, *ISJ.ID);
@@ -319,21 +319,19 @@ bool HexagonShuffler::ValidResourceUsage(HexagonPacketSummary const &Summary) {
Optional<HexagonPacket> ShuffledPacket = tryAuction(Summary);
if (!ShuffledPacket) {
- reportError("invalid instruction packet: slot error");
+ reportResourceError(Summary, "slot error");
return false;
- } else {
- Packet = *ShuffledPacket;
}
// Verify the CVI slot subscriptions.
- llvm::stable_sort(*this, HexagonInstr::lessCVI);
+ llvm::stable_sort(*ShuffledPacket, HexagonInstr::lessCVI);
// create vector of hvx instructions to check
HVXInstsT hvxInsts;
hvxInsts.clear();
- for (const_iterator I = cbegin(); I != cend(); ++I) {
+ for (const auto &I : *ShuffledPacket) {
struct CVIUnits inst;
- inst.Units = I->CVI.getUnits();
- inst.Lanes = I->CVI.getLanes();
+ inst.Units = I.CVI.getUnits();
+ inst.Lanes = I.CVI.getLanes();
if (inst.Units == 0)
      continue; // not an hvx inst or an hvx inst that doesn't use any pipes
hvxInsts.push_back(inst);
@@ -349,6 +347,9 @@ bool HexagonShuffler::ValidResourceUsage(HexagonPacketSummary const &Summary) {
return false;
}
}
+
+ Packet = *ShuffledPacket;
+
return true;
}
@@ -438,6 +439,15 @@ bool HexagonShuffler::restrictStoreLoadOrder(
return true;
}
+static std::string SlotMaskToText(unsigned SlotMask) {
+ SmallVector<std::string, HEXAGON_PRESHUFFLE_PACKET_SIZE> Slots;
+ for (unsigned SlotNum = 0; SlotNum < HEXAGON_PACKET_SIZE; SlotNum++)
+ if ((SlotMask & (1 << SlotNum)) != 0)
+ Slots.push_back(utostr(SlotNum));
+
+ return llvm::join(Slots, StringRef(", "));
+}
+
HexagonShuffler::HexagonPacketSummary HexagonShuffler::GetPacketSummary() {
HexagonPacketSummary Summary = HexagonPacketSummary();
@@ -454,8 +464,13 @@ HexagonShuffler::HexagonPacketSummary HexagonShuffler::GetPacketSummary() {
++Summary.pSlot3Cnt;
Summary.PrefSlot3Inst = ISJ;
}
- Summary.ReservedSlotMask |=
+ const unsigned ReservedSlots =
HexagonMCInstrInfo::getOtherReservedSlots(MCII, STI, ID);
+ Summary.ReservedSlotMask |= ReservedSlots;
+ if (ReservedSlots != 0)
+ AppliedRestrictions.push_back(std::make_pair(ID.getLoc(),
+ (Twine("Instruction has reserved slots: ") +
+ SlotMaskToText(ReservedSlots)).str()));
switch (HexagonMCInstrInfo::getType(MCII, ID)) {
case HexagonII::TypeS_2op:
@@ -463,7 +478,8 @@ HexagonShuffler::HexagonPacketSummary HexagonShuffler::GetPacketSummary() {
case HexagonII::TypeALU64:
break;
case HexagonII::TypeJ:
- Summary.branchInsts.push_back(ISJ);
+ if (HexagonMCInstrInfo::IsABranchingInst(MCII, STI, *ISJ->ID))
+ Summary.branchInsts.push_back(ISJ);
break;
case HexagonII::TypeCVI_VM_VP_LDU:
case HexagonII::TypeCVI_VM_LD:
@@ -565,14 +581,15 @@ bool HexagonShuffler::ValidPacketMemoryOps(
return !InvalidPacket;
}
-void HexagonShuffler::restrictPreferSlot3(HexagonPacketSummary const &Summary) {
+void HexagonShuffler::restrictPreferSlot3(HexagonPacketSummary const &Summary,
+ const bool DoShuffle) {
// flag if an instruction requires to be in slot 3
const bool HasOnlySlot3 = llvm::any_of(insts(), [&](HexagonInstr const &I) {
return (I.Core.getUnits() == Slot3Mask);
});
- const bool NeedsPrefSlot3Shuffle =
- (Summary.branchInsts.size() <= 1 && !HasOnlySlot3 &&
- Summary.pSlot3Cnt == 1 && Summary.PrefSlot3Inst);
+ const bool NeedsPrefSlot3Shuffle = Summary.branchInsts.size() <= 1 &&
+ !HasOnlySlot3 && Summary.pSlot3Cnt == 1 &&
+ Summary.PrefSlot3Inst && DoShuffle;
if (!NeedsPrefSlot3Shuffle)
return;
@@ -590,9 +607,9 @@ void HexagonShuffler::restrictPreferSlot3(HexagonPacketSummary const &Summary) {
}
/// Check that the packet is legal and enforce relative insn order.
-bool HexagonShuffler::check() {
+bool HexagonShuffler::check(const bool RequireShuffle) {
const HexagonPacketSummary Summary = GetPacketSummary();
- if (!applySlotRestrictions(Summary))
+ if (!applySlotRestrictions(Summary, RequireShuffle))
return false;
if (!ValidPacketMemoryOps(Summary)) {
@@ -600,13 +617,14 @@ bool HexagonShuffler::check() {
return false;
}
- ValidResourceUsage(Summary);
+ if (RequireShuffle)
+ ValidResourceUsage(Summary);
return !CheckFailure;
}
llvm::Optional<HexagonShuffler::HexagonPacket>
-HexagonShuffler::tryAuction(HexagonPacketSummary const &Summary) const {
+HexagonShuffler::tryAuction(HexagonPacketSummary const &Summary) {
HexagonPacket PacketResult = Packet;
HexagonUnitAuction AuctionCore(Summary.ReservedSlotMask);
llvm::stable_sort(PacketResult, HexagonInstr::lessCore);
@@ -635,13 +653,13 @@ bool HexagonShuffler::shuffle() {
if (size() > HEXAGON_PACKET_SIZE) {
    // Ignore a packet with more than what a packet can hold
// or with compound or duplex insns for now.
- reportError(Twine("invalid instruction packet"));
+ reportError("invalid instruction packet");
return false;
}
// Check and prepare packet.
- bool Ok = true;
- if (size() > 1 && (Ok = check()))
+ bool Ok = check();
+ if (size() > 1 && Ok)
// Reorder the handles for each slot.
for (unsigned nSlot = 0, emptySlots = 0; nSlot < HEXAGON_PACKET_SIZE;
++nSlot) {
@@ -684,6 +702,32 @@ bool HexagonShuffler::shuffle() {
return Ok;
}
+void HexagonShuffler::reportResourceError(HexagonPacketSummary const &Summary, StringRef Err) {
+ if (ReportErrors)
+ reportResourceUsage(Summary);
+ reportError(Twine("invalid instruction packet: ") + Err);
+}
+
+
+void HexagonShuffler::reportResourceUsage(HexagonPacketSummary const &Summary) {
+ auto SM = Context.getSourceManager();
+ if (SM) {
+ for (HexagonInstr const &I : insts()) {
+ const unsigned Units = I.Core.getUnits();
+
+ if (HexagonMCInstrInfo::requiresSlot(STI, *I.ID)) {
+ const std::string UnitsText = Units ? SlotMaskToText(Units) : "<None>";
+ SM->PrintMessage(I.ID->getLoc(), SourceMgr::DK_Note,
+ Twine("Instruction can utilize slots: ") +
+ UnitsText);
+ }
+ else if (!HexagonMCInstrInfo::isImmext(*I.ID))
+ SM->PrintMessage(I.ID->getLoc(), SourceMgr::DK_Note,
+ "Instruction does not require a slot");
+ }
+ }
+}
+
void HexagonShuffler::reportError(Twine const &Msg) {
CheckFailure = true;
if (ReportErrors) {
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.h b/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.h
index 1b4ebc5111db..70992e4c7e81 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.h
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.h
@@ -72,16 +72,6 @@ public:
using UnitsAndLanes = std::pair<unsigned, unsigned>;
private:
- // Available HVX slots.
- enum {
- CVI_NONE = 0,
- CVI_XLANE = 1 << 0,
- CVI_SHIFT = 1 << 1,
- CVI_MPY0 = 1 << 2,
- CVI_MPY1 = 1 << 3,
- CVI_ZW = 1 << 4
- };
-
// Count of adjacent slots that the insn requires to be executed.
unsigned Lanes;
// Flag whether the insn is a load or a store.
@@ -177,21 +167,23 @@ protected:
bool ReportErrors;
bool CheckFailure;
std::vector<std::pair<SMLoc, std::string>> AppliedRestrictions;
- bool applySlotRestrictions(HexagonPacketSummary const &Summary);
+
+ bool applySlotRestrictions(HexagonPacketSummary const &Summary,
+ const bool DoShuffle);
void restrictSlot1AOK(HexagonPacketSummary const &Summary);
void restrictNoSlot1Store(HexagonPacketSummary const &Summary);
void restrictNoSlot1();
bool restrictStoreLoadOrder(HexagonPacketSummary const &Summary);
void restrictBranchOrder(HexagonPacketSummary const &Summary);
- void restrictPreferSlot3(HexagonPacketSummary const &Summary);
+ void restrictPreferSlot3(HexagonPacketSummary const &Summary,
+ const bool DoShuffle);
void permitNonSlot();
- Optional<HexagonPacket> tryAuction(HexagonPacketSummary const &Summary) const;
+ Optional<HexagonPacket> tryAuction(HexagonPacketSummary const &Summary);
HexagonPacketSummary GetPacketSummary();
bool ValidPacketMemoryOps(HexagonPacketSummary const &Summary) const;
bool ValidResourceUsage(HexagonPacketSummary const &Summary);
- bool validPacketInsts() const;
public:
using iterator = HexagonPacket::iterator;
@@ -205,7 +197,7 @@ public:
// Reset to initial state.
void reset();
// Check if the bundle may be validly shuffled.
- bool check();
+ bool check(const bool RequireShuffle = true);
// Reorder the insn handles in the bundle.
bool shuffle();
@@ -242,6 +234,8 @@ public:
// Return the error code for the last check or shuffling of the bundle.
void reportError(Twine const &Msg);
+ void reportResourceError(HexagonPacketSummary const &Summary, StringRef Err);
+ void reportResourceUsage(HexagonPacketSummary const &Summary);
};
} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp b/contrib/llvm-project/llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp
index a994bd7e57a4..660215ca7435 100644
--- a/contrib/llvm-project/llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp
@@ -141,7 +141,7 @@ struct LanaiOperand : public MCParsedAsmOperand {
struct MemOp Mem;
};
- explicit LanaiOperand(KindTy Kind) : MCParsedAsmOperand(), Kind(Kind) {}
+ explicit LanaiOperand(KindTy Kind) : Kind(Kind) {}
public:
// The functions below are used by the autogenerated ASM matcher and hence to
diff --git a/contrib/llvm-project/llvm/lib/Target/Lanai/LanaiISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
index 0d9e63c112fb..010ff80ad42a 100644
--- a/contrib/llvm-project/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
@@ -284,7 +284,7 @@ LanaiTargetLowering::getSingleConstraintMatchWeight(
void LanaiTargetLowering::LowerAsmOperandForConstraint(
SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
SelectionDAG &DAG) const {
- SDValue Result(nullptr, 0);
+ SDValue Result;
// Only support length 1 constraints for now.
if (Constraint.length() > 1)
@@ -511,7 +511,7 @@ SDValue LanaiTargetLowering::LowerCCCArguments(
// the sret argument into rv for the return. Save the argument into
// a virtual register so that we can access it from the return points.
if (MF.getFunction().hasStructRetAttr()) {
- unsigned Reg = LanaiMFI->getSRetReturnReg();
+ Register Reg = LanaiMFI->getSRetReturnReg();
if (!Reg) {
Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i32));
LanaiMFI->setSRetReturnReg(Reg);
@@ -577,7 +577,7 @@ LanaiTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
if (DAG.getMachineFunction().getFunction().hasStructRetAttr()) {
MachineFunction &MF = DAG.getMachineFunction();
LanaiMachineFunctionInfo *LanaiMFI = MF.getInfo<LanaiMachineFunctionInfo>();
- unsigned Reg = LanaiMFI->getSRetReturnReg();
+ Register Reg = LanaiMFI->getSRetReturnReg();
assert(Reg &&
"SRetReturnReg should have been set in LowerFormalArguments().");
SDValue Val =
@@ -1077,7 +1077,7 @@ SDValue LanaiTargetLowering::LowerRETURNADDR(SDValue Op,
// Return the link register, which contains the return address.
// Mark it an implicit live-in.
- unsigned Reg = MF.addLiveIn(TRI->getRARegister(), getRegClassFor(MVT::i32));
+ Register Reg = MF.addLiveIn(TRI->getRARegister(), getRegClassFor(MVT::i32));
return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT);
}
diff --git a/contrib/llvm-project/llvm/lib/Target/Lanai/LanaiMemAluCombiner.cpp b/contrib/llvm-project/llvm/lib/Target/Lanai/LanaiMemAluCombiner.cpp
index 67443b771d3d..ce79bdafc425 100644
--- a/contrib/llvm-project/llvm/lib/Target/Lanai/LanaiMemAluCombiner.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Lanai/LanaiMemAluCombiner.cpp
@@ -412,9 +412,8 @@ bool LanaiMemAluCombiner::runOnMachineFunction(MachineFunction &MF) {
TII = MF.getSubtarget<LanaiSubtarget>().getInstrInfo();
bool Modified = false;
- for (MfIterator MFI = MF.begin(); MFI != MF.end(); ++MFI) {
- Modified |= combineMemAluInBasicBlock(&*MFI);
- }
+ for (MachineBasicBlock &MBB : MF)
+ Modified |= combineMemAluInBasicBlock(&MBB);
return Modified;
}
} // namespace
diff --git a/contrib/llvm-project/llvm/lib/Target/Lanai/LanaiRegisterInfo.cpp b/contrib/llvm-project/llvm/lib/Target/Lanai/LanaiRegisterInfo.cpp
index abe20c8e18cf..03cf10205173 100644
--- a/contrib/llvm-project/llvm/lib/Target/Lanai/LanaiRegisterInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Lanai/LanaiRegisterInfo.cpp
@@ -165,7 +165,7 @@ void LanaiRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
if ((isSPLSOpcode(MI.getOpcode()) && !isInt<10>(Offset)) ||
!isInt<16>(Offset)) {
assert(RS && "Register scavenging must be on");
- unsigned Reg = RS->FindUnusedReg(&Lanai::GPRRegClass);
+ Register Reg = RS->FindUnusedReg(&Lanai::GPRRegClass);
if (!Reg)
Reg = RS->scavengeRegister(&Lanai::GPRRegClass, II, SPAdj);
assert(Reg && "Register scavenger failed");
diff --git a/contrib/llvm-project/llvm/lib/Target/Lanai/LanaiSubtarget.cpp b/contrib/llvm-project/llvm/lib/Target/Lanai/LanaiSubtarget.cpp
index d9d7847a0c5a..37a4843e1bc4 100644
--- a/contrib/llvm-project/llvm/lib/Target/Lanai/LanaiSubtarget.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Lanai/LanaiSubtarget.cpp
@@ -43,4 +43,4 @@ LanaiSubtarget::LanaiSubtarget(const Triple &TargetTriple, StringRef Cpu,
CodeGenOpt::Level /*OptLevel*/)
: LanaiGenSubtargetInfo(TargetTriple, Cpu, /*TuneCPU*/ Cpu, FeatureString),
FrameLowering(initializeSubtargetDependencies(Cpu, FeatureString)),
- InstrInfo(), TLInfo(TM, *this), TSInfo() {}
+ TLInfo(TM, *this) {}
diff --git a/contrib/llvm-project/llvm/lib/Target/Lanai/MCTargetDesc/LanaiInstPrinter.cpp b/contrib/llvm-project/llvm/lib/Target/Lanai/MCTargetDesc/LanaiInstPrinter.cpp
index 7027d18126bb..d8a66bc8a0da 100644
--- a/contrib/llvm-project/llvm/lib/Target/Lanai/MCTargetDesc/LanaiInstPrinter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Lanai/MCTargetDesc/LanaiInstPrinter.cpp
@@ -148,7 +148,7 @@ void LanaiInstPrinter::printInst(const MCInst *MI, uint64_t Address,
void LanaiInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &OS, const char *Modifier) {
- assert((Modifier == 0 || Modifier[0] == 0) && "No modifiers supported");
+ assert((Modifier == nullptr || Modifier[0] == 0) && "No modifiers supported");
const MCOperand &Op = MI->getOperand(OpNo);
if (Op.isReg())
OS << "%" << getRegisterName(Op.getReg());
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/AsmParser/M68kAsmParser.cpp b/contrib/llvm-project/llvm/lib/Target/M68k/AsmParser/M68kAsmParser.cpp
index 4db879c34ad9..dcd581875f60 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/AsmParser/M68kAsmParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/AsmParser/M68kAsmParser.cpp
@@ -1,4 +1,4 @@
-//===---- M68kAsmParser.cpp - Parse M68k assembly to MCInst instructions --===//
+//===-- M68kAsmParser.cpp - Parse M68k assembly to MCInst instructions ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/Disassembler/M68kDisassembler.cpp b/contrib/llvm-project/llvm/lib/Target/M68k/Disassembler/M68kDisassembler.cpp
index a08ffa787095..a565ff4e004d 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/Disassembler/M68kDisassembler.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/Disassembler/M68kDisassembler.cpp
@@ -1,4 +1,4 @@
-//===- M68kDisassembler.cpp - Disassembler for M68k -------------*- C++ -*-===//
+//===-- M68kDisassembler.cpp - Disassembler for M68k ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kCallLowering.cpp b/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kCallLowering.cpp
index 9cd959012e6f..b3d17184f1fe 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kCallLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kCallLowering.cpp
@@ -1,4 +1,4 @@
-//===-- M68kCallLowering.cpp - Call lowering -------------------*- C++ -*-===//
+//===-- M68kCallLowering.cpp - Call lowering --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kCallLowering.h b/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kCallLowering.h
index 47cdefdba100..24212e6dd9c6 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kCallLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kCallLowering.h
@@ -1,4 +1,4 @@
-//===-- M68kCallLowering.h - Call lowering -------------------*- C++ -*-===//
+//===-- M68kCallLowering.h - Call lowering ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kInstructionSelector.cpp b/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kInstructionSelector.cpp
index 9ac4ab9a5ba1..a627eccd110d 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kInstructionSelector.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kInstructionSelector.cpp
@@ -1,4 +1,4 @@
-//===- M68kInstructionSelector.cpp ------------------------------*- C++ -*-===//
+//===-- M68kInstructionSelector.cpp -----------------------------*- C++ -*-===//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kLegalizerInfo.cpp b/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kLegalizerInfo.cpp
index bcbe62816beb..860c0ce29326 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kLegalizerInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kLegalizerInfo.cpp
@@ -1,4 +1,4 @@
-//===-- M68kLegalizerInfo.cpp ----------------------------------*- C++ -*-===//
+//===-- M68kLegalizerInfo.cpp -----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kLegalizerInfo.h b/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kLegalizerInfo.h
index 205aa81aedcc..a10401ed1a9a 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kLegalizerInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kLegalizerInfo.h
@@ -1,4 +1,4 @@
-//===- M68kLegalizerInfo --------------------------------------*- C++ -*-==//
+//===-- M68kLegalizerInfo ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kRegisterBankInfo.cpp b/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kRegisterBankInfo.cpp
index 5c0f5dae8e37..b6ed6ab28a5d 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kRegisterBankInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kRegisterBankInfo.cpp
@@ -1,4 +1,4 @@
-//===-- M68kRegisterBankInfo.cpp -------------------------------*- C++ -*-===//
+//===-- M68kRegisterBankInfo.cpp --------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kRegisterBankInfo.h b/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kRegisterBankInfo.h
index 853c75df2bb3..6c0b8ca7ba5a 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kRegisterBankInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kRegisterBankInfo.h
@@ -1,4 +1,4 @@
-//===-- M68kRegisterBankInfo.h ---------------------------------*- C++ -*-===//
+//===-- M68kRegisterBankInfo.h ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -42,4 +42,4 @@ public:
getInstrMapping(const MachineInstr &MI) const override;
};
} // end namespace llvm
-#endif
+#endif // LLVM_LIB_TARGET_M68K_GLSEL_M68KREGISTERBANKINFO_H
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kRegisterBanks.td b/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kRegisterBanks.td
index 942677a60e6c..2a00ec065cd4 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kRegisterBanks.td
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/GISel/M68kRegisterBanks.td
@@ -1,4 +1,4 @@
-//===-- M68kRegisterBanks.td - Describe the M68k Banks -------*- tablegen -*-===//
+//===-- M68kRegisterBanks.td - Describe the M68k Banks -----*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68k.h b/contrib/llvm-project/llvm/lib/Target/M68k/M68k.h
index cef40bee7d93..b6069d736deb 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68k.h
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68k.h
@@ -1,4 +1,4 @@
-//===- M68k.h - Top-level interface for M68k representation -*- C++ -*-===//
+//===-- M68k.h - Top-level interface for M68k representation ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -54,4 +54,4 @@ createM68kInstructionSelector(const M68kTargetMachine &, const M68kSubtarget &,
} // namespace llvm
-#endif
+#endif // LLVM_LIB_TARGET_M68K_M68K_H
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68k.td b/contrib/llvm-project/llvm/lib/Target/M68k/M68k.td
index fde491e1b6d5..de7a6c82d110 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68k.td
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68k.td
@@ -1,4 +1,4 @@
-//===-- M68k.td - Motorola 680x0 target definitions ------*- tablegen -*-===//
+//===-- M68k.td - Motorola 680x0 target definitions --------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kAsmPrinter.cpp b/contrib/llvm-project/llvm/lib/Target/M68k/M68kAsmPrinter.cpp
index 08b7153632b4..3bcce9e3ba3b 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kAsmPrinter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kAsmPrinter.cpp
@@ -1,4 +1,4 @@
-//===----- M68kAsmPrinter.cpp - M68k LLVM Assembly Printer -----*- C++ -*-===//
+//===-- M68kAsmPrinter.cpp - M68k LLVM Assembly Printer ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kAsmPrinter.h b/contrib/llvm-project/llvm/lib/Target/M68k/M68kAsmPrinter.h
index dff3bb876336..1a76e3bf4e27 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kAsmPrinter.h
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kAsmPrinter.h
@@ -1,4 +1,4 @@
-//===----- M68kAsmPrinter.h - M68k LLVM Assembly Printer -------- C++ -*--===//
+//===-- M68kAsmPrinter.h - M68k LLVM Assembly Printer -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -66,4 +66,4 @@ public:
};
} // namespace llvm
-#endif
+#endif // LLVM_LIB_TARGET_M68K_M68KASMPRINTER_H
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kCallingConv.h b/contrib/llvm-project/llvm/lib/Target/M68k/M68kCallingConv.h
index 20ffa993897f..6823df5472df 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kCallingConv.h
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kCallingConv.h
@@ -1,4 +1,4 @@
-//===-- M68kCallingConv.h - M68k Custom CC Routines ---------*- C++ -*-===//
+//===-- M68kCallingConv.h - M68k Custom CC Routines -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -74,4 +74,4 @@ inline bool CC_M68k_Any_AssignToReg(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
} // namespace llvm
-#endif
+#endif // LLVM_LIB_TARGET_M68K_M68KCALLINGCONV_H
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kCollapseMOVEMPass.cpp b/contrib/llvm-project/llvm/lib/Target/M68k/M68kCollapseMOVEMPass.cpp
index 4149ae92ffe9..7f0c0dd92dbb 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kCollapseMOVEMPass.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kCollapseMOVEMPass.cpp
@@ -1,4 +1,4 @@
-//===----- M68kCollapseMOVEMPass.cpp - Expand MOVEM pass --------*- C++ -*-===//
+//===-- M68kCollapseMOVEMPass.cpp - Expand MOVEM pass -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kExpandPseudo.cpp b/contrib/llvm-project/llvm/lib/Target/M68k/M68kExpandPseudo.cpp
index 6a4aeaab518a..acfa30f28c2b 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kExpandPseudo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kExpandPseudo.cpp
@@ -1,4 +1,4 @@
-//===--M68kExpandPseudo.cpp - Expand pseudo instructions ------*- C++ -*-===//
+//===-- M68kExpandPseudo.cpp - Expand pseudo instructions -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kFrameLowering.cpp b/contrib/llvm-project/llvm/lib/Target/M68k/M68kFrameLowering.cpp
index 66ea6ae38f43..643e156f9446 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kFrameLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kFrameLowering.cpp
@@ -1,4 +1,4 @@
-//===-- M68kFrameLowering.cpp - M68k Frame Information ------*- C++ -*-===//
+//===-- M68kFrameLowering.cpp - M68k Frame Information ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -157,7 +157,7 @@ static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
MachineOperand &MO = MBBI->getOperand(i);
if (!MO.isReg() || MO.isDef())
continue;
- unsigned Reg = MO.getReg();
+ Register Reg = MO.getReg();
if (!Reg)
continue;
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
@@ -463,7 +463,7 @@ void M68kFrameLowering::emitPrologueCalleeSavedFrameMoves(
// Calculate offsets.
for (const auto &I : CSI) {
int64_t Offset = MFI.getObjectOffset(I.getFrameIdx());
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
BuildCFI(MBB, MBBI, DL,
@@ -485,7 +485,7 @@ void M68kFrameLowering::emitPrologue(MachineFunction &MF,
uint64_t StackSize = MFI.getStackSize(); // Number of bytes to allocate.
bool HasFP = hasFP(MF);
bool NeedsDwarfCFI = MMI.hasDebugInfo() || Fn.needsUnwindTableEntry();
- unsigned FramePtr = TRI->getFrameRegister(MF);
+ Register FramePtr = TRI->getFrameRegister(MF);
const unsigned MachineFramePtr = FramePtr;
unsigned BasePtr = TRI->getBaseRegister();
@@ -683,7 +683,7 @@ void M68kFrameLowering::emitEpilogue(MachineFunction &MF,
DebugLoc DL;
if (MBBI != MBB.end())
DL = MBBI->getDebugLoc();
- unsigned FramePtr = TRI->getFrameRegister(MF);
+ Register FramePtr = TRI->getFrameRegister(MF);
unsigned MachineFramePtr = FramePtr;
// Get the number of bytes to allocate from the FrameInfo.
@@ -819,7 +819,7 @@ bool M68kFrameLowering::assignCalleeSavedSpillSlots(
// Since emitPrologue and emitEpilogue will handle spilling and restoring of
// the frame register, we can delete it from CSI list and not have to worry
// about avoiding it later.
- unsigned FPReg = TRI->getFrameRegister(MF);
+ Register FPReg = TRI->getFrameRegister(MF);
for (unsigned i = 0, e = CSI.size(); i < e; ++i) {
if (TRI->regsOverlap(CSI[i].getReg(), FPReg)) {
CSI.erase(CSI.begin() + i);
@@ -842,7 +842,7 @@ bool M68kFrameLowering::spillCalleeSavedRegisters(
unsigned Mask = 0;
for (const auto &Info : CSI) {
FI = std::max(FI, Info.getFrameIdx());
- unsigned Reg = Info.getReg();
+ Register Reg = Info.getReg();
unsigned Shift = MRI.getSpillRegisterOrder(Reg);
Mask |= 1 << Shift;
}
@@ -856,7 +856,7 @@ bool M68kFrameLowering::spillCalleeSavedRegisters(
const MachineFunction &MF = *MBB.getParent();
const MachineRegisterInfo &RI = MF.getRegInfo();
for (const auto &Info : CSI) {
- unsigned Reg = Info.getReg();
+ Register Reg = Info.getReg();
bool IsLiveIn = RI.isLiveIn(Reg);
if (!IsLiveIn)
MBB.addLiveIn(Reg);
@@ -877,7 +877,7 @@ bool M68kFrameLowering::restoreCalleeSavedRegisters(
unsigned Mask = 0;
for (const auto &Info : CSI) {
FI = std::max(FI, Info.getFrameIdx());
- unsigned Reg = Info.getReg();
+ Register Reg = Info.getReg();
unsigned Shift = MRI.getSpillRegisterOrder(Reg);
Mask |= 1 << Shift;
}
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kFrameLowering.h b/contrib/llvm-project/llvm/lib/Target/M68k/M68kFrameLowering.h
index 0eba9e08d858..a5349377232e 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kFrameLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kFrameLowering.h
@@ -1,4 +1,4 @@
-//===- M68kFrameLowering.h - Define frame lowering for M68k -*- C++ -*-===//
+//===-- M68kFrameLowering.h - Define frame lowering for M68k ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -169,4 +169,4 @@ public:
};
} // namespace llvm
-#endif
+#endif // LLVM_LIB_TARGET_M68K_M68KFRAMELOWERING_H
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kISelDAGToDAG.cpp b/contrib/llvm-project/llvm/lib/Target/M68k/M68kISelDAGToDAG.cpp
index 0076c2647df3..9ef97b96ea9a 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kISelDAGToDAG.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kISelDAGToDAG.cpp
@@ -1,4 +1,4 @@
-//===- M68kISelDAGToDAG.cpp - M68k Dag to Dag Inst Selector -*- C++ -*-===//
+//===-- M68kISelDAGToDAG.cpp - M68k Dag to Dag Inst Selector ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/M68k/M68kISelLowering.cpp
index 79b395f8f984..dba190a2ebc0 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kISelLowering.cpp
@@ -1,4 +1,4 @@
-//===-- M68kISelLowering.cpp - M68k DAG Lowering Impl ------*- C++ -*--===//
+//===-- M68kISelLowering.cpp - M68k DAG Lowering Impl -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -268,7 +268,7 @@ static bool MatchingStackOffset(SDValue Arg, unsigned Offset,
int FI = INT_MAX;
if (Arg.getOpcode() == ISD::CopyFromReg) {
- unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
+ Register VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
if (!Register::isVirtualRegister(VR))
return false;
MachineInstr *Def = MRI->getVRegDef(VR);
@@ -900,7 +900,7 @@ SDValue M68kTargetLowering::LowerFormalArguments(
else
llvm_unreachable("Unknown argument type!");
- unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
+ Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);
// If this is an 8 or 16-bit value, it is really passed promoted to 32
@@ -1276,7 +1276,7 @@ bool M68kTargetLowering::IsEligibleForTailCallOptimization(
CCValAssign &VA = ArgLocs[i];
if (!VA.isRegLoc())
continue;
- unsigned Reg = VA.getLocReg();
+ Register Reg = VA.getLocReg();
switch (Reg) {
default:
break;
@@ -1409,32 +1409,32 @@ SDValue M68kTargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Arith, SetCC);
}
-/// Create a BT (Bit Test) node - Test bit \p BitNo in \p Src and set condition
-/// according to equal/not-equal condition code \p CC.
+/// Create a BTST (Bit Test) node - Test bit \p BitNo in \p Src and set
+/// condition according to equal/not-equal condition code \p CC.
static SDValue getBitTestCondition(SDValue Src, SDValue BitNo, ISD::CondCode CC,
const SDLoc &DL, SelectionDAG &DAG) {
- // If Src is i8, promote it to i32 with any_extend. There is no i8 BT
+ // If Src is i8, promote it to i32 with any_extend. There is no i8 BTST
// instruction. Since the shift amount is in-range-or-undefined, we know
// that doing a bittest on the i32 value is ok.
if (Src.getValueType() == MVT::i8 || Src.getValueType() == MVT::i16)
Src = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Src);
// If the operand types disagree, extend the shift amount to match. Since
- // BT ignores high bits (like shifts) we can use anyextend.
+ // BTST ignores high bits (like shifts) we can use anyextend.
if (Src.getValueType() != BitNo.getValueType())
BitNo = DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(), BitNo);
- SDValue BT = DAG.getNode(M68kISD::BT, DL, MVT::i32, Src, BitNo);
+ SDValue BTST = DAG.getNode(M68kISD::BTST, DL, MVT::i32, Src, BitNo);
// NOTE BTST sets CCR.Z flag
M68k::CondCode Cond = CC == ISD::SETEQ ? M68k::COND_NE : M68k::COND_EQ;
return DAG.getNode(M68kISD::SETCC, DL, MVT::i8,
- DAG.getConstant(Cond, DL, MVT::i8), BT);
+ DAG.getConstant(Cond, DL, MVT::i8), BTST);
}
-/// Result of 'and' is compared against zero. Change to a BT node if possible.
-static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC, const SDLoc &DL,
- SelectionDAG &DAG) {
+/// Result of 'and' is compared against zero. Change to a BTST node if possible.
+static SDValue LowerAndToBTST(SDValue And, ISD::CondCode CC, const SDLoc &DL,
+ SelectionDAG &DAG) {
SDValue Op0 = And.getOperand(0);
SDValue Op1 = And.getOperand(1);
if (Op0.getOpcode() == ISD::TRUNCATE)
@@ -1468,7 +1468,7 @@ static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC, const SDLoc &DL,
RHS = AndLHS.getOperand(1);
}
- // Use BT if the immediate can't be encoded in a TEST instruction.
+ // Use BTST if the immediate can't be encoded in a TEST instruction.
if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
LHS = AndLHS;
RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), DL, LHS.getValueType());
@@ -1592,8 +1592,8 @@ static unsigned TranslateM68kCC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
}
// Convert (truncate (srl X, N) to i1) to (bt X, N)
-static SDValue LowerTruncateToBT(SDValue Op, ISD::CondCode CC, const SDLoc &DL,
- SelectionDAG &DAG) {
+static SDValue LowerTruncateToBTST(SDValue Op, ISD::CondCode CC,
+ const SDLoc &DL, SelectionDAG &DAG) {
assert(Op.getOpcode() == ISD::TRUNCATE && Op.getValueType() == MVT::i1 &&
"Expected TRUNCATE to i1 node");
@@ -1889,14 +1889,14 @@ SDValue M68kTargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned M68kCC,
}
/// Result of 'and' or 'trunc to i1' is compared against zero.
-/// Change to a BT node if possible.
-SDValue M68kTargetLowering::LowerToBT(SDValue Op, ISD::CondCode CC,
- const SDLoc &DL,
- SelectionDAG &DAG) const {
+/// Change to a BTST node if possible.
+SDValue M68kTargetLowering::LowerToBTST(SDValue Op, ISD::CondCode CC,
+ const SDLoc &DL,
+ SelectionDAG &DAG) const {
if (Op.getOpcode() == ISD::AND)
- return LowerAndToBT(Op, CC, DL, DAG);
+ return LowerAndToBTST(Op, CC, DL, DAG);
if (Op.getOpcode() == ISD::TRUNCATE && Op.getValueType() == MVT::i1)
- return LowerTruncateToBT(Op, CC, DL, DAG);
+ return LowerTruncateToBTST(Op, CC, DL, DAG);
return SDValue();
}
@@ -1909,14 +1909,14 @@ SDValue M68kTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
SDLoc DL(Op);
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
- // Optimize to BT if possible.
- // Lower (X & (1 << N)) == 0 to BT(X, N).
- // Lower ((X >>u N) & 1) != 0 to BT(X, N).
- // Lower ((X >>s N) & 1) != 0 to BT(X, N).
- // Lower (trunc (X >> N) to i1) to BT(X, N).
+ // Optimize to BTST if possible.
+ // Lower (X & (1 << N)) == 0 to BTST(X, N).
+ // Lower ((X >>u N) & 1) != 0 to BTST(X, N).
+ // Lower ((X >>s N) & 1) != 0 to BTST(X, N).
+ // Lower (trunc (X >> N) to i1) to BTST(X, N).
if (Op0.hasOneUse() && isNullConstant(Op1) &&
(CC == ISD::SETEQ || CC == ISD::SETNE)) {
- if (SDValue NewSetCC = LowerToBT(Op0, CC, DL, DAG)) {
+ if (SDValue NewSetCC = LowerToBTST(Op0, CC, DL, DAG)) {
if (VT == MVT::i1)
return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, NewSetCC);
return NewSetCC;
@@ -2099,7 +2099,7 @@ SDValue M68kTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
bool IllegalFPCMov = false;
- if ((isM68kLogicalCmp(Cmp) && !IllegalFPCMov) || Opc == M68kISD::BT) {
+ if ((isM68kLogicalCmp(Cmp) && !IllegalFPCMov) || Opc == M68kISD::BTST) {
Cond = Cmp;
addTest = false;
}
@@ -2163,7 +2163,7 @@ SDValue M68kTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
// We know the result of AND is compared against zero. Try to match
// it to BT.
if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
- if (SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG)) {
+ if (SDValue NewSetCC = LowerToBTST(Cond, ISD::SETNE, DL, DAG)) {
CC = NewSetCC.getOperand(0);
Cond = NewSetCC.getOperand(1);
addTest = false;
@@ -2282,7 +2282,7 @@ SDValue M68kTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
SDValue Cmp = Cond.getOperand(1);
unsigned Opc = Cmp.getOpcode();
- if (isM68kLogicalCmp(Cmp) || Opc == M68kISD::BT) {
+ if (isM68kLogicalCmp(Cmp) || Opc == M68kISD::BTST) {
Cond = Cmp;
AddTest = false;
} else {
@@ -2427,7 +2427,7 @@ SDValue M68kTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
// We know the result is compared against zero. Try to match it to BT.
if (Cond.hasOneUse()) {
- if (SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG)) {
+ if (SDValue NewSetCC = LowerToBTST(Cond, ISD::SETNE, DL, DAG)) {
CC = NewSetCC.getOperand(0);
Cond = NewSetCC.getOperand(1);
AddTest = false;
@@ -3101,9 +3101,9 @@ M68kTargetLowering::EmitLoweredSelect(MachineInstr &MI,
// destination registers, and the registers that went into the PHI.
for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
- unsigned DestReg = MIIt->getOperand(0).getReg();
- unsigned Op1Reg = MIIt->getOperand(1).getReg();
- unsigned Op2Reg = MIIt->getOperand(2).getReg();
+ Register DestReg = MIIt->getOperand(0).getReg();
+ Register Op1Reg = MIIt->getOperand(1).getReg();
+ Register Op2Reg = MIIt->getOperand(2).getReg();
// If this CMOV we are generating is the opposite condition from
// the jump we generated, then we have to swap the operands for the
@@ -3211,13 +3211,13 @@ SDValue M68kTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
auto &MRI = MF.getRegInfo();
auto SPTy = getPointerTy(DAG.getDataLayout());
auto *ARClass = getRegClassFor(SPTy);
- unsigned Vreg = MRI.createVirtualRegister(ARClass);
+ Register Vreg = MRI.createVirtualRegister(ARClass);
Chain = DAG.getCopyToReg(Chain, DL, Vreg, Size);
Result = DAG.getNode(M68kISD::SEG_ALLOCA, DL, SPTy, Chain,
DAG.getRegister(Vreg, SPTy));
} else {
auto &TLI = DAG.getTargetLoweringInfo();
- unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
+ Register SPReg = TLI.getStackPointerRegisterToSaveRestore();
assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
" not tell us which reg is the stack pointer!");
@@ -3391,8 +3391,8 @@ const char *M68kTargetLowering::getTargetNodeName(unsigned Opcode) const {
return "M68kISD::AND";
case M68kISD::CMP:
return "M68kISD::CMP";
- case M68kISD::BT:
- return "M68kISD::BT";
+ case M68kISD::BTST:
+ return "M68kISD::BTST";
case M68kISD::SELECT:
return "M68kISD::SELECT";
case M68kISD::CMOV:
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kISelLowering.h b/contrib/llvm-project/llvm/lib/Target/M68k/M68kISelLowering.h
index 6a5a40a8815b..9375a99962eb 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kISelLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kISelLowering.h
@@ -1,4 +1,4 @@
-//===-- M68kISelLowering.h - M68k DAG Lowering Interface ----*- C++ -*-===//
+//===-- M68kISelLowering.h - M68k DAG Lowering Interface --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -43,7 +43,7 @@ enum NodeType {
CMP,
/// M68k bit-test instructions.
- BT,
+ BTST,
/// M68k Select
SELECT,
@@ -204,8 +204,8 @@ private:
const CCValAssign &VA, ISD::ArgFlagsTy Flags) const;
SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerToBT(SDValue And, ISD::CondCode CC, const SDLoc &DL,
- SelectionDAG &DAG) const;
+ SDValue LowerToBTST(SDValue And, ISD::CondCode CC, const SDLoc &DL,
+ SelectionDAG &DAG) const;
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
@@ -276,4 +276,4 @@ private:
};
} // namespace llvm
-#endif // M68kISELLOWERING_H
+#endif // LLVM_LIB_TARGET_M68K_M68KISELLOWERING_H
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrArithmetic.td b/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrArithmetic.td
index b2c05365d30b..ef50de576641 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrArithmetic.td
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrArithmetic.td
@@ -150,8 +150,7 @@ let mayLoad = 1, mayStore = 1 in {
// FIXME MxBiArOp_FMR/FMI cannot consume CCR from MxAdd/MxSub which leads for
// MxAdd to survive the match and subsequent mismatch.
-class MxBiArOp_FMR<string MN, SDNode NODE, MxType TYPE,
- MxOperand MEMOpd, ComplexPattern MEMPat,
+class MxBiArOp_FMR<string MN, MxType TYPE, MxOperand MEMOpd,
bits<4> CMD, MxEncEA EA, MxEncExt EXT>
: MxInst<(outs), (ins MEMOpd:$dst, TYPE.ROp:$opd),
MN#"."#TYPE.Prefix#"\t$opd, $dst",
@@ -160,8 +159,7 @@ class MxBiArOp_FMR<string MN, SDNode NODE, MxType TYPE,
!cast<MxEncOpMode>("MxOpMode"#TYPE.Size#"EA"#TYPE.RLet),
MxBeadDReg<1>, EA, EXT>>;
-class MxBiArOp_FMI<string MN, SDNode NODE, MxType TYPE,
- MxOperand MEMOpd, ComplexPattern MEMPat,
+class MxBiArOp_FMI<string MN, MxType TYPE, MxOperand MEMOpd,
bits<4> CMD, MxEncEA MEMEA, MxEncExt MEMExt>
: MxInst<(outs), (ins MEMOpd:$dst, TYPE.IOp:$opd),
MN#"."#TYPE.Prefix#"\t$opd, $dst",
@@ -218,47 +216,47 @@ multiclass MxBiArOp_DF<string MN, SDNode NODE, bit isComm,
def NAME#"32di" : MxBiArOp_RFRI_xEA<MN, NODE, MxType32d, CMD>;
// op $reg, $mem
- def NAME#"8pd" : MxBiArOp_FMR<MN, NODE, MxType8d, MxType8.POp, MxType8.PPat,
+ def NAME#"8pd" : MxBiArOp_FMR<MN, MxType8d, MxType8.POp,
CMD, MxEncEAp_0, MxExtI16_0>;
- def NAME#"16pd" : MxBiArOp_FMR<MN, NODE, MxType16d, MxType16.POp, MxType16.PPat,
+ def NAME#"16pd" : MxBiArOp_FMR<MN, MxType16d, MxType16.POp,
CMD, MxEncEAp_0, MxExtI16_0>;
- def NAME#"32pd" : MxBiArOp_FMR<MN, NODE, MxType32d, MxType32.POp, MxType32.PPat,
+ def NAME#"32pd" : MxBiArOp_FMR<MN, MxType32d, MxType32.POp,
CMD, MxEncEAp_0, MxExtI16_0>;
- def NAME#"8fd" : MxBiArOp_FMR<MN, NODE, MxType8d, MxType8.FOp, MxType8.FPat,
+ def NAME#"8fd" : MxBiArOp_FMR<MN, MxType8d, MxType8.FOp,
CMD, MxEncEAf_0, MxExtBrief_0>;
- def NAME#"16fd" : MxBiArOp_FMR<MN, NODE, MxType16d, MxType16.FOp, MxType16.FPat,
+ def NAME#"16fd" : MxBiArOp_FMR<MN, MxType16d, MxType16.FOp,
CMD, MxEncEAf_0, MxExtBrief_0>;
- def NAME#"32fd" : MxBiArOp_FMR<MN, NODE, MxType32d, MxType32.FOp, MxType32.FPat,
+ def NAME#"32fd" : MxBiArOp_FMR<MN, MxType32d, MxType32.FOp,
CMD, MxEncEAf_0, MxExtBrief_0>;
- def NAME#"8jd" : MxBiArOp_FMR<MN, NODE, MxType8d, MxType8.JOp, MxType8.JPat,
+ def NAME#"8jd" : MxBiArOp_FMR<MN, MxType8d, MxType8.JOp,
CMD, MxEncEAj_0, MxExtEmpty>;
- def NAME#"16jd" : MxBiArOp_FMR<MN, NODE, MxType16d, MxType16.JOp, MxType16.JPat,
+ def NAME#"16jd" : MxBiArOp_FMR<MN, MxType16d, MxType16.JOp,
CMD, MxEncEAj_0, MxExtEmpty>;
- def NAME#"32jd" : MxBiArOp_FMR<MN, NODE, MxType32d, MxType32.JOp, MxType32.JPat,
+ def NAME#"32jd" : MxBiArOp_FMR<MN, MxType32d, MxType32.JOp,
CMD, MxEncEAj_0, MxExtEmpty>;
// op $imm, $mem
- def NAME#"8pi" : MxBiArOp_FMI<MN, NODE, MxType8, MxType8.POp, MxType8.PPat,
+ def NAME#"8pi" : MxBiArOp_FMI<MN, MxType8, MxType8.POp,
CMDI, MxEncEAp_0, MxExtI16_0>;
- def NAME#"16pi" : MxBiArOp_FMI<MN, NODE, MxType16, MxType16.POp, MxType16.PPat,
+ def NAME#"16pi" : MxBiArOp_FMI<MN, MxType16, MxType16.POp,
CMDI, MxEncEAp_0, MxExtI16_0>;
- def NAME#"32pi" : MxBiArOp_FMI<MN, NODE, MxType32, MxType32.POp, MxType32.PPat,
+ def NAME#"32pi" : MxBiArOp_FMI<MN, MxType32, MxType32.POp,
CMDI, MxEncEAp_0, MxExtI16_0>;
- def NAME#"8fi" : MxBiArOp_FMI<MN, NODE, MxType8, MxType8.FOp, MxType8.FPat,
+ def NAME#"8fi" : MxBiArOp_FMI<MN, MxType8, MxType8.FOp,
CMDI, MxEncEAf_0, MxExtBrief_0>;
- def NAME#"16fi" : MxBiArOp_FMI<MN, NODE, MxType16, MxType16.FOp, MxType16.FPat,
+ def NAME#"16fi" : MxBiArOp_FMI<MN, MxType16, MxType16.FOp,
CMDI, MxEncEAf_0, MxExtBrief_0>;
- def NAME#"32fi" : MxBiArOp_FMI<MN, NODE, MxType32, MxType32.FOp, MxType32.FPat,
+ def NAME#"32fi" : MxBiArOp_FMI<MN, MxType32, MxType32.FOp,
CMDI, MxEncEAf_0, MxExtBrief_0>;
- def NAME#"8ji" : MxBiArOp_FMI<MN, NODE, MxType8, MxType8.JOp, MxType8.JPat,
+ def NAME#"8ji" : MxBiArOp_FMI<MN, MxType8, MxType8.JOp,
CMDI, MxEncEAj_0, MxExtEmpty>;
- def NAME#"16ji" : MxBiArOp_FMI<MN, NODE, MxType16, MxType16.JOp, MxType16.JPat,
+ def NAME#"16ji" : MxBiArOp_FMI<MN, MxType16, MxType16.JOp,
CMDI, MxEncEAj_0, MxExtEmpty>;
- def NAME#"32ji" : MxBiArOp_FMI<MN, NODE, MxType32, MxType32.JOp, MxType32.JPat,
+ def NAME#"32ji" : MxBiArOp_FMI<MN, MxType32, MxType32.JOp,
CMDI, MxEncEAj_0, MxExtEmpty>;
def NAME#"16dr" : MxBiArOp_RFRR_xEA<MN, NODE, MxType16d, MxType16r,
@@ -284,8 +282,7 @@ multiclass MxBiArOp_DF<string MN, SDNode NODE, bit isComm,
// operations do not produce CCR we should not match them against Mx nodes that
// produce it.
let Pattern = [(null_frag)] in
-multiclass MxBiArOp_AF<string MN, SDNode NODE, bit isComm,
- bits<4> CMD, bits<4> CMDI> {
+multiclass MxBiArOp_AF<string MN, SDNode NODE, bits<4> CMD> {
def NAME#"32ak" : MxBiArOp_RFRM<MN, NODE, MxType32a, MxType32.KOp, MxType32.KPat,
CMD, MxEncEAk, MxExtBrief_2>;
@@ -307,9 +304,9 @@ multiclass MxBiArOp_AF<string MN, SDNode NODE, bit isComm,
// NOTE These naturally produce CCR
defm ADD : MxBiArOp_DF<"add", MxAdd, 1, 0xD, 0x6>;
-defm ADD : MxBiArOp_AF<"adda", MxAdd, 1, 0xD, 0x6>;
+defm ADD : MxBiArOp_AF<"adda", MxAdd, 0xD>;
defm SUB : MxBiArOp_DF<"sub", MxSub, 0, 0x9, 0x4>;
-defm SUB : MxBiArOp_AF<"suba", MxSub, 0, 0x9, 0x4>;
+defm SUB : MxBiArOp_AF<"suba", MxSub, 0x9>;
let Uses = [CCR], Defs = [CCR] in {
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrBits.td b/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrBits.td
index d97ca50f74a9..d610bce5c277 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrBits.td
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrBits.td
@@ -1,4 +1,4 @@
-//===------- M68kInstrBits.td - Bit Manipulation Instrs --*- tablegen -*-===//
+//===-- M68kInstrBits.td - Bit Manipulation Instrs ---------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -12,7 +12,7 @@
///
/// Machine:
///
-/// BCNG [ ] BCLR [ ] BSET [ ] BTST [~]
+/// BCHG [ ] BCLR [ ] BSET [ ] BTST [~]
///
/// Map:
///
@@ -51,24 +51,24 @@ class MxBTSTEnc_I<MxBead8Imm IMM, MxEncEA EA, MxEncExt EXT>
let Defs = [CCR] in {
class MxBTST_RR<MxType TYPE>
: MxInst<(outs), (ins TYPE.ROp:$dst, TYPE.ROp:$bitno), "btst\t$bitno, $dst",
- [(set CCR, (MxBt TYPE.VT:$dst, TYPE.VT:$bitno))],
+ [(set CCR, (MxBtst TYPE.VT:$dst, TYPE.VT:$bitno))],
MxBTSTEnc_R<MxBeadDReg<1>, MxEncEAd_0, MxExtEmpty>>;
class MxBTST_RI<MxType TYPE>
: MxInst<(outs), (ins TYPE.ROp:$dst, TYPE.IOp:$bitno), "btst\t$bitno, $dst",
- [(set CCR, (MxBt TYPE.VT:$dst, TYPE.IPat:$bitno))],
+ [(set CCR, (MxBtst TYPE.VT:$dst, TYPE.IPat:$bitno))],
MxBTSTEnc_I<MxBead8Imm<1>, MxEncEAd_0, MxExtEmpty>>;
class MxBTST_MR<MxType TYPE, MxOperand MEMOpd, ComplexPattern MEMPat,
MxEncEA EA, MxEncExt EXT>
: MxInst<(outs), (ins MEMOpd:$dst, TYPE.ROp:$bitno), "btst\t$bitno, $dst",
- [(set CCR, (MxBt (TYPE.Load MEMPat:$dst), TYPE.VT:$bitno))],
+ [(set CCR, (MxBtst (TYPE.Load MEMPat:$dst), TYPE.VT:$bitno))],
MxBTSTEnc_R<MxBeadDReg<1>, EA, EXT>>;
class MxBTST_MI<MxType TYPE, MxOperand MEMOpd, ComplexPattern MEMPat,
MxEncEA EA, MxEncExt EXT>
: MxInst<(outs), (ins MEMOpd:$dst, TYPE.IOp:$bitno), "btst\t$bitno, $dst",
- [(set CCR, (MxBt (TYPE.Load MEMPat:$dst), TYPE.IPat:$bitno))],
+ [(set CCR, (MxBtst (TYPE.Load MEMPat:$dst), TYPE.IPat:$bitno))],
MxBTSTEnc_I<MxBead8Imm<1>, EA, EXT>>;
} // Defs = [CCR]
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrBuilder.h b/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrBuilder.h
index e32b1b047a2b..e85bd270287c 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrBuilder.h
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrBuilder.h
@@ -1,4 +1,4 @@
-//===-- M68kInstrBuilder.h - Functions to build M68k insts --*- C++ -*-===//
+//===-- M68kInstrBuilder.h - Functions to build M68k insts ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -91,4 +91,4 @@ addMemOperand(const MachineInstrBuilder &MIB, int FI, int Offset = 0) {
} // end namespace M68k
} // end namespace llvm
-#endif // LLVM_LIB_TARGET_M6800_M6800INSTRBUILDER_H
+#endif // LLVM_LIB_TARGET_M68K_M68KINSTRBUILDER_H
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrCompiler.td b/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrCompiler.td
index 8fb331dec0e9..2ecf5ca0e6d0 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrCompiler.td
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrCompiler.td
@@ -1,4 +1,4 @@
-//===-- M68kInstrCompiler.td - Pseudos and Patterns ------*- tablegen -*-===//
+//===-- M68kInstrCompiler.td - Pseudos and Patterns --------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrControl.td b/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrControl.td
index 9f87833ab0e2..be9045b6e0d2 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrControl.td
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrControl.td
@@ -1,4 +1,4 @@
-//===-- M68kInstrControl.td - Control Flow Instructions --*- tablegen -*-===//
+//===-- M68kInstrControl.td - Control Flow Instructions ----*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrData.td b/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrData.td
index 40b9e4a2a7fa..3dd5d9f8c7ac 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrData.td
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrData.td
@@ -1,4 +1,4 @@
-//== M68kInstrData.td - M68k Data Movement Instructions -*- tablegen --===//
+//===-- M68kInstrData.td - M68k Data Movement Instructions -*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrFormats.td b/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrFormats.td
index 99b7ffd17971..7e0c96a5b1f6 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrFormats.td
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrFormats.td
@@ -1,4 +1,4 @@
-//=== M68kInstrFormats.td - M68k Instruction Formats ---*- tablegen -*-===//
+//===-- M68kInstrFormats.td - M68k Instruction Formats -----*- tablegen -*-===//
// The LLVM Compiler Infrastructure
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrInfo.cpp b/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrInfo.cpp
index 639bcd455687..105c816f9885 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrInfo.cpp
@@ -1,4 +1,4 @@
-//===-- M68kInstrInfo.cpp - M68k Instruction Information ----*- C++ -*-===//
+//===-- M68kInstrInfo.cpp - M68k Instruction Information --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -348,8 +348,8 @@ void M68kInstrInfo::AddZExt(MachineBasicBlock &MBB,
bool M68kInstrInfo::ExpandMOVX_RR(MachineInstrBuilder &MIB, MVT MVTDst,
MVT MVTSrc) const {
unsigned Move = MVTDst == MVT::i16 ? M68k::MOV16rr : M68k::MOV32rr;
- unsigned Dst = MIB->getOperand(0).getReg();
- unsigned Src = MIB->getOperand(1).getReg();
+ Register Dst = MIB->getOperand(0).getReg();
+ Register Src = MIB->getOperand(1).getReg();
assert(Dst != Src && "You cannot use the same Regs with MOVX_RR");
@@ -394,8 +394,8 @@ bool M68kInstrInfo::ExpandMOVSZX_RR(MachineInstrBuilder &MIB, bool IsSigned,
else // i32
Move = M68k::MOV32rr;
- unsigned Dst = MIB->getOperand(0).getReg();
- unsigned Src = MIB->getOperand(1).getReg();
+ Register Dst = MIB->getOperand(0).getReg();
+ Register Src = MIB->getOperand(1).getReg();
assert(Dst != Src && "You cannot use the same Regs with MOVSX_RR");
@@ -437,7 +437,7 @@ bool M68kInstrInfo::ExpandMOVSZX_RM(MachineInstrBuilder &MIB, bool IsSigned,
MVT MVTSrc) const {
LLVM_DEBUG(dbgs() << "Expand " << *MIB.getInstr() << " to LOAD and ");
- unsigned Dst = MIB->getOperand(0).getReg();
+ Register Dst = MIB->getOperand(0).getReg();
// We need the subreg of Dst to make instruction verifier happy because the
// real machine instruction consumes and produces values of the same size and
@@ -559,7 +559,7 @@ bool M68kInstrInfo::ExpandMOVEM(MachineInstrBuilder &MIB,
static bool Expand2AddrUndef(MachineInstrBuilder &MIB,
const MCInstrDesc &Desc) {
assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
- unsigned Reg = MIB->getOperand(0).getReg();
+ Register Reg = MIB->getOperand(0).getReg();
MIB->setDesc(Desc);
// MachineInstr::addOperand() will insert explicit operands before any
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrInfo.h b/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrInfo.h
index 6aced1487365..84d50c181ead 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrInfo.h
@@ -1,4 +1,4 @@
-//===-- M68kInstrInfo.h - M68k Instruction Information ------*- C++ -*-===//
+//===-- M68kInstrInfo.h - M68k Instruction Information ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -336,4 +336,4 @@ public:
} // namespace llvm
-#endif
+#endif // LLVM_LIB_TARGET_M68K_M68KINSTRINFO_H
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrInfo.td b/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrInfo.td
index ed6cd9ecf442..c581dd91eaaa 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrInfo.td
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrInfo.td
@@ -1,4 +1,4 @@
-//== M68kInstrInfo.td - Main M68k Instruction Definition -*- tablegen -*-=//
+//===-- M68kInstrInfo.td - Main M68k Instruction Definition -*- tablegen -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -137,7 +137,7 @@ def MxSMul : SDNode<"M68kISD::SMUL", MxSDT_BiArithCCROut, [SDNPCommutative]>;
def MxUMul : SDNode<"M68kISD::UMUL", MxSDT_2BiArithCCROut, [SDNPCommutative]>;
def MxCmp : SDNode<"M68kISD::CMP", MxSDT_CmpTest>;
-def MxBt : SDNode<"M68kISD::BT", MxSDT_CmpTest>;
+def MxBtst : SDNode<"M68kISD::BTST", MxSDT_CmpTest>;
def MxCmov : SDNode<"M68kISD::CMOV", MxSDT_Cmov>;
def MxBrCond : SDNode<"M68kISD::BRCOND", MxSDT_BrCond, [SDNPHasChain]>;
@@ -587,8 +587,8 @@ class MxType<ValueType vt, string prefix, string postfix,
// qOp: Supported PCD operand
// qPat: What PCD pattern is used
MxOperand qOp, ComplexPattern qPat,
- // kOp: Supported PCD operand
- // kPat: What PCD pattern is used
+ // kOp: Supported PCI operand
+ // kPat: What PCI pattern is used
MxOperand kOp, ComplexPattern kPat,
// iOp: Supported immediate operand
// iPat: What immediate pattern is used
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrShiftRotate.td b/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrShiftRotate.td
index cab687638076..f1967ec11928 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrShiftRotate.td
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kInstrShiftRotate.td
@@ -1,4 +1,4 @@
-//===------ M68kInstrShiftRotate.td - Logical Instrs -----*- tablegen -*-===//
+//===-- M68kInstrShiftRotate.td - Logical Instrs -----------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kMCInstLower.cpp b/contrib/llvm-project/llvm/lib/Target/M68k/M68kMCInstLower.cpp
index f14361559b13..a0b1452ee663 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kMCInstLower.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kMCInstLower.cpp
@@ -1,4 +1,4 @@
-//===-- M68kMCInstLower.cpp - M68k MachineInstr to MCInst ---*- C++ -*-===//
+//===-- M68kMCInstLower.cpp - M68k MachineInstr to MCInst -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kMCInstLower.h b/contrib/llvm-project/llvm/lib/Target/M68k/M68kMCInstLower.h
index d6160629545e..76d9a36f70ef 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kMCInstLower.h
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kMCInstLower.h
@@ -1,4 +1,4 @@
-//===-- M68kMCInstLower.h - Lower MachineInstr to MCInst -----*- C++ -*--===//
+//===-- M68kMCInstLower.h - Lower MachineInstr to MCInst --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -51,4 +51,4 @@ public:
};
} // namespace llvm
-#endif
+#endif // LLVM_LIB_TARGET_M68K_M68KMCINSTLOWER_H
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kMachineFunction.cpp b/contrib/llvm-project/llvm/lib/Target/M68k/M68kMachineFunction.cpp
index 3d048df7ba49..b1e7369116d7 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kMachineFunction.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kMachineFunction.cpp
@@ -1,4 +1,4 @@
-//===-- M68kMachineFunctionInfo.cpp - M68k private data ----*- C++ -*--===//
+//===-- M68kMachineFunctionInfo.cpp - M68k private data ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kMachineFunction.h b/contrib/llvm-project/llvm/lib/Target/M68k/M68kMachineFunction.h
index 5760bdd4b9e3..93c5255199d4 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kMachineFunction.h
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kMachineFunction.h
@@ -1,4 +1,4 @@
-//===-- M68kMachineFunctionInfo.h - M68k private data ---------*- C++ -*-=//
+//===-- M68kMachineFunctionInfo.h - M68k private data -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -111,4 +111,4 @@ private:
} // end of namespace llvm
-#endif // M68K_MACHINE_FUNCTION_INFO_H
+#endif // LLVM_LIB_TARGET_M68K_M68KMACHINEFUNCTION_H
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kRegisterInfo.cpp b/contrib/llvm-project/llvm/lib/Target/M68k/M68kRegisterInfo.cpp
index 69d16035b1d9..0cae7ac4e312 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kRegisterInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kRegisterInfo.cpp
@@ -1,4 +1,4 @@
-//===-- M68kRegisterInfo.cpp - CPU0 Register Information -----*- C++ -*--===//
+//===-- M68kRegisterInfo.cpp - CPU0 Register Information --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kRegisterInfo.h b/contrib/llvm-project/llvm/lib/Target/M68k/M68kRegisterInfo.h
index 51b94294772c..7f822e1cb34f 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kRegisterInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kRegisterInfo.h
@@ -1,4 +1,4 @@
-//===-- M68kRegisterInfo.h - M68k Register Information Impl --*- C++ --===//
+//===-- M68kRegisterInfo.h - M68k Register Information Impl -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -106,4 +106,4 @@ public:
} // end namespace llvm
-#endif
+#endif // LLVM_LIB_TARGET_M68K_M68KREGISTERINFO_H
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kRegisterInfo.td b/contrib/llvm-project/llvm/lib/Target/M68k/M68kRegisterInfo.td
index e2ea2967f75b..49874a2b1099 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kRegisterInfo.td
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kRegisterInfo.td
@@ -1,4 +1,4 @@
-//== M68kRegisterInfo.td - M68k register definitions ----*- tablegen -*-==//
+//==-- M68kRegisterInfo.td - M68k register definitions ------*- tablegen -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kSchedule.td b/contrib/llvm-project/llvm/lib/Target/M68k/M68kSchedule.td
index a94cd8f31e2e..6a1bf0c6a020 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kSchedule.td
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kSchedule.td
@@ -1,4 +1,4 @@
-//===-- M68kSchedule.td - M68k Scheduling Definitions --*- tablegen -*-===//
+//===-- M68kSchedule.td - M68k Scheduling Definitions ------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kSubtarget.cpp b/contrib/llvm-project/llvm/lib/Target/M68k/M68kSubtarget.cpp
index 991889706e67..ec3830243daf 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kSubtarget.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kSubtarget.cpp
@@ -1,4 +1,4 @@
-//===-- M68kSubtarget.cpp - M68k Subtarget Information ------*- C++ -*-===//
+//===-- M68kSubtarget.cpp - M68k Subtarget Information ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kSubtarget.h b/contrib/llvm-project/llvm/lib/Target/M68k/M68kSubtarget.h
index f45cb7edca1f..9bf2984983a1 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kSubtarget.h
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kSubtarget.h
@@ -1,4 +1,4 @@
-//===-- M68kSubtarget.h - Define Subtarget for the M68k -----*- C++ -*-===//
+//===-- M68kSubtarget.h - Define Subtarget for the M68k ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -11,8 +11,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_CPU0_M68KSUBTARGET_H
-#define LLVM_LIB_TARGET_CPU0_M68KSUBTARGET_H
+#ifndef LLVM_LIB_TARGET_M68K_M68KSUBTARGET_H
+#define LLVM_LIB_TARGET_M68K_M68KSUBTARGET_H
#include "M68kFrameLowering.h"
#include "M68kISelLowering.h"
@@ -179,4 +179,4 @@ public:
};
} // namespace llvm
-#endif
+#endif // LLVM_LIB_TARGET_M68K_M68KSUBTARGET_H
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kTargetMachine.cpp b/contrib/llvm-project/llvm/lib/Target/M68k/M68kTargetMachine.cpp
index e8126c6219e8..fd21fe6bcea8 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kTargetMachine.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kTargetMachine.cpp
@@ -1,4 +1,4 @@
-//===-- M68kTargetMachine.cpp - M68k target machine ---------*- C++ -*-===//
+//===-- M68kTargetMachine.cpp - M68k Target Machine -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kTargetMachine.h b/contrib/llvm-project/llvm/lib/Target/M68k/M68kTargetMachine.h
index 34fae8e45504..8dda720774e7 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kTargetMachine.h
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kTargetMachine.h
@@ -1,4 +1,4 @@
-//===-- M68kTargetMachine.h - Define TargetMachine for M68k ----- C++ -===//
+//===-- M68kTargetMachine.h - Define TargetMachine for M68k -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -53,4 +53,4 @@ public:
};
} // namespace llvm
-#endif
+#endif // LLVM_LIB_TARGET_M68K_M68KTARGETMACHINE_H
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kTargetObjectFile.cpp b/contrib/llvm-project/llvm/lib/Target/M68k/M68kTargetObjectFile.cpp
index 3e26b37e7760..4986d5dbebb9 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kTargetObjectFile.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kTargetObjectFile.cpp
@@ -1,4 +1,4 @@
-//===-- M68kELFTargetObjectFile.cpp - M68k Object Files -----*- C++ -*-===//
+//===-- M68kELFTargetObjectFile.cpp - M68k Object Files ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/M68kTargetObjectFile.h b/contrib/llvm-project/llvm/lib/Target/M68k/M68kTargetObjectFile.h
index dbc5375d5423..80a7d0d6e120 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/M68kTargetObjectFile.h
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/M68kTargetObjectFile.h
@@ -1,4 +1,4 @@
-//===-- M68kELFTargetObjectFile.h - M68k Object Info ---------*- C++ -====//
+//===-- M68kELFTargetObjectFile.h - M68k Object Info ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -28,4 +28,4 @@ public:
};
} // end namespace llvm
-#endif
+#endif // LLVM_LIB_TARGET_M68K_M68KTARGETOBJECTFILE_H
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kAsmBackend.cpp b/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kAsmBackend.cpp
index c1f88fb78ee1..b66557ec6c3a 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kAsmBackend.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kAsmBackend.cpp
@@ -1,4 +1,4 @@
-//===-- M68kAsmBackend.cpp - M68k Assembler Backend ---------*- C++ -*-===//
+//===-- M68kAsmBackend.cpp - M68k Assembler Backend -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kBaseInfo.h b/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kBaseInfo.h
index 7c56cfdf3123..4883f647e214 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kBaseInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kBaseInfo.h
@@ -1,4 +1,4 @@
-//===-- M68kBaseInfo.h - Top level definitions for M68k MC --*- C++ -*-----===//
+//===-- M68kBaseInfo.h - Top level definitions for M68k MC ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -244,4 +244,4 @@ static inline unsigned getMaskedSpillRegister(unsigned order) {
} // namespace llvm
-#endif
+#endif // LLVM_LIB_TARGET_M68K_MCTARGETDESC_M68KBASEINFO_H
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kELFObjectWriter.cpp b/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kELFObjectWriter.cpp
index 4c9a3297424d..27f1b3a3fac8 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kELFObjectWriter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kELFObjectWriter.cpp
@@ -1,4 +1,4 @@
-//===---------- M68kELFObjectWriter.cpp - M68k ELF Writer ---*- C++ -*-===//
+//===-- M68kELFObjectWriter.cpp - M68k ELF Writer ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kFixupKinds.h b/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kFixupKinds.h
index 2b760dec9e41..54a0e98fea6e 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kFixupKinds.h
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kFixupKinds.h
@@ -1,4 +1,4 @@
-//===-- M68kFixupKinds.h - M68k Specific Fixup Entries ------*- C++ -*-===//
+//===-- M68kFixupKinds.h - M68k Specific Fixup Entries ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -51,4 +51,4 @@ static inline MCFixupKind getFixupForSize(unsigned Size, bool isPCRel) {
} // namespace llvm
-#endif
+#endif // LLVM_LIB_TARGET_M68k_MCTARGETDESC_M68kFIXUPKINDS_H
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kInstPrinter.cpp b/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kInstPrinter.cpp
index a2e41437ee21..9ba28622b5b5 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kInstPrinter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kInstPrinter.cpp
@@ -1,4 +1,4 @@
-//===-- M68kInstPrinter.cpp - Convert M68k MCInst to asm ----*- C++ -*-===//
+//===-- M68kInstPrinter.cpp - Convert M68k MCInst to asm --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kInstPrinter.h b/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kInstPrinter.h
index ec26bc4ddbfd..239268dd7159 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kInstPrinter.h
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kInstPrinter.h
@@ -1,4 +1,4 @@
-//===-- M68kInstPrinter.h - Convert M68k MCInst to asm ------*- C++ -*-===//
+//===-- M68kInstPrinter.h - Convert M68k MCInst to asm ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -166,4 +166,4 @@ private:
};
} // end namespace llvm
-#endif
+#endif // LLVM_LIB_TARGET_M68K_INSTPRINTER_M68KINSTPRINTER_H
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCAsmInfo.cpp b/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCAsmInfo.cpp
index ee2041012bb9..005d2d38f53d 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCAsmInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCAsmInfo.cpp
@@ -1,4 +1,4 @@
-//===-- M68kMCAsmInfo.cpp - M68k Asm Properties -------------*- C++ -*-===//
+//===-- M68kMCAsmInfo.cpp - M68k Asm Properties -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCAsmInfo.h b/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCAsmInfo.h
index b3a58cc61223..873264d88674 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCAsmInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCAsmInfo.h
@@ -1,4 +1,4 @@
-//===-- M68kMCAsmInfo.h - M68k Asm Info --------------------*- C++ -*--===//
+//===-- M68kMCAsmInfo.h - M68k Asm Info -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -28,4 +28,4 @@ public:
} // namespace llvm
-#endif
+#endif // LLVM_LIB_TARGET_M68K_MCTARGETDESC_M68KMCASMINFO_H
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCCodeEmitter.cpp b/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCCodeEmitter.cpp
index 9708abaadf98..9227bd6c3a78 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCCodeEmitter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCCodeEmitter.cpp
@@ -1,4 +1,4 @@
-//===-- M68kMCCodeEmitter.cpp - Convert M68k code emitter ---*- C++ -*-===//
+//===-- M68kMCCodeEmitter.cpp - Convert M68k code emitter -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCCodeEmitter.h b/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCCodeEmitter.h
index 242a1297206a..aba705aa54b6 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCCodeEmitter.h
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCCodeEmitter.h
@@ -1,4 +1,4 @@
-//===-- M68kMCCodeEmitter.h - M68k Code Emitter ----------------*- C++ -*--===//
+//===-- M68kMCCodeEmitter.h - M68k Code Emitter -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -25,4 +25,4 @@ const uint8_t *getMCInstrBeads(unsigned);
} // namespace M68k
} // namespace llvm
-#endif
+#endif // LLVM_LIB_TARGET_M68K_MCTARGETDESC_M68KMCCODEEMITTER_H
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCTargetDesc.cpp b/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCTargetDesc.cpp
index 9f4db895a821..2606e22410fc 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCTargetDesc.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCTargetDesc.cpp
@@ -1,4 +1,4 @@
-//===-- M68kMCTargetDesc.cpp - M68k Target Descriptions -----*- C++ -*-===//
+//===-- M68kMCTargetDesc.cpp - M68k Target Descriptions ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCTargetDesc.h b/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCTargetDesc.h
index a0ebca0ce36c..aa53e13af4fc 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCTargetDesc.h
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/MCTargetDesc/M68kMCTargetDesc.h
@@ -1,4 +1,4 @@
-//===-- M68kMCTargetDesc.h - M68k Target Descriptions -------*- C++ -*-===//
+//===-- M68kMCTargetDesc.h - M68k Target Descriptions -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -58,4 +58,4 @@ std::unique_ptr<MCObjectTargetWriter> createM68kELFObjectWriter(uint8_t OSABI);
#define GET_SUBTARGETINFO_ENUM
#include "M68kGenSubtargetInfo.inc"
-#endif
+#endif // LLVM_LIB_TARGET_M68K_MCTARGETDESC_M68KMCTARGETDESC_H
diff --git a/contrib/llvm-project/llvm/lib/Target/M68k/TargetInfo/M68kTargetInfo.cpp b/contrib/llvm-project/llvm/lib/Target/M68k/TargetInfo/M68kTargetInfo.cpp
index 2a225b8a43cd..4701f46b0298 100644
--- a/contrib/llvm-project/llvm/lib/Target/M68k/TargetInfo/M68kTargetInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/M68k/TargetInfo/M68kTargetInfo.cpp
@@ -1,4 +1,4 @@
-//===-- M68kTargetInfo.cpp - M68k Target Implementation -----*- C++ -*-===//
+//===-- M68kTargetInfo.cpp - M68k Target Implementation ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/llvm/lib/Target/MSP430/AsmParser/MSP430AsmParser.cpp b/contrib/llvm-project/llvm/lib/Target/MSP430/AsmParser/MSP430AsmParser.cpp
index c1677baf52a7..13cba8b079a9 100644
--- a/contrib/llvm-project/llvm/lib/Target/MSP430/AsmParser/MSP430AsmParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/MSP430/AsmParser/MSP430AsmParser.cpp
@@ -114,13 +114,14 @@ class MSP430Operand : public MCParsedAsmOperand {
public:
MSP430Operand(StringRef Tok, SMLoc const &S)
- : Base(), Kind(k_Tok), Tok(Tok), Start(S), End(S) {}
+ : Kind(k_Tok), Tok(Tok), Start(S), End(S) {}
MSP430Operand(KindTy Kind, unsigned Reg, SMLoc const &S, SMLoc const &E)
- : Base(), Kind(Kind), Reg(Reg), Start(S), End(E) {}
+ : Kind(Kind), Reg(Reg), Start(S), End(E) {}
MSP430Operand(MCExpr const *Imm, SMLoc const &S, SMLoc const &E)
- : Base(), Kind(k_Imm), Imm(Imm), Start(S), End(E) {}
- MSP430Operand(unsigned Reg, MCExpr const *Expr, SMLoc const &S, SMLoc const &E)
- : Base(), Kind(k_Mem), Mem({Reg, Expr}), Start(S), End(E) {}
+ : Kind(k_Imm), Imm(Imm), Start(S), End(E) {}
+ MSP430Operand(unsigned Reg, MCExpr const *Expr, SMLoc const &S,
+ SMLoc const &E)
+ : Kind(k_Mem), Mem({Reg, Expr}), Start(S), End(E) {}
void addRegOperands(MCInst &Inst, unsigned N) const {
assert((Kind == k_Reg || Kind == k_IndReg || Kind == k_PostIndReg) &&
diff --git a/contrib/llvm-project/llvm/lib/Target/MSP430/MCTargetDesc/MSP430ELFObjectWriter.cpp b/contrib/llvm-project/llvm/lib/Target/MSP430/MCTargetDesc/MSP430ELFObjectWriter.cpp
index 0cdd1f4f701f..bb5351af6523 100644
--- a/contrib/llvm-project/llvm/lib/Target/MSP430/MCTargetDesc/MSP430ELFObjectWriter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/MSP430/MCTargetDesc/MSP430ELFObjectWriter.cpp
@@ -9,7 +9,6 @@
#include "MCTargetDesc/MSP430FixupKinds.h"
#include "MCTargetDesc/MSP430MCTargetDesc.h"
-#include "MCTargetDesc/MSP430MCTargetDesc.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCObjectWriter.h"
diff --git a/contrib/llvm-project/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp b/contrib/llvm-project/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp
index 4ef9a567d453..6a8dc3502496 100644
--- a/contrib/llvm-project/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp
@@ -190,7 +190,7 @@ bool MSP430FrameLowering::spillCalleeSavedRegisters(
MFI->setCalleeSavedFrameSize(CSI.size() * 2);
for (const CalleeSavedInfo &I : llvm::reverse(CSI)) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
// Add the callee-saved register as live-in. It's killed at the spill.
MBB.addLiveIn(Reg);
BuildMI(MBB, MI, DL, TII.get(MSP430::PUSH16r))
diff --git a/contrib/llvm-project/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
index c64a44a0ef95..aebfc6b0ae2e 100644
--- a/contrib/llvm-project/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -705,7 +705,7 @@ SDValue MSP430TargetLowering::LowerCCCArguments(
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
if (Ins[i].Flags.isSRet()) {
- unsigned Reg = FuncInfo->getSRetReturnReg();
+ Register Reg = FuncInfo->getSRetReturnReg();
if (!Reg) {
Reg = MF.getRegInfo().createVirtualRegister(
getRegClassFor(MVT::i16));
@@ -772,7 +772,7 @@ MSP430TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
if (MF.getFunction().hasStructRetAttr()) {
MSP430MachineFunctionInfo *FuncInfo = MF.getInfo<MSP430MachineFunctionInfo>();
- unsigned Reg = FuncInfo->getSRetReturnReg();
+ Register Reg = FuncInfo->getSRetReturnReg();
if (!Reg)
llvm_unreachable("sret virtual register not created in entry block");
@@ -1402,12 +1402,12 @@ bool MSP430TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
bool MSP430TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
// MSP430 implicitly zero-extends 8-bit results in 16-bit registers.
- return 0 && Ty1->isIntegerTy(8) && Ty2->isIntegerTy(16);
+ return false && Ty1->isIntegerTy(8) && Ty2->isIntegerTy(16);
}
bool MSP430TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
// MSP430 implicitly zero-extends 8-bit results in 16-bit registers.
- return 0 && VT1 == MVT::i8 && VT2 == MVT::i16;
+ return false && VT1 == MVT::i8 && VT2 == MVT::i16;
}
bool MSP430TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
diff --git a/contrib/llvm-project/llvm/lib/Target/MSP430/MSP430Subtarget.cpp b/contrib/llvm-project/llvm/lib/Target/MSP430/MSP430Subtarget.cpp
index 2fd58717c4db..0604d47597e2 100644
--- a/contrib/llvm-project/llvm/lib/Target/MSP430/MSP430Subtarget.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/MSP430/MSP430Subtarget.cpp
@@ -57,5 +57,5 @@ MSP430Subtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) {
MSP430Subtarget::MSP430Subtarget(const Triple &TT, const std::string &CPU,
const std::string &FS, const TargetMachine &TM)
- : MSP430GenSubtargetInfo(TT, CPU, /*TuneCPU*/ CPU, FS), FrameLowering(),
+ : MSP430GenSubtargetInfo(TT, CPU, /*TuneCPU*/ CPU, FS),
InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM, *this) {}
diff --git a/contrib/llvm-project/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/contrib/llvm-project/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
index 01b5dff2e448..736c41f8ac03 100644
--- a/contrib/llvm-project/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
@@ -827,8 +827,7 @@ private:
} Kind;
public:
- MipsOperand(KindTy K, MipsAsmParser &Parser)
- : MCParsedAsmOperand(), Kind(K), AsmParser(Parser) {}
+ MipsOperand(KindTy K, MipsAsmParser &Parser) : Kind(K), AsmParser(Parser) {}
~MipsOperand() override {
switch (Kind) {
diff --git a/contrib/llvm-project/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp b/contrib/llvm-project/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
index bfe413a152b6..a3dbe6f84a1e 100644
--- a/contrib/llvm-project/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
@@ -197,7 +197,7 @@ static unsigned adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
Value = (int64_t)Value / 2;
// We now check if Value can be encoded as a 26-bit signed immediate.
if (!isInt<26>(Value)) {
- Ctx.reportFatalError(Fixup.getLoc(), "out of range PC26 fixup");
+ Ctx.reportError(Fixup.getLoc(), "out of range PC26 fixup");
return 0;
}
break;
diff --git a/contrib/llvm-project/llvm/lib/Target/Mips/Mips.h b/contrib/llvm-project/llvm/lib/Target/Mips/Mips.h
index b3faaab436f0..faf58545db62 100644
--- a/contrib/llvm-project/llvm/lib/Target/Mips/Mips.h
+++ b/contrib/llvm-project/llvm/lib/Target/Mips/Mips.h
@@ -38,6 +38,7 @@ namespace llvm {
FunctionPass *createMicroMipsSizeReducePass();
FunctionPass *createMipsExpandPseudoPass();
FunctionPass *createMipsPreLegalizeCombiner();
+ FunctionPass *createMipsMulMulBugPass();
InstructionSelector *createMipsInstructionSelector(const MipsTargetMachine &,
MipsSubtarget &,
@@ -47,6 +48,7 @@ namespace llvm {
void initializeMipsBranchExpansionPass(PassRegistry &);
void initializeMicroMipsSizeReducePass(PassRegistry &);
void initializeMipsPreLegalizerCombinerPass(PassRegistry&);
+ void initializeMipsMulMulBugFixPass(PassRegistry&);
} // end namespace llvm;
#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/Mips/Mips16FrameLowering.cpp b/contrib/llvm-project/llvm/lib/Target/Mips/Mips16FrameLowering.cpp
index 622f2039f9e4..4f4e3f3f2ed7 100644
--- a/contrib/llvm-project/llvm/lib/Target/Mips/Mips16FrameLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Mips/Mips16FrameLowering.cpp
@@ -74,7 +74,7 @@ void Mips16FrameLowering::emitPrologue(MachineFunction &MF,
for (const CalleeSavedInfo &I : CSI) {
int64_t Offset = MFI.getObjectOffset(I.getFrameIdx());
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
unsigned DReg = MRI->getDwarfRegNum(Reg, true);
unsigned CFIIndex = MF.addFrameInst(
MCCFIInstruction::createOffset(nullptr, DReg, Offset));
@@ -124,7 +124,7 @@ bool Mips16FrameLowering::spillCalleeSavedRegisters(
// method MipsTargetLowering::lowerRETURNADDR.
// It's killed at the spill, unless the register is RA and return address
// is taken.
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
bool IsRAAndRetAddrIsTaken = (Reg == Mips::RA)
&& MF->getFrameInfo().isReturnAddressTaken();
if (!IsRAAndRetAddrIsTaken)
diff --git a/contrib/llvm-project/llvm/lib/Target/Mips/Mips16ISelDAGToDAG.cpp b/contrib/llvm-project/llvm/lib/Target/Mips/Mips16ISelDAGToDAG.cpp
index ddd28d095e51..50147c019bfd 100644
--- a/contrib/llvm-project/llvm/lib/Target/Mips/Mips16ISelDAGToDAG.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Mips/Mips16ISelDAGToDAG.cpp
@@ -121,7 +121,7 @@ bool Mips16DAGToDAGISel::selectAddr(bool SPAllowed, SDValue Addr, SDValue &Base,
}
// Addresses of the form FI+const or FI|const
if (CurDAG->isBaseWithConstantOffset(Addr)) {
- ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1));
+ auto *CN = cast<ConstantSDNode>(Addr.getOperand(1));
if (isInt<16>(CN->getSExtValue())) {
// If the first operand is a FI, get the TargetFI Node
if (SPAllowed) {
diff --git a/contrib/llvm-project/llvm/lib/Target/Mips/Mips16ISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/Mips/Mips16ISelLowering.cpp
index 136612c59d96..78ffe00c020c 100644
--- a/contrib/llvm-project/llvm/lib/Target/Mips/Mips16ISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Mips/Mips16ISelLowering.cpp
@@ -451,7 +451,7 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
// So for now we always save S2. The optimization will be done
// in a follow-on patch.
//
- if (1 || (Signature->RetSig != Mips16HardFloatInfo::NoFPRet))
+ if (true || (Signature->RetSig != Mips16HardFloatInfo::NoFPRet))
FuncInfo->setSaveS2();
}
// one more look at list of intrinsics
diff --git a/contrib/llvm-project/llvm/lib/Target/Mips/Mips16InstrInfo.cpp b/contrib/llvm-project/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
index 3403ec01aef2..02d0e770ba66 100644
--- a/contrib/llvm-project/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
@@ -190,7 +190,7 @@ static void addSaveRestoreRegs(MachineInstrBuilder &MIB,
// method MipsTargetLowering::lowerRETURNADDR.
// It's killed at the spill, unless the register is RA and return address
// is taken.
- unsigned Reg = CSI[e-i-1].getReg();
+ Register Reg = CSI[e-i-1].getReg();
switch (Reg) {
case Mips::RA:
case Mips::S0:
diff --git a/contrib/llvm-project/llvm/lib/Target/Mips/Mips16RegisterInfo.cpp b/contrib/llvm-project/llvm/lib/Target/Mips/Mips16RegisterInfo.cpp
index f6f43da9abf8..563118dfe627 100644
--- a/contrib/llvm-project/llvm/lib/Target/Mips/Mips16RegisterInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Mips/Mips16RegisterInfo.cpp
@@ -37,7 +37,7 @@ using namespace llvm;
#define DEBUG_TYPE "mips16-registerinfo"
-Mips16RegisterInfo::Mips16RegisterInfo() : MipsRegisterInfo() {}
+Mips16RegisterInfo::Mips16RegisterInfo() {}
bool Mips16RegisterInfo::requiresRegisterScavenging
(const MachineFunction &MF) const {
diff --git a/contrib/llvm-project/llvm/lib/Target/Mips/MipsAnalyzeImmediate.cpp b/contrib/llvm-project/llvm/lib/Target/Mips/MipsAnalyzeImmediate.cpp
index ae2b83c414db..33da0ff31be8 100644
--- a/contrib/llvm-project/llvm/lib/Target/Mips/MipsAnalyzeImmediate.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Mips/MipsAnalyzeImmediate.cpp
@@ -25,8 +25,8 @@ void MipsAnalyzeImmediate::AddInstr(InstSeqLs &SeqLs, const Inst &I) {
return;
}
- for (InstSeqLs::iterator Iter = SeqLs.begin(); Iter != SeqLs.end(); ++Iter)
- Iter->push_back(I);
+ for (auto &S : SeqLs)
+ S.push_back(I);
}
void MipsAnalyzeImmediate::GetInstSeqLsADDiu(uint64_t Imm, unsigned RemSize,
diff --git a/contrib/llvm-project/llvm/lib/Target/Mips/MipsAsmPrinter.cpp b/contrib/llvm-project/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
index 5d026785b921..4bd8845e9cb9 100644
--- a/contrib/llvm-project/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
@@ -80,13 +80,9 @@ bool MipsAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
MipsFI = MF.getInfo<MipsFunctionInfo>();
if (Subtarget->inMips16Mode())
- for (std::map<
- const char *,
- const Mips16HardFloatInfo::FuncSignature *>::const_iterator
- it = MipsFI->StubsNeeded.begin();
- it != MipsFI->StubsNeeded.end(); ++it) {
- const char *Symbol = it->first;
- const Mips16HardFloatInfo::FuncSignature *Signature = it->second;
+ for (const auto &I : MipsFI->StubsNeeded) {
+ const char *Symbol = I.first;
+ const Mips16HardFloatInfo::FuncSignature *Signature = I.second;
if (StubsNeeded.find(Symbol) == StubsNeeded.end())
StubsNeeded[Symbol] = Signature;
}
@@ -341,7 +337,7 @@ void MipsAsmPrinter::printSavedRegsBitmask() {
unsigned CSFPRegsSize = 0;
for (const auto &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
unsigned RegNum = TRI->getEncodingValue(Reg);
// If it's a floating point register, set the FPU Bitmask.
@@ -1279,11 +1275,11 @@ void MipsAsmPrinter::NaClAlignIndirectJumpTargets(MachineFunction &MF) {
// Align all blocks that are jumped to through jump table.
if (MachineJumpTableInfo *JtInfo = MF.getJumpTableInfo()) {
const std::vector<MachineJumpTableEntry> &JT = JtInfo->getJumpTables();
- for (unsigned I = 0; I < JT.size(); ++I) {
- const std::vector<MachineBasicBlock*> &MBBs = JT[I].MBBs;
+ for (const auto &I : JT) {
+ const std::vector<MachineBasicBlock *> &MBBs = I.MBBs;
- for (unsigned J = 0; J < MBBs.size(); ++J)
- MBBs[J]->setAlignment(MIPS_NACL_BUNDLE_ALIGN);
+ for (MachineBasicBlock *MBB : MBBs)
+ MBB->setAlignment(MIPS_NACL_BUNDLE_ALIGN);
}
}
diff --git a/contrib/llvm-project/llvm/lib/Target/Mips/MipsCallLowering.h b/contrib/llvm-project/llvm/lib/Target/Mips/MipsCallLowering.h
index 1d1406da3201..9f114d55db4c 100644
--- a/contrib/llvm-project/llvm/lib/Target/Mips/MipsCallLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/Mips/MipsCallLowering.h
@@ -18,7 +18,6 @@
namespace llvm {
-class MachineMemOperand;
class MipsTargetLowering;
class MipsCallLowering : public CallLowering {
diff --git a/contrib/llvm-project/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp b/contrib/llvm-project/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
index 491d379bfe0b..1efbf5570287 100644
--- a/contrib/llvm-project/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
@@ -604,9 +604,9 @@ MipsConstantIslands::CPEntry
std::vector<CPEntry> &CPEs = CPEntries[CPI];
// Number of entries per constpool index should be small, just do a
// linear search.
- for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
- if (CPEs[i].CPEMI == CPEMI)
- return &CPEs[i];
+ for (CPEntry &CPE : CPEs) {
+ if (CPE.CPEMI == CPEMI)
+ return &CPE;
}
return nullptr;
}
@@ -1052,27 +1052,27 @@ int MipsConstantIslands::findInRangeCPEntry(CPUser& U, unsigned UserOffset)
// No. Look for previously created clones of the CPE that are in range.
unsigned CPI = CPEMI->getOperand(1).getIndex();
std::vector<CPEntry> &CPEs = CPEntries[CPI];
- for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
+ for (CPEntry &CPE : CPEs) {
// We already tried this one
- if (CPEs[i].CPEMI == CPEMI)
+ if (CPE.CPEMI == CPEMI)
continue;
// Removing CPEs can leave empty entries, skip
- if (CPEs[i].CPEMI == nullptr)
+ if (CPE.CPEMI == nullptr)
continue;
- if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI, U.getMaxDisp(),
- U.NegOk)) {
- LLVM_DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#"
- << CPEs[i].CPI << "\n");
+ if (isCPEntryInRange(UserMI, UserOffset, CPE.CPEMI, U.getMaxDisp(),
+ U.NegOk)) {
+ LLVM_DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#" << CPE.CPI
+ << "\n");
// Point the CPUser node to the replacement
- U.CPEMI = CPEs[i].CPEMI;
+ U.CPEMI = CPE.CPEMI;
// Change the CPI in the instruction operand to refer to the clone.
for (MachineOperand &MO : UserMI->operands())
if (MO.isCPI()) {
- MO.setIndex(CPEs[i].CPI);
+ MO.setIndex(CPE.CPI);
break;
}
// Adjust the refcount of the clone...
- CPEs[i].RefCount++;
+ CPE.RefCount++;
// ...and the original. If we didn't remove the old entry, none of the
// addresses changed, so we don't need another pass.
return decrementCPEReferenceCount(CPI, CPEMI) ? 2 : 1;
@@ -1108,27 +1108,27 @@ int MipsConstantIslands::findLongFormInRangeCPEntry
// No. Look for previously created clones of the CPE that are in range.
unsigned CPI = CPEMI->getOperand(1).getIndex();
std::vector<CPEntry> &CPEs = CPEntries[CPI];
- for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
+ for (CPEntry &CPE : CPEs) {
// We already tried this one
- if (CPEs[i].CPEMI == CPEMI)
+ if (CPE.CPEMI == CPEMI)
continue;
// Removing CPEs can leave empty entries, skip
- if (CPEs[i].CPEMI == nullptr)
+ if (CPE.CPEMI == nullptr)
continue;
- if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI,
- U.getLongFormMaxDisp(), U.NegOk)) {
- LLVM_DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#"
- << CPEs[i].CPI << "\n");
+ if (isCPEntryInRange(UserMI, UserOffset, CPE.CPEMI, U.getLongFormMaxDisp(),
+ U.NegOk)) {
+ LLVM_DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#" << CPE.CPI
+ << "\n");
// Point the CPUser node to the replacement
- U.CPEMI = CPEs[i].CPEMI;
+ U.CPEMI = CPE.CPEMI;
// Change the CPI in the instruction operand to refer to the clone.
for (MachineOperand &MO : UserMI->operands())
if (MO.isCPI()) {
- MO.setIndex(CPEs[i].CPI);
+ MO.setIndex(CPE.CPI);
break;
}
// Adjust the refcount of the clone...
- CPEs[i].RefCount++;
+ CPE.RefCount++;
// ...and the original. If we didn't remove the old entry, none of the
// addresses changed, so we don't need another pass.
return decrementCPEReferenceCount(CPI, CPEMI) ? 2 : 1;
@@ -1435,15 +1435,14 @@ void MipsConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
/// are zero.
bool MipsConstantIslands::removeUnusedCPEntries() {
unsigned MadeChange = false;
- for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
- std::vector<CPEntry> &CPEs = CPEntries[i];
- for (unsigned j = 0, ee = CPEs.size(); j != ee; ++j) {
- if (CPEs[j].RefCount == 0 && CPEs[j].CPEMI) {
- removeDeadCPEMI(CPEs[j].CPEMI);
- CPEs[j].CPEMI = nullptr;
- MadeChange = true;
- }
+ for (std::vector<CPEntry> &CPEs : CPEntries) {
+ for (CPEntry &CPE : CPEs) {
+ if (CPE.RefCount == 0 && CPE.CPEMI) {
+ removeDeadCPEMI(CPE.CPEMI);
+ CPE.CPEMI = nullptr;
+ MadeChange = true;
}
+ }
}
return MadeChange;
}
diff --git a/contrib/llvm-project/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp b/contrib/llvm-project/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp
index 2d27d7553de6..cf6cec22308c 100644
--- a/contrib/llvm-project/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp
@@ -309,12 +309,12 @@ INITIALIZE_PASS(MipsDelaySlotFiller, DEBUG_TYPE,
static void insertDelayFiller(Iter Filler, const BB2BrMap &BrMap) {
MachineFunction *MF = Filler->getParent()->getParent();
- for (BB2BrMap::const_iterator I = BrMap.begin(); I != BrMap.end(); ++I) {
- if (I->second) {
- MIBundleBuilder(I->second).append(MF->CloneMachineInstr(&*Filler));
+ for (const auto &I : BrMap) {
+ if (I.second) {
+ MIBundleBuilder(I.second).append(MF->CloneMachineInstr(&*Filler));
++UsefulSlots;
} else {
- I->first->insert(I->first->end(), MF->CloneMachineInstr(&*Filler));
+ I.first->push_back(MF->CloneMachineInstr(&*Filler));
}
}
}
diff --git a/contrib/llvm-project/llvm/lib/Target/Mips/MipsFastISel.cpp b/contrib/llvm-project/llvm/lib/Target/Mips/MipsFastISel.cpp
index 05c1c06ffefe..6ddfec5d0f79 100644
--- a/contrib/llvm-project/llvm/lib/Target/Mips/MipsFastISel.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Mips/MipsFastISel.cpp
@@ -313,7 +313,7 @@ unsigned MipsFastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
llvm_unreachable("unexpected opcode");
}
- unsigned LHSReg = getRegForValue(LHS);
+ Register LHSReg = getRegForValue(LHS);
if (!LHSReg)
return 0;
@@ -325,7 +325,7 @@ unsigned MipsFastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
if (!RHSReg)
return 0;
- unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
+ Register ResultReg = createResultReg(&Mips::GPR32RegClass);
if (!ResultReg)
return 0;
@@ -341,7 +341,7 @@ unsigned MipsFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
FuncInfo.StaticAllocaMap.find(AI);
if (SI != FuncInfo.StaticAllocaMap.end()) {
- unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
+ Register ResultReg = createResultReg(&Mips::GPR32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::LEA_ADDiu),
ResultReg)
.addFrameIndex(SI->second)
@@ -362,7 +362,7 @@ unsigned MipsFastISel::materializeInt(const Constant *C, MVT VT) {
unsigned MipsFastISel::materialize32BitInt(int64_t Imm,
const TargetRegisterClass *RC) {
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
if (isInt<16>(Imm)) {
unsigned Opc = Mips::ADDiu;
@@ -376,7 +376,7 @@ unsigned MipsFastISel::materialize32BitInt(int64_t Imm,
unsigned Hi = (Imm >> 16) & 0xFFFF;
if (Lo) {
// Both Lo and Hi have nonzero bits.
- unsigned TmpReg = createResultReg(RC);
+ Register TmpReg = createResultReg(RC);
emitInst(Mips::LUi, TmpReg).addImm(Hi);
emitInst(Mips::ORi, ResultReg).addReg(TmpReg).addImm(Lo);
} else {
@@ -391,13 +391,13 @@ unsigned MipsFastISel::materializeFP(const ConstantFP *CFP, MVT VT) {
int64_t Imm = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
if (VT == MVT::f32) {
const TargetRegisterClass *RC = &Mips::FGR32RegClass;
- unsigned DestReg = createResultReg(RC);
+ Register DestReg = createResultReg(RC);
unsigned TempReg = materialize32BitInt(Imm, &Mips::GPR32RegClass);
emitInst(Mips::MTC1, DestReg).addReg(TempReg);
return DestReg;
} else if (VT == MVT::f64) {
const TargetRegisterClass *RC = &Mips::AFGR64RegClass;
- unsigned DestReg = createResultReg(RC);
+ Register DestReg = createResultReg(RC);
unsigned TempReg1 = materialize32BitInt(Imm >> 32, &Mips::GPR32RegClass);
unsigned TempReg2 =
materialize32BitInt(Imm & 0xFFFFFFFF, &Mips::GPR32RegClass);
@@ -412,7 +412,7 @@ unsigned MipsFastISel::materializeGV(const GlobalValue *GV, MVT VT) {
if (VT != MVT::i32)
return 0;
const TargetRegisterClass *RC = &Mips::GPR32RegClass;
- unsigned DestReg = createResultReg(RC);
+ Register DestReg = createResultReg(RC);
const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
bool IsThreadLocal = GVar && GVar->isThreadLocal();
// TLS not supported at this time.
@@ -423,7 +423,7 @@ unsigned MipsFastISel::materializeGV(const GlobalValue *GV, MVT VT) {
.addGlobalAddress(GV, 0, MipsII::MO_GOT);
if ((GV->hasInternalLinkage() ||
(GV->hasLocalLinkage() && !isa<Function>(GV)))) {
- unsigned TempReg = createResultReg(RC);
+ Register TempReg = createResultReg(RC);
emitInst(Mips::ADDiu, TempReg)
.addReg(DestReg)
.addGlobalAddress(GV, 0, MipsII::MO_ABS_LO);
@@ -434,7 +434,7 @@ unsigned MipsFastISel::materializeGV(const GlobalValue *GV, MVT VT) {
unsigned MipsFastISel::materializeExternalCallSym(MCSymbol *Sym) {
const TargetRegisterClass *RC = &Mips::GPR32RegClass;
- unsigned DestReg = createResultReg(RC);
+ Register DestReg = createResultReg(RC);
emitInst(Mips::LW, DestReg)
.addReg(MFI->getGlobalBaseReg(*MF))
.addSym(Sym, MipsII::MO_GOT);
@@ -649,13 +649,13 @@ bool MipsFastISel::emitCmp(unsigned ResultReg, const CmpInst *CI) {
default:
return false;
case CmpInst::ICMP_EQ: {
- unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
+ Register TempReg = createResultReg(&Mips::GPR32RegClass);
emitInst(Mips::XOR, TempReg).addReg(LeftReg).addReg(RightReg);
emitInst(Mips::SLTiu, ResultReg).addReg(TempReg).addImm(1);
break;
}
case CmpInst::ICMP_NE: {
- unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
+ Register TempReg = createResultReg(&Mips::GPR32RegClass);
emitInst(Mips::XOR, TempReg).addReg(LeftReg).addReg(RightReg);
emitInst(Mips::SLTu, ResultReg).addReg(Mips::ZERO).addReg(TempReg);
break;
@@ -667,13 +667,13 @@ bool MipsFastISel::emitCmp(unsigned ResultReg, const CmpInst *CI) {
emitInst(Mips::SLTu, ResultReg).addReg(LeftReg).addReg(RightReg);
break;
case CmpInst::ICMP_UGE: {
- unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
+ Register TempReg = createResultReg(&Mips::GPR32RegClass);
emitInst(Mips::SLTu, TempReg).addReg(LeftReg).addReg(RightReg);
emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
break;
}
case CmpInst::ICMP_ULE: {
- unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
+ Register TempReg = createResultReg(&Mips::GPR32RegClass);
emitInst(Mips::SLTu, TempReg).addReg(RightReg).addReg(LeftReg);
emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
break;
@@ -685,13 +685,13 @@ bool MipsFastISel::emitCmp(unsigned ResultReg, const CmpInst *CI) {
emitInst(Mips::SLT, ResultReg).addReg(LeftReg).addReg(RightReg);
break;
case CmpInst::ICMP_SGE: {
- unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
+ Register TempReg = createResultReg(&Mips::GPR32RegClass);
emitInst(Mips::SLT, TempReg).addReg(LeftReg).addReg(RightReg);
emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
break;
}
case CmpInst::ICMP_SLE: {
- unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
+ Register TempReg = createResultReg(&Mips::GPR32RegClass);
emitInst(Mips::SLT, TempReg).addReg(RightReg).addReg(LeftReg);
emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
break;
@@ -737,8 +737,8 @@ bool MipsFastISel::emitCmp(unsigned ResultReg, const CmpInst *CI) {
default:
llvm_unreachable("Only switching of a subset of CCs.");
}
- unsigned RegWithZero = createResultReg(&Mips::GPR32RegClass);
- unsigned RegWithOne = createResultReg(&Mips::GPR32RegClass);
+ Register RegWithZero = createResultReg(&Mips::GPR32RegClass);
+ Register RegWithOne = createResultReg(&Mips::GPR32RegClass);
emitInst(Mips::ADDiu, RegWithZero).addReg(Mips::ZERO).addImm(0);
emitInst(Mips::ADDiu, RegWithOne).addReg(Mips::ZERO).addImm(1);
emitInst(Opc).addReg(Mips::FCC0, RegState::Define).addReg(LeftReg)
@@ -964,7 +964,7 @@ bool MipsFastISel::selectBranch(const Instruction *I) {
// For the general case, we need to mask with 1.
if (ZExtCondReg == 0) {
- unsigned CondReg = getRegForValue(BI->getCondition());
+ Register CondReg = getRegForValue(BI->getCondition());
if (CondReg == 0)
return false;
@@ -982,7 +982,7 @@ bool MipsFastISel::selectBranch(const Instruction *I) {
bool MipsFastISel::selectCmp(const Instruction *I) {
const CmpInst *CI = cast<CmpInst>(I);
- unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
+ Register ResultReg = createResultReg(&Mips::GPR32RegClass);
if (!emitCmp(ResultReg, CI))
return false;
updateValueMap(I, ResultReg);
@@ -1000,13 +1000,13 @@ bool MipsFastISel::selectFPExt(const Instruction *I) {
if (SrcVT != MVT::f32 || DestVT != MVT::f64)
return false;
- unsigned SrcReg =
+ Register SrcReg =
getRegForValue(Src); // this must be a 32bit floating point register class
// maybe we should handle this differently
if (!SrcReg)
return false;
- unsigned DestReg = createResultReg(&Mips::AFGR64RegClass);
+ Register DestReg = createResultReg(&Mips::AFGR64RegClass);
emitInst(Mips::CVT_D32_S, DestReg).addReg(SrcReg);
updateValueMap(I, DestReg);
return true;
@@ -1041,22 +1041,22 @@ bool MipsFastISel::selectSelect(const Instruction *I) {
const SelectInst *SI = cast<SelectInst>(I);
const Value *Cond = SI->getCondition();
- unsigned Src1Reg = getRegForValue(SI->getTrueValue());
- unsigned Src2Reg = getRegForValue(SI->getFalseValue());
- unsigned CondReg = getRegForValue(Cond);
+ Register Src1Reg = getRegForValue(SI->getTrueValue());
+ Register Src2Reg = getRegForValue(SI->getFalseValue());
+ Register CondReg = getRegForValue(Cond);
if (!Src1Reg || !Src2Reg || !CondReg)
return false;
- unsigned ZExtCondReg = createResultReg(&Mips::GPR32RegClass);
+ Register ZExtCondReg = createResultReg(&Mips::GPR32RegClass);
if (!ZExtCondReg)
return false;
if (!emitIntExt(MVT::i1, CondReg, MVT::i32, ZExtCondReg, true))
return false;
- unsigned ResultReg = createResultReg(RC);
- unsigned TempReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
+ Register TempReg = createResultReg(RC);
if (!ResultReg || !TempReg)
return false;
@@ -1079,11 +1079,11 @@ bool MipsFastISel::selectFPTrunc(const Instruction *I) {
if (SrcVT != MVT::f64 || DestVT != MVT::f32)
return false;
- unsigned SrcReg = getRegForValue(Src);
+ Register SrcReg = getRegForValue(Src);
if (!SrcReg)
return false;
- unsigned DestReg = createResultReg(&Mips::FGR32RegClass);
+ Register DestReg = createResultReg(&Mips::FGR32RegClass);
if (!DestReg)
return false;
@@ -1115,14 +1115,14 @@ bool MipsFastISel::selectFPToInt(const Instruction *I, bool IsSigned) {
if (SrcVT != MVT::f32 && SrcVT != MVT::f64)
return false;
- unsigned SrcReg = getRegForValue(Src);
+ Register SrcReg = getRegForValue(Src);
if (SrcReg == 0)
return false;
// Determine the opcode for the conversion, which takes place
// entirely within FPRs.
- unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
- unsigned TempReg = createResultReg(&Mips::FGR32RegClass);
+ Register DestReg = createResultReg(&Mips::GPR32RegClass);
+ Register TempReg = createResultReg(&Mips::FGR32RegClass);
unsigned Opc = (SrcVT == MVT::f32) ? Mips::TRUNC_W_S : Mips::TRUNC_W_D32;
// Generate the convert.
@@ -1196,7 +1196,7 @@ bool MipsFastISel::processCallArgs(CallLoweringInfo &CLI,
break;
}
}
- unsigned ArgReg = getRegForValue(ArgVal);
+ Register ArgReg = getRegForValue(ArgVal);
if (!ArgReg)
return false;
@@ -1294,7 +1294,7 @@ bool MipsFastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT,
if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
CopyVT = MVT::i32;
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
+ Register ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
if (!ResultReg)
return false;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
@@ -1462,11 +1462,11 @@ bool MipsFastISel::fastLowerArguments() {
for (const auto &FormalArg : F->args()) {
unsigned ArgNo = FormalArg.getArgNo();
unsigned SrcReg = Allocation[ArgNo].Reg;
- unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, Allocation[ArgNo].RC);
+ Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, Allocation[ArgNo].RC);
// FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
// Without this, EmitLiveInCopies may eliminate the livein if its only
// use is a bitcast (which isn't turned into an instruction).
- unsigned ResultReg = createResultReg(Allocation[ArgNo].RC);
+ Register ResultReg = createResultReg(Allocation[ArgNo].RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(DstReg, getKillRegState(true));
@@ -1594,10 +1594,10 @@ bool MipsFastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
if (!isTypeSupported(RetTy, VT))
return false;
- unsigned SrcReg = getRegForValue(II->getOperand(0));
+ Register SrcReg = getRegForValue(II->getOperand(0));
if (SrcReg == 0)
return false;
- unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
+ Register DestReg = createResultReg(&Mips::GPR32RegClass);
if (DestReg == 0)
return false;
if (VT == MVT::i16) {
@@ -1607,9 +1607,9 @@ bool MipsFastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
return true;
} else {
unsigned TempReg[3];
- for (int i = 0; i < 3; i++) {
- TempReg[i] = createResultReg(&Mips::GPR32RegClass);
- if (TempReg[i] == 0)
+ for (unsigned &R : TempReg) {
+ R = createResultReg(&Mips::GPR32RegClass);
+ if (R == 0)
return false;
}
emitInst(Mips::SLL, TempReg[0]).addReg(SrcReg).addImm(8);
@@ -1621,16 +1621,16 @@ bool MipsFastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
}
} else if (VT == MVT::i32) {
if (Subtarget->hasMips32r2()) {
- unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
+ Register TempReg = createResultReg(&Mips::GPR32RegClass);
emitInst(Mips::WSBH, TempReg).addReg(SrcReg);
emitInst(Mips::ROTR, DestReg).addReg(TempReg).addImm(16);
updateValueMap(II, DestReg);
return true;
} else {
unsigned TempReg[8];
- for (int i = 0; i < 8; i++) {
- TempReg[i] = createResultReg(&Mips::GPR32RegClass);
- if (TempReg[i] == 0)
+ for (unsigned &R : TempReg) {
+ R = createResultReg(&Mips::GPR32RegClass);
+ if (R == 0)
return false;
}
@@ -1720,7 +1720,7 @@ bool MipsFastISel::selectRet(const Instruction *I) {
if (!VA.isRegLoc())
return false;
- unsigned Reg = getRegForValue(RV);
+ Register Reg = getRegForValue(RV);
if (Reg == 0)
return false;
@@ -1788,7 +1788,7 @@ bool MipsFastISel::selectTrunc(const Instruction *I) {
if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
return false;
- unsigned SrcReg = getRegForValue(Op);
+ Register SrcReg = getRegForValue(Op);
if (!SrcReg)
return false;
@@ -1804,7 +1804,7 @@ bool MipsFastISel::selectIntExt(const Instruction *I) {
Type *SrcTy = Src->getType();
bool isZExt = isa<ZExtInst>(I);
- unsigned SrcReg = getRegForValue(Src);
+ Register SrcReg = getRegForValue(Src);
if (!SrcReg)
return false;
@@ -1818,7 +1818,7 @@ bool MipsFastISel::selectIntExt(const Instruction *I) {
MVT SrcVT = SrcEVT.getSimpleVT();
MVT DestVT = DestEVT.getSimpleVT();
- unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
+ Register ResultReg = createResultReg(&Mips::GPR32RegClass);
if (!emitIntExt(SrcVT, SrcReg, DestVT, ResultReg, isZExt))
return false;
@@ -1839,7 +1839,7 @@ bool MipsFastISel::emitIntSExt32r1(MVT SrcVT, unsigned SrcReg, MVT DestVT,
ShiftAmt = 16;
break;
}
- unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
+ Register TempReg = createResultReg(&Mips::GPR32RegClass);
emitInst(Mips::SLL, TempReg).addReg(SrcReg).addImm(ShiftAmt);
emitInst(Mips::SRA, DestReg).addReg(TempReg).addImm(ShiftAmt);
return true;
@@ -1935,15 +1935,15 @@ bool MipsFastISel::selectDivRem(const Instruction *I, unsigned ISDOpcode) {
break;
}
- unsigned Src0Reg = getRegForValue(I->getOperand(0));
- unsigned Src1Reg = getRegForValue(I->getOperand(1));
+ Register Src0Reg = getRegForValue(I->getOperand(0));
+ Register Src1Reg = getRegForValue(I->getOperand(1));
if (!Src0Reg || !Src1Reg)
return false;
emitInst(DivOpc).addReg(Src0Reg).addReg(Src1Reg);
emitInst(Mips::TEQ).addReg(Src1Reg).addReg(Mips::ZERO).addImm(7);
- unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
+ Register ResultReg = createResultReg(&Mips::GPR32RegClass);
if (!ResultReg)
return false;
@@ -1962,19 +1962,19 @@ bool MipsFastISel::selectShift(const Instruction *I) {
if (!isTypeSupported(I->getType(), RetVT))
return false;
- unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
+ Register ResultReg = createResultReg(&Mips::GPR32RegClass);
if (!ResultReg)
return false;
unsigned Opcode = I->getOpcode();
const Value *Op0 = I->getOperand(0);
- unsigned Op0Reg = getRegForValue(Op0);
+ Register Op0Reg = getRegForValue(Op0);
if (!Op0Reg)
return false;
// If AShr or LShr, then we need to make sure the operand0 is sign extended.
if (Opcode == Instruction::AShr || Opcode == Instruction::LShr) {
- unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
+ Register TempReg = createResultReg(&Mips::GPR32RegClass);
if (!TempReg)
return false;
@@ -2008,7 +2008,7 @@ bool MipsFastISel::selectShift(const Instruction *I) {
return true;
}
- unsigned Op1Reg = getRegForValue(I->getOperand(1));
+ Register Op1Reg = getRegForValue(I->getOperand(1));
if (!Op1Reg)
return false;
@@ -2091,7 +2091,7 @@ bool MipsFastISel::fastSelectInstruction(const Instruction *I) {
unsigned MipsFastISel::getRegEnsuringSimpleIntegerWidening(const Value *V,
bool IsUnsigned) {
- unsigned VReg = getRegForValue(V);
+ Register VReg = getRegForValue(V);
if (VReg == 0)
return 0;
MVT VMVT = TLI.getValueType(DL, V->getType(), true).getSimpleVT();
@@ -2100,7 +2100,7 @@ unsigned MipsFastISel::getRegEnsuringSimpleIntegerWidening(const Value *V,
return 0;
if ((VMVT == MVT::i8) || (VMVT == MVT::i16)) {
- unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
+ Register TempReg = createResultReg(&Mips::GPR32RegClass);
if (!emitIntExt(VMVT, VReg, MVT::i32, TempReg, IsUnsigned))
return 0;
VReg = TempReg;
@@ -2112,7 +2112,7 @@ void MipsFastISel::simplifyAddress(Address &Addr) {
if (!isInt<16>(Addr.getOffset())) {
unsigned TempReg =
materialize32BitInt(Addr.getOffset(), &Mips::GPR32RegClass);
- unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
+ Register DestReg = createResultReg(&Mips::GPR32RegClass);
emitInst(Mips::ADDu, DestReg).addReg(TempReg).addReg(Addr.getReg());
Addr.setReg(DestReg);
Addr.setOffset(0);
@@ -2129,7 +2129,7 @@ unsigned MipsFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
// followed by another instruction that defines the same registers too.
// We can fix this by explicitly marking those registers as dead.
if (MachineInstOpcode == Mips::MUL) {
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
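
The MipsFastISel changes above are a mechanical switch from unsigned to llvm::Register for the values returned by createResultReg and getRegForValue. A rough sketch of why that is behaviour-preserving follows; this is a simplified stand-in, not the real llvm::Register definition, and the bit layout is an assumption:

// A thin wrapper around the raw unsigned register id that still converts back
// implicitly, so "if (!Reg)" checks and call sites taking unsigned keep
// working unchanged while the virtual/physical distinction gains named helpers.
class RegisterSketch {
  unsigned Reg = 0;
public:
  constexpr RegisterSketch(unsigned R = 0) : Reg(R) {}
  constexpr operator unsigned() const { return Reg; }            // implicit conversion back
  bool isVirtual() const { return (Reg & (1u << 31)) != 0; }     // assumption: top bit marks vregs
  bool isValid() const { return Reg != 0; }
};
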
diff --git a/contrib/llvm-project/llvm/lib/Target/Mips/MipsISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/Mips/MipsISelLowering.cpp
index 9377e83524e1..0c2e129b8f1f 100644
--- a/contrib/llvm-project/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -2523,7 +2523,7 @@ SDValue MipsTargetLowering::lowerRETURNADDR(SDValue Op,
MFI.setReturnAddressIsTaken(true);
// Return RA, which contains the return address. Mark it an implicit live-in.
- unsigned Reg = MF.addLiveIn(RA, getRegClassFor(VT));
+ Register Reg = MF.addLiveIn(RA, getRegClassFor(VT));
return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), Reg, VT);
}
@@ -3051,17 +3051,15 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
// stuck together.
SDValue InFlag;
- for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
- Chain = CLI.DAG.getCopyToReg(Chain, CLI.DL, RegsToPass[i].first,
- RegsToPass[i].second, InFlag);
+ for (auto &R : RegsToPass) {
+ Chain = CLI.DAG.getCopyToReg(Chain, CLI.DL, R.first, R.second, InFlag);
InFlag = Chain.getValue(1);
}
// Add argument registers to the end of the list so that they are
// known live into the call.
- for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
- Ops.push_back(CLI.DAG.getRegister(RegsToPass[i].first,
- RegsToPass[i].second.getValueType()));
+ for (auto &R : RegsToPass)
+ Ops.push_back(CLI.DAG.getRegister(R.first, R.second.getValueType()));
// Add a register mask operand representing the call-preserved registers.
const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
diff --git a/contrib/llvm-project/llvm/lib/Target/Mips/MipsInstructionSelector.cpp b/contrib/llvm-project/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
index 6d44ce2ab563..59f158688b16 100644
--- a/contrib/llvm-project/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
@@ -80,8 +80,8 @@ private:
MipsInstructionSelector::MipsInstructionSelector(
const MipsTargetMachine &TM, const MipsSubtarget &STI,
const MipsRegisterBankInfo &RBI)
- : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
- TRI(*STI.getRegisterInfo()), RBI(RBI),
+ : TM(TM), STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()),
+ RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "MipsGenGlobalISel.inc"
diff --git a/contrib/llvm-project/llvm/lib/Target/Mips/MipsMachineFunction.cpp b/contrib/llvm-project/llvm/lib/Target/Mips/MipsMachineFunction.cpp
index a7a2be30f58a..411a26e42713 100644
--- a/contrib/llvm-project/llvm/lib/Target/Mips/MipsMachineFunction.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Mips/MipsMachineFunction.cpp
@@ -148,14 +148,14 @@ void MipsFunctionInfo::initGlobalBaseReg(MachineFunction &MF) {
void MipsFunctionInfo::createEhDataRegsFI(MachineFunction &MF) {
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
- for (int I = 0; I < 4; ++I) {
+ for (int &I : EhDataRegFI) {
const TargetRegisterClass &RC =
static_cast<const MipsTargetMachine &>(MF.getTarget()).getABI().IsN64()
? Mips::GPR64RegClass
: Mips::GPR32RegClass;
- EhDataRegFI[I] = MF.getFrameInfo().CreateStackObject(
- TRI.getSpillSize(RC), TRI.getSpillAlign(RC), false);
+ I = MF.getFrameInfo().CreateStackObject(TRI.getSpillSize(RC),
+ TRI.getSpillAlign(RC), false);
}
}
@@ -167,9 +167,9 @@ void MipsFunctionInfo::createISRRegFI(MachineFunction &MF) {
const TargetRegisterClass &RC = Mips::GPR32RegClass;
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
- for (int I = 0; I < 2; ++I)
- ISRDataRegFI[I] = MF.getFrameInfo().CreateStackObject(
- TRI.getSpillSize(RC), TRI.getSpillAlign(RC), false);
+ for (int &I : ISRDataRegFI)
+ I = MF.getFrameInfo().CreateStackObject(TRI.getSpillSize(RC),
+ TRI.getSpillAlign(RC), false);
}
bool MipsFunctionInfo::isEhDataRegFI(int FI) const {
diff --git a/contrib/llvm-project/llvm/lib/Target/Mips/MipsMulMulBugPass.cpp b/contrib/llvm-project/llvm/lib/Target/Mips/MipsMulMulBugPass.cpp
new file mode 100644
index 000000000000..daaf1135c2b1
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/Mips/MipsMulMulBugPass.cpp
@@ -0,0 +1,136 @@
+//===- MipsMulMulBugPass.cpp - Mips VR4300 mulmul bugfix pass -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Early revisions of the VR4300 have a hardware bug where two consecutive
+// multiplications can produce an incorrect result in the second multiply.
+//
+// This pass scans for mul instructions in each basic block and inserts
+// a nop whenever the following conditions are met:
+//
+// - The current instruction is a single or double-precision floating-point
+// mul instruction.
+// - The next instruction is either a mul instruction (any kind)
+// or a branch instruction.
+//===----------------------------------------------------------------------===//
+
+#include "Mips.h"
+#include "MipsInstrInfo.h"
+#include "MipsSubtarget.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetMachine.h"
+
+#define DEBUG_TYPE "mips-vr4300-mulmul-fix"
+
+using namespace llvm;
+
+namespace {
+
+class MipsMulMulBugFix : public MachineFunctionPass {
+public:
+ MipsMulMulBugFix() : MachineFunctionPass(ID) {
+ initializeMipsMulMulBugFixPass(*PassRegistry::getPassRegistry());
+ }
+
+ StringRef getPassName() const override { return "Mips VR4300 mulmul bugfix"; }
+
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::NoVRegs);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ static char ID;
+
+private:
+ bool fixMulMulBB(MachineBasicBlock &MBB, const MipsInstrInfo &MipsII);
+};
+
+} // namespace
+
+INITIALIZE_PASS(MipsMulMulBugFix, "mips-vr4300-mulmul-fix",
+ "Mips VR4300 mulmul bugfix", false, false)
+
+char MipsMulMulBugFix::ID = 0;
+
+bool MipsMulMulBugFix::runOnMachineFunction(MachineFunction &MF) {
+ const MipsInstrInfo &MipsII =
+ *static_cast<const MipsInstrInfo *>(MF.getSubtarget().getInstrInfo());
+
+ bool Modified = false;
+
+ for (auto &MBB : MF)
+ Modified |= fixMulMulBB(MBB, MipsII);
+
+ return Modified;
+}
+
+static bool isFirstMul(const MachineInstr &MI) {
+ switch (MI.getOpcode()) {
+ case Mips::FMUL_S:
+ case Mips::FMUL_D:
+ case Mips::FMUL_D32:
+ case Mips::FMUL_D64:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool isSecondMulOrBranch(const MachineInstr &MI) {
+ if (MI.isBranch() || MI.isIndirectBranch() || MI.isCall())
+ return true;
+
+ switch (MI.getOpcode()) {
+ case Mips::MUL:
+ case Mips::FMUL_S:
+ case Mips::FMUL_D:
+ case Mips::FMUL_D32:
+ case Mips::FMUL_D64:
+ case Mips::MULT:
+ case Mips::MULTu:
+ case Mips::DMULT:
+ case Mips::DMULTu:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool MipsMulMulBugFix::fixMulMulBB(MachineBasicBlock &MBB,
+ const MipsInstrInfo &MipsII) {
+ bool Modified = false;
+
+ MachineBasicBlock::instr_iterator NextMII;
+
+ // Iterate through the instructions in the basic block
+ for (MachineBasicBlock::instr_iterator MII = MBB.instr_begin(),
+ E = MBB.instr_end();
+ MII != E; MII = NextMII) {
+
+ NextMII = next_nodbg(MII, E);
+
+ // Trigger when the current instruction is a mul and the next instruction
+ // is either a mul or a branch, in case the branch target starts with a mul.
+ if (NextMII != E && isFirstMul(*MII) && isSecondMulOrBranch(*NextMII)) {
+ LLVM_DEBUG(dbgs() << "Found mulmul!\n");
+
+ const MCInstrDesc &NewMCID = MipsII.get(Mips::NOP);
+ BuildMI(MBB, NextMII, DebugLoc(), NewMCID);
+ Modified = true;
+ }
+ }
+
+ return Modified;
+}
+
+FunctionPass *llvm::createMipsMulMulBugPass() { return new MipsMulMulBugFix(); }
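
To make the new pass's behaviour concrete, here is a small standalone C++ sketch of the scan fixMulMulBB performs, using plain strings instead of MachineInstrs; it is illustrative only and not part of the commit. The pass itself only runs when the hidden -mfix4300 option, added to MipsTargetMachine.cpp further down in this diff, is passed to llc.

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

static bool isFpMul(const std::string &Op) {
  return Op == "mul.s" || Op == "mul.d"; // FP single/double multiply
}

static bool isMulOrBranch(const std::string &Op) {
  return isFpMul(Op) || Op == "mult" || Op == "multu" || Op == "b";
}

int main() {
  std::vector<std::string> Block = {"mul.s", "mult", "addu"};
  std::vector<std::string> Fixed;
  for (std::size_t I = 0; I < Block.size(); ++I) {
    Fixed.push_back(Block[I]);
    // Insert a nop between an FP multiply and a following multiply or branch,
    // which is the VR4300 hazard the pass above works around.
    if (I + 1 < Block.size() && isFpMul(Block[I]) && isMulOrBranch(Block[I + 1]))
      Fixed.push_back("nop");
  }
  for (const std::string &Op : Fixed)
    std::cout << Op << '\n'; // prints: mul.s, nop, mult, addu
}
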
diff --git a/contrib/llvm-project/llvm/lib/Target/Mips/MipsRegisterInfo.cpp b/contrib/llvm-project/llvm/lib/Target/Mips/MipsRegisterInfo.cpp
index 7cba3118cd62..390ab9d22024 100644
--- a/contrib/llvm-project/llvm/lib/Target/Mips/MipsRegisterInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Mips/MipsRegisterInfo.cpp
@@ -159,8 +159,8 @@ getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
const MipsSubtarget &Subtarget = MF.getSubtarget<MipsSubtarget>();
- for (unsigned I = 0; I < array_lengthof(ReservedGPR32); ++I)
- Reserved.set(ReservedGPR32[I]);
+ for (MCPhysReg R : ReservedGPR32)
+ Reserved.set(R);
// Reserve registers for the NaCl sandbox.
if (Subtarget.isTargetNaCl()) {
@@ -169,8 +169,8 @@ getReservedRegs(const MachineFunction &MF) const {
Reserved.set(Mips::T8); // Reserved for thread pointer.
}
- for (unsigned I = 0; I < array_lengthof(ReservedGPR64); ++I)
- Reserved.set(ReservedGPR64[I]);
+ for (MCPhysReg R : ReservedGPR64)
+ Reserved.set(R);
// For mno-abicalls, GP is a program invariant!
if (!Subtarget.isABICalls()) {
diff --git a/contrib/llvm-project/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp b/contrib/llvm-project/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp
index 193d071447ff..7ee2ddf3605f 100644
--- a/contrib/llvm-project/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp
@@ -454,7 +454,7 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF,
// directives.
for (const CalleeSavedInfo &I : CSI) {
int64_t Offset = MFI.getObjectOffset(I.getFrameIdx());
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
// If Reg is a double precision register, emit two cfa_offsets,
// one for each of the paired single precision registers.
@@ -801,7 +801,7 @@ bool MipsSEFrameLowering::spillCalleeSavedRegisters(
// method MipsTargetLowering::lowerRETURNADDR.
// It's killed at the spill, unless the register is RA and return address
// is taken.
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
bool IsRAAndRetAddrIsTaken = (Reg == Mips::RA || Reg == Mips::RA_64)
&& MF->getFrameInfo().isReturnAddressTaken();
if (!IsRAAndRetAddrIsTaken)
diff --git a/contrib/llvm-project/llvm/lib/Target/Mips/MipsSEISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
index 40b215a8204c..346ebe9664fc 100644
--- a/contrib/llvm-project/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
@@ -85,18 +85,18 @@ MipsSETargetLowering::MipsSETargetLowering(const MipsTargetMachine &TM,
if (Subtarget.hasDSP()) {
MVT::SimpleValueType VecTys[2] = {MVT::v2i16, MVT::v4i8};
- for (unsigned i = 0; i < array_lengthof(VecTys); ++i) {
- addRegisterClass(VecTys[i], &Mips::DSPRRegClass);
+ for (const auto &VecTy : VecTys) {
+ addRegisterClass(VecTy, &Mips::DSPRRegClass);
// Expand all builtin opcodes.
for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
- setOperationAction(Opc, VecTys[i], Expand);
+ setOperationAction(Opc, VecTy, Expand);
- setOperationAction(ISD::ADD, VecTys[i], Legal);
- setOperationAction(ISD::SUB, VecTys[i], Legal);
- setOperationAction(ISD::LOAD, VecTys[i], Legal);
- setOperationAction(ISD::STORE, VecTys[i], Legal);
- setOperationAction(ISD::BITCAST, VecTys[i], Legal);
+ setOperationAction(ISD::ADD, VecTy, Legal);
+ setOperationAction(ISD::SUB, VecTy, Legal);
+ setOperationAction(ISD::LOAD, VecTy, Legal);
+ setOperationAction(ISD::STORE, VecTy, Legal);
+ setOperationAction(ISD::BITCAST, VecTy, Legal);
}
setTargetDAGCombine(ISD::SHL);
@@ -2931,7 +2931,7 @@ static SDValue lowerVECTOR_SHUFFLE_PCKOD(SDValue Op, EVT ResTy,
// operand is unused and can be replaced with anything. We choose to replace it
// with the used operand since this reduces the number of instructions overall.
static SDValue lowerVECTOR_SHUFFLE_VSHF(SDValue Op, EVT ResTy,
- SmallVector<int, 16> Indices,
+ const SmallVector<int, 16> &Indices,
SelectionDAG &DAG) {
SmallVector<SDValue, 16> Ops;
SDValue Op0;
@@ -2953,9 +2953,8 @@ static SDValue lowerVECTOR_SHUFFLE_VSHF(SDValue Op, EVT ResTy,
Using2ndVec = true;
}
- for (SmallVector<int, 16>::iterator I = Indices.begin(); I != Indices.end();
- ++I)
- Ops.push_back(DAG.getTargetConstant(*I, DL, MaskEltTy));
+ for (int Idx : Indices)
+ Ops.push_back(DAG.getTargetConstant(Idx, DL, MaskEltTy));
SDValue MaskVec = DAG.getBuildVector(MaskVecTy, DL, Ops);
diff --git a/contrib/llvm-project/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp b/contrib/llvm-project/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp
index b05e9ad827c4..d6481793ef49 100644
--- a/contrib/llvm-project/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp
@@ -38,7 +38,7 @@ using namespace llvm;
#define DEBUG_TYPE "mips-reg-info"
-MipsSERegisterInfo::MipsSERegisterInfo() : MipsRegisterInfo() {}
+MipsSERegisterInfo::MipsSERegisterInfo() {}
bool MipsSERegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
diff --git a/contrib/llvm-project/llvm/lib/Target/Mips/MipsTargetMachine.cpp b/contrib/llvm-project/llvm/lib/Target/Mips/MipsTargetMachine.cpp
index 8de3c9fd25bd..f9f662a00117 100644
--- a/contrib/llvm-project/llvm/lib/Target/Mips/MipsTargetMachine.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Mips/MipsTargetMachine.cpp
@@ -45,6 +45,10 @@ using namespace llvm;
#define DEBUG_TYPE "mips"
+static cl::opt<bool>
+ EnableMulMulFix("mfix4300", cl::init(false),
+ cl::desc("Enable the VR4300 mulmul bug fix."), cl::Hidden);
+
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeMipsTarget() {
// Register the target.
RegisterTargetMachine<MipsebTargetMachine> X(getTheMipsTarget());
@@ -58,6 +62,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeMipsTarget() {
initializeMipsBranchExpansionPass(*PR);
initializeMicroMipsSizeReducePass(*PR);
initializeMipsPreLegalizerCombinerPass(*PR);
+ initializeMipsMulMulBugFixPass(*PR);
}
static std::string computeDataLayout(const Triple &TT, StringRef CPU,
@@ -292,6 +297,11 @@ void MipsPassConfig::addPreEmitPass() {
// instructions which can be remapped to a 16 bit instruction.
addPass(createMicroMipsSizeReducePass());
+ // This pass inserts a nop instruction between two back-to-back multiplication
+ // instructions when the "mfix4300" flag is passed.
+ if (EnableMulMulFix)
+ addPass(createMipsMulMulBugPass());
+
// The delay slot filler pass can potentially create forbidden slot hazards
// for MIPSR6 and therefore it should go before MipsBranchExpansion pass.
addPass(createMipsDelaySlotFillerPass());
diff --git a/contrib/llvm-project/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp b/contrib/llvm-project/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
index 82d332ab3f08..da0cbb32659c 100644
--- a/contrib/llvm-project/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
@@ -108,6 +108,10 @@ void NVPTXInstPrinter::printCvtMode(const MCInst *MI, int OpNum, raw_ostream &O,
// SAT flag
if (Imm & NVPTX::PTXCvtMode::SAT_FLAG)
O << ".sat";
+ } else if (strcmp(Modifier, "relu") == 0) {
+ // RELU flag
+ if (Imm & NVPTX::PTXCvtMode::RELU_FLAG)
+ O << ".relu";
} else if (strcmp(Modifier, "base") == 0) {
// Default operand
switch (Imm & NVPTX::PTXCvtMode::BASE_MASK) {
@@ -139,6 +143,9 @@ void NVPTXInstPrinter::printCvtMode(const MCInst *MI, int OpNum, raw_ostream &O,
case NVPTX::PTXCvtMode::RP:
O << ".rp";
break;
+ case NVPTX::PTXCvtMode::RNA:
+ O << ".rna";
+ break;
}
} else {
llvm_unreachable("Invalid conversion modifier");
diff --git a/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTX.h b/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTX.h
index c2fd090da084..41e9f375e536 100644
--- a/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTX.h
+++ b/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTX.h
@@ -137,10 +137,12 @@ enum CvtMode {
RZ,
RM,
RP,
+ RNA,
BASE_MASK = 0x0F,
FTZ_FLAG = 0x10,
- SAT_FLAG = 0x20
+ SAT_FLAG = 0x20,
+ RELU_FLAG = 0x40
};
}
diff --git a/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
index 16add48d4602..3a59306c4998 100644
--- a/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -1214,9 +1214,9 @@ void NVPTXAsmPrinter::emitDemotedVars(const Function *f, raw_ostream &O) {
std::vector<const GlobalVariable *> &gvars = localDecls[f];
- for (unsigned i = 0, e = gvars.size(); i != e; ++i) {
+ for (const GlobalVariable *GV : gvars) {
O << "\t// demoted variable\n\t";
- printModuleLevelGV(gvars[i], O, true);
+ printModuleLevelGV(GV, O, true);
}
}
@@ -1454,7 +1454,7 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, raw_ostream &O) {
if (static_cast<NVPTXTargetMachine &>(TM).getDrvInterface() !=
NVPTX::CUDA) {
- Type *ETy = PTy->getElementType();
+ Type *ETy = PTy->getPointerElementType();
int addrSpace = PTy->getAddressSpace();
switch (addrSpace) {
default:
@@ -1514,7 +1514,7 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, raw_ostream &O) {
// param has byVal attribute. So should be a pointer
auto *PTy = dyn_cast<PointerType>(Ty);
assert(PTy && "Param with byval attribute should be a pointer type");
- Type *ETy = PTy->getElementType();
+ Type *ETy = PTy->getPointerElementType();
if (isABI || isKernelFunc) {
// Just print .param .align <a> .b8 .param[size];
@@ -1613,7 +1613,7 @@ void NVPTXAsmPrinter::setAndEmitFunctionVirtualRegisters(
// We use the per class virtual register number in the ptx output.
unsigned int numVRs = MRI->getNumVirtRegs();
for (unsigned i = 0; i < numVRs; i++) {
- unsigned int vr = Register::index2VirtReg(i);
+ Register vr = Register::index2VirtReg(i);
const TargetRegisterClass *RC = MRI->getRegClass(vr);
DenseMap<unsigned, unsigned> &regmap = VRegMapping[RC];
int n = regmap.size();
diff --git a/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp b/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp
index a9a5eae42c1d..888fc8ffac2c 100644
--- a/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp
@@ -96,20 +96,18 @@ bool GenericToNVVM::runOnModule(Module &M) {
// Walk through the instructions in function definitions, and replace any use
// of original global variables in GVMap with a use of the corresponding
// copies in GVMap. If necessary, promote constants to instructions.
- for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
- if (I->isDeclaration()) {
+ for (Function &F : M) {
+ if (F.isDeclaration()) {
continue;
}
- IRBuilder<> Builder(I->getEntryBlock().getFirstNonPHIOrDbg());
- for (Function::iterator BBI = I->begin(), BBE = I->end(); BBI != BBE;
- ++BBI) {
- for (BasicBlock::iterator II = BBI->begin(), IE = BBI->end(); II != IE;
- ++II) {
- for (unsigned i = 0, e = II->getNumOperands(); i < e; ++i) {
- Value *Operand = II->getOperand(i);
+ IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
+ for (BasicBlock &BB : F) {
+ for (Instruction &II : BB) {
+ for (unsigned i = 0, e = II.getNumOperands(); i < e; ++i) {
+ Value *Operand = II.getOperand(i);
if (isa<Constant>(Operand)) {
- II->setOperand(
- i, remapConstant(&M, &*I, cast<Constant>(Operand), Builder));
+ II.setOperand(
+ i, remapConstant(&M, &F, cast<Constant>(Operand), Builder));
}
}
}
diff --git a/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index e2f6b69fc530..eac237bb27bb 100644
--- a/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -553,17 +553,30 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
// These map to corresponding instructions for f32/f64. f16 must be
// promoted to f32. v2f16 is expanded to f16, which is then promoted
// to f32.
- for (const auto &Op : {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS,
- ISD::FABS, ISD::FMINNUM, ISD::FMAXNUM}) {
+ for (const auto &Op :
+ {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS, ISD::FABS}) {
setOperationAction(Op, MVT::f16, Promote);
setOperationAction(Op, MVT::f32, Legal);
setOperationAction(Op, MVT::f64, Legal);
setOperationAction(Op, MVT::v2f16, Expand);
}
- setOperationAction(ISD::FMINNUM, MVT::f16, Promote);
- setOperationAction(ISD::FMAXNUM, MVT::f16, Promote);
- setOperationAction(ISD::FMINIMUM, MVT::f16, Promote);
- setOperationAction(ISD::FMAXIMUM, MVT::f16, Promote);
+ // max.f16, max.f16x2 and max.NaN are supported on sm_80+.
+ auto GetMinMaxAction = [&](LegalizeAction NotSm80Action) {
+ bool IsAtLeastSm80 = STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70;
+ return IsAtLeastSm80 ? Legal : NotSm80Action;
+ };
+ for (const auto &Op : {ISD::FMINNUM, ISD::FMAXNUM}) {
+ setFP16OperationAction(Op, MVT::f16, GetMinMaxAction(Promote), Promote);
+ setOperationAction(Op, MVT::f32, Legal);
+ setOperationAction(Op, MVT::f64, Legal);
+ setFP16OperationAction(Op, MVT::v2f16, GetMinMaxAction(Expand), Expand);
+ }
+ for (const auto &Op : {ISD::FMINIMUM, ISD::FMAXIMUM}) {
+ setFP16OperationAction(Op, MVT::f16, GetMinMaxAction(Expand), Expand);
+ setOperationAction(Op, MVT::f32, GetMinMaxAction(Expand));
+ setOperationAction(Op, MVT::f64, GetMinMaxAction(Expand));
+ setFP16OperationAction(Op, MVT::v2f16, GetMinMaxAction(Expand), Expand);
+ }
// No FEXP2, FLOG2. The PTX ex2 and log2 functions are always approximate.
// No FPOW or FREM in PTX.
@@ -1341,7 +1354,7 @@ std::string NVPTXTargetLowering::getPrototype(
}
auto *PTy = dyn_cast<PointerType>(Ty);
assert(PTy && "Param with byval attribute should be a pointer type");
- Type *ETy = PTy->getElementType();
+ Type *ETy = PTy->getPointerElementType();
Align align = Outs[OIdx].Flags.getNonZeroByValAlign();
unsigned sz = DL.getTypeAllocSize(ETy);
@@ -1564,7 +1577,8 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVector<uint64_t, 16> Offsets;
auto *PTy = dyn_cast<PointerType>(Args[i].Ty);
assert(PTy && "Type of a byval parameter should be pointer");
- ComputePTXValueVTs(*this, DL, PTy->getElementType(), VTs, &Offsets, 0);
+ ComputePTXValueVTs(*this, DL, PTy->getPointerElementType(), VTs, &Offsets,
+ 0);
// declare .param .align <align> .b8 .param<n>[<size>];
unsigned sz = Outs[OIdx].Flags.getByValSize();
@@ -2434,7 +2448,7 @@ static bool isImageOrSamplerVal(const Value *arg, const Module *context) {
if (!context)
return false;
- auto *STy = dyn_cast<StructType>(PTy->getElementType());
+ auto *STy = dyn_cast<StructType>(PTy->getPointerElementType());
if (!STy || STy->isLiteral())
return false;
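
The NVPTXISelLowering change above gates f16 min/max legality on the target. A minimal restatement of that decision as standalone C++, using the same sm_80 / PTX 7.0 thresholds as the lambda above (illustrative only):

enum class Action { Legal, Promote, Expand };

// FMINNUM/FMAXNUM (NaN-ignoring) fall back to Promote on older targets;
// FMINIMUM/FMAXIMUM (NaN-propagating, needing min.NaN/max.NaN) fall back to Expand.
Action f16MinMaxAction(unsigned SmVersion, unsigned PtxVersion,
                       bool PropagatesNaN) {
  bool AtLeastSm80 = SmVersion >= 80 && PtxVersion >= 70;
  if (AtLeastSm80)
    return Action::Legal; // min.f16 / min.NaN.f16 are available
  return PropagatesNaN ? Action::Expand : Action::Promote;
}
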
diff --git a/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXImageOptimizer.cpp b/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXImageOptimizer.cpp
index fc0d5cc6fbfa..eeedce2d99cb 100644
--- a/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXImageOptimizer.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXImageOptimizer.cpp
@@ -57,12 +57,9 @@ bool NVPTXImageOptimizer::runOnFunction(Function &F) {
InstrToDelete.clear();
// Look for call instructions in the function
- for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE;
- ++BI) {
- for (BasicBlock::iterator I = (*BI).begin(), E = (*BI).end();
- I != E; ++I) {
- Instruction &Instr = *I;
- if (CallInst *CI = dyn_cast<CallInst>(I)) {
+ for (BasicBlock &BB : F) {
+ for (Instruction &Instr : BB) {
+ if (CallInst *CI = dyn_cast<CallInst>(&Instr)) {
Function *CalledF = CI->getCalledFunction();
if (CalledF && CalledF->isIntrinsic()) {
// This is an intrinsic function call, check if it's an istypep
@@ -84,8 +81,8 @@ bool NVPTXImageOptimizer::runOnFunction(Function &F) {
}
// Delete any istypep instances we replaced in the IR
- for (unsigned i = 0, e = InstrToDelete.size(); i != e; ++i)
- InstrToDelete[i]->eraseFromParent();
+ for (Instruction *I : InstrToDelete)
+ I->eraseFromParent();
return Changed;
}
diff --git a/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index 96386af569de..22e200e77831 100644
--- a/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -48,6 +48,7 @@ def CvtRN : PatLeaf<(i32 0x5)>;
def CvtRZ : PatLeaf<(i32 0x6)>;
def CvtRM : PatLeaf<(i32 0x7)>;
def CvtRP : PatLeaf<(i32 0x8)>;
+def CvtRNA : PatLeaf<(i32 0x9)>;
def CvtNONE_FTZ : PatLeaf<(i32 0x10)>;
def CvtRNI_FTZ : PatLeaf<(i32 0x11)>;
@@ -62,6 +63,10 @@ def CvtRP_FTZ : PatLeaf<(i32 0x18)>;
def CvtSAT : PatLeaf<(i32 0x20)>;
def CvtSAT_FTZ : PatLeaf<(i32 0x30)>;
+def CvtNONE_RELU : PatLeaf<(i32 0x40)>;
+def CvtRN_RELU : PatLeaf<(i32 0x45)>;
+def CvtRZ_RELU : PatLeaf<(i32 0x46)>;
+
def CvtMode : Operand<i32> {
let PrintMethod = "printCvtMode";
}
@@ -249,6 +254,32 @@ multiclass F3<string OpcStr, SDNode OpNode> {
(ins Float32Regs:$a, f32imm:$b),
!strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
[(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>;
+
+ def f16rr_ftz :
+ NVPTXInst<(outs Float16Regs:$dst),
+ (ins Float16Regs:$a, Float16Regs:$b),
+ !strconcat(OpcStr, ".ftz.f16 \t$dst, $a, $b;"),
+ [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
+ Requires<[useFP16Math, doF32FTZ]>;
+ def f16rr :
+ NVPTXInst<(outs Float16Regs:$dst),
+ (ins Float16Regs:$a, Float16Regs:$b),
+ !strconcat(OpcStr, ".f16 \t$dst, $a, $b;"),
+ [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
+ Requires<[useFP16Math]>;
+
+ def f16x2rr_ftz :
+ NVPTXInst<(outs Float16x2Regs:$dst),
+ (ins Float16x2Regs:$a, Float16x2Regs:$b),
+ !strconcat(OpcStr, ".ftz.f16x2 \t$dst, $a, $b;"),
+ [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
+ Requires<[useFP16Math, doF32FTZ]>;
+ def f16x2rr :
+ NVPTXInst<(outs Float16x2Regs:$dst),
+ (ins Float16x2Regs:$a, Float16x2Regs:$b),
+ !strconcat(OpcStr, ".f16x2 \t$dst, $a, $b;"),
+ [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
+ Requires<[useFP16Math]>;
}
// Template for instructions which take three FP args. The
@@ -500,6 +531,29 @@ let hasSideEffects = false in {
"cvt.s64.s16 \t$dst, $src;", []>;
def CVT_INREG_s64_s32 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
"cvt.s64.s32 \t$dst, $src;", []>;
+
+multiclass CVT_FROM_FLOAT_SM80<string FromName, RegisterClass RC> {
+ def _f32 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Float32Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:relu}.",
+ FromName, ".f32 \t$dst, $src;"), []>,
+ Requires<[hasPTX70, hasSM80]>;
+ }
+
+ defm CVT_bf16 : CVT_FROM_FLOAT_SM80<"bf16", Int16Regs>;
+
+ multiclass CVT_FROM_FLOAT_V2_SM80<string FromName, RegisterClass RC> {
+ def _f32 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Float32Regs:$src1, Float32Regs:$src2, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:relu}.",
+ FromName, ".f32 \t$dst, $src1, $src2;"), []>,
+ Requires<[hasPTX70, hasSM80]>;
+ }
+
+ defm CVT_f16x2 : CVT_FROM_FLOAT_V2_SM80<"f16x2", Float16x2Regs>;
+ defm CVT_bf16x2 : CVT_FROM_FLOAT_V2_SM80<"bf16x2", Int32Regs>;
}
//-----------------------------------
@@ -842,6 +896,8 @@ defm FMUL : F3_fma_component<"mul", fmul>;
defm FMIN : F3<"min", fminnum>;
defm FMAX : F3<"max", fmaxnum>;
+defm FMINNAN : F3<"min.NaN", fminimum>;
+defm FMAXNAN : F3<"max.NaN", fmaximum>;
defm FABS : F2<"abs", fabs>;
defm FNEG : F2<"neg", fneg>;
diff --git a/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index 511cd875ac55..ec069a0a02ae 100644
--- a/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -1046,6 +1046,38 @@ def : Pat<(int_nvvm_ui2f_rm Int32Regs:$a),
def : Pat<(int_nvvm_ui2f_rp Int32Regs:$a),
(CVT_f32_u32 Int32Regs:$a, CvtRP)>;
+def : Pat<(int_nvvm_ff2bf16x2_rn Float32Regs:$a, Float32Regs:$b),
+ (CVT_bf16x2_f32 Float32Regs:$a, Float32Regs:$b, CvtRN)>;
+def : Pat<(int_nvvm_ff2bf16x2_rn_relu Float32Regs:$a, Float32Regs:$b),
+ (CVT_bf16x2_f32 Float32Regs:$a, Float32Regs:$b, CvtRN_RELU)>;
+def : Pat<(int_nvvm_ff2bf16x2_rz Float32Regs:$a, Float32Regs:$b),
+ (CVT_bf16x2_f32 Float32Regs:$a, Float32Regs:$b, CvtRZ)>;
+def : Pat<(int_nvvm_ff2bf16x2_rz_relu Float32Regs:$a, Float32Regs:$b),
+ (CVT_bf16x2_f32 Float32Regs:$a, Float32Regs:$b, CvtRZ_RELU)>;
+
+def : Pat<(int_nvvm_ff2f16x2_rn Float32Regs:$a, Float32Regs:$b),
+ (CVT_f16x2_f32 Float32Regs:$a, Float32Regs:$b, CvtRN)>;
+def : Pat<(int_nvvm_ff2f16x2_rn_relu Float32Regs:$a, Float32Regs:$b),
+ (CVT_f16x2_f32 Float32Regs:$a, Float32Regs:$b, CvtRN_RELU)>;
+def : Pat<(int_nvvm_ff2f16x2_rz Float32Regs:$a, Float32Regs:$b),
+ (CVT_f16x2_f32 Float32Regs:$a, Float32Regs:$b, CvtRZ)>;
+def : Pat<(int_nvvm_ff2f16x2_rz_relu Float32Regs:$a, Float32Regs:$b),
+ (CVT_f16x2_f32 Float32Regs:$a, Float32Regs:$b, CvtRZ_RELU)>;
+
+def : Pat<(int_nvvm_f2bf16_rn Float32Regs:$a),
+ (CVT_bf16_f32 Float32Regs:$a, CvtRN)>;
+def : Pat<(int_nvvm_f2bf16_rn_relu Float32Regs:$a),
+ (CVT_bf16_f32 Float32Regs:$a, CvtRN_RELU)>;
+def : Pat<(int_nvvm_f2bf16_rz Float32Regs:$a),
+ (CVT_bf16_f32 Float32Regs:$a, CvtRZ)>;
+def : Pat<(int_nvvm_f2bf16_rz_relu Float32Regs:$a),
+ (CVT_bf16_f32 Float32Regs:$a, CvtRZ_RELU)>;
+
+def CVT_tf32_f32 :
+ NVPTXInst<(outs Int32Regs:$dest), (ins Float32Regs:$a),
+ "cvt.rna.tf32.f32 \t$dest, $a;",
+ [(set Int32Regs:$dest, (int_nvvm_f2tf32_rna Float32Regs:$a))]>;
+
def INT_NVVM_LOHI_I2D : F_MATH_2<"mov.b64 \t$dst, {{$src0, $src1}};",
Float64Regs, Int32Regs, Int32Regs, int_nvvm_lohi_i2d>;
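
The new patterns above map the nvvm conversion intrinsics onto the sm_80 cvt forms added earlier in this diff: for example, llvm.nvvm.f2bf16.rn selects to cvt.rn.bf16.f32, and llvm.nvvm.f2tf32.rna to cvt.rna.tf32.f32. A hedged sketch of how a front end or pass might emit one of them with IRBuilder follows; the i16 result type is inferred from the Int16Regs pattern above, and the helper name is made up for illustration.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicsNVPTX.h"

using namespace llvm;

// Illustrative helper: emit @llvm.nvvm.f2bf16.rn(float) for a float value,
// which the pattern above selects to cvt.rn.bf16.f32 on sm_80+.
static Value *emitF32ToBF16RN(IRBuilder<> &Builder, Value *F32Val) {
  return Builder.CreateIntrinsic(Intrinsic::nvvm_f2bf16_rn,
                                 /*Types=*/{}, /*Args=*/{F32Val});
}
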
diff --git a/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp b/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
index 6cf59d285e8d..f655f25602bc 100644
--- a/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
@@ -66,10 +66,9 @@ bool NVPTXLowerAggrCopies::runOnFunction(Function &F) {
getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
// Collect all aggregate loads and mem* calls.
- for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
- for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE;
- ++II) {
- if (LoadInst *LI = dyn_cast<LoadInst>(II)) {
+ for (BasicBlock &BB : F) {
+ for (Instruction &I : BB) {
+ if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
if (!LI->hasOneUse())
continue;
@@ -81,7 +80,7 @@ bool NVPTXLowerAggrCopies::runOnFunction(Function &F) {
continue;
AggrLoads.push_back(LI);
}
- } else if (MemIntrinsic *IntrCall = dyn_cast<MemIntrinsic>(II)) {
+ } else if (MemIntrinsic *IntrCall = dyn_cast<MemIntrinsic>(&I)) {
// Convert intrinsic calls with variable size or with constant size
// larger than the MaxAggrCopySize threshold.
if (ConstantInt *LenCI = dyn_cast<ConstantInt>(IntrCall->getLength())) {
diff --git a/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXLowerArgs.cpp b/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXLowerArgs.cpp
index ddb7f097fe68..67aa49132016 100644
--- a/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXLowerArgs.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXLowerArgs.cpp
@@ -233,7 +233,7 @@ void NVPTXLowerArgs::handleByValParam(Argument *Arg) {
assert(PType && "Expecting pointer type in handleByValParam");
- Type *StructType = PType->getElementType();
+ Type *StructType = PType->getPointerElementType();
auto IsALoadChain = [&](Value *Start) {
SmallVector<Value *, 16> ValuesToCheck = {Start};
diff --git a/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXSubtarget.cpp b/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXSubtarget.cpp
index 05c20369abf4..5a6440c91fca 100644
--- a/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXSubtarget.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXSubtarget.cpp
@@ -49,8 +49,8 @@ NVPTXSubtarget::NVPTXSubtarget(const Triple &TT, const std::string &CPU,
const std::string &FS,
const NVPTXTargetMachine &TM)
: NVPTXGenSubtargetInfo(TT, CPU, /*TuneCPU*/ CPU, FS), PTXVersion(0),
- SmVersion(20), TM(TM), InstrInfo(),
- TLInfo(TM, initializeSubtargetDependencies(CPU, FS)), FrameLowering() {}
+ SmVersion(20), TM(TM),
+ TLInfo(TM, initializeSubtargetDependencies(CPU, FS)) {}
bool NVPTXSubtarget::hasImageHandles() const {
// Enable handles for Kepler+, where CUDA supports indirect surfaces and
diff --git a/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXTargetObjectFile.h b/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXTargetObjectFile.h
index 366d92a5a805..4645671a0cd8 100644
--- a/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXTargetObjectFile.h
+++ b/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXTargetObjectFile.h
@@ -17,7 +17,7 @@ namespace llvm {
class NVPTXTargetObjectFile : public TargetLoweringObjectFile {
public:
- NVPTXTargetObjectFile() : TargetLoweringObjectFile() {}
+ NVPTXTargetObjectFile() {}
~NVPTXTargetObjectFile() override;
diff --git a/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXUtilities.cpp b/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXUtilities.cpp
index 74d129d330f3..2d6d72777db2 100644
--- a/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXUtilities.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXUtilities.cpp
@@ -286,8 +286,7 @@ bool getAlign(const Function &F, unsigned index, unsigned &align) {
bool retval = findAllNVVMAnnotation(&F, "align", Vs);
if (!retval)
return false;
- for (int i = 0, e = Vs.size(); i < e; i++) {
- unsigned v = Vs[i];
+ for (unsigned v : Vs) {
if ((v >> 16) == index) {
align = v & 0xFFFF;
return true;
diff --git a/contrib/llvm-project/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp b/contrib/llvm-project/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
index ded922329ebf..715cff72dcab 100644
--- a/contrib/llvm-project/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
@@ -121,6 +121,7 @@ class PPCAsmParser : public MCTargetAsmParser {
bool ParseDirectiveMachine(SMLoc L);
bool ParseDirectiveAbiVersion(SMLoc L);
bool ParseDirectiveLocalEntry(SMLoc L);
+ bool ParseGNUAttribute(SMLoc L);
bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
OperandVector &Operands, MCStreamer &Out,
@@ -201,7 +202,8 @@ struct PPCOperand : public MCParsedAsmOperand {
struct TLSRegOp TLSReg;
};
- PPCOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
+ PPCOperand(KindTy K) : Kind(K) {}
+
public:
PPCOperand(const PPCOperand &o) : MCParsedAsmOperand() {
Kind = o.Kind;
@@ -1604,6 +1606,8 @@ bool PPCAsmParser::ParseDirective(AsmToken DirectiveID) {
ParseDirectiveAbiVersion(DirectiveID.getLoc());
else if (IDVal == ".localentry")
ParseDirectiveLocalEntry(DirectiveID.getLoc());
+ else if (IDVal.startswith(".gnu_attribute"))
+ ParseGNUAttribute(DirectiveID.getLoc());
else
return true;
return false;
@@ -1719,7 +1723,16 @@ bool PPCAsmParser::ParseDirectiveLocalEntry(SMLoc L) {
return false;
}
+bool PPCAsmParser::ParseGNUAttribute(SMLoc L) {
+ int64_t Tag;
+ int64_t IntegerValue;
+ if (!getParser().parseGNUAttribute(L, Tag, IntegerValue))
+ return false;
+
+ getParser().getStreamer().emitGNUAttribute(Tag, IntegerValue);
+ return true;
+}
/// Force static initialization.
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializePowerPCAsmParser() {
diff --git a/contrib/llvm-project/llvm/lib/Target/PowerPC/GISel/PPCInstructionSelector.cpp b/contrib/llvm-project/llvm/lib/Target/PowerPC/GISel/PPCInstructionSelector.cpp
index 7d64816ed6c7..0cd8350e3fdd 100644
--- a/contrib/llvm-project/llvm/lib/Target/PowerPC/GISel/PPCInstructionSelector.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/PowerPC/GISel/PPCInstructionSelector.cpp
@@ -65,8 +65,7 @@ private:
PPCInstructionSelector::PPCInstructionSelector(const PPCTargetMachine &TM,
const PPCSubtarget &STI,
const PPCRegisterBankInfo &RBI)
- : InstructionSelector(), TII(*STI.getInstrInfo()),
- TRI(*STI.getRegisterInfo()), RBI(RBI),
+ : TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "PPCGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
diff --git a/contrib/llvm-project/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.cpp b/contrib/llvm-project/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.cpp
index 0ca8587ba483..b92b0fc342ec 100644
--- a/contrib/llvm-project/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.cpp
@@ -40,9 +40,8 @@ PPCELFStreamer::PPCELFStreamer(MCContext &Context,
std::unique_ptr<MCAsmBackend> MAB,
std::unique_ptr<MCObjectWriter> OW,
std::unique_ptr<MCCodeEmitter> Emitter)
- : MCELFStreamer(Context, std::move(MAB), std::move(OW),
- std::move(Emitter)), LastLabel(NULL) {
-}
+ : MCELFStreamer(Context, std::move(MAB), std::move(OW), std::move(Emitter)),
+ LastLabel(nullptr) {}
void PPCELFStreamer::emitPrefixedInstruction(const MCInst &Inst,
const MCSubtargetInfo &STI) {
diff --git a/contrib/llvm-project/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp b/contrib/llvm-project/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
index d6e02d0d0862..a651362f703b 100644
--- a/contrib/llvm-project/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
@@ -271,14 +271,14 @@ private:
MCAssembler &MCA = getStreamer().getAssembler();
int64_t Offset;
if (!LocalOffset->evaluateAsAbsolute(Offset, MCA))
- MCA.getContext().reportFatalError(
- LocalOffset->getLoc(), ".localentry expression must be absolute.");
+ MCA.getContext().reportError(LocalOffset->getLoc(),
+ ".localentry expression must be absolute");
switch (Offset) {
default:
- MCA.getContext().reportFatalError(
- LocalOffset->getLoc(),
- ".localentry expression is not a valid power of 2.");
+ MCA.getContext().reportError(
+ LocalOffset->getLoc(), ".localentry expression must be a power of 2");
+ return 0;
case 0:
return 0;
case 1:
diff --git a/contrib/llvm-project/llvm/lib/Target/PowerPC/P10InstrResources.td b/contrib/llvm-project/llvm/lib/Target/PowerPC/P10InstrResources.td
index f3ae0010ad8e..edd3b42d47e1 100644
--- a/contrib/llvm-project/llvm/lib/Target/PowerPC/P10InstrResources.td
+++ b/contrib/llvm-project/llvm/lib/Target/PowerPC/P10InstrResources.td
@@ -409,8 +409,8 @@ def : InstRW<[P10W_DF_13C, P10W_DISP_ANY, P10DF_Read, P10DF_Read, P10DF_Read],
// 13 Cycles Decimal Floating Point operations, and 3 Cycles Store operations, 2 input operands
def : InstRW<[P10W_DF_13C, P10W_DISP_EVEN, P10W_ST_3C, P10W_DISP_ANY],
(instrs
- HASHST,
- HASHSTP
+ HASHST, HASHST8,
+ HASHSTP, HASHSTP8
)>;
// 24 Cycles Decimal Floating Point operations, 1 input operands
@@ -619,6 +619,8 @@ def : InstRW<[P10W_DX_5C, P10W_DISP_ANY, P10DX_Read, P10DX_Read],
XSCMPEXPQP,
XSCMPOQP,
XSCMPUQP,
+ XSMAXCQP,
+ XSMINCQP,
XSTSTDCQP,
XXGENPCVBM
)>;
@@ -1336,8 +1338,8 @@ def : InstRW<[P10W_LD_6C, P10W_DISP_ANY, P10LD_Read, P10LD_Read],
// 6 Cycles Load operations, and 13 Cycles Decimal Floating Point operations, 2 input operands
def : InstRW<[P10W_LD_6C, P10W_DISP_EVEN, P10W_DF_13C, P10W_DISP_ANY],
(instrs
- HASHCHK,
- HASHCHKP
+ HASHCHK, HASHCHK8,
+ HASHCHKP, HASHCHKP8
)>;
// Single crack instructions
diff --git a/contrib/llvm-project/llvm/lib/Target/PowerPC/P9InstrResources.td b/contrib/llvm-project/llvm/lib/Target/PowerPC/P9InstrResources.td
index f7c049951c54..c088d7847ce4 100644
--- a/contrib/llvm-project/llvm/lib/Target/PowerPC/P9InstrResources.td
+++ b/contrib/llvm-project/llvm/lib/Target/PowerPC/P9InstrResources.td
@@ -1415,7 +1415,7 @@ def : InstRW<[],
(instregex "NOP_GT_PWR(6|7)$"),
(instregex "TLB(IA|IVAX|SX|SX2|SX2D|LD|LI|RE|RE2|WE|WE2)$"),
(instregex "WRTEE(I)?$"),
- (instregex "HASH(ST|STP|CHK|CHKP)$"),
+ (instregex "HASH(ST|STP|CHK|CHKP)(8)?$"),
ATTN,
CLRBHRB,
MFBHRBE,
diff --git a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
index f26c15667a0b..780981806996 100644
--- a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
@@ -109,6 +109,23 @@ struct DenseMapInfo<std::pair<const MCSymbol *, MCSymbolRefExpr::VariantKind>> {
namespace {
+enum {
+ // GNU attribute tags for PowerPC ABI
+ Tag_GNU_Power_ABI_FP = 4,
+ Tag_GNU_Power_ABI_Vector = 8,
+ Tag_GNU_Power_ABI_Struct_Return = 12,
+
+ // GNU attribute values for PowerPC float ABI, as a combination of two parts
+ Val_GNU_Power_ABI_NoFloat = 0b00,
+ Val_GNU_Power_ABI_HardFloat_DP = 0b01,
+ Val_GNU_Power_ABI_SoftFloat_DP = 0b10,
+ Val_GNU_Power_ABI_HardFloat_SP = 0b11,
+
+ Val_GNU_Power_ABI_LDBL_IBM128 = 0b0100,
+ Val_GNU_Power_ABI_LDBL_64 = 0b1000,
+ Val_GNU_Power_ABI_LDBL_IEEE128 = 0b1100,
+};
+
class PPCAsmPrinter : public AsmPrinter {
protected:
// For TLS on AIX, we need to be able to identify TOC entries of specific
@@ -178,6 +195,8 @@ public:
return "Linux PPC Assembly Printer";
}
+ void emitGNUAttributes(Module &M);
+
void emitStartOfAsmFile(Module &M) override;
void emitEndOfAsmFile(Module &) override;
@@ -1388,6 +1407,28 @@ void PPCAsmPrinter::emitInstruction(const MachineInstr *MI) {
EmitToStreamer(*OutStreamer, TmpInst);
}
+void PPCLinuxAsmPrinter::emitGNUAttributes(Module &M) {
+ // Emit float ABI into GNU attribute
+ Metadata *MD = M.getModuleFlag("float-abi");
+ MDString *FloatABI = dyn_cast_or_null<MDString>(MD);
+ if (!FloatABI)
+ return;
+ StringRef flt = FloatABI->getString();
+ // TODO: Support emitting soft-fp and hard double/single attributes.
+ if (flt == "doubledouble")
+ OutStreamer->emitGNUAttribute(Tag_GNU_Power_ABI_FP,
+ Val_GNU_Power_ABI_HardFloat_DP |
+ Val_GNU_Power_ABI_LDBL_IBM128);
+ else if (flt == "ieeequad")
+ OutStreamer->emitGNUAttribute(Tag_GNU_Power_ABI_FP,
+ Val_GNU_Power_ABI_HardFloat_DP |
+ Val_GNU_Power_ABI_LDBL_IEEE128);
+ else if (flt == "ieeedouble")
+ OutStreamer->emitGNUAttribute(Tag_GNU_Power_ABI_FP,
+ Val_GNU_Power_ABI_HardFloat_DP |
+ Val_GNU_Power_ABI_LDBL_64);
+}
+
void PPCLinuxAsmPrinter::emitInstruction(const MachineInstr *MI) {
if (!Subtarget->isPPC64())
return PPCAsmPrinter::emitInstruction(MI);
@@ -1642,6 +1683,8 @@ void PPCLinuxAsmPrinter::emitEndOfAsmFile(Module &M) {
PPCTargetStreamer *TS =
static_cast<PPCTargetStreamer *>(OutStreamer->getTargetStreamer());
+ emitGNUAttributes(M);
+
if (!TOC.empty()) {
const char *Name = isPPC64 ? ".toc" : ".got2";
MCSectionELF *Section = OutContext.getELFSection(
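The emitGNUAttributes() addition above builds the Tag_GNU_Power_ABI_FP value by OR-ing a hard-float part with a long-double-format part and hands it to emitGNUAttribute(). A small self-contained sketch of that composition; pickFloatABIValue() is an illustrative helper, not an LLVM API, and the string-to-value mapping simply mirrors the hunk:

#include <cstdio>
#include <string>

// Values copied from the enum added above.
enum {
  Tag_GNU_Power_ABI_FP = 4,
  Val_GNU_Power_ABI_HardFloat_DP = 0b01,
  Val_GNU_Power_ABI_LDBL_IBM128 = 0b0100,
  Val_GNU_Power_ABI_LDBL_64 = 0b1000,
  Val_GNU_Power_ABI_LDBL_IEEE128 = 0b1100,
};

// Returns the Tag_GNU_Power_ABI_FP value for a "float-abi" module flag, or -1
// when the flag is absent or unrecognized (in which case nothing is emitted).
static int pickFloatABIValue(const std::string &Flt) {
  if (Flt == "doubledouble")
    return Val_GNU_Power_ABI_HardFloat_DP | Val_GNU_Power_ABI_LDBL_IBM128;
  if (Flt == "ieeequad")
    return Val_GNU_Power_ABI_HardFloat_DP | Val_GNU_Power_ABI_LDBL_IEEE128;
  if (Flt == "ieeedouble")
    return Val_GNU_Power_ABI_HardFloat_DP | Val_GNU_Power_ABI_LDBL_64;
  return -1;
}

int main() {
  for (const char *F : {"doubledouble", "ieeequad", "ieeedouble", "unknown"}) {
    int V = pickFloatABIValue(F);
    if (V < 0)
      std::printf("float-abi=%-12s -> no attribute emitted\n", F);
    else
      std::printf("float-abi=%-12s -> .gnu_attribute %d, %d\n", F,
                  (int)Tag_GNU_Power_ABI_FP, V);
  }
}

So the "doubledouble", "ieeequad" and "ieeedouble" module flags end up as attribute values 5, 13 and 9 respectively, roughly what a .gnu_attribute 4, N directive in the assembly output would carry.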
diff --git a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCFastISel.cpp b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCFastISel.cpp
index 856569bc8a73..e7cd107c5046 100644
--- a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCFastISel.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCFastISel.cpp
@@ -150,7 +150,7 @@ class PPCFastISel final : public FastISel {
unsigned copyRegToRegClass(const TargetRegisterClass *ToRC,
unsigned SrcReg, unsigned Flag = 0,
unsigned SubReg = 0) {
- unsigned TmpReg = createResultReg(ToRC);
+ Register TmpReg = createResultReg(ToRC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), TmpReg).addReg(SrcReg, Flag, SubReg);
return TmpReg;
@@ -428,7 +428,7 @@ void PPCFastISel::PPCSimplifyAddress(Address &Addr, bool &UseOffset,
// put the alloca address into a register, set the base type back to
// register and continue. This should almost never happen.
if (!UseOffset && Addr.BaseType == Address::FrameIndexBase) {
- unsigned ResultReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
+ Register ResultReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDI8),
ResultReg).addFrameIndex(Addr.Base.FI).addImm(0);
Addr.Base.Reg = ResultReg;
@@ -604,7 +604,7 @@ bool PPCFastISel::SelectLoad(const Instruction *I) {
// Look at the currently assigned register for this instruction
// to determine the required register class. This is necessary
// to constrain RA from using R0/X0 when this is not legal.
- unsigned AssignedReg = FuncInfo.ValueMap[I];
+ Register AssignedReg = FuncInfo.ValueMap[I];
const TargetRegisterClass *RC =
AssignedReg ? MRI.getRegClass(AssignedReg) : nullptr;
@@ -783,7 +783,7 @@ bool PPCFastISel::SelectBranch(const Instruction *I) {
PPCPred = PPC::InvertPredicate(PPCPred);
}
- unsigned CondReg = createResultReg(&PPC::CRRCRegClass);
+ Register CondReg = createResultReg(&PPC::CRRCRegClass);
if (!PPCEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned(),
CondReg, PPCPred))
@@ -847,7 +847,7 @@ bool PPCFastISel::PPCEmitCmp(const Value *SrcValue1, const Value *SrcValue2,
}
}
- unsigned SrcReg1 = getRegForValue(SrcValue1);
+ Register SrcReg1 = getRegForValue(SrcValue1);
if (SrcReg1 == 0)
return false;
@@ -928,13 +928,13 @@ bool PPCFastISel::PPCEmitCmp(const Value *SrcValue1, const Value *SrcValue2,
}
if (NeedsExt) {
- unsigned ExtReg = createResultReg(&PPC::GPRCRegClass);
+ Register ExtReg = createResultReg(&PPC::GPRCRegClass);
if (!PPCEmitIntExt(SrcVT, SrcReg1, MVT::i32, ExtReg, IsZExt))
return false;
SrcReg1 = ExtReg;
if (!UseImm) {
- unsigned ExtReg = createResultReg(&PPC::GPRCRegClass);
+ Register ExtReg = createResultReg(&PPC::GPRCRegClass);
if (!PPCEmitIntExt(SrcVT, SrcReg2, MVT::i32, ExtReg, IsZExt))
return false;
SrcReg2 = ExtReg;
@@ -960,7 +960,7 @@ bool PPCFastISel::SelectFPExt(const Instruction *I) {
if (SrcVT != MVT::f32 || DestVT != MVT::f64)
return false;
- unsigned SrcReg = getRegForValue(Src);
+ Register SrcReg = getRegForValue(Src);
if (!SrcReg)
return false;
@@ -978,7 +978,7 @@ bool PPCFastISel::SelectFPTrunc(const Instruction *I) {
if (SrcVT != MVT::f64 || DestVT != MVT::f32)
return false;
- unsigned SrcReg = getRegForValue(Src);
+ Register SrcReg = getRegForValue(Src);
if (!SrcReg)
return false;
@@ -1019,7 +1019,7 @@ unsigned PPCFastISel::PPCMoveToFPReg(MVT SrcVT, unsigned SrcReg,
// If necessary, extend 32-bit int to 64-bit.
if (SrcVT == MVT::i32) {
- unsigned TmpReg = createResultReg(&PPC::G8RCRegClass);
+ Register TmpReg = createResultReg(&PPC::G8RCRegClass);
if (!PPCEmitIntExt(MVT::i32, SrcReg, MVT::i64, TmpReg, !IsSigned))
return 0;
SrcReg = TmpReg;
@@ -1079,7 +1079,7 @@ bool PPCFastISel::SelectIToFP(const Instruction *I, bool IsSigned) {
SrcVT != MVT::i32 && SrcVT != MVT::i64)
return false;
- unsigned SrcReg = getRegForValue(Src);
+ Register SrcReg = getRegForValue(Src);
if (SrcReg == 0)
return false;
@@ -1091,7 +1091,7 @@ bool PPCFastISel::SelectIToFP(const Instruction *I, bool IsSigned) {
else
Opc = IsSigned ? PPC::EFDCFSI : PPC::EFDCFUI;
- unsigned DestReg = createResultReg(&PPC::SPERCRegClass);
+ Register DestReg = createResultReg(&PPC::SPERCRegClass);
// Generate the convert.
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
.addReg(SrcReg);
@@ -1114,7 +1114,7 @@ bool PPCFastISel::SelectIToFP(const Instruction *I, bool IsSigned) {
// Extend the input if necessary.
if (SrcVT == MVT::i8 || SrcVT == MVT::i16) {
- unsigned TmpReg = createResultReg(&PPC::G8RCRegClass);
+ Register TmpReg = createResultReg(&PPC::G8RCRegClass);
if (!PPCEmitIntExt(SrcVT, SrcReg, MVT::i64, TmpReg, !IsSigned))
return false;
SrcVT = MVT::i64;
@@ -1128,7 +1128,7 @@ bool PPCFastISel::SelectIToFP(const Instruction *I, bool IsSigned) {
// Determine the opcode for the conversion.
const TargetRegisterClass *RC = &PPC::F8RCRegClass;
- unsigned DestReg = createResultReg(RC);
+ Register DestReg = createResultReg(RC);
unsigned Opc;
if (DstVT == MVT::f32)
@@ -1170,7 +1170,7 @@ unsigned PPCFastISel::PPCMoveToIntReg(const Instruction *I, MVT VT,
// Look at the currently assigned register for this instruction
// to determine the required register class.
- unsigned AssignedReg = FuncInfo.ValueMap[I];
+ Register AssignedReg = FuncInfo.ValueMap[I];
const TargetRegisterClass *RC =
AssignedReg ? MRI.getRegClass(AssignedReg) : nullptr;
@@ -1206,7 +1206,7 @@ bool PPCFastISel::SelectFPToI(const Instruction *I, bool IsSigned) {
if (SrcVT != MVT::f32 && SrcVT != MVT::f64)
return false;
- unsigned SrcReg = getRegForValue(Src);
+ Register SrcReg = getRegForValue(Src);
if (SrcReg == 0)
return false;
@@ -1276,7 +1276,7 @@ bool PPCFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
// Look at the currently assigned register for this instruction
// to determine the required register class. If there is no register,
// make a conservative choice (don't assign R0).
- unsigned AssignedReg = FuncInfo.ValueMap[I];
+ Register AssignedReg = FuncInfo.ValueMap[I];
const TargetRegisterClass *RC =
(AssignedReg ? MRI.getRegClass(AssignedReg) :
&PPC::GPRC_and_GPRC_NOR0RegClass);
@@ -1296,8 +1296,8 @@ bool PPCFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
break;
}
- unsigned ResultReg = createResultReg(RC ? RC : &PPC::G8RCRegClass);
- unsigned SrcReg1 = getRegForValue(I->getOperand(0));
+ Register ResultReg = createResultReg(RC ? RC : &PPC::G8RCRegClass);
+ Register SrcReg1 = getRegForValue(I->getOperand(0));
if (SrcReg1 == 0) return false;
// Handle case of small immediate operand.
@@ -1355,7 +1355,7 @@ bool PPCFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
}
// Reg-reg case.
- unsigned SrcReg2 = getRegForValue(I->getOperand(1));
+ Register SrcReg2 = getRegForValue(I->getOperand(1));
if (SrcReg2 == 0) return false;
// Reverse operands for subtract-from.
@@ -1441,7 +1441,7 @@ bool PPCFastISel::processCallArgs(SmallVectorImpl<Value*> &Args,
MVT DestVT = VA.getLocVT();
const TargetRegisterClass *RC =
(DestVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
- unsigned TmpReg = createResultReg(RC);
+ Register TmpReg = createResultReg(RC);
if (!PPCEmitIntExt(ArgVT, Arg, DestVT, TmpReg, /*IsZExt*/false))
llvm_unreachable("Failed to emit a sext!");
ArgVT = DestVT;
@@ -1453,7 +1453,7 @@ bool PPCFastISel::processCallArgs(SmallVectorImpl<Value*> &Args,
MVT DestVT = VA.getLocVT();
const TargetRegisterClass *RC =
(DestVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
- unsigned TmpReg = createResultReg(RC);
+ Register TmpReg = createResultReg(RC);
if (!PPCEmitIntExt(ArgVT, Arg, DestVT, TmpReg, /*IsZExt*/true))
llvm_unreachable("Failed to emit a zext!");
ArgVT = DestVT;
@@ -1628,7 +1628,7 @@ bool PPCFastISel::fastLowerCall(CallLoweringInfo &CLI) {
if (ArgVT.isVector() || ArgVT == MVT::f128)
return false;
- unsigned Arg = getRegForValue(ArgValue);
+ Register Arg = getRegForValue(ArgValue);
if (Arg == 0)
return false;
@@ -1734,7 +1734,7 @@ bool PPCFastISel::SelectRet(const Instruction *I) {
RetRegs.push_back(RetReg);
} else {
- unsigned Reg = getRegForValue(RV);
+ Register Reg = getRegForValue(RV);
if (Reg == 0)
return false;
@@ -1767,7 +1767,7 @@ bool PPCFastISel::SelectRet(const Instruction *I) {
case CCValAssign::ZExt: {
const TargetRegisterClass *RC =
(DestVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
- unsigned TmpReg = createResultReg(RC);
+ Register TmpReg = createResultReg(RC);
if (!PPCEmitIntExt(RVVT, SrcReg, DestVT, TmpReg, true))
return false;
SrcReg = TmpReg;
@@ -1776,7 +1776,7 @@ bool PPCFastISel::SelectRet(const Instruction *I) {
case CCValAssign::SExt: {
const TargetRegisterClass *RC =
(DestVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
- unsigned TmpReg = createResultReg(RC);
+ Register TmpReg = createResultReg(RC);
if (!PPCEmitIntExt(RVVT, SrcReg, DestVT, TmpReg, false))
return false;
SrcReg = TmpReg;
@@ -1857,7 +1857,7 @@ bool PPCFastISel::PPCEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
// Attempt to fast-select an indirect branch instruction.
bool PPCFastISel::SelectIndirectBr(const Instruction *I) {
- unsigned AddrReg = getRegForValue(I->getOperand(0));
+ Register AddrReg = getRegForValue(I->getOperand(0));
if (AddrReg == 0)
return false;
@@ -1884,7 +1884,7 @@ bool PPCFastISel::SelectTrunc(const Instruction *I) {
if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
return false;
- unsigned SrcReg = getRegForValue(Src);
+ Register SrcReg = getRegForValue(Src);
if (!SrcReg)
return false;
@@ -1903,7 +1903,7 @@ bool PPCFastISel::SelectIntExt(const Instruction *I) {
Type *SrcTy = Src->getType();
bool IsZExt = isa<ZExtInst>(I);
- unsigned SrcReg = getRegForValue(Src);
+ Register SrcReg = getRegForValue(Src);
if (!SrcReg) return false;
EVT SrcEVT, DestEVT;
@@ -1921,12 +1921,12 @@ bool PPCFastISel::SelectIntExt(const Instruction *I) {
// instruction, use it. Otherwise pick the register class of the
// correct size that does not contain X0/R0, since we don't know
// whether downstream uses permit that assignment.
- unsigned AssignedReg = FuncInfo.ValueMap[I];
+ Register AssignedReg = FuncInfo.ValueMap[I];
const TargetRegisterClass *RC =
(AssignedReg ? MRI.getRegClass(AssignedReg) :
(DestVT == MVT::i64 ? &PPC::G8RC_and_G8RC_NOX0RegClass :
&PPC::GPRC_and_GPRC_NOR0RegClass));
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
if (!PPCEmitIntExt(SrcVT, SrcReg, DestVT, ResultReg, IsZExt))
return false;
@@ -1966,15 +1966,6 @@ bool PPCFastISel::fastSelectInstruction(const Instruction *I) {
return SelectBinaryIntOp(I, ISD::OR);
case Instruction::Sub:
return SelectBinaryIntOp(I, ISD::SUB);
- case Instruction::Call:
- // On AIX, call lowering uses the DAG-ISEL path currently so that the
- // callee of the direct function call instruction will be mapped to the
- // symbol for the function's entry point, which is distinct from the
- // function descriptor symbol. The latter is the symbol whose XCOFF symbol
- // name is the C-linkage name of the source level function.
- if (TM.getTargetTriple().isOSAIX())
- break;
- return selectCall(I);
case Instruction::Ret:
return SelectRet(I);
case Instruction::Trunc:
@@ -2012,7 +2003,7 @@ unsigned PPCFastISel::PPCMaterializeFP(const ConstantFP *CFP, MVT VT) {
else
RC = ((VT == MVT::f32) ? &PPC::F4RCRegClass : &PPC::F8RCRegClass);
- unsigned DestReg = createResultReg(RC);
+ Register DestReg = createResultReg(RC);
CodeModel::Model CModel = TM.getCodeModel();
MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
@@ -2026,7 +2017,7 @@ unsigned PPCFastISel::PPCMaterializeFP(const ConstantFP *CFP, MVT VT) {
else
Opc = ((VT == MVT::f32) ? PPC::LFS : PPC::LFD);
- unsigned TmpReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
+ Register TmpReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
PPCFuncInfo->setUsesTOCBasePtr();
// For small code model, generate a LF[SD](0, LDtocCPT(Idx, X2)).
@@ -2043,7 +2034,7 @@ unsigned PPCFastISel::PPCMaterializeFP(const ConstantFP *CFP, MVT VT) {
// But for large code model, we must generate a LDtocL followed
// by the LF[SD].
if (CModel == CodeModel::Large) {
- unsigned TmpReg2 = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
+ Register TmpReg2 = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::LDtocL),
TmpReg2).addConstantPoolIndex(Idx).addReg(TmpReg);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
@@ -2068,7 +2059,7 @@ unsigned PPCFastISel::PPCMaterializeGV(const GlobalValue *GV, MVT VT) {
assert(VT == MVT::i64 && "Non-address!");
const TargetRegisterClass *RC = &PPC::G8RC_and_G8RC_NOX0RegClass;
- unsigned DestReg = createResultReg(RC);
+ Register DestReg = createResultReg(RC);
// Global values may be plain old object addresses, TLS object
// addresses, constant pool entries, or jump tables. How we generate
@@ -2083,6 +2074,12 @@ unsigned PPCFastISel::PPCMaterializeGV(const GlobalValue *GV, MVT VT) {
if (GV->isThreadLocal())
return 0;
+ // If the global has the toc-data attribute then fall back to DAG-ISEL.
+ if (TM.getTargetTriple().isOSAIX())
+ if (const GlobalVariable *Var = dyn_cast_or_null<GlobalVariable>(GV))
+ if (Var->hasAttribute("toc-data"))
+ return false;
+
PPCFuncInfo->setUsesTOCBasePtr();
// For small code model, generate a simple TOC load.
if (CModel == CodeModel::Small)
@@ -2099,7 +2096,7 @@ unsigned PPCFastISel::PPCMaterializeGV(const GlobalValue *GV, MVT VT) {
// Otherwise we generate:
// ADDItocL(ADDIStocHA8(%x2, GV), GV)
// Either way, start with the ADDIStocHA8:
- unsigned HighPartReg = createResultReg(RC);
+ Register HighPartReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDIStocHA8),
HighPartReg).addReg(PPC::X2).addGlobalAddress(GV);
@@ -2123,7 +2120,7 @@ unsigned PPCFastISel::PPCMaterialize32BitInt(int64_t Imm,
unsigned Lo = Imm & 0xFFFF;
unsigned Hi = (Imm >> 16) & 0xFFFF;
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
bool IsGPRC = RC->hasSuperClassEq(&PPC::GPRCRegClass);
if (isInt<16>(Imm))
@@ -2132,7 +2129,7 @@ unsigned PPCFastISel::PPCMaterialize32BitInt(int64_t Imm,
.addImm(Imm);
else if (Lo) {
// Both Lo and Hi have nonzero bits.
- unsigned TmpReg = createResultReg(RC);
+ Register TmpReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(IsGPRC ? PPC::LIS : PPC::LIS8), TmpReg)
.addImm(Hi);
@@ -2195,7 +2192,7 @@ unsigned PPCFastISel::PPCMaterialize64BitInt(int64_t Imm,
TmpReg3 = TmpReg2;
if ((Lo = Remainder & 0xFFFF)) {
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ORI8),
ResultReg).addReg(TmpReg3).addImm(Lo);
return ResultReg;
@@ -2211,7 +2208,7 @@ unsigned PPCFastISel::PPCMaterializeInt(const ConstantInt *CI, MVT VT,
// If we're using CR bit registers for i1 values, handle that as a special
// case first.
if (VT == MVT::i1 && Subtarget->useCRBits()) {
- unsigned ImmReg = createResultReg(&PPC::CRBITRCRegClass);
+ Register ImmReg = createResultReg(&PPC::CRBITRCRegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(CI->isZero() ? PPC::CRUNSET : PPC::CRSET), ImmReg);
return ImmReg;
@@ -2231,7 +2228,7 @@ unsigned PPCFastISel::PPCMaterializeInt(const ConstantInt *CI, MVT VT,
// a range of 0..0x7fff.
if (isInt<16>(Imm)) {
unsigned Opc = (VT == MVT::i64) ? PPC::LI8 : PPC::LI;
- unsigned ImmReg = createResultReg(RC);
+ Register ImmReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ImmReg)
.addImm(Imm);
return ImmReg;
@@ -2283,7 +2280,7 @@ unsigned PPCFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
FuncInfo.StaticAllocaMap.find(AI);
if (SI != FuncInfo.StaticAllocaMap.end()) {
- unsigned ResultReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
+ Register ResultReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDI8),
ResultReg).addFrameIndex(SI->second).addImm(0);
return ResultReg;
@@ -2393,7 +2390,7 @@ unsigned PPCFastISel::fastEmit_i(MVT Ty, MVT VT, unsigned Opc, uint64_t Imm) {
// If we're using CR bit registers for i1 values, handle that as a special
// case first.
if (VT == MVT::i1 && Subtarget->useCRBits()) {
- unsigned ImmReg = createResultReg(&PPC::CRBITRCRegClass);
+ Register ImmReg = createResultReg(&PPC::CRBITRCRegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Imm == 0 ? PPC::CRUNSET : PPC::CRSET), ImmReg);
return ImmReg;
diff --git a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
index 3ca563fee970..65c969c196e1 100644
--- a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -674,7 +674,8 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF,
: PPC::MFCR);
const MCInstrDesc &StoreWordInst = TII.get(isPPC64 ? PPC::STW8 : PPC::STW);
const MCInstrDesc &HashST =
- TII.get(HasPrivileged ? PPC::HASHSTP : PPC::HASHST);
+ TII.get(isPPC64 ? (HasPrivileged ? PPC::HASHSTP8 : PPC::HASHST8)
+ : (HasPrivileged ? PPC::HASHSTP : PPC::HASHST));
// Regarding this assert: Even though LR is saved in the caller's frame (i.e.,
// LROffset is positive), that slot is callee-owned. Because PPC32 SVR4 has no
@@ -1172,7 +1173,7 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF,
// CFA.
const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (Reg == PPC::LR || Reg == PPC::LR8 || Reg == PPC::RM) continue;
// This is a bit of a hack: CR2LT, CR2GT, CR2EQ and CR2UN are just
@@ -1195,7 +1196,7 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF,
// In the ELFv1 ABI, only CR2 is noted in CFI and stands in for
// the whole CR word. In the ELFv2 ABI, every CR that was
// actually saved gets its own CFI record.
- unsigned CRReg = isELFv2ABI? Reg : (unsigned) PPC::CR2;
+ Register CRReg = isELFv2ABI? Reg : PPC::CR2;
unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
nullptr, MRI->getDwarfRegNum(CRReg, true), CRSaveOffset));
BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
@@ -1590,7 +1591,8 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
const MCInstrDesc& MoveToCRInst = TII.get( isPPC64 ? PPC::MTOCRF8
: PPC::MTOCRF);
const MCInstrDesc &HashChk =
- TII.get(HasPrivileged ? PPC::HASHCHKP : PPC::HASHCHK);
+ TII.get(isPPC64 ? (HasPrivileged ? PPC::HASHCHKP8 : PPC::HASHCHK8)
+ : (HasPrivileged ? PPC::HASHCHKP : PPC::HASHCHK));
int64_t LROffset = getReturnSaveOffset();
int64_t FPOffset = 0;
@@ -2085,7 +2087,7 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF,
SmallVector<CalleeSavedInfo, 18> VRegs;
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
assert((!MF.getInfo<PPCFunctionInfo>()->mustSaveTOC() ||
(Reg != PPC::X2 && Reg != PPC::R2)) &&
"Not expecting to try to spill R2 in a function that must save TOC");
@@ -2337,7 +2339,7 @@ bool PPCFrameLowering::assignCalleeSavedSpillSlots(
if (BVAllocatable.none())
return false;
- unsigned Reg = CS.getReg();
+ Register Reg = CS.getReg();
if (!PPC::G8RCRegClass.contains(Reg)) {
AllSpilledToReg = false;
@@ -2395,7 +2397,7 @@ bool PPCFrameLowering::spillCalleeSavedRegisters(
});
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
// CR2 through CR4 are the nonvolatile CR fields.
bool IsCRField = PPC::CR2 <= Reg && Reg <= PPC::CR4;
@@ -2581,7 +2583,7 @@ bool PPCFrameLowering::restoreCalleeSavedRegisters(
--BeforeI;
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- unsigned Reg = CSI[i].getReg();
+ Register Reg = CSI[i].getReg();
if ((Reg == PPC::X2 || Reg == PPC::R2) && MustSaveTOC)
continue;
diff --git a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index ba74af5ef5f7..fdcf6e7e80f2 100644
--- a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -1365,8 +1365,7 @@ class BitPermutationSelector {
ValueBit(SDValue V, unsigned I, Kind K = Variable)
: V(V), Idx(I), K(K) {}
- ValueBit(Kind K = Variable)
- : V(SDValue(nullptr, 0)), Idx(UINT32_MAX), K(K) {}
+ ValueBit(Kind K = Variable) : Idx(UINT32_MAX), K(K) {}
bool isZero() const {
return K == ConstZero || K == VariableKnownToBeZero;
@@ -4438,7 +4437,7 @@ bool PPCDAGToDAGISel::trySETCC(SDNode *N) {
// Force the ccreg into CR7.
SDValue CR7Reg = CurDAG->getRegister(PPC::CR7, MVT::i32);
- SDValue InFlag(nullptr, 0); // Null incoming flag value.
+ SDValue InFlag; // Null incoming flag value.
CCReg = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, CR7Reg, CCReg,
InFlag).getValue(1);
diff --git a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 8d6edf07bc53..25cc34badda0 100644
--- a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -2433,7 +2433,7 @@ unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
/// the constant being splatted. The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
- SDValue OpVal(nullptr, 0);
+ SDValue OpVal;
// If ByteSize of the splat is bigger than the element size of the
// build_vector, then we have a case where we are checking for a splat where
@@ -3508,8 +3508,9 @@ SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
int ShuffV[] = {1, 0, 3, 2};
SDValue Shuff =
DAG.getVectorShuffle(MVT::v4i32, dl, SetCC32, SetCC32, ShuffV);
- return DAG.getBitcast(
- MVT::v2i64, DAG.getNode(ISD::AND, dl, MVT::v4i32, Shuff, SetCC32));
+ return DAG.getBitcast(MVT::v2i64,
+ DAG.getNode(CC == ISD::SETEQ ? ISD::AND : ISD::OR,
+ dl, MVT::v4i32, Shuff, SetCC32));
}
// We handle most of these in the usual way.
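The LowerSETCC change above combines the word-swapped v4i32 compare with AND for SETEQ and OR for SETNE: a 64-bit lane is equal only if both of its 32-bit halves are equal, and unequal if either half differs. A scalar C++ analogue of that reasoning (the helpers are illustrative only, not part of the DAG code):

#include <cassert>
#include <cstdint>

// A 64-bit (in)equality decided from the per-half 32-bit comparisons.
static bool eq64ViaHalves(uint64_t A, uint64_t B) {
  bool LoEq = (uint32_t)A == (uint32_t)B;
  bool HiEq = (uint32_t)(A >> 32) == (uint32_t)(B >> 32);
  return LoEq && HiEq;            // SETEQ: AND of the half results
}

static bool ne64ViaHalves(uint64_t A, uint64_t B) {
  bool LoNe = (uint32_t)A != (uint32_t)B;
  bool HiNe = (uint32_t)(A >> 32) != (uint32_t)(B >> 32);
  return LoNe || HiNe;            // SETNE: OR of the half results
}

int main() {
  uint64_t X = 0x1234567800000000ULL, Y = 0x1234567800000001ULL;
  assert(eq64ViaHalves(X, X) && !eq64ViaHalves(X, Y));
  assert(!ne64ViaHalves(X, X) && ne64ViaHalves(X, Y));
  return 0;
}

In the vector form, the {1, 0, 3, 2} shuffle pairs each 32-bit result with its partner word, so the AND/OR sees both half-results in every lane.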
@@ -4078,8 +4079,8 @@ SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
// virtual ones.
if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) {
assert(i + 1 < e && "No second half of double precision argument");
- unsigned RegLo = MF.addLiveIn(VA.getLocReg(), RC);
- unsigned RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC);
+ Register RegLo = MF.addLiveIn(VA.getLocReg(), RC);
+ Register RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC);
SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32);
SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32);
if (!Subtarget.isLittleEndian())
@@ -4087,7 +4088,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo,
ArgValueHi);
} else {
- unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
+ Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
ValVT == MVT::i1 ? MVT::i32 : ValVT);
if (ValVT == MVT::i1)
@@ -4179,7 +4180,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
// dereferencing the result of va_next.
for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
// Get an existing live-in vreg, or add a new one.
- unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
+ Register VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
if (!VReg)
VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
@@ -4198,7 +4199,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
// on the stack.
for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
// Get an existing live-in vreg, or add a new one.
- unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
+ Register VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
if (!VReg)
VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
@@ -4384,7 +4385,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
InVals.push_back(Arg);
if (GPR_idx != Num_GPR_Regs) {
- unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
+ Register VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
FuncInfo->addLiveInAttr(VReg, Flags);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
EVT ObjType = EVT::getIntegerVT(*DAG.getContext(), ObjSize * 8);
@@ -4408,7 +4409,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
if (GPR_idx == Num_GPR_Regs)
break;
- unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
+ Register VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
FuncInfo->addLiveInAttr(VReg, Flags);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
SDValue Addr = FIN;
@@ -4432,7 +4433,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
case MVT::i64:
if (Flags.isNest()) {
// The 'nest' parameter, if any, is passed in R11.
- unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
+ Register VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
@@ -4445,7 +4446,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
// passed directly. Clang may use those instead of "byval" aggregate
// types to avoid forcing arguments to memory unnecessarily.
if (GPR_idx != Num_GPR_Regs) {
- unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
+ Register VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
FuncInfo->addLiveInAttr(VReg, Flags);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
@@ -4491,7 +4492,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
// This can only ever happen in the presence of f32 array types,
// since otherwise we never run out of FPRs before running out
// of GPRs.
- unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
+ Register VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
FuncInfo->addLiveInAttr(VReg, Flags);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
@@ -4532,7 +4533,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
// passed directly. The latter are used to implement ELFv2 homogenous
// vector aggregates.
if (VR_idx != Num_VR_Regs) {
- unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
+ Register VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
++VR_idx;
} else {
@@ -4591,7 +4592,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
// the result of va_next.
for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
GPR_idx < Num_GPR_Regs; ++GPR_idx) {
- unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
+ Register VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
SDValue Store =
DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
@@ -7059,7 +7060,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_AIX(
auto HandleRegLoc = [&, RegClass, LocVT](const MCPhysReg PhysReg,
unsigned Offset) {
- const unsigned VReg = MF.addLiveIn(PhysReg, RegClass);
+ const Register VReg = MF.addLiveIn(PhysReg, RegClass);
// Since the caller's side has left-justified the aggregate in the
// register, we can simply store the entire register into the stack
// slot.
@@ -7156,7 +7157,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_AIX(
(CCInfo.getNextStackOffset() - LinkageSize) / PtrByteSize;
GPRIndex < NumGPArgRegs; ++GPRIndex) {
- const unsigned VReg =
+ const Register VReg =
IsPPC64 ? MF.addLiveIn(GPR_64[GPRIndex], &PPC::G8RCRegClass)
: MF.addLiveIn(GPR_32[GPRIndex], &PPC::GPRCRegClass);
@@ -11178,13 +11179,17 @@ void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
case ISD::STRICT_FP_TO_SINT:
case ISD::STRICT_FP_TO_UINT:
case ISD::FP_TO_SINT:
- case ISD::FP_TO_UINT:
+ case ISD::FP_TO_UINT: {
// LowerFP_TO_INT() can only handle f32 and f64.
if (N->getOperand(N->isStrictFPOpcode() ? 1 : 0).getValueType() ==
MVT::ppcf128)
return;
- Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
+ SDValue LoweredValue = LowerFP_TO_INT(SDValue(N, 0), DAG, dl);
+ Results.push_back(LoweredValue);
+ if (N->isStrictFPOpcode())
+ Results.push_back(LoweredValue.getValue(1));
return;
+ }
case ISD::TRUNCATE: {
if (!N->getValueType(0).isVector())
return;
@@ -17890,7 +17895,7 @@ Value *PPCTargetLowering::emitMaskedAtomicRMWIntrinsic(
assert(EnableQuadwordAtomics && Subtarget.hasQuadwordAtomics() &&
"Only support quadword now");
Module *M = Builder.GetInsertBlock()->getParent()->getParent();
- Type *ValTy = cast<PointerType>(AlignedAddr->getType())->getElementType();
+ Type *ValTy = AlignedAddr->getType()->getPointerElementType();
assert(ValTy->getPrimitiveSizeInBits() == 128);
Function *RMW = Intrinsic::getDeclaration(
M, getIntrinsicForAtomicRMWBinOp128(AI->getOperation()));
@@ -17915,7 +17920,7 @@ Value *PPCTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
assert(EnableQuadwordAtomics && Subtarget.hasQuadwordAtomics() &&
"Only support quadword now");
Module *M = Builder.GetInsertBlock()->getParent()->getParent();
- Type *ValTy = cast<PointerType>(AlignedAddr->getType())->getElementType();
+ Type *ValTy = AlignedAddr->getType()->getPointerElementType();
assert(ValTy->getPrimitiveSizeInBits() == 128);
Function *IntCmpXchg =
Intrinsic::getDeclaration(M, Intrinsic::ppc_cmpxchg_i128);
diff --git a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelLowering.h b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelLowering.h
index 87b7f96112ec..eb52e4aa6273 100644
--- a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -1456,4 +1456,4 @@ namespace llvm {
} // end namespace llvm
-#endif // LLVM_TARGET_POWERPC_PPC32ISELLOWERING_H
+#endif // LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
diff --git a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCInstr64Bit.td b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
index 58af8037f59c..eae8e36e475e 100644
--- a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
+++ b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
@@ -1760,26 +1760,27 @@ defm FCTIWUZ : XForm_26r<63, 143, (outs f8rc:$frD), (ins f8rc:$frB),
// These instructions store a hash computed from the value of the link register
// and the value of the stack pointer.
-let mayStore = 1 in {
-def HASHST : XForm_XD6_RA5_RB5<31, 722, (outs),
- (ins g8rc:$RB, memrihash:$D_RA_XD),
- "hashst $RB, $D_RA_XD", IIC_IntGeneral, []>;
-def HASHSTP : XForm_XD6_RA5_RB5<31, 658, (outs),
+let mayStore = 1, Interpretation64Bit = 1, isCodeGenOnly = 1 in {
+def HASHST8 : XForm_XD6_RA5_RB5<31, 722, (outs),
(ins g8rc:$RB, memrihash:$D_RA_XD),
- "hashstp $RB, $D_RA_XD", IIC_IntGeneral, []>;
+ "hashst $RB, $D_RA_XD", IIC_IntGeneral, []>;
+def HASHSTP8 : XForm_XD6_RA5_RB5<31, 658, (outs),
+ (ins g8rc:$RB, memrihash:$D_RA_XD),
+ "hashstp $RB, $D_RA_XD", IIC_IntGeneral, []>;
}
// These instructions check a hash computed from the value of the link register
// and the value of the stack pointer. The hasSideEffects flag is needed as the
// instruction may TRAP if the hash does not match the hash stored at the
// specified address.
-let mayLoad = 1, hasSideEffects = 1 in {
-def HASHCHK : XForm_XD6_RA5_RB5<31, 754, (outs),
- (ins g8rc:$RB, memrihash:$D_RA_XD),
- "hashchk $RB, $D_RA_XD", IIC_IntGeneral, []>;
-def HASHCHKP : XForm_XD6_RA5_RB5<31, 690, (outs),
+let mayLoad = 1, hasSideEffects = 1,
+ Interpretation64Bit = 1, isCodeGenOnly = 1 in {
+def HASHCHK8 : XForm_XD6_RA5_RB5<31, 754, (outs),
(ins g8rc:$RB, memrihash:$D_RA_XD),
- "hashchkp $RB, $D_RA_XD", IIC_IntGeneral, []>;
+ "hashchk $RB, $D_RA_XD", IIC_IntGeneral, []>;
+def HASHCHKP8 : XForm_XD6_RA5_RB5<31, 690, (outs),
+ (ins g8rc:$RB, memrihash:$D_RA_XD),
+ "hashchkp $RB, $D_RA_XD", IIC_IntGeneral, []>;
}
let Interpretation64Bit = 1, isCodeGenOnly = 1, hasSideEffects = 1 in
diff --git a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index a0fd2111de11..eada872c2a7d 100644
--- a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -2339,9 +2339,8 @@ bool PPCInstrInfo::ClobbersPredicate(MachineInstr &MI,
Found = true;
}
} else if (MO.isRegMask()) {
- for (TargetRegisterClass::iterator I = RC->begin(),
- IE = RC->end(); I != IE; ++I)
- if (MO.clobbersPhysReg(*I)) {
+ for (MCPhysReg R : *RC)
+ if (MO.clobbersPhysReg(R)) {
Pred.push_back(MO);
Found = true;
}
@@ -3253,7 +3252,7 @@ MachineInstr *PPCInstrInfo::getForwardingDefMI(
Register Reg = MI.getOperand(i).getReg();
if (!Register::isVirtualRegister(Reg))
continue;
- unsigned TrueReg = TRI->lookThruCopyLike(Reg, MRI);
+ Register TrueReg = TRI->lookThruCopyLike(Reg, MRI);
if (Register::isVirtualRegister(TrueReg)) {
DefMI = MRI->getVRegDef(TrueReg);
if (DefMI->getOpcode() == PPC::LI || DefMI->getOpcode() == PPC::LI8 ||
@@ -3502,8 +3501,8 @@ bool PPCInstrInfo::foldFrameOffset(MachineInstr &MI) const {
return false;
assert(ADDIMI && "There should be ADDIMI for valid ToBeChangedReg.");
- unsigned ToBeChangedReg = ADDIMI->getOperand(0).getReg();
- unsigned ScaleReg = ADDMI->getOperand(ScaleRegIdx).getReg();
+ Register ToBeChangedReg = ADDIMI->getOperand(0).getReg();
+ Register ScaleReg = ADDMI->getOperand(ScaleRegIdx).getReg();
auto NewDefFor = [&](unsigned Reg, MachineBasicBlock::iterator Start,
MachineBasicBlock::iterator End) {
for (auto It = ++Start; It != End; It++)
@@ -3720,7 +3719,7 @@ bool PPCInstrInfo::convertToImmediateForm(MachineInstr &MI,
bool PPCInstrInfo::combineRLWINM(MachineInstr &MI,
MachineInstr **ToErase) const {
MachineRegisterInfo *MRI = &MI.getParent()->getParent()->getRegInfo();
- unsigned FoldingReg = MI.getOperand(1).getReg();
+ Register FoldingReg = MI.getOperand(1).getReg();
if (!Register::isVirtualRegister(FoldingReg))
return false;
MachineInstr *SrcMI = MRI->getVRegDef(FoldingReg);
@@ -5266,7 +5265,7 @@ PPCInstrInfo::isSignOrZeroExtended(const MachineInstr &MI, bool SignExt,
if (!Register::isVirtualRegister(SrcReg))
return false;
const MachineInstr *SrcMI = MRI->getVRegDef(SrcReg);
- if (SrcMI != NULL)
+ if (SrcMI != nullptr)
return isSignOrZeroExtended(*SrcMI, SignExt, Depth);
return false;
@@ -5290,7 +5289,7 @@ PPCInstrInfo::isSignOrZeroExtended(const MachineInstr &MI, bool SignExt,
if (!Register::isVirtualRegister(SrcReg))
return false;
const MachineInstr *SrcMI = MRI->getVRegDef(SrcReg);
- if (SrcMI != NULL)
+ if (SrcMI != nullptr)
return isSignOrZeroExtended(*SrcMI, SignExt, Depth);
return false;
@@ -5319,7 +5318,8 @@ PPCInstrInfo::isSignOrZeroExtended(const MachineInstr &MI, bool SignExt,
if (!Register::isVirtualRegister(SrcReg))
return false;
const MachineInstr *SrcMI = MRI->getVRegDef(SrcReg);
- if (SrcMI == NULL || !isSignOrZeroExtended(*SrcMI, SignExt, Depth+1))
+ if (SrcMI == nullptr ||
+ !isSignOrZeroExtended(*SrcMI, SignExt, Depth + 1))
return false;
}
else
diff --git a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCInstrInfo.td b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCInstrInfo.td
index 2340be5b5915..c26b4f6ceb7d 100644
--- a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCInstrInfo.td
+++ b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCInstrInfo.td
@@ -5530,6 +5530,30 @@ def DWBytes3210 {
(i64 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), Word, sub_32));
}
+// These instructions store a hash computed from the value of the link register
+// and the value of the stack pointer.
+let mayStore = 1 in {
+def HASHST : XForm_XD6_RA5_RB5<31, 722, (outs),
+ (ins gprc:$RB, memrihash:$D_RA_XD),
+ "hashst $RB, $D_RA_XD", IIC_IntGeneral, []>;
+def HASHSTP : XForm_XD6_RA5_RB5<31, 658, (outs),
+ (ins gprc:$RB, memrihash:$D_RA_XD),
+ "hashstp $RB, $D_RA_XD", IIC_IntGeneral, []>;
+}
+
+// These instructions check a hash computed from the value of the link register
+// and the value of the stack pointer. The hasSideEffects flag is needed as the
+// instruction may TRAP if the hash does not match the hash stored at the
+// specified address.
+let mayLoad = 1, hasSideEffects = 1 in {
+def HASHCHK : XForm_XD6_RA5_RB5<31, 754, (outs),
+ (ins gprc:$RB, memrihash:$D_RA_XD),
+ "hashchk $RB, $D_RA_XD", IIC_IntGeneral, []>;
+def HASHCHKP : XForm_XD6_RA5_RB5<31, 690, (outs),
+ (ins gprc:$RB, memrihash:$D_RA_XD),
+ "hashchkp $RB, $D_RA_XD", IIC_IntGeneral, []>;
+}
+
// Now both high word and low word are reversed, next
// swap the high word and low word.
def : Pat<(i64 (bitreverse i64:$A)),
diff --git a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCInstrPrefix.td b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCInstrPrefix.td
index a19289e96b3e..fe354208533b 100644
--- a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCInstrPrefix.td
+++ b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCInstrPrefix.td
@@ -2398,6 +2398,8 @@ let Predicates = [IsISA3_1] in {
let Predicates = [IsISA3_1, HasVSX] in {
def XVCVSPBF16 : XX2_XT6_XO5_XB6<60, 17, 475, "xvcvspbf16", vsrc, []>;
def XVCVBF16SPN : XX2_XT6_XO5_XB6<60, 16, 475, "xvcvbf16spn", vsrc, []>;
+ def XSMAXCQP : X_VT5_VA5_VB5<63, 676, "xsmaxcqp", []>;
+ def XSMINCQP : X_VT5_VA5_VB5<63, 740, "xsmincqp", []>;
}
// Multiclass defining patterns for Set Boolean Extension Reverse Instructions.
diff --git a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
index d12a9b806fd0..e5fa02bc8ccf 100644
--- a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
@@ -107,10 +107,10 @@ private:
void initialize(MachineFunction &MFParm);
// Perform peepholes.
- bool simplifyCode(void);
+ bool simplifyCode();
// Perform peepholes.
- bool eliminateRedundantCompare(void);
+ bool eliminateRedundantCompare();
bool eliminateRedundantTOCSaves(std::map<MachineInstr *, bool> &TOCSaves);
bool combineSEXTAndSHL(MachineInstr &MI, MachineInstr *&ToErase);
bool emitRLDICWhenLoweringJumpTables(MachineInstr &MI);
@@ -258,12 +258,12 @@ void PPCMIPeephole::UpdateTOCSaves(
}
bool Keep = true;
- for (auto It = TOCSaves.begin(); It != TOCSaves.end(); It++ ) {
- MachineInstr *CurrInst = It->first;
+ for (auto &I : TOCSaves) {
+ MachineInstr *CurrInst = I.first;
// If new instruction dominates an existing one, mark existing one as
// redundant.
- if (It->second && MDT->dominates(MI, CurrInst))
- It->second = false;
+ if (I.second && MDT->dominates(MI, CurrInst))
+ I.second = false;
// Check if the new instruction is redundant.
if (MDT->dominates(CurrInst, MI)) {
Keep = false;
@@ -381,7 +381,7 @@ static void convertUnprimedAccPHIs(const PPCInstrInfo *TII,
}
// Perform peephole optimizations.
-bool PPCMIPeephole::simplifyCode(void) {
+bool PPCMIPeephole::simplifyCode() {
bool Simplified = false;
bool TrapOpt = false;
MachineInstr* ToErase = nullptr;
@@ -481,7 +481,7 @@ bool PPCMIPeephole::simplifyCode(void) {
// PPC::ZERO.
if (!MI.getOperand(1).isImm() || MI.getOperand(1).getImm() != 0)
break;
- unsigned MIDestReg = MI.getOperand(0).getReg();
+ Register MIDestReg = MI.getOperand(0).getReg();
for (MachineInstr& UseMI : MRI->use_instructions(MIDestReg))
Simplified |= TII->onlyFoldImmediate(UseMI, MI, MIDestReg);
if (MRI->use_nodbg_empty(MIDestReg)) {
@@ -519,9 +519,9 @@ bool PPCMIPeephole::simplifyCode(void) {
// XXPERMDI t, SUBREG_TO_REG(s), SUBREG_TO_REG(s), immed.
// We have to look through chains of COPY and SUBREG_TO_REG
// to find the real source values for comparison.
- unsigned TrueReg1 =
+ Register TrueReg1 =
TRI->lookThruCopyLike(MI.getOperand(1).getReg(), MRI);
- unsigned TrueReg2 =
+ Register TrueReg2 =
TRI->lookThruCopyLike(MI.getOperand(2).getReg(), MRI);
if (!(TrueReg1 == TrueReg2 && Register::isVirtualRegister(TrueReg1)))
@@ -541,7 +541,7 @@ bool PPCMIPeephole::simplifyCode(void) {
auto isConversionOfLoadAndSplat = [=]() -> bool {
if (DefOpc != PPC::XVCVDPSXDS && DefOpc != PPC::XVCVDPUXDS)
return false;
- unsigned FeedReg1 =
+ Register FeedReg1 =
TRI->lookThruCopyLike(DefMI->getOperand(1).getReg(), MRI);
if (Register::isVirtualRegister(FeedReg1)) {
MachineInstr *LoadMI = MRI->getVRegDef(FeedReg1);
@@ -565,16 +565,16 @@ bool PPCMIPeephole::simplifyCode(void) {
// If this is a splat or a swap fed by another splat, we
// can replace it with a copy.
if (DefOpc == PPC::XXPERMDI) {
- unsigned DefReg1 = DefMI->getOperand(1).getReg();
- unsigned DefReg2 = DefMI->getOperand(2).getReg();
+ Register DefReg1 = DefMI->getOperand(1).getReg();
+ Register DefReg2 = DefMI->getOperand(2).getReg();
unsigned DefImmed = DefMI->getOperand(3).getImm();
// If the two inputs are not the same register, check to see if
// they originate from the same virtual register after only
// copy-like instructions.
if (DefReg1 != DefReg2) {
- unsigned FeedReg1 = TRI->lookThruCopyLike(DefReg1, MRI);
- unsigned FeedReg2 = TRI->lookThruCopyLike(DefReg2, MRI);
+ Register FeedReg1 = TRI->lookThruCopyLike(DefReg1, MRI);
+ Register FeedReg2 = TRI->lookThruCopyLike(DefReg2, MRI);
if (!(FeedReg1 == FeedReg2 &&
Register::isVirtualRegister(FeedReg1)))
@@ -643,7 +643,7 @@ bool PPCMIPeephole::simplifyCode(void) {
case PPC::XXSPLTW: {
unsigned MyOpcode = MI.getOpcode();
unsigned OpNo = MyOpcode == PPC::XXSPLTW ? 1 : 2;
- unsigned TrueReg =
+ Register TrueReg =
TRI->lookThruCopyLike(MI.getOperand(OpNo).getReg(), MRI);
if (!Register::isVirtualRegister(TrueReg))
break;
@@ -707,7 +707,7 @@ bool PPCMIPeephole::simplifyCode(void) {
}
case PPC::XVCVDPSP: {
// If this is a DP->SP conversion fed by an FRSP, the FRSP is redundant.
- unsigned TrueReg =
+ Register TrueReg =
TRI->lookThruCopyLike(MI.getOperand(1).getReg(), MRI);
if (!Register::isVirtualRegister(TrueReg))
break;
@@ -716,9 +716,9 @@ bool PPCMIPeephole::simplifyCode(void) {
// This can occur when building a vector of single precision or integer
// values.
if (DefMI && DefMI->getOpcode() == PPC::XXPERMDI) {
- unsigned DefsReg1 =
+ Register DefsReg1 =
TRI->lookThruCopyLike(DefMI->getOperand(1).getReg(), MRI);
- unsigned DefsReg2 =
+ Register DefsReg2 =
TRI->lookThruCopyLike(DefMI->getOperand(2).getReg(), MRI);
if (!Register::isVirtualRegister(DefsReg1) ||
!Register::isVirtualRegister(DefsReg2))
@@ -1178,7 +1178,7 @@ static unsigned getIncomingRegForBlock(MachineInstr *Phi,
static unsigned getSrcVReg(unsigned Reg, MachineBasicBlock *BB1,
MachineBasicBlock *BB2, MachineRegisterInfo *MRI) {
unsigned SrcReg = Reg;
- while (1) {
+ while (true) {
unsigned NextReg = SrcReg;
MachineInstr *Inst = MRI->getVRegDef(SrcReg);
if (BB1 && Inst->getOpcode() == PPC::PHI && Inst->getParent() == BB2) {
@@ -1334,7 +1334,7 @@ bool PPCMIPeephole::eliminateRedundantTOCSaves(
// cmpwi r3, 0 ; greater than -1 means greater than or equal to 0
// bge 0, .LBB0_4
-bool PPCMIPeephole::eliminateRedundantCompare(void) {
+bool PPCMIPeephole::eliminateRedundantCompare() {
bool Simplified = false;
for (MachineBasicBlock &MBB2 : *MF) {
@@ -1737,4 +1737,3 @@ INITIALIZE_PASS_END(PPCMIPeephole, DEBUG_TYPE,
char PPCMIPeephole::ID = 0;
FunctionPass*
llvm::createPPCMIPeepholePass() { return new PPCMIPeephole(); }
-
diff --git a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
index 4bccc5596d2b..76b016c0ee79 100644
--- a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -390,6 +390,18 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
return Reserved;
}
+bool PPCRegisterInfo::isAsmClobberable(const MachineFunction &MF,
+ MCRegister PhysReg) const {
+ // We cannot use getReservedRegs() to find the registers that are not asm
+ // clobberable because there are some reserved registers which can be
+ // clobbered by inline asm. For example, when LR is clobbered, the register is
+ // saved and restored. We will hardcode the registers that are not asm
+ // clobberable in this function.
+
+ // The stack pointer (R1/X1) is not clobberable by inline asm
+ return PhysReg != PPC::R1 && PhysReg != PPC::X1;
+}
+
bool PPCRegisterInfo::requiresFrameIndexScavenging(const MachineFunction &MF) const {
const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
const PPCInstrInfo *InstrInfo = Subtarget.getInstrInfo();
@@ -423,7 +435,7 @@ bool PPCRegisterInfo::requiresFrameIndexScavenging(const MachineFunction &MF) co
continue;
int FrIdx = Info[i].getFrameIdx();
- unsigned Reg = Info[i].getReg();
+ Register Reg = Info[i].getReg();
const TargetRegisterClass *RC = getMinimalPhysRegClass(Reg);
unsigned Opcode = InstrInfo->getStoreOpcodeForSpill(RC);
diff --git a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCRegisterInfo.h b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCRegisterInfo.h
index 2e534dd1bcd5..114f6d0f4c66 100644
--- a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCRegisterInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCRegisterInfo.h
@@ -91,6 +91,8 @@ public:
void adjustStackMapLiveOutMask(uint32_t *Mask) const override;
BitVector getReservedRegs(const MachineFunction &MF) const override;
+ bool isAsmClobberable(const MachineFunction &MF,
+ MCRegister PhysReg) const override;
bool isCallerPreservedPhysReg(MCRegister PhysReg,
const MachineFunction &MF) const override;
@@ -185,6 +187,10 @@ public:
return RegName;
}
+
+ bool isNonallocatableRegisterCalleeSave(MCRegister Reg) const override {
+ return Reg == PPC::LR || Reg == PPC::LR8;
+ }
};
} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
index ed28731b8ef2..cc5738a5d7b6 100644
--- a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
@@ -374,11 +374,10 @@ bool PPCTTIImpl::mightUseCTR(BasicBlock *BB, TargetLibraryInfo *LibInfo,
// clobbers ctr.
auto asmClobbersCTR = [](InlineAsm *IA) {
InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
- for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
- InlineAsm::ConstraintInfo &C = CIV[i];
+ for (const InlineAsm::ConstraintInfo &C : CIV) {
if (C.Type != InlineAsm::isInput)
- for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
- if (StringRef(C.Codes[j]).equals_insensitive("{ctr}"))
+ for (const auto &Code : C.Codes)
+ if (StringRef(Code).equals_insensitive("{ctr}"))
return true;
}
return false;
@@ -653,11 +652,17 @@ bool PPCTTIImpl::mightUseCTR(BasicBlock *BB, TargetLibraryInfo *LibInfo,
}
return true;
- } else if (isa<BinaryOperator>(J) &&
- (J->getType()->getScalarType()->isFP128Ty() ||
+ } else if ((J->getType()->getScalarType()->isFP128Ty() ||
J->getType()->getScalarType()->isPPC_FP128Ty())) {
// Most operations on f128 or ppc_f128 values become calls.
return true;
+ } else if (isa<FCmpInst>(J) &&
+ J->getOperand(0)->getType()->getScalarType()->isFP128Ty()) {
+ return true;
+ } else if ((isa<FPTruncInst>(J) || isa<FPExtInst>(J)) &&
+ (cast<CastInst>(J)->getSrcTy()->getScalarType()->isFP128Ty() ||
+ cast<CastInst>(J)->getDestTy()->getScalarType()->isFP128Ty())) {
+ return true;
} else if (isa<UIToFPInst>(J) || isa<SIToFPInst>(J) ||
isa<FPToUIInst>(J) || isa<FPToSIInst>(J)) {
CastInst *CI = cast<CastInst>(J);
@@ -1295,8 +1300,8 @@ bool PPCTTIImpl::canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
LoopInfo *LI, DominatorTree *DT,
AssumptionCache *AC, TargetLibraryInfo *LibInfo) {
// Process nested loops first.
- for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
- if (canSaveCmp(*I, BI, SE, LI, DT, AC, LibInfo))
+ for (Loop *I : *L)
+ if (canSaveCmp(I, BI, SE, LI, DT, AC, LibInfo))
return false; // Stop search.
HardwareLoopInfo HWLoopInfo(L);
diff --git a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
index 0be35adc35c7..8a7d324ddfe1 100644
--- a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
@@ -297,18 +297,16 @@ protected:
// fma result.
LiveInterval &NewFMAInt = LIS->getInterval(KilledProdReg);
- for (LiveInterval::iterator AI = FMAInt.begin(), AE = FMAInt.end();
- AI != AE; ++AI) {
+ for (auto &AI : FMAInt) {
// Don't add the segment that corresponds to the original copy.
- if (AI->valno == AddendValNo)
+ if (AI.valno == AddendValNo)
continue;
VNInfo *NewFMAValNo =
- NewFMAInt.getNextValue(AI->start,
- LIS->getVNInfoAllocator());
+ NewFMAInt.getNextValue(AI.start, LIS->getVNInfoAllocator());
- NewFMAInt.addSegment(LiveInterval::Segment(AI->start, AI->end,
- NewFMAValNo));
+ NewFMAInt.addSegment(
+ LiveInterval::Segment(AI.start, AI.end, NewFMAValNo));
}
LLVM_DEBUG(dbgs() << " extended: " << NewFMAInt << '\n');
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
index 75592dd4c6f5..a2ea34fe11c7 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -302,7 +302,7 @@ struct RISCVOperand : public MCParsedAsmOperand {
struct VTypeOp VType;
};
- RISCVOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
+ RISCVOperand(KindTy K) : Kind(K) {}
public:
RISCVOperand(const RISCVOperand &o) : MCParsedAsmOperand() {
@@ -337,7 +337,6 @@ public:
bool isImm() const override { return Kind == KindTy::Immediate; }
bool isMem() const override { return false; }
bool isSystemRegister() const { return Kind == KindTy::SystemRegister; }
- bool isVType() const { return Kind == KindTy::VType; }
bool isGPR() const {
return Kind == KindTy::Register &&
@@ -421,7 +420,27 @@ public:
bool isCSRSystemRegister() const { return isSystemRegister(); }
- bool isVTypeI() const { return isVType(); }
+ bool isVTypeImm(unsigned N) const {
+ int64_t Imm;
+ RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
+ if (!isImm())
+ return false;
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
+ return IsConstantImm && isUIntN(N, Imm) && VK == RISCVMCExpr::VK_RISCV_None;
+ }
+
+ // If the last operand of the vsetvli/vsetivli instruction is a constant
+ // expression, KindTy is Immediate.
+ bool isVTypeI10() const {
+ if (Kind == KindTy::Immediate)
+ return isVTypeImm(10);
+ return Kind == KindTy::VType;
+ }
+ bool isVTypeI11() const {
+ if (Kind == KindTy::Immediate)
+ return isVTypeImm(11);
+ return Kind == KindTy::VType;
+ }
/// Return true if the operand is a valid for the fence instruction e.g.
/// ('iorw').
@@ -547,6 +566,16 @@ public:
return IsConstantImm && isUInt<7>(Imm) && VK == RISCVMCExpr::VK_RISCV_None;
}
+ bool isRnumArg() const {
+ int64_t Imm;
+ RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
+ if (!isImm())
+ return false;
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
+ return IsConstantImm && Imm >= INT64_C(0) && Imm <= INT64_C(10) &&
+ VK == RISCVMCExpr::VK_RISCV_None;
+ }
+
bool isSImm5() const {
if (!isImm())
return false;
@@ -898,9 +927,21 @@ public:
Inst.addOperand(MCOperand::createImm(SysReg.Encoding));
}
+ // Support non-canonical syntax:
+ // "vsetivli rd, uimm, 0xabc" or "vsetvli rd, rs1, 0xabc"
+ // "vsetivli rd, uimm, (0xc << N)" or "vsetvli rd, rs1, (0xc << N)"
void addVTypeIOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::createImm(getVType()));
+ int64_t Imm = 0;
+ if (Kind == KindTy::Immediate) {
+ RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
+ (void)IsConstantImm;
+ assert(IsConstantImm && "Invalid VTypeI Operand!");
+ } else {
+ Imm = getVType();
+ }
+ Inst.addOperand(MCOperand::createImm(Imm));
}
// Returns the rounding mode represented by this RISCVOperand. Should only
@@ -1209,6 +1250,9 @@ bool RISCVAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
(1 << 4),
"immediate must be in the range");
}
+ case Match_InvalidRnumArg: {
+ return generateImmOutOfRangeError(Operands, ErrorInfo, 0, 10);
+ }
}
llvm_unreachable("Unknown match type detected!");
@@ -1881,8 +1925,10 @@ bool RISCVAsmParser::ParseInstruction(ParseInstructionInfo &Info,
Operands.push_back(RISCVOperand::createToken(Name, NameLoc, isRV64()));
// If there are no more operands, then finish
- if (getLexer().is(AsmToken::EndOfStatement))
+ if (getLexer().is(AsmToken::EndOfStatement)) {
+ getParser().Lex(); // Consume the EndOfStatement.
return false;
+ }
// Parse first operand
if (parseOperand(Operands, Name))
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
index 9cfd36745f46..01c6bd90ea58 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -191,7 +191,8 @@ enum OperandType : unsigned {
OPERAND_SIMM12,
OPERAND_UIMM20,
OPERAND_UIMMLOG2XLEN,
- OPERAND_LAST_RISCV_IMM = OPERAND_UIMMLOG2XLEN,
+ OPERAND_RVKRNUM,
+ OPERAND_LAST_RISCV_IMM = OPERAND_RVKRNUM,
// Operand is either a register or uimm5, this is used by V extension pseudo
// instructions to represent a value that can be passed as AVL to either vsetvli
// or vsetivli.
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.h b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.h
index 59d8bb009d1c..7ce7dafb8ca1 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.h
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_RISCV_RISCVELFSTREAMER_H
-#define LLVM_LIB_TARGET_RISCV_RISCVELFSTREAMER_H
+#ifndef LLVM_LIB_TARGET_RISCV_MCTARGETDESC_RISCVELFSTREAMER_H
+#define LLVM_LIB_TARGET_RISCV_MCTARGETDESC_RISCVELFSTREAMER_H
#include "RISCVTargetStreamer.h"
#include "llvm/MC/MCELFStreamer.h"
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp
index 89a7d54f60f8..3268740849f0 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp
@@ -85,7 +85,7 @@ void RISCVInstPrinter::printRegName(raw_ostream &O, unsigned RegNo) const {
void RISCVInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
const MCSubtargetInfo &STI, raw_ostream &O,
const char *Modifier) {
- assert((Modifier == 0 || Modifier[0] == 0) && "No modifiers supported");
+ assert((Modifier == nullptr || Modifier[0] == 0) && "No modifiers supported");
const MCOperand &MO = MI->getOperand(OpNo);
if (MO.isReg()) {
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
index 0ee6d8de78c9..18858209aa9b 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
@@ -141,6 +141,24 @@ static void generateInstSeqImpl(int64_t Val,
Res.push_back(RISCVMatInt::Inst(RISCV::ADDI, Lo12));
}
+static unsigned extractRotateInfo(int64_t Val) {
+ // For the case: 0b111..1..xxxxxx1..1..
+ unsigned LeadingOnes = countLeadingOnes((uint64_t)Val);
+ unsigned TrailingOnes = countTrailingOnes((uint64_t)Val);
+ if (TrailingOnes > 0 && TrailingOnes < 64 &&
+ (LeadingOnes + TrailingOnes) > (64 - 12))
+ return 64 - TrailingOnes;
+
+ // For the case: 0bxxx1..1..1...xxx
+ unsigned UpperTrailingOnes = countTrailingOnes(Hi_32(Val));
+ unsigned LowerLeadingOnes = countLeadingOnes(Lo_32(Val));
+ if (UpperTrailingOnes < 32 &&
+ (UpperTrailingOnes + LowerLeadingOnes) > (64 - 12))
+ return 32 - UpperTrailingOnes;
+
+ return 0;
+}
+
namespace llvm {
namespace RISCVMatInt {
InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
@@ -312,6 +330,18 @@ InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
}
}
+ // Perform optimization with rori in the Zbb extension.
+ if (Res.size() > 2 && ActiveFeatures[RISCV::FeatureStdExtZbb]) {
+ if (unsigned Rotate = extractRotateInfo(Val)) {
+ RISCVMatInt::InstSeq TmpSeq;
+ uint64_t NegImm12 =
+ ((uint64_t)Val >> (64 - Rotate)) | ((uint64_t)Val << Rotate);
+ assert(isInt<12>(NegImm12));
+ TmpSeq.push_back(RISCVMatInt::Inst(RISCV::ADDI, NegImm12));
+ TmpSeq.push_back(RISCVMatInt::Inst(RISCV::RORI, Rotate));
+ Res = TmpSeq;
+ }
+ }
return Res;
}
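To see why the ADDI + RORI pair above reproduces the original constant: rotating the value left by Rotate packs its non-sign bits into a signed 12-bit immediate, and RORI by the same amount rotates them back. A standalone C++20 spot check, illustration only and not part of the patch:

#include <bit>
#include <cassert>
#include <cstdint>

int main() {
  uint64_t Val = 0xFFFFFFFFFFFFF0FFULL;       // 52 leading ones, 8 trailing ones
  unsigned Rotate = 64 - 8;                   // 64 - TrailingOnes, as computed above
  int64_t Imm12 = (int64_t)std::rotl(Val, (int)Rotate); // == -16, fits in 12 bits
  assert(Imm12 >= -2048 && Imm12 < 2048);
  // Emitted sequence: addi rd, zero, -16 ; rori rd, rd, 56
  assert(std::rotr((uint64_t)Imm12, (int)Rotate) == Val);
  return 0;
}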
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.h b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.h
index 02b4b18f54bd..6a8e0c640001 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.h
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_RISCV_MATINT_H
-#define LLVM_LIB_TARGET_RISCV_MATINT_H
+#ifndef LLVM_LIB_TARGET_RISCV_MCTARGETDESC_MATINT_H
+#define LLVM_LIB_TARGET_RISCV_MCTARGETDESC_MATINT_H
#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/SubtargetFeature.h"
@@ -15,7 +15,6 @@
namespace llvm {
class APInt;
-class MCSubtargetInfo;
namespace RISCVMatInt {
struct Inst {
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.h b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.h
index 0bda3de0ce5d..171780d94ce7 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.h
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_RISCV_RISCVTARGETSTREAMER_H
-#define LLVM_LIB_TARGET_RISCV_RISCVTARGETSTREAMER_H
+#ifndef LLVM_LIB_TARGET_RISCV_MCTARGETDESC_RISCVTARGETSTREAMER_H
+#define LLVM_LIB_TARGET_RISCV_MCTARGETDESC_RISCVTARGETSTREAMER_H
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCV.h b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCV.h
index b415c9f35e7f..03462240fd93 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCV.h
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCV.h
@@ -40,6 +40,9 @@ FunctionPass *createRISCVISelDag(RISCVTargetMachine &TM);
FunctionPass *createRISCVGatherScatterLoweringPass();
void initializeRISCVGatherScatterLoweringPass(PassRegistry &);
+FunctionPass *createRISCVSExtWRemovalPass();
+void initializeRISCVSExtWRemovalPass(PassRegistry &);
+
FunctionPass *createRISCVMergeBaseOffsetOptPass();
void initializeRISCVMergeBaseOffsetOptPass(PassRegistry &);
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCV.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCV.td
index 6aa915c01929..5b0f27c5e937 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCV.td
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCV.td
@@ -42,7 +42,7 @@ def HasStdExtD : Predicate<"Subtarget->hasStdExtD()">,
"'D' (Double-Precision Floating-Point)">;
def FeatureStdExtZfhmin
- : SubtargetFeature<"experimental-zfhmin", "HasStdExtZfhmin", "true",
+ : SubtargetFeature<"zfhmin", "HasStdExtZfhmin", "true",
"'Zfhmin' (Half-Precision Floating-Point Minimal)",
[FeatureStdExtF]>;
def HasStdExtZfhmin : Predicate<"Subtarget->hasStdExtZfhmin()">,
@@ -50,7 +50,7 @@ def HasStdExtZfhmin : Predicate<"Subtarget->hasStdExtZfhmin()">,
"'Zfhmin' (Half-Precision Floating-Point Minimal)">;
def FeatureStdExtZfh
- : SubtargetFeature<"experimental-zfh", "HasStdExtZfh", "true",
+ : SubtargetFeature<"zfh", "HasStdExtZfh", "true",
"'Zfh' (Half-Precision Floating-Point)",
[FeatureStdExtZfhmin, FeatureStdExtF]>;
def HasStdExtZfh : Predicate<"Subtarget->hasStdExtZfh()">,
@@ -65,83 +65,217 @@ def HasStdExtC : Predicate<"Subtarget->hasStdExtC()">,
"'C' (Compressed Instructions)">;
def FeatureStdExtZba
- : SubtargetFeature<"experimental-zba", "HasStdExtZba", "true",
- "'Zba' (Address calculation 'B' Instructions)">;
+ : SubtargetFeature<"zba", "HasStdExtZba", "true",
+ "'Zba' (Address Generation Instructions)">;
def HasStdExtZba : Predicate<"Subtarget->hasStdExtZba()">,
AssemblerPredicate<(all_of FeatureStdExtZba),
- "'Zba' (Address calculation 'B' Instructions)">;
+ "'Zba' (Address Generation Instructions)">;
def NotHasStdExtZba : Predicate<"!Subtarget->hasStdExtZba()">;
def FeatureStdExtZbb
- : SubtargetFeature<"experimental-zbb", "HasStdExtZbb", "true",
- "'Zbb' (Base 'B' Instructions)">;
+ : SubtargetFeature<"zbb", "HasStdExtZbb", "true",
+ "'Zbb' (Basic Bit-Manipulation)">;
def HasStdExtZbb : Predicate<"Subtarget->hasStdExtZbb()">,
AssemblerPredicate<(all_of FeatureStdExtZbb),
- "'Zbb' (Base 'B' Instructions)">;
+ "'Zbb' (Basic Bit-Manipulation)">;
def FeatureStdExtZbc
- : SubtargetFeature<"experimental-zbc", "HasStdExtZbc", "true",
- "'Zbc' (Carry-Less 'B' Instructions)">;
+ : SubtargetFeature<"zbc", "HasStdExtZbc", "true",
+ "'Zbc' (Carry-Less Multiplication)">;
def HasStdExtZbc : Predicate<"Subtarget->hasStdExtZbc()">,
AssemblerPredicate<(all_of FeatureStdExtZbc),
- "'Zbc' (Carry-Less 'B' Instructions)">;
+ "'Zbc' (Carry-Less Multiplication)">;
def FeatureStdExtZbe
: SubtargetFeature<"experimental-zbe", "HasStdExtZbe", "true",
- "'Zbe' (Extract-Deposit 'B' Instructions)">;
+ "'Zbe' (Extract-Deposit 'Zb' Instructions)">;
def HasStdExtZbe : Predicate<"Subtarget->hasStdExtZbe()">,
AssemblerPredicate<(all_of FeatureStdExtZbe),
- "'Zbe' (Extract-Deposit 'B' Instructions)">;
+ "'Zbe' (Extract-Deposit 'Zb' Instructions)">;
def FeatureStdExtZbf
: SubtargetFeature<"experimental-zbf", "HasStdExtZbf", "true",
- "'Zbf' (Bit-Field 'B' Instructions)">;
+ "'Zbf' (Bit-Field 'Zb' Instructions)">;
def HasStdExtZbf : Predicate<"Subtarget->hasStdExtZbf()">,
AssemblerPredicate<(all_of FeatureStdExtZbf),
- "'Zbf' (Bit-Field 'B' Instructions)">;
+ "'Zbf' (Bit-Field 'Zb' Instructions)">;
def FeatureStdExtZbm
: SubtargetFeature<"experimental-zbm", "HasStdExtZbm", "true",
- "'Zbm' (Matrix 'B' Instructions)">;
+ "'Zbm' (Matrix 'Zb' Instructions)">;
def HasStdExtZbm : Predicate<"Subtarget->hasStdExtZbm()">,
AssemblerPredicate<(all_of FeatureStdExtZbm),
- "'Zbm' (Matrix 'B' Instructions)">;
+ "'Zbm' (Matrix 'Zb' Instructions)">;
def FeatureStdExtZbp
: SubtargetFeature<"experimental-zbp", "HasStdExtZbp", "true",
- "'Zbp' (Permutation 'B' Instructions)">;
+ "'Zbp' (Permutation 'Zb' Instructions)">;
def HasStdExtZbp : Predicate<"Subtarget->hasStdExtZbp()">,
AssemblerPredicate<(all_of FeatureStdExtZbp),
- "'Zbp' (Permutation 'B' Instructions)">;
+ "'Zbp' (Permutation 'Zb' Instructions)">;
def FeatureStdExtZbr
: SubtargetFeature<"experimental-zbr", "HasStdExtZbr", "true",
- "'Zbr' (Polynomial Reduction 'B' Instructions)">;
+ "'Zbr' (Polynomial Reduction 'Zb' Instructions)">;
def HasStdExtZbr : Predicate<"Subtarget->hasStdExtZbr()">,
AssemblerPredicate<(all_of FeatureStdExtZbr),
- "'Zbr' (Polynomial Reduction 'B' Instructions)">;
+ "'Zbr' (Polynomial Reduction 'Zb' Instructions)">;
def FeatureStdExtZbs
- : SubtargetFeature<"experimental-zbs", "HasStdExtZbs", "true",
- "'Zbs' (Single-Bit 'B' Instructions)">;
+ : SubtargetFeature<"zbs", "HasStdExtZbs", "true",
+ "'Zbs' (Single-Bit Instructions)">;
def HasStdExtZbs : Predicate<"Subtarget->hasStdExtZbs()">,
AssemblerPredicate<(all_of FeatureStdExtZbs),
- "'Zbs' (Single-Bit 'B' Instructions)">;
+ "'Zbs' (Single-Bit Instructions)">;
def FeatureStdExtZbt
: SubtargetFeature<"experimental-zbt", "HasStdExtZbt", "true",
- "'Zbt' (Ternary 'B' Instructions)">;
+ "'Zbt' (Ternary 'Zb' Instructions)">;
def HasStdExtZbt : Predicate<"Subtarget->hasStdExtZbt()">,
AssemblerPredicate<(all_of FeatureStdExtZbt),
- "'Zbt' (Ternary 'B' Instructions)">;
+ "'Zbt' (Ternary 'Zb' Instructions)">;
// Some instructions belong to both the basic and the permutation
// subextensions. They should be enabled if either has been specified.
def HasStdExtZbbOrZbp
: Predicate<"Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp()">,
AssemblerPredicate<(any_of FeatureStdExtZbb, FeatureStdExtZbp),
- "'Zbb' (Base 'B' Instructions) or "
- "'Zbp' (Permutation 'B' Instructions)">;
+ "'Zbb' (Basic Bit-Manipulation) or "
+ "'Zbp' (Permutation 'Zb' Instructions)">;
+
+def FeatureStdExtZbkb
+ : SubtargetFeature<"zbkb", "HasStdExtZbkb", "true",
+ "'Zbkb' (Bitmanip instructions for Cryptography)">;
+def HasStdExtZbkb : Predicate<"Subtarget->hasStdExtZbkb()">,
+ AssemblerPredicate<(all_of FeatureStdExtZbkb),
+ "'Zbkb' (Bitmanip instructions for Cryptography)">;
+
+def FeatureStdExtZbkx
+ : SubtargetFeature<"zbkx", "HasStdExtZbkx", "true",
+ "'Zbkx' (Crossbar permutation instructions)">;
+def HasStdExtZbkx : Predicate<"Subtarget->hasStdExtZbkx()">,
+ AssemblerPredicate<(all_of FeatureStdExtZbkx),
+ "'Zbkx' (Crossbar permutation instructions)">;
+
+def HasStdExtZbpOrZbkx
+ : Predicate<"Subtarget->hasStdExtZbp() || Subtarget->hasStdExtZbkx()">,
+ AssemblerPredicate<(any_of FeatureStdExtZbp, FeatureStdExtZbkx),
+ "'Zbp' (Permutation 'Zb' Instructions) or "
+ "'Zbkx' (Crossbar permutation instructions)">;
+
+def HasStdExtZbpOrZbkb
+ : Predicate<"Subtarget->hasStdExtZbp() || Subtarget->hasStdExtZbkb()">,
+ AssemblerPredicate<(any_of FeatureStdExtZbp, FeatureStdExtZbkb),
+ "'Zbp' (Permutation 'Zb' Instructions) or "
+ "'Zbkb' (Bitmanip instructions for Cryptography)">;
+
+def HasStdExtZbbOrZbkb
+ : Predicate<"Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbkb()">,
+ AssemblerPredicate<(any_of FeatureStdExtZbb, FeatureStdExtZbkb),
+ "'Zbb' (Basic Bit-Manipulation) or "
+ "'Zbkb' (Bitmanip instructions for Cryptography)">;
+
+def HasStdExtZbbOrZbpOrZbkb
+ : Predicate<"Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp() || Subtarget->hasStdExtZbkb()">,
+ AssemblerPredicate<(any_of FeatureStdExtZbb, FeatureStdExtZbp, FeatureStdExtZbkb),
+ "'Zbb' (Basic Bit-Manipulation) or "
+ "'Zbp' (Permutation 'Zb' Instructions) or "
+ "'Zbkb' (Bitmanip instructions for Cryptography)">;
+
+// The carry-less multiply subextension for cryptography (Zbkc) is a subset of
+// the basic carry-less multiply subextension (Zbc); the former should be
+// enabled whenever the latter is enabled.
+def FeatureStdExtZbkc
+ : SubtargetFeature<"zbkc", "HasStdExtZbkc", "true",
+ "'Zbkc' (Carry-less multiply instructions for Cryptography)">;
+def HasStdExtZbkc
+ : Predicate<"Subtarget->hasStdExtZbkc()">,
+ AssemblerPredicate<(all_of FeatureStdExtZbkc),
+ "'Zbkc' (Carry-less multiply instructions for Cryptography)">;
+
+def HasStdExtZbcOrZbkc
+ : Predicate<"Subtarget->hasStdExtZbc() || Subtarget->hasStdExtZbkc()">,
+ AssemblerPredicate<(any_of FeatureStdExtZbc, FeatureStdExtZbkc),
+ "'Zbc' (Carry-Less Multiplication) or "
+ "'Zbkc' (Carry-less multiply instructions for Cryptography)">;
+
+def FeatureStdExtZknd
+ : SubtargetFeature<"zknd", "HasStdExtZknd", "true",
+ "'Zknd' (NIST Suite: AES Decryption)">;
+def HasStdExtZknd : Predicate<"Subtarget->hasStdExtZknd()">,
+ AssemblerPredicate<(all_of FeatureStdExtZknd),
+ "'Zknd' (NIST Suite: AES Decryption)">;
+
+def FeatureStdExtZkne
+ : SubtargetFeature<"zkne", "HasStdExtZkne", "true",
+ "'Zkne' (NIST Suite: AES Encryption)">;
+def HasStdExtZkne : Predicate<"Subtarget->hasStdExtZkne()">,
+ AssemblerPredicate<(all_of FeatureStdExtZkne),
+ "'Zkne' (NIST Suite: AES Encryption)">;
+
+// Some instructions belong to both Zknd and Zkne subextensions.
+// They should be enabled if either has been specified.
+def HasStdExtZkndOrZkne
+ : Predicate<"Subtarget->hasStdExtZknd() || Subtarget->hasStdExtZkne()">,
+ AssemblerPredicate<(any_of FeatureStdExtZknd, FeatureStdExtZkne),
+ "'Zknd' (NIST Suite: AES Decryption) or "
+ "'Zkne' (NIST Suite: AES Encryption)">;
+
+def FeatureStdExtZknh
+ : SubtargetFeature<"zknh", "HasStdExtZknh", "true",
+ "'Zknh' (NIST Suite: Hash Function Instructions)">;
+def HasStdExtZknh : Predicate<"Subtarget->hasStdExtZknh()">,
+ AssemblerPredicate<(all_of FeatureStdExtZknh),
+ "'Zknh' (NIST Suite: Hash Function Instructions)">;
+
+def FeatureStdExtZksed
+ : SubtargetFeature<"zksed", "HasStdExtZksed", "true",
+ "'Zksed' (ShangMi Suite: SM4 Block Cipher Instructions)">;
+def HasStdExtZksed : Predicate<"Subtarget->hasStdExtZksed()">,
+ AssemblerPredicate<(all_of FeatureStdExtZksed),
+ "'Zksed' (ShangMi Suite: SM4 Block Cipher Instructions)">;
+
+def FeatureStdExtZksh
+ : SubtargetFeature<"zksh", "HasStdExtZksh", "true",
+ "'Zksh' (ShangMi Suite: SM3 Hash Function Instructions)">;
+def HasStdExtZksh : Predicate<"Subtarget->hasStdExtZksh()">,
+ AssemblerPredicate<(all_of FeatureStdExtZksh),
+ "'Zksh' (ShangMi Suite: SM3 Hash Function Instructions)">;
+
+def FeatureStdExtZkr
+ : SubtargetFeature<"zkr", "HasStdExtZkr", "true",
+ "'Zkr' (Entropy Source Extension)">;
+def HasStdExtZkr : Predicate<"Subtarget->hasStdExtZkr()">,
+ AssemblerPredicate<(all_of FeatureStdExtZkr),
+ "'Zkr' (Entropy Source Extension)">;
+
+def FeatureStdExtZkn
+ : SubtargetFeature<"zkn", "HasStdExtZkn", "true",
+ "'Zkn' (NIST Algorithm Suite)",
+ [FeatureStdExtZbkb,
+ FeatureStdExtZbkc,
+ FeatureStdExtZbkx,
+ FeatureStdExtZkne,
+ FeatureStdExtZknd,
+ FeatureStdExtZknh]>;
+
+def FeatureStdExtZks
+ : SubtargetFeature<"zks", "HasStdExtZks", "true",
+ "'Zks' (ShangMi Algorithm Suite)",
+ [FeatureStdExtZbkb,
+ FeatureStdExtZbkc,
+ FeatureStdExtZbkx,
+ FeatureStdExtZksed,
+ FeatureStdExtZksh]>;
+
+def FeatureStdExtZkt
+ : SubtargetFeature<"zkt", "HasStdExtZkt", "true",
+ "'Zkt' (Data Independent Execution Latency)">;
+
+def FeatureStdExtZk
+ : SubtargetFeature<"zk", "HasStdExtZk", "true",
+ "'Zk' (Standard scalar cryptography extension)",
+ [FeatureStdExtZkn,
+ FeatureStdExtZkr,
+ FeatureStdExtZkt]>;
def FeatureNoRVCHints
: SubtargetFeature<"no-rvc-hints", "EnableRVCHintInstrs", "false",
@@ -150,23 +284,66 @@ def HasRVCHints : Predicate<"Subtarget->enableRVCHintInstrs()">,
AssemblerPredicate<(all_of(not FeatureNoRVCHints)),
"RVC Hint Instructions">;
+def FeatureStdExtZvl32b : SubtargetFeature<"zvl32b", "ZvlLen", "ExtZvl::Zvl32b",
+ "'Zvl' (Minimum Vector Length) 32">;
+
+foreach i = { 6-15 } in {
+ defvar I = !shl(1, i);
+ def FeatureStdExtZvl#I#b :
+ SubtargetFeature<"zvl"#I#"b", "ZvlLen", "ExtZvl::Zvl"#I#"b",
+ "'Zvl' (Minimum Vector Length) "#I,
+ [!cast<SubtargetFeature>("FeatureStdExtZvl"#!srl(I, 1)#"b")]>;
+}
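For reference, a small standalone C++ loop (not part of the patch) spelling out what the TableGen foreach above expands to:

#include <cstdio>

int main() {
  // One zvl<N>b feature per power of two from 64 to 32768, each implying
  // the next smaller zvl<N/2>b feature (zvl32b is defined explicitly above).
  for (int I = 6; I <= 15; ++I) {
    unsigned N = 1u << I;
    std::printf("FeatureStdExtZvl%ub implies FeatureStdExtZvl%ub\n", N, N / 2);
  }
  return 0;
}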
+
+def FeatureStdExtZve32x
+ : SubtargetFeature<"zve32x", "HasStdExtZve32x", "true",
+ "'Zve32x' (Vector Extensions for Embedded Processors "
+ "with maximal 32 EEW)",
+ [FeatureStdExtZvl32b]>;
+
+def FeatureStdExtZve32f
+ : SubtargetFeature<"zve32f", "HasStdExtZve32f", "true",
+ "'Zve32f' (Vector Extensions for Embedded Processors "
+ "with maximal 32 EEW and F extension)",
+ [FeatureStdExtZve32x]>;
+
+def FeatureStdExtZve64x
+ : SubtargetFeature<"zve64x", "HasStdExtZve64x", "true",
+ "'Zve64x' (Vector Extensions for Embedded Processors "
+ "with maximal 64 EEW)", [FeatureStdExtZve32x, FeatureStdExtZvl64b]>;
+
+def FeatureStdExtZve64f
+ : SubtargetFeature<"zve64f", "HasStdExtZve64f", "true",
+ "'Zve64f' (Vector Extensions for Embedded Processors "
+ "with maximal 64 EEW and F extension)",
+ [FeatureStdExtZve32f, FeatureStdExtZve64x]>;
+
+def FeatureStdExtZve64d
+ : SubtargetFeature<"zve64d", "HasStdExtZve64d", "true",
+ "'Zve64d' (Vector Extensions for Embedded Processors "
+ "with maximal 64 EEW, F and D extension)",
+ [FeatureStdExtZve64f]>;
+
def FeatureStdExtV
- : SubtargetFeature<"experimental-v", "HasStdExtV", "true",
- "'V' (Vector Instructions)">;
-def HasStdExtV : Predicate<"Subtarget->hasStdExtV()">,
- AssemblerPredicate<(all_of FeatureStdExtV),
- "'V' (Vector Instructions)">;
-
-def HasVInstructions : Predicate<"Subtarget->hasVInstructions()">;
-def HasVInstructionsAnyF : Predicate<"Subtarget->hasVInstructionsAnyF()">;
-
-def FeatureStdExtZvlsseg
- : SubtargetFeature<"experimental-zvlsseg", "HasStdExtZvlsseg", "true",
- "'Zvlsseg' (Vector segment load/store instructions)",
- [FeatureStdExtV]>;
-def HasStdExtZvlsseg : Predicate<"Subtarget->hasStdExtZvlsseg()">,
- AssemblerPredicate<(all_of FeatureStdExtZvlsseg),
- "'Zvlsseg' (Vector segment load/store instructions)">;
+ : SubtargetFeature<"v", "HasStdExtV", "true",
+ "'V' (Vector Extension for Application Processors)",
+ [FeatureStdExtZvl128b, FeatureStdExtF, FeatureStdExtD]>;
+
+def HasVInstructions : Predicate<"Subtarget->hasVInstructions()">,
+ AssemblerPredicate<
+ (any_of FeatureStdExtZve32x, FeatureStdExtV),
+ "'V' (Vector Extension for Application Processors), 'Zve32x' or "
+ "'Zve64x' (Vector Extensions for Embedded Processors)">;
+def HasVInstructionsI64 : Predicate<"Subtarget->hasVInstructionsI64()">,
+ AssemblerPredicate<
+ (any_of FeatureStdExtZve64x, FeatureStdExtV),
+ "'V' (Vector Extension for Application Processors) or 'Zve64x' "
+ "(Vector Extensions for Embedded Processors)">;
+def HasVInstructionsAnyF : Predicate<"Subtarget->hasVInstructionsAnyF()">,
+ AssemblerPredicate<
+ (any_of FeatureStdExtZve32f, FeatureStdExtV),
+ "'V' (Vector Extension for Application Processors), 'Zve32f', "
+ "'Zve64f' or 'Zve64d' (Vector Extensions for Embedded Processors)">;
def Feature64Bit
: SubtargetFeature<"64bit", "HasRV64", "true", "Implements RV64">;
@@ -198,6 +375,9 @@ foreach i = {1-31} in
def FeatureSaveRestore : SubtargetFeature<"save-restore", "EnableSaveRestore",
"true", "Enable save/restore.">;
+def TuneSiFive7 : SubtargetFeature<"sifive7", "RISCVProcFamily", "SiFive7",
+ "SiFive 7-Series processors">;
+
//===----------------------------------------------------------------------===//
// Named operands for CSR instructions.
//===----------------------------------------------------------------------===//
@@ -226,8 +406,10 @@ def : ProcessorModel<"generic-rv64", NoSchedModel, [Feature64Bit]>;
def : ProcessorModel<"rocket-rv32", RocketModel, []>;
def : ProcessorModel<"rocket-rv64", RocketModel, [Feature64Bit]>;
-def : ProcessorModel<"sifive-7-rv32", SiFive7Model, []>;
-def : ProcessorModel<"sifive-7-rv64", SiFive7Model, [Feature64Bit]>;
+def : ProcessorModel<"sifive-7-rv32", SiFive7Model, [],
+ [TuneSiFive7]>;
+def : ProcessorModel<"sifive-7-rv64", SiFive7Model, [Feature64Bit],
+ [TuneSiFive7]>;
def : ProcessorModel<"sifive-e20", RocketModel, [FeatureStdExtM,
FeatureStdExtC]>;
@@ -253,7 +435,8 @@ def : ProcessorModel<"sifive-e34", RocketModel, [FeatureStdExtM,
def : ProcessorModel<"sifive-e76", SiFive7Model, [FeatureStdExtM,
FeatureStdExtA,
FeatureStdExtF,
- FeatureStdExtC]>;
+ FeatureStdExtC],
+ [TuneSiFive7]>;
def : ProcessorModel<"sifive-s21", RocketModel, [Feature64Bit,
FeatureStdExtM,
@@ -277,7 +460,8 @@ def : ProcessorModel<"sifive-s76", SiFive7Model, [Feature64Bit,
FeatureStdExtA,
FeatureStdExtF,
FeatureStdExtD,
- FeatureStdExtC]>;
+ FeatureStdExtC],
+ [TuneSiFive7]>;
def : ProcessorModel<"sifive-u54", RocketModel, [Feature64Bit,
FeatureStdExtM,
@@ -291,7 +475,8 @@ def : ProcessorModel<"sifive-u74", SiFive7Model, [Feature64Bit,
FeatureStdExtA,
FeatureStdExtF,
FeatureStdExtD,
- FeatureStdExtC]>;
+ FeatureStdExtC],
+ [TuneSiFive7]>;
//===----------------------------------------------------------------------===//
// Define the RISC-V target.
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index f5d491938050..ad003404d793 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -242,7 +242,8 @@ bool RISCVFrameLowering::hasBP(const MachineFunction &MF) const {
// adjustment, we can not use SP to access the stack objects for the
// arguments. Instead, use BP to access these stack objects.
return (MFI.hasVarSizedObjects() ||
- (!hasReservedCallFrame(MF) && MFI.getMaxCallFrameSize() != 0)) &&
+ (!hasReservedCallFrame(MF) && (!MFI.isMaxCallFrameSizeComputed() ||
+ MFI.getMaxCallFrameSize() != 0))) &&
TRI->hasStackRealignment(MF);
}
@@ -940,11 +941,22 @@ void RISCVFrameLowering::processFunctionBeforeFrameFinalized(
}
static bool hasRVVFrameObject(const MachineFunction &MF) {
- const MachineFrameInfo &MFI = MF.getFrameInfo();
- for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I)
- if (MFI.getStackID(I) == TargetStackID::ScalableVector)
- return true;
- return false;
+ // Originally, this function scanned all stack objects to check whether any
+ // scalable vector object was on the stack. However, that caused errors in the
+ // register allocator. In issue 53016, it returned false before RA because
+ // there were no RVV stack objects yet, and true after RA because spill slots
+ // for RVV values had been created during RA. Because of this inconsistency,
+ // BP was not reserved during register allocation, yet BP accesses were still
+ // generated in the PEI pass.
+ //
+ // The function now uses hasVInstructions() as the return value. This is not
+ // precise, but it keeps register allocation correct.
+ //
+ // FIXME: Find a better way to make the decision or revisit the solution in
+ // D103622.
+ //
+ // Refer to https://github.com/llvm/llvm-project/issues/53016.
+ return MF.getSubtarget<RISCVSubtarget>().hasVInstructions();
}
// Not preserve stack space within prologue for outgoing variables when the
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp
index d47bd739235f..ba91b16661a4 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp
@@ -127,6 +127,41 @@ static std::pair<Value *, Value *> matchStridedConstant(Constant *StartC) {
return std::make_pair(StartVal, Stride);
}
+static std::pair<Value *, Value *> matchStridedStart(Value *Start,
+ IRBuilder<> &Builder) {
+ // Base case, start is a strided constant.
+ auto *StartC = dyn_cast<Constant>(Start);
+ if (StartC)
+ return matchStridedConstant(StartC);
+
+ // Not a constant, maybe it's a strided constant with a splat added to it.
+ auto *BO = dyn_cast<BinaryOperator>(Start);
+ if (!BO || BO->getOpcode() != Instruction::Add)
+ return std::make_pair(nullptr, nullptr);
+
+ // Look for an operand that is splatted.
+ unsigned OtherIndex = 1;
+ Value *Splat = getSplatValue(BO->getOperand(0));
+ if (!Splat) {
+ Splat = getSplatValue(BO->getOperand(1));
+ OtherIndex = 0;
+ }
+ if (!Splat)
+ return std::make_pair(nullptr, nullptr);
+
+ Value *Stride;
+ std::tie(Start, Stride) = matchStridedStart(BO->getOperand(OtherIndex),
+ Builder);
+ if (!Start)
+ return std::make_pair(nullptr, nullptr);
+
+ // Add the splat value to the start.
+ Builder.SetInsertPoint(BO);
+ Builder.SetCurrentDebugLocation(DebugLoc());
+ Start = Builder.CreateAdd(Start, Splat);
+ return std::make_pair(Start, Stride);
+}
+
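The recursion above relies on a simple identity: adding a splatted value to a strided sequence shifts the start but leaves the stride unchanged. A standalone C++ illustration (not part of the patch):

#include <array>
#include <cassert>
#include <cstdint>

int main() {
  constexpr int64_t Start = 0, Stride = 2, Splat = 10;
  std::array<int64_t, 4> Strided, Shifted;
  for (int I = 0; I < 4; ++I) {
    Strided[I] = Start + I * Stride;     // <0, 2, 4, 6>
    Shifted[I] = Strided[I] + Splat;     // <10, 12, 14, 16>
  }
  for (int I = 0; I < 4; ++I)
    assert(Shifted[I] == (Start + Splat) + I * Stride); // new start, same stride
  return 0;
}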
// Recursively walk back through the use-def chain until we find a Phi with a strided
// start value. Build and update a scalar recurrence as we unwind the recursion.
// We also update the Stride as we unwind. Our goal is to move all of the
@@ -161,12 +196,7 @@ bool RISCVGatherScatterLowering::matchStridedRecurrence(Value *Index, Loop *L,
if (!Step)
return false;
- // Start should be a strided constant.
- auto *StartC = dyn_cast<Constant>(Start);
- if (!StartC)
- return false;
-
- std::tie(Start, Stride) = matchStridedConstant(StartC);
+ std::tie(Start, Stride) = matchStridedStart(Start, Builder);
if (!Start)
return false;
assert(Stride != nullptr);
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index b24eb5f7bbf4..5870502d74d5 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -86,8 +86,12 @@ void RISCVDAGToDAGISel::PreprocessISelDAG() {
SDVTList VTs = CurDAG->getVTList({VT, MVT::Other});
SDValue IntID =
CurDAG->getTargetConstant(Intrinsic::riscv_vlse, DL, MVT::i64);
- SDValue Ops[] = {Chain, IntID, StackSlot,
- CurDAG->getRegister(RISCV::X0, MVT::i64), VL};
+ SDValue Ops[] = {Chain,
+ IntID,
+ CurDAG->getUNDEF(VT),
+ StackSlot,
+ CurDAG->getRegister(RISCV::X0, MVT::i64),
+ VL};
SDValue Result = CurDAG->getMemIntrinsicNode(
ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MVT::i64, MPI, Align(8),
@@ -125,12 +129,37 @@ void RISCVDAGToDAGISel::PostprocessISelDAG() {
CurDAG->RemoveDeadNodes();
}
-static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, int64_t Imm,
- const RISCVSubtarget &Subtarget) {
+static SDNode *selectImmWithConstantPool(SelectionDAG *CurDAG, const SDLoc &DL,
+ const MVT VT, int64_t Imm,
+ const RISCVSubtarget &Subtarget) {
+ assert(VT == MVT::i64 && "Expecting MVT::i64");
+ const RISCVTargetLowering *TLI = Subtarget.getTargetLowering();
+ ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(CurDAG->getConstantPool(
+ ConstantInt::get(EVT(VT).getTypeForEVT(*CurDAG->getContext()), Imm), VT));
+ SDValue Addr = TLI->getAddr(CP, *CurDAG);
+ SDValue Offset = CurDAG->getTargetConstant(0, DL, VT);
+ // Since there is no data race, the chain can be the entry node.
+ SDNode *Load = CurDAG->getMachineNode(RISCV::LD, DL, VT, Addr, Offset,
+ CurDAG->getEntryNode());
+ MachineFunction &MF = CurDAG->getMachineFunction();
+ MachineMemOperand *MemOp = MF.getMachineMemOperand(
+ MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
+ LLT(VT), CP->getAlign());
+ CurDAG->setNodeMemRefs(cast<MachineSDNode>(Load), {MemOp});
+ return Load;
+}
+
+static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
+ int64_t Imm, const RISCVSubtarget &Subtarget) {
MVT XLenVT = Subtarget.getXLenVT();
RISCVMatInt::InstSeq Seq =
RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());
+ // If Imm is expensive to build, put it into the constant pool instead.
+ if (Subtarget.useConstantPoolForLargeInts() &&
+ Seq.size() > Subtarget.getMaxBuildIntsCost())
+ return selectImmWithConstantPool(CurDAG, DL, VT, Imm, Subtarget);
+
SDNode *Result = nullptr;
SDValue SrcReg = CurDAG->getRegister(RISCV::X0, XLenVT);
for (RISCVMatInt::Inst &Inst : Seq) {
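A hedged sketch of the policy this hunk introduces: when building an immediate would take more instructions than a tunable threshold, load it from the constant pool instead. The helper below is illustrative only; the real decision uses the subtarget hooks named above, and the threshold here is made up.

#include <cstddef>

enum class ImmStrategy { BuildWithInsts, LoadFromConstantPool };

static ImmStrategy chooseImmStrategy(std::size_t SeqLen, bool UseConstantPool,
                                     std::size_t MaxBuildIntsCost) {
  if (UseConstantPool && SeqLen > MaxBuildIntsCost)
    return ImmStrategy::LoadFromConstantPool;
  return ImmStrategy::BuildWithInsts;
}

int main() {
  // A 5-instruction LUI/ADDI/SLLI chain loses to a single load from the pool.
  return chooseImmStrategy(5, /*UseConstantPool=*/true, /*MaxBuildIntsCost=*/2) ==
                 ImmStrategy::LoadFromConstantPool
             ? 0
             : 1;
}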
@@ -372,6 +401,10 @@ void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
+ if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
+ report_fatal_error("The V extension does not support EEW=64 for index "
+ "values when XLEN=32");
+ }
const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
@@ -450,6 +483,10 @@ void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
+ if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
+ report_fatal_error("The V extension does not support EEW=64 for index "
+ "values when XLEN=32");
+ }
const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
@@ -462,6 +499,75 @@ void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
ReplaceNode(Node, Store);
}
+void RISCVDAGToDAGISel::selectVSETVLI(SDNode *Node) {
+ if (!Subtarget->hasVInstructions())
+ return;
+
+ assert((Node->getOpcode() == ISD::INTRINSIC_W_CHAIN ||
+ Node->getOpcode() == ISD::INTRINSIC_WO_CHAIN) &&
+ "Unexpected opcode");
+
+ SDLoc DL(Node);
+ MVT XLenVT = Subtarget->getXLenVT();
+
+ bool HasChain = Node->getOpcode() == ISD::INTRINSIC_W_CHAIN;
+ unsigned IntNoOffset = HasChain ? 1 : 0;
+ unsigned IntNo = Node->getConstantOperandVal(IntNoOffset);
+
+ assert((IntNo == Intrinsic::riscv_vsetvli ||
+ IntNo == Intrinsic::riscv_vsetvlimax ||
+ IntNo == Intrinsic::riscv_vsetvli_opt ||
+ IntNo == Intrinsic::riscv_vsetvlimax_opt) &&
+ "Unexpected vsetvli intrinsic");
+
+ bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax ||
+ IntNo == Intrinsic::riscv_vsetvlimax_opt;
+ unsigned Offset = IntNoOffset + (VLMax ? 1 : 2);
+
+ assert(Node->getNumOperands() == Offset + 2 &&
+ "Unexpected number of operands");
+
+ unsigned SEW =
+ RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
+ RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
+ Node->getConstantOperandVal(Offset + 1) & 0x7);
+
+ unsigned VTypeI = RISCVVType::encodeVTYPE(VLMul, SEW, /*TailAgnostic*/ true,
+ /*MaskAgnostic*/ false);
+ SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);
+
+ SmallVector<EVT, 2> VTs = {XLenVT};
+ if (HasChain)
+ VTs.push_back(MVT::Other);
+
+ SDValue VLOperand;
+ unsigned Opcode = RISCV::PseudoVSETVLI;
+ if (VLMax) {
+ VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
+ Opcode = RISCV::PseudoVSETVLIX0;
+ } else {
+ VLOperand = Node->getOperand(IntNoOffset + 1);
+
+ if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
+ uint64_t AVL = C->getZExtValue();
+ if (isUInt<5>(AVL)) {
+ SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
+ SmallVector<SDValue, 3> Ops = {VLImm, VTypeIOp};
+ if (HasChain)
+ Ops.push_back(Node->getOperand(0));
+ ReplaceNode(
+ Node, CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL, VTs, Ops));
+ return;
+ }
+ }
+ }
+
+ SmallVector<SDValue, 3> Ops = {VLOperand, VTypeIOp};
+ if (HasChain)
+ Ops.push_back(Node->getOperand(0));
+
+ ReplaceNode(Node, CurDAG->getMachineNode(Opcode, DL, VTs, Ops));
+}
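A minimal sketch of the form selection selectVSETVLI performs, assuming only what is visible above; the enum names stand in for the PseudoVSETIVLI / PseudoVSETVLIX0 / PseudoVSETVLI choices and this is not LLVM code:

#include <cstdint>
#include <optional>

enum class VSetForm { VSetIVLI, VSetVLIX0, VSetVLIReg };

static VSetForm chooseVSetForm(bool VLMax, std::optional<uint64_t> ConstAVL) {
  if (VLMax)
    return VSetForm::VSetVLIX0;        // AVL operand is x0 (VLMAX request)
  if (ConstAVL && *ConstAVL < 32)      // isUInt<5>(AVL)
    return VSetForm::VSetIVLI;         // 5-bit immediate AVL form
  return VSetForm::VSetVLIReg;         // AVL stays in a register
}

int main() {
  return chooseVSetForm(false, 16) == VSetForm::VSetIVLI ? 0 : 1;
}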
void RISCVDAGToDAGISel::Select(SDNode *Node) {
// If we have a custom node, we have already selected.
@@ -498,7 +604,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
Imm = SignExtend64(Imm, 32);
- ReplaceNode(Node, selectImm(CurDAG, DL, Imm, *Subtarget));
+ ReplaceNode(Node, selectImm(CurDAG, DL, VT, Imm, *Subtarget));
return;
}
case ISD::FrameIndex: {
@@ -509,38 +615,69 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
return;
}
case ISD::SRL: {
- // We don't need this transform if zext.h is supported.
- if (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp())
+ // Optimize (srl (and X, C2), C) ->
+ // (srli (slli X, (XLen-C3)), (XLen-C3) + C)
+ // where C2 is a mask with C3 trailing ones.
+ // Taking into account that C2 may have had lower bits unset by
+ // SimplifyDemandedBits. This avoids materializing the C2 immediate.
+ // This pattern occurs when type legalizing right shifts for types with
+ // less than XLen bits.
+ auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
+ if (!N1C)
+ break;
+ SDValue N0 = Node->getOperand(0);
+ if (N0.getOpcode() != ISD::AND || !N0.hasOneUse() ||
+ !isa<ConstantSDNode>(N0.getOperand(1)))
+ break;
+ unsigned ShAmt = N1C->getZExtValue();
+ uint64_t Mask = N0.getConstantOperandVal(1);
+ Mask |= maskTrailingOnes<uint64_t>(ShAmt);
+ if (!isMask_64(Mask))
+ break;
+ unsigned TrailingOnes = countTrailingOnes(Mask);
+ // 32 trailing ones should use srliw via tablegen pattern.
+ if (TrailingOnes == 32 || ShAmt >= TrailingOnes)
break;
- // Optimize (srl (and X, 0xffff), C) ->
- // (srli (slli X, (XLen-16), (XLen-16) + C)
- // Taking into account that the 0xffff may have had lower bits unset by
- // SimplifyDemandedBits. This avoids materializing the 0xffff immediate.
- // This pattern occurs when type legalizing i16 right shifts.
- // FIXME: This could be extended to other AND masks.
+ unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;
+ SDNode *SLLI =
+ CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
+ CurDAG->getTargetConstant(LShAmt, DL, VT));
+ SDNode *SRLI = CurDAG->getMachineNode(
+ RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
+ CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
+ ReplaceNode(Node, SRLI);
+ return;
+ }
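The rewrite in the SRL case above trades the AND mask for a shift pair. A standalone C++ spot check of the identity it relies on (illustration only):

#include <cassert>
#include <cstdint>

int main() {
  // With C2 a mask of C3 trailing ones and C < C3, (X & C2) >> C equals
  // (X << (64 - C3)) >> (64 - C3 + C) using logical shifts, so the C2
  // immediate never needs to be materialized.
  const uint64_t X = 0x123456789abcdef0ULL;
  const unsigned C3 = 48, C = 5;
  const uint64_t C2 = ~0ULL >> (64 - C3);
  assert(((X & C2) >> C) == ((X << (64 - C3)) >> (64 - C3 + C)));
  return 0;
}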
+ case ISD::SRA: {
+ // Optimize (sra (sext_inreg X, i16), C) ->
+ // (srai (slli X, (XLen-16)), (XLen-16) + C)
+ // And (sra (sext_inreg X, i8), C) ->
+ // (srai (slli X, (XLen-8)), (XLen-8) + C)
+ // This can occur when Zbb is enabled, which makes sext_inreg i16/i8 legal.
+ // This transform matches the code we get without Zbb. The shifts are more
+ // compressible, and this can help expose CSE opportunities in the sdiv by
+ // constant optimization.
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
- if (N1C) {
- uint64_t ShAmt = N1C->getZExtValue();
- SDValue N0 = Node->getOperand(0);
- if (ShAmt < 16 && N0.getOpcode() == ISD::AND && N0.hasOneUse() &&
- isa<ConstantSDNode>(N0.getOperand(1))) {
- uint64_t Mask = N0.getConstantOperandVal(1);
- Mask |= maskTrailingOnes<uint64_t>(ShAmt);
- if (Mask == 0xffff) {
- unsigned LShAmt = Subtarget->getXLen() - 16;
- SDNode *SLLI =
- CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
- CurDAG->getTargetConstant(LShAmt, DL, VT));
- SDNode *SRLI = CurDAG->getMachineNode(
- RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
- CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
- ReplaceNode(Node, SRLI);
- return;
- }
- }
- }
-
- break;
+ if (!N1C)
+ break;
+ SDValue N0 = Node->getOperand(0);
+ if (N0.getOpcode() != ISD::SIGN_EXTEND_INREG || !N0.hasOneUse())
+ break;
+ unsigned ShAmt = N1C->getZExtValue();
+ unsigned ExtSize =
+ cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
+ // ExtSize of 32 should use sraiw via tablegen pattern.
+ if (ExtSize >= 32 || ShAmt >= ExtSize)
+ break;
+ unsigned LShAmt = Subtarget->getXLen() - ExtSize;
+ SDNode *SLLI =
+ CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
+ CurDAG->getTargetConstant(LShAmt, DL, VT));
+ SDNode *SRAI = CurDAG->getMachineNode(
+ RISCV::SRAI, DL, VT, SDValue(SLLI, 0),
+ CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
+ ReplaceNode(Node, SRAI);
+ return;
}
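Similarly, the SRA case replaces a sign-extend-in-register followed by an arithmetic shift with an slli/srai pair. A standalone C++ spot check (illustration only; it assumes two's-complement narrowing and arithmetic right shift of signed values, i.e. C++20 semantics):

#include <cassert>
#include <cstdint>

int main() {
  const int64_t X = 0x123456780000BEEFLL;       // low 16 bits are negative as i16
  const unsigned ExtSize = 16, C = 3;
  int64_t SExt = (int16_t)(uint16_t)X;          // sext_inreg X, i16
  int64_t ByExt = SExt >> C;
  int64_t ByShift =
      (int64_t)((uint64_t)X << (64 - ExtSize)) >> (64 - ExtSize + C);
  assert(ByExt == ByShift);
  return 0;
}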
case ISD::AND: {
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
@@ -774,7 +911,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
ShiftedC1 = SignExtend64(ShiftedC1, 32);
// Create (mulhu (slli X, lzcnt(C2)), C1 << (XLen - lzcnt(C2))).
- SDNode *Imm = selectImm(CurDAG, DL, ShiftedC1, *Subtarget);
+ SDNode *Imm = selectImm(CurDAG, DL, VT, ShiftedC1, *Subtarget);
SDNode *SLLI =
CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0.getOperand(0),
CurDAG->getTargetConstant(LeadingZeros, DL, VT));
@@ -793,62 +930,52 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
case Intrinsic::riscv_vmsge: {
SDValue Src1 = Node->getOperand(1);
SDValue Src2 = Node->getOperand(2);
+ bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
+ bool IsCmpUnsignedZero = false;
// Only custom select scalar second operand.
if (Src2.getValueType() != XLenVT)
break;
// Small constants are handled with patterns.
if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
int64_t CVal = C->getSExtValue();
- if (CVal >= -15 && CVal <= 16)
- break;
+ if (CVal >= -15 && CVal <= 16) {
+ if (!IsUnsigned || CVal != 0)
+ break;
+ IsCmpUnsignedZero = true;
+ }
}
- bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
MVT Src1VT = Src1.getSimpleValueType();
- unsigned VMSLTOpcode, VMNANDOpcode;
+ unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode;
switch (RISCVTargetLowering::getLMUL(Src1VT)) {
default:
llvm_unreachable("Unexpected LMUL!");
- case RISCVII::VLMUL::LMUL_F8:
- VMSLTOpcode =
- IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
- VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF8;
- break;
- case RISCVII::VLMUL::LMUL_F4:
- VMSLTOpcode =
- IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
- VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF4;
- break;
- case RISCVII::VLMUL::LMUL_F2:
- VMSLTOpcode =
- IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
- VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF2;
- break;
- case RISCVII::VLMUL::LMUL_1:
- VMSLTOpcode =
- IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
- VMNANDOpcode = RISCV::PseudoVMNAND_MM_M1;
- break;
- case RISCVII::VLMUL::LMUL_2:
- VMSLTOpcode =
- IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
- VMNANDOpcode = RISCV::PseudoVMNAND_MM_M2;
- break;
- case RISCVII::VLMUL::LMUL_4:
- VMSLTOpcode =
- IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
- VMNANDOpcode = RISCV::PseudoVMNAND_MM_M4;
- break;
- case RISCVII::VLMUL::LMUL_8:
- VMSLTOpcode =
- IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
- VMNANDOpcode = RISCV::PseudoVMNAND_MM_M8;
- break;
+#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b) \
+ case RISCVII::VLMUL::lmulenum: \
+ VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \
+ : RISCV::PseudoVMSLT_VX_##suffix; \
+ VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix; \
+ VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b; \
+ break;
+ CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F8, MF8, B1)
+ CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F4, MF4, B2)
+ CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F2, MF2, B4)
+ CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_1, M1, B8)
+ CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_2, M2, B16)
+ CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_4, M4, B32)
+ CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_8, M8, B64)
+#undef CASE_VMSLT_VMNAND_VMSET_OPCODES
}
SDValue SEW = CurDAG->getTargetConstant(
Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
SDValue VL;
selectVLOp(Node->getOperand(3), VL);
+ // If vmsgeu with 0 immediate, expand it to vmset.
+ if (IsCmpUnsignedZero) {
+ ReplaceNode(Node, CurDAG->getMachineNode(VMSetOpcode, DL, VT, VL, SEW));
+ return;
+ }
+
// Expand to
// vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
SDValue Cmp = SDValue(
@@ -862,96 +989,61 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
case Intrinsic::riscv_vmsge_mask: {
SDValue Src1 = Node->getOperand(2);
SDValue Src2 = Node->getOperand(3);
+ bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
+ bool IsCmpUnsignedZero = false;
// Only custom select scalar second operand.
if (Src2.getValueType() != XLenVT)
break;
// Small constants are handled with patterns.
if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
int64_t CVal = C->getSExtValue();
- if (CVal >= -15 && CVal <= 16)
- break;
+ if (CVal >= -15 && CVal <= 16) {
+ if (!IsUnsigned || CVal != 0)
+ break;
+ IsCmpUnsignedZero = true;
+ }
}
- bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
MVT Src1VT = Src1.getSimpleValueType();
- unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode;
+ unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
+ VMSetOpcode, VMANDOpcode;
switch (RISCVTargetLowering::getLMUL(Src1VT)) {
default:
llvm_unreachable("Unexpected LMUL!");
- case RISCVII::VLMUL::LMUL_F8:
- VMSLTOpcode =
- IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
- VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8_MASK
- : RISCV::PseudoVMSLT_VX_MF8_MASK;
- break;
- case RISCVII::VLMUL::LMUL_F4:
- VMSLTOpcode =
- IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
- VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4_MASK
- : RISCV::PseudoVMSLT_VX_MF4_MASK;
- break;
- case RISCVII::VLMUL::LMUL_F2:
- VMSLTOpcode =
- IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
- VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2_MASK
- : RISCV::PseudoVMSLT_VX_MF2_MASK;
- break;
- case RISCVII::VLMUL::LMUL_1:
- VMSLTOpcode =
- IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
- VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1_MASK
- : RISCV::PseudoVMSLT_VX_M1_MASK;
- break;
- case RISCVII::VLMUL::LMUL_2:
- VMSLTOpcode =
- IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
- VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2_MASK
- : RISCV::PseudoVMSLT_VX_M2_MASK;
- break;
- case RISCVII::VLMUL::LMUL_4:
- VMSLTOpcode =
- IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
- VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4_MASK
- : RISCV::PseudoVMSLT_VX_M4_MASK;
- break;
- case RISCVII::VLMUL::LMUL_8:
- VMSLTOpcode =
- IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
- VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8_MASK
- : RISCV::PseudoVMSLT_VX_M8_MASK;
- break;
+#define CASE_VMSLT_VMSET_OPCODES(lmulenum, suffix, suffix_b) \
+ case RISCVII::VLMUL::lmulenum: \
+ VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \
+ : RISCV::PseudoVMSLT_VX_##suffix; \
+ VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK \
+ : RISCV::PseudoVMSLT_VX_##suffix##_MASK; \
+ VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b; \
+ break;
+ CASE_VMSLT_VMSET_OPCODES(LMUL_F8, MF8, B1)
+ CASE_VMSLT_VMSET_OPCODES(LMUL_F4, MF4, B2)
+ CASE_VMSLT_VMSET_OPCODES(LMUL_F2, MF2, B4)
+ CASE_VMSLT_VMSET_OPCODES(LMUL_1, M1, B8)
+ CASE_VMSLT_VMSET_OPCODES(LMUL_2, M2, B16)
+ CASE_VMSLT_VMSET_OPCODES(LMUL_4, M4, B32)
+ CASE_VMSLT_VMSET_OPCODES(LMUL_8, M8, B64)
+#undef CASE_VMSLT_VMSET_OPCODES
}
// Mask operations use the LMUL from the mask type.
switch (RISCVTargetLowering::getLMUL(VT)) {
default:
llvm_unreachable("Unexpected LMUL!");
- case RISCVII::VLMUL::LMUL_F8:
- VMXOROpcode = RISCV::PseudoVMXOR_MM_MF8;
- VMANDNOpcode = RISCV::PseudoVMANDN_MM_MF8;
- break;
- case RISCVII::VLMUL::LMUL_F4:
- VMXOROpcode = RISCV::PseudoVMXOR_MM_MF4;
- VMANDNOpcode = RISCV::PseudoVMANDN_MM_MF4;
- break;
- case RISCVII::VLMUL::LMUL_F2:
- VMXOROpcode = RISCV::PseudoVMXOR_MM_MF2;
- VMANDNOpcode = RISCV::PseudoVMANDN_MM_MF2;
- break;
- case RISCVII::VLMUL::LMUL_1:
- VMXOROpcode = RISCV::PseudoVMXOR_MM_M1;
- VMANDNOpcode = RISCV::PseudoVMANDN_MM_M1;
- break;
- case RISCVII::VLMUL::LMUL_2:
- VMXOROpcode = RISCV::PseudoVMXOR_MM_M2;
- VMANDNOpcode = RISCV::PseudoVMANDN_MM_M2;
- break;
- case RISCVII::VLMUL::LMUL_4:
- VMXOROpcode = RISCV::PseudoVMXOR_MM_M4;
- VMANDNOpcode = RISCV::PseudoVMANDN_MM_M4;
- break;
- case RISCVII::VLMUL::LMUL_8:
- VMXOROpcode = RISCV::PseudoVMXOR_MM_M8;
- VMANDNOpcode = RISCV::PseudoVMANDN_MM_M8;
- break;
+#define CASE_VMXOR_VMANDN_VMAND_OPCODES(lmulenum, suffix) \
+ case RISCVII::VLMUL::lmulenum: \
+ VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix; \
+ VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix; \
+ VMANDOpcode = RISCV::PseudoVMAND_MM_##suffix; \
+ break;
+ CASE_VMXOR_VMANDN_VMAND_OPCODES(LMUL_F8, MF8)
+ CASE_VMXOR_VMANDN_VMAND_OPCODES(LMUL_F4, MF4)
+ CASE_VMXOR_VMANDN_VMAND_OPCODES(LMUL_F2, MF2)
+ CASE_VMXOR_VMANDN_VMAND_OPCODES(LMUL_1, M1)
+ CASE_VMXOR_VMANDN_VMAND_OPCODES(LMUL_2, M2)
+ CASE_VMXOR_VMANDN_VMAND_OPCODES(LMUL_4, M4)
+ CASE_VMXOR_VMANDN_VMAND_OPCODES(LMUL_8, M8)
+#undef CASE_VMXOR_VMANDN_VMAND_OPCODES
}
SDValue SEW = CurDAG->getTargetConstant(
Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
@@ -960,6 +1052,16 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
selectVLOp(Node->getOperand(5), VL);
SDValue MaskedOff = Node->getOperand(1);
SDValue Mask = Node->getOperand(4);
+
+ // If vmsgeu_mask with 0 immediate, expand it to {vmset, vmand}.
+ if (IsCmpUnsignedZero) {
+ SDValue VMSet =
+ SDValue(CurDAG->getMachineNode(VMSetOpcode, DL, VT, VL, SEW), 0);
+ ReplaceNode(Node, CurDAG->getMachineNode(VMANDOpcode, DL, VT,
+ {Mask, VMSet, VL, MaskSEW}));
+ return;
+ }
+
// If the MaskedOff value and the Mask are the same value use
// vmslt{u}.vx vt, va, x; vmandn.mm vd, vd, vt
// This avoids needing to copy v0 to vd before starting the next sequence.
@@ -988,6 +1090,9 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
{Cmp, Mask, VL, MaskSEW}));
return;
}
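Both vmsge(u) paths above hinge on one observation: an unsigned "greater than or equal to zero" compare is always true, so it can be selected as a plain mask-set instead of a compare. A hedged sketch of the decision; the enum and helper names are illustrative, not LLVM pseudo opcodes:

#include <cstdint>
#include <optional>

enum class VmsgeExpansion { MatchedByPattern, VMSet, VmsltPlusVmnand };

static VmsgeExpansion chooseVmsgeExpansion(bool IsUnsigned,
                                           std::optional<int64_t> ConstSrc2) {
  if (ConstSrc2 && *ConstSrc2 >= -15 && *ConstSrc2 <= 16) {
    // Small immediates are left to TableGen patterns, except that
    // "unsigned >= 0" is trivially all-ones and becomes a single vmset.m.
    if (IsUnsigned && *ConstSrc2 == 0)
      return VmsgeExpansion::VMSet;
    return VmsgeExpansion::MatchedByPattern;
  }
  return VmsgeExpansion::VmsltPlusVmnand; // vmslt{u}.vx vd, va, x ; vmnand.mm vd, vd, vd
}

int main() {
  return chooseVmsgeExpansion(true, 0) == VmsgeExpansion::VMSet ? 0 : 1;
}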
+ case Intrinsic::riscv_vsetvli_opt:
+ case Intrinsic::riscv_vsetvlimax_opt:
+ return selectVSETVLI(Node);
}
break;
}
@@ -997,54 +1102,9 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
// By default we do not custom select any intrinsic.
default:
break;
-
case Intrinsic::riscv_vsetvli:
- case Intrinsic::riscv_vsetvlimax: {
- if (!Subtarget->hasVInstructions())
- break;
-
- bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
- unsigned Offset = VLMax ? 2 : 3;
-
- assert(Node->getNumOperands() == Offset + 2 &&
- "Unexpected number of operands");
-
- unsigned SEW =
- RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
- RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
- Node->getConstantOperandVal(Offset + 1) & 0x7);
-
- unsigned VTypeI = RISCVVType::encodeVTYPE(
- VLMul, SEW, /*TailAgnostic*/ true, /*MaskAgnostic*/ false);
- SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);
-
- SDValue VLOperand;
- unsigned Opcode = RISCV::PseudoVSETVLI;
- if (VLMax) {
- VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
- Opcode = RISCV::PseudoVSETVLIX0;
- } else {
- VLOperand = Node->getOperand(2);
-
- if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
- uint64_t AVL = C->getZExtValue();
- if (isUInt<5>(AVL)) {
- SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
- ReplaceNode(
- Node, CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL, XLenVT,
- MVT::Other, VLImm, VTypeIOp,
- /* Chain */ Node->getOperand(0)));
- return;
- }
- }
- }
-
- ReplaceNode(Node,
- CurDAG->getMachineNode(Opcode, DL, XLenVT,
- MVT::Other, VLOperand, VTypeIOp,
- /* Chain */ Node->getOperand(0)));
- return;
- }
+ case Intrinsic::riscv_vsetvlimax:
+ return selectVSETVLI(Node);
case Intrinsic::riscv_vlseg2:
case Intrinsic::riscv_vlseg3:
case Intrinsic::riscv_vlseg4:
@@ -1154,9 +1214,14 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
unsigned CurOp = 2;
+ // Masked intrinsics only have the TU version of the pseudo instructions.
+ bool IsTU = IsMasked || (!IsMasked && !Node->getOperand(CurOp).isUndef());
SmallVector<SDValue, 8> Operands;
- if (IsMasked)
+ if (IsTU)
Operands.push_back(Node->getOperand(CurOp++));
+ else
+ // Skip the undef passthru operand for the unmasked TA pseudo.
+ CurOp++;
MVT IndexVT;
addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
@@ -1169,8 +1234,12 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
+ if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
+ report_fatal_error("The V extension does not support EEW=64 for index "
+ "values when XLEN=32");
+ }
const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
- IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
+ IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
MachineSDNode *Load =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
@@ -1195,16 +1264,25 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
unsigned CurOp = 2;
+ // The riscv_vlm intrinsic is always tail agnostic and has no passthru operand.
+ bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;
+ // Masked intrinsics only have the TU version of the pseudo instructions.
+ bool IsTU =
+ HasPassthruOperand &&
+ ((!IsMasked && !Node->getOperand(CurOp).isUndef()) || IsMasked);
SmallVector<SDValue, 8> Operands;
- if (IsMasked)
+ if (IsTU)
Operands.push_back(Node->getOperand(CurOp++));
+ else if (HasPassthruOperand)
+ // Skip the undef passthru operand for the unmasked TA pseudo.
+ CurOp++;
addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
Operands, /*IsLoad=*/true);
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
const RISCV::VLEPseudo *P =
- RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
+ RISCV::getVLEPseudo(IsMasked, IsTU, IsStrided, /*FF*/ false, Log2SEW,
static_cast<unsigned>(LMUL));
MachineSDNode *Load =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
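The passthru handling added across these load cases follows one rule: take the tail-undisturbed (TU) pseudo whenever a merge value has to be preserved. A minimal sketch of that predicate, assuming only what the hunks above show (the helper is illustrative, not an LLVM API):

static bool useTailUndisturbed(bool IsMasked, bool HasPassthruOperand,
                               bool PassthruIsUndef) {
  if (!HasPassthruOperand)     // e.g. riscv_vlm: always tail agnostic
    return false;
  if (IsMasked)                // masked forms only exist as TU pseudos here
    return true;
  return !PassthruIsUndef;     // unmasked: TU only if the merge value matters
}

int main() {
  // Unmasked load with an undef passthru operand: the TA pseudo is chosen.
  return useTailUndisturbed(false, true, true) ? 1 : 0;
}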
@@ -1223,9 +1301,14 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
unsigned CurOp = 2;
+ // Masked intrinsics only have the TU version of the pseudo instructions.
+ bool IsTU = IsMasked || (!IsMasked && !Node->getOperand(CurOp).isUndef());
SmallVector<SDValue, 7> Operands;
- if (IsMasked)
+ if (IsTU)
Operands.push_back(Node->getOperand(CurOp++));
+ else
+ // Skip the undef passthru operand for the unmasked TA pseudo.
+ CurOp++;
addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
/*IsStridedOrIndexed*/ false, Operands,
@@ -1233,8 +1316,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
const RISCV::VLEPseudo *P =
- RISCV::getVLEPseudo(IsMasked, /*Strided*/ false, /*FF*/ true, Log2SEW,
- static_cast<unsigned>(LMUL));
+ RISCV::getVLEPseudo(IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
+ Log2SEW, static_cast<unsigned>(LMUL));
MachineSDNode *Load =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0),
MVT::Other, MVT::Glue, Operands);
@@ -1359,9 +1442,13 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
+ if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
+ report_fatal_error("The V extension does not support EEW=64 for index "
+ "values when XLEN=32");
+ }
const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
- IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
- static_cast<unsigned>(IndexLMUL));
+ IsMasked, /*TU*/ false, IsOrdered, IndexLog2EEW,
+ static_cast<unsigned>(LMUL), static_cast<unsigned>(IndexLMUL));
MachineSDNode *Store =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
@@ -1516,10 +1603,16 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
return;
}
case ISD::SPLAT_VECTOR:
+ case RISCVISD::VMV_S_X_VL:
+ case RISCVISD::VFMV_S_F_VL:
case RISCVISD::VMV_V_X_VL:
case RISCVISD::VFMV_V_F_VL: {
// Try to match splat of a scalar load to a strided load with stride of x0.
- SDValue Src = Node->getOperand(0);
+ bool IsScalarMove = Node->getOpcode() == RISCVISD::VMV_S_X_VL ||
+ Node->getOpcode() == RISCVISD::VFMV_S_F_VL;
+ if (IsScalarMove && !Node->getOperand(0).isUndef())
+ break;
+ SDValue Src = IsScalarMove ? Node->getOperand(1) : Node->getOperand(0);
auto *Ld = dyn_cast<LoadSDNode>(Src);
if (!Ld)
break;
@@ -1534,7 +1627,13 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
SDValue VL;
if (Node->getOpcode() == ISD::SPLAT_VECTOR)
VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
- else
+ else if (IsScalarMove) {
+ // We could handle more VL values if we updated the VSETVLI insertion pass
+ // to avoid introducing extra VSETVLIs.
+ if (!isOneConstant(Node->getOperand(2)))
+ break;
+ selectVLOp(Node->getOperand(2), VL);
+ } else
selectVLOp(Node->getOperand(1), VL);
unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
@@ -1546,8 +1645,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
- /*IsMasked*/ false, /*IsStrided*/ true, /*FF*/ false, Log2SEW,
- static_cast<unsigned>(LMUL));
+ /*IsMasked*/ false, /*IsTU*/ false, /*IsStrided*/ true, /*FF*/ false,
+ Log2SEW, static_cast<unsigned>(LMUL));
MachineSDNode *Load =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
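Why a splat of a loaded scalar can become a strided load with stride x0: with a zero stride, every element is read from the same address, which is exactly a splat of that scalar. A standalone C++ illustration (not part of the patch):

#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  const int32_t Mem[1] = {42};             // the scalar being splatted
  const std::ptrdiff_t Stride = 0;         // x0 as the stride operand
  std::array<int32_t, 4> V;
  for (std::size_t I = 0; I < V.size(); ++I)
    V[I] = *(Mem + I * Stride);            // models vlse32.v with rs2 = x0
  for (int32_t E : V)
    assert(E == 42);
  return 0;
}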
@@ -1727,6 +1826,20 @@ bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits) const {
if (Bits < Subtarget->getXLen() - User->getConstantOperandVal(1))
return false;
break;
+ case RISCV::ANDI:
+ if (Bits < (64 - countLeadingZeros(User->getConstantOperandVal(1))))
+ return false;
+ break;
+ case RISCV::SEXTB:
+ if (Bits < 8)
+ return false;
+ break;
+ case RISCV::SEXTH:
+ case RISCV::ZEXTH_RV32:
+ case RISCV::ZEXTH_RV64:
+ if (Bits < 16)
+ return false;
+ break;
case RISCV::ADDUW:
case RISCV::SH1ADDUW:
case RISCV::SH2ADDUW:
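The new ANDI case reflects that an AND with immediate C only observes the low 64 - countLeadingZeros(C) bits of its input, so producers only need to supply that many correct low bits (SEXTB and SEXTH/ZEXTH similarly need 8 and 16). A standalone C++20 spot check (illustration only):

#include <bit>
#include <cassert>
#include <cstdint>

int main() {
  const uint64_t C = 0xFF;                             // andi mask
  const unsigned BitsUsed = 64 - std::countl_zero(C);  // == 8
  const uint64_t A = 0x123456789abcdef0ULL;
  const uint64_t B = A ^ (0x5555ULL << BitsUsed);      // differs only above bit 7
  assert((A & C) == (B & C));
  return 0;
}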
@@ -1758,7 +1871,8 @@ bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits) const {
// allows us to choose between VSETIVLI and VSETVLI later.
bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
auto *C = dyn_cast<ConstantSDNode>(N);
- if (C && isUInt<5>(C->getZExtValue()))
+ if (C && (isUInt<5>(C->getZExtValue()) ||
+ C->getSExtValue() == RISCV::VLMaxSentinel))
VL = CurDAG->getTargetConstant(C->getZExtValue(), SDLoc(N),
N->getValueType(0));
else
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
index a2770089995d..c429a9298739 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -87,6 +87,8 @@ public:
void selectVSSEG(SDNode *Node, bool IsMasked, bool IsStrided);
void selectVSXSEG(SDNode *Node, bool IsMasked, bool IsOrdered);
+ void selectVSETVLI(SDNode *Node);
+
// Return the RISC-V condition code that matches the given DAG integer
// condition code. The CondCode must be one of those supported by the RISC-V
// ISA (see translateSetCCForBranch).
@@ -159,6 +161,7 @@ struct VSXSEGPseudo {
struct VLEPseudo {
uint16_t Masked : 1;
+ uint16_t IsTU : 1;
uint16_t Strided : 1;
uint16_t FF : 1;
uint16_t Log2SEW : 3;
@@ -176,6 +179,7 @@ struct VSEPseudo {
struct VLX_VSXPseudo {
uint16_t Masked : 1;
+ uint16_t IsTU : 1;
uint16_t Ordered : 1;
uint16_t Log2SEW : 3;
uint16_t LMUL : 3;
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 4f5512e6fb37..5cc3aa35d4d2 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -24,6 +24,7 @@
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
@@ -249,7 +250,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
- if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
+ if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
+ Subtarget.hasStdExtZbkb()) {
if (Subtarget.is64Bit()) {
setOperationAction(ISD::ROTL, MVT::i32, Custom);
setOperationAction(ISD::ROTR, MVT::i32, Custom);
@@ -277,7 +279,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
// With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
// pattern match it directly in isel.
setOperationAction(ISD::BSWAP, XLenVT,
- Subtarget.hasStdExtZbb() ? Legal : Expand);
+ (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb())
+ ? Legal
+ : Expand);
}
if (Subtarget.hasStdExtZbb()) {
@@ -330,6 +334,10 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::LLRINT, MVT::f16, Legal);
setOperationAction(ISD::LROUND, MVT::f16, Legal);
setOperationAction(ISD::LLROUND, MVT::f16, Legal);
+ setOperationAction(ISD::STRICT_LRINT, MVT::f16, Legal);
+ setOperationAction(ISD::STRICT_LLRINT, MVT::f16, Legal);
+ setOperationAction(ISD::STRICT_LROUND, MVT::f16, Legal);
+ setOperationAction(ISD::STRICT_LLROUND, MVT::f16, Legal);
setOperationAction(ISD::STRICT_FADD, MVT::f16, Legal);
setOperationAction(ISD::STRICT_FMA, MVT::f16, Legal);
setOperationAction(ISD::STRICT_FSUB, MVT::f16, Legal);
@@ -338,6 +346,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Legal);
setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
setOperationAction(ISD::STRICT_FSQRT, MVT::f16, Legal);
+ setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Legal);
+ setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Legal);
for (auto CC : FPCCToExpand)
setCondCodeAction(CC, MVT::f16, Expand);
setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
@@ -363,6 +373,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::FLOG2, MVT::f16, Promote);
setOperationAction(ISD::FLOG10, MVT::f16, Promote);
+ // FIXME: Need to promote f16 STRICT_* to f32 libcalls, but we don't have
+ // complete support for all operations in LegalizeDAG.
+
// We need to custom promote this.
if (Subtarget.is64Bit())
setOperationAction(ISD::FPOWI, MVT::i32, Custom);
@@ -375,12 +388,18 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::LLRINT, MVT::f32, Legal);
setOperationAction(ISD::LROUND, MVT::f32, Legal);
setOperationAction(ISD::LLROUND, MVT::f32, Legal);
+ setOperationAction(ISD::STRICT_LRINT, MVT::f32, Legal);
+ setOperationAction(ISD::STRICT_LLRINT, MVT::f32, Legal);
+ setOperationAction(ISD::STRICT_LROUND, MVT::f32, Legal);
+ setOperationAction(ISD::STRICT_LLROUND, MVT::f32, Legal);
setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal);
setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
+ setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Legal);
+ setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
for (auto CC : FPCCToExpand)
setCondCodeAction(CC, MVT::f32, Expand);
setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
@@ -402,6 +421,10 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::LLRINT, MVT::f64, Legal);
setOperationAction(ISD::LROUND, MVT::f64, Legal);
setOperationAction(ISD::LLROUND, MVT::f64, Legal);
+ setOperationAction(ISD::STRICT_LRINT, MVT::f64, Legal);
+ setOperationAction(ISD::STRICT_LLRINT, MVT::f64, Legal);
+ setOperationAction(ISD::STRICT_LROUND, MVT::f64, Legal);
+ setOperationAction(ISD::STRICT_LLROUND, MVT::f64, Legal);
setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal);
setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
@@ -410,6 +433,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
+ setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Legal);
+ setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
for (auto CC : FPCCToExpand)
setCondCodeAction(CC, MVT::f64, Expand);
setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
@@ -499,12 +524,13 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
ISD::VP_SHL, ISD::VP_REDUCE_ADD, ISD::VP_REDUCE_AND,
ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR, ISD::VP_REDUCE_SMAX,
ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN,
- ISD::VP_SELECT};
+ ISD::VP_MERGE, ISD::VP_SELECT};
static const unsigned FloatingPointVPOps[] = {
ISD::VP_FADD, ISD::VP_FSUB, ISD::VP_FMUL,
ISD::VP_FDIV, ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD,
- ISD::VP_REDUCE_FMIN, ISD::VP_REDUCE_FMAX, ISD::VP_SELECT};
+ ISD::VP_REDUCE_FMIN, ISD::VP_REDUCE_FMAX, ISD::VP_MERGE,
+ ISD::VP_SELECT};
if (!Subtarget.is64Bit()) {
// We must custom-lower certain vXi64 operations on RV32 due to the vector
@@ -546,6 +572,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SELECT, VT, Custom);
setOperationAction(ISD::SELECT_CC, VT, Expand);
setOperationAction(ISD::VSELECT, VT, Expand);
+ setOperationAction(ISD::VP_MERGE, VT, Expand);
+ setOperationAction(ISD::VP_SELECT, VT, Expand);
setOperationAction(ISD::VP_AND, VT, Custom);
setOperationAction(ISD::VP_OR, VT, Custom);
@@ -590,6 +618,12 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SMUL_LOHI, VT, Expand);
setOperationAction(ISD::UMUL_LOHI, VT, Expand);
+ // nxvXi64 MULHS/MULHU requires the V extension instead of Zve64*.
+ if (VT.getVectorElementType() == MVT::i64 && !Subtarget.hasStdExtV()) {
+ setOperationAction(ISD::MULHU, VT, Expand);
+ setOperationAction(ISD::MULHS, VT, Expand);
+ }
+
setOperationAction(ISD::SMIN, VT, Legal);
setOperationAction(ISD::SMAX, VT, Legal);
setOperationAction(ISD::UMIN, VT, Legal);
@@ -886,8 +920,11 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::UMAX, VT, Custom);
setOperationAction(ISD::ABS, VT, Custom);
- setOperationAction(ISD::MULHS, VT, Custom);
- setOperationAction(ISD::MULHU, VT, Custom);
+ // vXi64 MULHS/MULHU requires the V extension instead of Zve64*.
+ if (VT.getVectorElementType() != MVT::i64 || Subtarget.hasStdExtV()) {
+ setOperationAction(ISD::MULHS, VT, Custom);
+ setOperationAction(ISD::MULHU, VT, Custom);
+ }
setOperationAction(ISD::SADDSAT, VT, Custom);
setOperationAction(ISD::UADDSAT, VT, Custom);
@@ -1002,9 +1039,12 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::BITCAST, MVT::i16, Custom);
setOperationAction(ISD::BITCAST, MVT::i32, Custom);
setOperationAction(ISD::BITCAST, MVT::i64, Custom);
- setOperationAction(ISD::BITCAST, MVT::f16, Custom);
- setOperationAction(ISD::BITCAST, MVT::f32, Custom);
- setOperationAction(ISD::BITCAST, MVT::f64, Custom);
+ if (Subtarget.hasStdExtZfh())
+ setOperationAction(ISD::BITCAST, MVT::f16, Custom);
+ if (Subtarget.hasStdExtF())
+ setOperationAction(ISD::BITCAST, MVT::f32, Custom);
+ if (Subtarget.hasStdExtD())
+ setOperationAction(ISD::BITCAST, MVT::f64, Custom);
}
}
@@ -1024,7 +1064,13 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setTargetDAGCombine(ISD::OR);
setTargetDAGCombine(ISD::XOR);
setTargetDAGCombine(ISD::ANY_EXTEND);
- setTargetDAGCombine(ISD::ZERO_EXTEND);
+ if (Subtarget.hasStdExtF()) {
+ setTargetDAGCombine(ISD::ZERO_EXTEND);
+ setTargetDAGCombine(ISD::FP_TO_SINT);
+ setTargetDAGCombine(ISD::FP_TO_UINT);
+ setTargetDAGCombine(ISD::FP_TO_SINT_SAT);
+ setTargetDAGCombine(ISD::FP_TO_UINT_SAT);
+ }
if (Subtarget.hasVInstructions()) {
setTargetDAGCombine(ISD::FCOPYSIGN);
setTargetDAGCombine(ISD::MGATHER);
@@ -1072,7 +1118,7 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
case Intrinsic::riscv_masked_cmpxchg_i32: {
PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
Info.opc = ISD::INTRINSIC_W_CHAIN;
- Info.memVT = MVT::getVT(PtrTy->getElementType());
+ Info.memVT = MVT::getVT(PtrTy->getPointerElementType());
Info.ptrVal = I.getArgOperand(0);
Info.offset = 0;
Info.align = Align(4);
@@ -1158,10 +1204,11 @@ bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
// Zexts are free if they can be combined with a load.
+ // Don't advertise i32->i64 zextload as being free for RV64. It interacts
+  // poorly with type legalization of compares, which prefers sext.
if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
EVT MemVT = LD->getMemoryVT();
- if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
- (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
+ if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
(LD->getExtensionType() == ISD::NON_EXTLOAD ||
LD->getExtensionType() == ISD::ZEXTLOAD))
return true;
@@ -1189,7 +1236,9 @@ bool RISCVTargetLowering::hasAndNotCompare(SDValue Y) const {
if (VT.isVector())
return false;
- return Subtarget.hasStdExtZbb() && !isa<ConstantSDNode>(Y);
+ return (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
+ Subtarget.hasStdExtZbkb()) &&
+ !isa<ConstantSDNode>(Y);
}
/// Check if sinking \p I's operands to I's basic block is profitable, because
@@ -1230,6 +1279,30 @@ bool RISCVTargetLowering::shouldSinkOperands(
switch (II->getIntrinsicID()) {
case Intrinsic::fma:
return Operand == 0 || Operand == 1;
+ // FIXME: Our patterns can only match vx/vf instructions when the splat
+    // is on the RHS, because TableGen doesn't recognize our VP operations
+ // as commutative.
+ case Intrinsic::vp_add:
+ case Intrinsic::vp_mul:
+ case Intrinsic::vp_and:
+ case Intrinsic::vp_or:
+ case Intrinsic::vp_xor:
+ case Intrinsic::vp_fadd:
+ case Intrinsic::vp_fmul:
+ case Intrinsic::vp_shl:
+ case Intrinsic::vp_lshr:
+ case Intrinsic::vp_ashr:
+ case Intrinsic::vp_udiv:
+ case Intrinsic::vp_sdiv:
+ case Intrinsic::vp_urem:
+ case Intrinsic::vp_srem:
+ return Operand == 1;
+ // ... with the exception of vp.sub/vp.fsub/vp.fdiv, which have
+ // explicit patterns for both LHS and RHS (as 'vr' versions).
+ case Intrinsic::vp_sub:
+ case Intrinsic::vp_fsub:
+ case Intrinsic::vp_fdiv:
+ return Operand == 0 || Operand == 1;
default:
return false;
}
@@ -1277,8 +1350,6 @@ bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
return false;
if (VT == MVT::f64 && !Subtarget.hasStdExtD())
return false;
- if (Imm.isNegZero())
- return false;
return Imm.isZero();
}
@@ -1482,6 +1553,19 @@ bool RISCVTargetLowering::isLegalElementTypeForRVV(Type *ScalarTy) const {
return false;
}
+static SDValue getVLOperand(SDValue Op) {
+ assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
+ Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
+ "Unexpected opcode");
+ bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
+ unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
+ const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
+ RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
+ if (!II)
+ return SDValue();
+ return Op.getOperand(II->VLOperand + 1 + HasChain);
+}
+
static bool useRVVForFixedLengthVectorVT(MVT VT,
const RISCVSubtarget &Subtarget) {
assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
@@ -1667,7 +1751,8 @@ bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
return false;
}
-static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) {
+static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
+ const RISCVSubtarget &Subtarget) {
// RISCV FP-to-int conversions saturate to the destination register size, but
// don't produce 0 for nan. We can use a conversion instruction and fix the
// nan case with a compare and a select.
@@ -1679,15 +1764,17 @@ static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) {
bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
unsigned Opc;
if (SatVT == DstVT)
- Opc = IsSigned ? RISCVISD::FCVT_X_RTZ : RISCVISD::FCVT_XU_RTZ;
+ Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
else if (DstVT == MVT::i64 && SatVT == MVT::i32)
- Opc = IsSigned ? RISCVISD::FCVT_W_RTZ_RV64 : RISCVISD::FCVT_WU_RTZ_RV64;
+ Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
else
return SDValue();
// FIXME: Support other SatVTs by clamping before or after the conversion.
SDLoc DL(Op);
- SDValue FpToInt = DAG.getNode(Opc, DL, DstVT, Src);
+ SDValue FpToInt = DAG.getNode(
+ Opc, DL, DstVT, Src,
+ DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, Subtarget.getXLenVT()));
SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
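A scalar C++ model of the reasoning in this hunk (float to i32 shown; the function name is illustrative): the conversion saturates but does not return 0 for NaN, so the lowering adds an unordered self-compare and a select to patch that case.

#include <cassert>
#include <cstdint>
#include <limits>

static int32_t fpToSint32Sat(float Src) {
  bool IsNan = (Src != Src);              // SETUO on (Src, Src)
  int32_t Conv;
  if (IsNan || Src >= 2147483648.0f)      // NaN routed here only so the C++
    Conv = INT32_MAX;                     // cast below stays well-defined
  else if (Src <= -2147483648.0f)
    Conv = INT32_MIN;
  else
    Conv = static_cast<int32_t>(Src);     // in-range, truncating conversion
  return IsNan ? 0 : Conv;                // the extra compare-and-select
}

int main() {
  assert(fpToSint32Sat(1.9f) == 1);
  assert(fpToSint32Sat(-1e20f) == INT32_MIN);
  assert(fpToSint32Sat(std::numeric_limits<float>::quiet_NaN()) == 0);
}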
@@ -1898,6 +1985,8 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
// codegen across RV32 and RV64.
unsigned NumViaIntegerBits =
std::min(std::max(NumElts, 8u), Subtarget.getXLen());
+ NumViaIntegerBits = std::min(NumViaIntegerBits,
+ Subtarget.getMaxELENForFixedLengthVectors());
if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
// If we have to use more than one INSERT_VECTOR_ELT then this
     // optimization is likely to increase code size; avoid performing it in
@@ -2190,6 +2279,17 @@ static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Lo,
// node in order to try and match RVV vector/scalar instructions.
if ((LoC >> 31) == HiC)
return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL);
+
+    // If VL equals VLMAX and the Hi constant equals Lo, we can lower this
+    // with a vmv.v.x whose EEW = 32.
+ auto *Const = dyn_cast<ConstantSDNode>(VL);
+ if (LoC == HiC && Const && Const->getSExtValue() == RISCV::VLMaxSentinel) {
+ MVT InterVT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
+      // TODO: We could also do this when vl <= min(VLMAX), but we cannot
+      // access the subtarget here to check that.
+ auto InterVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, InterVT, Lo, VL);
+ return DAG.getNode(ISD::BITCAST, DL, VT, InterVec);
+ }
}
// Fall back to a stack store and stride x0 vector load.
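The splatPartsI64WithVL shortcut above relies on a layout fact: when both 32-bit halves of the i64 are the same value, splatting that 32-bit value into a vector with twice as many elements is bit-identical to the i64 splat, so a bitcast finishes the job. A host-side check of that claim (assumes a little-endian host, matching RISC-V):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  // Splatting the 32-bit value into twice as many elements...
  uint32_t Halves[4] = {0x12345678u, 0x12345678u, 0x12345678u, 0x12345678u};
  // ...is bit-for-bit the same as splatting the i64 whose Lo and Hi halves
  // are both that value (little-endian layout assumed).
  uint64_t Elts[2];
  std::memcpy(Elts, Halves, sizeof(Elts));
  assert(Elts[0] == 0x1234567812345678ull);
  assert(Elts[1] == 0x1234567812345678ull);
}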
@@ -2215,8 +2315,13 @@ static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar,
static SDValue lowerScalarSplat(SDValue Scalar, SDValue VL, MVT VT, SDLoc DL,
SelectionDAG &DAG,
const RISCVSubtarget &Subtarget) {
- if (VT.isFloatingPoint())
+ if (VT.isFloatingPoint()) {
+ // If VL is 1, we could use vfmv.s.f.
+ if (isOneConstant(VL))
+ return DAG.getNode(RISCVISD::VFMV_S_F_VL, DL, VT, DAG.getUNDEF(VT),
+ Scalar, VL);
return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Scalar, VL);
+ }
MVT XLenVT = Subtarget.getXLenVT();
@@ -2229,16 +2334,98 @@ static SDValue lowerScalarSplat(SDValue Scalar, SDValue VL, MVT VT, SDLoc DL,
unsigned ExtOpc =
isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
+ ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Scalar);
+    // If VL is 1 and the scalar value won't benefit from an immediate, we could
+ // use vmv.s.x.
+ if (isOneConstant(VL) &&
+ (!Const || isNullConstant(Scalar) || !isInt<5>(Const->getSExtValue())))
+ return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, DAG.getUNDEF(VT), Scalar,
+ VL);
return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Scalar, VL);
}
assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
"Unexpected scalar for splat lowering!");
+ if (isOneConstant(VL) && isNullConstant(Scalar))
+ return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, DAG.getUNDEF(VT),
+ DAG.getConstant(0, DL, XLenVT), VL);
+
// Otherwise use the more complicated splatting algorithm.
return splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
}
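The VL == 1 special cases above rest on a simple observation: with a vector length of one, only element 0 of the destination is produced, so a scalar move into element 0 is as good as a full splat. A toy model in plain C++ (names illustrative; tail and mask policies ignored):

#include <cassert>
#include <cstdint>

static void splatVL(uint32_t *Dst, uint32_t Scalar, unsigned VL) {
  for (unsigned i = 0; i < VL; ++i)
    Dst[i] = Scalar;   // vmv.v.x style: write Scalar to every active element
}
static void scalarMoveVL(uint32_t *Dst, uint32_t Scalar, unsigned VL) {
  if (VL > 0)
    Dst[0] = Scalar;   // vmv.s.x style: write element 0 only
}

int main() {
  uint32_t A[4] = {9, 9, 9, 9}, B[4] = {9, 9, 9, 9};
  splatVL(A, 7, 1);
  scalarMoveVL(B, 7, 1);
  assert(A[0] == B[0]); // identical for the single active element
}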
+// Is the mask a slidedown that shifts in undefs?
+static int matchShuffleAsSlideDown(ArrayRef<int> Mask) {
+ int Size = Mask.size();
+
+ // Elements shifted in should be undef.
+ auto CheckUndefs = [&](int Shift) {
+ for (int i = Size - Shift; i != Size; ++i)
+ if (Mask[i] >= 0)
+ return false;
+ return true;
+ };
+
+ // Elements should be shifted or undef.
+ auto MatchShift = [&](int Shift) {
+ for (int i = 0; i != Size - Shift; ++i)
+ if (Mask[i] >= 0 && Mask[i] != Shift + i)
+ return false;
+ return true;
+ };
+
+ // Try all possible shifts.
+ for (int Shift = 1; Shift != Size; ++Shift)
+ if (CheckUndefs(Shift) && MatchShift(Shift))
+ return Shift;
+
+ // No match.
+ return -1;
+}
+
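The same matcher, restated as standalone C++ over a plain mask vector so it can be tried in isolation (function name illustrative): the low elements must come from positions shifted up by a constant amount, and everything shifted in from the top must be undef (negative mask value).

#include <cassert>
#include <vector>

static int matchSlideDown(const std::vector<int> &Mask) {
  int Size = (int)Mask.size();
  for (int Shift = 1; Shift != Size; ++Shift) {
    bool Ok = true;
    for (int i = Size - Shift; i != Size && Ok; ++i)
      Ok = Mask[i] < 0;                         // shifted-in elements are undef
    for (int i = 0; i != Size - Shift && Ok; ++i)
      Ok = Mask[i] < 0 || Mask[i] == Shift + i; // shifted or undef
    if (Ok)
      return Shift;
  }
  return -1; // no match
}

int main() {
  assert(matchSlideDown({2, 3, -1, -1}) == 2);  // <2,3,u,u> is a slide down by 2
  assert(matchSlideDown({1, 3, -1, -1}) == -1); // not a uniform shift
}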
+static bool isInterleaveShuffle(ArrayRef<int> Mask, MVT VT, bool &SwapSources,
+ const RISCVSubtarget &Subtarget) {
+ // We need to be able to widen elements to the next larger integer type.
+ if (VT.getScalarSizeInBits() >= Subtarget.getMaxELENForFixedLengthVectors())
+ return false;
+
+ int Size = Mask.size();
+ assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
+
+ int Srcs[] = {-1, -1};
+ for (int i = 0; i != Size; ++i) {
+ // Ignore undef elements.
+ if (Mask[i] < 0)
+ continue;
+
+    // Is this an even or odd element?
+ int Pol = i % 2;
+
+ // Ensure we consistently use the same source for this element polarity.
+ int Src = Mask[i] / Size;
+ if (Srcs[Pol] < 0)
+ Srcs[Pol] = Src;
+ if (Srcs[Pol] != Src)
+ return false;
+
+ // Make sure the element within the source is appropriate for this element
+ // in the destination.
+ int Elt = Mask[i] % Size;
+ if (Elt != i / 2)
+ return false;
+ }
+
+ // We need to find a source for each polarity and they can't be the same.
+ if (Srcs[0] < 0 || Srcs[1] < 0 || Srcs[0] == Srcs[1])
+ return false;
+
+ // Swap the sources if the second source was in the even polarity.
+ SwapSources = Srcs[0] > Srcs[1];
+
+ return true;
+}
+
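Standalone restatement of isInterleaveShuffle for experimentation (plain C++, function name illustrative). It follows LLVM's shuffle-mask convention that indices below the mask size select from the first source and larger indices from the second:

#include <cassert>
#include <vector>

static bool isInterleave(const std::vector<int> &Mask, bool &SwapSources) {
  int Size = (int)Mask.size();
  int Srcs[2] = {-1, -1};
  for (int i = 0; i != Size; ++i) {
    if (Mask[i] < 0)
      continue;                      // undef element, no constraint
    int Pol = i % 2;                 // even or odd destination lane
    int Src = Mask[i] / Size;        // which source this element came from
    if (Srcs[Pol] < 0)
      Srcs[Pol] = Src;
    if (Srcs[Pol] != Src)
      return false;                  // inconsistent source for this polarity
    if (Mask[i] % Size != i / 2)
      return false;                  // wrong element within the source
  }
  if (Srcs[0] < 0 || Srcs[1] < 0 || Srcs[0] == Srcs[1])
    return false;                    // need two distinct sources
  SwapSources = Srcs[0] > Srcs[1];   // second source supplied the even lanes
  return true;
}

int main() {
  bool Swap = false;
  // <0,8,1,9,2,10,3,11> interleaves the low halves of two 8-element vectors.
  assert(isInterleave({0, 8, 1, 9, 2, 10, 3, 11}, Swap) && !Swap);
  assert(!isInterleave({0, 1, 2, 3, 8, 9, 10, 11}, Swap)); // a concat, not an interleave
}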
static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
const RISCVSubtarget &Subtarget) {
SDValue V1 = Op.getOperand(0);
@@ -2284,8 +2471,12 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
SDValue IntID =
DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
- SDValue Ops[] = {Ld->getChain(), IntID, NewAddr,
- DAG.getRegister(RISCV::X0, XLenVT), VL};
+ SDValue Ops[] = {Ld->getChain(),
+ IntID,
+ DAG.getUNDEF(ContainerVT),
+ NewAddr,
+ DAG.getRegister(RISCV::X0, XLenVT),
+ VL};
SDValue NewLoad = DAG.getMemIntrinsicNode(
ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
DAG.getMachineFunction().getMachineMemOperand(
@@ -2324,10 +2515,97 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
}
}
+ ArrayRef<int> Mask = SVN->getMask();
+
+ // Try to match as a slidedown.
+ int SlideAmt = matchShuffleAsSlideDown(Mask);
+ if (SlideAmt >= 0) {
+ // TODO: Should we reduce the VL to account for the upper undef elements?
+ // Requires additional vsetvlis, but might be faster to execute.
+ V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
+ SDValue SlideDown =
+ DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
+ DAG.getUNDEF(ContainerVT), V1,
+ DAG.getConstant(SlideAmt, DL, XLenVT),
+ TrueMask, VL);
+ return convertFromScalableVector(VT, SlideDown, DAG, Subtarget);
+ }
+
+ // Detect an interleave shuffle and lower to
+ // (vmaccu.vx (vwaddu.vx lohalf(V1), lohalf(V2)), lohalf(V2), (2^eltbits - 1))
+ bool SwapSources;
+ if (isInterleaveShuffle(Mask, VT, SwapSources, Subtarget)) {
+ // Swap sources if needed.
+ if (SwapSources)
+ std::swap(V1, V2);
+
+ // Extract the lower half of the vectors.
+ MVT HalfVT = VT.getHalfNumVectorElementsVT();
+ V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
+ DAG.getConstant(0, DL, XLenVT));
+ V2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V2,
+ DAG.getConstant(0, DL, XLenVT));
+
+ // Double the element width and halve the number of elements in an int type.
+ unsigned EltBits = VT.getScalarSizeInBits();
+ MVT WideIntEltVT = MVT::getIntegerVT(EltBits * 2);
+ MVT WideIntVT =
+ MVT::getVectorVT(WideIntEltVT, VT.getVectorNumElements() / 2);
+ // Convert this to a scalable vector. We need to base this on the
+ // destination size to ensure there's always a type with a smaller LMUL.
+ MVT WideIntContainerVT =
+ getContainerForFixedLengthVector(DAG, WideIntVT, Subtarget);
+
+ // Convert sources to scalable vectors with the same element count as the
+ // larger type.
+ MVT HalfContainerVT = MVT::getVectorVT(
+ VT.getVectorElementType(), WideIntContainerVT.getVectorElementCount());
+ V1 = convertToScalableVector(HalfContainerVT, V1, DAG, Subtarget);
+ V2 = convertToScalableVector(HalfContainerVT, V2, DAG, Subtarget);
+
+ // Cast sources to integer.
+ MVT IntEltVT = MVT::getIntegerVT(EltBits);
+ MVT IntHalfVT =
+ MVT::getVectorVT(IntEltVT, HalfContainerVT.getVectorElementCount());
+ V1 = DAG.getBitcast(IntHalfVT, V1);
+ V2 = DAG.getBitcast(IntHalfVT, V2);
+
+ // Freeze V2 since we use it twice and we need to be sure that the add and
+ // multiply see the same value.
+ V2 = DAG.getNode(ISD::FREEZE, DL, IntHalfVT, V2);
+
+ // Recreate TrueMask using the widened type's element count.
+ MVT MaskVT =
+ MVT::getVectorVT(MVT::i1, HalfContainerVT.getVectorElementCount());
+ TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
+
+ // Widen V1 and V2 with 0s and add one copy of V2 to V1.
+ SDValue Add = DAG.getNode(RISCVISD::VWADDU_VL, DL, WideIntContainerVT, V1,
+ V2, TrueMask, VL);
+ // Create 2^eltbits - 1 copies of V2 by multiplying by the largest integer.
+ SDValue Multiplier = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntHalfVT,
+ DAG.getAllOnesConstant(DL, XLenVT));
+ SDValue WidenMul = DAG.getNode(RISCVISD::VWMULU_VL, DL, WideIntContainerVT,
+ V2, Multiplier, TrueMask, VL);
+ // Add the new copies to our previous addition giving us 2^eltbits copies of
+ // V2. This is equivalent to shifting V2 left by eltbits. This should
+ // combine with the vwmulu.vv above to form vwmaccu.vv.
+ Add = DAG.getNode(RISCVISD::ADD_VL, DL, WideIntContainerVT, Add, WidenMul,
+ TrueMask, VL);
+    // Cast back to ContainerVT. We need to recompute ContainerVT in case
+    // WideIntContainerVT uses a larger fractional LMUL than implied by the
+    // fixed vector VT.
+ ContainerVT =
+ MVT::getVectorVT(VT.getVectorElementType(),
+ WideIntContainerVT.getVectorElementCount() * 2);
+ Add = DAG.getBitcast(ContainerVT, Add);
+ return convertFromScalableVector(VT, Add, DAG, Subtarget);
+ }
+
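The arithmetic behind the vwaddu/vwmaccu sequence above: for an even-lane element a and odd-lane element b, the interleaved pair occupies one double-width lane holding a + b * 2^eltbits (little-endian), and (a + b) + b * (2^eltbits - 1) is exactly that value. A host-side check with 8-bit elements (assumes a little-endian host, matching RISC-V):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  // One i8 lane from each source; we want them interleaved as adjacent bytes.
  uint8_t A = 3, B = 5;                           // A lands in the even (low) position
  // Widening add of the two lanes, then widening multiply-accumulate by 255:
  uint16_t Wide = uint16_t(A) + uint16_t(B);      // a + b
  Wide = uint16_t(Wide + uint16_t(B) * 255u);     // + b * (2^8 - 1)
  assert(Wide == uint16_t(A) + (uint16_t(B) << 8)); // == a + b * 2^8
  // Reinterpreted as bytes (little-endian), that is exactly {A, B}.
  uint8_t Bytes[2];
  std::memcpy(Bytes, &Wide, 2);
  assert(Bytes[0] == A && Bytes[1] == B);
}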
// Detect shuffles which can be re-expressed as vector selects; these are
// shuffles in which each element in the destination is taken from an element
// at the corresponding index in either source vectors.
- bool IsSelect = all_of(enumerate(SVN->getMask()), [&](const auto &MaskIdx) {
+ bool IsSelect = all_of(enumerate(Mask), [&](const auto &MaskIdx) {
int MaskIndex = MaskIdx.value();
return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
});
@@ -2353,7 +2631,7 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
// Now construct the mask that will be used by the vselect or blended
// vrgather operation. For vrgathers, construct the appropriate indices into
// each vector.
- for (int MaskIndex : SVN->getMask()) {
+ for (int MaskIndex : Mask) {
bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
if (!IsSelect) {
@@ -2691,15 +2969,25 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
MVT VT = Op.getSimpleValueType();
assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
SDLoc DL(Op);
- if (Op.getOperand(2).getOpcode() == ISD::Constant)
- return Op;
// FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
- // use log(XLen) bits. Mask the shift amount accordingly.
+ // use log(XLen) bits. Mask the shift amount accordingly to prevent
+ // accidentally setting the extra bit.
unsigned ShAmtWidth = Subtarget.getXLen() - 1;
SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
DAG.getConstant(ShAmtWidth, DL, VT));
- unsigned Opc = Op.getOpcode() == ISD::FSHL ? RISCVISD::FSL : RISCVISD::FSR;
- return DAG.getNode(Opc, DL, VT, Op.getOperand(0), Op.getOperand(1), ShAmt);
+ // fshl and fshr concatenate their operands in the same order. fsr and fsl
+    // instructions use different orders. fshl will return its first operand for
+ // shift of zero, fshr will return its second operand. fsl and fsr both
+ // return rs1 so the ISD nodes need to have different operand orders.
+ // Shift amount is in rs2.
+ SDValue Op0 = Op.getOperand(0);
+ SDValue Op1 = Op.getOperand(1);
+ unsigned Opc = RISCVISD::FSL;
+ if (Op.getOpcode() == ISD::FSHR) {
+ std::swap(Op0, Op1);
+ Opc = RISCVISD::FSR;
+ }
+ return DAG.getNode(Opc, DL, VT, Op0, Op1, ShAmt);
}
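For reference, the generic fshl/fshr semantics that motivate the operand swap above, written as a standalone 32-bit C++ model; note which operand each returns for a shift amount of zero:

#include <cassert>
#include <cstdint>

// fshl(A, B, S): take the high 32 bits of the 64-bit value (A:B) << S.
static uint32_t fshl32(uint32_t A, uint32_t B, unsigned S) {
  S &= 31;
  return S == 0 ? A : (A << S) | (B >> (32 - S));
}
// fshr(A, B, S): take the low 32 bits of the 64-bit value (A:B) >> S.
static uint32_t fshr32(uint32_t A, uint32_t B, unsigned S) {
  S &= 31;
  return S == 0 ? B : (A << (32 - S)) | (B >> S);
}

int main() {
  assert(fshl32(0xAAAAAAAAu, 0x55555555u, 0) == 0xAAAAAAAAu); // zero shift -> first operand
  assert(fshr32(0xAAAAAAAAu, 0x55555555u, 0) == 0x55555555u); // zero shift -> second operand
  assert(fshl32(0x12345678u, 0x9ABCDEF0u, 8) == 0x3456789Au);
  assert(fshr32(0x12345678u, 0x9ABCDEF0u, 8) == 0x789ABCDEu);
}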
case ISD::TRUNCATE: {
SDLoc DL(Op);
@@ -2774,7 +3062,7 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
// We define our scalable vector types for lmul=1 to use a 64 bit known
// minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
// vscale as VLENB / 8.
- assert(RISCV::RVVBitsPerBlock == 64 && "Unexpected bits per block!");
+ static_assert(RISCV::RVVBitsPerBlock == 64, "Unexpected bits per block!");
if (isa<ConstantSDNode>(Op.getOperand(0))) {
// We assume VLENB is a multiple of 8. We manually choose the best shift
// here because SimplifyDemandedBits isn't always able to simplify it.
@@ -3001,7 +3289,7 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
}
case ISD::FP_TO_SINT_SAT:
case ISD::FP_TO_UINT_SAT:
- return lowerFP_TO_INT_SAT(Op, DAG);
+ return lowerFP_TO_INT_SAT(Op, DAG, Subtarget);
case ISD::FTRUNC:
case ISD::FCEIL:
case ISD::FFLOOR:
@@ -3063,9 +3351,14 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
unsigned NumOpElts =
Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
SDValue Vec = DAG.getUNDEF(VT);
- for (const auto &OpIdx : enumerate(Op->ops()))
- Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, OpIdx.value(),
+ for (const auto &OpIdx : enumerate(Op->ops())) {
+ SDValue SubVec = OpIdx.value();
+ // Don't insert undef subvectors.
+ if (SubVec.isUndef())
+ continue;
+ Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, SubVec,
DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
+ }
return Vec;
}
case ISD::LOAD:
@@ -3181,6 +3474,8 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
return lowerSET_ROUNDING(Op, DAG);
case ISD::VP_SELECT:
return lowerVPOp(Op, DAG, RISCVISD::VSELECT_VL);
+ case ISD::VP_MERGE:
+ return lowerVPOp(Op, DAG, RISCVISD::VP_MERGE_VL);
case ISD::VP_ADD:
return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
case ISD::VP_SUB:
@@ -4044,10 +4339,10 @@ static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG,
const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
- if (!II || !II->SplatOperand)
+ if (!II || !II->hasSplatOperand())
return SDValue();
- unsigned SplatOp = II->SplatOperand + HasChain;
+ unsigned SplatOp = II->SplatOperand + 1 + HasChain;
assert(SplatOp < Op.getNumOperands());
SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
@@ -4077,7 +4372,7 @@ static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG,
// that a widening operation never uses SEW=64.
// NOTE: If this fails the below assert, we can probably just find the
// element count from any operand or result and use it to construct the VT.
- assert(II->SplatOperand > 1 && "Unexpected splat operand!");
+ assert(II->SplatOperand > 0 && "Unexpected splat operand!");
MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
// The more complex case is when the scalar is larger than XLenVT.
@@ -4096,8 +4391,7 @@ static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG,
// We need to convert the scalar to a splat vector.
// FIXME: Can we implicitly truncate the scalar if it is known to
// be sign extended?
- // VL should be the last operand.
- SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
+ SDValue VL = getVLOperand(Op);
assert(VL.getValueType() == XLenVT);
ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG);
return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
@@ -4138,6 +4432,15 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
: RISCVISD::BDECOMPRESS;
return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
}
+ case Intrinsic::riscv_bfp:
+ return DAG.getNode(RISCVISD::BFP, DL, XLenVT, Op.getOperand(1),
+ Op.getOperand(2));
+ case Intrinsic::riscv_fsl:
+ return DAG.getNode(RISCVISD::FSL, DL, XLenVT, Op.getOperand(1),
+ Op.getOperand(2), Op.getOperand(3));
+ case Intrinsic::riscv_fsr:
+ return DAG.getNode(RISCVISD::FSR, DL, XLenVT, Op.getOperand(1),
+ Op.getOperand(2), Op.getOperand(3));
case Intrinsic::riscv_vmv_x_s:
assert(Op.getValueType() == XLenVT && "Unexpected VT!");
return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
@@ -4176,7 +4479,7 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
// vmerge.vvm vDest, vSrc, vVal, mMask
MVT VT = Op.getSimpleValueType();
SDValue Vec = Op.getOperand(1);
- SDValue VL = Op.getOperand(3);
+ SDValue VL = getVLOperand(Op);
SDValue SplattedVal = splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
SDValue SplattedIdx = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
@@ -4222,7 +4525,7 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
DAG.getConstant(1, DL, XLenVT));
// Double the VL since we halved SEW.
- SDValue VL = Op.getOperand(NumOps - (1 + OpOffset));
+ SDValue VL = getVLOperand(Op);
SDValue I32VL =
DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
@@ -4294,7 +4597,9 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
auto *Load = cast<MemIntrinsicSDNode>(Op);
SmallVector<SDValue, 8> Ops{Load->getChain(), IntID};
- if (!IsUnmasked)
+ if (IsUnmasked)
+ Ops.push_back(DAG.getUNDEF(ContainerVT));
+ else
Ops.push_back(PassThru);
Ops.push_back(Op.getOperand(3)); // Ptr
Ops.push_back(Op.getOperand(4)); // Stride
@@ -4720,7 +5025,7 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
// register size. Therefore we must slide the vector group up the full
// amount.
if (SubVecVT.isFixedLengthVector()) {
- if (OrigIdx == 0 && Vec.isUndef())
+ if (OrigIdx == 0 && Vec.isUndef() && !VecVT.isFixedLengthVector())
return Op;
MVT ContainerVT = VecVT;
if (VecVT.isFixedLengthVector()) {
@@ -4730,6 +5035,10 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
DAG.getUNDEF(ContainerVT), SubVec,
DAG.getConstant(0, DL, XLenVT));
+ if (OrigIdx == 0 && Vec.isUndef() && VecVT.isFixedLengthVector()) {
+ SubVec = convertFromScalableVector(VecVT, SubVec, DAG, Subtarget);
+ return DAG.getBitcast(Op.getValueType(), SubVec);
+ }
SDValue Mask =
getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
// Set the vector length to only the number of elements we care about. Note
@@ -5148,7 +5457,9 @@ SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
unsigned IntID =
IsUnmasked ? Intrinsic::riscv_vle : Intrinsic::riscv_vle_mask;
SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
- if (!IsUnmasked)
+ if (IsUnmasked)
+ Ops.push_back(DAG.getUNDEF(ContainerVT));
+ else
Ops.push_back(PassThru);
Ops.push_back(BasePtr);
if (!IsUnmasked)
@@ -5518,13 +5829,20 @@ SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
}
}
+ if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
+ IndexVT = IndexVT.changeVectorElementType(XLenVT);
+ Index = DAG.getNode(ISD::TRUNCATE, DL, IndexVT, Index);
+ }
+
if (!VL)
VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
unsigned IntID =
IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
- if (!IsUnmasked)
+ if (IsUnmasked)
+ Ops.push_back(DAG.getUNDEF(ContainerVT));
+ else
Ops.push_back(PassThru);
Ops.push_back(BasePtr);
Ops.push_back(Index);
@@ -5619,6 +5937,11 @@ SDValue RISCVTargetLowering::lowerMaskedScatter(SDValue Op,
}
}
+ if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
+ IndexVT = IndexVT.changeVectorElementType(XLenVT);
+ Index = DAG.getNode(ISD::TRUNCATE, DL, IndexVT, Index);
+ }
+
if (!VL)
VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
@@ -5697,6 +6020,39 @@ SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
RMValue);
}
+static RISCVISD::NodeType getRISCVWOpcodeByIntr(unsigned IntNo) {
+ switch (IntNo) {
+ default:
+ llvm_unreachable("Unexpected Intrinsic");
+ case Intrinsic::riscv_grev:
+ return RISCVISD::GREVW;
+ case Intrinsic::riscv_gorc:
+ return RISCVISD::GORCW;
+ case Intrinsic::riscv_bcompress:
+ return RISCVISD::BCOMPRESSW;
+ case Intrinsic::riscv_bdecompress:
+ return RISCVISD::BDECOMPRESSW;
+ case Intrinsic::riscv_bfp:
+ return RISCVISD::BFPW;
+ case Intrinsic::riscv_fsl:
+ return RISCVISD::FSLW;
+ case Intrinsic::riscv_fsr:
+ return RISCVISD::FSRW;
+ }
+}
+
+// Converts the given intrinsic to an i64 operation, any-extending its operands.
+static SDValue customLegalizeToWOpByIntr(SDNode *N, SelectionDAG &DAG,
+ unsigned IntNo) {
+ SDLoc DL(N);
+ RISCVISD::NodeType WOpcode = getRISCVWOpcodeByIntr(IntNo);
+ SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
+ SDValue NewOp2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
+ SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp1, NewOp2);
+ // ReplaceNodeResults requires we maintain the same type for the return value.
+ return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
+}
+
// Returns the opcode of the target-specific SDNode that implements the 32-bit
// form of the given Opcode.
static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
@@ -5776,17 +6132,20 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
if (!isTypeLegal(Op0.getValueType()))
return;
if (IsStrict) {
- unsigned Opc = IsSigned ? RISCVISD::STRICT_FCVT_W_RTZ_RV64
- : RISCVISD::STRICT_FCVT_WU_RTZ_RV64;
+ unsigned Opc = IsSigned ? RISCVISD::STRICT_FCVT_W_RV64
+ : RISCVISD::STRICT_FCVT_WU_RV64;
SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
- SDValue Res = DAG.getNode(Opc, DL, VTs, N->getOperand(0), Op0);
+ SDValue Res = DAG.getNode(
+ Opc, DL, VTs, N->getOperand(0), Op0,
+ DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
Results.push_back(Res.getValue(1));
return;
}
- unsigned Opc =
- IsSigned ? RISCVISD::FCVT_W_RTZ_RV64 : RISCVISD::FCVT_WU_RTZ_RV64;
- SDValue Res = DAG.getNode(Opc, DL, MVT::i64, Op0);
+ unsigned Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
+ SDValue Res =
+ DAG.getNode(Opc, DL, MVT::i64, Op0,
+ DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
return;
}
@@ -6078,15 +6437,23 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
SDValue NewOp1 =
DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
- SDValue NewOp2 =
+ SDValue NewShAmt =
DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
// FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
- // Mask the shift amount to 5 bits.
- NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
- DAG.getConstant(0x1f, DL, MVT::i64));
- unsigned Opc =
- N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW;
- SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2);
+ // Mask the shift amount to 5 bits to prevent accidentally setting bit 5.
+ NewShAmt = DAG.getNode(ISD::AND, DL, MVT::i64, NewShAmt,
+ DAG.getConstant(0x1f, DL, MVT::i64));
+ // fshl and fshr concatenate their operands in the same order. fsrw and fslw
+    // instructions use different orders. fshl will return its first operand for
+ // shift of zero, fshr will return its second operand. fsl and fsr both
+ // return rs1 so the ISD nodes need to have different operand orders.
+ // Shift amount is in rs2.
+ unsigned Opc = RISCVISD::FSLW;
+ if (N->getOpcode() == ISD::FSHR) {
+ std::swap(NewOp0, NewOp1);
+ Opc = RISCVISD::FSRW;
+ }
+ SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewShAmt);
Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
break;
}
@@ -6154,6 +6521,31 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
default:
llvm_unreachable(
"Don't know how to custom type legalize this intrinsic!");
+ case Intrinsic::riscv_grev:
+ case Intrinsic::riscv_gorc:
+ case Intrinsic::riscv_bcompress:
+ case Intrinsic::riscv_bdecompress:
+ case Intrinsic::riscv_bfp: {
+ assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
+ "Unexpected custom legalisation");
+ Results.push_back(customLegalizeToWOpByIntr(N, DAG, IntNo));
+ break;
+ }
+ case Intrinsic::riscv_fsl:
+ case Intrinsic::riscv_fsr: {
+ assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
+ "Unexpected custom legalisation");
+ SDValue NewOp1 =
+ DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
+ SDValue NewOp2 =
+ DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
+ SDValue NewOp3 =
+ DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3));
+ unsigned Opc = getRISCVWOpcodeByIntr(IntNo);
+ SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2, NewOp3);
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
+ break;
+ }
case Intrinsic::riscv_orc_b: {
// Lower to the GORCI encoding for orc.b with the operand extended.
SDValue NewOp =
@@ -6166,20 +6558,6 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
return;
}
- case Intrinsic::riscv_grev:
- case Intrinsic::riscv_gorc: {
- assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
- "Unexpected custom legalisation");
- SDValue NewOp1 =
- DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
- SDValue NewOp2 =
- DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
- unsigned Opc =
- IntNo == Intrinsic::riscv_grev ? RISCVISD::GREVW : RISCVISD::GORCW;
- SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
- Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
- break;
- }
case Intrinsic::riscv_shfl:
case Intrinsic::riscv_unshfl: {
assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
@@ -6200,21 +6578,6 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
break;
}
- case Intrinsic::riscv_bcompress:
- case Intrinsic::riscv_bdecompress: {
- assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
- "Unexpected custom legalisation");
- SDValue NewOp1 =
- DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
- SDValue NewOp2 =
- DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
- unsigned Opc = IntNo == Intrinsic::riscv_bcompress
- ? RISCVISD::BCOMPRESSW
- : RISCVISD::BDECOMPRESSW;
- SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
- Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
- break;
- }
case Intrinsic::riscv_vmv_x_s: {
EVT VT = N->getValueType(0);
MVT XLenVT = Subtarget.getXLenVT();
@@ -6923,9 +7286,14 @@ static SDValue performANY_EXTENDCombine(SDNode *N,
// Try to form VWMUL or VWMULU.
// FIXME: Support VWMULSU.
-static SDValue combineMUL_VLToVWMUL(SDNode *N, SDValue Op0, SDValue Op1,
- SelectionDAG &DAG) {
+static SDValue combineMUL_VLToVWMUL_VL(SDNode *N, SelectionDAG &DAG,
+ bool Commute) {
assert(N->getOpcode() == RISCVISD::MUL_VL && "Unexpected opcode");
+ SDValue Op0 = N->getOperand(0);
+ SDValue Op1 = N->getOperand(1);
+ if (Commute)
+ std::swap(Op0, Op1);
+
bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL;
bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL;
if ((!IsSignExt && !IsZeroExt) || !Op0.hasOneUse())
@@ -7002,6 +7370,123 @@ static SDValue combineMUL_VLToVWMUL(SDNode *N, SDValue Op0, SDValue Op1,
return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
}
+static RISCVFPRndMode::RoundingMode matchRoundingOp(SDValue Op) {
+ switch (Op.getOpcode()) {
+ case ISD::FROUNDEVEN: return RISCVFPRndMode::RNE;
+ case ISD::FTRUNC: return RISCVFPRndMode::RTZ;
+ case ISD::FFLOOR: return RISCVFPRndMode::RDN;
+ case ISD::FCEIL: return RISCVFPRndMode::RUP;
+ case ISD::FROUND: return RISCVFPRndMode::RMM;
+ }
+
+ return RISCVFPRndMode::Invalid;
+}
+
+// Fold
+// (fp_to_int (froundeven X)) -> fcvt X, rne
+// (fp_to_int (ftrunc X)) -> fcvt X, rtz
+// (fp_to_int (ffloor X)) -> fcvt X, rdn
+// (fp_to_int (fceil X)) -> fcvt X, rup
+// (fp_to_int (fround X)) -> fcvt X, rmm
+static SDValue performFP_TO_INTCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const RISCVSubtarget &Subtarget) {
+ SelectionDAG &DAG = DCI.DAG;
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ MVT XLenVT = Subtarget.getXLenVT();
+
+ // Only handle XLen or i32 types. Other types narrower than XLen will
+ // eventually be legalized to XLenVT.
+ EVT VT = N->getValueType(0);
+ if (VT != MVT::i32 && VT != XLenVT)
+ return SDValue();
+
+ SDValue Src = N->getOperand(0);
+
+ // Ensure the FP type is also legal.
+ if (!TLI.isTypeLegal(Src.getValueType()))
+ return SDValue();
+
+ // Don't do this for f16 with Zfhmin and not Zfh.
+ if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
+ return SDValue();
+
+ RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
+ if (FRM == RISCVFPRndMode::Invalid)
+ return SDValue();
+
+ bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
+
+ unsigned Opc;
+ if (VT == XLenVT)
+ Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
+ else
+ Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
+
+ SDLoc DL(N);
+ SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src.getOperand(0),
+ DAG.getTargetConstant(FRM, DL, XLenVT));
+ return DAG.getNode(ISD::TRUNCATE, DL, VT, FpToInt);
+}
+
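The soundness of these folds can be sanity-checked on any IEEE-754 host: converting under a given dynamic rounding mode matches rounding first and then converting. A small illustration using the C++ floating-point environment (not RISC-V-specific; strictly, FENV_ACCESS caveats apply, but this behaves as shown on common toolchains):

#include <cassert>
#include <cfenv>
#include <cmath>

int main() {
  // Volatile stops constant folding so the dynamic rounding mode is honoured.
  volatile double X = 2.7, Y = -2.3, T = 2.5, U = 3.5;
  const int OldMode = std::fegetround();

  std::fesetround(FE_DOWNWARD);                  // "rdn"
  assert(std::lrint(X) == (long)std::floor(X));  // both 2
  assert(std::lrint(Y) == (long)std::floor(Y));  // both -3

  std::fesetround(FE_TONEAREST);                 // "rne", ties to even
  assert(std::lrint(T) == 2 && std::lrint(U) == 4);

  std::fesetround(OldMode);
  return 0;
}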
+// Fold
+// (fp_to_int_sat (froundeven X)) -> (select X == nan, 0, (fcvt X, rne))
+// (fp_to_int_sat (ftrunc X)) -> (select X == nan, 0, (fcvt X, rtz))
+// (fp_to_int_sat (ffloor X)) -> (select X == nan, 0, (fcvt X, rdn))
+// (fp_to_int_sat (fceil X)) -> (select X == nan, 0, (fcvt X, rup))
+// (fp_to_int_sat (fround X)) -> (select X == nan, 0, (fcvt X, rmm))
+static SDValue performFP_TO_INT_SATCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const RISCVSubtarget &Subtarget) {
+ SelectionDAG &DAG = DCI.DAG;
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ MVT XLenVT = Subtarget.getXLenVT();
+
+ // Only handle XLen types. Other types narrower than XLen will eventually be
+ // legalized to XLenVT.
+ EVT DstVT = N->getValueType(0);
+ if (DstVT != XLenVT)
+ return SDValue();
+
+ SDValue Src = N->getOperand(0);
+
+ // Ensure the FP type is also legal.
+ if (!TLI.isTypeLegal(Src.getValueType()))
+ return SDValue();
+
+ // Don't do this for f16 with Zfhmin and not Zfh.
+ if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
+ return SDValue();
+
+ EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT();
+
+ RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
+ if (FRM == RISCVFPRndMode::Invalid)
+ return SDValue();
+
+ bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT_SAT;
+
+ unsigned Opc;
+ if (SatVT == DstVT)
+ Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
+ else if (DstVT == MVT::i64 && SatVT == MVT::i32)
+ Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
+ else
+ return SDValue();
+ // FIXME: Support other SatVTs by clamping before or after the conversion.
+
+ Src = Src.getOperand(0);
+
+ SDLoc DL(N);
+ SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src,
+ DAG.getTargetConstant(FRM, DL, XLenVT));
+
+ // RISCV FP-to-int conversions saturate to the destination register size, but
+ // don't produce 0 for nan.
+ SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
+ return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
+}
+
SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
@@ -7083,25 +7568,6 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
return SDValue(N, 0);
break;
}
- case RISCVISD::FSL:
- case RISCVISD::FSR: {
- // Only the lower log2(Bitwidth)+1 bits of the the shift amount are read.
- unsigned BitWidth = N->getOperand(2).getValueSizeInBits();
- assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
- if (SimplifyDemandedLowBitsHelper(2, Log2_32(BitWidth) + 1))
- return SDValue(N, 0);
- break;
- }
- case RISCVISD::FSLW:
- case RISCVISD::FSRW: {
- // Only the lower 32 bits of Values and lower 6 bits of shift amount are
- // read.
- if (SimplifyDemandedLowBitsHelper(0, 32) ||
- SimplifyDemandedLowBitsHelper(1, 32) ||
- SimplifyDemandedLowBitsHelper(2, 6))
- return SDValue(N, 0);
- break;
- }
case RISCVISD::GREV:
case RISCVISD::GORC: {
     // Only the lower log2(Bitwidth) bits of the shift amount are read.
@@ -7331,6 +7797,12 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
}
break;
}
+ case ISD::FP_TO_SINT:
+ case ISD::FP_TO_UINT:
+ return performFP_TO_INTCombine(N, DCI, Subtarget);
+ case ISD::FP_TO_SINT_SAT:
+ case ISD::FP_TO_UINT_SAT:
+ return performFP_TO_INT_SATCombine(N, DCI, Subtarget);
case ISD::FCOPYSIGN: {
EVT VT = N->getValueType(0);
if (!VT.isVector())
@@ -7464,15 +7936,11 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
}
break;
}
- case RISCVISD::MUL_VL: {
- SDValue Op0 = N->getOperand(0);
- SDValue Op1 = N->getOperand(1);
- if (SDValue V = combineMUL_VLToVWMUL(N, Op0, Op1, DAG))
+ case RISCVISD::MUL_VL:
+ if (SDValue V = combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ false))
return V;
- if (SDValue V = combineMUL_VLToVWMUL(N, Op1, Op0, DAG))
- return V;
- return SDValue();
- }
+ // Mul is commutative.
+ return combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ true);
case ISD::STORE: {
auto *Store = cast<StoreSDNode>(N);
SDValue Val = Store->getValue();
@@ -7486,12 +7954,12 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
if (VecVT.getVectorElementType() == MemVT) {
SDLoc DL(N);
MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
- return DAG.getStoreVP(Store->getChain(), DL, Src, Store->getBasePtr(),
- DAG.getConstant(1, DL, MaskVT),
- DAG.getConstant(1, DL, Subtarget.getXLenVT()),
- Store->getPointerInfo(),
- Store->getOriginalAlign(),
- Store->getMemOperand()->getFlags());
+ return DAG.getStoreVP(
+ Store->getChain(), DL, Src, Store->getBasePtr(), Store->getOffset(),
+ DAG.getConstant(1, DL, MaskVT),
+ DAG.getConstant(1, DL, Subtarget.getXLenVT()), MemVT,
+ Store->getMemOperand(), Store->getAddressingMode(),
+ Store->isTruncatingStore(), /*IsCompress*/ false);
}
}
@@ -7732,14 +8200,18 @@ void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
// We assume VLENB is no more than 65536 / 8 bytes.
Known.Zero.setBitsFrom(14);
break;
- case ISD::INTRINSIC_W_CHAIN: {
- unsigned IntNo = Op.getConstantOperandVal(1);
+ case ISD::INTRINSIC_W_CHAIN:
+ case ISD::INTRINSIC_WO_CHAIN: {
+ unsigned IntNo =
+ Op.getConstantOperandVal(Opc == ISD::INTRINSIC_WO_CHAIN ? 0 : 1);
switch (IntNo) {
default:
// We can't do anything for most intrinsics.
break;
case Intrinsic::riscv_vsetvli:
case Intrinsic::riscv_vsetvlimax:
+ case Intrinsic::riscv_vsetvli_opt:
+ case Intrinsic::riscv_vsetvlimax_opt:
// Assume that VL output is positive and would fit in an int32_t.
// TODO: VLEN might be capped at 16 bits in a future V spec update.
if (BitWidth >= 32)
@@ -7779,10 +8251,11 @@ unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
case RISCVISD::UNSHFLW:
case RISCVISD::BCOMPRESSW:
case RISCVISD::BDECOMPRESSW:
- case RISCVISD::FCVT_W_RTZ_RV64:
- case RISCVISD::FCVT_WU_RTZ_RV64:
- case RISCVISD::STRICT_FCVT_W_RTZ_RV64:
- case RISCVISD::STRICT_FCVT_WU_RTZ_RV64:
+ case RISCVISD::BFPW:
+ case RISCVISD::FCVT_W_RV64:
+ case RISCVISD::FCVT_WU_RV64:
+ case RISCVISD::STRICT_FCVT_W_RV64:
+ case RISCVISD::STRICT_FCVT_WU_RV64:
// TODO: As the result is sign-extended, this is conservatively correct. A
// more precise answer could be calculated for SRAW depending on known
// bits in the shift amount.
@@ -7958,6 +8431,42 @@ static bool isSelectPseudo(MachineInstr &MI) {
}
}
+static MachineBasicBlock *emitQuietFCMP(MachineInstr &MI, MachineBasicBlock *BB,
+ unsigned RelOpcode, unsigned EqOpcode,
+ const RISCVSubtarget &Subtarget) {
+ DebugLoc DL = MI.getDebugLoc();
+ Register DstReg = MI.getOperand(0).getReg();
+ Register Src1Reg = MI.getOperand(1).getReg();
+ Register Src2Reg = MI.getOperand(2).getReg();
+ MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
+ Register SavedFFlags = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+ const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
+
+ // Save the current FFLAGS.
+ BuildMI(*BB, MI, DL, TII.get(RISCV::ReadFFLAGS), SavedFFlags);
+
+ auto MIB = BuildMI(*BB, MI, DL, TII.get(RelOpcode), DstReg)
+ .addReg(Src1Reg)
+ .addReg(Src2Reg);
+ if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
+ MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);
+
+ // Restore the FFLAGS.
+ BuildMI(*BB, MI, DL, TII.get(RISCV::WriteFFLAGS))
+ .addReg(SavedFFlags, RegState::Kill);
+
+  // Issue a dummy FEQ opcode to raise an exception for signaling NaNs.
+ auto MIB2 = BuildMI(*BB, MI, DL, TII.get(EqOpcode), RISCV::X0)
+ .addReg(Src1Reg, getKillRegState(MI.getOperand(1).isKill()))
+ .addReg(Src2Reg, getKillRegState(MI.getOperand(2).isKill()));
+ if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
+ MIB2->setFlag(MachineInstr::MIFlag::NoFPExcept);
+
+ // Erase the pseudoinstruction.
+ MI.eraseFromParent();
+ return BB;
+}
+
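A toy model of what the expansion above achieves, with an explicit flag struct standing in for FFLAGS and an explicit "signaling" bit standing in for sNaN payloads (all names illustrative): the relational compare's invalid flag is discarded by the save/restore, and the trailing FEQ re-raises it only for signaling NaNs.

#include <cassert>
#include <cmath>

struct FFlags { bool Invalid = false; };

// flt/fle signal "invalid" for any NaN operand.
static bool fltSignaling(double A, double B, FFlags &F) {
  if (std::isnan(A) || std::isnan(B)) { F.Invalid = true; return false; }
  return A < B;
}
// feq signals "invalid" only for signaling NaN operands (result discarded,
// like writing to x0 in the real expansion).
static void feqQuiet(double A, double B, bool ASig, bool BSig, FFlags &F) {
  if ((std::isnan(A) && ASig) || (std::isnan(B) && BSig))
    F.Invalid = true;
}
static bool quietFLT(double A, double B, bool ASig, bool BSig, FFlags &F) {
  FFlags Saved = F;                 // save FFLAGS
  bool Res = fltSignaling(A, B, F); // relational compare (may raise invalid)
  F = Saved;                        // restore FFLAGS
  feqQuiet(A, B, ASig, BSig, F);    // dummy FEQ re-raises for signaling NaNs
  return Res;
}

int main() {
  FFlags F;
  double QNaN = std::nan("");
  assert(!quietFLT(QNaN, 1.0, /*ASig=*/false, false, F) && !F.Invalid);
  assert(!quietFLT(QNaN, 1.0, /*ASig=*/true, false, F) && F.Invalid);
}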
static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
MachineBasicBlock *BB,
const RISCVSubtarget &Subtarget) {
@@ -8099,6 +8608,18 @@ RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
return emitBuildPairF64Pseudo(MI, BB);
case RISCV::SplitF64Pseudo:
return emitSplitF64Pseudo(MI, BB);
+ case RISCV::PseudoQuietFLE_H:
+ return emitQuietFCMP(MI, BB, RISCV::FLE_H, RISCV::FEQ_H, Subtarget);
+ case RISCV::PseudoQuietFLT_H:
+ return emitQuietFCMP(MI, BB, RISCV::FLT_H, RISCV::FEQ_H, Subtarget);
+ case RISCV::PseudoQuietFLE_S:
+ return emitQuietFCMP(MI, BB, RISCV::FLE_S, RISCV::FEQ_S, Subtarget);
+ case RISCV::PseudoQuietFLT_S:
+ return emitQuietFCMP(MI, BB, RISCV::FLT_S, RISCV::FEQ_S, Subtarget);
+ case RISCV::PseudoQuietFLE_D:
+ return emitQuietFCMP(MI, BB, RISCV::FLE_D, RISCV::FEQ_D, Subtarget);
+ case RISCV::PseudoQuietFLT_D:
+ return emitQuietFCMP(MI, BB, RISCV::FLT_D, RISCV::FEQ_D, Subtarget);
}
}
@@ -8393,7 +8914,8 @@ static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
LocVT = XLenVT;
LocInfo = CCValAssign::Indirect;
} else if (ValVT.isScalableVector()) {
- report_fatal_error("Unable to pass scalable vector types on the stack");
+ LocVT = XLenVT;
+ LocInfo = CCValAssign::Indirect;
} else {
// Pass fixed-length vectors on the stack.
LocVT = ValVT;
@@ -8592,8 +9114,14 @@ static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
EVT LocVT = VA.getLocVT();
EVT ValVT = VA.getValVT();
EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
+ if (ValVT.isScalableVector()) {
+    // When the value is a scalable vector, what lives on the stack is a
+    // pointer to it, so use the pointer type (LocVT) for the frame object
+    // instead of the scalable vector type.
+ ValVT = LocVT;
+ }
int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
- /*Immutable=*/true);
+ /*IsImmutable=*/true);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
SDValue Val;
@@ -8623,7 +9151,8 @@ static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
if (VA.isMemLoc()) {
// f64 is passed on the stack.
- int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
+ int FI =
+ MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*IsImmutable=*/true);
SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
return DAG.getLoad(MVT::f64, DL, Chain, FIN,
MachinePointerInfo::getFixedStack(MF, FI));
@@ -8637,7 +9166,7 @@ static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
SDValue Hi;
if (VA.getLocReg() == RISCV::X17) {
// Second half of f64 is passed on the stack.
- int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
+ int FI = MFI.CreateFixedObject(4, 0, /*IsImmutable=*/true);
SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
MachinePointerInfo::getFixedStack(MF, FI));
@@ -9510,12 +10039,12 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(FMV_X_ANYEXTH)
NODE_NAME_CASE(FMV_W_X_RV64)
NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
- NODE_NAME_CASE(FCVT_X_RTZ)
- NODE_NAME_CASE(FCVT_XU_RTZ)
- NODE_NAME_CASE(FCVT_W_RTZ_RV64)
- NODE_NAME_CASE(FCVT_WU_RTZ_RV64)
- NODE_NAME_CASE(STRICT_FCVT_W_RTZ_RV64)
- NODE_NAME_CASE(STRICT_FCVT_WU_RTZ_RV64)
+ NODE_NAME_CASE(FCVT_X)
+ NODE_NAME_CASE(FCVT_XU)
+ NODE_NAME_CASE(FCVT_W_RV64)
+ NODE_NAME_CASE(FCVT_WU_RV64)
+ NODE_NAME_CASE(STRICT_FCVT_W_RV64)
+ NODE_NAME_CASE(STRICT_FCVT_WU_RV64)
NODE_NAME_CASE(READ_CYCLE_WIDE)
NODE_NAME_CASE(GREV)
NODE_NAME_CASE(GREVW)
@@ -9525,6 +10054,8 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(SHFLW)
NODE_NAME_CASE(UNSHFL)
NODE_NAME_CASE(UNSHFLW)
+ NODE_NAME_CASE(BFP)
+ NODE_NAME_CASE(BFPW)
NODE_NAME_CASE(BCOMPRESS)
NODE_NAME_CASE(BCOMPRESSW)
NODE_NAME_CASE(BDECOMPRESS)
@@ -9598,8 +10129,10 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(FP_ROUND_VL)
NODE_NAME_CASE(VWMUL_VL)
NODE_NAME_CASE(VWMULU_VL)
+ NODE_NAME_CASE(VWADDU_VL)
NODE_NAME_CASE(SETCC_VL)
NODE_NAME_CASE(VSELECT_VL)
+ NODE_NAME_CASE(VP_MERGE_VL)
NODE_NAME_CASE(VMAND_VL)
NODE_NAME_CASE(VMOR_VL)
NODE_NAME_CASE(VMXOR_VL)
@@ -9768,12 +10301,18 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
.Default(RISCV::NoRegister);
if (FReg != RISCV::NoRegister) {
assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
- if (Subtarget.hasStdExtD()) {
+ if (Subtarget.hasStdExtD() && (VT == MVT::f64 || VT == MVT::Other)) {
unsigned RegNo = FReg - RISCV::F0_F;
unsigned DReg = RISCV::F0_D + RegNo;
return std::make_pair(DReg, &RISCV::FPR64RegClass);
}
- return std::make_pair(FReg, &RISCV::FPR32RegClass);
+ if (VT == MVT::f32 || VT == MVT::Other)
+ return std::make_pair(FReg, &RISCV::FPR32RegClass);
+ if (Subtarget.hasStdExtZfh() && VT == MVT::f16) {
+ unsigned RegNo = FReg - RISCV::F0_F;
+ unsigned HReg = RISCV::F0_H + RegNo;
+ return std::make_pair(HReg, &RISCV::FPR16RegClass);
+ }
}
}
@@ -10070,6 +10609,24 @@ bool RISCVTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
}
}
+unsigned RISCVTargetLowering::getJumpTableEncoding() const {
+  // If we are using the small code model, we can reduce the size of each jump
+  // table entry to 4 bytes.
+ if (Subtarget.is64Bit() && !isPositionIndependent() &&
+ getTargetMachine().getCodeModel() == CodeModel::Small) {
+ return MachineJumpTableInfo::EK_Custom32;
+ }
+ return TargetLowering::getJumpTableEncoding();
+}
+
+const MCExpr *RISCVTargetLowering::LowerCustomJumpTableEntry(
+ const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
+ unsigned uid, MCContext &Ctx) const {
+ assert(Subtarget.is64Bit() && !isPositionIndependent() &&
+ getTargetMachine().getCodeModel() == CodeModel::Small);
+ return MCSymbolRefExpr::create(MBB->getSymbol(), Ctx);
+}
+
bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
EVT VT) const {
VT = VT.getScalarType();
@@ -10293,6 +10850,60 @@ SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
return SDValue();
}
+SDValue
+RISCVTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
+ SelectionDAG &DAG,
+ SmallVectorImpl<SDNode *> &Created) const {
+ AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
+ if (isIntDivCheap(N->getValueType(0), Attr))
+ return SDValue(N, 0); // Lower SDIV as SDIV
+
+ assert((Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()) &&
+ "Unexpected divisor!");
+
+ // Conditional move is needed, so do the transformation iff Zbt is enabled.
+ if (!Subtarget.hasStdExtZbt())
+ return SDValue();
+
+  // When |Divisor| >= 2 ^ 12, it isn't profitable to do such a transformation.
+ // Besides, more critical path instructions will be generated when dividing
+ // by 2. So we keep using the original DAGs for these cases.
+ unsigned Lg2 = Divisor.countTrailingZeros();
+ if (Lg2 == 1 || Lg2 >= 12)
+ return SDValue();
+
+ // fold (sdiv X, pow2)
+ EVT VT = N->getValueType(0);
+ if (VT != MVT::i32 && !(Subtarget.is64Bit() && VT == MVT::i64))
+ return SDValue();
+
+ SDLoc DL(N);
+ SDValue N0 = N->getOperand(0);
+ SDValue Zero = DAG.getConstant(0, DL, VT);
+ SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);
+
+ // Add (N0 < 0) ? Pow2 - 1 : 0;
+ SDValue Cmp = DAG.getSetCC(DL, VT, N0, Zero, ISD::SETLT);
+ SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
+ SDValue Sel = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);
+
+ Created.push_back(Cmp.getNode());
+ Created.push_back(Add.getNode());
+ Created.push_back(Sel.getNode());
+
+ // Divide by pow2.
+ SDValue SRA =
+ DAG.getNode(ISD::SRA, DL, VT, Sel, DAG.getConstant(Lg2, DL, VT));
+
+ // If we're dividing by a positive value, we're done. Otherwise, we must
+ // negate the result.
+ if (Divisor.isNonNegative())
+ return SRA;
+
+ Created.push_back(SRA.getNode());
+ return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
+}
+
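A scalar model of the expansion built above (assumes arithmetic right shift of negative values, guaranteed since C++20 and universal on RISC-V targets; names illustrative): add (2^lg2 - 1) to negative dividends so the shift rounds toward zero like sdiv, then negate if the divisor was negative.

#include <cassert>
#include <cstdint>

static int32_t sdivByPow2(int32_t X, unsigned Lg2, bool NegativeDivisor) {
  int32_t Add = (X < 0) ? int32_t((1u << Lg2) - 1) : 0; // the select
  int32_t Shifted = (X + Add) >> Lg2;                   // arithmetic shift
  return NegativeDivisor ? -Shifted : Shifted;          // optional negate
}

int main() {
  assert(sdivByPow2(-7, 2, false) == -7 / 4);     // -1, not -2
  assert(sdivByPow2(7, 2, false) == 7 / 4);       // 1
  assert(sdivByPow2(-7, 2, true) == -7 / -4);     // 1
  assert(sdivByPow2(1000, 3, true) == 1000 / -8); // -125
}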
#define GET_REGISTER_MATCHER
#include "RISCVGenAsmMatcher.inc"
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.h b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 48c5ce730933..58b7ec89f875 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -63,11 +63,11 @@ enum NodeType : unsigned {
CLZW,
CTZW,
// RV64IB/RV32IB funnel shifts, with the semantics of the named RISC-V
- // instructions, but the same operand order as fshl/fshr intrinsics.
+ // instructions. Operand order is rs1, rs3, rs2/shamt.
FSR,
FSL,
- // RV64IB funnel shifts, with the semantics of the named RISC-V instructions,
- // but the same operand order as fshl/fshr intrinsics.
+ // RV64IB funnel shifts, with the semantics of the named RISC-V instructions.
+ // Operand order is rs1, rs3, rs2/shamt.
FSRW,
FSLW,
// FPR<->GPR transfer operations when the FPR is smaller than XLEN, needed as
@@ -86,14 +86,16 @@ enum NodeType : unsigned {
FMV_X_ANYEXTW_RV64,
// FP to XLen int conversions. Corresponds to fcvt.l(u).s/d/h on RV64 and
// fcvt.w(u).s/d/h on RV32. Unlike FP_TO_S/UINT these saturate out of
- // range inputs. These are used for FP_TO_S/UINT_SAT lowering.
- FCVT_X_RTZ,
- FCVT_XU_RTZ,
+ // range inputs. These are used for FP_TO_S/UINT_SAT lowering. Rounding mode
+ // is passed as a TargetConstant operand using the RISCVFPRndMode enum.
+ FCVT_X,
+ FCVT_XU,
// FP to 32 bit int conversions for RV64. These are used to keep track of the
// result being sign extended to 64 bit. These saturate out of range inputs.
- // Used for FP_TO_S/UINT and FP_TO_S/UINT_SAT lowering.
- FCVT_W_RTZ_RV64,
- FCVT_WU_RTZ_RV64,
+ // Used for FP_TO_S/UINT and FP_TO_S/UINT_SAT lowering. Rounding mode
+ // is passed as a TargetConstant operand using the RISCVFPRndMode enum.
+ FCVT_W_RV64,
+ FCVT_WU_RV64,
// READ_CYCLE_WIDE - A read of the 64-bit cycle CSR on a 32-bit target
// (returns (Lo, Hi)). It takes a chain operand.
READ_CYCLE_WIDE,
@@ -118,6 +120,13 @@ enum NodeType : unsigned {
BCOMPRESSW,
BDECOMPRESS,
BDECOMPRESSW,
+ // The bit field place (bfp) instruction places up to XLEN/2 LSB bits from rs2
+ // into the value in rs1. The upper bits of rs2 control the length of the bit
+ // field and target position. The layout of rs2 is chosen in a way that makes
+ // it possible to construct rs2 easily using pack[h] instructions and/or
+ // andi/lui.
+ BFP,
+ BFPW,
// Vector Extension
// VMV_V_X_VL matches the semantics of vmv.v.x but includes an extra operand
// for the VL value to be used for the operation.
@@ -236,6 +245,7 @@ enum NodeType : unsigned {
// Widening instructions
VWMUL_VL,
VWMULU_VL,
+ VWADDU_VL,
// Vector compare producing a mask. Fourth operand is input mask. Fifth
// operand is VL.
@@ -243,6 +253,10 @@ enum NodeType : unsigned {
// Vector select with an additional VL operand. This operation is unmasked.
VSELECT_VL,
+ // Vector select with operand #2 (the value when the condition is false) tied
+ // to the destination and an additional VL operand. This operation is
+ // unmasked.
+ VP_MERGE_VL,
// Mask binary operators.
VMAND_VL,
@@ -284,8 +298,8 @@ enum NodeType : unsigned {
// FP to 32 bit int conversions for RV64. These are used to keep track of the
// result being sign extended to 64 bit. These saturate out of range inputs.
- STRICT_FCVT_W_RTZ_RV64 = ISD::FIRST_TARGET_STRICTFP_OPCODE,
- STRICT_FCVT_WU_RTZ_RV64,
+ STRICT_FCVT_W_RV64 = ISD::FIRST_TARGET_STRICTFP_OPCODE,
+ STRICT_FCVT_WU_RV64,
// Memory opcodes start here.
VLE_VL = ISD::FIRST_TARGET_MEMORY_OPCODE,
@@ -462,6 +476,8 @@ public:
SelectionDAG &DAG) const override;
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const override;
+ template <class NodeTy>
+ SDValue getAddr(NodeTy *N, SelectionDAG &DAG, bool IsLocal = true) const;
bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
Type *Ty) const override {
@@ -524,6 +540,16 @@ public:
bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override;
+ SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
+ SmallVectorImpl<SDNode *> &Created) const override;
+
+ unsigned getJumpTableEncoding() const override;
+
+ const MCExpr *LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
+ const MachineBasicBlock *MBB,
+ unsigned uid,
+ MCContext &Ctx) const override;
+
private:
/// RISCVCCAssignFn - This target-specific function extends the default
/// CCValAssign with additional information used to lower RISC-V calling
@@ -544,9 +570,6 @@ private:
bool IsRet, CallLoweringInfo *CLI,
RISCVCCAssignFn Fn) const;
- template <class NodeTy>
- SDValue getAddr(NodeTy *N, SelectionDAG &DAG, bool IsLocal = true) const;
-
SDValue getStaticTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG,
bool UseGOT) const;
SDValue getDynamicTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG) const;
@@ -652,6 +675,15 @@ namespace RISCVVIntrinsicsTable {
struct RISCVVIntrinsicInfo {
unsigned IntrinsicID;
uint8_t SplatOperand;
+ uint8_t VLOperand;
+ bool hasSplatOperand() const {
+ // 0xF is not valid. See NoSplatOperand in IntrinsicsRISCV.td.
+ return SplatOperand != 0xF;
+ }
+ bool hasVLOperand() const {
+ // 0x1F is not valid. See NoVLOperand in IntrinsicsRISCV.td.
+ return VLOperand != 0x1F;
+ }
};
using namespace RISCV;
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index dbfc90f36f80..d39e0805a79c 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -59,12 +59,13 @@ class VSETVLIInfo {
uint8_t MaskAgnostic : 1;
uint8_t MaskRegOp : 1;
uint8_t StoreOp : 1;
+ uint8_t ScalarMovOp : 1;
uint8_t SEWLMULRatioOnly : 1;
public:
VSETVLIInfo()
: AVLImm(0), TailAgnostic(false), MaskAgnostic(false), MaskRegOp(false),
- StoreOp(false), SEWLMULRatioOnly(false) {}
+ StoreOp(false), ScalarMovOp(false), SEWLMULRatioOnly(false) {}
static VSETVLIInfo getUnknown() {
VSETVLIInfo Info;
@@ -96,6 +97,18 @@ public:
assert(hasAVLImm());
return AVLImm;
}
+ bool hasZeroAVL() const {
+ if (hasAVLImm())
+ return getAVLImm() == 0;
+ return false;
+ }
+ bool hasNonZeroAVL() const {
+ if (hasAVLImm())
+ return getAVLImm() > 0;
+ if (hasAVLReg())
+ return getAVLReg() == RISCV::X0;
+ return false;
+ }
bool hasSameAVL(const VSETVLIInfo &Other) const {
assert(isValid() && Other.isValid() &&
@@ -120,7 +133,7 @@ public:
MaskAgnostic = RISCVVType::isMaskAgnostic(VType);
}
void setVTYPE(RISCVII::VLMUL L, unsigned S, bool TA, bool MA, bool MRO,
- bool IsStore) {
+ bool IsStore, bool IsScalarMovOp) {
assert(isValid() && !isUnknown() &&
"Can't set VTYPE for uninitialized or unknown");
VLMul = L;
@@ -129,6 +142,7 @@ public:
MaskAgnostic = MA;
MaskRegOp = MRO;
StoreOp = IsStore;
+ ScalarMovOp = IsScalarMovOp;
}
unsigned encodeVTYPE() const {
@@ -139,6 +153,16 @@ public:
bool hasSEWLMULRatioOnly() const { return SEWLMULRatioOnly; }
+ bool hasSameSEW(const VSETVLIInfo &Other) const {
+ assert(isValid() && Other.isValid() &&
+ "Can't compare invalid VSETVLIInfos");
+ assert(!isUnknown() && !Other.isUnknown() &&
+ "Can't compare VTYPE in unknown state");
+ assert(!SEWLMULRatioOnly && !Other.SEWLMULRatioOnly &&
+ "Can't compare when only LMUL/SEW ratio is valid.");
+ return SEW == Other.SEW;
+ }
+
bool hasSameVTYPE(const VSETVLIInfo &Other) const {
assert(isValid() && Other.isValid() &&
"Can't compare invalid VSETVLIInfos");
@@ -178,6 +202,15 @@ public:
return getSEWLMULRatio() == Other.getSEWLMULRatio();
}
+ bool hasSamePolicy(const VSETVLIInfo &Other) const {
+ assert(isValid() && Other.isValid() &&
+ "Can't compare invalid VSETVLIInfos");
+ assert(!isUnknown() && !Other.isUnknown() &&
+ "Can't compare VTYPE in unknown state");
+ return TailAgnostic == Other.TailAgnostic &&
+ MaskAgnostic == Other.MaskAgnostic;
+ }
+
bool hasCompatibleVTYPE(const VSETVLIInfo &InstrInfo, bool Strict) const {
// Simple case, see if full VTYPE matches.
if (hasSameVTYPE(InstrInfo))
@@ -222,6 +255,15 @@ public:
return true;
}
+  // For vmv.s.x and vfmv.s.f, there are only two behaviors: VL = 0 and
+  // VL > 0. So the two states are compatible whenever we can be sure that
+  // both VLs fall into the same case.
+ if (!Strict && InstrInfo.ScalarMovOp && InstrInfo.hasAVLImm() &&
+ ((hasNonZeroAVL() && InstrInfo.hasNonZeroAVL()) ||
+ (hasZeroAVL() && InstrInfo.hasZeroAVL())) &&
+ hasSameSEW(InstrInfo) && hasSamePolicy(InstrInfo))
+ return true;
+
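As a plain-C++ sketch of that rule (a standalone reference, not the pass's real data structures; immediate AVLs only): for the scalar moves only the zero/non-zero property of AVL matters, so two states agree whenever their AVLs fall on the same side of zero and the SEW and policy bits match.

#include <cassert>

// Simplified stand-in for VSETVLIInfo, modelling immediate AVLs only.
struct ScalarMoveState {
  unsigned AVL;
  unsigned SEW;
  bool TailAgnostic;
  bool MaskAgnostic;
};

bool scalarMoveCompatible(const ScalarMoveState &Cur,
                          const ScalarMoveState &New) {
  bool SameAVLZeroness = (Cur.AVL == 0) == (New.AVL == 0);
  return SameAVLZeroness && Cur.SEW == New.SEW &&
         Cur.TailAgnostic == New.TailAgnostic &&
         Cur.MaskAgnostic == New.MaskAgnostic;
}

int main() {
  ScalarMoveState Cur{4, 32, true, false};
  ScalarMoveState New{7, 32, true, false};
  assert(scalarMoveCompatible(Cur, New));  // both AVL > 0, same SEW/policy
  New.AVL = 0;
  assert(!scalarMoveCompatible(Cur, New)); // VL > 0 vs VL = 0 differ
  return 0;
}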
// The AVL must match.
if (!hasSameAVL(InstrInfo))
return false;
@@ -414,6 +456,36 @@ static MachineInstr *elideCopies(MachineInstr *MI,
}
}
+static bool isScalarMoveInstr(const MachineInstr &MI) {
+ switch (MI.getOpcode()) {
+ default:
+ return false;
+ case RISCV::PseudoVMV_S_X_M1:
+ case RISCV::PseudoVMV_S_X_M2:
+ case RISCV::PseudoVMV_S_X_M4:
+ case RISCV::PseudoVMV_S_X_M8:
+ case RISCV::PseudoVMV_S_X_MF2:
+ case RISCV::PseudoVMV_S_X_MF4:
+ case RISCV::PseudoVMV_S_X_MF8:
+ case RISCV::PseudoVFMV_S_F16_M1:
+ case RISCV::PseudoVFMV_S_F16_M2:
+ case RISCV::PseudoVFMV_S_F16_M4:
+ case RISCV::PseudoVFMV_S_F16_M8:
+ case RISCV::PseudoVFMV_S_F16_MF2:
+ case RISCV::PseudoVFMV_S_F16_MF4:
+ case RISCV::PseudoVFMV_S_F32_M1:
+ case RISCV::PseudoVFMV_S_F32_M2:
+ case RISCV::PseudoVFMV_S_F32_M4:
+ case RISCV::PseudoVFMV_S_F32_M8:
+ case RISCV::PseudoVFMV_S_F32_MF2:
+ case RISCV::PseudoVFMV_S_F64_M1:
+ case RISCV::PseudoVFMV_S_F64_M2:
+ case RISCV::PseudoVFMV_S_F64_M4:
+ case RISCV::PseudoVFMV_S_F64_M8:
+ return true;
+ }
+}
+
static VSETVLIInfo computeInfoForInstr(const MachineInstr &MI, uint64_t TSFlags,
const MachineRegisterInfo *MRI) {
VSETVLIInfo InstrInfo;
@@ -461,6 +533,7 @@ static VSETVLIInfo computeInfoForInstr(const MachineInstr &MI, uint64_t TSFlags,
// If there are no explicit defs, this is a store instruction which can
// ignore the tail and mask policies.
bool StoreOp = MI.getNumExplicitDefs() == 0;
+ bool ScalarMovOp = isScalarMoveInstr(MI);
if (RISCVII::hasVLOp(TSFlags)) {
const MachineOperand &VLOp = MI.getOperand(NumOperands - 2);
@@ -477,7 +550,7 @@ static VSETVLIInfo computeInfoForInstr(const MachineInstr &MI, uint64_t TSFlags,
} else
InstrInfo.setAVLReg(RISCV::NoRegister);
InstrInfo.setVTYPE(VLMul, SEW, /*TailAgnostic*/ TailAgnostic,
- /*MaskAgnostic*/ false, MaskRegOp, StoreOp);
+ /*MaskAgnostic*/ false, MaskRegOp, StoreOp, ScalarMovOp);
return InstrInfo;
}
@@ -1000,6 +1073,13 @@ void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) {
PrevVSETVLIMI->getOperand(2).setImm(NewInfo.encodeVTYPE());
NeedInsertVSETVLI = false;
}
+ if (isScalarMoveInstr(MI) &&
+ ((CurInfo.hasNonZeroAVL() && NewInfo.hasNonZeroAVL()) ||
+ (CurInfo.hasZeroAVL() && NewInfo.hasZeroAVL())) &&
+ NewInfo.hasSameVLMAX(CurInfo)) {
+ PrevVSETVLIMI->getOperand(2).setImm(NewInfo.encodeVTYPE());
+ NeedInsertVSETVLI = false;
+ }
}
if (NeedInsertVSETVLI)
insertVSETVLI(MBB, MI, NewInfo, CurInfo);
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrFormats.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrFormats.td
index 6a16b6354f95..f99d0f56c406 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrFormats.td
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrFormats.td
@@ -206,6 +206,13 @@ class Pseudo<dag outs, dag ins, list<dag> pattern, string opcodestr = "", string
let isCodeGenOnly = 1;
}
+class PseudoQuietFCMP<RegisterClass Ty>
+ : Pseudo<(outs GPR:$rd), (ins Ty:$rs1, Ty:$rs2), []> {
+ let hasSideEffects = 1;
+ let mayLoad = 0;
+ let mayStore = 0;
+}
+
// Pseudo load instructions.
class PseudoLoad<string opcodestr, RegisterClass rdty = GPR>
: Pseudo<(outs rdty:$rd), (ins bare_symbol:$addr), [], opcodestr, "$rd, $addr"> {
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 2e2e00886d57..7baed2793e4e 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -201,8 +201,9 @@ static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI,
if (MBBI->modifiesRegister(RISCV::VL))
return false;
- // Go through all defined operands, including implicit defines.
- for (const MachineOperand &MO : MBBI->operands()) {
+  // Only convert whole register copies to vmv.v.v when the defining
+  // value appears in the explicit operands.
+ for (const MachineOperand &MO : MBBI->explicit_operands()) {
if (!MO.isReg() || !MO.isDef())
continue;
if (!FoundDef && TRI->isSubRegisterEq(MO.getReg(), SrcReg)) {
@@ -914,7 +915,7 @@ void RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
.addMBB(&DestBB, RISCVII::MO_CALL);
RS->enterBasicBlockEnd(MBB);
- unsigned Scav = RS->scavengeRegisterBackwards(RISCV::GPRRegClass,
+ Register Scav = RS->scavengeRegisterBackwards(RISCV::GPRRegClass,
MI.getIterator(), false, 0);
// TODO: The case when there is no scavenged register needs special handling.
assert(Scav != RISCV::NoRegister && "No register is scavenged!");
@@ -1145,6 +1146,9 @@ bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
else
Ok = isUInt<5>(Imm);
break;
+ case RISCVOp::OPERAND_RVKRNUM:
+ Ok = Imm >= 0 && Imm <= 10;
+ break;
}
if (!Ok) {
ErrInfo = "Invalid immediate";
@@ -1399,19 +1403,28 @@ MachineBasicBlock::iterator RISCVInstrInfo::insertOutlinedCall(
#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL) \
RISCV::PseudoV##OP##_##TYPE##_##LMUL
-#define CASE_VFMA_OPCODE_LMULS(OP, TYPE) \
- CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF8): \
- case CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4): \
- case CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2): \
- case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1): \
+#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE) \
+ CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1): \
case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2): \
case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4): \
case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8)
+#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE) \
+ CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2): \
+ case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE)
+
+#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE) \
+ CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4): \
+ case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE)
+
+#define CASE_VFMA_OPCODE_LMULS(OP, TYPE) \
+ CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF8): \
+ case CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)
+
#define CASE_VFMA_SPLATS(OP) \
- CASE_VFMA_OPCODE_LMULS(OP, VF16): \
- case CASE_VFMA_OPCODE_LMULS(OP, VF32): \
- case CASE_VFMA_OPCODE_LMULS(OP, VF64)
+ CASE_VFMA_OPCODE_LMULS_MF4(OP, VF16): \
+ case CASE_VFMA_OPCODE_LMULS_MF2(OP, VF32): \
+ case CASE_VFMA_OPCODE_LMULS_M1(OP, VF64)
// clang-format on
bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
@@ -1430,10 +1443,10 @@ bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
case CASE_VFMA_SPLATS(FNMSUB):
case CASE_VFMA_SPLATS(FNMACC):
case CASE_VFMA_SPLATS(FNMSAC):
- case CASE_VFMA_OPCODE_LMULS(FMACC, VV):
- case CASE_VFMA_OPCODE_LMULS(FMSAC, VV):
- case CASE_VFMA_OPCODE_LMULS(FNMACC, VV):
- case CASE_VFMA_OPCODE_LMULS(FNMSAC, VV):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FMACC, VV):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FMSAC, VV):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FNMACC, VV):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FNMSAC, VV):
case CASE_VFMA_OPCODE_LMULS(MADD, VX):
case CASE_VFMA_OPCODE_LMULS(NMSUB, VX):
case CASE_VFMA_OPCODE_LMULS(MACC, VX):
@@ -1454,10 +1467,10 @@ bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
return false;
return true;
}
- case CASE_VFMA_OPCODE_LMULS(FMADD, VV):
- case CASE_VFMA_OPCODE_LMULS(FMSUB, VV):
- case CASE_VFMA_OPCODE_LMULS(FNMADD, VV):
- case CASE_VFMA_OPCODE_LMULS(FNMSUB, VV):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FMADD, VV):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FMSUB, VV):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FNMADD, VV):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FNMSUB, VV):
case CASE_VFMA_OPCODE_LMULS(MADD, VV):
case CASE_VFMA_OPCODE_LMULS(NMSUB, VV): {
// If the tail policy is undisturbed we can't commute.
@@ -1533,19 +1546,28 @@ bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \
break;
-#define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
- CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
- CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
- CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
+#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE) \
CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)
+#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE) \
+ CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
+ CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)
+
+#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE) \
+ CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
+ CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)
+
+#define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
+ CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
+ CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)
+
#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
- CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, VF16) \
- CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, VF32) \
- CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, VF64)
+ CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VF16) \
+ CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VF32) \
+ CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VF64)
MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
bool NewMI,
@@ -1566,10 +1588,10 @@ MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
case CASE_VFMA_SPLATS(FNMADD):
case CASE_VFMA_SPLATS(FNMSAC):
case CASE_VFMA_SPLATS(FNMSUB):
- case CASE_VFMA_OPCODE_LMULS(FMACC, VV):
- case CASE_VFMA_OPCODE_LMULS(FMSAC, VV):
- case CASE_VFMA_OPCODE_LMULS(FNMACC, VV):
- case CASE_VFMA_OPCODE_LMULS(FNMSAC, VV):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FMACC, VV):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FMSAC, VV):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FNMACC, VV):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FNMSAC, VV):
case CASE_VFMA_OPCODE_LMULS(MADD, VX):
case CASE_VFMA_OPCODE_LMULS(NMSUB, VX):
case CASE_VFMA_OPCODE_LMULS(MACC, VX):
@@ -1592,10 +1614,10 @@ MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMADD, FNMACC)
CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMSAC, FNMSUB)
CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMSUB, FNMSAC)
- CASE_VFMA_CHANGE_OPCODE_LMULS(FMACC, FMADD, VV)
- CASE_VFMA_CHANGE_OPCODE_LMULS(FMSAC, FMSUB, VV)
- CASE_VFMA_CHANGE_OPCODE_LMULS(FNMACC, FNMADD, VV)
- CASE_VFMA_CHANGE_OPCODE_LMULS(FNMSAC, FNMSUB, VV)
+ CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMACC, FMADD, VV)
+ CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMSAC, FMSUB, VV)
+ CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FNMACC, FNMADD, VV)
+ CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FNMSAC, FNMSUB, VV)
CASE_VFMA_CHANGE_OPCODE_LMULS(MACC, MADD, VX)
CASE_VFMA_CHANGE_OPCODE_LMULS(MADD, MACC, VX)
CASE_VFMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VX)
@@ -1609,10 +1631,10 @@ MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
- case CASE_VFMA_OPCODE_LMULS(FMADD, VV):
- case CASE_VFMA_OPCODE_LMULS(FMSUB, VV):
- case CASE_VFMA_OPCODE_LMULS(FNMADD, VV):
- case CASE_VFMA_OPCODE_LMULS(FNMSUB, VV):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FMADD, VV):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FMSUB, VV):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FNMADD, VV):
+ case CASE_VFMA_OPCODE_LMULS_MF4(FNMSUB, VV):
case CASE_VFMA_OPCODE_LMULS(MADD, VV):
case CASE_VFMA_OPCODE_LMULS(NMSUB, VV): {
assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
@@ -1623,10 +1645,10 @@ MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
switch (MI.getOpcode()) {
default:
llvm_unreachable("Unexpected opcode");
- CASE_VFMA_CHANGE_OPCODE_LMULS(FMADD, FMACC, VV)
- CASE_VFMA_CHANGE_OPCODE_LMULS(FMSUB, FMSAC, VV)
- CASE_VFMA_CHANGE_OPCODE_LMULS(FNMADD, FNMACC, VV)
- CASE_VFMA_CHANGE_OPCODE_LMULS(FNMSUB, FNMSAC, VV)
+ CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMADD, FMACC, VV)
+ CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMSUB, FMSAC, VV)
+ CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FNMADD, FNMACC, VV)
+ CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FNMSUB, FNMSAC, VV)
CASE_VFMA_CHANGE_OPCODE_LMULS(MADD, MACC, VV)
CASE_VFMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VV)
}
@@ -1655,13 +1677,16 @@ MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
RISCV::PseudoV##OP##_##LMUL##_TIED
-#define CASE_WIDEOP_OPCODE_LMULS(OP) \
- CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
- case CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
+#define CASE_WIDEOP_OPCODE_LMULS_MF4(OP) \
+ CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
+
+#define CASE_WIDEOP_OPCODE_LMULS(OP) \
+ CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
+ case CASE_WIDEOP_OPCODE_LMULS_MF4(OP)
// clang-format on
#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
@@ -1669,22 +1694,25 @@ MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
NewOpc = RISCV::PseudoV##OP##_##LMUL; \
break;
-#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
- CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
+#define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP) \
CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
+#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
+ CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
+ CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
+
MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI,
LiveVariables *LV,
LiveIntervals *LIS) const {
switch (MI.getOpcode()) {
default:
break;
- case CASE_WIDEOP_OPCODE_LMULS(FWADD_WV):
- case CASE_WIDEOP_OPCODE_LMULS(FWSUB_WV):
+ case CASE_WIDEOP_OPCODE_LMULS_MF4(FWADD_WV):
+ case CASE_WIDEOP_OPCODE_LMULS_MF4(FWSUB_WV):
case CASE_WIDEOP_OPCODE_LMULS(WADD_WV):
case CASE_WIDEOP_OPCODE_LMULS(WADDU_WV):
case CASE_WIDEOP_OPCODE_LMULS(WSUB_WV):
@@ -1694,14 +1722,14 @@ MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI,
switch (MI.getOpcode()) {
default:
llvm_unreachable("Unexpected opcode");
- CASE_WIDEOP_CHANGE_OPCODE_LMULS(FWADD_WV)
- CASE_WIDEOP_CHANGE_OPCODE_LMULS(FWSUB_WV)
+ CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(FWADD_WV)
+ CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(FWSUB_WV)
CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADD_WV)
CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADDU_WV)
CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUB_WV)
CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUBU_WV)
}
- //clang-format on
+ // clang-format on
MachineBasicBlock &MBB = *MI.getParent();
MachineInstrBuilder MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 71eb6f01a4f4..64cd89cda06a 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -402,6 +402,21 @@ def AddiPairImmB : SDNodeXForm<imm, [{
N->getValueType(0));
}]>;
+def XLenSubTrailingOnes : SDNodeXForm<imm, [{
+ uint64_t XLen = Subtarget->getXLen();
+ uint64_t TrailingOnes = N->getAPIntValue().countTrailingOnes();
+ return CurDAG->getTargetConstant(XLen - TrailingOnes, SDLoc(N),
+ N->getValueType(0));
+}]>;
+
+// Check that this mask is a non-empty sequence of ones starting at the
+// least significant bit, that the remaining bits are zero, and that it does
+// not fit in simm12.
+def TrailingOnesMask : PatLeaf<(imm), [{
+ if (!N->hasOneUse())
+ return false;
+ return !isInt<12>(N->getSExtValue()) && isMask_64(N->getZExtValue());
+}], XLenSubTrailingOnes>;
+
//===----------------------------------------------------------------------===//
// Instruction Formats
//===----------------------------------------------------------------------===//
@@ -1019,6 +1034,23 @@ def mul_const_oneuse : PatFrag<(ops node:$A, node:$B),
return false;
}]>;
+def sext_oneuse : PatFrag<(ops node:$A), (sext node:$A), [{
+ return N->hasOneUse();
+}]>;
+
+def zext_oneuse : PatFrag<(ops node:$A), (zext node:$A), [{
+ return N->hasOneUse();
+}]>;
+
+def anyext_oneuse : PatFrag<(ops node:$A), (anyext node:$A), [{
+ return N->hasOneUse();
+}]>;
+
+def fpext_oneuse : PatFrag<(ops node:$A),
+ (any_fpextend node:$A), [{
+ return N->hasOneUse();
+}]>;
+
/// Simple arithmetic operations
def : PatGprGpr<add, ADD>;
@@ -1034,6 +1066,10 @@ def : PatGprUimmLog2XLen<shl, SLLI>;
def : PatGprUimmLog2XLen<srl, SRLI>;
def : PatGprUimmLog2XLen<sra, SRAI>;
+// AND with trailing ones mask exceeding simm12.
+def : Pat<(XLenVT (and GPR:$rs, TrailingOnesMask:$mask)),
+ (SRLI (SLLI $rs, TrailingOnesMask:$mask), TrailingOnesMask:$mask)>;
+
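A quick standalone check of the identity behind this pattern (assuming XLEN = 64 for the illustration; the helper name is invented): ANDing with a mask of k trailing ones equals shifting left and then logically right by XLEN - k, which avoids materializing a constant that does not fit in simm12.

#include <cassert>
#include <cstdint>

uint64_t andTrailingOnes(uint64_t X, unsigned TrailingOnes) {
  unsigned ShAmt = 64 - TrailingOnes; // what XLenSubTrailingOnes computes
  return (X << ShAmt) >> ShAmt;       // SLLI then SRLI on an unsigned value
}

int main() {
  // A 20-bit mask (0xFFFFF) does not fit in simm12, so ANDI cannot encode it.
  uint64_t Mask = (uint64_t{1} << 20) - 1;
  uint64_t X = 0xDEADBEEFCAFEBABEULL;
  assert(andTrailingOnes(X, 20) == (X & Mask));
  return 0;
}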
// Match both a plain shift and one where the shift amount is masked (this is
// typically introduced when the legalizer promotes the shift amount and
// zero-extends it). For RISC-V, the mask is unnecessary as shifts in the base
@@ -1350,6 +1386,10 @@ def ReadFRM : ReadSysReg<SysRegFRM, [FRM]>;
def WriteFRM : WriteSysReg<SysRegFRM, [FRM]>;
def WriteFRMImm : WriteSysRegImm<SysRegFRM, [FRM]>;
+let hasSideEffects = true in {
+def ReadFFLAGS : ReadSysReg<SysRegFFLAGS, [FFLAGS]>;
+def WriteFFLAGS : WriteSysReg<SysRegFFLAGS, [FFLAGS]>;
+}
/// Other pseudo-instructions
// Pessimistically assume the stack pointer will be clobbered
@@ -1476,5 +1516,6 @@ include "RISCVInstrInfoF.td"
include "RISCVInstrInfoD.td"
include "RISCVInstrInfoC.td"
include "RISCVInstrInfoZb.td"
+include "RISCVInstrInfoZk.td"
include "RISCVInstrInfoV.td"
include "RISCVInstrInfoZfh.td"
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoD.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
index d6c31c4804db..2837b92da81f 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
@@ -30,21 +30,12 @@ def RISCVSplitF64 : SDNode<"RISCVISD::SplitF64", SDT_RISCVSplitF64>;
//===----------------------------------------------------------------------===//
let Predicates = [HasStdExtD] in {
-
-let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in
-def FLD : RVInstI<0b011, OPC_LOAD_FP, (outs FPR64:$rd),
- (ins GPR:$rs1, simm12:$imm12),
- "fld", "$rd, ${imm12}(${rs1})">,
- Sched<[WriteFLD64, ReadFMemBase]>;
+def FLD : FPLoad_r<0b011, "fld", FPR64, WriteFLD64>;
// Operands for stores are in the order srcreg, base, offset rather than
// reflecting the order these fields are specified in the instruction
// encoding.
-let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in
-def FSD : RVInstS<0b011, OPC_STORE_FP, (outs),
- (ins FPR64:$rs2, GPR:$rs1, simm12:$imm12),
- "fsd", "$rs2, ${imm12}(${rs1})">,
- Sched<[WriteFST64, ReadStoreData, ReadFMemBase]>;
+def FSD : FPStore_r<0b011, "fsd", FPR64, WriteFST64>;
let SchedRW = [WriteFMA64, ReadFMA64, ReadFMA64, ReadFMA64] in {
def FMADD_D : FPFMA_rrr_frm<OPC_MADD, 0b01, "fmadd.d", FPR64>;
@@ -167,6 +158,10 @@ def : InstAlias<"fge.d $rd, $rs, $rt",
def PseudoFLD : PseudoFloatLoad<"fld", FPR64>;
def PseudoFSD : PseudoStore<"fsd", FPR64>;
+let usesCustomInserter = 1 in {
+def PseudoQuietFLE_D : PseudoQuietFCMP<FPR64>;
+def PseudoQuietFLT_D : PseudoQuietFCMP<FPR64>;
+}
} // Predicates = [HasStdExtD]
//===----------------------------------------------------------------------===//
@@ -231,13 +226,34 @@ def : PatFpr64Fpr64<fminnum, FMIN_D>;
def : PatFpr64Fpr64<fmaxnum, FMAX_D>;
/// Setcc
-
-def : PatFpr64Fpr64<seteq, FEQ_D>;
-def : PatFpr64Fpr64<setoeq, FEQ_D>;
-def : PatFpr64Fpr64<setlt, FLT_D>;
-def : PatFpr64Fpr64<setolt, FLT_D>;
-def : PatFpr64Fpr64<setle, FLE_D>;
-def : PatFpr64Fpr64<setole, FLE_D>;
+// FIXME: SETEQ/SETLT/SETLE imply nonans; can we pick better instructions for
+// the strict versions of those?
+
+// Match non-signaling FEQ_D
+def : PatSetCC<FPR64, any_fsetcc, SETEQ, FEQ_D>;
+def : PatSetCC<FPR64, any_fsetcc, SETOEQ, FEQ_D>;
+def : PatSetCC<FPR64, strict_fsetcc, SETLT, PseudoQuietFLT_D>;
+def : PatSetCC<FPR64, strict_fsetcc, SETOLT, PseudoQuietFLT_D>;
+def : PatSetCC<FPR64, strict_fsetcc, SETLE, PseudoQuietFLE_D>;
+def : PatSetCC<FPR64, strict_fsetcc, SETOLE, PseudoQuietFLE_D>;
+
+// Match signaling FEQ_D
+def : Pat<(strict_fsetccs FPR64:$rs1, FPR64:$rs2, SETEQ),
+ (AND (FLE_D $rs1, $rs2),
+ (FLE_D $rs2, $rs1))>;
+def : Pat<(strict_fsetccs FPR64:$rs1, FPR64:$rs2, SETOEQ),
+ (AND (FLE_D $rs1, $rs2),
+ (FLE_D $rs2, $rs1))>;
+// If both operands are the same, use a single FLE.
+def : Pat<(strict_fsetccs FPR64:$rs1, FPR64:$rs1, SETEQ),
+ (FLE_D $rs1, $rs1)>;
+def : Pat<(strict_fsetccs FPR64:$rs1, FPR64:$rs1, SETOEQ),
+ (FLE_D $rs1, $rs1)>;
+
+def : PatSetCC<FPR64, any_fsetccs, SETLT, FLT_D>;
+def : PatSetCC<FPR64, any_fsetccs, SETOLT, FLT_D>;
+def : PatSetCC<FPR64, any_fsetccs, SETLE, FLE_D>;
+def : PatSetCC<FPR64, any_fsetccs, SETOLE, FLE_D>;
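To illustrate why the signaling-equal patterns above combine two FLE results (a standalone C++ sketch, independent of the TableGen patterns): a <= b && b <= a holds exactly when the operands compare ordered-equal, and unlike a quiet ==, the <= comparisons are signaling, so a quiet NaN operand raises the invalid exception just as fle.d does.

#include <cassert>
#include <cfenv>
#include <cmath>

bool signalingEqual(double A, double B) {
  // Mirrors (AND (FLE a, b), (FLE b, a)); both <= compares signal on NaNs.
  return (A <= B) && (B <= A);
}

int main() {
  assert(signalingEqual(1.5, 1.5));
  assert(!signalingEqual(1.5, 2.5));
  std::feclearexcept(FE_ALL_EXCEPT);
  (void)signalingEqual(std::nan(""), 1.0);
  // On IEEE-754-conforming targets this sets FE_INVALID, unlike a quiet ==.
  return 0;
}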
def Select_FPR64_Using_CC_GPR : SelectCC_rrirr<FPR64, GPR>;
@@ -269,20 +285,22 @@ let Predicates = [HasStdExtD, IsRV32] in {
/// Float constants
def : Pat<(f64 (fpimm0)), (FCVT_D_W (i32 X0))>;
+def : Pat<(f64 (fpimmneg0)), (FSGNJN_D (FCVT_D_W (i32 X0)),
+ (FCVT_D_W (i32 X0)))>;
// double->[u]int. Round-to-zero must be used.
def : Pat<(i32 (any_fp_to_sint FPR64:$rs1)), (FCVT_W_D FPR64:$rs1, 0b001)>;
def : Pat<(i32 (any_fp_to_uint FPR64:$rs1)), (FCVT_WU_D FPR64:$rs1, 0b001)>;
// Saturating double->[u]int32.
-def : Pat<(i32 (riscv_fcvt_x_rtz FPR64:$rs1)), (FCVT_W_D $rs1, 0b001)>;
-def : Pat<(i32 (riscv_fcvt_xu_rtz FPR64:$rs1)), (FCVT_WU_D $rs1, 0b001)>;
+def : Pat<(i32 (riscv_fcvt_x FPR64:$rs1, timm:$frm)), (FCVT_W_D $rs1, timm:$frm)>;
+def : Pat<(i32 (riscv_fcvt_xu FPR64:$rs1, timm:$frm)), (FCVT_WU_D $rs1, timm:$frm)>;
// float->int32 with current rounding mode.
-def : Pat<(i32 (lrint FPR64:$rs1)), (FCVT_W_D $rs1, 0b111)>;
+def : Pat<(i32 (any_lrint FPR64:$rs1)), (FCVT_W_D $rs1, 0b111)>;
// float->int32 rounded to nearest with ties rounded away from zero.
-def : Pat<(i32 (lround FPR64:$rs1)), (FCVT_W_D $rs1, 0b100)>;
+def : Pat<(i32 (any_lround FPR64:$rs1)), (FCVT_W_D $rs1, 0b100)>;
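For reference, the hard-coded immediates in these patterns are the standard RISC-V frm (rounding mode) encodings; a standalone sketch (the enum name is invented, the real one is RISCVFPRndMode per the header comment) spelling them out:

#include <cstdio>

// Standard RISC-V floating-point rounding-mode (frm) encodings used as
// immediate operands by the FCVT patterns above.
enum RISCVFPRndModeSketch : unsigned {
  RNE = 0b000, // round to nearest, ties to even
  RTZ = 0b001, // round toward zero
  RDN = 0b010, // round down (toward -infinity)
  RUP = 0b011, // round up (toward +infinity)
  RMM = 0b100, // round to nearest, ties away from zero
  DYN = 0b111  // use the dynamic rounding mode from the frm CSR
};

int main() {
  std::printf("lrint uses DYN=%u, lround uses RMM=%u, fp_to_sint uses RTZ=%u\n",
              DYN, RMM, RTZ);
  return 0;
}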
// [u]int->double.
def : Pat<(any_sint_to_fp (i32 GPR:$rs1)), (FCVT_D_W GPR:$rs1)>;
@@ -293,6 +311,8 @@ let Predicates = [HasStdExtD, IsRV64] in {
/// Float constants
def : Pat<(f64 (fpimm0)), (FMV_D_X (i64 X0))>;
+def : Pat<(f64 (fpimmneg0)), (FSGNJN_D (FMV_D_X (i64 X0)),
+ (FMV_D_X (i64 X0)))>;
// Moves (no conversion)
def : Pat<(bitconvert (i64 GPR:$rs1)), (FMV_D_X GPR:$rs1)>;
@@ -301,28 +321,28 @@ def : Pat<(i64 (bitconvert FPR64:$rs1)), (FMV_X_D FPR64:$rs1)>;
// Use target specific isd nodes to help us remember the result is sign
// extended. Matching sext_inreg+fptoui/fptosi may cause the conversion to be
// duplicated if it has another user that didn't need the sign_extend.
-def : Pat<(riscv_any_fcvt_w_rtz_rv64 FPR64:$rs1), (FCVT_W_D $rs1, 0b001)>;
-def : Pat<(riscv_any_fcvt_wu_rtz_rv64 FPR64:$rs1), (FCVT_WU_D $rs1, 0b001)>;
+def : Pat<(riscv_any_fcvt_w_rv64 FPR64:$rs1, timm:$frm), (FCVT_W_D $rs1, timm:$frm)>;
+def : Pat<(riscv_any_fcvt_wu_rv64 FPR64:$rs1, timm:$frm), (FCVT_WU_D $rs1, timm:$frm)>;
// [u]int32->fp
def : Pat<(any_sint_to_fp (i64 (sexti32 (i64 GPR:$rs1)))), (FCVT_D_W $rs1)>;
def : Pat<(any_uint_to_fp (i64 (zexti32 (i64 GPR:$rs1)))), (FCVT_D_WU $rs1)>;
// Saturating double->[u]int64.
-def : Pat<(i64 (riscv_fcvt_x_rtz FPR64:$rs1)), (FCVT_L_D $rs1, 0b001)>;
-def : Pat<(i64 (riscv_fcvt_xu_rtz FPR64:$rs1)), (FCVT_LU_D $rs1, 0b001)>;
+def : Pat<(i64 (riscv_fcvt_x FPR64:$rs1, timm:$frm)), (FCVT_L_D $rs1, timm:$frm)>;
+def : Pat<(i64 (riscv_fcvt_xu FPR64:$rs1, timm:$frm)), (FCVT_LU_D $rs1, timm:$frm)>;
// double->[u]int64. Round-to-zero must be used.
def : Pat<(i64 (any_fp_to_sint FPR64:$rs1)), (FCVT_L_D FPR64:$rs1, 0b001)>;
def : Pat<(i64 (any_fp_to_uint FPR64:$rs1)), (FCVT_LU_D FPR64:$rs1, 0b001)>;
// double->int64 with current rounding mode.
-def : Pat<(i64 (lrint FPR64:$rs1)), (FCVT_L_D $rs1, 0b111)>;
-def : Pat<(i64 (llrint FPR64:$rs1)), (FCVT_L_D $rs1, 0b111)>;
+def : Pat<(i64 (any_lrint FPR64:$rs1)), (FCVT_L_D $rs1, 0b111)>;
+def : Pat<(i64 (any_llrint FPR64:$rs1)), (FCVT_L_D $rs1, 0b111)>;
// double->int64 rounded to nearest with ties rounded away from zero.
-def : Pat<(i64 (lround FPR64:$rs1)), (FCVT_L_D $rs1, 0b100)>;
-def : Pat<(i64 (llround FPR64:$rs1)), (FCVT_L_D $rs1, 0b100)>;
+def : Pat<(i64 (any_lround FPR64:$rs1)), (FCVT_L_D $rs1, 0b100)>;
+def : Pat<(i64 (any_llround FPR64:$rs1)), (FCVT_L_D $rs1, 0b100)>;
// [u]int64->fp. Match GCC and default to using dynamic rounding mode.
def : Pat<(any_sint_to_fp (i64 GPR:$rs1)), (FCVT_D_L GPR:$rs1, 0b111)>;
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoF.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
index bb45ed859442..a8ac06ba8da3 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
@@ -20,36 +20,38 @@ def SDT_RISCVFMV_W_X_RV64
def SDT_RISCVFMV_X_ANYEXTW_RV64
: SDTypeProfile<1, 1, [SDTCisVT<0, i64>, SDTCisVT<1, f32>]>;
def SDT_RISCVFCVT_W_RV64
- : SDTypeProfile<1, 1, [SDTCisVT<0, i64>, SDTCisFP<1>]>;
+ : SDTypeProfile<1, 2, [SDTCisVT<0, i64>, SDTCisFP<1>,
+ SDTCisVT<2, i64>]>;
def SDT_RISCVFCVT_X
- : SDTypeProfile<1, 1, [SDTCisVT<0, XLenVT>, SDTCisFP<1>]>;
+ : SDTypeProfile<1, 2, [SDTCisVT<0, XLenVT>, SDTCisFP<1>,
+ SDTCisVT<2, XLenVT>]>;
def riscv_fmv_w_x_rv64
: SDNode<"RISCVISD::FMV_W_X_RV64", SDT_RISCVFMV_W_X_RV64>;
def riscv_fmv_x_anyextw_rv64
: SDNode<"RISCVISD::FMV_X_ANYEXTW_RV64", SDT_RISCVFMV_X_ANYEXTW_RV64>;
-def riscv_fcvt_w_rtz_rv64
- : SDNode<"RISCVISD::FCVT_W_RTZ_RV64", SDT_RISCVFCVT_W_RV64>;
-def riscv_fcvt_wu_rtz_rv64
- : SDNode<"RISCVISD::FCVT_WU_RTZ_RV64", SDT_RISCVFCVT_W_RV64>;
-def riscv_fcvt_x_rtz
- : SDNode<"RISCVISD::FCVT_X_RTZ", SDT_RISCVFCVT_X>;
-def riscv_fcvt_xu_rtz
- : SDNode<"RISCVISD::FCVT_XU_RTZ", SDT_RISCVFCVT_X>;
-
-def riscv_strict_fcvt_w_rtz_rv64
- : SDNode<"RISCVISD::STRICT_FCVT_W_RTZ_RV64", SDT_RISCVFCVT_W_RV64,
+def riscv_fcvt_w_rv64
+ : SDNode<"RISCVISD::FCVT_W_RV64", SDT_RISCVFCVT_W_RV64>;
+def riscv_fcvt_wu_rv64
+ : SDNode<"RISCVISD::FCVT_WU_RV64", SDT_RISCVFCVT_W_RV64>;
+def riscv_fcvt_x
+ : SDNode<"RISCVISD::FCVT_X", SDT_RISCVFCVT_X>;
+def riscv_fcvt_xu
+ : SDNode<"RISCVISD::FCVT_XU", SDT_RISCVFCVT_X>;
+
+def riscv_strict_fcvt_w_rv64
+ : SDNode<"RISCVISD::STRICT_FCVT_W_RV64", SDT_RISCVFCVT_W_RV64,
[SDNPHasChain]>;
-def riscv_strict_fcvt_wu_rtz_rv64
- : SDNode<"RISCVISD::STRICT_FCVT_WU_RTZ_RV64", SDT_RISCVFCVT_W_RV64,
+def riscv_strict_fcvt_wu_rv64
+ : SDNode<"RISCVISD::STRICT_FCVT_WU_RV64", SDT_RISCVFCVT_W_RV64,
[SDNPHasChain]>;
-def riscv_any_fcvt_w_rtz_rv64 : PatFrags<(ops node:$src),
- [(riscv_strict_fcvt_w_rtz_rv64 node:$src),
- (riscv_fcvt_w_rtz_rv64 node:$src)]>;
-def riscv_any_fcvt_wu_rtz_rv64 : PatFrags<(ops node:$src),
- [(riscv_strict_fcvt_wu_rtz_rv64 node:$src),
- (riscv_fcvt_wu_rtz_rv64 node:$src)]>;
+def riscv_any_fcvt_w_rv64 : PatFrags<(ops node:$src, node:$frm),
+ [(riscv_strict_fcvt_w_rv64 node:$src, node:$frm),
+ (riscv_fcvt_w_rv64 node:$src, node:$frm)]>;
+def riscv_any_fcvt_wu_rv64 : PatFrags<(ops node:$src, node:$frm),
+ [(riscv_strict_fcvt_wu_rv64 node:$src, node:$frm),
+ (riscv_fcvt_wu_rv64 node:$src, node:$frm)]>;
//===----------------------------------------------------------------------===//
// Operand and SDNode transformation definitions.
@@ -73,6 +75,22 @@ def frmarg : Operand<XLenVT> {
// Instruction class templates
//===----------------------------------------------------------------------===//
+let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in
+class FPLoad_r<bits<3> funct3, string opcodestr, RegisterClass rty,
+ SchedWrite sw>
+ : RVInstI<funct3, OPC_LOAD_FP, (outs rty:$rd),
+ (ins GPR:$rs1, simm12:$imm12),
+ opcodestr, "$rd, ${imm12}(${rs1})">,
+ Sched<[sw, ReadFMemBase]>;
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in
+class FPStore_r<bits<3> funct3, string opcodestr, RegisterClass rty,
+ SchedWrite sw>
+ : RVInstS<funct3, OPC_STORE_FP, (outs),
+ (ins rty:$rs2, GPR:$rs1, simm12:$imm12),
+ opcodestr, "$rs2, ${imm12}(${rs1})">,
+ Sched<[sw, ReadStoreData, ReadFMemBase]>;
+
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, mayRaiseFPException = 1,
UseNamedOperandTable = 1, hasPostISelHook = 1 in
class FPFMA_rrr_frm<RISCVOpcode opcode, bits<2> funct2, string opcodestr,
@@ -138,20 +156,12 @@ class FPCmp_rr<bits<7> funct7, bits<3> funct3, string opcodestr,
//===----------------------------------------------------------------------===//
let Predicates = [HasStdExtF] in {
-let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in
-def FLW : RVInstI<0b010, OPC_LOAD_FP, (outs FPR32:$rd),
- (ins GPR:$rs1, simm12:$imm12),
- "flw", "$rd, ${imm12}(${rs1})">,
- Sched<[WriteFLD32, ReadFMemBase]>;
+def FLW : FPLoad_r<0b010, "flw", FPR32, WriteFLD32>;
// Operands for stores are in the order srcreg, base, offset rather than
// reflecting the order these fields are specified in the instruction
// encoding.
-let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in
-def FSW : RVInstS<0b010, OPC_STORE_FP, (outs),
- (ins FPR32:$rs2, GPR:$rs1, simm12:$imm12),
- "fsw", "$rs2, ${imm12}(${rs1})">,
- Sched<[WriteFST32, ReadStoreData, ReadFMemBase]>;
+def FSW : FPStore_r<0b010, "fsw", FPR32, WriteFST32>;
let SchedRW = [WriteFMA32, ReadFMA32, ReadFMA32, ReadFMA32] in {
def FMADD_S : FPFMA_rrr_frm<OPC_MADD, 0b00, "fmadd.s", FPR32>;
@@ -299,6 +309,10 @@ def : MnemonicAlias<"fmv.x.s", "fmv.x.w">;
def PseudoFLW : PseudoFloatLoad<"flw", FPR32>;
def PseudoFSW : PseudoStore<"fsw", FPR32>;
+let usesCustomInserter = 1 in {
+def PseudoQuietFLE_S : PseudoQuietFCMP<FPR32>;
+def PseudoQuietFLT_S : PseudoQuietFCMP<FPR32>;
+}
} // Predicates = [HasStdExtF]
//===----------------------------------------------------------------------===//
@@ -306,9 +320,13 @@ def PseudoFSW : PseudoStore<"fsw", FPR32>;
//===----------------------------------------------------------------------===//
/// Floating point constants
-def fpimm0 : PatLeaf<(fpimm), [{ return N->isExactlyValue(+0.0); }]>;
+def fpimm0 : PatLeaf<(fpimm), [{ return N->isExactlyValue(+0.0); }]>;
+def fpimmneg0 : PatLeaf<(fpimm), [{ return N->isExactlyValue(-0.0); }]>;
/// Generic pattern classes
+class PatSetCC<RegisterClass Ty, SDPatternOperator OpNode, CondCode Cond, RVInst Inst>
+ : Pat<(OpNode Ty:$rs1, Ty:$rs2, Cond), (Inst $rs1, $rs2)>;
+
class PatFpr32Fpr32<SDPatternOperator OpNode, RVInstR Inst>
: Pat<(OpNode FPR32:$rs1, FPR32:$rs2), (Inst $rs1, $rs2)>;
@@ -319,6 +337,7 @@ let Predicates = [HasStdExtF] in {
/// Float constants
def : Pat<(f32 (fpimm0)), (FMV_W_X X0)>;
+def : Pat<(f32 (fpimmneg0)), (FSGNJN_S (FMV_W_X X0), (FMV_W_X X0))>;
/// Float conversion operations
@@ -363,13 +382,34 @@ def : PatFpr32Fpr32<fminnum, FMIN_S>;
def : PatFpr32Fpr32<fmaxnum, FMAX_S>;
/// Setcc
-
-def : PatFpr32Fpr32<seteq, FEQ_S>;
-def : PatFpr32Fpr32<setoeq, FEQ_S>;
-def : PatFpr32Fpr32<setlt, FLT_S>;
-def : PatFpr32Fpr32<setolt, FLT_S>;
-def : PatFpr32Fpr32<setle, FLE_S>;
-def : PatFpr32Fpr32<setole, FLE_S>;
+// FIXME: SETEQ/SETLT/SETLE imply nonans; can we pick better instructions for
+// the strict versions of those?
+
+// Match non-signaling FEQ_S
+def : PatSetCC<FPR32, any_fsetcc, SETEQ, FEQ_S>;
+def : PatSetCC<FPR32, any_fsetcc, SETOEQ, FEQ_S>;
+def : PatSetCC<FPR32, strict_fsetcc, SETLT, PseudoQuietFLT_S>;
+def : PatSetCC<FPR32, strict_fsetcc, SETOLT, PseudoQuietFLT_S>;
+def : PatSetCC<FPR32, strict_fsetcc, SETLE, PseudoQuietFLE_S>;
+def : PatSetCC<FPR32, strict_fsetcc, SETOLE, PseudoQuietFLE_S>;
+
+// Match signaling FEQ_S
+def : Pat<(strict_fsetccs FPR32:$rs1, FPR32:$rs2, SETEQ),
+ (AND (FLE_S $rs1, $rs2),
+ (FLE_S $rs2, $rs1))>;
+def : Pat<(strict_fsetccs FPR32:$rs1, FPR32:$rs2, SETOEQ),
+ (AND (FLE_S $rs1, $rs2),
+ (FLE_S $rs2, $rs1))>;
+// If both operands are the same, use a single FLE.
+def : Pat<(strict_fsetccs FPR32:$rs1, FPR32:$rs1, SETEQ),
+ (FLE_S $rs1, $rs1)>;
+def : Pat<(strict_fsetccs FPR32:$rs1, FPR32:$rs1, SETOEQ),
+ (FLE_S $rs1, $rs1)>;
+
+def : PatSetCC<FPR32, any_fsetccs, SETLT, FLT_S>;
+def : PatSetCC<FPR32, any_fsetccs, SETOLT, FLT_S>;
+def : PatSetCC<FPR32, any_fsetccs, SETLE, FLE_S>;
+def : PatSetCC<FPR32, any_fsetccs, SETOLE, FLE_S>;
def Select_FPR32_Using_CC_GPR : SelectCC_rrirr<FPR32, GPR>;
@@ -393,14 +433,14 @@ def : Pat<(i32 (any_fp_to_sint FPR32:$rs1)), (FCVT_W_S $rs1, 0b001)>;
def : Pat<(i32 (any_fp_to_uint FPR32:$rs1)), (FCVT_WU_S $rs1, 0b001)>;
// Saturating float->[u]int32.
-def : Pat<(i32 (riscv_fcvt_x_rtz FPR32:$rs1)), (FCVT_W_S $rs1, 0b001)>;
-def : Pat<(i32 (riscv_fcvt_xu_rtz FPR32:$rs1)), (FCVT_WU_S $rs1, 0b001)>;
+def : Pat<(i32 (riscv_fcvt_x FPR32:$rs1, timm:$frm)), (FCVT_W_S $rs1, timm:$frm)>;
+def : Pat<(i32 (riscv_fcvt_xu FPR32:$rs1, timm:$frm)), (FCVT_WU_S $rs1, timm:$frm)>;
// float->int32 with current rounding mode.
-def : Pat<(i32 (lrint FPR32:$rs1)), (FCVT_W_S $rs1, 0b111)>;
+def : Pat<(i32 (any_lrint FPR32:$rs1)), (FCVT_W_S $rs1, 0b111)>;
// float->int32 rounded to nearest with ties rounded away from zero.
-def : Pat<(i32 (lround FPR32:$rs1)), (FCVT_W_S $rs1, 0b100)>;
+def : Pat<(i32 (any_lround FPR32:$rs1)), (FCVT_W_S $rs1, 0b100)>;
// [u]int->float. Match GCC and default to using dynamic rounding mode.
def : Pat<(any_sint_to_fp (i32 GPR:$rs1)), (FCVT_S_W $rs1, 0b111)>;
@@ -417,24 +457,24 @@ def : Pat<(sext_inreg (riscv_fmv_x_anyextw_rv64 FPR32:$src), i32),
// Use target specific isd nodes to help us remember the result is sign
// extended. Matching sext_inreg+fptoui/fptosi may cause the conversion to be
// duplicated if it has another user that didn't need the sign_extend.
-def : Pat<(riscv_any_fcvt_w_rtz_rv64 FPR32:$rs1), (FCVT_W_S $rs1, 0b001)>;
-def : Pat<(riscv_any_fcvt_wu_rtz_rv64 FPR32:$rs1), (FCVT_WU_S $rs1, 0b001)>;
+def : Pat<(riscv_any_fcvt_w_rv64 FPR32:$rs1, timm:$frm), (FCVT_W_S $rs1, timm:$frm)>;
+def : Pat<(riscv_any_fcvt_wu_rv64 FPR32:$rs1, timm:$frm), (FCVT_WU_S $rs1, timm:$frm)>;
// float->[u]int64. Round-to-zero must be used.
def : Pat<(i64 (any_fp_to_sint FPR32:$rs1)), (FCVT_L_S $rs1, 0b001)>;
def : Pat<(i64 (any_fp_to_uint FPR32:$rs1)), (FCVT_LU_S $rs1, 0b001)>;
// Saturating float->[u]int64.
-def : Pat<(i64 (riscv_fcvt_x_rtz FPR32:$rs1)), (FCVT_L_S $rs1, 0b001)>;
-def : Pat<(i64 (riscv_fcvt_xu_rtz FPR32:$rs1)), (FCVT_LU_S $rs1, 0b001)>;
+def : Pat<(i64 (riscv_fcvt_x FPR32:$rs1, timm:$frm)), (FCVT_L_S $rs1, timm:$frm)>;
+def : Pat<(i64 (riscv_fcvt_xu FPR32:$rs1, timm:$frm)), (FCVT_LU_S $rs1, timm:$frm)>;
// float->int64 with current rounding mode.
-def : Pat<(i64 (lrint FPR32:$rs1)), (FCVT_L_S $rs1, 0b111)>;
-def : Pat<(i64 (llrint FPR32:$rs1)), (FCVT_L_S $rs1, 0b111)>;
+def : Pat<(i64 (any_lrint FPR32:$rs1)), (FCVT_L_S $rs1, 0b111)>;
+def : Pat<(i64 (any_llrint FPR32:$rs1)), (FCVT_L_S $rs1, 0b111)>;
+// float->int64 rounded to nearest with ties rounded away from zero.
-def : Pat<(i64 (lround FPR32:$rs1)), (FCVT_L_S $rs1, 0b100)>;
-def : Pat<(i64 (llround FPR32:$rs1)), (FCVT_L_S $rs1, 0b100)>;
+def : Pat<(i64 (any_lround FPR32:$rs1)), (FCVT_L_S $rs1, 0b100)>;
+def : Pat<(i64 (any_llround FPR32:$rs1)), (FCVT_L_S $rs1, 0b100)>;
// [u]int->fp. Match GCC and default to using dynamic rounding mode.
def : Pat<(any_sint_to_fp (i64 (sexti32 (i64 GPR:$rs1)))), (FCVT_S_W $rs1, 0b111)>;
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
index 173ae43a08d6..306024a3e4fd 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -19,18 +19,22 @@ include "RISCVInstrFormatsV.td"
// Operand and SDNode transformation definitions.
//===----------------------------------------------------------------------===//
-def VTypeIAsmOperand : AsmOperandClass {
- let Name = "VTypeI";
+class VTypeIAsmOperand<int VTypeINum> : AsmOperandClass {
+ let Name = "VTypeI" # VTypeINum;
let ParserMethod = "parseVTypeI";
let DiagnosticType = "InvalidVTypeI";
+ let RenderMethod = "addVTypeIOperands";
}
-def VTypeIOp : Operand<XLenVT> {
- let ParserMatchClass = VTypeIAsmOperand;
+class VTypeIOp<int VTypeINum> : Operand<XLenVT> {
+ let ParserMatchClass = VTypeIAsmOperand<VTypeINum>;
let PrintMethod = "printVTypeI";
- let DecoderMethod = "decodeUImmOperand<11>";
+ let DecoderMethod = "decodeUImmOperand<"#VTypeINum#">";
}
+def VTypeIOp10 : VTypeIOp<10>;
+def VTypeIOp11 : VTypeIOp<11>;
+
def VMaskAsmOperand : AsmOperandClass {
let Name = "RVVMaskRegOpOperand";
let RenderMethod = "addRegOperands";
@@ -77,6 +81,9 @@ def simm5_plus1 : Operand<XLenVT>, ImmLeaf<XLenVT,
}];
}
+def simm5_plus1_nonzero : ImmLeaf<XLenVT,
+ [{return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);}]>;
+
//===----------------------------------------------------------------------===//
// Scheduling definitions.
//===----------------------------------------------------------------------===//
@@ -342,6 +349,27 @@ class VALUVs2<bits<6> funct6, bits<5> vs1, RISCVVFormat opv, string opcodestr>
// Combination of instruction classes.
// Use these multiclasses to define instructions more easily.
//===----------------------------------------------------------------------===//
+
+multiclass VIndexLoadStore<list<int> EEWList> {
+ foreach n = EEWList in {
+ defvar w = !cast<RISCVWidth>("LSWidth" # n);
+
+ def VLUXEI # n # _V :
+ VIndexedLoad<MOPLDIndexedUnord, w, "vluxei" # n # ".v">,
+ VLXSched<n, "U">;
+ def VLOXEI # n # _V :
+ VIndexedLoad<MOPLDIndexedOrder, w, "vloxei" # n # ".v">,
+ VLXSched<n, "O">;
+
+ def VSUXEI # n # _V :
+ VIndexedStore<MOPSTIndexedUnord, w, "vsuxei" # n # ".v">,
+ VSXSched<n, "U">;
+ def VSOXEI # n # _V :
+ VIndexedStore<MOPSTIndexedOrder, w, "vsoxei" # n # ".v">,
+ VSXSched<n, "O">;
+ }
+}
+
multiclass VALU_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
Sched<[WriteVIALUV, ReadVIALUV, ReadVIALUV, ReadVMask]>;
@@ -757,7 +785,7 @@ multiclass VCPR_MV_Mask<string opcodestr, bits<6> funct6, string vm = "v"> {
}
multiclass VWholeLoadN<bits<3> nf, string opcodestr, RegisterClass VRC> {
- foreach l = [8, 16, 32, 64] in {
+ foreach l = [8, 16, 32] in {
defvar w = !cast<RISCVWidth>("LSWidth" # l);
defvar s = !cast<SchedWrite>("WriteVLD" # !add(nf, 1) # "R" # l);
@@ -765,23 +793,27 @@ multiclass VWholeLoadN<bits<3> nf, string opcodestr, RegisterClass VRC> {
Sched<[s, ReadVLDX]>;
}
}
+multiclass VWholeLoadEEW64<bits<3> nf, string opcodestr, RegisterClass VRC, SchedReadWrite schedrw> {
+ def E64_V : VWholeLoad<nf, LSWidth64, opcodestr # "e64.v", VRC>,
+ Sched<[schedrw, ReadVLDX]>;
+}
//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//
-let Predicates = [HasStdExtV] in {
+let Predicates = [HasVInstructions] in {
let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
-def VSETVLI : RVInstSetVLi<(outs GPR:$rd), (ins GPR:$rs1, VTypeIOp:$vtypei),
+def VSETVLI : RVInstSetVLi<(outs GPR:$rd), (ins GPR:$rs1, VTypeIOp11:$vtypei),
"vsetvli", "$rd, $rs1, $vtypei">;
-def VSETIVLI : RVInstSetiVLi<(outs GPR:$rd), (ins uimm5:$uimm, VTypeIOp:$vtypei),
+def VSETIVLI : RVInstSetiVLi<(outs GPR:$rd), (ins uimm5:$uimm, VTypeIOp10:$vtypei),
"vsetivli", "$rd, $uimm, $vtypei">;
def VSETVL : RVInstSetVL<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2),
"vsetvl", "$rd, $rs1, $rs2">;
} // hasSideEffects = 1, mayLoad = 0, mayStore = 0
-foreach eew = [8, 16, 32, 64] in {
+foreach eew = [8, 16, 32] in {
defvar w = !cast<RISCVWidth>("LSWidth" # eew);
// Vector Unit-Stride Instructions
@@ -794,18 +826,12 @@ foreach eew = [8, 16, 32, 64] in {
// Vector Strided Instructions
def VLSE#eew#_V : VStridedLoad<w, "vlse"#eew#".v">, VLSSched<eew>;
def VSSE#eew#_V : VStridedStore<w, "vsse"#eew#".v">, VSSSched<eew>;
-
- // Vector Indexed Instructions
- def VLUXEI#eew#_V :
- VIndexedLoad<MOPLDIndexedUnord, w, "vluxei"#eew#".v">, VLXSched<eew, "U">;
- def VLOXEI#eew#_V :
- VIndexedLoad<MOPLDIndexedOrder, w, "vloxei"#eew#".v">, VLXSched<eew, "O">;
- def VSUXEI#eew#_V :
- VIndexedStore<MOPSTIndexedUnord, w, "vsuxei"#eew#".v">, VSXSched<eew, "U">;
- def VSOXEI#eew#_V :
- VIndexedStore<MOPSTIndexedOrder, w, "vsoxei"#eew#".v">, VSXSched<eew, "O">;
}
+defm "" : VIndexLoadStore<[8, 16, 32]>;
+} // Predicates = [HasVInstructions]
+
+let Predicates = [HasVInstructions] in {
def VLM_V : VUnitStrideLoadMask<"vlm.v">,
Sched<[WriteVLDM, ReadVLDX]>;
def VSM_V : VUnitStrideStoreMask<"vsm.v">,
@@ -820,11 +846,6 @@ defm VL2R : VWholeLoadN<1, "vl2r", VRM2>;
defm VL4R : VWholeLoadN<3, "vl4r", VRM4>;
defm VL8R : VWholeLoadN<7, "vl8r", VRM8>;
-def : InstAlias<"vl1r.v $vd, (${rs1})", (VL1RE8_V VR:$vd, GPR:$rs1)>;
-def : InstAlias<"vl2r.v $vd, (${rs1})", (VL2RE8_V VRM2:$vd, GPR:$rs1)>;
-def : InstAlias<"vl4r.v $vd, (${rs1})", (VL4RE8_V VRM4:$vd, GPR:$rs1)>;
-def : InstAlias<"vl8r.v $vd, (${rs1})", (VL8RE8_V VRM8:$vd, GPR:$rs1)>;
-
def VS1R_V : VWholeStore<0, "vs1r.v", VR>,
Sched<[WriteVST1R, ReadVST1R, ReadVSTX]>;
def VS2R_V : VWholeStore<1, "vs2r.v", VRM2>,
@@ -834,6 +855,40 @@ def VS4R_V : VWholeStore<3, "vs4r.v", VRM4>,
def VS8R_V : VWholeStore<7, "vs8r.v", VRM8>,
Sched<[WriteVST8R, ReadVST8R, ReadVSTX]>;
+def : InstAlias<"vl1r.v $vd, (${rs1})", (VL1RE8_V VR:$vd, GPR:$rs1)>;
+def : InstAlias<"vl2r.v $vd, (${rs1})", (VL2RE8_V VRM2:$vd, GPR:$rs1)>;
+def : InstAlias<"vl4r.v $vd, (${rs1})", (VL4RE8_V VRM4:$vd, GPR:$rs1)>;
+def : InstAlias<"vl8r.v $vd, (${rs1})", (VL8RE8_V VRM8:$vd, GPR:$rs1)>;
+} // Predicates = [HasVInstructions]
+
+let Predicates = [HasVInstructionsI64] in {
+// Vector Unit-Stride Instructions
+def VLE64_V : VUnitStrideLoad<LSWidth64, "vle64.v">,
+ VLESched<64>;
+
+def VLE64FF_V : VUnitStrideLoadFF<LSWidth64, "vle64ff.v">,
+ VLFSched<64>;
+
+def VSE64_V : VUnitStrideStore<LSWidth64, "vse64.v">,
+ VSESched<64>;
+// Vector Strided Instructions
+def VLSE64_V : VStridedLoad<LSWidth64, "vlse64.v">,
+               VLSSched<64>;
+
+def VSSE64_V : VStridedStore<LSWidth64, "vsse64.v">,
+ VSSSched<64>;
+
+defm VL1R: VWholeLoadEEW64<0, "vl1r", VR, WriteVLD1R64>;
+defm VL2R: VWholeLoadEEW64<1, "vl2r", VRM2, WriteVLD2R64>;
+defm VL4R: VWholeLoadEEW64<3, "vl4r", VRM4, WriteVLD4R64>;
+defm VL8R: VWholeLoadEEW64<7, "vl8r", VRM8, WriteVLD8R64>;
+} // Predicates = [HasVInstructionsI64]
+let Predicates = [IsRV64, HasVInstructionsI64] in {
+ // Vector Indexed Instructions
+ defm "" : VIndexLoadStore<[64]>;
+} // [IsRV64, HasVInstructionsI64]
+
+let Predicates = [HasVInstructions] in {
// Vector Single-Width Integer Add and Subtract
defm VADD_V : VALU_IV_V_X_I<"vadd", 0b000000>;
defm VSUB_V : VALU_IV_V_X<"vsub", 0b000010>;
@@ -1065,9 +1120,9 @@ let Constraints = "@earlyclobber $vd" in {
defm VNCLIPU_W : VNCLP_IV_V_X_I<"vnclipu", 0b101110, uimm5, "w">;
defm VNCLIP_W : VNCLP_IV_V_X_I<"vnclip", 0b101111, uimm5, "w">;
} // Constraints = "@earlyclobber $vd"
-} // Predicates = [HasStdExtV]
+} // Predicates = [HasVInstructions]
-let Predicates = [HasStdExtV, HasStdExtF] in {
+let Predicates = [HasVInstructionsAnyF] in {
// Vector Single-Width Floating-Point Add/Subtract Instructions
defm VFADD_V : VALU_FV_V_F<"vfadd", 0b000000>;
defm VFSUB_V : VALU_FV_V_F<"vfsub", 0b000010>;
@@ -1202,9 +1257,9 @@ defm VFNCVT_F_X_W : VNCVTF_IV_VS2<"vfncvt.f.x.w", 0b010010, 0b10011>;
defm VFNCVT_F_F_W : VNCVTF_FV_VS2<"vfncvt.f.f.w", 0b010010, 0b10100>;
defm VFNCVT_ROD_F_F_W : VNCVTF_FV_VS2<"vfncvt.rod.f.f.w", 0b010010, 0b10101>;
} // Constraints = "@earlyclobber $vd"
-} // Predicates = [HasStdExtV, HasStdExtF]
+} // Predicates = [HasVInstructionsAnyF]
-let Predicates = [HasStdExtV] in {
+let Predicates = [HasVInstructions] in {
// Vector Single-Width Integer Reduction Instructions
let RVVConstraint = NoConstraint in {
@@ -1228,9 +1283,9 @@ defm VWREDSUMU : VWRED_IV_V<"vwredsumu", 0b110000>;
defm VWREDSUM : VWRED_IV_V<"vwredsum", 0b110001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
-} // Predicates = [HasStdExtV]
+} // Predicates = [HasVInstructions]
-let Predicates = [HasStdExtV, HasStdExtF] in {
+let Predicates = [HasVInstructionsAnyF] in {
// Vector Single-Width Floating-Point Reduction Instructions
let RVVConstraint = NoConstraint in {
defm VFREDOSUM : VREDO_FV_V<"vfredosum", 0b000011>;
@@ -1254,9 +1309,9 @@ defm VFWREDUSUM : VWRED_FV_V<"vfwredusum", 0b110001>;
def : InstAlias<"vfwredsum.vs $vd, $vs2, $vs1$vm",
(VFWREDUSUM_VS VR:$vd, VR:$vs2, VR:$vs1, VMaskOp:$vm), 0>;
-} // Predicates = [HasStdExtV, HasStdExtF]
+} // Predicates = [HasVInstructionsAnyF]
-let Predicates = [HasStdExtV] in {
+let Predicates = [HasVInstructions] in {
// Vector Mask-Register Logical Instructions
let RVVConstraint = NoConstraint in {
defm VMAND_M : VMALU_MV_Mask<"vmand", 0b011001, "m">;
@@ -1337,9 +1392,9 @@ def VMV_S_X : RVInstV2<0b010000, 0b00000, OPMVX, (outs VR:$vd_wb),
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
-} // Predicates = [HasStdExtV]
+} // Predicates = [HasVInstructions]
-let Predicates = [HasStdExtV, HasStdExtF] in {
+let Predicates = [HasVInstructionsAnyF] in {
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1,
RVVConstraint = NoConstraint in {
@@ -1354,9 +1409,9 @@ def VFMV_S_F : RVInstV2<0b010000, 0b00000, OPFVF, (outs VR:$vd_wb),
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1
-} // Predicates = [HasStdExtV, HasStdExtF]
+} // Predicates = [HasVInstructionsAnyF]
-let Predicates = [HasStdExtV] in {
+let Predicates = [HasVInstructions] in {
// Vector Slide Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
defm VSLIDEUP_V : VSLD_IV_X_I<"vslideup", 0b001110, uimm5>;
@@ -1364,16 +1419,16 @@ defm VSLIDE1UP_V : VSLD1_MV_X<"vslide1up", 0b001110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
defm VSLIDEDOWN_V : VSLD_IV_X_I<"vslidedown", 0b001111, uimm5>;
defm VSLIDE1DOWN_V : VSLD1_MV_X<"vslide1down", 0b001111>;
-} // Predicates = [HasStdExtV]
+} // Predicates = [HasVInstructions]
-let Predicates = [HasStdExtV, HasStdExtF] in {
+let Predicates = [HasVInstructionsAnyF] in {
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
defm VFSLIDE1UP_V : VSLD1_FV_F<"vfslide1up", 0b001110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
defm VFSLIDE1DOWN_V : VSLD1_FV_F<"vfslide1down", 0b001111>;
-} // Predicates = [HasStdExtV, HasStdExtF]
+} // Predicates = [HasVInstructionsAnyF]
-let Predicates = [HasStdExtV] in {
+let Predicates = [HasVInstructions] in {
// Vector Register Gather Instruction
let Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather in {
defm VRGATHER_V : VGTR_IV_V_X_I<"vrgather", 0b001100, uimm5>;
@@ -1404,11 +1459,11 @@ foreach n = [2, 4, 8] in {
}
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
-} // Predicates = [HasStdExtV]
+} // Predicates = [HasVInstructions]
-let Predicates = [HasStdExtZvlsseg] in {
+let Predicates = [HasVInstructions] in {
foreach nf=2-8 in {
- foreach eew = [8, 16, 32, 64] in {
+ foreach eew = [8, 16, 32] in {
defvar w = !cast<RISCVWidth>("LSWidth"#eew);
def VLSEG#nf#E#eew#_V :
@@ -1439,6 +1494,41 @@ let Predicates = [HasStdExtZvlsseg] in {
"vsoxseg"#nf#"ei"#eew#".v">;
}
}
-} // Predicates = [HasStdExtZvlsseg]
+} // Predicates = [HasVInstructions]
+
+let Predicates = [HasVInstructionsI64] in {
+ foreach nf=2-8 in {
+ // Vector Unit-strided Segment Instructions
+ def VLSEG#nf#E64_V :
+ VUnitStrideSegmentLoad<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64.v">;
+ def VLSEG#nf#E64FF_V :
+ VUnitStrideSegmentLoadFF<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64ff.v">;
+ def VSSEG#nf#E64_V :
+ VUnitStrideSegmentStore<!add(nf, -1), LSWidth64, "vsseg"#nf#"e64.v">;
+
+ // Vector Strided Segment Instructions
+ def VLSSEG#nf#E64_V :
+ VStridedSegmentLoad<!add(nf, -1), LSWidth64, "vlsseg"#nf#"e64.v">;
+ def VSSSEG#nf#E64_V :
+ VStridedSegmentStore<!add(nf, -1), LSWidth64, "vssseg"#nf#"e64.v">;
+ }
+} // Predicates = [HasVInstructionsI64]
+let Predicates = [HasVInstructionsI64, IsRV64] in {
+ foreach nf=2-8 in {
+ // Vector Indexed Segment Instructions
+ def VLUXSEG#nf#EI64_V :
+ VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, LSWidth64,
+ "vluxseg"#nf#"ei64.v">;
+ def VLOXSEG#nf#EI64_V :
+ VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, LSWidth64,
+ "vloxseg"#nf#"ei64.v">;
+ def VSUXSEG#nf#EI64_V :
+ VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, LSWidth64,
+ "vsuxseg"#nf#"ei64.v">;
+ def VSOXSEG#nf#EI64_V :
+ VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, LSWidth64,
+ "vsoxseg"#nf#"ei64.v">;
+ }
+} // Predicates = [HasVInstructionsI64, IsRV64]
include "RISCVInstrInfoVPseudos.td"
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 073fa605e0fb..4e7e251bc412 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -71,49 +71,45 @@ def V_MF4 : LMULInfo<0b110, 2, VR, VR, VR,/*NoVReg*/VR,/*NoVReg*/VR, "M
def V_MF2 : LMULInfo<0b111, 4, VR, VR, VR, VR,/*NoVReg*/VR, "MF2">;
// Used to iterate over all possible LMULs.
-def MxList {
- list<LMULInfo> m = [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
-}
+defvar MxList = [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
+// For floating point, which doesn't need MF8.
+defvar MxListF = [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
+
// Used for widening and narrowing instructions as it doesn't contain M8.
-def MxListW {
- list<LMULInfo> m = [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4];
-}
+defvar MxListW = [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4];
+// For floating point, which doesn't need MF8.
+defvar MxListFW = [V_MF4, V_MF2, V_M1, V_M2, V_M4];
+
// Use for zext/sext.vf2
-def MxListVF2 {
- list<LMULInfo> m = [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
-}
+defvar MxListVF2 = [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
+
// Use for zext/sext.vf4
-def MxListVF4 {
- list<LMULInfo> m = [V_MF2, V_M1, V_M2, V_M4, V_M8];
-}
+defvar MxListVF4 = [V_MF2, V_M1, V_M2, V_M4, V_M8];
+
// Use for zext/sext.vf8
-def MxListVF8 {
- list<LMULInfo> m = [V_M1, V_M2, V_M4, V_M8];
+defvar MxListVF8 = [V_M1, V_M2, V_M4, V_M8];
+
+class MxSet<int eew> {
+ list<LMULInfo> m = !cond(!eq(eew, 8) : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
+ !eq(eew, 16) : [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
+ !eq(eew, 32) : [V_MF2, V_M1, V_M2, V_M4, V_M8],
+ !eq(eew, 64) : [V_M1, V_M2, V_M4, V_M8]);
}
-class FPR_Info<RegisterClass regclass, string fx> {
+class FPR_Info<RegisterClass regclass, string fx, list<LMULInfo> mxlist> {
RegisterClass fprclass = regclass;
string FX = fx;
+ list<LMULInfo> MxList = mxlist;
}
-def SCALAR_F16 : FPR_Info<FPR16, "F16">;
-def SCALAR_F32 : FPR_Info<FPR32, "F32">;
-def SCALAR_F64 : FPR_Info<FPR64, "F64">;
+def SCALAR_F16 : FPR_Info<FPR16, "F16", MxSet<16>.m>;
+def SCALAR_F32 : FPR_Info<FPR32, "F32", MxSet<32>.m>;
+def SCALAR_F64 : FPR_Info<FPR64, "F64", MxSet<64>.m>;
-def FPList {
- list<FPR_Info> fpinfo = [SCALAR_F16, SCALAR_F32, SCALAR_F64];
-}
-// Used for widening instructions. It excludes F64.
-def FPListW {
- list<FPR_Info> fpinfo = [SCALAR_F16, SCALAR_F32];
-}
+defvar FPList = [SCALAR_F16, SCALAR_F32, SCALAR_F64];
-class MxSet<int eew> {
- list<LMULInfo> m = !cond(!eq(eew, 8) : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
- !eq(eew, 16) : [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
- !eq(eew, 32) : [V_MF2, V_M1, V_M2, V_M4, V_M8],
- !eq(eew, 64) : [V_M1, V_M2, V_M4, V_M8]);
-}
+// Used for widening instructions. It excludes F64.
+defvar FPListW = [SCALAR_F16, SCALAR_F32];
class NFSet<LMULInfo m> {
list<int> L = !cond(!eq(m.value, V_M8.value): [],
@@ -236,25 +232,25 @@ defset list<VTypeInfo> AllVectors = {
defset list<GroupVTypeInfo> GroupFloatVectors = {
def VF16M2: GroupVTypeInfo<vfloat16m2_t, vfloat16m1_t, vbool8_t, 16,
- VRM2, V_M2, f16, FPR16>;
+ VRM2, V_M2, f16, FPR16>;
def VF16M4: GroupVTypeInfo<vfloat16m4_t, vfloat16m1_t, vbool4_t, 16,
- VRM4, V_M4, f16, FPR16>;
+ VRM4, V_M4, f16, FPR16>;
def VF16M8: GroupVTypeInfo<vfloat16m8_t, vfloat16m1_t, vbool2_t, 16,
- VRM8, V_M8, f16, FPR16>;
+ VRM8, V_M8, f16, FPR16>;
def VF32M2: GroupVTypeInfo<vfloat32m2_t, vfloat32m1_t, vbool16_t, 32,
- VRM2, V_M2, f32, FPR32>;
+ VRM2, V_M2, f32, FPR32>;
def VF32M4: GroupVTypeInfo<vfloat32m4_t, vfloat32m1_t, vbool8_t, 32,
- VRM4, V_M4, f32, FPR32>;
+ VRM4, V_M4, f32, FPR32>;
def VF32M8: GroupVTypeInfo<vfloat32m8_t, vfloat32m1_t, vbool4_t, 32,
- VRM8, V_M8, f32, FPR32>;
+ VRM8, V_M8, f32, FPR32>;
def VF64M2: GroupVTypeInfo<vfloat64m2_t, vfloat64m1_t, vbool32_t, 64,
- VRM2, V_M2, f64, FPR64>;
+ VRM2, V_M2, f64, FPR64>;
def VF64M4: GroupVTypeInfo<vfloat64m4_t, vfloat64m1_t, vbool16_t, 64,
- VRM4, V_M4, f64, FPR64>;
+ VRM4, V_M4, f64, FPR64>;
def VF64M8: GroupVTypeInfo<vfloat64m8_t, vfloat64m1_t, vbool8_t, 64,
- VRM8, V_M8, f64, FPR64>;
+ VRM8, V_M8, f64, FPR64>;
}
}
}
@@ -423,13 +419,14 @@ def RISCVVPseudosTable : GenericTable {
def RISCVVIntrinsicsTable : GenericTable {
let FilterClass = "RISCVVIntrinsic";
let CppTypeName = "RISCVVIntrinsicInfo";
- let Fields = ["IntrinsicID", "SplatOperand"];
+ let Fields = ["IntrinsicID", "SplatOperand", "VLOperand"];
let PrimaryKey = ["IntrinsicID"];
let PrimaryKeyName = "getRISCVVIntrinsicInfo";
}
-class RISCVVLE<bit M, bit Str, bit F, bits<3> S, bits<3> L> {
+class RISCVVLE<bit M, bit TU, bit Str, bit F, bits<3> S, bits<3> L> {
bits<1> Masked = M;
+ bits<1> IsTU = TU;
bits<1> Strided = Str;
bits<1> FF = F;
bits<3> Log2SEW = S;
@@ -440,8 +437,8 @@ class RISCVVLE<bit M, bit Str, bit F, bits<3> S, bits<3> L> {
def RISCVVLETable : GenericTable {
let FilterClass = "RISCVVLE";
let CppTypeName = "VLEPseudo";
- let Fields = ["Masked", "Strided", "FF", "Log2SEW", "LMUL", "Pseudo"];
- let PrimaryKey = ["Masked", "Strided", "FF", "Log2SEW", "LMUL"];
+ let Fields = ["Masked", "IsTU", "Strided", "FF", "Log2SEW", "LMUL", "Pseudo"];
+ let PrimaryKey = ["Masked", "IsTU", "Strided", "FF", "Log2SEW", "LMUL"];
let PrimaryKeyName = "getVLEPseudo";
}
@@ -461,8 +458,9 @@ def RISCVVSETable : GenericTable {
let PrimaryKeyName = "getVSEPseudo";
}
-class RISCVVLX_VSX<bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> {
+class RISCVVLX_VSX<bit M, bit TU, bit O, bits<3> S, bits<3> L, bits<3> IL> {
bits<1> Masked = M;
+ bits<1> IsTU = TU;
bits<1> Ordered = O;
bits<3> Log2SEW = S;
bits<3> LMUL = L;
@@ -470,15 +468,15 @@ class RISCVVLX_VSX<bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> {
Pseudo Pseudo = !cast<Pseudo>(NAME);
}
-class RISCVVLX<bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> :
- RISCVVLX_VSX<M, O, S, L, IL>;
+class RISCVVLX<bit M, bit TU, bit O, bits<3> S, bits<3> L, bits<3> IL> :
+ RISCVVLX_VSX<M, TU, O, S, L, IL>;
class RISCVVSX<bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> :
- RISCVVLX_VSX<M, O, S, L, IL>;
+ RISCVVLX_VSX<M, /*TU*/0, O, S, L, IL>;
class RISCVVLX_VSXTable : GenericTable {
let CppTypeName = "VLX_VSXPseudo";
- let Fields = ["Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
- let PrimaryKey = ["Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
+ let Fields = ["Masked", "IsTU", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
+ let PrimaryKey = ["Masked", "IsTU", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
}
def RISCVVLXTable : RISCVVLX_VSXTable {
@@ -583,10 +581,11 @@ class PseudoToVInst<string PseudoInst> {
!subst("_B64", "",
!subst("_MASK", "",
!subst("_TIED", "",
+ !subst("_TU", "",
!subst("F16", "F",
!subst("F32", "F",
!subst("F64", "F",
- !subst("Pseudo", "", PseudoInst))))))))))))))))))));
+ !subst("Pseudo", "", PseudoInst)))))))))))))))))))));
}
// The destination vector register group for a masked vector instruction cannot
@@ -632,7 +631,7 @@ class VPseudoUSLoadNoMask<VReg RetClass, int EEW, bit isFF> :
Pseudo<(outs RetClass:$rd),
(ins GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVVLE</*Masked*/0, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
+ RISCVVLE</*Masked*/0, /*TU*/0, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
@@ -642,13 +641,29 @@ class VPseudoUSLoadNoMask<VReg RetClass, int EEW, bit isFF> :
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
+class VPseudoUSLoadNoMaskTU<VReg RetClass, int EEW, bit isFF> :
+ Pseudo<(outs RetClass:$rd),
+ (ins RetClass:$dest, GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
+ RISCVVPseudo,
+ RISCVVLE</*Masked*/0, /*TU*/1, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
+ let mayLoad = 1;
+ let mayStore = 0;
+ let hasSideEffects = 0;
+ let HasVLOp = 1;
+ let HasSEWOp = 1;
+ let HasDummyMask = 1;
+ let HasMergeOp = 1;
+ let Constraints = "$rd = $dest";
+ let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
class VPseudoUSLoadMask<VReg RetClass, int EEW, bit isFF> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge,
GPR:$rs1,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>,
RISCVVPseudo,
- RISCVVLE</*Masked*/1, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
+ RISCVVLE</*Masked*/1, /*TU*/1, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
@@ -664,7 +679,7 @@ class VPseudoSLoadNoMask<VReg RetClass, int EEW>:
Pseudo<(outs RetClass:$rd),
(ins GPR:$rs1, GPR:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVVLE</*Masked*/0, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
+ RISCVVLE</*Masked*/0, /*TU*/0, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
@@ -674,13 +689,29 @@ class VPseudoSLoadNoMask<VReg RetClass, int EEW>:
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
+class VPseudoSLoadNoMaskTU<VReg RetClass, int EEW>:
+ Pseudo<(outs RetClass:$rd),
+ (ins RetClass:$dest, GPR:$rs1, GPR:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
+ RISCVVPseudo,
+ RISCVVLE</*Masked*/0, /*TU*/1, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
+ let mayLoad = 1;
+ let mayStore = 0;
+ let hasSideEffects = 0;
+ let HasVLOp = 1;
+ let HasSEWOp = 1;
+ let HasDummyMask = 1;
+ let HasMergeOp = 1;
+ let Constraints = "$rd = $dest";
+ let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
class VPseudoSLoadMask<VReg RetClass, int EEW>:
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge,
GPR:$rs1, GPR:$rs2,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>,
RISCVVPseudo,
- RISCVVLE</*Masked*/1, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
+ RISCVVLE</*Masked*/1, /*TU*/1, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
@@ -695,9 +726,10 @@ class VPseudoSLoadMask<VReg RetClass, int EEW>:
class VPseudoILoadNoMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
bit Ordered, bit EarlyClobber>:
Pseudo<(outs RetClass:$rd),
- (ins GPR:$rs1, IdxClass:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
+ (ins GPR:$rs1, IdxClass:$rs2, AVL:$vl,
+ ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVVLX</*Masked*/0, Ordered, log2<EEW>.val, VLMul, LMUL> {
+ RISCVVLX</*Masked*/0, /*TU*/0, Ordered, log2<EEW>.val, VLMul, LMUL> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
@@ -708,6 +740,24 @@ class VPseudoILoadNoMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
+class VPseudoILoadNoMaskTU<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
+ bit Ordered, bit EarlyClobber>:
+ Pseudo<(outs RetClass:$rd),
+ (ins RetClass:$dest, GPR:$rs1, IdxClass:$rs2, AVL:$vl,
+ ixlenimm:$sew),[]>,
+ RISCVVPseudo,
+ RISCVVLX</*Masked*/0, /*TU*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
+ let mayLoad = 1;
+ let mayStore = 0;
+ let hasSideEffects = 0;
+ let HasVLOp = 1;
+ let HasSEWOp = 1;
+ let HasDummyMask = 1;
+ let HasMergeOp = 1;
+ let Constraints = !if(!eq(EarlyClobber, 1), "@earlyclobber $rd, $rd = $dest", "$rd = $dest");
+ let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
class VPseudoILoadMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
bit Ordered, bit EarlyClobber>:
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
@@ -715,7 +765,7 @@ class VPseudoILoadMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
GPR:$rs1, IdxClass:$rs2,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>,
RISCVVPseudo,
- RISCVVLX</*Masked*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
+ RISCVVLX</*Masked*/1, /*TU*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
@@ -932,6 +982,9 @@ class VPseudoBinaryNoMask<VReg RetClass,
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
+// Special version of VPseudoBinaryNoMask where we pretend the first source is
+// tied to the destination.
+// This allows maskedoff and rs2 to be the same register.
class VPseudoTiedBinaryNoMask<VReg RetClass,
DAGOperand Op2Class,
string Constraint> :
@@ -1083,6 +1136,30 @@ class VPseudoBinaryCarryIn<VReg RetClass,
let VLMul = MInfo.value;
}
+class VPseudoTiedBinaryCarryIn<VReg RetClass,
+ VReg Op1Class,
+ DAGOperand Op2Class,
+ LMULInfo MInfo,
+ bit CarryIn,
+ string Constraint> :
+ Pseudo<(outs RetClass:$rd),
+ !if(CarryIn,
+ (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, VMV0:$carry, AVL:$vl,
+ ixlenimm:$sew),
+ (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew)), []>,
+ RISCVVPseudo {
+ let mayLoad = 0;
+ let mayStore = 0;
+ let hasSideEffects = 0;
+ let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
+ let HasVLOp = 1;
+ let HasSEWOp = 1;
+ let HasMergeOp = 1;
+ let HasVecPolicyOp = 0;
+ let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+ let VLMul = MInfo.value;
+}
+
class VPseudoTernaryNoMask<VReg RetClass,
RegisterClass Op1Class,
DAGOperand Op2Class,
@@ -1323,6 +1400,9 @@ multiclass VPseudoUSLoad {
def "E" # eew # "_V_" # LInfo :
VPseudoUSLoadNoMask<vreg, eew, false>,
VLESched<eew>;
+ def "E" # eew # "_V_" # LInfo # "_TU":
+ VPseudoUSLoadNoMaskTU<vreg, eew, false>,
+ VLESched<eew>;
def "E" # eew # "_V_" # LInfo # "_MASK" :
VPseudoUSLoadMask<vreg, eew, false>,
VLESched<eew>;
@@ -1340,6 +1420,9 @@ multiclass VPseudoFFLoad {
def "E" # eew # "FF_V_" # LInfo :
VPseudoUSLoadNoMask<vreg, eew, true>,
VLFSched<eew>;
+ def "E" # eew # "FF_V_" # LInfo # "_TU":
+ VPseudoUSLoadNoMaskTU<vreg, eew, true>,
+ VLFSched<eew>;
def "E" # eew # "FF_V_" # LInfo # "_MASK" :
VPseudoUSLoadMask<vreg, eew, true>,
VLFSched<eew>;
@@ -1364,6 +1447,8 @@ multiclass VPseudoSLoad {
let VLMul = lmul.value in {
def "E" # eew # "_V_" # LInfo : VPseudoSLoadNoMask<vreg, eew>,
VLSSched<eew>;
+ def "E" # eew # "_V_" # LInfo # "_TU": VPseudoSLoadNoMaskTU<vreg, eew>,
+ VLSSched<eew>;
def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSLoadMask<vreg, eew>,
VLSSched<eew>;
}
@@ -1390,6 +1475,9 @@ multiclass VPseudoILoad<bit Ordered> {
def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo :
VPseudoILoadNoMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered, HasConstraint>,
VLXSched<eew, Order>;
+ def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_TU":
+ VPseudoILoadNoMaskTU<Vreg, IdxVreg, eew, idx_lmul.value, Ordered, HasConstraint>,
+ VLXSched<eew, Order>;
def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_MASK" :
VPseudoILoadMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered, HasConstraint>,
VLXSched<eew, Order>;
@@ -1504,7 +1592,7 @@ multiclass VPseudoVSFS_M {
}
multiclass VPseudoVID_V {
- foreach m = MxList.m in {
+ foreach m = MxList in {
let VLMul = m.value in {
def "_V_" # m.MX : VPseudoNullaryNoMask<m.vrclass>,
Sched<[WriteVMIdxV, ReadVMask]>;
@@ -1524,7 +1612,7 @@ multiclass VPseudoNullaryPseudoM <string BaseInst> {
multiclass VPseudoVIOT_M {
defvar constraint = "@earlyclobber $rd";
- foreach m = MxList.m in {
+ foreach m = MxList in {
let VLMul = m.value in {
def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, VR, constraint>,
Sched<[WriteVMIotV, ReadVMIotV, ReadVMask]>;
@@ -1535,7 +1623,7 @@ multiclass VPseudoVIOT_M {
}
multiclass VPseudoVCPR_V {
- foreach m = MxList.m in {
+ foreach m = MxList in {
let VLMul = m.value in
def _VM # "_" # m.MX : VPseudoUnaryAnyMask<m.vrclass, m.vrclass>,
Sched<[WriteVCompressV, ReadVCompressV, ReadVCompressV]>;
@@ -1596,12 +1684,18 @@ multiclass VPseudoTiedBinary<VReg RetClass,
}
multiclass VPseudoBinaryV_VV<string Constraint = ""> {
- foreach m = MxList.m in
+ foreach m = MxList in
+ defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint>;
+}
+
+// Similar to VPseudoBinaryV_VV, but uses MxListF.
+multiclass VPseudoBinaryFV_VV<string Constraint = ""> {
+ foreach m = MxListF in
defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint>;
}
multiclass VPseudoVGTR_VV_EEW<int eew, string Constraint = ""> {
- foreach m = MxList.m in {
+ foreach m = MxList in {
foreach sew = EEWList in {
defvar octuple_lmul = m.octuple;
// emul = lmul * eew / sew
@@ -1617,38 +1711,38 @@ multiclass VPseudoVGTR_VV_EEW<int eew, string Constraint = ""> {
}
multiclass VPseudoBinaryV_VX<string Constraint = ""> {
- foreach m = MxList.m in
+ foreach m = MxList in
defm "_VX" : VPseudoBinary<m.vrclass, m.vrclass, GPR, m, Constraint>;
}
multiclass VPseudoVSLD1_VX<string Constraint = ""> {
- foreach m = MxList.m in
+ foreach m = MxList in
defm "_VX" : VPseudoBinary<m.vrclass, m.vrclass, GPR, m, Constraint>,
Sched<[WriteVISlide1X, ReadVISlideV, ReadVISlideX, ReadVMask]>;
}
multiclass VPseudoBinaryV_VF<string Constraint = ""> {
- foreach m = MxList.m in
- foreach f = FPList.fpinfo in
+ foreach f = FPList in
+ foreach m = f.MxList in
defm "_V" # f.FX : VPseudoBinary<m.vrclass, m.vrclass,
f.fprclass, m, Constraint>;
}
multiclass VPseudoVSLD1_VF<string Constraint = ""> {
- foreach m = MxList.m in
- foreach f = FPList.fpinfo in
+ foreach f = FPList in
+ foreach m = f.MxList in
defm "_V" # f.FX :
VPseudoBinary<m.vrclass, m.vrclass, f.fprclass, m, Constraint>,
Sched<[WriteVFSlide1F, ReadVFSlideV, ReadVFSlideF, ReadVMask]>;
}
multiclass VPseudoBinaryV_VI<Operand ImmType = simm5, string Constraint = ""> {
- foreach m = MxList.m in
+ foreach m = MxList in
defm _VI : VPseudoBinary<m.vrclass, m.vrclass, ImmType, m, Constraint>;
}
multiclass VPseudoVALU_MM {
- foreach m = MxList.m in
+ foreach m = MxList in
let VLMul = m.value in {
def "_MM_" # m.MX : VPseudoBinaryNoMask<VR, VR, VR, "">,
Sched<[WriteVMALUV, ReadVMALUV, ReadVMALUV]>;
@@ -1662,28 +1756,28 @@ multiclass VPseudoVALU_MM {
// * The destination EEW is greater than the source EEW, the source EMUL is
// at least 1, and the overlap is in the highest-numbered part of the
// destination register group; in that case the overlap is legal. Otherwise, it is illegal.
-multiclass VPseudoBinaryW_VV {
- foreach m = MxListW.m in
+multiclass VPseudoBinaryW_VV<list<LMULInfo> mxlist = MxListW> {
+ foreach m = mxlist in
defm _VV : VPseudoBinary<m.wvrclass, m.vrclass, m.vrclass, m,
"@earlyclobber $rd">;
}
multiclass VPseudoBinaryW_VX {
- foreach m = MxListW.m in
+ foreach m = MxListW in
defm "_VX" : VPseudoBinary<m.wvrclass, m.vrclass, GPR, m,
"@earlyclobber $rd">;
}
multiclass VPseudoBinaryW_VF {
- foreach m = MxListW.m in
- foreach f = FPListW.fpinfo in
+ foreach f = FPListW in
+ foreach m = f.MxList in
defm "_V" # f.FX : VPseudoBinary<m.wvrclass, m.vrclass,
f.fprclass, m,
"@earlyclobber $rd">;
}
-multiclass VPseudoBinaryW_WV {
- foreach m = MxListW.m in {
+multiclass VPseudoBinaryW_WV<list<LMULInfo> mxlist = MxListW> {
+ foreach m = mxlist in {
defm _WV : VPseudoBinary<m.wvrclass, m.wvrclass, m.vrclass, m,
"@earlyclobber $rd">;
defm _WV : VPseudoTiedBinary<m.wvrclass, m.vrclass, m,
@@ -1692,13 +1786,13 @@ multiclass VPseudoBinaryW_WV {
}
multiclass VPseudoBinaryW_WX {
- foreach m = MxListW.m in
+ foreach m = MxListW in
defm "_WX" : VPseudoBinary<m.wvrclass, m.wvrclass, GPR, m>;
}
multiclass VPseudoBinaryW_WF {
- foreach m = MxListW.m in
- foreach f = FPListW.fpinfo in
+ foreach f = FPListW in
+ foreach m = f.MxList in
defm "_W" # f.FX : VPseudoBinary<m.wvrclass, m.wvrclass,
f.fprclass, m>;
}
@@ -1709,19 +1803,19 @@ multiclass VPseudoBinaryW_WF {
// "The destination EEW is smaller than the source EEW and the overlap is in the
// lowest-numbered part of the source register group."
multiclass VPseudoBinaryV_WV {
- foreach m = MxListW.m in
+ foreach m = MxListW in
defm _WV : VPseudoBinary<m.vrclass, m.wvrclass, m.vrclass, m,
!if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
}
multiclass VPseudoBinaryV_WX {
- foreach m = MxListW.m in
+ foreach m = MxListW in
defm _WX : VPseudoBinary<m.vrclass, m.wvrclass, GPR, m,
!if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
}
multiclass VPseudoBinaryV_WI {
- foreach m = MxListW.m in
+ foreach m = MxListW in
defm _WI : VPseudoBinary<m.vrclass, m.wvrclass, uimm5, m,
!if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
}
@@ -1731,7 +1825,7 @@ multiclass VPseudoBinaryV_WI {
// For vadc and vsbc, CarryIn == 1 and CarryOut == 0
multiclass VPseudoBinaryV_VM<bit CarryOut = 0, bit CarryIn = 1,
string Constraint = ""> {
- foreach m = MxList.m in
+ foreach m = MxList in
def "_VV" # !if(CarryIn, "M", "") # "_" # m.MX :
VPseudoBinaryCarryIn<!if(CarryOut, VR,
!if(!and(CarryIn, !not(CarryOut)),
@@ -1739,9 +1833,19 @@ multiclass VPseudoBinaryV_VM<bit CarryOut = 0, bit CarryIn = 1,
m.vrclass, m.vrclass, m, CarryIn, Constraint>;
}
+multiclass VPseudoTiedBinaryV_VM<bit CarryOut = 0, bit CarryIn = 1,
+ string Constraint = ""> {
+ foreach m = MxList in
+ def "_VV" # !if(CarryIn, "M", "") # "_" # m.MX # "_TU" :
+ VPseudoTiedBinaryCarryIn<!if(CarryOut, VR,
+ !if(!and(CarryIn, !not(CarryOut)),
+ GetVRegNoV0<m.vrclass>.R, m.vrclass)),
+ m.vrclass, m.vrclass, m, CarryIn, Constraint>;
+}
+
multiclass VPseudoBinaryV_XM<bit CarryOut = 0, bit CarryIn = 1,
string Constraint = ""> {
- foreach m = MxList.m in
+ foreach m = MxList in
def "_VX" # !if(CarryIn, "M", "") # "_" # m.MX :
VPseudoBinaryCarryIn<!if(CarryOut, VR,
!if(!and(CarryIn, !not(CarryOut)),
@@ -1749,18 +1853,34 @@ multiclass VPseudoBinaryV_XM<bit CarryOut = 0, bit CarryIn = 1,
m.vrclass, GPR, m, CarryIn, Constraint>;
}
+multiclass VPseudoTiedBinaryV_XM<bit CarryOut = 0, bit CarryIn = 1,
+ string Constraint = ""> {
+ foreach m = MxList in
+ def "_VX" # !if(CarryIn, "M", "") # "_" # m.MX # "_TU":
+ VPseudoTiedBinaryCarryIn<!if(CarryOut, VR,
+ !if(!and(CarryIn, !not(CarryOut)),
+ GetVRegNoV0<m.vrclass>.R, m.vrclass)),
+ m.vrclass, GPR, m, CarryIn, Constraint>;
+}
+
multiclass VPseudoVMRG_FM {
- foreach m = MxList.m in
- foreach f = FPList.fpinfo in
+ foreach f = FPList in
+ foreach m = f.MxList in {
def "_V" # f.FX # "M_" # m.MX :
VPseudoBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
m.vrclass, f.fprclass, m, /*CarryIn=*/1, "">,
Sched<[WriteVFMergeV, ReadVFMergeV, ReadVFMergeF, ReadVMask]>;
+ // Tied version to allow codegen control over the tail elements
+ def "_V" # f.FX # "M_" # m.MX # "_TU":
+ VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
+ m.vrclass, f.fprclass, m, /*CarryIn=*/1, "">,
+ Sched<[WriteVFMergeV, ReadVFMergeV, ReadVFMergeF, ReadVMask]>;
+ }
}
multiclass VPseudoBinaryV_IM<bit CarryOut = 0, bit CarryIn = 1,
string Constraint = ""> {
- foreach m = MxList.m in
+ foreach m = MxList in
def "_VI" # !if(CarryIn, "M", "") # "_" # m.MX :
VPseudoBinaryCarryIn<!if(CarryOut, VR,
!if(!and(CarryIn, !not(CarryOut)),
@@ -1768,8 +1888,18 @@ multiclass VPseudoBinaryV_IM<bit CarryOut = 0, bit CarryIn = 1,
m.vrclass, simm5, m, CarryIn, Constraint>;
}
+multiclass VPseudoTiedBinaryV_IM<bit CarryOut = 0, bit CarryIn = 1,
+ string Constraint = ""> {
+ foreach m = MxList in
+ def "_VI" # !if(CarryIn, "M", "") # "_" # m.MX # "_TU":
+ VPseudoTiedBinaryCarryIn<!if(CarryOut, VR,
+ !if(!and(CarryIn, !not(CarryOut)),
+ GetVRegNoV0<m.vrclass>.R, m.vrclass)),
+ m.vrclass, simm5, m, CarryIn, Constraint>;
+}
+
multiclass VPseudoUnaryVMV_V_X_I {
- foreach m = MxList.m in {
+ foreach m = MxList in {
let VLMul = m.value in {
def "_V_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, m.vrclass>,
Sched<[WriteVIMovV, ReadVIMovV]>;
@@ -1782,8 +1912,8 @@ multiclass VPseudoUnaryVMV_V_X_I {
}
multiclass VPseudoVMV_F {
- foreach m = MxList.m in {
- foreach f = FPList.fpinfo in {
+ foreach f = FPList in {
+ foreach m = f.MxList in {
let VLMul = m.value in {
def "_" # f.FX # "_" # m.MX :
VPseudoUnaryNoDummyMask<m.vrclass, f.fprclass>,
@@ -1794,7 +1924,7 @@ multiclass VPseudoVMV_F {
}
multiclass VPseudoVCLS_V {
- foreach m = MxList.m in {
+ foreach m = MxListF in {
let VLMul = m.value in {
def "_V_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
Sched<[WriteVFClassV, ReadVFClassV, ReadVMask]>;
@@ -1805,7 +1935,7 @@ multiclass VPseudoVCLS_V {
}
multiclass VPseudoVSQR_V {
- foreach m = MxList.m in {
+ foreach m = MxListF in {
let VLMul = m.value in {
def "_V_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
Sched<[WriteVFSqrtV, ReadVFSqrtV, ReadVMask]>;
@@ -1816,7 +1946,7 @@ multiclass VPseudoVSQR_V {
}
multiclass VPseudoVRCP_V {
- foreach m = MxList.m in {
+ foreach m = MxListF in {
let VLMul = m.value in {
def "_V_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
Sched<[WriteVFRecpV, ReadVFRecpV, ReadVMask]>;
@@ -1828,7 +1958,7 @@ multiclass VPseudoVRCP_V {
multiclass PseudoVEXT_VF2 {
defvar constraints = "@earlyclobber $rd";
- foreach m = MxListVF2.m in
+ foreach m = MxListVF2 in
{
let VLMul = m.value in {
def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f2vrclass, constraints>,
@@ -1842,7 +1972,7 @@ multiclass PseudoVEXT_VF2 {
multiclass PseudoVEXT_VF4 {
defvar constraints = "@earlyclobber $rd";
- foreach m = MxListVF4.m in
+ foreach m = MxListVF4 in
{
let VLMul = m.value in {
def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f4vrclass, constraints>,
@@ -1856,7 +1986,7 @@ multiclass PseudoVEXT_VF4 {
multiclass PseudoVEXT_VF8 {
defvar constraints = "@earlyclobber $rd";
- foreach m = MxListVF8.m in
+ foreach m = MxListVF8 in
{
let VLMul = m.value in {
def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f8vrclass, constraints>,
@@ -1879,29 +2009,29 @@ multiclass PseudoVEXT_VF8 {
// lowest-numbered part of the source register group".
// With LMUL<=1 the source and dest occupy a single register so any overlap
// is in the lowest-numbered part.
-multiclass VPseudoBinaryM_VV {
- foreach m = MxList.m in
+multiclass VPseudoBinaryM_VV<list<LMULInfo> mxlist = MxList> {
+ foreach m = mxlist in
defm _VV : VPseudoBinaryM<VR, m.vrclass, m.vrclass, m,
!if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
}
multiclass VPseudoBinaryM_VX {
- foreach m = MxList.m in
+ foreach m = MxList in
defm "_VX" :
VPseudoBinaryM<VR, m.vrclass, GPR, m,
!if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
}
multiclass VPseudoBinaryM_VF {
- foreach m = MxList.m in
- foreach f = FPList.fpinfo in
+ foreach f = FPList in
+ foreach m = f.MxList in
defm "_V" # f.FX :
VPseudoBinaryM<VR, m.vrclass, f.fprclass, m,
!if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
}
multiclass VPseudoBinaryM_VI {
- foreach m = MxList.m in
+ foreach m = MxList in
defm _VI : VPseudoBinaryM<VR, m.vrclass, simm5, m,
!if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
}
@@ -1995,14 +2125,14 @@ multiclass VPseudoVDIV_VV_VX {
}
multiclass VPseudoVFMUL_VV_VF {
- defm "" : VPseudoBinaryV_VV,
+ defm "" : VPseudoBinaryFV_VV,
Sched<[WriteVFMulV, ReadVFMulV, ReadVFMulV, ReadVMask]>;
defm "" : VPseudoBinaryV_VF,
Sched<[WriteVFMulF, ReadVFMulV, ReadVFMulF, ReadVMask]>;
}
multiclass VPseudoVFDIV_VV_VF {
- defm "" : VPseudoBinaryV_VV,
+ defm "" : VPseudoBinaryFV_VV,
Sched<[WriteVFDivV, ReadVFDivV, ReadVFDivV, ReadVMask]>;
defm "" : VPseudoBinaryV_VF,
Sched<[WriteVFDivF, ReadVFDivV, ReadVFDivF, ReadVMask]>;
@@ -2021,21 +2151,21 @@ multiclass VPseudoVALU_VV_VX {
}
multiclass VPseudoVSGNJ_VV_VF {
- defm "" : VPseudoBinaryV_VV,
+ defm "" : VPseudoBinaryFV_VV,
Sched<[WriteVFSgnjV, ReadVFSgnjV, ReadVFSgnjV, ReadVMask]>;
defm "" : VPseudoBinaryV_VF,
Sched<[WriteVFSgnjF, ReadVFSgnjV, ReadVFSgnjF, ReadVMask]>;
}
multiclass VPseudoVMAX_VV_VF {
- defm "" : VPseudoBinaryV_VV,
+ defm "" : VPseudoBinaryFV_VV,
Sched<[WriteVFCmpV, ReadVFCmpV, ReadVFCmpV, ReadVMask]>;
defm "" : VPseudoBinaryV_VF,
Sched<[WriteVFCmpF, ReadVFCmpV, ReadVFCmpF, ReadVMask]>;
}
multiclass VPseudoVALU_VV_VF {
- defm "" : VPseudoBinaryV_VV,
+ defm "" : VPseudoBinaryFV_VV,
Sched<[WriteVFALUV, ReadVFALUV, ReadVFALUV, ReadVMask]>;
defm "" : VPseudoBinaryV_VF,
Sched<[WriteVFALUF, ReadVFALUV, ReadVFALUF, ReadVMask]>;
@@ -2068,17 +2198,12 @@ multiclass VPseudoVWMUL_VV_VX {
}
multiclass VPseudoVWMUL_VV_VF {
- defm "" : VPseudoBinaryW_VV,
+ defm "" : VPseudoBinaryW_VV<MxListFW>,
Sched<[WriteVFWMulV, ReadVFWMulV, ReadVFWMulV, ReadVMask]>;
defm "" : VPseudoBinaryW_VF,
Sched<[WriteVFWMulF, ReadVFWMulV, ReadVFWMulF, ReadVMask]>;
}
-multiclass VPseudoBinaryW_VV_VF {
- defm "" : VPseudoBinaryW_VV;
- defm "" : VPseudoBinaryW_VF;
-}
-
multiclass VPseudoVWALU_WV_WX {
defm "" : VPseudoBinaryW_WV,
Sched<[WriteVIWALUV, ReadVIWALUV, ReadVIWALUV, ReadVMask]>;
@@ -2087,14 +2212,14 @@ multiclass VPseudoVWALU_WV_WX {
}
multiclass VPseudoVFWALU_VV_VF {
- defm "" : VPseudoBinaryW_VV,
+ defm "" : VPseudoBinaryW_VV<MxListFW>,
Sched<[WriteVFWALUV, ReadVFWALUV, ReadVFWALUV, ReadVMask]>;
defm "" : VPseudoBinaryW_VF,
Sched<[WriteVFWALUF, ReadVFWALUV, ReadVFWALUF, ReadVMask]>;
}
multiclass VPseudoVFWALU_WV_WF {
- defm "" : VPseudoBinaryW_WV,
+ defm "" : VPseudoBinaryW_WV<MxListFW>,
Sched<[WriteVFWALUV, ReadVFWALUV, ReadVFWALUV, ReadVMask]>;
defm "" : VPseudoBinaryW_WF,
Sched<[WriteVFWALUF, ReadVFWALUV, ReadVFWALUF, ReadVMask]>;
@@ -2107,6 +2232,13 @@ multiclass VPseudoVMRG_VM_XM_IM {
Sched<[WriteVIMergeX, ReadVIMergeV, ReadVIMergeX, ReadVMask]>;
defm "" : VPseudoBinaryV_IM,
Sched<[WriteVIMergeI, ReadVIMergeV, ReadVMask]>;
+ // Tied versions to allow codegen control over the tail elements
+ defm "" : VPseudoTiedBinaryV_VM,
+ Sched<[WriteVIMergeV, ReadVIMergeV, ReadVIMergeV, ReadVMask]>;
+ defm "" : VPseudoTiedBinaryV_XM,
+ Sched<[WriteVIMergeX, ReadVIMergeV, ReadVIMergeX, ReadVMask]>;
+ defm "" : VPseudoTiedBinaryV_IM,
+ Sched<[WriteVIMergeI, ReadVIMergeV, ReadVMask]>;
}
multiclass VPseudoVCALU_VM_XM_IM {
@@ -2199,56 +2331,57 @@ multiclass VPseudoTernaryWithPolicy<VReg RetClass,
}
}
-multiclass VPseudoTernaryV_VV_AAXA<string Constraint = ""> {
- foreach m = MxList.m in {
+multiclass VPseudoTernaryV_VV_AAXA<string Constraint = "",
+ list<LMULInfo> mxlist = MxList> {
+ foreach m = mxlist in {
defm _VV : VPseudoTernaryWithPolicy<m.vrclass, m.vrclass, m.vrclass, m,
Constraint, /*Commutable*/1>;
}
}
multiclass VPseudoTernaryV_VX<string Constraint = ""> {
- foreach m = MxList.m in
+ foreach m = MxList in
defm _VX : VPseudoTernary<m.vrclass, m.vrclass, GPR, m, Constraint>;
}
multiclass VPseudoTernaryV_VX_AAXA<string Constraint = ""> {
- foreach m = MxList.m in
+ foreach m = MxList in
defm "_VX" : VPseudoTernaryWithPolicy<m.vrclass, GPR, m.vrclass, m,
Constraint, /*Commutable*/1>;
}
multiclass VPseudoTernaryV_VF_AAXA<string Constraint = ""> {
- foreach m = MxList.m in
- foreach f = FPList.fpinfo in
+ foreach f = FPList in
+ foreach m = f.MxList in
defm "_V" # f.FX : VPseudoTernaryWithPolicy<m.vrclass, f.fprclass,
m.vrclass, m, Constraint,
/*Commutable*/1>;
}
-multiclass VPseudoTernaryW_VV {
+multiclass VPseudoTernaryW_VV<list<LMULInfo> mxlist = MxListW> {
defvar constraint = "@earlyclobber $rd";
- foreach m = MxListW.m in
+ foreach m = mxlist in
defm _VV : VPseudoTernaryWithPolicy<m.wvrclass, m.vrclass, m.vrclass, m,
constraint>;
}
multiclass VPseudoTernaryW_VX {
defvar constraint = "@earlyclobber $rd";
- foreach m = MxListW.m in
+ foreach m = MxListW in
defm "_VX" : VPseudoTernaryWithPolicy<m.wvrclass, GPR, m.vrclass, m,
constraint>;
}
multiclass VPseudoTernaryW_VF {
defvar constraint = "@earlyclobber $rd";
- foreach m = MxListW.m in
- foreach f = FPListW.fpinfo in
+ foreach f = FPListW in
+ foreach m = f.MxList in
defm "_V" # f.FX : VPseudoTernaryWithPolicy<m.wvrclass, f.fprclass,
m.vrclass, m, constraint>;
}
multiclass VPseudoTernaryV_VI<Operand ImmType = simm5, string Constraint = ""> {
- foreach m = MxList.m in
+ foreach m = MxList in
defm _VI : VPseudoTernary<m.vrclass, m.vrclass, ImmType, m, Constraint>;
}
@@ -2260,7 +2393,7 @@ multiclass VPseudoVMAC_VV_VX_AAXA<string Constraint = ""> {
}
multiclass VPseudoVMAC_VV_VF_AAXA<string Constraint = ""> {
- defm "" : VPseudoTernaryV_VV_AAXA<Constraint>,
+ defm "" : VPseudoTernaryV_VV_AAXA<Constraint, MxListF>,
Sched<[WriteVFMulAddV, ReadVFMulAddV, ReadVFMulAddV, ReadVFMulAddV, ReadVMask]>;
defm "" : VPseudoTernaryV_VF_AAXA<Constraint>,
Sched<[WriteVFMulAddF, ReadVFMulAddV, ReadVFMulAddV, ReadVFMulAddF, ReadVMask]>;
@@ -2286,7 +2419,7 @@ multiclass VPseudoVWMAC_VX {
}
multiclass VPseudoVWMAC_VV_VF {
- defm "" : VPseudoTernaryW_VV,
+ defm "" : VPseudoTernaryW_VV<MxListFW>,
Sched<[WriteVFWMulAddV, ReadVFWMulAddV, ReadVFWMulAddV, ReadVFWMulAddV, ReadVMask]>;
defm "" : VPseudoTernaryW_VF,
Sched<[WriteVFWMulAddF, ReadVFWMulAddV, ReadVFWMulAddV, ReadVFWMulAddF, ReadVMask]>;
@@ -2309,7 +2442,7 @@ multiclass VPseudoVCMPM_VV_VX {
}
multiclass VPseudoVCMPM_VV_VF {
- defm "" : VPseudoBinaryM_VV,
+ defm "" : VPseudoBinaryM_VV<MxListF>,
Sched<[WriteVFCmpV, ReadVFCmpV, ReadVFCmpV, ReadVMask]>;
defm "" : VPseudoBinaryM_VF,
Sched<[WriteVFCmpF, ReadVFCmpV, ReadVFCmpF, ReadVMask]>;
@@ -2328,35 +2461,35 @@ multiclass VPseudoVCMPM_VX_VI {
}
multiclass VPseudoVRED_VS {
- foreach m = MxList.m in {
+ foreach m = MxList in {
defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
Sched<[WriteVIRedV, ReadVIRedV, ReadVIRedV, ReadVIRedV, ReadVMask]>;
}
}
multiclass VPseudoVWRED_VS {
- foreach m = MxList.m in {
+ foreach m = MxList in {
defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
Sched<[WriteVIWRedV, ReadVIWRedV, ReadVIWRedV, ReadVIWRedV, ReadVMask]>;
}
}
multiclass VPseudoVFRED_VS {
- foreach m = MxList.m in {
+ foreach m = MxListF in {
defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
Sched<[WriteVFRedV, ReadVFRedV, ReadVFRedV, ReadVFRedV, ReadVMask]>;
}
}
multiclass VPseudoVFREDO_VS {
- foreach m = MxList.m in {
+ foreach m = MxListF in {
defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
Sched<[WriteVFRedOV, ReadVFRedOV, ReadVFRedOV, ReadVFRedOV, ReadVMask]>;
}
}
multiclass VPseudoVFWRED_VS {
- foreach m = MxList.m in {
+ foreach m = MxListF in {
defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>,
Sched<[WriteVFWRedV, ReadVFWRedV, ReadVFWRedV, ReadVFWRedV, ReadVMask]>;
}
@@ -2374,61 +2507,61 @@ multiclass VPseudoConversion<VReg RetClass,
}
multiclass VPseudoVCVTI_V {
- foreach m = MxList.m in
+ foreach m = MxListF in
defm _V : VPseudoConversion<m.vrclass, m.vrclass, m>,
Sched<[WriteVFCvtFToIV, ReadVFCvtFToIV, ReadVMask]>;
}
multiclass VPseudoVCVTF_V {
- foreach m = MxList.m in
+ foreach m = MxListF in
defm _V : VPseudoConversion<m.vrclass, m.vrclass, m>,
Sched<[WriteVFCvtIToFV, ReadVFCvtIToFV, ReadVMask]>;
}
multiclass VPseudoConversionW_V {
defvar constraint = "@earlyclobber $rd";
- foreach m = MxListW.m in
+ foreach m = MxListW in
defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint>;
}
multiclass VPseudoVWCVTI_V {
defvar constraint = "@earlyclobber $rd";
- foreach m = MxList.m[0-5] in
+ foreach m = MxListFW in
defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint>,
Sched<[WriteVFWCvtFToIV, ReadVFWCvtFToIV, ReadVMask]>;
}
multiclass VPseudoVWCVTF_V {
defvar constraint = "@earlyclobber $rd";
- foreach m = MxList.m[0-5] in
+ foreach m = MxListW in
defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint>,
Sched<[WriteVFWCvtIToFV, ReadVFWCvtIToFV, ReadVMask]>;
}
multiclass VPseudoVWCVTD_V {
defvar constraint = "@earlyclobber $rd";
- foreach m = MxList.m[0-5] in
+ foreach m = MxListFW in
defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint>,
Sched<[WriteVFWCvtFToFV, ReadVFWCvtFToFV, ReadVMask]>;
}
multiclass VPseudoVNCVTI_W {
defvar constraint = "@earlyclobber $rd";
- foreach m = MxList.m[0-5] in
+ foreach m = MxListW in
defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, constraint>,
Sched<[WriteVFNCvtFToIV, ReadVFNCvtFToIV, ReadVMask]>;
}
multiclass VPseudoVNCVTF_W {
defvar constraint = "@earlyclobber $rd";
- foreach m = MxList.m[0-5] in
+ foreach m = MxListFW in
defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, constraint>,
Sched<[WriteVFNCvtIToFV, ReadVFNCvtIToFV, ReadVMask]>;
}
multiclass VPseudoVNCVTD_W {
defvar constraint = "@earlyclobber $rd";
- foreach m = MxListW.m in
+ foreach m = MxListFW in
defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, constraint>,
Sched<[WriteVFNCvtFToFV, ReadVFNCvtFToFV, ReadVMask]>;
}
@@ -3702,6 +3835,28 @@ multiclass VPatConversionVF_WF <string intrinsic, string instruction> {
}
}
+multiclass VPatCompare_VI<string intrinsic, string inst,
+ ImmLeaf ImmType = simm5_plus1> {
+ foreach vti = AllIntegerVectors in {
+ defvar Intr = !cast<Intrinsic>(intrinsic);
+ defvar Pseudo = !cast<Instruction>(inst#"_VI_"#vti.LMul.MX);
+ def : Pat<(vti.Mask (Intr (vti.Vector vti.RegClass:$rs1),
+ (vti.Scalar ImmType:$rs2),
+ VLOpFrag)),
+ (Pseudo vti.RegClass:$rs1, (DecImm ImmType:$rs2),
+ GPR:$vl, vti.Log2SEW)>;
+ defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
+ defvar PseudoMask = !cast<Instruction>(inst#"_VI_"#vti.LMul.MX#"_MASK");
+ def : Pat<(vti.Mask (IntrMask (vti.Mask VR:$merge),
+ (vti.Vector vti.RegClass:$rs1),
+ (vti.Scalar ImmType:$rs2),
+ (vti.Mask V0),
+ VLOpFrag)),
+ (PseudoMask VR:$merge, vti.RegClass:$rs1, (DecImm ImmType:$rs2),
+ (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
+ }
+}
+
//===----------------------------------------------------------------------===//
// Pseudo instructions
//===----------------------------------------------------------------------===//
@@ -3741,7 +3896,7 @@ let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 1 in {
def PseudoVRELOAD_M8 : VPseudo<VL8RE8_V, V_M8, (outs VRM8:$rs1), (ins GPR:$rs2)>;
}
-foreach lmul = MxList.m in {
+foreach lmul = MxList in {
foreach nf = NFSet<lmul>.L in {
defvar vreg = SegRegClass<lmul, nf>.RC;
let hasSideEffects = 0, mayLoad = 0, mayStore = 1, isCodeGenOnly = 1 in {
@@ -3765,9 +3920,9 @@ let hasSideEffects = 1, mayLoad = 0, mayStore = 0, Defs = [VL, VTYPE] in {
// when we aren't using one of the special X0 encodings. Otherwise it could
// accidentally be made X0 by MachineIR optimizations. To satisfy the
// verifier, we also need a GPRX0 instruction for the special encodings.
-def PseudoVSETVLI : Pseudo<(outs GPR:$rd), (ins GPRNoX0:$rs1, VTypeIOp:$vtypei), []>;
-def PseudoVSETVLIX0 : Pseudo<(outs GPR:$rd), (ins GPRX0:$rs1, VTypeIOp:$vtypei), []>;
-def PseudoVSETIVLI : Pseudo<(outs GPR:$rd), (ins uimm5:$rs1, VTypeIOp:$vtypei), []>;
+def PseudoVSETVLI : Pseudo<(outs GPR:$rd), (ins GPRNoX0:$rs1, VTypeIOp11:$vtypei), []>;
+def PseudoVSETVLIX0 : Pseudo<(outs GPR:$rd), (ins GPRX0:$rs1, VTypeIOp11:$vtypei), []>;
+def PseudoVSETIVLI : Pseudo<(outs GPR:$rd), (ins uimm5:$rs1, VTypeIOp10:$vtypei), []>;
}
//===----------------------------------------------------------------------===//
@@ -4304,7 +4459,7 @@ defm PseudoVID : VPseudoVID_V;
let Predicates = [HasVInstructions] in {
let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
- foreach m = MxList.m in {
+ foreach m = MxList in {
let VLMul = m.value in {
let HasSEWOp = 1, BaseInstr = VMV_X_S in
def PseudoVMV_X_S # "_" # m.MX:
@@ -4330,8 +4485,8 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
let Predicates = [HasVInstructionsAnyF] in {
let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
- foreach m = MxList.m in {
- foreach f = FPList.fpinfo in {
+ foreach f = FPList in {
+ foreach m = f.MxList in {
let VLMul = m.value in {
let HasSEWOp = 1, BaseInstr = VFMV_F_S in
def "PseudoVFMV_" # f.FX # "_S_" # m.MX :
@@ -4452,6 +4607,30 @@ defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsrl", "PseudoVSRL", AllIntegerVectors,
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsra", "PseudoVSRA", AllIntegerVectors,
uimm5>;
+foreach vti = AllIntegerVectors in {
+ // Emit shift by 1 as an add since it might be faster.
+ def : Pat<(vti.Vector (int_riscv_vsll (vti.Vector vti.RegClass:$rs1),
+ (XLenVT 1), VLOpFrag)),
+ (!cast<Instruction>("PseudoVADD_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
+ vti.RegClass:$rs1,
+ GPR:$vl,
+ vti.Log2SEW)>;
+ def : Pat<(vti.Vector (int_riscv_vsll_mask (vti.Vector vti.RegClass:$merge),
+ (vti.Vector vti.RegClass:$rs1),
+ (XLenVT 1),
+ (vti.Mask V0),
+ VLOpFrag,
+ (XLenVT timm:$policy))),
+ (!cast<Instruction>("PseudoVADD_VV_"#vti.LMul.MX#"_MASK")
+ vti.RegClass:$merge,
+ vti.RegClass:$rs1,
+ vti.RegClass:$rs1,
+ (vti.Mask V0),
+ GPR:$vl,
+ vti.Log2SEW,
+ (XLenVT timm:$policy))>;
+}
+
//===----------------------------------------------------------------------===//
// 12.7. Vector Narrowing Integer Right Shift Instructions
//===----------------------------------------------------------------------===//
@@ -4481,129 +4660,11 @@ defm : VPatBinarySwappedM_VV<"int_riscv_vmsge", "PseudoVMSLE", AllIntegerVectors
// Match vmslt(u).vx intrinsics to vmsle(u).vi if the scalar is -15 to 16. This
// avoids the user needing to know that there is no vmslt(u).vi instruction.
// Similar for vmsge(u).vx intrinsics using vmslt(u).vi.
-foreach vti = AllIntegerVectors in {
- def : Pat<(vti.Mask (int_riscv_vmslt (vti.Vector vti.RegClass:$rs1),
- (vti.Scalar simm5_plus1:$rs2),
- VLOpFrag)),
- (!cast<Instruction>("PseudoVMSLE_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
- (DecImm simm5_plus1:$rs2),
- GPR:$vl,
- vti.Log2SEW)>;
- def : Pat<(vti.Mask (int_riscv_vmslt_mask (vti.Mask VR:$merge),
- (vti.Vector vti.RegClass:$rs1),
- (vti.Scalar simm5_plus1:$rs2),
- (vti.Mask V0),
- VLOpFrag)),
- (!cast<Instruction>("PseudoVMSLE_VI_"#vti.LMul.MX#"_MASK")
- VR:$merge,
- vti.RegClass:$rs1,
- (DecImm simm5_plus1:$rs2),
- (vti.Mask V0),
- GPR:$vl,
- vti.Log2SEW)>;
+defm : VPatCompare_VI<"int_riscv_vmslt", "PseudoVMSLE">;
+defm : VPatCompare_VI<"int_riscv_vmsltu", "PseudoVMSLEU", simm5_plus1_nonzero>;
- def : Pat<(vti.Mask (int_riscv_vmsltu (vti.Vector vti.RegClass:$rs1),
- (vti.Scalar simm5_plus1:$rs2),
- VLOpFrag)),
- (!cast<Instruction>("PseudoVMSLEU_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
- (DecImm simm5_plus1:$rs2),
- GPR:$vl,
- vti.Log2SEW)>;
- def : Pat<(vti.Mask (int_riscv_vmsltu_mask (vti.Mask VR:$merge),
- (vti.Vector vti.RegClass:$rs1),
- (vti.Scalar simm5_plus1:$rs2),
- (vti.Mask V0),
- VLOpFrag)),
- (!cast<Instruction>("PseudoVMSLEU_VI_"#vti.LMul.MX#"_MASK")
- VR:$merge,
- vti.RegClass:$rs1,
- (DecImm simm5_plus1:$rs2),
- (vti.Mask V0),
- GPR:$vl,
- vti.Log2SEW)>;
-
- // Special cases to avoid matching vmsltu.vi 0 (always false) to
- // vmsleu.vi -1 (always true). Instead match to vmsne.vv.
- def : Pat<(vti.Mask (int_riscv_vmsltu (vti.Vector vti.RegClass:$rs1),
- (vti.Scalar 0), VLOpFrag)),
- (!cast<Instruction>("PseudoVMSNE_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
- vti.RegClass:$rs1,
- GPR:$vl,
- vti.Log2SEW)>;
- def : Pat<(vti.Mask (int_riscv_vmsltu_mask (vti.Mask VR:$merge),
- (vti.Vector vti.RegClass:$rs1),
- (vti.Scalar 0),
- (vti.Mask V0),
- VLOpFrag)),
- (!cast<Instruction>("PseudoVMSNE_VV_"#vti.LMul.MX#"_MASK")
- VR:$merge,
- vti.RegClass:$rs1,
- vti.RegClass:$rs1,
- (vti.Mask V0),
- GPR:$vl,
- vti.Log2SEW)>;
-
- def : Pat<(vti.Mask (int_riscv_vmsge (vti.Vector vti.RegClass:$rs1),
- (vti.Scalar simm5_plus1:$rs2),
- VLOpFrag)),
- (!cast<Instruction>("PseudoVMSGT_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
- (DecImm simm5_plus1:$rs2),
- GPR:$vl,
- vti.Log2SEW)>;
- def : Pat<(vti.Mask (int_riscv_vmsge_mask (vti.Mask VR:$merge),
- (vti.Vector vti.RegClass:$rs1),
- (vti.Scalar simm5_plus1:$rs2),
- (vti.Mask V0),
- VLOpFrag)),
- (!cast<Instruction>("PseudoVMSGT_VI_"#vti.LMul.MX#"_MASK")
- VR:$merge,
- vti.RegClass:$rs1,
- (DecImm simm5_plus1:$rs2),
- (vti.Mask V0),
- GPR:$vl,
- vti.Log2SEW)>;
-
- def : Pat<(vti.Mask (int_riscv_vmsgeu (vti.Vector vti.RegClass:$rs1),
- (vti.Scalar simm5_plus1:$rs2),
- VLOpFrag)),
- (!cast<Instruction>("PseudoVMSGTU_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
- (DecImm simm5_plus1:$rs2),
- GPR:$vl,
- vti.Log2SEW)>;
- def : Pat<(vti.Mask (int_riscv_vmsgeu_mask (vti.Mask VR:$merge),
- (vti.Vector vti.RegClass:$rs1),
- (vti.Scalar simm5_plus1:$rs2),
- (vti.Mask V0),
- VLOpFrag)),
- (!cast<Instruction>("PseudoVMSGTU_VI_"#vti.LMul.MX#"_MASK")
- VR:$merge,
- vti.RegClass:$rs1,
- (DecImm simm5_plus1:$rs2),
- (vti.Mask V0),
- GPR:$vl,
- vti.Log2SEW)>;
-
- // Special cases to avoid matching vmsgeu.vi 0 (always true) to
- // vmsgtu.vi -1 (always false). Instead match to vmsne.vv.
- def : Pat<(vti.Mask (int_riscv_vmsgeu (vti.Vector vti.RegClass:$rs1),
- (vti.Scalar 0), VLOpFrag)),
- (!cast<Instruction>("PseudoVMSEQ_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
- vti.RegClass:$rs1,
- GPR:$vl,
- vti.Log2SEW)>;
- def : Pat<(vti.Mask (int_riscv_vmsgeu_mask (vti.Mask VR:$merge),
- (vti.Vector vti.RegClass:$rs1),
- (vti.Scalar 0),
- (vti.Mask V0),
- VLOpFrag)),
- (!cast<Instruction>("PseudoVMSEQ_VV_"#vti.LMul.MX#"_MASK")
- VR:$merge,
- vti.RegClass:$rs1,
- vti.RegClass:$rs1,
- (vti.Mask V0),
- GPR:$vl,
- vti.Log2SEW)>;
-}
+defm : VPatCompare_VI<"int_riscv_vmsge", "PseudoVMSGT">;
+defm : VPatCompare_VI<"int_riscv_vmsgeu", "PseudoVMSGTU", simm5_plus1_nonzero>;
//===----------------------------------------------------------------------===//
// 12.9. Vector Integer Min/Max Instructions
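Much of the RISCVInstrInfoVPseudos.td churn above is mechanical: the LMUL and FP lists move from single-field wrapper records (MxList.m, FPList.fpinfo) to plain defvar lists that foreach can iterate directly, and the new float-specific lists (MxListF, MxListFW) drop MF8, which floating point does not need. The following self-contained sketch shows the before/after shape with invented names (Elem, Wrap, Widths, OLD_/NEW_); it is not code from the patch.

// Invented names, illustration only.
class Elem<int v> {
  int Value = v;
}

// Old shape: the list lives in a field of a wrapper record, and every user
// spells Wrap.l (as the old MxList.m / FPList.fpinfo did).
def Wrap {
  list<int> l = [1, 2, 4, 8];
}
foreach v = Wrap.l in {
  def OLD_M#v : Elem<v>;
}

// New shape: a file-scope defvar that foreach iterates directly, which is
// what the patch does for MxList, MxListF, MxListW, FPList and friends.
defvar Widths = [1, 2, 4, 8];
foreach v = Widths in {
  def NEW_M#v : Elem<v>;
}

The defvar form reads the same at every use site but drops the wrapper record and the field access; it also makes it natural for FPR_Info to carry its own per-type list, as the patch does by giving SCALAR_F16/F32/F64 an MxList built from MxSet<16/32/64>.m.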
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index 711ad4335ece..e452a84a9a6f 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -363,6 +363,91 @@ multiclass VPatNConvertFP2ISDNode_V<SDNode vop, string instruction_name> {
}
}
+multiclass VPatWidenBinarySDNode_VV_VX_WV_WX<SDNode op, PatFrags extop, string instruction_name> {
+ foreach vti = AllWidenableIntVectors in {
+ def : Pat<(op (vti.Wti.Vector (extop (vti.Vti.Vector vti.Vti.RegClass:$rs2))),
+ (vti.Wti.Vector (extop (vti.Vti.Vector vti.Vti.RegClass:$rs1)))),
+ (!cast<Instruction>(instruction_name#"_VV_"#vti.Vti.LMul.MX)
+ vti.Vti.RegClass:$rs2, vti.Vti.RegClass:$rs1,
+ vti.Vti.AVL, vti.Vti.Log2SEW)>;
+ def : Pat<(op (vti.Wti.Vector (extop (vti.Vti.Vector vti.Vti.RegClass:$rs2))),
+ (vti.Wti.Vector (extop (vti.Vti.Vector (SplatPat GPR:$rs1))))),
+ (!cast<Instruction>(instruction_name#"_VX_"#vti.Vti.LMul.MX)
+ vti.Vti.RegClass:$rs2, GPR:$rs1,
+ vti.Vti.AVL, vti.Vti.Log2SEW)>;
+ def : Pat<(op (vti.Wti.Vector vti.Wti.RegClass:$rs2),
+ (vti.Wti.Vector (extop (vti.Vti.Vector vti.Vti.RegClass:$rs1)))),
+ (!cast<Instruction>(instruction_name#"_WV_"#vti.Vti.LMul.MX)
+ vti.Wti.RegClass:$rs2, vti.Vti.RegClass:$rs1,
+ vti.Vti.AVL, vti.Vti.Log2SEW)>;
+ def : Pat<(op (vti.Wti.Vector vti.Wti.RegClass:$rs2),
+ (vti.Wti.Vector (extop (vti.Vti.Vector (SplatPat GPR:$rs1))))),
+ (!cast<Instruction>(instruction_name#"_WX_"#vti.Vti.LMul.MX)
+ vti.Wti.RegClass:$rs2, GPR:$rs1,
+ vti.Vti.AVL, vti.Vti.Log2SEW)>;
+ }
+}
+
+multiclass VPatWidenMulAddSDNode_VV<PatFrags extop1, PatFrags extop2, string instruction_name> {
+ foreach vti = AllWidenableIntVectors in {
+ def : Pat<
+ (add (vti.Wti.Vector vti.Wti.RegClass:$rd),
+ (mul_oneuse (vti.Wti.Vector (extop1 (vti.Vti.Vector vti.Vti.RegClass:$rs1))),
+ (vti.Wti.Vector (extop2 (vti.Vti.Vector vti.Vti.RegClass:$rs2))))),
+ (!cast<Instruction>(instruction_name#"_VV_"#vti.Vti.LMul.MX)
+ vti.Wti.RegClass:$rd, vti.Vti.RegClass:$rs1, vti.Vti.RegClass:$rs2,
+ vti.Vti.AVL, vti.Vti.Log2SEW, TAIL_AGNOSTIC
+ )>;
+ }
+}
+multiclass VPatWidenMulAddSDNode_VX<PatFrags extop1, PatFrags extop2, string instruction_name> {
+ foreach vti = AllWidenableIntVectors in {
+ def : Pat<
+ (add (vti.Wti.Vector vti.Wti.RegClass:$rd),
+ (mul_oneuse (vti.Wti.Vector (extop1 (vti.Vti.Vector (SplatPat GPR:$rs1)))),
+ (vti.Wti.Vector (extop2 (vti.Vti.Vector vti.Vti.RegClass:$rs2))))),
+ (!cast<Instruction>(instruction_name#"_VX_"#vti.Vti.LMul.MX)
+ vti.Wti.RegClass:$rd, GPR:$rs1, vti.Vti.RegClass:$rs2,
+ vti.Vti.AVL, vti.Vti.Log2SEW, TAIL_AGNOSTIC
+ )>;
+ }
+}
+
+multiclass VPatWidenBinaryFPSDNode_VV_VF<SDNode op, string instruction_name> {
+ foreach vti = AllWidenableFloatVectors in {
+ def : Pat<(op (vti.Wti.Vector (fpext_oneuse (vti.Vti.Vector vti.Vti.RegClass:$rs2))),
+ (vti.Wti.Vector (fpext_oneuse (vti.Vti.Vector vti.Vti.RegClass:$rs1)))),
+ (!cast<Instruction>(instruction_name#"_VV_"#vti.Vti.LMul.MX)
+ vti.Vti.RegClass:$rs2, vti.Vti.RegClass:$rs1,
+ vti.Vti.AVL, vti.Vti.Log2SEW)>;
+ def : Pat<(op (vti.Wti.Vector (fpext_oneuse (vti.Vti.Vector vti.Vti.RegClass:$rs2))),
+ (vti.Wti.Vector (fpext_oneuse (vti.Vti.Vector (SplatPat vti.Vti.ScalarRegClass:$rs1))))),
+ (!cast<Instruction>(instruction_name#"_V"#vti.Vti.ScalarSuffix#"_"#vti.Vti.LMul.MX)
+ vti.Vti.RegClass:$rs2, vti.Vti.ScalarRegClass:$rs1,
+ vti.Vti.AVL, vti.Vti.Log2SEW)>;
+ }
+}
+
+multiclass VPatWidenBinaryFPSDNode_WV_WF<SDNode op, string instruction_name> {
+ foreach vti = AllWidenableFloatVectors in {
+ def : Pat<(op (vti.Wti.Vector vti.Wti.RegClass:$rs2),
+ (vti.Wti.Vector (fpext_oneuse (vti.Vti.Vector vti.Vti.RegClass:$rs1)))),
+ (!cast<Instruction>(instruction_name#"_WV_"#vti.Vti.LMul.MX)
+ vti.Wti.RegClass:$rs2, vti.Vti.RegClass:$rs1,
+ vti.Vti.AVL, vti.Vti.Log2SEW)>;
+ def : Pat<(op (vti.Wti.Vector vti.Wti.RegClass:$rs2),
+ (vti.Wti.Vector (fpext_oneuse (vti.Vti.Vector (SplatPat vti.Vti.ScalarRegClass:$rs1))))),
+ (!cast<Instruction>(instruction_name#"_W"#vti.Vti.ScalarSuffix#"_"#vti.Vti.LMul.MX)
+ vti.Wti.RegClass:$rs2, vti.Vti.ScalarRegClass:$rs1,
+ vti.Vti.AVL, vti.Vti.Log2SEW)>;
+ }
+}
+
+multiclass VPatWidenBinaryFPSDNode_VV_VF_WV_WF<SDNode op, string instruction_name> {
+ defm : VPatWidenBinaryFPSDNode_VV_VF<op, instruction_name>;
+ defm : VPatWidenBinaryFPSDNode_WV_WF<op, instruction_name>;
+}
+
//===----------------------------------------------------------------------===//
// Patterns.
//===----------------------------------------------------------------------===//
@@ -399,6 +484,15 @@ foreach vti = AllIntegerVectors in {
vti.RegClass:$rs1, simm5:$rs2, vti.AVL, vti.Log2SEW)>;
}
+// 12.2. Vector Widening Integer Add and Subtract
+defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, sext_oneuse, "PseudoVWADD">;
+defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, zext_oneuse, "PseudoVWADDU">;
+defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, anyext_oneuse, "PseudoVWADDU">;
+
+defm : VPatWidenBinarySDNode_VV_VX_WV_WX<sub, sext_oneuse, "PseudoVWSUB">;
+defm : VPatWidenBinarySDNode_VV_VX_WV_WX<sub, zext_oneuse, "PseudoVWSUBU">;
+defm : VPatWidenBinarySDNode_VV_VX_WV_WX<sub, anyext_oneuse, "PseudoVWSUBU">;
+
// 12.3. Vector Integer Extension
defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF2",
AllFractionableVF2IntVectors>;
@@ -513,6 +607,15 @@ foreach vti = AllIntegerVectors in {
vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
+// 12.14 Vector Widening Integer Multiply-Add Instructions
+defm : VPatWidenMulAddSDNode_VV<sext_oneuse, sext_oneuse, "PseudoVWMACC">;
+defm : VPatWidenMulAddSDNode_VX<sext_oneuse, sext_oneuse, "PseudoVWMACC">;
+defm : VPatWidenMulAddSDNode_VV<zext_oneuse, zext_oneuse, "PseudoVWMACCU">;
+defm : VPatWidenMulAddSDNode_VX<zext_oneuse, zext_oneuse, "PseudoVWMACCU">;
+defm : VPatWidenMulAddSDNode_VV<sext_oneuse, zext_oneuse, "PseudoVWMACCSU">;
+defm : VPatWidenMulAddSDNode_VX<sext_oneuse, zext_oneuse, "PseudoVWMACCSU">;
+defm : VPatWidenMulAddSDNode_VX<zext_oneuse, sext_oneuse, "PseudoVWMACCUS">;
+
// 12.15. Vector Integer Merge Instructions
foreach vti = AllIntegerVectors in {
def : Pat<(vti.Vector (vselect (vti.Mask V0), vti.RegClass:$rs1,
@@ -582,11 +685,18 @@ defm : VPatBinaryFPSDNode_VV_VF<fadd, "PseudoVFADD">;
defm : VPatBinaryFPSDNode_VV_VF<fsub, "PseudoVFSUB">;
defm : VPatBinaryFPSDNode_R_VF<fsub, "PseudoVFRSUB">;
+// 14.3. Vector Widening Floating-Point Add/Subtract Instructions
+defm : VPatWidenBinaryFPSDNode_VV_VF_WV_WF<fadd, "PseudoVFWADD">;
+defm : VPatWidenBinaryFPSDNode_VV_VF_WV_WF<fsub, "PseudoVFWSUB">;
+
// 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
defm : VPatBinaryFPSDNode_VV_VF<fmul, "PseudoVFMUL">;
defm : VPatBinaryFPSDNode_VV_VF<fdiv, "PseudoVFDIV">;
defm : VPatBinaryFPSDNode_R_VF<fdiv, "PseudoVFRDIV">;
+// 14.5. Vector Widening Floating-Point Multiply Instructions
+defm : VPatWidenBinaryFPSDNode_VV_VF<fmul, "PseudoVFWMUL">;
+
// 14.6 Vector Single-Width Floating-Point Fused Multiply-Add Instructions.
foreach fvti = AllFloatVectors in {
// NOTE: We choose VFMADD because it has the most commuting freedom. So it
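The new widening DAG patterns added in this file all share one shape: a multiclass parameterized by an extension fragment (sext_oneuse, zext_oneuse, anyext_oneuse) and an instruction prefix, instantiated once per flavour so that sign extension maps to PseudoVWADD/PseudoVWSUB and zero or any extension maps to the unsigned PseudoVWADDU/PseudoVWSUBU forms; the *_oneuse fragments keep the fold to cases where the extend has no other users. Below is a toy, self-contained TableGen sketch of that parameterization, with invented class and record names (real patterns use Pat and PatFrags records, not strings).

// Invented names, illustration only.
class PatSketch<string expr, string inst> {
  string Expr = expr;   // the DAG shape being matched (text only here)
  string Inst = inst;   // the pseudo it would select
}

multiclass WidenAddSketch<string extop, string inst> {
  def _VV : PatSketch<"add(" # extop # "(a), " # extop # "(b))", inst # "_VV">;
  def _VX : PatSketch<"add(" # extop # "(a), " # extop # "(splat x))", inst # "_VX">;
}

// One instantiation per extension flavour, mirroring the defm lines above:
// sign extension -> VWADD, zero or any extension -> VWADDU.
defm SKETCH_SEXT : WidenAddSketch<"sext_oneuse", "PseudoVWADD">;
defm SKETCH_ZEXT : WidenAddSketch<"zext_oneuse", "PseudoVWADDU">;
defm SKETCH_ANY  : WidenAddSketch<"anyext_oneuse", "PseudoVWADDU">;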
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 73b97e1c3675..964f0fa54512 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -177,14 +177,13 @@ def riscv_vrgatherei16_vv_vl : SDNode<"RISCVISD::VRGATHEREI16_VV_VL",
SDTCisSameNumEltsAs<0, 3>,
SDTCisVT<4, XLenVT>]>>;
-def riscv_vselect_vl : SDNode<"RISCVISD::VSELECT_VL",
- SDTypeProfile<1, 4, [SDTCisVec<0>,
- SDTCisVec<1>,
- SDTCisSameNumEltsAs<0, 1>,
- SDTCVecEltisVT<1, i1>,
- SDTCisSameAs<0, 2>,
- SDTCisSameAs<2, 3>,
- SDTCisVT<4, XLenVT>]>>;
+def SDT_RISCVSelect_VL : SDTypeProfile<1, 4, [
+ SDTCisVec<0>, SDTCisVec<1>, SDTCisSameNumEltsAs<0, 1>, SDTCVecEltisVT<1, i1>,
+ SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>, SDTCisVT<4, XLenVT>
+]>;
+
+def riscv_vselect_vl : SDNode<"RISCVISD::VSELECT_VL", SDT_RISCVSelect_VL>;
+def riscv_vp_merge_vl : SDNode<"RISCVISD::VP_MERGE_VL", SDT_RISCVSelect_VL>;
def SDT_RISCVMaskBinOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
SDTCisSameAs<0, 2>,
@@ -216,19 +215,20 @@ def riscv_zext_vl : SDNode<"RISCVISD::VZEXT_VL", SDT_RISCVVEXTEND_VL>;
def riscv_trunc_vector_vl : SDNode<"RISCVISD::TRUNCATE_VECTOR_VL",
SDTypeProfile<1, 3, [SDTCisVec<0>,
- SDTCisVec<1>,
+ SDTCisSameNumEltsAs<0, 1>,
SDTCisSameNumEltsAs<0, 2>,
SDTCVecEltisVT<2, i1>,
SDTCisVT<3, XLenVT>]>>;
-def SDT_RISCVVWMUL_VL : SDTypeProfile<1, 4, [SDTCisVec<0>,
- SDTCisSameNumEltsAs<0, 1>,
- SDTCisSameAs<1, 2>,
- SDTCisSameNumEltsAs<1, 3>,
- SDTCVecEltisVT<3, i1>,
- SDTCisVT<4, XLenVT>]>;
-def riscv_vwmul_vl : SDNode<"RISCVISD::VWMUL_VL", SDT_RISCVVWMUL_VL, [SDNPCommutative]>;
-def riscv_vwmulu_vl : SDNode<"RISCVISD::VWMULU_VL", SDT_RISCVVWMUL_VL, [SDNPCommutative]>;
+def SDT_RISCVVWBinOp_VL : SDTypeProfile<1, 4, [SDTCisVec<0>,
+ SDTCisSameNumEltsAs<0, 1>,
+ SDTCisSameAs<1, 2>,
+ SDTCisSameNumEltsAs<1, 3>,
+ SDTCVecEltisVT<3, i1>,
+ SDTCisVT<4, XLenVT>]>;
+def riscv_vwmul_vl : SDNode<"RISCVISD::VWMUL_VL", SDT_RISCVVWBinOp_VL, [SDNPCommutative]>;
+def riscv_vwmulu_vl : SDNode<"RISCVISD::VWMULU_VL", SDT_RISCVVWBinOp_VL, [SDNPCommutative]>;
+def riscv_vwaddu_vl : SDNode<"RISCVISD::VWADDU_VL", SDT_RISCVVWBinOp_VL, [SDNPCommutative]>;
def SDTRVVVecReduce : SDTypeProfile<1, 5, [
SDTCisVec<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisSameAs<0, 3>,
@@ -363,37 +363,47 @@ multiclass VPatBinaryWVL_VV_VX<SDNode vop, string instruction_name> {
}
}
-class VPatBinaryVL_VF<SDNode vop,
- string instruction_name,
- ValueType result_type,
- ValueType vop_type,
- ValueType mask_type,
- int sew,
- LMULInfo vlmul,
- VReg vop_reg_class,
- RegisterClass scalar_reg_class> :
- Pat<(result_type (vop (vop_type vop_reg_class:$rs1),
- (vop_type (SplatFPOp scalar_reg_class:$rs2)),
- (mask_type true_mask),
- VLOpFrag)),
+multiclass VPatBinaryVL_VF<SDNode vop,
+ string instruction_name,
+ ValueType result_type,
+ ValueType vop_type,
+ ValueType mask_type,
+ int sew,
+ LMULInfo vlmul,
+ VReg vop_reg_class,
+ RegisterClass scalar_reg_class> {
+ def : Pat<(result_type (vop (vop_type vop_reg_class:$rs1),
+ (vop_type (SplatFPOp scalar_reg_class:$rs2)),
+ (mask_type true_mask),
+ VLOpFrag)),
(!cast<Instruction>(instruction_name#"_"#vlmul.MX)
vop_reg_class:$rs1,
scalar_reg_class:$rs2,
GPR:$vl, sew)>;
+ def : Pat<(result_type (vop (vop_type vop_reg_class:$rs1),
+ (vop_type (SplatFPOp scalar_reg_class:$rs2)),
+ (mask_type V0),
+ VLOpFrag)),
+ (!cast<Instruction>(instruction_name#"_"#vlmul.MX#"_MASK")
+ (result_type (IMPLICIT_DEF)),
+ vop_reg_class:$rs1,
+ scalar_reg_class:$rs2,
+ (mask_type V0), GPR:$vl, sew, TAIL_AGNOSTIC)>;
+}
multiclass VPatBinaryFPVL_VV_VF<SDNode vop, string instruction_name> {
foreach vti = AllFloatVectors in {
defm : VPatBinaryVL_VV<vop, instruction_name,
vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.RegClass>;
- def : VPatBinaryVL_VF<vop, instruction_name#"_V"#vti.ScalarSuffix,
- vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
- vti.LMul, vti.RegClass, vti.ScalarRegClass>;
+ defm : VPatBinaryVL_VF<vop, instruction_name#"_V"#vti.ScalarSuffix,
+ vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
+ vti.LMul, vti.RegClass, vti.ScalarRegClass>;
}
}
multiclass VPatBinaryFPVL_R_VF<SDNode vop, string instruction_name> {
- foreach fvti = AllFloatVectors in
+ foreach fvti = AllFloatVectors in {
def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2),
fvti.RegClass:$rs1,
(fvti.Mask true_mask),
@@ -401,6 +411,15 @@ multiclass VPatBinaryFPVL_R_VF<SDNode vop, string instruction_name> {
(!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
GPR:$vl, fvti.Log2SEW)>;
+ def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2),
+ fvti.RegClass:$rs1,
+ (fvti.Mask V0),
+ VLOpFrag)),
+ (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")
+ (fvti.Vector (IMPLICIT_DEF)),
+ fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
+ (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
+ }
}
multiclass VPatIntegerSetCCVL_VV<VTypeInfo vti, string instruction_name,
@@ -602,6 +621,47 @@ multiclass VPatReductionVL<SDNode vop, string instruction_name, bit is_float> {
}
}
+multiclass VPatBinarySDNodeExt_V_WV<SDNode op, PatFrags extop, string instruction_name> {
+ foreach vti = AllWidenableIntVectors in {
+ def : Pat<
+ (vti.Vti.Vector
+ (riscv_trunc_vector_vl
+ (op (vti.Wti.Vector vti.Wti.RegClass:$rs2),
+ (vti.Wti.Vector (extop (vti.Vti.Vector vti.Vti.RegClass:$rs1)))),
+ (riscv_vmset_vl VLMax),
+ VLMax)),
+ (!cast<Instruction>(instruction_name#"_WV_"#vti.Vti.LMul.MX)
+ vti.Wti.RegClass:$rs2, vti.Vti.RegClass:$rs1,
+ vti.Vti.AVL, vti.Vti.Log2SEW)>;
+ }
+}
+
+multiclass VPatBinarySDNodeExt_V_WX<SDNode op, PatFrags extop, string instruction_name> {
+ foreach vti = AllWidenableIntVectors in {
+ def : Pat<
+ (vti.Vti.Vector
+ (riscv_trunc_vector_vl
+ (op (vti.Wti.Vector vti.Wti.RegClass:$rs2),
+ (vti.Wti.Vector (extop (vti.Vti.Vector (SplatPat GPR:$rs1))))),
+ (riscv_vmset_vl VLMax),
+ VLMax)),
+ (!cast<Instruction>(instruction_name#"_WX_"#vti.Vti.LMul.MX)
+ vti.Wti.RegClass:$rs2, GPR:$rs1,
+ vti.Vti.AVL, vti.Vti.Log2SEW)>;
+ }
+}
+
+multiclass VPatBinarySDNode_V_WV<SDNode op, string instruction_name> {
+ defm : VPatBinarySDNodeExt_V_WV<op, sext_oneuse, instruction_name>;
+ defm : VPatBinarySDNodeExt_V_WV<op, zext_oneuse, instruction_name>;
+}
+
+multiclass VPatBinarySDNode_V_WX<SDNode op, string instruction_name> {
+ defm : VPatBinarySDNodeExt_V_WX<op, sext_oneuse, instruction_name>;
+ defm : VPatBinarySDNodeExt_V_WX<op, zext_oneuse, instruction_name>;
+}
+
//===----------------------------------------------------------------------===//
// Patterns.
//===----------------------------------------------------------------------===//
@@ -661,6 +721,9 @@ foreach vti = AllIntegerVectors in {
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
+// 12.2. Vector Widening Integer Add/Subtract
+defm : VPatBinaryWVL_VV_VX<riscv_vwaddu_vl, "PseudoVWADDU">;
+
// 12.3. Vector Integer Extension
defm : VPatExtendSDNode_V_VL<riscv_zext_vl, "PseudoVZEXT", "VF2",
AllFractionableVF2IntVectors>;
@@ -696,14 +759,19 @@ foreach vti = AllIntegerVectors in {
}
// 12.7. Vector Narrowing Integer Right Shift Instructions
+defm : VPatBinarySDNode_V_WV<srl, "PseudoVNSRL">;
+defm : VPatBinarySDNode_V_WX<srl, "PseudoVNSRL">;
+defm : VPatBinarySDNode_V_WV<sra, "PseudoVNSRA">;
+defm : VPatBinarySDNode_V_WX<sra, "PseudoVNSRA">;
+
foreach vtiTowti = AllWidenableIntVectors in {
defvar vti = vtiTowti.Vti;
defvar wti = vtiTowti.Wti;
def : Pat<(vti.Vector (riscv_trunc_vector_vl (wti.Vector wti.RegClass:$rs1),
(vti.Mask true_mask),
VLOpFrag)),
- (!cast<Instruction>("PseudoVNSRL_WI_"#vti.LMul.MX)
- wti.RegClass:$rs1, 0, GPR:$vl, vti.Log2SEW)>;
+ (!cast<Instruction>("PseudoVNSRL_WX_"#vti.LMul.MX)
+ wti.RegClass:$rs1, X0, GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector
(riscv_trunc_vector_vl
@@ -760,6 +828,8 @@ foreach vti = AllIntegerVectors in {
defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSNE", SETNE, SETNE>;
defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>;
defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>;
+ defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSGT", SETGT, SETLT>;
+ defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSGTU", SETUGT, SETULT>;
defm : VPatIntegerSetCCVL_VIPlus1<vti, "PseudoVMSLE", SETLT,
SplatPat_simm5_plus1>;
@@ -905,6 +975,30 @@ foreach vti = AllIntegerVectors in {
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
vti.RegClass:$rs2, simm5:$rs1, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
+
+ def : Pat<(vti.Vector (riscv_vp_merge_vl (vti.Mask V0),
+ vti.RegClass:$rs1,
+ vti.RegClass:$rs2,
+ VLOpFrag)),
+ (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX#"_TU")
+ vti.RegClass:$rs2, vti.RegClass:$rs2, vti.RegClass:$rs1,
+ (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
+
+ def : Pat<(vti.Vector (riscv_vp_merge_vl (vti.Mask V0),
+ (SplatPat XLenVT:$rs1),
+ vti.RegClass:$rs2,
+ VLOpFrag)),
+ (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX#"_TU")
+ vti.RegClass:$rs2, vti.RegClass:$rs2, GPR:$rs1,
+ (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
+
+ def : Pat<(vti.Vector (riscv_vp_merge_vl (vti.Mask V0),
+ (SplatPat_simm5 simm5:$rs1),
+ vti.RegClass:$rs2,
+ VLOpFrag)),
+ (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX#"_TU")
+ vti.RegClass:$rs2, vti.RegClass:$rs2, simm5:$rs1,
+ (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
}
// 12.16. Vector Integer Move Instructions
@@ -1152,6 +1246,31 @@ foreach fvti = AllFloatVectors in {
(!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
fvti.RegClass:$rs2, 0, (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
+ def : Pat<(fvti.Vector (riscv_vp_merge_vl (fvti.Mask V0),
+ fvti.RegClass:$rs1,
+ fvti.RegClass:$rs2,
+ VLOpFrag)),
+ (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX#"_TU")
+ fvti.RegClass:$rs2, fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
+ GPR:$vl, fvti.Log2SEW)>;
+
+ def : Pat<(fvti.Vector (riscv_vp_merge_vl (fvti.Mask V0),
+ (SplatFPOp fvti.ScalarRegClass:$rs1),
+ fvti.RegClass:$rs2,
+ VLOpFrag)),
+ (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX#"_TU")
+ fvti.RegClass:$rs2, fvti.RegClass:$rs2,
+ (fvti.Scalar fvti.ScalarRegClass:$rs1),
+ (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
+
+ def : Pat<(fvti.Vector (riscv_vp_merge_vl (fvti.Mask V0),
+ (SplatFPOp (fvti.Scalar fpimm0)),
+ fvti.RegClass:$rs2,
+ VLOpFrag)),
+ (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX#"_TU")
+ fvti.RegClass:$rs2, fvti.RegClass:$rs2, 0, (fvti.Mask V0),
+ GPR:$vl, fvti.Log2SEW)>;
+
// 14.16. Vector Floating-Point Move Instruction
// If we're splatting fpimm0, use vmv.v.x vd, x0.
def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
@@ -1368,6 +1487,11 @@ let Predicates = [HasVInstructionsAnyF] in {
// 17.2. Floating-Point Scalar Move Instructions
foreach vti = AllFloatVectors in {
def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
+ (vti.Scalar (fpimm0)),
+ VLOpFrag)),
+ (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX)
+ vti.RegClass:$merge, X0, GPR:$vl, vti.Log2SEW)>;
+ def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
vti.ScalarRegClass:$rs1,
VLOpFrag)),
(!cast<Instruction>("PseudoVFMV_S_"#vti.ScalarSuffix#"_"#vti.LMul.MX)
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
index 7eb8ae7d4193..db3f5851879a 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
@@ -12,14 +12,22 @@
// Zbb - 1.0
// Zbc - 1.0
// Zbs - 1.0
-// Zbe - 0.93
-// Zbf - 0.93
-// Zbm - 0.93
-// Zbp - 0.93
-// Zbr - 0.93
-// Zbt - 0.93
-// This version is still experimental as the Bitmanip extensions haven't been
-// ratified yet.
+// Zbe - 0.93 *experimental
+// Zbf - 0.93 *experimental
+// Zbm - 0.93 *experimental
+// Zbp - 0.93 *experimental
+// Zbr - 0.93 *experimental
+// Zbt - 0.93 *experimental
+//
+// The experimental extensions appeared in an earlier draft of the Bitmanip
+// extensions. They have not been ratified and are subject to change.
+//
+// This file also describes RISC-V instructions from the Zbk* extensions in
+// Cryptography Extensions Volume I: Scalar & Entropy Source Instructions,
+// versions:
+// Zbkb - 1.0
+// Zbkc - 1.0
+// Zbkx - 1.0
//
//===----------------------------------------------------------------------===//
@@ -43,6 +51,8 @@ def riscv_shfl : SDNode<"RISCVISD::SHFL", SDTIntBinOp>;
def riscv_shflw : SDNode<"RISCVISD::SHFLW", SDT_RISCVIntBinOpW>;
def riscv_unshfl : SDNode<"RISCVISD::UNSHFL", SDTIntBinOp>;
def riscv_unshflw: SDNode<"RISCVISD::UNSHFLW",SDT_RISCVIntBinOpW>;
+def riscv_bfp : SDNode<"RISCVISD::BFP", SDTIntBinOp>;
+def riscv_bfpw : SDNode<"RISCVISD::BFPW", SDT_RISCVIntBinOpW>;
def riscv_bcompress : SDNode<"RISCVISD::BCOMPRESS", SDTIntBinOp>;
def riscv_bcompressw : SDNode<"RISCVISD::BCOMPRESSW", SDT_RISCVIntBinOpW>;
def riscv_bdecompress : SDNode<"RISCVISD::BDECOMPRESS", SDTIntBinOp>;
@@ -309,14 +319,14 @@ class RVBTernaryImm5<bits<2> funct2, bits<3> funct3, RISCVOpcode opcode,
// Instructions
//===----------------------------------------------------------------------===//
-let Predicates = [HasStdExtZbbOrZbp] in {
+let Predicates = [HasStdExtZbbOrZbpOrZbkb] in {
def ANDN : ALU_rr<0b0100000, 0b111, "andn">,
Sched<[WriteIALU, ReadIALU, ReadIALU]>;
def ORN : ALU_rr<0b0100000, 0b110, "orn">,
Sched<[WriteIALU, ReadIALU, ReadIALU]>;
def XNOR : ALU_rr<0b0100000, 0b100, "xnor">,
Sched<[WriteIALU, ReadIALU, ReadIALU]>;
-} // Predicates = [HasStdExtZbbOrZbp]
+} // Predicates = [HasStdExtZbbOrZbpOrZbkb]
let Predicates = [HasStdExtZba] in {
def SH1ADD : ALU_rr<0b0010000, 0b010, "sh1add">,
@@ -327,18 +337,22 @@ def SH3ADD : ALU_rr<0b0010000, 0b110, "sh3add">,
Sched<[WriteSHXADD, ReadSHXADD, ReadSHXADD]>;
} // Predicates = [HasStdExtZba]
-let Predicates = [HasStdExtZbbOrZbp] in {
+let Predicates = [HasStdExtZbbOrZbpOrZbkb] in {
def ROL : ALU_rr<0b0110000, 0b001, "rol">,
Sched<[WriteRotateReg, ReadRotateReg, ReadRotateReg]>;
def ROR : ALU_rr<0b0110000, 0b101, "ror">,
Sched<[WriteRotateReg, ReadRotateReg, ReadRotateReg]>;
-} // Predicates = [HasStdExtZbbOrZbp]
+} // Predicates = [HasStdExtZbbOrZbpOrZbkb]
let Predicates = [HasStdExtZbs] in {
-def BCLR : ALU_rr<0b0100100, 0b001, "bclr">, Sched<[]>;
-def BSET : ALU_rr<0b0010100, 0b001, "bset">, Sched<[]>;
-def BINV : ALU_rr<0b0110100, 0b001, "binv">, Sched<[]>;
-def BEXT : ALU_rr<0b0100100, 0b101, "bext">, Sched<[]>;
+def BCLR : ALU_rr<0b0100100, 0b001, "bclr">,
+ Sched<[WriteSingleBit, ReadSingleBit, ReadSingleBit]>;
+def BSET : ALU_rr<0b0010100, 0b001, "bset">,
+ Sched<[WriteSingleBit, ReadSingleBit, ReadSingleBit]>;
+def BINV : ALU_rr<0b0110100, 0b001, "binv">,
+ Sched<[WriteSingleBit, ReadSingleBit, ReadSingleBit]>;
+def BEXT : ALU_rr<0b0100100, 0b101, "bext">,
+ Sched<[WriteSingleBit, ReadSingleBit, ReadSingleBit]>;
} // Predicates = [HasStdExtZbs]
let Predicates = [HasStdExtZbp] in {
@@ -346,21 +360,28 @@ def GORC : ALU_rr<0b0010100, 0b101, "gorc">, Sched<[]>;
def GREV : ALU_rr<0b0110100, 0b101, "grev">, Sched<[]>;
} // Predicates = [HasStdExtZbp]
+let Predicates = [HasStdExtZbpOrZbkx] in {
+def XPERMN : ALU_rr<0b0010100, 0b010, "xperm4">, Sched<[]>;
+def XPERMB : ALU_rr<0b0010100, 0b100, "xperm8">, Sched<[]>;
+} // Predicates = [HasStdExtZbpOrZbkx]
+
let Predicates = [HasStdExtZbp] in {
-def XPERMN : ALU_rr<0b0010100, 0b010, "xperm.n">, Sched<[]>;
-def XPERMB : ALU_rr<0b0010100, 0b100, "xperm.b">, Sched<[]>;
def XPERMH : ALU_rr<0b0010100, 0b110, "xperm.h">, Sched<[]>;
} // Predicates = [HasStdExtZbp]
-let Predicates = [HasStdExtZbbOrZbp] in
+let Predicates = [HasStdExtZbbOrZbpOrZbkb] in
def RORI : RVBShift_ri<0b01100, 0b101, OPC_OP_IMM, "rori">,
Sched<[WriteRotateImm, ReadRotateImm]>;
let Predicates = [HasStdExtZbs] in {
-def BCLRI : RVBShift_ri<0b01001, 0b001, OPC_OP_IMM, "bclri">, Sched<[]>;
-def BSETI : RVBShift_ri<0b00101, 0b001, OPC_OP_IMM, "bseti">, Sched<[]>;
-def BINVI : RVBShift_ri<0b01101, 0b001, OPC_OP_IMM, "binvi">, Sched<[]>;
-def BEXTI : RVBShift_ri<0b01001, 0b101, OPC_OP_IMM, "bexti">, Sched<[]>;
+def BCLRI : RVBShift_ri<0b01001, 0b001, OPC_OP_IMM, "bclri">,
+ Sched<[WriteSingleBitImm, ReadSingleBitImm]>;
+def BSETI : RVBShift_ri<0b00101, 0b001, OPC_OP_IMM, "bseti">,
+ Sched<[WriteSingleBitImm, ReadSingleBitImm]>;
+def BINVI : RVBShift_ri<0b01101, 0b001, OPC_OP_IMM, "binvi">,
+ Sched<[WriteSingleBitImm, ReadSingleBitImm]>;
+def BEXTI : RVBShift_ri<0b01001, 0b101, OPC_OP_IMM, "bexti">,
+ Sched<[WriteSingleBitImm, ReadSingleBitImm]>;
} // Predicates = [HasStdExtZbs]
let Predicates = [HasStdExtZbp] in {
@@ -428,11 +449,17 @@ def CRC32CD : RVBUnary<0b0110000, 0b11011, 0b001, OPC_OP_IMM, "crc32c.d">,
Sched<[]>;
let Predicates = [HasStdExtZbc] in {
-def CLMUL : ALU_rr<0b0000101, 0b001, "clmul">, Sched<[]>;
-def CLMULR : ALU_rr<0b0000101, 0b010, "clmulr">, Sched<[]>;
-def CLMULH : ALU_rr<0b0000101, 0b011, "clmulh">, Sched<[]>;
+def CLMULR : ALU_rr<0b0000101, 0b010, "clmulr">,
+ Sched<[WriteCLMUL, ReadCLMUL, ReadCLMUL]>;
} // Predicates = [HasStdExtZbc]
+let Predicates = [HasStdExtZbcOrZbkc] in {
+def CLMUL : ALU_rr<0b0000101, 0b001, "clmul">,
+ Sched<[WriteCLMUL, ReadCLMUL, ReadCLMUL]>;
+def CLMULH : ALU_rr<0b0000101, 0b011, "clmulh">,
+ Sched<[WriteCLMUL, ReadCLMUL, ReadCLMUL]>;
+} // Predicates = [HasStdExtZbcOrZbkc]
+
let Predicates = [HasStdExtZbb] in {
def MIN : ALU_rr<0b0000101, 0b100, "min">,
Sched<[WriteIALU, ReadIALU, ReadIALU]>;
@@ -456,11 +483,13 @@ def BDECOMPRESS : ALU_rr<0b0100100, 0b110, "bdecompress">, Sched<[]>;
def BCOMPRESS : ALU_rr<0b0000100, 0b110, "bcompress">, Sched<[]>;
} // Predicates = [HasStdExtZbe]
-let Predicates = [HasStdExtZbp] in {
+let Predicates = [HasStdExtZbpOrZbkb] in {
def PACK : ALU_rr<0b0000100, 0b100, "pack">, Sched<[]>;
-def PACKU : ALU_rr<0b0100100, 0b100, "packu">, Sched<[]>;
def PACKH : ALU_rr<0b0000100, 0b111, "packh">, Sched<[]>;
-} // Predicates = [HasStdExtZbp]
+} // Predicates = [HasStdExtZbpOrZbkb]
+
+let Predicates = [HasStdExtZbp] in
+def PACKU : ALU_rr<0b0100100, 0b100, "packu">, Sched<[]>;
let Predicates = [HasStdExtZbm, IsRV64] in {
def BMATOR : ALU_rr<0b0000100, 0b011, "bmator">, Sched<[]>;
@@ -468,7 +497,8 @@ def BMATXOR : ALU_rr<0b0100100, 0b011, "bmatxor">, Sched<[]>;
} // Predicates = [HasStdExtZbm, IsRV64]
let Predicates = [HasStdExtZbf] in
-def BFP : ALU_rr<0b0100100, 0b111, "bfp">, Sched<[]>;
+def BFP : ALU_rr<0b0100100, 0b111, "bfp">,
+ Sched<[WriteBFP, ReadBFP, ReadBFP]>;
let Predicates = [HasStdExtZbp] in {
def SHFLI : RVBShfl_ri<0b0000100, 0b001, OPC_OP_IMM, "shfli">, Sched<[]>;
@@ -488,7 +518,7 @@ def SH3ADDUW : ALUW_rr<0b0010000, 0b110, "sh3add.uw">,
Sched<[WriteSHXADD32, ReadSHXADD32, ReadSHXADD32]>;
} // Predicates = [HasStdExtZbb, IsRV64]
-let Predicates = [HasStdExtZbbOrZbp, IsRV64] in {
+let Predicates = [HasStdExtZbbOrZbpOrZbkb, IsRV64] in {
def ROLW : ALUW_rr<0b0110000, 0b001, "rolw">,
Sched<[WriteRotateReg32, ReadRotateReg32, ReadRotateReg32]>;
def RORW : ALUW_rr<0b0110000, 0b101, "rorw">,
@@ -504,7 +534,7 @@ let Predicates = [HasStdExtZbp, IsRV64] in {
def XPERMW : ALU_rr<0b0010100, 0b000, "xperm.w">, Sched<[]>;
} // Predicates = [HasStdExtZbp, IsRV64]
-let Predicates = [HasStdExtZbbOrZbp, IsRV64] in
+let Predicates = [HasStdExtZbbOrZbpOrZbkb, IsRV64] in
def RORIW : RVBShiftW_ri<0b0110000, 0b101, OPC_OP_IMM_32, "roriw">,
Sched<[WriteRotateImm32, ReadRotateImm32]>;
@@ -543,13 +573,15 @@ def BDECOMPRESSW : ALUW_rr<0b0100100, 0b110, "bdecompressw">, Sched<[]>;
def BCOMPRESSW : ALUW_rr<0b0000100, 0b110, "bcompressw">, Sched<[]>;
} // Predicates = [HasStdExtZbe, IsRV64]
-let Predicates = [HasStdExtZbp, IsRV64] in {
+let Predicates = [HasStdExtZbpOrZbkb, IsRV64] in
def PACKW : ALUW_rr<0b0000100, 0b100, "packw">, Sched<[]>;
+
+let Predicates = [HasStdExtZbp, IsRV64] in
def PACKUW : ALUW_rr<0b0100100, 0b100, "packuw">, Sched<[]>;
-} // Predicates = [HasStdExtZbp, IsRV64]
let Predicates = [HasStdExtZbf, IsRV64] in
-def BFPW : ALUW_rr<0b0100100, 0b111, "bfpw">, Sched<[]>;
+def BFPW : ALUW_rr<0b0100100, 0b111, "bfpw">,
+ Sched<[WriteBFP32, ReadBFP32, ReadBFP32]>;
let Predicates = [HasStdExtZbbOrZbp, IsRV32] in {
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
@@ -576,30 +608,30 @@ def ZEXTH_RV64 : RVInstR<0b0000100, 0b100, OPC_OP_32, (outs GPR:$rd),
// causes diagnostics to suggest that Zbp rather than Zbb is required for rev8
// or gorci. Since Zbb is closer to being finalized than Zbp this will be
// misleading to users.
-let Predicates = [HasStdExtZbbOrZbp, IsRV32] in {
-let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
-def REV8_RV32 : RVInstI<0b101, OPC_OP_IMM, (outs GPR:$rd), (ins GPR:$rs1),
- "rev8", "$rd, $rs1">, Sched<[WriteREV8, ReadREV8]> {
- let imm12 = { 0b01101, 0b0011000 };
-}
-} // Predicates = [HasStdExtZbbOrZbp, IsRV32]
+let Predicates = [HasStdExtZbbOrZbpOrZbkb, IsRV32] in {
+def REV8_RV32 : RVBUnary<0b0110100, 0b11000, 0b101, OPC_OP_IMM, "rev8">,
+ Sched<[WriteREV8, ReadREV8]>;
+} // Predicates = [HasStdExtZbbOrZbpOrZbkb, IsRV32]
-let Predicates = [HasStdExtZbbOrZbp, IsRV64] in {
-let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
-def REV8_RV64 : RVInstI<0b101, OPC_OP_IMM, (outs GPR:$rd), (ins GPR:$rs1),
- "rev8", "$rd, $rs1">, Sched<[WriteREV8, ReadREV8]> {
- let imm12 = { 0b01101, 0b0111000 };
-}
-} // Predicates = [HasStdExtZbbOrZbp, IsRV64]
+let Predicates = [HasStdExtZbbOrZbpOrZbkb, IsRV64] in {
+def REV8_RV64 : RVBUnary<0b0110101, 0b11000, 0b101, OPC_OP_IMM, "rev8">,
+ Sched<[WriteREV8, ReadREV8]>;
+} // Predicates = [HasStdExtZbbOrZbpOrZbkb, IsRV64]
let Predicates = [HasStdExtZbbOrZbp] in {
-let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
-def ORCB : RVInstI<0b101, OPC_OP_IMM, (outs GPR:$rd), (ins GPR:$rs1),
- "orc.b", "$rd, $rs1">, Sched<[WriteORCB, ReadORCB]> {
- let imm12 = { 0b00101, 0b0000111 };
-}
+def ORCB : RVBUnary<0b0010100, 0b00111, 0b101, OPC_OP_IMM, "orc.b">,
+ Sched<[WriteORCB, ReadORCB]>;
} // Predicates = [HasStdExtZbbOrZbp]
+let Predicates = [HasStdExtZbpOrZbkb] in
+def BREV8 : RVBUnary<0b0110100, 0b00111, 0b101, OPC_OP_IMM, "brev8">;
+
+let Predicates = [HasStdExtZbpOrZbkb, IsRV32] in {
+def ZIP_RV32 : RVBUnary<0b0000100, 0b01111, 0b001, OPC_OP_IMM, "zip">;
+def UNZIP_RV32 : RVBUnary<0b0000100, 0b01111, 0b101, OPC_OP_IMM, "unzip">;
+} // Predicates = [HasStdExtZbpOrZbkb, IsRV32]
+
//===----------------------------------------------------------------------===//
// Pseudo Instructions
//===----------------------------------------------------------------------===//
@@ -614,11 +646,11 @@ def : InstAlias<"rev2.n $rd, $rs", (GREVI GPR:$rd, GPR:$rs, 0b00010)>;
def : InstAlias<"rev.n $rd, $rs", (GREVI GPR:$rd, GPR:$rs, 0b00011)>;
def : InstAlias<"rev4.b $rd, $rs", (GREVI GPR:$rd, GPR:$rs, 0b00100)>;
def : InstAlias<"rev2.b $rd, $rs", (GREVI GPR:$rd, GPR:$rs, 0b00110)>;
-def : InstAlias<"rev.b $rd, $rs", (GREVI GPR:$rd, GPR:$rs, 0b00111)>;
def : InstAlias<"rev8.h $rd, $rs", (GREVI GPR:$rd, GPR:$rs, 0b01000)>;
def : InstAlias<"rev4.h $rd, $rs", (GREVI GPR:$rd, GPR:$rs, 0b01100)>;
def : InstAlias<"rev2.h $rd, $rs", (GREVI GPR:$rd, GPR:$rs, 0b01110)>;
def : InstAlias<"rev.h $rd, $rs", (GREVI GPR:$rd, GPR:$rs, 0b01111)>;
+def : InstAlias<"rev.b $rd, $rs", (BREV8 GPR:$rd, GPR:$rs)>;
def : InstAlias<"zip.n $rd, $rs", (SHFLI GPR:$rd, GPR:$rs, 0b0001)>;
def : InstAlias<"unzip.n $rd, $rs", (UNSHFLI GPR:$rd, GPR:$rs, 0b0001)>;
@@ -658,8 +690,7 @@ def : InstAlias<"zip4 $rd, $rs", (SHFLI GPR:$rd, GPR:$rs, 0b1100)>;
def : InstAlias<"unzip4 $rd, $rs", (UNSHFLI GPR:$rd, GPR:$rs, 0b1100)>;
def : InstAlias<"zip2 $rd, $rs", (SHFLI GPR:$rd, GPR:$rs, 0b1110)>;
def : InstAlias<"unzip2 $rd, $rs", (UNSHFLI GPR:$rd, GPR:$rs, 0b1110)>;
-def : InstAlias<"zip $rd, $rs", (SHFLI GPR:$rd, GPR:$rs, 0b1111)>;
-def : InstAlias<"unzip $rd, $rs", (UNSHFLI GPR:$rd, GPR:$rs, 0b1111)>;
+// zip and unzip are considered instructions rather than aliases.
def : InstAlias<"orc16 $rd, $rs", (GORCI GPR:$rd, GPR:$rs, 0b10000)>;
def : InstAlias<"orc8 $rd, $rs", (GORCI GPR:$rd, GPR:$rs, 0b11000)>;
@@ -741,6 +772,13 @@ def : InstAlias<"gorcw $rd, $rs1, $shamt",
(GORCIW GPR:$rd, GPR:$rs1, uimm5:$shamt), 0>;
} // Predicates = [HasStdExtZbp, IsRV64]
+// Zbp is unratified and would likely adopt the already ratified Zbkx names.
+// Thus current Zbp instructions are defined as aliases for Zbkx instructions.
+let Predicates = [HasStdExtZbp] in {
+ def : InstAlias<"xperm.b $rd, $rs1, $rs2", (XPERMB GPR:$rd, GPR:$rs1, GPR:$rs2)>;
+ def : InstAlias<"xperm.n $rd, $rs1, $rs2", (XPERMN GPR:$rd, GPR:$rs1, GPR:$rs2)>;
+} // Predicates = [HasStdExtZbp]
+
let Predicates = [HasStdExtZbs] in {
def : InstAlias<"bset $rd, $rs1, $shamt",
(BSETI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt), 0>;
@@ -756,16 +794,16 @@ def : InstAlias<"bext $rd, $rs1, $shamt",
// Codegen patterns
//===----------------------------------------------------------------------===//
-let Predicates = [HasStdExtZbbOrZbp] in {
+let Predicates = [HasStdExtZbbOrZbpOrZbkb] in {
def : Pat<(and GPR:$rs1, (not GPR:$rs2)), (ANDN GPR:$rs1, GPR:$rs2)>;
def : Pat<(or GPR:$rs1, (not GPR:$rs2)), (ORN GPR:$rs1, GPR:$rs2)>;
def : Pat<(xor GPR:$rs1, (not GPR:$rs2)), (XNOR GPR:$rs1, GPR:$rs2)>;
-} // Predicates = [HasStdExtZbbOrZbp]
+} // Predicates = [HasStdExtZbbOrZbpOrZbkb]
-let Predicates = [HasStdExtZbbOrZbp] in {
+let Predicates = [HasStdExtZbbOrZbpOrZbkb] in {
def : PatGprGpr<rotl, ROL>;
def : PatGprGpr<rotr, ROR>;
-} // Predicates = [HasStdExtZbbOrZbp]
+} // Predicates = [HasStdExtZbbOrZbpOrZbkb]
let Predicates = [HasStdExtZbs] in {
def : Pat<(and (not (shiftop<shl> 1, GPR:$rs2)), GPR:$rs1),
@@ -816,7 +854,7 @@ def : Pat<(and GPR:$r, BCLRIANDIMask:$i),
// There's no encoding for roli in the 'B' extension as it can be
// implemented with rori by negating the immediate.
-let Predicates = [HasStdExtZbbOrZbp] in {
+let Predicates = [HasStdExtZbbOrZbpOrZbkb] in {
def : PatGprImm<rotr, RORI, uimmlog2xlen>;
def : Pat<(rotl GPR:$rs1, uimmlog2xlen:$shamt),
(RORI GPR:$rs1, (ImmSubFromXLen uimmlog2xlen:$shamt))>;
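As a quick illustration of the identity relied on here (not part of the patch; the helper names are ours and XLEN = 64 is assumed), a rotate-left by shamt equals a rotate-right by XLEN - shamt, which is why RORI with the subtracted immediate can stand in for the missing roli:

#include <cassert>
#include <cstdint>

static uint64_t rotl64(uint64_t x, unsigned s) { return (x << s) | (x >> (64 - s)); }
static uint64_t rotr64(uint64_t x, unsigned s) { return (x >> s) | (x << (64 - s)); }

int main() {
  uint64_t x = 0x0123456789abcdefULL;
  for (unsigned s = 1; s < 64; ++s)            // s == 0 would shift by 64 (UB), skip it
    assert(rotl64(x, s) == rotr64(x, 64 - s)); // rotl by s == rotr by XLEN - s
}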
@@ -834,19 +872,28 @@ def : PatGprGpr<riscv_unshfl, UNSHFL>;
def : PatGprGpr<int_riscv_xperm_n, XPERMN>;
def : PatGprGpr<int_riscv_xperm_b, XPERMB>;
def : PatGprGpr<int_riscv_xperm_h, XPERMH>;
-def : PatGprGpr<int_riscv_xperm_w, XPERMW>;
def : PatGprImm<riscv_shfl, SHFLI, shfl_uimm>;
def : PatGprImm<riscv_unshfl, UNSHFLI, shfl_uimm>;
def : PatGprImm<riscv_grev, GREVI, uimmlog2xlen>;
def : PatGprImm<riscv_gorc, GORCI, uimmlog2xlen>;
+
+// We treat brev8 as a separate instruction, so match it directly.
+def : Pat<(riscv_grev GPR:$rs1, 7), (BREV8 GPR:$rs1)>;
} // Predicates = [HasStdExtZbp]
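A standalone sketch (not from the patch; it models the draft-Bitmanip grev stages, and the helper names are ours) of why a grev with control value 7 is matched directly to BREV8: the 1-, 2- and 4-bit swap stages together reverse the bit order inside every byte.

#include <cassert>
#include <cstdint>

static uint64_t grev7(uint64_t x) {            // grev stages for control bits 0, 1, 2
  x = ((x & 0x5555555555555555ULL) << 1) | ((x & 0xAAAAAAAAAAAAAAAAULL) >> 1);
  x = ((x & 0x3333333333333333ULL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCULL) >> 2);
  x = ((x & 0x0F0F0F0F0F0F0F0FULL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0ULL) >> 4);
  return x;
}

static uint64_t brev8Ref(uint64_t x) {         // reference: reverse bits within each byte
  uint64_t r = 0;
  for (int byte = 0; byte < 8; ++byte)
    for (int bit = 0; bit < 8; ++bit)
      r |= ((x >> (8 * byte + bit)) & 1) << (8 * byte + 7 - bit);
  return r;
}

int main() {
  assert(grev7(0x0123456789ABCDEFULL) == brev8Ref(0x0123456789ABCDEFULL));
}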
+let Predicates = [HasStdExtZbp, IsRV64] in
+def : PatGprGpr<int_riscv_xperm_w, XPERMW>;
+
let Predicates = [HasStdExtZbp, IsRV32] in {
def : Pat<(i32 (rotr (riscv_grev GPR:$rs1, 24), (i32 16))), (GREVI GPR:$rs1, 8)>;
def : Pat<(i32 (rotl (riscv_grev GPR:$rs1, 24), (i32 16))), (GREVI GPR:$rs1, 8)>;
// We treat rev8 as a separate instruction, so match it directly.
def : Pat<(i32 (riscv_grev GPR:$rs1, 24)), (REV8_RV32 GPR:$rs1)>;
+
+// We treat zip and unzip as separate instructions, so match them directly.
+def : Pat<(i32 (riscv_shfl GPR:$rs1, 15)), (ZIP_RV32 GPR:$rs1)>;
+def : Pat<(i32 (riscv_unshfl GPR:$rs1, 15)), (UNZIP_RV32 GPR:$rs1)>;
} // Predicates = [HasStdExtZbp, IsRV32]
let Predicates = [HasStdExtZbp, IsRV64] in {
@@ -882,21 +929,16 @@ def : Pat<(select GPR:$rs2, GPR:$rs1, GPR:$rs3),
(CMOV GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
} // Predicates = [HasStdExtZbt]
-// fshl and fshr concatenate their operands in the same order. fsr and fsl
-// instruction use different orders. fshl will return its first operand for
-// shift of zero, fshr will return its second operand. fsl and fsr both return
-// $rs1 so the patterns need to have different operand orders.
let Predicates = [HasStdExtZbt] in {
def : Pat<(riscv_fsl GPR:$rs1, GPR:$rs3, GPR:$rs2),
(FSL GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
-def : Pat<(riscv_fsr GPR:$rs3, GPR:$rs1, GPR:$rs2),
+def : Pat<(riscv_fsr GPR:$rs1, GPR:$rs3, GPR:$rs2),
(FSR GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
-
-def : Pat<(fshr GPR:$rs3, GPR:$rs1, uimmlog2xlen:$shamt),
+def : Pat<(riscv_fsr GPR:$rs1, GPR:$rs3, uimmlog2xlen:$shamt),
(FSRI GPR:$rs1, GPR:$rs3, uimmlog2xlen:$shamt)>;
-// We can use FSRI for fshl by immediate if we subtract the immediate from
+// We can use FSRI for FSL by immediate if we subtract the immediate from
// XLen and swap the operands.
-def : Pat<(fshl GPR:$rs3, GPR:$rs1, uimmlog2xlen:$shamt),
+def : Pat<(riscv_fsl GPR:$rs3, GPR:$rs1, uimmlog2xlen:$shamt),
(FSRI GPR:$rs1, GPR:$rs3, (ImmSubFromXLen uimmlog2xlen:$shamt))>;
} // Predicates = [HasStdExtZbt]
@@ -918,31 +960,38 @@ def : PatGprGpr<umin, MINU>;
def : PatGprGpr<umax, MAXU>;
} // Predicates = [HasStdExtZbb]
-let Predicates = [HasStdExtZbb, IsRV32] in {
+let Predicates = [HasStdExtZbbOrZbkb, IsRV32] in {
def : Pat<(i32 (bswap GPR:$rs1)), (REV8_RV32 GPR:$rs1)>;
-} // Predicates = [HasStdExtZbb, IsRV32]
+} // Predicates = [HasStdExtZbbOrZbkb, IsRV32]
-let Predicates = [HasStdExtZbb, IsRV64] in {
+let Predicates = [HasStdExtZbbOrZbkb, IsRV64] in {
def : Pat<(i64 (bswap GPR:$rs1)), (REV8_RV64 GPR:$rs1)>;
-} // Predicates = [HasStdExtZbb, IsRV64]
+} // Predicates = [HasStdExtZbbOrZbkb, IsRV64]
-let Predicates = [HasStdExtZbp, IsRV32] in {
+let Predicates = [HasStdExtZbpOrZbkb, IsRV32] in
def : Pat<(i32 (or (and GPR:$rs1, 0x0000FFFF), (shl GPR:$rs2, (i32 16)))),
(PACK GPR:$rs1, GPR:$rs2)>;
+
+let Predicates = [HasStdExtZbp, IsRV32] in
def : Pat<(i32 (or (and GPR:$rs2, 0xFFFF0000), (srl GPR:$rs1, (i32 16)))),
(PACKU GPR:$rs1, GPR:$rs2)>;
-}
-let Predicates = [HasStdExtZbp, IsRV64] in {
+let Predicates = [HasStdExtZbpOrZbkb, IsRV64] in
def : Pat<(i64 (or (and GPR:$rs1, 0x00000000FFFFFFFF), (shl GPR:$rs2, (i64 32)))),
(PACK GPR:$rs1, GPR:$rs2)>;
+
+let Predicates = [HasStdExtZbp, IsRV64] in
def : Pat<(i64 (or (and GPR:$rs2, 0xFFFFFFFF00000000), (srl GPR:$rs1, (i64 32)))),
(PACKU GPR:$rs1, GPR:$rs2)>;
-}
-let Predicates = [HasStdExtZbp] in
+
+let Predicates = [HasStdExtZbpOrZbkb] in {
def : Pat<(or (and (shl GPR:$rs2, (XLenVT 8)), 0xFFFF),
(and GPR:$rs1, 0x00FF)),
(PACKH GPR:$rs1, GPR:$rs2)>;
+def : Pat<(or (shl (and GPR:$rs2, 0x00FF), (XLenVT 8)),
+ (and GPR:$rs1, 0x00FF)),
+ (PACKH GPR:$rs1, GPR:$rs2)>;
+} // Predicates = [HasStdExtZbpOrZbkb]
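The two patterns above are alternative DAG shapes for the same computation; a minimal check (illustrative only, helper names are ours) that both produce packh's result, namely the low byte of rs1 in bits 7:0 and the low byte of rs2 in bits 15:8:

#include <cassert>
#include <cstdint>

static uint64_t formA(uint64_t rs1, uint64_t rs2) { // (and (shl rs2, 8), 0xFFFF) | (and rs1, 0xFF)
  return ((rs2 << 8) & 0xFFFF) | (rs1 & 0xFF);
}
static uint64_t formB(uint64_t rs1, uint64_t rs2) { // (shl (and rs2, 0xFF), 8) | (and rs1, 0xFF)
  return ((rs2 & 0xFF) << 8) | (rs1 & 0xFF);
}

int main() {
  for (uint64_t a : {0x0ULL, 0x7fULL, 0x1234ULL, ~0ULL})
    for (uint64_t b : {0x0ULL, 0x80ULL, 0xabcdULL, ~0ULL})
      assert(formA(a, b) == formB(a, b));           // both describe PACKH rd, rs1, rs2
}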
let Predicates = [HasStdExtZbbOrZbp, IsRV32] in
def : Pat<(i32 (and GPR:$rs, 0xFFFF)), (ZEXTH_RV32 GPR:$rs)>;
@@ -1045,13 +1094,13 @@ def : Pat<(i64 (add (and (shl GPR:$rs1, (i64 3)), 0x7FFFFFFFF), non_imm12:$rs2))
(SH3ADDUW GPR:$rs1, GPR:$rs2)>;
} // Predicates = [HasStdExtZba, IsRV64]
-let Predicates = [HasStdExtZbbOrZbp, IsRV64] in {
+let Predicates = [HasStdExtZbbOrZbpOrZbkb, IsRV64] in {
def : PatGprGpr<riscv_rolw, ROLW>;
def : PatGprGpr<riscv_rorw, RORW>;
def : PatGprImm<riscv_rorw, RORIW, uimm5>;
def : Pat<(riscv_rolw GPR:$rs1, uimm5:$rs2),
(RORIW GPR:$rs1, (ImmSubFrom32 uimm5:$rs2))>;
-} // Predicates = [HasStdExtZbbOrZbp, IsRV64]
+} // Predicates = [HasStdExtZbbOrZbpOrZbkb, IsRV64]
let Predicates = [HasStdExtZbp, IsRV64] in {
def : Pat<(riscv_rorw (riscv_grevw GPR:$rs1, 24), 16), (GREVIW GPR:$rs1, 8)>;
@@ -1067,10 +1116,12 @@ def : PatGprImm<riscv_gorcw, GORCIW, uimm5>;
let Predicates = [HasStdExtZbt, IsRV64] in {
def : Pat<(riscv_fslw GPR:$rs1, GPR:$rs3, GPR:$rs2),
(FSLW GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
-def : Pat<(riscv_fsrw GPR:$rs3, GPR:$rs1, GPR:$rs2),
+def : Pat<(riscv_fsrw GPR:$rs1, GPR:$rs3, GPR:$rs2),
(FSRW GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
-def : Pat<(riscv_fsrw GPR:$rs3, GPR:$rs1, uimm5:$shamt),
+def : Pat<(riscv_fsrw GPR:$rs1, GPR:$rs3, uimm5:$shamt),
(FSRIW GPR:$rs1, GPR:$rs3, uimm5:$shamt)>;
+// We can use FSRIW for FSLW by immediate if we subtract the immediate from
+// 32 and swap the operands.
def : Pat<(riscv_fslw GPR:$rs3, GPR:$rs1, uimm5:$shamt),
(FSRIW GPR:$rs1, GPR:$rs3, (ImmSubFrom32 uimm5:$shamt))>;
} // Predicates = [HasStdExtZbt, IsRV64]
@@ -1081,7 +1132,7 @@ def : PatGpr<riscv_ctzw, CTZW>;
def : Pat<(i64 (ctpop (i64 (zexti32 (i64 GPR:$rs1))))), (CPOPW GPR:$rs1)>;
} // Predicates = [HasStdExtZbb, IsRV64]
-let Predicates = [HasStdExtZbp, IsRV64] in {
+let Predicates = [HasStdExtZbpOrZbkb, IsRV64] in {
def : Pat<(i64 (sext_inreg (or (shl GPR:$rs2, (i64 16)),
(and GPR:$rs1, 0x000000000000FFFF)),
i32)),
@@ -1089,16 +1140,21 @@ def : Pat<(i64 (sext_inreg (or (shl GPR:$rs2, (i64 16)),
def : Pat<(i64 (or (sext_inreg (shl GPR:$rs2, (i64 16)), i32),
(and GPR:$rs1, 0x000000000000FFFF))),
(PACKW GPR:$rs1, GPR:$rs2)>;
+}
+
+let Predicates = [HasStdExtZbp, IsRV64] in
def : Pat<(i64 (or (and (assertsexti32 GPR:$rs2), 0xFFFFFFFFFFFF0000),
(srl (and GPR:$rs1, 0xFFFFFFFF), (i64 16)))),
(PACKUW GPR:$rs1, GPR:$rs2)>;
-} // Predicates = [HasStdExtZbp, IsRV64]
-let Predicates = [HasStdExtZbc] in {
+
+let Predicates = [HasStdExtZbcOrZbkc] in {
def : PatGprGpr<int_riscv_clmul, CLMUL>;
def : PatGprGpr<int_riscv_clmulh, CLMULH>;
+} // Predicates = [HasStdExtZbcOrZbkc]
+
+let Predicates = [HasStdExtZbc] in
def : PatGprGpr<int_riscv_clmulr, CLMULR>;
-} // Predicates = [HasStdExtZbc]
let Predicates = [HasStdExtZbe] in {
def : PatGprGpr<riscv_bcompress, BCOMPRESS>;
@@ -1123,3 +1179,23 @@ let Predicates = [HasStdExtZbr, IsRV64] in {
def : PatGpr<int_riscv_crc32_d, CRC32D>;
def : PatGpr<int_riscv_crc32c_d, CRC32CD>;
} // Predicates = [HasStdExtZbr, IsRV64]
+
+let Predicates = [HasStdExtZbf] in
+def : PatGprGpr<riscv_bfp, BFP>;
+
+let Predicates = [HasStdExtZbf, IsRV64] in
+def : PatGprGpr<riscv_bfpw, BFPW>;
+
+let Predicates = [HasStdExtZbkb] in {
+def : PatGpr<int_riscv_brev8, BREV8>;
+} // Predicates = [HasStdExtZbkb]
+
+let Predicates = [HasStdExtZbkb, IsRV32] in {
+def : PatGpr<int_riscv_zip, ZIP_RV32>;
+def : PatGpr<int_riscv_unzip, UNZIP_RV32>;
+} // Predicates = [HasStdExtZbkb, IsRV32]
+
+let Predicates = [HasStdExtZbkx] in {
+def : PatGprGpr<int_riscv_xperm4, XPERMN>;
+def : PatGprGpr<int_riscv_xperm8, XPERMB>;
+} // Predicates = [HasStdExtZbkx]
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
index 663e44813899..dfd0c74ee26c 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
@@ -1,4 +1,4 @@
-//===-- RISCVInstrInfoFH.td - RISC-V 'FH' instructions -----*- tablegen -*-===//
+//===-- RISCVInstrInfoZfh.td - RISC-V 'Zfh' instructions ---*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -7,9 +7,7 @@
//===----------------------------------------------------------------------===//
//
// This file describes the RISC-V instructions from the standard 'Zfh'
-// half-precision floating-point extension, version 0.1.
-// This version is still experimental as the 'Zfh' extension hasn't been
-// ratified yet.
+// half-precision floating-point extension, version 1.0.
//
//===----------------------------------------------------------------------===//
@@ -32,20 +30,12 @@ def riscv_fmv_x_anyexth
//===----------------------------------------------------------------------===//
let Predicates = [HasStdExtZfhmin] in {
-let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in
-def FLH : RVInstI<0b001, OPC_LOAD_FP, (outs FPR16:$rd),
- (ins GPR:$rs1, simm12:$imm12),
- "flh", "$rd, ${imm12}(${rs1})">,
- Sched<[WriteFLD16, ReadFMemBase]>;
+def FLH : FPLoad_r<0b001, "flh", FPR16, WriteFLD16>;
// Operands for stores are in the order srcreg, base, offset rather than
// reflecting the order these fields are specified in the instruction
// encoding.
-let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in
-def FSH : RVInstS<0b001, OPC_STORE_FP, (outs),
- (ins FPR16:$rs2, GPR:$rs1, simm12:$imm12),
- "fsh", "$rs2, ${imm12}(${rs1})">,
- Sched<[WriteFST16, ReadStoreData, ReadFMemBase]>;
+def FSH : FPStore_r<0b001, "fsh", FPR16, WriteFST16>;
} // Predicates = [HasStdExtZfhmin]
let Predicates = [HasStdExtZfh] in {
@@ -190,6 +180,10 @@ def : InstAlias<"fge.h $rd, $rs, $rt",
let Predicates = [HasStdExtZfhmin] in {
def PseudoFLH : PseudoFloatLoad<"flh", FPR16>;
def PseudoFSH : PseudoStore<"fsh", FPR16>;
+let usesCustomInserter = 1 in {
+def PseudoQuietFLE_H : PseudoQuietFCMP<FPR16>;
+def PseudoQuietFLT_H : PseudoQuietFCMP<FPR16>;
+}
} // Predicates = [HasStdExtZfhmin]
//===----------------------------------------------------------------------===//
@@ -207,6 +201,7 @@ let Predicates = [HasStdExtZfh] in {
/// Float constants
def : Pat<(f16 (fpimm0)), (FMV_H_X X0)>;
+def : Pat<(f16 (fpimmneg0)), (FSGNJN_H (FMV_H_X X0), (FMV_H_X X0))>;
/// Float conversion operations
@@ -254,13 +249,34 @@ def : PatFpr16Fpr16<fminnum, FMIN_H>;
def : PatFpr16Fpr16<fmaxnum, FMAX_H>;
/// Setcc
-
-def : PatFpr16Fpr16<seteq, FEQ_H>;
-def : PatFpr16Fpr16<setoeq, FEQ_H>;
-def : PatFpr16Fpr16<setlt, FLT_H>;
-def : PatFpr16Fpr16<setolt, FLT_H>;
-def : PatFpr16Fpr16<setle, FLE_H>;
-def : PatFpr16Fpr16<setole, FLE_H>;
+// FIXME: SETEQ/SETLT/SETLE imply nonans, so can we pick better instructions
+// for the strict versions of those?
+
+// Match non-signaling FEQ_H
+def : PatSetCC<FPR16, any_fsetcc, SETEQ, FEQ_H>;
+def : PatSetCC<FPR16, any_fsetcc, SETOEQ, FEQ_H>;
+def : PatSetCC<FPR16, strict_fsetcc, SETLT, PseudoQuietFLT_H>;
+def : PatSetCC<FPR16, strict_fsetcc, SETOLT, PseudoQuietFLT_H>;
+def : PatSetCC<FPR16, strict_fsetcc, SETLE, PseudoQuietFLE_H>;
+def : PatSetCC<FPR16, strict_fsetcc, SETOLE, PseudoQuietFLE_H>;
+
+// Match signaling FEQ_H
+def : Pat<(strict_fsetccs FPR16:$rs1, FPR16:$rs2, SETEQ),
+ (AND (FLE_H $rs1, $rs2),
+ (FLE_H $rs2, $rs1))>;
+def : Pat<(strict_fsetccs FPR16:$rs1, FPR16:$rs2, SETOEQ),
+ (AND (FLE_H $rs1, $rs2),
+ (FLE_H $rs2, $rs1))>;
+// If both operands are the same, use a single FLE.
+def : Pat<(strict_fsetccs FPR16:$rs1, FPR16:$rs1, SETEQ),
+ (FLE_H $rs1, $rs1)>;
+def : Pat<(strict_fsetccs FPR16:$rs1, FPR16:$rs1, SETOEQ),
+ (FLE_H $rs1, $rs1)>;
+
+def : PatSetCC<FPR16, any_fsetccs, SETLT, FLT_H>;
+def : PatSetCC<FPR16, any_fsetccs, SETOLT, FLT_H>;
+def : PatSetCC<FPR16, any_fsetccs, SETLE, FLE_H>;
+def : PatSetCC<FPR16, any_fsetccs, SETOLE, FLE_H>;
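A small value-level illustration (not part of the patch, written with double for brevity) of the signaling-equality expansion above: for non-NaN operands, (a <= b) && (b <= a) holds exactly when a == b, and both comparisons are false when either input is NaN, so the AND of two FLE results reproduces the quiet equality value while the signaling FLE compares additionally raise the invalid flag on any NaN input.

#include <cassert>
#include <cmath>

static bool feqViaFle(double a, double b) { return (a <= b) && (b <= a); }

int main() {
  assert(feqViaFle(1.0, 1.0) == (1.0 == 1.0));
  assert(feqViaFle(1.0, 2.0) == (1.0 == 2.0));
  double nan = NAN;
  assert(feqViaFle(nan, 1.0) == (nan == 1.0)); // both sides false for NaN
}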
def Select_FPR16_Using_CC_GPR : SelectCC_rrirr<FPR16, GPR>;
} // Predicates = [HasStdExtZfh]
@@ -291,14 +307,14 @@ def : Pat<(i32 (any_fp_to_sint FPR16:$rs1)), (FCVT_W_H $rs1, 0b001)>;
def : Pat<(i32 (any_fp_to_uint FPR16:$rs1)), (FCVT_WU_H $rs1, 0b001)>;
// Saturating float->[u]int32.
-def : Pat<(i32 (riscv_fcvt_x_rtz FPR16:$rs1)), (FCVT_W_H $rs1, 0b001)>;
-def : Pat<(i32 (riscv_fcvt_xu_rtz FPR16:$rs1)), (FCVT_WU_H $rs1, 0b001)>;
+def : Pat<(i32 (riscv_fcvt_x FPR16:$rs1, timm:$frm)), (FCVT_W_H $rs1, timm:$frm)>;
+def : Pat<(i32 (riscv_fcvt_xu FPR16:$rs1, timm:$frm)), (FCVT_WU_H $rs1, timm:$frm)>;
// half->int32 with current rounding mode.
-def : Pat<(i32 (lrint FPR16:$rs1)), (FCVT_W_H $rs1, 0b111)>;
+def : Pat<(i32 (any_lrint FPR16:$rs1)), (FCVT_W_H $rs1, 0b111)>;
// half->int32 rounded to nearest with ties rounded away from zero.
-def : Pat<(i32 (lround FPR16:$rs1)), (FCVT_W_H $rs1, 0b100)>;
+def : Pat<(i32 (any_lround FPR16:$rs1)), (FCVT_W_H $rs1, 0b100)>;
// [u]int->half. Match GCC and default to using dynamic rounding mode.
def : Pat<(any_sint_to_fp (i32 GPR:$rs1)), (FCVT_H_W $rs1, 0b111)>;
@@ -309,24 +325,24 @@ let Predicates = [HasStdExtZfh, IsRV64] in {
// Use target specific isd nodes to help us remember the result is sign
// extended. Matching sext_inreg+fptoui/fptosi may cause the conversion to be
// duplicated if it has another user that didn't need the sign_extend.
-def : Pat<(riscv_any_fcvt_w_rtz_rv64 FPR16:$rs1), (FCVT_W_H $rs1, 0b001)>;
-def : Pat<(riscv_any_fcvt_wu_rtz_rv64 FPR16:$rs1), (FCVT_WU_H $rs1, 0b001)>;
+def : Pat<(riscv_any_fcvt_w_rv64 FPR16:$rs1, timm:$frm), (FCVT_W_H $rs1, timm:$frm)>;
+def : Pat<(riscv_any_fcvt_wu_rv64 FPR16:$rs1, timm:$frm), (FCVT_WU_H $rs1, timm:$frm)>;
// half->[u]int64. Round-to-zero must be used.
def : Pat<(i64 (any_fp_to_sint FPR16:$rs1)), (FCVT_L_H $rs1, 0b001)>;
def : Pat<(i64 (any_fp_to_uint FPR16:$rs1)), (FCVT_LU_H $rs1, 0b001)>;
// Saturating float->[u]int64.
-def : Pat<(i64 (riscv_fcvt_x_rtz FPR16:$rs1)), (FCVT_L_H $rs1, 0b001)>;
-def : Pat<(i64 (riscv_fcvt_xu_rtz FPR16:$rs1)), (FCVT_LU_H $rs1, 0b001)>;
+def : Pat<(i64 (riscv_fcvt_x FPR16:$rs1, timm:$frm)), (FCVT_L_H $rs1, timm:$frm)>;
+def : Pat<(i64 (riscv_fcvt_xu FPR16:$rs1, timm:$frm)), (FCVT_LU_H $rs1, timm:$frm)>;
// half->int64 with current rounding mode.
-def : Pat<(i64 (lrint FPR16:$rs1)), (FCVT_L_H $rs1, 0b111)>;
-def : Pat<(i64 (llrint FPR16:$rs1)), (FCVT_L_H $rs1, 0b111)>;
+def : Pat<(i64 (any_lrint FPR16:$rs1)), (FCVT_L_H $rs1, 0b111)>;
+def : Pat<(i64 (any_llrint FPR16:$rs1)), (FCVT_L_H $rs1, 0b111)>;
// half->int64 rounded to nearest with ties rounded away from zero.
-def : Pat<(i64 (lround FPR16:$rs1)), (FCVT_L_H $rs1, 0b100)>;
-def : Pat<(i64 (llround FPR16:$rs1)), (FCVT_L_H $rs1, 0b100)>;
+def : Pat<(i64 (any_lround FPR16:$rs1)), (FCVT_L_H $rs1, 0b100)>;
+def : Pat<(i64 (any_llround FPR16:$rs1)), (FCVT_L_H $rs1, 0b100)>;
// [u]int->fp. Match GCC and default to using dynamic rounding mode.
def : Pat<(any_sint_to_fp (i64 (sexti32 (i64 GPR:$rs1)))), (FCVT_H_W $rs1, 0b111)>;
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoZk.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoZk.td
new file mode 100644
index 000000000000..4a41cddedc71
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoZk.td
@@ -0,0 +1,203 @@
+//===- RISCVInstrInfoZk.td - RISC-V Scalar Crypto instructions - tablegen -*===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the RISC-V instructions from the standard 'Zk',
+// Scalar Cryptography Instructions extension, version 1.0.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Operand and SDNode transformation definitions.
+//===----------------------------------------------------------------------===//
+
+def RnumArg : AsmOperandClass {
+ let Name = "RnumArg";
+ let RenderMethod = "addImmOperands";
+ let DiagnosticType = "InvalidRnumArg";
+}
+
+def rnum : Operand<i32>, TImmLeaf<i32, [{return (Imm >= 0 && Imm <= 10);}]> {
+ let ParserMatchClass = RnumArg;
+ let EncoderMethod = "getImmOpValue";
+ let DecoderMethod = "decodeUImmOperand<4>";
+ let OperandType = "OPERAND_RVKRNUM";
+ let OperandNamespace = "RISCVOp";
+}
+
+def byteselect : Operand<i8>, TImmLeaf<i8, [{return isUInt<2>(Imm);}]> {
+ let ParserMatchClass = UImmAsmOperand<2>;
+ let DecoderMethod = "decodeUImmOperand<2>";
+ let OperandType = "OPERAND_UIMM2";
+ let OperandNamespace = "RISCVOp";
+}
+
+//===----------------------------------------------------------------------===//
+// Instruction class templates
+//===----------------------------------------------------------------------===//
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+class RVKUnary<bits<12> imm12_in, bits<3> funct3, string opcodestr>
+ : RVInstI<funct3, OPC_OP_IMM, (outs GPR:$rd), (ins GPR:$rs1),
+ opcodestr, "$rd, $rs1">{
+ let imm12 = imm12_in;
+}
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+class RVKByteSelect<bits<5> funct5, string opcodestr>
+ : RVInstR<{0b00, funct5}, 0b000, OPC_OP, (outs GPR:$rd),
+ (ins GPR:$rs1, GPR:$rs2, byteselect:$bs),
+ opcodestr, "$rd, $rs1, $rs2, $bs">{
+ bits<2> bs;
+ let Inst{31-30} = bs;
+}
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+class RVKUnary_rnum<bits<7> funct7, bits<3> funct3, string opcodestr>
+ : RVInstI<funct3, OPC_OP_IMM, (outs GPR:$rd), (ins GPR:$rs1, rnum:$rnum),
+ opcodestr, "$rd, $rs1, $rnum">{
+ bits<4> rnum;
+ let Inst{31-25} = funct7;
+ let Inst{24} = 1;
+ let Inst{23-20} = rnum;
+}
+
+//===----------------------------------------------------------------------===//
+// Instructions
+//===----------------------------------------------------------------------===//
+let Predicates = [HasStdExtZknd, IsRV32] in {
+def AES32DSI : RVKByteSelect<0b10101, "aes32dsi">;
+def AES32DSMI : RVKByteSelect<0b10111, "aes32dsmi">;
+} // Predicates = [HasStdExtZknd, IsRV32]
+
+let Predicates = [HasStdExtZknd, IsRV64] in {
+def AES64DS : ALU_rr<0b0011101, 0b000, "aes64ds">;
+def AES64DSM : ALU_rr<0b0011111, 0b000, "aes64dsm">;
+
+def AES64IM : RVKUnary<0b001100000000, 0b001, "aes64im">;
+} // Predicates = [HasStdExtZknd, IsRV64]
+
+let Predicates = [HasStdExtZkndOrZkne, IsRV64] in {
+def AES64KS2 : ALU_rr<0b0111111, 0b000, "aes64ks2">;
+
+def AES64KS1I : RVKUnary_rnum<0b0011000, 0b001, "aes64ks1i">;
+} // Predicates = [HasStdExtZkndOrZkne, IsRV64]
+
+let Predicates = [HasStdExtZkne, IsRV32] in {
+def AES32ESI : RVKByteSelect<0b10001, "aes32esi">;
+def AES32ESMI : RVKByteSelect<0b10011, "aes32esmi">;
+} // Predicates = [HasStdExtZkne, IsRV32]
+
+let Predicates = [HasStdExtZkne, IsRV64] in {
+def AES64ES : ALU_rr<0b0011001, 0b000, "aes64es">;
+def AES64ESM : ALU_rr<0b0011011, 0b000, "aes64esm">;
+} // Predicates = [HasStdExtZkne, IsRV64]
+
+let Predicates = [HasStdExtZknh] in {
+def SHA256SIG0 : RVKUnary<0b000100000010, 0b001, "sha256sig0">;
+def SHA256SIG1 : RVKUnary<0b000100000011, 0b001, "sha256sig1">;
+def SHA256SUM0 : RVKUnary<0b000100000000, 0b001, "sha256sum0">;
+def SHA256SUM1 : RVKUnary<0b000100000001, 0b001, "sha256sum1">;
+} // Predicates = [HasStdExtZknh]
+
+let Predicates = [HasStdExtZknh, IsRV32] in {
+def SHA512SIG0H : ALU_rr<0b0101110, 0b000, "sha512sig0h">;
+def SHA512SIG0L : ALU_rr<0b0101010, 0b000, "sha512sig0l">;
+def SHA512SIG1H : ALU_rr<0b0101111, 0b000, "sha512sig1h">;
+def SHA512SIG1L : ALU_rr<0b0101011, 0b000, "sha512sig1l">;
+def SHA512SUM0R : ALU_rr<0b0101000, 0b000, "sha512sum0r">;
+def SHA512SUM1R : ALU_rr<0b0101001, 0b000, "sha512sum1r">;
+} // Predicates = [HasStdExtZknh, IsRV32]
+
+let Predicates = [HasStdExtZknh, IsRV64] in {
+def SHA512SIG0 : RVKUnary<0b000100000110, 0b001, "sha512sig0">;
+def SHA512SIG1 : RVKUnary<0b000100000111, 0b001, "sha512sig1">;
+def SHA512SUM0 : RVKUnary<0b000100000100, 0b001, "sha512sum0">;
+def SHA512SUM1 : RVKUnary<0b000100000101, 0b001, "sha512sum1">;
+} // Predicates = [HasStdExtZknh, IsRV64]
+
+let Predicates = [HasStdExtZksed] in {
+def SM4ED : RVKByteSelect<0b11000, "sm4ed">;
+def SM4KS : RVKByteSelect<0b11010, "sm4ks">;
+} // Predicates = [HasStdExtZksed]
+
+let Predicates = [HasStdExtZksh] in {
+def SM3P0 : RVKUnary<0b000100001000, 0b001, "sm3p0">;
+def SM3P1 : RVKUnary<0b000100001001, 0b001, "sm3p1">;
+} // Predicates = [HasStdExtZksh]
+
+//===----------------------------------------------------------------------===//
+// Codegen patterns
+//===----------------------------------------------------------------------===//
+
+class PatGprGprByteSelect<SDPatternOperator OpNode, RVInst Inst>
+ : Pat<(OpNode GPR:$rs1, GPR:$rs2, i8:$imm),
+ (Inst GPR:$rs1, GPR:$rs2, byteselect:$imm)>;
+
+// Zknd
+let Predicates = [HasStdExtZknd, IsRV32] in {
+def : PatGprGprByteSelect<int_riscv_aes32dsi, AES32DSI>;
+def : PatGprGprByteSelect<int_riscv_aes32dsmi, AES32DSMI>;
+} // Predicates = [HasStdExtZknd, IsRV32]
+
+let Predicates = [HasStdExtZknd, IsRV64] in {
+def : PatGprGpr<int_riscv_aes64ds, AES64DS>;
+def : PatGprGpr<int_riscv_aes64dsm, AES64DSM>;
+def : PatGpr<int_riscv_aes64im, AES64IM>;
+} // Predicates = [HasStdExtZknd, IsRV64]
+
+let Predicates = [HasStdExtZkndOrZkne, IsRV64] in {
+def : PatGprGpr<int_riscv_aes64ks2, AES64KS2>;
+def : Pat<(int_riscv_aes64ks1i GPR:$rs1, i32:$rnum),
+ (AES64KS1I GPR:$rs1, rnum:$rnum)>;
+} // Predicates = [HasStdExtZkndOrZkne, IsRV64]
+
+// Zkne
+let Predicates = [HasStdExtZkne, IsRV32] in {
+def : PatGprGprByteSelect<int_riscv_aes32esi, AES32ESI>;
+def : PatGprGprByteSelect<int_riscv_aes32esmi, AES32ESMI>;
+} // Predicates = [HasStdExtZkne, IsRV32]
+
+let Predicates = [HasStdExtZkne, IsRV64] in {
+def : PatGprGpr<int_riscv_aes64es, AES64ES>;
+def : PatGprGpr<int_riscv_aes64esm, AES64ESM>;
+} // Predicates = [HasStdExtZkne, IsRV64]
+
+// Zknh
+let Predicates = [HasStdExtZknh] in {
+def : PatGpr<int_riscv_sha256sig0, SHA256SIG0>;
+def : PatGpr<int_riscv_sha256sig1, SHA256SIG1>;
+def : PatGpr<int_riscv_sha256sum0, SHA256SUM0>;
+def : PatGpr<int_riscv_sha256sum1, SHA256SUM1>;
+} // Predicates = [HasStdExtZknh]
+
+let Predicates = [HasStdExtZknh, IsRV32] in {
+def : PatGprGpr<int_riscv_sha512sig0l, SHA512SIG0L>;
+def : PatGprGpr<int_riscv_sha512sig0h, SHA512SIG0H>;
+def : PatGprGpr<int_riscv_sha512sig1l, SHA512SIG1L>;
+def : PatGprGpr<int_riscv_sha512sig1h, SHA512SIG1H>;
+def : PatGprGpr<int_riscv_sha512sum0r, SHA512SUM0R>;
+def : PatGprGpr<int_riscv_sha512sum1r, SHA512SUM1R>;
+} // Predicates = [HasStdExtZknh, IsRV32]
+
+let Predicates = [HasStdExtZknh, IsRV64] in {
+def : PatGpr<int_riscv_sha512sig0, SHA512SIG0>;
+def : PatGpr<int_riscv_sha512sig1, SHA512SIG1>;
+def : PatGpr<int_riscv_sha512sum0, SHA512SUM0>;
+def : PatGpr<int_riscv_sha512sum1, SHA512SUM1>;
+} // Predicates = [HasStdExtZknh, IsRV64]
+
+// Zksed
+let Predicates = [HasStdExtZksed] in {
+def : PatGprGprByteSelect<int_riscv_sm4ks, SM4KS>;
+def : PatGprGprByteSelect<int_riscv_sm4ed, SM4ED>;
+} // Predicates = [HasStdExtZksed]
+
+// Zksh
+let Predicates = [HasStdExtZksh] in {
+def : PatGpr<int_riscv_sm3p0, SM3P0>;
+def : PatGpr<int_riscv_sm3p1, SM3P1>;
+} // Predicates = [HasStdExtZksh]
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstructionSelector.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstructionSelector.cpp
index 4d1f47da209d..8dfd71ac0b6b 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstructionSelector.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstructionSelector.cpp
@@ -69,8 +69,7 @@ private:
RISCVInstructionSelector::RISCVInstructionSelector(
const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
const RISCVRegisterBankInfo &RBI)
- : InstructionSelector(), STI(STI), TII(*STI.getInstrInfo()),
- TRI(*STI.getRegisterInfo()), RBI(RBI),
+ : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "RISCVGenGlobalISel.inc"
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
index dd084f53e511..c167c095521a 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
@@ -172,7 +172,7 @@ static bool lowerRISCVVMachineInstrToMCInst(const MachineInstr *MI,
default:
llvm_unreachable("Unknown operand type");
case MachineOperand::MO_Register: {
- unsigned Reg = MO.getReg();
+ Register Reg = MO.getReg();
if (RISCV::VRM2RegClass.contains(Reg) ||
RISCV::VRM4RegClass.contains(Reg) ||
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 9094dff1dda1..35363bf37c0d 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -347,3 +347,8 @@ void RISCVRegisterInfo::getOffsetOpcodes(const StackOffset &Offset,
Ops.push_back(dwarf::DW_OP_minus);
}
}
+
+unsigned
+RISCVRegisterInfo::getRegisterCostTableIndex(const MachineFunction &MF) const {
+ return MF.getSubtarget<RISCVSubtarget>().hasStdExtC() ? 1 : 0;
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.h b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
index 2b2bbdfbdf32..9e0ef7902210 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
@@ -66,6 +66,8 @@ struct RISCVRegisterInfo : public RISCVGenRegisterInfo {
void getOffsetOpcodes(const StackOffset &Offset,
SmallVectorImpl<uint64_t> &Ops) const override;
+
+ unsigned getRegisterCostTableIndex(const MachineFunction &MF) const override;
};
}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
index 20903b317180..8c1c03b51c24 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -73,12 +73,11 @@ def sub_vrm1_7 : ComposedSubRegIndex<sub_vrm2_3, sub_vrm1_1>;
// are not part of GPRC, the most restrictive register class used by the
// compressed instruction set. This will influence the greedy register
// allocator to reduce the use of registers that can't be encoded in 16 bit
-// instructions. This affects register allocation even when compressed
-// instruction isn't targeted, we see no major negative codegen impact.
+// instructions.
let RegAltNameIndices = [ABIRegAltName] in {
def X0 : RISCVReg<0, "x0", ["zero"]>, DwarfRegNum<[0]>;
- let CostPerUse = [1] in {
+ let CostPerUse = [0, 1] in {
def X1 : RISCVReg<1, "x1", ["ra"]>, DwarfRegNum<[1]>;
def X2 : RISCVReg<2, "x2", ["sp"]>, DwarfRegNum<[2]>;
def X3 : RISCVReg<3, "x3", ["gp"]>, DwarfRegNum<[3]>;
@@ -95,7 +94,7 @@ let RegAltNameIndices = [ABIRegAltName] in {
def X13 : RISCVReg<13,"x13", ["a3"]>, DwarfRegNum<[13]>;
def X14 : RISCVReg<14,"x14", ["a4"]>, DwarfRegNum<[14]>;
def X15 : RISCVReg<15,"x15", ["a5"]>, DwarfRegNum<[15]>;
- let CostPerUse = [1] in {
+ let CostPerUse = [0, 1] in {
def X16 : RISCVReg<16,"x16", ["a6"]>, DwarfRegNum<[16]>;
def X17 : RISCVReg<17,"x17", ["a7"]>, DwarfRegNum<[17]>;
def X18 : RISCVReg<18,"x18", ["s2"]>, DwarfRegNum<[18]>;
@@ -138,27 +137,11 @@ def GPRX0 : RegisterClass<"RISCV", [XLenVT], 32, (add X0)> {
let RegInfos = XLenRI;
}
-// The order of registers represents the preferred allocation sequence.
-// Registers are listed in the order caller-save, callee-save, specials.
-def GPRNoX0 : RegisterClass<"RISCV", [XLenVT], 32, (add
- (sequence "X%u", 10, 17),
- (sequence "X%u", 5, 7),
- (sequence "X%u", 28, 31),
- (sequence "X%u", 8, 9),
- (sequence "X%u", 18, 27),
- (sequence "X%u", 1, 4)
- )> {
+def GPRNoX0 : RegisterClass<"RISCV", [XLenVT], 32, (sub GPR, X0)> {
let RegInfos = XLenRI;
}
-def GPRNoX0X2 : RegisterClass<"RISCV", [XLenVT], 32, (add
- (sequence "X%u", 10, 17),
- (sequence "X%u", 5, 7),
- (sequence "X%u", 28, 31),
- (sequence "X%u", 8, 9),
- (sequence "X%u", 18, 27),
- X1, X3, X4
- )> {
+def GPRNoX0X2 : RegisterClass<"RISCV", [XLenVT], 32, (sub GPR, X0, X2)> {
let RegInfos = XLenRI;
}
@@ -166,13 +149,7 @@ def GPRNoX0X2 : RegisterClass<"RISCV", [XLenVT], 32, (add
// stack on some microarchitectures. Also remove the reserved registers X0, X2,
// X3, and X4 as it reduces the number of register classes that get synthesized
// by tablegen.
-def GPRJALR : RegisterClass<"RISCV", [XLenVT], 32, (add
- (sequence "X%u", 10, 17),
- (sequence "X%u", 6, 7),
- (sequence "X%u", 28, 31),
- (sequence "X%u", 8, 9),
- (sequence "X%u", 18, 27)
- )> {
+def GPRJALR : RegisterClass<"RISCV", [XLenVT], 32, (sub GPR, (sequence "X%u", 0, 5))> {
let RegInfos = XLenRI;
}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSExtWRemoval.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSExtWRemoval.cpp
new file mode 100644
index 000000000000..12ec52925798
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSExtWRemoval.cpp
@@ -0,0 +1,278 @@
+//===-------------- RISCVSExtWRemoval.cpp - MI sext.w Removal -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// This pass removes unneeded sext.w instructions at the MI level.
+//
+//===---------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "RISCVSubtarget.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "riscv-sextw-removal"
+
+STATISTIC(NumRemovedSExtW, "Number of removed sign-extensions");
+
+static cl::opt<bool> DisableSExtWRemoval("riscv-disable-sextw-removal",
+ cl::desc("Disable removal of sext.w"),
+ cl::init(false), cl::Hidden);
+namespace {
+
+class RISCVSExtWRemoval : public MachineFunctionPass {
+public:
+ static char ID;
+
+ RISCVSExtWRemoval() : MachineFunctionPass(ID) {
+ initializeRISCVSExtWRemovalPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ StringRef getPassName() const override { return "RISCV sext.w Removal"; }
+};
+
+} // end anonymous namespace
+
+char RISCVSExtWRemoval::ID = 0;
+INITIALIZE_PASS(RISCVSExtWRemoval, DEBUG_TYPE, "RISCV sext.w Removal", false,
+ false)
+
+FunctionPass *llvm::createRISCVSExtWRemovalPass() {
+ return new RISCVSExtWRemoval();
+}
+
+// This function returns true if the machine instruction always outputs a value
+// where bits 63:32 match bit 31.
+// TODO: Allocate a bit in TSFlags for the W instructions?
+// TODO: Add other W instructions.
+static bool isSignExtendingOpW(const MachineInstr &MI) {
+ switch (MI.getOpcode()) {
+ case RISCV::LUI:
+ case RISCV::LW:
+ case RISCV::ADDW:
+ case RISCV::ADDIW:
+ case RISCV::SUBW:
+ case RISCV::MULW:
+ case RISCV::SLLW:
+ case RISCV::SLLIW:
+ case RISCV::SRAW:
+ case RISCV::SRAIW:
+ case RISCV::SRLW:
+ case RISCV::SRLIW:
+ case RISCV::DIVW:
+ case RISCV::DIVUW:
+ case RISCV::REMW:
+ case RISCV::REMUW:
+ case RISCV::ROLW:
+ case RISCV::RORW:
+ case RISCV::RORIW:
+ case RISCV::CLZW:
+ case RISCV::CTZW:
+ case RISCV::CPOPW:
+ case RISCV::FCVT_W_H:
+ case RISCV::FCVT_WU_H:
+ case RISCV::FCVT_W_S:
+ case RISCV::FCVT_WU_S:
+ case RISCV::FCVT_W_D:
+ case RISCV::FCVT_WU_D:
+ // The following aren't W instructions, but are either sign extended from a
+ // smaller size or put zeros in bits 63:31.
+ case RISCV::LBU:
+ case RISCV::LHU:
+ case RISCV::LB:
+ case RISCV::LH:
+ case RISCV::SLT:
+ case RISCV::SLTI:
+ case RISCV::SLTU:
+ case RISCV::SLTIU:
+ case RISCV::SEXTB:
+ case RISCV::SEXTH:
+ case RISCV::ZEXTH_RV64:
+ return true;
+ // Shifting right sufficiently makes the value 32-bit sign-extended.
+ case RISCV::SRAI:
+ return MI.getOperand(2).getImm() >= 32;
+ case RISCV::SRLI:
+ return MI.getOperand(2).getImm() > 32;
+ // The LI pattern ADDI rd, X0, imm is sign extended.
+ case RISCV::ADDI:
+ return MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0;
+ // An ANDI with an 11 bit immediate will zero bits 63:11.
+ case RISCV::ANDI:
+ return isUInt<11>(MI.getOperand(2).getImm());
+ // An ORI with an >11 bit immediate (negative 12-bit) will set bits 63:11.
+ case RISCV::ORI:
+ return !isUInt<11>(MI.getOperand(2).getImm());
+ // Copying from X0 produces zero.
+ case RISCV::COPY:
+ return MI.getOperand(1).getReg() == RISCV::X0;
+ }
+
+ return false;
+}
+
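A quick self-contained check (illustrative only, helper names are ours) of the ANDI/ORI reasoning above: AND with a non-negative 12-bit immediate clears bits 63:11, and OR with a negative one sets them, so in both cases bits 63:32 end up equal to bit 31.

#include <cassert>
#include <cstdint>

static bool isSExt32(uint64_t v) {
  return v == (uint64_t)(int64_t)(int32_t)v;   // bits 63:32 replicate bit 31
}

int main() {
  uint64_t x = 0xdeadbeefcafef00dULL;
  for (int imm = 0; imm < 2048; ++imm) {       // the isUInt<11> immediates
    assert(isSExt32(x & (uint64_t)imm));       // ANDI result
    int64_t neg = imm - 2048;                  // a negative 12-bit immediate
    assert(isSExt32(x | (uint64_t)neg));       // ORI result
  }
}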
+static bool isSignExtendedW(const MachineInstr &OrigMI,
+ MachineRegisterInfo &MRI) {
+
+ SmallPtrSet<const MachineInstr *, 4> Visited;
+ SmallVector<const MachineInstr *, 4> Worklist;
+
+ Worklist.push_back(&OrigMI);
+
+ while (!Worklist.empty()) {
+ const MachineInstr *MI = Worklist.pop_back_val();
+
+ // If we already visited this instruction, we don't need to check it again.
+ if (!Visited.insert(MI).second)
+ continue;
+
+ // If this is a sign extending operation we don't need to look any further.
+ if (isSignExtendingOpW(*MI))
+ continue;
+
+ // Is this an instruction that propagates sign extension?
+ switch (MI->getOpcode()) {
+ default:
+ // Unknown opcode, give up.
+ return false;
+ case RISCV::COPY: {
+ Register SrcReg = MI->getOperand(1).getReg();
+
+ // TODO: Handle arguments and returns from calls?
+
+ // If this is a copy from another register, check its source instruction.
+ if (!SrcReg.isVirtual())
+ return false;
+ const MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
+ if (!SrcMI)
+ return false;
+
+ // Add SrcMI to the worklist.
+ Worklist.push_back(SrcMI);
+ break;
+ }
+ case RISCV::REM:
+ case RISCV::ANDI:
+ case RISCV::ORI:
+ case RISCV::XORI: {
+ // |Remainder| is always <= |Dividend|. If D is 32-bit, then so is R.
+ // DIV doesn't work because of the edge case 0xf..f 8000 0000 / (long)-1.
+ // Logical operations use a sign-extended 12-bit immediate. We just need
+ // to check if the other operand is sign-extended.
+ Register SrcReg = MI->getOperand(1).getReg();
+ if (!SrcReg.isVirtual())
+ return false;
+ const MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
+ if (!SrcMI)
+ return false;
+
+ // Add SrcMI to the worklist.
+ Worklist.push_back(SrcMI);
+ break;
+ }
+ case RISCV::REMU:
+ case RISCV::AND:
+ case RISCV::OR:
+ case RISCV::XOR:
+ case RISCV::ANDN:
+ case RISCV::ORN:
+ case RISCV::XNOR:
+ case RISCV::MAX:
+ case RISCV::MAXU:
+ case RISCV::MIN:
+ case RISCV::MINU:
+ case RISCV::PHI: {
+ // If all incoming values are sign-extended, the output of AND, OR, XOR,
+ // MIN, MAX, or PHI is also sign-extended.
+
+ // The input registers for PHI are operand 1, 3, ...
+ // The input registers for others are operand 1 and 2.
+ unsigned E = 3, D = 1;
+ if (MI->getOpcode() == RISCV::PHI) {
+ E = MI->getNumOperands();
+ D = 2;
+ }
+
+ for (unsigned I = 1; I != E; I += D) {
+ if (!MI->getOperand(I).isReg())
+ return false;
+
+ Register SrcReg = MI->getOperand(I).getReg();
+ if (!SrcReg.isVirtual())
+ return false;
+ const MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
+ if (!SrcMI)
+ return false;
+
+ // Add SrcMI to the worklist.
+ Worklist.push_back(SrcMI);
+ }
+
+ break;
+ }
+ }
+ }
+
+ // If we get here, then every node we visited produces a sign extended value
+ // or propagated sign extended values. So the result must be sign extended.
+ return true;
+}
+
+bool RISCVSExtWRemoval::runOnMachineFunction(MachineFunction &MF) {
+ if (skipFunction(MF.getFunction()) || DisableSExtWRemoval)
+ return false;
+
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
+
+ if (!ST.is64Bit())
+ return false;
+
+ bool MadeChange = false;
+ for (MachineBasicBlock &MBB : MF) {
+ for (auto I = MBB.begin(), IE = MBB.end(); I != IE;) {
+ MachineInstr *MI = &*I++;
+
+ // We're looking for the sext.w pattern ADDIW rd, rs1, 0.
+ if (MI->getOpcode() != RISCV::ADDIW || !MI->getOperand(2).isImm() ||
+ MI->getOperand(2).getImm() != 0 || !MI->getOperand(1).isReg())
+ continue;
+
+ // Input should be a virtual register.
+ Register SrcReg = MI->getOperand(1).getReg();
+ if (!SrcReg.isVirtual())
+ continue;
+
+ const MachineInstr &SrcMI = *MRI.getVRegDef(SrcReg);
+ if (!isSignExtendedW(SrcMI, MRI))
+ continue;
+
+ Register DstReg = MI->getOperand(0).getReg();
+ if (!MRI.constrainRegClass(SrcReg, MRI.getRegClass(DstReg)))
+ continue;
+
+ LLVM_DEBUG(dbgs() << "Removing redundant sign-extension\n");
+ MRI.replaceRegWith(DstReg, SrcReg);
+ MRI.clearKillFlags(SrcReg);
+ MI->eraseFromParent();
+ ++NumRemovedSExtW;
+ MadeChange = true;
+ }
+ }
+
+ return MadeChange;
+}
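
The pass above deletes a sext.w (ADDIW rd, rs1, 0) when a worklist walk over the defining instructions proves its input is already sign-extended. A minimal, standalone sketch of that produce-or-propagate worklist check follows; it uses a made-up Node type in place of MachineInstr and a single boolean property standing in for "is sign-extended", so it only illustrates the fixed-point idea, not the opcode-specific cases.

// Sketch only: toy node graph, not LLVM's MachineInstr/MachineRegisterInfo.
#include <cstdio>
#include <unordered_set>
#include <vector>

struct Node {
  bool ProducesProperty;   // e.g. ADDIW/LW: result is sign-extended by itself.
  bool PropagatesProperty; // e.g. AND/OR/XOR/PHI: holds if all inputs have it.
  std::vector<const Node *> Inputs;
};

static bool hasPropertyAll(const Node &Root) {
  std::unordered_set<const Node *> Visited;
  std::vector<const Node *> Worklist{&Root};
  while (!Worklist.empty()) {
    const Node *N = Worklist.back();
    Worklist.pop_back();
    if (!Visited.insert(N).second)
      continue; // Already checked; this also terminates cycles through PHIs.
    if (N->ProducesProperty)
      continue; // Proven locally; no need to look at the inputs.
    if (!N->PropagatesProperty)
      return false; // Unknown producer: give up conservatively.
    for (const Node *In : N->Inputs)
      Worklist.push_back(In); // The property must hold for every input.
  }
  // Every visited node produces or propagates the property.
  return true;
}

int main() {
  Node A{true, false, {}};       // produces the property (like an ADDW)
  Node B{false, true, {&A, &A}}; // propagates it (like AND of two such values)
  std::printf("%s\n", hasPropertyAll(B) ? "sign-extended" : "unknown");
  return 0;
}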
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSchedRocket.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSchedRocket.td
index d5a0932c8778..78cf34c8c582 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSchedRocket.td
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSchedRocket.td
@@ -17,7 +17,10 @@ def RocketModel : SchedMachineModel {
let LoadLatency = 3;
let MispredictPenalty = 3;
let CompleteModel = false;
- let UnsupportedFeatures = [HasStdExtV, HasStdExtZvlsseg];
+ let UnsupportedFeatures = [HasStdExtZbkb, HasStdExtZbkc, HasStdExtZbkx,
+ HasStdExtZknd, HasStdExtZkne, HasStdExtZknh,
+ HasStdExtZksed, HasStdExtZksh, HasStdExtZkr,
+ HasVInstructions, HasVInstructionsI64];
}
//===----------------------------------------------------------------------===//
@@ -237,5 +240,8 @@ def : ReadAdvance<ReadFClass64, 0>;
defm : UnsupportedSchedV;
defm : UnsupportedSchedZba;
defm : UnsupportedSchedZbb;
+defm : UnsupportedSchedZbc;
+defm : UnsupportedSchedZbs;
+defm : UnsupportedSchedZbf;
defm : UnsupportedSchedZfh;
}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
index 7f9d0aabc4ed..9f5e5ff1223c 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
@@ -15,7 +15,10 @@ def SiFive7Model : SchedMachineModel {
let LoadLatency = 3;
let MispredictPenalty = 3;
let CompleteModel = 0;
- let UnsupportedFeatures = [HasStdExtV, HasStdExtZvlsseg];
+ let UnsupportedFeatures = [HasStdExtZbkb, HasStdExtZbkc, HasStdExtZbkx,
+ HasStdExtZknd, HasStdExtZkne, HasStdExtZknh,
+ HasStdExtZksed, HasStdExtZksh, HasStdExtZkr,
+ HasVInstructions];
}
// The SiFive7 microarchitecture has two pipelines: A and B.
@@ -224,5 +227,8 @@ def : ReadAdvance<ReadFClass64, 0>;
defm : UnsupportedSchedV;
defm : UnsupportedSchedZba;
defm : UnsupportedSchedZbb;
+defm : UnsupportedSchedZbc;
+defm : UnsupportedSchedZbs;
+defm : UnsupportedSchedZbf;
defm : UnsupportedSchedZfh;
}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVScheduleB.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVScheduleB.td
index b668b0acd719..193760e1e15b 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVScheduleB.td
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVScheduleB.td
@@ -26,6 +26,17 @@ def WriteCPOP32 : SchedWrite;
def WriteREV8 : SchedWrite;
def WriteORCB : SchedWrite;
+// Zbc extension
+def WriteCLMUL : SchedWrite; // CLMUL/CLMULR/CLMULH
+
+// Zbs extension
+def WriteSingleBit : SchedWrite; // BCLR/BSET/BINV/BEXT
+def WriteSingleBitImm: SchedWrite; // BCLRI/BSETI/BINVI/BEXTI
+
+// Zbf extension
+def WriteBFP : SchedWrite; // BFP
+def WriteBFP32 : SchedWrite; // BFPW
+
/// Define scheduler resources associated with use operands.
// Zba extension
@@ -46,6 +57,17 @@ def ReadCPOP32 : SchedRead;
def ReadREV8 : SchedRead;
def ReadORCB : SchedRead;
+// Zbc extension
+def ReadCLMUL : SchedRead; // CLMUL/CLMULR/CLMULH
+
+// Zbs extension
+def ReadSingleBit : SchedRead; // BCLR/BSET/BINV/BEXT
+def ReadSingleBitImm: SchedRead; // BCLRI/BSETI/BINVI/BEXTI
+
+// Zbf extension
+def ReadBFP : SchedRead; // BFP
+def ReadBFP32 : SchedRead; // BFPW
+
/// Define default scheduler resources for B.
multiclass UnsupportedSchedZba {
@@ -87,3 +109,31 @@ def : ReadAdvance<ReadREV8, 0>;
def : ReadAdvance<ReadORCB, 0>;
}
}
+
+multiclass UnsupportedSchedZbc {
+let Unsupported = true in {
+def : WriteRes<WriteCLMUL, []>;
+
+def : ReadAdvance<ReadCLMUL, 0>;
+}
+}
+
+multiclass UnsupportedSchedZbs {
+let Unsupported = true in {
+def : WriteRes<WriteSingleBit, []>;
+def : WriteRes<WriteSingleBitImm, []>;
+
+def : ReadAdvance<ReadSingleBit, 0>;
+def : ReadAdvance<ReadSingleBitImm, 0>;
+}
+}
+
+multiclass UnsupportedSchedZbf {
+let Unsupported = true in {
+def : WriteRes<WriteBFP, []>;
+def : WriteRes<WriteBFP32, []>;
+
+def : ReadAdvance<ReadBFP, 0>;
+def : ReadAdvance<ReadBFP32, 0>;
+}
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSubtarget.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
index 1063134b8a6c..976e4ccb1422 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
@@ -18,6 +18,7 @@
#include "RISCVRegisterBankInfo.h"
#include "RISCVTargetMachine.h"
#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
@@ -50,6 +51,16 @@ static cl::opt<unsigned> RVVVectorELENMax(
cl::desc("The maximum ELEN value to use for fixed length vectors."),
cl::init(64), cl::Hidden);
+static cl::opt<bool> RISCVDisableUsingConstantPoolForLargeInts(
+ "riscv-disable-using-constant-pool-for-large-ints",
+ cl::desc("Disable using constant pool for large integers."),
+ cl::init(false), cl::Hidden);
+
+static cl::opt<unsigned> RISCVMaxBuildIntsCost(
+ "riscv-max-build-ints-cost",
+ cl::desc("The maximum cost used for building integers."), cl::init(0),
+ cl::Hidden);
+
void RISCVSubtarget::anchor() {}
RISCVSubtarget &
@@ -110,37 +121,69 @@ const RegisterBankInfo *RISCVSubtarget::getRegBankInfo() const {
return RegBankInfo.get();
}
+bool RISCVSubtarget::useConstantPoolForLargeInts() const {
+ return !RISCVDisableUsingConstantPoolForLargeInts;
+}
+
+unsigned RISCVSubtarget::getMaxBuildIntsCost() const {
+ // Loading an integer from the constant pool needs two instructions (which
+ // is why the minimum cost is 2): an address-calculation instruction and a
+ // load instruction. Address calculation and the instructions used for
+ // building integers (addi, slli, etc.) usually take one cycle each, so the
+ // default cost is (LoadLatency + 1) when no threshold is provided.
+ return RISCVMaxBuildIntsCost == 0
+ ? getSchedModel().LoadLatency + 1
+ : std::max<unsigned>(2, RISCVMaxBuildIntsCost);
+}
+
unsigned RISCVSubtarget::getMaxRVVVectorSizeInBits() const {
assert(hasVInstructions() &&
"Tried to get vector length without Zve or V extension support!");
if (RVVVectorBitsMax == 0)
return 0;
- assert(RVVVectorBitsMax >= 128 && RVVVectorBitsMax <= 65536 &&
- isPowerOf2_32(RVVVectorBitsMax) &&
- "V extension requires vector length to be in the range of 128 to "
- "65536 and a power of 2!");
+
+ // ZvlLen specifies the minimum required vlen. The upper bound provided by
+ // riscv-v-vector-bits-max should be no less than it.
+ if (RVVVectorBitsMax < ZvlLen)
+ report_fatal_error("riscv-v-vector-bits-max specified is lower "
+ "than the Zvl*b limitation");
+
+ // FIXME: Change to >= 32 when VLEN = 32 is supported
+ assert(
+ RVVVectorBitsMax >= 64 && RVVVectorBitsMax <= 65536 &&
+ isPowerOf2_32(RVVVectorBitsMax) &&
+ "V or Zve* extension requires vector length to be in the range of 64 to "
+ "65536 and a power of 2!");
assert(RVVVectorBitsMax >= RVVVectorBitsMin &&
"Minimum V extension vector length should not be larger than its "
"maximum!");
unsigned Max = std::max(RVVVectorBitsMin, RVVVectorBitsMax);
- return PowerOf2Floor((Max < 128 || Max > 65536) ? 0 : Max);
+ return PowerOf2Floor((Max < 64 || Max > 65536) ? 0 : Max);
}
unsigned RISCVSubtarget::getMinRVVVectorSizeInBits() const {
+ // ZvlLen specifies the minimum required vlen. The lower bound provided by
+ // riscv-v-vector-bits-min should be no less than it.
+ if (RVVVectorBitsMin != 0 && RVVVectorBitsMin < ZvlLen)
+ report_fatal_error("riscv-v-vector-bits-min specified is lower "
+ "than the Zvl*b limitation");
+
assert(hasVInstructions() &&
"Tried to get vector length without Zve or V extension support!");
- assert((RVVVectorBitsMin == 0 ||
- (RVVVectorBitsMin >= 128 && RVVVectorBitsMax <= 65536 &&
- isPowerOf2_32(RVVVectorBitsMin))) &&
- "V extension requires vector length to be in the range of 128 to "
- "65536 and a power of 2!");
+ // FIXME: Change to >= 32 when VLEN = 32 is supported
+ assert(
+ (RVVVectorBitsMin == 0 ||
+ (RVVVectorBitsMin >= 64 && RVVVectorBitsMin <= 65536 &&
+ isPowerOf2_32(RVVVectorBitsMin))) &&
+ "V or Zve* extension requires vector length to be in the range of 64 to "
+ "65536 and a power of 2!");
assert((RVVVectorBitsMax >= RVVVectorBitsMin || RVVVectorBitsMax == 0) &&
"Minimum V extension vector length should not be larger than its "
"maximum!");
unsigned Min = RVVVectorBitsMin;
if (RVVVectorBitsMax != 0)
Min = std::min(RVVVectorBitsMin, RVVVectorBitsMax);
- return PowerOf2Floor((Min < 128 || Min > 65536) ? 0 : Min);
+ return PowerOf2Floor((Min < 64 || Min > 65536) ? 0 : Min);
}
unsigned RISCVSubtarget::getMaxLMULForFixedLengthVectors() const {
@@ -158,8 +201,9 @@ unsigned RISCVSubtarget::getMaxELENForFixedLengthVectors() const {
assert(RVVVectorELENMax <= 64 && RVVVectorELENMax >= 8 &&
isPowerOf2_32(RVVVectorELENMax) &&
"V extension requires a ELEN to be a power of 2 between 8 and 64!");
+ unsigned ELEN = hasVInstructionsI64() ? 64 : 32;
return PowerOf2Floor(
- std::max<unsigned>(std::min<unsigned>(RVVVectorELENMax, 64), 8));
+ std::max<unsigned>(std::min<unsigned>(RVVVectorELENMax, ELEN), 8));
}
bool RISCVSubtarget::useRVVForFixedLengthVectors() const {
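
The getMaxBuildIntsCost hook above picks the threshold at which integer materialization falls back to the constant pool: LoadLatency + 1 by default, and never below 2 when riscv-max-build-ints-cost is set. A small sketch of that selection, with plain parameters standing in for the scheduling-model field and the command-line option (the names here are illustrative only):

#include <algorithm>
#include <cstdio>

// UserOverride == 0 means "no threshold provided".
static unsigned maxBuildIntsCost(unsigned LoadLatency, unsigned UserOverride) {
  // Materializing from the constant pool always takes at least two
  // instructions (address generation plus a load), hence the floor of 2.
  if (UserOverride == 0)
    return LoadLatency + 1;
  return std::max(2u, UserOverride);
}

int main() {
  std::printf("%u %u\n", maxBuildIntsCost(3, 0), maxBuildIntsCost(3, 1)); // 4 2
  return 0;
}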
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSubtarget.h b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSubtarget.h
index d0330e6984a5..044dda0a1ccc 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -33,7 +33,33 @@ namespace llvm {
class StringRef;
class RISCVSubtarget : public RISCVGenSubtargetInfo {
+public:
+ enum ExtZvl : unsigned {
+ NotSet = 0,
+ Zvl32b = 32,
+ Zvl64b = 64,
+ Zvl128b = 128,
+ Zvl256b = 256,
+ Zvl512b = 512,
+ Zvl1024b = 1024,
+ Zvl2048b = 2048,
+ Zvl4096b = 4096,
+ Zvl8192b = 8192,
+ Zvl16384b = 16384,
+ Zvl32768b = 32768,
+ Zvl65536b = 65536
+ };
+
+ enum RISCVProcFamilyEnum : uint8_t {
+ Others,
+ SiFive7,
+ };
+
+private:
virtual void anchor();
+
+ RISCVProcFamilyEnum RISCVProcFamily = Others;
+
bool HasStdExtM = false;
bool HasStdExtA = false;
bool HasStdExtF = false;
@@ -50,15 +76,33 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
bool HasStdExtZbs = false;
bool HasStdExtZbt = false;
bool HasStdExtV = false;
- bool HasStdExtZvlsseg = false;
+ bool HasStdExtZve32x = false;
+ bool HasStdExtZve32f = false;
+ bool HasStdExtZve64x = false;
+ bool HasStdExtZve64f = false;
+ bool HasStdExtZve64d = false;
bool HasStdExtZfhmin = false;
bool HasStdExtZfh = false;
+ bool HasStdExtZbkb = false;
+ bool HasStdExtZbkc = false;
+ bool HasStdExtZbkx = false;
+ bool HasStdExtZknd = false;
+ bool HasStdExtZkne = false;
+ bool HasStdExtZknh = false;
+ bool HasStdExtZksed = false;
+ bool HasStdExtZksh = false;
+ bool HasStdExtZkr = false;
+ bool HasStdExtZkn = false;
+ bool HasStdExtZks = false;
+ bool HasStdExtZkt = false;
+ bool HasStdExtZk = false;
bool HasRV64 = false;
bool IsRV32E = false;
bool EnableLinkerRelax = false;
bool EnableRVCHintInstrs = true;
bool EnableSaveRestore = false;
unsigned XLen = 32;
+ ExtZvl ZvlLen = ExtZvl::NotSet;
MVT XLenVT = MVT::i32;
uint8_t MaxInterleaveFactor = 2;
RISCVABI::ABI TargetABI = RISCVABI::ABI_Unknown;
@@ -100,11 +144,19 @@ public:
return &TSInfo;
}
bool enableMachineScheduler() const override { return true; }
+
+ /// Returns RISCV processor family.
+ /// Avoid this function! CPU specifics should be kept local to this class
+ /// and preferably modeled with SubtargetFeatures or properties in
+ /// initializeProperties().
+ RISCVProcFamilyEnum getProcFamily() const { return RISCVProcFamily; }
+
bool hasStdExtM() const { return HasStdExtM; }
bool hasStdExtA() const { return HasStdExtA; }
bool hasStdExtF() const { return HasStdExtF; }
bool hasStdExtD() const { return HasStdExtD; }
bool hasStdExtC() const { return HasStdExtC; }
+ bool hasStdExtV() const { return HasStdExtV; }
bool hasStdExtZba() const { return HasStdExtZba; }
bool hasStdExtZbb() const { return HasStdExtZbb; }
bool hasStdExtZbc() const { return HasStdExtZbc; }
@@ -115,10 +167,18 @@ public:
bool hasStdExtZbr() const { return HasStdExtZbr; }
bool hasStdExtZbs() const { return HasStdExtZbs; }
bool hasStdExtZbt() const { return HasStdExtZbt; }
- bool hasStdExtV() const { return HasStdExtV; }
- bool hasStdExtZvlsseg() const { return HasStdExtZvlsseg; }
+ bool hasStdExtZvl() const { return ZvlLen != ExtZvl::NotSet; }
bool hasStdExtZfhmin() const { return HasStdExtZfhmin; }
bool hasStdExtZfh() const { return HasStdExtZfh; }
+ bool hasStdExtZbkb() const { return HasStdExtZbkb; }
+ bool hasStdExtZbkc() const { return HasStdExtZbkc; }
+ bool hasStdExtZbkx() const { return HasStdExtZbkx; }
+ bool hasStdExtZknd() const { return HasStdExtZknd; }
+ bool hasStdExtZkne() const { return HasStdExtZkne; }
+ bool hasStdExtZknh() const { return HasStdExtZknh; }
+ bool hasStdExtZksed() const { return HasStdExtZksed; }
+ bool hasStdExtZksh() const { return HasStdExtZksh; }
+ bool hasStdExtZkr() const { return HasStdExtZkr; }
bool is64Bit() const { return HasRV64; }
bool isRV32E() const { return IsRV32E; }
bool enableLinkerRelax() const { return EnableLinkerRelax; }
@@ -126,6 +186,15 @@ public:
bool enableSaveRestore() const { return EnableSaveRestore; }
MVT getXLenVT() const { return XLenVT; }
unsigned getXLen() const { return XLen; }
+ unsigned getFLen() const {
+ if (HasStdExtD)
+ return 64;
+
+ if (HasStdExtF)
+ return 32;
+
+ return 0;
+ }
RISCVABI::ABI getTargetABI() const { return TargetABI; }
bool isRegisterReservedByUser(Register i) const {
assert(i < RISCV::NUM_TARGET_REGS && "Register out of range");
@@ -133,11 +202,19 @@ public:
}
// Vector codegen related methods.
- bool hasVInstructions() const { return HasStdExtV; }
- bool hasVInstructionsI64() const { return HasStdExtV; }
- bool hasVInstructionsF16() const { return HasStdExtV && hasStdExtZfh(); }
- bool hasVInstructionsF32() const { return HasStdExtV && hasStdExtF(); }
- bool hasVInstructionsF64() const { return HasStdExtV && hasStdExtD(); }
+ bool hasVInstructions() const { return HasStdExtV || HasStdExtZve32x; }
+ bool hasVInstructionsI64() const { return HasStdExtV || HasStdExtZve64x; }
+ bool hasVInstructionsF16() const {
+ return (HasStdExtV || HasStdExtZve32f) && HasStdExtZfh;
+ }
+ // FIXME: Consider Zfinx in the future
+ bool hasVInstructionsF32() const {
+ return HasStdExtV || (HasStdExtZve32f && HasStdExtF);
+ }
+ // FIXME: Consider Zdinx in the future
+ bool hasVInstructionsF64() const {
+ return HasStdExtV || (HasStdExtZve64d && HasStdExtD);
+ }
// F16 and F64 both require F32.
bool hasVInstructionsAnyF() const { return hasVInstructionsF32(); }
unsigned getMaxInterleaveFactor() const {
@@ -157,6 +234,12 @@ public:
const LegalizerInfo *getLegalizerInfo() const override;
const RegisterBankInfo *getRegBankInfo() const override;
+ bool useConstantPoolForLargeInts() const;
+
+ // Maximum cost allowed for building an integer with instructions; integers
+ // whose build cost exceeds this are placed in the constant pool instead.
+ unsigned getMaxBuildIntsCost() const;
+
// Return the known range for the bit length of RVV data registers. A value
// of 0 means nothing is known about that particular limit beyond what's
// implied by the architecture.
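
The header above reworks the hasVInstructions* predicates so they are satisfied either by the full V extension or by the corresponding Zve* subset (plus the matching scalar FP extension for the FP variants). A small sketch of how those predicates compose, with plain bools standing in for the feature flags; the implication that Zve64x also provides Zve32x is normally handled by feature parsing and is written out by hand here:

#include <cstdio>

struct Features {
  bool V = false, Zve32x = false, Zve32f = false, Zve64x = false, Zve64d = false;
  bool F = false, D = false;
};

static bool hasVInstructions(const Features &X)    { return X.V || X.Zve32x; }
static bool hasVInstructionsI64(const Features &X) { return X.V || X.Zve64x; }
static bool hasVInstructionsF32(const Features &X) { return X.V || (X.Zve32f && X.F); }
static bool hasVInstructionsF64(const Features &X) { return X.V || (X.Zve64d && X.D); }

int main() {
  Features Cfg;                   // an integer-only embedded vector unit
  Cfg.Zve64x = Cfg.Zve32x = true; // Zve64x implies Zve32x
  std::printf("%d %d %d %d\n", hasVInstructions(Cfg), hasVInstructionsI64(Cfg),
              hasVInstructionsF32(Cfg), hasVInstructionsF64(Cfg)); // 1 1 0 0
  return 0;
}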
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSystemOperands.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSystemOperands.td
index 5a4c579dd708..b9aa25b321b0 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSystemOperands.td
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSystemOperands.td
@@ -1,4 +1,4 @@
-//===- RISCVSystemOperands.td ----------------------------*- tablegen -*-===//
+//===- RISCVSystemOperands.td ------------------------------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -70,16 +70,16 @@ def lookupSysRegByDeprecatedName : SearchIndex {
// 2.3, 2.4 and 2.5 in the RISC-V Instruction Set Manual
// Volume II: Privileged Architecture.
-//===--------------------------
+//===----------------------------------------------------------------------===//
// User Trap Setup
-//===--------------------------
+//===----------------------------------------------------------------------===//
def : SysReg<"ustatus", 0x000>;
def : SysReg<"uie", 0x004>;
def : SysReg<"utvec", 0x005>;
-//===--------------------------
+//===----------------------------------------------------------------------===//
// User Trap Handling
-//===--------------------------
+//===----------------------------------------------------------------------===//
def : SysReg<"uscratch", 0x040>;
def : SysReg<"uepc", 0x041>;
def : SysReg<"ucause", 0x042>;
@@ -87,100 +87,57 @@ let DeprecatedName = "ubadaddr" in
def : SysReg<"utval", 0x043>;
def : SysReg<"uip", 0x044>;
-//===--------------------------
+//===----------------------------------------------------------------------===//
// User Floating-Point CSRs
-//===--------------------------
+//===----------------------------------------------------------------------===//
def SysRegFFLAGS : SysReg<"fflags", 0x001>;
def SysRegFRM : SysReg<"frm", 0x002>;
def SysRegFCSR : SysReg<"fcsr", 0x003>;
-//===--------------------------
+//===----------------------------------------------------------------------===//
// User Counter/Timers
-//===--------------------------
+//===----------------------------------------------------------------------===//
def CYCLE : SysReg<"cycle", 0xC00>;
def TIME : SysReg<"time", 0xC01>;
def INSTRET : SysReg<"instret", 0xC02>;
-def : SysReg<"hpmcounter3", 0xC03>;
-def : SysReg<"hpmcounter4", 0xC04>;
-def : SysReg<"hpmcounter5", 0xC05>;
-def : SysReg<"hpmcounter6", 0xC06>;
-def : SysReg<"hpmcounter7", 0xC07>;
-def : SysReg<"hpmcounter8", 0xC08>;
-def : SysReg<"hpmcounter9", 0xC09>;
-def : SysReg<"hpmcounter10", 0xC0A>;
-def : SysReg<"hpmcounter11", 0xC0B>;
-def : SysReg<"hpmcounter12", 0xC0C>;
-def : SysReg<"hpmcounter13", 0xC0D>;
-def : SysReg<"hpmcounter14", 0xC0E>;
-def : SysReg<"hpmcounter15", 0xC0F>;
-def : SysReg<"hpmcounter16", 0xC10>;
-def : SysReg<"hpmcounter17", 0xC11>;
-def : SysReg<"hpmcounter18", 0xC12>;
-def : SysReg<"hpmcounter19", 0xC13>;
-def : SysReg<"hpmcounter20", 0xC14>;
-def : SysReg<"hpmcounter21", 0xC15>;
-def : SysReg<"hpmcounter22", 0xC16>;
-def : SysReg<"hpmcounter23", 0xC17>;
-def : SysReg<"hpmcounter24", 0xC18>;
-def : SysReg<"hpmcounter25", 0xC19>;
-def : SysReg<"hpmcounter26", 0xC1A>;
-def : SysReg<"hpmcounter27", 0xC1B>;
-def : SysReg<"hpmcounter28", 0xC1C>;
-def : SysReg<"hpmcounter29", 0xC1D>;
-def : SysReg<"hpmcounter30", 0xC1E>;
-def : SysReg<"hpmcounter31", 0xC1F>;
+// hpmcounter3-hpmcounter31 at 0xC03-0xC1F.
+foreach i = 3...31 in
+ def : SysReg<"hpmcounter"#i, !add(0xC03, !sub(i, 3))>;
let isRV32Only = 1 in {
def CYCLEH : SysReg<"cycleh", 0xC80>;
def TIMEH : SysReg<"timeh", 0xC81>;
def INSTRETH : SysReg<"instreth", 0xC82>;
-def: SysReg<"hpmcounter3h", 0xC83>;
-def: SysReg<"hpmcounter4h", 0xC84>;
-def: SysReg<"hpmcounter5h", 0xC85>;
-def: SysReg<"hpmcounter6h", 0xC86>;
-def: SysReg<"hpmcounter7h", 0xC87>;
-def: SysReg<"hpmcounter8h", 0xC88>;
-def: SysReg<"hpmcounter9h", 0xC89>;
-def: SysReg<"hpmcounter10h", 0xC8A>;
-def: SysReg<"hpmcounter11h", 0xC8B>;
-def: SysReg<"hpmcounter12h", 0xC8C>;
-def: SysReg<"hpmcounter13h", 0xC8D>;
-def: SysReg<"hpmcounter14h", 0xC8E>;
-def: SysReg<"hpmcounter15h", 0xC8F>;
-def: SysReg<"hpmcounter16h", 0xC90>;
-def: SysReg<"hpmcounter17h", 0xC91>;
-def: SysReg<"hpmcounter18h", 0xC92>;
-def: SysReg<"hpmcounter19h", 0xC93>;
-def: SysReg<"hpmcounter20h", 0xC94>;
-def: SysReg<"hpmcounter21h", 0xC95>;
-def: SysReg<"hpmcounter22h", 0xC96>;
-def: SysReg<"hpmcounter23h", 0xC97>;
-def: SysReg<"hpmcounter24h", 0xC98>;
-def: SysReg<"hpmcounter25h", 0xC99>;
-def: SysReg<"hpmcounter26h", 0xC9A>;
-def: SysReg<"hpmcounter27h", 0xC9B>;
-def: SysReg<"hpmcounter28h", 0xC9C>;
-def: SysReg<"hpmcounter29h", 0xC9D>;
-def: SysReg<"hpmcounter30h", 0xC9E>;
-def: SysReg<"hpmcounter31h", 0xC9F>;
+// hpmcounter3h-hpmcounter31h at 0xC83-0xC9F.
+foreach i = 3...31 in
+ def : SysReg<"hpmcounter"#i#"h", !add(0xC83, !sub(i, 3))>;
}
-//===--------------------------
+//===----------------------------------------------------------------------===//
// Supervisor Trap Setup
-//===--------------------------
+//===----------------------------------------------------------------------===//
def : SysReg<"sstatus", 0x100>;
def : SysReg<"sedeleg", 0x102>;
def : SysReg<"sideleg", 0x103>;
def : SysReg<"sie", 0x104>;
def : SysReg<"stvec", 0x105>;
def : SysReg<"scounteren", 0x106>;
+def : SysReg<"stimecmp", 0x14D>;
+let isRV32Only = 1 in
+def : SysReg<"stimecmph", 0x15D>;
+
+//===----------------------------------------------------------------------===//
+// Supervisor Configuration
+//===----------------------------------------------------------------------===//
+
+def : SysReg<"senvcfg", 0x10A>;
-//===--------------------------
+//===----------------------------------------------------------------------===//
// Supervisor Trap Handling
-//===--------------------------
+//===----------------------------------------------------------------------===//
def : SysReg<"sscratch", 0x140>;
def : SysReg<"sepc", 0x141>;
def : SysReg<"scause", 0x142>;
@@ -188,24 +145,103 @@ let DeprecatedName = "sbadaddr" in
def : SysReg<"stval", 0x143>;
def : SysReg<"sip", 0x144>;
-//===-------------------------------------
+//===----------------------------------------------------------------------===//
// Supervisor Protection and Translation
-//===-------------------------------------
+//===----------------------------------------------------------------------===//
let DeprecatedName = "sptbr" in
def : SysReg<"satp", 0x180>;
-//===-----------------------------
+//===----------------------------------------------------------------------===//
+// Debug/Trace Registers
+//===----------------------------------------------------------------------===//
+
+def : SysReg<"scontext", 0x5A8>;
+
+//===----------------------------------------------------------------------===//
+// Supervisor Count Overflow (defined in Sscofpmf)
+//===----------------------------------------------------------------------===//
+
+def : SysReg<"scountovf", 0xDA0>;
+
+//===----------------------------------------------------------------------===//
+// Hypervisor Trap Setup
+//===----------------------------------------------------------------------===//
+
+def : SysReg<"hstatus", 0x600>;
+def : SysReg<"hedeleg", 0x602>;
+def : SysReg<"hideleg", 0x603>;
+def : SysReg<"hie", 0x604>;
+def : SysReg<"hcounteren", 0x606>;
+def : SysReg<"hgeie", 0x607>;
+
+//===----------------------------------------------------------------------===//
+// Hypervisor Trap Handling
+//===----------------------------------------------------------------------===//
+
+def : SysReg<"htval", 0x643>;
+def : SysReg<"hip", 0x644>;
+def : SysReg<"hvip", 0x645>;
+def : SysReg<"htinst", 0x64A>;
+def : SysReg<"hgeip", 0xE12>;
+
+//===----------------------------------------------------------------------===//
+// Hypervisor Configuration
+//===----------------------------------------------------------------------===//
+
+def : SysReg<"henvcfg", 0x60A>;
+let isRV32Only = 1 in
+def : SysReg<"henvcfgh", 0x61A>;
+
+//===----------------------------------------------------------------------===//
+// Hypervisor Protection and Translation
+//===----------------------------------------------------------------------===//
+
+def : SysReg<"hgatp", 0x680>;
+
+//===----------------------------------------------------------------------===//
+// Debug/Trace Registers
+//===----------------------------------------------------------------------===//
+
+def : SysReg<"hcontext", 0x6A8>;
+
+//===----------------------------------------------------------------------===//
+// Hypervisor Counter/Timer Virtualization Registers
+//===----------------------------------------------------------------------===//
+
+def : SysReg<"htimedelta", 0x605>;
+let isRV32Only = 1 in
+def : SysReg<"htimedeltah", 0x615>;
+
+//===----------------------------------------------------------------------===//
+// Virtual Supervisor Registers
+//===----------------------------------------------------------------------===//
+
+def : SysReg<"vsstatus", 0x200>;
+def : SysReg<"vsie", 0x204>;
+def : SysReg<"vstvec", 0x205>;
+def : SysReg<"vsscratch", 0x240>;
+def : SysReg<"vsepc", 0x241>;
+def : SysReg<"vscause", 0x242>;
+def : SysReg<"vstval", 0x243>;
+def : SysReg<"vsip", 0x244>;
+def : SysReg<"vstimecmp", 0x24D>;
+let isRV32Only = 1 in
+def : SysReg<"vstimecmph", 0x25D>;
+def : SysReg<"vsatp", 0x280>;
+
+//===----------------------------------------------------------------------===//
// Machine Information Registers
-//===-----------------------------
+//===----------------------------------------------------------------------===//
def : SysReg<"mvendorid", 0xF11>;
def : SysReg<"marchid", 0xF12>;
def : SysReg<"mimpid", 0xF13>;
def : SysReg<"mhartid", 0xF14>;
+def : SysReg<"mconfigptr", 0xF15>;
-//===-----------------------------
+//===----------------------------------------------------------------------===//
// Machine Trap Setup
-//===-----------------------------
+//===----------------------------------------------------------------------===//
def : SysReg<"mstatus", 0x300>;
def : SysReg<"misa", 0x301>;
def : SysReg<"medeleg", 0x302>;
@@ -213,163 +249,93 @@ def : SysReg<"mideleg", 0x303>;
def : SysReg<"mie", 0x304>;
def : SysReg<"mtvec", 0x305>;
def : SysReg<"mcounteren", 0x306>;
+let isRV32Only = 1 in
+def : SysReg<"mstatush", 0x310>;
-//===-----------------------------
+//===----------------------------------------------------------------------===//
// Machine Trap Handling
-//===-----------------------------
+//===----------------------------------------------------------------------===//
def : SysReg<"mscratch", 0x340>;
def : SysReg<"mepc", 0x341>;
def : SysReg<"mcause", 0x342>;
let DeprecatedName = "mbadaddr" in
def : SysReg<"mtval", 0x343>;
def : SysReg<"mip", 0x344>;
+def : SysReg<"mtinst", 0x34A>;
+def : SysReg<"mtval2", 0x34B>;
-//===----------------------------------
+//===----------------------------------------------------------------------===//
+// Machine Configuration
+//===----------------------------------------------------------------------===//
+
+def : SysReg<"menvcfg", 0x30A>;
+let isRV32Only = 1 in
+def : SysReg<"menvcfgh", 0x31A>;
+def : SysReg<"mseccfg", 0x747>;
+let isRV32Only = 1 in
+def : SysReg<"mseccfgh", 0x757>;
+
+//===----------------------------------------------------------------------===//
// Machine Protection and Translation
-//===----------------------------------
-def : SysReg<"pmpcfg0", 0x3A0>;
-def : SysReg<"pmpcfg2", 0x3A2>;
-let isRV32Only = 1 in {
-def : SysReg<"pmpcfg1", 0x3A1>;
-def : SysReg<"pmpcfg3", 0x3A3>;
+//===----------------------------------------------------------------------===//
+
+// pmpcfg0-pmpcfg15 at 0x3A0-0x3AF. Odd-numbered registers are RV32-only.
+foreach i = 0...15 in {
+ let isRV32Only = !and(i, 1) in
+ def : SysReg<"pmpcfg"#i, !add(0x3A0, i)>;
}
-def : SysReg<"pmpaddr0", 0x3B0>;
-def : SysReg<"pmpaddr1", 0x3B1>;
-def : SysReg<"pmpaddr2", 0x3B2>;
-def : SysReg<"pmpaddr3", 0x3B3>;
-def : SysReg<"pmpaddr4", 0x3B4>;
-def : SysReg<"pmpaddr5", 0x3B5>;
-def : SysReg<"pmpaddr6", 0x3B6>;
-def : SysReg<"pmpaddr7", 0x3B7>;
-def : SysReg<"pmpaddr8", 0x3B8>;
-def : SysReg<"pmpaddr9", 0x3B9>;
-def : SysReg<"pmpaddr10", 0x3BA>;
-def : SysReg<"pmpaddr11", 0x3BB>;
-def : SysReg<"pmpaddr12", 0x3BC>;
-def : SysReg<"pmpaddr13", 0x3BD>;
-def : SysReg<"pmpaddr14", 0x3BE>;
-def : SysReg<"pmpaddr15", 0x3BF>;
-
-
-//===--------------------------
+// pmpaddr0-pmpaddr63 at 0x3B0-0x3EF.
+foreach i = 0...63 in
+ def : SysReg<"pmpaddr"#i, !add(0x3B0, i)>;
+
+//===----------------------------------------------------------------------===//
// Machine Counter and Timers
-//===--------------------------
+//===----------------------------------------------------------------------===//
def : SysReg<"mcycle", 0xB00>;
def : SysReg<"minstret", 0xB02>;
-def : SysReg<"mhpmcounter3", 0xB03>;
-def : SysReg<"mhpmcounter4", 0xB04>;
-def : SysReg<"mhpmcounter5", 0xB05>;
-def : SysReg<"mhpmcounter6", 0xB06>;
-def : SysReg<"mhpmcounter7", 0xB07>;
-def : SysReg<"mhpmcounter8", 0xB08>;
-def : SysReg<"mhpmcounter9", 0xB09>;
-def : SysReg<"mhpmcounter10", 0xB0A>;
-def : SysReg<"mhpmcounter11", 0xB0B>;
-def : SysReg<"mhpmcounter12", 0xB0C>;
-def : SysReg<"mhpmcounter13", 0xB0D>;
-def : SysReg<"mhpmcounter14", 0xB0E>;
-def : SysReg<"mhpmcounter15", 0xB0F>;
-def : SysReg<"mhpmcounter16", 0xB10>;
-def : SysReg<"mhpmcounter17", 0xB11>;
-def : SysReg<"mhpmcounter18", 0xB12>;
-def : SysReg<"mhpmcounter19", 0xB13>;
-def : SysReg<"mhpmcounter20", 0xB14>;
-def : SysReg<"mhpmcounter21", 0xB15>;
-def : SysReg<"mhpmcounter22", 0xB16>;
-def : SysReg<"mhpmcounter23", 0xB17>;
-def : SysReg<"mhpmcounter24", 0xB18>;
-def : SysReg<"mhpmcounter25", 0xB19>;
-def : SysReg<"mhpmcounter26", 0xB1A>;
-def : SysReg<"mhpmcounter27", 0xB1B>;
-def : SysReg<"mhpmcounter28", 0xB1C>;
-def : SysReg<"mhpmcounter29", 0xB1D>;
-def : SysReg<"mhpmcounter30", 0xB1E>;
-def : SysReg<"mhpmcounter31", 0xB1F>;
+// mhpmcounter3-mhpmcounter31 at 0xB03-0xB1F.
+foreach i = 3...31 in
+ def : SysReg<"mhpmcounter"#i, !add(0xB03, !sub(i, 3))>;
let isRV32Only = 1 in {
def: SysReg<"mcycleh", 0xB80>;
def: SysReg<"minstreth", 0xB82>;
-def: SysReg<"mhpmcounter3h", 0xB83>;
-def: SysReg<"mhpmcounter4h", 0xB84>;
-def: SysReg<"mhpmcounter5h", 0xB85>;
-def: SysReg<"mhpmcounter6h", 0xB86>;
-def: SysReg<"mhpmcounter7h", 0xB87>;
-def: SysReg<"mhpmcounter8h", 0xB88>;
-def: SysReg<"mhpmcounter9h", 0xB89>;
-def: SysReg<"mhpmcounter10h", 0xB8A>;
-def: SysReg<"mhpmcounter11h", 0xB8B>;
-def: SysReg<"mhpmcounter12h", 0xB8C>;
-def: SysReg<"mhpmcounter13h", 0xB8D>;
-def: SysReg<"mhpmcounter14h", 0xB8E>;
-def: SysReg<"mhpmcounter15h", 0xB8F>;
-def: SysReg<"mhpmcounter16h", 0xB90>;
-def: SysReg<"mhpmcounter17h", 0xB91>;
-def: SysReg<"mhpmcounter18h", 0xB92>;
-def: SysReg<"mhpmcounter19h", 0xB93>;
-def: SysReg<"mhpmcounter20h", 0xB94>;
-def: SysReg<"mhpmcounter21h", 0xB95>;
-def: SysReg<"mhpmcounter22h", 0xB96>;
-def: SysReg<"mhpmcounter23h", 0xB97>;
-def: SysReg<"mhpmcounter24h", 0xB98>;
-def: SysReg<"mhpmcounter25h", 0xB99>;
-def: SysReg<"mhpmcounter26h", 0xB9A>;
-def: SysReg<"mhpmcounter27h", 0xB9B>;
-def: SysReg<"mhpmcounter28h", 0xB9C>;
-def: SysReg<"mhpmcounter29h", 0xB9D>;
-def: SysReg<"mhpmcounter30h", 0xB9E>;
-def: SysReg<"mhpmcounter31h", 0xB9F>;
+// mhpmcounter3h-mhpmcounter31h at 0xB83-0xB9F.
+foreach i = 3...31 in
+ def : SysReg<"mhpmcounter"#i#"h", !add(0xB83, !sub(i, 3))>;
}
-//===--------------------------
+//===----------------------------------------------------------------------===//
// Machine Counter Setup
-//===--------------------------
+//===----------------------------------------------------------------------===//
let AltName = "mucounteren" in // Privileged spec v1.9.1 Name
def : SysReg<"mcountinhibit", 0x320>;
-def : SysReg<"mhpmevent3", 0x323>;
-def : SysReg<"mhpmevent4", 0x324>;
-def : SysReg<"mhpmevent5", 0x325>;
-def : SysReg<"mhpmevent6", 0x326>;
-def : SysReg<"mhpmevent7", 0x327>;
-def : SysReg<"mhpmevent8", 0x328>;
-def : SysReg<"mhpmevent9", 0x329>;
-def : SysReg<"mhpmevent10", 0x32A>;
-def : SysReg<"mhpmevent11", 0x32B>;
-def : SysReg<"mhpmevent12", 0x32C>;
-def : SysReg<"mhpmevent13", 0x32D>;
-def : SysReg<"mhpmevent14", 0x32E>;
-def : SysReg<"mhpmevent15", 0x32F>;
-def : SysReg<"mhpmevent16", 0x330>;
-def : SysReg<"mhpmevent17", 0x331>;
-def : SysReg<"mhpmevent18", 0x332>;
-def : SysReg<"mhpmevent19", 0x333>;
-def : SysReg<"mhpmevent20", 0x334>;
-def : SysReg<"mhpmevent21", 0x335>;
-def : SysReg<"mhpmevent22", 0x336>;
-def : SysReg<"mhpmevent23", 0x337>;
-def : SysReg<"mhpmevent24", 0x338>;
-def : SysReg<"mhpmevent25", 0x339>;
-def : SysReg<"mhpmevent26", 0x33A>;
-def : SysReg<"mhpmevent27", 0x33B>;
-def : SysReg<"mhpmevent28", 0x33C>;
-def : SysReg<"mhpmevent29", 0x33D>;
-def : SysReg<"mhpmevent30", 0x33E>;
-def : SysReg<"mhpmevent31", 0x33F>;
+// mhpmevent3-mhpmevent31 at 0x323-0x33F.
+foreach i = 3...31 in
+ def : SysReg<"mhpmevent"#i, !add(0x323, !sub(i, 3))>;
-//===-----------------------------------------------
+// mhpmevent3h-mhpmevent31h at 0x723-0x73F.
+foreach i = 3...31 in {
+ let isRV32Only = 1 in
+ def : SysReg<"mhpmevent"#i#"h", !add(0x723, !sub(i, 3))>;
+}
+
+//===----------------------------------------------------------------------===//
// Debug/ Trace Registers (shared with Debug Mode)
-//===-----------------------------------------------
+//===----------------------------------------------------------------------===//
def : SysReg<"tselect", 0x7A0>;
def : SysReg<"tdata1", 0x7A1>;
def : SysReg<"tdata2", 0x7A2>;
def : SysReg<"tdata3", 0x7A3>;
+def : SysReg<"mcontext", 0x7A8>;
-//===-----------------------------------------------
+//===----------------------------------------------------------------------===//
// Debug Mode Registers
-//===-----------------------------------------------
+//===----------------------------------------------------------------------===//
def : SysReg<"dcsr", 0x7B0>;
def : SysReg<"dpc", 0x7B1>;
@@ -379,9 +345,9 @@ let AltName = "dscratch" in
def : SysReg<"dscratch0", 0x7B2>;
def : SysReg<"dscratch1", 0x7B3>;
-//===-----------------------------------------------
+//===----------------------------------------------------------------------===//
// User Vector CSRs
-//===-----------------------------------------------
+//===----------------------------------------------------------------------===//
def : SysReg<"vstart", 0x008>;
def : SysReg<"vxsat", 0x009>;
def : SysReg<"vxrm", 0x00A>;
@@ -389,3 +355,26 @@ def : SysReg<"vcsr", 0x00F>;
def : SysReg<"vl", 0xC20>;
def : SysReg<"vtype", 0xC21>;
def SysRegVLENB: SysReg<"vlenb", 0xC22>;
+
+//===----------------------------------------------------------------------===//
+// State Enable Extension (Smstateen)
+//===----------------------------------------------------------------------===//
+
+// sstateen0-sstateen3 at 0x10C-0x10F, mstateen0-mstateen3 at 0x30C-0x30F,
+// mstateen0h-mstateen3h at 0x31C-0x31F, hstateen0-hstateen3 at 0x60C-0x60F,
+// and hstateen0h-hstateen3h at 0x61C-0x61F.
+foreach i = 0...3 in {
+ def : SysReg<"sstateen"#i, !add(0x10C, i)>;
+ def : SysReg<"mstateen"#i, !add(0x30C, i)>;
+ let isRV32Only = 1 in
+ def : SysReg<"mstateen"#i#"h", !add(0x31C, i)>;
+ def : SysReg<"hstateen"#i, !add(0x60C, i)>;
+ let isRV32Only = 1 in
+ def : SysReg<"hstateen"#i#"h", !add(0x61C, i)>;
+}
+
+//===----------------------------------------------------------------------===//
+// Entropy Source CSR
+//===----------------------------------------------------------------------===//
+
+def SEED : SysReg<"seed", 0x015>;
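
The foreach loops above replace the long hand-written CSR lists; each register name is formed from an index and its encoding is the section's base address plus the index offset, e.g. hpmcounter3..hpmcounter31 at 0xC03..0xC1F. A tiny sketch of that arithmetic (TableGen's !add/!sub operators correspond to ordinary addition and subtraction):

#include <cstdio>

int main() {
  // hpmcounter3..hpmcounter31 live at 0xC03..0xC1F; the encoding is the base
  // address plus (index - 3), which is what !add(0xC03, !sub(i, 3)) computes.
  const unsigned Base = 0xC03;
  for (unsigned i = 3; i <= 31; ++i)
    std::printf("hpmcounter%u = 0x%X\n", i, Base + (i - 3));
  return 0;
}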
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index b421eba8d442..db5e2f1eeb6f 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -39,6 +39,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeRISCVTarget() {
initializeGlobalISel(*PR);
initializeRISCVGatherScatterLoweringPass(*PR);
initializeRISCVMergeBaseOffsetOptPass(*PR);
+ initializeRISCVSExtWRemovalPass(*PR);
initializeRISCVExpandPseudoPass(*PR);
initializeRISCVInsertVSETVLIPass(*PR);
}
@@ -140,6 +141,7 @@ public:
void addPreEmitPass() override;
void addPreEmitPass2() override;
void addPreSched2() override;
+ void addMachineSSAOptimization() override;
void addPreRegAlloc() override;
};
} // namespace
@@ -194,6 +196,13 @@ void RISCVPassConfig::addPreEmitPass2() {
addPass(createRISCVExpandAtomicPseudoPass());
}
+void RISCVPassConfig::addMachineSSAOptimization() {
+ TargetPassConfig::addMachineSSAOptimization();
+
+ if (TM->getTargetTriple().getArch() == Triple::riscv64)
+ addPass(createRISCVSExtWRemovalPass());
+}
+
void RISCVPassConfig::addPreRegAlloc() {
if (TM->getOptLevel() != CodeGenOpt::None)
addPass(createRISCVMergeBaseOffsetOptPass());
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index c435430a1288..99e6774a02e4 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -15,6 +15,13 @@ using namespace llvm;
#define DEBUG_TYPE "riscvtti"
+static cl::opt<unsigned> RVVRegisterWidthLMUL(
+ "riscv-v-register-bit-width-lmul",
+ cl::desc(
+ "The LMUL to use for getRegisterBitWidth queries. Affects LMUL used "
+ "by autovectorized code. Fractional LMULs are not supported."),
+ cl::init(1), cl::Hidden);
+
InstructionCost RISCVTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
TTI::TargetCostKind CostKind) {
assert(Ty->isIntegerTy() &&
@@ -137,6 +144,24 @@ Optional<unsigned> RISCVTTIImpl::getMaxVScale() const {
return BaseT::getMaxVScale();
}
+TypeSize
+RISCVTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
+ unsigned LMUL = PowerOf2Floor(
+ std::max<unsigned>(std::min<unsigned>(RVVRegisterWidthLMUL, 8), 1));
+ switch (K) {
+ case TargetTransformInfo::RGK_Scalar:
+ return TypeSize::getFixed(ST->getXLen());
+ case TargetTransformInfo::RGK_FixedWidthVector:
+ return TypeSize::getFixed(
+ ST->hasVInstructions() ? LMUL * ST->getMinRVVVectorSizeInBits() : 0);
+ case TargetTransformInfo::RGK_ScalableVector:
+ return TypeSize::getScalable(
+ ST->hasVInstructions() ? LMUL * RISCV::RVVBitsPerBlock : 0);
+ }
+
+ llvm_unreachable("Unsupported register kind");
+}
+
InstructionCost RISCVTTIImpl::getGatherScatterOpCost(
unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
@@ -172,10 +197,7 @@ void RISCVTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
// Support explicit targets enabled for SiFive with the unrolling preferences
// below
bool UseDefaultPreferences = true;
- if (ST->getTuneCPU().contains("sifive-e76") ||
- ST->getTuneCPU().contains("sifive-s76") ||
- ST->getTuneCPU().contains("sifive-u74") ||
- ST->getTuneCPU().contains("sifive-7"))
+ if (ST->getProcFamily() == RISCVSubtarget::SiFive7)
UseDefaultPreferences = false;
if (UseDefaultPreferences)
@@ -253,3 +275,16 @@ void RISCVTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
TTI::PeelingPreferences &PP) {
BaseT::getPeelingPreferences(L, SE, PP);
}
+
+InstructionCost RISCVTTIImpl::getRegUsageForType(Type *Ty) {
+ TypeSize Size = Ty->getPrimitiveSizeInBits();
+ if (Ty->isVectorTy()) {
+ if (Size.isScalable() && ST->hasVInstructions())
+ return divideCeil(Size.getKnownMinValue(), RISCV::RVVBitsPerBlock);
+
+ if (ST->useRVVForFixedLengthVectors())
+ return divideCeil(Size, ST->getMinRVVVectorSizeInBits());
+ }
+
+ return BaseT::getRegUsageForType(Ty);
+}
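
getRegisterBitWidth now scales the advertised fixed-width vector register size by a configurable LMUL, and getRegUsageForType divides a type's size by the per-register width (RVVBitsPerBlock for scalable types, the minimum configured VLEN for fixed-width ones). A rough sketch of that arithmetic with the subtarget queries replaced by constants chosen purely for illustration:

#include <cstdio>

static unsigned divideCeil(unsigned A, unsigned B) { return (A + B - 1) / B; }

int main() {
  const unsigned RVVBitsPerBlock = 64; // bits per vscale "block"
  unsigned LMUL = 2;                   // riscv-v-register-bit-width-lmul
  unsigned MinVLen = 128;              // riscv-v-vector-bits-min

  // Fixed-width vector register width advertised to the vectorizer.
  unsigned FixedBits = LMUL * MinVLen;                                      // 256
  // Registers used by a scalable type of 4 blocks, e.g. <vscale x 4 x i64>.
  unsigned ScalableRegs = divideCeil(4 * RVVBitsPerBlock, RVVBitsPerBlock); // 4
  // Registers used by a 512-bit fixed-length vector.
  unsigned FixedRegs = divideCeil(512, MinVLen);                            // 4

  std::printf("%u %u %u\n", FixedBits, ScalableRegs, FixedRegs);
  return 0;
}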
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
index 7353496f4684..e79c4f75712b 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
@@ -58,20 +58,9 @@ public:
bool supportsScalableVectors() const { return ST->hasVInstructions(); }
Optional<unsigned> getMaxVScale() const;
- TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
- switch (K) {
- case TargetTransformInfo::RGK_Scalar:
- return TypeSize::getFixed(ST->getXLen());
- case TargetTransformInfo::RGK_FixedWidthVector:
- return TypeSize::getFixed(
- ST->hasVInstructions() ? ST->getMinRVVVectorSizeInBits() : 0);
- case TargetTransformInfo::RGK_ScalableVector:
- return TypeSize::getScalable(
- ST->hasVInstructions() ? RISCV::RVVBitsPerBlock : 0);
- }
+ TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const;
- llvm_unreachable("Unsupported register kind");
- }
+ InstructionCost getRegUsageForType(Type *Ty);
void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
TTI::UnrollingPreferences &UP,
@@ -81,7 +70,7 @@ public:
TTI::PeelingPreferences &PP);
unsigned getMinVectorRegisterBitWidth() const {
- return ST->hasVInstructions() ? ST->getMinRVVVectorSizeInBits() : 0;
+ return ST->useRVVForFixedLengthVectors() ? 16 : 0;
}
InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
@@ -189,6 +178,20 @@ public:
// Let regular unroll to unroll the loop.
return VF == 1 ? 1 : ST->getMaxInterleaveFactor();
}
+
+ // TODO: We should define RISC-V's own register classes.
+ // e.g. register class for FPR.
+ unsigned getNumberOfRegisters(unsigned ClassID) const {
+ bool Vector = (ClassID == 1);
+ if (Vector) {
+ if (ST->hasVInstructions())
+ return 32;
+ return 0;
+ }
+ // 31 = 32 GPR - x0 (zero register)
+ // FIXME: Should we exclude fixed registers like SP, TP or GP?
+ return 31;
+ }
};
} // end namespace llvm
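
The new getNumberOfRegisters hook above reports 31 usable GPRs (32 minus x0) and 32 vector registers when vector instructions are available. A tiny sketch, assuming ClassID 1 denotes the vector register file as the header's own comment implies:

#include <cstdio>

static unsigned numberOfRegisters(unsigned ClassID, bool HasVInstructions) {
  if (ClassID == 1)                   // vector register file
    return HasVInstructions ? 32 : 0; // v0-v31 when V/Zve* is present
  return 31;                          // 32 GPRs minus x0, the zero register
}

int main() {
  std::printf("%u %u %u\n", numberOfRegisters(0, false),
              numberOfRegisters(1, true), numberOfRegisters(1, false)); // 31 32 0
  return 0;
}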
diff --git a/contrib/llvm-project/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp b/contrib/llvm-project/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
index 48e6903bd1b1..af3304f0907d 100644
--- a/contrib/llvm-project/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
@@ -257,7 +257,7 @@ private:
};
public:
- SparcOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
+ SparcOperand(KindTy K) : Kind(K) {}
bool isToken() const override { return Kind == k_Token; }
bool isReg() const override { return Kind == k_Register; }
diff --git a/contrib/llvm-project/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp b/contrib/llvm-project/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp
index afb69899e724..c5d0f1de7dfd 100644
--- a/contrib/llvm-project/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp
@@ -66,7 +66,7 @@ private:
} // end anonymous namespace
SDNode* SparcDAGToDAGISel::getGlobalBaseReg() {
- unsigned GlobalBaseReg = Subtarget->getInstrInfo()->getGlobalBaseReg(MF);
+ Register GlobalBaseReg = Subtarget->getInstrInfo()->getGlobalBaseReg(MF);
return CurDAG->getRegister(GlobalBaseReg,
TLI->getPointerTy(CurDAG->getDataLayout()))
.getNode();
@@ -168,8 +168,7 @@ bool SparcDAGToDAGISel::tryInlineAsm(SDNode *N){
// placement.
SDLoc dl(N);
- SDValue Glue = N->getGluedNode() ? N->getOperand(NumOps-1)
- : SDValue(nullptr,0);
+ SDValue Glue = N->getGluedNode() ? N->getOperand(NumOps - 1) : SDValue();
SmallVector<bool, 8> OpChanged;
// Glue node will be appended late.
@@ -221,8 +220,8 @@ bool SparcDAGToDAGISel::tryInlineAsm(SDNode *N){
assert((i+2 < NumOps) && "Invalid number of operands in inline asm");
SDValue V0 = N->getOperand(i+1);
SDValue V1 = N->getOperand(i+2);
- unsigned Reg0 = cast<RegisterSDNode>(V0)->getReg();
- unsigned Reg1 = cast<RegisterSDNode>(V1)->getReg();
+ Register Reg0 = cast<RegisterSDNode>(V0)->getReg();
+ Register Reg1 = cast<RegisterSDNode>(V1)->getReg();
SDValue PairedReg;
MachineRegisterInfo &MRI = MF->getRegInfo();
diff --git a/contrib/llvm-project/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index ed1faf6b1fe8..6d6879bc94b3 100644
--- a/contrib/llvm-project/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -826,7 +826,7 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
// sret only allowed on first argument
assert(Outs[realArgIdx].OrigArgIndex == 0);
PointerType *Ty = cast<PointerType>(CLI.getArgs()[0].Ty);
- Type *ElementTy = Ty->getElementType();
+ Type *ElementTy = Ty->getPointerElementType();
SRetArgSize = DAG.getDataLayout().getTypeAllocSize(ElementTy);
continue;
}
@@ -2684,7 +2684,7 @@ static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
SDValue RetAddr;
if (depth == 0) {
auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
- unsigned RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
+ Register RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
return RetAddr;
}
@@ -3245,7 +3245,7 @@ LowerAsmOperandForConstraint(SDValue Op,
std::string &Constraint,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const {
- SDValue Result(nullptr, 0);
+ SDValue Result;
// Only support length 1 constraints for now.
if (Constraint.length() > 1)
diff --git a/contrib/llvm-project/llvm/lib/Target/Sparc/SparcTargetObjectFile.h b/contrib/llvm-project/llvm/lib/Target/Sparc/SparcTargetObjectFile.h
index 9bbe602b32b3..f30ddc7b4955 100644
--- a/contrib/llvm-project/llvm/lib/Target/Sparc/SparcTargetObjectFile.h
+++ b/contrib/llvm-project/llvm/lib/Target/Sparc/SparcTargetObjectFile.h
@@ -18,9 +18,7 @@ class TargetMachine;
class SparcELFTargetObjectFile : public TargetLoweringObjectFileELF {
public:
- SparcELFTargetObjectFile() :
- TargetLoweringObjectFileELF()
- {}
+ SparcELFTargetObjectFile() {}
void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
diff --git a/contrib/llvm-project/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h b/contrib/llvm-project/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h
index 899fec6c3328..e76fa03af3bf 100644
--- a/contrib/llvm-project/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h
+++ b/contrib/llvm-project/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h
@@ -23,11 +23,7 @@ class MCObjectTargetWriter;
class MCRegisterInfo;
class MCSubtargetInfo;
class MCTargetOptions;
-class StringRef;
class Target;
-class Triple;
-class raw_pwrite_stream;
-class raw_ostream;
namespace SystemZMC {
// How many bytes are in the ABI-defined, caller-allocated part of
diff --git a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZ.h b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZ.h
index bedbd061ea5c..5be19f0e3b46 100644
--- a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZ.h
+++ b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZ.h
@@ -20,6 +20,7 @@
namespace llvm {
class SystemZTargetMachine;
class FunctionPass;
+class PassRegistry;
namespace SystemZ {
// Condition-code mask values.
@@ -196,6 +197,15 @@ FunctionPass *createSystemZLDCleanupPass(SystemZTargetMachine &TM);
FunctionPass *createSystemZCopyPhysRegsPass(SystemZTargetMachine &TM);
FunctionPass *createSystemZPostRewritePass(SystemZTargetMachine &TM);
FunctionPass *createSystemZTDCPass();
+
+void initializeSystemZElimComparePass(PassRegistry &);
+void initializeSystemZShortenInstPass(PassRegistry &);
+void initializeSystemZLongBranchPass(PassRegistry &);
+void initializeSystemZLDCleanupPass(PassRegistry &);
+void initializeSystemZCopyPhysRegsPass(PassRegistry &);
+void initializeSystemZPostRewritePass(PassRegistry &);
+void initializeSystemZTDCPassPass(PassRegistry &);
+
} // end namespace llvm
#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
index defab665f924..e01adcce04ab 100644
--- a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
@@ -786,6 +786,50 @@ void SystemZAsmPrinter::emitEndOfAsmFile(Module &M) {
emitStackMaps(SM);
}
+void SystemZAsmPrinter::emitFunctionEntryLabel() {
+ const SystemZSubtarget &Subtarget =
+ static_cast<const SystemZSubtarget &>(MF->getSubtarget());
+
+ if (Subtarget.getTargetTriple().isOSzOS()) {
+ MCContext &OutContext = OutStreamer->getContext();
+ MCSymbol *EPMarkerSym = OutContext.createTempSymbol("CM_", true);
+
+ // EntryPoint Marker
+ const MachineFrameInfo &MFFrame = MF->getFrameInfo();
+ bool IsUsingAlloca = MFFrame.hasVarSizedObjects();
+
+ // Set Flags
+ uint8_t Flags = 0;
+ if (IsUsingAlloca)
+ Flags |= 0x04;
+
+ uint32_t DSASize = MFFrame.getStackSize();
+
+ // Combine the top 27 bits of DSASize with the bottom 5 bits of Flags.
+ uint32_t DSAAndFlags = DSASize & 0xFFFFFFE0; // (x/32) << 5
+ DSAAndFlags |= Flags;
+
+ // Emit entry point marker section.
+ OutStreamer->AddComment("XPLINK Routine Layout Entry");
+ OutStreamer->emitLabel(EPMarkerSym);
+ OutStreamer->AddComment("Eyecatcher 0x00C300C500C500");
+ OutStreamer->emitIntValueInHex(0x00C300C500C500, 7); // Eyecatcher.
+ OutStreamer->AddComment("Mark Type C'1'");
+ OutStreamer->emitInt8(0xF1); // Mark Type.
+ if (OutStreamer->isVerboseAsm()) {
+ OutStreamer->AddComment("DSA Size 0x" + Twine::utohexstr(DSASize));
+ OutStreamer->AddComment("Entry Flags");
+ if (Flags & 0x04)
+ OutStreamer->AddComment(" Bit 2: 1 = Uses alloca");
+ else
+ OutStreamer->AddComment(" Bit 2: 0 = Does not use alloca");
+ }
+ OutStreamer->emitInt32(DSAAndFlags);
+ }
+
+ AsmPrinter::emitFunctionEntryLabel();
+}
+
// Force static initialization.
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeSystemZAsmPrinter() {
RegisterAsmPrinter<SystemZAsmPrinter> X(getTheSystemZTarget());
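
emitFunctionEntryLabel above emits an XPLINK entry-point marker on z/OS in which the DSA (stack) size, kept in 32-byte units in the upper 27 bits, is packed with flag bits in the low 5 bits (bit 2 set when the function uses alloca). A compact sketch of that packing, using made-up values:

#include <cstdio>

int main() {
  unsigned DSASize = 0x1A8; // frame (DSA) size in bytes, example value
  unsigned Flags = 0x04;    // bit 2 set: the function uses alloca

  // Masking off the low 5 bits of the byte size is the same as
  // (DSASize / 32) << 5, leaving room for the flag bits below.
  unsigned DSAAndFlags = (DSASize & 0xFFFFFFE0u) | Flags;

  std::printf("word = 0x%08X (DSA in 32-byte units = %u, flags = 0x%02X)\n",
              DSAAndFlags, DSAAndFlags >> 5, DSAAndFlags & 0x1Fu);
  return 0;
}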
diff --git a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZAsmPrinter.h b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZAsmPrinter.h
index 6cfd7bd4c486..80d68d1b93ff 100644
--- a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZAsmPrinter.h
+++ b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZAsmPrinter.h
@@ -19,7 +19,6 @@
namespace llvm {
class MCStreamer;
-class MachineBasicBlock;
class MachineInstr;
class Module;
class raw_ostream;
@@ -52,6 +51,7 @@ public:
SM.reset();
return AsmPrinter::doInitialization(M);
}
+ void emitFunctionEntryLabel() override;
private:
void LowerFENTRY_CALL(const MachineInstr &MI, SystemZMCInstLower &MCIL);
diff --git a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZCopyPhysRegs.cpp b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZCopyPhysRegs.cpp
index 7d21d29d270e..763aa8c0e41f 100644
--- a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZCopyPhysRegs.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZCopyPhysRegs.cpp
@@ -25,12 +25,6 @@
using namespace llvm;
-#define SYSTEMZ_COPYPHYSREGS_NAME "SystemZ Copy Physregs"
-
-namespace llvm {
- void initializeSystemZCopyPhysRegsPass(PassRegistry&);
-}
-
namespace {
class SystemZCopyPhysRegs : public MachineFunctionPass {
@@ -41,8 +35,6 @@ public:
initializeSystemZCopyPhysRegsPass(*PassRegistry::getPassRegistry());
}
- StringRef getPassName() const override { return SYSTEMZ_COPYPHYSREGS_NAME; }
-
bool runOnMachineFunction(MachineFunction &MF) override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
@@ -59,7 +51,7 @@ char SystemZCopyPhysRegs::ID = 0;
} // end anonymous namespace
INITIALIZE_PASS(SystemZCopyPhysRegs, "systemz-copy-physregs",
- SYSTEMZ_COPYPHYSREGS_NAME, false, false)
+ "SystemZ Copy Physregs", false, false)
FunctionPass *llvm::createSystemZCopyPhysRegsPass(SystemZTargetMachine &TM) {
return new SystemZCopyPhysRegs();
diff --git a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
index 631cbff303e8..4893acc81335 100644
--- a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
@@ -65,11 +65,8 @@ class SystemZElimCompare : public MachineFunctionPass {
public:
static char ID;
- SystemZElimCompare(const SystemZTargetMachine &tm)
- : MachineFunctionPass(ID) {}
-
- StringRef getPassName() const override {
- return "SystemZ Comparison Elimination";
+ SystemZElimCompare() : MachineFunctionPass(ID) {
+ initializeSystemZElimComparePass(*PassRegistry::getPassRegistry());
}
bool processBlock(MachineBasicBlock &MBB);
@@ -106,6 +103,9 @@ char SystemZElimCompare::ID = 0;
} // end anonymous namespace
+INITIALIZE_PASS(SystemZElimCompare, DEBUG_TYPE,
+ "SystemZ Comparison Elimination", false, false)
+
// Returns true if MI is an instruction whose output equals the value in Reg.
static bool preservesValueOf(MachineInstr &MI, unsigned Reg) {
switch (MI.getOpcode()) {
@@ -746,5 +746,5 @@ bool SystemZElimCompare::runOnMachineFunction(MachineFunction &F) {
}
FunctionPass *llvm::createSystemZElimComparePass(SystemZTargetMachine &TM) {
- return new SystemZElimCompare(TM);
+ return new SystemZElimCompare();
}
diff --git a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
index 99ab4c5455d6..ccc7d0737f53 100644
--- a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
@@ -103,7 +103,7 @@ bool SystemZELFFrameLowering::assignCalleeSavedSpillSlots(
unsigned HighGPR = SystemZ::R15D;
int StartSPOffset = SystemZMC::ELFCallFrameSize;
for (auto &CS : CSI) {
- unsigned Reg = CS.getReg();
+ Register Reg = CS.getReg();
int Offset = getRegSpillOffset(MF, Reg);
if (Offset) {
if (SystemZ::GR64BitRegClass.contains(Reg) && StartSPOffset > Offset) {
@@ -124,7 +124,7 @@ bool SystemZELFFrameLowering::assignCalleeSavedSpillSlots(
// Also save the GPR varargs, if any. R6D is call-saved, so would
// already be included, but we also need to handle the call-clobbered
// argument registers.
- unsigned FirstGPR = ZFI->getVarArgsFirstGPR();
+ Register FirstGPR = ZFI->getVarArgsFirstGPR();
if (FirstGPR < SystemZ::ELFNumArgGPRs) {
unsigned Reg = SystemZ::ELFArgGPRs[FirstGPR];
int Offset = getRegSpillOffset(MF, Reg);
@@ -143,7 +143,7 @@ bool SystemZELFFrameLowering::assignCalleeSavedSpillSlots(
for (auto &CS : CSI) {
if (CS.getFrameIdx() != INT32_MAX)
continue;
- unsigned Reg = CS.getReg();
+ Register Reg = CS.getReg();
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
unsigned Size = TRI->getSpillSize(*RC);
CurrOffset -= Size;
@@ -271,7 +271,7 @@ bool SystemZELFFrameLowering::spillCalleeSavedRegisters(
// Make sure all call-saved GPRs are included as operands and are
// marked as live on entry.
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (SystemZ::GR64BitRegClass.contains(Reg))
addSavedGPR(MBB, MIB, Reg, true);
}
@@ -284,7 +284,7 @@ bool SystemZELFFrameLowering::spillCalleeSavedRegisters(
// Save FPRs/VRs in the normal TargetInstrInfo way.
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (SystemZ::FP64BitRegClass.contains(Reg)) {
MBB.addLiveIn(Reg);
TII->storeRegToStackSlot(MBB, MBBI, Reg, true, I.getFrameIdx(),
@@ -314,7 +314,7 @@ bool SystemZELFFrameLowering::restoreCalleeSavedRegisters(
// Restore FPRs/VRs in the normal TargetInstrInfo way.
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (SystemZ::FP64BitRegClass.contains(Reg))
TII->loadRegFromStackSlot(MBB, MBBI, Reg, I.getFrameIdx(),
&SystemZ::FP64BitRegClass, TRI);
@@ -346,7 +346,7 @@ bool SystemZELFFrameLowering::restoreCalleeSavedRegisters(
// Do a second scan adding regs as being defined by instruction
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (Reg != RestoreGPRs.LowGPR && Reg != RestoreGPRs.HighGPR &&
SystemZ::GR64BitRegClass.contains(Reg))
MIB.addReg(Reg, RegState::ImplicitDefine);
@@ -500,7 +500,7 @@ void SystemZELFFrameLowering::emitPrologue(MachineFunction &MF,
// Add CFI for the GPR saves.
for (auto &Save : CSI) {
- unsigned Reg = Save.getReg();
+ Register Reg = Save.getReg();
if (SystemZ::GR64BitRegClass.contains(Reg)) {
int FI = Save.getFrameIdx();
int64_t Offset = MFFrame.getObjectOffset(FI);
@@ -580,7 +580,7 @@ void SystemZELFFrameLowering::emitPrologue(MachineFunction &MF,
// Skip over the FPR/VR saves.
SmallVector<unsigned, 8> CFIIndexes;
for (auto &Save : CSI) {
- unsigned Reg = Save.getReg();
+ Register Reg = Save.getReg();
if (SystemZ::FP64BitRegClass.contains(Reg)) {
if (MBBI != MBB.end() &&
(MBBI->getOpcode() == SystemZ::STD ||
@@ -764,8 +764,7 @@ void SystemZELFFrameLowering::inlineStackProbe(
bool SystemZELFFrameLowering::hasFP(const MachineFunction &MF) const {
return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
- MF.getFrameInfo().hasVarSizedObjects() ||
- MF.getInfo<SystemZMachineFunctionInfo>()->getManipulatesSP());
+ MF.getFrameInfo().hasVarSizedObjects());
}
StackOffset SystemZELFFrameLowering::getFrameIndexReference(
@@ -850,7 +849,7 @@ bool SystemZXPLINKFrameLowering::assignCalleeSavedSpillSlots(
auto ProcessCSI = [&](std::vector<CalleeSavedInfo> &CSIList) {
for (auto &CS : CSIList) {
- unsigned Reg = CS.getReg();
+ Register Reg = CS.getReg();
int Offset = RegSpillOffsets[Reg];
if (Offset >= 0) {
if (GRRegClass.contains(Reg)) {
@@ -895,7 +894,7 @@ bool SystemZXPLINKFrameLowering::assignCalleeSavedSpillSlots(
for (auto &CS : CSI) {
if (CS.getFrameIdx() != INT32_MAX)
continue;
- unsigned Reg = CS.getReg();
+ Register Reg = CS.getReg();
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
Align Alignment = TRI->getSpillAlign(*RC);
unsigned Size = TRI->getSpillSize(*RC);
@@ -966,7 +965,7 @@ bool SystemZXPLINKFrameLowering::spillCalleeSavedRegisters(
// marked as live on entry.
auto &GRRegClass = SystemZ::GR64BitRegClass;
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (GRRegClass.contains(Reg))
addSavedGPR(MBB, MIB, Reg, true);
}
@@ -974,7 +973,7 @@ bool SystemZXPLINKFrameLowering::spillCalleeSavedRegisters(
// Spill FPRs to the stack in the normal TargetInstrInfo way
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (SystemZ::FP64BitRegClass.contains(Reg)) {
MBB.addLiveIn(Reg);
TII->storeRegToStackSlot(MBB, MBBI, Reg, true, I.getFrameIdx(),
@@ -1007,7 +1006,7 @@ bool SystemZXPLINKFrameLowering::restoreCalleeSavedRegisters(
// Restore FPRs in the normal TargetInstrInfo way.
for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
- unsigned Reg = CSI[I].getReg();
+ Register Reg = CSI[I].getReg();
if (SystemZ::FP64BitRegClass.contains(Reg))
TII->loadRegFromStackSlot(MBB, MBBI, Reg, CSI[I].getFrameIdx(),
&SystemZ::FP64BitRegClass, TRI);
@@ -1041,7 +1040,7 @@ bool SystemZXPLINKFrameLowering::restoreCalleeSavedRegisters(
// Do a second scan adding regs as being defined by instruction
for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
- unsigned Reg = CSI[I].getReg();
+ Register Reg = CSI[I].getReg();
if (Reg > RestoreGPRs.LowGPR && Reg < RestoreGPRs.HighGPR)
MIB.addReg(Reg, RegState::ImplicitDefine);
}
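
The unsigned-to-Register conversions above (and the matching changes in the VE and WebAssembly hunks later in this diff) work as drop-in replacements because llvm::Register converts implicitly to and from the raw register number. A minimal compile-only sketch against LLVM's CodeGen headers; the helper name below is invented for illustration and is not part of this commit:

    #include "llvm/CodeGen/Register.h"

    // Register wraps the raw unsigned register id and adds virtual/physical
    // queries; since it converts implicitly back to unsigned, existing callers
    // such as GR64BitRegClass.contains(Reg) keep compiling unchanged.
    static bool isSpillableVirtReg(llvm::Register Reg) {
      return Reg.isVirtual(); // equivalent to Register::isVirtualRegister(Reg)
    }
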
diff --git a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZFrameLowering.h b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZFrameLowering.h
index 106b9e8ebe06..3a1af888d8f9 100644
--- a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZFrameLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZFrameLowering.h
@@ -17,7 +17,6 @@
#include "llvm/Support/TypeSize.h"
namespace llvm {
-class SystemZTargetMachine;
class SystemZSubtarget;
class SystemZFrameLowering : public TargetFrameLowering {
diff --git a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
index 39a82e2c07e0..cf55318d328d 100644
--- a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
@@ -62,8 +62,7 @@ struct SystemZAddressingMode {
bool IncludesDynAlloc;
SystemZAddressingMode(AddrForm form, DispRange dr)
- : Form(form), DR(dr), Base(), Disp(0), Index(),
- IncludesDynAlloc(false) {}
+ : Form(form), DR(dr), Disp(0), IncludesDynAlloc(false) {}
// True if the address can have an index register.
bool hasIndexField() { return Form != FormBD; }
diff --git a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 24de52850771..f10651d5c5d7 100644
--- a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -318,8 +318,6 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, PtrVT, Custom);
- // Use custom expanders so that we can force the function to use
- // a frame pointer.
setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
@@ -1571,7 +1569,7 @@ SDValue SystemZTargetLowering::LowerFormalArguments(
int FI =
MFI.CreateFixedObject(8, -SystemZMC::ELFCallFrameSize + Offset, true);
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
- unsigned VReg = MF.addLiveIn(SystemZ::ELFArgFPRs[I],
+ Register VReg = MF.addLiveIn(SystemZ::ELFArgFPRs[I],
&SystemZ::FP64BitRegClass);
SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
@@ -3417,7 +3415,7 @@ SDValue SystemZTargetLowering::lowerRETURNADDR(SDValue Op,
}
// Return R14D, which has the return address. Mark it an implicit live-in.
- unsigned LinkReg = MF.addLiveIn(SystemZ::R14D, &SystemZ::GR64BitRegClass);
+ Register LinkReg = MF.addLiveIn(SystemZ::R14D, &SystemZ::GR64BitRegClass);
return DAG.getCopyFromReg(DAG.getEntryNode(), DL, LinkReg, PtrVT);
}
@@ -4194,7 +4192,6 @@ SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op,
MachineFunction &MF = DAG.getMachineFunction();
const SystemZSubtarget *Subtarget = &MF.getSubtarget<SystemZSubtarget>();
auto *Regs = Subtarget->getSpecialRegisters();
- MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
if (MF.getFunction().getCallingConv() == CallingConv::GHC)
report_fatal_error("Variable-sized stack allocations are not supported "
"in GHC calling convention");
@@ -4207,7 +4204,6 @@ SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op,
MachineFunction &MF = DAG.getMachineFunction();
const SystemZSubtarget *Subtarget = &MF.getSubtarget<SystemZSubtarget>();
auto *Regs = Subtarget->getSpecialRegisters();
- MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");
if (MF.getFunction().getCallingConv() == CallingConv::GHC)
@@ -8318,13 +8314,11 @@ MachineBasicBlock *SystemZTargetLowering::emitTransactionBegin(
// Add FPR/VR clobbers.
if (!NoFloat && (Control & 4) != 0) {
if (Subtarget.hasVector()) {
- for (int I = 0; I < 32; I++) {
- unsigned Reg = SystemZMC::VR128Regs[I];
+ for (unsigned Reg : SystemZMC::VR128Regs) {
MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
}
} else {
- for (int I = 0; I < 16; I++) {
- unsigned Reg = SystemZMC::FP64Regs[I];
+ for (unsigned Reg : SystemZMC::FP64Regs) {
MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
}
}
diff --git a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZISelLowering.h b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZISelLowering.h
index 940c0a857ea4..a8ddb8c62d18 100644
--- a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZISelLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZISelLowering.h
@@ -381,7 +381,6 @@ enum {
} // end namespace SystemZICMP
class SystemZSubtarget;
-class SystemZTargetMachine;
class SystemZTargetLowering : public TargetLowering {
public:
diff --git a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index e80496e37781..6db9bf3056b7 100644
--- a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -1309,7 +1309,7 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
// allocated regs are in an FP reg-class per previous check above.
for (const MachineOperand &MO : MIB->operands())
if (MO.isReg() && Register::isVirtualRegister(MO.getReg())) {
- unsigned Reg = MO.getReg();
+ Register Reg = MO.getReg();
if (MRI.getRegClass(Reg) == &SystemZ::VR32BitRegClass)
MRI.setRegClass(Reg, &SystemZ::FP32BitRegClass);
else if (MRI.getRegClass(Reg) == &SystemZ::VR64BitRegClass)
diff --git a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZLDCleanup.cpp b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZLDCleanup.cpp
index 06d893d043e9..d6c795985448 100644
--- a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZLDCleanup.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZLDCleanup.cpp
@@ -29,11 +29,8 @@ namespace {
class SystemZLDCleanup : public MachineFunctionPass {
public:
static char ID;
- SystemZLDCleanup(const SystemZTargetMachine &tm)
- : MachineFunctionPass(ID), TII(nullptr), MF(nullptr) {}
-
- StringRef getPassName() const override {
- return "SystemZ Local Dynamic TLS Access Clean-up";
+ SystemZLDCleanup() : MachineFunctionPass(ID), TII(nullptr), MF(nullptr) {
+ initializeSystemZLDCleanupPass(*PassRegistry::getPassRegistry());
}
bool runOnMachineFunction(MachineFunction &MF) override;
@@ -52,8 +49,11 @@ char SystemZLDCleanup::ID = 0;
} // end anonymous namespace
+INITIALIZE_PASS(SystemZLDCleanup, "systemz-ld-cleanup",
+ "SystemZ Local Dynamic TLS Access Clean-up", false, false)
+
FunctionPass *llvm::createSystemZLDCleanupPass(SystemZTargetMachine &TM) {
- return new SystemZLDCleanup(TM);
+ return new SystemZLDCleanup();
}
void SystemZLDCleanup::getAnalysisUsage(AnalysisUsage &AU) const {
diff --git a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
index 9c985c16f082..d53693154d40 100644
--- a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
@@ -135,10 +135,9 @@ class SystemZLongBranch : public MachineFunctionPass {
public:
static char ID;
- SystemZLongBranch(const SystemZTargetMachine &tm)
- : MachineFunctionPass(ID) {}
-
- StringRef getPassName() const override { return "SystemZ Long Branch"; }
+ SystemZLongBranch() : MachineFunctionPass(ID) {
+ initializeSystemZLongBranchPass(*PassRegistry::getPassRegistry());
+ }
bool runOnMachineFunction(MachineFunction &F) override;
@@ -174,6 +173,9 @@ const uint64_t MaxForwardRange = 0xfffe;
} // end anonymous namespace
+INITIALIZE_PASS(SystemZLongBranch, DEBUG_TYPE, "SystemZ Long Branch", false,
+ false)
+
// Position describes the state immediately before Block. Update Block
// accordingly and move Position to the end of the block's non-terminator
// instructions.
@@ -481,5 +483,5 @@ bool SystemZLongBranch::runOnMachineFunction(MachineFunction &F) {
}
FunctionPass *llvm::createSystemZLongBranchPass(SystemZTargetMachine &TM) {
- return new SystemZLongBranch(TM);
+ return new SystemZLongBranch();
}
diff --git a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZMCInstLower.h b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZMCInstLower.h
index 14ad06488312..eb09033d1850 100644
--- a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZMCInstLower.h
+++ b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZMCInstLower.h
@@ -18,7 +18,6 @@ class MCInst;
class MCOperand;
class MachineInstr;
class MachineOperand;
-class Mangler;
class SystemZAsmPrinter;
class LLVM_LIBRARY_VISIBILITY SystemZMCInstLower {
diff --git a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZMachineFunctionInfo.h b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZMachineFunctionInfo.h
index f755d5cd3d5b..ec4b812eb0e1 100644
--- a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZMachineFunctionInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZMachineFunctionInfo.h
@@ -34,14 +34,12 @@ class SystemZMachineFunctionInfo : public MachineFunctionInfo {
unsigned VarArgsFrameIndex;
unsigned RegSaveFrameIndex;
int FramePointerSaveIndex;
- bool ManipulatesSP;
unsigned NumLocalDynamics;
public:
explicit SystemZMachineFunctionInfo(MachineFunction &MF)
: VarArgsFirstGPR(0), VarArgsFirstFPR(0), VarArgsFrameIndex(0),
- RegSaveFrameIndex(0), FramePointerSaveIndex(0), ManipulatesSP(false),
- NumLocalDynamics(0) {}
+ RegSaveFrameIndex(0), FramePointerSaveIndex(0), NumLocalDynamics(0) {}
// Get and set the first and last call-saved GPR that should be saved by
// this function and the SP offset for the STMG. These are 0 if no GPRs
@@ -85,11 +83,6 @@ public:
int getFramePointerSaveIndex() const { return FramePointerSaveIndex; }
void setFramePointerSaveIndex(int Idx) { FramePointerSaveIndex = Idx; }
- // Get and set whether the function directly manipulates the stack pointer,
- // e.g. through STACKSAVE or STACKRESTORE.
- bool getManipulatesSP() const { return ManipulatesSP; }
- void setManipulatesSP(bool MSP) { ManipulatesSP = MSP; }
-
// Count number of local-dynamic TLS symbols used.
unsigned getNumLocalDynamicTLSAccesses() const { return NumLocalDynamics; }
void incNumLocalDynamicTLSAccesses() { ++NumLocalDynamics; }
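
With ManipulatesSP gone (and the setManipulatesSP calls removed from lowerSTACKSAVE and lowerSTACKRESTORE above), the ELF frame-pointer decision shown earlier in this diff reduces to two conditions. A compile-only restatement, assuming LLVM's CodeGen headers; the free-function name is hypothetical:

    #include "llvm/CodeGen/MachineFrameInfo.h"
    #include "llvm/CodeGen/MachineFunction.h"
    #include "llvm/Target/TargetMachine.h"
    #include "llvm/Target/TargetOptions.h"

    // Mirrors the simplified SystemZELFFrameLowering::hasFP: keep a frame
    // pointer only when elimination is disabled or there are dynamic allocas.
    static bool needsFramePointer(const llvm::MachineFunction &MF) {
      return MF.getTarget().Options.DisableFramePointerElim(MF) ||
             MF.getFrameInfo().hasVarSizedObjects();
    }
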
diff --git a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZPostRewrite.cpp b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZPostRewrite.cpp
index aaa7f8fc88f5..5a2cfc53da49 100644
--- a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZPostRewrite.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZPostRewrite.cpp
@@ -21,16 +21,10 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
using namespace llvm;
-#define SYSTEMZ_POSTREWRITE_NAME "SystemZ Post Rewrite pass"
-
#define DEBUG_TYPE "systemz-postrewrite"
STATISTIC(MemFoldCopies, "Number of copies inserted before folded mem ops.");
STATISTIC(LOCRMuxJumps, "Number of LOCRMux jump-sequences (lower is better)");
-namespace llvm {
- void initializeSystemZPostRewritePass(PassRegistry&);
-}
-
namespace {
class SystemZPostRewrite : public MachineFunctionPass {
@@ -44,8 +38,6 @@ public:
bool runOnMachineFunction(MachineFunction &Fn) override;
- StringRef getPassName() const override { return SYSTEMZ_POSTREWRITE_NAME; }
-
private:
void selectLOCRMux(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
@@ -70,7 +62,7 @@ char SystemZPostRewrite::ID = 0;
} // end anonymous namespace
INITIALIZE_PASS(SystemZPostRewrite, "systemz-post-rewrite",
- SYSTEMZ_POSTREWRITE_NAME, false, false)
+ "SystemZ Post Rewrite pass", false, false)
/// Returns an instance of the Post Rewrite pass.
FunctionPass *llvm::createSystemZPostRewritePass(SystemZTargetMachine &TM) {
@@ -178,15 +170,15 @@ bool SystemZPostRewrite::expandCondMove(MachineBasicBlock &MBB,
MF.insert(std::next(MachineFunction::iterator(MBB)), RestMBB);
RestMBB->splice(RestMBB->begin(), &MBB, MI, MBB.end());
RestMBB->transferSuccessors(&MBB);
- for (auto I = LiveRegs.begin(); I != LiveRegs.end(); ++I)
- RestMBB->addLiveIn(*I);
+ for (MCPhysReg R : LiveRegs)
+ RestMBB->addLiveIn(R);
// Create a new block MoveMBB to hold the move instruction.
MachineBasicBlock *MoveMBB = MF.CreateMachineBasicBlock(BB);
MF.insert(std::next(MachineFunction::iterator(MBB)), MoveMBB);
MoveMBB->addLiveIn(SrcReg);
- for (auto I = LiveRegs.begin(); I != LiveRegs.end(); ++I)
- MoveMBB->addLiveIn(*I);
+ for (MCPhysReg R : LiveRegs)
+ MoveMBB->addLiveIn(R);
// At the end of MBB, create a conditional branch to RestMBB if the
// condition is false, otherwise fall through to MoveMBB.
diff --git a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.h b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.h
index a4a5b1fbdf90..da6725777e43 100644
--- a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.h
@@ -17,8 +17,6 @@
namespace llvm {
-class SystemZTargetMachine;
-
class SystemZSelectionDAGInfo : public SelectionDAGTargetInfo {
public:
explicit SystemZSelectionDAGInfo() = default;
diff --git a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZShortenInst.cpp b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZShortenInst.cpp
index 254e5e92449b..92930dad80ef 100644
--- a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZShortenInst.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZShortenInst.cpp
@@ -26,11 +26,7 @@ namespace {
class SystemZShortenInst : public MachineFunctionPass {
public:
static char ID;
- SystemZShortenInst(const SystemZTargetMachine &tm);
-
- StringRef getPassName() const override {
- return "SystemZ Instruction Shortening";
- }
+ SystemZShortenInst();
bool processBlock(MachineBasicBlock &MBB);
bool runOnMachineFunction(MachineFunction &F) override;
@@ -56,12 +52,17 @@ private:
char SystemZShortenInst::ID = 0;
} // end anonymous namespace
+INITIALIZE_PASS(SystemZShortenInst, DEBUG_TYPE,
+ "SystemZ Instruction Shortening", false, false)
+
FunctionPass *llvm::createSystemZShortenInstPass(SystemZTargetMachine &TM) {
- return new SystemZShortenInst(TM);
+ return new SystemZShortenInst();
}
-SystemZShortenInst::SystemZShortenInst(const SystemZTargetMachine &tm)
- : MachineFunctionPass(ID), TII(nullptr) {}
+SystemZShortenInst::SystemZShortenInst()
+ : MachineFunctionPass(ID), TII(nullptr) {
+ initializeSystemZShortenInstPass(*PassRegistry::getPassRegistry());
+}
// Tie operands if MI has become a two-address instruction.
static void tieOpsIfNeeded(MachineInstr &MI) {
diff --git a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZSubtarget.cpp b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZSubtarget.cpp
index 0f03d96655bf..75c0d454d904 100644
--- a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZSubtarget.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZSubtarget.cpp
@@ -89,7 +89,7 @@ SystemZSubtarget::SystemZSubtarget(const Triple &TT, const std::string &CPU,
HasSoftFloat(false), TargetTriple(TT),
SpecialRegisters(initializeSpecialRegisters()),
InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM, *this),
- TSInfo(), FrameLowering(SystemZFrameLowering::create(*this)) {}
+ FrameLowering(SystemZFrameLowering::create(*this)) {}
bool SystemZSubtarget::enableSubRegLiveness() const {
return UseSubRegLiveness;
diff --git a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZSubtarget.h b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZSubtarget.h
index 67c5b8eb09b6..98f7094fcb48 100644
--- a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZSubtarget.h
+++ b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZSubtarget.h
@@ -85,7 +85,7 @@ private:
SystemZSubtarget &initializeSubtargetDependencies(StringRef CPU,
StringRef FS);
- SystemZCallingConventionRegisters *initializeSpecialRegisters(void);
+ SystemZCallingConventionRegisters *initializeSpecialRegisters();
public:
SystemZSubtarget(const Triple &TT, const std::string &CPU,
diff --git a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZTDC.cpp b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZTDC.cpp
index 7cb7dca2ea28..f62afb8ddfcf 100644
--- a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZTDC.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZTDC.cpp
@@ -61,10 +61,6 @@
using namespace llvm;
-namespace llvm {
- void initializeSystemZTDCPassPass(PassRegistry&);
-}
-
namespace {
class SystemZTDCPass : public FunctionPass {
diff --git a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
index deb3358102ed..f1469fe8f56b 100644
--- a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
@@ -32,6 +32,14 @@ using namespace llvm;
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeSystemZTarget() {
// Register the target.
RegisterTargetMachine<SystemZTargetMachine> X(getTheSystemZTarget());
+ auto &PR = *PassRegistry::getPassRegistry();
+ initializeSystemZElimComparePass(PR);
+ initializeSystemZShortenInstPass(PR);
+ initializeSystemZLongBranchPass(PR);
+ initializeSystemZLDCleanupPass(PR);
+ initializeSystemZPostRewritePass(PR);
+ initializeSystemZTDCPassPass(PR);
}
// Determine whether we use the vector ABI.
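
The SystemZLDCleanup, SystemZLongBranch, SystemZShortenInst and SystemZPostRewrite changes above all apply the same legacy-pass-manager idiom: drop the getPassName() override, register the pass with INITIALIZE_PASS (which also supplies the pass name), self-initialize in the constructor, and call the generated initializer from LLVMInitializeSystemZTarget. A minimal sketch of that idiom with a hypothetical pass name, not one of the passes in this diff:

    #include "llvm/CodeGen/MachineFunctionPass.h"
    #include "llvm/Pass.h"
    #include "llvm/PassRegistry.h"

    #define DEBUG_TYPE "example-pass"

    namespace llvm {
    void initializeExamplePassPass(PassRegistry &); // normally lives in a target header
    } // namespace llvm

    using namespace llvm;

    namespace {
    class ExamplePass : public MachineFunctionPass {
    public:
      static char ID;
      ExamplePass() : MachineFunctionPass(ID) {
        // Self-registration, matching the constructors added above.
        initializeExamplePassPass(*PassRegistry::getPassRegistry());
      }
      bool runOnMachineFunction(MachineFunction &MF) override { return false; }
    };
    char ExamplePass::ID = 0;
    } // end anonymous namespace

    // Defines initializeExamplePassPass() and registers the descriptive name
    // that the removed getPassName() overrides used to return.
    INITIALIZE_PASS(ExamplePass, DEBUG_TYPE, "Example pass", false, false)
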
diff --git a/contrib/llvm-project/llvm/lib/Target/VE/AsmParser/VEAsmParser.cpp b/contrib/llvm-project/llvm/lib/Target/VE/AsmParser/VEAsmParser.cpp
index fd9dc32b04f5..4a318e493c52 100644
--- a/contrib/llvm-project/llvm/lib/Target/VE/AsmParser/VEAsmParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/VE/AsmParser/VEAsmParser.cpp
@@ -210,7 +210,7 @@ private:
};
public:
- VEOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
+ VEOperand(KindTy K) : Kind(K) {}
bool isToken() const override { return Kind == k_Token; }
bool isReg() const override { return Kind == k_Register; }
diff --git a/contrib/llvm-project/llvm/lib/Target/VE/LVLGen.cpp b/contrib/llvm-project/llvm/lib/Target/VE/LVLGen.cpp
index c4588926af9e..4db6a59284c2 100644
--- a/contrib/llvm-project/llvm/lib/Target/VE/LVLGen.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/VE/LVLGen.cpp
@@ -125,8 +125,8 @@ bool LVLGen::runOnMachineFunction(MachineFunction &F) {
TII = Subtarget.getInstrInfo();
TRI = Subtarget.getRegisterInfo();
- for (MachineFunction::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI)
- Changed |= runOnMachineBasicBlock(*FI);
+ for (MachineBasicBlock &MBB : F)
+ Changed |= runOnMachineBasicBlock(MBB);
if (Changed) {
LLVM_DEBUG(dbgs() << "\n");
diff --git a/contrib/llvm-project/llvm/lib/Target/VE/MCTargetDesc/VEMCTargetDesc.h b/contrib/llvm-project/llvm/lib/Target/VE/MCTargetDesc/VEMCTargetDesc.h
index 7fb8a556aa74..f0bb6e3acdee 100644
--- a/contrib/llvm-project/llvm/lib/Target/VE/MCTargetDesc/VEMCTargetDesc.h
+++ b/contrib/llvm-project/llvm/lib/Target/VE/MCTargetDesc/VEMCTargetDesc.h
@@ -27,10 +27,6 @@ class MCRegisterInfo;
class MCSubtargetInfo;
class MCTargetOptions;
class Target;
-class Triple;
-class StringRef;
-class raw_pwrite_stream;
-class raw_ostream;
MCCodeEmitter *createVEMCCodeEmitter(const MCInstrInfo &MCII,
const MCRegisterInfo &MRI, MCContext &Ctx);
diff --git a/contrib/llvm-project/llvm/lib/Target/VE/VE.h b/contrib/llvm-project/llvm/lib/Target/VE/VE.h
index 8c1fa840f19c..2a729a1a311c 100644
--- a/contrib/llvm-project/llvm/lib/Target/VE/VE.h
+++ b/contrib/llvm-project/llvm/lib/Target/VE/VE.h
@@ -22,7 +22,6 @@
namespace llvm {
class FunctionPass;
class VETargetMachine;
-class formatted_raw_ostream;
class AsmPrinter;
class MCInst;
class MachineInstr;
diff --git a/contrib/llvm-project/llvm/lib/Target/VE/VECustomDAG.cpp b/contrib/llvm-project/llvm/lib/Target/VE/VECustomDAG.cpp
new file mode 100644
index 000000000000..af3e4af13814
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/VE/VECustomDAG.cpp
@@ -0,0 +1,81 @@
+//===-- VECustomDAG.cpp - VE Custom DAG Nodes ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interfaces that VE uses to lower LLVM code into a
+// selection DAG.
+//
+//===----------------------------------------------------------------------===//
+
+#include "VECustomDAG.h"
+
+#ifndef DEBUG_TYPE
+#define DEBUG_TYPE "vecustomdag"
+#endif
+
+namespace llvm {
+
+static const int StandardVectorWidth = 256;
+
+bool isPackedVectorType(EVT SomeVT) {
+ if (!SomeVT.isVector())
+ return false;
+ return SomeVT.getVectorNumElements() > StandardVectorWidth;
+}
+
+/// \returns the VVP_* SDNode opcode corresponding to \p Opcode.
+Optional<unsigned> getVVPOpcode(unsigned Opcode) {
+ switch (Opcode) {
+#define HANDLE_VP_TO_VVP(VPOPC, VVPNAME) \
+ case ISD::VPOPC: \
+ return VEISD::VVPNAME;
+#define ADD_VVP_OP(VVPNAME, SDNAME) \
+ case VEISD::VVPNAME: \
+ case ISD::SDNAME: \
+ return VEISD::VVPNAME;
+#include "VVPNodes.def"
+ }
+ return None;
+}
+
+bool isVVPBinaryOp(unsigned VVPOpcode) {
+ switch (VVPOpcode) {
+#define ADD_BINARY_VVP_OP(VVPNAME, ...) \
+ case VEISD::VVPNAME: \
+ return true;
+#include "VVPNodes.def"
+ }
+ return false;
+}
+
+SDValue VECustomDAG::getConstant(uint64_t Val, EVT VT, bool IsTarget,
+ bool IsOpaque) const {
+ return DAG.getConstant(Val, DL, VT, IsTarget, IsOpaque);
+}
+
+SDValue VECustomDAG::getBroadcast(EVT ResultVT, SDValue Scalar,
+ SDValue AVL) const {
+ assert(ResultVT.isVector());
+ auto ScaVT = Scalar.getValueType();
+ assert(ScaVT != MVT::i1 && "TODO: Mask broadcasts");
+
+ if (isPackedVectorType(ResultVT)) {
+ // v512x packed mode broadcast
+ // Replicate the scalar reg (f32 or i32) onto the opposing half of the full
+ // scalar register. If it's an I64 type, assume that this has already
+ // happened.
+ if (ScaVT == MVT::f32) {
+ Scalar = getNode(VEISD::REPL_F32, MVT::i64, Scalar);
+ } else if (ScaVT == MVT::i32) {
+ Scalar = getNode(VEISD::REPL_I32, MVT::i64, Scalar);
+ }
+ }
+
+ return getNode(VEISD::VEC_BROADCAST, ResultVT, {Scalar, AVL});
+}
+
+} // namespace llvm
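
A compile-only illustration of the packed-mode predicate above: VE's native vector length is 256 elements (StandardVectorWidth), so any vector type with more elements is treated as packed. The checks below are hypothetical test scaffolding, assuming the VECustomDAG.h header added in the next file of this diff:

    #include "VECustomDAG.h"
    #include "llvm/Support/MachineValueType.h"
    #include <cassert>

    static void checkPackedPredicate() {
      // 512-element types use packed mode (two 32-bit lanes per 64-bit element).
      assert(llvm::isPackedVectorType(llvm::MVT::v512i32));
      // 256-element types fit the standard vector width and are not packed.
      assert(!llvm::isPackedVectorType(llvm::MVT::v256f64));
      // Non-vector types are trivially not packed.
      assert(!llvm::isPackedVectorType(llvm::MVT::i64));
    }
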
diff --git a/contrib/llvm-project/llvm/lib/Target/VE/VECustomDAG.h b/contrib/llvm-project/llvm/lib/Target/VE/VECustomDAG.h
new file mode 100644
index 000000000000..ddd6ce783366
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/VE/VECustomDAG.h
@@ -0,0 +1,79 @@
+//===------------ VECustomDAG.h - VE Custom DAG Nodes -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the helper functions that VE uses to lower LLVM code into
+// a selection DAG; for example, they hide SDLoc handling and simplify SDNodeFlags.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_VE_VECUSTOMDAG_H
+#define LLVM_LIB_TARGET_VE_VECUSTOMDAG_H
+
+#include "VE.h"
+#include "VEISelLowering.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/TargetLowering.h"
+
+namespace llvm {
+
+Optional<unsigned> getVVPOpcode(unsigned Opcode);
+
+bool isVVPBinaryOp(unsigned Opcode);
+
+bool isPackedVectorType(EVT SomeVT);
+
+class VECustomDAG {
+ SelectionDAG &DAG;
+ SDLoc DL;
+
+public:
+ SelectionDAG *getDAG() const { return &DAG; }
+
+ VECustomDAG(SelectionDAG &DAG, SDLoc DL) : DAG(DAG), DL(DL) {}
+
+ VECustomDAG(SelectionDAG &DAG, SDValue WhereOp) : DAG(DAG), DL(WhereOp) {}
+
+ VECustomDAG(SelectionDAG &DAG, const SDNode *WhereN) : DAG(DAG), DL(WhereN) {}
+
+ /// getNode {
+ SDValue getNode(unsigned OC, SDVTList VTL, ArrayRef<SDValue> OpV,
+ Optional<SDNodeFlags> Flags = None) const {
+ auto N = DAG.getNode(OC, DL, VTL, OpV);
+ if (Flags)
+ N->setFlags(*Flags);
+ return N;
+ }
+
+ SDValue getNode(unsigned OC, ArrayRef<EVT> ResVT, ArrayRef<SDValue> OpV,
+ Optional<SDNodeFlags> Flags = None) const {
+ auto N = DAG.getNode(OC, DL, ResVT, OpV);
+ if (Flags)
+ N->setFlags(*Flags);
+ return N;
+ }
+
+ SDValue getNode(unsigned OC, EVT ResVT, ArrayRef<SDValue> OpV,
+ Optional<SDNodeFlags> Flags = None) const {
+ auto N = DAG.getNode(OC, DL, ResVT, OpV);
+ if (Flags)
+ N->setFlags(*Flags);
+ return N;
+ }
+
+ SDValue getUNDEF(EVT VT) const { return DAG.getUNDEF(VT); }
+ /// } getNode
+
+ SDValue getConstant(uint64_t Val, EVT VT, bool IsTarget = false,
+ bool IsOpaque = false) const;
+
+ SDValue getBroadcast(EVT ResultVT, SDValue Scalar, SDValue AVL) const;
+};
+
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_VE_VECUSTOMDAG_H
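
A hedged usage sketch of the wrapper above: VECustomDAG captures the SDLoc once at construction, so lowering code no longer threads a DL argument through every getNode/getConstant call. The function name and the fixed 256-element AVL below are illustrative only; the real call sites appear in the VEISelLowering.cpp hunks that follow.

    #include "VECustomDAG.h"

    using namespace llvm;

    // Sketch of a lowering helper in the style of lowerBUILD_VECTOR below.
    static SDValue lowerSplatSketch(SDValue Op, SelectionDAG &DAG) {
      VECustomDAG CDAG(DAG, Op);        // SDLoc is taken from Op once, here.
      EVT ResultVT = Op.getValueType();
      // AVL (active vector length) capped at the native 256-element width.
      SDValue AVL = CDAG.getConstant(256, MVT::i32);
      return CDAG.getBroadcast(ResultVT, Op.getOperand(0), AVL);
    }
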
diff --git a/contrib/llvm-project/llvm/lib/Target/VE/VEISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/VE/VEISelLowering.cpp
index 5ef223d6030b..9137c476777e 100644
--- a/contrib/llvm-project/llvm/lib/Target/VE/VEISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/VE/VEISelLowering.cpp
@@ -13,6 +13,7 @@
#include "VEISelLowering.h"
#include "MCTargetDesc/VEMCExpr.h"
+#include "VECustomDAG.h"
#include "VEInstrBuilder.h"
#include "VEMachineFunctionInfo.h"
#include "VERegisterInfo.h"
@@ -419,7 +420,7 @@ SDValue VETargetLowering::LowerFormalArguments(
// All integer register arguments are promoted by the caller to i64.
// Create a virtual register for the promoted live-in value.
- unsigned VReg =
+ Register VReg =
MF.addLiveIn(VA.getLocReg(), getRegClassFor(VA.getLocVT()));
SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());
@@ -754,7 +755,7 @@ SDValue VETargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
for (unsigned i = 0; i != RVLocs.size(); ++i) {
CCValAssign &VA = RVLocs[i];
assert(!VA.needsCustom() && "Unexpected custom lowering");
- unsigned Reg = VA.getLocReg();
+ Register Reg = VA.getLocReg();
// When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
// reside in the same register in the high and low bits. Reuse the
@@ -898,6 +899,8 @@ const char *VETargetLowering::getTargetNodeName(unsigned Opcode) const {
TARGET_NODE_CASE(RET_FLAG)
TARGET_NODE_CASE(TS1AM)
TARGET_NODE_CASE(VEC_BROADCAST)
+ TARGET_NODE_CASE(REPL_I32)
+ TARGET_NODE_CASE(REPL_F32)
// Register the VVP_* SDNodes.
#define ADD_VVP_OP(VVP_NAME, ...) TARGET_NODE_CASE(VVP_NAME)
@@ -1545,7 +1548,7 @@ static SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG,
unsigned Depth = Op.getConstantOperandVal(0);
const VERegisterInfo *RegInfo = Subtarget->getRegisterInfo();
- unsigned FrameReg = RegInfo->getFrameRegister(MF);
+ Register FrameReg = RegInfo->getFrameRegister(MF);
SDValue FrameAddr =
DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, PtrVT);
while (Depth--)
@@ -1640,28 +1643,26 @@ static SDValue getSplatValue(SDNode *N) {
SDValue VETargetLowering::lowerBUILD_VECTOR(SDValue Op,
SelectionDAG &DAG) const {
- SDLoc DL(Op);
- unsigned NumEls = Op.getValueType().getVectorNumElements();
- MVT ElemVT = Op.getSimpleValueType().getVectorElementType();
+ VECustomDAG CDAG(DAG, Op);
+ MVT ResultVT = Op.getSimpleValueType();
// If there is just one element, expand to INSERT_VECTOR_ELT.
unsigned UniqueIdx;
if (getUniqueInsertion(Op.getNode(), UniqueIdx)) {
- SDValue AccuV = DAG.getUNDEF(Op.getValueType());
+ SDValue AccuV = CDAG.getUNDEF(Op.getValueType());
auto ElemV = Op->getOperand(UniqueIdx);
- SDValue IdxV = DAG.getConstant(UniqueIdx, DL, MVT::i64);
- return DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, Op.getValueType(), AccuV,
- ElemV, IdxV);
+ SDValue IdxV = CDAG.getConstant(UniqueIdx, MVT::i64);
+ return CDAG.getNode(ISD::INSERT_VECTOR_ELT, ResultVT, {AccuV, ElemV, IdxV});
}
// Else emit a broadcast.
if (SDValue ScalarV = getSplatValue(Op.getNode())) {
- // lower to VEC_BROADCAST
- MVT LegalResVT = MVT::getVectorVT(ElemVT, 256);
-
- auto AVL = DAG.getConstant(NumEls, DL, MVT::i32);
- return DAG.getNode(VEISD::VEC_BROADCAST, DL, LegalResVT, Op.getOperand(0),
- AVL);
+ unsigned NumEls = ResultVT.getVectorNumElements();
+ // TODO: Legalize packed-mode AVL.
+ // For now, cap the AVL at 256.
+ auto CappedLength = std::min<unsigned>(256, NumEls);
+ auto AVL = CDAG.getConstant(CappedLength, MVT::i32);
+ return CDAG.getBroadcast(ResultVT, Op.getOperand(0), AVL);
}
// Expand
@@ -1720,7 +1721,7 @@ SDValue VETargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::EXTRACT_VECTOR_ELT:
return lowerEXTRACT_VECTOR_ELT(Op, DAG);
-#define ADD_BINARY_VVP_OP(VVP_NAME, VP_NAME, ISD_NAME) case ISD::ISD_NAME:
+#define ADD_VVP_OP(VVP_NAME, ISD_NAME) case ISD::ISD_NAME:
#include "VVPNodes.def"
return lowerToVVP(Op, DAG);
}
@@ -2666,21 +2667,6 @@ bool VETargetLowering::hasAndNot(SDValue Y) const {
return true;
}
-/// \returns the VVP_* SDNode opcode corresponsing to \p OC.
-static Optional<unsigned> getVVPOpcode(unsigned Opcode) {
- switch (Opcode) {
-#define HANDLE_VP_TO_VVP(VPOPC, VVPNAME) \
- case ISD::VPOPC: \
- return VEISD::VVPNAME;
-#define ADD_VVP_OP(VVPNAME, SDNAME) \
- case VEISD::VVPNAME: \
- case ISD::SDNAME: \
- return VEISD::VVPNAME;
-#include "VVPNodes.def"
- }
- return None;
-}
-
SDValue VETargetLowering::lowerToVVP(SDValue Op, SelectionDAG &DAG) const {
// Can we represent this as a VVP node.
const unsigned Opcode = Op->getOpcode();
@@ -2691,7 +2677,7 @@ SDValue VETargetLowering::lowerToVVP(SDValue Op, SelectionDAG &DAG) const {
const bool FromVP = ISD::isVPOpcode(Opcode);
// The representative and legalized vector type of this operation.
- SDLoc DL(Op);
+ VECustomDAG CDAG(DAG, Op);
MVT MaskVT = MVT::v256i1; // TODO: packed mode.
EVT OpVecVT = Op.getValueType();
EVT LegalVecVT = getTypeToTransformTo(*DAG.getContext(), OpVecVT);
@@ -2708,27 +2694,21 @@ SDValue VETargetLowering::lowerToVVP(SDValue Op, SelectionDAG &DAG) const {
} else {
// Materialize the VL parameter.
- AVL = DAG.getConstant(OpVecVT.getVectorNumElements(), DL, MVT::i32);
- SDValue ConstTrue = DAG.getConstant(1, DL, MVT::i32);
- Mask = DAG.getNode(VEISD::VEC_BROADCAST, DL, MaskVT,
- ConstTrue); // emit a VEISD::VEC_BROADCAST here.
+ AVL = CDAG.getConstant(OpVecVT.getVectorNumElements(), MVT::i32);
+ SDValue ConstTrue = CDAG.getConstant(1, MVT::i32);
+ Mask = CDAG.getBroadcast(MaskVT, ConstTrue, AVL);
}
- // Categories we are interested in.
- bool IsBinaryOp = false;
-
- switch (VVPOpcode) {
-#define ADD_BINARY_VVP_OP(VVPNAME, ...) \
- case VEISD::VVPNAME: \
- IsBinaryOp = true; \
- break;
-#include "VVPNodes.def"
- }
-
- if (IsBinaryOp) {
+ if (isVVPBinaryOp(VVPOpcode)) {
assert(LegalVecVT.isSimple());
- return DAG.getNode(VVPOpcode, DL, LegalVecVT, Op->getOperand(0),
- Op->getOperand(1), Mask, AVL);
+ return CDAG.getNode(VVPOpcode, LegalVecVT,
+ {Op->getOperand(0), Op->getOperand(1), Mask, AVL});
+ }
+ if (VVPOpcode == VEISD::VVP_SELECT) {
+ auto Mask = Op->getOperand(0);
+ auto OnTrue = Op->getOperand(1);
+ auto OnFalse = Op->getOperand(2);
+ return CDAG.getNode(VVPOpcode, LegalVecVT, {OnTrue, OnFalse, Mask, AVL});
}
llvm_unreachable("lowerToVVP called for unexpected SDNode.");
}
@@ -2750,7 +2730,7 @@ SDValue VETargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
SDValue Idx = Op.getOperand(1);
SDLoc DL(Op);
SDValue Result = Op;
- if (0 /* Idx->isConstant() */) {
+ if (false /* Idx->isConstant() */) {
// TODO: optimized implementation using constant values
} else {
SDValue Const1 = DAG.getConstant(1, DL, MVT::i64);
@@ -2808,7 +2788,7 @@ SDValue VETargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
Val = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Val);
SDValue Result = Op;
- if (0 /* Idx->isConstant()*/) {
+ if (false /* Idx->isConstant()*/) {
// TODO: optimized implementation using constant values
} else {
SDValue Const1 = DAG.getConstant(1, DL, MVT::i64);
diff --git a/contrib/llvm-project/llvm/lib/Target/VE/VEISelLowering.h b/contrib/llvm-project/llvm/lib/Target/VE/VEISelLowering.h
index b4ce8906fd51..09bd19e83717 100644
--- a/contrib/llvm-project/llvm/lib/Target/VE/VEISelLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/VE/VEISelLowering.h
@@ -40,6 +40,8 @@ enum NodeType : unsigned {
TS1AM, // A TS1AM instruction used for 1/2 bytes swap.
VEC_BROADCAST, // A vector broadcast instruction.
// 0: scalar value, 1: VL
+ REPL_I32,
+ REPL_F32, // Replicate subregister to other half.
// VVP_* nodes.
#define ADD_VVP_OP(VVP_NAME, ...) VVP_NAME,
@@ -219,4 +221,4 @@ public:
};
} // namespace llvm
-#endif // VE_ISELLOWERING_H
+#endif // LLVM_LIB_TARGET_VE_VEISELLOWERING_H
diff --git a/contrib/llvm-project/llvm/lib/Target/VE/VEInstrInfo.cpp b/contrib/llvm-project/llvm/lib/Target/VE/VEInstrInfo.cpp
index 46846edfeafb..7c1bd5201867 100644
--- a/contrib/llvm-project/llvm/lib/Target/VE/VEInstrInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/VE/VEInstrInfo.cpp
@@ -248,7 +248,7 @@ unsigned VEInstrInfo::insertBranch(MachineBasicBlock &MBB,
const TargetRegisterInfo *TRI = &getRegisterInfo();
MachineFunction *MF = MBB.getParent();
const MachineRegisterInfo &MRI = MF->getRegInfo();
- unsigned Reg = Cond[2].getReg();
+ Register Reg = Cond[2].getReg();
if (IsIntegerCC(Cond[0].getImm())) {
if (TRI->getRegSizeInBits(Reg, MRI) == 32) {
opc[0] = VE::BRCFWir;
diff --git a/contrib/llvm-project/llvm/lib/Target/VE/VEInstrInfo.td b/contrib/llvm-project/llvm/lib/Target/VE/VEInstrInfo.td
index c3abbe2cafab..717427c3f48d 100644
--- a/contrib/llvm-project/llvm/lib/Target/VE/VEInstrInfo.td
+++ b/contrib/llvm-project/llvm/lib/Target/VE/VEInstrInfo.td
@@ -1576,6 +1576,12 @@ def f2l : OutPatFrag<(ops node:$exp),
def l2f : OutPatFrag<(ops node:$exp),
(EXTRACT_SUBREG $exp, sub_f32)>;
+// Zero out subregisters.
+def zero_i32 : OutPatFrag<(ops node:$expr),
+ (ANDrm $expr, 32)>;
+def zero_f32 : OutPatFrag<(ops node:$expr),
+ (ANDrm $expr, !add(32, 64))>;
+
// Small immediates.
def : Pat<(i32 simm7:$val), (EXTRACT_SUBREG (ORim (LO7 $val), 0), sub_i32)>;
def : Pat<(i64 simm7:$val), (ORim (LO7 $val), 0)>;
@@ -2287,6 +2293,16 @@ class IsVLVT<int OpIdx> : SDTCisVT<OpIdx,i32>;
def vec_broadcast : SDNode<"VEISD::VEC_BROADCAST", SDTypeProfile<1, 2,
[SDTCisVec<0>, IsVLVT<2>]>>;
+// Replicate lower 32 bits to upper 32 bits (f32 scalar replication).
+def repl_f32 : SDNode<"VEISD::REPL_F32",
+ SDTypeProfile<1, 1,
+ [SDTCisInt<0>, SDTCisFP<1>]>>;
+// Replicate upper 32 bits to lower 32 bits (i32 scalar replication).
+def repl_i32 : SDNode<"VEISD::REPL_I32",
+ SDTypeProfile<1, 1,
+ [SDTCisInt<0>, SDTCisInt<1>]>>;
+
+
// Whether this is an all-true mask (assuming undef-bits above VL are all-true).
def true_mask : PatLeaf<
(vec_broadcast (i32 nonzero), (i32 srcvalue))>;
diff --git a/contrib/llvm-project/llvm/lib/Target/VE/VEInstrPatternsVec.td b/contrib/llvm-project/llvm/lib/Target/VE/VEInstrPatternsVec.td
index dc3c913c918a..6c5b80315efb 100644
--- a/contrib/llvm-project/llvm/lib/Target/VE/VEInstrPatternsVec.td
+++ b/contrib/llvm-project/llvm/lib/Target/VE/VEInstrPatternsVec.td
@@ -15,6 +15,17 @@
// Instruction format superclass
//===----------------------------------------------------------------------===//
+// Sub-register replication for packed broadcast.
+def: Pat<(i64 (repl_f32 f32:$val)),
+ (ORrr
+ (SRLri (f2l $val), 32),
+ (zero_i32 (f2l $val)))>;
+def: Pat<(i64 (repl_i32 i32:$val)),
+ (ORrr
+ (zero_f32 (i2l $val)),
+ (SLLri (i2l $val), 32))>;
+
+
multiclass vbrd_elem32<ValueType v32, ValueType s32, SDPatternOperator ImmOp,
SDNodeXForm ImmCast, OutPatFrag SuperRegCast> {
// VBRDil
@@ -89,3 +100,8 @@ defm : patterns_elem32<v256f32, f32, simm7fp, LO7FP, l2f, f2l>;
defm : patterns_elem64<v256i64, i64, simm7, LO7>;
defm : patterns_elem64<v256f64, f64, simm7fp, LO7FP>;
+
+defm : vbrd_elem64<v512i32, i64, simm7, LO7>;
+defm : vbrd_elem64<v512f32, i64, simm7, LO7>;
+defm : vbrd_elem64<v512i32, f64, simm7fp, LO7FP>;
+defm : vbrd_elem64<v512f32, f64, simm7fp, LO7FP>;
diff --git a/contrib/llvm-project/llvm/lib/Target/VE/VEMCInstLower.cpp b/contrib/llvm-project/llvm/lib/Target/VE/VEMCInstLower.cpp
index bc5577ce4f97..57195f238cf6 100644
--- a/contrib/llvm-project/llvm/lib/Target/VE/VEMCInstLower.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/VE/VEMCInstLower.cpp
@@ -78,8 +78,7 @@ void llvm::LowerVEMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
AsmPrinter &AP) {
OutMI.setOpcode(MI->getOpcode());
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
+ for (const MachineOperand &MO : MI->operands()) {
MCOperand MCOp = LowerOperand(MI, MO, AP);
if (MCOp.isValid())
diff --git a/contrib/llvm-project/llvm/lib/Target/VE/VEMachineFunctionInfo.h b/contrib/llvm-project/llvm/lib/Target/VE/VEMachineFunctionInfo.h
index 16b25fed3f11..3160f6a552d7 100644
--- a/contrib/llvm-project/llvm/lib/Target/VE/VEMachineFunctionInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/VE/VEMachineFunctionInfo.h
@@ -29,10 +29,9 @@ private:
bool IsLeafProc;
public:
- VEMachineFunctionInfo()
- : GlobalBaseReg(), VarArgsFrameOffset(0), IsLeafProc(false) {}
+ VEMachineFunctionInfo() : VarArgsFrameOffset(0), IsLeafProc(false) {}
explicit VEMachineFunctionInfo(MachineFunction &MF)
- : GlobalBaseReg(), VarArgsFrameOffset(0), IsLeafProc(false) {}
+ : VarArgsFrameOffset(0), IsLeafProc(false) {}
Register getGlobalBaseReg() const { return GlobalBaseReg; }
void setGlobalBaseReg(Register Reg) { GlobalBaseReg = Reg; }
diff --git a/contrib/llvm-project/llvm/lib/Target/VE/VESubtarget.h b/contrib/llvm-project/llvm/lib/Target/VE/VESubtarget.h
index 213aca2ea3f9..0c3dc0a08072 100644
--- a/contrib/llvm-project/llvm/lib/Target/VE/VESubtarget.h
+++ b/contrib/llvm-project/llvm/lib/Target/VE/VESubtarget.h
@@ -76,7 +76,7 @@ public:
/// Get the size of RSA, return address, and frame pointer as described
/// in VEFrameLowering.cpp.
- unsigned getRsaSize(void) const { return 176; };
+ unsigned getRsaSize() const { return 176; };
bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
};
diff --git a/contrib/llvm-project/llvm/lib/Target/VE/VVPInstrInfo.td b/contrib/llvm-project/llvm/lib/Target/VE/VVPInstrInfo.td
index 99566e91ec11..ef9c238066c0 100644
--- a/contrib/llvm-project/llvm/lib/Target/VE/VVPInstrInfo.td
+++ b/contrib/llvm-project/llvm/lib/Target/VE/VVPInstrInfo.td
@@ -39,6 +39,15 @@ def SDTFPBinOpVVP : SDTypeProfile<1, 4, [ // vvp_fadd, etc.
IsVLVT<4>
]>;
+// Select(OnTrue, OnFalse, SelMask, vl)
+def SDTSelectVVP : SDTypeProfile<1, 4, [ // vp_select, vp_merge
+ SDTCisVec<0>,
+ SDTCisSameNumEltsAs<0, 3>,
+ SDTCisSameAs<0, 1>,
+ SDTCisSameAs<1, 2>,
+ IsVLVT<4>
+]>;
+
// Binary operator commutative pattern.
class vvp_commutative<SDNode RootOp> :
PatFrags<
@@ -79,3 +88,5 @@ def c_vvp_fmul : vvp_commutative<vvp_fmul>;
def vvp_fdiv : SDNode<"VEISD::VVP_FDIV", SDTFPBinOpVVP>;
// } Binary Operators
+
+def vvp_select : SDNode<"VEISD::VVP_SELECT", SDTSelectVVP>;
diff --git a/contrib/llvm-project/llvm/lib/Target/VE/VVPInstrPatternsVec.td b/contrib/llvm-project/llvm/lib/Target/VE/VVPInstrPatternsVec.td
index 8d5d9d103547..74720fd1f419 100644
--- a/contrib/llvm-project/llvm/lib/Target/VE/VVPInstrPatternsVec.td
+++ b/contrib/llvm-project/llvm/lib/Target/VE/VVPInstrPatternsVec.td
@@ -20,8 +20,22 @@ include "VVPInstrInfo.td"
multiclass Binary_rv<SDPatternOperator OpNode,
ValueType ScalarVT, ValueType DataVT,
ValueType MaskVT, string OpBaseName> {
- // Masked with select, broadcast.
- // TODO
+ // Masked with passthru, broadcast.
+ def : Pat<(vvp_select
+ (OpNode
+ (any_broadcast ScalarVT:$sx),
+ DataVT:$vy,
+ (MaskVT srcvalue),
+ (i32 srcvalue)),
+ DataVT:$vfalse,
+ MaskVT:$mask,
+ i32:$pivot),
+ (!cast<Instruction>(OpBaseName#"rvml_v")
+ ScalarVT:$sx,
+ $vy,
+ $mask,
+ $pivot,
+ $vfalse)>;
// Unmasked, broadcast.
def : Pat<(OpNode
@@ -42,8 +56,22 @@ multiclass Binary_rv<SDPatternOperator OpNode,
multiclass Binary_vr<SDPatternOperator OpNode,
ValueType ScalarVT, ValueType DataVT,
ValueType MaskVT, string OpBaseName> {
- // Masked with select, broadcast.
- // TODO
+ // Masked with passthru, broadcast.
+ def : Pat<(vvp_select
+ (OpNode
+ DataVT:$vx,
+ (any_broadcast ScalarVT:$sy),
+ (MaskVT srcvalue),
+ (i32 srcvalue)),
+ DataVT:$vfalse,
+ MaskVT:$mask,
+ i32:$pivot),
+ (!cast<Instruction>(OpBaseName#"vrml_v")
+ $vx,
+ ScalarVT:$sy,
+ $mask,
+ $pivot,
+ $vfalse)>;
// Unmasked, broadcast.
def : Pat<(OpNode
@@ -64,6 +92,23 @@ multiclass Binary_vr<SDPatternOperator OpNode,
multiclass Binary_vv<SDPatternOperator OpNode,
ValueType DataVT,
ValueType MaskVT, string OpBaseName> {
+ // Masked with passthru, broadcast.
+ def : Pat<(vvp_select
+ (OpNode
+ DataVT:$vx,
+ DataVT:$vy,
+ (MaskVT srcvalue),
+ (i32 srcvalue)),
+ DataVT:$vfalse,
+ MaskVT:$mask,
+ i32:$pivot),
+ (!cast<Instruction>(OpBaseName#"vvml_v")
+ $vx,
+ $vy,
+ $mask,
+ $pivot,
+ $vfalse)>;
+
// Masked with select.
// TODO
@@ -191,3 +236,35 @@ defm : Binary_rv_vv_ShortLong<vvp_fsub,
defm : Binary_rv_vr_vv_ShortLong<vvp_fdiv,
f64, v256f64, "VFDIVD",
f32, v256f32, "VFDIVS">;
+
+multiclass Merge_mvv<
+ SDPatternOperator OpNode,
+ ValueType DataVT, ValueType MaskVT,
+ string OpBaseName> {
+ // Masked.
+ def : Pat<(OpNode
+ DataVT:$vtrue, DataVT:$vfalse,
+ MaskVT:$vm,
+ i32:$avl),
+ (!cast<Instruction>(OpBaseName#"vvml_v")
+ $vfalse, $vtrue, $vm, $avl, $vfalse)>;
+}
+
+multiclass Merge_mvv_ShortLong<
+ SDPatternOperator OpNode,
+ ValueType LongDataVT, ValueType ShortDataVT,
+ string OpBaseName> {
+ defm : Merge_mvv<OpNode,
+ LongDataVT, v256i1,
+ OpBaseName>;
+ defm : Merge_mvv<OpNode,
+ ShortDataVT, v256i1,
+ OpBaseName>;
+}
+
+defm : Merge_mvv_ShortLong<vvp_select,
+ v256f64,
+ v256f32, "VMRG">;
+defm : Merge_mvv_ShortLong<vvp_select,
+ v256i64,
+ v256i32, "VMRG">;
diff --git a/contrib/llvm-project/llvm/lib/Target/VE/VVPNodes.def b/contrib/llvm-project/llvm/lib/Target/VE/VVPNodes.def
index 8a9231f7d3e6..8000f84c5dbe 100644
--- a/contrib/llvm-project/llvm/lib/Target/VE/VVPNodes.def
+++ b/contrib/llvm-project/llvm/lib/Target/VE/VVPNodes.def
@@ -59,6 +59,11 @@ ADD_BINARY_VVP_OP_COMPACT(FSUB)
ADD_BINARY_VVP_OP_COMPACT(FMUL)
ADD_BINARY_VVP_OP_COMPACT(FDIV)
+// Shuffles.
+ADD_VVP_OP(VVP_SELECT, VSELECT)
+HANDLE_VP_TO_VVP(VP_SELECT, VVP_SELECT)
+HANDLE_VP_TO_VVP(VP_MERGE, VVP_SELECT)
+
#undef ADD_BINARY_VVP_OP
#undef ADD_BINARY_VVP_OP_COMPACT
#undef ADD_VVP_OP
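
The VVP_SELECT and HANDLE_VP_TO_VVP entries above are consumed through the X-macro include pattern used by getVVPOpcode and isVVPBinaryOp earlier in this diff: each consumer defines ADD_VVP_OP / HANDLE_VP_TO_VVP, includes VVPNodes.def, then undefines them. A standalone sketch of the technique with made-up node names, runnable without LLVM:

    #include <cstdio>

    // Stand-in for VVPNodes.def: each consumer defines HANDLE_NODE before
    // expanding the list, then undefines it again.
    #define EXAMPLE_NODES \
      HANDLE_NODE(ADD)    \
      HANDLE_NODE(SELECT)

    // Consumer 1: build an enum of opcodes.
    enum ExampleOpcode {
    #define HANDLE_NODE(NAME) OP_##NAME,
      EXAMPLE_NODES
    #undef HANDLE_NODE
    };

    // Consumer 2: map an opcode back to its name, analogous to the switches in
    // getVVPOpcode and isVVPBinaryOp.
    static const char *opcodeName(ExampleOpcode Op) {
      switch (Op) {
    #define HANDLE_NODE(NAME)                                                   \
      case OP_##NAME:                                                           \
        return #NAME;
        EXAMPLE_NODES
    #undef HANDLE_NODE
      }
      return "unknown";
    }

    int main() { std::printf("%s\n", opcodeName(OP_SELECT)); }
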
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
index b2f10ca93a4f..75d5d0675990 100644
--- a/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
@@ -26,7 +26,6 @@ class MCAsmBackend;
class MCCodeEmitter;
class MCInstrInfo;
class MCObjectTargetWriter;
-class MVT;
class Triple;
MCCodeEmitter *createWebAssemblyMCCodeEmitter(const MCInstrInfo &MCII);
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/Utils/WebAssemblyUtilities.h b/contrib/llvm-project/llvm/lib/Target/WebAssembly/Utils/WebAssemblyUtilities.h
index d024185defb4..57e40f6cd8d7 100644
--- a/contrib/llvm-project/llvm/lib/Target/WebAssembly/Utils/WebAssemblyUtilities.h
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/Utils/WebAssemblyUtilities.h
@@ -25,7 +25,6 @@ class MachineInstr;
class MachineOperand;
class MCContext;
class MCSymbolWasm;
-class StringRef;
class WebAssemblyFunctionInfo;
class WebAssemblySubtarget;
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
index 910a4e5e0d1a..eeec0fc671cc 100644
--- a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
@@ -406,7 +406,7 @@ bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) {
// TODO: Sort the locals for better compression.
MFI.setNumLocals(CurLocal - MFI.getParams().size());
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I < E; ++I) {
- unsigned Reg = Register::index2VirtReg(I);
+ Register Reg = Register::index2VirtReg(I);
auto RL = Reg2Local.find(Reg);
if (RL == Reg2Local.end() || RL->second < MFI.getParams().size())
continue;
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
index 642aa6b4028a..406edef8ff3f 100644
--- a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
@@ -286,7 +286,7 @@ bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) {
}
if (S == 1 && Addr.isRegBase() && Addr.getReg() == 0) {
// An unscaled add of a register. Set it as the new base.
- unsigned Reg = getRegForValue(Op);
+ Register Reg = getRegForValue(Op);
if (Reg == 0)
return false;
Addr.setReg(Reg);
@@ -372,7 +372,7 @@ bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) {
if (Addr.isSet()) {
return false;
}
- unsigned Reg = getRegForValue(Obj);
+ Register Reg = getRegForValue(Obj);
if (Reg == 0)
return false;
Addr.setReg(Reg);
@@ -430,7 +430,7 @@ unsigned WebAssemblyFastISel::getRegForI1Value(const Value *V,
}
Not = false;
- unsigned Reg = getRegForValue(V);
+ Register Reg = getRegForValue(V);
if (Reg == 0)
return 0;
return maskI1Value(Reg, V);
@@ -458,12 +458,12 @@ unsigned WebAssemblyFastISel::zeroExtendToI32(unsigned Reg, const Value *V,
return 0;
}
- unsigned Imm = createResultReg(&WebAssembly::I32RegClass);
+ Register Imm = createResultReg(&WebAssembly::I32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(WebAssembly::CONST_I32), Imm)
.addImm(~(~uint64_t(0) << MVT(From).getSizeInBits()));
- unsigned Result = createResultReg(&WebAssembly::I32RegClass);
+ Register Result = createResultReg(&WebAssembly::I32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(WebAssembly::AND_I32), Result)
.addReg(Reg)
@@ -488,18 +488,18 @@ unsigned WebAssemblyFastISel::signExtendToI32(unsigned Reg, const Value *V,
return 0;
}
- unsigned Imm = createResultReg(&WebAssembly::I32RegClass);
+ Register Imm = createResultReg(&WebAssembly::I32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(WebAssembly::CONST_I32), Imm)
.addImm(32 - MVT(From).getSizeInBits());
- unsigned Left = createResultReg(&WebAssembly::I32RegClass);
+ Register Left = createResultReg(&WebAssembly::I32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(WebAssembly::SHL_I32), Left)
.addReg(Reg)
.addReg(Imm);
- unsigned Right = createResultReg(&WebAssembly::I32RegClass);
+ Register Right = createResultReg(&WebAssembly::I32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(WebAssembly::SHR_S_I32), Right)
.addReg(Left)
@@ -517,7 +517,7 @@ unsigned WebAssemblyFastISel::zeroExtend(unsigned Reg, const Value *V,
Reg = zeroExtendToI32(Reg, V, From);
- unsigned Result = createResultReg(&WebAssembly::I64RegClass);
+ Register Result = createResultReg(&WebAssembly::I64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(WebAssembly::I64_EXTEND_U_I32), Result)
.addReg(Reg);
@@ -539,7 +539,7 @@ unsigned WebAssemblyFastISel::signExtend(unsigned Reg, const Value *V,
Reg = signExtendToI32(Reg, V, From);
- unsigned Result = createResultReg(&WebAssembly::I64RegClass);
+ Register Result = createResultReg(&WebAssembly::I64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(WebAssembly::I64_EXTEND_S_I32), Result)
.addReg(Reg);
@@ -555,7 +555,7 @@ unsigned WebAssemblyFastISel::signExtend(unsigned Reg, const Value *V,
unsigned WebAssemblyFastISel::getRegForUnsignedValue(const Value *V) {
MVT::SimpleValueType From = getSimpleType(V->getType());
MVT::SimpleValueType To = getLegalType(From);
- unsigned VReg = getRegForValue(V);
+ Register VReg = getRegForValue(V);
if (VReg == 0)
return 0;
return zeroExtend(VReg, V, From, To);
@@ -564,7 +564,7 @@ unsigned WebAssemblyFastISel::getRegForUnsignedValue(const Value *V) {
unsigned WebAssemblyFastISel::getRegForSignedValue(const Value *V) {
MVT::SimpleValueType From = getSimpleType(V->getType());
MVT::SimpleValueType To = getLegalType(From);
- unsigned VReg = getRegForValue(V);
+ Register VReg = getRegForValue(V);
if (VReg == 0)
return 0;
return signExtend(VReg, V, From, To);
@@ -578,7 +578,7 @@ unsigned WebAssemblyFastISel::getRegForPromotedValue(const Value *V,
unsigned WebAssemblyFastISel::notValue(unsigned Reg) {
assert(MRI.getRegClass(Reg) == &WebAssembly::I32RegClass);
- unsigned NotReg = createResultReg(&WebAssembly::I32RegClass);
+ Register NotReg = createResultReg(&WebAssembly::I32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(WebAssembly::EQZ_I32), NotReg)
.addReg(Reg);
@@ -586,7 +586,7 @@ unsigned WebAssemblyFastISel::notValue(unsigned Reg) {
}
unsigned WebAssemblyFastISel::copyValue(unsigned Reg) {
- unsigned ResultReg = createResultReg(MRI.getRegClass(Reg));
+ Register ResultReg = createResultReg(MRI.getRegClass(Reg));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(WebAssembly::COPY),
ResultReg)
.addReg(Reg);
@@ -598,7 +598,7 @@ unsigned WebAssemblyFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
FuncInfo.StaticAllocaMap.find(AI);
if (SI != FuncInfo.StaticAllocaMap.end()) {
- unsigned ResultReg =
+ Register ResultReg =
createResultReg(Subtarget->hasAddr64() ? &WebAssembly::I64RegClass
: &WebAssembly::I32RegClass);
unsigned Opc =
@@ -617,7 +617,7 @@ unsigned WebAssemblyFastISel::fastMaterializeConstant(const Constant *C) {
return 0;
if (GV->isThreadLocal())
return 0;
- unsigned ResultReg =
+ Register ResultReg =
createResultReg(Subtarget->hasAddr64() ? &WebAssembly::I64RegClass
: &WebAssembly::I32RegClass);
unsigned Opc = Subtarget->hasAddr64() ? WebAssembly::CONST_I64
@@ -715,7 +715,7 @@ bool WebAssemblyFastISel::fastLowerArguments() {
default:
return false;
}
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addImm(I);
updateValueMap(&Arg, ResultReg);
@@ -887,7 +887,7 @@ bool WebAssemblyFastISel::selectCall(const Instruction *I) {
if (Subtarget->hasAddr64()) {
auto Wrap = BuildMI(*FuncInfo.MBB, std::prev(FuncInfo.InsertPt), DbgLoc,
TII.get(WebAssembly::I32_WRAP_I64));
- unsigned Reg32 = createResultReg(&WebAssembly::I32RegClass);
+ Register Reg32 = createResultReg(&WebAssembly::I32RegClass);
Wrap.addReg(Reg32, RegState::Define);
Wrap.addReg(CalleeReg);
CalleeReg = Reg32;
@@ -914,11 +914,11 @@ bool WebAssemblyFastISel::selectSelect(const Instruction *I) {
if (CondReg == 0)
return false;
- unsigned TrueReg = getRegForValue(Select->getTrueValue());
+ Register TrueReg = getRegForValue(Select->getTrueValue());
if (TrueReg == 0)
return false;
- unsigned FalseReg = getRegForValue(Select->getFalseValue());
+ Register FalseReg = getRegForValue(Select->getFalseValue());
if (FalseReg == 0)
return false;
@@ -959,7 +959,7 @@ bool WebAssemblyFastISel::selectSelect(const Instruction *I) {
return false;
}
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addReg(TrueReg)
.addReg(FalseReg)
@@ -972,12 +972,12 @@ bool WebAssemblyFastISel::selectSelect(const Instruction *I) {
bool WebAssemblyFastISel::selectTrunc(const Instruction *I) {
const auto *Trunc = cast<TruncInst>(I);
- unsigned Reg = getRegForValue(Trunc->getOperand(0));
+ Register Reg = getRegForValue(Trunc->getOperand(0));
if (Reg == 0)
return false;
if (Trunc->getOperand(0)->getType()->isIntegerTy(64)) {
- unsigned Result = createResultReg(&WebAssembly::I32RegClass);
+ Register Result = createResultReg(&WebAssembly::I32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(WebAssembly::I32_WRAP_I64), Result)
.addReg(Reg);
@@ -994,7 +994,7 @@ bool WebAssemblyFastISel::selectZExt(const Instruction *I) {
const Value *Op = ZExt->getOperand(0);
MVT::SimpleValueType From = getSimpleType(Op->getType());
MVT::SimpleValueType To = getLegalType(getSimpleType(ZExt->getType()));
- unsigned In = getRegForValue(Op);
+ Register In = getRegForValue(Op);
if (In == 0)
return false;
unsigned Reg = zeroExtend(In, Op, From, To);
@@ -1011,7 +1011,7 @@ bool WebAssemblyFastISel::selectSExt(const Instruction *I) {
const Value *Op = SExt->getOperand(0);
MVT::SimpleValueType From = getSimpleType(Op->getType());
MVT::SimpleValueType To = getLegalType(getSimpleType(SExt->getType()));
- unsigned In = getRegForValue(Op);
+ Register In = getRegForValue(Op);
if (In == 0)
return false;
unsigned Reg = signExtend(In, Op, From, To);
@@ -1075,7 +1075,7 @@ bool WebAssemblyFastISel::selectICmp(const Instruction *I) {
if (RHS == 0)
return false;
- unsigned ResultReg = createResultReg(&WebAssembly::I32RegClass);
+ Register ResultReg = createResultReg(&WebAssembly::I32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addReg(LHS)
.addReg(RHS);
@@ -1086,11 +1086,11 @@ bool WebAssemblyFastISel::selectICmp(const Instruction *I) {
bool WebAssemblyFastISel::selectFCmp(const Instruction *I) {
const auto *FCmp = cast<FCmpInst>(I);
- unsigned LHS = getRegForValue(FCmp->getOperand(0));
+ Register LHS = getRegForValue(FCmp->getOperand(0));
if (LHS == 0)
return false;
- unsigned RHS = getRegForValue(FCmp->getOperand(1));
+ Register RHS = getRegForValue(FCmp->getOperand(1));
if (RHS == 0)
return false;
@@ -1136,7 +1136,7 @@ bool WebAssemblyFastISel::selectFCmp(const Instruction *I) {
return false;
}
- unsigned ResultReg = createResultReg(&WebAssembly::I32RegClass);
+ Register ResultReg = createResultReg(&WebAssembly::I32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addReg(LHS)
.addReg(RHS);
@@ -1157,7 +1157,7 @@ bool WebAssemblyFastISel::selectBitCast(const Instruction *I) {
if (!VT.isSimple() || !RetVT.isSimple())
return false;
- unsigned In = getRegForValue(I->getOperand(0));
+ Register In = getRegForValue(I->getOperand(0));
if (In == 0)
return false;
@@ -1229,7 +1229,7 @@ bool WebAssemblyFastISel::selectLoad(const Instruction *I) {
materializeLoadStoreOperands(Addr);
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
ResultReg);
@@ -1284,7 +1284,7 @@ bool WebAssemblyFastISel::selectStore(const Instruction *I) {
materializeLoadStoreOperands(Addr);
- unsigned ValueReg = getRegForValue(Store->getValueOperand());
+ Register ValueReg = getRegForValue(Store->getValueOperand());
if (ValueReg == 0)
return false;
if (VTIsi1)
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index 38ed4c73fb93..a221f37cfd94 100644
--- a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -1491,8 +1491,7 @@ bool WebAssemblyTargetLowering::MatchTableForLowering(SelectionDAG &DAG,
if (GA) {
// We are in Case 2 above.
Idx = Base->getOperand(1);
- if (!Idx || GA->getNumValues() != 1 || Idx->getNumValues() != 1)
- return false;
+ assert(GA->getNumValues() == 1);
} else {
// This might be Case 1 above (or an error)
SDValue V = Base->getOperand(0);
@@ -1629,7 +1628,7 @@ SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
// local.copy between Op and its FI operand.
SDValue Chain = Op.getOperand(0);
SDLoc DL(Op);
- unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
+ Register Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
EVT VT = Src.getValueType();
SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
: WebAssembly::COPY_I64,
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
index 23aaa5160abd..fe656753889f 100644
--- a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
@@ -279,6 +279,7 @@
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Transforms/Utils/SSAUpdaterBulk.h"
@@ -454,12 +455,12 @@ static Function *getEmscriptenFunction(FunctionType *Ty, const Twine &Name,
// Tell the linker that this function is expected to be imported from the
// 'env' module.
if (!F->hasFnAttribute("wasm-import-module")) {
- llvm::AttrBuilder B;
+ llvm::AttrBuilder B(M->getContext());
B.addAttribute("wasm-import-module", "env");
F->addFnAttrs(B);
}
if (!F->hasFnAttribute("wasm-import-name")) {
- llvm::AttrBuilder B;
+ llvm::AttrBuilder B(M->getContext());
B.addAttribute("wasm-import-name", F->getName());
F->addFnAttrs(B);
}
@@ -547,7 +548,7 @@ Value *WebAssemblyLowerEmscriptenEHSjLj::wrapInvoke(CallBase *CI) {
for (unsigned I = 0, E = CI->arg_size(); I < E; ++I)
ArgAttributes.push_back(InvokeAL.getParamAttrs(I));
- AttrBuilder FnAttrs(InvokeAL.getFnAttrs());
+ AttrBuilder FnAttrs(CI->getContext(), InvokeAL.getFnAttrs());
if (FnAttrs.contains(Attribute::AllocSize)) {
// The allocsize attribute (if any) refers to parameters by index and needs
// to be adjusted.
@@ -610,6 +611,8 @@ static bool canLongjmp(const Value *Callee) {
return false;
StringRef CalleeName = Callee->getName();
+ // TODO Include more functions or consider checking with mangled prefixes
+
// The reason we include malloc/free here is to exclude the malloc/free
// calls generated in setjmp prep / cleanup routines.
if (CalleeName == "setjmp" || CalleeName == "malloc" || CalleeName == "free")
@@ -626,11 +629,50 @@ static bool canLongjmp(const Value *Callee) {
return false;
// Exception-catching related functions
- if (CalleeName == "__cxa_begin_catch" || CalleeName == "__cxa_end_catch" ||
+ //
+ // We intentionally excluded __cxa_end_catch here even though it surely cannot
+ // longjmp, in order to maintain the unwind relationship from all existing
+ // catchpads (and calls within them) to catch.dispatch.longjmp.
+ //
+ // In Wasm EH + Wasm SjLj, we
+ // 1. Make all catchswitch and cleanuppad that unwind to caller unwind to
+ // catch.dispatch.longjmp instead
+ // 2. Convert all longjmpable calls to invokes that unwind to
+ // catch.dispatch.longjmp
+ // But catchswitch BBs are removed in isel, so if an EH catchswitch (generated
+ // from an exception)'s catchpad does not contain any calls that are converted
+ // into invokes unwinding to catch.dispatch.longjmp, this unwind relationship
+ // (EH catchswitch BB -> catch.dispatch.longjmp BB) is lost and
+ // catch.dispatch.longjmp BB can be placed before the EH catchswitch BB in
+ // CFGSort.
+ // int ret = setjmp(buf);
+ // try {
+ // foo(); // longjmps
+ // } catch (...) {
+ // }
+ // Then in this code, if 'foo' longjmps, it first unwinds to 'catch (...)'
+ // catchswitch, and is not caught by that catchswitch because it is a longjmp,
+ // then it should next unwind to catch.dispatch.longjmp BB. But if this 'catch
+ // (...)' catchswitch -> catch.dispatch.longjmp unwind relationship is lost,
+ // it will not unwind to catch.dispatch.longjmp, producing an incorrect
+ // result.
+ //
+ // Every catchpad generated by Wasm C++ contains __cxa_end_catch, so we
+ // intentionally treat it as longjmpable to work around this problem. This is
+ // a hacky fix but an easy one.
+ //
+ // The comment block in findWasmUnwindDestinations() in
+ // SelectionDAGBuilder.cpp is addressing a similar problem.
+ if (CalleeName == "__cxa_begin_catch" ||
CalleeName == "__cxa_allocate_exception" || CalleeName == "__cxa_throw" ||
CalleeName == "__clang_call_terminate")
return false;
+ // std::terminate, which is generated when another exception occurs while
+ // handling an exception, cannot longjmp.
+ if (CalleeName == "_ZSt9terminatev")
+ return false;
+
// Otherwise we don't know
return true;
}
@@ -817,6 +859,32 @@ static bool containsLongjmpableCalls(const Function *F) {
return false;
}
+// When a function contains a setjmp call but no other calls that can longjmp,
+// we don't do the setjmp transformation for that setjmp. But we still need to
+// convert the setjmp calls into "i32 0" so they don't cause link-time errors.
+// setjmp always returns 0 when called directly.
+static void nullifySetjmp(Function *F) {
+ Module &M = *F->getParent();
+ IRBuilder<> IRB(M.getContext());
+ Function *SetjmpF = M.getFunction("setjmp");
+ SmallVector<Instruction *, 1> ToErase;
+
+ for (User *U : SetjmpF->users()) {
+ auto *CI = dyn_cast<CallInst>(U);
+ // FIXME 'invoke' to setjmp can happen when we use Wasm EH + Wasm SjLj, but
+ // we don't support the two being used together yet.
+ if (!CI)
+ report_fatal_error("Wasm EH + Wasm SjLj is not fully supported yet");
+ BasicBlock *BB = CI->getParent();
+ if (BB->getParent() != F) // in other function
+ continue;
+ ToErase.push_back(CI);
+ CI->replaceAllUsesWith(IRB.getInt32(0));
+ }
+ for (auto *I : ToErase)
+ I->eraseFromParent();
+}
+
bool WebAssemblyLowerEmscriptenEHSjLj::runOnModule(Module &M) {
LLVM_DEBUG(dbgs() << "********** Lower Emscripten EH & SjLj **********\n");
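As a rough source-level illustration of what the nullifySetjmp helper above amounts to (the pass itself rewrites IR; this C sketch and its function names are my own, not part of the patch):

#include <setjmp.h>

// Before: g calls setjmp but contains no call that could longjmp back to it,
// so the full SjLj transformation would be wasted on g.
int g_before(jmp_buf Buf) {
  if (setjmp(Buf))   // the only setjmp-related call in g
    return 1;        // "returned via longjmp" path, never reachable here
  return 0;
}

// After: the setjmp call is replaced with the constant 0 (what a direct call
// to setjmp returns), so no unresolved reference to setjmp is left for the
// linker to complain about.
int g_after(jmp_buf Buf) {
  (void)Buf;
  if (0)
    return 1;
  return 0;
}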
@@ -886,6 +954,10 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runOnModule(Module &M) {
EHTypeIDF = getEmscriptenFunction(EHTypeIDTy, "llvm_eh_typeid_for", &M);
}
+ // Functions that contain calls to setjmp but don't have other longjmpable
+ // calls within them.
+ SmallPtrSet<Function *, 4> SetjmpUsersToNullify;
+
if ((EnableEmSjLj || EnableWasmSjLj) && SetjmpF) {
// Precompute setjmp users
for (User *U : SetjmpF->users()) {
@@ -896,6 +968,8 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runOnModule(Module &M) {
// so can ignore it
if (containsLongjmpableCalls(UserF))
SetjmpUsers.insert(UserF);
+ else
+ SetjmpUsersToNullify.insert(UserF);
} else {
std::string S;
raw_string_ostream SS(S);
@@ -975,6 +1049,14 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runOnModule(Module &M) {
runSjLjOnFunction(*F);
}
+ // Replace unnecessary setjmp calls with 0
+ if ((EnableEmSjLj || EnableWasmSjLj) && !SetjmpUsersToNullify.empty()) {
+ Changed = true;
+ assert(SetjmpF);
+ for (Function *F : SetjmpUsersToNullify)
+ nullifySetjmp(F);
+ }
+
if (!Changed) {
// Delete unused global variables and functions
if (ResumeF)
@@ -1078,20 +1160,7 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runEHOnFunction(Function &F) {
} else {
// This can't throw, and we don't need this invoke, just replace it with a
// call+branch
- SmallVector<Value *, 16> Args(II->args());
- CallInst *NewCall =
- IRB.CreateCall(II->getFunctionType(), II->getCalledOperand(), Args);
- NewCall->takeName(II);
- NewCall->setCallingConv(II->getCallingConv());
- NewCall->setDebugLoc(II->getDebugLoc());
- NewCall->setAttributes(II->getAttributes());
- II->replaceAllUsesWith(NewCall);
- ToErase.push_back(II);
-
- IRB.CreateBr(II->getNormalDest());
-
- // Remove any PHI node entries from the exception destination
- II->getUnwindDest()->removePredecessor(&BB);
+ changeToCall(II);
}
}
@@ -1243,16 +1312,19 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runSjLjOnFunction(Function &F) {
// Setjmp transformation
SmallVector<PHINode *, 4> SetjmpRetPHIs;
Function *SetjmpF = M.getFunction("setjmp");
- for (User *U : SetjmpF->users()) {
- auto *CI = dyn_cast<CallInst>(U);
- // FIXME 'invoke' to setjmp can happen when we use Wasm EH + Wasm SjLj, but
- // we don't support two being used together yet.
- if (!CI)
- report_fatal_error("Wasm EH + Wasm SjLj is not fully supported yet");
- BasicBlock *BB = CI->getParent();
+ for (auto *U : make_early_inc_range(SetjmpF->users())) {
+ auto *CB = dyn_cast<CallBase>(U);
+ BasicBlock *BB = CB->getParent();
if (BB->getParent() != &F) // in other function
continue;
+ CallInst *CI = nullptr;
+ // setjmp cannot throw. So if it is an invoke, lower it to a call
+ if (auto *II = dyn_cast<InvokeInst>(CB))
+ CI = llvm::changeToCall(II);
+ else
+ CI = cast<CallInst>(CB);
+
// The tail is everything right after the call, and will be reached once
// when setjmp is called, and later when longjmp returns to the setjmp
BasicBlock *Tail = SplitBlock(BB, CI->getNextNode());
@@ -1568,6 +1640,13 @@ void WebAssemblyLowerEmscriptenEHSjLj::handleLongjmpableCallsForEmscriptenSjLj(
I->eraseFromParent();
}
+static BasicBlock *getCleanupRetUnwindDest(const CleanupPadInst *CPI) {
+ for (const User *U : CPI->users())
+ if (const auto *CRI = dyn_cast<CleanupReturnInst>(U))
+ return CRI->getUnwindDest();
+ return nullptr;
+}
+
// Create a catchpad in which we catch a longjmp's env and val arguments, test
// if the longjmp corresponds to one of setjmps in the current function, and if
// so, jump to the setjmp dispatch BB from which we go to one of post-setjmp
@@ -1619,18 +1698,18 @@ void WebAssemblyLowerEmscriptenEHSjLj::handleLongjmpableCallsForWasmSjLj(
BasicBlock::Create(C, "setjmp.dispatch", &F, OrigEntry);
cast<BranchInst>(Entry->getTerminator())->setSuccessor(0, SetjmpDispatchBB);
- // Create catch.dispatch.longjmp BB a catchswitch instruction
- BasicBlock *CatchSwitchBB =
+ // Create catch.dispatch.longjmp BB and a catchswitch instruction
+ BasicBlock *CatchDispatchLongjmpBB =
BasicBlock::Create(C, "catch.dispatch.longjmp", &F);
- IRB.SetInsertPoint(CatchSwitchBB);
- CatchSwitchInst *CatchSwitch =
+ IRB.SetInsertPoint(CatchDispatchLongjmpBB);
+ CatchSwitchInst *CatchSwitchLongjmp =
IRB.CreateCatchSwitch(ConstantTokenNone::get(C), nullptr, 1);
// Create catch.longjmp BB and a catchpad instruction
BasicBlock *CatchLongjmpBB = BasicBlock::Create(C, "catch.longjmp", &F);
- CatchSwitch->addHandler(CatchLongjmpBB);
+ CatchSwitchLongjmp->addHandler(CatchLongjmpBB);
IRB.SetInsertPoint(CatchLongjmpBB);
- CatchPadInst *CatchPad = IRB.CreateCatchPad(CatchSwitch, {});
+ CatchPadInst *CatchPad = IRB.CreateCatchPad(CatchSwitchLongjmp, {});
// Wasm throw and catch instructions can throw and catch multiple values, but
// that requires multivalue support in the toolchain, which is currently not
@@ -1696,9 +1775,9 @@ void WebAssemblyLowerEmscriptenEHSjLj::handleLongjmpableCallsForWasmSjLj(
// Convert all longjmpable call instructions to invokes that unwind to the
// newly created catch.dispatch.longjmp BB.
- SmallVector<Instruction *, 64> ToErase;
+ SmallVector<CallInst *, 64> LongjmpableCalls;
for (auto *BB = &*F.begin(); BB; BB = BB->getNextNode()) {
- for (Instruction &I : *BB) {
+ for (auto &I : *BB) {
auto *CI = dyn_cast<CallInst>(&I);
if (!CI)
continue;
@@ -1716,29 +1795,66 @@ void WebAssemblyLowerEmscriptenEHSjLj::handleLongjmpableCallsForWasmSjLj(
// setjmps in this function. We should not convert this call to an invoke.
if (CI == WasmLongjmpCI)
continue;
- ToErase.push_back(CI);
+ LongjmpableCalls.push_back(CI);
+ }
+ }
- // Even if the callee function has attribute 'nounwind', which is true for
- // all C functions, it can longjmp, which means it can throw a Wasm
- // exception now.
- CI->removeFnAttr(Attribute::NoUnwind);
- if (Function *CalleeF = CI->getCalledFunction()) {
- CalleeF->removeFnAttr(Attribute::NoUnwind);
+ for (auto *CI : LongjmpableCalls) {
+ // Even if the callee function has attribute 'nounwind', which is true for
+ // all C functions, it can longjmp, which means it can throw a Wasm
+ // exception now.
+ CI->removeFnAttr(Attribute::NoUnwind);
+ if (Function *CalleeF = CI->getCalledFunction())
+ CalleeF->removeFnAttr(Attribute::NoUnwind);
+
+ // Change it to an invoke and make it unwind to the catch.dispatch.longjmp
+ // BB. If the call is enclosed in another catchpad/cleanuppad scope, unwind
+ // to its parent pad's unwind destination instead to preserve the scope
+ // structure. It will eventually unwind to the catch.dispatch.longjmp.
+ SmallVector<OperandBundleDef, 1> Bundles;
+ BasicBlock *UnwindDest = nullptr;
+ if (auto Bundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
+ Instruction *FromPad = cast<Instruction>(Bundle->Inputs[0]);
+ while (!UnwindDest && FromPad) {
+ if (auto *CPI = dyn_cast<CatchPadInst>(FromPad)) {
+ UnwindDest = CPI->getCatchSwitch()->getUnwindDest();
+ FromPad = nullptr; // stop searching
+ } else if (auto *CPI = dyn_cast<CleanupPadInst>(FromPad)) {
+ // getCleanupRetUnwindDest() can return nullptr when
+ // 1. This cleanuppad's matching cleanupret unwinds to caller
+ // 2. There is no matching cleanupret because it ends with
+ // unreachable.
+ // In case of 2, we need to traverse the parent pad chain.
+ UnwindDest = getCleanupRetUnwindDest(CPI);
+ FromPad = cast<Instruction>(CPI->getParentPad());
+ }
}
+ }
+ if (!UnwindDest)
+ UnwindDest = CatchDispatchLongjmpBB;
+ changeToInvokeAndSplitBasicBlock(CI, UnwindDest);
+ }
- IRB.SetInsertPoint(CI);
- BasicBlock *Tail = SplitBlock(BB, CI->getNextNode());
- // We will add a new invoke. So remove the branch created when we split
- // the BB
- ToErase.push_back(BB->getTerminator());
- SmallVector<Value *, 8> Args(CI->args());
- InvokeInst *II =
- IRB.CreateInvoke(CI->getFunctionType(), CI->getCalledOperand(), Tail,
- CatchSwitchBB, Args);
- II->takeName(CI);
- II->setDebugLoc(CI->getDebugLoc());
- II->setAttributes(CI->getAttributes());
- CI->replaceAllUsesWith(II);
+ SmallVector<Instruction *, 16> ToErase;
+ for (auto &BB : F) {
+ if (auto *CSI = dyn_cast<CatchSwitchInst>(BB.getFirstNonPHI())) {
+ if (CSI != CatchSwitchLongjmp && CSI->unwindsToCaller()) {
+ IRB.SetInsertPoint(CSI);
+ ToErase.push_back(CSI);
+ auto *NewCSI = IRB.CreateCatchSwitch(CSI->getParentPad(),
+ CatchDispatchLongjmpBB, 1);
+ NewCSI->addHandler(*CSI->handler_begin());
+ NewCSI->takeName(CSI);
+ CSI->replaceAllUsesWith(NewCSI);
+ }
+ }
+
+ if (auto *CRI = dyn_cast<CleanupReturnInst>(BB.getTerminator())) {
+ if (CRI->unwindsToCaller()) {
+ IRB.SetInsertPoint(CRI);
+ ToErase.push_back(CRI);
+ IRB.CreateCleanupRet(CRI->getCleanupPad(), CatchDispatchLongjmpBB);
+ }
}
}
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyLowerGlobalDtors.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyLowerGlobalDtors.cpp
index 3a0bef8c765c..ca6f3f194645 100644
--- a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyLowerGlobalDtors.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyLowerGlobalDtors.cpp
@@ -26,6 +26,8 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
+#include <map>
+
using namespace llvm;
#define DEBUG_TYPE "wasm-lower-global-dtors"
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp
index 9d83a75a8247..6a6cac6d956f 100644
--- a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp
@@ -82,7 +82,7 @@ bool WebAssemblyOptimizeLiveIntervals::runOnMachineFunction(
// Split multiple-VN LiveIntervals into multiple LiveIntervals.
SmallVector<LiveInterval *, 4> SplitLIs;
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I < E; ++I) {
- unsigned Reg = Register::index2VirtReg(I);
+ Register Reg = Register::index2VirtReg(I);
auto &TRI = *MF.getSubtarget<WebAssemblySubtarget>().getRegisterInfo();
if (MRI.reg_nodbg_empty(Reg))
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp
index 8b8593ddcbdd..5682cadc1a64 100644
--- a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp
@@ -95,7 +95,7 @@ bool WebAssemblyPrepareForLiveIntervals::runOnMachineFunction(
// TODO: This is fairly heavy-handed; find a better approach.
//
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I < E; ++I) {
- unsigned Reg = Register::index2VirtReg(I);
+ Register Reg = Register::index2VirtReg(I);
// Skip unused registers.
if (MRI.use_nodbg_empty(Reg))
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp
index fe127dec8aed..5252db4858b9 100644
--- a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp
@@ -98,7 +98,7 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) {
LLVM_DEBUG(dbgs() << "Interesting register intervals:\n");
for (unsigned I = 0; I < NumVRegs; ++I) {
- unsigned VReg = Register::index2VirtReg(I);
+ Register VReg = Register::index2VirtReg(I);
if (MFI.isVRegStackified(VReg))
continue;
// Skip unused registers, which can use $drop.
@@ -135,7 +135,7 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) {
bool Changed = false;
for (size_t I = 0, E = SortedIntervals.size(); I < E; ++I) {
LiveInterval *LI = SortedIntervals[I];
- unsigned Old = LI->reg();
+ Register Old = LI->reg();
size_t Color = I;
const TargetRegisterClass *RC = MRI->getRegClass(Old);
@@ -152,7 +152,7 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) {
continue_outer:;
}
- unsigned New = SortedIntervals[Color]->reg();
+ Register New = SortedIntervals[Color]->reg();
SlotMapping[I] = New;
Changed |= Old != New;
UsedColors.set(Color);
@@ -168,7 +168,7 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) {
// Rewrite register operands.
for (size_t I = 0, E = SortedIntervals.size(); I < E; ++I) {
- unsigned Old = SortedIntervals[I]->reg();
+ Register Old = SortedIntervals[I]->reg();
unsigned New = SlotMapping[I];
if (Old != New)
MRI->replaceRegWith(Old, New);
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp
index c73b8a29daeb..76c78cd23130 100644
--- a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp
@@ -89,7 +89,7 @@ bool WebAssemblyRegNumbering::runOnMachineFunction(MachineFunction &MF) {
// Start the numbering for locals after the arg regs
unsigned CurReg = MFI.getParams().size();
for (unsigned VRegIdx = 0; VRegIdx < NumVRegs; ++VRegIdx) {
- unsigned VReg = Register::index2VirtReg(VRegIdx);
+ Register VReg = Register::index2VirtReg(VRegIdx);
// Skip unused registers.
if (MRI.use_empty(VReg))
continue;
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
index 42419259802e..d3ad47147ac8 100644
--- a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
@@ -909,8 +909,8 @@ bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) {
SubsequentUse != Use.getParent()->uses().end()) {
if (!SubsequentDef->isReg() || !SubsequentUse->isReg())
break;
- unsigned DefReg = SubsequentDef->getReg();
- unsigned UseReg = SubsequentUse->getReg();
+ Register DefReg = SubsequentDef->getReg();
+ Register UseReg = SubsequentUse->getReg();
// TODO: This single-use restriction could be relaxed by using tees
if (DefReg != UseReg || !MRI.hasOneUse(DefReg))
break;
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp
index add3c799f4aa..912f61765579 100644
--- a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp
@@ -42,8 +42,7 @@ WebAssemblySubtarget::WebAssemblySubtarget(const Triple &TT,
const std::string &FS,
const TargetMachine &TM)
: WebAssemblyGenSubtargetInfo(TT, CPU, /*TuneCPU*/ CPU, FS),
- TargetTriple(TT), FrameLowering(),
- InstrInfo(initializeSubtargetDependencies(CPU, FS)), TSInfo(),
+ TargetTriple(TT), InstrInfo(initializeSubtargetDependencies(CPU, FS)),
TLInfo(TM, *this) {}
bool WebAssemblySubtarget::enableAtomicExpand() const {
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/contrib/llvm-project/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
index 2ba0b97229cc..e9ecff3bf514 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -216,7 +216,7 @@ private:
// The operator on the top of the stack has higher precedence than the
// new operator.
unsigned ParenCount = 0;
- while (1) {
+ while (true) {
// Nothing to process.
if (InfixOperatorStack.empty())
break;
@@ -3030,7 +3030,7 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
ForcedDispEncoding = DispEncoding_Default;
// Parse pseudo prefixes.
- while (1) {
+ while (true) {
if (Name == "{") {
if (getLexer().isNot(AsmToken::Identifier))
return Error(Parser.getTok().getLoc(), "Unexpected token after '{'");
@@ -3370,7 +3370,7 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
Operands.push_back(X86Operand::CreateToken("*", consumeToken()));
// Read the operands.
- while(1) {
+ while (true) {
if (ParseOperand(Operands))
return true;
if (HandleAVX512Operand(Operands))
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/MCA/X86CustomBehaviour.cpp b/contrib/llvm-project/llvm/lib/Target/X86/MCA/X86CustomBehaviour.cpp
new file mode 100644
index 000000000000..78379290aae9
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/X86/MCA/X86CustomBehaviour.cpp
@@ -0,0 +1,64 @@
+//===------------------- X86CustomBehaviour.cpp -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file implements methods from the X86CustomBehaviour class.
+///
+//===----------------------------------------------------------------------===//
+
+#include "X86CustomBehaviour.h"
+#include "TargetInfo/X86TargetInfo.h"
+#include "X86InstrInfo.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Support/WithColor.h"
+
+namespace llvm {
+namespace mca {
+
+void X86InstrPostProcess::setMemBarriers(std::unique_ptr<Instruction> &Inst,
+ const MCInst &MCI) {
+ switch (MCI.getOpcode()) {
+ case X86::MFENCE:
+ Inst->setLoadBarrier(true);
+ Inst->setStoreBarrier(true);
+ break;
+ case X86::LFENCE:
+ Inst->setLoadBarrier(true);
+ break;
+ case X86::SFENCE:
+ Inst->setStoreBarrier(true);
+ break;
+ }
+}
+
+void X86InstrPostProcess::postProcessInstruction(
+ std::unique_ptr<Instruction> &Inst, const MCInst &MCI) {
+ // Currently, we only modify certain instructions' IsALoadBarrier and
+ // IsAStoreBarrier flags.
+ setMemBarriers(Inst, MCI);
+}
+
+} // namespace mca
+} // namespace llvm
+
+using namespace llvm;
+using namespace mca;
+
+static InstrPostProcess *createX86InstrPostProcess(const MCSubtargetInfo &STI,
+ const MCInstrInfo &MCII) {
+ return new X86InstrPostProcess(STI, MCII);
+}
+
+/// Extern function to initialize the targets for the X86 backend
+
+extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeX86TargetMCA() {
+ TargetRegistry::RegisterInstrPostProcess(getTheX86_32Target(),
+ createX86InstrPostProcess);
+ TargetRegistry::RegisterInstrPostProcess(getTheX86_64Target(),
+ createX86InstrPostProcess);
+}
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/MCA/X86CustomBehaviour.h b/contrib/llvm-project/llvm/lib/Target/X86/MCA/X86CustomBehaviour.h
new file mode 100644
index 000000000000..24d26751f0a1
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/X86/MCA/X86CustomBehaviour.h
@@ -0,0 +1,47 @@
+//===-------------------- X86CustomBehaviour.h ------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines the X86CustomBehaviour class which inherits from
+/// CustomBehaviour. This class is used by the tool llvm-mca to enforce
+/// target specific behaviour that is not expressed well enough in the
+/// scheduling model for mca to enforce it automatically.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_X86_MCA_X86CUSTOMBEHAVIOUR_H
+#define LLVM_LIB_TARGET_X86_MCA_X86CUSTOMBEHAVIOUR_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/MCA/CustomBehaviour.h"
+#include "llvm/Support/TargetParser.h"
+
+namespace llvm {
+namespace mca {
+
+class X86InstrPostProcess : public InstrPostProcess {
+ void processWaitCnt(std::unique_ptr<Instruction> &Inst, const MCInst &MCI);
+
+ /// Called within X86InstrPostProcess to specify certain instructions
+ /// as load and store barriers.
+ void setMemBarriers(std::unique_ptr<Instruction> &Inst, const MCInst &MCI);
+
+public:
+ X86InstrPostProcess(const MCSubtargetInfo &STI, const MCInstrInfo &MCII)
+ : InstrPostProcess(STI, MCII) {}
+
+ ~X86InstrPostProcess() {}
+
+ void postProcessInstruction(std::unique_ptr<Instruction> &Inst,
+ const MCInst &MCI) override;
+};
+
+} // namespace mca
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.h b/contrib/llvm-project/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.h
index bb12ede3b729..fd82bdcd1a23 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.h
+++ b/contrib/llvm-project/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.h
@@ -40,4 +40,4 @@ protected:
} // end namespace llvm
-#endif // LLVM_LIB_TARGET_X86_MCTARGETDESC_X86ATTINSTPRINTER_H
+#endif // LLVM_LIB_TARGET_X86_MCTARGETDESC_X86INSTPRINTERCOMMON_H
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp b/contrib/llvm-project/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
index 9da0a8129f23..8913e405539e 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
@@ -111,6 +111,15 @@ void X86_MC::initLLVMToSEHAndCVRegMapping(MCRegisterInfo *MRI) {
{codeview::RegisterId::EFLAGS, X86::EFLAGS},
+ {codeview::RegisterId::ST0, X86::ST0},
+ {codeview::RegisterId::ST1, X86::ST1},
+ {codeview::RegisterId::ST2, X86::ST2},
+ {codeview::RegisterId::ST3, X86::ST3},
+ {codeview::RegisterId::ST4, X86::ST4},
+ {codeview::RegisterId::ST5, X86::ST5},
+ {codeview::RegisterId::ST6, X86::ST6},
+ {codeview::RegisterId::ST7, X86::ST7},
+
{codeview::RegisterId::ST0, X86::FP0},
{codeview::RegisterId::ST1, X86::FP1},
{codeview::RegisterId::ST2, X86::FP2},
@@ -281,8 +290,8 @@ void X86_MC::initLLVMToSEHAndCVRegMapping(MCRegisterInfo *MRI) {
{codeview::RegisterId::AMD64_XMM31, X86::XMM31},
};
- for (unsigned I = 0; I < array_lengthof(RegMap); ++I)
- MRI->mapLLVMRegToCVReg(RegMap[I].Reg, static_cast<int>(RegMap[I].CVReg));
+ for (const auto &I : RegMap)
+ MRI->mapLLVMRegToCVReg(I.Reg, static_cast<int>(I.CVReg));
}
MCSubtargetInfo *X86_MC::createX86MCSubtargetInfo(const Triple &TT,
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86AsmPrinter.h b/contrib/llvm-project/llvm/lib/Target/X86/X86AsmPrinter.h
index b22f25af26cf..94679e6e3d11 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86AsmPrinter.h
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86AsmPrinter.h
@@ -23,7 +23,6 @@ class MCCodeEmitter;
class MCStreamer;
class X86Subtarget;
class TargetMachine;
-struct ASanAccessInfo;
class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
const X86Subtarget *Subtarget = nullptr;
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86CallLowering.h b/contrib/llvm-project/llvm/lib/Target/X86/X86CallLowering.h
index ac5b92bf4aae..0ad67cfd3532 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86CallLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86CallLowering.h
@@ -20,8 +20,6 @@
namespace llvm {
template <typename T> class ArrayRef;
-class DataLayout;
-class MachineRegisterInfo;
class X86TargetLowering;
class X86CallLowering : public CallLowering {
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86FastTileConfig.cpp b/contrib/llvm-project/llvm/lib/Target/X86/X86FastTileConfig.cpp
index 47874e82ff3b..061fff50bcea 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86FastTileConfig.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86FastTileConfig.cpp
@@ -56,8 +56,6 @@ public:
bool isTileLoad(MachineInstr &MI);
bool isTileStore(MachineInstr &MI);
bool isAMXInstr(MachineInstr &MI);
- void getTileStoreShape(MachineInstr &MI,
- SmallVector<MachineOperand *> &ShapedTiles);
MachineInstr *getKeyAMXInstr(MachineInstr *MI);
void getTileShapesCfg(MachineInstr *MI,
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86FrameLowering.cpp b/contrib/llvm-project/llvm/lib/Target/X86/X86FrameLowering.cpp
index 0a7aea467809..51f2ced321bb 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -149,6 +149,17 @@ static unsigned getLEArOpcode(bool IsLP64) {
return IsLP64 ? X86::LEA64r : X86::LEA32r;
}
+static unsigned getMOVriOpcode(bool Use64BitReg, int64_t Imm) {
+ if (Use64BitReg) {
+ if (isUInt<32>(Imm))
+ return X86::MOV32ri64;
+ if (isInt<32>(Imm))
+ return X86::MOV64ri32;
+ return X86::MOV64ri;
+ }
+ return X86::MOV32ri;
+}
+
static bool isEAXLiveIn(MachineBasicBlock &MBB) {
for (MachineBasicBlock::RegisterMaskPair RegMask : MBB.liveins()) {
unsigned Reg = RegMask.PhysReg;
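A minimal standalone sketch of the immediate-width decision encoded by the new getMOVriOpcode helper above (my own reimplementation for illustration; the enum values are placeholders, not the real X86 opcode constants):

#include <cassert>
#include <cstdint>

enum MovOpc { MOV32ri, MOV32ri64, MOV64ri32, MOV64ri };

static MovOpc pickMovRI(bool Use64BitReg, int64_t Imm) {
  if (!Use64BitReg)
    return MOV32ri;
  if (Imm >= 0 && Imm <= INT64_C(0xFFFFFFFF))
    return MOV32ri64; // zero-extendable 32-bit immediate
  if (Imm >= INT32_MIN && Imm <= INT32_MAX)
    return MOV64ri32; // sign-extendable 32-bit immediate
  return MOV64ri;     // needs the full 64-bit (movabs) form
}

int main() {
  assert(pickMovRI(true, INT64_C(0xFFFFFFFF)) == MOV32ri64);
  assert(pickMovRI(true, -8) == MOV64ri32);
  assert(pickMovRI(true, INT64_C(0x100000000)) == MOV64ri);
  assert(pickMovRI(false, 42) == MOV32ri);
}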
@@ -237,11 +248,10 @@ void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
else
Reg = TRI->findDeadCallerSavedReg(MBB, MBBI);
- unsigned MovRIOpc = Is64Bit ? X86::MOV64ri : X86::MOV32ri;
unsigned AddSubRROpc =
isSub ? getSUBrrOpcode(Is64Bit) : getADDrrOpcode(Is64Bit);
if (Reg) {
- BuildMI(MBB, MBBI, DL, TII.get(MovRIOpc), Reg)
+ BuildMI(MBB, MBBI, DL, TII.get(getMOVriOpcode(Is64Bit, Offset)), Reg)
.addImm(Offset)
.setMIFlag(Flag);
MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AddSubRROpc), StackPtr)
@@ -267,7 +277,7 @@ void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
Offset = -(Offset - SlotSize);
else
Offset = Offset + SlotSize;
- BuildMI(MBB, MBBI, DL, TII.get(MovRIOpc), Rax)
+ BuildMI(MBB, MBBI, DL, TII.get(getMOVriOpcode(Is64Bit, Offset)), Rax)
.addImm(Offset)
.setMIFlag(Flag);
MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(X86::ADD64rr), Rax)
@@ -434,7 +444,7 @@ void X86FrameLowering::BuildCFI(MachineBasicBlock &MBB,
/// Emits Dwarf Info specifying offsets of callee saved registers and
/// frame pointer. This is called only when basic block sections are enabled.
-void X86FrameLowering::emitCalleeSavedFrameMoves(
+void X86FrameLowering::emitCalleeSavedFrameMovesFullCFA(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
MachineFunction &MF = *MBB.getParent();
if (!hasFP(MF)) {
@@ -469,7 +479,7 @@ void X86FrameLowering::emitCalleeSavedFrameMoves(
// Calculate offsets.
for (const CalleeSavedInfo &I : CSI) {
int64_t Offset = MFI.getObjectOffset(I.getFrameIdx());
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
if (IsPrologue) {
@@ -637,6 +647,8 @@ void X86FrameLowering::emitStackProbeInlineGenericLoop(
uint64_t AlignOffset) const {
assert(Offset && "null offset");
+ const bool NeedsDwarfCFI = needsDwarfCFI(MF);
+ const bool HasFP = hasFP(MF);
const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
const X86TargetLowering &TLI = *STI.getTargetLowering();
const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
@@ -676,17 +688,36 @@ void X86FrameLowering::emitStackProbeInlineGenericLoop(
Register FinalStackProbed = Uses64BitFramePtr ? X86::R11
: Is64Bit ? X86::R11D
: X86::EAX;
+
BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::COPY), FinalStackProbed)
.addReg(StackPtr)
.setMIFlag(MachineInstr::FrameSetup);
// save loop bound
{
- const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, Offset);
+ const unsigned BoundOffset = alignDown(Offset, StackProbeSize);
+ const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, BoundOffset);
BuildMI(MBB, MBBI, DL, TII.get(SUBOpc), FinalStackProbed)
.addReg(FinalStackProbed)
- .addImm(Offset / StackProbeSize * StackProbeSize)
+ .addImm(BoundOffset)
.setMIFlag(MachineInstr::FrameSetup);
+
+ // while in the loop, use loop-invariant reg for CFI,
+ // instead of the stack pointer, which changes during the loop
+ if (!HasFP && NeedsDwarfCFI) {
+ // x32 uses the same DWARF register numbers as x86-64,
+ // so there isn't a register number for r11d, we must use r11 instead
+ const Register DwarfFinalStackProbed =
+ STI.isTarget64BitILP32()
+ ? Register(getX86SubSuperRegister(FinalStackProbed, 64))
+ : FinalStackProbed;
+
+ BuildCFI(MBB, MBBI, DL,
+ MCCFIInstruction::createDefCfaRegister(
+ nullptr, TRI->getDwarfRegNum(DwarfFinalStackProbed, true)));
+ BuildCFI(MBB, MBBI, DL,
+ MCCFIInstruction::createAdjustCfaOffset(nullptr, BoundOffset));
+ }
}
// allocate a page
@@ -725,15 +756,30 @@ void X86FrameLowering::emitStackProbeInlineGenericLoop(
MBB.addSuccessor(testMBB);
// handle tail
- unsigned TailOffset = Offset % StackProbeSize;
+ const unsigned TailOffset = Offset % StackProbeSize;
+ MachineBasicBlock::iterator TailMBBIter = tailMBB->begin();
if (TailOffset) {
const unsigned Opc = getSUBriOpcode(Uses64BitFramePtr, TailOffset);
- BuildMI(*tailMBB, tailMBB->begin(), DL, TII.get(Opc), StackPtr)
+ BuildMI(*tailMBB, TailMBBIter, DL, TII.get(Opc), StackPtr)
.addReg(StackPtr)
.addImm(TailOffset)
.setMIFlag(MachineInstr::FrameSetup);
}
+ // after the loop, switch back to stack pointer for CFI
+ if (!HasFP && NeedsDwarfCFI) {
+ // x32 uses the same DWARF register numbers as x86-64,
+ // so there isn't a register number for esp; we must use rsp instead
+ const Register DwarfStackPtr =
+ STI.isTarget64BitILP32()
+ ? Register(getX86SubSuperRegister(StackPtr, 64))
+ : Register(StackPtr);
+
+ BuildCFI(*tailMBB, TailMBBIter, DL,
+ MCCFIInstruction::createDefCfaRegister(
+ nullptr, TRI->getDwarfRegNum(DwarfStackPtr, true)));
+ }
+
// Update Live In information
recomputeLiveIns(*testMBB);
recomputeLiveIns(*tailMBB);
@@ -1705,19 +1751,9 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
// Handle the 64-bit Windows ABI case where we need to call __chkstk.
// Function prologue is responsible for adjusting the stack pointer.
int64_t Alloc = isEAXAlive ? NumBytes - 8 : NumBytes;
- if (isUInt<32>(Alloc)) {
- BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
- .addImm(Alloc)
- .setMIFlag(MachineInstr::FrameSetup);
- } else if (isInt<32>(Alloc)) {
- BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri32), X86::RAX)
- .addImm(Alloc)
- .setMIFlag(MachineInstr::FrameSetup);
- } else {
- BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
- .addImm(Alloc)
- .setMIFlag(MachineInstr::FrameSetup);
- }
+ BuildMI(MBB, MBBI, DL, TII.get(getMOVriOpcode(Is64Bit, Alloc)), X86::RAX)
+ .addImm(Alloc)
+ .setMIFlag(MachineInstr::FrameSetup);
} else {
// Allocate NumBytes-4 bytes on stack in case of isEAXAlive.
// We'll also use 4 already allocated bytes for EAX.
@@ -2497,7 +2533,7 @@ bool X86FrameLowering::assignCalleeSavedSpillSlots(
// Assign slots for GPRs. It increases frame size.
for (CalleeSavedInfo &I : llvm::reverse(CSI)) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
continue;
@@ -2514,7 +2550,7 @@ bool X86FrameLowering::assignCalleeSavedSpillSlots(
// Assign slots for XMMs.
for (CalleeSavedInfo &I : llvm::reverse(CSI)) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
continue;
@@ -2560,7 +2596,7 @@ bool X86FrameLowering::spillCalleeSavedRegisters(
const MachineFunction &MF = *MBB.getParent();
unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
for (const CalleeSavedInfo &I : llvm::reverse(CSI)) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
continue;
@@ -2594,7 +2630,7 @@ bool X86FrameLowering::spillCalleeSavedRegisters(
// Make XMM regs spilled. X86 does not have ability of push/pop XMM.
// It can be done by spilling XMMs to stack frame.
for (const CalleeSavedInfo &I : llvm::reverse(CSI)) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
continue;
@@ -2672,7 +2708,7 @@ bool X86FrameLowering::restoreCalleeSavedRegisters(
// Reload XMMs from stack frame.
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (X86::GR64RegClass.contains(Reg) ||
X86::GR32RegClass.contains(Reg))
continue;
@@ -2689,7 +2725,7 @@ bool X86FrameLowering::restoreCalleeSavedRegisters(
// POP GPRs.
unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (!X86::GR64RegClass.contains(Reg) &&
!X86::GR32RegClass.contains(Reg))
continue;
@@ -2944,15 +2980,16 @@ void X86FrameLowering::adjustForSegmentedStacks(
const unsigned Reg10 = IsLP64 ? X86::R10 : X86::R10D;
const unsigned Reg11 = IsLP64 ? X86::R11 : X86::R11D;
const unsigned MOVrr = IsLP64 ? X86::MOV64rr : X86::MOV32rr;
- const unsigned MOVri = IsLP64 ? X86::MOV64ri : X86::MOV32ri;
if (IsNested)
BuildMI(allocMBB, DL, TII.get(MOVrr), RegAX).addReg(Reg10);
- BuildMI(allocMBB, DL, TII.get(MOVri), Reg10)
- .addImm(StackSize);
- BuildMI(allocMBB, DL, TII.get(MOVri), Reg11)
- .addImm(X86FI->getArgumentStackSize());
+ BuildMI(allocMBB, DL, TII.get(getMOVriOpcode(IsLP64, StackSize)), Reg10)
+ .addImm(StackSize);
+ BuildMI(allocMBB, DL,
+ TII.get(getMOVriOpcode(IsLP64, X86FI->getArgumentStackSize())),
+ Reg11)
+ .addImm(X86FI->getArgumentStackSize());
} else {
BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
.addImm(X86FI->getArgumentStackSize());
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86FrameLowering.h b/contrib/llvm-project/llvm/lib/Target/X86/X86FrameLowering.h
index e18be0d26321..987facbfeae4 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86FrameLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86FrameLowering.h
@@ -65,9 +65,8 @@ public:
void inlineStackProbe(MachineFunction &MF,
MachineBasicBlock &PrologMBB) const override;
- void
- emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI) const override;
+ void emitCalleeSavedFrameMovesFullCFA(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const override;
void emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/contrib/llvm-project/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 7ed05fd0331d..5b90c67deae6 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -80,9 +80,9 @@ namespace {
bool NegateIndex = false;
X86ISelAddressMode()
- : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
- Segment(), GV(nullptr), CP(nullptr), BlockAddr(nullptr), ES(nullptr),
- MCSym(nullptr), JT(-1), SymbolFlags(X86II::MO_NO_FLAG) {}
+ : BaseType(RegBase), Base_FrameIndex(0), Scale(1), Disp(0), GV(nullptr),
+ CP(nullptr), BlockAddr(nullptr), ES(nullptr), MCSym(nullptr), JT(-1),
+ SymbolFlags(X86II::MO_NO_FLAG) {}
bool hasSymbolicDisplacement() const {
return GV != nullptr || CP != nullptr || ES != nullptr ||
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.cpp
index 6f6361b6757b..aff72452af6c 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -1096,6 +1096,11 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::ROTR, VT, Custom);
}
+ setOperationAction(ISD::FSHL, MVT::v16i8, Custom);
+ setOperationAction(ISD::FSHR, MVT::v16i8, Custom);
+ setOperationAction(ISD::FSHL, MVT::v4i32, Custom);
+ setOperationAction(ISD::FSHR, MVT::v4i32, Custom);
+
setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
@@ -1284,6 +1289,11 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::ROTR, VT, Custom);
}
+ setOperationAction(ISD::FSHL, MVT::v32i8, Custom);
+ setOperationAction(ISD::FSHR, MVT::v32i8, Custom);
+ setOperationAction(ISD::FSHL, MVT::v8i32, Custom);
+ setOperationAction(ISD::FSHR, MVT::v8i32, Custom);
+
// These types need custom splitting if their input is a 128-bit vector.
setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
@@ -1688,6 +1698,11 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::SSUBSAT, VT, HasBWI ? Legal : Custom);
}
+ setOperationAction(ISD::FSHL, MVT::v64i8, Custom);
+ setOperationAction(ISD::FSHR, MVT::v64i8, Custom);
+ setOperationAction(ISD::FSHL, MVT::v16i32, Custom);
+ setOperationAction(ISD::FSHR, MVT::v16i32, Custom);
+
if (Subtarget.hasDQI()) {
setOperationAction(ISD::SINT_TO_FP, MVT::v8i64, Legal);
setOperationAction(ISD::UINT_TO_FP, MVT::v8i64, Legal);
@@ -5475,10 +5490,9 @@ bool X86TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
/// materialize the FP immediate as a load from a constant pool.
bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
bool ForCodeSize) const {
- for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
- if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
+ for (const APFloat &FPImm : LegalFPImmediates)
+ if (Imm.bitwiseIsEqual(FPImm))
return true;
- }
return false;
}
@@ -6132,6 +6146,29 @@ static SDValue getZeroVector(MVT VT, const X86Subtarget &Subtarget,
return DAG.getBitcast(VT, Vec);
}
+// Helper to determine if the ops are all extracted subvectors that come from a
+// single source. If we allow commuting, they don't have to be in order (Lo/Hi).
+static SDValue getSplitVectorSrc(SDValue LHS, SDValue RHS, bool AllowCommute) {
+ if (LHS.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
+ RHS.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
+ LHS.getValueType() != RHS.getValueType() ||
+ LHS.getOperand(0) != RHS.getOperand(0))
+ return SDValue();
+
+ SDValue Src = LHS.getOperand(0);
+ if (Src.getValueSizeInBits() != (LHS.getValueSizeInBits() * 2))
+ return SDValue();
+
+ unsigned NumElts = LHS.getValueType().getVectorNumElements();
+ if ((LHS.getConstantOperandAPInt(1) == 0 &&
+ RHS.getConstantOperandAPInt(1) == NumElts) ||
+ (AllowCommute && RHS.getConstantOperandAPInt(1) == 0 &&
+ LHS.getConstantOperandAPInt(1) == NumElts))
+ return Src;
+
+ return SDValue();
+}
+
static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
const SDLoc &dl, unsigned vectorWidth) {
EVT VT = Vec.getValueType();
@@ -6850,8 +6887,8 @@ static SDValue getPack(SelectionDAG &DAG, const X86Subtarget &Subtarget,
DAG.computeKnownBits(RHS).countMaxActiveBits() <= EltSizeInBits)
return DAG.getNode(X86ISD::PACKUS, dl, VT, LHS, RHS);
- if (DAG.ComputeMinSignedBits(LHS) <= EltSizeInBits &&
- DAG.ComputeMinSignedBits(RHS) <= EltSizeInBits)
+ if (DAG.ComputeMaxSignificantBits(LHS) <= EltSizeInBits &&
+ DAG.ComputeMaxSignificantBits(RHS) <= EltSizeInBits)
return DAG.getNode(X86ISD::PACKSS, dl, VT, LHS, RHS);
}
@@ -7907,6 +7944,7 @@ static bool getTargetShuffleInputs(SDValue Op, SmallVectorImpl<SDValue> &Inputs,
// Attempt to decode ops that could be represented as a shuffle mask.
// The decoded shuffle mask may contain a different number of elements to the
// destination value type.
+// TODO: Merge into getTargetShuffleInputs()
static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
SmallVectorImpl<int> &Mask,
SmallVectorImpl<SDValue> &Ops,
@@ -8355,6 +8393,9 @@ static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
APInt &KnownUndef, APInt &KnownZero,
const SelectionDAG &DAG, unsigned Depth,
bool ResolveKnownElts) {
+ if (Depth >= SelectionDAG::MaxRecursionDepth)
+ return false; // Limit search depth.
+
EVT VT = Op.getValueType();
if (!VT.isSimple() || !VT.isVector())
return false;
@@ -9233,8 +9274,13 @@ static bool isFoldableUseOfShuffle(SDNode *N) {
return true;
if (Opc == ISD::BITCAST) // Ignore bitcasts
return isFoldableUseOfShuffle(U);
- if (N->hasOneUse())
+ if (N->hasOneUse()) {
+ // TODO: There may be some general way to know if an SDNode can
+ // be folded; for now we only know whether an MI is foldable.
+ if (Opc == X86ISD::VPDPBUSD && U->getOperand(2).getNode() != N)
+ return false;
return true;
+ }
}
return false;
}
@@ -10055,13 +10101,18 @@ static SDValue lowerToAddSubOrFMAddSub(const BuildVectorSDNode *BV,
if (IsSubAdd)
return SDValue();
- // Do not generate X86ISD::ADDSUB node for 512-bit types even though
- // the ADDSUB idiom has been successfully recognized. There are no known
- // X86 targets with 512-bit ADDSUB instructions!
- // 512-bit ADDSUB idiom recognition was needed only as part of FMADDSUB idiom
- // recognition.
- if (VT.is512BitVector())
- return SDValue();
+ // There are no known X86 targets with 512-bit ADDSUB instructions!
+ // Convert to blend(fsub,fadd).
+ if (VT.is512BitVector()) {
+ SmallVector<int> Mask;
+ for (int I = 0, E = VT.getVectorNumElements(); I != E; I += 2) {
+ Mask.push_back(I);
+ Mask.push_back(I + E + 1);
+ }
+ SDValue Sub = DAG.getNode(ISD::FSUB, DL, VT, Opnd0, Opnd1);
+ SDValue Add = DAG.getNode(ISD::FADD, DL, VT, Opnd0, Opnd1);
+ return DAG.getVectorShuffle(VT, DL, Sub, Add, Mask);
+ }
return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
}
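To make the interleaving above concrete, a tiny sketch (my own, assuming a hypothetical 8-element vector) of the shuffle mask it builds: even result lanes take the FSUB value and odd lanes the FADD value, which matches ADDSUB semantics:

#include <cstdio>
#include <vector>

int main() {
  const int E = 8; // vector width, e.g. v8f64
  std::vector<int> Mask;
  for (int I = 0; I != E; I += 2) {
    Mask.push_back(I);         // result lane I comes from the FSUB operand
    Mask.push_back(I + E + 1); // result lane I+1 comes from the FADD operand
  }
  for (int M : Mask)
    std::printf("%d ", M);     // prints: 0 9 2 11 4 13 6 15
  std::printf("\n");
}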
@@ -12162,12 +12213,13 @@ static SDValue lowerShuffleAsVTRUNC(const SDLoc &DL, MVT VT, SDValue V1,
return SDValue();
}
-/// Check whether a compaction lowering can be done by dropping even
-/// elements and compute how many times even elements must be dropped.
+/// Check whether a compaction lowering can be done by dropping even/odd
+/// elements and compute how many times even/odd elements must be dropped.
///
/// This handles shuffles which take every Nth element where N is a power of
/// two. Example shuffle masks:
///
+/// (even)
/// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
/// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
/// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
@@ -12175,16 +12227,20 @@ static SDValue lowerShuffleAsVTRUNC(const SDLoc &DL, MVT VT, SDValue V1,
/// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
/// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
///
+/// (odd)
+/// N = 1: 1, 3, 5, 7, 9, 11, 13, 15, 0, 2, 4, 6, 8, 10, 12, 14
+/// N = 1: 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31
+///
/// Any of these lanes can of course be undef.
///
/// This routine only supports N <= 3.
/// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
/// for larger N.
///
-/// \returns N above, or the number of times even elements must be dropped if
-/// there is such a number. Otherwise returns zero.
-static int canLowerByDroppingEvenElements(ArrayRef<int> Mask,
- bool IsSingleInput) {
+/// \returns N above, or the number of times even/odd elements must be dropped
+/// if there is such a number. Otherwise returns zero.
+static int canLowerByDroppingElements(ArrayRef<int> Mask, bool MatchEven,
+ bool IsSingleInput) {
// The modulus for the shuffle vector entries is based on whether this is
// a single input or not.
int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
@@ -12192,6 +12248,7 @@ static int canLowerByDroppingEvenElements(ArrayRef<int> Mask,
"We should only be called with masks with a power-of-2 size!");
uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
+ int Offset = MatchEven ? 0 : 1;
// We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
// and 2^3 simultaneously. This is because we may have ambiguity with
@@ -12210,7 +12267,7 @@ static int canLowerByDroppingEvenElements(ArrayRef<int> Mask,
uint64_t N = j + 1;
// The shuffle mask must be equal to (i * 2^N) % M.
- if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
+ if ((uint64_t)(Mask[i] - Offset) == (((uint64_t)i << N) & ModMask))
IsAnyViable = true;
else
ViableForN[j] = false;
@@ -15724,7 +15781,7 @@ static SDValue lowerV8I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
// Attempt to lower using compaction, SSE41 is necessary for PACKUSDW.
// We could use SIGN_EXTEND_INREG+PACKSSDW for older targets but this seems to
// be slower than a PSHUFLW+PSHUFHW+PSHUFD chain.
- int NumEvenDrops = canLowerByDroppingEvenElements(Mask, false);
+ int NumEvenDrops = canLowerByDroppingElements(Mask, true, false);
if ((NumEvenDrops == 1 || NumEvenDrops == 2) && Subtarget.hasSSE41() &&
!Subtarget.hasVLX()) {
// Check if this is part of a 256-bit vector truncation.
@@ -15758,6 +15815,20 @@ static SDValue lowerV8I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
return Result;
}
+ // When compacting odd (upper) elements, use PACKSS pre-SSE41.
+ int NumOddDrops = canLowerByDroppingElements(Mask, false, false);
+ if (NumOddDrops == 1) {
+ bool HasSSE41 = Subtarget.hasSSE41();
+ V1 = DAG.getNode(HasSSE41 ? X86ISD::VSRLI : X86ISD::VSRAI, DL, MVT::v4i32,
+ DAG.getBitcast(MVT::v4i32, V1),
+ DAG.getTargetConstant(16, DL, MVT::i8));
+ V2 = DAG.getNode(HasSSE41 ? X86ISD::VSRLI : X86ISD::VSRAI, DL, MVT::v4i32,
+ DAG.getBitcast(MVT::v4i32, V2),
+ DAG.getTargetConstant(16, DL, MVT::i8));
+ return DAG.getNode(HasSSE41 ? X86ISD::PACKUS : X86ISD::PACKSS, DL,
+ MVT::v8i16, V1, V2);
+ }
+
// Try to lower by permuting the inputs into an unpack instruction.
if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v8i16, V1, V2,
Mask, Subtarget, DAG))
@@ -16024,7 +16095,7 @@ static SDValue lowerV16I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
// Check for compaction patterns.
bool IsSingleInput = V2.isUndef();
- int NumEvenDrops = canLowerByDroppingEvenElements(Mask, IsSingleInput);
+ int NumEvenDrops = canLowerByDroppingElements(Mask, true, IsSingleInput);
// Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
// with PSHUFB. It is important to do this before we attempt to generate any
@@ -16135,6 +16206,19 @@ static SDValue lowerV16I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
return Result;
}
+ int NumOddDrops = canLowerByDroppingElements(Mask, false, IsSingleInput);
+ if (NumOddDrops == 1) {
+ V1 = DAG.getNode(X86ISD::VSRLI, DL, MVT::v8i16,
+ DAG.getBitcast(MVT::v8i16, V1),
+ DAG.getTargetConstant(8, DL, MVT::i8));
+ if (!IsSingleInput)
+ V2 = DAG.getNode(X86ISD::VSRLI, DL, MVT::v8i16,
+ DAG.getBitcast(MVT::v8i16, V2),
+ DAG.getTargetConstant(8, DL, MVT::i8));
+ return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1,
+ IsSingleInput ? V1 : V2);
+ }
+
// Handle multi-input cases by blending/unpacking single-input shuffles.
if (NumV2Elements > 0)
return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v16i8, V1, V2, Mask,
@@ -16538,20 +16622,19 @@ static SDValue lowerShuffleAsLanePermuteAndShuffle(
// If there are only inputs from one 128-bit lane, splitting will in fact be
// less expensive. The flags track whether the given lane contains an element
// that crosses to another lane.
+ bool AllLanes;
if (!Subtarget.hasAVX2()) {
bool LaneCrossing[2] = {false, false};
for (int i = 0; i < Size; ++i)
if (Mask[i] >= 0 && ((Mask[i] % Size) / LaneSize) != (i / LaneSize))
LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
- if (!LaneCrossing[0] || !LaneCrossing[1])
- return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
+ AllLanes = LaneCrossing[0] && LaneCrossing[1];
} else {
bool LaneUsed[2] = {false, false};
for (int i = 0; i < Size; ++i)
if (Mask[i] >= 0)
LaneUsed[(Mask[i] % Size) / LaneSize] = true;
- if (!LaneUsed[0] || !LaneUsed[1])
- return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
+ AllLanes = LaneUsed[0] && LaneUsed[1];
}
// TODO - we could support shuffling V2 in the Flipped input.
@@ -16569,6 +16652,11 @@ static SDValue lowerShuffleAsLanePermuteAndShuffle(
assert(!is128BitLaneCrossingShuffleMask(VT, InLaneMask) &&
"In-lane shuffle mask expected");
+ // If we're not using both lanes and the in-lane mask is not
+ // repeating, then we're better off splitting.
+ if (!AllLanes && !is128BitLaneRepeatedShuffleMask(VT, InLaneMask))
+ return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
+
// Flip the lanes, and shuffle the results which should now be in-lane.
MVT PVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
SDValue Flipped = DAG.getBitcast(PVT, V1);
@@ -22598,7 +22686,7 @@ SDValue X86TargetLowering::lowerFaddFsub(SDValue Op, SelectionDAG &DAG) const {
/// ISD::FROUND is defined to round to nearest with ties rounding away from 0.
/// This mode isn't supported in hardware on X86. But as long as we aren't
/// compiling with trapping math, we can emulate this with
-/// floor(X + copysign(nextafter(0.5, 0.0), X)).
+/// trunc(X + copysign(nextafter(0.5, 0.0), X)).
static SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) {
SDValue N0 = Op.getOperand(0);
SDLoc dl(Op);
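A scalar illustration (my own sketch, not part of the patch, and assuming the default round-to-nearest FP environment) of the identity the updated comment describes, i.e. rounding ties away from zero without a dedicated rounding instruction:

#include <cassert>
#include <cmath>

static double roundHalfAwayFromZero(double X) {
  // nextafter(0.5, 0.0) is the largest double strictly below 0.5, so adding
  // it (with X's sign) never pushes a value that is just under a .5 boundary
  // across that boundary before truncating toward zero.
  return std::trunc(X + std::copysign(std::nextafter(0.5, 0.0), X));
}

int main() {
  assert(roundHalfAwayFromZero(2.5) == 3.0);
  assert(roundHalfAwayFromZero(-2.5) == -3.0);
  assert(roundHalfAwayFromZero(2.3) == 2.0);
  assert(roundHalfAwayFromZero(-2.3) == -2.0);
}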
@@ -23157,10 +23245,10 @@ static SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
// For equality comparisons try to use SIGN_EXTEND if the input was
// truncate from something with enough sign bits.
if (Op0.getOpcode() == ISD::TRUNCATE) {
- if (DAG.ComputeMinSignedBits(Op0.getOperand(0)) <= 16)
+ if (DAG.ComputeMaxSignificantBits(Op0.getOperand(0)) <= 16)
ExtendOp = ISD::SIGN_EXTEND;
} else if (Op1.getOpcode() == ISD::TRUNCATE) {
- if (DAG.ComputeMinSignedBits(Op1.getOperand(0)) <= 16)
+ if (DAG.ComputeMaxSignificantBits(Op1.getOperand(0)) <= 16)
ExtendOp = ISD::SIGN_EXTEND;
}
}
@@ -24543,32 +24631,27 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
} else if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
(CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;
-
- SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
SDVTList CmpVTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
- // Apply further optimizations for special cases
- // (select (x != 0), -1, 0) -> neg & sbb
- // (select (x == 0), 0, -1) -> neg & sbb
- if (isNullConstant(Y) &&
- (isAllOnesConstant(Op1) == (CondCode == X86::COND_NE))) {
+ // 'X - 1' sets the carry flag if X == 0.
+ // '0 - X' sets the carry flag if X != 0.
+ // Convert the carry flag to a -1/0 mask with sbb:
+ // select (X != 0), -1, Y --> 0 - X; or (sbb), Y
+ // select (X == 0), Y, -1 --> 0 - X; or (sbb), Y
+ // select (X != 0), Y, -1 --> X - 1; or (sbb), Y
+ // select (X == 0), -1, Y --> X - 1; or (sbb), Y
+ SDValue Sub;
+ if (isAllOnesConstant(Op1) == (CondCode == X86::COND_NE)) {
SDValue Zero = DAG.getConstant(0, DL, CmpOp0.getValueType());
- SDValue Neg = DAG.getNode(X86ISD::SUB, DL, CmpVTs, Zero, CmpOp0);
- Zero = DAG.getConstant(0, DL, Op.getValueType());
- return DAG.getNode(X86ISD::SBB, DL, VTs, Zero, Zero, Neg.getValue(1));
+ Sub = DAG.getNode(X86ISD::SUB, DL, CmpVTs, Zero, CmpOp0);
+ } else {
+ SDValue One = DAG.getConstant(1, DL, CmpOp0.getValueType());
+ Sub = DAG.getNode(X86ISD::SUB, DL, CmpVTs, CmpOp0, One);
}
-
- Cmp = DAG.getNode(X86ISD::SUB, DL, CmpVTs,
- CmpOp0, DAG.getConstant(1, DL, CmpOp0.getValueType()));
-
- SDValue Zero = DAG.getConstant(0, DL, Op.getValueType());
- SDValue Res = // Res = 0 or -1.
- DAG.getNode(X86ISD::SBB, DL, VTs, Zero, Zero, Cmp.getValue(1));
-
- if (isAllOnesConstant(Op1) != (CondCode == X86::COND_E))
- Res = DAG.getNOT(DL, Res, Res.getValueType());
-
- return DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
+ SDValue SBB = DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
+ DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
+ Sub.getValue(1));
+ return DAG.getNode(ISD::OR, DL, VT, SBB, Y);
} else if (!Subtarget.hasCMov() && CondCode == X86::COND_E &&
Cmp.getOperand(0).getOpcode() == ISD::AND &&
isOneConstant(Cmp.getOperand(0).getOperand(1))) {
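
The rewritten comments above lean on a simple flag identity; a hedged scalar sketch in plain C++ (illustrative only, not SelectionDAG code): "0 - X" borrows (sets CF) exactly when X != 0, and "X - 1" borrows exactly when X == 0, so an SBB/SETCC_CARRY of that flag yields the -1/0 mask that is OR'd with Y.

#include <cstdint>

// select (X != 0), -1, Y  -->  "0 - X" sets the carry flag iff X != 0;
// SBB/SETCC_CARRY turns that carry into an all-ones or all-zero mask,
// which is then OR'd with Y.
uint32_t select_ne_allones(uint32_t x, uint32_t y) {
  uint32_t carry = (x != 0);   // CF after "0 - X"
  uint32_t mask = 0u - carry;  // -1 if the carry was set, 0 otherwise
  return mask | y;             // -1 when x != 0, y when x == 0
}

// select (X == 0), -1, Y  -->  "X - 1" sets the carry flag iff X == 0.
uint32_t select_eq_allones(uint32_t x, uint32_t y) {
  uint32_t carry = (x == 0);   // CF after "X - 1"
  uint32_t mask = 0u - carry;
  return mask | y;
}
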
@@ -25725,9 +25808,9 @@ static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
/// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
/// necessary casting or extending for \p Mask when lowering masking intrinsics
static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
- SDValue PreservedSrc,
- const X86Subtarget &Subtarget,
- SelectionDAG &DAG) {
+ SDValue PreservedSrc,
+ const X86Subtarget &Subtarget,
+ SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
unsigned OpcodeSelect = ISD::VSELECT;
@@ -29743,20 +29826,106 @@ static SDValue LowerFunnelShift(SDValue Op, const X86Subtarget &Subtarget,
bool IsFSHR = Op.getOpcode() == ISD::FSHR;
if (VT.isVector()) {
- assert(Subtarget.hasVBMI2() && "Expected VBMI2");
+ APInt APIntShiftAmt;
+ bool IsCstSplat = X86::isConstantSplat(Amt, APIntShiftAmt);
- if (IsFSHR)
- std::swap(Op0, Op1);
+ if (Subtarget.hasVBMI2() && EltSizeInBits > 8) {
+ if (IsFSHR)
+ std::swap(Op0, Op1);
- APInt APIntShiftAmt;
- if (X86::isConstantSplat(Amt, APIntShiftAmt)) {
- uint64_t ShiftAmt = APIntShiftAmt.urem(EltSizeInBits);
- SDValue Imm = DAG.getTargetConstant(ShiftAmt, DL, MVT::i8);
- return getAVX512Node(IsFSHR ? X86ISD::VSHRD : X86ISD::VSHLD, DL, VT,
- {Op0, Op1, Imm}, DAG, Subtarget);
+ if (IsCstSplat) {
+ uint64_t ShiftAmt = APIntShiftAmt.urem(EltSizeInBits);
+ SDValue Imm = DAG.getTargetConstant(ShiftAmt, DL, MVT::i8);
+ return getAVX512Node(IsFSHR ? X86ISD::VSHRD : X86ISD::VSHLD, DL, VT,
+ {Op0, Op1, Imm}, DAG, Subtarget);
+ }
+ return getAVX512Node(IsFSHR ? X86ISD::VSHRDV : X86ISD::VSHLDV, DL, VT,
+ {Op0, Op1, Amt}, DAG, Subtarget);
}
- return getAVX512Node(IsFSHR ? X86ISD::VSHRDV : X86ISD::VSHLDV, DL, VT,
- {Op0, Op1, Amt}, DAG, Subtarget);
+ assert((VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8 ||
+ VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) &&
+ "Unexpected funnel shift type!");
+
+  // fshl(x,y,z) -> (unpack(y,x) << (z & (bw-1))) >> bw.
+  // fshr(x,y,z) -> unpack(y,x) >> (z & (bw-1)).
+ if (IsCstSplat)
+ return SDValue();
+
+ SDValue AmtMask = DAG.getConstant(EltSizeInBits - 1, DL, VT);
+ SDValue AmtMod = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
+ bool IsCst = ISD::isBuildVectorOfConstantSDNodes(AmtMod.getNode());
+
+ unsigned ShiftOpc = IsFSHR ? ISD::SRL : ISD::SHL;
+ unsigned NumElts = VT.getVectorNumElements();
+ MVT ExtSVT = MVT::getIntegerVT(2 * EltSizeInBits);
+ MVT ExtVT = MVT::getVectorVT(ExtSVT, NumElts / 2);
+
+ // Split 256-bit integers on XOP/pre-AVX2 targets.
+ // Split 512-bit integers on non 512-bit BWI targets.
+ if ((VT.is256BitVector() && ((Subtarget.hasXOP() && EltSizeInBits < 32) ||
+ !Subtarget.hasAVX2())) ||
+ (VT.is512BitVector() && !Subtarget.useBWIRegs() &&
+ EltSizeInBits < 32)) {
+ // Pre-mask the amount modulo using the wider vector.
+ Op = DAG.getNode(Op.getOpcode(), DL, VT, Op0, Op1, AmtMod);
+ return splitVectorOp(Op, DAG);
+ }
+
+ // Attempt to fold scalar shift as unpack(y,x) << zext(splat(z))
+ if (supportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, ShiftOpc)) {
+ if (SDValue ScalarAmt = DAG.getSplatValue(AmtMod)) {
+ SDValue Lo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, Op1, Op0));
+ SDValue Hi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, Op1, Op0));
+ ScalarAmt = DAG.getZExtOrTrunc(ScalarAmt, DL, MVT::i32);
+ Lo = getTargetVShiftNode(ShiftOpc, DL, ExtVT, Lo, ScalarAmt, Subtarget,
+ DAG);
+ Hi = getTargetVShiftNode(ShiftOpc, DL, ExtVT, Hi, ScalarAmt, Subtarget,
+ DAG);
+ return getPack(DAG, Subtarget, DL, VT, Lo, Hi, !IsFSHR);
+ }
+ }
+
+ MVT WideSVT = MVT::getIntegerVT(
+ std::min<unsigned>(EltSizeInBits * 2, Subtarget.hasBWI() ? 16 : 32));
+ MVT WideVT = MVT::getVectorVT(WideSVT, NumElts);
+
+  // If per-element shifts are legal, fall back to generic expansion.
+ if (supportedVectorVarShift(VT, Subtarget, ShiftOpc) || Subtarget.hasXOP())
+ return SDValue();
+
+ // Attempt to fold as:
+ // fshl(x,y,z) -> (((aext(x) << bw) | zext(y)) << (z & (bw-1))) >> bw.
+ // fshr(x,y,z) -> (((aext(x) << bw) | zext(y)) >> (z & (bw-1))).
+ if (supportedVectorVarShift(WideVT, Subtarget, ShiftOpc) &&
+ supportedVectorShiftWithImm(WideVT, Subtarget, ShiftOpc)) {
+ Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, WideVT, Op0);
+ Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Op1);
+ AmtMod = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, AmtMod);
+ Op0 = getTargetVShiftByConstNode(X86ISD::VSHLI, DL, WideVT, Op0,
+ EltSizeInBits, DAG);
+ SDValue Res = DAG.getNode(ISD::OR, DL, WideVT, Op0, Op1);
+ Res = DAG.getNode(ShiftOpc, DL, WideVT, Res, AmtMod);
+ if (!IsFSHR)
+ Res = getTargetVShiftByConstNode(X86ISD::VSRLI, DL, WideVT, Res,
+ EltSizeInBits, DAG);
+ return DAG.getNode(ISD::TRUNCATE, DL, VT, Res);
+ }
+
+ // Attempt to fold per-element (ExtVT) shift as unpack(y,x) << zext(z)
+ if ((IsCst && !IsFSHR && EltSizeInBits == 8) ||
+ supportedVectorVarShift(ExtVT, Subtarget, ShiftOpc)) {
+ SDValue Z = DAG.getConstant(0, DL, VT);
+ SDValue RLo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, Op1, Op0));
+ SDValue RHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, Op1, Op0));
+ SDValue ALo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, AmtMod, Z));
+ SDValue AHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, AmtMod, Z));
+ SDValue Lo = DAG.getNode(ShiftOpc, DL, ExtVT, RLo, ALo);
+ SDValue Hi = DAG.getNode(ShiftOpc, DL, ExtVT, RHi, AHi);
+ return getPack(DAG, Subtarget, DL, VT, Lo, Hi, !IsFSHR);
+ }
+
+ // Fallback to generic expansion.
+ return SDValue();
}
assert(
(VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) &&
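
For the widened fold described above (fshl/fshr via an any-/zero-extended concatenation), a scalar model on 8-bit lanes makes the bit accounting explicit. This is a minimal sketch in plain C++, not the DAG lowering itself:

#include <cstdint>

// Scalar model of the widened fold for vXi8 funnel shifts:
// fshl(x,y,z) -> (((aext(x) << bw) | zext(y)) << (z & (bw-1))) >> bw
// fshr(x,y,z) ->  ((aext(x) << bw) | zext(y)) >> (z & (bw-1))
uint8_t fshl8_via_wide(uint8_t x, uint8_t y, uint8_t z) {
  unsigned amt = z & 7;                    // z & (bw - 1)
  uint16_t wide = (uint16_t(x) << 8) | y;  // concatenate x:y
  wide = uint16_t(wide << amt);
  return uint8_t(wide >> 8);               // high byte is fshl(x, y, amt)
}

uint8_t fshr8_via_wide(uint8_t x, uint8_t y, uint8_t z) {
  unsigned amt = z & 7;
  uint16_t wide = (uint16_t(x) << 8) | y;
  return uint8_t(wide >> amt);             // low byte is fshr(x, y, amt)
}
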
@@ -29901,8 +30070,9 @@ static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
// Attempt to fold as unpack(x,x) << zext(splat(y)):
// rotl(x,y) -> (unpack(x,x) << (y & (bw-1))) >> bw.
// rotr(x,y) -> (unpack(x,x) >> (y & (bw-1))).
- // TODO: Handle vXi16 cases.
- if (EltSizeInBits == 8 || EltSizeInBits == 32) {
+ // TODO: Handle vXi16 cases on all targets.
+ if (EltSizeInBits == 8 || EltSizeInBits == 32 ||
+ (IsROTL && EltSizeInBits == 16 && !Subtarget.hasAVX())) {
if (SDValue BaseRotAmt = DAG.getSplatValue(AmtMod)) {
unsigned ShiftX86Opc = IsROTL ? X86ISD::VSHLI : X86ISD::VSRLI;
SDValue Lo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, R, R));
@@ -33013,7 +33183,7 @@ bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
// AVX512BW has shifts such as vpsllvw.
if (Subtarget.hasBWI() && Bits == 16)
- return false;
+ return false;
// Otherwise, it's significantly cheaper to shift by a scalar amount than by a
// fully general vector.
@@ -33029,6 +33199,11 @@ bool X86TargetLowering::isBinOp(unsigned Opcode) const {
case X86ISD::FMAX:
case X86ISD::FMIN:
case X86ISD::FANDN:
+ case X86ISD::VPSHA:
+ case X86ISD::VPSHL:
+ case X86ISD::VSHLV:
+ case X86ISD::VSRLV:
+ case X86ISD::VSRAV:
return true;
}
@@ -33285,9 +33460,7 @@ bool X86TargetLowering::areJTsAllowed(const Function *Fn) const {
static bool isEFLAGSLiveAfter(MachineBasicBlock::iterator Itr,
MachineBasicBlock *BB) {
// Scan forward through BB for a use/def of EFLAGS.
- for (MachineBasicBlock::iterator miI = std::next(Itr), miE = BB->end();
- miI != miE; ++miI) {
- const MachineInstr& mi = *miI;
+ for (const MachineInstr &mi : llvm::make_range(std::next(Itr), BB->end())) {
if (mi.readsRegister(X86::EFLAGS))
return true;
// If we found a def, we can stop searching.
@@ -38724,6 +38897,8 @@ static SDValue canonicalizeShuffleWithBinOps(SDValue N, SelectionDAG &DAG,
case X86ISD::VBROADCAST:
case X86ISD::MOVDDUP:
case X86ISD::PSHUFD:
+ case X86ISD::PSHUFHW:
+ case X86ISD::PSHUFLW:
case X86ISD::VPERMI:
case X86ISD::VPERMILPI: {
if (N.getOperand(0).getValueType() == ShuffleVT &&
@@ -38877,9 +39052,6 @@ static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
if (SDValue R = combineCommutableSHUFP(N, VT, DL, DAG))
return R;
- if (SDValue R = canonicalizeShuffleWithBinOps(N, DAG, DL))
- return R;
-
// Handle specific target shuffles.
switch (Opcode) {
case X86ISD::MOVDDUP: {
@@ -39844,6 +40016,12 @@ static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG,
if (TLI.SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero,
DCI))
return SDValue(N, 0);
+
+ // Canonicalize SHUFFLE(BINOP(X,Y)) -> BINOP(SHUFFLE(X),SHUFFLE(Y)).
+ // Perform this after other shuffle combines to allow inner shuffles to be
+ // combined away first.
+ if (SDValue BinOp = canonicalizeShuffleWithBinOps(Op, DAG, SDLoc(N)))
+ return BinOp;
}
return SDValue();
@@ -40037,6 +40215,24 @@ bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc, Op.getOperand(1)));
break;
}
+ case X86ISD::VPSHA:
+ case X86ISD::VPSHL:
+ case X86ISD::VSHLV:
+ case X86ISD::VSRLV:
+ case X86ISD::VSRAV: {
+ APInt LHSUndef, LHSZero;
+ APInt RHSUndef, RHSZero;
+ SDValue LHS = Op.getOperand(0);
+ SDValue RHS = Op.getOperand(1);
+ if (SimplifyDemandedVectorElts(LHS, DemandedElts, LHSUndef, LHSZero, TLO,
+ Depth + 1))
+ return true;
+ if (SimplifyDemandedVectorElts(RHS, DemandedElts, RHSUndef, RHSZero, TLO,
+ Depth + 1))
+ return true;
+ KnownZero = LHSZero;
+ break;
+ }
case X86ISD::KSHIFTL: {
SDValue Src = Op.getOperand(0);
auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
@@ -41799,6 +41995,37 @@ static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
+// (mul (zext a), (sext b))
+static bool detectExtMul(SelectionDAG &DAG, const SDValue &Mul, SDValue &Op0,
+ SDValue &Op1) {
+ Op0 = Mul.getOperand(0);
+ Op1 = Mul.getOperand(1);
+
+  // Canonicalize so that Op1 is the sign-extended operand.
+ if (Op0.getOpcode() == ISD::SIGN_EXTEND)
+ std::swap(Op0, Op1);
+
+ auto IsFreeTruncation = [](SDValue &Op) -> bool {
+ if ((Op.getOpcode() == ISD::ZERO_EXTEND ||
+ Op.getOpcode() == ISD::SIGN_EXTEND) &&
+ Op.getOperand(0).getScalarValueSizeInBits() <= 8)
+ return true;
+
+ auto *BV = dyn_cast<BuildVectorSDNode>(Op);
+ return (BV && BV->isConstant());
+ };
+
+  // (dpbusd (zext a), (sext b)). Since the first operand must be an unsigned
+  // value, check that Op0 is a zero-extended value. Op1 must be a signed
+  // value, so just check its significant (signed) bits.
+ if ((IsFreeTruncation(Op0) &&
+ DAG.computeKnownBits(Op0).countMaxActiveBits() <= 8) &&
+ (IsFreeTruncation(Op1) && DAG.ComputeMaxSignificantBits(Op1) <= 8))
+ return true;
+
+ return false;
+}
+
// Given a ABS node, detect the following pattern:
// (ABS (SUB (ZERO_EXTEND a), (ZERO_EXTEND b))).
// This is useful as it is the input into a SAD pattern.
@@ -41820,6 +42047,50 @@ static bool detectZextAbsDiff(const SDValue &Abs, SDValue &Op0, SDValue &Op1) {
return true;
}
+static SDValue createVPDPBUSD(SelectionDAG &DAG, SDValue LHS, SDValue RHS,
+ unsigned &LogBias, const SDLoc &DL,
+ const X86Subtarget &Subtarget) {
+ // Extend or truncate to MVT::i8 first.
+ MVT Vi8VT =
+ MVT::getVectorVT(MVT::i8, LHS.getValueType().getVectorElementCount());
+ LHS = DAG.getZExtOrTrunc(LHS, DL, Vi8VT);
+ RHS = DAG.getSExtOrTrunc(RHS, DL, Vi8VT);
+
+  // VPDPBUSD(<16 x i32> C, <16 x i8> A, <16 x i8> B). For each dst element:
+  // C[0] = C[0] + A[0]*B[0] + A[1]*B[1] + A[2]*B[2] + A[3]*B[3].
+  // The src A, B element type is i8, but the dst C element type is i32.
+  // Since the reduce stage count is calculated from the vXi8 src vector
+  // type, we need a log bias of 2 to skip the 2 stages already folded here.
+ LogBias = 2;
+
+ unsigned RegSize = std::max(128u, (unsigned)Vi8VT.getSizeInBits());
+ if (Subtarget.hasVNNI() && !Subtarget.hasVLX())
+ RegSize = std::max(512u, RegSize);
+
+ // "Zero-extend" the i8 vectors. This is not a per-element zext, rather we
+ // fill in the missing vector elements with 0.
+ unsigned NumConcat = RegSize / Vi8VT.getSizeInBits();
+ SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, Vi8VT));
+ Ops[0] = LHS;
+ MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
+ SDValue DpOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
+ Ops[0] = RHS;
+ SDValue DpOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
+
+ // Actually build the DotProduct, split as 256/512 bits for
+ // AVXVNNI/AVX512VNNI.
+ auto DpBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
+ ArrayRef<SDValue> Ops) {
+ MVT VT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
+ return DAG.getNode(X86ISD::VPDPBUSD, DL, VT, Ops);
+ };
+ MVT DpVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
+ SDValue Zero = DAG.getConstant(0, DL, DpVT);
+
+ return SplitOpsAndApply(DAG, Subtarget, DL, DpVT, {Zero, DpOp0, DpOp1},
+ DpBuilder, false);
+}
+
// Given two zexts of <k x i8> to <k x i32>, create a PSADBW of the inputs
// to these zexts.
static SDValue createPSADBW(SelectionDAG &DAG, const SDValue &Zext0,
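
A scalar reference model of a single VPDPBUSD lane may help when reading createVPDPBUSD: each i32 accumulator lane folds four unsigned-by-signed byte products, which is exactly the 2 reduction stages the LogBias of 2 skips. This sketch is illustrative only and is not the intrinsic's API:

#include <cstdint>

// Reference model of one 32-bit lane of VPDPBUSD:
// c += u8(a0)*s8(b0) + u8(a1)*s8(b1) + u8(a2)*s8(b2) + u8(a3)*s8(b3),
// i.e. four byte products are already folded into each i32 accumulator,
// which is where the log bias of 2 in the reduction comes from (2^2 = 4).
int32_t vpdpbusd_lane(int32_t c, const uint8_t a[4], const int8_t b[4]) {
  for (int i = 0; i < 4; ++i)
    c += int32_t(a[i]) * int32_t(b[i]);
  return c;
}
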
@@ -41967,18 +42238,19 @@ static SDValue combinePredicateReduction(SDNode *Extract, SelectionDAG &DAG,
EVT MovmskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
Movmsk = DAG.getBitcast(MovmskVT, Match);
} else {
- // For all_of(setcc(vec,0,eq)) - avoid vXi64 comparisons if we don't have
- // PCMPEQQ (SSE41+), use PCMPEQD instead.
- if (BinOp == ISD::AND && !Subtarget.hasSSE41() &&
- Match.getOpcode() == ISD::SETCC &&
- ISD::isBuildVectorAllZeros(Match.getOperand(1).getNode()) &&
+ // For all_of(setcc(x,y,eq))
+ // - avoid vXi64 comparisons without PCMPEQQ (SSE41+), use PCMPEQD.
+ // - avoid vXi16 comparisons, use PMOVMSKB(PCMPEQB()).
+ if (BinOp == ISD::AND && Match.getOpcode() == ISD::SETCC &&
cast<CondCodeSDNode>(Match.getOperand(2))->get() ==
ISD::CondCode::SETEQ) {
SDValue Vec = Match.getOperand(0);
- if (Vec.getValueType().getScalarType() == MVT::i64 &&
- (2 * NumElts) <= MaxElts) {
+ EVT VecSVT = Vec.getValueType().getScalarType();
+ if ((VecSVT == MVT::i16 && !Subtarget.hasBWI()) ||
+ (VecSVT == MVT::i64 && !Subtarget.hasSSE41())) {
NumElts *= 2;
- EVT CmpVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
+ VecSVT = VecSVT.getHalfSizedIntegerVT(*DAG.getContext());
+ EVT CmpVT = EVT::getVectorVT(*DAG.getContext(), VecSVT, NumElts);
MatchVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
Match = DAG.getSetCC(
DL, MatchVT, DAG.getBitcast(CmpVT, Match.getOperand(0)),
@@ -42069,6 +42341,77 @@ static SDValue combinePredicateReduction(SDNode *Extract, SelectionDAG &DAG,
return DAG.getNode(ISD::SUB, DL, ExtractVT, Zero, Zext);
}
+static SDValue combineVPDPBUSDPattern(SDNode *Extract, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ if (!Subtarget.hasVNNI() && !Subtarget.hasAVXVNNI())
+ return SDValue();
+
+ EVT ExtractVT = Extract->getValueType(0);
+ // Verify the type we're extracting is i32, as the output element type of
+ // vpdpbusd is i32.
+ if (ExtractVT != MVT::i32)
+ return SDValue();
+
+ EVT VT = Extract->getOperand(0).getValueType();
+ if (!isPowerOf2_32(VT.getVectorNumElements()))
+ return SDValue();
+
+ // Match shuffle + add pyramid.
+ ISD::NodeType BinOp;
+ SDValue Root = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD});
+
+  // We can't combine to vpdpbusd for zext, because each of the 4 multiplies
+  // done by vpdpbusd computes a signed 16-bit product that will be
+  // sign-extended before adding into the accumulator.
+ // TODO:
+ // We also need to verify that the multiply has at least 2x the number of bits
+ // of the input. We shouldn't match
+ // (sign_extend (mul (vXi9 (zext (vXi8 X))), (vXi9 (zext (vXi8 Y)))).
+ // if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND))
+ // Root = Root.getOperand(0);
+
+ // If there was a match, we want Root to be a mul.
+ if (!Root || Root.getOpcode() != ISD::MUL)
+ return SDValue();
+
+ // Check whether we have an extend and mul pattern
+ SDValue LHS, RHS;
+ if (!detectExtMul(DAG, Root, LHS, RHS))
+ return SDValue();
+
+ // Create the dot product instruction.
+ SDLoc DL(Extract);
+ unsigned StageBias;
+ SDValue DP = createVPDPBUSD(DAG, LHS, RHS, StageBias, DL, Subtarget);
+
+ // If the original vector was wider than 4 elements, sum over the results
+ // in the DP vector.
+ unsigned Stages = Log2_32(VT.getVectorNumElements());
+ EVT DpVT = DP.getValueType();
+
+ if (Stages > StageBias) {
+ unsigned DpElems = DpVT.getVectorNumElements();
+
+ for (unsigned i = Stages - StageBias; i > 0; --i) {
+ SmallVector<int, 16> Mask(DpElems, -1);
+ for (unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j)
+ Mask[j] = MaskEnd + j;
+
+ SDValue Shuffle =
+ DAG.getVectorShuffle(DpVT, DL, DP, DAG.getUNDEF(DpVT), Mask);
+ DP = DAG.getNode(ISD::ADD, DL, DpVT, DP, Shuffle);
+ }
+ }
+
+ // Return the lowest ExtractSizeInBits bits.
+ EVT ResVT =
+ EVT::getVectorVT(*DAG.getContext(), ExtractVT,
+ DpVT.getSizeInBits() / ExtractVT.getSizeInBits());
+ DP = DAG.getBitcast(ResVT, DP);
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, DP,
+ Extract->getOperand(1));
+}
+
static SDValue combineBasicSADPattern(SDNode *Extract, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// PSADBW is only supported on SSE2 and up.
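
The shuffle+add pyramid built by combineVPDPBUSDPattern after the dot product can be modelled on a plain array: each stage adds the upper half of the live lanes onto the lower half until the total sits in lane 0, the element finally extracted. A minimal sketch (plain C++, not DAG code):

#include <cstddef>
#include <cstdint>
#include <vector>

// Model of the shuffle+add pyramid: each stage adds the upper half of the
// live lanes onto the lower half, so after log2(N) stages the total sum
// ends up in lane 0. Assumes v.size() is a non-zero power of two.
int32_t reduce_add_lanes(std::vector<int32_t> v) {
  for (std::size_t half = v.size() / 2; half >= 1; half /= 2)
    for (std::size_t j = 0; j < half; ++j)
      v[j] += v[j + half];
  return v[0];
}
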
@@ -42676,6 +43019,9 @@ static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG,
if (SDValue SAD = combineBasicSADPattern(N, DAG, Subtarget))
return SAD;
+ if (SDValue VPDPBUSD = combineVPDPBUSDPattern(N, DAG, Subtarget))
+ return VPDPBUSD;
+
// Attempt to replace an all_of/any_of horizontal reduction with a MOVMSK.
if (SDValue Cmp = combinePredicateReduction(N, DAG, Subtarget))
return Cmp;
@@ -42903,6 +43249,15 @@ static SDValue combineSelectOfTwoConstants(SDNode *N, SelectionDAG &DAG) {
// multiplier, convert to 'and' + 'add'.
const APInt &TrueVal = TrueC->getAPIntValue();
const APInt &FalseVal = FalseC->getAPIntValue();
+
+ // We have a more efficient lowering for "(X == 0) ? Y : -1" using SBB.
+ if ((TrueVal.isAllOnes() || FalseVal.isAllOnes()) &&
+ Cond.getOpcode() == ISD::SETCC && isNullConstant(Cond.getOperand(1))) {
+ ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
+ if (CC == ISD::SETEQ || CC == ISD::SETNE)
+ return SDValue();
+ }
+
bool OV;
APInt Diff = TrueVal.ssub_ov(FalseVal, OV);
if (OV)
@@ -44052,6 +44407,23 @@ static SDValue combinePTESTCC(SDValue EFLAGS, X86::CondCode &CC,
// TESTZ(X,-1) == TESTZ(X,X)
if (ISD::isBuildVectorAllOnes(Op1.getNode()))
return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT, Op0, Op0);
+
+ // TESTZ(OR(LO(X),HI(X)),OR(LO(Y),HI(Y))) -> TESTZ(X,Y)
+ // TODO: Add COND_NE handling?
+ if (CC == X86::COND_E && OpVT.is128BitVector() && Subtarget.hasAVX()) {
+ SDValue Src0 = peekThroughBitcasts(Op0);
+ SDValue Src1 = peekThroughBitcasts(Op1);
+ if (Src0.getOpcode() == ISD::OR && Src1.getOpcode() == ISD::OR) {
+ Src0 = getSplitVectorSrc(peekThroughBitcasts(Src0.getOperand(0)),
+ peekThroughBitcasts(Src0.getOperand(1)), true);
+ Src1 = getSplitVectorSrc(peekThroughBitcasts(Src1.getOperand(0)),
+ peekThroughBitcasts(Src1.getOperand(1)), true);
+ if (Src0 && Src1)
+ return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
+ DAG.getBitcast(MVT::v4i64, Src0),
+ DAG.getBitcast(MVT::v4i64, Src1));
+ }
+ }
}
return SDValue();
@@ -44117,21 +44489,58 @@ static SDValue combineSetCCMOVMSK(SDValue EFLAGS, X86::CondCode &CC,
BCNumEltBits > NumEltBits &&
DAG.ComputeNumSignBits(BC) > (BCNumEltBits - NumEltBits)) {
SDLoc DL(EFLAGS);
- unsigned CmpMask = IsAnyOf ? 0 : ((1 << BCNumElts) - 1);
+ APInt CmpMask = APInt::getLowBitsSet(32, IsAnyOf ? 0 : BCNumElts);
return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, BC),
DAG.getConstant(CmpMask, DL, MVT::i32));
}
}
+ // MOVMSK(CONCAT(X,Y)) == 0 -> MOVMSK(OR(X,Y)).
+ // MOVMSK(CONCAT(X,Y)) != 0 -> MOVMSK(OR(X,Y)).
+ // MOVMSK(CONCAT(X,Y)) == -1 -> MOVMSK(AND(X,Y)).
+ // MOVMSK(CONCAT(X,Y)) != -1 -> MOVMSK(AND(X,Y)).
+ if (VecVT.is256BitVector()) {
+ SmallVector<SDValue> Ops;
+ if (collectConcatOps(peekThroughBitcasts(Vec).getNode(), Ops) &&
+ Ops.size() == 2) {
+ SDLoc DL(EFLAGS);
+ EVT SubVT = Ops[0].getValueType();
+ APInt CmpMask = APInt::getLowBitsSet(32, IsAnyOf ? 0 : NumElts / 2);
+ SDValue V = DAG.getNode(IsAnyOf ? ISD::OR : ISD::AND, DL, SubVT, Ops);
+ V = DAG.getBitcast(VecVT.getHalfNumVectorElementsVT(), V);
+ return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
+ DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V),
+ DAG.getConstant(CmpMask, DL, MVT::i32));
+ }
+ }
+
// MOVMSK(PCMPEQ(X,0)) == -1 -> PTESTZ(X,X).
// MOVMSK(PCMPEQ(X,0)) != -1 -> !PTESTZ(X,X).
+ // MOVMSK(PCMPEQ(X,Y)) == -1 -> PTESTZ(SUB(X,Y),SUB(X,Y)).
+ // MOVMSK(PCMPEQ(X,Y)) != -1 -> !PTESTZ(SUB(X,Y),SUB(X,Y)).
if (IsAllOf && Subtarget.hasSSE41()) {
+ MVT TestVT = VecVT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
SDValue BC = peekThroughBitcasts(Vec);
- if (BC.getOpcode() == X86ISD::PCMPEQ &&
- ISD::isBuildVectorAllZeros(BC.getOperand(1).getNode())) {
- MVT TestVT = VecVT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
- SDValue V = DAG.getBitcast(TestVT, BC.getOperand(0));
+ if (BC.getOpcode() == X86ISD::PCMPEQ) {
+ SDValue V = DAG.getNode(ISD::SUB, SDLoc(BC), BC.getValueType(),
+ BC.getOperand(0), BC.getOperand(1));
+ V = DAG.getBitcast(TestVT, V);
+ return DAG.getNode(X86ISD::PTEST, SDLoc(EFLAGS), MVT::i32, V, V);
+ }
+ // Check for 256-bit split vector cases.
+ if (BC.getOpcode() == ISD::AND &&
+ BC.getOperand(0).getOpcode() == X86ISD::PCMPEQ &&
+ BC.getOperand(1).getOpcode() == X86ISD::PCMPEQ) {
+ SDValue LHS = BC.getOperand(0);
+ SDValue RHS = BC.getOperand(1);
+ LHS = DAG.getNode(ISD::SUB, SDLoc(LHS), LHS.getValueType(),
+ LHS.getOperand(0), LHS.getOperand(1));
+ RHS = DAG.getNode(ISD::SUB, SDLoc(RHS), RHS.getValueType(),
+ RHS.getOperand(0), RHS.getOperand(1));
+ LHS = DAG.getBitcast(TestVT, LHS);
+ RHS = DAG.getBitcast(TestVT, RHS);
+ SDValue V = DAG.getNode(ISD::OR, SDLoc(EFLAGS), TestVT, LHS, RHS);
return DAG.getNode(X86ISD::PTEST, SDLoc(EFLAGS), MVT::i32, V, V);
}
}
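
The new PCMPEQ handling above rests on the identity that all lanes of X and Y compare equal exactly when every lane of X - Y is zero, which PTEST's ZF check (the operand AND'ed with itself is all zeros) reports directly. A scalar sketch of that identity (illustrative, not the DAG fold):

#include <cstdint>

// All lanes of X and Y compare equal exactly when every lane of X - Y is
// zero; PTESTZ(V, V) sets ZF when V is all zeros, so
// MOVMSK(PCMPEQ(X,Y)) == -1 can be tested as PTESTZ(X - Y, X - Y).
bool all_lanes_equal(const uint32_t *x, const uint32_t *y, int n) {
  uint32_t accum = 0;
  for (int i = 0; i < n; ++i)
    accum |= x[i] - y[i];  // lane-wise SUB; zero iff the lanes were equal
  return accum == 0;       // ZF of PTEST(V, V)
}
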
@@ -44162,23 +44571,28 @@ static SDValue combineSetCCMOVMSK(SDValue EFLAGS, X86::CondCode &CC,
// PMOVMSKB(PACKSSBW(LO(X), HI(X)))
// -> PMOVMSKB(BITCAST_v32i8(X)) & 0xAAAAAAAA.
if (CmpBits >= 16 && Subtarget.hasInt256() &&
- VecOp0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
- VecOp1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
- VecOp0.getOperand(0) == VecOp1.getOperand(0) &&
- VecOp0.getConstantOperandAPInt(1) == 0 &&
- VecOp1.getConstantOperandAPInt(1) == 8 &&
(IsAnyOf || (SignExt0 && SignExt1))) {
- SDLoc DL(EFLAGS);
- SDValue Result = DAG.getBitcast(MVT::v32i8, VecOp0.getOperand(0));
- Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
- unsigned CmpMask = IsAnyOf ? 0 : 0xFFFFFFFF;
- if (!SignExt0 || !SignExt1) {
- assert(IsAnyOf && "Only perform v16i16 signmasks for any_of patterns");
- Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result,
- DAG.getConstant(0xAAAAAAAA, DL, MVT::i32));
+ if (SDValue Src = getSplitVectorSrc(VecOp0, VecOp1, true)) {
+ SDLoc DL(EFLAGS);
+ SDValue Result = peekThroughBitcasts(Src);
+ if (IsAllOf && Result.getOpcode() == X86ISD::PCMPEQ) {
+ SDValue V = DAG.getNode(ISD::SUB, DL, Result.getValueType(),
+ Result.getOperand(0), Result.getOperand(1));
+ V = DAG.getBitcast(MVT::v4i64, V);
+ return DAG.getNode(X86ISD::PTEST, SDLoc(EFLAGS), MVT::i32, V, V);
+ }
+ Result = DAG.getBitcast(MVT::v32i8, Result);
+ Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
+ unsigned CmpMask = IsAnyOf ? 0 : 0xFFFFFFFF;
+ if (!SignExt0 || !SignExt1) {
+ assert(IsAnyOf &&
+ "Only perform v16i16 signmasks for any_of patterns");
+ Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result,
+ DAG.getConstant(0xAAAAAAAA, DL, MVT::i32));
+ }
+ return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
+ DAG.getConstant(CmpMask, DL, MVT::i32));
}
- return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
- DAG.getConstant(CmpMask, DL, MVT::i32));
}
}
@@ -44732,7 +45146,8 @@ static SDValue combineMulToPMADDWD(SDNode *N, SelectionDAG &DAG,
return SDValue();
// Sign bits must extend down to the lowest i16.
- if (DAG.ComputeMinSignedBits(N1) > 16 || DAG.ComputeMinSignedBits(N0) > 16)
+ if (DAG.ComputeMaxSignificantBits(N1) > 16 ||
+ DAG.ComputeMaxSignificantBits(N0) > 16)
return SDValue();
// At least one of the elements must be zero in the upper 17 bits, or can be
@@ -45224,33 +45639,28 @@ static SDValue combineHorizOpWithShuffle(SDNode *N, SelectionDAG &DAG,
// truncation trees that help us avoid lane crossing shuffles.
// TODO: There's a lot more we can do for PACK/HADD style shuffle combines.
// TODO: We don't handle vXf64 shuffles yet.
- if (VT.is128BitVector() && SrcVT.getScalarSizeInBits() <= 32 &&
- BC0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
- BC1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
- BC0.getOperand(0) == BC1.getOperand(0) &&
- BC0.getOperand(0).getValueType().is256BitVector() &&
- BC0.getConstantOperandAPInt(1) == 0 &&
- BC1.getConstantOperandAPInt(1) ==
- BC0.getValueType().getVectorNumElements()) {
- SmallVector<SDValue> ShuffleOps;
- SmallVector<int> ShuffleMask, ScaledMask;
- SDValue Vec = peekThroughBitcasts(BC0.getOperand(0));
- if (getTargetShuffleInputs(Vec, ShuffleOps, ShuffleMask, DAG)) {
- resolveTargetShuffleInputsAndMask(ShuffleOps, ShuffleMask);
- // To keep the HOP LHS/RHS coherency, we must be able to scale the unary
- // shuffle to a v4X64 width - we can probably relax this in the future.
- if (!isAnyZero(ShuffleMask) && ShuffleOps.size() == 1 &&
- ShuffleOps[0].getValueType().is256BitVector() &&
- scaleShuffleElements(ShuffleMask, 4, ScaledMask)) {
- SDValue Lo, Hi;
- MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f32 : MVT::v4i32;
- std::tie(Lo, Hi) = DAG.SplitVector(ShuffleOps[0], DL);
- Lo = DAG.getBitcast(SrcVT, Lo);
- Hi = DAG.getBitcast(SrcVT, Hi);
- SDValue Res = DAG.getNode(Opcode, DL, VT, Lo, Hi);
- Res = DAG.getBitcast(ShufVT, Res);
- Res = DAG.getVectorShuffle(ShufVT, DL, Res, Res, ScaledMask);
- return DAG.getBitcast(VT, Res);
+ if (VT.is128BitVector() && SrcVT.getScalarSizeInBits() <= 32) {
+ if (SDValue BCSrc = getSplitVectorSrc(BC0, BC1, false)) {
+ SmallVector<SDValue> ShuffleOps;
+ SmallVector<int> ShuffleMask, ScaledMask;
+ SDValue Vec = peekThroughBitcasts(BCSrc);
+ if (getTargetShuffleInputs(Vec, ShuffleOps, ShuffleMask, DAG)) {
+ resolveTargetShuffleInputsAndMask(ShuffleOps, ShuffleMask);
+ // To keep the HOP LHS/RHS coherency, we must be able to scale the unary
+ // shuffle to a v4X64 width - we can probably relax this in the future.
+ if (!isAnyZero(ShuffleMask) && ShuffleOps.size() == 1 &&
+ ShuffleOps[0].getValueType().is256BitVector() &&
+ scaleShuffleElements(ShuffleMask, 4, ScaledMask)) {
+ SDValue Lo, Hi;
+ MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f32 : MVT::v4i32;
+ std::tie(Lo, Hi) = DAG.SplitVector(ShuffleOps[0], DL);
+ Lo = DAG.getBitcast(SrcVT, Lo);
+ Hi = DAG.getBitcast(SrcVT, Hi);
+ SDValue Res = DAG.getNode(Opcode, DL, VT, Lo, Hi);
+ Res = DAG.getBitcast(ShufVT, Res);
+ Res = DAG.getVectorShuffle(ShufVT, DL, Res, Res, ScaledMask);
+ return DAG.getBitcast(VT, Res);
+ }
}
}
}
@@ -46047,6 +46457,49 @@ static SDValue combineBitOpWithMOVMSK(SDNode *N, SelectionDAG &DAG) {
return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
}
+// Attempt to fold BITOP(SHIFT(X,Z),SHIFT(Y,Z)) -> SHIFT(BITOP(X,Y),Z).
+// NOTE: This is a very limited case of what SimplifyUsingDistributiveLaws
+// handles in InstCombine.
+static SDValue combineBitOpWithShift(SDNode *N, SelectionDAG &DAG) {
+ unsigned Opc = N->getOpcode();
+ assert((Opc == ISD::OR || Opc == ISD::AND || Opc == ISD::XOR) &&
+ "Unexpected bit opcode");
+
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ EVT VT = N->getValueType(0);
+
+ // Both operands must be single use.
+ if (!N0.hasOneUse() || !N1.hasOneUse())
+ return SDValue();
+
+ // Search for matching shifts.
+ SDValue BC0 = peekThroughOneUseBitcasts(N0);
+ SDValue BC1 = peekThroughOneUseBitcasts(N1);
+
+ unsigned BCOpc = BC0.getOpcode();
+ EVT BCVT = BC0.getValueType();
+ if (BCOpc != BC1->getOpcode() || BCVT != BC1.getValueType())
+ return SDValue();
+
+ switch (BCOpc) {
+ case X86ISD::VSHLI:
+ case X86ISD::VSRLI:
+ case X86ISD::VSRAI: {
+ if (BC0.getOperand(1) != BC1.getOperand(1))
+ return SDValue();
+
+ SDLoc DL(N);
+ SDValue BitOp =
+ DAG.getNode(Opc, DL, BCVT, BC0.getOperand(0), BC1.getOperand(0));
+ SDValue Shift = DAG.getNode(BCOpc, DL, BCVT, BitOp, BC0.getOperand(1));
+ return DAG.getBitcast(VT, Shift);
+ }
+ }
+
+ return SDValue();
+}
+
/// If this is a zero/all-bits result that is bitwise-anded with a low bits
/// mask. (Mask == 1 for the x86 lowering of a SETCC + ZEXT), replace the 'and'
/// with a shift-right to eliminate loading the vector constant mask value.
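
combineBitOpWithShift relies on bitwise ops distributing over shifts by the same amount. A compile-time sketch of the identity for one sample value (plain C++, logical shifts shown; the same holds lane-wise for the VSHLI/VSRLI/VSRAI cases handled above):

#include <cstdint>

// The distributive identity behind combineBitOpWithShift, checked at
// compile time for one sample: BITOP(SHIFT(X,Z),SHIFT(Y,Z)) == SHIFT(BITOP(X,Y),Z).
constexpr bool distributes(uint32_t x, uint32_t y, unsigned s) {
  return ((x << s) | (y << s)) == ((x | y) << s) &&
         ((x >> s) & (y >> s)) == ((x & y) >> s) &&
         ((x >> s) ^ (y >> s)) == ((x ^ y) >> s);
}
static_assert(distributes(0xDEADBEEFu, 0x12345678u, 5), "shift distributivity");
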
@@ -46350,6 +46803,9 @@ static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
return R;
+ if (SDValue R = combineBitOpWithShift(N, DAG))
+ return R;
+
if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, DCI, Subtarget))
return FPLogic;
@@ -46797,6 +47253,9 @@ static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
return R;
+ if (SDValue R = combineBitOpWithShift(N, DAG))
+ return R;
+
if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, DCI, Subtarget))
return FPLogic;
@@ -47837,7 +48296,8 @@ static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
St->getValue().getOperand(0).getValueType() == MVT::v16i16 &&
TLI.isTruncStoreLegal(MVT::v16i32, MVT::v16i8) &&
St->getValue().hasOneUse() && !DCI.isBeforeLegalizeOps()) {
- SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v16i32, St->getValue());
+ SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v16i32,
+ St->getValue().getOperand(0));
return DAG.getTruncStore(St->getChain(), dl, Ext, St->getBasePtr(),
MVT::v16i8, St->getMemOperand());
}
@@ -48630,7 +49090,7 @@ static SDValue combineVectorSignBitsTruncation(SDNode *N, const SDLoc &DL,
// originally concatenated from subvectors.
SmallVector<SDValue> ConcatOps;
if (VT.getSizeInBits() > 128 || !collectConcatOps(In.getNode(), ConcatOps))
- return SDValue();
+ return SDValue();
}
unsigned NumPackedSignBits = std::min<unsigned>(SVT.getSizeInBits(), 16);
@@ -48714,7 +49174,7 @@ static SDValue combinePMULH(SDValue Src, EVT VT, const SDLoc &DL,
// sequence or using AVX512 truncations. If the inputs are sext/zext then the
// truncations may actually be free by peeking through to the ext source.
auto IsSext = [&DAG](SDValue V) {
- return DAG.ComputeMinSignedBits(V) <= 16;
+ return DAG.ComputeMaxSignificantBits(V) <= 16;
};
auto IsZext = [&DAG](SDValue V) {
return DAG.computeKnownBits(V).countMaxActiveBits() <= 16;
@@ -49268,6 +49728,9 @@ static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
return R;
+ if (SDValue R = combineBitOpWithShift(N, DAG))
+ return R;
+
if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, DCI, Subtarget))
return FPLogic;
@@ -52185,6 +52648,22 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
unsigned NumOps = Ops.size();
switch (Op0.getOpcode()) {
+ case X86ISD::VBROADCAST: {
+ if (!IsSplat && VT == MVT::v4f64 && llvm::all_of(Ops, [](SDValue Op) {
+ return Op.getOperand(0).getValueType().is128BitVector();
+ }))
+ return DAG.getNode(X86ISD::MOVDDUP, DL, VT,
+ ConcatSubOperand(VT, Ops, 0));
+ break;
+ }
+ case X86ISD::MOVDDUP:
+ case X86ISD::MOVSHDUP:
+ case X86ISD::MOVSLDUP: {
+ if (!IsSplat)
+ return DAG.getNode(Op0.getOpcode(), DL, VT,
+ ConcatSubOperand(VT, Ops, 0));
+ break;
+ }
case X86ISD::SHUFP: {
// Add SHUFPD support if/when necessary.
if (!IsSplat && VT.getScalarType() == MVT::f32 &&
@@ -52207,14 +52686,21 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
}
LLVM_FALLTHROUGH;
case X86ISD::VPERMILPI:
- // TODO - add support for vXf64/vXi64 shuffles.
if (!IsSplat && NumOps == 2 && (VT == MVT::v8f32 || VT == MVT::v8i32) &&
- Subtarget.hasAVX() && Op0.getOperand(1) == Ops[1].getOperand(1)) {
+ Op0.getOperand(1) == Ops[1].getOperand(1)) {
SDValue Res = DAG.getBitcast(MVT::v8f32, ConcatSubOperand(VT, Ops, 0));
Res = DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, Res,
Op0.getOperand(1));
return DAG.getBitcast(VT, Res);
}
+ if (!IsSplat && NumOps == 2 && VT == MVT::v4f64) {
+ uint64_t Idx0 = Ops[0].getConstantOperandVal(1);
+ uint64_t Idx1 = Ops[1].getConstantOperandVal(1);
+ uint64_t Idx = ((Idx1 & 3) << 2) | (Idx0 & 3);
+ return DAG.getNode(Op0.getOpcode(), DL, VT,
+ ConcatSubOperand(VT, Ops, 0),
+ DAG.getTargetConstant(Idx, DL, MVT::i8));
+ }
break;
case X86ISD::VPERMV3:
if (!IsSplat && NumOps == 2 && VT.is512BitVector()) {
@@ -52268,6 +52754,9 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
}
LLVM_FALLTHROUGH;
case X86ISD::VSRAI:
+ case X86ISD::VSHL:
+ case X86ISD::VSRL:
+ case X86ISD::VSRA:
if (((VT.is256BitVector() && Subtarget.hasInt256()) ||
(VT.is512BitVector() && Subtarget.useAVX512Regs() &&
(EltSizeInBits >= 32 || Subtarget.useBWIRegs()))) &&
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.h b/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.h
index d1d6e319f16b..3f6d567d3f4d 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.h
@@ -1540,7 +1540,7 @@ namespace llvm {
unsigned GetAlignedArgumentStackSize(unsigned StackSize,
SelectionDAG &DAG) const;
- unsigned getAddressSpace(void) const;
+ unsigned getAddressSpace() const;
SDValue FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned,
SDValue &Chain) const;
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86IndirectBranchTracking.cpp b/contrib/llvm-project/llvm/lib/Target/X86/X86IndirectBranchTracking.cpp
index 6642f46e64b2..7e751a4c8811 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86IndirectBranchTracking.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86IndirectBranchTracking.cpp
@@ -95,14 +95,45 @@ static bool IsCallReturnTwice(llvm::MachineOperand &MOp) {
return Attrs.hasFnAttr(Attribute::ReturnsTwice);
}
+// Checks whether the function should have an ENDBR in its prologue.
+static bool needsPrologueENDBR(MachineFunction &MF, const Module *M) {
+ Function &F = MF.getFunction();
+
+ if (F.doesNoCfCheck())
+ return false;
+
+ const X86TargetMachine *TM =
+ static_cast<const X86TargetMachine *>(&MF.getTarget());
+ Metadata *IBTSeal = M->getModuleFlag("ibt-seal");
+
+ switch (TM->getCodeModel()) {
+  // Large code model functions are always reachable through indirect calls.
+ case CodeModel::Large:
+ return true;
+  // Only address-taken functions in an LTO'ed kernel are reachable indirectly.
+  // IBTSeal implies LTO, thus only check whether the function is address taken.
+ case CodeModel::Kernel:
+ // Check if ibt-seal was enabled (implies LTO is being used).
+ if (IBTSeal) {
+ return F.hasAddressTaken();
+ }
+    // If !IBTSeal, fall through to the default case.
+ LLVM_FALLTHROUGH;
+ // Address taken or externally linked functions may be reachable.
+ default:
+ return (F.hasAddressTaken() || !F.hasLocalLinkage());
+ }
+}
+
bool X86IndirectBranchTrackingPass::runOnMachineFunction(MachineFunction &MF) {
const X86Subtarget &SubTarget = MF.getSubtarget<X86Subtarget>();
+ const Module *M = MF.getMMI().getModule();
// Check that the cf-protection-branch is enabled.
- Metadata *isCFProtectionSupported =
- MF.getMMI().getModule()->getModuleFlag("cf-protection-branch");
- // NB: We need to enable IBT in jitted code if JIT compiler is CET
- // enabled.
+ Metadata *isCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
+
+ // NB: We need to enable IBT in jitted code if JIT compiler is CET
+ // enabled.
const X86TargetMachine *TM =
static_cast<const X86TargetMachine *>(&MF.getTarget());
#ifdef __CET__
@@ -119,13 +150,8 @@ bool X86IndirectBranchTrackingPass::runOnMachineFunction(MachineFunction &MF) {
TII = SubTarget.getInstrInfo();
EndbrOpcode = SubTarget.is64Bit() ? X86::ENDBR64 : X86::ENDBR32;
- // Large code model, non-internal function or function whose address
- // was taken, can be accessed through indirect calls. Mark the first
- // BB with ENDBR instruction unless nocf_check attribute is used.
- if ((TM->getCodeModel() == CodeModel::Large ||
- MF.getFunction().hasAddressTaken() ||
- !MF.getFunction().hasLocalLinkage()) &&
- !MF.getFunction().doesNoCfCheck()) {
+ // If function is reachable indirectly, mark the first BB with ENDBR.
+ if (needsPrologueENDBR(MF, M)) {
auto MBB = MF.begin();
Changed |= addENDBR(*MBB, MBB->begin());
}
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86InstrAVX512.td b/contrib/llvm-project/llvm/lib/Target/X86/X86InstrAVX512.td
index ecd4777c3533..bc67d1f89d7f 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -10537,13 +10537,12 @@ def rr : AVX512XS8I<opc, MRMSrcReg, (outs Vec.RC:$dst), (ins Vec.KRC:$src),
multiclass cvt_mask_by_elt_width<bits<8> opc, AVX512VLVectorVTInfo VTInfo,
string OpcodeStr, Predicate prd> {
-// TODO - Replace WriteMove with WriteVecTrunc?
let Predicates = [prd] in
- defm Z : cvt_by_vec_width<opc, VTInfo.info512, OpcodeStr, WriteMove>, EVEX_V512;
+ defm Z : cvt_by_vec_width<opc, VTInfo.info512, OpcodeStr, WriteVecMoveZ>, EVEX_V512;
let Predicates = [prd, HasVLX] in {
- defm Z256 : cvt_by_vec_width<opc, VTInfo.info256, OpcodeStr, WriteMove>, EVEX_V256;
- defm Z128 : cvt_by_vec_width<opc, VTInfo.info128, OpcodeStr, WriteMove>, EVEX_V128;
+ defm Z256 : cvt_by_vec_width<opc, VTInfo.info256, OpcodeStr, WriteVecMoveY>, EVEX_V256;
+ defm Z128 : cvt_by_vec_width<opc, VTInfo.info128, OpcodeStr, WriteVecMoveX>, EVEX_V128;
}
}
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86InstrInfo.cpp b/contrib/llvm-project/llvm/lib/Target/X86/X86InstrInfo.cpp
index c379aa8d9258..4dcd886fa3b2 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -4088,8 +4088,8 @@ bool X86InstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
bool X86InstrInfo::isRedundantFlagInstr(const MachineInstr &FlagI,
Register SrcReg, Register SrcReg2,
int64_t ImmMask, int64_t ImmValue,
- const MachineInstr &OI,
- bool *IsSwapped) const {
+ const MachineInstr &OI, bool *IsSwapped,
+ int64_t *ImmDelta) const {
switch (OI.getOpcode()) {
case X86::CMP64rr:
case X86::CMP32rr:
@@ -4140,10 +4140,21 @@ bool X86InstrInfo::isRedundantFlagInstr(const MachineInstr &FlagI,
int64_t OIMask;
int64_t OIValue;
if (analyzeCompare(OI, OISrcReg, OISrcReg2, OIMask, OIValue) &&
- SrcReg == OISrcReg && ImmMask == OIMask && OIValue == ImmValue) {
- assert(SrcReg2 == X86::NoRegister && OISrcReg2 == X86::NoRegister &&
- "should not have 2nd register");
- return true;
+ SrcReg == OISrcReg && ImmMask == OIMask) {
+ if (OIValue == ImmValue) {
+ *ImmDelta = 0;
+ return true;
+ } else if (static_cast<uint64_t>(ImmValue) ==
+ static_cast<uint64_t>(OIValue) - 1) {
+ *ImmDelta = -1;
+ return true;
+ } else if (static_cast<uint64_t>(ImmValue) ==
+ static_cast<uint64_t>(OIValue) + 1) {
+ *ImmDelta = 1;
+ return true;
+ } else {
+ return false;
+ }
}
}
return FlagI.isIdenticalTo(OI);
@@ -4393,6 +4404,7 @@ bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
bool ShouldUpdateCC = false;
bool IsSwapped = false;
X86::CondCode NewCC = X86::COND_INVALID;
+ int64_t ImmDelta = 0;
// Search backward from CmpInstr for the next instruction defining EFLAGS.
const TargetRegisterInfo *TRI = &getRegisterInfo();
@@ -4439,7 +4451,7 @@ bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
// ... // EFLAGS not changed
// cmp x, y // <-- can be removed
if (isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpMask, CmpValue,
- Inst, &IsSwapped)) {
+ Inst, &IsSwapped, &ImmDelta)) {
Sub = &Inst;
break;
}
@@ -4473,7 +4485,7 @@ bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
// It is safe to remove CmpInstr if EFLAGS is redefined or killed.
// If we are done with the basic block, we need to check whether EFLAGS is
// live-out.
- bool IsSafe = false;
+ bool FlagsMayLiveOut = true;
SmallVector<std::pair<MachineInstr*, X86::CondCode>, 4> OpsToUpdate;
MachineBasicBlock::iterator AfterCmpInstr =
std::next(MachineBasicBlock::iterator(CmpInstr));
@@ -4483,7 +4495,7 @@ bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
// We should check the usage if this instruction uses and updates EFLAGS.
if (!UseEFLAGS && ModifyEFLAGS) {
// It is safe to remove CmpInstr if EFLAGS is updated again.
- IsSafe = true;
+ FlagsMayLiveOut = false;
break;
}
if (!UseEFLAGS && !ModifyEFLAGS)
@@ -4491,7 +4503,7 @@ bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
// EFLAGS is used by this instruction.
X86::CondCode OldCC = X86::COND_INVALID;
- if (MI || IsSwapped) {
+ if (MI || IsSwapped || ImmDelta != 0) {
// We decode the condition code from opcode.
if (Instr.isBranch())
OldCC = X86::getCondFromBranch(Instr);
@@ -4545,9 +4557,59 @@ bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
ReplacementCC = getSwappedCondition(OldCC);
if (ReplacementCC == X86::COND_INVALID)
return false;
+ ShouldUpdateCC = true;
+ } else if (ImmDelta != 0) {
+ unsigned BitWidth = TRI->getRegSizeInBits(*MRI->getRegClass(SrcReg));
+ // Shift amount for min/max constants to adjust for 8/16/32 instruction
+ // sizes.
+ switch (OldCC) {
+ case X86::COND_L: // x <s (C + 1) --> x <=s C
+ if (ImmDelta != 1 || APInt::getSignedMinValue(BitWidth) == CmpValue)
+ return false;
+ ReplacementCC = X86::COND_LE;
+ break;
+ case X86::COND_B: // x <u (C + 1) --> x <=u C
+ if (ImmDelta != 1 || CmpValue == 0)
+ return false;
+ ReplacementCC = X86::COND_BE;
+ break;
+ case X86::COND_GE: // x >=s (C + 1) --> x >s C
+ if (ImmDelta != 1 || APInt::getSignedMinValue(BitWidth) == CmpValue)
+ return false;
+ ReplacementCC = X86::COND_G;
+ break;
+ case X86::COND_AE: // x >=u (C + 1) --> x >u C
+ if (ImmDelta != 1 || CmpValue == 0)
+ return false;
+ ReplacementCC = X86::COND_A;
+ break;
+ case X86::COND_G: // x >s (C - 1) --> x >=s C
+ if (ImmDelta != -1 || APInt::getSignedMaxValue(BitWidth) == CmpValue)
+ return false;
+ ReplacementCC = X86::COND_GE;
+ break;
+ case X86::COND_A: // x >u (C - 1) --> x >=u C
+ if (ImmDelta != -1 || APInt::getMaxValue(BitWidth) == CmpValue)
+ return false;
+ ReplacementCC = X86::COND_AE;
+ break;
+ case X86::COND_LE: // x <=s (C - 1) --> x <s C
+ if (ImmDelta != -1 || APInt::getSignedMaxValue(BitWidth) == CmpValue)
+ return false;
+ ReplacementCC = X86::COND_L;
+ break;
+ case X86::COND_BE: // x <=u (C - 1) --> x <u C
+ if (ImmDelta != -1 || APInt::getMaxValue(BitWidth) == CmpValue)
+ return false;
+ ReplacementCC = X86::COND_B;
+ break;
+ default:
+ return false;
+ }
+ ShouldUpdateCC = true;
}
- if ((ShouldUpdateCC || IsSwapped) && ReplacementCC != OldCC) {
+ if (ShouldUpdateCC && ReplacementCC != OldCC) {
// Push the MachineInstr to OpsToUpdate.
// If it is safe to remove CmpInstr, the condition code of these
// instructions will be modified.
@@ -4555,14 +4617,14 @@ bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
}
if (ModifyEFLAGS || Instr.killsRegister(X86::EFLAGS, TRI)) {
// It is safe to remove CmpInstr if EFLAGS is updated again or killed.
- IsSafe = true;
+ FlagsMayLiveOut = false;
break;
}
}
- // If EFLAGS is not killed nor re-defined, we should check whether it is
- // live-out. If it is live-out, do not optimize.
- if ((MI || IsSwapped) && !IsSafe) {
+ // If we have to update users but EFLAGS is live-out abort, since we cannot
+ // easily find all of the users.
+ if ((MI != nullptr || ShouldUpdateCC) && FlagsMayLiveOut) {
for (MachineBasicBlock *Successor : CmpMBB.successors())
if (Successor->isLiveIn(X86::EFLAGS))
return false;
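
The ImmDelta handling added above reuses the flags of an earlier compare whose immediate differs by one, by rewriting the consumer's condition code. The signed-less-than case reduces to a simple identity, sketched here for 32-bit values (illustrative only; the unsigned and -1-delta cases carry analogous wrap guards):

#include <cstdint>

// The identity behind the COND_L / ImmDelta == +1 rewrite: a user of
// "cmp x, C" can reuse the flags of an earlier "cmp x, C-1" by switching
// from signed-less-than to signed-less-or-equal, valid unless C is the
// signed minimum (where C-1 would wrap).
constexpr bool slt_matches_sle_of_pred(int32_t x, int32_t c) {
  return (x < c) == (x <= c - 1);
}
static_assert(slt_matches_sle_of_pred(7, 10) &&
              slt_matches_sle_of_pred(10, 10) &&
              slt_matches_sle_of_pred(-3, -2), "signed ImmDelta identity");
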
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86InstrInfo.h b/contrib/llvm-project/llvm/lib/Target/X86/X86InstrInfo.h
index 537ada6222bf..33ce55bbdb2b 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86InstrInfo.h
@@ -643,7 +643,8 @@ private:
/// CMP %1, %2 and %3 = SUB %2, %1 ; IsSwapped=true
bool isRedundantFlagInstr(const MachineInstr &FlagI, Register SrcReg,
Register SrcReg2, int64_t ImmMask, int64_t ImmValue,
- const MachineInstr &OI, bool *IsSwapped) const;
+ const MachineInstr &OI, bool *IsSwapped,
+ int64_t *ImmDelta) const;
};
} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86InstructionSelector.cpp b/contrib/llvm-project/llvm/lib/Target/X86/X86InstructionSelector.cpp
index 8abbaa92c8cf..28d57ca9ae3c 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86InstructionSelector.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86InstructionSelector.cpp
@@ -153,8 +153,8 @@ private:
X86InstructionSelector::X86InstructionSelector(const X86TargetMachine &TM,
const X86Subtarget &STI,
const X86RegisterBankInfo &RBI)
- : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
- TRI(*STI.getRegisterInfo()), RBI(RBI),
+ : TM(TM), STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()),
+ RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86OptimizeLEAs.cpp b/contrib/llvm-project/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
index 6967a96ce83b..d0562214a025 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
@@ -610,7 +610,7 @@ MachineInstr *X86OptimizeLEAPass::replaceDebugValue(MachineInstr &MI,
auto replaceOldReg = [OldReg, NewReg](const MachineOperand &Op) {
if (Op.isReg() && Op.getReg() == OldReg)
return MachineOperand::CreateReg(NewReg, false, false, false, false,
- false, false, false, false, 0,
+ false, false, false, false, false,
/*IsRenamable*/ true);
return Op;
};
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86PadShortFunction.cpp b/contrib/llvm-project/llvm/lib/Target/X86/X86PadShortFunction.cpp
index 47ae517ae76d..e92b1b002bb0 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86PadShortFunction.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86PadShortFunction.cpp
@@ -129,10 +129,9 @@ bool PadShortFunc::runOnMachineFunction(MachineFunction &MF) {
bool MadeChange = false;
// Pad the identified basic blocks with NOOPs
- for (DenseMap<MachineBasicBlock*, unsigned int>::iterator I = ReturnBBs.begin();
- I != ReturnBBs.end(); ++I) {
- MachineBasicBlock *MBB = I->first;
- unsigned Cycles = I->second;
+ for (const auto &ReturnBB : ReturnBBs) {
+ MachineBasicBlock *MBB = ReturnBB.first;
+ unsigned Cycles = ReturnBB.second;
// Function::hasOptSize is already checked above.
bool OptForSize = llvm::shouldOptimizeForSize(MBB, PSI, MBFI);
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86PartialReduction.cpp b/contrib/llvm-project/llvm/lib/Target/X86/X86PartialReduction.cpp
index babd923e7496..4342ac089cae 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86PartialReduction.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86PartialReduction.cpp
@@ -13,15 +13,16 @@
//===----------------------------------------------------------------------===//
#include "X86.h"
+#include "X86TargetMachine.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicsX86.h"
-#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
-#include "X86TargetMachine.h"
+#include "llvm/Support/KnownBits.h"
using namespace llvm;
@@ -49,7 +50,7 @@ public:
}
private:
- bool tryMAddReplacement(Instruction *Op);
+ bool tryMAddReplacement(Instruction *Op, bool ReduceInOneBB);
bool trySADReplacement(Instruction *Op);
};
}
@@ -63,7 +64,43 @@ char X86PartialReduction::ID = 0;
INITIALIZE_PASS(X86PartialReduction, DEBUG_TYPE,
"X86 Partial Reduction", false, false)
-bool X86PartialReduction::tryMAddReplacement(Instruction *Op) {
+// This function should be kept in sync with detectExtMul() in X86ISelLowering.cpp.
+static bool matchVPDPBUSDPattern(const X86Subtarget *ST, BinaryOperator *Mul,
+ const DataLayout *DL) {
+ if (!ST->hasVNNI() && !ST->hasAVXVNNI())
+ return false;
+
+ Value *LHS = Mul->getOperand(0);
+ Value *RHS = Mul->getOperand(1);
+
+ if (isa<SExtInst>(LHS))
+ std::swap(LHS, RHS);
+
+ auto IsFreeTruncation = [&](Value *Op) {
+ if (auto *Cast = dyn_cast<CastInst>(Op)) {
+ if (Cast->getParent() == Mul->getParent() &&
+ (Cast->getOpcode() == Instruction::SExt ||
+ Cast->getOpcode() == Instruction::ZExt) &&
+ Cast->getOperand(0)->getType()->getScalarSizeInBits() <= 8)
+ return true;
+ }
+
+ return isa<Constant>(Op);
+ };
+
+  // (dpbusd (zext a), (sext b)). Since the first operand must be an unsigned
+  // value, check that LHS is a zero-extended value. RHS must be a signed
+  // value, so just check its significant (signed) bits.
+ if ((IsFreeTruncation(LHS) &&
+ computeKnownBits(LHS, *DL).countMaxActiveBits() <= 8) &&
+ (IsFreeTruncation(RHS) && ComputeMaxSignificantBits(RHS, *DL) <= 8))
+ return true;
+
+ return false;
+}
+
+bool X86PartialReduction::tryMAddReplacement(Instruction *Op,
+ bool ReduceInOneBB) {
if (!ST->hasSSE2())
return false;
@@ -82,6 +119,13 @@ bool X86PartialReduction::tryMAddReplacement(Instruction *Op) {
Value *LHS = Mul->getOperand(0);
Value *RHS = Mul->getOperand(1);
+  // If the target supports VNNI, leave it to ISel to combine the reduce
+  // operation into a VNNI instruction.
+  // TODO: we could also handle transforming cross-block reductions to the
+  // VNNI intrinsic in this pass.
+ if (ReduceInOneBB && matchVPDPBUSDPattern(ST, Mul, DL))
+ return false;
+
// LHS and RHS should be only used once or if they are the same then only
// used twice. Only check this when SSE4.1 is enabled and we have zext/sext
// instructions, otherwise we use punpck to emulate zero extend in stages. The
@@ -300,7 +344,9 @@ bool X86PartialReduction::trySADReplacement(Instruction *Op) {
// Walk backwards from the ExtractElementInst and determine if it is the end of
// a horizontal reduction. Return the input to the reduction if we find one.
-static Value *matchAddReduction(const ExtractElementInst &EE) {
+static Value *matchAddReduction(const ExtractElementInst &EE,
+ bool &ReduceInOneBB) {
+ ReduceInOneBB = true;
// Make sure we're extracting index 0.
auto *Index = dyn_cast<ConstantInt>(EE.getIndexOperand());
if (!Index || !Index->isNullValue())
@@ -309,6 +355,8 @@ static Value *matchAddReduction(const ExtractElementInst &EE) {
const auto *BO = dyn_cast<BinaryOperator>(EE.getVectorOperand());
if (!BO || BO->getOpcode() != Instruction::Add || !BO->hasOneUse())
return nullptr;
+ if (EE.getParent() != BO->getParent())
+ ReduceInOneBB = false;
unsigned NumElems = cast<FixedVectorType>(BO->getType())->getNumElements();
// Ensure the reduction size is a power of 2.
@@ -321,6 +369,8 @@ static Value *matchAddReduction(const ExtractElementInst &EE) {
const auto *BO = dyn_cast<BinaryOperator>(Op);
if (!BO || BO->getOpcode() != Instruction::Add)
return nullptr;
+ if (EE.getParent() != BO->getParent())
+ ReduceInOneBB = false;
// If this isn't the first add, then it should only have 2 users, the
// shuffle and another add which we checked in the previous iteration.
@@ -460,9 +510,10 @@ bool X86PartialReduction::runOnFunction(Function &F) {
if (!EE)
continue;
+ bool ReduceInOneBB;
// First find a reduction tree.
// FIXME: Do we need to handle other opcodes than Add?
- Value *Root = matchAddReduction(*EE);
+ Value *Root = matchAddReduction(*EE, ReduceInOneBB);
if (!Root)
continue;
@@ -470,7 +521,7 @@ bool X86PartialReduction::runOnFunction(Function &F) {
collectLeaves(Root, Leaves);
for (Instruction *I : Leaves) {
- if (tryMAddReplacement(I)) {
+ if (tryMAddReplacement(I, ReduceInOneBB)) {
MadeChange = true;
continue;
}
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86SchedBroadwell.td b/contrib/llvm-project/llvm/lib/Target/X86/X86SchedBroadwell.td
index a6ff472aac6f..8e317dc22bd6 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86SchedBroadwell.td
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86SchedBroadwell.td
@@ -255,6 +255,7 @@ defm : X86WriteRes<WriteFMaskedStore64Y, [BWPort0,BWPort4,BWPort237,BWPort15], 5
defm : X86WriteRes<WriteFMove, [BWPort5], 1, [1], 1>;
defm : X86WriteRes<WriteFMoveX, [BWPort5], 1, [1], 1>;
defm : X86WriteRes<WriteFMoveY, [BWPort5], 1, [1], 1>;
+defm : X86WriteResUnsupported<WriteFMoveZ>;
defm : X86WriteRes<WriteEMMS, [BWPort01,BWPort15,BWPort015,BWPort0156], 31, [8,1,21,1], 31>;
defm : BWWriteResPair<WriteFAdd, [BWPort1], 3, [1], 1, 5>; // Floating point add/sub.
@@ -418,6 +419,7 @@ defm : X86WriteRes<WriteVecMaskedStore64Y, [BWPort0,BWPort4,BWPort237,BWPort15],
defm : X86WriteRes<WriteVecMove, [BWPort015], 1, [1], 1>;
defm : X86WriteRes<WriteVecMoveX, [BWPort015], 1, [1], 1>;
defm : X86WriteRes<WriteVecMoveY, [BWPort015], 1, [1], 1>;
+defm : X86WriteResUnsupported<WriteVecMoveZ>;
defm : X86WriteRes<WriteVecMoveToGpr, [BWPort0], 1, [1], 1>;
defm : X86WriteRes<WriteVecMoveFromGpr, [BWPort5], 1, [1], 1>;
@@ -1741,4 +1743,40 @@ def BWSETA_SETBErm : SchedWriteVariant<[
def : InstRW<[BWSETA_SETBErr], (instrs SETCCr)>;
def : InstRW<[BWSETA_SETBErm], (instrs SETCCm)>;
+///////////////////////////////////////////////////////////////////////////////
+// Dependency breaking instructions.
+///////////////////////////////////////////////////////////////////////////////
+
+def : IsZeroIdiomFunction<[
+ // GPR Zero-idioms.
+ DepBreakingClass<[ SUB32rr, SUB64rr, XOR32rr, XOR64rr ], ZeroIdiomPredicate>,
+
+ // SSE Zero-idioms.
+ DepBreakingClass<[
+ // fp variants.
+ XORPSrr, XORPDrr,
+
+ // int variants.
+ PXORrr,
+ PSUBBrr, PSUBWrr, PSUBDrr, PSUBQrr,
+ PCMPGTBrr, PCMPGTDrr, PCMPGTQrr, PCMPGTWrr
+ ], ZeroIdiomPredicate>,
+
+ // AVX Zero-idioms.
+ DepBreakingClass<[
+ // xmm fp variants.
+ VXORPSrr, VXORPDrr,
+
+ // xmm int variants.
+ VPXORrr,
+ VPSUBBrr, VPSUBWrr, VPSUBDrr, VPSUBQrr,
+ VPCMPGTBrr, VPCMPGTWrr, VPCMPGTDrr, VPCMPGTQrr,
+
+ // ymm variants.
+ VXORPSYrr, VXORPDYrr, VPXORYrr,
+ VPSUBBYrr, VPSUBWYrr, VPSUBDYrr, VPSUBQYrr,
+ VPCMPGTBYrr, VPCMPGTWYrr, VPCMPGTDYrr, VPCMPGTQYrr
+ ], ZeroIdiomPredicate>,
+]>;
+
} // SchedModel
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86SchedHaswell.td b/contrib/llvm-project/llvm/lib/Target/X86/X86SchedHaswell.td
index 371a9571ae39..1cd0b3379684 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86SchedHaswell.td
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86SchedHaswell.td
@@ -257,6 +257,7 @@ defm : X86WriteRes<WriteFMaskedStore64Y, [HWPort0,HWPort4,HWPort237,HWPort15], 5
defm : X86WriteRes<WriteFMove, [HWPort5], 1, [1], 1>;
defm : X86WriteRes<WriteFMoveX, [HWPort5], 1, [1], 1>;
defm : X86WriteRes<WriteFMoveY, [HWPort5], 1, [1], 1>;
+defm : X86WriteResUnsupported<WriteFMoveZ>;
defm : X86WriteRes<WriteEMMS, [HWPort01,HWPort15,HWPort015,HWPort0156], 31, [8,1,21,1], 31>;
defm : HWWriteResPair<WriteFAdd, [HWPort1], 3, [1], 1, 5>;
@@ -416,6 +417,7 @@ defm : X86WriteRes<WriteVecMaskedStore64Y, [HWPort0,HWPort4,HWPort237,HWPort15],
defm : X86WriteRes<WriteVecMove, [HWPort015], 1, [1], 1>;
defm : X86WriteRes<WriteVecMoveX, [HWPort015], 1, [1], 1>;
defm : X86WriteRes<WriteVecMoveY, [HWPort015], 1, [1], 1>;
+defm : X86WriteResUnsupported<WriteVecMoveZ>;
defm : X86WriteRes<WriteVecMoveToGpr, [HWPort0], 1, [1], 1>;
defm : X86WriteRes<WriteVecMoveFromGpr, [HWPort5], 1, [1], 1>;
@@ -2030,4 +2032,40 @@ def HWSETA_SETBErm : SchedWriteVariant<[
def : InstRW<[HWSETA_SETBErr], (instrs SETCCr)>;
def : InstRW<[HWSETA_SETBErm], (instrs SETCCm)>;
+///////////////////////////////////////////////////////////////////////////////
+// Dependency breaking instructions.
+///////////////////////////////////////////////////////////////////////////////
+
+def : IsZeroIdiomFunction<[
+ // GPR Zero-idioms.
+ DepBreakingClass<[ SUB32rr, SUB64rr, XOR32rr, XOR64rr ], ZeroIdiomPredicate>,
+
+ // SSE Zero-idioms.
+ DepBreakingClass<[
+ // fp variants.
+ XORPSrr, XORPDrr,
+
+ // int variants.
+ PXORrr,
+ PSUBBrr, PSUBWrr, PSUBDrr, PSUBQrr,
+ PCMPGTBrr, PCMPGTDrr, PCMPGTQrr, PCMPGTWrr
+ ], ZeroIdiomPredicate>,
+
+ // AVX Zero-idioms.
+ DepBreakingClass<[
+ // xmm fp variants.
+ VXORPSrr, VXORPDrr,
+
+ // xmm int variants.
+ VPXORrr,
+ VPSUBBrr, VPSUBWrr, VPSUBDrr, VPSUBQrr,
+ VPCMPGTBrr, VPCMPGTWrr, VPCMPGTDrr, VPCMPGTQrr,
+
+ // ymm variants.
+ VXORPSYrr, VXORPDYrr, VPXORYrr,
+ VPSUBBYrr, VPSUBWYrr, VPSUBDYrr, VPSUBQYrr,
+ VPCMPGTBYrr, VPCMPGTWYrr, VPCMPGTDYrr
+ ], ZeroIdiomPredicate>,
+]>;
+
} // SchedModel
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86SchedIceLake.td b/contrib/llvm-project/llvm/lib/Target/X86/X86SchedIceLake.td
index 789de9eb5751..9fd986e34181 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86SchedIceLake.td
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86SchedIceLake.td
@@ -252,6 +252,7 @@ defm : X86WriteRes<WriteFMaskedStore64Y, [ICXPort237,ICXPort0], 2, [1,1], 2>;
defm : X86WriteRes<WriteFMove, [ICXPort015], 1, [1], 1>;
defm : X86WriteRes<WriteFMoveX, [ICXPort015], 1, [1], 1>;
defm : X86WriteRes<WriteFMoveY, [ICXPort015], 1, [1], 1>;
+defm : X86WriteRes<WriteFMoveZ, [ICXPort05], 1, [1], 1>;
defm : X86WriteRes<WriteEMMS, [ICXPort05,ICXPort0156], 10, [9,1], 10>;
defm : ICXWriteResPair<WriteFAdd, [ICXPort01], 4, [1], 1, 5>; // Floating point add/sub.
@@ -367,6 +368,7 @@ defm : X86WriteRes<WriteVecMaskedStore64Y, [ICXPort237,ICXPort0], 2, [1,1], 2>;
defm : X86WriteRes<WriteVecMove, [ICXPort05], 1, [1], 1>;
defm : X86WriteRes<WriteVecMoveX, [ICXPort015], 1, [1], 1>;
defm : X86WriteRes<WriteVecMoveY, [ICXPort015], 1, [1], 1>;
+defm : X86WriteRes<WriteVecMoveZ, [ICXPort05], 1, [1], 1>;
defm : X86WriteRes<WriteVecMoveToGpr, [ICXPort0], 2, [1], 1>;
defm : X86WriteRes<WriteVecMoveFromGpr, [ICXPort5], 1, [1], 1>;
@@ -2630,4 +2632,48 @@ def ICXSETA_SETBErm : SchedWriteVariant<[
def : InstRW<[ICXSETA_SETBErr], (instrs SETCCr)>;
def : InstRW<[ICXSETA_SETBErm], (instrs SETCCm)>;
+///////////////////////////////////////////////////////////////////////////////
+// Dependency breaking instructions.
+///////////////////////////////////////////////////////////////////////////////
+
+def : IsZeroIdiomFunction<[
+ // GPR Zero-idioms.
+ DepBreakingClass<[ SUB32rr, SUB64rr, XOR32rr, XOR64rr ], ZeroIdiomPredicate>,
+
+ // SSE Zero-idioms.
+ DepBreakingClass<[
+ // fp variants.
+ XORPSrr, XORPDrr,
+
+ // int variants.
+ PXORrr,
+ PSUBBrr, PSUBWrr, PSUBDrr, PSUBQrr,
+ PCMPGTBrr, PCMPGTDrr, PCMPGTQrr, PCMPGTWrr
+ ], ZeroIdiomPredicate>,
+
+ // AVX Zero-idioms.
+ DepBreakingClass<[
+ // xmm fp variants.
+ VXORPSrr, VXORPDrr,
+
+ // xmm int variants.
+ VPXORrr,
+ VPSUBBrr, VPSUBWrr, VPSUBDrr, VPSUBQrr,
+ VPCMPGTBrr, VPCMPGTWrr, VPCMPGTDrr, VPCMPGTQrr,
+
+ // ymm variants.
+ VXORPSYrr, VXORPDYrr, VPXORYrr,
+ VPSUBBYrr, VPSUBWYrr, VPSUBDYrr, VPSUBQYrr,
+ VPCMPGTBYrr, VPCMPGTWYrr, VPCMPGTDYrr, VPCMPGTQYrr,
+
+ // zmm variants.
+ VXORPSZrr, VXORPDZrr, VPXORDZrr, VPXORQZrr,
+ VXORPSZ128rr, VXORPDZ128rr, VPXORDZ128rr, VPXORQZ128rr,
+ VXORPSZ256rr, VXORPDZ256rr, VPXORDZ256rr, VPXORQZ256rr,
+ VPSUBBZrr, VPSUBWZrr, VPSUBDZrr, VPSUBQZrr,
+ VPSUBBZ128rr, VPSUBWZ128rr, VPSUBDZ128rr, VPSUBQZ128rr,
+ VPSUBBZ256rr, VPSUBWZ256rr, VPSUBDZ256rr, VPSUBQZ256rr,
+ ], ZeroIdiomPredicate>,
+]>;
+
} // SchedModel
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86SchedSandyBridge.td b/contrib/llvm-project/llvm/lib/Target/X86/X86SchedSandyBridge.td
index af5c0540deb5..7e619a3a8722 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86SchedSandyBridge.td
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86SchedSandyBridge.td
@@ -223,6 +223,7 @@ defm : X86WriteRes<WriteFMaskedStore64Y, [SBPort4,SBPort01,SBPort23], 5, [1,1,1]
defm : X86WriteRes<WriteFMove, [SBPort5], 1, [1], 1>;
defm : X86WriteRes<WriteFMoveX, [SBPort5], 1, [1], 1>;
defm : X86WriteRes<WriteFMoveY, [SBPort5], 1, [1], 1>;
+defm : X86WriteRes<WriteFMoveZ, [SBPort5], 1, [1], 1>;
defm : X86WriteRes<WriteEMMS, [SBPort015], 31, [31], 31>;
defm : SBWriteResPair<WriteFAdd, [SBPort1], 3, [1], 1, 6>;
@@ -380,6 +381,7 @@ defm : X86WriteRes<WriteVecMaskedStore64Y, [SBPort4,SBPort01,SBPort23], 5, [1,1,
defm : X86WriteRes<WriteVecMove, [SBPort05], 1, [1], 1>;
defm : X86WriteRes<WriteVecMoveX, [SBPort015], 1, [1], 1>;
defm : X86WriteRes<WriteVecMoveY, [SBPort05], 1, [1], 1>;
+defm : X86WriteRes<WriteVecMoveZ, [SBPort05], 1, [1], 1>;
defm : X86WriteRes<WriteVecMoveToGpr, [SBPort0], 2, [1], 1>;
defm : X86WriteRes<WriteVecMoveFromGpr, [SBPort5], 1, [1], 1>;
@@ -1230,4 +1232,35 @@ def SBSETA_SETBErm : SchedWriteVariant<[
def : InstRW<[SBSETA_SETBErr], (instrs SETCCr)>;
def : InstRW<[SBSETA_SETBErm], (instrs SETCCm)>;
+///////////////////////////////////////////////////////////////////////////////
+// Dependency breaking instructions.
+///////////////////////////////////////////////////////////////////////////////
+
+def : IsZeroIdiomFunction<[
+ // GPR Zero-idioms.
+ DepBreakingClass<[ SUB32rr, SUB64rr, XOR32rr, XOR64rr ], ZeroIdiomPredicate>,
+
+ // SSE Zero-idioms.
+ DepBreakingClass<[
+ // fp variants.
+ XORPSrr, XORPDrr,
+
+ // int variants.
+ PXORrr,
+ PSUBBrr, PSUBWrr, PSUBDrr, PSUBQrr,
+ PCMPGTBrr, PCMPGTDrr, PCMPGTQrr, PCMPGTWrr
+ ], ZeroIdiomPredicate>,
+
+ // AVX Zero-idioms.
+ DepBreakingClass<[
+ // xmm fp variants.
+ VXORPSrr, VXORPDrr,
+
+ // xmm int variants.
+ VPXORrr,
+ VPSUBBrr, VPSUBWrr, VPSUBDrr, VPSUBQrr,
+ VPCMPGTBrr, VPCMPGTWrr, VPCMPGTDrr, VPCMPGTQrr,
+ ], ZeroIdiomPredicate>,
+]>;
+
} // SchedModel
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86SchedSkylakeClient.td b/contrib/llvm-project/llvm/lib/Target/X86/X86SchedSkylakeClient.td
index b3c13c72dd01..0a88bac5aa66 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86SchedSkylakeClient.td
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86SchedSkylakeClient.td
@@ -244,6 +244,7 @@ defm : X86WriteRes<WriteFMaskedStore64Y, [SKLPort237,SKLPort0], 2, [1,1], 2>;
defm : X86WriteRes<WriteFMove, [SKLPort015], 1, [1], 1>;
defm : X86WriteRes<WriteFMoveX, [SKLPort015], 1, [1], 1>;
defm : X86WriteRes<WriteFMoveY, [SKLPort015], 1, [1], 1>;
+defm : X86WriteResUnsupported<WriteFMoveZ>;
defm : X86WriteRes<WriteEMMS, [SKLPort05,SKLPort0156], 10, [9,1], 10>;
defm : SKLWriteResPair<WriteFAdd, [SKLPort01], 4, [1], 1, 5>; // Floating point add/sub.
@@ -359,6 +360,7 @@ defm : X86WriteRes<WriteVecMaskedStore64Y, [SKLPort237,SKLPort0], 2, [1,1], 2>;
defm : X86WriteRes<WriteVecMove, [SKLPort05], 1, [1], 1>;
defm : X86WriteRes<WriteVecMoveX, [SKLPort015], 1, [1], 1>;
defm : X86WriteRes<WriteVecMoveY, [SKLPort015], 1, [1], 1>;
+defm : X86WriteResUnsupported<WriteVecMoveZ>;
defm : X86WriteRes<WriteVecMoveToGpr, [SKLPort0], 2, [1], 1>;
defm : X86WriteRes<WriteVecMoveFromGpr, [SKLPort5], 1, [1], 1>;
@@ -1901,4 +1903,40 @@ def SKLSETA_SETBErm : SchedWriteVariant<[
def : InstRW<[SKLSETA_SETBErr], (instrs SETCCr)>;
def : InstRW<[SKLSETA_SETBErm], (instrs SETCCm)>;
+///////////////////////////////////////////////////////////////////////////////
+// Dependency breaking instructions.
+///////////////////////////////////////////////////////////////////////////////
+
+def : IsZeroIdiomFunction<[
+ // GPR Zero-idioms.
+ DepBreakingClass<[ SUB32rr, SUB64rr, XOR32rr, XOR64rr ], ZeroIdiomPredicate>,
+
+ // SSE Zero-idioms.
+ DepBreakingClass<[
+ // fp variants.
+ XORPSrr, XORPDrr,
+
+ // int variants.
+ PXORrr,
+ PSUBBrr, PSUBWrr, PSUBDrr, PSUBQrr,
+ PCMPGTBrr, PCMPGTDrr, PCMPGTQrr, PCMPGTWrr
+ ], ZeroIdiomPredicate>,
+
+ // AVX Zero-idioms.
+ DepBreakingClass<[
+ // xmm fp variants.
+ VXORPSrr, VXORPDrr,
+
+ // xmm int variants.
+ VPXORrr,
+ VPSUBBrr, VPSUBWrr, VPSUBDrr, VPSUBQrr,
+ VPCMPGTBrr, VPCMPGTWrr, VPCMPGTDrr, VPCMPGTQrr,
+
+ // ymm variants.
+ VXORPSYrr, VXORPDYrr, VPXORYrr,
+ VPSUBBYrr, VPSUBWYrr, VPSUBDYrr, VPSUBQYrr,
+ VPCMPGTBYrr, VPCMPGTWYrr, VPCMPGTDYrr, VPCMPGTQYrr
+ ], ZeroIdiomPredicate>,
+]>;
+
} // SchedModel
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86SchedSkylakeServer.td b/contrib/llvm-project/llvm/lib/Target/X86/X86SchedSkylakeServer.td
index 74f9da158353..b28a18f0dcd7 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86SchedSkylakeServer.td
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86SchedSkylakeServer.td
@@ -244,6 +244,7 @@ defm : X86WriteRes<WriteFMaskedStore64Y, [SKXPort237,SKXPort0], 2, [1,1], 2>;
defm : X86WriteRes<WriteFMove, [SKXPort015], 1, [1], 1>;
defm : X86WriteRes<WriteFMoveX, [SKXPort015], 1, [1], 1>;
defm : X86WriteRes<WriteFMoveY, [SKXPort015], 1, [1], 1>;
+defm : X86WriteRes<WriteFMoveZ, [SKXPort05], 1, [1], 1>;
defm : X86WriteRes<WriteEMMS, [SKXPort05,SKXPort0156], 10, [9,1], 10>;
defm : SKXWriteResPair<WriteFAdd, [SKXPort01], 4, [1], 1, 5>; // Floating point add/sub.
@@ -359,6 +360,7 @@ defm : X86WriteRes<WriteVecMaskedStore64Y, [SKXPort237,SKXPort0], 2, [1,1], 2>;
defm : X86WriteRes<WriteVecMove, [SKXPort05], 1, [1], 1>;
defm : X86WriteRes<WriteVecMoveX, [SKXPort015], 1, [1], 1>;
defm : X86WriteRes<WriteVecMoveY, [SKXPort015], 1, [1], 1>;
+defm : X86WriteRes<WriteVecMoveZ, [SKXPort05], 1, [1], 1>;
defm : X86WriteRes<WriteVecMoveToGpr, [SKXPort0], 2, [1], 1>;
defm : X86WriteRes<WriteVecMoveFromGpr, [SKXPort5], 1, [1], 1>;
@@ -2613,4 +2615,48 @@ def SKXSETA_SETBErm : SchedWriteVariant<[
def : InstRW<[SKXSETA_SETBErr], (instrs SETCCr)>;
def : InstRW<[SKXSETA_SETBErm], (instrs SETCCm)>;
+///////////////////////////////////////////////////////////////////////////////
+// Dependency breaking instructions.
+///////////////////////////////////////////////////////////////////////////////
+
+def : IsZeroIdiomFunction<[
+ // GPR Zero-idioms.
+ DepBreakingClass<[ SUB32rr, SUB64rr, XOR32rr, XOR64rr ], ZeroIdiomPredicate>,
+
+ // SSE Zero-idioms.
+ DepBreakingClass<[
+ // fp variants.
+ XORPSrr, XORPDrr,
+
+ // int variants.
+ PXORrr,
+ PSUBBrr, PSUBWrr, PSUBDrr, PSUBQrr,
+ PCMPGTBrr, PCMPGTDrr, PCMPGTQrr, PCMPGTWrr
+ ], ZeroIdiomPredicate>,
+
+ // AVX Zero-idioms.
+ DepBreakingClass<[
+ // xmm fp variants.
+ VXORPSrr, VXORPDrr,
+
+ // xmm int variants.
+ VPXORrr,
+ VPSUBBrr, VPSUBWrr, VPSUBDrr, VPSUBQrr,
+ VPCMPGTBrr, VPCMPGTWrr, VPCMPGTDrr, VPCMPGTQrr,
+
+ // ymm variants.
+ VXORPSYrr, VXORPDYrr, VPXORYrr,
+ VPSUBBYrr, VPSUBWYrr, VPSUBDYrr, VPSUBQYrr,
+ VPCMPGTBYrr, VPCMPGTWYrr, VPCMPGTDYrr, VPCMPGTQYrr,
+
+ // zmm variants.
+ VXORPSZrr, VXORPDZrr, VPXORDZrr, VPXORQZrr,
+ VXORPSZ128rr, VXORPDZ128rr, VPXORDZ128rr, VPXORQZ128rr,
+ VXORPSZ256rr, VXORPDZ256rr, VPXORDZ256rr, VPXORQZ256rr,
+ VPSUBBZrr, VPSUBWZrr, VPSUBDZrr, VPSUBQZrr,
+ VPSUBBZ128rr, VPSUBWZ128rr, VPSUBDZ128rr, VPSUBQZ128rr,
+ VPSUBBZ256rr, VPSUBWZ256rr, VPSUBDZ256rr, VPSUBQZ256rr,
+ ], ZeroIdiomPredicate>,
+]>;
+
} // SchedModel
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86Schedule.td b/contrib/llvm-project/llvm/lib/Target/X86/X86Schedule.td
index 1cb48175260a..d57e14715a4e 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86Schedule.td
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86Schedule.td
@@ -239,6 +239,7 @@ def WriteFMaskedStore64Y : SchedWrite;
def WriteFMove : SchedWrite;
def WriteFMoveX : SchedWrite;
def WriteFMoveY : SchedWrite;
+def WriteFMoveZ : SchedWrite;
defm WriteFAdd : X86SchedWritePair<ReadAfterVecLd>; // Floating point add/sub.
defm WriteFAddX : X86SchedWritePair<ReadAfterVecXLd>; // Floating point add/sub (XMM).
@@ -354,6 +355,7 @@ def WriteVecMaskedStore64Y : SchedWrite;
def WriteVecMove : SchedWrite;
def WriteVecMoveX : SchedWrite;
def WriteVecMoveY : SchedWrite;
+def WriteVecMoveZ : SchedWrite;
def WriteVecMoveToGpr : SchedWrite;
def WriteVecMoveFromGpr : SchedWrite;
@@ -516,9 +518,11 @@ def WriteFMoveLSX
: X86SchedWriteMoveLS<WriteFMoveX, WriteFLoadX, WriteFStoreX>;
def WriteFMoveLSY
: X86SchedWriteMoveLS<WriteFMoveY, WriteFLoadY, WriteFStoreY>;
+def WriteFMoveLSZ
+ : X86SchedWriteMoveLS<WriteFMoveZ, WriteFLoadY, WriteFStoreY>;
def SchedWriteFMoveLS
: X86SchedWriteMoveLSWidths<WriteFMoveLS, WriteFMoveLSX,
- WriteFMoveLSY, WriteFMoveLSY>;
+ WriteFMoveLSY, WriteFMoveLSZ>;
def WriteFMoveLSNT
: X86SchedWriteMoveLS<WriteFMove, WriteFLoad, WriteFStoreNT>;
@@ -536,9 +540,11 @@ def WriteVecMoveLSX
: X86SchedWriteMoveLS<WriteVecMoveX, WriteVecLoadX, WriteVecStoreX>;
def WriteVecMoveLSY
: X86SchedWriteMoveLS<WriteVecMoveY, WriteVecLoadY, WriteVecStoreY>;
+def WriteVecMoveLSZ
+ : X86SchedWriteMoveLS<WriteVecMoveZ, WriteVecLoadY, WriteVecStoreY>;
def SchedWriteVecMoveLS
: X86SchedWriteMoveLSWidths<WriteVecMoveLS, WriteVecMoveLSX,
- WriteVecMoveLSY, WriteVecMoveLSY>;
+ WriteVecMoveLSY, WriteVecMoveLSZ>;
def WriteVecMoveLSNT
: X86SchedWriteMoveLS<WriteVecMove, WriteVecLoadNT, WriteVecStoreNT>;
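
The X86Schedule.td hunk above adds dedicated 512-bit move classes (WriteFMoveZ, WriteVecMoveZ) and routes the ZMM slot of SchedWriteFMoveLS/SchedWriteVecMoveLS to them instead of falling back to the YMM class; each per-CPU model in the files above then either gives the Z class a port assignment (Ice Lake, Skylake Server, Sandy Bridge) or marks it unsupported. A rough standalone C++ sketch of that width-to-class dispatch, with stand-in enum names rather than the TableGen-generated code:

#include <iostream>

enum class Width { Scalar, XMM, YMM, ZMM };
enum class SchedClass { WriteFMove, WriteFMoveX, WriteFMoveY, WriteFMoveZ };

// Each vector width now selects its own scheduling class; previously the
// ZMM case reused the YMM class.
SchedClass classifyFpMove(Width W) {
  switch (W) {
  case Width::Scalar: return SchedClass::WriteFMove;
  case Width::XMM:    return SchedClass::WriteFMoveX;
  case Width::YMM:    return SchedClass::WriteFMoveY;
  case Width::ZMM:    return SchedClass::WriteFMoveZ;
  }
  return SchedClass::WriteFMove; // not reached for valid widths
}

int main() {
  std::cout << (classifyFpMove(Width::ZMM) == SchedClass::WriteFMoveZ) << "\n";
}
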
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleAtom.td b/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleAtom.td
index 0fedfc01092c..8ae8e574f87a 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleAtom.td
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleAtom.td
@@ -229,6 +229,7 @@ defm : X86WriteResUnsupported<WriteFMaskedStore64Y>;
def : WriteRes<WriteFMove, [AtomPort01]>;
def : WriteRes<WriteFMoveX, [AtomPort01]>;
defm : X86WriteResUnsupported<WriteFMoveY>;
+defm : X86WriteResUnsupported<WriteFMoveZ>;
defm : X86WriteRes<WriteEMMS, [AtomPort01], 5, [5], 1>;
@@ -382,6 +383,7 @@ defm : X86WriteResUnsupported<WriteVecMaskedStore64Y>;
def : WriteRes<WriteVecMove, [AtomPort0]>;
def : WriteRes<WriteVecMoveX, [AtomPort01]>;
defm : X86WriteResUnsupported<WriteVecMoveY>;
+defm : X86WriteResUnsupported<WriteVecMoveZ>;
defm : X86WriteRes<WriteVecMoveToGpr, [AtomPort0], 3, [3], 1>;
defm : X86WriteRes<WriteVecMoveFromGpr, [AtomPort0], 1, [1], 1>;
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleBdVer2.td b/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleBdVer2.td
index 0f6f24f9f1fe..cb75c3660728 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleBdVer2.td
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleBdVer2.td
@@ -772,6 +772,7 @@ defm : PdWriteRes<WriteFMaskedStore64Y, [PdStore, PdFPU01, PdFPFMA], 6, [2, 2
defm : PdWriteRes<WriteFMove, [PdFPU01, PdFPFMA]>;
defm : PdWriteRes<WriteFMoveX, [PdFPU01, PdFPFMA], 1, [1, 2]>;
defm : PdWriteRes<WriteFMoveY, [PdFPU01, PdFPFMA], 2, [2, 2], 2>;
+defm : X86WriteResUnsupported<WriteFMoveZ>;
defm : PdWriteRes<WriteEMMS, [PdFPU01, PdFPFMA], 2>;
@@ -1107,6 +1108,7 @@ defm : X86WriteResUnsupported<WriteVecMaskedStore64Y>;
defm : PdWriteRes<WriteVecMove, [PdFPU01, PdFPMAL], 2>;
defm : PdWriteRes<WriteVecMoveX, [PdFPU01, PdFPMAL], 1, [1, 2]>;
defm : PdWriteRes<WriteVecMoveY, [PdFPU01, PdFPMAL], 2, [2, 2], 2>;
+defm : X86WriteResUnsupported<WriteVecMoveZ>;
def PdWriteMOVDQArr : SchedWriteRes<[PdFPU01, PdFPMAL]> {
}
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleBtVer2.td b/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleBtVer2.td
index a070da34cab5..4b2fa87a25b5 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleBtVer2.td
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleBtVer2.td
@@ -525,6 +525,7 @@ defm : X86WriteRes<WriteFMaskedStore64Y, [JFPU0, JFPA, JFPU1, JSTC, JLAGU, JSAGU
defm : X86WriteRes<WriteFMove, [JFPU01, JFPX], 1, [1, 1], 1>;
defm : X86WriteRes<WriteFMoveX, [JFPU01, JFPX], 1, [1, 1], 1>;
defm : X86WriteRes<WriteFMoveY, [JFPU01, JFPX], 1, [2, 2], 2>;
+defm : X86WriteResUnsupported<WriteFMoveZ>;
defm : X86WriteRes<WriteEMMS, [JFPU01, JFPX], 2, [1, 1], 1>;
@@ -682,6 +683,7 @@ defm : X86WriteResUnsupported<WriteVecMaskedStore64Y>;
defm : X86WriteRes<WriteVecMove, [JFPU01, JVALU], 1, [1, 1], 1>;
defm : X86WriteRes<WriteVecMoveX, [JFPU01, JVALU], 1, [1, 1], 1>;
defm : X86WriteRes<WriteVecMoveY, [JFPU01, JVALU], 1, [2, 2], 2>;
+defm : X86WriteResUnsupported<WriteVecMoveZ>;
defm : X86WriteRes<WriteVecMoveToGpr, [JFPU0, JFPA, JALU0], 4, [1, 1, 1], 1>;
defm : X86WriteRes<WriteVecMoveFromGpr, [JFPU01, JFPX], 8, [1, 1], 2>;
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleSLM.td b/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleSLM.td
index 36e5b55a4194..52605c031617 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleSLM.td
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleSLM.td
@@ -200,6 +200,7 @@ def : WriteRes<WriteFMaskedStore64Y, [SLM_MEC_RSV]>;
def : WriteRes<WriteFMove, [SLM_FPC_RSV01]>;
def : WriteRes<WriteFMoveX, [SLM_FPC_RSV01]>;
def : WriteRes<WriteFMoveY, [SLM_FPC_RSV01]>;
+defm : X86WriteResUnsupported<WriteFMoveZ>;
defm : X86WriteRes<WriteEMMS, [SLM_FPC_RSV01], 10, [10], 9>;
defm : SLMWriteResPair<WriteFAdd, [SLM_FPC_RSV1], 3>;
@@ -345,6 +346,7 @@ def : WriteRes<WriteVecMaskedStore64Y, [SLM_MEC_RSV]>;
def : WriteRes<WriteVecMove, [SLM_FPC_RSV01]>;
def : WriteRes<WriteVecMoveX, [SLM_FPC_RSV01]>;
def : WriteRes<WriteVecMoveY, [SLM_FPC_RSV01]>;
+defm : X86WriteResUnsupported<WriteVecMoveZ>;
def : WriteRes<WriteVecMoveToGpr, [SLM_IEC_RSV01]>;
def : WriteRes<WriteVecMoveFromGpr, [SLM_IEC_RSV01]>;
@@ -480,4 +482,22 @@ def: InstRW<[SLMWriteResGroup1rm], (instrs MMX_PADDQrm, PADDQrm,
MMX_PSUBQrm, PSUBQrm,
PCMPEQQrm)>;
+///////////////////////////////////////////////////////////////////////////////
+// Dependency breaking instructions.
+///////////////////////////////////////////////////////////////////////////////
+
+def : IsZeroIdiomFunction<[
+ // GPR Zero-idioms.
+ DepBreakingClass<[ XOR32rr ], ZeroIdiomPredicate>,
+
+ // SSE Zero-idioms.
+ DepBreakingClass<[
+ // fp variants.
+ XORPSrr, XORPDrr,
+
+ // int variants.
+ PXORrr,
+ ], ZeroIdiomPredicate>,
+]>;
+
} // SchedModel
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleZnver1.td b/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleZnver1.td
index 4343e1ed45d1..fe0484afd227 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleZnver1.td
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleZnver1.td
@@ -286,6 +286,7 @@ defm : X86WriteRes<WriteFMaskedStore64Y, [ZnAGU,ZnFPU01], 5, [1,2], 2>;
defm : X86WriteRes<WriteFMove, [ZnFPU], 1, [1], 1>;
defm : X86WriteRes<WriteFMoveX, [ZnFPU], 1, [1], 1>;
defm : X86WriteRes<WriteFMoveY, [ZnFPU], 1, [1], 1>;
+defm : X86WriteResUnsupported<WriteFMoveZ>;
defm : ZnWriteResFpuPair<WriteFAdd, [ZnFPU0], 3>;
defm : ZnWriteResFpuPair<WriteFAddX, [ZnFPU0], 3>;
@@ -404,6 +405,7 @@ defm : X86WriteRes<WriteVecMaskedStore64Y, [ZnAGU,ZnFPU01], 5, [1,2], 2>;
defm : X86WriteRes<WriteVecMove, [ZnFPU], 1, [1], 1>;
defm : X86WriteRes<WriteVecMoveX, [ZnFPU], 1, [1], 1>;
defm : X86WriteRes<WriteVecMoveY, [ZnFPU], 2, [1], 2>;
+defm : X86WriteResUnsupported<WriteVecMoveZ>;
defm : X86WriteRes<WriteVecMoveToGpr, [ZnFPU2], 2, [1], 1>;
defm : X86WriteRes<WriteVecMoveFromGpr, [ZnFPU2], 3, [1], 1>;
defm : X86WriteRes<WriteEMMS, [ZnFPU], 2, [1], 1>;
@@ -1541,4 +1543,83 @@ def : InstRW<[WriteMicrocoded], (instrs VZEROUPPER)>;
// VZEROALL.
def : InstRW<[WriteMicrocoded], (instrs VZEROALL)>;
+///////////////////////////////////////////////////////////////////////////////
+// Dependency breaking instructions.
+///////////////////////////////////////////////////////////////////////////////
+
+def : IsZeroIdiomFunction<[
+ // GPR Zero-idioms.
+ DepBreakingClass<[
+ SUB32rr, SUB64rr,
+ XOR32rr, XOR64rr
+ ], ZeroIdiomPredicate>,
+
+ // MMX Zero-idioms.
+ DepBreakingClass<[
+ MMX_PXORrr, MMX_PANDNrr, MMX_PSUBBrr,
+ MMX_PSUBDrr, MMX_PSUBQrr, MMX_PSUBWrr,
+ MMX_PSUBSBrr, MMX_PSUBSWrr, MMX_PSUBUSBrr, MMX_PSUBUSWrr,
+ MMX_PCMPGTBrr, MMX_PCMPGTDrr, MMX_PCMPGTWrr
+ ], ZeroIdiomPredicate>,
+
+ // SSE Zero-idioms.
+ DepBreakingClass<[
+ // fp variants.
+ XORPSrr, XORPDrr, ANDNPSrr, ANDNPDrr,
+
+ // int variants.
+ PXORrr, PANDNrr,
+ PSUBBrr, PSUBWrr, PSUBDrr, PSUBQrr,
+ PCMPGTBrr, PCMPGTDrr, PCMPGTQrr, PCMPGTWrr
+ ], ZeroIdiomPredicate>,
+
+ // AVX XMM Zero-idioms.
+ DepBreakingClass<[
+ // fp variants.
+ VXORPSrr, VXORPDrr, VANDNPSrr, VANDNPDrr,
+
+ // int variants.
+ VPXORrr, VPANDNrr,
+ VPSUBBrr, VPSUBWrr, VPSUBDrr, VPSUBQrr,
+ VPCMPGTBrr, VPCMPGTWrr, VPCMPGTDrr, VPCMPGTQrr
+ ], ZeroIdiomPredicate>,
+
+ // AVX YMM Zero-idioms.
+ DepBreakingClass<[
+ // fp variants
+ VXORPSYrr, VXORPDYrr, VANDNPSYrr, VANDNPDYrr,
+
+ // int variants
+ VPXORYrr, VPANDNYrr,
+ VPSUBBYrr, VPSUBWYrr, VPSUBDYrr, VPSUBQYrr,
+ VPCMPGTBYrr, VPCMPGTWYrr, VPCMPGTDYrr, VPCMPGTQYrr
+ ], ZeroIdiomPredicate>
+]>;
+
+def : IsDepBreakingFunction<[
+ // GPR
+ DepBreakingClass<[ SBB32rr, SBB64rr ], ZeroIdiomPredicate>,
+ DepBreakingClass<[ CMP32rr, CMP64rr ], CheckSameRegOperand<0, 1> >,
+
+ // MMX
+ DepBreakingClass<[
+ MMX_PCMPEQBrr, MMX_PCMPEQWrr, MMX_PCMPEQDrr
+ ], ZeroIdiomPredicate>,
+
+ // SSE
+ DepBreakingClass<[
+ PCMPEQBrr, PCMPEQWrr, PCMPEQDrr, PCMPEQQrr
+ ], ZeroIdiomPredicate>,
+
+ // AVX XMM
+ DepBreakingClass<[
+ VPCMPEQBrr, VPCMPEQWrr, VPCMPEQDrr, VPCMPEQQrr
+ ], ZeroIdiomPredicate>,
+
+ // AVX YMM
+ DepBreakingClass<[
+ VPCMPEQBYrr, VPCMPEQWYrr, VPCMPEQDYrr, VPCMPEQQYrr
+ ], ZeroIdiomPredicate>,
+]>;
+
} // SchedModel
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleZnver2.td b/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleZnver2.td
index 96d2837880c7..38908a987595 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleZnver2.td
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleZnver2.td
@@ -274,6 +274,7 @@ defm : X86WriteRes<WriteFStoreNTY, [Zn2AGU], 1, [1], 1>;
defm : X86WriteRes<WriteFMove, [Zn2FPU], 1, [1], 1>;
defm : X86WriteRes<WriteFMoveX, [Zn2FPU], 1, [1], 1>;
defm : X86WriteRes<WriteFMoveY, [Zn2FPU], 1, [1], 1>;
+defm : X86WriteResUnsupported<WriteFMoveZ>;
defm : Zn2WriteResFpuPair<WriteFAdd, [Zn2FPU0], 3>;
defm : Zn2WriteResFpuPair<WriteFAddX, [Zn2FPU0], 3>;
@@ -388,6 +389,7 @@ defm : X86WriteRes<WriteVecMaskedStore64Y, [Zn2AGU,Zn2FPU01], 5, [1,2], 2>;
defm : X86WriteRes<WriteVecMove, [Zn2FPU], 1, [1], 1>;
defm : X86WriteRes<WriteVecMoveX, [Zn2FPU], 1, [1], 1>;
defm : X86WriteRes<WriteVecMoveY, [Zn2FPU], 2, [1], 2>;
+defm : X86WriteResUnsupported<WriteVecMoveZ>;
defm : X86WriteRes<WriteVecMoveToGpr, [Zn2FPU2], 2, [1], 1>;
defm : X86WriteRes<WriteVecMoveFromGpr, [Zn2FPU2], 3, [1], 1>;
defm : X86WriteRes<WriteEMMS, [Zn2FPU], 2, [1], 1>;
@@ -1530,4 +1532,83 @@ def : InstRW<[WriteALU], (instrs VZEROUPPER)>;
// VZEROALL.
def : InstRW<[WriteMicrocoded], (instrs VZEROALL)>;
+///////////////////////////////////////////////////////////////////////////////
+// Dependency breaking instructions.
+///////////////////////////////////////////////////////////////////////////////
+
+def : IsZeroIdiomFunction<[
+ // GPR Zero-idioms.
+ DepBreakingClass<[
+ SUB32rr, SUB64rr,
+ XOR32rr, XOR64rr
+ ], ZeroIdiomPredicate>,
+
+ // MMX Zero-idioms.
+ DepBreakingClass<[
+ MMX_PXORrr, MMX_PANDNrr, MMX_PSUBBrr,
+ MMX_PSUBDrr, MMX_PSUBQrr, MMX_PSUBWrr,
+ MMX_PSUBSBrr, MMX_PSUBSWrr, MMX_PSUBUSBrr, MMX_PSUBUSWrr,
+ MMX_PCMPGTBrr, MMX_PCMPGTDrr, MMX_PCMPGTWrr
+ ], ZeroIdiomPredicate>,
+
+ // SSE Zero-idioms.
+ DepBreakingClass<[
+ // fp variants.
+ XORPSrr, XORPDrr, ANDNPSrr, ANDNPDrr,
+
+ // int variants.
+ PXORrr, PANDNrr,
+ PSUBBrr, PSUBWrr, PSUBDrr, PSUBQrr,
+ PCMPGTBrr, PCMPGTDrr, PCMPGTQrr, PCMPGTWrr
+ ], ZeroIdiomPredicate>,
+
+ // AVX XMM Zero-idioms.
+ DepBreakingClass<[
+ // fp variants.
+ VXORPSrr, VXORPDrr, VANDNPSrr, VANDNPDrr,
+
+ // int variants.
+ VPXORrr, VPANDNrr,
+ VPSUBBrr, VPSUBWrr, VPSUBDrr, VPSUBQrr,
+ VPCMPGTBrr, VPCMPGTWrr, VPCMPGTDrr, VPCMPGTQrr
+ ], ZeroIdiomPredicate>,
+
+ // AVX YMM Zero-idioms.
+ DepBreakingClass<[
+ // fp variants
+ VXORPSYrr, VXORPDYrr, VANDNPSYrr, VANDNPDYrr,
+
+ // int variants
+ VPXORYrr, VPANDNYrr,
+ VPSUBBYrr, VPSUBWYrr, VPSUBDYrr, VPSUBQYrr,
+ VPCMPGTBYrr, VPCMPGTWYrr, VPCMPGTDYrr, VPCMPGTQYrr
+ ], ZeroIdiomPredicate>
+]>;
+
+def : IsDepBreakingFunction<[
+ // GPR
+ DepBreakingClass<[ SBB32rr, SBB64rr ], ZeroIdiomPredicate>,
+ DepBreakingClass<[ CMP32rr, CMP64rr ], CheckSameRegOperand<0, 1> >,
+
+ // MMX
+ DepBreakingClass<[
+ MMX_PCMPEQBrr, MMX_PCMPEQWrr, MMX_PCMPEQDrr
+ ], ZeroIdiomPredicate>,
+
+ // SSE
+ DepBreakingClass<[
+ PCMPEQBrr, PCMPEQWrr, PCMPEQDrr, PCMPEQQrr
+ ], ZeroIdiomPredicate>,
+
+ // AVX XMM
+ DepBreakingClass<[
+ VPCMPEQBrr, VPCMPEQWrr, VPCMPEQDrr, VPCMPEQQrr
+ ], ZeroIdiomPredicate>,
+
+ // AVX YMM
+ DepBreakingClass<[
+ VPCMPEQBYrr, VPCMPEQWYrr, VPCMPEQDYrr, VPCMPEQQYrr
+ ], ZeroIdiomPredicate>,
+]>;
+
} // SchedModel
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleZnver3.td b/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleZnver3.td
index f4e03ac11f0b..02f7f8376fdb 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleZnver3.td
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleZnver3.td
@@ -1446,10 +1446,12 @@ defm : Zn3WriteResInt<WriteXCHG, [Zn3ALU0123], 0, [8], 2>; // Compare+Exc
defm : Zn3WriteResXMM<WriteFMove, [Zn3FPVMisc0123], 1, [1], 1>; // Empty sched class
defm : Zn3WriteResXMM<WriteFMoveX, [], 0, [], 1>;
defm : Zn3WriteResYMM<WriteFMoveY, [], 0, [], 1>;
+defm : X86WriteResUnsupported<WriteFMoveZ>;
defm : Zn3WriteResXMM<WriteVecMove, [Zn3FPFMisc0123], 1, [1], 1>; // MMX
defm : Zn3WriteResXMM<WriteVecMoveX, [], 0, [], 1>;
defm : Zn3WriteResYMM<WriteVecMoveY, [], 0, [], 1>;
+defm : X86WriteResUnsupported<WriteVecMoveZ>;
def : IsOptimizableRegisterMove<[
InstructionEquivalenceClass<[
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/contrib/llvm-project/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
index 83a4a025f518..dba11e8b4000 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -1139,7 +1139,7 @@ X86SpeculativeLoadHardeningPass::tracePredStateThroughIndirectBranches(
// branch back to itself. We can do this here because at this point, every
// predecessor of this block has an available value. This is basically just
// automating the construction of a PHI node for this target.
- unsigned TargetReg = TargetAddrSSA.GetValueInMiddleOfBlock(&MBB);
+ Register TargetReg = TargetAddrSSA.GetValueInMiddleOfBlock(&MBB);
// Insert a comparison of the incoming target register with this block's
// address. This also requires us to mark the block as having its address
@@ -1642,7 +1642,7 @@ void X86SpeculativeLoadHardeningPass::hardenLoadAddr(
return;
// Compute the current predicate state.
- unsigned StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
+ Register StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
auto InsertPt = MI.getIterator();
@@ -1913,7 +1913,7 @@ unsigned X86SpeculativeLoadHardeningPass::hardenValueInRegister(
auto *RC = MRI->getRegClass(Reg);
int Bytes = TRI->getRegSizeInBits(*RC) / 8;
- unsigned StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
+ Register StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
assert((Bytes == 1 || Bytes == 2 || Bytes == 4 || Bytes == 8) &&
"Unknown register size");
@@ -2078,7 +2078,7 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughCall(
// First, we transfer the predicate state into the called function by merging
// it into the stack pointer. This will kill the current def of the state.
- unsigned StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
+ Register StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
mergePredStateIntoSP(MBB, InsertPt, Loc, StateReg);
// If this call is also a return, it is a tail call and we don't need anything
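
The X86SpeculativeLoadHardening.cpp hunks (and the XCore hunks further down) only change the declared type of these values from a raw unsigned to LLVM's Register wrapper; the behaviour should be unchanged, the wrapper simply keeps register numbers from being mixed with arbitrary integers. A toy stand-in for that idea, not LLVM's actual Register class:

#include <cassert>
#include <iostream>

// Thin wrapper around an unsigned register id: same storage, distinct type.
class ToyRegister {
  unsigned Id = 0; // 0 means "no register" in this toy model
public:
  ToyRegister() = default;
  explicit ToyRegister(unsigned Id) : Id(Id) {}
  unsigned id() const { return Id; }
  bool isValid() const { return Id != 0; }
};

ToyRegister pickScratchRegister() { return ToyRegister(42); }

int main() {
  ToyRegister R = pickScratchRegister();
  assert(R.isValid());
  std::cout << R.id() << "\n";
}
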
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86TargetMachine.cpp b/contrib/llvm-project/llvm/lib/Target/X86/X86TargetMachine.cpp
index 78bc5519c23f..e3d0128dd73d 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -127,7 +127,7 @@ static std::string computeDataLayout(const Triple &TT) {
// Some ABIs align long double to 128 bits, others to 32.
if (TT.isOSNaCl() || TT.isOSIAMCU())
; // No f80
- else if (TT.isArch64Bit() || TT.isOSDarwin())
+ else if (TT.isArch64Bit() || TT.isOSDarwin() || TT.isWindowsMSVCEnvironment())
Ret += "-f80:128";
else
Ret += "-f80:32";
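
This X86TargetMachine.cpp change makes Windows MSVC targets join 64-bit and Darwin targets in aligning the x87 f80 type to 128 bits in the data layout string. A standalone sketch of the same selection logic with a simplified triple type (the real code queries llvm::Triple):

#include <iostream>
#include <string>

struct ToyTriple {
  bool OSNaCl = false, OSIAMCU = false;
  bool Arch64Bit = false, OSDarwin = false, WindowsMSVC = false;
};

// Mirrors the updated computeDataLayout() fragment: NaCl/IAMCU get no f80
// entry; 64-bit, Darwin and (now) Windows MSVC targets align f80 to 128
// bits; everything else keeps the historical 32-bit alignment.
std::string f80Component(const ToyTriple &TT) {
  if (TT.OSNaCl || TT.OSIAMCU)
    return ""; // no f80
  if (TT.Arch64Bit || TT.OSDarwin || TT.WindowsMSVC)
    return "-f80:128";
  return "-f80:32";
}

int main() {
  ToyTriple MSVC32;
  MSVC32.WindowsMSVC = true;
  std::cout << f80Component(MSVC32) << "\n"; // prints -f80:128 after the change
}
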
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/contrib/llvm-project/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
index d8cd7311a0d5..5b95c10332dc 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -43,6 +43,7 @@
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
@@ -3429,6 +3430,20 @@ X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
if (ICA.isTypeBasedOnly())
return getTypeBasedIntrinsicInstrCost(ICA, CostKind);
+ static const CostTblEntry AVX512BWCostTbl[] = {
+ { ISD::ROTL, MVT::v32i16, 2 },
+ { ISD::ROTL, MVT::v16i16, 2 },
+ { ISD::ROTL, MVT::v8i16, 2 },
+ { ISD::ROTL, MVT::v64i8, 5 },
+ { ISD::ROTL, MVT::v32i8, 5 },
+ { ISD::ROTL, MVT::v16i8, 5 },
+ { ISD::ROTR, MVT::v32i16, 2 },
+ { ISD::ROTR, MVT::v16i16, 2 },
+ { ISD::ROTR, MVT::v8i16, 2 },
+ { ISD::ROTR, MVT::v64i8, 5 },
+ { ISD::ROTR, MVT::v32i8, 5 },
+ { ISD::ROTR, MVT::v16i8, 5 }
+ };
static const CostTblEntry AVX512CostTbl[] = {
{ ISD::ROTL, MVT::v8i64, 1 },
{ ISD::ROTL, MVT::v4i64, 1 },
@@ -3506,6 +3521,10 @@ X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
MVT MTy = LT.second;
// Attempt to lookup cost.
+ if (ST->hasBWI())
+ if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
+ return LT.first * Entry->Cost;
+
if (ST->hasAVX512())
if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
return LT.first * Entry->Cost;
@@ -4976,9 +4995,13 @@ InstructionCost X86TTIImpl::getGatherScatterOpCost(
const Instruction *I = nullptr) {
if (CostKind != TTI::TCK_RecipThroughput) {
if ((Opcode == Instruction::Load &&
- isLegalMaskedGather(SrcVTy, Align(Alignment))) ||
+ isLegalMaskedGather(SrcVTy, Align(Alignment)) &&
+ !forceScalarizeMaskedGather(cast<VectorType>(SrcVTy),
+ Align(Alignment))) ||
(Opcode == Instruction::Store &&
- isLegalMaskedScatter(SrcVTy, Align(Alignment))))
+ isLegalMaskedScatter(SrcVTy, Align(Alignment)) &&
+ !forceScalarizeMaskedScatter(cast<VectorType>(SrcVTy),
+ Align(Alignment))))
return 1;
return BaseT::getGatherScatterOpCost(Opcode, SrcVTy, Ptr, VariableMask,
Alignment, CostKind, I);
@@ -4993,9 +5016,13 @@ InstructionCost X86TTIImpl::getGatherScatterOpCost(
unsigned AddressSpace = PtrTy->getAddressSpace();
if ((Opcode == Instruction::Load &&
- !isLegalMaskedGather(SrcVTy, Align(Alignment))) ||
+ (!isLegalMaskedGather(SrcVTy, Align(Alignment)) ||
+ forceScalarizeMaskedGather(cast<VectorType>(SrcVTy),
+ Align(Alignment)))) ||
(Opcode == Instruction::Store &&
- !isLegalMaskedScatter(SrcVTy, Align(Alignment))))
+ (!isLegalMaskedScatter(SrcVTy, Align(Alignment)) ||
+ forceScalarizeMaskedScatter(cast<VectorType>(SrcVTy),
+ Align(Alignment)))))
return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
AddressSpace);
@@ -5118,35 +5145,21 @@ bool X86TTIImpl::supportsGather() const {
return ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2());
}
+bool X86TTIImpl::forceScalarizeMaskedGather(VectorType *VTy, Align Alignment) {
+ // Gather / Scatter for vector 2 is not profitable on KNL / SKX
+ // Vector-4 of gather/scatter instruction does not exist on KNL. We can extend
+ // it to 8 elements, but zeroing upper bits of the mask vector will add more
+ // instructions. Right now we give the scalar cost of vector-4 for KNL. TODO:
+ // Check, maybe the gather/scatter instruction is better in the VariableMask
+ // case.
+ unsigned NumElts = cast<FixedVectorType>(VTy)->getNumElements();
+ return NumElts == 1 ||
+ (ST->hasAVX512() && (NumElts == 2 || (NumElts == 4 && !ST->hasVLX())));
+}
+
bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) {
if (!supportsGather())
return false;
-
- // This function is called now in two cases: from the Loop Vectorizer
- // and from the Scalarizer.
- // When the Loop Vectorizer asks about legality of the feature,
- // the vectorization factor is not calculated yet. The Loop Vectorizer
- // sends a scalar type and the decision is based on the width of the
- // scalar element.
- // Later on, the cost model will estimate usage this intrinsic based on
- // the vector type.
- // The Scalarizer asks again about legality. It sends a vector type.
- // In this case we can reject non-power-of-2 vectors.
- // We also reject single element vectors as the type legalizer can't
- // scalarize it.
- if (auto *DataVTy = dyn_cast<FixedVectorType>(DataTy)) {
- unsigned NumElts = DataVTy->getNumElements();
- if (NumElts == 1)
- return false;
- // Gather / Scatter for vector 2 is not profitable on KNL / SKX
- // Vector-4 of gather/scatter instruction does not exist on KNL.
- // We can extend it to 8 elements, but zeroing upper bits of
- // the mask vector will add more instructions. Right now we give the scalar
- // cost of vector-4 for KNL. TODO: Check, maybe the gather/scatter
- // instruction is better in the VariableMask case.
- if (ST->hasAVX512() && (NumElts == 2 || (NumElts == 4 && !ST->hasVLX())))
- return false;
- }
Type *ScalarTy = DataTy->getScalarType();
if (ScalarTy->isPointerTy())
return true;
@@ -5187,9 +5200,48 @@ bool X86TTIImpl::areInlineCompatible(const Function *Caller,
const FeatureBitset &CalleeBits =
TM.getSubtargetImpl(*Callee)->getFeatureBits();
+ // Check whether features are the same (apart from the ignore list).
FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
- return (RealCallerBits & RealCalleeBits) == RealCalleeBits;
+ if (RealCallerBits == RealCalleeBits)
+ return true;
+
+ // If the features are a subset, we need to additionally check for calls
+ // that may become ABI-incompatible as a result of inlining.
+ if ((RealCallerBits & RealCalleeBits) != RealCalleeBits)
+ return false;
+
+ for (const Instruction &I : instructions(Callee)) {
+ if (const auto *CB = dyn_cast<CallBase>(&I)) {
+ SmallVector<Type *, 8> Types;
+ for (Value *Arg : CB->args())
+ Types.push_back(Arg->getType());
+ if (!CB->getType()->isVoidTy())
+ Types.push_back(CB->getType());
+
+ // Simple types are always ABI compatible.
+ auto IsSimpleTy = [](Type *Ty) {
+ return !Ty->isVectorTy() && !Ty->isAggregateType();
+ };
+ if (all_of(Types, IsSimpleTy))
+ continue;
+
+ if (Function *NestedCallee = CB->getCalledFunction()) {
+ // Assume that intrinsics are always ABI compatible.
+ if (NestedCallee->isIntrinsic())
+ continue;
+
+ // Do a precise compatibility check.
+ if (!areTypesABICompatible(Caller, NestedCallee, Types))
+ return false;
+ } else {
+ // We don't know the target features of the callee,
+ // assume it is incompatible.
+ return false;
+ }
+ }
+ }
+ return true;
}
bool X86TTIImpl::areTypesABICompatible(const Function *Caller,
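
Two behavioural changes in X86TargetTransformInfo.cpp are worth noting: the gather/scatter cost paths now consult the new forceScalarizeMaskedGather/forceScalarizeMaskedScatter hooks, so short gathers that are technically legal are still costed as scalarized, and areInlineCompatible now walks the callee's call sites to reject inlining that could change vector or aggregate ABI when the caller's features are only a superset of the callee's. A standalone sketch of the new scalarization predicate, with subtarget flags modelled as plain booleans:

#include <iostream>

struct ToySubtarget {
  bool HasAVX512 = false;
  bool HasVLX = false; // 128/256-bit forms of AVX-512 available
};

// Same condition as the new X86TTIImpl::forceScalarizeMaskedGather():
// single-element gathers are always scalarized, and on AVX-512 targets
// 2-element (and, without VLX, 4-element) gathers are scalarized too.
bool forceScalarizeMaskedGatherToy(const ToySubtarget &ST, unsigned NumElts) {
  return NumElts == 1 ||
         (ST.HasAVX512 && (NumElts == 2 || (NumElts == 4 && !ST.HasVLX)));
}

int main() {
  ToySubtarget KNL{true, false};
  ToySubtarget SKX{true, true};
  std::cout << forceScalarizeMaskedGatherToy(KNL, 4) << "\n"; // 1: scalarize
  std::cout << forceScalarizeMaskedGatherToy(SKX, 4) << "\n"; // 0: keep gather
}
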
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86TargetTransformInfo.h b/contrib/llvm-project/llvm/lib/Target/X86/X86TargetTransformInfo.h
index 11e9cb09c7d5..69715072426f 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86TargetTransformInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86TargetTransformInfo.h
@@ -226,6 +226,10 @@ public:
bool isLegalMaskedStore(Type *DataType, Align Alignment);
bool isLegalNTLoad(Type *DataType, Align Alignment);
bool isLegalNTStore(Type *DataType, Align Alignment);
+ bool forceScalarizeMaskedGather(VectorType *VTy, Align Alignment);
+ bool forceScalarizeMaskedScatter(VectorType *VTy, Align Alignment) {
+ return forceScalarizeMaskedGather(VTy, Alignment);
+ }
bool isLegalMaskedGather(Type *DataType, Align Alignment);
bool isLegalMaskedScatter(Type *DataType, Align Alignment);
bool isLegalMaskedExpandLoad(Type *DataType);
diff --git a/contrib/llvm-project/llvm/lib/Target/XCore/XCoreFrameLowering.cpp b/contrib/llvm-project/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
index f2f89f4269ed..19ebcb3ea3e8 100644
--- a/contrib/llvm-project/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
@@ -428,7 +428,7 @@ bool XCoreFrameLowering::spillCalleeSavedRegisters(
DL = MI->getDebugLoc();
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
assert(Reg != XCore::LR && !(Reg == XCore::R10 && hasFP(*MF)) &&
"LR & FP are always handled in emitPrologue");
@@ -455,7 +455,7 @@ bool XCoreFrameLowering::restoreCalleeSavedRegisters(
if (!AtStart)
--BeforeI;
for (const CalleeSavedInfo &CSR : CSI) {
- unsigned Reg = CSR.getReg();
+ Register Reg = CSR.getReg();
assert(Reg != XCore::LR && !(Reg == XCore::R10 && hasFP(*MF)) &&
"LR & FP are always handled in emitEpilogue");
diff --git a/contrib/llvm-project/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp b/contrib/llvm-project/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp
index 6799823f6fcb..0d1ba39b8b10 100644
--- a/contrib/llvm-project/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp
@@ -97,7 +97,7 @@ static void InsertFPConstInst(MachineBasicBlock::iterator II,
MachineInstr &MI = *II;
MachineBasicBlock &MBB = *MI.getParent();
DebugLoc dl = MI.getDebugLoc();
- unsigned ScratchOffset = RS->scavengeRegister(&XCore::GRRegsRegClass, II, 0);
+ Register ScratchOffset = RS->scavengeRegister(&XCore::GRRegsRegClass, II, 0);
RS->setRegUsed(ScratchOffset);
TII.loadImmediate(MBB, II, ScratchOffset, Offset);
@@ -174,7 +174,7 @@ static void InsertSPConstInst(MachineBasicBlock::iterator II,
} else
ScratchBase = Reg;
BuildMI(MBB, II, dl, TII.get(XCore::LDAWSP_ru6), ScratchBase).addImm(0);
- unsigned ScratchOffset = RS->scavengeRegister(&XCore::GRRegsRegClass, II, 0);
+ Register ScratchOffset = RS->scavengeRegister(&XCore::GRRegsRegClass, II, 0);
RS->setRegUsed(ScratchOffset);
TII.loadImmediate(MBB, II, ScratchOffset, Offset);
diff --git a/contrib/llvm-project/llvm/lib/Target/XCore/XCoreSubtarget.cpp b/contrib/llvm-project/llvm/lib/Target/XCore/XCoreSubtarget.cpp
index 1be707cb488c..d4b777ef447f 100644
--- a/contrib/llvm-project/llvm/lib/Target/XCore/XCoreSubtarget.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/XCore/XCoreSubtarget.cpp
@@ -26,5 +26,5 @@ void XCoreSubtarget::anchor() { }
XCoreSubtarget::XCoreSubtarget(const Triple &TT, const std::string &CPU,
const std::string &FS, const TargetMachine &TM)
- : XCoreGenSubtargetInfo(TT, CPU, /*TuneCPU*/ CPU, FS), InstrInfo(),
- FrameLowering(*this), TLInfo(TM, *this), TSInfo() {}
+ : XCoreGenSubtargetInfo(TT, CPU, /*TuneCPU*/ CPU, FS), FrameLowering(*this),
+ TLInfo(TM, *this) {}
diff --git a/contrib/llvm-project/llvm/lib/TextAPI/Architecture.cpp b/contrib/llvm-project/llvm/lib/TextAPI/Architecture.cpp
index e1901d5c0ce5..bb349b21774e 100644
--- a/contrib/llvm-project/llvm/lib/TextAPI/Architecture.cpp
+++ b/contrib/llvm-project/llvm/lib/TextAPI/Architecture.cpp
@@ -15,7 +15,7 @@
#include "llvm/ADT/Triple.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/TextAPI/ArchitectureSet.h"
+#include "llvm/Support/ErrorHandling.h"
namespace llvm {
namespace MachO {
diff --git a/contrib/llvm-project/llvm/lib/TextAPI/PackedVersion.cpp b/contrib/llvm-project/llvm/lib/TextAPI/PackedVersion.cpp
index f8171e02b6d3..67fb30aeb127 100644
--- a/contrib/llvm-project/llvm/lib/TextAPI/PackedVersion.cpp
+++ b/contrib/llvm-project/llvm/lib/TextAPI/PackedVersion.cpp
@@ -11,7 +11,6 @@
//===----------------------------------------------------------------------===//
#include "llvm/TextAPI/PackedVersion.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Format.h"
diff --git a/contrib/llvm-project/llvm/lib/TextAPI/Platform.cpp b/contrib/llvm-project/llvm/lib/TextAPI/Platform.cpp
index a2ce6d0cac86..c3c74252301e 100644
--- a/contrib/llvm-project/llvm/lib/TextAPI/Platform.cpp
+++ b/contrib/llvm-project/llvm/lib/TextAPI/Platform.cpp
@@ -18,120 +18,118 @@
namespace llvm {
namespace MachO {
-PlatformKind mapToPlatformKind(PlatformKind Platform, bool WantSim) {
+PlatformType mapToPlatformType(PlatformType Platform, bool WantSim) {
switch (Platform) {
default:
return Platform;
- case PlatformKind::iOS:
- return WantSim ? PlatformKind::iOSSimulator : PlatformKind::iOS;
- case PlatformKind::tvOS:
- return WantSim ? PlatformKind::tvOSSimulator : PlatformKind::tvOS;
- case PlatformKind::watchOS:
- return WantSim ? PlatformKind::watchOSSimulator : PlatformKind::watchOS;
+ case PLATFORM_IOS:
+ return WantSim ? PLATFORM_IOSSIMULATOR : PLATFORM_IOS;
+ case PLATFORM_TVOS:
+ return WantSim ? PLATFORM_TVOSSIMULATOR : PLATFORM_TVOS;
+ case PLATFORM_WATCHOS:
+ return WantSim ? PLATFORM_WATCHOSSIMULATOR : PLATFORM_WATCHOS;
}
- llvm_unreachable("Unknown llvm::MachO::PlatformKind enum");
}
-PlatformKind mapToPlatformKind(const Triple &Target) {
+PlatformType mapToPlatformType(const Triple &Target) {
switch (Target.getOS()) {
default:
- return PlatformKind::unknown;
+ return PLATFORM_UNKNOWN;
case Triple::MacOSX:
- return PlatformKind::macOS;
+ return PLATFORM_MACOS;
case Triple::IOS:
if (Target.isSimulatorEnvironment())
- return PlatformKind::iOSSimulator;
+ return PLATFORM_IOSSIMULATOR;
if (Target.getEnvironment() == Triple::MacABI)
- return PlatformKind::macCatalyst;
- return PlatformKind::iOS;
+ return PLATFORM_MACCATALYST;
+ return PLATFORM_IOS;
case Triple::TvOS:
- return Target.isSimulatorEnvironment() ? PlatformKind::tvOSSimulator
- : PlatformKind::tvOS;
+ return Target.isSimulatorEnvironment() ? PLATFORM_TVOSSIMULATOR
+ : PLATFORM_TVOS;
case Triple::WatchOS:
- return Target.isSimulatorEnvironment() ? PlatformKind::watchOSSimulator
- : PlatformKind::watchOS;
+ return Target.isSimulatorEnvironment() ? PLATFORM_WATCHOSSIMULATOR
+ : PLATFORM_WATCHOS;
// TODO: add bridgeOS & driverKit once in llvm::Triple
}
- llvm_unreachable("Unknown Target Triple");
}
PlatformSet mapToPlatformSet(ArrayRef<Triple> Targets) {
PlatformSet Result;
for (const auto &Target : Targets)
- Result.insert(mapToPlatformKind(Target));
+ Result.insert(mapToPlatformType(Target));
return Result;
}
-StringRef getPlatformName(PlatformKind Platform) {
+StringRef getPlatformName(PlatformType Platform) {
switch (Platform) {
- case PlatformKind::unknown:
+ case PLATFORM_UNKNOWN:
return "unknown";
- case PlatformKind::macOS:
+ case PLATFORM_MACOS:
return "macOS";
- case PlatformKind::iOS:
+ case PLATFORM_IOS:
return "iOS";
- case PlatformKind::tvOS:
+ case PLATFORM_TVOS:
return "tvOS";
- case PlatformKind::watchOS:
+ case PLATFORM_WATCHOS:
return "watchOS";
- case PlatformKind::bridgeOS:
+ case PLATFORM_BRIDGEOS:
return "bridgeOS";
- case PlatformKind::macCatalyst:
+ case PLATFORM_MACCATALYST:
return "macCatalyst";
- case PlatformKind::iOSSimulator:
+ case PLATFORM_IOSSIMULATOR:
return "iOS Simulator";
- case PlatformKind::tvOSSimulator:
+ case PLATFORM_TVOSSIMULATOR:
return "tvOS Simulator";
- case PlatformKind::watchOSSimulator:
+ case PLATFORM_WATCHOSSIMULATOR:
return "watchOS Simulator";
- case PlatformKind::driverKit:
+ case PLATFORM_DRIVERKIT:
return "DriverKit";
}
- llvm_unreachable("Unknown llvm::MachO::PlatformKind enum");
+ llvm_unreachable("Unknown llvm::MachO::PlatformType enum");
}
-PlatformKind getPlatformFromName(StringRef Name) {
- return StringSwitch<PlatformKind>(Name)
- .Case("macos", PlatformKind::macOS)
- .Case("ios", PlatformKind::iOS)
- .Case("tvos", PlatformKind::tvOS)
- .Case("watchos", PlatformKind::watchOS)
- .Case("bridgeos", PlatformKind::macOS)
- .Case("ios-macabi", PlatformKind::macCatalyst)
- .Case("ios-simulator", PlatformKind::iOSSimulator)
- .Case("tvos-simulator", PlatformKind::tvOSSimulator)
- .Case("watchos-simulator", PlatformKind::watchOSSimulator)
- .Case("driverkit", PlatformKind::driverKit)
- .Default(PlatformKind::unknown);
+PlatformType getPlatformFromName(StringRef Name) {
+ return StringSwitch<PlatformType>(Name)
+ .Case("macos", PLATFORM_MACOS)
+ .Case("ios", PLATFORM_IOS)
+ .Case("tvos", PLATFORM_TVOS)
+ .Case("watchos", PLATFORM_WATCHOS)
+ .Case("bridgeos", PLATFORM_BRIDGEOS)
+ .Case("ios-macabi", PLATFORM_MACCATALYST)
+ .Case("ios-simulator", PLATFORM_IOSSIMULATOR)
+ .Case("tvos-simulator", PLATFORM_TVOSSIMULATOR)
+ .Case("watchos-simulator", PLATFORM_WATCHOSSIMULATOR)
+ .Case("driverkit", PLATFORM_DRIVERKIT)
+ .Default(PLATFORM_UNKNOWN);
}
-std::string getOSAndEnvironmentName(PlatformKind Platform,
+std::string getOSAndEnvironmentName(PlatformType Platform,
std::string Version) {
switch (Platform) {
- case PlatformKind::unknown:
+ case PLATFORM_UNKNOWN:
return "darwin" + Version;
- case PlatformKind::macOS:
+ case PLATFORM_MACOS:
return "macos" + Version;
- case PlatformKind::iOS:
+ case PLATFORM_IOS:
return "ios" + Version;
- case PlatformKind::tvOS:
+ case PLATFORM_TVOS:
return "tvos" + Version;
- case PlatformKind::watchOS:
+ case PLATFORM_WATCHOS:
return "watchos" + Version;
- case PlatformKind::bridgeOS:
+ case PLATFORM_BRIDGEOS:
return "bridgeos" + Version;
- case PlatformKind::macCatalyst:
+ case PLATFORM_MACCATALYST:
return "ios" + Version + "-macabi";
- case PlatformKind::iOSSimulator:
+ case PLATFORM_IOSSIMULATOR:
return "ios" + Version + "-simulator";
- case PlatformKind::tvOSSimulator:
+ case PLATFORM_TVOSSIMULATOR:
return "tvos" + Version + "-simulator";
- case PlatformKind::watchOSSimulator:
+ case PLATFORM_WATCHOSSIMULATOR:
return "watchos" + Version + "-simulator";
- case PlatformKind::driverKit:
+ case PLATFORM_DRIVERKIT:
return "driverkit" + Version;
}
- llvm_unreachable("Unknown llvm::MachO::PlatformKind enum");
+ llvm_unreachable("Unknown llvm::MachO::PlatformType enum");
}
} // end namespace MachO.
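
The TextAPI changes replace the library-local PlatformKind enum with the PLATFORM_* values of MachO::PlatformType from LLVM's binary-format headers; the helpers keep their behaviour and only the enum spelling changes. A standalone sketch of the simulator mapping shown above, using a stand-in enum rather than the real MachO constants:

#include <iostream>

enum ToyPlatform { IOS, TVOS, WATCHOS, IOS_SIM, TVOS_SIM, WATCHOS_SIM, MACOS };

// Mirrors mapToPlatformType(Platform, WantSim): device platforms map to
// their simulator counterparts when a simulator environment is requested;
// everything else passes through unchanged.
ToyPlatform mapToSimulator(ToyPlatform P, bool WantSim) {
  switch (P) {
  case IOS:     return WantSim ? IOS_SIM : IOS;
  case TVOS:    return WantSim ? TVOS_SIM : TVOS;
  case WATCHOS: return WantSim ? WATCHOS_SIM : WATCHOS;
  default:      return P; // simulators and macOS map to themselves
  }
}

int main() {
  std::cout << (mapToSimulator(IOS, true) == IOS_SIM) << "\n"; // prints 1
}
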
diff --git a/contrib/llvm-project/llvm/lib/TextAPI/Target.cpp b/contrib/llvm-project/llvm/lib/TextAPI/Target.cpp
index 35fe1bf65e6f..c54c3bd66b9d 100644
--- a/contrib/llvm-project/llvm/lib/TextAPI/Target.cpp
+++ b/contrib/llvm-project/llvm/lib/TextAPI/Target.cpp
@@ -7,11 +7,8 @@
//===----------------------------------------------------------------------===//
#include "llvm/TextAPI/Target.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Support/Format.h"
+#include "llvm/ADT/Twine.h"
#include "llvm/Support/raw_ostream.h"
namespace llvm {
@@ -22,26 +19,26 @@ Expected<Target> Target::create(StringRef TargetValue) {
auto ArchitectureStr = Result.first;
auto Architecture = getArchitectureFromName(ArchitectureStr);
auto PlatformStr = Result.second;
- PlatformKind Platform;
- Platform = StringSwitch<PlatformKind>(PlatformStr)
- .Case("macos", PlatformKind::macOS)
- .Case("ios", PlatformKind::iOS)
- .Case("tvos", PlatformKind::tvOS)
- .Case("watchos", PlatformKind::watchOS)
- .Case("bridgeos", PlatformKind::bridgeOS)
- .Case("maccatalyst", PlatformKind::macCatalyst)
- .Case("ios-simulator", PlatformKind::iOSSimulator)
- .Case("tvos-simulator", PlatformKind::tvOSSimulator)
- .Case("watchos-simulator", PlatformKind::watchOSSimulator)
- .Case("driverkit", PlatformKind::driverKit)
- .Default(PlatformKind::unknown);
+ PlatformType Platform;
+ Platform = StringSwitch<PlatformType>(PlatformStr)
+ .Case("macos", PLATFORM_MACOS)
+ .Case("ios", PLATFORM_IOS)
+ .Case("tvos", PLATFORM_TVOS)
+ .Case("watchos", PLATFORM_WATCHOS)
+ .Case("bridgeos", PLATFORM_BRIDGEOS)
+ .Case("maccatalyst", PLATFORM_MACCATALYST)
+ .Case("ios-simulator", PLATFORM_IOSSIMULATOR)
+ .Case("tvos-simulator", PLATFORM_TVOSSIMULATOR)
+ .Case("watchos-simulator", PLATFORM_WATCHOSSIMULATOR)
+ .Case("driverkit", PLATFORM_DRIVERKIT)
+ .Default(PLATFORM_UNKNOWN);
- if (Platform == PlatformKind::unknown) {
+ if (Platform == PLATFORM_UNKNOWN) {
if (PlatformStr.startswith("<") && PlatformStr.endswith(">")) {
PlatformStr = PlatformStr.drop_front().drop_back();
unsigned long long RawValue;
if (!PlatformStr.getAsInteger(10, RawValue))
- Platform = (PlatformKind)RawValue;
+ Platform = (PlatformType)RawValue;
}
}
diff --git a/contrib/llvm-project/llvm/lib/TextAPI/TextStub.cpp b/contrib/llvm-project/llvm/lib/TextAPI/TextStub.cpp
index b64f19ab65cc..ff93e43356f7 100644
--- a/contrib/llvm-project/llvm/lib/TextAPI/TextStub.cpp
+++ b/contrib/llvm-project/llvm/lib/TextAPI/TextStub.cpp
@@ -380,34 +380,34 @@ template <> struct ScalarTraits<Target> {
default:
OS << "unknown";
break;
- case PlatformKind::macOS:
+ case PLATFORM_MACOS:
OS << "macos";
break;
- case PlatformKind::iOS:
+ case PLATFORM_IOS:
OS << "ios";
break;
- case PlatformKind::tvOS:
+ case PLATFORM_TVOS:
OS << "tvos";
break;
- case PlatformKind::watchOS:
+ case PLATFORM_WATCHOS:
OS << "watchos";
break;
- case PlatformKind::bridgeOS:
+ case PLATFORM_BRIDGEOS:
OS << "bridgeos";
break;
- case PlatformKind::macCatalyst:
+ case PLATFORM_MACCATALYST:
OS << "maccatalyst";
break;
- case PlatformKind::iOSSimulator:
+ case PLATFORM_IOSSIMULATOR:
OS << "ios-simulator";
break;
- case PlatformKind::tvOSSimulator:
+ case PLATFORM_TVOSSIMULATOR:
OS << "tvos-simulator";
break;
- case PlatformKind::watchOSSimulator:
+ case PLATFORM_WATCHOSSIMULATOR:
OS << "watchos-simulator";
break;
- case PlatformKind::driverKit:
+ case PLATFORM_DRIVERKIT:
OS << "driverkit";
break;
}
@@ -423,7 +423,7 @@ template <> struct ScalarTraits<Target> {
Value = *Result;
if (Value.Arch == AK_unknown)
return "unknown architecture";
- if (Value.Platform == PlatformKind::unknown)
+ if (Value.Platform == PLATFORM_UNKNOWN)
return "unknown platform";
return {};
@@ -597,11 +597,10 @@ template <> struct MappingTraits<const InterfaceFile *> {
TargetList Targets;
for (auto Platform : Platforms) {
- Platform = mapToPlatformKind(Platform, Architectures.hasX86());
+ Platform = mapToPlatformType(Platform, Architectures.hasX86());
for (const auto &&Architecture : Architectures) {
- if ((Architecture == AK_i386) &&
- (Platform == PlatformKind::macCatalyst))
+ if ((Architecture == AK_i386) && (Platform == PLATFORM_MACCATALYST))
continue;
Targets.emplace_back(Architecture, Platform);
diff --git a/contrib/llvm-project/llvm/lib/TextAPI/TextStubCommon.cpp b/contrib/llvm-project/llvm/lib/TextAPI/TextStubCommon.cpp
index c2713b9b5203..29b74f981a91 100644
--- a/contrib/llvm-project/llvm/lib/TextAPI/TextStubCommon.cpp
+++ b/contrib/llvm-project/llvm/lib/TextAPI/TextStubCommon.cpp
@@ -49,8 +49,8 @@ void ScalarTraits<PlatformSet>::output(const PlatformSet &Values, void *IO,
assert((!Ctx || Ctx->FileKind != FileType::Invalid) &&
"File type is not set in context");
- if (Ctx && Ctx->FileKind == TBD_V3 && Values.count(PlatformKind::macOS) &&
- Values.count(PlatformKind::macCatalyst)) {
+ if (Ctx && Ctx->FileKind == TBD_V3 && Values.count(PLATFORM_MACOS) &&
+ Values.count(PLATFORM_MACCATALYST)) {
OS << "zippered";
return;
}
@@ -60,31 +60,31 @@ void ScalarTraits<PlatformSet>::output(const PlatformSet &Values, void *IO,
default:
llvm_unreachable("unexpected platform");
break;
- case PlatformKind::macOS:
+ case PLATFORM_MACOS:
OS << "macosx";
break;
- case PlatformKind::iOSSimulator:
+ case PLATFORM_IOSSIMULATOR:
LLVM_FALLTHROUGH;
- case PlatformKind::iOS:
+ case PLATFORM_IOS:
OS << "ios";
break;
- case PlatformKind::watchOSSimulator:
+ case PLATFORM_WATCHOSSIMULATOR:
LLVM_FALLTHROUGH;
- case PlatformKind::watchOS:
+ case PLATFORM_WATCHOS:
OS << "watchos";
break;
- case PlatformKind::tvOSSimulator:
+ case PLATFORM_TVOSSIMULATOR:
LLVM_FALLTHROUGH;
- case PlatformKind::tvOS:
+ case PLATFORM_TVOS:
OS << "tvos";
break;
- case PlatformKind::bridgeOS:
+ case PLATFORM_BRIDGEOS:
OS << "bridgeos";
break;
- case PlatformKind::macCatalyst:
+ case PLATFORM_MACCATALYST:
OS << "iosmac";
break;
- case PlatformKind::driverKit:
+ case PLATFORM_DRIVERKIT:
OS << "driverkit";
break;
}
@@ -98,28 +98,27 @@ StringRef ScalarTraits<PlatformSet>::input(StringRef Scalar, void *IO,
if (Scalar == "zippered") {
if (Ctx && Ctx->FileKind == FileType::TBD_V3) {
- Values.insert(PlatformKind::macOS);
- Values.insert(PlatformKind::macCatalyst);
+ Values.insert(PLATFORM_MACOS);
+ Values.insert(PLATFORM_MACCATALYST);
return {};
}
return "invalid platform";
}
- auto Platform = StringSwitch<PlatformKind>(Scalar)
- .Case("unknown", PlatformKind::unknown)
- .Case("macosx", PlatformKind::macOS)
- .Case("ios", PlatformKind::iOS)
- .Case("watchos", PlatformKind::watchOS)
- .Case("tvos", PlatformKind::tvOS)
- .Case("bridgeos", PlatformKind::bridgeOS)
- .Case("iosmac", PlatformKind::macCatalyst)
- .Default(PlatformKind::unknown);
-
- if (Platform == PlatformKind::macCatalyst)
+ auto Platform = StringSwitch<PlatformType>(Scalar)
+ .Case("macosx", PLATFORM_MACOS)
+ .Case("ios", PLATFORM_IOS)
+ .Case("watchos", PLATFORM_WATCHOS)
+ .Case("tvos", PLATFORM_TVOS)
+ .Case("bridgeos", PLATFORM_BRIDGEOS)
+ .Case("iosmac", PLATFORM_MACCATALYST)
+ .Default(PLATFORM_UNKNOWN);
+
+ if (Platform == PLATFORM_MACCATALYST)
if (Ctx && Ctx->FileKind != FileType::TBD_V3)
return "invalid platform";
- if (Platform == PlatformKind::unknown)
+ if (Platform == PLATFORM_UNKNOWN)
return "unknown platform";
Values.insert(Platform);
@@ -226,7 +225,7 @@ StringRef ScalarTraits<UUID>::input(StringRef Scalar, void *, UUID &Value) {
if (UUID.empty())
return "invalid uuid string pair";
Value.second = std::string(UUID);
- Value.first = Target{getArchitectureFromName(Arch), PlatformKind::unknown};
+ Value.first = Target{getArchitectureFromName(Arch), PLATFORM_UNKNOWN};
return {};
}
diff --git a/contrib/llvm-project/llvm/lib/TextAPI/TextStubCommon.h b/contrib/llvm-project/llvm/lib/TextAPI/TextStubCommon.h
index 89ae5d56297c..aac27221b5ff 100644
--- a/contrib/llvm-project/llvm/lib/TextAPI/TextStubCommon.h
+++ b/contrib/llvm-project/llvm/lib/TextAPI/TextStubCommon.h
@@ -16,9 +16,9 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/YAMLTraits.h"
#include "llvm/TextAPI/Architecture.h"
-#include "llvm/TextAPI/ArchitectureSet.h"
#include "llvm/TextAPI/InterfaceFile.h"
-#include "llvm/TextAPI/PackedVersion.h"
+#include "llvm/TextAPI/Platform.h"
+#include "llvm/TextAPI/Target.h"
using UUID = std::pair<llvm::MachO::Target, std::string>;
@@ -28,6 +28,11 @@ LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(UUID)
LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(FlowStringRef)
namespace llvm {
+
+namespace MachO {
+ class ArchitectureSet;
+ class PackedVersion;
+}
namespace yaml {
template <> struct ScalarTraits<FlowStringRef> {
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Coroutines/CoroEarly.cpp b/contrib/llvm-project/llvm/lib/Transforms/Coroutines/CoroEarly.cpp
index 68a34bdcb1cd..1533e1805f17 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Coroutines/CoroEarly.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Coroutines/CoroEarly.cpp
@@ -176,11 +176,14 @@ bool Lowerer::lowerEarlyIntrinsics(Function &F) {
lowerCoroNoop(cast<IntrinsicInst>(&I));
break;
case Intrinsic::coro_id:
- // Mark a function that comes out of the frontend that has a coro.id
- // with a coroutine attribute.
if (auto *CII = cast<CoroIdInst>(&I)) {
if (CII->getInfo().isPreSplit()) {
- F.addFnAttr(CORO_PRESPLIT_ATTR, UNPREPARED_FOR_SPLIT);
+ assert(F.hasFnAttribute(CORO_PRESPLIT_ATTR) &&
+ F.getFnAttribute(CORO_PRESPLIT_ATTR).getValueAsString() ==
+ UNPREPARED_FOR_SPLIT &&
+ "A frontend that uses the Switch-Resumed ABI should emit the "
+ "\"coroutine.presplit\" attribute with value \"0\" for the "
+ "coroutine.");
setCannotDuplicate(CII);
CII->setCoroutineSelf();
CoroId = cast<CoroIdInst>(&I);
@@ -190,6 +193,8 @@ bool Lowerer::lowerEarlyIntrinsics(Function &F) {
case Intrinsic::coro_id_retcon:
case Intrinsic::coro_id_retcon_once:
case Intrinsic::coro_id_async:
+ // TODO: Remove the line once we support it in the corresponding
+ // frontend.
F.addFnAttr(CORO_PRESPLIT_ATTR, PREPARED_FOR_SPLIT);
break;
case Intrinsic::coro_resume:
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Coroutines/CoroFrame.cpp b/contrib/llvm-project/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
index a0d12865bd3a..92acfb93057a 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
@@ -587,7 +587,7 @@ void FrameTypeBuilder::addFieldForAllocas(const Function &F,
}
});
- if (!Shape.ReuseFrameSlot && !EnableReuseStorageInFrame) {
+ if (!Shape.OptimizeFrame && !EnableReuseStorageInFrame) {
for (const auto &A : FrameData.Allocas) {
AllocaInst *Alloca = A.Alloca;
NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
@@ -808,7 +808,7 @@ static StringRef solveTypeName(Type *Ty) {
if (Ty->isPointerTy()) {
auto *PtrTy = cast<PointerType>(Ty);
- Type *PointeeTy = PtrTy->getElementType();
+ Type *PointeeTy = PtrTy->getPointerElementType();
auto Name = solveTypeName(PointeeTy);
if (Name == "UnknownType")
return "PointerType";
@@ -1659,7 +1659,7 @@ static Instruction *insertSpills(const FrameDataInfo &FrameData,
&*Builder.GetInsertPoint());
// This dbg.declare is for the main function entry point. It
// will be deleted in all coro-split functions.
- coro::salvageDebugInfo(DbgPtrAllocaCache, DDI, Shape.ReuseFrameSlot);
+ coro::salvageDebugInfo(DbgPtrAllocaCache, DDI, Shape.OptimizeFrame);
}
}
@@ -2278,7 +2278,7 @@ static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
auto ArgTy = cast<PointerType>(Arg.getType());
- auto ValueTy = ArgTy->getElementType();
+ auto ValueTy = ArgTy->getPointerElementType();
// Reduce to the alloca case:
@@ -2506,7 +2506,7 @@ static void collectFrameAllocas(Function &F, coro::Shape &Shape,
void coro::salvageDebugInfo(
SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> &DbgPtrAllocaCache,
- DbgVariableIntrinsic *DVI, bool ReuseFrameSlot) {
+ DbgVariableIntrinsic *DVI, bool OptimizeFrame) {
Function *F = DVI->getFunction();
IRBuilder<> Builder(F->getContext());
auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
@@ -2558,7 +2558,7 @@ void coro::salvageDebugInfo(
//
  // Avoid creating an alloca that would be eliminated by optimization
  // passes, leaving the corresponding dbg.declares invalid.
- if (!ReuseFrameSlot && !EnableReuseStorageInFrame)
+ if (!OptimizeFrame && !EnableReuseStorageInFrame)
if (auto *Arg = dyn_cast<llvm::Argument>(Storage)) {
auto &Cached = DbgPtrAllocaCache[Storage];
if (!Cached) {
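Several hunks in this file replace PointerType::getElementType() with Type::getPointerElementType() in preparation for opaque pointers. A minimal sketch of the transitional spelling (pointeeOf is an illustrative helper, not from the diff):

    #include "llvm/IR/Type.h"

    // Transitional helper: query the pointee through Type rather than through
    // PointerType::getElementType(), which is being phased out.
    static llvm::Type *pointeeOf(llvm::Type *Ty) {
      return Ty->isPointerTy() ? Ty->getPointerElementType() : nullptr;
    }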
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Coroutines/CoroInstr.h b/contrib/llvm-project/llvm/lib/Transforms/Coroutines/CoroInstr.h
index bf3d781ba43e..014938c15a0a 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Coroutines/CoroInstr.h
+++ b/contrib/llvm-project/llvm/lib/Transforms/Coroutines/CoroInstr.h
@@ -599,6 +599,18 @@ public:
}
};
+/// This represents the llvm.coro.align instruction.
+class LLVM_LIBRARY_VISIBILITY CoroAlignInst : public IntrinsicInst {
+public:
+ // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::coro_align;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
class LLVM_LIBRARY_VISIBILITY AnyCoroEndInst : public IntrinsicInst {
enum { FrameArg, UnwindArg };
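The new CoroAlignInst wrapper follows the usual classof pattern, so llvm.coro.align calls can be matched with isa/dyn_cast like the other coroutine intrinsic wrappers. An illustrative use, assuming CoroInstr.h is visible as it is inside the Coroutines library (isCoroAlign is hypothetical):

    // Dispatches through the classof overloads defined above.
    static bool isCoroAlign(const llvm::Value *V) {
      return llvm::isa<llvm::CoroAlignInst>(V);
    }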
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Coroutines/CoroInternal.h b/contrib/llvm-project/llvm/lib/Transforms/Coroutines/CoroInternal.h
index 27ba8524f975..9a17068df3a9 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Coroutines/CoroInternal.h
+++ b/contrib/llvm-project/llvm/lib/Transforms/Coroutines/CoroInternal.h
@@ -36,6 +36,11 @@ void initializeCoroCleanupLegacyPass(PassRegistry &);
// adds coroutine subfunctions to the SCC to be processed by IPO pipeline.
// Async lowering similarly triggers a restart of the pipeline after it has
// split the coroutine.
+//
+// FIXME: Refactor these attributes as LLVM attributes instead of string
+// attributes since these attributes are already used outside LLVM's
+// coroutine module.
+// FIXME: Remove these values once we remove the Legacy PM.
#define CORO_PRESPLIT_ATTR "coroutine.presplit"
#define UNPREPARED_FOR_SPLIT "0"
#define PREPARED_FOR_SPLIT "1"
@@ -54,7 +59,7 @@ void updateCallGraph(Function &Caller, ArrayRef<Function *> Funcs,
/// holding a pointer to the coroutine frame.
void salvageDebugInfo(
SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> &DbgPtrAllocaCache,
- DbgVariableIntrinsic *DVI, bool ReuseFrameSlot);
+ DbgVariableIntrinsic *DVI, bool OptimizeFrame);
// Keeps data and helper functions for lowering coroutine intrinsics.
struct LowererBase {
@@ -99,6 +104,7 @@ struct LLVM_LIBRARY_VISIBILITY Shape {
CoroBeginInst *CoroBegin;
SmallVector<AnyCoroEndInst *, 4> CoroEnds;
SmallVector<CoroSizeInst *, 2> CoroSizes;
+ SmallVector<CoroAlignInst *, 2> CoroAligns;
SmallVector<AnyCoroSuspendInst *, 4> CoroSuspends;
SmallVector<CallInst*, 2> SwiftErrorOps;
@@ -126,7 +132,7 @@ struct LLVM_LIBRARY_VISIBILITY Shape {
BasicBlock *AllocaSpillBlock;
  /// This would only be true if optimizations are enabled.
- bool ReuseFrameSlot;
+ bool OptimizeFrame;
struct SwitchLoweringStorage {
SwitchInst *ResumeSwitch;
@@ -272,8 +278,8 @@ struct LLVM_LIBRARY_VISIBILITY Shape {
void emitDealloc(IRBuilder<> &Builder, Value *Ptr, CallGraph *CG) const;
Shape() = default;
- explicit Shape(Function &F, bool ReuseFrameSlot = false)
- : ReuseFrameSlot(ReuseFrameSlot) {
+ explicit Shape(Function &F, bool OptimizeFrame = false)
+ : OptimizeFrame(OptimizeFrame) {
buildFrom(F);
}
void buildFrom(Function &F);
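The ReuseFrameSlot flag is renamed to OptimizeFrame throughout; it still simply records whether optimizations are enabled. A small sketch of constructing a Shape with it (buildShapeForOpt is illustrative, mirroring how splitCoroutine forwards the flag below):

    // Build the coroutine Shape with frame optimizations enabled.
    static llvm::coro::Shape buildShapeForOpt(llvm::Function &F) {
      return llvm::coro::Shape(F, /*OptimizeFrame=*/true);
    }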
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Coroutines/CoroSplit.cpp b/contrib/llvm-project/llvm/lib/Transforms/Coroutines/CoroSplit.cpp
index 12c1829524ef..b5129809c6a6 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Coroutines/CoroSplit.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Coroutines/CoroSplit.cpp
@@ -29,6 +29,7 @@
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
+#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
@@ -617,7 +618,8 @@ static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape,
Value *CachedSlot = nullptr;
auto getSwiftErrorSlot = [&](Type *ValueTy) -> Value * {
if (CachedSlot) {
- assert(CachedSlot->getType()->getPointerElementType() == ValueTy &&
+ assert(cast<PointerType>(CachedSlot->getType())
+ ->isOpaqueOrPointeeTypeMatches(ValueTy) &&
"multiple swifterror slots in function with different types");
return CachedSlot;
}
@@ -626,7 +628,8 @@ static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape,
for (auto &Arg : F.args()) {
if (Arg.isSwiftError()) {
CachedSlot = &Arg;
- assert(Arg.getType()->getPointerElementType() == ValueTy &&
+ assert(cast<PointerType>(Arg.getType())
+ ->isOpaqueOrPointeeTypeMatches(ValueTy) &&
"swifterror argument does not have expected type");
return &Arg;
}
@@ -682,7 +685,7 @@ void CoroCloner::salvageDebugInfo() {
if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I))
Worklist.push_back(DVI);
for (DbgVariableIntrinsic *DVI : Worklist)
- coro::salvageDebugInfo(DbgPtrAllocaCache, DVI, Shape.ReuseFrameSlot);
+ coro::salvageDebugInfo(DbgPtrAllocaCache, DVI, Shape.OptimizeFrame);
// Remove all salvaged dbg.declare intrinsics that became
// either unreachable or stale due to the CoroSplit transformation.
@@ -835,7 +838,7 @@ Value *CoroCloner::deriveNewFramePointer() {
static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context,
unsigned ParamIndex,
uint64_t Size, Align Alignment) {
- AttrBuilder ParamAttrs;
+ AttrBuilder ParamAttrs(Context);
ParamAttrs.addAttribute(Attribute::NonNull);
ParamAttrs.addAttribute(Attribute::NoAlias);
ParamAttrs.addAlignmentAttr(Alignment);
@@ -845,14 +848,14 @@ static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context,
static void addAsyncContextAttrs(AttributeList &Attrs, LLVMContext &Context,
unsigned ParamIndex) {
- AttrBuilder ParamAttrs;
+ AttrBuilder ParamAttrs(Context);
ParamAttrs.addAttribute(Attribute::SwiftAsync);
Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
}
static void addSwiftSelfAttrs(AttributeList &Attrs, LLVMContext &Context,
unsigned ParamIndex) {
- AttrBuilder ParamAttrs;
+ AttrBuilder ParamAttrs(Context);
ParamAttrs.addAttribute(Attribute::SwiftSelf);
Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
}
@@ -929,7 +932,7 @@ void CoroCloner::create() {
case coro::ABI::Switch:
// Bootstrap attributes by copying function attributes from the
// original function. This should include optimization settings and so on.
- NewAttrs = NewAttrs.addFnAttributes(Context, OrigAttrs.getFnAttrs());
+ NewAttrs = NewAttrs.addFnAttributes(Context, AttrBuilder(Context, OrigAttrs.getFnAttrs()));
addFramePointerAttrs(NewAttrs, Context, 0,
Shape.FrameSize, Shape.FrameAlign);
@@ -952,7 +955,7 @@ void CoroCloner::create() {
// Transfer the original function's attributes.
auto FnAttrs = OrigF.getAttributes().getFnAttrs();
- NewAttrs = NewAttrs.addFnAttributes(Context, FnAttrs);
+ NewAttrs = NewAttrs.addFnAttributes(Context, AttrBuilder(Context, FnAttrs));
break;
}
case coro::ABI::Retcon:
@@ -1082,10 +1085,16 @@ static void updateAsyncFuncPointerContextSize(coro::Shape &Shape) {
Shape.AsyncLowering.AsyncFuncPointer->setInitializer(NewFuncPtrStruct);
}
-static void replaceFrameSize(coro::Shape &Shape) {
+static void replaceFrameSizeAndAlignment(coro::Shape &Shape) {
if (Shape.ABI == coro::ABI::Async)
updateAsyncFuncPointerContextSize(Shape);
+ for (CoroAlignInst *CA : Shape.CoroAligns) {
+ CA->replaceAllUsesWith(
+ ConstantInt::get(CA->getType(), Shape.FrameAlign.value()));
+ CA->eraseFromParent();
+ }
+
if (Shape.CoroSizes.empty())
return;
@@ -1197,10 +1206,34 @@ scanPHIsAndUpdateValueMap(Instruction *Prev, BasicBlock *NewBlock,
static bool simplifyTerminatorLeadingToRet(Instruction *InitialInst) {
DenseMap<Value *, Value *> ResolvedValues;
BasicBlock *UnconditionalSucc = nullptr;
+ assert(InitialInst->getModule());
+ const DataLayout &DL = InitialInst->getModule()->getDataLayout();
+
+ auto GetFirstValidInstruction = [](Instruction *I) {
+ while (I) {
+      // A BitCastInst doesn't generate actual code, so we can skip it.
+ if (isa<BitCastInst>(I) || I->isDebugOrPseudoInst() ||
+ I->isLifetimeStartOrEnd())
+ I = I->getNextNode();
+ else if (isInstructionTriviallyDead(I))
+        // Since we are in the middle of the transformation, we need to erase
+ // the dead instruction manually.
+ I = &*I->eraseFromParent();
+ else
+ break;
+ }
+ return I;
+ };
+
+ auto TryResolveConstant = [&ResolvedValues](Value *V) {
+ auto It = ResolvedValues.find(V);
+ if (It != ResolvedValues.end())
+ V = It->second;
+ return dyn_cast<ConstantInt>(V);
+ };
Instruction *I = InitialInst;
- while (I->isTerminator() ||
- (isa<CmpInst>(I) && I->getNextNode()->isTerminator())) {
+ while (I->isTerminator() || isa<CmpInst>(I)) {
if (isa<ReturnInst>(I)) {
if (I != InitialInst) {
// If InitialInst is an unconditional branch,
@@ -1213,48 +1246,68 @@ static bool simplifyTerminatorLeadingToRet(Instruction *InitialInst) {
}
if (auto *BR = dyn_cast<BranchInst>(I)) {
if (BR->isUnconditional()) {
- BasicBlock *BB = BR->getSuccessor(0);
+ BasicBlock *Succ = BR->getSuccessor(0);
if (I == InitialInst)
- UnconditionalSucc = BB;
- scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
- I = BB->getFirstNonPHIOrDbgOrLifetime();
+ UnconditionalSucc = Succ;
+ scanPHIsAndUpdateValueMap(I, Succ, ResolvedValues);
+ I = GetFirstValidInstruction(Succ->getFirstNonPHIOrDbgOrLifetime());
continue;
}
- } else if (auto *CondCmp = dyn_cast<CmpInst>(I)) {
- auto *BR = dyn_cast<BranchInst>(I->getNextNode());
- if (BR && BR->isConditional() && CondCmp == BR->getCondition()) {
- // If the case number of suspended switch instruction is reduced to
- // 1, then it is simplified to CmpInst in llvm::ConstantFoldTerminator.
- // And the comparsion looks like : %cond = icmp eq i8 %V, constant.
- ConstantInt *CondConst = dyn_cast<ConstantInt>(CondCmp->getOperand(1));
- if (CondConst && CondCmp->getPredicate() == CmpInst::ICMP_EQ) {
- Value *V = CondCmp->getOperand(0);
- auto it = ResolvedValues.find(V);
- if (it != ResolvedValues.end())
- V = it->second;
-
- if (ConstantInt *Cond0 = dyn_cast<ConstantInt>(V)) {
- BasicBlock *BB = Cond0->equalsInt(CondConst->getZExtValue())
- ? BR->getSuccessor(0)
- : BR->getSuccessor(1);
- scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
- I = BB->getFirstNonPHIOrDbgOrLifetime();
- continue;
- }
- }
- }
- } else if (auto *SI = dyn_cast<SwitchInst>(I)) {
- Value *V = SI->getCondition();
- auto it = ResolvedValues.find(V);
- if (it != ResolvedValues.end())
- V = it->second;
- if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
- BasicBlock *BB = SI->findCaseValue(Cond)->getCaseSuccessor();
- scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
- I = BB->getFirstNonPHIOrDbgOrLifetime();
+
+ BasicBlock *BB = BR->getParent();
+      // Handle the case where the condition of the conditional branch is
+      // constant, e.g.,
+      //
+      //  br i1 false, label %cleanup, label %CoroEnd
+      //
+      // This can happen during the transformation, and we can simply continue
+      // simplifying in that case.
+ if (ConstantFoldTerminator(BB, /*DeleteDeadConditions=*/true)) {
+ // Handle this branch in next iteration.
+ I = BB->getTerminator();
continue;
}
+ } else if (auto *CondCmp = dyn_cast<CmpInst>(I)) {
+      // If the number of cases of the suspend switch instruction is reduced
+      // to 1, it is simplified into a CmpInst by llvm::ConstantFoldTerminator.
+ auto *BR = dyn_cast<BranchInst>(
+ GetFirstValidInstruction(CondCmp->getNextNode()));
+ if (!BR || !BR->isConditional() || CondCmp != BR->getCondition())
+ return false;
+
+      // The comparison looks like: %cond = icmp eq i8 %V, constant.
+      // So we only try to resolve a constant for the first operand, since the
+      // second operand should be a literal constant by design.
+ ConstantInt *Cond0 = TryResolveConstant(CondCmp->getOperand(0));
+ auto *Cond1 = dyn_cast<ConstantInt>(CondCmp->getOperand(1));
+ if (!Cond0 || !Cond1)
+ return false;
+
+      // Both operands of the CmpInst are constant, so we can evaluate it
+      // immediately to get the destination.
+ auto *ConstResult =
+ dyn_cast_or_null<ConstantInt>(ConstantFoldCompareInstOperands(
+ CondCmp->getPredicate(), Cond0, Cond1, DL));
+ if (!ConstResult)
+ return false;
+
+ CondCmp->replaceAllUsesWith(ConstResult);
+ CondCmp->eraseFromParent();
+
+ // Handle this branch in next iteration.
+ I = BR;
+ continue;
+ } else if (auto *SI = dyn_cast<SwitchInst>(I)) {
+ ConstantInt *Cond = TryResolveConstant(SI->getCondition());
+ if (!Cond)
+ return false;
+
+ BasicBlock *BB = SI->findCaseValue(Cond)->getCaseSuccessor();
+ scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
+ I = GetFirstValidInstruction(BB->getFirstNonPHIOrDbgOrLifetime());
+ continue;
}
+
return false;
}
return false;
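The rewritten loop above folds the icmp explicitly once both operands are known constants and then resolves the (now constant) branch on the next iteration. A minimal sketch of just the folding step, under the same headers this file already uses (foldCmpToConstant is illustrative):

    #include "llvm/Analysis/ConstantFolding.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Instructions.h"

    // Evaluate an integer compare whose operands are both ConstantInt;
    // returns null if folding does not produce a ConstantInt.
    static llvm::ConstantInt *foldCmpToConstant(llvm::CmpInst *Cmp,
                                                llvm::ConstantInt *LHS,
                                                llvm::ConstantInt *RHS,
                                                const llvm::DataLayout &DL) {
      return llvm::dyn_cast_or_null<llvm::ConstantInt>(
          llvm::ConstantFoldCompareInstOperands(Cmp->getPredicate(), LHS, RHS,
                                                DL));
    }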
@@ -1826,20 +1879,20 @@ namespace {
static coro::Shape splitCoroutine(Function &F,
SmallVectorImpl<Function *> &Clones,
- bool ReuseFrameSlot) {
+ bool OptimizeFrame) {
PrettyStackTraceFunction prettyStackTrace(F);
  // The suspend-crossing algorithm in buildCoroutineFrame gets tripped
// up by uses in unreachable blocks, so remove them as a first pass.
removeUnreachableBlocks(F);
- coro::Shape Shape(F, ReuseFrameSlot);
+ coro::Shape Shape(F, OptimizeFrame);
if (!Shape.CoroBegin)
return Shape;
simplifySuspendPoints(Shape);
buildCoroutineFrame(F, Shape);
- replaceFrameSize(Shape);
+ replaceFrameSizeAndAlignment(Shape);
// If there are no suspend points, no split required, just remove
// the allocation and deallocation blocks, they are not needed.
@@ -2165,7 +2218,7 @@ PreservedAnalyses CoroSplitPass::run(LazyCallGraph::SCC &C,
F.removeFnAttr(CORO_PRESPLIT_ATTR);
SmallVector<Function *, 4> Clones;
- const coro::Shape Shape = splitCoroutine(F, Clones, ReuseFrameSlot);
+ const coro::Shape Shape = splitCoroutine(F, Clones, OptimizeFrame);
updateCallGraphAfterCoroutineSplit(*N, Shape, Clones, C, CG, AM, UR, FAM);
if (!Shape.CoroSuspends.empty()) {
@@ -2198,13 +2251,13 @@ namespace {
struct CoroSplitLegacy : public CallGraphSCCPass {
static char ID; // Pass identification, replacement for typeid
- CoroSplitLegacy(bool ReuseFrameSlot = false)
- : CallGraphSCCPass(ID), ReuseFrameSlot(ReuseFrameSlot) {
+ CoroSplitLegacy(bool OptimizeFrame = false)
+ : CallGraphSCCPass(ID), OptimizeFrame(OptimizeFrame) {
initializeCoroSplitLegacyPass(*PassRegistry::getPassRegistry());
}
bool Run = false;
- bool ReuseFrameSlot;
+ bool OptimizeFrame;
// A coroutine is identified by the presence of coro.begin intrinsic, if
// we don't have any, this pass has nothing to do.
@@ -2263,7 +2316,7 @@ struct CoroSplitLegacy : public CallGraphSCCPass {
F->removeFnAttr(CORO_PRESPLIT_ATTR);
SmallVector<Function *, 4> Clones;
- const coro::Shape Shape = splitCoroutine(*F, Clones, ReuseFrameSlot);
+ const coro::Shape Shape = splitCoroutine(*F, Clones, OptimizeFrame);
updateCallGraphAfterCoroutineSplit(*F, Shape, Clones, CG, SCC);
if (Shape.ABI == coro::ABI::Async) {
// Restart SCC passes.
@@ -2300,6 +2353,6 @@ INITIALIZE_PASS_END(
"Split coroutine into a set of functions driving its state machine", false,
false)
-Pass *llvm::createCoroSplitLegacyPass(bool ReuseFrameSlot) {
- return new CoroSplitLegacy(ReuseFrameSlot);
+Pass *llvm::createCoroSplitLegacyPass(bool OptimizeFrame) {
+ return new CoroSplitLegacy(OptimizeFrame);
}
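AttrBuilder now requires an LLVMContext, and addFnAttributes() is fed an AttrBuilder built from the attribute set, which is why the attribute-copying code above wraps getFnAttrs(). The same pattern in isolation (copyFnAttrs is a hypothetical helper):

    #include "llvm/IR/Attributes.h"

    // Copy the function-level attributes of one AttributeList onto another
    // using the context-carrying AttrBuilder constructor.
    static llvm::AttributeList copyFnAttrs(llvm::LLVMContext &Ctx,
                                           llvm::AttributeList From,
                                           llvm::AttributeList To) {
      return To.addFnAttributes(Ctx, llvm::AttrBuilder(Ctx, From.getFnAttrs()));
    }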
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Coroutines/Coroutines.cpp b/contrib/llvm-project/llvm/lib/Transforms/Coroutines/Coroutines.cpp
index fba8b03e44ba..965a146c143f 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Coroutines/Coroutines.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Coroutines/Coroutines.cpp
@@ -123,6 +123,7 @@ Value *coro::LowererBase::makeSubFnCall(Value *Arg, int Index,
static bool isCoroutineIntrinsicName(StringRef Name) {
// NOTE: Must be sorted!
static const char *const CoroIntrinsics[] = {
+ "llvm.coro.align",
"llvm.coro.alloc",
"llvm.coro.async.context.alloc",
"llvm.coro.async.context.dealloc",
@@ -268,6 +269,9 @@ void coro::Shape::buildFrom(Function &F) {
case Intrinsic::coro_size:
CoroSizes.push_back(cast<CoroSizeInst>(II));
break;
+ case Intrinsic::coro_align:
+ CoroAligns.push_back(cast<CoroAlignInst>(II));
+ break;
case Intrinsic::coro_frame:
CoroFrames.push_back(cast<CoroFrameInst>(II));
break;
@@ -672,8 +676,11 @@ static void checkAsyncFuncPointer(const Instruction *I, Value *V) {
if (!AsyncFuncPtrAddr)
fail(I, "llvm.coro.id.async async function pointer not a global", V);
- auto *StructTy =
- cast<StructType>(AsyncFuncPtrAddr->getType()->getPointerElementType());
+ if (AsyncFuncPtrAddr->getType()->isOpaquePointerTy())
+ return;
+
+ auto *StructTy = cast<StructType>(
+ AsyncFuncPtrAddr->getType()->getNonOpaquePointerElementType());
if (StructTy->isOpaque() || !StructTy->isPacked() ||
StructTy->getNumElements() != 2 ||
!StructTy->getElementType(0)->isIntegerTy(32) ||
@@ -697,14 +704,16 @@ void CoroIdAsyncInst::checkWellFormed() const {
static void checkAsyncContextProjectFunction(const Instruction *I,
Function *F) {
auto *FunTy = cast<FunctionType>(F->getValueType());
- if (!FunTy->getReturnType()->isPointerTy() ||
- !FunTy->getReturnType()->getPointerElementType()->isIntegerTy(8))
+ Type *Int8Ty = Type::getInt8Ty(F->getContext());
+ auto *RetPtrTy = dyn_cast<PointerType>(FunTy->getReturnType());
+ if (!RetPtrTy || !RetPtrTy->isOpaqueOrPointeeTypeMatches(Int8Ty))
fail(I,
"llvm.coro.suspend.async resume function projection function must "
"return an i8* type",
F);
if (FunTy->getNumParams() != 1 || !FunTy->getParamType(0)->isPointerTy() ||
- !FunTy->getParamType(0)->getPointerElementType()->isIntegerTy(8))
+ !cast<PointerType>(FunTy->getParamType(0))
+ ->isOpaqueOrPointeeTypeMatches(Int8Ty))
fail(I,
"llvm.coro.suspend.async resume function projection function must "
"take one i8* type as parameter",
@@ -719,8 +728,7 @@ void CoroAsyncEndInst::checkWellFormed() const {
auto *MustTailCallFunc = getMustTailCallFunction();
if (!MustTailCallFunc)
return;
- auto *FnTy =
- cast<FunctionType>(MustTailCallFunc->getType()->getPointerElementType());
+ auto *FnTy = MustTailCallFunc->getFunctionType();
if (FnTy->getNumParams() != (arg_size() - 3))
fail(this,
"llvm.coro.end.async must tail call function argument type must "
diff --git a/contrib/llvm-project/llvm/lib/Transforms/IPO/AlwaysInliner.cpp b/contrib/llvm-project/llvm/lib/Transforms/IPO/AlwaysInliner.cpp
index 01e724e22dcf..a6d9ce1033f3 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/IPO/AlwaysInliner.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/IPO/AlwaysInliner.cpp
@@ -54,13 +54,13 @@ PreservedAnalyses AlwaysInlinerPass::run(Module &M,
if (F.isPresplitCoroutine())
continue;
- if (!F.isDeclaration() && F.hasFnAttribute(Attribute::AlwaysInline) &&
- isInlineViable(F).isSuccess()) {
+ if (!F.isDeclaration() && isInlineViable(F).isSuccess()) {
Calls.clear();
for (User *U : F.users())
if (auto *CB = dyn_cast<CallBase>(U))
- if (CB->getCalledFunction() == &F)
+ if (CB->getCalledFunction() == &F &&
+ CB->hasFnAttr(Attribute::AlwaysInline))
Calls.insert(CB);
for (CallBase *CB : Calls) {
@@ -92,10 +92,12 @@ PreservedAnalyses AlwaysInlinerPass::run(Module &M,
Changed = true;
}
- // Remember to try and delete this function afterward. This both avoids
- // re-walking the rest of the module and avoids dealing with any iterator
- // invalidation issues while deleting functions.
- InlinedFunctions.push_back(&F);
+ if (F.hasFnAttribute(Attribute::AlwaysInline)) {
+ // Remember to try and delete this function afterward. This both avoids
+ // re-walking the rest of the module and avoids dealing with any
+ // iterator invalidation issues while deleting functions.
+ InlinedFunctions.push_back(&F);
+ }
}
}
@@ -117,7 +119,7 @@ PreservedAnalyses AlwaysInlinerPass::run(Module &M,
if (!InlinedFunctions.empty()) {
// Now we just have the comdat functions. Filter out the ones whose comdats
// are not actually dead.
- filterDeadComdatFunctions(M, InlinedFunctions);
+ filterDeadComdatFunctions(InlinedFunctions);
// The remaining functions are actually dead.
for (Function *F : InlinedFunctions) {
M.getFunctionList().erase(F);
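AlwaysInliner now decides per call site: a call is inlined when it carries (or its callee implies) the alwaysinline attribute, and the callee is only queued for deletion if the function itself is alwaysinline. The new gating condition in isolation (shouldAlwaysInline is illustrative):

    #include "llvm/IR/InstrTypes.h"

    // Mirrors the per-call-site check above; CallBase::hasFnAttr also consults
    // the called function, so callee-level alwaysinline still applies.
    static bool shouldAlwaysInline(const llvm::CallBase &CB,
                                   const llvm::Function &F) {
      return CB.getCalledFunction() == &F &&
             CB.hasFnAttr(llvm::Attribute::AlwaysInline);
    }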
diff --git a/contrib/llvm-project/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/contrib/llvm-project/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
index 3a42a2cac928..ce3c5153bde2 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -196,8 +196,7 @@ doPromotion(Function *F, SmallPtrSetImpl<Argument *> &ArgsToPromote,
for (const auto &ArgIndex : ArgIndices) {
// not allowed to dereference ->begin() if size() is 0
Params.push_back(GetElementPtrInst::getIndexedType(
- cast<PointerType>(I->getType())->getElementType(),
- ArgIndex.second));
+ I->getType()->getPointerElementType(), ArgIndex.second));
ArgAttrVec.push_back(AttributeSet());
assert(Params.back());
}
@@ -298,7 +297,7 @@ doPromotion(Function *F, SmallPtrSetImpl<Argument *> &ArgsToPromote,
Ops.push_back(ConstantInt::get(IdxTy, II));
// Keep track of the type we're currently indexing.
if (auto *ElPTy = dyn_cast<PointerType>(ElTy))
- ElTy = ElPTy->getElementType();
+ ElTy = ElPTy->getPointerElementType();
else
ElTy = GetElementPtrInst::getTypeAtIndex(ElTy, II);
}
@@ -928,7 +927,7 @@ promoteArguments(Function *F, function_ref<AAResults &(Function &F)> AARGetter,
SmallPtrSet<Argument *, 8> ArgsToPromote;
SmallPtrSet<Argument *, 8> ByValArgsToTransform;
for (Argument *PtrArg : PointerArgs) {
- Type *AgTy = cast<PointerType>(PtrArg->getType())->getElementType();
+ Type *AgTy = PtrArg->getType()->getPointerElementType();
// Replace sret attribute with noalias. This reduces register pressure by
// avoiding a register copy.
diff --git a/contrib/llvm-project/llvm/lib/Transforms/IPO/Attributor.cpp b/contrib/llvm-project/llvm/lib/Transforms/IPO/Attributor.cpp
index 7e729e57153c..12b8a0ef9d00 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/IPO/Attributor.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/IPO/Attributor.cpp
@@ -22,6 +22,7 @@
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/LazyValueInfo.h"
+#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/Analysis/ValueTracking.h"
@@ -202,9 +203,12 @@ bool AA::isDynamicallyUnique(Attributor &A, const AbstractAttribute &QueryingAA,
return NoRecurseAA.isAssumedNoRecurse();
}
-Constant *AA::getInitialValueForObj(Value &Obj, Type &Ty) {
+Constant *AA::getInitialValueForObj(Value &Obj, Type &Ty,
+ const TargetLibraryInfo *TLI) {
if (isa<AllocaInst>(Obj))
return UndefValue::get(&Ty);
+ if (isAllocationFn(&Obj, TLI))
+ return getInitialValueOfAllocation(&cast<CallBase>(Obj), TLI, &Ty);
auto *GV = dyn_cast<GlobalVariable>(&Obj);
if (!GV || !GV->hasLocalLinkage())
return nullptr;
@@ -316,7 +320,8 @@ bool AA::getPotentialCopiesOfStoredValue(
dbgs() << "Underlying object is a valid nullptr, giving up.\n";);
return false;
}
- if (!isa<AllocaInst>(Obj) && !isa<GlobalVariable>(Obj)) {
+ if (!isa<AllocaInst>(Obj) && !isa<GlobalVariable>(Obj) &&
+ !isNoAliasCall(Obj)) {
LLVM_DEBUG(dbgs() << "Underlying object is not supported yet: " << *Obj
<< "\n";);
return false;
@@ -741,6 +746,7 @@ void IRPosition::verify() {
assert((CBContext == nullptr) &&
"'call site argument' position must not have CallBaseContext!");
Use *U = getAsUsePtr();
+ (void)U; // Silence unused variable warning.
assert(U && "Expected use for a 'call site argument' position!");
assert(isa<CallBase>(U->getUser()) &&
"Expected call base user for a 'call site argument' position!");
@@ -999,10 +1005,11 @@ bool Attributor::isAssumedDead(const BasicBlock &BB,
return false;
}
-bool Attributor::checkForAllUses(function_ref<bool(const Use &, bool &)> Pred,
- const AbstractAttribute &QueryingAA,
- const Value &V, bool CheckBBLivenessOnly,
- DepClassTy LivenessDepClass) {
+bool Attributor::checkForAllUses(
+ function_ref<bool(const Use &, bool &)> Pred,
+ const AbstractAttribute &QueryingAA, const Value &V,
+ bool CheckBBLivenessOnly, DepClassTy LivenessDepClass,
+ function_ref<bool(const Use &OldU, const Use &NewU)> EquivalentUseCB) {
// Check the trivial case first as it catches void values.
if (V.use_empty())
@@ -1053,8 +1060,15 @@ bool Attributor::checkForAllUses(function_ref<bool(const Use &, bool &)> Pred,
<< PotentialCopies.size()
<< " potential copies instead!\n");
for (Value *PotentialCopy : PotentialCopies)
- for (const Use &U : PotentialCopy->uses())
- Worklist.push_back(&U);
+ for (const Use &CopyUse : PotentialCopy->uses()) {
+ if (EquivalentUseCB && !EquivalentUseCB(*U, CopyUse)) {
+ LLVM_DEBUG(dbgs() << "[Attributor] Potential copy was "
+ "rejected by the equivalence call back: "
+ << *CopyUse << "!\n");
+ return false;
+ }
+ Worklist.push_back(&CopyUse);
+ }
continue;
}
}
diff --git a/contrib/llvm-project/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/contrib/llvm-project/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index b977821bcaa6..76420783b2d1 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -417,12 +417,10 @@ const Value *stripAndAccumulateMinimalOffsets(
AttributorAnalysis);
}
-static const Value *getMinimalBaseOfAccessPointerOperand(
- Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
- int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
- const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
- if (!Ptr)
- return nullptr;
+static const Value *
+getMinimalBaseOfPointer(Attributor &A, const AbstractAttribute &QueryingAA,
+ const Value *Ptr, int64_t &BytesOffset,
+ const DataLayout &DL, bool AllowNonInbounds = false) {
APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
const Value *Base = stripAndAccumulateMinimalOffsets(
A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);
@@ -431,18 +429,6 @@ static const Value *getMinimalBaseOfAccessPointerOperand(
return Base;
}
-static const Value *
-getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
- const DataLayout &DL,
- bool AllowNonInbounds = false) {
- const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
- if (!Ptr)
- return nullptr;
-
- return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
- AllowNonInbounds);
-}
-
/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
@@ -810,14 +796,17 @@ struct AA::PointerInfo::OffsetAndSize : public std::pair<int64_t, int64_t> {
int64_t getSize() const { return second; }
static OffsetAndSize getUnknown() { return OffsetAndSize(Unknown, Unknown); }
+ /// Return true if offset or size are unknown.
+ bool offsetOrSizeAreUnknown() const {
+ return getOffset() == OffsetAndSize::Unknown ||
+ getSize() == OffsetAndSize::Unknown;
+ }
+
/// Return true if this offset and size pair might describe an address that
/// overlaps with \p OAS.
bool mayOverlap(const OffsetAndSize &OAS) const {
// Any unknown value and we are giving up -> overlap.
- if (OAS.getOffset() == OffsetAndSize::Unknown ||
- OAS.getSize() == OffsetAndSize::Unknown ||
- getOffset() == OffsetAndSize::Unknown ||
- getSize() == OffsetAndSize::Unknown)
+ if (offsetOrSizeAreUnknown() || OAS.offsetOrSizeAreUnknown())
return true;
// Check if one offset point is in the other interval [offset, offset+size].
@@ -1024,8 +1013,9 @@ protected:
OffsetAndSize ItOAS = It.getFirst();
if (!OAS.mayOverlap(ItOAS))
continue;
+ bool IsExact = OAS == ItOAS && !OAS.offsetOrSizeAreUnknown();
for (auto &Access : It.getSecond())
- if (!CB(Access, OAS == ItOAS))
+ if (!CB(Access, IsExact))
return false;
}
return true;
@@ -1161,27 +1151,34 @@ struct AAPointerInfoFloating : public AAPointerInfoImpl {
return true;
};
+ const auto *TLI = getAnchorScope()
+ ? A.getInfoCache().getTargetLibraryInfoForFunction(
+ *getAnchorScope())
+ : nullptr;
auto UsePred = [&](const Use &U, bool &Follow) -> bool {
Value *CurPtr = U.get();
User *Usr = U.getUser();
LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
<< *Usr << "\n");
-
- OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
+ assert(OffsetInfoMap.count(CurPtr) &&
+ "The current pointer offset should have been seeded!");
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
if (CE->isCast())
- return HandlePassthroughUser(Usr, PtrOI, Follow);
+ return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
if (CE->isCompare())
return true;
- if (!CE->isGEPWithNoNotionalOverIndexing()) {
+ if (!isa<GEPOperator>(CE)) {
LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
<< "\n");
return false;
}
}
if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
+      // Note the order here: creating the map entry for Usr might change the
+      // map, so take the reference for CurPtr (already present) afterwards.
OffsetInfo &UsrOI = OffsetInfoMap[Usr];
+ OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
UsrOI = PtrOI;
// TODO: Use range information.
@@ -1205,19 +1202,22 @@ struct AAPointerInfoFloating : public AAPointerInfoImpl {
}
UsrOI.Offset = PtrOI.Offset +
DL.getIndexedOffsetInType(
- CurPtr->getType()->getPointerElementType(), Indices);
+ GEP->getSourceElementType(), Indices);
Follow = true;
return true;
}
if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
- return HandlePassthroughUser(Usr, PtrOI, Follow);
+ return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
// For PHIs we need to take care of the recurrence explicitly as the value
// might change while we iterate through a loop. For now, we give up if
// the PHI is not invariant.
if (isa<PHINode>(Usr)) {
- // Check if the PHI is invariant (so far).
+      // Note the order here: creating the map entry for Usr might change the
+      // map, so take the reference for CurPtr (already present) afterwards.
OffsetInfo &UsrOI = OffsetInfoMap[Usr];
+ OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
+ // Check if the PHI is invariant (so far).
if (UsrOI == PtrOI)
return true;
@@ -1257,8 +1257,8 @@ struct AAPointerInfoFloating : public AAPointerInfoImpl {
if (auto *LoadI = dyn_cast<LoadInst>(Usr))
return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
- AccessKind::AK_READ, PtrOI.Offset, Changed,
- LoadI->getType());
+ AccessKind::AK_READ, OffsetInfoMap[CurPtr].Offset,
+ Changed, LoadI->getType());
if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
if (StoreI->getValueOperand() == CurPtr) {
LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
@@ -1269,18 +1269,21 @@ struct AAPointerInfoFloating : public AAPointerInfoImpl {
Optional<Value *> Content = A.getAssumedSimplified(
*StoreI->getValueOperand(), *this, UsedAssumedInformation);
return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
- PtrOI.Offset, Changed,
+ OffsetInfoMap[CurPtr].Offset, Changed,
StoreI->getValueOperand()->getType());
}
if (auto *CB = dyn_cast<CallBase>(Usr)) {
if (CB->isLifetimeStartOrEnd())
return true;
+ if (TLI && isFreeCall(CB, TLI))
+ return true;
if (CB->isArgOperand(&U)) {
unsigned ArgNo = CB->getArgOperandNo(&U);
const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
*this, IRPosition::callsite_argument(*CB, ArgNo),
DepClassTy::REQUIRED);
- Changed = translateAndAddCalleeState(A, CSArgPI, PtrOI.Offset, *CB) |
+ Changed = translateAndAddCalleeState(
+ A, CSArgPI, OffsetInfoMap[CurPtr].Offset, *CB) |
Changed;
return true;
}
@@ -1293,8 +1296,15 @@ struct AAPointerInfoFloating : public AAPointerInfoImpl {
LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
return false;
};
+ auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
+ if (OffsetInfoMap.count(NewU))
+ return OffsetInfoMap[NewU] == OffsetInfoMap[OldU];
+ OffsetInfoMap[NewU] = OffsetInfoMap[OldU];
+ return true;
+ };
if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
- /* CheckBBLivenessOnly */ true))
+ /* CheckBBLivenessOnly */ true, DepClassTy::OPTIONAL,
+ EquivalentUseCB))
return indicatePessimisticFixpoint();
LLVM_DEBUG({
@@ -2127,31 +2137,26 @@ static int64_t getKnownNonNullAndDerefBytesForUse(
return DerefAA.getKnownDereferenceableBytes();
}
+ Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
+ if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
+ return 0;
+
int64_t Offset;
const Value *Base =
- getMinimalBaseOfAccessPointerOperand(A, QueryingAA, I, Offset, DL);
- if (Base) {
- if (Base == &AssociatedValue &&
- getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
- int64_t DerefBytes =
- (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
-
- IsNonNull |= !NullPointerIsDefined;
- return std::max(int64_t(0), DerefBytes);
- }
+ getMinimalBaseOfPointer(A, QueryingAA, Loc->Ptr, Offset, DL);
+ if (Base && Base == &AssociatedValue) {
+ int64_t DerefBytes = Loc->Size.getValue() + Offset;
+ IsNonNull |= !NullPointerIsDefined;
+ return std::max(int64_t(0), DerefBytes);
}
/// Corner case when an offset is 0.
- Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
- /*AllowNonInbounds*/ true);
- if (Base) {
- if (Offset == 0 && Base == &AssociatedValue &&
- getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
- int64_t DerefBytes =
- (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
- IsNonNull |= !NullPointerIsDefined;
- return std::max(int64_t(0), DerefBytes);
- }
+ Base = GetPointerBaseWithConstantOffset(Loc->Ptr, Offset, DL,
+ /*AllowNonInbounds*/ true);
+ if (Base && Base == &AssociatedValue && Offset == 0) {
+ int64_t DerefBytes = Loc->Size.getValue();
+ IsNonNull |= !NullPointerIsDefined;
+ return std::max(int64_t(0), DerefBytes);
}
return 0;
@@ -2325,6 +2330,8 @@ struct AANoRecurseFunction final : AANoRecurseImpl {
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
AANoRecurseImpl::initialize(A);
+ // TODO: We should build a call graph ourselves to enable this in the module
+ // pass as well.
if (const Function *F = getAnchorScope())
if (A.getInfoCache().getSccSize(*F) != 1)
indicatePessimisticFixpoint();
@@ -4057,17 +4064,15 @@ struct AADereferenceableImpl : AADereferenceable {
if (!UseV->getType()->isPointerTy())
return;
- Type *PtrTy = UseV->getType();
- const DataLayout &DL = A.getDataLayout();
+ Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
+ if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
+ return;
+
int64_t Offset;
- if (const Value *Base = getBasePointerOfAccessPointerOperand(
- I, Offset, DL, /*AllowNonInbounds*/ true)) {
- if (Base == &getAssociatedValue() &&
- getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
- uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
- State.addAccessedBytes(Offset, Size);
- }
- }
+ const Value *Base = GetPointerBaseWithConstantOffset(
+ Loc->Ptr, Offset, A.getDataLayout(), /*AllowNonInbounds*/ true);
+ if (Base && Base == &getAssociatedValue())
+ State.addAccessedBytes(Offset, Loc->Size.getValue());
}
/// See followUsesInMBEC
@@ -5236,6 +5241,8 @@ struct AAValueSimplifyImpl : AAValueSimplify {
if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, AA, &L))
return false;
+ const auto *TLI =
+ A.getInfoCache().getTargetLibraryInfoForFunction(*L.getFunction());
for (Value *Obj : Objects) {
LLVM_DEBUG(dbgs() << "Visit underlying object " << *Obj << "\n");
if (isa<UndefValue>(Obj))
@@ -5250,9 +5257,7 @@ struct AAValueSimplifyImpl : AAValueSimplify {
continue;
return false;
}
- if (!isa<AllocaInst>(Obj) && !isa<GlobalVariable>(Obj))
- return false;
- Constant *InitialVal = AA::getInitialValueForObj(*Obj, *L.getType());
+ Constant *InitialVal = AA::getInitialValueForObj(*Obj, *L.getType(), TLI);
if (!InitialVal || !Union(*InitialVal))
return false;
@@ -5745,13 +5750,6 @@ struct AAHeapToStackFunction final : public AAHeapToStack {
/// The call that allocates the memory.
CallBase *const CB;
- /// The kind of allocation.
- const enum class AllocationKind {
- MALLOC,
- CALLOC,
- ALIGNED_ALLOC,
- } Kind;
-
/// The library function id for the allocation.
LibFunc LibraryFunctionId = NotLibFunc;
@@ -5808,20 +5806,17 @@ struct AAHeapToStackFunction final : public AAHeapToStack {
DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB};
return true;
}
- bool IsMalloc = isMallocLikeFn(CB, TLI);
- bool IsAlignedAllocLike = !IsMalloc && isAlignedAllocLikeFn(CB, TLI);
- bool IsCalloc =
- !IsMalloc && !IsAlignedAllocLike && isCallocLikeFn(CB, TLI);
- if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc)
- return true;
- auto Kind =
- IsMalloc ? AllocationInfo::AllocationKind::MALLOC
- : (IsCalloc ? AllocationInfo::AllocationKind::CALLOC
- : AllocationInfo::AllocationKind::ALIGNED_ALLOC);
-
- AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB, Kind};
- AllocationInfos[CB] = AI;
- TLI->getLibFunc(*CB, AI->LibraryFunctionId);
+ // To do heap to stack, we need to know that the allocation itself is
+ // removable once uses are rewritten, and that we can initialize the
+ // alloca to the same pattern as the original allocation result.
+ if (isAllocationFn(CB, TLI) && isAllocRemovable(CB, TLI)) {
+ auto *I8Ty = Type::getInt8Ty(CB->getParent()->getContext());
+ if (nullptr != getInitialValueOfAllocation(CB, TLI, I8Ty)) {
+ AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB};
+ AllocationInfos[CB] = AI;
+ TLI->getLibFunc(*CB, AI->LibraryFunctionId);
+ }
+ }
return true;
};
@@ -5917,21 +5912,22 @@ struct AAHeapToStackFunction final : public AAHeapToStack {
Optional<APInt> SizeAPI = getSize(A, *this, AI);
if (SizeAPI.hasValue()) {
Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI);
- } else if (AI.Kind == AllocationInfo::AllocationKind::CALLOC) {
- auto *Num = AI.CB->getOperand(0);
- auto *SizeT = AI.CB->getOperand(1);
- IRBuilder<> B(AI.CB);
- Size = B.CreateMul(Num, SizeT, "h2s.calloc.size");
- } else if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC) {
- Size = AI.CB->getOperand(1);
} else {
- Size = AI.CB->getOperand(0);
+ LLVMContext &Ctx = AI.CB->getContext();
+ auto &DL = A.getInfoCache().getDL();
+ ObjectSizeOpts Opts;
+ ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, Opts);
+ SizeOffsetEvalType SizeOffsetPair = Eval.compute(AI.CB);
+ assert(SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown() &&
+ cast<ConstantInt>(SizeOffsetPair.second)->isZero());
+ Size = SizeOffsetPair.first;
}
Align Alignment(1);
- if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC) {
- Optional<APInt> AlignmentAPI =
- getAPInt(A, *this, *AI.CB->getArgOperand(0));
+ if (MaybeAlign RetAlign = AI.CB->getRetAlign())
+ Alignment = max(Alignment, RetAlign);
+ if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
+ Optional<APInt> AlignmentAPI = getAPInt(A, *this, *Align);
assert(AlignmentAPI.hasValue() &&
"Expected an alignment during manifest!");
Alignment =
@@ -5947,6 +5943,11 @@ struct AAHeapToStackFunction final : public AAHeapToStack {
Alloca = new BitCastInst(Alloca, AI.CB->getType(), "malloc_bc",
Alloca->getNextNode());
+ auto *I8Ty = Type::getInt8Ty(F->getContext());
+ auto *InitVal = getInitialValueOfAllocation(AI.CB, TLI, I8Ty);
+ assert(InitVal &&
+ "Must be able to materialize initial memory state of allocation");
+
A.changeValueAfterManifest(*AI.CB, *Alloca);
if (auto *II = dyn_cast<InvokeInst>(AI.CB)) {
@@ -5957,18 +5958,13 @@ struct AAHeapToStackFunction final : public AAHeapToStack {
A.deleteAfterManifest(*AI.CB);
}
- // Zero out the allocated memory if it was a calloc.
- if (AI.Kind == AllocationInfo::AllocationKind::CALLOC) {
- auto *BI = new BitCastInst(Alloca, AI.CB->getType(), "calloc_bc",
- Alloca->getNextNode());
- Value *Ops[] = {
- BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
- ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
-
- Type *Tys[] = {BI->getType(), AI.CB->getOperand(0)->getType()};
- Module *M = F->getParent();
- Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
- CallInst::Create(Fn, Ops, "", BI->getNextNode());
+ // Initialize the alloca with the same value as used by the allocation
+    // function. We can skip undef, as the initial value of an alloca is
+    // already undef and the memset would simply end up being DSEd.
+ if (!isa<UndefValue>(InitVal)) {
+ IRBuilder<> Builder(Alloca->getNextNode());
+ // TODO: Use alignment above if align!=1
+ Builder.CreateMemSet(Alloca, InitVal, Size, None);
}
HasChanged = ChangeStatus::CHANGED;
}
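Instead of special-casing calloc, the rewritten manifest step asks the MemoryBuiltins helpers for the allocation's known initial byte value and materializes it with a memset when it is not undef. A hedged sketch of that step alone (initReplacementAlloca is a hypothetical helper; Size is the allocation size computed above):

    #include "llvm/Analysis/MemoryBuiltins.h"
    #include "llvm/IR/IRBuilder.h"

    // Copy the allocation's initial memory state into the replacement alloca.
    static void initReplacementAlloca(llvm::AllocaInst *Alloca,
                                      llvm::CallBase *OrigAlloc,
                                      const llvm::TargetLibraryInfo *TLI,
                                      llvm::Value *Size) {
      auto *I8Ty = llvm::Type::getInt8Ty(Alloca->getContext());
      llvm::Constant *InitVal =
          llvm::getInitialValueOfAllocation(OrigAlloc, TLI, I8Ty);
      if (InitVal && !llvm::isa<llvm::UndefValue>(InitVal)) {
        llvm::IRBuilder<> B(Alloca->getNextNode());
        B.CreateMemSet(Alloca, InitVal, Size, llvm::None);
      }
    }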
@@ -5990,25 +5986,18 @@ struct AAHeapToStackFunction final : public AAHeapToStack {
Optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA,
AllocationInfo &AI) {
+ auto Mapper = [&](const Value *V) -> const Value * {
+ bool UsedAssumedInformation = false;
+ if (Optional<Constant *> SimpleV =
+ A.getAssumedConstant(*V, AA, UsedAssumedInformation))
+ if (*SimpleV)
+ return *SimpleV;
+ return V;
+ };
- if (AI.Kind == AllocationInfo::AllocationKind::MALLOC)
- return getAPInt(A, AA, *AI.CB->getArgOperand(0));
-
- if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC)
- // Only if the alignment is also constant we return a size.
- return getAPInt(A, AA, *AI.CB->getArgOperand(0)).hasValue()
- ? getAPInt(A, AA, *AI.CB->getArgOperand(1))
- : llvm::None;
-
- assert(AI.Kind == AllocationInfo::AllocationKind::CALLOC &&
- "Expected only callocs are left");
- Optional<APInt> Num = getAPInt(A, AA, *AI.CB->getArgOperand(0));
- Optional<APInt> Size = getAPInt(A, AA, *AI.CB->getArgOperand(1));
- if (!Num.hasValue() || !Size.hasValue())
- return llvm::None;
- bool Overflow = false;
- Size = Size.getValue().umul_ov(Num.getValue(), Overflow);
- return Overflow ? llvm::None : Size;
+ const Function *F = getAnchorScope();
+ const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
+ return getAllocSize(AI.CB, TLI, Mapper);
}
/// Collection of all malloc-like calls in a function with associated
@@ -6025,6 +6014,7 @@ struct AAHeapToStackFunction final : public AAHeapToStack {
ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
ChangeStatus Changed = ChangeStatus::UNCHANGED;
const Function *F = getAnchorScope();
+ const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
const auto &LivenessAA =
A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE);
@@ -6239,22 +6229,24 @@ ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
if (AI.Status == AllocationInfo::INVALID)
continue;
- if (MaxHeapToStackSize == -1) {
- if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC)
- if (!getAPInt(A, *this, *AI.CB->getArgOperand(0)).hasValue()) {
- LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB
- << "\n");
- AI.Status = AllocationInfo::INVALID;
- Changed = ChangeStatus::CHANGED;
- continue;
- }
- } else {
+ if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
+ if (!getAPInt(A, *this, *Align)) {
+ // Can't generate an alloca which respects the required alignment
+ // on the allocation.
+ LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB
+ << "\n");
+ AI.Status = AllocationInfo::INVALID;
+ Changed = ChangeStatus::CHANGED;
+ continue;
+ }
+ }
+
+ if (MaxHeapToStackSize != -1) {
Optional<APInt> Size = getSize(A, *this, AI);
if (!Size.hasValue() || Size.getValue().ugt(MaxHeapToStackSize)) {
LLVM_DEBUG({
if (!Size.hasValue())
- dbgs() << "[H2S] Unknown allocation size (or alignment): " << *AI.CB
- << "\n";
+ dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n";
else
dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. "
<< MaxHeapToStackSize << "\n";
@@ -6637,9 +6629,10 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
IRBuilder<NoFolder> IRB(IP);
const DataLayout &DL = IP->getModule()->getDataLayout();
- if (Base->getType()->getPointerElementType() != PrivType)
- Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
- "", ACS.getInstruction());
+ Type *PrivPtrType = PrivType->getPointerTo();
+ if (Base->getType() != PrivPtrType)
+ Base = BitCastInst::CreateBitOrPointerCast(Base, PrivPtrType, "",
+ ACS.getInstruction());
// Traverse the type, build GEPs and loads.
if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
@@ -6781,7 +6774,7 @@ struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
if (auto *AI = dyn_cast<AllocaInst>(Obj))
if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
if (CI->isOne())
- return Obj->getType()->getPointerElementType();
+ return AI->getAllocatedType();
if (auto *Arg = dyn_cast<Argument>(Obj)) {
auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
*this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
@@ -7675,7 +7668,6 @@ void AAMemoryLocationImpl::categorizePtrValue(
for (Value *Obj : Objects) {
// TODO: recognize the TBAA used for constant accesses.
MemoryLocationsKind MLK = NO_LOCATIONS;
- assert(!isa<GEPOperator>(Obj) && "GEPs should have been stripped.");
if (isa<UndefValue>(Obj))
continue;
if (isa<Argument>(Obj)) {
@@ -8485,13 +8477,30 @@ struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
/* UseValueSimplify */ false))
return indicatePessimisticFixpoint();
- return clampStateAndIndicateChange(getState(), T);
+ // Ensure that long def-use chains can't cause circular reasoning either by
+ // introducing a cutoff below.
+ if (clampStateAndIndicateChange(getState(), T) == ChangeStatus::UNCHANGED)
+ return ChangeStatus::UNCHANGED;
+ if (++NumChanges > MaxNumChanges) {
+ LLVM_DEBUG(dbgs() << "[AAValueConstantRange] performed " << NumChanges
+ << " but only " << MaxNumChanges
+ << " are allowed to avoid cyclic reasoning.");
+ return indicatePessimisticFixpoint();
+ }
+ return ChangeStatus::CHANGED;
}
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override {
STATS_DECLTRACK_FLOATING_ATTR(value_range)
}
+
+ /// Tracker to bail after too many widening steps of the constant range.
+ int NumChanges = 0;
+
+ /// Upper bound for the number of allowed changes (=widening steps) for the
+ /// constant range before we give up.
+ static constexpr int MaxNumChanges = 5;
};
struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
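The last hunk above caps how many times AAValueConstantRangeFloating may widen its range before giving up, to break circular reasoning along long def-use chains. The bookkeeping in isolation (BoundedWidening is an illustrative stand-in for the two new members):

    // After MaxNumChanges recorded changes, the caller should fall back to a
    // pessimistic fixpoint instead of widening further.
    struct BoundedWidening {
      int NumChanges = 0;
      static constexpr int MaxNumChanges = 5;
      bool allowAnotherChange() { return ++NumChanges <= MaxNumChanges; }
    };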
diff --git a/contrib/llvm-project/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp b/contrib/llvm-project/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
index fb9ab7954e36..2a6e38b0437f 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
@@ -287,7 +287,8 @@ bool DeadArgumentEliminationPass::RemoveDeadArgumentsFromCallers(Function &Fn) {
SmallVector<unsigned, 8> UnusedArgs;
bool Changed = false;
- AttrBuilder UBImplyingAttributes = AttributeFuncs::getUBImplyingAttributes();
+ AttributeMask UBImplyingAttributes =
+ AttributeFuncs::getUBImplyingAttributes();
for (Argument &Arg : Fn.args()) {
if (!Arg.hasSwiftErrorAttr() && Arg.use_empty() &&
!Arg.hasPassPointeeByValueCopyAttr()) {
@@ -838,7 +839,7 @@ bool DeadArgumentEliminationPass::RemoveDeadStuffFromFunction(Function *F) {
assert(NRetTy && "No new return type found?");
// The existing function return attributes.
- AttrBuilder RAttrs(PAL.getRetAttrs());
+ AttrBuilder RAttrs(F->getContext(), PAL.getRetAttrs());
// Remove any incompatible attributes, but only if we removed all return
// values. Otherwise, ensure that we don't have any conflicting attributes
@@ -889,7 +890,7 @@ bool DeadArgumentEliminationPass::RemoveDeadStuffFromFunction(Function *F) {
// Adjust the call return attributes in case the function was changed to
// return void.
- AttrBuilder RAttrs(CallPAL.getRetAttrs());
+ AttrBuilder RAttrs(F->getContext(), CallPAL.getRetAttrs());
RAttrs.remove(AttributeFuncs::typeIncompatible(NRetTy));
AttributeSet RetAttrs = AttributeSet::get(F->getContext(), RAttrs);
@@ -912,7 +913,7 @@ bool DeadArgumentEliminationPass::RemoveDeadStuffFromFunction(Function *F) {
// this is not an expected case anyway
ArgAttrVec.push_back(AttributeSet::get(
F->getContext(),
- AttrBuilder(Attrs).removeAttribute(Attribute::Returned)));
+ AttrBuilder(F->getContext(), Attrs).removeAttribute(Attribute::Returned)));
} else {
// Otherwise, use the original attributes.
ArgAttrVec.push_back(Attrs);
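Attribute removal sets are now expressed with the new AttributeMask type, while AttrBuilder (which now takes a context) is reserved for building attributes, as also seen in the FunctionAttrs hunk below. A small sketch (returnedMask is illustrative):

    #include "llvm/IR/Attributes.h"

    // Build a removal mask containing just the 'returned' attribute.
    static llvm::AttributeMask returnedMask() {
      llvm::AttributeMask M;
      M.addAttribute(llvm::Attribute::Returned);
      return M;
    }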
diff --git a/contrib/llvm-project/llvm/lib/Transforms/IPO/FunctionAttrs.cpp b/contrib/llvm-project/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
index 321d4a19a585..213a998d5bba 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -133,7 +133,7 @@ static MemoryAccessKind checkFunctionMemoryAccess(Function &F, bool ThisBody,
if (AliasAnalysis::onlyReadsMemory(MRB))
return MAK_ReadOnly;
- if (AliasAnalysis::doesNotReadMemory(MRB))
+ if (AliasAnalysis::onlyWritesMemory(MRB))
return MAK_WriteOnly;
// Conservatively assume it reads and writes to memory.
@@ -295,13 +295,13 @@ static void addReadAttrs(const SCCNodeSet &SCCNodes, AARGetterT &&AARGetter,
// No change.
continue;
- if (F->doesNotReadMemory() && WritesMemory)
+ if (F->onlyWritesMemory() && WritesMemory)
continue;
Changed.insert(F);
// Clear out any existing attributes.
- AttrBuilder AttrsToRemove;
+ AttributeMask AttrsToRemove;
AttrsToRemove.addAttribute(Attribute::ReadOnly);
AttrsToRemove.addAttribute(Attribute::ReadNone);
AttrsToRemove.addAttribute(Attribute::WriteOnly);
@@ -720,10 +720,16 @@ determinePointerAccessAttrs(Argument *A,
// The accessors used on call site here do the right thing for calls and
// invokes with operand bundles.
- if (!CB.onlyReadsMemory() && !CB.onlyReadsMemory(UseIndex))
- return Attribute::None;
- if (!CB.doesNotAccessMemory(UseIndex))
+ if (CB.doesNotAccessMemory(UseIndex)) {
+ /* nop */
+ } else if (CB.onlyReadsMemory() || CB.onlyReadsMemory(UseIndex)) {
IsRead = true;
+ } else if (CB.hasFnAttr(Attribute::WriteOnly) ||
+ CB.dataOperandHasImpliedAttr(UseIndex, Attribute::WriteOnly)) {
+ IsWrite = true;
+ } else {
+ return Attribute::None;
+ }
break;
}
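doesNotReadMemory() has been renamed to onlyWritesMemory() on both Function and the alias-analysis mod/ref helpers, and determinePointerAccessAttrs now classifies call uses into none/read/write explicitly. The rename in isolation (isWriteOnlyFunction is illustrative):

    #include "llvm/IR/Function.h"

    // Equivalent to the old F.doesNotReadMemory() query.
    static bool isWriteOnlyFunction(const llvm::Function &F) {
      return F.onlyWritesMemory();
    }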
diff --git a/contrib/llvm-project/llvm/lib/Transforms/IPO/FunctionSpecialization.cpp b/contrib/llvm-project/llvm/lib/Transforms/IPO/FunctionSpecialization.cpp
index 2425646455bd..6c3cc3914337 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/IPO/FunctionSpecialization.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/IPO/FunctionSpecialization.cpp
@@ -6,15 +6,24 @@
//
//===----------------------------------------------------------------------===//
//
-// This specialises functions with constant parameters (e.g. functions,
-// globals). Constant parameters like function pointers and constant globals
-// are propagated to the callee by specializing the function.
+// This specialises functions with constant parameters. Constant parameters
+// like function pointers and constant globals are propagated to the callee by
+// specializing the function. The main benefit of this pass at the moment is
+// that indirect calls are transformed into direct calls, which provides inline
+// opportunities that the inliner would not have been able to achieve. That's
+// why function specialisation is run before the inliner in the optimisation
+// pipeline; that is by design. Otherwise, we would only benefit from constant
+// passing, which is a valid use-case too, but hasn't been explored much in
+// terms of performance uplifts, cost-model and compile-time impact.
//
// Current limitations:
-// - It does not yet handle integer ranges.
+// - It does not yet handle integer ranges. We do support "literal constants",
+// but that's off by default under an option.
// - Only 1 argument per function is specialised,
-// - The cost-model could be further looked into,
-// - We are not yet caching analysis results.
+// - The cost-model could be further looked into (it mainly focuses on inlining
+// benefits),
+// - We are not yet caching analysis results, but profiling and checking where
+// extra compile time is spent didn't suggest this to be a problem.
//
// Ideas:
// - With a function specialization attribute for arguments, we could have
@@ -30,8 +39,12 @@
// https://reviews.llvm.org/D106426 for details. Perhaps there is a
// compile-time friendlier way to control/limit the number of specialisations
// for recursive functions.
-// - Don't transform the function if there is no function specialization
-// happens.
+// - Don't transform the function if function specialization does not trigger;
+// the SCCPSolver may make IR changes.
+//
+// References:
+// - 2021 LLVM Dev Mtg “Introducing function specialisation, and can we enable
+// it by default?”, https://www.youtube.com/watch?v=zJiCjeXgV5Q
//
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/contrib/llvm-project/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index b1f3ff15c97b..d3cac3efce86 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -303,11 +303,11 @@ static bool CleanupConstantGlobalUsers(GlobalVariable *GV,
else if (auto *GEP = dyn_cast<GEPOperator>(U))
append_range(WorkList, GEP->users());
else if (auto *LI = dyn_cast<LoadInst>(U)) {
- // A load from zeroinitializer is always zeroinitializer, regardless of
- // any applied offset.
+ // A load from a uniform value is always the same, regardless of any
+ // applied offset.
Type *Ty = LI->getType();
- if (Init->isNullValue() && !Ty->isX86_MMXTy() && !Ty->isX86_AMXTy()) {
- LI->replaceAllUsesWith(Constant::getNullValue(Ty));
+ if (Constant *Res = ConstantFoldLoadFromUniformValue(Init, Ty)) {
+ LI->replaceAllUsesWith(Res);
EraseFromParent(LI);
continue;
}
@@ -337,107 +337,68 @@ static bool CleanupConstantGlobalUsers(GlobalVariable *GV,
return Changed;
}
-static bool isSafeSROAElementUse(Value *V);
-
-/// Return true if the specified GEP is a safe user of a derived
-/// expression from a global that we want to SROA.
-static bool isSafeSROAGEP(User *U) {
- // Check to see if this ConstantExpr GEP is SRA'able. In particular, we
- // don't like < 3 operand CE's, and we don't like non-constant integer
- // indices. This enforces that all uses are 'gep GV, 0, C, ...' for some
- // value of C.
- if (U->getNumOperands() < 3 || !isa<Constant>(U->getOperand(1)) ||
- !cast<Constant>(U->getOperand(1))->isNullValue())
- return false;
-
- gep_type_iterator GEPI = gep_type_begin(U), E = gep_type_end(U);
- ++GEPI; // Skip over the pointer index.
-
- // For all other level we require that the indices are constant and inrange.
- // In particular, consider: A[0][i]. We cannot know that the user isn't doing
- // invalid things like allowing i to index an out-of-range subscript that
- // accesses A[1]. This can also happen between different members of a struct
- // in llvm IR.
- for (; GEPI != E; ++GEPI) {
- if (GEPI.isStruct())
+/// Look at all uses of the global and determine which (offset, type) pairs it
+/// can be split into.
+static bool collectSRATypes(DenseMap<uint64_t, Type *> &Types, GlobalValue *GV,
+ const DataLayout &DL) {
+ SmallVector<Use *, 16> Worklist;
+ SmallPtrSet<Use *, 16> Visited;
+ auto AppendUses = [&](Value *V) {
+ for (Use &U : V->uses())
+ if (Visited.insert(&U).second)
+ Worklist.push_back(&U);
+ };
+ AppendUses(GV);
+ while (!Worklist.empty()) {
+ Use *U = Worklist.pop_back_val();
+ User *V = U->getUser();
+ if (isa<BitCastOperator>(V) || isa<AddrSpaceCastOperator>(V)) {
+ AppendUses(V);
continue;
+ }
- ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
- if (!IdxVal || (GEPI.isBoundedSequential() &&
- IdxVal->getZExtValue() >= GEPI.getSequentialNumElements()))
- return false;
- }
-
- return llvm::all_of(U->users(), isSafeSROAElementUse);
-}
-
-/// Return true if the specified instruction is a safe user of a derived
-/// expression from a global that we want to SROA.
-static bool isSafeSROAElementUse(Value *V) {
- // We might have a dead and dangling constant hanging off of here.
- if (Constant *C = dyn_cast<Constant>(V))
- return isSafeToDestroyConstant(C);
-
- Instruction *I = dyn_cast<Instruction>(V);
- if (!I) return false;
+ if (auto *GEP = dyn_cast<GEPOperator>(V)) {
+ if (!GEP->hasAllConstantIndices())
+ return false;
+ AppendUses(V);
+ continue;
+ }
- // Loads are ok.
- if (isa<LoadInst>(I)) return true;
+ if (Value *Ptr = getLoadStorePointerOperand(V)) {
+ // This is storing the global address into somewhere, not storing into
+ // the global.
+      // This is storing the global address somewhere, not storing into
+ return false;
- // Stores *to* the pointer are ok.
- if (StoreInst *SI = dyn_cast<StoreInst>(I))
- return SI->getOperand(0) != V;
+ APInt Offset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
+ Ptr = Ptr->stripAndAccumulateConstantOffsets(DL, Offset,
+ /* AllowNonInbounds */ true);
+ if (Ptr != GV || Offset.getActiveBits() >= 64)
+ return false;
- // Otherwise, it must be a GEP. Check it and its users are safe to SRA.
- return isa<GetElementPtrInst>(I) && isSafeSROAGEP(I);
-}
+ // TODO: We currently require that all accesses at a given offset must
+ // use the same type. This could be relaxed.
+ Type *Ty = getLoadStoreType(V);
+ auto It = Types.try_emplace(Offset.getZExtValue(), Ty).first;
+ if (Ty != It->second)
+ return false;
+ continue;
+ }
-/// Look at all uses of the global and decide whether it is safe for us to
-/// perform this transformation.
-static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
- for (User *U : GV->users()) {
- // The user of the global must be a GEP Inst or a ConstantExpr GEP.
- if (!isa<GetElementPtrInst>(U) &&
- (!isa<ConstantExpr>(U) ||
- cast<ConstantExpr>(U)->getOpcode() != Instruction::GetElementPtr))
- return false;
+ // Ignore dead constant users.
+ if (auto *C = dyn_cast<Constant>(V)) {
+ if (!isSafeToDestroyConstant(C))
+ return false;
+ continue;
+ }
- // Check the gep and it's users are safe to SRA
- if (!isSafeSROAGEP(U))
- return false;
+ // Unknown user.
+ return false;
}
return true;
}
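collectSRATypes records, for every constant byte offset that is loaded or stored, the single access type seen there, and gives up as soon as two accesses at the same offset disagree or an unknown user appears. A rough standalone model of that bookkeeping, using plain standard containers and type-name strings in place of DenseMap and Type*:

#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

// One observed load/store: byte offset into the global plus the access type,
// modeled here as a type name string.
struct Access { uint64_t Offset; std::string Ty; };

// Mirror of the "one type per offset" rule: try_emplace the first type seen at
// an offset and reject the global if a later access disagrees.
static bool collectSRATypesModel(std::map<uint64_t, std::string> &Types,
                                 const std::vector<Access> &Accesses) {
  for (const Access &A : Accesses) {
    auto It = Types.try_emplace(A.Offset, A.Ty).first;
    if (It->second != A.Ty)
      return false;   // conflicting types at the same offset: give up
  }
  return true;
}

int main() {
  std::map<uint64_t, std::string> Types;
  // e.g. a { i32, i32, double }-style layout accessed at offsets 0, 4 and 8.
  bool OK = collectSRATypesModel(
      Types, {{0, "i32"}, {4, "i32"}, {8, "double"}, {0, "i32"}});
  std::cout << (OK ? "splittable" : "not splittable") << " into " << Types.size()
            << " pieces\n";   // splittable into 3 pieces
}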
-static bool IsSRASequential(Type *T) {
- return isa<ArrayType>(T) || isa<VectorType>(T);
-}
-static uint64_t GetSRASequentialNumElements(Type *T) {
- if (ArrayType *AT = dyn_cast<ArrayType>(T))
- return AT->getNumElements();
- return cast<FixedVectorType>(T)->getNumElements();
-}
-static Type *GetSRASequentialElementType(Type *T) {
- if (ArrayType *AT = dyn_cast<ArrayType>(T))
- return AT->getElementType();
- return cast<VectorType>(T)->getElementType();
-}
-static bool CanDoGlobalSRA(GlobalVariable *GV) {
- Constant *Init = GV->getInitializer();
-
- if (isa<StructType>(Init->getType())) {
- // nothing to check
- } else if (IsSRASequential(Init->getType())) {
- if (GetSRASequentialNumElements(Init->getType()) > 16 &&
- GV->hasNUsesOrMore(16))
- return false; // It's not worth it.
- } else
- return false;
-
- return GlobalUsersSafeToSRA(GV);
-}
-
/// Copy over the debug info for a variable to its SRA replacements.
static void transferSRADebugInfo(GlobalVariable *GV, GlobalVariable *NGV,
uint64_t FragmentOffsetInBits,
@@ -468,161 +429,140 @@ static void transferSRADebugInfo(GlobalVariable *GV, GlobalVariable *NGV,
/// transformation is safe already. We return the first global variable we
/// insert so that the caller can reprocess it.
static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
- // Make sure this global only has simple uses that we can SRA.
- if (!CanDoGlobalSRA(GV))
+ assert(GV->hasLocalLinkage());
+
+ // Collect types to split into.
+ DenseMap<uint64_t, Type *> Types;
+ if (!collectSRATypes(Types, GV, DL) || Types.empty())
return nullptr;
- assert(GV->hasLocalLinkage());
- Constant *Init = GV->getInitializer();
- Type *Ty = Init->getType();
- uint64_t VarSize = DL.getTypeSizeInBits(Ty);
+ // Make sure we don't SRA back to the same type.
+ if (Types.size() == 1 && Types.begin()->second == GV->getValueType())
+ return nullptr;
- std::map<unsigned, GlobalVariable *> NewGlobals;
+ // Don't perform SRA if we would have to split into many globals.
+ if (Types.size() > 16)
+ return nullptr;
- // Get the alignment of the global, either explicit or target-specific.
- Align StartAlignment =
- DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getType());
-
- // Loop over all users and create replacement variables for used aggregate
- // elements.
- for (User *GEP : GV->users()) {
- assert(((isa<ConstantExpr>(GEP) && cast<ConstantExpr>(GEP)->getOpcode() ==
- Instruction::GetElementPtr) ||
- isa<GetElementPtrInst>(GEP)) &&
- "NonGEP CE's are not SRAable!");
-
- // Ignore the 1th operand, which has to be zero or else the program is quite
- // broken (undefined). Get the 2nd operand, which is the structure or array
- // index.
- unsigned ElementIdx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
- if (NewGlobals.count(ElementIdx) == 1)
- continue; // we`ve already created replacement variable
- assert(NewGlobals.count(ElementIdx) == 0);
-
- Type *ElTy = nullptr;
- if (StructType *STy = dyn_cast<StructType>(Ty))
- ElTy = STy->getElementType(ElementIdx);
- else
- ElTy = GetSRASequentialElementType(Ty);
- assert(ElTy);
+ // Sort by offset.
+ SmallVector<std::pair<uint64_t, Type *>, 16> TypesVector;
+ append_range(TypesVector, Types);
+ sort(TypesVector,
+ [](const auto &A, const auto &B) { return A.first < B.first; });
- Constant *In = Init->getAggregateElement(ElementIdx);
- assert(In && "Couldn't get element of initializer?");
+ // Check that the types are non-overlapping.
+ uint64_t Offset = 0;
+ for (const auto &Pair : TypesVector) {
+ // Overlaps with previous type.
+ if (Pair.first < Offset)
+ return nullptr;
- GlobalVariable *NGV = new GlobalVariable(
- ElTy, false, GlobalVariable::InternalLinkage, In,
- GV->getName() + "." + Twine(ElementIdx), GV->getThreadLocalMode(),
- GV->getType()->getAddressSpace());
- NGV->setExternallyInitialized(GV->isExternallyInitialized());
- NGV->copyAttributesFrom(GV);
- NewGlobals.insert(std::make_pair(ElementIdx, NGV));
-
- if (StructType *STy = dyn_cast<StructType>(Ty)) {
- const StructLayout &Layout = *DL.getStructLayout(STy);
-
- // Calculate the known alignment of the field. If the original aggregate
- // had 256 byte alignment for example, something might depend on that:
- // propagate info to each field.
- uint64_t FieldOffset = Layout.getElementOffset(ElementIdx);
- Align NewAlign = commonAlignment(StartAlignment, FieldOffset);
- if (NewAlign > DL.getABITypeAlign(STy->getElementType(ElementIdx)))
- NGV->setAlignment(NewAlign);
-
- // Copy over the debug info for the variable.
- uint64_t Size = DL.getTypeAllocSizeInBits(NGV->getValueType());
- uint64_t FragmentOffsetInBits = Layout.getElementOffsetInBits(ElementIdx);
- transferSRADebugInfo(GV, NGV, FragmentOffsetInBits, Size, VarSize);
- } else {
- uint64_t EltSize = DL.getTypeAllocSize(ElTy);
- Align EltAlign = DL.getABITypeAlign(ElTy);
- uint64_t FragmentSizeInBits = DL.getTypeAllocSizeInBits(ElTy);
-
- // Calculate the known alignment of the field. If the original aggregate
- // had 256 byte alignment for example, something might depend on that:
- // propagate info to each field.
- Align NewAlign = commonAlignment(StartAlignment, EltSize * ElementIdx);
- if (NewAlign > EltAlign)
- NGV->setAlignment(NewAlign);
- transferSRADebugInfo(GV, NGV, FragmentSizeInBits * ElementIdx,
- FragmentSizeInBits, VarSize);
- }
+ Offset = Pair.first + DL.getTypeAllocSize(Pair.second);
}
- if (NewGlobals.empty())
+ // Some accesses go beyond the end of the global, don't bother.
+ if (Offset > DL.getTypeAllocSize(GV->getValueType()))
return nullptr;
- Module::GlobalListType &Globals = GV->getParent()->getGlobalList();
- for (auto NewGlobalVar : NewGlobals)
- Globals.push_back(NewGlobalVar.second);
+ // Collect initializers for new globals.
+ Constant *OrigInit = GV->getInitializer();
+ DenseMap<uint64_t, Constant *> Initializers;
+ for (const auto &Pair : Types) {
+ Constant *NewInit = ConstantFoldLoadFromConst(OrigInit, Pair.second,
+ APInt(64, Pair.first), DL);
+ if (!NewInit) {
+ LLVM_DEBUG(dbgs() << "Global SRA: Failed to evaluate initializer of "
+ << *GV << " with type " << *Pair.second << " at offset "
+ << Pair.first << "\n");
+ return nullptr;
+ }
+ Initializers.insert({Pair.first, NewInit});
+ }
LLVM_DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV << "\n");
- Constant *NullInt =Constant::getNullValue(Type::getInt32Ty(GV->getContext()));
+ // Get the alignment of the global, either explicit or target-specific.
+ Align StartAlignment =
+ DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
+ uint64_t VarSize = DL.getTypeSizeInBits(GV->getValueType());
+
+ // Create replacement globals.
+ DenseMap<uint64_t, GlobalVariable *> NewGlobals;
+ unsigned NameSuffix = 0;
+ for (auto &Pair : TypesVector) {
+ uint64_t Offset = Pair.first;
+ Type *Ty = Pair.second;
+ GlobalVariable *NGV = new GlobalVariable(
+ *GV->getParent(), Ty, false, GlobalVariable::InternalLinkage,
+ Initializers[Offset], GV->getName() + "." + Twine(NameSuffix++), GV,
+ GV->getThreadLocalMode(), GV->getAddressSpace());
+ NGV->copyAttributesFrom(GV);
+ NewGlobals.insert({Offset, NGV});
+
+ // Calculate the known alignment of the field. If the original aggregate
+ // had 256 byte alignment for example, something might depend on that:
+ // propagate info to each field.
+ Align NewAlign = commonAlignment(StartAlignment, Offset);
+ if (NewAlign > DL.getABITypeAlign(Ty))
+ NGV->setAlignment(NewAlign);
+
+ // Copy over the debug info for the variable.
+ transferSRADebugInfo(GV, NGV, Offset * 8, DL.getTypeAllocSizeInBits(Ty),
+ VarSize);
+ }
+
+ // Replace uses of the original global with uses of the new global.
+ SmallVector<Value *, 16> Worklist;
+ SmallPtrSet<Value *, 16> Visited;
+ SmallVector<WeakTrackingVH, 16> DeadInsts;
+ auto AppendUsers = [&](Value *V) {
+ for (User *U : V->users())
+ if (Visited.insert(U).second)
+ Worklist.push_back(U);
+ };
+ AppendUsers(GV);
+ while (!Worklist.empty()) {
+ Value *V = Worklist.pop_back_val();
+ if (isa<BitCastOperator>(V) || isa<AddrSpaceCastOperator>(V) ||
+ isa<GEPOperator>(V)) {
+ AppendUsers(V);
+ if (isa<Instruction>(V))
+ DeadInsts.push_back(V);
+ continue;
+ }
- // Loop over all of the uses of the global, replacing the constantexpr geps,
- // with smaller constantexpr geps or direct references.
- while (!GV->use_empty()) {
- User *GEP = GV->user_back();
- assert(((isa<ConstantExpr>(GEP) &&
- cast<ConstantExpr>(GEP)->getOpcode()==Instruction::GetElementPtr)||
- isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!");
-
- // Ignore the 1th operand, which has to be zero or else the program is quite
- // broken (undefined). Get the 2nd operand, which is the structure or array
- // index.
- unsigned ElementIdx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
- assert(NewGlobals.count(ElementIdx) == 1);
-
- Value *NewPtr = NewGlobals[ElementIdx];
- Type *NewTy = NewGlobals[ElementIdx]->getValueType();
-
- // Form a shorter GEP if needed.
- if (GEP->getNumOperands() > 3) {
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GEP)) {
- SmallVector<Constant*, 8> Idxs;
- Idxs.push_back(NullInt);
- for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i)
- Idxs.push_back(CE->getOperand(i));
- NewPtr =
- ConstantExpr::getGetElementPtr(NewTy, cast<Constant>(NewPtr), Idxs);
+ if (Value *Ptr = getLoadStorePointerOperand(V)) {
+ APInt Offset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
+ Ptr = Ptr->stripAndAccumulateConstantOffsets(DL, Offset,
+ /* AllowNonInbounds */ true);
+ assert(Ptr == GV && "Load/store must be from/to global");
+ GlobalVariable *NGV = NewGlobals[Offset.getZExtValue()];
+ assert(NGV && "Must have replacement global for this offset");
+
+ // Update the pointer operand and recalculate alignment.
+ Align PrefAlign = DL.getPrefTypeAlign(getLoadStoreType(V));
+ Align NewAlign =
+ getOrEnforceKnownAlignment(NGV, PrefAlign, DL, cast<Instruction>(V));
+
+ if (auto *LI = dyn_cast<LoadInst>(V)) {
+ LI->setOperand(0, NGV);
+ LI->setAlignment(NewAlign);
} else {
- GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP);
- SmallVector<Value*, 8> Idxs;
- Idxs.push_back(NullInt);
- for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i)
- Idxs.push_back(GEPI->getOperand(i));
- NewPtr = GetElementPtrInst::Create(
- NewTy, NewPtr, Idxs, GEPI->getName() + "." + Twine(ElementIdx),
- GEPI);
- }
- }
- GEP->replaceAllUsesWith(NewPtr);
-
- // We changed the pointer of any memory access user. Recalculate alignments.
- for (User *U : NewPtr->users()) {
- if (auto *Load = dyn_cast<LoadInst>(U)) {
- Align PrefAlign = DL.getPrefTypeAlign(Load->getType());
- Align NewAlign = getOrEnforceKnownAlignment(Load->getPointerOperand(),
- PrefAlign, DL, Load);
- Load->setAlignment(NewAlign);
- }
- if (auto *Store = dyn_cast<StoreInst>(U)) {
- Align PrefAlign =
- DL.getPrefTypeAlign(Store->getValueOperand()->getType());
- Align NewAlign = getOrEnforceKnownAlignment(Store->getPointerOperand(),
- PrefAlign, DL, Store);
- Store->setAlignment(NewAlign);
+ auto *SI = cast<StoreInst>(V);
+ SI->setOperand(1, NGV);
+ SI->setAlignment(NewAlign);
}
+ continue;
}
- if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(GEP))
- GEPI->eraseFromParent();
- else
- cast<ConstantExpr>(GEP)->destroyConstant();
+ assert(isa<Constant>(V) && isSafeToDestroyConstant(cast<Constant>(V)) &&
+ "Other users can only be dead constants");
}
- // Delete the old global, now that it is dead.
- Globals.erase(GV);
+ // Delete old instructions and global.
+ RecursivelyDeleteTriviallyDeadInstructions(DeadInsts);
+ GV->removeDeadConstantUsers();
+ GV->eraseFromParent();
++NumSRA;
assert(NewGlobals.size() > 0);
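Once the (offset, type) pairs are known, SRAGlobal sorts them and only splits the global when the pieces are pairwise disjoint and stay within the original allocation. A small standalone sketch of that check, with plain sizes standing in for DL.getTypeAllocSize:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

// One candidate replacement global: (offset into the old global, alloc size of
// the piece).
using Piece = std::pair<uint64_t, uint64_t>;

// Mirror of the overlap check in SRAGlobal: sort by offset, then walk the
// pieces keeping a running end offset; any piece that starts before the
// previous one ends makes the split invalid.
static bool piecesAreDisjoint(std::vector<Piece> Pieces, uint64_t GlobalSize) {
  std::sort(Pieces.begin(), Pieces.end());
  uint64_t End = 0;
  for (const Piece &P : Pieces) {
    if (P.first < End)
      return false;            // overlaps the previous piece
    End = P.first + P.second;
  }
  return End <= GlobalSize;    // no access past the end of the old global
}

int main() {
  std::cout << piecesAreDisjoint({{0, 4}, {4, 4}, {8, 8}}, 16) << "\n"; // 1
  std::cout << piecesAreDisjoint({{0, 8}, {4, 4}}, 16) << "\n";         // 0 (overlap)
}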
@@ -677,7 +617,7 @@ static bool AllUsesOfValueWillTrapIfNull(const Value *V,
"Should be GlobalVariable");
// This and only this kind of non-signed ICmpInst is to be replaced with
// the comparing of the value of the created global init bool later in
- // optimizeGlobalAddressOfMalloc for the global variable.
+ // optimizeGlobalAddressOfAllocation for the global variable.
} else {
//cerr << "NONTRAPPING USE: " << *U;
return false;
@@ -895,29 +835,36 @@ static void ConstantPropUsersOf(Value *V, const DataLayout &DL,
/// to actually DO the malloc. Instead, turn the malloc into a global, and any
/// loads of GV as uses of the new global.
static GlobalVariable *
-OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, CallInst *CI, Type *AllocTy,
- ConstantInt *NElements, const DataLayout &DL,
- TargetLibraryInfo *TLI) {
+OptimizeGlobalAddressOfAllocation(GlobalVariable *GV, CallInst *CI,
+ uint64_t AllocSize, Constant *InitVal,
+ const DataLayout &DL,
+ TargetLibraryInfo *TLI) {
LLVM_DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI
<< '\n');
- Type *GlobalType;
- if (NElements->getZExtValue() == 1)
- GlobalType = AllocTy;
- else
- // If we have an array allocation, the global variable is of an array.
- GlobalType = ArrayType::get(AllocTy, NElements->getZExtValue());
+ // Create global of type [AllocSize x i8].
+ Type *GlobalType = ArrayType::get(Type::getInt8Ty(GV->getContext()),
+ AllocSize);
- // Create the new global variable. The contents of the malloc'd memory is
- // undefined, so initialize with an undef value.
+ // Create the new global variable. The contents of the allocated memory is
+ // undefined initially, so initialize with an undef value.
GlobalVariable *NewGV = new GlobalVariable(
*GV->getParent(), GlobalType, false, GlobalValue::InternalLinkage,
UndefValue::get(GlobalType), GV->getName() + ".body", nullptr,
GV->getThreadLocalMode());
- // If there are bitcast users of the malloc (which is typical, usually we have
- // a malloc + bitcast) then replace them with uses of the new global. Update
- // other users to use the global as well.
+ // Initialize the global at the point of the original call. Note that this
+ // is a different point from the initialization referred to below for the
+  // nullability handling. Subtlety: We have not proven the original global was
+  // only initialized once. As such, we cannot fold this into the initializer
+  // of the new global as we may need to re-init the storage multiple times.
+ if (!isa<UndefValue>(InitVal)) {
+ IRBuilder<> Builder(CI->getNextNode());
+ // TODO: Use alignment above if align!=1
+ Builder.CreateMemSet(NewGV, InitVal, AllocSize, None);
+ }
+
+ // Update users of the allocation to use the new global instead.
BitCastInst *TheBC = nullptr;
while (!CI->use_empty()) {
Instruction *User = cast<Instruction>(CI->user_back());
@@ -1009,7 +956,7 @@ OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, CallInst *CI, Type *AllocTy,
} else
GV->getParent()->getGlobalList().insert(GV->getIterator(), InitBool);
- // Now the GV is dead, nuke it and the malloc..
+  // Now the GV is dead, nuke it and the allocation.
GV->eraseFromParent();
CI->eraseFromParent();
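A hand-written, source-level analogue of what OptimizeGlobalAddressOfAllocation aims for may help: a once-stored, fixed-size, removable allocation is replaced with an internal byte-array global, and any known initial fill is re-applied with a memset at the original call site because that store may run more than once. The names and sizes below are invented for illustration; this is not output of the pass:

#include <cstdint>
#include <cstring>
#include <iostream>

// Before: a pointer global initialized exactly once from a fixed-size,
// removable allocation whose contents start out as a known fill value.
//   static uint8_t *Buf;
//   void init() { Buf = (uint8_t *)calloc(256, 1); }
//
// After (sketched by hand): the heap object becomes an internal array-typed
// global, and the calloc's zero fill is re-applied with a memset at the
// original call site. Per the code below, allocations of 2048 bytes or more
// are left alone.
static uint8_t BufBody[256];
static uint8_t *Buf;

void init() {
  std::memset(BufBody, 0, sizeof(BufBody)); // re-initialize each time init runs
  Buf = BufBody;
}

int main() {
  init();
  Buf[10] = 42;
  std::cout << int(Buf[10]) << " " << int(Buf[11]) << "\n"; // 42 0
}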
@@ -1066,15 +1013,33 @@ valueIsOnlyUsedLocallyOrStoredToOneGlobal(const CallInst *CI,
return true;
}
-/// This function is called when we see a pointer global variable with a single
-/// value stored it that is a malloc or cast of malloc.
-static bool tryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, CallInst *CI,
- Type *AllocTy,
- AtomicOrdering Ordering,
- const DataLayout &DL,
- TargetLibraryInfo *TLI) {
- // If this is a malloc of an abstract type, don't touch it.
- if (!AllocTy->isSized())
+/// If we have a global that is only initialized with a fixed size allocation
+/// try to transform the program to use global memory instead of heap
+/// allocated memory. This eliminates dynamic allocation, avoids an indirection
+/// accessing the data, and exposes the resultant global to further GlobalOpt.
+static bool tryToOptimizeStoreOfAllocationToGlobal(GlobalVariable *GV,
+ CallInst *CI,
+ AtomicOrdering Ordering,
+ const DataLayout &DL,
+ TargetLibraryInfo *TLI) {
+ if (!isAllocRemovable(CI, TLI))
+    // Must be able to remove the call when we are done.
+ return false;
+
+ Type *Int8Ty = Type::getInt8Ty(CI->getFunction()->getContext());
+ Constant *InitVal = getInitialValueOfAllocation(CI, TLI, Int8Ty);
+ if (!InitVal)
+ // Must be able to emit a memset for initialization
+ return false;
+
+ uint64_t AllocSize;
+ if (!getObjectSize(CI, AllocSize, DL, TLI, ObjectSizeOpts()))
+ return false;
+
+ // Restrict this transformation to only working on small allocations
+ // (2048 bytes currently), as we don't want to introduce a 16M global or
+ // something.
+ if (AllocSize >= 2048)
return false;
// We can't optimize this global unless all uses of it are *known* to be
@@ -1093,25 +1058,8 @@ static bool tryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, CallInst *CI,
if (!valueIsOnlyUsedLocallyOrStoredToOneGlobal(CI, GV))
return false;
- // If we have a global that is only initialized with a fixed size malloc,
- // transform the program to use global memory instead of malloc'd memory.
- // This eliminates dynamic allocation, avoids an indirection accessing the
- // data, and exposes the resultant global to further GlobalOpt.
- // We cannot optimize the malloc if we cannot determine malloc array size.
- Value *NElems = getMallocArraySize(CI, DL, TLI, true);
- if (!NElems)
- return false;
-
- if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
- // Restrict this transformation to only working on small allocations
- // (2048 bytes currently), as we don't want to introduce a 16M global or
- // something.
- if (NElements->getZExtValue() * DL.getTypeAllocSize(AllocTy) < 2048) {
- OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, DL, TLI);
- return true;
- }
-
- return false;
+ OptimizeGlobalAddressOfAllocation(GV, CI, AllocSize, InitVal, DL, TLI);
+ return true;
}
// Try to optimize globals based on the knowledge that only one value (besides
@@ -1140,12 +1088,12 @@ optimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
// Optimize away any trapping uses of the loaded value.
if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, DL, GetTLI))
return true;
- } else if (CallInst *CI = extractMallocCall(StoredOnceVal, GetTLI)) {
- auto *TLI = &GetTLI(*CI->getFunction());
- Type *MallocType = getMallocAllocatedType(CI, TLI);
- if (MallocType && tryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType,
- Ordering, DL, TLI))
- return true;
+ } else if (isAllocationFn(StoredOnceVal, GetTLI)) {
+ if (auto *CI = dyn_cast<CallInst>(StoredOnceVal)) {
+ auto *TLI = &GetTLI(*CI->getFunction());
+ if (tryToOptimizeStoreOfAllocationToGlobal(GV, CI, Ordering, DL, TLI))
+ return true;
+ }
}
}
@@ -1171,9 +1119,12 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
// Walk the use list of the global seeing if all the uses are load or store.
// If there is anything else, bail out.
- for (User *U : GV->users())
+ for (User *U : GV->users()) {
if (!isa<LoadInst>(U) && !isa<StoreInst>(U))
return false;
+ if (getLoadStoreType(U) != GVElType)
+ return false;
+ }
LLVM_DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV << "\n");
@@ -1590,11 +1541,25 @@ processInternalGlobal(GlobalVariable *GV, const GlobalStatus &GS,
// This is restricted to address spaces that allow globals to have
// initializers. NVPTX, for example, does not support initializers for
// shared memory (AS 3).
- if (SOVConstant && SOVConstant->getType() == GV->getValueType() &&
- isa<UndefValue>(GV->getInitializer()) &&
+ if (SOVConstant && isa<UndefValue>(GV->getInitializer()) &&
+ DL.getTypeAllocSize(SOVConstant->getType()) ==
+ DL.getTypeAllocSize(GV->getValueType()) &&
CanHaveNonUndefGlobalInitializer) {
- // Change the initial value here.
- GV->setInitializer(SOVConstant);
+ if (SOVConstant->getType() == GV->getValueType()) {
+ // Change the initializer in place.
+ GV->setInitializer(SOVConstant);
+ } else {
+ // Create a new global with adjusted type.
+ auto *NGV = new GlobalVariable(
+ *GV->getParent(), SOVConstant->getType(), GV->isConstant(),
+ GV->getLinkage(), SOVConstant, "", GV, GV->getThreadLocalMode(),
+ GV->getAddressSpace());
+ NGV->takeName(GV);
+ NGV->copyAttributesFrom(GV);
+ GV->replaceAllUsesWith(ConstantExpr::getBitCast(NGV, GV->getType()));
+ GV->eraseFromParent();
+ GV = NGV;
+ }
// Clean up any obviously simplifiable users now.
CleanupConstantGlobalUsers(GV, DL);
@@ -2066,194 +2031,6 @@ OptimizeGlobalVars(Module &M,
return Changed;
}
-/// Evaluate a piece of a constantexpr store into a global initializer. This
-/// returns 'Init' modified to reflect 'Val' stored into it. At this point, the
-/// GEP operands of Addr [0, OpNo) have been stepped into.
-static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
- ConstantExpr *Addr, unsigned OpNo) {
- // Base case of the recursion.
- if (OpNo == Addr->getNumOperands()) {
- assert(Val->getType() == Init->getType() && "Type mismatch!");
- return Val;
- }
-
- SmallVector<Constant*, 32> Elts;
- if (StructType *STy = dyn_cast<StructType>(Init->getType())) {
- // Break up the constant into its elements.
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
- Elts.push_back(Init->getAggregateElement(i));
-
- // Replace the element that we are supposed to.
- ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo));
- unsigned Idx = CU->getZExtValue();
- assert(Idx < STy->getNumElements() && "Struct index out of range!");
- Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1);
-
- // Return the modified struct.
- return ConstantStruct::get(STy, Elts);
- }
-
- ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo));
- uint64_t NumElts;
- if (ArrayType *ATy = dyn_cast<ArrayType>(Init->getType()))
- NumElts = ATy->getNumElements();
- else
- NumElts = cast<FixedVectorType>(Init->getType())->getNumElements();
-
- // Break up the array into elements.
- for (uint64_t i = 0, e = NumElts; i != e; ++i)
- Elts.push_back(Init->getAggregateElement(i));
-
- assert(CI->getZExtValue() < NumElts);
- Elts[CI->getZExtValue()] =
- EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1);
-
- if (Init->getType()->isArrayTy())
- return ConstantArray::get(cast<ArrayType>(Init->getType()), Elts);
- return ConstantVector::get(Elts);
-}
-
-/// We have decided that Addr (which satisfies the predicate
-/// isSimpleEnoughPointerToCommit) should get Val as its value. Make it happen.
-static void CommitValueTo(Constant *Val, Constant *Addr) {
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
- assert(GV->hasInitializer());
- GV->setInitializer(Val);
- return;
- }
-
- ConstantExpr *CE = cast<ConstantExpr>(Addr);
- GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
- GV->setInitializer(EvaluateStoreInto(GV->getInitializer(), Val, CE, 2));
-}
-
-/// Given a map of address -> value, where addresses are expected to be some form
-/// of either a global or a constant GEP, set the initializer for the address to
-/// be the value. This performs mostly the same function as CommitValueTo()
-/// and EvaluateStoreInto() but is optimized to be more efficient for the common
-/// case where the set of addresses are GEPs sharing the same underlying global,
-/// processing the GEPs in batches rather than individually.
-///
-/// To give an example, consider the following C++ code adapted from the clang
-/// regression tests:
-/// struct S {
-/// int n = 10;
-/// int m = 2 * n;
-/// S(int a) : n(a) {}
-/// };
-///
-/// template<typename T>
-/// struct U {
-/// T *r = &q;
-/// T q = 42;
-/// U *p = this;
-/// };
-///
-/// U<S> e;
-///
-/// The global static constructor for 'e' will need to initialize 'r' and 'p' of
-/// the outer struct, while also initializing the inner 'q' structs 'n' and 'm'
-/// members. This batch algorithm will simply use general CommitValueTo() method
-/// to handle the complex nested S struct initialization of 'q', before
-/// processing the outermost members in a single batch. Using CommitValueTo() to
-/// handle member in the outer struct is inefficient when the struct/array is
-/// very large as we end up creating and destroy constant arrays for each
-/// initialization.
-/// For the above case, we expect the following IR to be generated:
-///
-/// %struct.U = type { %struct.S*, %struct.S, %struct.U* }
-/// %struct.S = type { i32, i32 }
-/// @e = global %struct.U { %struct.S* gep inbounds (%struct.U, %struct.U* @e,
-/// i64 0, i32 1),
-/// %struct.S { i32 42, i32 84 }, %struct.U* @e }
-/// The %struct.S { i32 42, i32 84 } inner initializer is treated as a complex
-/// constant expression, while the other two elements of @e are "simple".
-static void BatchCommitValueTo(const DenseMap<Constant*, Constant*> &Mem) {
- SmallVector<std::pair<GlobalVariable*, Constant*>, 32> GVs;
- SmallVector<std::pair<ConstantExpr*, Constant*>, 32> ComplexCEs;
- SmallVector<std::pair<ConstantExpr*, Constant*>, 32> SimpleCEs;
- SimpleCEs.reserve(Mem.size());
-
- for (const auto &I : Mem) {
- if (auto *GV = dyn_cast<GlobalVariable>(I.first)) {
- GVs.push_back(std::make_pair(GV, I.second));
- } else {
- ConstantExpr *GEP = cast<ConstantExpr>(I.first);
- // We don't handle the deeply recursive case using the batch method.
- if (GEP->getNumOperands() > 3)
- ComplexCEs.push_back(std::make_pair(GEP, I.second));
- else
- SimpleCEs.push_back(std::make_pair(GEP, I.second));
- }
- }
-
- // The algorithm below doesn't handle cases like nested structs, so use the
- // slower fully general method if we have to.
- for (auto ComplexCE : ComplexCEs)
- CommitValueTo(ComplexCE.second, ComplexCE.first);
-
- for (auto GVPair : GVs) {
- assert(GVPair.first->hasInitializer());
- GVPair.first->setInitializer(GVPair.second);
- }
-
- if (SimpleCEs.empty())
- return;
-
- // We cache a single global's initializer elements in the case where the
- // subsequent address/val pair uses the same one. This avoids throwing away and
- // rebuilding the constant struct/vector/array just because one element is
- // modified at a time.
- SmallVector<Constant *, 32> Elts;
- Elts.reserve(SimpleCEs.size());
- GlobalVariable *CurrentGV = nullptr;
-
- auto commitAndSetupCache = [&](GlobalVariable *GV, bool Update) {
- Constant *Init = GV->getInitializer();
- Type *Ty = Init->getType();
- if (Update) {
- if (CurrentGV) {
- assert(CurrentGV && "Expected a GV to commit to!");
- Type *CurrentInitTy = CurrentGV->getInitializer()->getType();
- // We have a valid cache that needs to be committed.
- if (StructType *STy = dyn_cast<StructType>(CurrentInitTy))
- CurrentGV->setInitializer(ConstantStruct::get(STy, Elts));
- else if (ArrayType *ArrTy = dyn_cast<ArrayType>(CurrentInitTy))
- CurrentGV->setInitializer(ConstantArray::get(ArrTy, Elts));
- else
- CurrentGV->setInitializer(ConstantVector::get(Elts));
- }
- if (CurrentGV == GV)
- return;
- // Need to clear and set up cache for new initializer.
- CurrentGV = GV;
- Elts.clear();
- unsigned NumElts;
- if (auto *STy = dyn_cast<StructType>(Ty))
- NumElts = STy->getNumElements();
- else if (auto *ATy = dyn_cast<ArrayType>(Ty))
- NumElts = ATy->getNumElements();
- else
- NumElts = cast<FixedVectorType>(Ty)->getNumElements();
- for (unsigned i = 0, e = NumElts; i != e; ++i)
- Elts.push_back(Init->getAggregateElement(i));
- }
- };
-
- for (auto CEPair : SimpleCEs) {
- ConstantExpr *GEP = CEPair.first;
- Constant *Val = CEPair.second;
-
- GlobalVariable *GV = cast<GlobalVariable>(GEP->getOperand(0));
- commitAndSetupCache(GV, GV != CurrentGV);
- ConstantInt *CI = cast<ConstantInt>(GEP->getOperand(2));
- Elts[CI->getZExtValue()] = Val;
- }
- // The last initializer in the list needs to be committed, others
- // will be committed on a new initializer being processed.
- commitAndSetupCache(CurrentGV, true);
-}
-
/// Evaluate static constructors in the function, if we can. Return true if we
/// can, false otherwise.
static bool EvaluateStaticConstructor(Function *F, const DataLayout &DL,
@@ -2268,10 +2045,12 @@ static bool EvaluateStaticConstructor(Function *F, const DataLayout &DL,
++NumCtorsEvaluated;
// We succeeded at evaluation: commit the result.
+ auto NewInitializers = Eval.getMutatedInitializers();
LLVM_DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '"
- << F->getName() << "' to "
- << Eval.getMutatedMemory().size() << " stores.\n");
- BatchCommitValueTo(Eval.getMutatedMemory());
+ << F->getName() << "' to " << NewInitializers.size()
+ << " stores.\n");
+ for (const auto &Pair : NewInitializers)
+ Pair.first->setInitializer(Pair.second);
for (GlobalVariable *GV : Eval.getInvariants())
GV->setConstant(true);
}
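With the evaluator now handing back whole mutated initializers, committing the result is a single setInitializer per global rather than replaying individual element stores as the deleted BatchCommitValueTo did. A toy model of that commit loop, with strings standing in for Constant initializers:

#include <iostream>
#include <map>
#include <string>

// Toy stand-ins: a "global" with an initializer string, and the final mutated
// initializer per global as the evaluator conceptually returns it.
struct Global { std::string Name, Init; };

int main() {
  Global A{"a", "{ i32 0, i32 0 }"}, B{"b", "i32 0"};
  // What getMutatedInitializers() conceptually provides: the final value for
  // every global the static constructor wrote to.
  std::map<Global *, std::string> MutatedInitializers = {
      {&A, "{ i32 42, i32 84 }"}, {&B, "i32 7"}};
  // Committing the result is one assignment per global.
  for (auto &Pair : MutatedInitializers)
    Pair.first->Init = Pair.second;
  std::cout << A.Name << " = " << A.Init << "\n"
            << B.Name << " = " << B.Init << "\n";
}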
diff --git a/contrib/llvm-project/llvm/lib/Transforms/IPO/IROutliner.cpp b/contrib/llvm-project/llvm/lib/Transforms/IPO/IROutliner.cpp
index b8a314c54f18..e064fbbef595 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/IPO/IROutliner.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/IPO/IROutliner.cpp
@@ -36,8 +36,14 @@ using namespace IRSimilarity;
// A command flag to be used for debugging to exclude branches from similarity
// matching and outlining.
+namespace llvm {
extern cl::opt<bool> DisableBranches;
+// A command flag to be used for debugging to exclude indirect calls from
+// similarity matching and outlining.
+extern cl::opt<bool> DisableIndirectCalls;
+} // namespace llvm
+
// Set to true if the user wants the ir outliner to run on linkonceodr linkage
// functions. This is false by default because the linker can dedupe linkonceodr
// functions. Since the outliner is confined to a single module (modulo LTO),
@@ -104,6 +110,16 @@ struct OutlinableGroup {
/// of the region.
unsigned BranchesToOutside = 0;
+ /// Tracker counting backwards from the highest unsigned value possible to
+ /// avoid conflicting with the GVNs of assigned values. We start at -3 since
+ /// -2 and -1 are assigned by the DenseMap.
+ unsigned PHINodeGVNTracker = -3;
+
+ DenseMap<unsigned,
+ std::pair<std::pair<unsigned, unsigned>, SmallVector<unsigned, 2>>>
+ PHINodeGVNToGVNs;
+ DenseMap<hash_code, unsigned> GVNsToPHINodeGVN;
+
/// The number of instructions that will be outlined by extracting \ref
/// Regions.
InstructionCost Benefit = 0;
@@ -169,6 +185,44 @@ Value *OutlinableRegion::findCorrespondingValueIn(const OutlinableRegion &Other,
return FoundValueOpt.getValueOr(nullptr);
}
+/// Rewrite the BranchInsts in the incoming blocks to \p PHIBlock that are found
+/// in \p Included to branch to BasicBlock \p Replace if they currently branch
+/// to the BasicBlock \p Find. This is used to fix up the incoming basic blocks
+/// when PHINodes are included in outlined regions.
+///
+/// \param PHIBlock - The BasicBlock containing the PHINodes that need to be
+/// checked.
+/// \param Find - The successor block to be replaced.
+/// \param Replace - The new successor block to branch to.
+/// \param Included - The set of blocks about to be outlined.
+static void replaceTargetsFromPHINode(BasicBlock *PHIBlock, BasicBlock *Find,
+ BasicBlock *Replace,
+ DenseSet<BasicBlock *> &Included) {
+ for (PHINode &PN : PHIBlock->phis()) {
+ for (unsigned Idx = 0, PNEnd = PN.getNumIncomingValues(); Idx != PNEnd;
+ ++Idx) {
+ // Check if the incoming block is included in the set of blocks being
+ // outlined.
+ BasicBlock *Incoming = PN.getIncomingBlock(Idx);
+ if (!Included.contains(Incoming))
+ continue;
+
+ BranchInst *BI = dyn_cast<BranchInst>(Incoming->getTerminator());
+ assert(BI && "Not a branch instruction?");
+ // Look over the branching instructions into this block to see if we
+ // used to branch to Find in this outlined block.
+ for (unsigned Succ = 0, End = BI->getNumSuccessors(); Succ != End;
+ Succ++) {
+ // If we have found the block to replace, we do so here.
+ if (BI->getSuccessor(Succ) != Find)
+ continue;
+ BI->setSuccessor(Succ, Replace);
+ }
+ }
+ }
+}
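The effect of replaceTargetsFromPHINode can be modelled without any LLVM types: for each incoming block of the PHI block that belongs to the region being outlined, branches that used to target Find are pointed at Replace. A small sketch with invented Block/successor structures:

#include <iostream>
#include <set>
#include <string>
#include <vector>

// Toy CFG pieces: a block has a name and a list of branch successors.
struct Block { std::string Name; std::vector<Block *> Succs; };

// Analogue of replaceTargetsFromPHINode: for every incoming block of the PHI
// block that lies inside the outlined region, retarget branches that used to
// go to Find so they go to Replace instead.
static void retargetIncomingBranches(const std::vector<Block *> &IncomingBlocks,
                                     const std::set<Block *> &Included,
                                     Block *Find, Block *Replace) {
  for (Block *Incoming : IncomingBlocks) {
    if (!Included.count(Incoming))
      continue;                      // predecessor stays outside the region
    for (Block *&Succ : Incoming->Succs)
      if (Succ == Find)
        Succ = Replace;
  }
}

int main() {
  Block PhiBB{"phi_block", {}}, NewTarget{"start_split", {}};
  Block Pred{"pred", {&PhiBB}};
  retargetIncomingBranches({&Pred}, {&Pred}, &PhiBB, &NewTarget);
  std::cout << Pred.Succs[0]->Name << "\n"; // start_split
}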
+
+
void OutlinableRegion::splitCandidate() {
assert(!CandidateSplit && "Candidate already split!");
@@ -199,6 +253,39 @@ void OutlinableRegion::splitCandidate() {
StartBB = StartInst->getParent();
PrevBB = StartBB;
+ DenseSet<BasicBlock *> BBSet;
+ Candidate->getBasicBlocks(BBSet);
+
+  // We iterate over the instructions in the region. If we find a PHINode, we
+  // check whether it has more than one predecessor outside of the region; if
+  // it does, we ignore this region since we are unable to handle the severing
+  // of the phi node right now.
+ BasicBlock::iterator It = StartInst->getIterator();
+ while (PHINode *PN = dyn_cast<PHINode>(&*It)) {
+ unsigned NumPredsOutsideRegion = 0;
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
+ if (!BBSet.contains(PN->getIncomingBlock(i)))
+ ++NumPredsOutsideRegion;
+
+ if (NumPredsOutsideRegion > 1)
+ return;
+
+ It++;
+ }
+
+ // If the region starts with a PHINode, but is not the initial instruction of
+ // the BasicBlock, we ignore this region for now.
+ if (isa<PHINode>(StartInst) && StartInst != &*StartBB->begin())
+ return;
+
+ // If the region ends with a PHINode, but does not contain all of the phi node
+ // instructions of the region, we ignore it for now.
+ if (isa<PHINode>(BackInst)) {
+ EndBB = BackInst->getParent();
+ if (BackInst != &*std::prev(EndBB->getFirstInsertionPt()))
+ return;
+ }
+
// The basic block gets split like so:
// block: block:
// inst1 inst1
@@ -225,12 +312,20 @@ void OutlinableRegion::splitCandidate() {
FollowBB = EndBB->splitBasicBlock(EndInst, OriginalName + "_after_outline");
EndBB->replaceSuccessorsPhiUsesWith(EndBB, FollowBB);
FollowBB->replaceSuccessorsPhiUsesWith(PrevBB, FollowBB);
- return;
+ } else {
+ EndBB = BackInst->getParent();
+ EndsInBranch = true;
+ FollowBB = nullptr;
}
- EndBB = BackInst->getParent();
- EndsInBranch = true;
- FollowBB = nullptr;
+ // Refind the basic block set.
+ BBSet.clear();
+ Candidate->getBasicBlocks(BBSet);
+  // For the phi nodes in the new starting basic block of the region, we
+  // reassign the targets of the basic blocks' branching instructions.
+ replaceTargetsFromPHINode(StartBB, PrevBB, StartBB, BBSet);
+ if (FollowBB)
+ replaceTargetsFromPHINode(FollowBB, EndBB, FollowBB, BBSet);
}
void OutlinableRegion::reattachCandidate() {
@@ -252,15 +347,21 @@ void OutlinableRegion::reattachCandidate() {
// inst4
assert(StartBB != nullptr && "StartBB for Candidate is not defined!");
- // StartBB should only have one predecessor since we put an unconditional
- // branch at the end of PrevBB when we split the BasicBlock.
- PrevBB = StartBB->getSinglePredecessor();
- assert(PrevBB != nullptr &&
- "No Predecessor for the region start basic block!");
-
assert(PrevBB->getTerminator() && "Terminator removed from PrevBB!");
PrevBB->getTerminator()->eraseFromParent();
+  // If we are reattaching after outlining, we iterate over the phi nodes in
+  // the initial block, and reassign the branch instructions of the incoming
+  // blocks to the block we are remerging into.
+ if (!ExtractedFunction) {
+ DenseSet<BasicBlock *> BBSet;
+ Candidate->getBasicBlocks(BBSet);
+
+ replaceTargetsFromPHINode(StartBB, StartBB, PrevBB, BBSet);
+ if (!EndsInBranch)
+ replaceTargetsFromPHINode(FollowBB, FollowBB, EndBB, BBSet);
+ }
+
moveBBContents(*StartBB, *PrevBB);
BasicBlock *PlacementBB = PrevBB;
@@ -354,6 +455,24 @@ InstructionCost OutlinableRegion::getBenefit(TargetTransformInfo &TTI) {
return Benefit;
}
+/// Check the \p OutputMappings structure for value \p Input, if it exists
+/// it has been used as an output for outlining, and has been renamed, and we
+/// return the new value, otherwise, we return the same value.
+///
+/// \param OutputMappings [in] - The mapping of values to their renamed value
+/// after being used as an output for an outlined region.
+/// \param Input [in] - The value to find the remapped value of, if it exists.
+/// \return The remapped value if it has been renamed, and the same value if it
+/// has not.
+static Value *findOutputMapping(const DenseMap<Value *, Value *> OutputMappings,
+ Value *Input) {
+ DenseMap<Value *, Value *>::const_iterator OutputMapping =
+ OutputMappings.find(Input);
+ if (OutputMapping != OutputMappings.end())
+ return OutputMapping->second;
+ return Input;
+}
+
/// Find whether \p Region matches the global value numbering to Constant
/// mapping found so far.
///
@@ -830,6 +949,209 @@ findExtractedInputToOverallInputMapping(OutlinableRegion &Region,
Region.NumExtractedInputs = OriginalIndex;
}
+/// Check if the \p V has any uses outside of the region other than \p PN.
+///
+/// \param V [in] - The value to check.
+/// \param PHILoc [in] - The location in the PHINode of \p V.
+/// \param PN [in] - The PHINode using \p V.
+/// \param Exits [in] - The potential blocks we exit to from the outlined
+/// region.
+/// \param BlocksInRegion [in] - The basic blocks contained in the region.
+/// \returns true if \p V has any uses outside its region other than \p PN.
+static bool outputHasNonPHI(Value *V, unsigned PHILoc, PHINode &PN,
+ SmallPtrSet<BasicBlock *, 1> &Exits,
+ DenseSet<BasicBlock *> &BlocksInRegion) {
+ // We check to see if the value is used by the PHINode from some other
+ // predecessor not included in the region. If it is, we make sure
+ // to keep it as an output.
+ SmallVector<unsigned, 2> IncomingNumbers(PN.getNumIncomingValues());
+ std::iota(IncomingNumbers.begin(), IncomingNumbers.end(), 0);
+ if (any_of(IncomingNumbers, [PHILoc, &PN, V, &BlocksInRegion](unsigned Idx) {
+ return (Idx != PHILoc && V == PN.getIncomingValue(Idx) &&
+ !BlocksInRegion.contains(PN.getIncomingBlock(Idx)));
+ }))
+ return true;
+
+ // Check if the value is used by any other instructions outside the region.
+ return any_of(V->users(), [&Exits, &BlocksInRegion](User *U) {
+ Instruction *I = dyn_cast<Instruction>(U);
+ if (!I)
+ return false;
+
+ // If the use of the item is inside the region, we skip it. Uses
+ // inside the region give us useful information about how the item could be
+ // used as an output.
+ BasicBlock *Parent = I->getParent();
+ if (BlocksInRegion.contains(Parent))
+ return false;
+
+ // If it's not a PHINode then we definitely know the use matters. This
+    // output value will not be completely combined with another item in a
+    // PHINode as it is directly referenced by another non-phi instruction.
+ if (!isa<PHINode>(I))
+ return true;
+
+ // If we have a PHINode outside one of the exit locations, then it
+ // can be considered an outside use as well. If there is a PHINode
+    // contained in the Exit where this value's use matters, it will be
+ // caught when we analyze that PHINode.
+ if (!Exits.contains(Parent))
+ return true;
+
+ return false;
+ });
+}
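The user walk in outputHasNonPHI boils down to: a value from the region still needs its own output slot if anything outside the region uses it other than a PHINode sitting in an exit block. A compact standalone model of that predicate, with strings naming blocks and a flag marking PHI users:

#include <algorithm>
#include <iostream>
#include <set>
#include <string>
#include <vector>

// Toy model of a use of a value: the block the user lives in and whether the
// user is a PHINode.
struct Use { std::string Block; bool IsPHI; };

// A value still matters as an output if some user outside the region is either
// not a PHINode, or is a PHINode that does not live in one of the exit blocks.
static bool hasOutsideNonPhiUse(const std::vector<Use> &Uses,
                                const std::set<std::string> &RegionBlocks,
                                const std::set<std::string> &ExitBlocks) {
  return std::any_of(Uses.begin(), Uses.end(), [&](const Use &U) {
    if (RegionBlocks.count(U.Block))
      return false;                    // uses inside the region don't count
    if (!U.IsPHI)
      return true;                     // plain instruction outside: real output
    return !ExitBlocks.count(U.Block); // PHI outside an exit block: real output
  });
}

int main() {
  std::set<std::string> Region = {"bb1", "bb2"}, Exits = {"exit"};
  std::cout << hasOutsideNonPhiUse({{"bb1", false}, {"exit", true}}, Region, Exits)
            << "\n";                                  // 0: only an exit-block PHI
  std::cout << hasOutsideNonPhiUse({{"after", false}}, Region, Exits) << "\n"; // 1
}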
+
+/// Test whether \p CurrentExitFromRegion contains any PhiNodes that should be
+/// considered outputs. A PHINode is an output when more than one incoming
+/// value has been marked by the CodeExtractor as an output.
+///
+/// \param CurrentExitFromRegion [in] - The block to analyze.
+/// \param PotentialExitsFromRegion [in] - The potential exit blocks from the
+/// region.
+/// \param RegionBlocks [in] - The basic blocks in the region.
+/// \param Outputs [in, out] - The existing outputs for the region, we may add
+/// PHINodes to this as we find that they replace output values.
+/// \param OutputsReplacedByPHINode [out] - A set containing outputs that are
+/// totally replaced by a PHINode.
+/// \param OutputsWithNonPhiUses [out] - A set containing outputs that are used
+/// in PHINodes, but have other uses, and should still be considered outputs.
+static void analyzeExitPHIsForOutputUses(
+ BasicBlock *CurrentExitFromRegion,
+ SmallPtrSet<BasicBlock *, 1> &PotentialExitsFromRegion,
+ DenseSet<BasicBlock *> &RegionBlocks, SetVector<Value *> &Outputs,
+ DenseSet<Value *> &OutputsReplacedByPHINode,
+ DenseSet<Value *> &OutputsWithNonPhiUses) {
+ for (PHINode &PN : CurrentExitFromRegion->phis()) {
+ // Find all incoming values from the outlining region.
+ SmallVector<unsigned, 2> IncomingVals;
+ for (unsigned I = 0, E = PN.getNumIncomingValues(); I < E; ++I)
+ if (RegionBlocks.contains(PN.getIncomingBlock(I)))
+ IncomingVals.push_back(I);
+
+ // Do not process PHI if there are no predecessors from region.
+ unsigned NumIncomingVals = IncomingVals.size();
+ if (NumIncomingVals == 0)
+ continue;
+
+ // If there is one predecessor, we mark it as a value that needs to be kept
+ // as an output.
+ if (NumIncomingVals == 1) {
+ Value *V = PN.getIncomingValue(*IncomingVals.begin());
+ OutputsWithNonPhiUses.insert(V);
+ OutputsReplacedByPHINode.erase(V);
+ continue;
+ }
+
+ // This PHINode will be used as an output value, so we add it to our list.
+ Outputs.insert(&PN);
+
+    // Not all of the incoming values should be ignored as other inputs and
+    // outputs may have uses in the outlined region. If they have other uses
+    // outside of the single PHINode, we should not skip over them.
+ for (unsigned Idx : IncomingVals) {
+ Value *V = PN.getIncomingValue(Idx);
+ if (outputHasNonPHI(V, Idx, PN, PotentialExitsFromRegion, RegionBlocks)) {
+ OutputsWithNonPhiUses.insert(V);
+ OutputsReplacedByPHINode.erase(V);
+ continue;
+ }
+ if (!OutputsWithNonPhiUses.contains(V))
+ OutputsReplacedByPHINode.insert(V);
+ }
+ }
+}
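For a single exit PHI, the classification above can be pictured as two buckets: incoming values from the region either keep their own output slot because they have other uses, or are folded entirely into the PHINode, which then becomes the output instead. A toy rendering of that bookkeeping (the value names and use flags are invented):

#include <iostream>
#include <set>
#include <string>
#include <utility>
#include <vector>

// Toy classification mirroring analyzeExitPHIsForOutputUses for one exit PHI.
int main() {
  // Incoming values from the region and whether each has non-PHI uses outside.
  std::vector<std::pair<std::string, bool>> IncomingFromRegion = {
      {"%a", false}, {"%b", true}};
  std::set<std::string> ReplacedByPHI, KeptAsOutputs;
  // With more than one incoming value from the region, the PHI itself becomes
  // an output; each incoming value lands in one of the two buckets.
  bool PhiIsOutput = IncomingFromRegion.size() > 1;
  for (auto &P : IncomingFromRegion)
    (P.second ? KeptAsOutputs : ReplacedByPHI).insert(P.first);
  std::cout << "phi is output: " << PhiIsOutput
            << ", replaced: " << ReplacedByPHI.size()
            << ", kept: " << KeptAsOutputs.size() << "\n";
  // phi is output: 1, replaced: 1, kept: 1
}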
+
+// Represents the type for the unsigned number denoting the output number for
+// phi node, along with the canonical number for the exit block.
+using ArgLocWithBBCanon = std::pair<unsigned, unsigned>;
+// The list of canonical numbers for the incoming values to a PHINode.
+using CanonList = SmallVector<unsigned, 2>;
+// The pair type representing the set of canonical values being combined in the
+// PHINode, along with the location data for the PHINode.
+using PHINodeData = std::pair<ArgLocWithBBCanon, CanonList>;
+
+/// Encode \p PND as an integer for easy lookup based on the argument location,
+/// the parent BasicBlock canonical numbering, and the canonical numbering of
+/// the values stored in the PHINode.
+///
+/// \param PND - The data to hash.
+/// \returns The hash code of \p PND.
+static hash_code encodePHINodeData(PHINodeData &PND) {
+ return llvm::hash_combine(
+ llvm::hash_value(PND.first.first), llvm::hash_value(PND.first.second),
+ llvm::hash_combine_range(PND.second.begin(), PND.second.end()));
+}
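encodePHINodeData only needs the hash to agree when two PHINodes live at the same aggregate argument slot, in the same canonical exit block, and merge the same canonical values in the same order. The sketch below reproduces that shape with a boost-style mixer over std::hash instead of llvm::hash_combine; the constant and helper names are illustrative:

#include <cstddef>
#include <functional>
#include <iostream>
#include <utility>
#include <vector>

// Same shape of data as PHINodeData: (canonical number of the exit block,
// aggregate argument slot) plus the canonical numbers of the incoming values.
using BlockCanonWithArgLoc = std::pair<unsigned, unsigned>;
using CanonListModel = std::vector<unsigned>;
using PHINodeDataModel = std::pair<BlockCanonWithArgLoc, CanonListModel>;

// Stand-in for llvm::hash_combine / hash_combine_range: a boost-style mixer
// over std::hash.
static void hashCombine(std::size_t &Seed, std::size_t V) {
  Seed ^= V + 0x9e3779b97f4a7c15ULL + (Seed << 6) + (Seed >> 2);
}

static std::size_t encodePHINodeDataModel(const PHINodeDataModel &PND) {
  std::size_t Seed = 0;
  hashCombine(Seed, std::hash<unsigned>{}(PND.first.first));
  hashCombine(Seed, std::hash<unsigned>{}(PND.first.second));
  for (unsigned Canon : PND.second)
    hashCombine(Seed, std::hash<unsigned>{}(Canon));
  return Seed;
}

int main() {
  PHINodeDataModel A = {{7, 2}, {11, 13}};
  PHINodeDataModel B = {{7, 2}, {11, 13}};
  PHINodeDataModel C = {{7, 3}, {11, 13}}; // same block and values, other slot
  std::cout << (encodePHINodeDataModel(A) == encodePHINodeDataModel(B)) << "\n"; // 1
  std::cout << (encodePHINodeDataModel(A) == encodePHINodeDataModel(C)) << "\n"; // 0 (almost surely)
}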
+
+/// Create a special GVN for PHINodes that will be used outside of
+/// the region. We create a hash code based on the Canonical number of the
+/// parent BasicBlock, the canonical numbering of the values stored in the
+/// PHINode and the aggregate argument location. This is used to find whether
+/// this PHINode type has been given a canonical numbering already. If not, we
+/// assign it a value and store it for later use. The value is returned to
+/// identify different output schemes for the set of regions.
+///
+/// \param Region - The region that \p PN is an output for.
+/// \param PN - The PHINode we are analyzing.
+/// \param AggArgIdx - The argument \p PN will be stored into.
+/// \returns An optional holding the assigned canonical number, or None if
+/// there is some attribute of the PHINode blocking it from being used.
+static Optional<unsigned> getGVNForPHINode(OutlinableRegion &Region,
+ PHINode *PN, unsigned AggArgIdx) {
+ OutlinableGroup &Group = *Region.Parent;
+ IRSimilarityCandidate &Cand = *Region.Candidate;
+ BasicBlock *PHIBB = PN->getParent();
+ CanonList PHIGVNs;
+ for (Value *Incoming : PN->incoming_values()) {
+ // If we cannot find a GVN, this means that the input to the PHINode is
+    // not included in the region we are trying to analyze, meaning that if
+ // it was outlined, we would be adding an extra input. We ignore this
+ // case for now, and so ignore the region.
+ Optional<unsigned> OGVN = Cand.getGVN(Incoming);
+ if (!OGVN.hasValue()) {
+ Region.IgnoreRegion = true;
+ return None;
+ }
+
+ // Collect the canonical numbers of the values in the PHINode.
+ unsigned GVN = OGVN.getValue();
+ OGVN = Cand.getCanonicalNum(GVN);
+ assert(OGVN.hasValue() && "No GVN found for incoming value?");
+ PHIGVNs.push_back(*OGVN);
+ }
+
+ // Now that we have the GVNs for the incoming values, we are going to combine
+  // them with the GVN of the incoming block, and the output location of the
+ // PHINode to generate a hash value representing this instance of the PHINode.
+ DenseMap<hash_code, unsigned>::iterator GVNToPHIIt;
+ DenseMap<unsigned, PHINodeData>::iterator PHIToGVNIt;
+ Optional<unsigned> BBGVN = Cand.getGVN(PHIBB);
+ assert(BBGVN.hasValue() && "Could not find GVN for the incoming block!");
+
+ BBGVN = Cand.getCanonicalNum(BBGVN.getValue());
+ assert(BBGVN.hasValue() &&
+ "Could not find canonical number for the incoming block!");
+ // Create a pair of the exit block canonical value, and the aggregate
+ // argument location, connected to the canonical numbers stored in the
+ // PHINode.
+ PHINodeData TemporaryPair =
+ std::make_pair(std::make_pair(BBGVN.getValue(), AggArgIdx), PHIGVNs);
+ hash_code PHINodeDataHash = encodePHINodeData(TemporaryPair);
+
+ // Look for and create a new entry in our connection between canonical
+ // numbers for PHINodes, and the set of objects we just created.
+ GVNToPHIIt = Group.GVNsToPHINodeGVN.find(PHINodeDataHash);
+ if (GVNToPHIIt == Group.GVNsToPHINodeGVN.end()) {
+ bool Inserted = false;
+ std::tie(PHIToGVNIt, Inserted) = Group.PHINodeGVNToGVNs.insert(
+ std::make_pair(Group.PHINodeGVNTracker, TemporaryPair));
+ std::tie(GVNToPHIIt, Inserted) = Group.GVNsToPHINodeGVN.insert(
+ std::make_pair(PHINodeDataHash, Group.PHINodeGVNTracker--));
+ }
+
+ return GVNToPHIIt->second;
+}
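The tail of getGVNForPHINode is a find-or-assign over two maps, with fresh PHINode GVNs handed out from a counter that starts near the top of the unsigned range and counts down so they can never collide with the ordinary, small canonical numbers (-1 and -2 stay reserved for DenseMap keys). A self-contained toy version of that scheme:

#include <cstddef>
#include <iostream>
#include <map>

// Toy version of the two-map scheme: each distinct hash of (block canonical
// number, argument slot, incoming canonical numbers) gets a synthetic GVN.
struct PHIGVNAssigner {
  unsigned NextPHIGVN = static_cast<unsigned>(-3); // -1 and -2 stay reserved
  std::map<std::size_t, unsigned> HashToPHIGVN;
  std::map<unsigned, std::size_t> PHIGVNToHash;

  unsigned getOrAssign(std::size_t Hash) {
    auto It = HashToPHIGVN.find(Hash);
    if (It != HashToPHIGVN.end())
      return It->second;                 // seen this PHINode shape before
    unsigned GVN = NextPHIGVN--;         // mint a fresh number, counting down
    HashToPHIGVN.emplace(Hash, GVN);
    PHIGVNToHash.emplace(GVN, Hash);
    return GVN;
  }
};

int main() {
  PHIGVNAssigner Assigner;
  unsigned A = Assigner.getOrAssign(0x1234);
  unsigned B = Assigner.getOrAssign(0x5678);
  unsigned C = Assigner.getOrAssign(0x1234);   // same shape, same number
  // Printed value assumes 32-bit unsigned: 1 1 4294967293
  std::cout << (A == C) << " " << (A != B) << " " << A << "\n";
}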
+
/// Create a mapping of the output arguments for the \p Region to the output
/// arguments of the overall outlined function.
///
@@ -842,35 +1164,25 @@ findExtractedOutputToOverallOutputMapping(OutlinableRegion &Region,
IRSimilarityCandidate &C = *Region.Candidate;
SmallVector<BasicBlock *> BE;
- DenseSet<BasicBlock *> BBSet;
- C.getBasicBlocks(BBSet, BE);
+ DenseSet<BasicBlock *> BlocksInRegion;
+ C.getBasicBlocks(BlocksInRegion, BE);
// Find the exits to the region.
SmallPtrSet<BasicBlock *, 1> Exits;
for (BasicBlock *Block : BE)
for (BasicBlock *Succ : successors(Block))
- if (!BBSet.contains(Succ))
+ if (!BlocksInRegion.contains(Succ))
Exits.insert(Succ);
// After determining which blocks exit to PHINodes, we add these PHINodes to
// the set of outputs to be processed. We also check the incoming values of
// the PHINodes for whether they should no longer be considered outputs.
- for (BasicBlock *ExitBB : Exits) {
- for (PHINode &PN : ExitBB->phis()) {
- // Find all incoming values from the outlining region.
- SmallVector<unsigned, 2> IncomingVals;
- for (unsigned Idx = 0; Idx < PN.getNumIncomingValues(); ++Idx)
- if (BBSet.contains(PN.getIncomingBlock(Idx)))
- IncomingVals.push_back(Idx);
-
- // Do not process PHI if there is one (or fewer) predecessor from region.
- if (IncomingVals.size() <= 1)
- continue;
-
- Region.IgnoreRegion = true;
- return;
- }
- }
+ DenseSet<Value *> OutputsReplacedByPHINode;
+ DenseSet<Value *> OutputsWithNonPhiUses;
+ for (BasicBlock *ExitBB : Exits)
+ analyzeExitPHIsForOutputUses(ExitBB, Exits, BlocksInRegion, Outputs,
+ OutputsReplacedByPHINode,
+ OutputsWithNonPhiUses);
// This counts the argument number in the extracted function.
unsigned OriginalIndex = Region.NumExtractedInputs;
@@ -893,9 +1205,13 @@ findExtractedOutputToOverallOutputMapping(OutlinableRegion &Region,
// do not have to be in same order, but are functionally the same, we will
// have to use a different scheme, as one-to-one correspondence is not
// guaranteed.
- unsigned GlobalValue = C.getGVN(Output).getValue();
unsigned ArgumentSize = Group.ArgumentTypes.size();
+ // If the output is combined in a PHINode, we make sure to skip over it.
+ if (OutputsReplacedByPHINode.contains(Output))
+ continue;
+
+ unsigned AggArgIdx = 0;
for (unsigned Jdx = TypeIndex; Jdx < ArgumentSize; Jdx++) {
if (Group.ArgumentTypes[Jdx] != PointerType::getUnqual(Output->getType()))
continue;
@@ -907,7 +1223,7 @@ findExtractedOutputToOverallOutputMapping(OutlinableRegion &Region,
AggArgsUsed.insert(Jdx);
Region.ExtractedArgToAgg.insert(std::make_pair(OriginalIndex, Jdx));
Region.AggArgToExtracted.insert(std::make_pair(Jdx, OriginalIndex));
- Region.GVNStores.push_back(GlobalValue);
+ AggArgIdx = Jdx;
break;
}
@@ -916,18 +1232,54 @@ findExtractedOutputToOverallOutputMapping(OutlinableRegion &Region,
// function to handle this output and create a mapping to it.
if (!TypeFound) {
Group.ArgumentTypes.push_back(PointerType::getUnqual(Output->getType()));
- AggArgsUsed.insert(Group.ArgumentTypes.size() - 1);
+ // Mark the new pointer type as the last value in the aggregate argument
+ // list.
+ unsigned ArgTypeIdx = Group.ArgumentTypes.size() - 1;
+ AggArgsUsed.insert(ArgTypeIdx);
Region.ExtractedArgToAgg.insert(
- std::make_pair(OriginalIndex, Group.ArgumentTypes.size() - 1));
+ std::make_pair(OriginalIndex, ArgTypeIdx));
Region.AggArgToExtracted.insert(
- std::make_pair(Group.ArgumentTypes.size() - 1, OriginalIndex));
- Region.GVNStores.push_back(GlobalValue);
+ std::make_pair(ArgTypeIdx, OriginalIndex));
+ AggArgIdx = ArgTypeIdx;
+ }
+
+ // TODO: Adapt to the extra input from the PHINode.
+ PHINode *PN = dyn_cast<PHINode>(Output);
+
+ Optional<unsigned> GVN;
+ if (PN && !BlocksInRegion.contains(PN->getParent())) {
+    // Values outside the region can be combined into a PHINode when we
+    // have multiple exits. We collect both of these into a list to identify
+    // which values are being used in the PHINode. Each list identifies a
+    // different PHINode, and a different output. We store the PHINode as its
+    // own canonical value. These canonical values are also dependent on the
+ // output argument it is saved to.
+
+ // If two PHINodes have the same canonical values, but different aggregate
+ // argument locations, then they will have distinct Canonical Values.
+ GVN = getGVNForPHINode(Region, PN, AggArgIdx);
+ if (!GVN.hasValue())
+ return;
+ } else {
+ // If we do not have a PHINode we use the global value numbering for the
+ // output value, to find the canonical number to add to the set of stored
+ // values.
+ GVN = C.getGVN(Output);
+ GVN = C.getCanonicalNum(*GVN);
}
- stable_sort(Region.GVNStores);
+ // Each region has a potentially unique set of outputs. We save which
+ // values are output in a list of canonical values so we can differentiate
+ // among the different store schemes.
+ Region.GVNStores.push_back(*GVN);
+
OriginalIndex++;
TypeIndex++;
}
+
+ // We sort the stored values to make sure that we are not affected by analysis
+ // order when determining what combination of items were stored.
+ stable_sort(Region.GVNStores);
}
void IROutliner::findAddInputsOutputs(Module &M, OutlinableRegion &Region,
@@ -1063,6 +1415,214 @@ CallInst *replaceCalledFunction(Module &M, OutlinableRegion &Region) {
return Call;
}
+/// Find or create a BasicBlock in the outlined function containing PhiBlocks
+/// for \p RetVal.
+///
+/// \param Group - The OutlinableGroup containing the information about the
+/// overall outlined function.
+/// \param RetVal - The return value or exit option that we are currently
+/// evaluating.
+/// \returns The found or newly created BasicBlock to contain the needed
+/// PHINodes to be used as outputs.
+static BasicBlock *findOrCreatePHIBlock(OutlinableGroup &Group, Value *RetVal) {
+ DenseMap<Value *, BasicBlock *>::iterator PhiBlockForRetVal,
+ ReturnBlockForRetVal;
+ PhiBlockForRetVal = Group.PHIBlocks.find(RetVal);
+ ReturnBlockForRetVal = Group.EndBBs.find(RetVal);
+ assert(ReturnBlockForRetVal != Group.EndBBs.end() &&
+ "Could not find output value!");
+ BasicBlock *ReturnBB = ReturnBlockForRetVal->second;
+
+ // Find if a PHIBlock exists for this return value already. If it is
+  // the first time we are analyzing this, we will not find one, so we record it.
+ PhiBlockForRetVal = Group.PHIBlocks.find(RetVal);
+ if (PhiBlockForRetVal != Group.PHIBlocks.end())
+ return PhiBlockForRetVal->second;
+
+ // If we did not find a block, we create one, and insert it into the
+ // overall function and record it.
+ bool Inserted = false;
+ BasicBlock *PHIBlock = BasicBlock::Create(ReturnBB->getContext(), "phi_block",
+ ReturnBB->getParent());
+ std::tie(PhiBlockForRetVal, Inserted) =
+ Group.PHIBlocks.insert(std::make_pair(RetVal, PHIBlock));
+
+ // We find the predecessors of the return block in the newly created outlined
+ // function in order to point them to the new PHIBlock rather than the already
+ // existing return block.
+ SmallVector<BranchInst *, 2> BranchesToChange;
+ for (BasicBlock *Pred : predecessors(ReturnBB))
+ BranchesToChange.push_back(cast<BranchInst>(Pred->getTerminator()));
+
+ // Now we mark the branch instructions found, and change the references of the
+ // return block to the newly created PHIBlock.
+ for (BranchInst *BI : BranchesToChange)
+ for (unsigned Succ = 0, End = BI->getNumSuccessors(); Succ < End; Succ++) {
+ if (BI->getSuccessor(Succ) != ReturnBB)
+ continue;
+ BI->setSuccessor(Succ, PHIBlock);
+ }
+
+ BranchInst::Create(ReturnBB, PHIBlock);
+
+ return PhiBlockForRetVal->second;
+}
+
+/// For the function call now representing the \p Region, find the passed value
+/// to that call that represents Argument \p A at the call location if the
+/// call has already been replaced with a call to the overall, aggregate
+/// function.
+///
+/// \param A - The Argument to get the passed value for.
+/// \param Region - The extracted Region corresponding to the outlined function.
+/// \returns The Value representing \p A at the call site.
+static Value *
+getPassedArgumentInAlreadyOutlinedFunction(const Argument *A,
+ const OutlinableRegion &Region) {
+ // If we don't need to adjust the argument number at all (since the call
+ // has already been replaced by a call to the overall outlined function)
+ // we can just get the specified argument.
+ return Region.Call->getArgOperand(A->getArgNo());
+}
+
+/// For the function call now representing the \p Region, find the passed value
+/// to that call that represents Argument \p A at the call location if the
+/// call has only been replaced by the call to the aggregate function.
+///
+/// \param A - The Argument to get the passed value for.
+/// \param Region - The extracted Region corresponding to the outlined function.
+/// \returns The Value representing \p A at the call site.
+static Value *
+getPassedArgumentAndAdjustArgumentLocation(const Argument *A,
+ const OutlinableRegion &Region) {
+ unsigned ArgNum = A->getArgNo();
+
+ // If it is a constant, we can look at our mapping from when we created
+ // the outputs to figure out what the constant value is.
+ if (Region.AggArgToConstant.count(ArgNum))
+ return Region.AggArgToConstant.find(ArgNum)->second;
+
+ // If it is not a constant, and we are not looking at the overall function, we
+ // need to adjust which argument we are looking at.
+ ArgNum = Region.AggArgToExtracted.find(ArgNum)->second;
+ return Region.Call->getArgOperand(ArgNum);
+}
+
+/// Find the canonical numbering for the incoming Values into the PHINode \p PN.
+///
+/// \param PN [in] - The PHINode that we are finding the canonical numbers for.
+/// \param Region [in] - The OutlinableRegion containing \p PN.
+/// \param OutputMappings [in] - The mapping of output values from outlined
+/// region to their original values.
+/// \param CanonNums [out] - The canonical numbering for the incoming values to
+/// \p PN.
+/// \param ReplacedWithOutlinedCall - A flag to use the extracted function call
+/// of \p Region rather than the overall function's call.
+static void
+findCanonNumsForPHI(PHINode *PN, OutlinableRegion &Region,
+ const DenseMap<Value *, Value *> &OutputMappings,
+ DenseSet<unsigned> &CanonNums,
+ bool ReplacedWithOutlinedCall = true) {
+ // Iterate over the incoming values.
+ for (unsigned Idx = 0, EIdx = PN->getNumIncomingValues(); Idx < EIdx; Idx++) {
+ Value *IVal = PN->getIncomingValue(Idx);
+ // If we have an argument as incoming value, we need to grab the passed
+ // value from the call itself.
+ if (Argument *A = dyn_cast<Argument>(IVal)) {
+ if (ReplacedWithOutlinedCall)
+ IVal = getPassedArgumentInAlreadyOutlinedFunction(A, Region);
+ else
+ IVal = getPassedArgumentAndAdjustArgumentLocation(A, Region);
+ }
+
+ // Get the original value if it has been replaced by an output value.
+ IVal = findOutputMapping(OutputMappings, IVal);
+
+ // Find and add the canonical number for the incoming value.
+ Optional<unsigned> GVN = Region.Candidate->getGVN(IVal);
+ assert(GVN.hasValue() && "No GVN for incoming value");
+ Optional<unsigned> CanonNum = Region.Candidate->getCanonicalNum(*GVN);
+ assert(CanonNum.hasValue() && "No Canonical Number for GVN");
+ CanonNums.insert(*CanonNum);
+ }
+}
+
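
The numbering chain used here is value -> (undo the output remapping) -> GVN -> canonical number, and the resulting set is what identifies a PHINode across regions. A simplified, self-contained version with integer stand-ins; the map names are invented for the sketch and .at() plays the role of the asserts in the real code:

#include <cassert>
#include <map>
#include <set>
#include <vector>

struct CandidateModel {
  std::map<int, unsigned> ValueToGVN;      // per-region global value number
  std::map<unsigned, unsigned> GVNToCanon; // region GVN -> cross-region number
};

std::set<unsigned>
canonNumsForIncoming(const CandidateModel &C, const std::vector<int> &Incoming,
                     const std::map<int, int> &OutputMappings) {
  std::set<unsigned> CanonNums;
  for (int V : Incoming) {
    // Undo the output remapping first, as findOutputMapping does.
    if (auto It = OutputMappings.find(V); It != OutputMappings.end())
      V = It->second;
    unsigned GVN = C.ValueToGVN.at(V);
    CanonNums.insert(C.GVNToCanon.at(GVN));
  }
  return CanonNums;
}

int main() {
  CandidateModel C;
  C.ValueToGVN = {{100, 1}, {200, 2}};
  C.GVNToCanon = {{1, 11}, {2, 12}};
  std::map<int, int> OutputMappings = {{999, 200}}; // 999 replaced original 200
  auto Nums = canonNumsForIncoming(C, {100, 999}, OutputMappings);
  assert((Nums == std::set<unsigned>{11, 12}));
  return 0;
}
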
+/// Find, or add, PHINode \p PN to the combined PHINode Block \p OverallPhiBlock
+/// in order to condense the number of instructions added to the outlined
+/// function.
+///
+/// \param PN [in] - The PHINode that we are finding the canonical numbers for.
+/// \param Region [in] - The OutlinableRegion containing \p PN.
+/// \param OverallPhiBlock [in] - The overall PHIBlock we are trying to find
+/// \p PN in.
+/// \param OutputMappings [in] - The mapping of output values from outlined
+/// region to their original values.
+/// \return the newly found or created PHINode in \p OverallPhiBlock.
+static PHINode*
+findOrCreatePHIInBlock(PHINode &PN, OutlinableRegion &Region,
+ BasicBlock *OverallPhiBlock,
+ const DenseMap<Value *, Value *> &OutputMappings) {
+ OutlinableGroup &Group = *Region.Parent;
+
+ DenseSet<unsigned> PNCanonNums;
+  // We have to use the extracted function since we have not merged this region
+  // into the overall function yet. We make sure to reassign the argument numbering
+ // since it is possible that the argument ordering is different between the
+ // functions.
+ findCanonNumsForPHI(&PN, Region, OutputMappings, PNCanonNums,
+ /* ReplacedWithOutlinedCall = */ false);
+
+ OutlinableRegion *FirstRegion = Group.Regions[0];
+ DenseSet<unsigned> CurrentCanonNums;
+  // Find the Canonical Numbering for each PHINode; if it matches, we replace
+  // the uses of the PHINode we are searching for with the found PHINode.
+ for (PHINode &CurrPN : OverallPhiBlock->phis()) {
+ CurrentCanonNums.clear();
+ findCanonNumsForPHI(&CurrPN, *FirstRegion, OutputMappings, CurrentCanonNums,
+ /* ReplacedWithOutlinedCall = */ true);
+
+ if (all_of(PNCanonNums, [&CurrentCanonNums](unsigned CanonNum) {
+ return CurrentCanonNums.contains(CanonNum);
+ }))
+ return &CurrPN;
+ }
+
+ // If we've made it here, it means we weren't able to replace the PHINode, so
+ // we must insert it ourselves.
+ PHINode *NewPN = cast<PHINode>(PN.clone());
+ NewPN->insertBefore(&*OverallPhiBlock->begin());
+ for (unsigned Idx = 0, Edx = NewPN->getNumIncomingValues(); Idx < Edx;
+ Idx++) {
+ Value *IncomingVal = NewPN->getIncomingValue(Idx);
+ BasicBlock *IncomingBlock = NewPN->getIncomingBlock(Idx);
+
+ // Find corresponding basic block in the overall function for the incoming
+ // block.
+ Instruction *FirstNonPHI = IncomingBlock->getFirstNonPHI();
+ assert(FirstNonPHI && "Incoming block is empty?");
+ Value *CorrespondingVal =
+ Region.findCorrespondingValueIn(*FirstRegion, FirstNonPHI);
+ assert(CorrespondingVal && "Value is nullptr?");
+ BasicBlock *BlockToUse = cast<Instruction>(CorrespondingVal)->getParent();
+ NewPN->setIncomingBlock(Idx, BlockToUse);
+
+    // If we have an argument, we make sure to replace it using the argument
+    // from the correct function.
+ if (Argument *A = dyn_cast<Argument>(IncomingVal)) {
+ Value *Val = Group.OutlinedFunction->getArg(A->getArgNo());
+ NewPN->setIncomingValue(Idx, Val);
+ continue;
+ }
+
+ // Find the corresponding value in the overall function.
+ IncomingVal = findOutputMapping(OutputMappings, IncomingVal);
+ Value *Val = Region.findCorrespondingValueIn(*FirstRegion, IncomingVal);
+ assert(Val && "Value is nullptr?");
+ NewPN->setIncomingValue(Idx, Val);
+ }
+ return NewPN;
+}
+
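
The reuse test in findOrCreatePHIInBlock is a containment check: an existing PHINode in the overall block can be reused when every canonical number feeding the candidate PHINode already feeds it; otherwise the node is cloned and its incoming blocks and values are remapped, as above. A small stand-alone version of just the predicate (toy types, not the LLVM API):

#include <algorithm>
#include <cassert>
#include <set>
#include <vector>

// An existing PHI can be reused when every canonical number feeding the new
// PHI is already among the numbers feeding it.
bool canReusePHI(const std::set<unsigned> &NewPHINums,
                 const std::set<unsigned> &ExistingPHINums) {
  return std::all_of(NewPHINums.begin(), NewPHINums.end(), [&](unsigned N) {
    return ExistingPHINums.count(N) != 0;
  });
}

int main() {
  std::vector<std::set<unsigned>> OverallBlockPHIs = {{11, 12}, {13}};
  std::set<unsigned> Incoming = {11, 12};
  auto It = std::find_if(OverallBlockPHIs.begin(), OverallBlockPHIs.end(),
                         [&](const std::set<unsigned> &P) {
                           return canReusePHI(Incoming, P);
                         });
  assert(It == OverallBlockPHIs.begin()); // first PHI matches, no clone needed
  return 0;
}
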
// Within an extracted function, replace the argument uses of the extracted
// region with the arguments of the function for an OutlinableGroup.
//
@@ -1075,6 +1635,7 @@ CallInst *replaceCalledFunction(Module &M, OutlinableRegion &Region) {
static void
replaceArgumentUses(OutlinableRegion &Region,
DenseMap<Value *, BasicBlock *> &OutputBBs,
+ const DenseMap<Value *, Value *> &OutputMappings,
bool FirstFunction = false) {
OutlinableGroup &Group = *Region.Parent;
assert(Region.ExtractedFunction && "Region has no extracted function?");
@@ -1144,12 +1705,47 @@ replaceArgumentUses(OutlinableRegion &Region,
LLVM_DEBUG(dbgs() << "Move store for instruction " << *I << " to "
<< *OutputBB << "\n");
- if (FirstFunction)
+ // If this is storing a PHINode, we must make sure it is included in the
+ // overall function.
+ if (!isa<PHINode>(ValueOperand) ||
+ Region.Candidate->getGVN(ValueOperand).hasValue()) {
+ if (FirstFunction)
+ continue;
+ Value *CorrVal =
+ Region.findCorrespondingValueIn(*Group.Regions[0], ValueOperand);
+ assert(CorrVal && "Value is nullptr?");
+ NewI->setOperand(0, CorrVal);
+ continue;
+ }
+ PHINode *PN = cast<PHINode>(SI->getValueOperand());
+ // If it has a value, it was not split by the code extractor, which
+ // is what we are looking for.
+ if (Region.Candidate->getGVN(PN).hasValue())
continue;
- Value *CorrVal =
- Region.findCorrespondingValueIn(*Group.Regions[0], ValueOperand);
- assert(CorrVal && "Value is nullptr?");
- NewI->setOperand(0, CorrVal);
+
+ // We record the parent block for the PHINode in the Region so that
+ // we can exclude it from checks later on.
+ Region.PHIBlocks.insert(std::make_pair(RetVal, PN->getParent()));
+
+      // If this is the first function, we do not need to worry about merging
+ // this with any other block in the overall outlined function, so we can
+ // just continue.
+ if (FirstFunction) {
+ BasicBlock *PHIBlock = PN->getParent();
+ Group.PHIBlocks.insert(std::make_pair(RetVal, PHIBlock));
+ continue;
+ }
+
+ // We look for the aggregate block that contains the PHINodes leading into
+ // this exit path. If we can't find one, we create one.
+ BasicBlock *OverallPhiBlock = findOrCreatePHIBlock(Group, RetVal);
+
+ // For our PHINode, we find the combined canonical numbering, and
+ // attempt to find a matching PHINode in the overall PHIBlock. If we
+ // cannot, we copy the PHINode and move it into this new block.
+ PHINode *NewPN =
+ findOrCreatePHIInBlock(*PN, Region, OverallPhiBlock, OutputMappings);
+ NewI->setOperand(0, NewPN);
}
// If we added an edge for basic blocks without a predecessor, we remove it
@@ -1390,7 +1986,12 @@ void createSwitchStatement(
Module &M, OutlinableGroup &OG, DenseMap<Value *, BasicBlock *> &EndBBs,
std::vector<DenseMap<Value *, BasicBlock *>> &OutputStoreBBs) {
// We only need the switch statement if there is more than one store
- // combination.
+ // combination, or there is more than one set of output blocks. The first
+ // will occur when we store different sets of values for two different
+ // regions. The second will occur when we have two outputs that are combined
+ // in a PHINode outside of the region in one outlined instance, and are used
+  // separately in another. This will create the same set of OutputGVNs, but
+ // will generate two different output schemes.
if (OG.OutputGVNCombinations.size() > 1) {
Function *AggFunc = OG.OutlinedFunction;
// Create a final block for each different return block.
@@ -1433,8 +2034,14 @@ void createSwitchStatement(
return;
}
+ assert(OutputStoreBBs.size() < 2 && "Different store sets not handled!");
+
// If there needs to be stores, move them from the output blocks to their
- // corresponding ending block.
+ // corresponding ending block. We do not check that the OutputGVNCombinations
+  // is equal to 1 here since that could just be the case where there are 0
+ // outputs. Instead, we check whether there is more than one set of output
+ // blocks since this is the only case where we would have to move the
+ // stores, and erase the extraneous blocks.
if (OutputStoreBBs.size() == 1) {
LLVM_DEBUG(dbgs() << "Move store instructions to the end block in "
<< *OG.OutlinedFunction << "\n");
@@ -1466,10 +2073,13 @@ void createSwitchStatement(
/// set of stores needed for the different functions.
/// \param [in,out] FuncsToRemove - Extracted functions to erase from module
/// once outlining is complete.
+/// \param [in] OutputMappings - The mapping of output values from the
+/// outlined regions back to their original values.
static void fillOverallFunction(
Module &M, OutlinableGroup &CurrentGroup,
std::vector<DenseMap<Value *, BasicBlock *>> &OutputStoreBBs,
- std::vector<Function *> &FuncsToRemove) {
+ std::vector<Function *> &FuncsToRemove,
+ const DenseMap<Value *, Value *> &OutputMappings) {
OutlinableRegion *CurrentOS = CurrentGroup.Regions[0];
// Move first extracted function's instructions into new function.
@@ -1489,7 +2099,7 @@ static void fillOverallFunction(
CurrentGroup.OutlinedFunction, "output_block_0");
CurrentOS->OutputBlockNum = 0;
- replaceArgumentUses(*CurrentOS, NewBBs, true);
+ replaceArgumentUses(*CurrentOS, NewBBs, OutputMappings, true);
replaceConstants(*CurrentOS);
// We first identify if any output blocks are empty, if they are we remove
@@ -1523,7 +2133,8 @@ void IROutliner::deduplicateExtractedSections(
OutlinableRegion *CurrentOS;
- fillOverallFunction(M, CurrentGroup, OutputStoreBBs, FuncsToRemove);
+ fillOverallFunction(M, CurrentGroup, OutputStoreBBs, FuncsToRemove,
+ OutputMappings);
std::vector<Value *> SortedKeys;
for (unsigned Idx = 1; Idx < CurrentGroup.Regions.size(); Idx++) {
@@ -1537,8 +2148,7 @@ void IROutliner::deduplicateExtractedSections(
createAndInsertBasicBlocks(
CurrentGroup.EndBBs, NewBBs, CurrentGroup.OutlinedFunction,
"output_block_" + Twine(static_cast<unsigned>(Idx)));
-
- replaceArgumentUses(*CurrentOS, NewBBs);
+ replaceArgumentUses(*CurrentOS, NewBBs, OutputMappings);
alignOutputBlockWithAggFunc(CurrentGroup, *CurrentOS, NewBBs,
CurrentGroup.EndBBs, OutputMappings,
OutputStoreBBs);
@@ -1637,7 +2247,7 @@ void IROutliner::pruneIncompatibleRegions(
if (FirstCandidate.getLength() == 2) {
if (isa<CallInst>(FirstCandidate.front()->Inst) &&
isa<BranchInst>(FirstCandidate.back()->Inst))
- return;
+ return;
}
unsigned CurrentEndIdx = 0;
@@ -1706,6 +2316,34 @@ IROutliner::findBenefitFromAllRegions(OutlinableGroup &CurrentGroup) {
return RegionBenefit;
}
+/// For the \p OutputCanon number passed in, find the value represented by this
+/// canonical number. If it is from a PHINode, we pick the first incoming
+/// value and return that Value instead.
+///
+/// \param Region - The OutlinableRegion to get the Value from.
+/// \param OutputCanon - The canonical number to find the Value from.
+/// \returns The Value represented by a canonical number \p OutputCanon in \p
+/// Region.
+static Value *findOutputValueInRegion(OutlinableRegion &Region,
+ unsigned OutputCanon) {
+ OutlinableGroup &CurrentGroup = *Region.Parent;
+ // If the value is greater than the value in the tracker, we have a
+ // PHINode and will instead use one of the incoming values to find the
+ // type.
+ if (OutputCanon > CurrentGroup.PHINodeGVNTracker) {
+ auto It = CurrentGroup.PHINodeGVNToGVNs.find(OutputCanon);
+ assert(It != CurrentGroup.PHINodeGVNToGVNs.end() &&
+ "Could not find GVN set for PHINode number!");
+ assert(It->second.second.size() > 0 && "PHINode does not have any values!");
+ OutputCanon = *It->second.second.begin();
+ }
+ Optional<unsigned> OGVN = Region.Candidate->fromCanonicalNum(OutputCanon);
+ assert(OGVN.hasValue() && "Could not find GVN for Canonical Number?");
+ Optional<Value *> OV = Region.Candidate->fromGVN(*OGVN);
+ assert(OV.hasValue() && "Could not find value for GVN?");
+ return *OV;
+}
+
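
findOutputValueInRegion only needs a representative value (its callers use the type for cost estimation), so a canonical number above PHINodeGVNTracker is first replaced by one member of its recorded set and then run through the ordinary canonical-number -> GVN -> value lookups. A toy model with integers in place of llvm::Value and invented map names; .at() stands in for the asserts in the real code:

#include <cassert>
#include <map>
#include <set>

int valueForCanonNumber(unsigned Canon, unsigned PHINodeGVNTracker,
                        const std::map<unsigned, std::set<unsigned>> &PHIToMembers,
                        const std::map<unsigned, unsigned> &CanonToGVN,
                        const std::map<unsigned, int> &GVNToValue) {
  // Numbers above the tracker denote merged PHI outputs; pick one member of
  // the recorded set and continue with the normal lookup chain.
  if (Canon > PHINodeGVNTracker)
    Canon = *PHIToMembers.at(Canon).begin();
  unsigned GVN = CanonToGVN.at(Canon);
  return GVNToValue.at(GVN);
}

int main() {
  std::map<unsigned, std::set<unsigned>> PHIToMembers = {{1000, {11, 12}}};
  std::map<unsigned, unsigned> CanonToGVN = {{11, 1}, {12, 2}};
  std::map<unsigned, int> GVNToValue = {{1, 100}, {2, 200}};
  assert(valueForCanonNumber(1000, /*Tracker=*/999, PHIToMembers, CanonToGVN,
                             GVNToValue) == 100);
  assert(valueForCanonNumber(12, 999, PHIToMembers, CanonToGVN, GVNToValue) ==
         200);
  return 0;
}
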
InstructionCost
IROutliner::findCostOutputReloads(OutlinableGroup &CurrentGroup) {
InstructionCost OverallCost = 0;
@@ -1713,10 +2351,8 @@ IROutliner::findCostOutputReloads(OutlinableGroup &CurrentGroup) {
TargetTransformInfo &TTI = getTTI(*Region->StartBB->getParent());
// Each output incurs a load after the call, so we add that to the cost.
- for (unsigned OutputGVN : Region->GVNStores) {
- Optional<Value *> OV = Region->Candidate->fromGVN(OutputGVN);
- assert(OV.hasValue() && "Could not find value for GVN?");
- Value *V = OV.getValue();
+ for (unsigned OutputCanon : Region->GVNStores) {
+ Value *V = findOutputValueInRegion(*Region, OutputCanon);
InstructionCost LoadCost =
TTI.getMemoryOpCost(Instruction::Load, V->getType(), Align(1), 0,
TargetTransformInfo::TCK_CodeSize);
@@ -1745,6 +2381,7 @@ static InstructionCost findCostForOutputBlocks(Module &M,
InstructionCost OutputCost = 0;
unsigned NumOutputBranches = 0;
+ OutlinableRegion &FirstRegion = *CurrentGroup.Regions[0];
IRSimilarityCandidate &Candidate = *CurrentGroup.Regions[0]->Candidate;
DenseSet<BasicBlock *> CandidateBlocks;
Candidate.getBasicBlocks(CandidateBlocks);
@@ -1770,10 +2407,8 @@ static InstructionCost findCostForOutputBlocks(Module &M,
for (const ArrayRef<unsigned> &OutputUse :
CurrentGroup.OutputGVNCombinations) {
- for (unsigned GVN : OutputUse) {
- Optional<Value *> OV = Candidate.fromGVN(GVN);
- assert(OV.hasValue() && "Could not find value for GVN?");
- Value *V = OV.getValue();
+ for (unsigned OutputCanon : OutputUse) {
+ Value *V = findOutputValueInRegion(FirstRegion, OutputCanon);
InstructionCost StoreCost =
TTI.getMemoryOpCost(Instruction::Load, V->getType(), Align(1), 0,
TargetTransformInfo::TCK_CodeSize);
@@ -1974,6 +2609,7 @@ bool IROutliner::extractSection(OutlinableRegion &Region) {
unsigned IROutliner::doOutline(Module &M) {
// Find the possible similarity sections.
InstructionClassifier.EnableBranches = !DisableBranches;
+ InstructionClassifier.EnableIndirectCalls = !DisableIndirectCalls;
IRSimilarityIdentifier &Identifier = getIRSI(M);
SimilarityGroupList &SimilarityCandidates = *Identifier.getSimilarity();
@@ -2033,8 +2669,8 @@ unsigned IROutliner::doOutline(Module &M) {
continue;
SmallVector<BasicBlock *> BE;
- DenseSet<BasicBlock *> BBSet;
- OS->Candidate->getBasicBlocks(BBSet, BE);
+ DenseSet<BasicBlock *> BlocksInRegion;
+ OS->Candidate->getBasicBlocks(BlocksInRegion, BE);
OS->CE = new (ExtractorAllocator.Allocate())
CodeExtractor(BE, nullptr, false, nullptr, nullptr, nullptr, false,
false, "outlined");
@@ -2144,8 +2780,8 @@ unsigned IROutliner::doOutline(Module &M) {
OutlinedRegions.clear();
for (OutlinableRegion *OS : CurrentGroup.Regions) {
SmallVector<BasicBlock *> BE;
- DenseSet<BasicBlock *> BBSet;
- OS->Candidate->getBasicBlocks(BBSet, BE);
+ DenseSet<BasicBlock *> BlocksInRegion;
+ OS->Candidate->getBasicBlocks(BlocksInRegion, BE);
OS->CE = new (ExtractorAllocator.Allocate())
CodeExtractor(BE, nullptr, false, nullptr, nullptr, nullptr, false,
false, "outlined");
diff --git a/contrib/llvm-project/llvm/lib/Transforms/IPO/Inliner.cpp b/contrib/llvm-project/llvm/lib/Transforms/IPO/Inliner.cpp
index 4e3689f09536..49babc24cb82 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/IPO/Inliner.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/IPO/Inliner.cpp
@@ -92,6 +92,11 @@ static cl::opt<bool>
DisableInlinedAllocaMerging("disable-inlined-alloca-merging",
cl::init(false), cl::Hidden);
+/// A flag for testing, so we can print the content of the advisor when running it
+/// as part of the default (e.g. -O3) pipeline.
+static cl::opt<bool> KeepAdvisorForPrinting("keep-inline-advisor-for-printing",
+ cl::init(false), cl::Hidden);
+
extern cl::opt<InlinerFunctionImportStatsOpts> InlinerFunctionImportStats;
static cl::opt<std::string> CGSCCInlineReplayFile(
@@ -660,7 +665,7 @@ bool LegacyInlinerBase::removeDeadFunctions(CallGraph &CG,
}
if (!DeadFunctionsInComdats.empty()) {
// Filter out the functions whose comdats remain alive.
- filterDeadComdatFunctions(CG.getModule(), DeadFunctionsInComdats);
+ filterDeadComdatFunctions(DeadFunctionsInComdats);
// Remove the rest.
for (Function *F : DeadFunctionsInComdats)
RemoveCGN(CG[F]);
@@ -741,7 +746,7 @@ PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
InlineAdvisor &Advisor = getAdvisor(MAMProxy, FAM, M);
Advisor.onPassEntry();
- auto AdvisorOnExit = make_scope_exit([&] { Advisor.onPassExit(); });
+ auto AdvisorOnExit = make_scope_exit([&] { Advisor.onPassExit(&InitialC); });
// We use a single common worklist for calls across the entire SCC. We
// process these in-order and append new calls introduced during inlining to
@@ -823,6 +828,10 @@ PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
// defer deleting these to make it easier to handle the call graph updates.
SmallVector<Function *, 4> DeadFunctions;
+ // Track potentially dead non-local functions with comdats to see if they can
+ // be deleted as a batch after inlining.
+ SmallVector<Function *, 4> DeadFunctionsInComdats;
+
// Loop forward over all of the calls.
while (!Calls->empty()) {
// We expect the calls to typically be batched with sequences of calls that
@@ -935,16 +944,15 @@ PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
// Merge the attributes based on the inlining.
AttributeFuncs::mergeAttributesForInlining(F, Callee);
- // For local functions, check whether this makes the callee trivially
- // dead. In that case, we can drop the body of the function eagerly
- // which may reduce the number of callers of other functions to one,
- // changing inline cost thresholds.
+ // For local functions or discardable functions without comdats, check
+ // whether this makes the callee trivially dead. In that case, we can drop
+ // the body of the function eagerly which may reduce the number of callers
+ // of other functions to one, changing inline cost thresholds. Non-local
+ // discardable functions with comdats are checked later on.
bool CalleeWasDeleted = false;
- if (Callee.hasLocalLinkage()) {
- // To check this we also need to nuke any dead constant uses (perhaps
- // made dead by this operation on other functions).
- Callee.removeDeadConstantUsers();
- if (Callee.use_empty() && !CG.isLibFunction(Callee)) {
+ if (Callee.isDiscardableIfUnused() && Callee.hasZeroLiveUses() &&
+ !CG.isLibFunction(Callee)) {
+ if (Callee.hasLocalLinkage() || !Callee.hasComdat()) {
Calls->erase_if([&](const std::pair<CallBase *, int> &Call) {
return Call.first->getCaller() == &Callee;
});
@@ -957,6 +965,8 @@ PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
"Cannot put cause a function to become dead twice!");
DeadFunctions.push_back(&Callee);
CalleeWasDeleted = true;
+ } else {
+ DeadFunctionsInComdats.push_back(&Callee);
}
}
if (CalleeWasDeleted)
@@ -1019,6 +1029,15 @@ PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
FAM.invalidate(F, PreservedAnalyses::none());
}
+ // We must ensure that we only delete functions with comdats if every function
+ // in the comdat is going to be deleted.
+ if (!DeadFunctionsInComdats.empty()) {
+ filterDeadComdatFunctions(DeadFunctionsInComdats);
+ for (auto *Callee : DeadFunctionsInComdats)
+ Callee->dropAllReferences();
+ DeadFunctions.append(DeadFunctionsInComdats);
+ }
+
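
The batching above relies on the rule that a function in a comdat may only be dropped when its whole comdat group is dead, which is what filterDeadComdatFunctions enforces. A simplified stand-alone model of that filter, using a toy Fn type and invented names rather than the LLVM API (the candidates here are assumed to all carry a comdat, as in the pass):

#include <algorithm>
#include <cassert>
#include <map>
#include <string>
#include <vector>

struct Fn {
  std::string Name;
  std::string Comdat; // comdat group name
  bool Dead = false;  // candidate for deletion after inlining
};

// A dead function in a comdat may only be deleted if every member of its
// comdat group is also dead.
void filterDeadComdatFns(std::vector<Fn *> &Candidates,
                         const std::vector<Fn> &Module) {
  std::map<std::string, bool> GroupFullyDead;
  for (const Fn &F : Module)
    if (!F.Comdat.empty()) {
      auto It = GroupFullyDead.emplace(F.Comdat, true).first;
      It->second = It->second && F.Dead;
    }
  Candidates.erase(std::remove_if(Candidates.begin(), Candidates.end(),
                                  [&](Fn *F) {
                                    return !GroupFullyDead[F->Comdat];
                                  }),
                   Candidates.end());
}

int main() {
  std::vector<Fn> M = {{"a", "g1", true}, {"b", "g1", false}, {"c", "g2", true}};
  std::vector<Fn *> Cands = {&M[0], &M[2]};
  filterDeadComdatFns(Cands, M);
  assert(Cands.size() == 1 && Cands[0]->Name == "c"); // g1 keeps a live member
  return 0;
}
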
// Now that we've finished inlining all of the calls across this SCC, delete
// all of the trivially dead functions, updating the call graph and the CGSCC
// pass manager in the process.
@@ -1045,14 +1064,7 @@ PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
UR.UpdatedC = nullptr;
// And delete the actual function from the module.
- // The Advisor may use Function pointers to efficiently index various
- // internal maps, e.g. for memoization. Function cleanup passes like
- // argument promotion create new functions. It is possible for a new
- // function to be allocated at the address of a deleted function. We could
- // index using names, but that's inefficient. Alternatively, we let the
- // Advisor free the functions when it sees fit.
- DeadF->getBasicBlockList().clear();
- M.getFunctionList().remove(DeadF);
+ M.getFunctionList().erase(DeadF);
++NumDeleted;
}
@@ -1073,8 +1085,7 @@ ModuleInlinerWrapperPass::ModuleInlinerWrapperPass(InlineParams Params,
bool MandatoryFirst,
InliningAdvisorMode Mode,
unsigned MaxDevirtIterations)
- : Params(Params), Mode(Mode), MaxDevirtIterations(MaxDevirtIterations),
- PM(), MPM() {
+ : Params(Params), Mode(Mode), MaxDevirtIterations(MaxDevirtIterations) {
// Run the inliner first. The theory is that we are walking bottom-up and so
// the callees have already been fully optimized, and we want to inline them
// into the callers so that our optimizations can reflect that.
@@ -1118,7 +1129,8 @@ PreservedAnalyses ModuleInlinerWrapperPass::run(Module &M,
// Discard the InlineAdvisor, a subsequent inlining session should construct
// its own.
auto PA = PreservedAnalyses::all();
- PA.abandon<InlineAdvisorAnalysis>();
+ if (!KeepAdvisorForPrinting)
+ PA.abandon<InlineAdvisorAnalysis>();
return PA;
}
diff --git a/contrib/llvm-project/llvm/lib/Transforms/IPO/ModuleInliner.cpp b/contrib/llvm-project/llvm/lib/Transforms/IPO/ModuleInliner.cpp
index ebf080e87c3b..d515303e4911 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/IPO/ModuleInliner.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/IPO/ModuleInliner.cpp
@@ -335,14 +335,7 @@ PreservedAnalyses ModuleInlinerPass::run(Module &M,
FAM.clear(*DeadF, DeadF->getName());
// And delete the actual function from the module.
- // The Advisor may use Function pointers to efficiently index various
- // internal maps, e.g. for memoization. Function cleanup passes like
- // argument promotion create new functions. It is possible for a new
- // function to be allocated at the address of a deleted function. We could
- // index using names, but that's inefficient. Alternatively, we let the
- // Advisor free the functions when it sees fit.
- DeadF->getBasicBlockList().clear();
- M.getFunctionList().remove(DeadF);
+ M.getFunctionList().erase(DeadF);
++NumDeleted;
}
diff --git a/contrib/llvm-project/llvm/lib/Transforms/IPO/OpenMPOpt.cpp b/contrib/llvm-project/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
index f289e3ecc979..68f33410c602 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
@@ -21,6 +21,7 @@
#include "llvm/ADT/EnumeratedArray.h"
#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/CallGraph.h"
@@ -153,14 +154,6 @@ static constexpr auto TAG = "[" DEBUG_TYPE "]";
namespace {
-enum class AddressSpace : unsigned {
- Generic = 0,
- Global = 1,
- Shared = 3,
- Constant = 4,
- Local = 5,
-};
-
struct AAHeapToShared;
struct AAICVTracker;
@@ -170,7 +163,7 @@ struct AAICVTracker;
struct OMPInformationCache : public InformationCache {
OMPInformationCache(Module &M, AnalysisGetter &AG,
BumpPtrAllocator &Allocator, SetVector<Function *> &CGSCC,
- SmallPtrSetImpl<Kernel> &Kernels)
+ KernelSet &Kernels)
: InformationCache(M, AG, Allocator, &CGSCC), OMPBuilder(M),
Kernels(Kernels) {
@@ -424,6 +417,12 @@ struct OMPInformationCache : public InformationCache {
recollectUsesForFunction(static_cast<RuntimeFunction>(Idx));
}
+ // Helper function to inherit the calling convention of the function callee.
+ void setCallingConvention(FunctionCallee Callee, CallInst *CI) {
+ if (Function *Fn = dyn_cast<Function>(Callee.getCallee()))
+ CI->setCallingConv(Fn->getCallingConv());
+ }
+
/// Helper to initialize all runtime function information for those defined
/// in OpenMPKinds.def.
void initializeRuntimeFunctions() {
@@ -485,7 +484,7 @@ struct OMPInformationCache : public InformationCache {
}
/// Collection of known kernels (\see Kernel) in the module.
- SmallPtrSetImpl<Kernel> &Kernels;
+ KernelSet &Kernels;
/// Collection of known OpenMP runtime functions..
DenseSet<const Function *> RTLFunctions;
@@ -1013,7 +1012,8 @@ private:
// into a single parallel region is contained in a single basic block
// without any other instructions. We use the OpenMPIRBuilder to outline
// that block and call the resulting function via __kmpc_fork_call.
- auto Merge = [&](SmallVectorImpl<CallInst *> &MergableCIs, BasicBlock *BB) {
+ auto Merge = [&](const SmallVectorImpl<CallInst *> &MergableCIs,
+ BasicBlock *BB) {
// TODO: Change the interface to allow single CIs expanded, e.g, to
// include an outer loop.
assert(MergableCIs.size() > 1 && "Assumed multiple mergable CIs");
@@ -1075,8 +1075,7 @@ private:
BranchInst::Create(AfterBB, AfterIP.getBlock());
// Perform the actual outlining.
- OMPInfoCache.OMPBuilder.finalize(OriginalFn,
- /* AllowExtractorSinking */ true);
+ OMPInfoCache.OMPBuilder.finalize(OriginalFn);
Function *OutlinedFn = MergableCIs.front()->getCaller();
@@ -1538,6 +1537,7 @@ private:
CallInst *IssueCallsite =
CallInst::Create(IssueDecl, Args, /*NameStr=*/"", &RuntimeCall);
+ OMPInfoCache.setCallingConvention(IssueDecl, IssueCallsite);
RuntimeCall.eraseFromParent();
// Add "wait" runtime call declaration:
@@ -1550,7 +1550,9 @@ private:
OffloadArray::DeviceIDArgNum), // device_id.
Handle // handle to wait on.
};
- CallInst::Create(WaitDecl, WaitParams, /*NameStr=*/"", &WaitMovementPoint);
+ CallInst *WaitCallsite = CallInst::Create(
+ WaitDecl, WaitParams, /*NameStr=*/"", &WaitMovementPoint);
+ OMPInfoCache.setCallingConvention(WaitDecl, WaitCallsite);
return true;
}
@@ -1597,8 +1599,10 @@ private:
&F.getEntryBlock(), F.getEntryBlock().begin()));
// Create a fallback location if non was found.
// TODO: Use the debug locations of the calls instead.
- Constant *Loc = OMPInfoCache.OMPBuilder.getOrCreateDefaultSrcLocStr();
- Ident = OMPInfoCache.OMPBuilder.getOrCreateIdent(Loc);
+ uint32_t SrcLocStrSize;
+ Constant *Loc =
+ OMPInfoCache.OMPBuilder.getOrCreateDefaultSrcLocStr(SrcLocStrSize);
+ Ident = OMPInfoCache.OMPBuilder.getOrCreateIdent(Loc, SrcLocStrSize);
}
return Ident;
}
@@ -2171,7 +2175,7 @@ struct AAICVTrackerFunction : public AAICVTracker {
};
auto CallCheck = [&](Instruction &I) {
- Optional<Value *> ReplVal = getValueForCall(A, &I, ICV);
+ Optional<Value *> ReplVal = getValueForCall(A, I, ICV);
if (ReplVal.hasValue() &&
ValuesMap.insert(std::make_pair(&I, *ReplVal)).second)
HasChanged = ChangeStatus::CHANGED;
@@ -2197,12 +2201,12 @@ struct AAICVTrackerFunction : public AAICVTracker {
return HasChanged;
}
- /// Hepler to check if \p I is a call and get the value for it if it is
+ /// Helper to check if \p I is a call and get the value for it if it is
/// unique.
- Optional<Value *> getValueForCall(Attributor &A, const Instruction *I,
+ Optional<Value *> getValueForCall(Attributor &A, const Instruction &I,
InternalControlVar &ICV) const {
- const auto *CB = dyn_cast<CallBase>(I);
+ const auto *CB = dyn_cast<CallBase>(&I);
if (!CB || CB->hasFnAttr("no_openmp") ||
CB->hasFnAttr("no_openmp_routines"))
return None;
@@ -2218,8 +2222,8 @@ struct AAICVTrackerFunction : public AAICVTracker {
if (CalledFunction == GetterRFI.Declaration)
return None;
if (CalledFunction == SetterRFI.Declaration) {
- if (ICVReplacementValuesMap[ICV].count(I))
- return ICVReplacementValuesMap[ICV].lookup(I);
+ if (ICVReplacementValuesMap[ICV].count(&I))
+ return ICVReplacementValuesMap[ICV].lookup(&I);
return nullptr;
}
@@ -2231,8 +2235,11 @@ struct AAICVTrackerFunction : public AAICVTracker {
const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>(
*this, IRPosition::callsite_returned(*CB), DepClassTy::REQUIRED);
- if (ICVTrackingAA.isAssumedTracked())
- return ICVTrackingAA.getUniqueReplacementValue(ICV);
+ if (ICVTrackingAA.isAssumedTracked()) {
+ Optional<Value *> URV = ICVTrackingAA.getUniqueReplacementValue(ICV);
+ if (!URV || (*URV && AA::isValidAtPosition(**URV, I, OMPInfoCache)))
+ return URV;
+ }
// If we don't know, assume it changes.
return nullptr;
@@ -2284,7 +2291,7 @@ struct AAICVTrackerFunction : public AAICVTracker {
break;
}
- Optional<Value *> NewReplVal = getValueForCall(A, CurrInst, ICV);
+ Optional<Value *> NewReplVal = getValueForCall(A, *CurrInst, ICV);
if (!NewReplVal.hasValue())
continue;
@@ -2548,7 +2555,7 @@ struct AAExecutionDomainFunction : public AAExecutionDomain {
}
/// Set of basic blocks that are executed by a single thread.
- DenseSet<const BasicBlock *> SingleThreadedBBs;
+ SmallSetVector<const BasicBlock *, 16> SingleThreadedBBs;
/// Total number of basic blocks in this function.
long unsigned NumBBs;
@@ -2572,7 +2579,7 @@ ChangeStatus AAExecutionDomainFunction::updateImpl(Attributor &A) {
if (!A.checkForAllCallSites(PredForCallSite, *this,
/* RequiresAllCallSites */ true,
AllCallSitesKnown))
- SingleThreadedBBs.erase(&F->getEntryBlock());
+ SingleThreadedBBs.remove(&F->getEntryBlock());
auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_target_init];
@@ -2637,7 +2644,7 @@ ChangeStatus AAExecutionDomainFunction::updateImpl(Attributor &A) {
for (auto *BB : RPOT) {
if (!MergePredecessorStates(BB))
- SingleThreadedBBs.erase(BB);
+ SingleThreadedBBs.remove(BB);
}
return (NumSingleThreadedBBs == SingleThreadedBBs.size())
@@ -2759,7 +2766,7 @@ struct AAHeapToSharedFunction : public AAHeapToShared {
if (FreeCalls.size() != 1)
continue;
- ConstantInt *AllocSize = dyn_cast<ConstantInt>(CB->getArgOperand(0));
+ auto *AllocSize = cast<ConstantInt>(CB->getArgOperand(0));
LLVM_DEBUG(dbgs() << TAG << "Replace globalization call " << *CB
<< " with " << AllocSize->getZExtValue()
@@ -2772,7 +2779,7 @@ struct AAHeapToSharedFunction : public AAHeapToShared {
Type *Int8ArrTy = ArrayType::get(Int8Ty, AllocSize->getZExtValue());
auto *SharedMem = new GlobalVariable(
*M, Int8ArrTy, /* IsConstant */ false, GlobalValue::InternalLinkage,
- UndefValue::get(Int8ArrTy), CB->getName(), nullptr,
+ UndefValue::get(Int8ArrTy), CB->getName() + "_shared", nullptr,
GlobalValue::NotThreadLocal,
static_cast<unsigned>(AddressSpace::Shared));
auto *NewBuffer =
@@ -2786,7 +2793,10 @@ struct AAHeapToSharedFunction : public AAHeapToShared {
};
A.emitRemark<OptimizationRemark>(CB, "OMP111", Remark);
- SharedMem->setAlignment(MaybeAlign(32));
+ MaybeAlign Alignment = CB->getRetAlign();
+ assert(Alignment &&
+ "HeapToShared on allocation without alignment attribute");
+ SharedMem->setAlignment(MaybeAlign(Alignment));
A.changeValueAfterManifest(*CB, *NewBuffer);
A.deleteAfterManifest(*CB);
@@ -2813,7 +2823,7 @@ struct AAHeapToSharedFunction : public AAHeapToShared {
if (CallBase *CB = dyn_cast<CallBase>(U))
if (!isa<ConstantInt>(CB->getArgOperand(0)) ||
!ED.isExecutedByInitialThreadOnly(*CB))
- MallocCalls.erase(CB);
+ MallocCalls.remove(CB);
}
findPotentialRemovedFreeCalls(A);
@@ -2825,7 +2835,7 @@ struct AAHeapToSharedFunction : public AAHeapToShared {
}
/// Collection of all malloc calls in a function.
- SmallPtrSet<CallBase *, 4> MallocCalls;
+ SmallSetVector<CallBase *, 4> MallocCalls;
/// Collection of potentially removed free calls in a function.
SmallPtrSet<CallBase *, 4> PotentialRemovedFreeCalls;
};
@@ -2962,7 +2972,7 @@ struct AAKernelInfoFunction : AAKernelInfo {
A.recordDependence(*this, *AA, DepClassTy::OPTIONAL);
UsedAssumedInformation = !isAtFixpoint();
auto *FalseVal =
- ConstantInt::getBool(IRP.getAnchorValue().getContext(), 0);
+ ConstantInt::getBool(IRP.getAnchorValue().getContext(), false);
return FalseVal;
};
@@ -3225,8 +3235,11 @@ struct AAKernelInfoFunction : AAKernelInfo {
OpenMPIRBuilder::LocationDescription Loc(
InsertPointTy(ParentBB, ParentBB->end()), DL);
OMPInfoCache.OMPBuilder.updateToLocation(Loc);
- auto *SrcLocStr = OMPInfoCache.OMPBuilder.getOrCreateSrcLocStr(Loc);
- Value *Ident = OMPInfoCache.OMPBuilder.getOrCreateIdent(SrcLocStr);
+ uint32_t SrcLocStrSize;
+ auto *SrcLocStr =
+ OMPInfoCache.OMPBuilder.getOrCreateSrcLocStr(Loc, SrcLocStrSize);
+ Value *Ident =
+ OMPInfoCache.OMPBuilder.getOrCreateIdent(SrcLocStr, SrcLocStrSize);
BranchInst::Create(RegionCheckTidBB, ParentBB)->setDebugLoc(DL);
// Add check for Tid in RegionCheckTidBB
@@ -3237,8 +3250,10 @@ struct AAKernelInfoFunction : AAKernelInfo {
FunctionCallee HardwareTidFn =
OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
M, OMPRTL___kmpc_get_hardware_thread_id_in_block);
- Value *Tid =
+ CallInst *Tid =
OMPInfoCache.OMPBuilder.Builder.CreateCall(HardwareTidFn, {});
+ Tid->setDebugLoc(DL);
+ OMPInfoCache.setCallingConvention(HardwareTidFn, Tid);
Value *TidCheck = OMPInfoCache.OMPBuilder.Builder.CreateIsNull(Tid);
OMPInfoCache.OMPBuilder.Builder
.CreateCondBr(TidCheck, RegionStartBB, RegionBarrierBB)
@@ -3251,14 +3266,18 @@ struct AAKernelInfoFunction : AAKernelInfo {
M, OMPRTL___kmpc_barrier_simple_spmd);
OMPInfoCache.OMPBuilder.updateToLocation(InsertPointTy(
RegionBarrierBB, RegionBarrierBB->getFirstInsertionPt()));
- OMPInfoCache.OMPBuilder.Builder.CreateCall(BarrierFn, {Ident, Tid})
- ->setDebugLoc(DL);
+ CallInst *Barrier =
+ OMPInfoCache.OMPBuilder.Builder.CreateCall(BarrierFn, {Ident, Tid});
+ Barrier->setDebugLoc(DL);
+ OMPInfoCache.setCallingConvention(BarrierFn, Barrier);
// Second barrier ensures workers have read broadcast values.
- if (HasBroadcastValues)
- CallInst::Create(BarrierFn, {Ident, Tid}, "",
- RegionBarrierBB->getTerminator())
- ->setDebugLoc(DL);
+ if (HasBroadcastValues) {
+ CallInst *Barrier = CallInst::Create(BarrierFn, {Ident, Tid}, "",
+ RegionBarrierBB->getTerminator());
+ Barrier->setDebugLoc(DL);
+ OMPInfoCache.setCallingConvention(BarrierFn, Barrier);
+ }
};
auto &AllocSharedRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared];
@@ -3352,17 +3371,17 @@ struct AAKernelInfoFunction : AAKernelInfo {
OMP_TGT_EXEC_MODE_SPMD));
A.changeUseAfterManifest(
KernelInitCB->getArgOperandUse(InitUseStateMachineArgNo),
- *ConstantInt::getBool(Ctx, 0));
+ *ConstantInt::getBool(Ctx, false));
A.changeUseAfterManifest(
KernelDeinitCB->getArgOperandUse(DeinitModeArgNo),
*ConstantInt::getSigned(IntegerType::getInt8Ty(Ctx),
OMP_TGT_EXEC_MODE_SPMD));
A.changeUseAfterManifest(
KernelInitCB->getArgOperandUse(InitRequiresFullRuntimeArgNo),
- *ConstantInt::getBool(Ctx, 0));
+ *ConstantInt::getBool(Ctx, false));
A.changeUseAfterManifest(
KernelDeinitCB->getArgOperandUse(DeinitRequiresFullRuntimeArgNo),
- *ConstantInt::getBool(Ctx, 0));
+ *ConstantInt::getBool(Ctx, false));
++NumOpenMPTargetRegionKernelsSPMD;
@@ -3403,7 +3422,7 @@ struct AAKernelInfoFunction : AAKernelInfo {
// If not SPMD mode, indicate we use a custom state machine now.
auto &Ctx = getAnchorValue().getContext();
- auto *FalseVal = ConstantInt::getBool(Ctx, 0);
+ auto *FalseVal = ConstantInt::getBool(Ctx, false);
A.changeUseAfterManifest(
KernelInitCB->getArgOperandUse(InitUseStateMachineArgNo), *FalseVal);
@@ -3528,10 +3547,12 @@ struct AAKernelInfoFunction : AAKernelInfo {
FunctionCallee WarpSizeFn =
OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
M, OMPRTL___kmpc_get_warp_size);
- Instruction *BlockHwSize =
+ CallInst *BlockHwSize =
CallInst::Create(BlockHwSizeFn, "block.hw_size", InitBB);
+ OMPInfoCache.setCallingConvention(BlockHwSizeFn, BlockHwSize);
BlockHwSize->setDebugLoc(DLoc);
- Instruction *WarpSize = CallInst::Create(WarpSizeFn, "warp.size", InitBB);
+ CallInst *WarpSize = CallInst::Create(WarpSizeFn, "warp.size", InitBB);
+ OMPInfoCache.setCallingConvention(WarpSizeFn, WarpSize);
WarpSize->setDebugLoc(DLoc);
Instruction *BlockSize =
BinaryOperator::CreateSub(BlockHwSize, WarpSize, "block.size", InitBB);
@@ -3571,8 +3592,10 @@ struct AAKernelInfoFunction : AAKernelInfo {
FunctionCallee BarrierFn =
OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
M, OMPRTL___kmpc_barrier_simple_generic);
- CallInst::Create(BarrierFn, {Ident, GTid}, "", StateMachineBeginBB)
- ->setDebugLoc(DLoc);
+ CallInst *Barrier =
+ CallInst::Create(BarrierFn, {Ident, GTid}, "", StateMachineBeginBB);
+ OMPInfoCache.setCallingConvention(BarrierFn, Barrier);
+ Barrier->setDebugLoc(DLoc);
if (WorkFnAI->getType()->getPointerAddressSpace() !=
(unsigned int)AddressSpace::Generic) {
@@ -3588,8 +3611,9 @@ struct AAKernelInfoFunction : AAKernelInfo {
FunctionCallee KernelParallelFn =
OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
M, OMPRTL___kmpc_kernel_parallel);
- Instruction *IsActiveWorker = CallInst::Create(
+ CallInst *IsActiveWorker = CallInst::Create(
KernelParallelFn, {WorkFnAI}, "worker.is_active", StateMachineBeginBB);
+ OMPInfoCache.setCallingConvention(KernelParallelFn, IsActiveWorker);
IsActiveWorker->setDebugLoc(DLoc);
Instruction *WorkFn = new LoadInst(VoidPtrTy, WorkFnAI, "worker.work_fn",
StateMachineBeginBB);
@@ -3669,10 +3693,13 @@ struct AAKernelInfoFunction : AAKernelInfo {
StateMachineIfCascadeCurrentBB)
->setDebugLoc(DLoc);
- CallInst::Create(OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
- M, OMPRTL___kmpc_kernel_end_parallel),
- {}, "", StateMachineEndParallelBB)
- ->setDebugLoc(DLoc);
+ FunctionCallee EndParallelFn =
+ OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
+ M, OMPRTL___kmpc_kernel_end_parallel);
+ CallInst *EndParallel =
+ CallInst::Create(EndParallelFn, {}, "", StateMachineEndParallelBB);
+ OMPInfoCache.setCallingConvention(EndParallelFn, EndParallel);
+ EndParallel->setDebugLoc(DLoc);
BranchInst::Create(StateMachineDoneBarrierBB, StateMachineEndParallelBB)
->setDebugLoc(DLoc);
@@ -4508,6 +4535,8 @@ void OpenMPOpt::registerAAs(bool IsModulePass) {
bool UsedAssumedInformation = false;
A.getAssumedSimplified(IRPosition::value(*LI), /* AA */ nullptr,
UsedAssumedInformation);
+ } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
+ A.getOrCreateAAFor<AAIsDead>(IRPosition::value(*SI));
}
}
}
diff --git a/contrib/llvm-project/llvm/lib/Transforms/IPO/PartialInlining.cpp b/contrib/llvm-project/llvm/lib/Transforms/IPO/PartialInlining.cpp
index 2d717475ce7f..5f2223e4047e 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/IPO/PartialInlining.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/IPO/PartialInlining.cpp
@@ -169,8 +169,7 @@ struct FunctionOutliningInfo {
};
struct FunctionOutliningMultiRegionInfo {
- FunctionOutliningMultiRegionInfo()
- : ORI() {}
+ FunctionOutliningMultiRegionInfo() {}
// Container for outline regions
struct OutlineRegionInfo {
@@ -971,6 +970,9 @@ void PartialInlinerImpl::computeCallsiteToProfCountMap(
};
for (User *User : Users) {
+ // Don't bother with BlockAddress used by CallBr for asm goto.
+ if (isa<BlockAddress>(User))
+ continue;
CallBase *CB = getSupportedCallBase(User);
Function *Caller = CB->getCaller();
if (CurrentCaller != Caller) {
@@ -1414,6 +1416,10 @@ bool PartialInlinerImpl::tryPartialInline(FunctionCloner &Cloner) {
bool AnyInline = false;
for (User *User : Users) {
+ // Don't bother with BlockAddress used by CallBr for asm goto.
+ if (isa<BlockAddress>(User))
+ continue;
+
CallBase *CB = getSupportedCallBase(User);
if (isLimitReached())
diff --git a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index eb1b8a29cfc5..0598f751febe 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -519,13 +519,6 @@ Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) {
unsigned NextTmpIdx = 0;
FAddend TmpResult[3];
- // Points to the constant addend of the resulting simplified expression.
- // If the resulting expr has constant-addend, this constant-addend is
- // desirable to reside at the top of the resulting expression tree. Placing
- // constant close to supper-expr(s) will potentially reveal some optimization
- // opportunities in super-expr(s).
- const FAddend *ConstAdd = nullptr;
-
// Simplified addends are placed <SimpVect>.
AddendVect SimpVect;
@@ -541,6 +534,14 @@ Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) {
}
Value *Val = ThisAddend->getSymVal();
+
+ // If the resulting expr has constant-addend, this constant-addend is
+ // desirable to reside at the top of the resulting expression tree. Placing
+ // constant close to super-expr(s) will potentially reveal some
+ // optimization opportunities in super-expr(s). Here we do not implement
+ // this logic intentionally and rely on SimplifyAssociativeOrCommutative
+ // call later.
+
unsigned StartIdx = SimpVect.size();
SimpVect.push_back(ThisAddend);
@@ -569,14 +570,8 @@ Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) {
// Pop all addends being folded and push the resulting folded addend.
SimpVect.resize(StartIdx);
- if (Val) {
- if (!R.isZero()) {
- SimpVect.push_back(&R);
- }
- } else {
- // Don't push constant addend at this time. It will be the last element
- // of <SimpVect>.
- ConstAdd = &R;
+ if (!R.isZero()) {
+ SimpVect.push_back(&R);
}
}
}
@@ -584,9 +579,6 @@ Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) {
assert((NextTmpIdx <= array_lengthof(TmpResult) + 1) &&
"out-of-bound access");
- if (ConstAdd)
- SimpVect.push_back(ConstAdd);
-
Value *Result;
if (!SimpVect.empty())
Result = createNaryFAdd(SimpVect, InstrQuota);
@@ -1296,6 +1288,9 @@ Instruction *InstCombinerImpl::visitAdd(BinaryOperator &I) {
if (Instruction *X = foldVectorBinop(I))
return X;
+ if (Instruction *Phi = foldBinopWithPhiOperands(I))
+ return Phi;
+
// (A*B)+(A*C) -> A*(B+C) etc
if (Value *V = SimplifyUsingDistributiveLaws(I))
return replaceInstUsesWith(I, V);
@@ -1498,15 +1493,18 @@ static Instruction *factorizeFAddFSub(BinaryOperator &I,
return Lerp;
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
+ if (!Op0->hasOneUse() || !Op1->hasOneUse())
+ return nullptr;
+
Value *X, *Y, *Z;
bool IsFMul;
- if ((match(Op0, m_OneUse(m_FMul(m_Value(X), m_Value(Z)))) &&
- match(Op1, m_OneUse(m_c_FMul(m_Value(Y), m_Specific(Z))))) ||
- (match(Op0, m_OneUse(m_FMul(m_Value(Z), m_Value(X)))) &&
- match(Op1, m_OneUse(m_c_FMul(m_Value(Y), m_Specific(Z))))))
+ if ((match(Op0, m_FMul(m_Value(X), m_Value(Z))) &&
+ match(Op1, m_c_FMul(m_Value(Y), m_Specific(Z)))) ||
+ (match(Op0, m_FMul(m_Value(Z), m_Value(X))) &&
+ match(Op1, m_c_FMul(m_Value(Y), m_Specific(Z)))))
IsFMul = true;
- else if (match(Op0, m_OneUse(m_FDiv(m_Value(X), m_Value(Z)))) &&
- match(Op1, m_OneUse(m_FDiv(m_Value(Y), m_Specific(Z)))))
+ else if (match(Op0, m_FDiv(m_Value(X), m_Value(Z))) &&
+ match(Op1, m_FDiv(m_Value(Y), m_Specific(Z))))
IsFMul = false;
else
return nullptr;
@@ -1541,6 +1539,9 @@ Instruction *InstCombinerImpl::visitFAdd(BinaryOperator &I) {
if (Instruction *X = foldVectorBinop(I))
return X;
+ if (Instruction *Phi = foldBinopWithPhiOperands(I))
+ return Phi;
+
if (Instruction *FoldedFAdd = foldBinOpIntoSelectOrPhi(I))
return FoldedFAdd;
@@ -1654,6 +1655,14 @@ Instruction *InstCombinerImpl::visitFAdd(BinaryOperator &I) {
{X->getType()}, {NewStartC, X}, &I));
}
+ // (X * MulC) + X --> X * (MulC + 1.0)
+ Constant *MulC;
+ if (match(&I, m_c_FAdd(m_FMul(m_Value(X), m_ImmConstant(MulC)),
+ m_Deferred(X)))) {
+ MulC = ConstantExpr::getFAdd(MulC, ConstantFP::get(I.getType(), 1.0));
+ return BinaryOperator::CreateFMulFMF(X, MulC, &I);
+ }
+
if (Value *V = FAddCombine(Builder).simplify(&I))
return replaceInstUsesWith(I, V);
}
@@ -1748,6 +1757,9 @@ Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
if (Instruction *X = foldVectorBinop(I))
return X;
+ if (Instruction *Phi = foldBinopWithPhiOperands(I))
+ return Phi;
+
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
// If this is a 'B = x-(-A)', change to B = x+A.
@@ -2310,6 +2322,9 @@ Instruction *InstCombinerImpl::visitFSub(BinaryOperator &I) {
if (Instruction *X = foldVectorBinop(I))
return X;
+ if (Instruction *Phi = foldBinopWithPhiOperands(I))
+ return Phi;
+
// Subtraction from -0.0 is the canonical form of fneg.
// fsub -0.0, X ==> fneg X
// fsub nsz 0.0, X ==> fneg nsz X
diff --git a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index de1034c910d5..6bbb0251f2bc 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1727,25 +1727,37 @@ static Instruction *foldComplexAndOrPatterns(BinaryOperator &I,
(Opcode == Instruction::And) ? Instruction::Or : Instruction::And;
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- Value *A, *B, *C, *X, *Y;
+ Value *A, *B, *C, *X, *Y, *Dummy;
+
+  // Match the following expressions:
+ // (~(A | B) & C)
+ // (~(A & B) | C)
+ // Captures X = ~(A | B) or ~(A & B)
+ const auto matchNotOrAnd =
+ [Opcode, FlippedOpcode](Value *Op, auto m_A, auto m_B, auto m_C,
+ Value *&X, bool CountUses = false) -> bool {
+ if (CountUses && !Op->hasOneUse())
+ return false;
+
+ if (match(Op, m_c_BinOp(FlippedOpcode,
+ m_CombineAnd(m_Value(X),
+ m_Not(m_c_BinOp(Opcode, m_A, m_B))),
+ m_C)))
+ return !CountUses || X->hasOneUse();
+
+ return false;
+ };
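
The rewrites this helper drives (spelled out in the comments just below) are plain Boolean identities, so they can be checked exhaustively without any of the PatternMatch machinery. A self-contained check over all byte values:

#include <cassert>
#include <cstdint>

int main() {
  // Exhaustive bitwise check of the folds performed below:
  //   (~(A | B) & C) | (~(A | C) & B)  ==  (B ^ C) & ~A
  //   (~(A & B) | C) & (~(A & C) | B)  ==  ~((B ^ C) & A)
  for (unsigned A = 0; A < 256; ++A)
    for (unsigned B = 0; B < 256; ++B)
      for (unsigned C = 0; C < 256; ++C) {
        uint8_t a = A, b = B, c = C;
        assert((uint8_t)((~(a | b) & c) | (~(a | c) & b)) ==
               (uint8_t)((b ^ c) & ~a));
        assert((uint8_t)((~(a & b) | c) & (~(a & c) | b)) ==
               (uint8_t)(~((b ^ c) & a)));
      }
  return 0;
}
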
// (~(A | B) & C) | ... --> ...
// (~(A & B) | C) & ... --> ...
// TODO: One use checks are conservative. We just need to check that a total
// number of multiple used values does not exceed reduction
// in operations.
- if (match(Op0,
- m_c_BinOp(FlippedOpcode,
- m_CombineAnd(m_Value(X), m_Not(m_BinOp(Opcode, m_Value(A),
- m_Value(B)))),
- m_Value(C)))) {
+ if (matchNotOrAnd(Op0, m_Value(A), m_Value(B), m_Value(C), X)) {
// (~(A | B) & C) | (~(A | C) & B) --> (B ^ C) & ~A
// (~(A & B) | C) & (~(A & C) | B) --> ~((B ^ C) & A)
- if (match(Op1,
- m_OneUse(m_c_BinOp(FlippedOpcode,
- m_OneUse(m_Not(m_c_BinOp(Opcode, m_Specific(A),
- m_Specific(C)))),
- m_Specific(B))))) {
+ if (matchNotOrAnd(Op1, m_Specific(A), m_Specific(C), m_Specific(B), Dummy,
+ true)) {
Value *Xor = Builder.CreateXor(B, C);
return (Opcode == Instruction::Or)
? BinaryOperator::CreateAnd(Xor, Builder.CreateNot(A))
@@ -1754,11 +1766,8 @@ static Instruction *foldComplexAndOrPatterns(BinaryOperator &I,
// (~(A | B) & C) | (~(B | C) & A) --> (A ^ C) & ~B
// (~(A & B) | C) & (~(B & C) | A) --> ~((A ^ C) & B)
- if (match(Op1,
- m_OneUse(m_c_BinOp(FlippedOpcode,
- m_OneUse(m_Not(m_c_BinOp(Opcode, m_Specific(B),
- m_Specific(C)))),
- m_Specific(A))))) {
+ if (matchNotOrAnd(Op1, m_Specific(B), m_Specific(C), m_Specific(A), Dummy,
+ true)) {
Value *Xor = Builder.CreateXor(A, C);
return (Opcode == Instruction::Or)
? BinaryOperator::CreateAnd(Xor, Builder.CreateNot(B))
@@ -1863,6 +1872,9 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
if (Instruction *X = foldVectorBinop(I))
return X;
+ if (Instruction *Phi = foldBinopWithPhiOperands(I))
+ return Phi;
+
// See if we can simplify any instructions used by the instruction whose sole
// purpose is to compute bits we don't care about.
if (SimplifyDemandedInstructionBits(I))
@@ -2072,21 +2084,37 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
if (Op0->hasOneUse() || isFreeToInvert(C, C->hasOneUse()))
return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(C));
- // (A | B) & ((~A) ^ B) -> (A & B)
- // (A | B) & (B ^ (~A)) -> (A & B)
- // (B | A) & ((~A) ^ B) -> (A & B)
- // (B | A) & (B ^ (~A)) -> (A & B)
+ // (A | B) & (~A ^ B) -> A & B
+ // (A | B) & (B ^ ~A) -> A & B
+ // (B | A) & (~A ^ B) -> A & B
+ // (B | A) & (B ^ ~A) -> A & B
if (match(Op1, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
return BinaryOperator::CreateAnd(A, B);
- // ((~A) ^ B) & (A | B) -> (A & B)
- // ((~A) ^ B) & (B | A) -> (A & B)
- // (B ^ (~A)) & (A | B) -> (A & B)
- // (B ^ (~A)) & (B | A) -> (A & B)
+ // (~A ^ B) & (A | B) -> A & B
+ // (~A ^ B) & (B | A) -> A & B
+ // (B ^ ~A) & (A | B) -> A & B
+ // (B ^ ~A) & (B | A) -> A & B
if (match(Op0, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
match(Op1, m_c_Or(m_Specific(A), m_Specific(B))))
return BinaryOperator::CreateAnd(A, B);
+
+ // (~A | B) & (A ^ B) -> ~A & B
+ // (~A | B) & (B ^ A) -> ~A & B
+ // (B | ~A) & (A ^ B) -> ~A & B
+ // (B | ~A) & (B ^ A) -> ~A & B
+ if (match(Op0, m_c_Or(m_Not(m_Value(A)), m_Value(B))) &&
+ match(Op1, m_c_Xor(m_Specific(A), m_Specific(B))))
+ return BinaryOperator::CreateAnd(Builder.CreateNot(A), B);
+
+ // (A ^ B) & (~A | B) -> ~A & B
+ // (B ^ A) & (~A | B) -> ~A & B
+ // (A ^ B) & (B | ~A) -> ~A & B
+ // (B ^ A) & (B | ~A) -> ~A & B
+ if (match(Op1, m_c_Or(m_Not(m_Value(A)), m_Value(B))) &&
+ match(Op0, m_c_Xor(m_Specific(A), m_Specific(B))))
+ return BinaryOperator::CreateAnd(Builder.CreateNot(A), B);
}
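
The pattern added just above, (~A | B) & (A ^ B) --> ~A & B (and its commuted forms), is again a pure Boolean identity; a short exhaustive check in the same style:

#include <cassert>
#include <cstdint>

int main() {
  for (unsigned A = 0; A < 256; ++A)
    for (unsigned B = 0; B < 256; ++B) {
      uint8_t a = A, b = B;
      assert((uint8_t)((~a | b) & (a ^ b)) == (uint8_t)(~a & b));
    }
  return 0;
}
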
{
@@ -2640,6 +2668,9 @@ Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
if (Instruction *X = foldVectorBinop(I))
return X;
+ if (Instruction *Phi = foldBinopWithPhiOperands(I))
+ return Phi;
+
// See if we can simplify any instructions used by the instruction whose sole
// purpose is to compute bits we don't care about.
if (SimplifyDemandedInstructionBits(I))
@@ -3528,6 +3559,9 @@ Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
if (Instruction *X = foldVectorBinop(I))
return X;
+ if (Instruction *Phi = foldBinopWithPhiOperands(I))
+ return Phi;
+
if (Instruction *NewXor = foldXorToXor(I, Builder))
return NewXor;
diff --git a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 14427bd1f2f4..1fb46af46bee 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -352,9 +352,27 @@ Instruction *InstCombinerImpl::simplifyMaskedStore(IntrinsicInst &II) {
// * Dereferenceable address & few lanes -> scalarize speculative load/selects
// * Adjacent vector addresses -> masked.load
// * Narrow width by halfs excluding zero/undef lanes
-// * Vector splat address w/known mask -> scalar load
// * Vector incrementing address -> vector masked load
Instruction *InstCombinerImpl::simplifyMaskedGather(IntrinsicInst &II) {
+ auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(2));
+ if (!ConstMask)
+ return nullptr;
+
+ // Vector splat address w/known mask -> scalar load
+ // Fold the gather to load the source vector first lane
+ // because it is reloading the same value each time
+ if (ConstMask->isAllOnesValue())
+ if (auto *SplatPtr = getSplatValue(II.getArgOperand(0))) {
+ auto *VecTy = cast<VectorType>(II.getType());
+ const Align Alignment =
+ cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();
+ LoadInst *L = Builder.CreateAlignedLoad(VecTy->getElementType(), SplatPtr,
+ Alignment, "load.scalar");
+ Value *Shuf =
+ Builder.CreateVectorSplat(VecTy->getElementCount(), L, "broadcast");
+ return replaceInstUsesWith(II, cast<Instruction>(Shuf));
+ }
+
return nullptr;
}
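
The gather fold above is justified by a simple observation: when every lane of the address vector is the same pointer and the mask is all-true, each lane loads the same scalar, so one scalar load plus a broadcast is equivalent. A scalar C++ rendering of that equivalence, with no LLVM types involved:

#include <array>
#include <cassert>

int main() {
  int Mem = 42;
  // Gather: every lane loads through the same ("splat") address.
  std::array<int *, 4> Ptrs = {&Mem, &Mem, &Mem, &Mem};
  std::array<int, 4> Gather;
  for (unsigned I = 0; I < 4; ++I)
    Gather[I] = *Ptrs[I];

  // Equivalent scalar load + broadcast, which is what the fold emits.
  int Scalar = *Ptrs[0];
  std::array<int, 4> Broadcast = {Scalar, Scalar, Scalar, Scalar};
  assert(Gather == Broadcast);
  return 0;
}
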
@@ -362,7 +380,6 @@ Instruction *InstCombinerImpl::simplifyMaskedGather(IntrinsicInst &II) {
// * Single constant active lane -> store
// * Adjacent vector addresses -> masked.store
// * Narrow store width by halfs excluding zero/undef lanes
-// * Vector splat address w/known mask -> scalar store
// * Vector incrementing address -> vector masked store
Instruction *InstCombinerImpl::simplifyMaskedScatter(IntrinsicInst &II) {
auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
@@ -373,6 +390,34 @@ Instruction *InstCombinerImpl::simplifyMaskedScatter(IntrinsicInst &II) {
if (ConstMask->isNullValue())
return eraseInstFromFunction(II);
+ // Vector splat address -> scalar store
+ if (auto *SplatPtr = getSplatValue(II.getArgOperand(1))) {
+ // scatter(splat(value), splat(ptr), non-zero-mask) -> store value, ptr
+ if (auto *SplatValue = getSplatValue(II.getArgOperand(0))) {
+ Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
+ StoreInst *S =
+ new StoreInst(SplatValue, SplatPtr, /*IsVolatile=*/false, Alignment);
+ S->copyMetadata(II);
+ return S;
+ }
+ // scatter(vector, splat(ptr), splat(true)) -> store extract(vector,
+ // lastlane), ptr
+ if (ConstMask->isAllOnesValue()) {
+ Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
+ VectorType *WideLoadTy = cast<VectorType>(II.getArgOperand(1)->getType());
+ ElementCount VF = WideLoadTy->getElementCount();
+ Constant *EC =
+ ConstantInt::get(Builder.getInt32Ty(), VF.getKnownMinValue());
+ Value *RunTimeVF = VF.isScalable() ? Builder.CreateVScale(EC) : EC;
+ Value *LastLane = Builder.CreateSub(RunTimeVF, Builder.getInt32(1));
+ Value *Extract =
+ Builder.CreateExtractElement(II.getArgOperand(0), LastLane);
+ StoreInst *S =
+ new StoreInst(Extract, SplatPtr, /*IsVolatile=*/false, Alignment);
+ S->copyMetadata(II);
+ return S;
+ }
+ }
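
The scatter-to-splat-pointer case above assumes that when several enabled lanes store through the same address, the highest-numbered lane is the one that survives, which is what the fold's "extract(vector, lastlane)" encodes. A scalar model of that behaviour:

#include <array>
#include <cassert>

int main() {
  int Mem = 0;
  std::array<int *, 4> Ptrs = {&Mem, &Mem, &Mem, &Mem}; // splat address
  std::array<int, 4> Vals = {1, 2, 3, 4};

  // Scattering the lanes in ascending order leaves the last lane's value in
  // memory, so a single store of that lane is equivalent.
  for (unsigned I = 0; I < 4; ++I)
    *Ptrs[I] = Vals[I];
  assert(Mem == Vals[3]);
  return 0;
}
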
if (isa<ScalableVectorType>(ConstMask->getType()))
return nullptr;
@@ -449,7 +494,7 @@ static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombinerImpl &IC) {
// ctlz/cttz i1 Op0 --> not Op0
if (match(Op1, m_Zero()))
return BinaryOperator::CreateNot(Op0);
- // If zero is undef, then the input can be assumed to be "true", so the
+ // If zero is poison, then the input can be assumed to be "true", so the
// instruction simplifies to "false".
assert(match(Op1, m_One()) && "Expected ctlz/cttz operand to be 0 or 1");
return IC.replaceInstUsesWith(II, ConstantInt::getNullValue(II.getType()));
@@ -474,7 +519,7 @@ static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombinerImpl &IC) {
}
// Zext doesn't change the number of trailing zeros, so narrow:
- // cttz(zext(x)) -> zext(cttz(x)) if the 'ZeroIsUndef' parameter is 'true'.
+ // cttz(zext(x)) -> zext(cttz(x)) if the 'ZeroIsPoison' parameter is 'true'.
if (match(Op0, m_OneUse(m_ZExt(m_Value(X)))) && match(Op1, m_One())) {
auto *Cttz = IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, X,
IC.Builder.getTrue());
@@ -511,7 +556,7 @@ static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombinerImpl &IC) {
}
// If the input to cttz/ctlz is known to be non-zero,
- // then change the 'ZeroIsUndef' parameter to 'true'
+ // then change the 'ZeroIsPoison' parameter to 'true'
// because we know the zero behavior can't affect the result.
if (!Known.One.isZero() ||
isKnownNonZero(Op0, IC.getDataLayout(), 0, &IC.getAssumptionCache(), &II,
@@ -1188,6 +1233,21 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
Value *IIOperand = II->getArgOperand(0);
Value *X = nullptr;
+ KnownBits Known = computeKnownBits(IIOperand, 0, II);
+ uint64_t LZ = alignDown(Known.countMinLeadingZeros(), 8);
+ uint64_t TZ = alignDown(Known.countMinTrailingZeros(), 8);
+
+ // bswap(x) -> shift(x) if x has exactly one "active byte"
+ if (Known.getBitWidth() - LZ - TZ == 8) {
+ assert(LZ != TZ && "active byte cannot be in the middle");
+ if (LZ > TZ) // -> shl(x) if the "active byte" is in the low part of x
+ return BinaryOperator::CreateNUWShl(
+ IIOperand, ConstantInt::get(IIOperand->getType(), LZ - TZ));
+ // -> lshr(x) if the "active byte" is in the high part of x
+ return BinaryOperator::CreateExactLShr(
+ IIOperand, ConstantInt::get(IIOperand->getType(), TZ - LZ));
+ }
+
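
The new bswap fold above rests on a small arithmetic fact: when all but one byte of the operand are known zero, byte-swapping only relocates that byte, which a single shift reproduces (shl by LZ - TZ when the active byte sits in the low half, lshr by TZ - LZ otherwise). A standalone check, with a hand-rolled 32-bit byte swap so the snippet stays standard C++:

#include <cassert>
#include <cstdint>

static uint32_t bswap32(uint32_t X) {
  return (X >> 24) | ((X >> 8) & 0x0000FF00u) |
         ((X << 8) & 0x00FF0000u) | (X << 24);
}

int main() {
  // Only byte 1 (bits 8..15) can be non-zero: LZ = 16, TZ = 8, 32-LZ-TZ = 8.
  const unsigned LZ = 16, TZ = 8;
  for (uint32_t B = 0; B <= 0xFF; ++B) {
    uint32_t X = B << TZ;
    // LZ > TZ: the active byte is in the low half, so bswap == shl by LZ-TZ.
    assert(bswap32(X) == X << (LZ - TZ));
  }
  // Mirror case: only byte 2 (bits 16..23) non-zero: LZ = 8, TZ = 16, lshr.
  const unsigned LZ2 = 8, TZ2 = 16;
  for (uint32_t B = 0; B <= 0xFF; ++B) {
    uint32_t X = B << TZ2;
    assert(bswap32(X) == X >> (TZ2 - LZ2));
  }
  return 0;
}
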
// bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
unsigned C = X->getType()->getScalarSizeInBits() -
@@ -2460,7 +2520,7 @@ static bool isSafeToEliminateVarargsCast(const CallBase &Call,
if (!Call.isByValArgument(ix))
return false;
- Type *SrcElemTy = SrcTy->getElementType();
+ Type *SrcElemTy = SrcTy->getNonOpaquePointerElementType();
Type *DstElemTy = Call.getParamByValType(ix);
if (!SrcElemTy->isSized() || !DstElemTy->isSized())
return false;
@@ -2571,57 +2631,36 @@ static IntrinsicInst *findInitTrampoline(Value *Callee) {
}
void InstCombinerImpl::annotateAnyAllocSite(CallBase &Call, const TargetLibraryInfo *TLI) {
- unsigned NumArgs = Call.arg_size();
- ConstantInt *Op0C = dyn_cast<ConstantInt>(Call.getOperand(0));
- ConstantInt *Op1C =
- (NumArgs == 1) ? nullptr : dyn_cast<ConstantInt>(Call.getOperand(1));
- // Bail out if the allocation size is zero (or an invalid alignment of zero
- // with aligned_alloc).
- if ((Op0C && Op0C->isNullValue()) || (Op1C && Op1C->isNullValue()))
- return;
-
- if (isMallocLikeFn(&Call, TLI) && Op0C) {
- if (isOpNewLikeFn(&Call, TLI))
+ // Note: We only handle cases which can't be driven from generic attributes
+ // here. So, for example, nonnull and noalias (which are common properties
+ // of some allocation functions) are expected to be handled via annotation
+ // of the respective allocator declaration with generic attributes.
+
+ uint64_t Size;
+ ObjectSizeOpts Opts;
+ if (getObjectSize(&Call, Size, DL, TLI, Opts) && Size > 0) {
+ // TODO: We really should just emit deref_or_null here and then
+ // let the generic inference code combine that with nonnull.
+ if (Call.hasRetAttr(Attribute::NonNull))
Call.addRetAttr(Attribute::getWithDereferenceableBytes(
- Call.getContext(), Op0C->getZExtValue()));
+ Call.getContext(), Size));
else
Call.addRetAttr(Attribute::getWithDereferenceableOrNullBytes(
- Call.getContext(), Op0C->getZExtValue()));
- } else if (isAlignedAllocLikeFn(&Call, TLI)) {
- if (Op1C)
- Call.addRetAttr(Attribute::getWithDereferenceableOrNullBytes(
- Call.getContext(), Op1C->getZExtValue()));
- // Add alignment attribute if alignment is a power of two constant.
- if (Op0C && Op0C->getValue().ult(llvm::Value::MaximumAlignment) &&
- isKnownNonZero(Call.getOperand(1), DL, 0, &AC, &Call, &DT)) {
- uint64_t AlignmentVal = Op0C->getZExtValue();
- if (llvm::isPowerOf2_64(AlignmentVal)) {
- Call.removeRetAttr(Attribute::Alignment);
- Call.addRetAttr(Attribute::getWithAlignment(Call.getContext(),
- Align(AlignmentVal)));
- }
- }
- } else if (isReallocLikeFn(&Call, TLI) && Op1C) {
- Call.addRetAttr(Attribute::getWithDereferenceableOrNullBytes(
- Call.getContext(), Op1C->getZExtValue()));
- } else if (isCallocLikeFn(&Call, TLI) && Op0C && Op1C) {
- bool Overflow;
- const APInt &N = Op0C->getValue();
- APInt Size = N.umul_ov(Op1C->getValue(), Overflow);
- if (!Overflow)
- Call.addRetAttr(Attribute::getWithDereferenceableOrNullBytes(
- Call.getContext(), Size.getZExtValue()));
- } else if (isStrdupLikeFn(&Call, TLI)) {
- uint64_t Len = GetStringLength(Call.getOperand(0));
- if (Len) {
- // strdup
- if (NumArgs == 1)
- Call.addRetAttr(Attribute::getWithDereferenceableOrNullBytes(
- Call.getContext(), Len));
- // strndup
- else if (NumArgs == 2 && Op1C)
- Call.addRetAttr(Attribute::getWithDereferenceableOrNullBytes(
- Call.getContext(), std::min(Len, Op1C->getZExtValue() + 1)));
+ Call.getContext(), Size));
+ }
+
+ // Add alignment attribute if alignment is a power of two constant.
+ Value *Alignment = getAllocAlignment(&Call, TLI);
+ if (!Alignment)
+ return;
+
+ ConstantInt *AlignOpC = dyn_cast<ConstantInt>(Alignment);
+ if (AlignOpC && AlignOpC->getValue().ult(llvm::Value::MaximumAlignment)) {
+ uint64_t AlignmentVal = AlignOpC->getZExtValue();
+ if (llvm::isPowerOf2_64(AlignmentVal)) {
+ Call.removeRetAttr(Attribute::Alignment);
+ Call.addRetAttr(Attribute::getWithAlignment(Call.getContext(),
+ Align(AlignmentVal)));
}
}
}
@@ -2744,9 +2783,9 @@ Instruction *InstCombinerImpl::visitCallBase(CallBase &Call) {
PointerType *NewTy = cast<PointerType>(CI->getOperand(0)->getType());
if (!NewTy->isOpaque() && Call.isByValArgument(ix)) {
Call.removeParamAttr(ix, Attribute::ByVal);
- Call.addParamAttr(
- ix, Attribute::getWithByValType(
- Call.getContext(), NewTy->getElementType()));
+ Call.addParamAttr(ix, Attribute::getWithByValType(
+ Call.getContext(),
+ NewTy->getNonOpaquePointerElementType()));
}
Changed = true;
}
@@ -2782,7 +2821,8 @@ Instruction *InstCombinerImpl::visitCallBase(CallBase &Call) {
Call, Builder.CreateBitOrPointerCast(ReturnedArg, CallTy));
}
- if (isAllocLikeFn(&Call, &TLI))
+ if (isAllocationFn(&Call, &TLI) &&
+ isAllocRemovable(&cast<CallBase>(Call), &TLI))
return visitAllocSite(Call);
// Handle intrinsics which can be used in both call and invoke context.
@@ -2934,7 +2974,7 @@ bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) {
}
if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
- AttrBuilder RAttrs(CallerPAL, AttributeList::ReturnIndex);
+ AttrBuilder RAttrs(FT->getContext(), CallerPAL.getRetAttrs());
if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(NewRetTy)))
return false; // Attribute not compatible with transformed value.
}
@@ -2980,7 +3020,7 @@ bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) {
if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
return false; // Cannot transform this parameter value.
- if (AttrBuilder(CallerPAL.getParamAttrs(i))
+ if (AttrBuilder(FT->getContext(), CallerPAL.getParamAttrs(i))
.overlaps(AttributeFuncs::typeIncompatible(ParamTy)))
return false; // Attribute not compatible with transformed value.
@@ -2994,12 +3034,12 @@ bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) {
// sized type and the sized type has to have the same size as the old type.
if (ParamTy != ActTy && CallerPAL.hasParamAttr(i, Attribute::ByVal)) {
PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
- if (!ParamPTy || !ParamPTy->getElementType()->isSized())
+ if (!ParamPTy || !ParamPTy->getPointerElementType()->isSized())
return false;
Type *CurElTy = Call.getParamByValType(i);
if (DL.getTypeAllocSize(CurElTy) !=
- DL.getTypeAllocSize(ParamPTy->getElementType()))
+ DL.getTypeAllocSize(ParamPTy->getPointerElementType()))
return false;
}
}
@@ -3012,17 +3052,14 @@ bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) {
// If the callee is just a declaration, don't change the varargsness of the
// call. We don't want to introduce a varargs call where one doesn't
// already exist.
- PointerType *APTy = cast<PointerType>(Call.getCalledOperand()->getType());
- if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
+ if (FT->isVarArg() != Call.getFunctionType()->isVarArg())
return false;
// If both the callee and the cast type are varargs, we still have to make
// sure the number of fixed parameters are the same or we have the same
// ABI issues as if we introduce a varargs call.
- if (FT->isVarArg() &&
- cast<FunctionType>(APTy->getElementType())->isVarArg() &&
- FT->getNumParams() !=
- cast<FunctionType>(APTy->getElementType())->getNumParams())
+ if (FT->isVarArg() && Call.getFunctionType()->isVarArg() &&
+ FT->getNumParams() != Call.getFunctionType()->getNumParams())
return false;
}
@@ -3045,7 +3082,7 @@ bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) {
ArgAttrs.reserve(NumActualArgs);
// Get any return attributes.
- AttrBuilder RAttrs(CallerPAL, AttributeList::ReturnIndex);
+ AttrBuilder RAttrs(FT->getContext(), CallerPAL.getRetAttrs());
// If the return value is not being used, the type may not be compatible
// with the existing attributes. Wipe out any problematic attributes.
@@ -3063,7 +3100,7 @@ bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) {
// Add any parameter attributes.
if (CallerPAL.hasParamAttr(i, Attribute::ByVal)) {
- AttrBuilder AB(CallerPAL.getParamAttrs(i));
+ AttrBuilder AB(FT->getContext(), CallerPAL.getParamAttrs(i));
AB.addByValAttr(NewArg->getType()->getPointerElementType());
ArgAttrs.push_back(AttributeSet::get(Ctx, AB));
} else
diff --git a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 8df4a4529f47..f11ba8772f3c 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -85,13 +85,16 @@ static Value *decomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
Instruction *InstCombinerImpl::PromoteCastOfAllocation(BitCastInst &CI,
AllocaInst &AI) {
PointerType *PTy = cast<PointerType>(CI.getType());
+ // Opaque pointers don't have an element type we could replace with.
+ if (PTy->isOpaque())
+ return nullptr;
IRBuilderBase::InsertPointGuard Guard(Builder);
Builder.SetInsertPoint(&AI);
// Get the type really allocated and the type casted to.
Type *AllocElTy = AI.getAllocatedType();
- Type *CastElTy = PTy->getElementType();
+ Type *CastElTy = PTy->getNonOpaquePointerElementType();
if (!AllocElTy->isSized() || !CastElTy->isSized()) return nullptr;
// This optimisation does not work for cases where the cast type
@@ -2649,8 +2652,8 @@ static Instruction *convertBitCastToGEP(BitCastInst &CI, IRBuilderBase &Builder,
if (SrcPTy->isOpaque() || DstPTy->isOpaque())
return nullptr;
- Type *DstElTy = DstPTy->getElementType();
- Type *SrcElTy = SrcPTy->getElementType();
+ Type *DstElTy = DstPTy->getNonOpaquePointerElementType();
+ Type *SrcElTy = SrcPTy->getNonOpaquePointerElementType();
// When the type pointed to is not sized the cast cannot be
// turned into a gep.
@@ -2669,8 +2672,8 @@ static Instruction *convertBitCastToGEP(BitCastInst &CI, IRBuilderBase &Builder,
// If we found a path from the src to dest, create the getelementptr now.
if (SrcElTy == DstElTy) {
SmallVector<Value *, 8> Idxs(NumZeros + 1, Builder.getInt32(0));
- GetElementPtrInst *GEP =
- GetElementPtrInst::Create(SrcPTy->getElementType(), Src, Idxs);
+ GetElementPtrInst *GEP = GetElementPtrInst::Create(
+ SrcPTy->getNonOpaquePointerElementType(), Src, Idxs);
// If the source pointer is dereferenceable, then assume it points to an
// allocated object and apply "inbounds" to the GEP.
diff --git a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index ed53b88aed61..fd58a44504b3 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -503,7 +503,7 @@ static Value *evaluateGEPOffsetExpression(User *GEP, InstCombinerImpl &IC,
/// Returns true if we can rewrite Start as a GEP with pointer Base
/// and some integer offset. The nodes that need to be re-written
/// for this transformation will be added to Explored.
-static bool canRewriteGEPAsOffset(Value *Start, Value *Base,
+static bool canRewriteGEPAsOffset(Type *ElemTy, Value *Start, Value *Base,
const DataLayout &DL,
SetVector<Value *> &Explored) {
SmallVector<Value *, 16> WorkList(1, Start);
@@ -551,7 +551,7 @@ static bool canRewriteGEPAsOffset(Value *Start, Value *Base,
// the original pointer type. We could handle more cases in the
// future.
if (GEP->getNumIndices() != 1 || !GEP->isInBounds() ||
- GEP->getType() != Start->getType())
+ GEP->getSourceElementType() != ElemTy)
return false;
if (!Explored.contains(GEP->getOperand(0)))
@@ -627,7 +627,7 @@ static void setInsertionPoint(IRBuilder<> &Builder, Value *V,
/// Returns a re-written value of Start as an indexed GEP using Base as a
/// pointer.
-static Value *rewriteGEPAsOffset(Value *Start, Value *Base,
+static Value *rewriteGEPAsOffset(Type *ElemTy, Value *Start, Value *Base,
const DataLayout &DL,
SetVector<Value *> &Explored) {
// Perform all the substitutions. This is a bit tricky because we can
@@ -714,6 +714,8 @@ static Value *rewriteGEPAsOffset(Value *Start, Value *Base,
}
}
+ PointerType *PtrTy =
+ ElemTy->getPointerTo(Start->getType()->getPointerAddressSpace());
for (Value *Val : Explored) {
if (Val == Base)
continue;
@@ -722,22 +724,14 @@ static Value *rewriteGEPAsOffset(Value *Start, Value *Base,
// a GEP or a GEP + ptrtoint.
setInsertionPoint(Builder, Val, false);
- // If required, create an inttoptr instruction for Base.
- Value *NewBase = Base;
- if (!Base->getType()->isPointerTy())
- NewBase = Builder.CreateBitOrPointerCast(Base, Start->getType(),
- Start->getName() + "to.ptr");
-
- Value *GEP = Builder.CreateInBoundsGEP(
- Start->getType()->getPointerElementType(), NewBase,
- makeArrayRef(NewInsts[Val]), Val->getName() + ".ptr");
-
- if (!Val->getType()->isPointerTy()) {
- Value *Cast = Builder.CreatePointerCast(GEP, Val->getType(),
- Val->getName() + ".conv");
- GEP = Cast;
- }
- Val->replaceAllUsesWith(GEP);
+ // Cast base to the expected type.
+ Value *NewVal = Builder.CreateBitOrPointerCast(
+ Base, PtrTy, Start->getName() + "to.ptr");
+ NewVal = Builder.CreateInBoundsGEP(
+ ElemTy, NewVal, makeArrayRef(NewInsts[Val]), Val->getName() + ".ptr");
+ NewVal = Builder.CreateBitOrPointerCast(
+ NewVal, Val->getType(), Val->getName() + ".conv");
+ Val->replaceAllUsesWith(NewVal);
}
return NewInsts[Start];
@@ -747,7 +741,7 @@ static Value *rewriteGEPAsOffset(Value *Start, Value *Base,
/// the input Value as a constant indexed GEP. Returns a pair containing
/// the GEPs Pointer and Index.
static std::pair<Value *, Value *>
-getAsConstantIndexedAddress(Value *V, const DataLayout &DL) {
+getAsConstantIndexedAddress(Type *ElemTy, Value *V, const DataLayout &DL) {
Type *IndexType = IntegerType::get(V->getContext(),
DL.getIndexTypeSizeInBits(V->getType()));
@@ -759,7 +753,7 @@ getAsConstantIndexedAddress(Value *V, const DataLayout &DL) {
if (!GEP->isInBounds())
break;
if (GEP->hasAllConstantIndices() && GEP->getNumIndices() == 1 &&
- GEP->getType() == V->getType()) {
+ GEP->getSourceElementType() == ElemTy) {
V = GEP->getOperand(0);
Constant *GEPIndex = static_cast<Constant *>(GEP->getOperand(1));
Index = ConstantExpr::getAdd(
@@ -798,17 +792,14 @@ static Instruction *transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS,
if (!GEPLHS->hasAllConstantIndices())
return nullptr;
- // Make sure the pointers have the same type.
- if (GEPLHS->getType() != RHS->getType())
- return nullptr;
-
+ Type *ElemTy = GEPLHS->getSourceElementType();
Value *PtrBase, *Index;
- std::tie(PtrBase, Index) = getAsConstantIndexedAddress(GEPLHS, DL);
+ std::tie(PtrBase, Index) = getAsConstantIndexedAddress(ElemTy, GEPLHS, DL);
// The set of nodes that will take part in this transformation.
SetVector<Value *> Nodes;
- if (!canRewriteGEPAsOffset(RHS, PtrBase, DL, Nodes))
+ if (!canRewriteGEPAsOffset(ElemTy, RHS, PtrBase, DL, Nodes))
return nullptr;
// We know we can re-write this as
@@ -817,7 +808,7 @@ static Instruction *transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS,
// can't have overflow on either side. We can therefore re-write
// this as:
// OFFSET1 cmp OFFSET2
- Value *NewRHS = rewriteGEPAsOffset(RHS, PtrBase, DL, Nodes);
+ Value *NewRHS = rewriteGEPAsOffset(ElemTy, RHS, PtrBase, DL, Nodes);
// RewriteGEPAsOffset has replaced RHS and all of its uses with a re-written
// GEP having PtrBase as the pointer base, and has returned in NewRHS the
@@ -894,9 +885,10 @@ Instruction *InstCombinerImpl::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
// If the base pointers are different, but the indices are the same, just
// compare the base pointer.
if (PtrBase != GEPRHS->getOperand(0)) {
- bool IndicesTheSame = GEPLHS->getNumOperands()==GEPRHS->getNumOperands();
- IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
- GEPRHS->getOperand(0)->getType();
+ bool IndicesTheSame =
+ GEPLHS->getNumOperands() == GEPRHS->getNumOperands() &&
+ GEPLHS->getType() == GEPRHS->getType() &&
+ GEPLHS->getSourceElementType() == GEPRHS->getSourceElementType();
if (IndicesTheSame)
for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
@@ -1271,8 +1263,8 @@ static Instruction *processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
// This is only really a signed overflow check if the inputs have been
// sign-extended; check for that condition. For example, if CI2 is 2^31 and
// the operands of the add are 64 bits wide, we need at least 33 sign bits.
- if (IC.ComputeMinSignedBits(A, 0, &I) > NewWidth ||
- IC.ComputeMinSignedBits(B, 0, &I) > NewWidth)
+ if (IC.ComputeMaxSignificantBits(A, 0, &I) > NewWidth ||
+ IC.ComputeMaxSignificantBits(B, 0, &I) > NewWidth)
return nullptr;
// In order to replace the original add with a narrower
@@ -2221,7 +2213,7 @@ Instruction *InstCombinerImpl::foldICmpShrConstant(ICmpInst &Cmp,
// icmp eq/ne (shr X, Y), 0 --> icmp eq/ne X, 0
Value *X = Shr->getOperand(0);
CmpInst::Predicate Pred = Cmp.getPredicate();
- if (Cmp.isEquality() && Shr->isExact() && Shr->hasOneUse() && C.isZero())
+ if (Cmp.isEquality() && Shr->isExact() && C.isZero())
return new ICmpInst(Pred, X, Cmp.getOperand(1));
const APInt *ShiftVal;
@@ -2247,9 +2239,10 @@ Instruction *InstCombinerImpl::foldICmpShrConstant(ICmpInst &Cmp,
// those conditions rather than checking them. This is difficult because of
// undef/poison (PR34838).
if (IsAShr) {
- if (Pred == CmpInst::ICMP_SLT || (Pred == CmpInst::ICMP_SGT && IsExact)) {
- // icmp slt (ashr X, ShAmtC), C --> icmp slt X, (C << ShAmtC)
- // icmp sgt (ashr exact X, ShAmtC), C --> icmp sgt X, (C << ShAmtC)
+ if (IsExact || Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_ULT) {
+ // When ShAmtC can be shifted losslessly:
+ // icmp PRED (ashr exact X, ShAmtC), C --> icmp PRED X, (C << ShAmtC)
+ // icmp slt/ult (ashr X, ShAmtC), C --> icmp slt/ult X, (C << ShAmtC)
APInt ShiftedC = C.shl(ShAmtVal);
if (ShiftedC.ashr(ShAmtVal) == C)
return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
@@ -2261,6 +2254,12 @@ Instruction *InstCombinerImpl::foldICmpShrConstant(ICmpInst &Cmp,
(ShiftedC + 1).ashr(ShAmtVal) == (C + 1))
return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
}
+ if (Pred == CmpInst::ICMP_UGT) {
+ // icmp ugt (ashr X, ShAmtC), C --> icmp ugt X, ((C + 1) << ShAmtC) - 1
+ APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1;
+ if ((ShiftedC + 1).ashr(ShAmtVal) == (C + 1))
+ return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
+ }
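A small brute-force C++ sketch of the new ugt fold above, checked over all 8-bit inputs; ShAmt = 2 and C = 3 are assumed example constants:

  #include <cassert>
  #include <cstdint>

  int main() {
    const unsigned ShAmt = 2;
    const uint8_t C = 3;
    // icmp ugt (ashr X, ShAmt), C --> icmp ugt X, ((C + 1) << ShAmt) - 1
    const uint8_t ShiftedC = (uint8_t)(((C + 1) << ShAmt) - 1); // 15
    // Legality condition from the patch: (ShiftedC + 1).ashr(ShAmt) == C + 1.
    assert(((int8_t)(ShiftedC + 1) >> ShAmt) == C + 1);
    for (unsigned i = 0; i < 256; ++i) {
      uint8_t X = (uint8_t)i;
      // Arithmetic shift right (relies on >> of signed values being
      // arithmetic, as on common targets).
      uint8_t AShr = (uint8_t)((int8_t)X >> ShAmt);
      assert((AShr > C) == (X > ShiftedC));
    }
    return 0;
  }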
// If the compare constant has significant bits above the lowest sign-bit,
// then convert an unsigned cmp to a test of the sign-bit:
@@ -3957,6 +3956,33 @@ Instruction *InstCombinerImpl::foldICmpBinOp(ICmpInst &I,
(Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE))
return new ICmpInst(Pred, X, Builder.CreateNot(Op0));
+ {
+ // Similar to above: an unsigned overflow comparison may use offset + mask:
+ // ((Op1 + C) & C) u< Op1 --> Op1 != 0
+ // ((Op1 + C) & C) u>= Op1 --> Op1 == 0
+ // Op0 u> ((Op0 + C) & C) --> Op0 != 0
+ // Op0 u<= ((Op0 + C) & C) --> Op0 == 0
+ BinaryOperator *BO;
+ const APInt *C;
+ if ((Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE) &&
+ match(Op0, m_And(m_BinOp(BO), m_LowBitMask(C))) &&
+ match(BO, m_Add(m_Specific(Op1), m_SpecificIntAllowUndef(*C)))) {
+ CmpInst::Predicate NewPred =
+ Pred == ICmpInst::ICMP_ULT ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ;
+ Constant *Zero = ConstantInt::getNullValue(Op1->getType());
+ return new ICmpInst(NewPred, Op1, Zero);
+ }
+
+ if ((Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE) &&
+ match(Op1, m_And(m_BinOp(BO), m_LowBitMask(C))) &&
+ match(BO, m_Add(m_Specific(Op0), m_SpecificIntAllowUndef(*C)))) {
+ CmpInst::Predicate NewPred =
+ Pred == ICmpInst::ICMP_UGT ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ;
+ Constant *Zero = ConstantInt::getNullValue(Op1->getType());
+ return new ICmpInst(NewPred, Op0, Zero);
+ }
+ }
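A brute-force C++ sketch of the offset + mask folds above, checked over all 8-bit values; the low-bit mask C = 0x07 is an assumed example constant:

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint8_t C = 0x07; // low-bit mask
    for (unsigned i = 0; i < 256; ++i) {
      uint8_t Op1 = (uint8_t)i;
      uint8_t Masked = (uint8_t)((Op1 + C) & C); // ((Op1 + C) & C)
      assert((Masked < Op1) == (Op1 != 0));      // u<  Op1 --> Op1 != 0
      assert((Masked >= Op1) == (Op1 == 0));     // u>= Op1 --> Op1 == 0
    }
    return 0;
  }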
+
bool NoOp0WrapProblem = false, NoOp1WrapProblem = false;
if (BO0 && isa<OverflowingBinaryOperator>(BO0))
NoOp0WrapProblem =
diff --git a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
index 39b55b028110..7743b4c41555 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -148,6 +148,8 @@ public:
Instruction *SliceUpIllegalIntegerPHI(PHINode &PN);
Instruction *visitPHINode(PHINode &PN);
Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
+ Instruction *visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src);
+ Instruction *visitGEPOfBitcast(BitCastInst *BCI, GetElementPtrInst &GEP);
Instruction *visitAllocaInst(AllocaInst &AI);
Instruction *visitAllocSite(Instruction &FI);
Instruction *visitFree(CallInst &FI);
@@ -195,8 +197,6 @@ private:
bool shouldChangeType(unsigned FromBitWidth, unsigned ToBitWidth) const;
bool shouldChangeType(Type *From, Type *To) const;
Value *dyn_castNegVal(Value *V) const;
- Type *FindElementAtOffset(PointerType *PtrTy, int64_t Offset,
- SmallVectorImpl<Value *> &NewIndices);
/// Classify whether a cast is worth optimizing.
///
@@ -607,6 +607,16 @@ public:
/// only possible if all operands to the PHI are constants).
Instruction *foldOpIntoPhi(Instruction &I, PHINode *PN);
+ /// For a binary operator with 2 phi operands, try to hoist the binary
+ /// operation before the phi. This can result in fewer instructions in
+ /// patterns where at least one set of phi operands simplifies.
+ /// Example:
+ /// BB3: binop (phi [X, BB1], [C1, BB2]), (phi [Y, BB1], [C2, BB2])
+ /// -->
+ /// BB1: BO = binop X, Y
+ /// BB3: phi [BO, BB1], [(binop C1, C2), BB2]
+ Instruction *foldBinopWithPhiOperands(BinaryOperator &BO);
+
/// Given an instruction with a select as one operand and a constant as the
/// other operand, try to fold the binary operator into the select arguments.
/// This also works for Cast instructions, which obviously do not have a
diff --git a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 0dbfdba353c4..756792918dba 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -301,16 +301,17 @@ void PointerReplacer::replace(Instruction *I) {
assert(V && "Operand not replaced");
SmallVector<Value *, 8> Indices;
Indices.append(GEP->idx_begin(), GEP->idx_end());
- auto *NewI = GetElementPtrInst::Create(
- V->getType()->getPointerElementType(), V, Indices);
+ auto *NewI =
+ GetElementPtrInst::Create(GEP->getSourceElementType(), V, Indices);
IC.InsertNewInstWith(NewI, *GEP);
NewI->takeName(GEP);
WorkMap[GEP] = NewI;
} else if (auto *BC = dyn_cast<BitCastInst>(I)) {
auto *V = getReplacement(BC->getOperand(0));
assert(V && "Operand not replaced");
- auto *NewT = PointerType::get(BC->getType()->getPointerElementType(),
- V->getType()->getPointerAddressSpace());
+ auto *NewT = PointerType::getWithSamePointeeType(
+ cast<PointerType>(BC->getType()),
+ V->getType()->getPointerAddressSpace());
auto *NewI = new BitCastInst(V, NewT);
IC.InsertNewInstWith(NewI, *BC);
NewI->takeName(BC);
@@ -345,8 +346,7 @@ void PointerReplacer::replacePointer(Instruction &I, Value *V) {
#ifndef NDEBUG
auto *PT = cast<PointerType>(I.getType());
auto *NT = cast<PointerType>(V->getType());
- assert(PT != NT && PT->getElementType() == NT->getElementType() &&
- "Invalid usage");
+ assert(PT != NT && PT->hasSameElementTypeAs(NT) && "Invalid usage");
#endif
WorkMap[&I] = V;
diff --git a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index aca7ec8d7325..1aa10b550fc4 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -155,6 +155,9 @@ Instruction *InstCombinerImpl::visitMul(BinaryOperator &I) {
if (Instruction *X = foldVectorBinop(I))
return X;
+ if (Instruction *Phi = foldBinopWithPhiOperands(I))
+ return Phi;
+
if (Value *V = SimplifyUsingDistributiveLaws(I))
return replaceInstUsesWith(I, V);
@@ -348,13 +351,21 @@ Instruction *InstCombinerImpl::visitMul(BinaryOperator &I) {
return CastInst::Create(Instruction::SExt, And, I.getType());
}
- // (bool X) * Y --> X ? Y : 0
- // Y * (bool X) --> X ? Y : 0
+ // (zext bool X) * Y --> X ? Y : 0
+ // Y * (zext bool X) --> X ? Y : 0
if (match(Op0, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
return SelectInst::Create(X, Op1, ConstantInt::get(I.getType(), 0));
if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
return SelectInst::Create(X, Op0, ConstantInt::get(I.getType(), 0));
+ // (sext bool X) * C --> X ? -C : 0
+ Constant *ImmC;
+ if (match(Op0, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1) &&
+ match(Op1, m_ImmConstant(ImmC))) {
+ Constant *NegC = ConstantExpr::getNeg(ImmC);
+ return SelectInst::Create(X, NegC, ConstantInt::getNullValue(I.getType()));
+ }
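A tiny C++ sketch of the new sext-bool multiply fold; the i32 type and C = 41 are assumed example values:

  #include <cassert>
  #include <cstdint>

  int main() {
    const int32_t C = 41;
    for (int B = 0; B <= 1; ++B) {
      bool X = (B != 0);
      int32_t Sext = X ? -1 : 0;        // sext i1 X to i32
      assert(Sext * C == (X ? -C : 0)); // (sext bool X) * C --> X ? -C : 0
    }
    return 0;
  }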
+
// (lshr X, 31) * Y --> (ashr X, 31) & Y
// Y * (lshr X, 31) --> (ashr X, 31) & Y
// TODO: We are not checking one-use because the elimination of the multiply
@@ -442,6 +453,9 @@ Instruction *InstCombinerImpl::visitFMul(BinaryOperator &I) {
if (Instruction *X = foldVectorBinop(I))
return X;
+ if (Instruction *Phi = foldBinopWithPhiOperands(I))
+ return Phi;
+
if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
return FoldedMul;
@@ -742,6 +756,9 @@ static bool isMultiple(const APInt &C1, const APInt &C2, APInt &Quotient,
/// division instructions.
/// Common integer divide transforms
Instruction *InstCombinerImpl::commonIDivTransforms(BinaryOperator &I) {
+ if (Instruction *Phi = foldBinopWithPhiOperands(I))
+ return Phi;
+
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
bool IsSigned = I.getOpcode() == Instruction::SDiv;
Type *Ty = I.getType();
@@ -1359,6 +1376,9 @@ Instruction *InstCombinerImpl::visitFDiv(BinaryOperator &I) {
if (Instruction *X = foldVectorBinop(I))
return X;
+ if (Instruction *Phi = foldBinopWithPhiOperands(I))
+ return Phi;
+
if (Instruction *R = foldFDivConstantDivisor(I))
return R;
@@ -1460,6 +1480,9 @@ Instruction *InstCombinerImpl::visitFDiv(BinaryOperator &I) {
/// remainder instructions.
/// Common integer remainder transforms
Instruction *InstCombinerImpl::commonIRemTransforms(BinaryOperator &I) {
+ if (Instruction *Phi = foldBinopWithPhiOperands(I))
+ return Phi;
+
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
// The RHS is known non-zero.
@@ -1638,5 +1661,8 @@ Instruction *InstCombinerImpl::visitFRem(BinaryOperator &I) {
if (Instruction *X = foldVectorBinop(I))
return X;
+ if (Instruction *Phi = foldBinopWithPhiOperands(I))
+ return Phi;
+
return nullptr;
}
diff --git a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index a6d6b5199105..65e60498ff95 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -943,7 +943,7 @@ static Instruction *foldSelectCtlzToCttz(ICmpInst *ICI, Value *TrueVal,
}
/// Attempt to fold a cttz/ctlz followed by an icmp plus select into a single
-/// call to cttz/ctlz with flag 'is_zero_undef' cleared.
+/// call to cttz/ctlz with flag 'is_zero_poison' cleared.
///
/// For example, we can fold the following code sequence:
/// \code
@@ -987,7 +987,7 @@ static Value *foldSelectCttzCtlz(ICmpInst *ICI, Value *TrueVal, Value *FalseVal,
// sizeof in bits of 'Count'.
unsigned SizeOfInBits = Count->getType()->getScalarSizeInBits();
if (match(ValueOnZero, m_SpecificInt(SizeOfInBits))) {
- // Explicitly clear the 'undef_on_zero' flag. It's always valid to go from
+ // Explicitly clear the 'is_zero_poison' flag. It's always valid to go from
// true to false on this flag, so we can replace it for all users.
II->setArgOperand(1, ConstantInt::getFalse(II->getContext()));
return SelectArg;
@@ -995,7 +995,7 @@ static Value *foldSelectCttzCtlz(ICmpInst *ICI, Value *TrueVal, Value *FalseVal,
// The ValueOnZero is not the bitwidth. But if the cttz/ctlz (and optional
// zext/trunc) have one use (ending at the select), the cttz/ctlz result will
- // not be used if the input is zero. Relax to 'undef_on_zero' for that case.
+ // not be used if the input is zero. Relax to 'zero is poison' for that case.
if (II->hasOneUse() && SelectArg->hasOneUse() &&
!match(II->getArgOperand(1), m_One()))
II->setArgOperand(1, ConstantInt::getTrue(II->getContext()));
@@ -2325,8 +2325,9 @@ Instruction *InstCombinerImpl::matchSAddSubSat(Instruction &MinMax1) {
// The two operands of the add/sub must be nsw-truncatable to the NewTy. This
// is usually achieved via a sext from a smaller type.
- if (ComputeMinSignedBits(AddSub->getOperand(0), 0, AddSub) > NewBitWidth ||
- ComputeMinSignedBits(AddSub->getOperand(1), 0, AddSub) > NewBitWidth)
+ if (ComputeMaxSignificantBits(AddSub->getOperand(0), 0, AddSub) >
+ NewBitWidth ||
+ ComputeMaxSignificantBits(AddSub->getOperand(1), 0, AddSub) > NewBitWidth)
return nullptr;
// Finally create and return the sat intrinsic, truncated to the new type
diff --git a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 06421d553915..17f0c5c4cff0 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -369,6 +369,9 @@ static Instruction *foldShiftOfShiftedLogic(BinaryOperator &I,
}
Instruction *InstCombinerImpl::commonShiftTransforms(BinaryOperator &I) {
+ if (Instruction *Phi = foldBinopWithPhiOperands(I))
+ return Phi;
+
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
assert(Op0->getType() == Op1->getType());
@@ -1032,12 +1035,13 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
NewLShr->setIsExact(I.isExact());
return NewLShr;
}
- // (X << C1) >>u C --> (X >>u (C - C1)) & (-1 >> C)
- Value *NewLShr = Builder.CreateLShr(X, ShiftDiff, "", I.isExact());
- APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmtC));
- return BinaryOperator::CreateAnd(NewLShr, ConstantInt::get(Ty, Mask));
- }
- if (C1->ugt(ShAmtC)) {
+ if (Op0->hasOneUse()) {
+ // (X << C1) >>u C --> (X >>u (C - C1)) & (-1 >> C)
+ Value *NewLShr = Builder.CreateLShr(X, ShiftDiff, "", I.isExact());
+ APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmtC));
+ return BinaryOperator::CreateAnd(NewLShr, ConstantInt::get(Ty, Mask));
+ }
+ } else if (C1->ugt(ShAmtC)) {
unsigned ShlAmtC = C1->getZExtValue();
Constant *ShiftDiff = ConstantInt::get(Ty, ShlAmtC - ShAmtC);
if (cast<BinaryOperator>(Op0)->hasNoUnsignedWrap()) {
@@ -1046,15 +1050,33 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
NewShl->setHasNoUnsignedWrap(true);
return NewShl;
}
- // (X << C1) >>u C --> X << (C1 - C) & (-1 >> C)
- Value *NewShl = Builder.CreateShl(X, ShiftDiff);
+ if (Op0->hasOneUse()) {
+ // (X << C1) >>u C --> X << (C1 - C) & (-1 >> C)
+ Value *NewShl = Builder.CreateShl(X, ShiftDiff);
+ APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmtC));
+ return BinaryOperator::CreateAnd(NewShl, ConstantInt::get(Ty, Mask));
+ }
+ } else {
+ assert(*C1 == ShAmtC);
+ // (X << C) >>u C --> X & (-1 >>u C)
APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmtC));
- return BinaryOperator::CreateAnd(NewShl, ConstantInt::get(Ty, Mask));
+ return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, Mask));
}
- assert(*C1 == ShAmtC);
- // (X << C) >>u C --> X & (-1 >>u C)
- APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmtC));
- return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, Mask));
+ }
+
+ // ((X << C) + Y) >>u C --> (X + (Y >>u C)) & (-1 >>u C)
+ // TODO: Consolidate with the more general transform that starts from shl
+ // (the shifts are in the opposite order).
+ Value *Y;
+ if (match(Op0,
+ m_OneUse(m_c_Add(m_OneUse(m_Shl(m_Value(X), m_Specific(Op1))),
+ m_Value(Y))))) {
+ Value *NewLshr = Builder.CreateLShr(Y, Op1);
+ Value *NewAdd = Builder.CreateAdd(NewLshr, X);
+ unsigned Op1Val = C->getLimitedValue(BitWidth);
+ APInt Bits = APInt::getLowBitsSet(BitWidth, BitWidth - Op1Val);
+ Constant *Mask = ConstantInt::get(Ty, Bits);
+ return BinaryOperator::CreateAnd(NewAdd, Mask);
}
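A brute-force C++ sketch of the shifted-add lshr fold above, checked over all pairs of 8-bit inputs; the shift amount C = 3 is an assumed example constant:

  #include <cassert>
  #include <cstdint>

  int main() {
    const unsigned C = 3;
    const uint8_t Mask = (uint8_t)(0xffu >> C); // -1 >>u C
    for (unsigned x = 0; x < 256; ++x)
      for (unsigned y = 0; y < 256; ++y) {
        uint8_t X = (uint8_t)x, Y = (uint8_t)y;
        uint8_t LHS = (uint8_t)((uint8_t)((X << C) + Y) >> C); // ((X << C) + Y) >>u C
        uint8_t RHS = (uint8_t)((X + (Y >> C)) & Mask);        // (X + (Y >>u C)) & (-1 >>u C)
        assert(LHS == RHS);
      }
    return 0;
  }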
if (match(Op0, m_OneUse(m_ZExt(m_Value(X)))) &&
@@ -1094,7 +1116,6 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
}
}
- Value *Y;
if (ShAmtC == BitWidth - 1) {
// lshr i32 or(X,-X), 31 --> zext (X != 0)
if (match(Op0, m_OneUse(m_c_Or(m_Neg(m_Value(X)), m_Deferred(X)))))
diff --git a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index 4dc712f32536..71a5ae24eead 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -800,22 +800,21 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// Round NTZ down to the next byte. If we have 11 trailing zeros, then
// we need all the bits down to bit 8. Likewise, round NLZ. If we
// have 14 leading zeros, round to 8.
- NLZ &= ~7;
- NTZ &= ~7;
+ NLZ = alignDown(NLZ, 8);
+ NTZ = alignDown(NTZ, 8);
// If we need exactly one byte, we can do this transformation.
- if (BitWidth-NLZ-NTZ == 8) {
- unsigned ResultBit = NTZ;
- unsigned InputBit = BitWidth-NTZ-8;
-
+ if (BitWidth - NLZ - NTZ == 8) {
// Replace this with either a left or right shift to get the byte into
// the right place.
Instruction *NewVal;
- if (InputBit > ResultBit)
- NewVal = BinaryOperator::CreateLShr(II->getArgOperand(0),
- ConstantInt::get(I->getType(), InputBit-ResultBit));
+ if (NLZ > NTZ)
+ NewVal = BinaryOperator::CreateLShr(
+ II->getArgOperand(0),
+ ConstantInt::get(I->getType(), NLZ - NTZ));
else
- NewVal = BinaryOperator::CreateShl(II->getArgOperand(0),
- ConstantInt::get(I->getType(), ResultBit-InputBit));
+ NewVal = BinaryOperator::CreateShl(
+ II->getArgOperand(0),
+ ConstantInt::get(I->getType(), NTZ - NLZ));
NewVal->takeName(I);
return InsertNewInstWith(NewVal, *I);
}
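A C++ sketch of the equivalent demanded-bits rewrite above, using an assumed demanded mask of 0x0000ff00 (NLZ = 16, NTZ = 8) and a handful of sample inputs:

  #include <cassert>
  #include <cstdint>

  static uint32_t bswap32(uint32_t V) {
    return (V >> 24) | ((V >> 8) & 0xff00u) | ((V << 8) & 0xff0000u) | (V << 24);
  }

  int main() {
    const uint32_t Demanded = 0x0000ff00u; // only byte 1 of the result is demanded
    const uint32_t Samples[] = {0u, 0xffffffffu, 0x12345678u, 0x00ab0000u, 0xdeadbeefu};
    for (uint32_t X : Samples) // NLZ > NTZ -> replace bswap with lshr X, NLZ - NTZ
      assert((bswap32(X) & Demanded) == ((X >> (16 - 8)) & Demanded));
    return 0;
  }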
diff --git a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index c6a4602e59e3..736cf9c825d5 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -495,8 +495,7 @@ Instruction *InstCombinerImpl::visitExtractElementInst(ExtractElementInst &EI) {
}
GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
- cast<PointerType>(NewPtr->getType())->getElementType(), NewPtr,
- NewOps);
+ GEP->getSourceElementType(), NewPtr, NewOps);
NewGEP->setIsInBounds(GEP->isInBounds());
return NewGEP;
}
diff --git a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index eb5eadba194d..029be5257694 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -1027,13 +1027,11 @@ static Value *foldOperationIntoSelectOperand(Instruction &I, Value *SO,
if (!ConstIsRHS)
std::swap(Op0, Op1);
- auto *BO = cast<BinaryOperator>(&I);
- Value *RI = Builder.CreateBinOp(BO->getOpcode(), Op0, Op1,
- SO->getName() + ".op");
- auto *FPInst = dyn_cast<Instruction>(RI);
- if (FPInst && isa<FPMathOperator>(FPInst))
- FPInst->copyFastMathFlags(BO);
- return RI;
+ Value *NewBO = Builder.CreateBinOp(cast<BinaryOperator>(&I)->getOpcode(), Op0,
+ Op1, SO->getName() + ".op");
+ if (auto *NewBOI = dyn_cast<Instruction>(NewBO))
+ NewBOI->copyIRFlags(&I);
+ return NewBO;
}
Instruction *InstCombinerImpl::FoldOpIntoSelect(Instruction &Op,
@@ -1289,6 +1287,70 @@ Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN) {
return replaceInstUsesWith(I, NewPN);
}
+Instruction *InstCombinerImpl::foldBinopWithPhiOperands(BinaryOperator &BO) {
+ // TODO: This should be similar to the incoming values check in foldOpIntoPhi:
+ // we are guarding against replicating the binop in >1 predecessor.
+ // This could miss matching a phi with 2 constant incoming values.
+ auto *Phi0 = dyn_cast<PHINode>(BO.getOperand(0));
+ auto *Phi1 = dyn_cast<PHINode>(BO.getOperand(1));
+ if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
+ Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
+ return nullptr;
+
+ // TODO: Remove the restriction for binop being in the same block as the phis.
+ if (BO.getParent() != Phi0->getParent() ||
+ BO.getParent() != Phi1->getParent())
+ return nullptr;
+
+ // Match a pair of incoming constants for one of the predecessor blocks.
+ BasicBlock *ConstBB, *OtherBB;
+ Constant *C0, *C1;
+ if (match(Phi0->getIncomingValue(0), m_ImmConstant(C0))) {
+ ConstBB = Phi0->getIncomingBlock(0);
+ OtherBB = Phi0->getIncomingBlock(1);
+ } else if (match(Phi0->getIncomingValue(1), m_ImmConstant(C0))) {
+ ConstBB = Phi0->getIncomingBlock(1);
+ OtherBB = Phi0->getIncomingBlock(0);
+ } else {
+ return nullptr;
+ }
+ if (!match(Phi1->getIncomingValueForBlock(ConstBB), m_ImmConstant(C1)))
+ return nullptr;
+
+ // The block that we are hoisting to must reach here unconditionally.
+ // Otherwise, we could be speculatively executing an expensive or
+ // non-speculative op.
+ auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->getTerminator());
+ if (!PredBlockBranch || PredBlockBranch->isConditional() ||
+ !DT.isReachableFromEntry(OtherBB))
+ return nullptr;
+
+ // TODO: This check could be tightened to only apply to binops (div/rem) that
+ // are not safe to speculatively execute. But that could allow hoisting
+ // potentially expensive instructions (fdiv for example).
+ for (auto BBIter = BO.getParent()->begin(); &*BBIter != &BO; ++BBIter)
+ if (!isGuaranteedToTransferExecutionToSuccessor(&*BBIter))
+ return nullptr;
+
+ // Make a new binop in the predecessor block with the non-constant incoming
+ // values.
+ Builder.SetInsertPoint(PredBlockBranch);
+ Value *NewBO = Builder.CreateBinOp(BO.getOpcode(),
+ Phi0->getIncomingValueForBlock(OtherBB),
+ Phi1->getIncomingValueForBlock(OtherBB));
+ if (auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
+ NotFoldedNewBO->copyIRFlags(&BO);
+
+ // Fold constants for the predecessor block with constant incoming values.
+ Constant *NewC = ConstantExpr::get(BO.getOpcode(), C0, C1);
+
+ // Replace the binop with a phi of the new values. The old phis are dead.
+ PHINode *NewPhi = PHINode::Create(BO.getType(), 2);
+ NewPhi->addIncoming(NewBO, OtherBB);
+ NewPhi->addIncoming(NewC, ConstBB);
+ return NewPhi;
+}
+
Instruction *InstCombinerImpl::foldBinOpIntoSelectOrPhi(BinaryOperator &I) {
if (!isa<Constant>(I.getOperand(1)))
return nullptr;
@@ -1307,10 +1369,11 @@ Instruction *InstCombinerImpl::foldBinOpIntoSelectOrPhi(BinaryOperator &I) {
/// is a sequence of GEP indices into the pointed type that will land us at the
/// specified offset. If so, fill them into NewIndices and return the resultant
/// element type, otherwise return null.
-Type *
-InstCombinerImpl::FindElementAtOffset(PointerType *PtrTy, int64_t IntOffset,
- SmallVectorImpl<Value *> &NewIndices) {
- Type *Ty = PtrTy->getElementType();
+static Type *findElementAtOffset(PointerType *PtrTy, int64_t IntOffset,
+ SmallVectorImpl<Value *> &NewIndices,
+ const DataLayout &DL) {
+ // Only used by visitGEPOfBitcast(), which is skipped for opaque pointers.
+ Type *Ty = PtrTy->getNonOpaquePointerElementType();
if (!Ty->isSized())
return nullptr;
@@ -1320,7 +1383,7 @@ InstCombinerImpl::FindElementAtOffset(PointerType *PtrTy, int64_t IntOffset,
return nullptr;
for (const APInt &Index : Indices)
- NewIndices.push_back(Builder.getInt(Index));
+ NewIndices.push_back(ConstantInt::get(PtrTy->getContext(), Index));
return Ty;
}
@@ -1884,12 +1947,254 @@ static Instruction *foldSelectGEP(GetElementPtrInst &GEP,
return SelectInst::Create(Cond, NewTrueC, NewFalseC, "", nullptr, Sel);
}
+Instruction *InstCombinerImpl::visitGEPOfGEP(GetElementPtrInst &GEP,
+ GEPOperator *Src) {
+ // Combine Indices - If the source pointer to this getelementptr instruction
+ // is a getelementptr instruction with matching element type, combine the
+ // indices of the two getelementptr instructions into a single instruction.
+ if (Src->getResultElementType() != GEP.getSourceElementType())
+ return nullptr;
+
+ if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
+ return nullptr;
+
+ if (Src->getNumOperands() == 2 && GEP.getNumOperands() == 2 &&
+ Src->hasOneUse()) {
+ Value *GO1 = GEP.getOperand(1);
+ Value *SO1 = Src->getOperand(1);
+
+ if (LI) {
+ // Try to reassociate loop invariant GEP chains to enable LICM.
+ if (Loop *L = LI->getLoopFor(GEP.getParent())) {
+ // Reassociate the two GEPs if SO1 is variant in the loop and GO1 is
+ // invariant: this breaks the dependence between GEPs and allows LICM
+ // to hoist the invariant part out of the loop.
+ if (L->isLoopInvariant(GO1) && !L->isLoopInvariant(SO1)) {
+ // We have to be careful here.
+ // We have something like:
+ // %src = getelementptr <ty>, <ty>* %base, <ty> %idx
+ // %gep = getelementptr <ty>, <ty>* %src, <ty> %idx2
+ // If we just swap idx & idx2 then we could inadvertently
+ // change %src from a vector to a scalar, or vice versa.
+ // Cases:
+ // 1) %base a scalar & idx a scalar & idx2 a vector
+ // => Swapping idx & idx2 turns %src into a vector type.
+ // 2) %base a scalar & idx a vector & idx2 a scalar
+ // => Swapping idx & idx2 turns %src into a scalar type
+ // 3) %base, %idx, and %idx2 are scalars
+ // => %src & %gep are scalars
+ // => swapping idx & idx2 is safe
+ // 4) %base a vector
+ // => %src is a vector
+ // => swapping idx & idx2 is safe.
+ auto *SO0 = Src->getOperand(0);
+ auto *SO0Ty = SO0->getType();
+ if (!isa<VectorType>(GEP.getType()) || // case 3
+ isa<VectorType>(SO0Ty)) { // case 4
+ Src->setOperand(1, GO1);
+ GEP.setOperand(1, SO1);
+ return &GEP;
+ } else {
+ // Case 1 or 2
+ // -- have to recreate %src & %gep
+ // put NewSrc at same location as %src
+ Builder.SetInsertPoint(cast<Instruction>(Src));
+ Value *NewSrc = Builder.CreateGEP(
+ GEP.getSourceElementType(), SO0, GO1, Src->getName());
+ // Propagate 'inbounds' if the new source was not constant-folded.
+ if (auto *NewSrcGEPI = dyn_cast<GetElementPtrInst>(NewSrc))
+ NewSrcGEPI->setIsInBounds(Src->isInBounds());
+ GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
+ GEP.getSourceElementType(), NewSrc, {SO1});
+ NewGEP->setIsInBounds(GEP.isInBounds());
+ return NewGEP;
+ }
+ }
+ }
+ }
+ }
+
+ // Note that if our source is a gep chain itself then we wait for that
+ // chain to be resolved before we perform this transformation. This
+ // avoids us creating a TON of code in some cases.
+ if (auto *SrcGEP = dyn_cast<GEPOperator>(Src->getOperand(0)))
+ if (SrcGEP->getNumOperands() == 2 && shouldMergeGEPs(*Src, *SrcGEP))
+ return nullptr; // Wait until our source is folded to completion.
+
+ SmallVector<Value*, 8> Indices;
+
+ // Find out whether the last index in the source GEP is a sequential idx.
+ bool EndsWithSequential = false;
+ for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
+ I != E; ++I)
+ EndsWithSequential = I.isSequential();
+
+ // Can we combine the two pointer arithmetics offsets?
+ if (EndsWithSequential) {
+ // Replace: gep (gep %P, long B), long A, ...
+ // With: T = long A+B; gep %P, T, ...
+ Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
+ Value *GO1 = GEP.getOperand(1);
+
+ // If they aren't the same type, then the input hasn't been processed
+ // by the loop above yet (which canonicalizes sequential index types to
+ // intptr_t). Just avoid transforming this until the input has been
+ // normalized.
+ if (SO1->getType() != GO1->getType())
+ return nullptr;
+
+ Value *Sum =
+ SimplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
+ // Only do the combine when we are sure the cost after the
+ // merge is never more than that before the merge.
+ if (Sum == nullptr)
+ return nullptr;
+
+ // Update the GEP in place if possible.
+ if (Src->getNumOperands() == 2) {
+ GEP.setIsInBounds(isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP)));
+ replaceOperand(GEP, 0, Src->getOperand(0));
+ replaceOperand(GEP, 1, Sum);
+ return &GEP;
+ }
+ Indices.append(Src->op_begin()+1, Src->op_end()-1);
+ Indices.push_back(Sum);
+ Indices.append(GEP.op_begin()+2, GEP.op_end());
+ } else if (isa<Constant>(*GEP.idx_begin()) &&
+ cast<Constant>(*GEP.idx_begin())->isNullValue() &&
+ Src->getNumOperands() != 1) {
+ // Otherwise we can do the fold if the first index of the GEP is a zero
+ Indices.append(Src->op_begin()+1, Src->op_end());
+ Indices.append(GEP.idx_begin()+1, GEP.idx_end());
+ }
+
+ if (!Indices.empty())
+ return isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP))
+ ? GetElementPtrInst::CreateInBounds(
+ Src->getSourceElementType(), Src->getOperand(0), Indices,
+ GEP.getName())
+ : GetElementPtrInst::Create(Src->getSourceElementType(),
+ Src->getOperand(0), Indices,
+ GEP.getName());
+
+ return nullptr;
+}
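An illustrative C++ analogue of the sequential-index merge above (gep (gep %P, B), A --> gep %P, A + B); the array size and the indices A and B are assumptions for the example:

  #include <cassert>

  int main() {
    int Arr[16] = {};
    int *P = Arr;
    long A = 3, B = 5; // assumed constant indices
    // Replace: gep (gep %P, long B), long A   with   T = A + B; gep %P, T
    assert(&(&P[B])[A] == &P[A + B]);
    return 0;
  }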
+
+// Note that we may have also stripped an address space cast in between.
+Instruction *InstCombinerImpl::visitGEPOfBitcast(BitCastInst *BCI,
+ GetElementPtrInst &GEP) {
+ // With opaque pointers, there is no pointer element type we can use to
+ // adjust the GEP type.
+ PointerType *SrcType = cast<PointerType>(BCI->getSrcTy());
+ if (SrcType->isOpaque())
+ return nullptr;
+
+ Type *GEPEltType = GEP.getSourceElementType();
+ Type *SrcEltType = SrcType->getNonOpaquePointerElementType();
+ Value *SrcOp = BCI->getOperand(0);
+
+ // GEP directly using the source operand if this GEP is accessing an element
+ // of a bitcasted pointer to vector or array of the same dimensions:
+ // gep (bitcast <c x ty>* X to [c x ty]*), Y, Z --> gep X, Y, Z
+ // gep (bitcast [c x ty]* X to <c x ty>*), Y, Z --> gep X, Y, Z
+ auto areMatchingArrayAndVecTypes = [](Type *ArrTy, Type *VecTy,
+ const DataLayout &DL) {
+ auto *VecVTy = cast<FixedVectorType>(VecTy);
+ return ArrTy->getArrayElementType() == VecVTy->getElementType() &&
+ ArrTy->getArrayNumElements() == VecVTy->getNumElements() &&
+ DL.getTypeAllocSize(ArrTy) == DL.getTypeAllocSize(VecTy);
+ };
+ if (GEP.getNumOperands() == 3 &&
+ ((GEPEltType->isArrayTy() && isa<FixedVectorType>(SrcEltType) &&
+ areMatchingArrayAndVecTypes(GEPEltType, SrcEltType, DL)) ||
+ (isa<FixedVectorType>(GEPEltType) && SrcEltType->isArrayTy() &&
+ areMatchingArrayAndVecTypes(SrcEltType, GEPEltType, DL)))) {
+
+ // Create a new GEP here, as using `setOperand()` followed by
+ // `setSourceElementType()` won't actually update the type of the
+ // existing GEP Value, causing issues if this Value is accessed when
+ // constructing an AddrSpaceCastInst.
+ SmallVector<Value *, 8> Indices(GEP.indices());
+ Value *NGEP = GEP.isInBounds()
+ ? Builder.CreateInBoundsGEP(SrcEltType, SrcOp, Indices)
+ : Builder.CreateGEP(SrcEltType, SrcOp, Indices);
+ NGEP->takeName(&GEP);
+
+ // Preserve GEP address space to satisfy users
+ if (NGEP->getType()->getPointerAddressSpace() != GEP.getAddressSpace())
+ return new AddrSpaceCastInst(NGEP, GEP.getType());
+
+ return replaceInstUsesWith(GEP, NGEP);
+ }
+
+ // See if we can simplify:
+ // X = bitcast A* to B*
+ // Y = gep X, <...constant indices...>
+ // into a gep of the original struct. This is important for SROA and alias
+ // analysis of unions. If "A" is also a bitcast, wait for A/X to be merged.
+ unsigned OffsetBits = DL.getIndexTypeSizeInBits(GEP.getType());
+ APInt Offset(OffsetBits, 0);
+
+ // If the bitcast argument is an allocation, the bitcast is for conversion
+ // to the actual type of the allocation. Removing such bitcasts results in
+ // GEPs with an i8* base and pure byte offsets, which means the GEP is not
+ // aware of the struct or array hierarchy.
+ // By avoiding such GEPs, phi translation and MemoryDependencyAnalysis have
+ // a better chance to succeed.
+ if (!isa<BitCastInst>(SrcOp) && GEP.accumulateConstantOffset(DL, Offset) &&
+ !isAllocationFn(SrcOp, &TLI)) {
+ // If this GEP instruction doesn't move the pointer, just replace the GEP
+ // with a bitcast of the real input to the dest type.
+ if (!Offset) {
+ // If the bitcast is of an allocation, and the allocation will be
+ // converted to match the type of the cast, don't touch this.
+ if (isa<AllocaInst>(SrcOp)) {
+ // See if the bitcast simplifies, if so, don't nuke this GEP yet.
+ if (Instruction *I = visitBitCast(*BCI)) {
+ if (I != BCI) {
+ I->takeName(BCI);
+ BCI->getParent()->getInstList().insert(BCI->getIterator(), I);
+ replaceInstUsesWith(*BCI, I);
+ }
+ return &GEP;
+ }
+ }
+
+ if (SrcType->getPointerAddressSpace() != GEP.getAddressSpace())
+ return new AddrSpaceCastInst(SrcOp, GEP.getType());
+ return new BitCastInst(SrcOp, GEP.getType());
+ }
+
+ // Otherwise, if the offset is non-zero, we need to find out if there is a
+ // field at Offset in 'A's type. If so, we can pull the cast through the
+ // GEP.
+ SmallVector<Value*, 8> NewIndices;
+ if (findElementAtOffset(SrcType, Offset.getSExtValue(), NewIndices, DL)) {
+ Value *NGEP =
+ GEP.isInBounds()
+ ? Builder.CreateInBoundsGEP(SrcEltType, SrcOp, NewIndices)
+ : Builder.CreateGEP(SrcEltType, SrcOp, NewIndices);
+
+ if (NGEP->getType() == GEP.getType())
+ return replaceInstUsesWith(GEP, NGEP);
+ NGEP->takeName(&GEP);
+
+ if (NGEP->getType()->getPointerAddressSpace() != GEP.getAddressSpace())
+ return new AddrSpaceCastInst(NGEP, GEP.getType());
+ return new BitCastInst(NGEP, GEP.getType());
+ }
+ }
+
+ return nullptr;
+}
+
Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
- SmallVector<Value *, 8> Ops(GEP.operands());
+ Value *PtrOp = GEP.getOperand(0);
+ SmallVector<Value *, 8> Indices(GEP.indices());
Type *GEPType = GEP.getType();
Type *GEPEltType = GEP.getSourceElementType();
bool IsGEPSrcEleScalable = isa<ScalableVectorType>(GEPEltType);
- if (Value *V = SimplifyGEPInst(GEPEltType, Ops, GEP.isInBounds(),
+ if (Value *V = SimplifyGEPInst(GEPEltType, PtrOp, Indices, GEP.isInBounds(),
SQ.getWithInstruction(&GEP)))
return replaceInstUsesWith(GEP, V);
@@ -1912,8 +2217,6 @@ Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// undef elements to decrease demanded bits
}
- Value *PtrOp = GEP.getOperand(0);
-
// Eliminate unneeded casts for indices, and replace indices which displace
// by multiples of a zero size type with zero.
bool MadeChange = false;
@@ -2063,132 +2366,9 @@ Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
PtrOp = NewGEP;
}
- // Combine Indices - If the source pointer to this getelementptr instruction
- // is a getelementptr instruction, combine the indices of the two
- // getelementptr instructions into a single instruction.
- if (auto *Src = dyn_cast<GEPOperator>(PtrOp)) {
- if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
- return nullptr;
-
- if (Src->getNumOperands() == 2 && GEP.getNumOperands() == 2 &&
- Src->hasOneUse()) {
- Value *GO1 = GEP.getOperand(1);
- Value *SO1 = Src->getOperand(1);
-
- if (LI) {
- // Try to reassociate loop invariant GEP chains to enable LICM.
- if (Loop *L = LI->getLoopFor(GEP.getParent())) {
- // Reassociate the two GEPs if SO1 is variant in the loop and GO1 is
- // invariant: this breaks the dependence between GEPs and allows LICM
- // to hoist the invariant part out of the loop.
- if (L->isLoopInvariant(GO1) && !L->isLoopInvariant(SO1)) {
- // We have to be careful here.
- // We have something like:
- // %src = getelementptr <ty>, <ty>* %base, <ty> %idx
- // %gep = getelementptr <ty>, <ty>* %src, <ty> %idx2
- // If we just swap idx & idx2 then we could inadvertantly
- // change %src from a vector to a scalar, or vice versa.
- // Cases:
- // 1) %base a scalar & idx a scalar & idx2 a vector
- // => Swapping idx & idx2 turns %src into a vector type.
- // 2) %base a scalar & idx a vector & idx2 a scalar
- // => Swapping idx & idx2 turns %src in a scalar type
- // 3) %base, %idx, and %idx2 are scalars
- // => %src & %gep are scalars
- // => swapping idx & idx2 is safe
- // 4) %base a vector
- // => %src is a vector
- // => swapping idx & idx2 is safe.
- auto *SO0 = Src->getOperand(0);
- auto *SO0Ty = SO0->getType();
- if (!isa<VectorType>(GEPType) || // case 3
- isa<VectorType>(SO0Ty)) { // case 4
- Src->setOperand(1, GO1);
- GEP.setOperand(1, SO1);
- return &GEP;
- } else {
- // Case 1 or 2
- // -- have to recreate %src & %gep
- // put NewSrc at same location as %src
- Builder.SetInsertPoint(cast<Instruction>(PtrOp));
- Value *NewSrc =
- Builder.CreateGEP(GEPEltType, SO0, GO1, Src->getName());
- // Propagate 'inbounds' if the new source was not constant-folded.
- if (auto *NewSrcGEPI = dyn_cast<GetElementPtrInst>(NewSrc))
- NewSrcGEPI->setIsInBounds(Src->isInBounds());
- GetElementPtrInst *NewGEP =
- GetElementPtrInst::Create(GEPEltType, NewSrc, {SO1});
- NewGEP->setIsInBounds(GEP.isInBounds());
- return NewGEP;
- }
- }
- }
- }
- }
-
- // Note that if our source is a gep chain itself then we wait for that
- // chain to be resolved before we perform this transformation. This
- // avoids us creating a TON of code in some cases.
- if (auto *SrcGEP = dyn_cast<GEPOperator>(Src->getOperand(0)))
- if (SrcGEP->getNumOperands() == 2 && shouldMergeGEPs(*Src, *SrcGEP))
- return nullptr; // Wait until our source is folded to completion.
-
- SmallVector<Value*, 8> Indices;
-
- // Find out whether the last index in the source GEP is a sequential idx.
- bool EndsWithSequential = false;
- for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
- I != E; ++I)
- EndsWithSequential = I.isSequential();
-
- // Can we combine the two pointer arithmetics offsets?
- if (EndsWithSequential) {
- // Replace: gep (gep %P, long B), long A, ...
- // With: T = long A+B; gep %P, T, ...
- Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
- Value *GO1 = GEP.getOperand(1);
-
- // If they aren't the same type, then the input hasn't been processed
- // by the loop above yet (which canonicalizes sequential index types to
- // intptr_t). Just avoid transforming this until the input has been
- // normalized.
- if (SO1->getType() != GO1->getType())
- return nullptr;
-
- Value *Sum =
- SimplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
- // Only do the combine when we are sure the cost after the
- // merge is never more than that before the merge.
- if (Sum == nullptr)
- return nullptr;
-
- // Update the GEP in place if possible.
- if (Src->getNumOperands() == 2) {
- GEP.setIsInBounds(isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP)));
- replaceOperand(GEP, 0, Src->getOperand(0));
- replaceOperand(GEP, 1, Sum);
- return &GEP;
- }
- Indices.append(Src->op_begin()+1, Src->op_end()-1);
- Indices.push_back(Sum);
- Indices.append(GEP.op_begin()+2, GEP.op_end());
- } else if (isa<Constant>(*GEP.idx_begin()) &&
- cast<Constant>(*GEP.idx_begin())->isNullValue() &&
- Src->getNumOperands() != 1) {
- // Otherwise we can do the fold if the first index of the GEP is a zero
- Indices.append(Src->op_begin()+1, Src->op_end());
- Indices.append(GEP.idx_begin()+1, GEP.idx_end());
- }
-
- if (!Indices.empty())
- return isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP))
- ? GetElementPtrInst::CreateInBounds(
- Src->getSourceElementType(), Src->getOperand(0), Indices,
- GEP.getName())
- : GetElementPtrInst::Create(Src->getSourceElementType(),
- Src->getOperand(0), Indices,
- GEP.getName());
- }
+ if (auto *Src = dyn_cast<GEPOperator>(PtrOp))
+ if (Instruction *I = visitGEPOfGEP(GEP, Src))
+ return I;
// Skip if GEP source element type is scalable. The type alloc size is unknown
// at compile-time.
@@ -2234,9 +2414,13 @@ Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
Value *StrippedPtr = PtrOp->stripPointerCasts();
PointerType *StrippedPtrTy = cast<PointerType>(StrippedPtr->getType());
- if (StrippedPtr != PtrOp) {
+ // TODO: The basic approach of these folds is not compatible with opaque
+ // pointers, because we can't use bitcasts as a hint for a desirable GEP
+ // type. Instead, we should perform canonicalization directly on the GEP
+ // type. For now, skip these.
+ if (StrippedPtr != PtrOp && !StrippedPtrTy->isOpaque()) {
bool HasZeroPointerIndex = false;
- Type *StrippedPtrEltTy = StrippedPtrTy->getElementType();
+ Type *StrippedPtrEltTy = StrippedPtrTy->getNonOpaquePointerElementType();
if (auto *C = dyn_cast<ConstantInt>(GEP.getOperand(1)))
HasZeroPointerIndex = C->isZero();
@@ -2420,103 +2604,9 @@ Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
ASCStrippedPtrOp = BC;
}
- if (auto *BCI = dyn_cast<BitCastInst>(ASCStrippedPtrOp)) {
- Value *SrcOp = BCI->getOperand(0);
- PointerType *SrcType = cast<PointerType>(BCI->getSrcTy());
- Type *SrcEltType = SrcType->getElementType();
-
- // GEP directly using the source operand if this GEP is accessing an element
- // of a bitcasted pointer to vector or array of the same dimensions:
- // gep (bitcast <c x ty>* X to [c x ty]*), Y, Z --> gep X, Y, Z
- // gep (bitcast [c x ty]* X to <c x ty>*), Y, Z --> gep X, Y, Z
- auto areMatchingArrayAndVecTypes = [](Type *ArrTy, Type *VecTy,
- const DataLayout &DL) {
- auto *VecVTy = cast<FixedVectorType>(VecTy);
- return ArrTy->getArrayElementType() == VecVTy->getElementType() &&
- ArrTy->getArrayNumElements() == VecVTy->getNumElements() &&
- DL.getTypeAllocSize(ArrTy) == DL.getTypeAllocSize(VecTy);
- };
- if (GEP.getNumOperands() == 3 &&
- ((GEPEltType->isArrayTy() && isa<FixedVectorType>(SrcEltType) &&
- areMatchingArrayAndVecTypes(GEPEltType, SrcEltType, DL)) ||
- (isa<FixedVectorType>(GEPEltType) && SrcEltType->isArrayTy() &&
- areMatchingArrayAndVecTypes(SrcEltType, GEPEltType, DL)))) {
-
- // Create a new GEP here, as using `setOperand()` followed by
- // `setSourceElementType()` won't actually update the type of the
- // existing GEP Value. Causing issues if this Value is accessed when
- // constructing an AddrSpaceCastInst
- Value *NGEP =
- GEP.isInBounds()
- ? Builder.CreateInBoundsGEP(SrcEltType, SrcOp, {Ops[1], Ops[2]})
- : Builder.CreateGEP(SrcEltType, SrcOp, {Ops[1], Ops[2]});
- NGEP->takeName(&GEP);
-
- // Preserve GEP address space to satisfy users
- if (NGEP->getType()->getPointerAddressSpace() != GEP.getAddressSpace())
- return new AddrSpaceCastInst(NGEP, GEPType);
-
- return replaceInstUsesWith(GEP, NGEP);
- }
-
- // See if we can simplify:
- // X = bitcast A* to B*
- // Y = gep X, <...constant indices...>
- // into a gep of the original struct. This is important for SROA and alias
- // analysis of unions. If "A" is also a bitcast, wait for A/X to be merged.
- unsigned OffsetBits = DL.getIndexTypeSizeInBits(GEPType);
- APInt Offset(OffsetBits, 0);
-
- // If the bitcast argument is an allocation, The bitcast is for convertion
- // to actual type of allocation. Removing such bitcasts, results in having
- // GEPs with i8* base and pure byte offsets. That means GEP is not aware of
- // struct or array hierarchy.
- // By avoiding such GEPs, phi translation and MemoryDependencyAnalysis have
- // a better chance to succeed.
- if (!isa<BitCastInst>(SrcOp) && GEP.accumulateConstantOffset(DL, Offset) &&
- !isAllocationFn(SrcOp, &TLI)) {
- // If this GEP instruction doesn't move the pointer, just replace the GEP
- // with a bitcast of the real input to the dest type.
- if (!Offset) {
- // If the bitcast is of an allocation, and the allocation will be
- // converted to match the type of the cast, don't touch this.
- if (isa<AllocaInst>(SrcOp)) {
- // See if the bitcast simplifies, if so, don't nuke this GEP yet.
- if (Instruction *I = visitBitCast(*BCI)) {
- if (I != BCI) {
- I->takeName(BCI);
- BCI->getParent()->getInstList().insert(BCI->getIterator(), I);
- replaceInstUsesWith(*BCI, I);
- }
- return &GEP;
- }
- }
-
- if (SrcType->getPointerAddressSpace() != GEP.getAddressSpace())
- return new AddrSpaceCastInst(SrcOp, GEPType);
- return new BitCastInst(SrcOp, GEPType);
- }
-
- // Otherwise, if the offset is non-zero, we need to find out if there is a
- // field at Offset in 'A's type. If so, we can pull the cast through the
- // GEP.
- SmallVector<Value*, 8> NewIndices;
- if (FindElementAtOffset(SrcType, Offset.getSExtValue(), NewIndices)) {
- Value *NGEP =
- GEP.isInBounds()
- ? Builder.CreateInBoundsGEP(SrcEltType, SrcOp, NewIndices)
- : Builder.CreateGEP(SrcEltType, SrcOp, NewIndices);
-
- if (NGEP->getType() == GEPType)
- return replaceInstUsesWith(GEP, NGEP);
- NGEP->takeName(&GEP);
-
- if (NGEP->getType()->getPointerAddressSpace() != GEP.getAddressSpace())
- return new AddrSpaceCastInst(NGEP, GEPType);
- return new BitCastInst(NGEP, GEPType);
- }
- }
- }
+ if (auto *BCI = dyn_cast<BitCastInst>(ASCStrippedPtrOp))
+ if (Instruction *I = visitGEPOfBitcast(BCI, GEP))
+ return I;
if (!GEP.isInBounds()) {
unsigned IdxWidth =
@@ -2533,8 +2623,7 @@ Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
DL.getTypeAllocSize(AI->getAllocatedType()).getKnownMinSize());
if (BasePtrOffset.ule(AllocSize)) {
return GetElementPtrInst::CreateInBounds(
- GEP.getSourceElementType(), PtrOp, makeArrayRef(Ops).slice(1),
- GEP.getName());
+ GEP.getSourceElementType(), PtrOp, Indices, GEP.getName());
}
}
}
@@ -2553,10 +2642,6 @@ static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI,
if (auto *LI = dyn_cast<LoadInst>(V))
return isa<GlobalVariable>(LI->getPointerOperand());
// Two distinct allocations will never be equal.
- // We rely on LookThroughBitCast in isAllocLikeFn being false, since looking
- // through bitcasts of V can cause
- // the result statement below to be true, even when AI and V (ex:
- // i8* ->i32* ->i8* of AI) are the same allocations.
return isAllocLikeFn(V, &TLI) && V != AI;
}
@@ -2659,7 +2744,7 @@ static bool isAllocSiteRemovable(Instruction *AI,
continue;
}
- if (isReallocLikeFn(I, &TLI, true)) {
+ if (isReallocLikeFn(I, &TLI)) {
Users.emplace_back(I);
Worklist.push_back(I);
continue;
@@ -2682,6 +2767,8 @@ static bool isAllocSiteRemovable(Instruction *AI,
}
Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) {
+ assert(isa<AllocaInst>(MI) || isAllocRemovable(&cast<CallBase>(MI), &TLI));
+
// If we have a malloc call which is only used in any amount of comparisons to
// null and free calls, delete the calls and replace the comparisons with true
// or false as appropriate.
@@ -2900,7 +2987,7 @@ Instruction *InstCombinerImpl::visitFree(CallInst &FI) {
// If we had free(realloc(...)) with no intervening uses, then eliminate the
// realloc() entirely.
if (CallInst *CI = dyn_cast<CallInst>(Op)) {
- if (CI->hasOneUse() && isReallocLikeFn(CI, &TLI, true)) {
+ if (CI->hasOneUse() && isReallocLikeFn(CI, &TLI)) {
return eraseInstFromFunction(
*replaceInstUsesWith(*CI, CI->getOperand(0)));
}
@@ -3709,16 +3796,61 @@ Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) {
return nullptr;
}
+/// Check for the case where the call writes to an otherwise dead alloca. This
+/// shows up for unused out-params in idiomatic C/C++ code. Note that this
+/// helper *only* analyzes the write; it does not check any other legality aspect.
+static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI) {
+ auto *CB = dyn_cast<CallBase>(I);
+ if (!CB)
+ // TODO: handle e.g. store to alloca here - only worth doing if we extend
+ // to allow reload along used path as described below. Otherwise, this
+ // is simply a store to a dead allocation which will be removed.
+ return false;
+ Optional<MemoryLocation> Dest = MemoryLocation::getForDest(CB, TLI);
+ if (!Dest)
+ return false;
+ auto *AI = dyn_cast<AllocaInst>(getUnderlyingObject(Dest->Ptr));
+ if (!AI)
+ // TODO: allow malloc?
+ return false;
+ // TODO: allow memory access dominated by move point? Note that since AI
+ // could have a reference to itself captured by the call, we would need to
+ // account for cycles in doing so.
+ SmallVector<const User *> AllocaUsers;
+ SmallPtrSet<const User *, 4> Visited;
+ auto pushUsers = [&](const Instruction &I) {
+ for (const User *U : I.users()) {
+ if (Visited.insert(U).second)
+ AllocaUsers.push_back(U);
+ }
+ };
+ pushUsers(*AI);
+ while (!AllocaUsers.empty()) {
+ auto *UserI = cast<Instruction>(AllocaUsers.pop_back_val());
+ if (isa<BitCastInst>(UserI) || isa<GetElementPtrInst>(UserI) ||
+ isa<AddrSpaceCastInst>(UserI)) {
+ pushUsers(*UserI);
+ continue;
+ }
+ if (UserI == CB)
+ continue;
+ // TODO: support lifetime.start/end here
+ return false;
+ }
+ return true;
+}
+
/// Try to move the specified instruction from its current block into the
/// beginning of DestBlock, which can only happen if it's safe to move the
/// instruction past all of the instructions between it and the end of its
/// block.
-static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
+static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock,
+ TargetLibraryInfo &TLI) {
assert(I->getUniqueUndroppableUser() && "Invariants didn't hold!");
BasicBlock *SrcBlock = I->getParent();
// Cannot move control-flow-involving, volatile loads, vaarg, etc.
- if (isa<PHINode>(I) || I->isEHPad() || I->mayHaveSideEffects() ||
+ if (isa<PHINode>(I) || I->isEHPad() || I->mayThrow() || !I->willReturn() ||
I->isTerminator())
return false;
@@ -3738,6 +3870,14 @@ static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
if (CI->isConvergent())
return false;
}
+
+  // Unless we can prove that the memory write isn't visible except on the
+ // path we're sinking to, we must bail.
+ if (I->mayWriteToMemory()) {
+ if (!SoleWriteToDeadLocal(I, TLI))
+ return false;
+ }
+
// We can only sink load instructions if there is nothing between the load and
// the end of block that could change the value.
if (I->mayReadFromMemory()) {
@@ -3746,7 +3886,7 @@ static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
// successor block.
if (DestBlock->getUniquePredecessor() != I->getParent())
return false;
- for (BasicBlock::iterator Scan = I->getIterator(),
+ for (BasicBlock::iterator Scan = std::next(I->getIterator()),
E = I->getParent()->end();
Scan != E; ++Scan)
if (Scan->mayWriteToMemory())
@@ -3906,12 +4046,11 @@ bool InstCombinerImpl::run() {
// predecessor, so that we don't have to split the critical edge.
// Another option where we can sink is a block that ends with a
// terminator that does not pass control to other block (such as
- // return or unreachable). In this case:
+ // return or unreachable or resume). In this case:
// - I dominates the User (by SSA form);
// - the User will be executed at most once.
// So sinking I down to User is always profitable or neutral.
- if (UserParent->getUniquePredecessor() == BB ||
- (isa<ReturnInst>(Term) || isa<UnreachableInst>(Term))) {
+ if (UserParent->getUniquePredecessor() == BB || succ_empty(Term)) {
assert(DT.dominates(BB, UserParent) && "Dominance relation broken?");
return UserParent;
}
@@ -3922,7 +4061,7 @@ bool InstCombinerImpl::run() {
if (OptBB) {
auto *UserParent = *OptBB;
// Okay, the CFG is simple enough, try to sink this instruction.
- if (TryToSinkInstruction(I, UserParent)) {
+ if (TryToSinkInstruction(I, UserParent, TLI)) {
LLVM_DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
MadeIRChange = true;
// We'll add uses of the sunk instruction below, but since
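A minimal C++ sketch (editor's addition, not part of the patch) of the unused out-param idiom that the SoleWriteToDeadLocal helper added above is meant to recognize; all names in the sample are hypothetical.

// Editor's illustration only -- not part of the patch. The call is the sole
// write to 'len', a local that is never read again, so the helper above would
// treat it as a write to an otherwise dead alloca and TryToSinkInstruction may
// sink the call toward its single user. All names are hypothetical.
#include <cstdio>
#include <cstring>

static int parseFlags(const char *s, int *len) {
  *len = static_cast<int>(strlen(s)); // out-param write
  return s[0] == '-' ? 1 : 0;
}

int main(int argc, char **argv) {
  int len;                                          // never read afterwards
  int isFlag = parseFlags(argc > 1 ? argv[1] : "", &len);
  if (isFlag)                                       // result used on one path only
    printf("flag argument\n");
  return 0;
}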
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index bd2dc8d639fc..6e72255e51ae 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -1547,10 +1547,9 @@ void AddressSanitizer::getInterestingMemoryOperands(
Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
XCHG->getCompareOperand()->getType(), None);
} else if (auto CI = dyn_cast<CallInst>(I)) {
- auto *F = CI->getCalledFunction();
- if (F && (F->getName().startswith("llvm.masked.load.") ||
- F->getName().startswith("llvm.masked.store."))) {
- bool IsWrite = F->getName().startswith("llvm.masked.store.");
+ if (CI->getIntrinsicID() == Intrinsic::masked_load ||
+ CI->getIntrinsicID() == Intrinsic::masked_store) {
+ bool IsWrite = CI->getIntrinsicID() == Intrinsic::masked_store;
// Masked store has an initial operand for the value.
unsigned OpOffset = IsWrite ? 1 : 0;
if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
@@ -1559,7 +1558,7 @@ void AddressSanitizer::getInterestingMemoryOperands(
auto BasePtr = CI->getOperand(OpOffset);
if (ignoreAccess(LI, BasePtr))
return;
- auto Ty = cast<PointerType>(BasePtr->getType())->getElementType();
+ Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
MaybeAlign Alignment = Align(1);
// Otherwise no alignment guarantees. We probably got Undef.
if (auto *Op = dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
@@ -1653,11 +1652,10 @@ static void instrumentMaskedLoadOrStore(AddressSanitizer *Pass,
const DataLayout &DL, Type *IntptrTy,
Value *Mask, Instruction *I,
Value *Addr, MaybeAlign Alignment,
- unsigned Granularity, uint32_t TypeSize,
+ unsigned Granularity, Type *OpType,
bool IsWrite, Value *SizeArgument,
bool UseCalls, uint32_t Exp) {
- auto *VTy = cast<FixedVectorType>(
- cast<PointerType>(Addr->getType())->getElementType());
+ auto *VTy = cast<FixedVectorType>(OpType);
uint64_t ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
unsigned Num = VTy->getNumElements();
auto Zero = ConstantInt::get(IntptrTy, 0);
@@ -1735,7 +1733,7 @@ void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
unsigned Granularity = 1 << Mapping.Scale;
if (O.MaybeMask) {
instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.getInsn(),
- Addr, O.Alignment, Granularity, O.TypeSize,
+ Addr, O.Alignment, Granularity, O.OpType,
O.IsWrite, nullptr, UseCalls, Exp);
} else {
doInstrumentAddress(this, O.getInsn(), O.getInsn(), Addr, O.Alignment,
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index 9f26b37bbc79..ff3aa14a2a83 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -208,6 +208,14 @@ static cl::opt<bool> ClEventCallbacks(
cl::desc("Insert calls to __dfsan_*_callback functions on data events."),
cl::Hidden, cl::init(false));
+// Experimental feature that inserts callbacks for conditionals, including:
+// conditional branch, switch, select.
+// This must be true for dfsan_set_conditional_callback() to have effect.
+static cl::opt<bool> ClConditionalCallbacks(
+ "dfsan-conditional-callbacks",
+ cl::desc("Insert calls to callback functions on conditionals."), cl::Hidden,
+ cl::init(false));
+
// Controls whether the pass tracks the control flow of select instructions.
static cl::opt<bool> ClTrackSelectControlFlow(
"dfsan-track-select-control-flow",
@@ -428,6 +436,8 @@ class DataFlowSanitizer {
FunctionType *DFSanSetLabelFnTy;
FunctionType *DFSanNonzeroLabelFnTy;
FunctionType *DFSanVarargWrapperFnTy;
+ FunctionType *DFSanConditionalCallbackFnTy;
+ FunctionType *DFSanConditionalCallbackOriginFnTy;
FunctionType *DFSanCmpCallbackFnTy;
FunctionType *DFSanLoadStoreCallbackFnTy;
FunctionType *DFSanMemTransferCallbackFnTy;
@@ -444,6 +454,8 @@ class DataFlowSanitizer {
FunctionCallee DFSanLoadCallbackFn;
FunctionCallee DFSanStoreCallbackFn;
FunctionCallee DFSanMemTransferCallbackFn;
+ FunctionCallee DFSanConditionalCallbackFn;
+ FunctionCallee DFSanConditionalCallbackOriginFn;
FunctionCallee DFSanCmpCallbackFn;
FunctionCallee DFSanChainOriginFn;
FunctionCallee DFSanChainOriginIfTaintedFn;
@@ -454,7 +466,7 @@ class DataFlowSanitizer {
MDNode *OriginStoreWeights;
DFSanABIList ABIList;
DenseMap<Value *, Function *> UnwrappedFnMap;
- AttrBuilder ReadOnlyNoneAttrs;
+ AttributeMask ReadOnlyNoneAttrs;
/// Memory map parameters used in calculation mapping application addresses
/// to shadow addresses and origin addresses.
@@ -642,6 +654,10 @@ struct DFSanFunction {
Align getShadowAlign(Align InstAlignment);
+  // If ClConditionalCallbacks is enabled, insert a callback before the given
+  // instruction (conditional branch, switch or select), using the given
+  // condition expression.
+ void addConditionalCallbacksIfEnabled(Instruction &I, Value *Condition);
+
private:
/// Collapses the shadow with aggregate type into a single primitive shadow
/// value.
@@ -748,6 +764,8 @@ public:
void visitSelectInst(SelectInst &I);
void visitMemSetInst(MemSetInst &I);
void visitMemTransferInst(MemTransferInst &I);
+ void visitBranchInst(BranchInst &BR);
+ void visitSwitchInst(SwitchInst &SW);
private:
void visitCASOrRMW(Align InstAlignment, Instruction &I);
@@ -971,6 +989,22 @@ Value *DFSanFunction::collapseToPrimitiveShadow(Value *Shadow,
return PrimitiveShadow;
}
+void DFSanFunction::addConditionalCallbacksIfEnabled(Instruction &I,
+ Value *Condition) {
+ if (!ClConditionalCallbacks) {
+ return;
+ }
+ IRBuilder<> IRB(&I);
+ Value *CondShadow = getShadow(Condition);
+ if (DFS.shouldTrackOrigins()) {
+ Value *CondOrigin = getOrigin(Condition);
+ IRB.CreateCall(DFS.DFSanConditionalCallbackOriginFn,
+ {CondShadow, CondOrigin});
+ } else {
+ IRB.CreateCall(DFS.DFSanConditionalCallbackFn, {CondShadow});
+ }
+}
+
Type *DataFlowSanitizer::getShadowTy(Type *OrigTy) {
if (!OrigTy->isSized())
return PrimitiveShadowTy;
@@ -1032,6 +1066,13 @@ bool DataFlowSanitizer::initializeModule(Module &M) {
FunctionType::get(Type::getVoidTy(*Ctx), None, /*isVarArg=*/false);
DFSanVarargWrapperFnTy = FunctionType::get(
Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);
+ DFSanConditionalCallbackFnTy =
+ FunctionType::get(Type::getVoidTy(*Ctx), PrimitiveShadowTy,
+ /*isVarArg=*/false);
+ Type *DFSanConditionalCallbackOriginArgs[2] = {PrimitiveShadowTy, OriginTy};
+ DFSanConditionalCallbackOriginFnTy = FunctionType::get(
+ Type::getVoidTy(*Ctx), DFSanConditionalCallbackOriginArgs,
+ /*isVarArg=*/false);
DFSanCmpCallbackFnTy =
FunctionType::get(Type::getVoidTy(*Ctx), PrimitiveShadowTy,
/*isVarArg=*/false);
@@ -1160,7 +1201,7 @@ Constant *DataFlowSanitizer::getOrBuildTrampolineFunction(FunctionType *FT,
// F is called by a wrapped custom function with primitive shadows. So
// its arguments and return value need conversion.
DFSanFunction DFSF(*this, F, /*IsNativeABI=*/true,
- /*ForceZeroLabels=*/false);
+ /*IsForceZeroLabels=*/false);
Function::arg_iterator ValAI = F->arg_begin(), ShadowAI = AI;
++ValAI;
for (unsigned N = FT->getNumParams(); N != 0; ++ValAI, ++ShadowAI, --N) {
@@ -1271,6 +1312,10 @@ void DataFlowSanitizer::initializeRuntimeFunctions(Module &M) {
DFSanRuntimeFunctions.insert(
DFSanMemTransferCallbackFn.getCallee()->stripPointerCasts());
DFSanRuntimeFunctions.insert(
+ DFSanConditionalCallbackFn.getCallee()->stripPointerCasts());
+ DFSanRuntimeFunctions.insert(
+ DFSanConditionalCallbackOriginFn.getCallee()->stripPointerCasts());
+ DFSanRuntimeFunctions.insert(
DFSanCmpCallbackFn.getCallee()->stripPointerCasts());
DFSanRuntimeFunctions.insert(
DFSanChainOriginFn.getCallee()->stripPointerCasts());
@@ -1292,6 +1337,12 @@ void DataFlowSanitizer::initializeCallbackFunctions(Module &M) {
"__dfsan_mem_transfer_callback", DFSanMemTransferCallbackFnTy);
DFSanCmpCallbackFn =
Mod->getOrInsertFunction("__dfsan_cmp_callback", DFSanCmpCallbackFnTy);
+
+ DFSanConditionalCallbackFn = Mod->getOrInsertFunction(
+ "__dfsan_conditional_callback", DFSanConditionalCallbackFnTy);
+ DFSanConditionalCallbackOriginFn =
+ Mod->getOrInsertFunction("__dfsan_conditional_callback_origin",
+ DFSanConditionalCallbackOriginFnTy);
}
void DataFlowSanitizer::injectMetadataGlobals(Module &M) {
@@ -2593,6 +2644,8 @@ void DFSanVisitor::visitSelectInst(SelectInst &I) {
Value *FalseOrigin =
ShouldTrackOrigins ? DFSF.getOrigin(I.getFalseValue()) : nullptr;
+ DFSF.addConditionalCallbacksIfEnabled(I, I.getCondition());
+
if (isa<VectorType>(I.getCondition()->getType())) {
ShadowSel = DFSF.combineShadowsThenConvert(I.getType(), TrueShadow,
FalseShadow, &I);
@@ -2683,6 +2736,17 @@ void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) {
}
}
+void DFSanVisitor::visitBranchInst(BranchInst &BR) {
+ if (!BR.isConditional())
+ return;
+
+ DFSF.addConditionalCallbacksIfEnabled(BR, BR.getCondition());
+}
+
+void DFSanVisitor::visitSwitchInst(SwitchInst &SW) {
+ DFSF.addConditionalCallbacksIfEnabled(SW, SW.getCondition());
+}
+
static bool isAMustTailRetVal(Value *RetVal) {
// Tail call may have a bitcast between return.
if (auto *I = dyn_cast<BitCastInst>(RetVal)) {
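A short sketch (editor's addition, not part of the patch) of the source shape the new -dfsan-conditional-callbacks instrumentation targets; the flag and hook names come from the hunks above, while the sample program itself is hypothetical.

// Editor's illustration only -- not part of the patch. Built with
// -fsanitize=dataflow and the new option enabled (e.g. -mllvm
// -dfsan-conditional-callbacks), the pass inserts a call to
// __dfsan_conditional_callback (or __dfsan_conditional_callback_origin when
// origin tracking is on) with the shadow of 'x > 0' immediately before the
// conditional branch, and likewise before switch and select conditions.
#include <sanitizer/dfsan_interface.h>
#include <cstdio>

int main() {
  int x = 42;
  dfsan_set_label(/*label=*/1, &x, sizeof(x)); // taint x
  if (x > 0)                                   // condition shadow reported here
    printf("positive\n");
  return 0;
}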
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index 8d3bc1383e96..fb10a99d1338 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -1403,16 +1403,16 @@ bool HWAddressSanitizer::instrumentStack(
size_t Size = getAllocaSizeInBytes(*AI);
size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
+ auto TagEnd = [&](Instruction *Node) {
+ IRB.SetInsertPoint(Node);
+ Value *UARTag = getUARTag(IRB, StackTag);
+ tagAlloca(IRB, AI, UARTag, AlignedSize);
+ };
bool StandardLifetime =
UnrecognizedLifetimes.empty() && isStandardLifetime(Info, GetDT());
if (DetectUseAfterScope && StandardLifetime) {
IntrinsicInst *Start = Info.LifetimeStart[0];
IRB.SetInsertPoint(Start->getNextNode());
- auto TagEnd = [&](Instruction *Node) {
- IRB.SetInsertPoint(Node);
- Value *UARTag = getUARTag(IRB, StackTag);
- tagAlloca(IRB, AI, UARTag, AlignedSize);
- };
tagAlloca(IRB, AI, Tag, Size);
if (!forAllReachableExits(GetDT(), GetPDT(), Start, Info.LifetimeEnd,
RetVec, TagEnd)) {
@@ -1421,11 +1421,8 @@ bool HWAddressSanitizer::instrumentStack(
}
} else {
tagAlloca(IRB, AI, Tag, Size);
- for (auto *RI : RetVec) {
- IRB.SetInsertPoint(RI);
- Value *UARTag = getUARTag(IRB, StackTag);
- tagAlloca(IRB, AI, UARTag, AlignedSize);
- }
+ for (auto *RI : RetVec)
+ TagEnd(RI);
if (!StandardLifetime) {
for (auto &II : Info.LifetimeStart)
II->eraseFromParent();
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp b/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
index de34348606ef..ab179b03dd29 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
@@ -248,8 +248,7 @@ public:
PGOCounterPromoter(
DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCands,
Loop &CurLoop, LoopInfo &LI, BlockFrequencyInfo *BFI)
- : LoopToCandidates(LoopToCands), ExitBlocks(), InsertPts(), L(CurLoop),
- LI(LI), BFI(BFI) {
+ : LoopToCandidates(LoopToCands), L(CurLoop), LI(LI), BFI(BFI) {
// Skip collection of ExitBlocks and InsertPts for loops that will not be
// able to have counters promoted.
@@ -446,24 +445,19 @@ llvm::createInstrProfilingLegacyPass(const InstrProfOptions &Options,
return new InstrProfilingLegacyPass(Options, IsCS);
}
-static InstrProfIncrementInst *castToIncrementInst(Instruction *Instr) {
- InstrProfIncrementInst *Inc = dyn_cast<InstrProfIncrementInstStep>(Instr);
- if (Inc)
- return Inc;
- return dyn_cast<InstrProfIncrementInst>(Instr);
-}
-
bool InstrProfiling::lowerIntrinsics(Function *F) {
bool MadeChange = false;
PromotionCandidates.clear();
for (BasicBlock &BB : *F) {
for (Instruction &Instr : llvm::make_early_inc_range(BB)) {
- InstrProfIncrementInst *Inc = castToIncrementInst(&Instr);
- if (Inc) {
- lowerIncrement(Inc);
+ if (auto *IPIS = dyn_cast<InstrProfIncrementInstStep>(&Instr)) {
+ lowerIncrement(IPIS);
+ MadeChange = true;
+ } else if (auto *IPI = dyn_cast<InstrProfIncrementInst>(&Instr)) {
+ lowerIncrement(IPI);
MadeChange = true;
- } else if (auto *Ind = dyn_cast<InstrProfValueProfileInst>(&Instr)) {
- lowerValueProfileInst(Ind);
+ } else if (auto *IPVP = dyn_cast<InstrProfValueProfileInst>(&Instr)) {
+ lowerValueProfileInst(IPVP);
MadeChange = true;
}
}
@@ -540,19 +534,14 @@ static bool needsRuntimeHookUnconditionally(const Triple &TT) {
/// Check if the module contains uses of any profiling intrinsics.
static bool containsProfilingIntrinsics(Module &M) {
- if (auto *F = M.getFunction(
- Intrinsic::getName(llvm::Intrinsic::instrprof_increment)))
- if (!F->use_empty())
- return true;
- if (auto *F = M.getFunction(
- Intrinsic::getName(llvm::Intrinsic::instrprof_increment_step)))
- if (!F->use_empty())
- return true;
- if (auto *F = M.getFunction(
- Intrinsic::getName(llvm::Intrinsic::instrprof_value_profile)))
- if (!F->use_empty())
- return true;
- return false;
+ auto containsIntrinsic = [&](int ID) {
+ if (auto *F = M.getFunction(Intrinsic::getName(ID)))
+ return !F->use_empty();
+ return false;
+ };
+ return containsIntrinsic(llvm::Intrinsic::instrprof_increment) ||
+ containsIntrinsic(llvm::Intrinsic::instrprof_increment_step) ||
+ containsIntrinsic(llvm::Intrinsic::instrprof_value_profile);
}
bool InstrProfiling::run(
@@ -771,7 +760,7 @@ void InstrProfiling::lowerCoverageData(GlobalVariable *CoverageNamesVar) {
}
/// Get the name of a profiling variable for a particular function.
-static std::string getVarName(InstrProfIncrementInst *Inc, StringRef Prefix,
+static std::string getVarName(InstrProfInstBase *Inc, StringRef Prefix,
bool &Renamed) {
StringRef NamePrefix = getInstrProfNameVarPrefix();
StringRef Name = Inc->getName()->getName().substr(NamePrefix.size());
@@ -860,7 +849,7 @@ static bool needsRuntimeRegistrationOfSectionRange(const Triple &TT) {
}
GlobalVariable *
-InstrProfiling::getOrCreateRegionCounters(InstrProfIncrementInst *Inc) {
+InstrProfiling::getOrCreateRegionCounters(InstrProfInstBase *Inc) {
GlobalVariable *NamePtr = Inc->getName();
auto &PD = ProfileDataMap[NamePtr];
if (PD.RegionCounters)
@@ -997,8 +986,11 @@ InstrProfiling::getOrCreateRegionCounters(InstrProfIncrementInst *Inc) {
ConstantExpr::getBitCast(ValuesVar, Type::getInt8PtrTy(Ctx));
}
- if (DebugInfoCorrelate)
+ if (DebugInfoCorrelate) {
+ // Mark the counter variable as used so that it isn't optimized out.
+ CompilerUsedVars.push_back(PD.RegionCounters);
return PD.RegionCounters;
+ }
// Create data variable.
auto *IntPtrTy = M->getDataLayout().getIntPtrType(M->getContext());
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp b/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp
index 727672fa0605..8fedefccf0e1 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp
@@ -156,6 +156,7 @@ struct InterestingMemoryAccess {
Value *Addr = nullptr;
bool IsWrite;
unsigned Alignment;
+ Type *AccessTy;
uint64_t TypeSize;
Value *MaybeMask = nullptr;
};
@@ -181,7 +182,7 @@ public:
Value *Addr, uint32_t TypeSize, bool IsWrite);
void instrumentMaskedLoadOrStore(const DataLayout &DL, Value *Mask,
Instruction *I, Value *Addr,
- unsigned Alignment, uint32_t TypeSize,
+ unsigned Alignment, Type *AccessTy,
bool IsWrite);
void instrumentMemIntrinsic(MemIntrinsic *MI);
Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
@@ -334,36 +335,32 @@ MemProfiler::isInterestingMemoryAccess(Instruction *I) const {
InterestingMemoryAccess Access;
- const DataLayout &DL = I->getModule()->getDataLayout();
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
if (!ClInstrumentReads)
return None;
Access.IsWrite = false;
- Access.TypeSize = DL.getTypeStoreSizeInBits(LI->getType());
+ Access.AccessTy = LI->getType();
Access.Alignment = LI->getAlignment();
Access.Addr = LI->getPointerOperand();
} else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
if (!ClInstrumentWrites)
return None;
Access.IsWrite = true;
- Access.TypeSize =
- DL.getTypeStoreSizeInBits(SI->getValueOperand()->getType());
+ Access.AccessTy = SI->getValueOperand()->getType();
Access.Alignment = SI->getAlignment();
Access.Addr = SI->getPointerOperand();
} else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
if (!ClInstrumentAtomics)
return None;
Access.IsWrite = true;
- Access.TypeSize =
- DL.getTypeStoreSizeInBits(RMW->getValOperand()->getType());
+ Access.AccessTy = RMW->getValOperand()->getType();
Access.Alignment = 0;
Access.Addr = RMW->getPointerOperand();
} else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
if (!ClInstrumentAtomics)
return None;
Access.IsWrite = true;
- Access.TypeSize =
- DL.getTypeStoreSizeInBits(XCHG->getCompareOperand()->getType());
+ Access.AccessTy = XCHG->getCompareOperand()->getType();
Access.Alignment = 0;
Access.Addr = XCHG->getPointerOperand();
} else if (auto *CI = dyn_cast<CallInst>(I)) {
@@ -376,16 +373,16 @@ MemProfiler::isInterestingMemoryAccess(Instruction *I) const {
return None;
// Masked store has an initial operand for the value.
OpOffset = 1;
+ Access.AccessTy = CI->getArgOperand(0)->getType();
Access.IsWrite = true;
} else {
if (!ClInstrumentReads)
return None;
+ Access.AccessTy = CI->getType();
Access.IsWrite = false;
}
auto *BasePtr = CI->getOperand(0 + OpOffset);
- auto *Ty = cast<PointerType>(BasePtr->getType())->getElementType();
- Access.TypeSize = DL.getTypeStoreSizeInBits(Ty);
if (auto *AlignmentConstant =
dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
Access.Alignment = (unsigned)AlignmentConstant->getZExtValue();
@@ -412,15 +409,16 @@ MemProfiler::isInterestingMemoryAccess(Instruction *I) const {
if (Access.Addr->isSwiftError())
return None;
+ const DataLayout &DL = I->getModule()->getDataLayout();
+ Access.TypeSize = DL.getTypeStoreSizeInBits(Access.AccessTy);
return Access;
}
void MemProfiler::instrumentMaskedLoadOrStore(const DataLayout &DL, Value *Mask,
Instruction *I, Value *Addr,
unsigned Alignment,
- uint32_t TypeSize, bool IsWrite) {
- auto *VTy = cast<FixedVectorType>(
- cast<PointerType>(Addr->getType())->getElementType());
+ Type *AccessTy, bool IsWrite) {
+ auto *VTy = cast<FixedVectorType>(AccessTy);
uint64_t ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
unsigned Num = VTy->getNumElements();
auto *Zero = ConstantInt::get(IntptrTy, 0);
@@ -469,7 +467,7 @@ void MemProfiler::instrumentMop(Instruction *I, const DataLayout &DL,
if (Access.MaybeMask) {
instrumentMaskedLoadOrStore(DL, Access.MaybeMask, I, Access.Addr,
- Access.Alignment, Access.TypeSize,
+ Access.Alignment, Access.AccessTy,
Access.IsWrite);
} else {
// Since the access counts will be accumulated across the entire allocation,
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 446e601cd4d7..cfe993dedbc2 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -492,7 +492,7 @@ class MemorySanitizer {
public:
MemorySanitizer(Module &M, MemorySanitizerOptions Options)
: CompileKernel(Options.Kernel), TrackOrigins(Options.TrackOrigins),
- Recover(Options.Recover) {
+ Recover(Options.Recover), EagerChecks(Options.EagerChecks) {
initializeModule(M);
}
@@ -522,6 +522,7 @@ private:
/// Track origins (allocation points) of uninitialized values.
int TrackOrigins;
bool Recover;
+ bool EagerChecks;
LLVMContext *C;
Type *IntptrTy;
@@ -665,10 +666,12 @@ template <class T> T getOptOrDefault(const cl::opt<T> &Opt, T Default) {
} // end anonymous namespace
-MemorySanitizerOptions::MemorySanitizerOptions(int TO, bool R, bool K)
+MemorySanitizerOptions::MemorySanitizerOptions(int TO, bool R, bool K,
+ bool EagerChecks)
: Kernel(getOptOrDefault(ClEnableKmsan, K)),
TrackOrigins(getOptOrDefault(ClTrackOrigins, Kernel ? 2 : TO)),
- Recover(getOptOrDefault(ClKeepGoing, Kernel || R)) {}
+ Recover(getOptOrDefault(ClKeepGoing, Kernel || R)),
+ EagerChecks(getOptOrDefault(ClEagerChecks, EagerChecks)) {}
PreservedAnalyses MemorySanitizerPass::run(Function &F,
FunctionAnalysisManager &FAM) {
@@ -695,6 +698,8 @@ void MemorySanitizerPass::printPipeline(
OS << "recover;";
if (Options.Kernel)
OS << "kernel;";
+ if (Options.EagerChecks)
+ OS << "eager-checks;";
OS << "track-origins=" << Options.TrackOrigins;
OS << ">";
}
@@ -1667,9 +1672,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// This function either returns the value set earlier with setShadow,
/// or extracts if from ParamTLS (for function arguments).
Value *getShadow(Value *V) {
- if (!PropagateShadow) return getCleanShadow(V);
if (Instruction *I = dyn_cast<Instruction>(V)) {
- if (I->getMetadata("nosanitize"))
+ if (!PropagateShadow || I->getMetadata("nosanitize"))
return getCleanShadow(V);
// For instructions the shadow is already stored in the map.
Value *Shadow = ShadowMap[V];
@@ -1681,7 +1685,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
return Shadow;
}
if (UndefValue *U = dyn_cast<UndefValue>(V)) {
- Value *AllOnes = PoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
+ Value *AllOnes = (PropagateShadow && PoisonUndef) ? getPoisonedShadow(V)
+ : getCleanShadow(V);
LLVM_DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
(void)U;
return AllOnes;
@@ -1701,22 +1706,13 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
continue;
}
- bool FArgByVal = FArg.hasByValAttr();
- bool FArgNoUndef = FArg.hasAttribute(Attribute::NoUndef);
- bool FArgEagerCheck = ClEagerChecks && !FArgByVal && FArgNoUndef;
- unsigned Size =
- FArg.hasByValAttr()
- ? DL.getTypeAllocSize(FArg.getParamByValType())
- : DL.getTypeAllocSize(FArg.getType());
+ unsigned Size = FArg.hasByValAttr()
+ ? DL.getTypeAllocSize(FArg.getParamByValType())
+ : DL.getTypeAllocSize(FArg.getType());
if (A == &FArg) {
bool Overflow = ArgOffset + Size > kParamTLSSize;
- if (FArgEagerCheck) {
- *ShadowPtr = getCleanShadow(V);
- setOrigin(A, getCleanOrigin());
- break;
- } else if (FArgByVal) {
- Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
+ if (FArg.hasByValAttr()) {
// ByVal pointer itself has clean shadow. We copy the actual
// argument shadow to the underlying memory.
// Figure out maximal valid memcpy alignment.
@@ -1727,40 +1723,38 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/*isStore*/ true)
.first;
// TODO(glider): need to copy origins.
- if (Overflow) {
+ if (!PropagateShadow || Overflow) {
// ParamTLS overflow.
EntryIRB.CreateMemSet(
CpShadowPtr, Constant::getNullValue(EntryIRB.getInt8Ty()),
Size, ArgAlign);
} else {
+ Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
const Align CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
Value *Cpy = EntryIRB.CreateMemCpy(CpShadowPtr, CopyAlign, Base,
CopyAlign, Size);
LLVM_DEBUG(dbgs() << " ByValCpy: " << *Cpy << "\n");
(void)Cpy;
}
+ }
+
+ if (!PropagateShadow || Overflow || FArg.hasByValAttr() ||
+ (MS.EagerChecks && FArg.hasAttribute(Attribute::NoUndef))) {
*ShadowPtr = getCleanShadow(V);
+ setOrigin(A, getCleanOrigin());
} else {
// Shadow over TLS
Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
- if (Overflow) {
- // ParamTLS overflow.
- *ShadowPtr = getCleanShadow(V);
- } else {
- *ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg), Base,
- kShadowTLSAlignment);
+ *ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg), Base,
+ kShadowTLSAlignment);
+ if (MS.TrackOrigins) {
+ Value *OriginPtr =
+ getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
+ setOrigin(A, EntryIRB.CreateLoad(MS.OriginTy, OriginPtr));
}
}
LLVM_DEBUG(dbgs()
<< " ARG: " << FArg << " ==> " << **ShadowPtr << "\n");
- if (MS.TrackOrigins && !Overflow) {
- Value *OriginPtr =
- getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
- setOrigin(A, EntryIRB.CreateLoad(MS.OriginTy, OriginPtr));
- } else {
- setOrigin(A, getCleanOrigin());
- }
-
break;
}
@@ -3664,7 +3658,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// will become a non-readonly function after it is instrumented by us. To
// prevent this code from being optimized out, mark that function
// non-readonly in advance.
- AttrBuilder B;
+ AttributeMask B;
B.addAttribute(Attribute::ReadOnly)
.addAttribute(Attribute::ReadNone)
.addAttribute(Attribute::WriteOnly)
@@ -3679,7 +3673,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
maybeMarkSanitizerLibraryCallNoBuiltin(Call, TLI);
}
IRBuilder<> IRB(&CB);
- bool MayCheckCall = ClEagerChecks;
+ bool MayCheckCall = MS.EagerChecks;
if (Function *Func = CB.getCalledFunction()) {
// __sanitizer_unaligned_{load,store} functions may be called by users
// and always expects shadows in the TLS. So don't check them.
@@ -3697,15 +3691,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
continue;
}
unsigned Size = 0;
- Value *Store = nullptr;
- // Compute the Shadow for arg even if it is ByVal, because
- // in that case getShadow() will copy the actual arg shadow to
- // __msan_param_tls.
- Value *ArgShadow = getShadow(A);
- Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
- LLVM_DEBUG(dbgs() << " Arg#" << i << ": " << *A
- << " Shadow: " << *ArgShadow << "\n");
- bool ArgIsInitialized = false;
const DataLayout &DL = F.getParent()->getDataLayout();
bool ByVal = CB.paramHasAttr(i, Attribute::ByVal);
@@ -3716,6 +3701,15 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
insertShadowCheck(A, &CB);
Size = DL.getTypeAllocSize(A->getType());
} else {
+ bool ArgIsInitialized = false;
+ Value *Store = nullptr;
+ // Compute the Shadow for arg even if it is ByVal, because
+ // in that case getShadow() will copy the actual arg shadow to
+ // __msan_param_tls.
+ Value *ArgShadow = getShadow(A);
+ Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
+ LLVM_DEBUG(dbgs() << " Arg#" << i << ": " << *A
+ << " Shadow: " << *ArgShadow << "\n");
if (ByVal) {
// ByVal requires some special handling as it's too big for a single
// load
@@ -3732,10 +3726,14 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), Alignment,
/*isStore*/ false)
.first;
-
- Store = IRB.CreateMemCpy(ArgShadowBase, Alignment, AShadowPtr,
- Alignment, Size);
- // TODO(glider): need to copy origins.
+ if (!PropagateShadow) {
+ Store = IRB.CreateMemSet(ArgShadowBase,
+ Constant::getNullValue(IRB.getInt8Ty()),
+ Size, Alignment);
+ } else {
+ Store = IRB.CreateMemCpy(ArgShadowBase, Alignment, AShadowPtr,
+ Alignment, Size);
+ }
} else {
// Any other parameters mean we need bit-grained tracking of uninit
// data
@@ -3832,10 +3830,10 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
bool HasNoUndef =
F.hasRetAttribute(Attribute::NoUndef);
- bool StoreShadow = !(ClEagerChecks && HasNoUndef);
+ bool StoreShadow = !(MS.EagerChecks && HasNoUndef);
// FIXME: Consider using SpecialCaseList to specify a list of functions that
// must always return fully initialized values. For now, we hardcode "main".
- bool EagerCheck = (ClEagerChecks && HasNoUndef) || (F.getName() == "main");
+ bool EagerCheck = (MS.EagerChecks && HasNoUndef) || (F.getName() == "main");
Value *Shadow = getShadow(RetVal);
bool StoreOrigin = true;
@@ -5359,7 +5357,7 @@ bool MemorySanitizer::sanitizeFunction(Function &F, TargetLibraryInfo &TLI) {
MemorySanitizerVisitor Visitor(F, *this, TLI);
// Clear out readonly/readnone attributes.
- AttrBuilder B;
+ AttributeMask B;
B.addAttribute(Attribute::ReadOnly)
.addAttribute(Attribute::ReadNone)
.addAttribute(Attribute::WriteOnly)
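A brief sketch (editor's addition, not part of the patch) of driving the new EagerChecks knob through MemorySanitizerOptions, whose four-argument constructor and function-level pass appear in the hunks above; it assumes LLVM-14-era headers.

// Editor's illustration only -- not part of the patch; a sketch assuming
// LLVM-14-era headers. EagerChecks now travels in MemorySanitizerOptions and
// the visitor consults MS.EagerChecks rather than the ClEagerChecks cl::opt
// directly.
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Instrumentation/MemorySanitizer.h"

llvm::FunctionPassManager buildMSanFPM() {
  llvm::MemorySanitizerOptions Opts(/*TrackOrigins=*/1, /*Recover=*/false,
                                    /*Kernel=*/false, /*EagerChecks=*/true);
  llvm::FunctionPassManager FPM;
  FPM.addPass(llvm::MemorySanitizerPass(Opts));
  return FPM;
}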
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp b/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
index b6ba1fc2132c..c46415e5b1f4 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
@@ -877,7 +877,10 @@ populateEHOperandBundle(VPCandidateInfo &Cand,
DenseMap<BasicBlock *, ColorVector> &BlockColors,
SmallVectorImpl<OperandBundleDef> &OpBundles) {
auto *OrigCall = dyn_cast<CallBase>(Cand.AnnotatedInst);
- if (OrigCall && !isa<IntrinsicInst>(OrigCall)) {
+ if (!OrigCall)
+ return;
+
+ if (!isa<IntrinsicInst>(OrigCall)) {
// The instrumentation call should belong to the same funclet as a
// non-intrinsic call, so just copy the operand bundle, if any exists.
Optional<OperandBundleUse> ParentFunclet =
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp b/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
index da8ee1f15bf8..d3b60c7add34 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
@@ -917,8 +917,7 @@ void ModuleSanitizerCoverage::InjectTraceForGep(
void ModuleSanitizerCoverage::InjectTraceForLoadsAndStores(
Function &, ArrayRef<LoadInst *> Loads, ArrayRef<StoreInst *> Stores) {
- auto CallbackIdx = [&](const Value *Ptr) -> int {
- auto ElementTy = cast<PointerType>(Ptr->getType())->getElementType();
+ auto CallbackIdx = [&](Type *ElementTy) -> int {
uint64_t TypeSize = DL->getTypeStoreSizeInBits(ElementTy);
return TypeSize == 8 ? 0
: TypeSize == 16 ? 1
@@ -932,7 +931,7 @@ void ModuleSanitizerCoverage::InjectTraceForLoadsAndStores(
for (auto LI : Loads) {
IRBuilder<> IRB(LI);
auto Ptr = LI->getPointerOperand();
- int Idx = CallbackIdx(Ptr);
+ int Idx = CallbackIdx(LI->getType());
if (Idx < 0)
continue;
IRB.CreateCall(SanCovLoadFunction[Idx],
@@ -941,7 +940,7 @@ void ModuleSanitizerCoverage::InjectTraceForLoadsAndStores(
for (auto SI : Stores) {
IRBuilder<> IRB(SI);
auto Ptr = SI->getPointerOperand();
- int Idx = CallbackIdx(Ptr);
+ int Idx = CallbackIdx(SI->getValueOperand()->getType());
if (Idx < 0)
continue;
IRB.CreateCall(SanCovStoreFunction[Idx],
diff --git a/contrib/llvm-project/llvm/lib/Transforms/ObjCARC/ARCRuntimeEntryPoints.h b/contrib/llvm-project/llvm/lib/Transforms/ObjCARC/ARCRuntimeEntryPoints.h
index 764dc5f92707..c11691c613ac 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/ObjCARC/ARCRuntimeEntryPoints.h
+++ b/contrib/llvm-project/llvm/lib/Transforms/ObjCARC/ARCRuntimeEntryPoints.h
@@ -42,7 +42,7 @@ enum class ARCRuntimeEntryPointKind {
Autorelease,
StoreStrong,
RetainRV,
- ClaimRV,
+ UnsafeClaimRV,
RetainAutorelease,
RetainAutoreleaseRV,
};
@@ -62,7 +62,7 @@ public:
Autorelease = nullptr;
StoreStrong = nullptr;
RetainRV = nullptr;
- ClaimRV = nullptr;
+ UnsafeClaimRV = nullptr;
RetainAutorelease = nullptr;
RetainAutoreleaseRV = nullptr;
}
@@ -87,9 +87,9 @@ public:
case ARCRuntimeEntryPointKind::RetainRV:
return getIntrinsicEntryPoint(RetainRV,
Intrinsic::objc_retainAutoreleasedReturnValue);
- case ARCRuntimeEntryPointKind::ClaimRV:
+ case ARCRuntimeEntryPointKind::UnsafeClaimRV:
return getIntrinsicEntryPoint(
- ClaimRV, Intrinsic::objc_unsafeClaimAutoreleasedReturnValue);
+ UnsafeClaimRV, Intrinsic::objc_unsafeClaimAutoreleasedReturnValue);
case ARCRuntimeEntryPointKind::RetainAutorelease:
return getIntrinsicEntryPoint(RetainAutorelease,
Intrinsic::objc_retainAutorelease);
@@ -127,7 +127,7 @@ private:
Function *RetainRV = nullptr;
/// Declaration for objc_unsafeClaimAutoreleasedReturnValue().
- Function *ClaimRV = nullptr;
+ Function *UnsafeClaimRV = nullptr;
/// Declaration for objc_retainAutorelease().
Function *RetainAutorelease = nullptr;
diff --git a/contrib/llvm-project/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp b/contrib/llvm-project/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp
index 4921209f041b..de0f5803b4c7 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp
@@ -194,9 +194,6 @@ llvm::objcarc::Depends(DependenceKind Flavor, Instruction *Inst,
return CanInterruptRV(Class);
}
}
-
- case RetainRVDep:
- return CanInterruptRV(GetBasicARCInstKind(Inst));
}
llvm_unreachable("Invalid dependence flavor");
diff --git a/contrib/llvm-project/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.h b/contrib/llvm-project/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.h
index cf4c05ebe91c..dd6a1c3f9795 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.h
+++ b/contrib/llvm-project/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.h
@@ -46,8 +46,7 @@ enum DependenceKind {
AutoreleasePoolBoundary,
CanChangeRetainCount,
RetainAutoreleaseDep, ///< Blocks objc_retainAutorelease.
- RetainAutoreleaseRVDep, ///< Blocks objc_retainAutoreleaseReturnValue.
- RetainRVDep ///< Blocks objc_retainAutoreleasedReturnValue.
+ RetainAutoreleaseRVDep ///< Blocks objc_retainAutoreleaseReturnValue.
};
/// Find dependent instructions. If there is exactly one dependent instruction,
diff --git a/contrib/llvm-project/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp b/contrib/llvm-project/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp
index c2ed94e8e1f6..9e2832827686 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp
@@ -433,7 +433,7 @@ bool ObjCARCContract::tryToPeepholeInstruction(
// If we succeed in our optimization, fall through.
LLVM_FALLTHROUGH;
case ARCInstKind::RetainRV:
- case ARCInstKind::ClaimRV: {
+ case ARCInstKind::UnsafeClaimRV: {
bool IsInstContainedInBundle = BundledInsts->contains(Inst);
// Return now if the target doesn't need a special inline-asm marker. Return
diff --git a/contrib/llvm-project/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp b/contrib/llvm-project/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
index 0fa4904456cd..b6dc97f1e43f 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
@@ -515,7 +515,7 @@ class ObjCARCOpt {
Function &F, DenseMap<BasicBlock *, ColorVector> &BlockColors,
Instruction *Inst, ARCInstKind Class, const Value *Arg);
- /// Try to optimize an AutoreleaseRV with a RetainRV or ClaimRV. If the
+ /// Try to optimize an AutoreleaseRV with a RetainRV or UnsafeClaimRV. If the
/// optimization occurs, returns true to indicate that the caller should
/// assume the instructions are dead.
bool OptimizeInlinedAutoreleaseRVCall(
@@ -705,14 +705,14 @@ bool ObjCARCOpt::OptimizeInlinedAutoreleaseRVCall(
return true;
}
- // ClaimRV is a frontend peephole for RetainRV + Release. Since the
- // AutoreleaseRV and RetainRV cancel out, replace the ClaimRV with a Release.
- assert(Class == ARCInstKind::ClaimRV);
+ // UnsafeClaimRV is a frontend peephole for RetainRV + Release. Since the
+ // AutoreleaseRV and RetainRV cancel out, replace UnsafeClaimRV with Release.
+ assert(Class == ARCInstKind::UnsafeClaimRV);
Value *CallArg = cast<CallInst>(Inst)->getArgOperand(0);
CallInst *Release = CallInst::Create(
EP.get(ARCRuntimeEntryPointKind::Release), CallArg, "", Inst);
- assert(IsAlwaysTail(ARCInstKind::ClaimRV) &&
- "Expected ClaimRV to be safe to tail call");
+ assert(IsAlwaysTail(ARCInstKind::UnsafeClaimRV) &&
+ "Expected UnsafeClaimRV to be safe to tail call");
Release->setTailCall();
Inst->replaceAllUsesWith(CallArg);
EraseInstruction(Inst);
@@ -810,7 +810,7 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
BlockColors = colorEHFunclets(F);
// Store any delayed AutoreleaseRV intrinsics, so they can be easily paired
- // with RetainRV and ClaimRV.
+ // with RetainRV and UnsafeClaimRV.
Instruction *DelayedAutoreleaseRV = nullptr;
const Value *DelayedAutoreleaseRVArg = nullptr;
auto setDelayedAutoreleaseRV = [&](Instruction *AutoreleaseRV) {
@@ -837,7 +837,7 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
return false;
// Given the frontend rules for emitting AutoreleaseRV, RetainRV, and
- // ClaimRV, it's probably safe to skip over even opaque function calls
+ // UnsafeClaimRV, it's probably safe to skip over even opaque function calls
// here since OptimizeInlinedAutoreleaseRVCall will confirm that they
// have the same RCIdentityRoot. However, what really matters is
// skipping instructions or intrinsics that the inliner could leave behind;
@@ -881,7 +881,7 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
setDelayedAutoreleaseRV(Inst);
continue;
case ARCInstKind::RetainRV:
- case ARCInstKind::ClaimRV:
+ case ARCInstKind::UnsafeClaimRV:
if (DelayedAutoreleaseRV) {
// We have a potential RV pair. Check if they cancel out.
if (OptimizeInlinedAutoreleaseRVCall(F, BlockColors, Inst, Arg, Class,
@@ -979,9 +979,8 @@ void ObjCARCOpt::OptimizeIndividualCallImpl(
CallInst *CI = cast<CallInst>(Inst);
if (IsNullOrUndef(CI->getArgOperand(0))) {
Changed = true;
- Type *Ty = CI->getArgOperand(0)->getType();
- new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
- Constant::getNullValue(Ty), CI);
+ new StoreInst(ConstantInt::getTrue(CI->getContext()),
+ UndefValue::get(Type::getInt1PtrTy(CI->getContext())), CI);
Value *NewValue = UndefValue::get(CI->getType());
LLVM_DEBUG(
dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
@@ -999,9 +998,8 @@ void ObjCARCOpt::OptimizeIndividualCallImpl(
if (IsNullOrUndef(CI->getArgOperand(0)) ||
IsNullOrUndef(CI->getArgOperand(1))) {
Changed = true;
- Type *Ty = CI->getArgOperand(0)->getType();
- new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
- Constant::getNullValue(Ty), CI);
+ new StoreInst(ConstantInt::getTrue(CI->getContext()),
+ UndefValue::get(Type::getInt1PtrTy(CI->getContext())), CI);
Value *NewValue = UndefValue::get(CI->getType());
LLVM_DEBUG(
@@ -1165,7 +1163,7 @@ void ObjCARCOpt::OptimizeIndividualCallImpl(
DepInst = findSingleDependency(AutoreleasePoolBoundary, Arg,
Inst->getParent(), Inst, PA);
break;
- case ARCInstKind::ClaimRV:
+ case ARCInstKind::UnsafeClaimRV:
case ARCInstKind::RetainRV:
case ARCInstKind::AutoreleaseRV:
// Don't move these; the RV optimization depends on the autoreleaseRV
diff --git a/contrib/llvm-project/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysis.h b/contrib/llvm-project/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysis.h
index 6d0a67c91cfa..1624cf26094a 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysis.h
+++ b/contrib/llvm-project/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysis.h
@@ -32,7 +32,6 @@
namespace llvm {
class AAResults;
-class DataLayout;
class PHINode;
class SelectInst;
class Value;
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/ADCE.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/ADCE.cpp
index b693acceb3f6..1cda206a7e14 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/ADCE.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/ADCE.cpp
@@ -579,6 +579,7 @@ bool AggressiveDeadCodeElimination::updateDeadRegions() {
// Don't compute the post ordering unless we needed it.
bool HavePostOrder = false;
bool Changed = false;
+ SmallVector<DominatorTree::UpdateType, 10> DeletedEdges;
for (auto *BB : BlocksWithDeadTerminators) {
auto &Info = BlockInfo[BB];
@@ -617,7 +618,6 @@ bool AggressiveDeadCodeElimination::updateDeadRegions() {
makeUnconditional(BB, PreferredSucc->BB);
// Inform the dominators about the deleted CFG edges.
- SmallVector<DominatorTree::UpdateType, 4> DeletedEdges;
for (auto *Succ : RemovedSuccessors) {
// It might have happened that the same successor appeared multiple times
// and the CFG edge wasn't really removed.
@@ -629,13 +629,14 @@ bool AggressiveDeadCodeElimination::updateDeadRegions() {
}
}
- DomTreeUpdater(DT, &PDT, DomTreeUpdater::UpdateStrategy::Eager)
- .applyUpdates(DeletedEdges);
-
NumBranchesRemoved += 1;
Changed = true;
}
+ if (!DeletedEdges.empty())
+ DomTreeUpdater(DT, &PDT, DomTreeUpdater::UpdateStrategy::Eager)
+ .applyUpdates(DeletedEdges);
+
return Changed;
}
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
index 37a7053d778e..25e8c3ef3b48 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
@@ -414,6 +414,14 @@ void ConstantHoistingPass::collectConstantCandidates(
IntegerType *PtrIntTy = DL->getIntPtrType(*Ctx, GVPtrTy->getAddressSpace());
APInt Offset(DL->getTypeSizeInBits(PtrIntTy), /*val*/0, /*isSigned*/true);
auto *GEPO = cast<GEPOperator>(ConstExpr);
+
+ // TODO: If we have a mix of inbounds and non-inbounds GEPs, then basing a
+ // non-inbounds GEP on an inbounds GEP is potentially incorrect. Restrict to
+ // inbounds GEP for now -- alternatively, we could drop inbounds from the
+  // constant expression.
+ if (!GEPO->isInBounds())
+ return;
+
if (!GEPO->accumulateConstantOffset(*DL, Offset))
return;
@@ -470,7 +478,7 @@ void ConstantHoistingPass::collectConstantCandidates(
// Visit constant expressions that have constant integers.
if (auto ConstExpr = dyn_cast<ConstantExpr>(Opnd)) {
// Handle constant gep expressions.
- if (ConstHoistGEP && ConstExpr->isGEPWithNoNotionalOverIndexing())
+ if (ConstHoistGEP && isa<GEPOperator>(ConstExpr))
collectConstantCandidates(ConstCandMap, Inst, Idx, ConstExpr);
// Only visit constant cast expressions.
@@ -810,7 +818,7 @@ void ConstantHoistingPass::emitBaseConstants(Instruction *Base,
// Visit constant expression.
if (auto ConstExpr = dyn_cast<ConstantExpr>(Opnd)) {
- if (ConstExpr->isGEPWithNoNotionalOverIndexing()) {
+ if (isa<GEPOperator>(ConstExpr)) {
// Operand is a ConstantGEP, replace it.
updateOperand(ConstUser.Inst, ConstUser.OpndIdx, Mat);
return;
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
index 7f2d5d7d9987..13963657d183 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
@@ -43,6 +43,51 @@ DEBUG_COUNTER(EliminatedCounter, "conds-eliminated",
static int64_t MaxConstraintValue = std::numeric_limits<int64_t>::max();
+namespace {
+struct ConstraintTy {
+ SmallVector<int64_t, 8> Coefficients;
+
+ ConstraintTy(SmallVector<int64_t, 8> Coefficients)
+ : Coefficients(Coefficients) {}
+
+ unsigned size() const { return Coefficients.size(); }
+};
+
+/// Struct to manage a list of constraints.
+struct ConstraintListTy {
+ SmallVector<ConstraintTy, 4> Constraints;
+
+ ConstraintListTy() {}
+
+ ConstraintListTy(const SmallVector<ConstraintTy, 4> &Constraints)
+ : Constraints(Constraints) {}
+
+ void mergeIn(const ConstraintListTy &Other) {
+ append_range(Constraints, Other.Constraints);
+ }
+
+ unsigned size() const { return Constraints.size(); }
+
+ unsigned empty() const { return Constraints.empty(); }
+
+ /// Returns true if any constraint has a non-zero coefficient for any of the
+ /// newly added indices. Zero coefficients for new indices are removed. If it
+  /// returns false, no new variables need to be added to the system.
+ bool needsNewIndices(const DenseMap<Value *, unsigned> &NewIndices) {
+ assert(size() == 1);
+ for (unsigned I = 0; I < NewIndices.size(); ++I) {
+ int64_t Last = get(0).Coefficients.pop_back_val();
+ if (Last != 0)
+ return true;
+ }
+ return false;
+ }
+
+ ConstraintTy &get(unsigned I) { return Constraints[I]; }
+};
+
+} // namespace
+
// Decomposes \p V into a vector of pairs of the form { c, X } where c * X. The
// sum of the pairs equals \p V. The first pair is the constant-factor and X
// must be nullptr. If the expression cannot be decomposed, returns an empty
@@ -108,24 +153,15 @@ static SmallVector<std::pair<int64_t, Value *>, 4> decompose(Value *V) {
if (match(V, m_NUWSub(m_Value(Op0), m_ConstantInt(CI))))
return {{-1 * CI->getSExtValue(), nullptr}, {1, Op0}};
if (match(V, m_NUWSub(m_Value(Op0), m_Value(Op1))))
- return {{0, nullptr}, {1, Op0}, {1, Op1}};
+ return {{0, nullptr}, {1, Op0}, {-1, Op1}};
return {{0, nullptr}, {1, V}};
}
-struct ConstraintTy {
- SmallVector<int64_t, 8> Coefficients;
-
- ConstraintTy(SmallVector<int64_t, 8> Coefficients)
- : Coefficients(Coefficients) {}
-
- unsigned size() const { return Coefficients.size(); }
-};
-
/// Turn a condition \p CmpI into a vector of constraints, using indices from \p
/// Value2Index. Additional indices for newly discovered values are added to \p
/// NewIndices.
-static SmallVector<ConstraintTy, 4>
+static ConstraintListTy
getConstraint(CmpInst::Predicate Pred, Value *Op0, Value *Op1,
const DenseMap<Value *, unsigned> &Value2Index,
DenseMap<Value *, unsigned> &NewIndices) {
@@ -151,11 +187,15 @@ getConstraint(CmpInst::Predicate Pred, Value *Op0, Value *Op1,
Value2Index, NewIndices);
if (Pred == CmpInst::ICMP_EQ) {
+ if (match(Op1, m_Zero()))
+ return getConstraint(CmpInst::ICMP_ULE, Op0, Op1, Value2Index,
+ NewIndices);
+
auto A =
getConstraint(CmpInst::ICMP_UGE, Op0, Op1, Value2Index, NewIndices);
auto B =
getConstraint(CmpInst::ICMP_ULE, Op0, Op1, Value2Index, NewIndices);
- append_range(A, B);
+ A.mergeIn(B);
return A;
}
@@ -200,10 +240,10 @@ getConstraint(CmpInst::Predicate Pred, Value *Op0, Value *Op1,
R[GetOrAddIndex(KV.second)] -= KV.first;
R[0] = Offset1 + Offset2 + (Pred == CmpInst::ICMP_ULT ? -1 : 0);
- return {R};
+ return {{R}};
}
-static SmallVector<ConstraintTy, 4>
+static ConstraintListTy
getConstraint(CmpInst *Cmp, const DenseMap<Value *, unsigned> &Value2Index,
DenseMap<Value *, unsigned> &NewIndices) {
return getConstraint(Cmp->getPredicate(), Cmp->getOperand(0),
@@ -397,21 +437,10 @@ static bool eliminateConstraints(Function &F, DominatorTree &DT) {
if (R.size() != 1)
continue;
- // Check if all coefficients of new indices are 0 after building the
- // constraint. Skip if any of the new indices has a non-null
- // coefficient.
- bool HasNewIndex = false;
- for (unsigned I = 0; I < NewIndices.size(); ++I) {
- int64_t Last = R[0].Coefficients.pop_back_val();
- if (Last != 0) {
- HasNewIndex = true;
- break;
- }
- }
- if (HasNewIndex || R[0].size() == 1)
+ if (R.needsNewIndices(NewIndices))
continue;
- if (CS.isConditionImplied(R[0].Coefficients)) {
+ if (CS.isConditionImplied(R.get(0).Coefficients)) {
if (!DebugCounter::shouldExecute(EliminatedCounter))
continue;
@@ -432,7 +461,7 @@ static bool eliminateConstraints(Function &F, DominatorTree &DT) {
Changed = true;
}
if (CS.isConditionImplied(
- ConstraintSystem::negate(R[0].Coefficients))) {
+ ConstraintSystem::negate(R.get(0).Coefficients))) {
if (!DebugCounter::shouldExecute(EliminatedCounter))
continue;
@@ -479,7 +508,7 @@ static bool eliminateConstraints(Function &F, DominatorTree &DT) {
LLVM_DEBUG(dbgs() << "Adding " << *CB.Condition << " " << CB.Not << "\n");
bool Added = false;
- for (auto &C : R) {
+ for (auto &C : R.Constraints) {
auto Coeffs = C.Coefficients;
LLVM_DEBUG({
dbgs() << " constraint: ";
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index eadbb4293539..ae636e7b61f7 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -699,17 +699,14 @@ bool isNoopIntrinsic(Instruction *I) {
}
// Check if we can ignore \p D for DSE.
-bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller,
- const TargetLibraryInfo &TLI) {
+bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller) {
Instruction *DI = D->getMemoryInst();
// Calls that only access inaccessible memory cannot read or write any memory
// locations we consider for elimination.
if (auto *CB = dyn_cast<CallBase>(DI))
- if (CB->onlyAccessesInaccessibleMemory()) {
- if (isAllocLikeFn(DI, &TLI))
- return false;
+ if (CB->onlyAccessesInaccessibleMemory())
return true;
- }
+
// We can eliminate stores to locations not visible to the caller across
// throwing instructions.
if (DI->mayThrow() && !DefVisibleToCaller)
@@ -759,10 +756,8 @@ struct DSEState {
SmallVector<MemoryDef *, 64> MemDefs;
// Any that should be skipped as they are already deleted
SmallPtrSet<MemoryAccess *, 4> SkipStores;
- // Keep track of all of the objects that are invisible to the caller before
- // the function returns.
- // SmallPtrSet<const Value *, 16> InvisibleToCallerBeforeRet;
- DenseMap<const Value *, bool> InvisibleToCallerBeforeRet;
+  // Keep track of whether a given object is captured before return or not.
+ DenseMap<const Value *, bool> CapturedBeforeReturn;
// Keep track of all of the objects that are invisible to the caller after
// the function returns.
DenseMap<const Value *, bool> InvisibleToCallerAfterRet;
@@ -805,12 +800,8 @@ struct DSEState {
// Treat byval or inalloca arguments the same as Allocas, stores to them are
// dead at the end of the function.
for (Argument &AI : F.args())
- if (AI.hasPassPointeeByValueCopyAttr()) {
- // For byval, the caller doesn't know the address of the allocation.
- if (AI.hasByValAttr())
- InvisibleToCallerBeforeRet.insert({&AI, true});
+ if (AI.hasPassPointeeByValueCopyAttr())
InvisibleToCallerAfterRet.insert({&AI, true});
- }
// Collect whether there is any irreducible control flow in the function.
ContainsIrreducibleLoops = mayContainIrreducibleControl(F, &LI);
@@ -835,6 +826,20 @@ struct DSEState {
if (!isGuaranteedLoopIndependent(DeadI, KillingI, DeadLoc))
return OW_Unknown;
+ const Value *DeadPtr = DeadLoc.Ptr->stripPointerCasts();
+ const Value *KillingPtr = KillingLoc.Ptr->stripPointerCasts();
+ const Value *DeadUndObj = getUnderlyingObject(DeadPtr);
+ const Value *KillingUndObj = getUnderlyingObject(KillingPtr);
+
+ // Check whether the killing store overwrites the whole object, in which
+ // case the size/offset of the dead store does not matter.
+ if (DeadUndObj == KillingUndObj && KillingLoc.Size.isPrecise()) {
+ uint64_t KillingUndObjSize = getPointerSize(KillingUndObj, DL, TLI, &F);
+ if (KillingUndObjSize != MemoryLocation::UnknownSize &&
+ KillingUndObjSize == KillingLoc.Size.getValue())
+ return OW_Complete;
+ }
+
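// Illustrative sketch, not part of the diff: the whole-object case the hoisted
// check above handles. A killing store that covers the entire identified local
// object makes any earlier store to that object dead, whatever its offset.
#include <cstring>
struct Pair { int A, B; };       // hypothetical local aggregate
int whole_object() {
  Pair P;
  P.A = 1;                       // dead: the memset below covers all of P
  std::memset(&P, 0, sizeof P);  // killing store over the whole object
  return P.B;
}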
// FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll
// get imprecise values here, though (except for unknown sizes).
if (!KillingLoc.Size.isPrecise() || !DeadLoc.Size.isPrecise()) {
@@ -875,14 +880,6 @@ struct DSEState {
return OW_Complete;
}
- // Check to see if the killing store is to the entire object (either a
- // global, an alloca, or a byval/inalloca argument). If so, then it clearly
- // overwrites any other store to the same object.
- const Value *DeadPtr = DeadLoc.Ptr->stripPointerCasts();
- const Value *KillingPtr = KillingLoc.Ptr->stripPointerCasts();
- const Value *DeadUndObj = getUnderlyingObject(DeadPtr);
- const Value *KillingUndObj = getUnderlyingObject(KillingPtr);
-
// If we can't resolve the same pointers to the same object, then we can't
// analyze them at all.
if (DeadUndObj != KillingUndObj) {
@@ -896,12 +893,6 @@ struct DSEState {
return OW_Unknown;
}
- // If the KillingI store is to a recognizable object, get its size.
- uint64_t KillingUndObjSize = getPointerSize(KillingUndObj, DL, TLI, &F);
- if (KillingUndObjSize != MemoryLocation::UnknownSize)
- if (KillingUndObjSize == KillingSize && KillingUndObjSize >= DeadSize)
- return OW_Complete;
-
// Okay, we have stores to two completely different pointers. Try to
// decompose the pointer into a "base + constant_offset" form. If the base
// pointers are equal, then we can reason about the two stores.
@@ -957,31 +948,30 @@ struct DSEState {
return true;
auto I = InvisibleToCallerAfterRet.insert({V, false});
if (I.second) {
- if (!isInvisibleToCallerBeforeRet(V)) {
+ if (!isInvisibleToCallerOnUnwind(V)) {
I.first->second = false;
- } else {
- auto *Inst = dyn_cast<Instruction>(V);
- if (Inst && isAllocLikeFn(Inst, &TLI))
- I.first->second = !PointerMayBeCaptured(V, true, false);
+ } else if (isNoAliasCall(V)) {
+ I.first->second = !PointerMayBeCaptured(V, true, false);
}
}
return I.first->second;
}
- bool isInvisibleToCallerBeforeRet(const Value *V) {
- if (isa<AllocaInst>(V))
+ bool isInvisibleToCallerOnUnwind(const Value *V) {
+ bool RequiresNoCaptureBeforeUnwind;
+ if (!isNotVisibleOnUnwind(V, RequiresNoCaptureBeforeUnwind))
+ return false;
+ if (!RequiresNoCaptureBeforeUnwind)
return true;
- auto I = InvisibleToCallerBeforeRet.insert({V, false});
- if (I.second) {
- auto *Inst = dyn_cast<Instruction>(V);
- if (Inst && isAllocLikeFn(Inst, &TLI))
- // NOTE: This could be made more precise by PointerMayBeCapturedBefore
- // with the killing MemoryDef. But we refrain from doing so for now to
- // limit compile-time and this does not cause any changes to the number
- // of stores removed on a large test set in practice.
- I.first->second = !PointerMayBeCaptured(V, false, true);
- }
- return I.first->second;
+
+ auto I = CapturedBeforeReturn.insert({V, true});
+ if (I.second)
+ // NOTE: This could be made more precise by PointerMayBeCapturedBefore
+ // with the killing MemoryDef. But we refrain from doing so for now to
+ // limit compile-time and this does not cause any changes to the number
+ // of stores removed on a large test set in practice.
+ I.first->second = PointerMayBeCaptured(V, false, true);
+ return !I.first->second;
}
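// Illustrative sketch, not part of the diff: the kind of store the query above
// lets DSE remove across a potentially throwing call, because an unwinding
// caller can never observe a non-escaping alloca.
void may_throw();                // hypothetical external call that may throw
int invisible_on_unwind() {
  int Local = 0;                 // alloca that never escapes this function
  Local = 42;                    // dead: overwritten below, invisible on unwind
  may_throw();
  Local = 7;
  return Local;
}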
Optional<MemoryLocation> getLocForWrite(Instruction *I) const {
@@ -1269,8 +1259,7 @@ struct DSEState {
MemoryDef *CurrentDef = cast<MemoryDef>(Current);
Instruction *CurrentI = CurrentDef->getMemoryInst();
- if (canSkipDef(CurrentDef, !isInvisibleToCallerBeforeRet(KillingUndObj),
- TLI)) {
+ if (canSkipDef(CurrentDef, !isInvisibleToCallerOnUnwind(KillingUndObj))) {
CanOptimize = false;
continue;
}
@@ -1442,7 +1431,7 @@ struct DSEState {
continue;
}
- if (UseInst->mayThrow() && !isInvisibleToCallerBeforeRet(KillingUndObj)) {
+ if (UseInst->mayThrow() && !isInvisibleToCallerOnUnwind(KillingUndObj)) {
LLVM_DEBUG(dbgs() << " ... found throwing instruction\n");
return None;
}
@@ -1623,7 +1612,7 @@ struct DSEState {
// First see if we can ignore it by using the fact that KillingI is an
// alloca/alloca like object that is not visible to the caller during
// execution of the function.
- if (KillingUndObj && isInvisibleToCallerBeforeRet(KillingUndObj))
+ if (KillingUndObj && isInvisibleToCallerOnUnwind(KillingUndObj))
return false;
if (KillingI->getParent() == DeadI->getParent())
@@ -1639,7 +1628,7 @@ struct DSEState {
bool isDSEBarrier(const Value *KillingUndObj, Instruction *DeadI) {
  // If DeadI may throw it acts as a barrier, unless we are accessing an
  // alloca or alloca-like object that does not escape.
- if (DeadI->mayThrow() && !isInvisibleToCallerBeforeRet(KillingUndObj))
+ if (DeadI->mayThrow() && !isInvisibleToCallerOnUnwind(KillingUndObj))
return true;
// If DeadI is an atomic load/store stronger than monotonic, do not try to
@@ -1696,6 +1685,84 @@ struct DSEState {
return MadeChange;
}
+  /// If we have a zero-initializing memset following a call to malloc,
+ /// try folding it into a call to calloc.
+ bool tryFoldIntoCalloc(MemoryDef *Def, const Value *DefUO) {
+ Instruction *DefI = Def->getMemoryInst();
+ MemSetInst *MemSet = dyn_cast<MemSetInst>(DefI);
+ if (!MemSet)
+ // TODO: Could handle zero store to small allocation as well.
+ return false;
+ Constant *StoredConstant = dyn_cast<Constant>(MemSet->getValue());
+ if (!StoredConstant || !StoredConstant->isNullValue())
+ return false;
+
+ if (!isRemovable(DefI))
+      // The memset might be volatile.
+ return false;
+
+ if (F.hasFnAttribute(Attribute::SanitizeMemory) ||
+ F.hasFnAttribute(Attribute::SanitizeAddress) ||
+ F.hasFnAttribute(Attribute::SanitizeHWAddress) ||
+ F.getName() == "calloc")
+ return false;
+ auto *Malloc = const_cast<CallInst *>(dyn_cast<CallInst>(DefUO));
+ if (!Malloc)
+ return false;
+ auto *InnerCallee = Malloc->getCalledFunction();
+ if (!InnerCallee)
+ return false;
+ LibFunc Func;
+ if (!TLI.getLibFunc(*InnerCallee, Func) || !TLI.has(Func) ||
+ Func != LibFunc_malloc)
+ return false;
+
+ auto shouldCreateCalloc = [](CallInst *Malloc, CallInst *Memset) {
+      // Check for the br(icmp(ptr, null), truebb, falsebb) pattern at the end
+      // of the malloc block.
+ auto *MallocBB = Malloc->getParent(),
+ *MemsetBB = Memset->getParent();
+ if (MallocBB == MemsetBB)
+ return true;
+ auto *Ptr = Memset->getArgOperand(0);
+ auto *TI = MallocBB->getTerminator();
+ ICmpInst::Predicate Pred;
+ BasicBlock *TrueBB, *FalseBB;
+ if (!match(TI, m_Br(m_ICmp(Pred, m_Specific(Ptr), m_Zero()), TrueBB,
+ FalseBB)))
+ return false;
+ if (Pred != ICmpInst::ICMP_EQ || MemsetBB != FalseBB)
+ return false;
+ return true;
+ };
+
+ if (Malloc->getOperand(0) != MemSet->getLength())
+ return false;
+ if (!shouldCreateCalloc(Malloc, MemSet) ||
+ !DT.dominates(Malloc, MemSet) ||
+ !memoryIsNotModifiedBetween(Malloc, MemSet, BatchAA, DL, &DT))
+ return false;
+ IRBuilder<> IRB(Malloc);
+ const auto &DL = Malloc->getModule()->getDataLayout();
+ auto *Calloc =
+ emitCalloc(ConstantInt::get(IRB.getIntPtrTy(DL), 1),
+ Malloc->getArgOperand(0), IRB, TLI);
+ if (!Calloc)
+ return false;
+ MemorySSAUpdater Updater(&MSSA);
+ auto *LastDef =
+ cast<MemoryDef>(Updater.getMemorySSA()->getMemoryAccess(Malloc));
+ auto *NewAccess =
+ Updater.createMemoryAccessAfter(cast<Instruction>(Calloc), LastDef,
+ LastDef);
+ auto *NewAccessMD = cast<MemoryDef>(NewAccess);
+ Updater.insertDef(NewAccessMD, /*RenameUses=*/true);
+ Updater.removeMemoryAccess(Malloc);
+ Malloc->replaceAllUsesWith(Calloc);
+ Malloc->eraseFromParent();
+ return true;
+ }
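// Illustrative sketch, not part of the diff: the source-level shape the fold
// above targets. A malloc whose result, when non-null, is immediately zeroed
// over the full allocation can be rewritten as calloc(1, n). Function names
// here are hypothetical.
#include <cstdlib>
#include <cstring>
void *zeroed_alloc(std::size_t N) {
  void *P = std::malloc(N);
  if (P)                    // non-null guard corresponds to the br(icmp) pattern
    std::memset(P, 0, N);   // memset length equals the malloc size
  return P;                 // DSE replaces the pair with std::calloc(1, N)
}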
+
/// \returns true if \p Def is a no-op store, either because it
/// directly stores back a loaded value or stores zero to a calloced object.
bool storeIsNoop(MemoryDef *Def, const Value *DefUO) {
@@ -1713,81 +1780,15 @@ struct DSEState {
if (!isRemovable(DefI))
return false;
- if (StoredConstant && StoredConstant->isNullValue()) {
- auto *DefUOInst = dyn_cast<Instruction>(DefUO);
- if (DefUOInst) {
- if (isCallocLikeFn(DefUOInst, &TLI)) {
- auto *UnderlyingDef =
- cast<MemoryDef>(MSSA.getMemoryAccess(DefUOInst));
- // If UnderlyingDef is the clobbering access of Def, no instructions
- // between them can modify the memory location.
- auto *ClobberDef =
- MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def);
- return UnderlyingDef == ClobberDef;
- }
-
- if (MemSet) {
- if (F.hasFnAttribute(Attribute::SanitizeMemory) ||
- F.hasFnAttribute(Attribute::SanitizeAddress) ||
- F.hasFnAttribute(Attribute::SanitizeHWAddress) ||
- F.getName() == "calloc")
- return false;
- auto *Malloc = const_cast<CallInst *>(dyn_cast<CallInst>(DefUOInst));
- if (!Malloc)
- return false;
- auto *InnerCallee = Malloc->getCalledFunction();
- if (!InnerCallee)
- return false;
- LibFunc Func;
- if (!TLI.getLibFunc(*InnerCallee, Func) || !TLI.has(Func) ||
- Func != LibFunc_malloc)
- return false;
-
- auto shouldCreateCalloc = [](CallInst *Malloc, CallInst *Memset) {
- // Check for br(icmp ptr, null), truebb, falsebb) pattern at the end
- // of malloc block
- auto *MallocBB = Malloc->getParent(),
- *MemsetBB = Memset->getParent();
- if (MallocBB == MemsetBB)
- return true;
- auto *Ptr = Memset->getArgOperand(0);
- auto *TI = MallocBB->getTerminator();
- ICmpInst::Predicate Pred;
- BasicBlock *TrueBB, *FalseBB;
- if (!match(TI, m_Br(m_ICmp(Pred, m_Specific(Ptr), m_Zero()), TrueBB,
- FalseBB)))
- return false;
- if (Pred != ICmpInst::ICMP_EQ || MemsetBB != FalseBB)
- return false;
- return true;
- };
-
- if (Malloc->getOperand(0) == MemSet->getLength()) {
- if (shouldCreateCalloc(Malloc, MemSet) &&
- DT.dominates(Malloc, MemSet) &&
- memoryIsNotModifiedBetween(Malloc, MemSet, BatchAA, DL, &DT)) {
- IRBuilder<> IRB(Malloc);
- const auto &DL = Malloc->getModule()->getDataLayout();
- if (auto *Calloc =
- emitCalloc(ConstantInt::get(IRB.getIntPtrTy(DL), 1),
- Malloc->getArgOperand(0), IRB, TLI)) {
- MemorySSAUpdater Updater(&MSSA);
- auto *LastDef = cast<MemoryDef>(
- Updater.getMemorySSA()->getMemoryAccess(Malloc));
- auto *NewAccess = Updater.createMemoryAccessAfter(
- cast<Instruction>(Calloc), LastDef, LastDef);
- auto *NewAccessMD = cast<MemoryDef>(NewAccess);
- Updater.insertDef(NewAccessMD, /*RenameUses=*/true);
- Updater.removeMemoryAccess(Malloc);
- Malloc->replaceAllUsesWith(Calloc);
- Malloc->eraseFromParent();
- return true;
- }
- return false;
- }
- }
- }
- }
+ if (StoredConstant && isAllocationFn(DefUO, &TLI)) {
+ auto *CB = cast<CallBase>(DefUO);
+ auto *InitC = getInitialValueOfAllocation(CB, &TLI,
+ StoredConstant->getType());
+ // If the clobbering access is LiveOnEntry, no instructions between them
+ // can modify the memory location.
+ if (InitC && InitC == StoredConstant)
+ return MSSA.isLiveOnEntryDef(
+ MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def));
}
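// Illustrative sketch, not part of the diff: a no-op store recognised by the
// branch above -- the store writes back the allocation's known initial value.
#include <cstdlib>
#include <cstring>
int *redundant_zero(std::size_t N) {
  int *P = static_cast<int *>(std::calloc(N, sizeof(int)));
  if (P)
    std::memset(P, 0, N * sizeof(int)); // stores the initial value: removable
  return P;
}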
if (!Store)
@@ -2074,6 +2075,15 @@ static bool eliminateDeadStores(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
MadeChange = true;
continue;
}
+
+ // Can we form a calloc from a memset/malloc pair?
+ if (!Shortend && State.tryFoldIntoCalloc(KillingDef, KillingUndObj)) {
+ LLVM_DEBUG(dbgs() << "DSE: Remove memset after forming calloc:\n"
+ << " DEAD: " << *KillingI << '\n');
+ State.deleteDeadInstruction(KillingI);
+ MadeChange = true;
+ continue;
+ }
}
if (EnablePartialOverwriteTracking)
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
index a24997dd3fd4..59b934c16c8a 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -827,10 +827,13 @@ private:
const ParseMemoryInst &Later);
Value *getOrCreateResult(Value *Inst, Type *ExpectedType) const {
+ // TODO: We could insert relevant casts on type mismatch here.
if (auto *LI = dyn_cast<LoadInst>(Inst))
- return LI;
- if (auto *SI = dyn_cast<StoreInst>(Inst))
- return SI->getValueOperand();
+ return LI->getType() == ExpectedType ? LI : nullptr;
+ else if (auto *SI = dyn_cast<StoreInst>(Inst)) {
+ Value *V = SI->getValueOperand();
+ return V->getType() == ExpectedType ? V : nullptr;
+ }
assert(isa<IntrinsicInst>(Inst) && "Instruction not supported");
auto *II = cast<IntrinsicInst>(Inst);
if (isHandledNonTargetIntrinsic(II->getIntrinsicID()))
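// Illustrative sketch, not part of the diff: the mismatch the new type checks
// guard against. A stored value of one type cannot simply stand in for a later
// load of a different type, so getOrCreateResult now declines rather than
// returning a wrongly typed value (a cast could be inserted in the future).
#include <cstring>
float load_other_type(float *P, int V) {
  std::memcpy(P, &V, sizeof V);  // the earlier store writes an int bit pattern
  return *P;                     // the later load wants a float: no plain reuse
}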
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/GVN.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/GVN.cpp
index 00506fb86006..398c93e8758c 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -1104,20 +1104,19 @@ bool GVNPass::AnalyzeLoadAvailability(LoadInst *Load, MemDepResult DepInfo,
}
assert(DepInfo.isDef() && "follows from above");
- // Loading the allocation -> undef.
- if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI) ||
- isAlignedAllocLikeFn(DepInst, TLI) ||
- // Loading immediately after lifetime begin -> undef.
- isLifetimeStart(DepInst)) {
+ // Loading the alloca -> undef.
+ // Loading immediately after lifetime begin -> undef.
+ if (isa<AllocaInst>(DepInst) || isLifetimeStart(DepInst)) {
Res = AvailableValue::get(UndefValue::get(Load->getType()));
return true;
}
- // Loading from calloc (which zero initializes memory) -> zero
- if (isCallocLikeFn(DepInst, TLI)) {
- Res = AvailableValue::get(Constant::getNullValue(Load->getType()));
- return true;
- }
+ if (isAllocationFn(DepInst, TLI))
+ if (auto *InitVal = getInitialValueOfAllocation(cast<CallBase>(DepInst),
+ TLI, Load->getType())) {
+ Res = AvailableValue::get(InitVal);
+ return true;
+ }
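// Illustrative sketch, not part of the diff: the facts GVN now derives through
// getInitialValueOfAllocation instead of the removed per-allocator checks.
#include <cstdlib>
int initial_values() {
  int *M = static_cast<int *>(std::malloc(sizeof(int))); // a load would be undef
  int *C = static_cast<int *>(std::calloc(1, sizeof(int)));
  int R = C ? *C : 0;  // GVN folds the load of fresh calloc memory to 0
  std::free(M);
  std::free(C);
  return R;            // known to be 0
}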
if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
// Reject loads and stores that are to the same address but are of
@@ -1769,7 +1768,7 @@ bool GVNPass::processAssumeIntrinsic(AssumeInst *IntrinsicI) {
// Insert a new store to null instruction before the load to indicate that
// this code is not reachable. FIXME: We could insert unreachable
// instruction directly because we can modify the CFG.
- auto *NewS = new StoreInst(UndefValue::get(Int8Ty),
+ auto *NewS = new StoreInst(PoisonValue::get(Int8Ty),
Constant::getNullValue(Int8Ty->getPointerTo()),
IntrinsicI);
if (MSSAU) {
@@ -2991,12 +2990,12 @@ void GVNPass::addDeadBlock(BasicBlock *BB) {
}
}
- // Now undef the incoming values from the dead predecessors.
+ // Now poison the incoming values from the dead predecessors.
for (BasicBlock *P : predecessors(B)) {
if (!DeadBlocks.count(P))
continue;
for (PHINode &Phi : B->phis()) {
- Phi.setIncomingValueForBlock(P, UndefValue::get(Phi.getType()));
+ Phi.setIncomingValueForBlock(P, PoisonValue::get(Phi.getType()));
if (MD)
MD->invalidateCachedPointerInfo(&Phi);
}
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
index 7001d330fce0..ceb03eb17f6d 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -138,8 +138,6 @@ AllowIVWidening("indvars-widen-indvars", cl::Hidden, cl::init(true),
namespace {
-struct RewritePhi;
-
class IndVarSimplify {
LoopInfo *LI;
ScalarEvolution *SE;
@@ -982,6 +980,7 @@ static Value *genLoopLimit(PHINode *IndVar, BasicBlock *ExitingBB,
assert(isLoopCounter(IndVar, L, SE));
const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(SE->getSCEV(IndVar));
const SCEV *IVInit = AR->getStart();
+ assert(AR->getStepRecurrence(*SE)->isOne() && "only handles unit stride");
// IVInit may be a pointer while ExitCount is an integer when FindLoopCounter
// finds a valid pointer IV. Sign extend ExitCount in order to materialize a
@@ -1004,13 +1003,6 @@ static Value *genLoopLimit(PHINode *IndVar, BasicBlock *ExitingBB,
assert(SE->isLoopInvariant(IVOffset, L) &&
"Computed iteration count is not loop invariant!");
- // We could handle pointer IVs other than i8*, but we need to compensate for
- // gep index scaling.
- assert(SE->getSizeOfExpr(IntegerType::getInt64Ty(IndVar->getContext()),
- cast<PointerType>(IndVar->getType())
- ->getElementType())->isOne() &&
- "unit stride pointer IV must be i8*");
-
const SCEV *IVLimit = SE->getAddExpr(IVInit, IVOffset);
BranchInst *BI = cast<BranchInst>(ExitingBB->getTerminator());
return Rewriter.expandCodeFor(IVLimit, IndVar->getType(), BI);
@@ -1026,7 +1018,6 @@ static Value *genLoopLimit(PHINode *IndVar, BasicBlock *ExitingBB,
// IVInit integer and ExitCount pointer would only occur if a canonical IV
// were generated on top of case #2, which is not expected.
- assert(AR->getStepRecurrence(*SE)->isOne() && "only handles unit stride");
// For unit stride, IVCount = Start + ExitCount with 2's complement
// overflow.
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
index 883d4afff3bd..8f5933b7bd71 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
@@ -250,12 +250,6 @@ public:
char InferAddressSpaces::ID = 0;
-namespace llvm {
-
-void initializeInferAddressSpacesPass(PassRegistry &);
-
-} // end namespace llvm
-
INITIALIZE_PASS_BEGIN(InferAddressSpaces, DEBUG_TYPE, "Infer address spaces",
false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/JumpThreading.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/JumpThreading.cpp
index fe9a7211967c..a3efad104ca6 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/JumpThreading.cpp
@@ -728,8 +728,8 @@ bool JumpThreadingPass::computeValueKnownInPredecessorsImpl(
// Handle some boolean conditions.
if (I->getType()->getPrimitiveSizeInBits() == 1) {
using namespace PatternMatch;
-
- assert(Preference == WantInteger && "One-bit non-integer type?");
+ if (Preference != WantInteger)
+ return false;
// X | true -> true
// X & false -> false
Value *Op0, *Op1;
@@ -789,8 +789,8 @@ bool JumpThreadingPass::computeValueKnownInPredecessorsImpl(
// Try to simplify some other binary operator values.
} else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
- assert(Preference != WantBlockAddress
- && "A binary operator creating a block address?");
+ if (Preference != WantInteger)
+ return false;
if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
PredValueInfoTy LHSVals;
computeValueKnownInPredecessorsImpl(BO->getOperand(0), BB, LHSVals,
@@ -811,7 +811,8 @@ bool JumpThreadingPass::computeValueKnownInPredecessorsImpl(
// Handle compare with phi operand, where the PHI is defined in this block.
if (CmpInst *Cmp = dyn_cast<CmpInst>(I)) {
- assert(Preference == WantInteger && "Compares only produce integers");
+ if (Preference != WantInteger)
+ return false;
Type *CmpType = Cmp->getType();
Value *CmpLHS = Cmp->getOperand(0);
Value *CmpRHS = Cmp->getOperand(1);
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/LICM.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/LICM.cpp
index bc792ca3d8da..7fb1a25bdf13 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -1355,7 +1355,7 @@ static bool isFreeInLoop(const Instruction &I, const Loop *CurLoop,
TargetTransformInfo::TCC_Free)
return false;
// For a GEP, we cannot simply use getUserCost because currently it
- // optimistically assume that a GEP will fold into addressing mode
+ // optimistically assumes that a GEP will fold into addressing mode
// regardless of its users.
const BasicBlock *BB = GEP->getParent();
for (const User *U : GEP->users()) {
@@ -1923,26 +1923,15 @@ bool isNotCapturedBeforeOrInLoop(const Value *V, const Loop *L,
L->getHeader()->getTerminator(), DT);
}
-/// Return true iff we can prove that a caller of this function can not inspect
-/// the contents of the provided object in a well defined program.
-bool isKnownNonEscaping(Value *Object, const Loop *L,
- const TargetLibraryInfo *TLI, DominatorTree *DT) {
- if (isa<AllocaInst>(Object))
- // Since the alloca goes out of scope, we know the caller can't retain a
- // reference to it and be well defined. Thus, we don't need to check for
- // capture.
- return true;
+/// Return true if we can prove that a caller cannot inspect the object if an
+/// unwind occurs inside the loop.
+bool isNotVisibleOnUnwindInLoop(const Value *Object, const Loop *L,
+ DominatorTree *DT) {
+ bool RequiresNoCaptureBeforeUnwind;
+ if (!isNotVisibleOnUnwind(Object, RequiresNoCaptureBeforeUnwind))
+ return false;
- // For all other objects we need to know that the caller can't possibly
- // have gotten a reference to the object. There are two components of
- // that:
- // 1) Object can't be escaped by this function. This is what
- // PointerMayBeCaptured checks.
- // 2) Object can't have been captured at definition site. For this, we
- // need to know the return value is noalias. At the moment, we use a
- // weaker condition and handle only AllocLikeFunctions (which are
- // known to be noalias). TODO
- return isAllocLikeFn(Object, TLI) &&
+ return !RequiresNoCaptureBeforeUnwind ||
isNotCapturedBeforeOrInLoop(Object, L, DT);
}
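// Illustrative sketch, not part of the diff: a promotion the relaxed query
// above still permits -- the accumulator is an alloca, so it is not visible to
// the caller if an unwind happens inside the loop.
int promote_to_register(int N) {
  int Acc = 0;                 // alloca that never escapes
  for (int I = 0; I < N; ++I)
    Acc += I;                  // loads/stores of Acc promoted to a scalar
  return Acc;
}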
@@ -2030,7 +2019,7 @@ bool llvm::promoteLoopAccessesToScalars(
// this by proving that the caller can't have a reference to the object
// after return and thus can't possibly load from the object.
Value *Object = getUnderlyingObject(SomePtr);
- if (!isKnownNonEscaping(Object, CurLoop, TLI, DT))
+ if (!isNotVisibleOnUnwindInLoop(Object, CurLoop, DT))
return false;
// Subtlety: Alloca's aren't visible to callers, but *are* potentially
// visible to other threads if captured and used during their lifetimes.
@@ -2163,7 +2152,7 @@ bool llvm::promoteLoopAccessesToScalars(
else {
Value *Object = getUnderlyingObject(SomePtr);
SafeToInsertStore =
- (isAllocLikeFn(Object, TLI) || isa<AllocaInst>(Object)) &&
+ (isNoAliasCall(Object) || isa<AllocaInst>(Object)) &&
isNotCapturedBeforeOrInLoop(Object, CurLoop, DT);
}
}
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopDeletion.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
index 5814e2f043d5..361d6c0d9381 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
@@ -407,25 +407,19 @@ breakBackedgeIfNotTaken(Loop *L, DominatorTree &DT, ScalarEvolution &SE,
if (!L->getLoopLatch())
return LoopDeletionResult::Unmodified;
- auto *BTC = SE.getSymbolicMaxBackedgeTakenCount(L);
- if (BTC->isZero()) {
- // SCEV knows this backedge isn't taken!
- breakLoopBackedge(L, DT, SE, LI, MSSA);
- ++NumBackedgesBroken;
- return LoopDeletionResult::Deleted;
- }
-
- // If SCEV leaves open the possibility of a zero trip count, see if
- // symbolically evaluating the first iteration lets us prove the backedge
- // unreachable.
- if (isa<SCEVCouldNotCompute>(BTC) || !SE.isKnownNonZero(BTC))
- if (canProveExitOnFirstIteration(L, DT, LI)) {
- breakLoopBackedge(L, DT, SE, LI, MSSA);
- ++NumBackedgesBroken;
- return LoopDeletionResult::Deleted;
+ auto *BTCMax = SE.getConstantMaxBackedgeTakenCount(L);
+ if (!BTCMax->isZero()) {
+ auto *BTC = SE.getBackedgeTakenCount(L);
+ if (!BTC->isZero()) {
+ if (!isa<SCEVCouldNotCompute>(BTC) && SE.isKnownNonZero(BTC))
+ return LoopDeletionResult::Unmodified;
+ if (!canProveExitOnFirstIteration(L, DT, LI))
+ return LoopDeletionResult::Unmodified;
}
-
- return LoopDeletionResult::Unmodified;
+ }
+ ++NumBackedgesBroken;
+ breakLoopBackedge(L, DT, SE, LI, MSSA);
+ return LoopDeletionResult::Deleted;
}
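// Illustrative sketch, not part of the diff: the shape of loop whose backedge
// the restructured logic above deletes -- one whose backedge can be proven
// never taken, leaving straight-line code.
int runs_at_most_once(int X) {
  int Sum = 0;
  for (int I = 0; I < 1; ++I)  // backedge-taken count is provably zero
    Sum += X;
  return Sum;                  // equivalent to returning X
}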
/// Remove a loop if it is dead.
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopFlatten.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopFlatten.cpp
index 965d1575518e..c46db4e63bfe 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopFlatten.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopFlatten.cpp
@@ -10,10 +10,13 @@
//
// The intention is to optimise loop nests like this, which together access an
// array linearly:
+//
// for (int i = 0; i < N; ++i)
// for (int j = 0; j < M; ++j)
// f(A[i*M+j]);
+//
// into one loop:
+//
// for (int i = 0; i < (N*M); ++i)
// f(A[i]);
//
@@ -22,7 +25,27 @@
// expression like i*M+j. If they had any other uses, we would have to insert a
// div/mod to reconstruct the original values, so this wouldn't be profitable.
//
-// We also need to prove that N*M will not overflow.
+// We also need to prove that N*M will not overflow. The preferred solution is
+// to widen the IV, which avoids overflow checks, so that is tried first. If
+// the IV cannot be widened, then we try to determine that this new tripcount
+// expression won't overflow.
+//
+// Q: Does LoopFlatten use SCEV?
+// Short answer: Yes and no.
+//
+// Long answer:
+// For this transformation to be valid, we require all uses of the induction
+// variables to be linear expressions of the form i*M+j. The different Loop
+// APIs are used to get some loop components like the induction variable,
+// compare statement, etc. In addition, we do some pattern matching to find the
+// linear expressions and other loop components like the loop increment. The
+// latter are examples of expressions that do use the induction variable, but
+// are safe to ignore when we check that all uses are of the form i*M+j. We
+// keep track of all of this in the bookkeeping struct FlattenInfo.
+// We assume the loops to be canonical, i.e. starting at 0 and incrementing by
+// 1. This makes the RHS of the compare the loop tripcount (with the right
+// predicate). We then use SCEV to sanity-check that this tripcount matches
+// the tripcount as computed by SCEV.
//
//===----------------------------------------------------------------------===//
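// Illustrative sketch, not part of the diff: why widening the IV avoids a
// separate N*M overflow proof. A 32-bit product can wrap, but the 64-bit
// flattened counter below cannot for any 32-bit N and M.
#include <cstdint>
void flattened(int *A, std::uint32_t N, std::uint32_t M) {
  for (std::uint64_t I = 0, E = std::uint64_t(N) * M; I < E; ++I)
    A[I] = 0;  // single widened induction variable replaces the i, j pair
}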
@@ -31,6 +54,7 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
@@ -70,37 +94,54 @@ static cl::opt<bool>
"trip counts will never overflow"));
static cl::opt<bool>
- WidenIV("loop-flatten-widen-iv", cl::Hidden,
- cl::init(true),
+ WidenIV("loop-flatten-widen-iv", cl::Hidden, cl::init(true),
cl::desc("Widen the loop induction variables, if possible, so "
"overflow checks won't reject flattening"));
+// We require all uses of both induction variables to match this pattern:
+//
+// (OuterPHI * InnerTripCount) + InnerPHI
+//
+// I.e., it needs to be a linear expression of the induction variables and the
+// inner loop trip count. We keep track of all different expressions on which
+// checks will be performed in this bookkeeping struct.
+//
struct FlattenInfo {
- Loop *OuterLoop = nullptr;
+ Loop *OuterLoop = nullptr; // The loop pair to be flattened.
Loop *InnerLoop = nullptr;
- // These PHINodes correspond to loop induction variables, which are expected
- // to start at zero and increment by one on each loop.
- PHINode *InnerInductionPHI = nullptr;
- PHINode *OuterInductionPHI = nullptr;
- Value *InnerTripCount = nullptr;
- Value *OuterTripCount = nullptr;
- BinaryOperator *InnerIncrement = nullptr;
- BinaryOperator *OuterIncrement = nullptr;
- BranchInst *InnerBranch = nullptr;
- BranchInst *OuterBranch = nullptr;
- SmallPtrSet<Value *, 4> LinearIVUses;
+
+ PHINode *InnerInductionPHI = nullptr; // These PHINodes correspond to loop
+ PHINode *OuterInductionPHI = nullptr; // induction variables, which are
+ // expected to start at zero and
+ // increment by one on each loop.
+
+ Value *InnerTripCount = nullptr; // The product of these two tripcounts
+ Value *OuterTripCount = nullptr; // will be the new flattened loop
+ // tripcount. Also used to recognise a
+ // linear expression that will be replaced.
+
+ SmallPtrSet<Value *, 4> LinearIVUses; // Contains the linear expressions
+ // of the form i*M+j that will be
+ // replaced.
+
+ BinaryOperator *InnerIncrement = nullptr; // Uses of induction variables in
+ BinaryOperator *OuterIncrement = nullptr; // loop control statements that
+ BranchInst *InnerBranch = nullptr; // are safe to ignore.
+
+ BranchInst *OuterBranch = nullptr; // The instruction that needs to be
+ // updated with new tripcount.
+
SmallPtrSet<PHINode *, 4> InnerPHIsToTransform;
- // Whether this holds the flatten info before or after widening.
- bool Widened = false;
+ bool Widened = false; // Whether this holds the flatten info before or after
+ // widening.
- // Holds the old/narrow induction phis, i.e. the Phis before IV widening has
- // been applied. This bookkeeping is used so we can skip some checks on these
- // phi nodes.
- PHINode *NarrowInnerInductionPHI = nullptr;
- PHINode *NarrowOuterInductionPHI = nullptr;
+ PHINode *NarrowInnerInductionPHI = nullptr; // Holds the old/narrow induction
+ PHINode *NarrowOuterInductionPHI = nullptr; // phis, i.e. the Phis before IV
+                                              // widening has been applied. Used
+                                              // to skip checks on phi nodes.
- FlattenInfo(Loop *OL, Loop *IL) : OuterLoop(OL), InnerLoop(IL) {};
+ FlattenInfo(Loop *OL, Loop *IL) : OuterLoop(OL), InnerLoop(IL){};
bool isNarrowInductionPhi(PHINode *Phi) {
// This can't be the narrow phi if we haven't widened the IV first.
@@ -108,6 +149,118 @@ struct FlattenInfo {
return false;
return NarrowInnerInductionPHI == Phi || NarrowOuterInductionPHI == Phi;
}
+ bool isInnerLoopIncrement(User *U) {
+ return InnerIncrement == U;
+ }
+ bool isOuterLoopIncrement(User *U) {
+ return OuterIncrement == U;
+ }
+ bool isInnerLoopTest(User *U) {
+ return InnerBranch->getCondition() == U;
+ }
+
+ bool checkOuterInductionPhiUsers(SmallPtrSet<Value *, 4> &ValidOuterPHIUses) {
+ for (User *U : OuterInductionPHI->users()) {
+ if (isOuterLoopIncrement(U))
+ continue;
+
+ auto IsValidOuterPHIUses = [&] (User *U) -> bool {
+ LLVM_DEBUG(dbgs() << "Found use of outer induction variable: "; U->dump());
+ if (!ValidOuterPHIUses.count(U)) {
+ LLVM_DEBUG(dbgs() << "Did not match expected pattern, bailing\n");
+ return false;
+ }
+ LLVM_DEBUG(dbgs() << "Use is optimisable\n");
+ return true;
+ };
+
+ if (auto *V = dyn_cast<TruncInst>(U)) {
+ for (auto *K : V->users()) {
+ if (!IsValidOuterPHIUses(K))
+ return false;
+ }
+ continue;
+ }
+
+ if (!IsValidOuterPHIUses(U))
+ return false;
+ }
+ return true;
+ }
+
+ bool matchLinearIVUser(User *U, Value *InnerTripCount,
+ SmallPtrSet<Value *, 4> &ValidOuterPHIUses) {
+ LLVM_DEBUG(dbgs() << "Found use of inner induction variable: "; U->dump());
+ Value *MatchedMul = nullptr;
+ Value *MatchedItCount = nullptr;
+
+ bool IsAdd = match(U, m_c_Add(m_Specific(InnerInductionPHI),
+ m_Value(MatchedMul))) &&
+ match(MatchedMul, m_c_Mul(m_Specific(OuterInductionPHI),
+ m_Value(MatchedItCount)));
+
+ // Matches the same pattern as above, except it also looks for truncs
+ // on the phi, which can be the result of widening the induction variables.
+ bool IsAddTrunc =
+ match(U, m_c_Add(m_Trunc(m_Specific(InnerInductionPHI)),
+ m_Value(MatchedMul))) &&
+ match(MatchedMul, m_c_Mul(m_Trunc(m_Specific(OuterInductionPHI)),
+ m_Value(MatchedItCount)));
+
+ if (!MatchedItCount)
+ return false;
+
+ // Look through extends if the IV has been widened.
+ if (Widened &&
+ (isa<SExtInst>(MatchedItCount) || isa<ZExtInst>(MatchedItCount))) {
+ assert(MatchedItCount->getType() == InnerInductionPHI->getType() &&
+ "Unexpected type mismatch in types after widening");
+ MatchedItCount = isa<SExtInst>(MatchedItCount)
+ ? dyn_cast<SExtInst>(MatchedItCount)->getOperand(0)
+ : dyn_cast<ZExtInst>(MatchedItCount)->getOperand(0);
+ }
+
+ if ((IsAdd || IsAddTrunc) && MatchedItCount == InnerTripCount) {
+ LLVM_DEBUG(dbgs() << "Use is optimisable\n");
+ ValidOuterPHIUses.insert(MatchedMul);
+ LinearIVUses.insert(U);
+ return true;
+ }
+
+ LLVM_DEBUG(dbgs() << "Did not match expected pattern, bailing\n");
+ return false;
+ }
+
+ bool checkInnerInductionPhiUsers(SmallPtrSet<Value *, 4> &ValidOuterPHIUses) {
+ Value *SExtInnerTripCount = InnerTripCount;
+ if (Widened &&
+ (isa<SExtInst>(InnerTripCount) || isa<ZExtInst>(InnerTripCount)))
+ SExtInnerTripCount = cast<Instruction>(InnerTripCount)->getOperand(0);
+
+ for (User *U : InnerInductionPHI->users()) {
+ if (isInnerLoopIncrement(U))
+ continue;
+
+ // After widening the IVs, a trunc instruction might have been introduced,
+ // so look through truncs.
+ if (isa<TruncInst>(U)) {
+ if (!U->hasOneUse())
+ return false;
+ U = *U->user_begin();
+ }
+
+ // If the use is in the compare (which is also the condition of the inner
+      // branch) then the compare has been altered by another transformation, e.g.
+ // icmp ult %inc, tripcount -> icmp ult %j, tripcount-1, where tripcount is
+ // a constant. Ignore this use as the compare gets removed later anyway.
+ if (isInnerLoopTest(U))
+ continue;
+
+ if (!matchLinearIVUser(U, SExtInnerTripCount, ValidOuterPHIUses))
+ return false;
+ }
+ return true;
+ }
};
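// Illustrative sketch, not part of the diff: a loop nest whose only inner-IV
// use matches the (OuterPHI * InnerTripCount) + InnerPHI pattern tracked in
// LinearIVUses, so no div/mod is needed after flattening.
void linear_use(int *A, int N, int M) {
  for (int I = 0; I < N; ++I)     // OuterInductionPHI = I, OuterTripCount = N
    for (int J = 0; J < M; ++J)   // InnerInductionPHI = J, InnerTripCount = M
      A[I * M + J] = 0;           // the linear expression that gets replaced
}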
static bool
@@ -121,6 +274,77 @@ setLoopComponents(Value *&TC, Value *&TripCount, BinaryOperator *&Increment,
return true;
}
+// Given the RHS of the loop latch compare instruction, verify with SCEV
+// that this is indeed the loop tripcount.
+// TODO: This used to be a straightforward check but has grown to be quite
+// complicated now. It is therefore worth revisiting what the additional
+// benefits are of this (compared to relying on canonical loops and pattern
+// matching).
+static bool verifyTripCount(Value *RHS, Loop *L,
+ SmallPtrSetImpl<Instruction *> &IterationInstructions,
+ PHINode *&InductionPHI, Value *&TripCount, BinaryOperator *&Increment,
+ BranchInst *&BackBranch, ScalarEvolution *SE, bool IsWidened) {
+ const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
+ if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
+ LLVM_DEBUG(dbgs() << "Backedge-taken count is not predictable\n");
+ return false;
+ }
+
+  // The Extend=false flag is used for getTripCountFromExitCount as we want
+  // to verify and match it with the pattern-matched tripcount. Please note
+  // that overflow checks are performed in checkOverflow, but we first try to
+  // avoid them by widening the IV.
+ const SCEV *SCEVTripCount =
+ SE->getTripCountFromExitCount(BackedgeTakenCount, /*Extend=*/false);
+
+ const SCEV *SCEVRHS = SE->getSCEV(RHS);
+ if (SCEVRHS == SCEVTripCount)
+ return setLoopComponents(RHS, TripCount, Increment, IterationInstructions);
+ ConstantInt *ConstantRHS = dyn_cast<ConstantInt>(RHS);
+ if (ConstantRHS) {
+ const SCEV *BackedgeTCExt = nullptr;
+ if (IsWidened) {
+ const SCEV *SCEVTripCountExt;
+ // Find the extended backedge taken count and extended trip count using
+ // SCEV. One of these should now match the RHS of the compare.
+ BackedgeTCExt = SE->getZeroExtendExpr(BackedgeTakenCount, RHS->getType());
+ SCEVTripCountExt = SE->getTripCountFromExitCount(BackedgeTCExt, false);
+ if (SCEVRHS != BackedgeTCExt && SCEVRHS != SCEVTripCountExt) {
+ LLVM_DEBUG(dbgs() << "Could not find valid trip count\n");
+ return false;
+ }
+ }
+ // If the RHS of the compare is equal to the backedge taken count we need
+ // to add one to get the trip count.
+ if (SCEVRHS == BackedgeTCExt || SCEVRHS == BackedgeTakenCount) {
+ ConstantInt *One = ConstantInt::get(ConstantRHS->getType(), 1);
+ Value *NewRHS = ConstantInt::get(
+ ConstantRHS->getContext(), ConstantRHS->getValue() + One->getValue());
+ return setLoopComponents(NewRHS, TripCount, Increment,
+ IterationInstructions);
+ }
+ return setLoopComponents(RHS, TripCount, Increment, IterationInstructions);
+ }
+ // If the RHS isn't a constant then check that the reason it doesn't match
+ // the SCEV trip count is because the RHS is a ZExt or SExt instruction
+ // (and take the trip count to be the RHS).
+ if (!IsWidened) {
+ LLVM_DEBUG(dbgs() << "Could not find valid trip count\n");
+ return false;
+ }
+ auto *TripCountInst = dyn_cast<Instruction>(RHS);
+ if (!TripCountInst) {
+ LLVM_DEBUG(dbgs() << "Could not find valid trip count\n");
+ return false;
+ }
+ if ((!isa<ZExtInst>(TripCountInst) && !isa<SExtInst>(TripCountInst)) ||
+ SE->getSCEV(TripCountInst->getOperand(0)) != SCEVTripCount) {
+ LLVM_DEBUG(dbgs() << "Could not find valid extended trip count\n");
+ return false;
+ }
+ return setLoopComponents(RHS, TripCount, Increment, IterationInstructions);
+}
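// Illustrative sketch, not part of the diff: the "+1" adjustment above encodes
// the usual relation trip count = backedge-taken count + 1 for a loop body
// that executes at least once.
#include <cassert>
int main() {
  const int N = 8;
  int Body = 0, Backedges = 0;
  for (int I = 0; I < N; ++I) {
    ++Body;
    if (I + 1 < N)
      ++Backedges;  // the backedge is taken only when another iteration follows
  }
  assert(Body == Backedges + 1);
  return 0;
}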
+
// Finds the induction variable, increment and trip count for a simple loop that
// we can flatten.
static bool findLoopComponents(
@@ -197,63 +421,9 @@ static bool findLoopComponents(
// another transformation has changed the compare (e.g. icmp ult %inc,
// tripcount -> icmp ult %j, tripcount-1), or both.
Value *RHS = Compare->getOperand(1);
- const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
- if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
- LLVM_DEBUG(dbgs() << "Backedge-taken count is not predictable\n");
- return false;
- }
- // The use of the Extend=false flag on getTripCountFromExitCount was added
- // during a refactoring to preserve existing behavior. However, there's
- // nothing obvious in the surrounding code when handles the overflow case.
- // FIXME: audit code to establish whether there's a latent bug here.
- const SCEV *SCEVTripCount =
- SE->getTripCountFromExitCount(BackedgeTakenCount, false);
- const SCEV *SCEVRHS = SE->getSCEV(RHS);
- if (SCEVRHS == SCEVTripCount)
- return setLoopComponents(RHS, TripCount, Increment, IterationInstructions);
- ConstantInt *ConstantRHS = dyn_cast<ConstantInt>(RHS);
- if (ConstantRHS) {
- const SCEV *BackedgeTCExt = nullptr;
- if (IsWidened) {
- const SCEV *SCEVTripCountExt;
- // Find the extended backedge taken count and extended trip count using
- // SCEV. One of these should now match the RHS of the compare.
- BackedgeTCExt = SE->getZeroExtendExpr(BackedgeTakenCount, RHS->getType());
- SCEVTripCountExt = SE->getTripCountFromExitCount(BackedgeTCExt, false);
- if (SCEVRHS != BackedgeTCExt && SCEVRHS != SCEVTripCountExt) {
- LLVM_DEBUG(dbgs() << "Could not find valid trip count\n");
- return false;
- }
- }
- // If the RHS of the compare is equal to the backedge taken count we need
- // to add one to get the trip count.
- if (SCEVRHS == BackedgeTCExt || SCEVRHS == BackedgeTakenCount) {
- ConstantInt *One = ConstantInt::get(ConstantRHS->getType(), 1);
- Value *NewRHS = ConstantInt::get(
- ConstantRHS->getContext(), ConstantRHS->getValue() + One->getValue());
- return setLoopComponents(NewRHS, TripCount, Increment,
- IterationInstructions);
- }
- return setLoopComponents(RHS, TripCount, Increment, IterationInstructions);
- }
- // If the RHS isn't a constant then check that the reason it doesn't match
- // the SCEV trip count is because the RHS is a ZExt or SExt instruction
- // (and take the trip count to be the RHS).
- if (!IsWidened) {
- LLVM_DEBUG(dbgs() << "Could not find valid trip count\n");
- return false;
- }
- auto *TripCountInst = dyn_cast<Instruction>(RHS);
- if (!TripCountInst) {
- LLVM_DEBUG(dbgs() << "Could not find valid trip count\n");
- return false;
- }
- if ((!isa<ZExtInst>(TripCountInst) && !isa<SExtInst>(TripCountInst)) ||
- SE->getSCEV(TripCountInst->getOperand(0)) != SCEVTripCount) {
- LLVM_DEBUG(dbgs() << "Could not find valid extended trip count\n");
- return false;
- }
- return setLoopComponents(RHS, TripCount, Increment, IterationInstructions);
+
+ return verifyTripCount(RHS, L, IterationInstructions, InductionPHI, TripCount,
+ Increment, BackBranch, SE, IsWidened);
}
static bool checkPHIs(FlattenInfo &FI, const TargetTransformInfo *TTI) {
@@ -399,108 +569,26 @@ checkOuterLoopInsts(FlattenInfo &FI,
return true;
}
-static bool checkIVUsers(FlattenInfo &FI) {
- // We require all uses of both induction variables to match this pattern:
- //
- // (OuterPHI * InnerTripCount) + InnerPHI
- //
- // Any uses of the induction variables not matching that pattern would
- // require a div/mod to reconstruct in the flattened loop, so the
- // transformation wouldn't be profitable.
-
- Value *InnerTripCount = FI.InnerTripCount;
- if (FI.Widened &&
- (isa<SExtInst>(InnerTripCount) || isa<ZExtInst>(InnerTripCount)))
- InnerTripCount = cast<Instruction>(InnerTripCount)->getOperand(0);
+
+// We require all uses of both induction variables to match this pattern:
+//
+// (OuterPHI * InnerTripCount) + InnerPHI
+//
+// Any uses of the induction variables not matching that pattern would
+// require a div/mod to reconstruct in the flattened loop, so the
+// transformation wouldn't be profitable.
+static bool checkIVUsers(FlattenInfo &FI) {
// Check that all uses of the inner loop's induction variable match the
// expected pattern, recording the uses of the outer IV.
SmallPtrSet<Value *, 4> ValidOuterPHIUses;
- for (User *U : FI.InnerInductionPHI->users()) {
- if (U == FI.InnerIncrement)
- continue;
-
- // After widening the IVs, a trunc instruction might have been introduced,
- // so look through truncs.
- if (isa<TruncInst>(U)) {
- if (!U->hasOneUse())
- return false;
- U = *U->user_begin();
- }
-
- // If the use is in the compare (which is also the condition of the inner
- // branch) then the compare has been altered by another transformation e.g
- // icmp ult %inc, tripcount -> icmp ult %j, tripcount-1, where tripcount is
- // a constant. Ignore this use as the compare gets removed later anyway.
- if (U == FI.InnerBranch->getCondition())
- continue;
-
- LLVM_DEBUG(dbgs() << "Found use of inner induction variable: "; U->dump());
-
- Value *MatchedMul = nullptr;
- Value *MatchedItCount = nullptr;
- bool IsAdd = match(U, m_c_Add(m_Specific(FI.InnerInductionPHI),
- m_Value(MatchedMul))) &&
- match(MatchedMul, m_c_Mul(m_Specific(FI.OuterInductionPHI),
- m_Value(MatchedItCount)));
-
- // Matches the same pattern as above, except it also looks for truncs
- // on the phi, which can be the result of widening the induction variables.
- bool IsAddTrunc =
- match(U, m_c_Add(m_Trunc(m_Specific(FI.InnerInductionPHI)),
- m_Value(MatchedMul))) &&
- match(MatchedMul, m_c_Mul(m_Trunc(m_Specific(FI.OuterInductionPHI)),
- m_Value(MatchedItCount)));
-
- if (!MatchedItCount)
- return false;
- // Look through extends if the IV has been widened.
- if (FI.Widened &&
- (isa<SExtInst>(MatchedItCount) || isa<ZExtInst>(MatchedItCount))) {
- assert(MatchedItCount->getType() == FI.InnerInductionPHI->getType() &&
- "Unexpected type mismatch in types after widening");
- MatchedItCount = isa<SExtInst>(MatchedItCount)
- ? dyn_cast<SExtInst>(MatchedItCount)->getOperand(0)
- : dyn_cast<ZExtInst>(MatchedItCount)->getOperand(0);
- }
-
- if ((IsAdd || IsAddTrunc) && MatchedItCount == InnerTripCount) {
- LLVM_DEBUG(dbgs() << "Use is optimisable\n");
- ValidOuterPHIUses.insert(MatchedMul);
- FI.LinearIVUses.insert(U);
- } else {
- LLVM_DEBUG(dbgs() << "Did not match expected pattern, bailing\n");
- return false;
- }
- }
+ if (!FI.checkInnerInductionPhiUsers(ValidOuterPHIUses))
+ return false;
// Check that there are no uses of the outer IV other than the ones found
// as part of the pattern above.
- for (User *U : FI.OuterInductionPHI->users()) {
- if (U == FI.OuterIncrement)
- continue;
-
- auto IsValidOuterPHIUses = [&] (User *U) -> bool {
- LLVM_DEBUG(dbgs() << "Found use of outer induction variable: "; U->dump());
- if (!ValidOuterPHIUses.count(U)) {
- LLVM_DEBUG(dbgs() << "Did not match expected pattern, bailing\n");
- return false;
- }
- LLVM_DEBUG(dbgs() << "Use is optimisable\n");
- return true;
- };
-
- if (auto *V = dyn_cast<TruncInst>(U)) {
- for (auto *K : V->users()) {
- if (!IsValidOuterPHIUses(K))
- return false;
- }
- continue;
- }
-
- if (!IsValidOuterPHIUses(U))
- return false;
- }
+ if (!FI.checkOuterInductionPhiUsers(ValidOuterPHIUses))
+ return false;
LLVM_DEBUG(dbgs() << "checkIVUsers: OK\n";
dbgs() << "Found " << FI.LinearIVUses.size()
@@ -535,7 +623,7 @@ static OverflowResult checkOverflow(FlattenInfo &FI, DominatorTree *DT,
for (Value *U : V->users()) {
if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) {
for (Value *GEPUser : U->users()) {
- Instruction *GEPUserInst = dyn_cast<Instruction>(GEPUser);
+ auto *GEPUserInst = cast<Instruction>(GEPUser);
if (!isa<LoadInst>(GEPUserInst) &&
!(isa<StoreInst>(GEPUserInst) &&
GEP == GEPUserInst->getOperand(1)))
@@ -611,7 +699,8 @@ static bool CanFlattenLoopPair(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
static bool DoFlattenLoopPair(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
ScalarEvolution *SE, AssumptionCache *AC,
- const TargetTransformInfo *TTI, LPMUpdater *U) {
+ const TargetTransformInfo *TTI, LPMUpdater *U,
+ MemorySSAUpdater *MSSAU) {
Function *F = FI.OuterLoop->getHeader()->getParent();
LLVM_DEBUG(dbgs() << "Checks all passed, doing the transformation\n");
{
@@ -647,7 +736,11 @@ static bool DoFlattenLoopPair(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
BasicBlock *InnerExitingBlock = FI.InnerLoop->getExitingBlock();
InnerExitingBlock->getTerminator()->eraseFromParent();
BranchInst::Create(InnerExitBlock, InnerExitingBlock);
+
+ // Update the DomTree and MemorySSA.
DT->deleteEdge(InnerExitingBlock, FI.InnerLoop->getHeader());
+ if (MSSAU)
+ MSSAU->removeEdge(InnerExitingBlock, FI.InnerLoop->getHeader());
// Replace all uses of the polynomial calculated from the two induction
// variables with the one new one.
@@ -658,8 +751,8 @@ static bool DoFlattenLoopPair(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
OuterValue = Builder.CreateTrunc(FI.OuterInductionPHI, V->getType(),
"flatten.trunciv");
- LLVM_DEBUG(dbgs() << "Replacing: "; V->dump();
- dbgs() << "with: "; OuterValue->dump());
+ LLVM_DEBUG(dbgs() << "Replacing: "; V->dump(); dbgs() << "with: ";
+ OuterValue->dump());
V->replaceAllUsesWith(OuterValue);
}
@@ -698,7 +791,8 @@ static bool CanWidenIV(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
// (OuterTripCount * InnerTripCount) as the new trip count is safe.
if (InnerType != OuterType ||
InnerType->getScalarSizeInBits() >= MaxLegalSize ||
- MaxLegalType->getScalarSizeInBits() < InnerType->getScalarSizeInBits() * 2) {
+ MaxLegalType->getScalarSizeInBits() <
+ InnerType->getScalarSizeInBits() * 2) {
LLVM_DEBUG(dbgs() << "Can't widen the IV\n");
return false;
}
@@ -708,10 +802,10 @@ static bool CanWidenIV(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
unsigned ElimExt = 0;
unsigned Widened = 0;
- auto CreateWideIV = [&] (WideIVInfo WideIV, bool &Deleted) -> bool {
- PHINode *WidePhi = createWideIV(WideIV, LI, SE, Rewriter, DT, DeadInsts,
- ElimExt, Widened, true /* HasGuards */,
- true /* UsePostIncrementRanges */);
+ auto CreateWideIV = [&](WideIVInfo WideIV, bool &Deleted) -> bool {
+ PHINode *WidePhi =
+ createWideIV(WideIV, LI, SE, Rewriter, DT, DeadInsts, ElimExt, Widened,
+ true /* HasGuards */, true /* UsePostIncrementRanges */);
if (!WidePhi)
return false;
LLVM_DEBUG(dbgs() << "Created wide phi: "; WidePhi->dump());
@@ -721,14 +815,14 @@ static bool CanWidenIV(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
};
bool Deleted;
- if (!CreateWideIV({FI.InnerInductionPHI, MaxLegalType, false }, Deleted))
+ if (!CreateWideIV({FI.InnerInductionPHI, MaxLegalType, false}, Deleted))
return false;
// Add the narrow phi to list, so that it will be adjusted later when the
// the transformation is performed.
if (!Deleted)
FI.InnerPHIsToTransform.insert(FI.InnerInductionPHI);
- if (!CreateWideIV({FI.OuterInductionPHI, MaxLegalType, false }, Deleted))
+ if (!CreateWideIV({FI.OuterInductionPHI, MaxLegalType, false}, Deleted))
return false;
assert(Widened && "Widened IV expected");
@@ -744,7 +838,8 @@ static bool CanWidenIV(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
static bool FlattenLoopPair(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
ScalarEvolution *SE, AssumptionCache *AC,
- const TargetTransformInfo *TTI, LPMUpdater *U) {
+ const TargetTransformInfo *TTI, LPMUpdater *U,
+ MemorySSAUpdater *MSSAU) {
LLVM_DEBUG(
dbgs() << "Loop flattening running on outer loop "
<< FI.OuterLoop->getHeader()->getName() << " and inner loop "
@@ -773,7 +868,7 @@ static bool FlattenLoopPair(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
// If we have widened and can perform the transformation, do that here.
if (CanFlatten)
- return DoFlattenLoopPair(FI, DT, LI, SE, AC, TTI, U);
+ return DoFlattenLoopPair(FI, DT, LI, SE, AC, TTI, U, MSSAU);
// Otherwise, if we haven't widened the IV, check if the new iteration
// variable might overflow. In this case, we need to version the loop, and
@@ -791,18 +886,19 @@ static bool FlattenLoopPair(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
}
LLVM_DEBUG(dbgs() << "Multiply cannot overflow, modifying loop in-place\n");
- return DoFlattenLoopPair(FI, DT, LI, SE, AC, TTI, U);
+ return DoFlattenLoopPair(FI, DT, LI, SE, AC, TTI, U, MSSAU);
}
bool Flatten(LoopNest &LN, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE,
- AssumptionCache *AC, TargetTransformInfo *TTI, LPMUpdater *U) {
+ AssumptionCache *AC, TargetTransformInfo *TTI, LPMUpdater *U,
+ MemorySSAUpdater *MSSAU) {
bool Changed = false;
for (Loop *InnerLoop : LN.getLoops()) {
auto *OuterLoop = InnerLoop->getParentLoop();
if (!OuterLoop)
continue;
FlattenInfo FI(OuterLoop, InnerLoop);
- Changed |= FlattenLoopPair(FI, DT, LI, SE, AC, TTI, U);
+ Changed |= FlattenLoopPair(FI, DT, LI, SE, AC, TTI, U, MSSAU);
}
return Changed;
}
@@ -813,16 +909,30 @@ PreservedAnalyses LoopFlattenPass::run(LoopNest &LN, LoopAnalysisManager &LAM,
bool Changed = false;
+ Optional<MemorySSAUpdater> MSSAU;
+ if (AR.MSSA) {
+ MSSAU = MemorySSAUpdater(AR.MSSA);
+ if (VerifyMemorySSA)
+ AR.MSSA->verifyMemorySSA();
+ }
+
// The loop flattening pass requires loops to be
// in simplified form, and also needs LCSSA. Running
// this pass will simplify all loops that contain inner loops,
// regardless of whether anything ends up being flattened.
- Changed |= Flatten(LN, &AR.DT, &AR.LI, &AR.SE, &AR.AC, &AR.TTI, &U);
+ Changed |= Flatten(LN, &AR.DT, &AR.LI, &AR.SE, &AR.AC, &AR.TTI, &U,
+ MSSAU.hasValue() ? MSSAU.getPointer() : nullptr);
if (!Changed)
return PreservedAnalyses::all();
- return getLoopPassPreservedAnalyses();
+ if (AR.MSSA && VerifyMemorySSA)
+ AR.MSSA->verifyMemorySSA();
+
+ auto PA = getLoopPassPreservedAnalyses();
+ if (AR.MSSA)
+ PA.preserve<MemorySSAAnalysis>();
+ return PA;
}
namespace {
@@ -842,6 +952,7 @@ public:
AU.addPreserved<TargetTransformInfoWrapperPass>();
AU.addRequired<AssumptionCacheTracker>();
AU.addPreserved<AssumptionCacheTracker>();
+ AU.addPreserved<MemorySSAWrapperPass>();
}
};
} // namespace
@@ -854,7 +965,9 @@ INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_END(LoopFlattenLegacyPass, "loop-flatten", "Flattens loops",
false, false)
-FunctionPass *llvm::createLoopFlattenPass() { return new LoopFlattenLegacyPass(); }
+FunctionPass *llvm::createLoopFlattenPass() {
+ return new LoopFlattenLegacyPass();
+}
bool LoopFlattenLegacyPass::runOnFunction(Function &F) {
ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
@@ -864,10 +977,17 @@ bool LoopFlattenLegacyPass::runOnFunction(Function &F) {
auto &TTIP = getAnalysis<TargetTransformInfoWrapperPass>();
auto *TTI = &TTIP.getTTI(F);
auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
+ auto *MSSA = getAnalysisIfAvailable<MemorySSAWrapperPass>();
+
+ Optional<MemorySSAUpdater> MSSAU;
+ if (MSSA)
+ MSSAU = MemorySSAUpdater(&MSSA->getMSSA());
+
bool Changed = false;
for (Loop *L : *LI) {
auto LN = LoopNest::getLoopNest(*L, *SE);
- Changed |= Flatten(*LN, DT, LI, SE, AC, TTI, nullptr);
+ Changed |= Flatten(*LN, DT, LI, SE, AC, TTI, nullptr,
+ MSSAU.hasValue() ? MSSAU.getPointer() : nullptr);
}
return Changed;
}
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 5d00fa56e888..35ba4e2b4032 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -1117,7 +1117,7 @@ bool LoopIdiomRecognize::processLoopStridedStore(
BasicBlock *Preheader = CurLoop->getLoopPreheader();
IRBuilder<> Builder(Preheader->getTerminator());
SCEVExpander Expander(*SE, *DL, "loop-idiom");
- SCEVExpanderCleaner ExpCleaner(Expander, *DT);
+ SCEVExpanderCleaner ExpCleaner(Expander);
Type *DestInt8PtrTy = Builder.getInt8PtrTy(DestAS);
Type *IntIdxTy = DL->getIndexType(DestPtr->getType());
@@ -1328,7 +1328,7 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
IRBuilder<> Builder(Preheader->getTerminator());
SCEVExpander Expander(*SE, *DL, "loop-idiom");
- SCEVExpanderCleaner ExpCleaner(Expander, *DT);
+ SCEVExpanderCleaner ExpCleaner(Expander);
bool Changed = false;
const SCEV *StrStart = StoreEv->getStart();
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopInterchange.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
index 9f605b4ac4ad..c2b065c4eb31 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
@@ -292,33 +292,6 @@ static LoopVector populateWorklist(Loop &L) {
return LoopList;
}
-static PHINode *getInductionVariable(Loop *L, ScalarEvolution *SE) {
- PHINode *InnerIndexVar = L->getCanonicalInductionVariable();
- if (InnerIndexVar)
- return InnerIndexVar;
- if (L->getLoopLatch() == nullptr || L->getLoopPredecessor() == nullptr)
- return nullptr;
- for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
- PHINode *PhiVar = cast<PHINode>(I);
- Type *PhiTy = PhiVar->getType();
- if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() &&
- !PhiTy->isPointerTy())
- return nullptr;
- const SCEVAddRecExpr *AddRec =
- dyn_cast<SCEVAddRecExpr>(SE->getSCEV(PhiVar));
- if (!AddRec || !AddRec->isAffine())
- continue;
- const SCEV *Step = AddRec->getStepRecurrence(*SE);
- if (!isa<SCEVConstant>(Step))
- continue;
- // Found the induction variable.
- // FIXME: Handle loops with more than one induction variable. Note that,
- // currently, legality makes sure we have only one induction variable.
- return PhiVar;
- }
- return nullptr;
-}
-
namespace {
/// LoopInterchangeLegality checks if it is legal to interchange the loop.
@@ -332,9 +305,13 @@ public:
bool canInterchangeLoops(unsigned InnerLoopId, unsigned OuterLoopId,
CharMatrix &DepMatrix);
+ /// Discover induction PHIs in the header of \p L. Induction
+ /// PHIs are added to \p Inductions.
+ bool findInductions(Loop *L, SmallVectorImpl<PHINode *> &Inductions);
+
/// Check if the loop structure is understood. We do not handle triangular
/// loops for now.
- bool isLoopStructureUnderstood(PHINode *InnerInductionVar);
+ bool isLoopStructureUnderstood();
bool currentLimitations();
@@ -342,6 +319,10 @@ public:
return OuterInnerReductions;
}
+ const SmallVectorImpl<PHINode *> &getInnerLoopInductions() const {
+ return InnerLoopInductions;
+ }
+
private:
bool tightlyNested(Loop *Outer, Loop *Inner);
bool containsUnsafeInstructions(BasicBlock *BB);
@@ -365,6 +346,9 @@ private:
/// Set of reduction PHIs taking part of a reduction across the inner and
/// outer loop.
SmallPtrSet<PHINode *, 4> OuterInnerReductions;
+
+ /// Set of inner loop induction PHIs
+ SmallVector<PHINode *, 8> InnerLoopInductions;
};
/// LoopInterchangeProfitability checks if it is profitable to interchange the
@@ -635,25 +619,26 @@ bool LoopInterchangeLegality::tightlyNested(Loop *OuterLoop, Loop *InnerLoop) {
return true;
}
-bool LoopInterchangeLegality::isLoopStructureUnderstood(
- PHINode *InnerInduction) {
- unsigned Num = InnerInduction->getNumOperands();
+bool LoopInterchangeLegality::isLoopStructureUnderstood() {
BasicBlock *InnerLoopPreheader = InnerLoop->getLoopPreheader();
- for (unsigned i = 0; i < Num; ++i) {
- Value *Val = InnerInduction->getOperand(i);
- if (isa<Constant>(Val))
- continue;
- Instruction *I = dyn_cast<Instruction>(Val);
- if (!I)
- return false;
- // TODO: Handle triangular loops.
- // e.g. for(int i=0;i<N;i++)
- // for(int j=i;j<N;j++)
- unsigned IncomBlockIndx = PHINode::getIncomingValueNumForOperand(i);
- if (InnerInduction->getIncomingBlock(IncomBlockIndx) ==
- InnerLoopPreheader &&
- !OuterLoop->isLoopInvariant(I)) {
- return false;
+ for (PHINode *InnerInduction : InnerLoopInductions) {
+ unsigned Num = InnerInduction->getNumOperands();
+ for (unsigned i = 0; i < Num; ++i) {
+ Value *Val = InnerInduction->getOperand(i);
+ if (isa<Constant>(Val))
+ continue;
+ Instruction *I = dyn_cast<Instruction>(Val);
+ if (!I)
+ return false;
+ // TODO: Handle triangular loops.
+ // e.g. for(int i=0;i<N;i++)
+ // for(int j=i;j<N;j++)
+ unsigned IncomBlockIndx = PHINode::getIncomingValueNumForOperand(i);
+ if (InnerInduction->getIncomingBlock(IncomBlockIndx) ==
+ InnerLoopPreheader &&
+ !OuterLoop->isLoopInvariant(I)) {
+ return false;
+ }
}
}
@@ -682,27 +667,34 @@ bool LoopInterchangeLegality::isLoopStructureUnderstood(
// Return true if V is InnerInduction, or a cast from
// InnerInduction, or a binary operator that involves
// InnerInduction and a constant.
- std::function<bool(Value *)> IsPathToIndVar;
- IsPathToIndVar = [&InnerInduction, &IsPathToIndVar](Value *V) -> bool {
- if (V == InnerInduction)
+ std::function<bool(Value *)> IsPathToInnerIndVar;
+ IsPathToInnerIndVar = [this, &IsPathToInnerIndVar](const Value *V) -> bool {
+ if (llvm::is_contained(InnerLoopInductions, V))
return true;
if (isa<Constant>(V))
return true;
- Instruction *I = dyn_cast<Instruction>(V);
+ const Instruction *I = dyn_cast<Instruction>(V);
if (!I)
return false;
if (isa<CastInst>(I))
- return IsPathToIndVar(I->getOperand(0));
+ return IsPathToInnerIndVar(I->getOperand(0));
if (isa<BinaryOperator>(I))
- return IsPathToIndVar(I->getOperand(0)) &&
- IsPathToIndVar(I->getOperand(1));
+ return IsPathToInnerIndVar(I->getOperand(0)) &&
+ IsPathToInnerIndVar(I->getOperand(1));
return false;
};
- if (IsPathToIndVar(Op0) && !isa<Constant>(Op0)) {
+ // In case of multiple inner loop indvars, it is okay if LHS and RHS
+ // are both inner indvar related variables.
+ if (IsPathToInnerIndVar(Op0) && IsPathToInnerIndVar(Op1))
+ return true;
+
+ // Otherwise we check if the cmp instruction compares an inner indvar
+ // related variable (Left) with an outer loop invariant (Right).
+ if (IsPathToInnerIndVar(Op0) && !isa<Constant>(Op0)) {
Left = Op0;
Right = Op1;
- } else if (IsPathToIndVar(Op1) && !isa<Constant>(Op1)) {
+ } else if (IsPathToInnerIndVar(Op1) && !isa<Constant>(Op1)) {
Left = Op1;
Right = Op0;
}
@@ -793,7 +785,6 @@ bool LoopInterchangeLegality::findInductionAndReductions(
// This function indicates the current limitations in the transform as a result
// of which we do not proceed.
bool LoopInterchangeLegality::currentLimitations() {
- BasicBlock *InnerLoopPreHeader = InnerLoop->getLoopPreheader();
BasicBlock *InnerLoopLatch = InnerLoop->getLoopLatch();
// transform currently expects the loop latches to also be the exiting
@@ -815,7 +806,6 @@ bool LoopInterchangeLegality::currentLimitations() {
return true;
}
- PHINode *InnerInductionVar;
SmallVector<PHINode *, 8> Inductions;
if (!findInductionAndReductions(OuterLoop, Inductions, InnerLoop)) {
LLVM_DEBUG(
@@ -831,20 +821,6 @@ bool LoopInterchangeLegality::currentLimitations() {
return true;
}
- // TODO: Currently we handle only loops with 1 induction variable.
- if (Inductions.size() != 1) {
- LLVM_DEBUG(dbgs() << "Loops with more than 1 induction variables are not "
- << "supported currently.\n");
- ORE->emit([&]() {
- return OptimizationRemarkMissed(DEBUG_TYPE, "MultiIndutionOuter",
- OuterLoop->getStartLoc(),
- OuterLoop->getHeader())
- << "Only outer loops with 1 induction variable can be "
- "interchanged currently.";
- });
- return true;
- }
-
Inductions.clear();
if (!findInductionAndReductions(InnerLoop, Inductions, nullptr)) {
LLVM_DEBUG(
@@ -860,24 +836,8 @@ bool LoopInterchangeLegality::currentLimitations() {
return true;
}
- // TODO: Currently we handle only loops with 1 induction variable.
- if (Inductions.size() != 1) {
- LLVM_DEBUG(
- dbgs() << "We currently only support loops with 1 induction variable."
- << "Failed to interchange due to current limitation\n");
- ORE->emit([&]() {
- return OptimizationRemarkMissed(DEBUG_TYPE, "MultiInductionInner",
- InnerLoop->getStartLoc(),
- InnerLoop->getHeader())
- << "Only inner loops with 1 induction variable can be "
- "interchanged currently.";
- });
- return true;
- }
- InnerInductionVar = Inductions.pop_back_val();
-
// TODO: Triangular loops are not handled for now.
- if (!isLoopStructureUnderstood(InnerInductionVar)) {
+ if (!isLoopStructureUnderstood()) {
LLVM_DEBUG(dbgs() << "Loop structure not understood by pass\n");
ORE->emit([&]() {
return OptimizationRemarkMissed(DEBUG_TYPE, "UnsupportedStructureInner",
@@ -888,79 +848,17 @@ bool LoopInterchangeLegality::currentLimitations() {
return true;
}
- // TODO: Current limitation: Since we split the inner loop latch at the point
- // were induction variable is incremented (induction.next); We cannot have
- // more than 1 user of induction.next since it would result in broken code
- // after split.
- // e.g.
- // for(i=0;i<N;i++) {
- // for(j = 0;j<M;j++) {
- // A[j+1][i+2] = A[j][i]+k;
- // }
- // }
- Instruction *InnerIndexVarInc = nullptr;
- if (InnerInductionVar->getIncomingBlock(0) == InnerLoopPreHeader)
- InnerIndexVarInc =
- dyn_cast<Instruction>(InnerInductionVar->getIncomingValue(1));
- else
- InnerIndexVarInc =
- dyn_cast<Instruction>(InnerInductionVar->getIncomingValue(0));
-
- if (!InnerIndexVarInc) {
- LLVM_DEBUG(
- dbgs() << "Did not find an instruction to increment the induction "
- << "variable.\n");
- ORE->emit([&]() {
- return OptimizationRemarkMissed(DEBUG_TYPE, "NoIncrementInInner",
- InnerLoop->getStartLoc(),
- InnerLoop->getHeader())
- << "The inner loop does not increment the induction variable.";
- });
- return true;
- }
-
- // Since we split the inner loop latch on this induction variable. Make sure
- // we do not have any instruction between the induction variable and branch
- // instruction.
-
- bool FoundInduction = false;
- for (const Instruction &I :
- llvm::reverse(InnerLoopLatch->instructionsWithoutDebug())) {
- if (isa<BranchInst>(I) || isa<CmpInst>(I) || isa<TruncInst>(I) ||
- isa<ZExtInst>(I))
- continue;
-
- // We found an instruction. If this is not induction variable then it is not
- // safe to split this loop latch.
- if (!I.isIdenticalTo(InnerIndexVarInc)) {
- LLVM_DEBUG(dbgs() << "Found unsupported instructions between induction "
- << "variable increment and branch.\n");
- ORE->emit([&]() {
- return OptimizationRemarkMissed(
- DEBUG_TYPE, "UnsupportedInsBetweenInduction",
- InnerLoop->getStartLoc(), InnerLoop->getHeader())
- << "Found unsupported instruction between induction variable "
- "increment and branch.";
- });
- return true;
- }
+ return false;
+}
- FoundInduction = true;
- break;
- }
- // The loop latch ended and we didn't find the induction variable return as
- // current limitation.
- if (!FoundInduction) {
- LLVM_DEBUG(dbgs() << "Did not find the induction variable.\n");
- ORE->emit([&]() {
- return OptimizationRemarkMissed(DEBUG_TYPE, "NoIndutionVariable",
- InnerLoop->getStartLoc(),
- InnerLoop->getHeader())
- << "Did not find the induction variable.";
- });
- return true;
+bool LoopInterchangeLegality::findInductions(
+ Loop *L, SmallVectorImpl<PHINode *> &Inductions) {
+ for (PHINode &PHI : L->getHeader()->phis()) {
+ InductionDescriptor ID;
+ if (InductionDescriptor::isInductionPHI(&PHI, L, SE, ID))
+ Inductions.push_back(&PHI);
}
- return false;
+ return !Inductions.empty();
}
// We currently only support LCSSA PHI nodes in the inner loop exit, if their
@@ -1076,7 +974,7 @@ bool LoopInterchangeLegality::canInterchangeLoops(unsigned InnerLoopId,
for (Instruction &I : BB->instructionsWithoutDebug())
if (CallInst *CI = dyn_cast<CallInst>(&I)) {
// readnone functions do not prevent interchanging.
- if (CI->doesNotReadMemory())
+ if (CI->onlyWritesMemory())
continue;
LLVM_DEBUG(
dbgs() << "Loops with call instructions cannot be interchanged "
@@ -1091,6 +989,11 @@ bool LoopInterchangeLegality::canInterchangeLoops(unsigned InnerLoopId,
return false;
}
+ if (!findInductions(InnerLoop, InnerLoopInductions)) {
+ LLVM_DEBUG(dbgs() << "Cound not find inner loop induction variables.\n");
+ return false;
+ }
+
if (!areInnerLoopLatchPHIsSupported(OuterLoop, InnerLoop)) {
LLVM_DEBUG(dbgs() << "Found unsupported PHI nodes in inner loop latch.\n");
ORE->emit([&]() {
@@ -1347,25 +1250,25 @@ void LoopInterchangeTransform::restructureLoops(
bool LoopInterchangeTransform::transform() {
bool Transformed = false;
- Instruction *InnerIndexVar;
if (InnerLoop->getSubLoops().empty()) {
BasicBlock *InnerLoopPreHeader = InnerLoop->getLoopPreheader();
LLVM_DEBUG(dbgs() << "Splitting the inner loop latch\n");
- PHINode *InductionPHI = getInductionVariable(InnerLoop, SE);
- if (!InductionPHI) {
+ auto &InductionPHIs = LIL.getInnerLoopInductions();
+ if (InductionPHIs.empty()) {
LLVM_DEBUG(dbgs() << "Failed to find the point to split loop latch \n");
return false;
}
- if (InductionPHI->getIncomingBlock(0) == InnerLoopPreHeader)
- InnerIndexVar = dyn_cast<Instruction>(InductionPHI->getIncomingValue(1));
- else
- InnerIndexVar = dyn_cast<Instruction>(InductionPHI->getIncomingValue(0));
-
- // Ensure that InductionPHI is the first Phi node.
- if (&InductionPHI->getParent()->front() != InductionPHI)
- InductionPHI->moveBefore(&InductionPHI->getParent()->front());
+ SmallVector<Instruction *, 8> InnerIndexVarList;
+ for (PHINode *CurInductionPHI : InductionPHIs) {
+ if (CurInductionPHI->getIncomingBlock(0) == InnerLoopPreHeader)
+ InnerIndexVarList.push_back(
+ dyn_cast<Instruction>(CurInductionPHI->getIncomingValue(1)));
+ else
+ InnerIndexVarList.push_back(
+ dyn_cast<Instruction>(CurInductionPHI->getIncomingValue(0)));
+ }
// Create a new latch block for the inner loop. We split at the
// current latch's terminator and then move the condition and all
@@ -1377,7 +1280,7 @@ bool LoopInterchangeTransform::transform() {
SmallSetVector<Instruction *, 4> WorkList;
unsigned i = 0;
- auto MoveInstructions = [&i, &WorkList, this, InductionPHI, NewLatch]() {
+ auto MoveInstructions = [&i, &WorkList, this, &InductionPHIs, NewLatch]() {
for (; i < WorkList.size(); i++) {
// Duplicate instruction and move it to the new latch. Update uses that
// have been moved.
@@ -1389,7 +1292,8 @@ bool LoopInterchangeTransform::transform() {
for (Use &U : llvm::make_early_inc_range(WorkList[i]->uses())) {
Instruction *UserI = cast<Instruction>(U.getUser());
if (!InnerLoop->contains(UserI->getParent()) ||
- UserI->getParent() == NewLatch || UserI == InductionPHI)
+ UserI->getParent() == NewLatch ||
+ llvm::is_contained(InductionPHIs, UserI))
U.set(NewI);
}
// Add operands of moved instruction to the worklist, except if they are
@@ -1398,7 +1302,7 @@ bool LoopInterchangeTransform::transform() {
Instruction *OpI = dyn_cast<Instruction>(Op);
if (!OpI ||
this->LI->getLoopFor(OpI->getParent()) != this->InnerLoop ||
- OpI == InductionPHI)
+ llvm::is_contained(InductionPHIs, OpI))
continue;
WorkList.insert(OpI);
}
@@ -1412,7 +1316,8 @@ bool LoopInterchangeTransform::transform() {
if (CondI)
WorkList.insert(CondI);
MoveInstructions();
- WorkList.insert(cast<Instruction>(InnerIndexVar));
+ for (Instruction *InnerIndexVar : InnerIndexVarList)
+ WorkList.insert(cast<Instruction>(InnerIndexVar));
MoveInstructions();
// Splits the inner loops phi nodes out into a separate basic block.
@@ -1685,7 +1590,6 @@ bool LoopInterchangeTransform::adjustLoopBranches() {
updateSuccessor(InnerLoopLatchPredecessorBI, InnerLoopLatch,
InnerLoopLatchSuccessor, DTUpdates);
-
if (OuterLoopLatchBI->getSuccessor(0) == OuterLoopHeader)
OuterLoopLatchSuccessor = OuterLoopLatchBI->getSuccessor(1);
else
@@ -1712,19 +1616,22 @@ bool LoopInterchangeTransform::adjustLoopBranches() {
SmallVector<PHINode *, 4> InnerLoopPHIs, OuterLoopPHIs;
for (PHINode &PHI : InnerLoopHeader->phis())
if (OuterInnerReductions.contains(&PHI))
- InnerLoopPHIs.push_back(cast<PHINode>(&PHI));
+ InnerLoopPHIs.push_back(&PHI);
+
for (PHINode &PHI : OuterLoopHeader->phis())
if (OuterInnerReductions.contains(&PHI))
- OuterLoopPHIs.push_back(cast<PHINode>(&PHI));
+ OuterLoopPHIs.push_back(&PHI);
// Now move the remaining reduction PHIs from outer to inner loop header and
// vice versa. The PHI nodes must be part of a reduction across the inner and
// outer loop and all that remains to do is updating the incoming blocks.
for (PHINode *PHI : OuterLoopPHIs) {
+ LLVM_DEBUG(dbgs() << "Outer loop reduction PHIs:\n"; PHI->dump(););
PHI->moveBefore(InnerLoopHeader->getFirstNonPHI());
assert(OuterInnerReductions.count(PHI) && "Expected a reduction PHI node");
}
for (PHINode *PHI : InnerLoopPHIs) {
+ LLVM_DEBUG(dbgs() << "Inner loop reduction PHIs:\n"; PHI->dump(););
PHI->moveBefore(OuterLoopHeader->getFirstNonPHI());
assert(OuterInnerReductions.count(PHI) && "Expected a reduction PHI node");
}
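
A hypothetical example of what the relaxed legality checks above allow; the code is illustrative only and not taken from the patch. The inner loop carries two induction PHIs (j and k), which findInductions() can now collect, where the removed single-induction limitation used to reject the nest outright.

void transpose_add(int *dst, const int *src, int n, int m) {
  for (int i = 0; i < n; ++i)
    for (int j = 0, k = 0; j < m; ++j, k += 2) // two inner induction variables
      dst[j * n + i] = src[i * m + j] + k;
}

The latch compare j < m still tests an inner-indvar-related value against an outer-loop-invariant bound, which is exactly what the renamed IsPathToInnerIndVar lambda classifies.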
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 798af48c2337..654f0d2a03a8 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -3486,6 +3486,31 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
// Don't bother if the instruction is in a BB which ends in an EHPad.
if (UseBB->getTerminator()->isEHPad())
continue;
+
+ // Ignore cases in which the currently-examined value could come from
+ // a basic block terminated with an EHPad. This checks all incoming
+ // blocks of the phi node since it is possible that the same incoming
+ // value comes from multiple basic blocks, only some of which may end
+ // in an EHPad. If any of them do, a subsequent rewrite attempt by this
+ // pass would try to insert instructions into an EHPad, hitting an
+ // assertion.
+ if (isa<PHINode>(UserInst)) {
+ const auto *PhiNode = cast<PHINode>(UserInst);
+ bool HasIncompatibleEHPTerminatedBlock = false;
+ llvm::Value *ExpectedValue = U;
+ for (unsigned int I = 0; I < PhiNode->getNumIncomingValues(); I++) {
+ if (PhiNode->getIncomingValue(I) == ExpectedValue) {
+ if (PhiNode->getIncomingBlock(I)->getTerminator()->isEHPad()) {
+ HasIncompatibleEHPTerminatedBlock = true;
+ break;
+ }
+ }
+ }
+ if (HasIncompatibleEHPTerminatedBlock) {
+ continue;
+ }
+ }
+
// Don't bother rewriting PHIs in catchswitch blocks.
if (isa<CatchSwitchInst>(UserInst->getParent()->getTerminator()))
continue;
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
index 893928fb0560..022d9c7abc8c 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -1142,7 +1142,7 @@ static LoopUnrollResult tryToUnrollLoop(
// automatic unrolling from interfering with the user requested
// transformation.
Loop *ParentL = L->getParentLoop();
- if (ParentL != NULL &&
+ if (ParentL != nullptr &&
hasUnrollAndJamTransformation(ParentL) == TM_ForcedByUser &&
hasUnrollTransformation(L) != TM_ForcedByUser) {
LLVM_DEBUG(dbgs() << "Not unrolling loop since parent loop has"
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp
index 1c186e9a0488..a7eb60b5e032 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp
@@ -64,7 +64,7 @@ getBranchWeight(Intrinsic::ID IntrinsicID, CallInst *CI, int BranchCount) {
// __builtin_expect_with_probability
assert(CI->getNumOperands() >= 3 &&
"expect with probability must have 3 arguments");
- ConstantFP *Confidence = dyn_cast<ConstantFP>(CI->getArgOperand(2));
+ auto *Confidence = cast<ConstantFP>(CI->getArgOperand(2));
double TrueProb = Confidence->getValueAPF().convertToDouble();
assert((TrueProb >= 0.0 && TrueProb <= 1.0) &&
"probability value must be in the range [0.0, 1.0]");
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
index 4e4097e13271..8f1d0181ee5b 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
@@ -220,9 +220,7 @@ class LowerMatrixIntrinsics {
bool IsColumnMajor = true;
public:
- MatrixTy()
- : Vectors(),
- IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {}
+ MatrixTy() : IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {}
MatrixTy(ArrayRef<Value *> Vectors)
: Vectors(Vectors.begin(), Vectors.end()),
IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {}
@@ -1393,7 +1391,8 @@ public:
// reloads necessary.
unsigned Op0Regs = (R + VF - 1) / VF * M;
unsigned Op1Regs = (M + VF - 1) / VF * C;
- return Op0Regs + Op1Regs > TTI.getNumberOfRegisters(true);
+ return Op0Regs + Op1Regs >
+ TTI.getNumberOfRegisters(TTI.getRegisterClassForType(true));
}
MatrixTy getZeroMatrix(Type *EltType, unsigned R, unsigned C) {
@@ -1832,7 +1831,7 @@ public:
const DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared,
const SmallSetVector<Value *, 32> &ExprsInSubprogram,
Value *Leaf)
- : Str(), Stream(Str), DL(DL), Inst2Matrix(Inst2Matrix), Shared(Shared),
+ : Stream(Str), DL(DL), Inst2Matrix(Inst2Matrix), Shared(Shared),
ExprsInSubprogram(ExprsInSubprogram), Leaf(Leaf) {}
void indent(unsigned N) {
@@ -1895,7 +1894,7 @@ public:
write(Name);
return;
}
- IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
+ auto *II = cast<IntrinsicInst>(CI);
write(Intrinsic::getBaseName(II->getIntrinsicID())
.drop_front(StringRef("llvm.matrix.").size()));
write(".");
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 67335a45fb58..6698db26626b 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -20,6 +20,7 @@
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
@@ -171,7 +172,7 @@ public:
bool empty() const { return Ranges.empty(); }
void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
- if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
+ if (auto *SI = dyn_cast<StoreInst>(Inst))
addStore(OffsetFromFirst, SI);
else
addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
@@ -312,15 +313,21 @@ INITIALIZE_PASS_END(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
static bool mayBeVisibleThroughUnwinding(Value *V, Instruction *Start,
Instruction *End) {
assert(Start->getParent() == End->getParent() && "Must be in same block");
- if (!Start->getFunction()->doesNotThrow() &&
- !isa<AllocaInst>(getUnderlyingObject(V))) {
- for (const Instruction &I :
- make_range(Start->getIterator(), End->getIterator())) {
- if (I.mayThrow())
- return true;
- }
- }
- return false;
+ // Function can't unwind, so it also can't be visible through unwinding.
+ if (Start->getFunction()->doesNotThrow())
+ return false;
+
+ // Object is not visible on unwind.
+ // TODO: Support RequiresNoCaptureBeforeUnwind case.
+ bool RequiresNoCaptureBeforeUnwind;
+ if (isNotVisibleOnUnwind(getUnderlyingObject(V),
+ RequiresNoCaptureBeforeUnwind) &&
+ !RequiresNoCaptureBeforeUnwind)
+ return false;
+
+ // Check whether there are any unwinding instructions in the range.
+ return any_of(make_range(Start->getIterator(), End->getIterator()),
+ [](const Instruction &I) { return I.mayThrow(); });
}
void MemCpyOptPass::eraseInstruction(Instruction *I) {
@@ -364,7 +371,7 @@ Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
const DataLayout &DL = StartInst->getModule()->getDataLayout();
// We can't track scalable types
- if (StoreInst *SI = dyn_cast<StoreInst>(StartInst))
+ if (auto *SI = dyn_cast<StoreInst>(StartInst))
if (DL.getTypeStoreSize(SI->getOperand(0)->getType()).isScalable())
return nullptr;
@@ -410,7 +417,7 @@ Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
continue;
}
- if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
+ if (auto *NextStore = dyn_cast<StoreInst>(BI)) {
// If this is a store, see if we can merge it in.
if (!NextStore->isSimple()) break;
@@ -440,7 +447,7 @@ Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
Ranges.addStore(*Offset, NextStore);
} else {
- MemSetInst *MSI = cast<MemSetInst>(BI);
+ auto *MSI = cast<MemSetInst>(BI);
if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
!isa<ConstantInt>(MSI->getLength()))
@@ -661,7 +668,7 @@ bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
return false;
// Load to store forwarding can be interpreted as memcpy.
- if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
+ if (auto *LI = dyn_cast<LoadInst>(StoredVal)) {
if (LI->isSimple() && LI->hasOneUse() &&
LI->getParent() == SI->getParent()) {
@@ -871,7 +878,7 @@ bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
return false;
// Require that src be an alloca. This simplifies the reasoning considerably.
- AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
+ auto *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
if (!srcAlloca)
return false;
@@ -890,8 +897,10 @@ bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
// trap. Otherwise the transform is invalid since it might cause a trap
// to occur earlier than it otherwise would.
if (!isDereferenceableAndAlignedPointer(cpyDest, Align(1), APInt(64, cpySize),
- DL, C, DT))
+ DL, C, DT)) {
+ LLVM_DEBUG(dbgs() << "Call Slot: Dest pointer not dereferenceable\n");
return false;
+ }
// Make sure that nothing can observe cpyDest being written early. There are
// a number of cases to consider:
@@ -907,8 +916,10 @@ bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
// guaranteed to be executed if C is. As it is a non-atomic access, it
// renders accesses from other threads undefined.
// TODO: This is currently not checked.
- if (mayBeVisibleThroughUnwinding(cpyDest, C, cpyStore))
+ if (mayBeVisibleThroughUnwinding(cpyDest, C, cpyStore)) {
+ LLVM_DEBUG(dbgs() << "Call Slot: Dest may be visible through unwinding");
return false;
+ }
// Check that dest points to memory that is at least as aligned as src.
Align srcAlign = srcAlloca->getAlign();
@@ -930,14 +941,14 @@ bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
append_range(srcUseList, U->users());
continue;
}
- if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
+ if (const auto *G = dyn_cast<GetElementPtrInst>(U)) {
if (!G->hasAllZeroIndices())
return false;
append_range(srcUseList, U->users());
continue;
}
- if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
+ if (const auto *IT = dyn_cast<IntrinsicInst>(U))
if (IT->isLifetimeStartOrEnd())
continue;
@@ -945,12 +956,57 @@ bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
return false;
}
- // Check that src isn't captured by the called function since the
- // transformation can cause aliasing issues in that case.
- for (unsigned ArgI = 0, E = C->arg_size(); ArgI != E; ++ArgI)
- if (C->getArgOperand(ArgI) == cpySrc && !C->doesNotCapture(ArgI))
+ // Check whether src is captured by the called function, in which case there
+ // may be further indirect uses of src.
+ bool SrcIsCaptured = any_of(C->args(), [&](Use &U) {
+ return U->stripPointerCasts() == cpySrc &&
+ !C->doesNotCapture(C->getArgOperandNo(&U));
+ });
+
+ // If src is captured, then check whether there are any potential uses of
+ // src through the captured pointer before the lifetime of src ends, either
+ // due to a lifetime.end or a return from the function.
+ if (SrcIsCaptured) {
+ // Check that dest is not captured before/at the call. We have already
+ // checked that src is not captured before it. If either had been captured,
+ // then the call might be comparing the argument against the captured dest
+ // or src pointer.
+ Value *DestObj = getUnderlyingObject(cpyDest);
+ if (!isIdentifiedFunctionLocal(DestObj) ||
+ PointerMayBeCapturedBefore(DestObj, /* ReturnCaptures */ true,
+ /* StoreCaptures */ true, C, DT,
+ /* IncludeI */ true))
return false;
+ MemoryLocation SrcLoc =
+ MemoryLocation(srcAlloca, LocationSize::precise(srcSize));
+ for (Instruction &I :
+ make_range(++C->getIterator(), C->getParent()->end())) {
+ // Lifetime of srcAlloca ends at lifetime.end.
+ if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
+ if (II->getIntrinsicID() == Intrinsic::lifetime_end &&
+ II->getArgOperand(1)->stripPointerCasts() == srcAlloca &&
+ cast<ConstantInt>(II->getArgOperand(0))->uge(srcSize))
+ break;
+ }
+
+ // Lifetime of srcAlloca ends at return.
+ if (isa<ReturnInst>(&I))
+ break;
+
+ // Ignore the direct read of src in the load.
+ if (&I == cpyLoad)
+ continue;
+
+ // Check whether this instruction may mod/ref src through the captured
+ // pointer (we have already checked any direct mod/refs in the loop above).
+ // Also bail if we hit a terminator, as we don't want to scan into other
+ // blocks.
+ if (isModOrRefSet(AA->getModRefInfo(&I, SrcLoc)) || I.isTerminator())
+ return false;
+ }
+ }
+
// Since we're changing the parameter to the callsite, we need to make sure
// that what would be the new parameter dominates the callsite.
if (!DT->dominates(cpyDest, C)) {
@@ -1018,6 +1074,8 @@ bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
LLVMContext::MD_invariant_group,
LLVMContext::MD_access_group};
combineMetadata(C, cpyLoad, KnownIDs, true);
+ if (cpyLoad != cpyStore)
+ combineMetadata(C, cpyStore, KnownIDs, true);
++NumCallSlot;
return true;
@@ -1043,8 +1101,8 @@ bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
// Second, the length of the memcpy's must be the same, or the preceding one
// must be larger than the following one.
if (MDep->getLength() != M->getLength()) {
- ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
- ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
+ auto *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
+ auto *MLen = dyn_cast<ConstantInt>(M->getLength());
if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
return false;
}
@@ -1163,7 +1221,7 @@ bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
const unsigned DestAlign =
std::max(MemSet->getDestAlignment(), MemCpy->getDestAlignment());
if (DestAlign > 1)
- if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
+ if (auto *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);
IRBuilder<> Builder(MemCpy);
@@ -1211,12 +1269,11 @@ static bool hasUndefContents(MemorySSA *MSSA, AliasAnalysis *AA, Value *V,
if (MSSA->isLiveOnEntryDef(Def))
return isa<AllocaInst>(getUnderlyingObject(V));
- if (IntrinsicInst *II =
- dyn_cast_or_null<IntrinsicInst>(Def->getMemoryInst())) {
+ if (auto *II = dyn_cast_or_null<IntrinsicInst>(Def->getMemoryInst())) {
if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
- ConstantInt *LTSize = cast<ConstantInt>(II->getArgOperand(0));
+ auto *LTSize = cast<ConstantInt>(II->getArgOperand(0));
- if (ConstantInt *CSize = dyn_cast<ConstantInt>(Size)) {
+ if (auto *CSize = dyn_cast<ConstantInt>(Size)) {
if (AA->isMustAlias(V, II->getArgOperand(1)) &&
LTSize->getZExtValue() >= CSize->getZExtValue())
return true;
@@ -1226,12 +1283,14 @@ static bool hasUndefContents(MemorySSA *MSSA, AliasAnalysis *AA, Value *V,
// does) and we're querying a pointer based on that alloca, then we know
// the memory is definitely undef, regardless of how exactly we alias.
// The size also doesn't matter, as an out-of-bounds access would be UB.
- AllocaInst *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(V));
- if (getUnderlyingObject(II->getArgOperand(1)) == Alloca) {
- const DataLayout &DL = Alloca->getModule()->getDataLayout();
- if (Optional<TypeSize> AllocaSize = Alloca->getAllocationSizeInBits(DL))
- if (*AllocaSize == LTSize->getValue() * 8)
- return true;
+ if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(V))) {
+ if (getUnderlyingObject(II->getArgOperand(1)) == Alloca) {
+ const DataLayout &DL = Alloca->getModule()->getDataLayout();
+ if (Optional<TypeSize> AllocaSize =
+ Alloca->getAllocationSizeInBits(DL))
+ if (*AllocaSize == LTSize->getValue() * 8)
+ return true;
+ }
}
}
}
@@ -1266,12 +1325,12 @@ bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
// Don't worry about sizes larger than i64.
// A known memset size is required.
- ConstantInt *CMemSetSize = dyn_cast<ConstantInt>(MemSetSize);
+ auto *CMemSetSize = dyn_cast<ConstantInt>(MemSetSize);
if (!CMemSetSize)
return false;
// A known memcpy size is also required.
- ConstantInt *CCopySize = dyn_cast<ConstantInt>(CopySize);
+ auto *CCopySize = dyn_cast<ConstantInt>(CopySize);
if (!CCopySize)
return false;
if (CCopySize->getZExtValue() > CMemSetSize->getZExtValue()) {
@@ -1323,7 +1382,7 @@ bool MemCpyOptPass::processMemCpy(MemCpyInst *M, BasicBlock::iterator &BBI) {
}
// If copying from a constant, try to turn the memcpy into a memset.
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
+ if (auto *GV = dyn_cast<GlobalVariable>(M->getSource()))
if (GV->isConstant() && GV->hasDefinitiveInitializer())
if (Value *ByteVal = isBytewiseValue(GV->getInitializer(),
M->getModule()->getDataLayout())) {
@@ -1370,7 +1429,7 @@ bool MemCpyOptPass::processMemCpy(MemCpyInst *M, BasicBlock::iterator &BBI) {
// d) memcpy from a just-memset'd source can be turned into memset.
if (auto *MD = dyn_cast<MemoryDef>(SrcClobber)) {
if (Instruction *MI = MD->getMemoryInst()) {
- if (ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength())) {
+ if (auto *CopySize = dyn_cast<ConstantInt>(M->getLength())) {
if (auto *C = dyn_cast<CallInst>(MI)) {
// The memcpy must post-dom the call. Limit to the same block for
// now. Additionally, we need to ensure that there are no accesses
@@ -1469,7 +1528,7 @@ bool MemCpyOptPass::processByValArgument(CallBase &CB, unsigned ArgNo) {
return false;
// The length of the memcpy must be larger than or equal to the size of the byval.
- ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
+ auto *C1 = dyn_cast<ConstantInt>(MDep->getLength());
if (!C1 || !TypeSize::isKnownGE(
TypeSize::getFixed(C1->getValue().getZExtValue()), ByValSize))
return false;
@@ -1540,13 +1599,13 @@ bool MemCpyOptPass::iterateOnFunction(Function &F) {
bool RepeatInstruction = false;
- if (StoreInst *SI = dyn_cast<StoreInst>(I))
+ if (auto *SI = dyn_cast<StoreInst>(I))
MadeChange |= processStore(SI, BI);
- else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
+ else if (auto *M = dyn_cast<MemSetInst>(I))
RepeatInstruction = processMemSet(M, BI);
- else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
+ else if (auto *M = dyn_cast<MemCpyInst>(I))
RepeatInstruction = processMemCpy(M, BI);
- else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
+ else if (auto *M = dyn_cast<MemMoveInst>(I))
RepeatInstruction = processMemMove(M);
else if (auto *CB = dyn_cast<CallBase>(I)) {
for (unsigned i = 0, e = CB->arg_size(); i != e; ++i)
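
To make the stricter capture checks above concrete, a hedged sketch of the call-slot pattern that performCallSlotOptzn rewrites; produce() is a hypothetical callee, not from the patch. The memcpy out of the local temporary may only be folded into a direct write to dest if the callee cannot still observe tmp after the copy and dest cannot be observed early, e.g. through unwinding.

#include <cstring>

void produce(int *out); // assumed to fill out[0..3]

void consume_into(int *dest) {
  int tmp[4];
  produce(tmp);                       // the call "C" in the code above
  std::memcpy(dest, tmp, sizeof tmp); // candidate to become produce(dest)
}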
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/NewGVN.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/NewGVN.cpp
index 10a8742940b1..2476e6c408b1 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/NewGVN.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/NewGVN.cpp
@@ -1198,9 +1198,10 @@ NewGVN::ExprResult NewGVN::createExpression(Instruction *I) const {
if (auto Simplified = checkExprResults(E, I, V))
return Simplified;
} else if (auto *GEPI = dyn_cast<GetElementPtrInst>(I)) {
- Value *V = SimplifyGEPInst(GEPI->getSourceElementType(),
- ArrayRef<Value *>(E->op_begin(), E->op_end()),
- GEPI->isInBounds(), SQ);
+ Value *V =
+ SimplifyGEPInst(GEPI->getSourceElementType(), *E->op_begin(),
+ makeArrayRef(std::next(E->op_begin()), E->op_end()),
+ GEPI->isInBounds(), SQ);
if (auto Simplified = checkExprResults(E, I, V))
return Simplified;
} else if (AllConstant) {
@@ -1322,11 +1323,11 @@ bool NewGVN::someEquivalentDominates(const Instruction *Inst,
Value *NewGVN::lookupOperandLeader(Value *V) const {
CongruenceClass *CC = ValueToClass.lookup(V);
if (CC) {
- // Everything in TOP is represented by undef, as it can be any value.
+ // Everything in TOP is represented by poison, as it can be any value.
// We do have to make sure we get the type right though, so we can't set the
- // RepLeader to undef.
+ // RepLeader to poison.
if (CC == TOPClass)
- return UndefValue::get(V->getType());
+ return PoisonValue::get(V->getType());
return CC->getStoredValue() ? CC->getStoredValue() : CC->getLeader();
}
@@ -1493,8 +1494,7 @@ NewGVN::performSymbolicLoadCoercion(Type *LoadType, Value *LoadPtr,
// undef value. This can happen when loading for a fresh allocation with no
// intervening stores, for example. Note that this is only true in the case
// that the result of the allocation is pointer equal to the load ptr.
- if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI) ||
- isAlignedAllocLikeFn(DepInst, TLI)) {
+ if (isa<AllocaInst>(DepInst)) {
return createConstantExpression(UndefValue::get(LoadType));
}
// If this load occurs either right after a lifetime begin,
@@ -1502,12 +1502,10 @@ NewGVN::performSymbolicLoadCoercion(Type *LoadType, Value *LoadPtr,
else if (auto *II = dyn_cast<IntrinsicInst>(DepInst)) {
if (II->getIntrinsicID() == Intrinsic::lifetime_start)
return createConstantExpression(UndefValue::get(LoadType));
- }
- // If this load follows a calloc (which zero initializes memory),
- // then the loaded value is zero
- else if (isCallocLikeFn(DepInst, TLI)) {
- return createConstantExpression(Constant::getNullValue(LoadType));
- }
+ } else if (isAllocationFn(DepInst, TLI))
+ if (auto *InitVal = getInitialValueOfAllocation(cast<CallBase>(DepInst),
+ TLI, LoadType))
+ return createConstantExpression(InitVal);
return nullptr;
}
@@ -1521,9 +1519,9 @@ const Expression *NewGVN::performSymbolicLoadEvaluation(Instruction *I) const {
return nullptr;
Value *LoadAddressLeader = lookupOperandLeader(LI->getPointerOperand());
- // Load of undef is undef.
+ // Load of undef is UB.
if (isa<UndefValue>(LoadAddressLeader))
- return createConstantExpression(UndefValue::get(LI->getType()));
+ return createConstantExpression(PoisonValue::get(LI->getType()));
MemoryAccess *OriginalAccess = getMemoryAccess(I);
MemoryAccess *DefiningAccess =
MSSAWalker->getClobberingMemoryAccess(OriginalAccess);
@@ -1531,9 +1529,9 @@ const Expression *NewGVN::performSymbolicLoadEvaluation(Instruction *I) const {
if (!MSSA->isLiveOnEntryDef(DefiningAccess)) {
if (auto *MD = dyn_cast<MemoryDef>(DefiningAccess)) {
Instruction *DefiningInst = MD->getMemoryInst();
- // If the defining instruction is not reachable, replace with undef.
+ // If the defining instruction is not reachable, replace with poison.
if (!ReachableBlocks.count(DefiningInst->getParent()))
- return createConstantExpression(UndefValue::get(LI->getType()));
+ return createConstantExpression(PoisonValue::get(LI->getType()));
// This will handle stores and memory insts. We only do it if the
// defining access has a different type, or it is a pointer produced by
// certain memory operations that cause the memory to have a fixed value
@@ -1722,8 +1720,12 @@ NewGVN::performSymbolicPHIEvaluation(ArrayRef<ValPair> PHIOps,
// We match the semantics of SimplifyPhiNode from InstructionSimplify here.
// See if all arguments are the same.
// We track if any were undef because they need special handling.
- bool HasUndef = false;
+ bool HasUndef = false, HasPoison = false;
auto Filtered = make_filter_range(E->operands(), [&](Value *Arg) {
+ if (isa<PoisonValue>(Arg)) {
+ HasPoison = true;
+ return false;
+ }
if (isa<UndefValue>(Arg)) {
HasUndef = true;
return false;
@@ -1732,8 +1734,14 @@ NewGVN::performSymbolicPHIEvaluation(ArrayRef<ValPair> PHIOps,
});
// If we are left with no operands, it's dead.
if (Filtered.empty()) {
- // If it has undef at this point, it means there are no-non-undef arguments,
- // and thus, the value of the phi node must be undef.
+ // If it has undef or poison at this point, it means there are no non-undef
+ // or non-poison arguments, and thus the value of the phi node must be undef or poison.
+ if (HasPoison && !HasUndef) {
+ LLVM_DEBUG(
+ dbgs() << "PHI Node " << *I
+ << " has no non-poison arguments, valuing it as poison\n");
+ return createConstantExpression(PoisonValue::get(I->getType()));
+ }
if (HasUndef) {
LLVM_DEBUG(
dbgs() << "PHI Node " << *I
@@ -1758,7 +1766,7 @@ NewGVN::performSymbolicPHIEvaluation(ArrayRef<ValPair> PHIOps,
// expression to say if one is equivalent to the other.
// We also special case undef, so that if we have an undef, we can't use the
// common value unless it dominates the phi block.
- if (HasUndef) {
+ if (HasPoison || HasUndef) {
// If we have undef and at least one other value, this is really a
// multivalued phi, and we need to know if it's cycle free in order to
// evaluate whether we can ignore the undef. The other parts of this are
@@ -2579,6 +2587,15 @@ bool NewGVN::OpIsSafeForPHIOfOpsHelper(
}
auto *OrigI = cast<Instruction>(V);
+ // When we hit an instruction that reads memory (load, call, etc), we must
+ // consider any store that may happen in the loop. For now, we assume the
+ // worst: there is a store in the loop that aliases with this read.
+ // The case where the load is outside the loop is already covered by the
+ // dominator check above.
+ // TODO: relax this condition
+ if (OrigI->mayReadFromMemory())
+ return false;
+
for (auto *Op : OrigI->operand_values()) {
if (!isa<Instruction>(Op))
continue;
@@ -2780,7 +2797,7 @@ NewGVN::makePossiblePHIOfOps(Instruction *I,
LLVM_DEBUG(dbgs() << "Skipping phi of ops operand for incoming block "
<< getBlockName(PredBB)
<< " because the block is unreachable\n");
- FoundVal = UndefValue::get(I->getType());
+ FoundVal = PoisonValue::get(I->getType());
RevisitOnReachabilityChange[PHIBlock].set(InstrToDFSNum(I));
}
@@ -3459,7 +3476,7 @@ bool NewGVN::runGVN() {
// Delete all instructions marked for deletion.
for (Instruction *ToErase : InstructionsToErase) {
if (!ToErase->use_empty())
- ToErase->replaceAllUsesWith(UndefValue::get(ToErase->getType()));
+ ToErase->replaceAllUsesWith(PoisonValue::get(ToErase->getType()));
assert(ToErase->getParent() &&
"BB containing ToErase deleted unexpectedly!");
@@ -3677,7 +3694,7 @@ void NewGVN::deleteInstructionsInBlock(BasicBlock *BB) {
for (BasicBlock::reverse_iterator I(StartPoint); I != BB->rend();) {
Instruction &Inst = *I++;
if (!Inst.use_empty())
- Inst.replaceAllUsesWith(UndefValue::get(Inst.getType()));
+ Inst.replaceAllUsesWith(PoisonValue::get(Inst.getType()));
if (isa<LandingPadInst>(Inst))
continue;
salvageKnowledge(&Inst, AC);
@@ -3687,7 +3704,7 @@ void NewGVN::deleteInstructionsInBlock(BasicBlock *BB) {
}
// Now insert something that simplifycfg will turn into an unreachable.
Type *Int8Ty = Type::getInt8Ty(BB->getContext());
- new StoreInst(UndefValue::get(Int8Ty),
+ new StoreInst(PoisonValue::get(Int8Ty),
Constant::getNullValue(Int8Ty->getPointerTo()),
BB->getTerminator());
}
@@ -3827,8 +3844,8 @@ bool NewGVN::eliminateInstructions(Function &F) {
LLVM_DEBUG(dbgs() << "Replacing incoming value of " << PHI
<< " for block "
<< getBlockName(PHI->getIncomingBlock(Operand))
- << " with undef due to it being unreachable\n");
- Operand.set(UndefValue::get(PHI->getType()));
+ << " with poison due to it being unreachable\n");
+ Operand.set(PoisonValue::get(PHI->getType()));
}
};
// Replace unreachable phi arguments.
@@ -4128,21 +4145,25 @@ bool NewGVN::eliminateInstructions(Function &F) {
unsigned int NewGVN::getRank(const Value *V) const {
// Prefer constants to undef to anything else
// Undef is a constant, have to check it first.
+ // Prefer poison to undef as it's less defined.
// Prefer smaller constants to constantexprs
+ // Note that the order here matters because of class inheritance
if (isa<ConstantExpr>(V))
- return 2;
- if (isa<UndefValue>(V))
+ return 3;
+ if (isa<PoisonValue>(V))
return 1;
+ if (isa<UndefValue>(V))
+ return 2;
if (isa<Constant>(V))
return 0;
- else if (auto *A = dyn_cast<Argument>(V))
- return 3 + A->getArgNo();
+ if (auto *A = dyn_cast<Argument>(V))
+ return 4 + A->getArgNo();
- // Need to shift the instruction DFS by number of arguments + 3 to account for
+ // Need to shift the instruction DFS by number of arguments + 5 to account for
// the constant and argument ranking above.
unsigned Result = InstrToDFSNum(V);
if (Result > 0)
- return 4 + NumFuncArgs + Result;
+ return 5 + NumFuncArgs + Result;
// Unreachable or something else, just return a really large number.
return ~0;
}
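
An illustrative source pattern for the getInitialValueOfAllocation() change in the NewGVN hunks above; whether it folds in practice depends on the recognized allocator and the TargetLibraryInfo, so treat this as a sketch. A load clobbered only by a calloc-like allocation, with no intervening store, evaluates to the allocator's known initial value, here zero.

#include <cstddef>
#include <cstdlib>

int first_element(std::size_t n) {
  int *p = static_cast<int *>(std::calloc(n, sizeof(int)));
  if (!p)
    return -1;
  int v = p[0]; // known to be 0: calloc zero-initializes the allocation
  std::free(p);
  return v;
}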
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp
index 44027ccd92ca..e0d0301c1ef6 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp
@@ -82,6 +82,7 @@ static bool optimizeSQRT(CallInst *Call, Function *CalledFunc,
// Add attribute "readnone" so that backend can use a native sqrt instruction
// for this call.
+ Call->removeFnAttr(Attribute::WriteOnly);
Call->addFnAttr(Attribute::ReadNone);
// Insert a FP compare instruction and use it as the CurrBB branch condition.
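
A rough source-level picture of the partially inlined sqrt, to motivate the attribute fix above; the guard shown is only one of the two forms the pass can emit, and the helper name is illustrative. The fast-path call is the one marked readnone, so a stale writeonly attribute would contradict it and has to be dropped first.

#include <cmath>

// Conceptually: try the fast (readnone) square root, and fall back to the
// libm call only for negative inputs, preserving errno semantics.
double partially_inlined_sqrt(double x) {
  if (x >= 0.0)
    return __builtin_sqrt(x); // stands in for the readnone fast-path call
  return std::sqrt(x);        // slow path keeps library semantics
}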
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
index e12eca0ed287..3da367341d2a 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
@@ -1367,13 +1367,13 @@ static AttributeList legalizeCallAttributes(LLVMContext &Ctx,
return AL;
// Remove the readonly, readnone, and statepoint function attributes.
- AttrBuilder FnAttrs = AL.getFnAttrs();
+ AttrBuilder FnAttrs(Ctx, AL.getFnAttrs());
for (auto Attr : FnAttrsToStrip)
FnAttrs.removeAttribute(Attr);
for (Attribute A : AL.getFnAttrs()) {
if (isStatepointDirectiveAttr(A))
- FnAttrs.remove(A);
+ FnAttrs.removeAttribute(A);
}
// Just skip parameter and return attributes for now
@@ -2643,10 +2643,10 @@ static bool insertParsePoints(Function &F, DominatorTree &DT,
// List of all parameter and return attributes which must be stripped when
// lowering from the abstract machine model. Note that we list attributes
// here which aren't valid as return attributes; that is okay.
-static AttrBuilder getParamAndReturnAttributesToRemove() {
- AttrBuilder R;
- R.addDereferenceableAttr(1);
- R.addDereferenceableOrNullAttr(1);
+static AttributeMask getParamAndReturnAttributesToRemove() {
+ AttributeMask R;
+ R.addAttribute(Attribute::Dereferenceable);
+ R.addAttribute(Attribute::DereferenceableOrNull);
R.addAttribute(Attribute::ReadNone);
R.addAttribute(Attribute::ReadOnly);
R.addAttribute(Attribute::WriteOnly);
@@ -2668,7 +2668,7 @@ static void stripNonValidAttributesFromPrototype(Function &F) {
return;
}
- AttrBuilder R = getParamAndReturnAttributesToRemove();
+ AttributeMask R = getParamAndReturnAttributesToRemove();
for (Argument &A : F.args())
if (isa<PointerType>(A.getType()))
F.removeParamAttrs(A.getArgNo(), R);
@@ -2742,7 +2742,7 @@ static void stripNonValidDataFromBody(Function &F) {
stripInvalidMetadataFromInstruction(I);
- AttrBuilder R = getParamAndReturnAttributesToRemove();
+ AttributeMask R = getParamAndReturnAttributesToRemove();
if (auto *Call = dyn_cast<CallBase>(&I)) {
for (int i = 0, e = Call->arg_size(); i != e; i++)
if (isa<PointerType>(Call->getArgOperand(i)->getType()))
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/SCCP.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/SCCP.cpp
index ff2f8a25f379..c34da51e6dc1 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/SCCP.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/SCCP.cpp
@@ -486,7 +486,7 @@ bool llvm::runIPSCCP(
// inaccessiblemem_or_argmemonly attributes do not hold any longer. Remove
// them from both the function and callsites.
if (ReplacedPointerArg) {
- AttrBuilder AttributesToRemove;
+ AttributeMask AttributesToRemove;
AttributesToRemove.addAttribute(Attribute::ArgMemOnly);
AttributesToRemove.addAttribute(Attribute::InaccessibleMemOrArgMemOnly);
F.removeFnAttrs(AttributesToRemove);
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/SROA.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/SROA.cpp
index 31c8999c3724..35497ae5ed9a 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -323,7 +323,7 @@ private:
///
/// Note that these are not separated by slice. This is because we expect an
/// alloca to be completely rewritten or not rewritten at all. If rewritten,
- /// all these instructions can simply be removed and replaced with undef as
+ /// all these instructions can simply be removed and replaced with poison as
/// they come from outside of the allocated space.
SmallVector<Instruction *, 8> DeadUsers;
@@ -333,10 +333,10 @@ private:
/// Operands which will become dead if we rewrite the alloca.
///
/// These are operands that in their particular use can be replaced with
- /// undef when we rewrite the alloca. These show up in out-of-bounds inputs
+ /// poison when we rewrite the alloca. These show up in out-of-bounds inputs
/// to PHI nodes and the like. They aren't entirely dead (there might be
/// a GEP back into the bounds using it elsewhere) and nor is the PHI, but we
- /// want to swap this particular input for undef to simplify the use lists of
+ /// want to swap this particular input for poison to simplify the use lists of
/// the alloca.
SmallVector<Use *, 8> DeadOperands;
};
@@ -1008,6 +1008,13 @@ private:
if (I.use_empty())
return markAsDead(I);
+ // If this is a PHI node before a catchswitch, we cannot insert any non-PHI
+ // instructions in this BB, which may be required during rewriting. Bail out
+ // on these cases.
+ if (isa<PHINode>(I) &&
+ I.getParent()->getFirstInsertionPt() == I.getParent()->end())
+ return PI.setAborted(&I);
+
// TODO: We could use SimplifyInstruction here to fold PHINodes and
// SelectInsts. However, doing so requires to change the current
// dead-operand-tracking mechanism. For instance, suppose neither loading
@@ -1023,7 +1030,7 @@ private:
enqueueUsers(I);
else
// Otherwise the operand to the PHI/select is dead, and we can replace
- // it with undef.
+ // it with poison.
AS.DeadOperands.push_back(U);
return;
@@ -1043,7 +1050,7 @@ private:
// For PHI and select operands outside the alloca, we can't nuke the entire
// phi or select -- the other side might still be relevant, so we special
// case them here and use a separate structure to track the operands
- // themselves which should be replaced with undef.
+ // themselves which should be replaced with poison.
// FIXME: This should instead be escaped in the event we're instrumenting
// for address sanitization.
if (Offset.uge(AllocSize)) {
@@ -1264,14 +1271,14 @@ static bool isSafePHIToSpeculate(PHINode &PN) {
return true;
}
-static void speculatePHINodeLoads(PHINode &PN) {
+static void speculatePHINodeLoads(IRBuilderTy &IRB, PHINode &PN) {
LLVM_DEBUG(dbgs() << " original: " << PN << "\n");
LoadInst *SomeLoad = cast<LoadInst>(PN.user_back());
Type *LoadTy = SomeLoad->getType();
- IRBuilderTy PHIBuilder(&PN);
- PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(),
- PN.getName() + ".sroa.speculated");
+ IRB.SetInsertPoint(&PN);
+ PHINode *NewPN = IRB.CreatePHI(LoadTy, PN.getNumIncomingValues(),
+ PN.getName() + ".sroa.speculated");
// Get the AA tags and alignment to use from one of the loads. It does not
// matter which one we get and if any differ.
@@ -1301,9 +1308,9 @@ static void speculatePHINodeLoads(PHINode &PN) {
}
Instruction *TI = Pred->getTerminator();
- IRBuilderTy PredBuilder(TI);
+ IRB.SetInsertPoint(TI);
- LoadInst *Load = PredBuilder.CreateAlignedLoad(
+ LoadInst *Load = IRB.CreateAlignedLoad(
LoadTy, InVal, Alignment,
(PN.getName() + ".sroa.speculate.load." + Pred->getName()));
++NumLoadsSpeculated;
@@ -1361,10 +1368,10 @@ static bool isSafeSelectToSpeculate(SelectInst &SI) {
return true;
}
-static void speculateSelectInstLoads(SelectInst &SI) {
+static void speculateSelectInstLoads(IRBuilderTy &IRB, SelectInst &SI) {
LLVM_DEBUG(dbgs() << " original: " << SI << "\n");
- IRBuilderTy IRB(&SI);
+ IRB.SetInsertPoint(&SI);
Value *TV = SI.getTrueValue();
Value *FV = SI.getFalseValue();
// Replace the loads of the select with a select of two loads.
@@ -1430,8 +1437,10 @@ static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr,
if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero())
return BasePtr;
- return IRB.CreateInBoundsGEP(BasePtr->getType()->getPointerElementType(),
- BasePtr, Indices, NamePrefix + "sroa_idx");
+ // buildGEP() is only called for non-opaque pointers.
+ return IRB.CreateInBoundsGEP(
+ BasePtr->getType()->getNonOpaquePointerElementType(), BasePtr, Indices,
+ NamePrefix + "sroa_idx");
}
/// Get a natural GEP off of the BasePtr walking through Ty toward
@@ -1504,7 +1513,7 @@ static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL,
if (Ty == IRB.getInt8PtrTy(Ty->getAddressSpace()) && TargetTy->isIntegerTy(8))
return nullptr;
- Type *ElementTy = Ty->getElementType();
+ Type *ElementTy = Ty->getNonOpaquePointerElementType();
if (!ElementTy->isSized())
return nullptr; // We can't GEP through an unsized element.
@@ -1563,7 +1572,7 @@ static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr,
APInt Int8PtrOffset(Offset.getBitWidth(), 0);
PointerType *TargetPtrTy = cast<PointerType>(PointerTy);
- Type *TargetTy = TargetPtrTy->getElementType();
+ Type *TargetTy = TargetPtrTy->getNonOpaquePointerElementType();
// Because an `addrspacecast` may be involved, `Ptr` (the storage pointer) may have
// a different address space from the expected `PointerTy` (the pointer to be used).
@@ -2558,7 +2567,7 @@ private:
// the computed value, and then replace the placeholder with LI, leaving
// LI only used for this computation.
Value *Placeholder = new LoadInst(
- LI.getType(), UndefValue::get(LI.getType()->getPointerTo(AS)), "",
+ LI.getType(), PoisonValue::get(LI.getType()->getPointerTo(AS)), "",
false, Align(1));
V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset - BeginOffset,
"insert");
@@ -3223,8 +3232,11 @@ class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> {
/// Used to calculate offsets, and hence alignment, of subobjects.
const DataLayout &DL;
+ IRBuilderTy &IRB;
+
public:
- AggLoadStoreRewriter(const DataLayout &DL) : DL(DL) {}
+ AggLoadStoreRewriter(const DataLayout &DL, IRBuilderTy &IRB)
+ : DL(DL), IRB(IRB) {}
/// Rewrite loads and stores through a pointer and all pointers derived from
/// it.
@@ -3255,7 +3267,7 @@ private:
template <typename Derived> class OpSplitter {
protected:
/// The builder used to form new instructions.
- IRBuilderTy IRB;
+ IRBuilderTy &IRB;
/// The indices which to be used with insert- or extractvalue to select the
/// appropriate value within the aggregate.
@@ -3282,9 +3294,11 @@ private:
/// Initialize the splitter with an insertion point, Ptr and start with a
/// single zero GEP index.
OpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy,
- Align BaseAlign, const DataLayout &DL)
- : IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr),
- BaseTy(BaseTy), BaseAlign(BaseAlign), DL(DL) {}
+ Align BaseAlign, const DataLayout &DL, IRBuilderTy &IRB)
+ : IRB(IRB), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr), BaseTy(BaseTy),
+ BaseAlign(BaseAlign), DL(DL) {
+ IRB.SetInsertPoint(InsertionPoint);
+ }
public:
/// Generic recursive split emission routine.
@@ -3345,9 +3359,10 @@ private:
AAMDNodes AATags;
LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy,
- AAMDNodes AATags, Align BaseAlign, const DataLayout &DL)
- : OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign,
- DL),
+ AAMDNodes AATags, Align BaseAlign, const DataLayout &DL,
+ IRBuilderTy &IRB)
+ : OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign, DL,
+ IRB),
AATags(AATags) {}
/// Emit a leaf load of a single value. This is called at the leaves of the
@@ -3379,8 +3394,8 @@ private:
// We have an aggregate being loaded, split it apart.
LLVM_DEBUG(dbgs() << " original: " << LI << "\n");
LoadOpSplitter Splitter(&LI, *U, LI.getType(), LI.getAAMetadata(),
- getAdjustedAlignment(&LI, 0), DL);
- Value *V = UndefValue::get(LI.getType());
+ getAdjustedAlignment(&LI, 0), DL, IRB);
+ Value *V = PoisonValue::get(LI.getType());
Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca");
Visited.erase(&LI);
LI.replaceAllUsesWith(V);
@@ -3390,9 +3405,10 @@ private:
struct StoreOpSplitter : public OpSplitter<StoreOpSplitter> {
StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy,
- AAMDNodes AATags, Align BaseAlign, const DataLayout &DL)
+ AAMDNodes AATags, Align BaseAlign, const DataLayout &DL,
+ IRBuilderTy &IRB)
: OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign,
- DL),
+ DL, IRB),
AATags(AATags) {}
AAMDNodes AATags;
/// Emit a leaf store of a single value. This is called at the leaves of the
@@ -3430,7 +3446,7 @@ private:
// We have an aggregate being stored, split it apart.
LLVM_DEBUG(dbgs() << " original: " << SI << "\n");
StoreOpSplitter Splitter(&SI, *U, V->getType(), SI.getAAMetadata(),
- getAdjustedAlignment(&SI, 0), DL);
+ getAdjustedAlignment(&SI, 0), DL, IRB);
Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca");
Visited.erase(&SI);
SI.eraseFromParent();
@@ -3458,7 +3474,7 @@ private:
<< "\n original: " << *Sel
<< "\n " << GEPI);
- IRBuilderTy Builder(&GEPI);
+ IRB.SetInsertPoint(&GEPI);
SmallVector<Value *, 4> Index(GEPI.indices());
bool IsInBounds = GEPI.isInBounds();
@@ -3466,21 +3482,20 @@ private:
Value *True = Sel->getTrueValue();
Value *NTrue =
IsInBounds
- ? Builder.CreateInBoundsGEP(Ty, True, Index,
- True->getName() + ".sroa.gep")
- : Builder.CreateGEP(Ty, True, Index, True->getName() + ".sroa.gep");
+ ? IRB.CreateInBoundsGEP(Ty, True, Index,
+ True->getName() + ".sroa.gep")
+ : IRB.CreateGEP(Ty, True, Index, True->getName() + ".sroa.gep");
Value *False = Sel->getFalseValue();
Value *NFalse =
IsInBounds
- ? Builder.CreateInBoundsGEP(Ty, False, Index,
- False->getName() + ".sroa.gep")
- : Builder.CreateGEP(Ty, False, Index,
- False->getName() + ".sroa.gep");
+ ? IRB.CreateInBoundsGEP(Ty, False, Index,
+ False->getName() + ".sroa.gep")
+ : IRB.CreateGEP(Ty, False, Index, False->getName() + ".sroa.gep");
- Value *NSel = Builder.CreateSelect(Sel->getCondition(), NTrue, NFalse,
- Sel->getName() + ".sroa.sel");
+ Value *NSel = IRB.CreateSelect(Sel->getCondition(), NTrue, NFalse,
+ Sel->getName() + ".sroa.sel");
Visited.erase(&GEPI);
GEPI.replaceAllUsesWith(NSel);
GEPI.eraseFromParent();
@@ -3517,10 +3532,9 @@ private:
SmallVector<Value *, 4> Index(GEPI.indices());
bool IsInBounds = GEPI.isInBounds();
- IRBuilderTy PHIBuilder(GEPI.getParent()->getFirstNonPHI());
- PHINode *NewPN = PHIBuilder.CreatePHI(GEPI.getType(),
- PHI->getNumIncomingValues(),
- PHI->getName() + ".sroa.phi");
+ IRB.SetInsertPoint(GEPI.getParent()->getFirstNonPHI());
+ PHINode *NewPN = IRB.CreatePHI(GEPI.getType(), PHI->getNumIncomingValues(),
+ PHI->getName() + ".sroa.phi");
for (unsigned I = 0, E = PHI->getNumIncomingValues(); I != E; ++I) {
BasicBlock *B = PHI->getIncomingBlock(I);
Value *NewVal = nullptr;
@@ -3530,11 +3544,12 @@ private:
} else {
Instruction *In = cast<Instruction>(PHI->getIncomingValue(I));
- IRBuilderTy B(In->getParent(), std::next(In->getIterator()));
+ IRB.SetInsertPoint(In->getParent(), std::next(In->getIterator()));
Type *Ty = GEPI.getSourceElementType();
- NewVal = IsInBounds
- ? B.CreateInBoundsGEP(Ty, In, Index, In->getName() + ".sroa.gep")
- : B.CreateGEP(Ty, In, Index, In->getName() + ".sroa.gep");
+ NewVal = IsInBounds ? IRB.CreateInBoundsGEP(Ty, In, Index,
+ In->getName() + ".sroa.gep")
+ : IRB.CreateGEP(Ty, In, Index,
+ In->getName() + ".sroa.gep");
}
NewPN->addIncoming(NewVal, B);
}
@@ -4557,11 +4572,11 @@ bool SROAPass::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
return Changed;
}
-/// Clobber a use with undef, deleting the used value if it becomes dead.
+/// Clobber a use with poison, deleting the used value if it becomes dead.
void SROAPass::clobberUse(Use &U) {
Value *OldV = U;
- // Replace the use with an undef value.
- U = UndefValue::get(OldV->getType());
+ // Replace the use with a poison value.
+ U = PoisonValue::get(OldV->getType());
// Check for this making an instruction dead. We have to garbage collect
// all the dead instructions to ensure the uses of any alloca end up being
@@ -4598,7 +4613,8 @@ bool SROAPass::runOnAlloca(AllocaInst &AI) {
// First, split any FCA loads and stores touching this alloca to promote
// better splitting and promotion opportunities.
- AggLoadStoreRewriter AggRewriter(DL);
+ IRBuilderTy IRB(&AI);
+ AggLoadStoreRewriter AggRewriter(DL, IRB);
Changed |= AggRewriter.rewrite(AI);
// Build the slices using a recursive instruction-visiting builder.
@@ -4614,7 +4630,7 @@ bool SROAPass::runOnAlloca(AllocaInst &AI) {
clobberUse(DeadOp);
// Now replace the uses of this instruction.
- DeadUser->replaceAllUsesWith(UndefValue::get(DeadUser->getType()));
+ DeadUser->replaceAllUsesWith(PoisonValue::get(DeadUser->getType()));
// And mark it for deletion.
DeadInsts.push_back(DeadUser);
@@ -4633,11 +4649,11 @@ bool SROAPass::runOnAlloca(AllocaInst &AI) {
LLVM_DEBUG(dbgs() << " Speculating PHIs\n");
while (!SpeculatablePHIs.empty())
- speculatePHINodeLoads(*SpeculatablePHIs.pop_back_val());
+ speculatePHINodeLoads(IRB, *SpeculatablePHIs.pop_back_val());
LLVM_DEBUG(dbgs() << " Speculating Selects\n");
while (!SpeculatableSelects.empty())
- speculateSelectInstLoads(*SpeculatableSelects.pop_back_val());
+ speculateSelectInstLoads(IRB, *SpeculatableSelects.pop_back_val());
return Changed;
}
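
The SROA hunks above switch the helpers to a single IRBuilder that is passed down by reference and repositioned with SetInsertPoint(), and they replace undef placeholders with poison. A minimal sketch of that builder-sharing pattern, not the pass itself; the helper name is illustrative and IRBuilder<> stands in for the pass-local IRBuilderTy:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Fold a GEP whose pointer operand is a select into a select of two GEPs,
    // reusing the caller's builder instead of constructing a local one.
    static Value *foldGEPOfSelect(IRBuilder<> &IRB, GetElementPtrInst &GEPI,
                                  SelectInst &Sel) {
      IRB.SetInsertPoint(&GEPI); // reposition the shared builder
      SmallVector<Value *, 4> Index(GEPI.indices());
      Type *Ty = GEPI.getSourceElementType();
      Value *NTrue = IRB.CreateGEP(Ty, Sel.getTrueValue(), Index,
                                   Sel.getTrueValue()->getName() + ".gep");
      Value *NFalse = IRB.CreateGEP(Ty, Sel.getFalseValue(), Index,
                                    Sel.getFalseValue()->getName() + ".gep");
      return IRB.CreateSelect(Sel.getCondition(), NTrue, NFalse,
                              Sel.getName() + ".sel");
    }
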
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
index 1284bae820a4..29cea42e4a00 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
@@ -959,7 +959,8 @@ static bool optimizeCallInst(CallInst *CI, bool &ModifiedDT,
Type *LoadTy = CI->getType();
Align Alignment = DL.getValueOrABITypeAlignment(MA,
LoadTy->getScalarType());
- if (TTI.isLegalMaskedGather(LoadTy, Alignment))
+ if (TTI.isLegalMaskedGather(LoadTy, Alignment) &&
+ !TTI.forceScalarizeMaskedGather(cast<VectorType>(LoadTy), Alignment))
return false;
scalarizeMaskedGather(DL, CI, DTU, ModifiedDT);
return true;
@@ -970,7 +971,9 @@ static bool optimizeCallInst(CallInst *CI, bool &ModifiedDT,
Type *StoreTy = CI->getArgOperand(0)->getType();
Align Alignment = DL.getValueOrABITypeAlignment(MA,
StoreTy->getScalarType());
- if (TTI.isLegalMaskedScatter(StoreTy, Alignment))
+ if (TTI.isLegalMaskedScatter(StoreTy, Alignment) &&
+ !TTI.forceScalarizeMaskedScatter(cast<VectorType>(StoreTy),
+ Alignment))
return false;
scalarizeMaskedScatter(DL, CI, DTU, ModifiedDT);
return true;
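
The ScalarizeMaskedMemIntrin change above adds a second TTI query: a gather or scatter that the target reports as legal can still be scalarized when the target asks for it. A hedged sketch of the gate, with an illustrative helper name:

    #include "llvm/Analysis/TargetTransformInfo.h"
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/Support/Alignment.h"
    using namespace llvm;

    // Keep the masked gather intrinsic only if it is legal and the target does
    // not force scalarization for this vector type and alignment.
    static bool keepMaskedGather(const TargetTransformInfo &TTI, Type *LoadTy,
                                 Align Alignment) {
      return TTI.isLegalMaskedGather(LoadTy, Alignment) &&
             !TTI.forceScalarizeMaskedGather(cast<VectorType>(LoadTy), Alignment);
    }
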
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/Scalarizer.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/Scalarizer.cpp
index 6b7419abe1d1..3606c8a4b073 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/Scalarizer.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/Scalarizer.cpp
@@ -270,7 +270,7 @@ Scatterer::Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
Type *Ty = V->getType();
PtrTy = dyn_cast<PointerType>(Ty);
if (PtrTy)
- Ty = PtrTy->getElementType();
+ Ty = PtrTy->getPointerElementType();
Size = cast<FixedVectorType>(Ty)->getNumElements();
if (!CachePtr)
Tmp.resize(Size, nullptr);
@@ -288,7 +288,8 @@ Value *Scatterer::operator[](unsigned I) {
return CV[I];
IRBuilder<> Builder(BB, BBI);
if (PtrTy) {
- Type *ElTy = cast<VectorType>(PtrTy->getElementType())->getElementType();
+ Type *ElTy =
+ cast<VectorType>(PtrTy->getPointerElementType())->getElementType();
if (!CV[0]) {
Type *NewPtrTy = PointerType::get(ElTy, PtrTy->getAddressSpace());
CV[0] = Builder.CreateBitCast(V, NewPtrTy, V->getName() + ".i0");
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
index 3799d2dd1cf2..ee17da1875e5 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
@@ -78,6 +78,79 @@ static cl::opt<bool> UserSinkCommonInsts(
STATISTIC(NumSimpl, "Number of blocks simplified");
+static bool
+performBlockTailMerging(Function &F, ArrayRef<BasicBlock *> BBs,
+ std::vector<DominatorTree::UpdateType> *Updates) {
+ SmallVector<PHINode *, 1> NewOps;
+
+ // We don't want to change IR just because we can.
+ // Only do that if there are at least two blocks we'll tail-merge.
+ if (BBs.size() < 2)
+ return false;
+
+ if (Updates)
+ Updates->reserve(Updates->size() + BBs.size());
+
+ BasicBlock *CanonicalBB;
+ Instruction *CanonicalTerm;
+ {
+ auto *Term = BBs[0]->getTerminator();
+
+ // Create a canonical block for this function terminator type now,
+ // placing it *before* the first block that will branch to it.
+ CanonicalBB = BasicBlock::Create(
+ F.getContext(), Twine("common.") + Term->getOpcodeName(), &F, BBs[0]);
+ // We'll also need a PHI node for each operand of the terminator.
+ NewOps.resize(Term->getNumOperands());
+ for (auto I : zip(Term->operands(), NewOps)) {
+ std::get<1>(I) = PHINode::Create(std::get<0>(I)->getType(),
+ /*NumReservedValues=*/BBs.size(),
+ CanonicalBB->getName() + ".op");
+ CanonicalBB->getInstList().push_back(std::get<1>(I));
+ }
+ // Make it so that this canonical block actually has the right
+ // terminator.
+ CanonicalTerm = Term->clone();
+ CanonicalBB->getInstList().push_back(CanonicalTerm);
+ // If the canonical terminator has operands, rewrite it to take PHI's.
+ for (auto I : zip(NewOps, CanonicalTerm->operands()))
+ std::get<1>(I) = std::get<0>(I);
+ }
+
+ // Now, go through each block (with the current terminator type)
+ // we've recorded, and rewrite it to branch to the new common block.
+ const DILocation *CommonDebugLoc = nullptr;
+ for (BasicBlock *BB : BBs) {
+ auto *Term = BB->getTerminator();
+ assert(Term->getOpcode() == CanonicalTerm->getOpcode() &&
+ "All blocks to be tail-merged must be the same "
+ "(function-terminating) terminator type.");
+
+ // Aha, found a new non-canonical function terminator. If it has operands,
+ // forward them to the PHI nodes in the canonical block.
+ for (auto I : zip(Term->operands(), NewOps))
+ std::get<1>(I)->addIncoming(std::get<0>(I), BB);
+
+ // Compute the debug location common to all the original terminators.
+ if (!CommonDebugLoc)
+ CommonDebugLoc = Term->getDebugLoc();
+ else
+ CommonDebugLoc =
+ DILocation::getMergedLocation(CommonDebugLoc, Term->getDebugLoc());
+
+ // And turn BB into a block that just unconditionally branches
+ // to the canonical block.
+ Term->eraseFromParent();
+ BranchInst::Create(CanonicalBB, BB);
+ if (Updates)
+ Updates->push_back({DominatorTree::Insert, BB, CanonicalBB});
+ }
+
+ CanonicalTerm->setDebugLoc(CommonDebugLoc);
+
+ return true;
+}
+
static bool tailMergeBlocksWithSimilarFunctionTerminators(Function &F,
DomTreeUpdater *DTU) {
SmallMapVector<unsigned /*TerminatorOpcode*/, SmallVector<BasicBlock *, 2>, 4>
@@ -133,73 +206,8 @@ static bool tailMergeBlocksWithSimilarFunctionTerminators(Function &F,
std::vector<DominatorTree::UpdateType> Updates;
- for (ArrayRef<BasicBlock *> BBs : make_second_range(Structure)) {
- SmallVector<PHINode *, 1> NewOps;
-
- // We don't want to change IR just because we can.
- // Only do that if there are at least two blocks we'll tail-merge.
- if (BBs.size() < 2)
- continue;
-
- Changed = true;
-
- if (DTU)
- Updates.reserve(Updates.size() + BBs.size());
-
- BasicBlock *CanonicalBB;
- Instruction *CanonicalTerm;
- {
- auto *Term = BBs[0]->getTerminator();
-
- // Create a canonical block for this function terminator type now,
- // placing it *before* the first block that will branch to it.
- CanonicalBB = BasicBlock::Create(
- F.getContext(), Twine("common.") + Term->getOpcodeName(), &F, BBs[0]);
- // We'll also need a PHI node per each operand of the terminator.
- NewOps.resize(Term->getNumOperands());
- for (auto I : zip(Term->operands(), NewOps)) {
- std::get<1>(I) = PHINode::Create(std::get<0>(I)->getType(),
- /*NumReservedValues=*/BBs.size(),
- CanonicalBB->getName() + ".op");
- CanonicalBB->getInstList().push_back(std::get<1>(I));
- }
- // Make it so that this canonical block actually has the right
- // terminator.
- CanonicalTerm = Term->clone();
- CanonicalBB->getInstList().push_back(CanonicalTerm);
- // If the canonical terminator has operands, rewrite it to take PHI's.
- for (auto I : zip(NewOps, CanonicalTerm->operands()))
- std::get<1>(I) = std::get<0>(I);
- }
-
- // Now, go through each block (with the current terminator type)
- // we've recorded, and rewrite it to branch to the new common block.
- const DILocation *CommonDebugLoc = nullptr;
- for (BasicBlock *BB : BBs) {
- auto *Term = BB->getTerminator();
-
- // Aha, found a new non-canonical function terminator. If it has operands,
- // forward them to the PHI nodes in the canonical block.
- for (auto I : zip(Term->operands(), NewOps))
- std::get<1>(I)->addIncoming(std::get<0>(I), BB);
-
- // Compute the debug location common to all the original terminators.
- if (!CommonDebugLoc)
- CommonDebugLoc = Term->getDebugLoc();
- else
- CommonDebugLoc =
- DILocation::getMergedLocation(CommonDebugLoc, Term->getDebugLoc());
-
- // And turn BB into a block that just unconditionally branches
- // to the canonical block.
- Term->eraseFromParent();
- BranchInst::Create(CanonicalBB, BB);
- if (DTU)
- Updates.push_back({DominatorTree::Insert, BB, CanonicalBB});
- }
-
- CanonicalTerm->setDebugLoc(CommonDebugLoc);
- }
+ for (ArrayRef<BasicBlock *> BBs : make_second_range(Structure))
+ Changed |= performBlockTailMerging(F, BBs, DTU ? &Updates : nullptr);
if (DTU)
DTU->applyUpdates(Updates);
@@ -313,7 +321,7 @@ static void applyCommandLineOverridesToOptions(SimplifyCFGOptions &Options) {
Options.SinkCommonInsts = UserSinkCommonInsts;
}
-SimplifyCFGPass::SimplifyCFGPass() : Options() {
+SimplifyCFGPass::SimplifyCFGPass() {
applyCommandLineOverridesToOptions(Options);
}
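
performBlockTailMerging() above is a pure extraction of the per-bucket logic; the grouping that feeds it is unchanged. A simplified sketch of that grouping, assuming only the successor-count check (the real pass applies further filters, e.g. around EH pads and musttail calls):

    #include "llvm/ADT/MapVector.h"
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/BasicBlock.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/Instruction.h"
    using namespace llvm;

    // Bucket function-terminating blocks by terminator opcode; each bucket of
    // two or more blocks is a candidate for performBlockTailMerging().
    static void groupTerminatorBlocks(
        Function &F,
        SmallMapVector<unsigned, SmallVector<BasicBlock *, 2>, 4> &Buckets) {
      for (BasicBlock &BB : F) {
        Instruction *Term = BB.getTerminator();
        if (Term->getNumSuccessors() == 0)
          Buckets[Term->getOpcode()].push_back(&BB);
      }
    }
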
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/AMDGPUEmitPrintf.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/AMDGPUEmitPrintf.cpp
index fdc914a72bfd..c734611836eb 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/AMDGPUEmitPrintf.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/AMDGPUEmitPrintf.cpp
@@ -22,19 +22,6 @@ using namespace llvm;
#define DEBUG_TYPE "amdgpu-emit-printf"
-static bool isCString(const Value *Arg) {
- auto Ty = Arg->getType();
- auto PtrTy = dyn_cast<PointerType>(Ty);
- if (!PtrTy)
- return false;
-
- auto IntTy = dyn_cast<IntegerType>(PtrTy->getElementType());
- if (!IntTy)
- return false;
-
- return IntTy->getBitWidth() == 8;
-}
-
static Value *fitArgInto64Bits(IRBuilder<> &Builder, Value *Arg) {
auto Int64Ty = Builder.getInt64Ty();
auto Ty = Arg->getType();
@@ -176,13 +163,15 @@ static Value *callAppendStringN(IRBuilder<> &Builder, Value *Desc, Value *Str,
static Value *appendString(IRBuilder<> &Builder, Value *Desc, Value *Arg,
bool IsLast) {
+ Arg = Builder.CreateBitCast(
+ Arg, Builder.getInt8PtrTy(Arg->getType()->getPointerAddressSpace()));
auto Length = getStrlenWithNull(Builder, Arg);
return callAppendStringN(Builder, Desc, Arg, Length, IsLast);
}
static Value *processArg(IRBuilder<> &Builder, Value *Desc, Value *Arg,
bool SpecIsCString, bool IsLast) {
- if (SpecIsCString && isCString(Arg)) {
+ if (SpecIsCString && isa<PointerType>(Arg->getType())) {
return appendString(Builder, Desc, Arg, IsLast);
}
// If the format specifies a string but the argument is not, the frontend will
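
With isCString() gone, appendString() above can no longer rely on inspecting the pointee type, so it casts the argument to an i8 pointer in its own address space first. A rough sketch of that cast; the helper name is illustrative:

    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    // Cast any pointer argument to i8* in its address space, which is all the
    // string path needs once the pointee type is no longer inspected.
    static Value *asI8Ptr(IRBuilder<> &Builder, Value *Arg) {
      unsigned AS = Arg->getType()->getPointerAddressSpace();
      return Builder.CreateBitCast(Arg, Builder.getInt8PtrTy(AS));
    }
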
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/BuildLibCalls.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
index 580cfd80141e..97f11ca71726 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
@@ -34,6 +34,7 @@ STATISTIC(NumReadNone, "Number of functions inferred as readnone");
STATISTIC(NumInaccessibleMemOnly,
"Number of functions inferred as inaccessiblememonly");
STATISTIC(NumReadOnly, "Number of functions inferred as readonly");
+STATISTIC(NumWriteOnly, "Number of functions inferred as writeonly");
STATISTIC(NumArgMemOnly, "Number of functions inferred as argmemonly");
STATISTIC(NumInaccessibleMemOrArgMemOnly,
"Number of functions inferred as inaccessiblemem_or_argmemonly");
@@ -71,6 +72,19 @@ static bool setOnlyReadsMemory(Function &F) {
return true;
}
+static bool setOnlyWritesMemory(Function &F) {
+ if (F.onlyWritesMemory()) // writeonly or readnone
+ return false;
+ // Turn readonly and writeonly into readnone.
+ if (F.hasFnAttribute(Attribute::ReadOnly)) {
+ F.removeFnAttr(Attribute::ReadOnly);
+ return setDoesNotAccessMemory(F);
+ }
+ ++NumWriteOnly;
+ F.setOnlyWritesMemory();
+ return true;
+}
+
static bool setOnlyAccessesArgMemory(Function &F) {
if (F.onlyAccessesArgMemory())
return false;
@@ -233,6 +247,7 @@ bool llvm::inferLibFuncAttributes(Function &F, const TargetLibraryInfo &TLI) {
switch (TheLibFunc) {
case LibFunc_strlen:
+ case LibFunc_strnlen:
case LibFunc_wcslen:
Changed |= setOnlyReadsMemory(F);
Changed |= setDoesNotThrow(F);
@@ -400,6 +415,8 @@ bool llvm::inferLibFuncAttributes(Function &F, const TargetLibraryInfo &TLI) {
Changed |= setDoesNotCapture(F, 0);
Changed |= setOnlyReadsMemory(F, 0);
return Changed;
+ case LibFunc_aligned_alloc:
+ case LibFunc_valloc:
case LibFunc_malloc:
case LibFunc_vec_malloc:
Changed |= setOnlyAccessesInaccessibleMemory(F);
@@ -484,6 +501,7 @@ bool llvm::inferLibFuncAttributes(Function &F, const TargetLibraryInfo &TLI) {
return Changed;
case LibFunc_realloc:
case LibFunc_vec_realloc:
+ case LibFunc_reallocf:
Changed |= setOnlyAccessesInaccessibleMemOrArgMem(F);
Changed |= setRetNoUndef(F);
Changed |= setDoesNotThrow(F);
@@ -492,11 +510,6 @@ bool llvm::inferLibFuncAttributes(Function &F, const TargetLibraryInfo &TLI) {
Changed |= setDoesNotCapture(F, 0);
Changed |= setArgNoUndef(F, 1);
return Changed;
- case LibFunc_reallocf:
- Changed |= setRetNoUndef(F);
- Changed |= setWillReturn(F);
- Changed |= setArgNoUndef(F, 1);
- return Changed;
case LibFunc_read:
// May throw; "read" is a valid pthread cancellation point.
Changed |= setRetAndArgsNoUndef(F);
@@ -536,13 +549,6 @@ bool llvm::inferLibFuncAttributes(Function &F, const TargetLibraryInfo &TLI) {
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 1);
return Changed;
- case LibFunc_aligned_alloc:
- Changed |= setOnlyAccessesInaccessibleMemory(F);
- Changed |= setRetAndArgsNoUndef(F);
- Changed |= setDoesNotThrow(F);
- Changed |= setRetDoesNotAlias(F);
- Changed |= setWillReturn(F);
- return Changed;
case LibFunc_bcopy:
Changed |= setDoesNotThrow(F);
Changed |= setOnlyAccessesArgMemory(F);
@@ -569,6 +575,7 @@ bool llvm::inferLibFuncAttributes(Function &F, const TargetLibraryInfo &TLI) {
return Changed;
case LibFunc_calloc:
case LibFunc_vec_calloc:
+ Changed |= setOnlyAccessesInaccessibleMemory(F);
Changed |= setRetAndArgsNoUndef(F);
Changed |= setDoesNotThrow(F);
Changed |= setRetDoesNotAlias(F);
@@ -851,13 +858,6 @@ bool llvm::inferLibFuncAttributes(Function &F, const TargetLibraryInfo &TLI) {
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 1);
return Changed;
- case LibFunc_valloc:
- Changed |= setOnlyAccessesInaccessibleMemory(F);
- Changed |= setRetAndArgsNoUndef(F);
- Changed |= setDoesNotThrow(F);
- Changed |= setRetDoesNotAlias(F);
- Changed |= setWillReturn(F);
- return Changed;
case LibFunc_vprintf:
Changed |= setRetAndArgsNoUndef(F);
Changed |= setDoesNotThrow(F);
@@ -1020,12 +1020,10 @@ bool llvm::inferLibFuncAttributes(Function &F, const TargetLibraryInfo &TLI) {
case LibFunc_memset_pattern4:
case LibFunc_memset_pattern8:
case LibFunc_memset_pattern16:
- Changed |= setOnlyAccessesArgMemory(F);
Changed |= setDoesNotCapture(F, 0);
- Changed |= setOnlyWritesMemory(F, 0);
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 1);
- return Changed;
+ LLVM_FALLTHROUGH;
case LibFunc_memset:
Changed |= setWillReturn(F);
LLVM_FALLTHROUGH;
@@ -1158,7 +1156,6 @@ bool llvm::inferLibFuncAttributes(Function &F, const TargetLibraryInfo &TLI) {
case LibFunc_sqrt:
case LibFunc_sqrtf:
case LibFunc_sqrtl:
- case LibFunc_strnlen:
case LibFunc_tan:
case LibFunc_tanf:
case LibFunc_tanh:
@@ -1171,6 +1168,7 @@ bool llvm::inferLibFuncAttributes(Function &F, const TargetLibraryInfo &TLI) {
case LibFunc_truncl:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotFreeMemory(F);
+ Changed |= setOnlyWritesMemory(F);
Changed |= setWillReturn(F);
return Changed;
default:
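
A condensed sketch of the rule implemented by setOnlyWritesMemory(Function &) above; the real helper also returns early for functions already marked writeonly or readnone and bumps the new NumWriteOnly statistic:

    #include "llvm/IR/Function.h"
    using namespace llvm;

    static void markOnlyWritesMemory(Function &F) {
      if (F.hasFnAttribute(Attribute::ReadOnly)) {
        // A function that both only reads and only writes memory accesses
        // no memory at all, so the attributes collapse into readnone.
        F.removeFnAttr(Attribute::ReadOnly);
        F.setDoesNotAccessMemory();
      } else {
        F.setOnlyWritesMemory();
      }
    }
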
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/CallGraphUpdater.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/CallGraphUpdater.cpp
index b2763900e154..ac3839f2a4ab 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/CallGraphUpdater.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/CallGraphUpdater.cpp
@@ -20,8 +20,7 @@ using namespace llvm;
bool CallGraphUpdater::finalize() {
if (!DeadFunctionsInComdats.empty()) {
- filterDeadComdatFunctions(*DeadFunctionsInComdats.front()->getParent(),
- DeadFunctionsInComdats);
+ filterDeadComdatFunctions(DeadFunctionsInComdats);
DeadFunctions.append(DeadFunctionsInComdats.begin(),
DeadFunctionsInComdats.end());
}
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp
index ebe19f1751e5..56b6e4bc46a5 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp
@@ -500,7 +500,7 @@ CallBase &llvm::promoteCall(CallBase &CB, Function *Callee,
CB.setArgOperand(ArgNo, Cast);
// Remove any incompatible attributes for the argument.
- AttrBuilder ArgAttrs(CallerPAL.getParamAttrs(ArgNo));
+ AttrBuilder ArgAttrs(Ctx, CallerPAL.getParamAttrs(ArgNo));
ArgAttrs.remove(AttributeFuncs::typeIncompatible(FormalTy));
// We may have a different byval/inalloca type.
@@ -518,7 +518,7 @@ CallBase &llvm::promoteCall(CallBase &CB, Function *Callee,
// If the return type of the call site doesn't match that of the callee, cast
// the returned value to the appropriate type.
// Remove any incompatible return value attribute.
- AttrBuilder RAttrs(CallerPAL, AttributeList::ReturnIndex);
+ AttrBuilder RAttrs(Ctx, CallerPAL.getRetAttrs());
if (!CallSiteRetTy->isVoidTy() && CallSiteRetTy != CalleeRetTy) {
createRetBitCast(CB, CallSiteRetTy, RetBitCast);
RAttrs.remove(AttributeFuncs::typeIncompatible(CalleeRetTy));
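
The CallPromotionUtils hunks above follow the AttrBuilder API change: builders are now constructed from an LLVMContext plus an AttributeSet (getRetAttrs()/getParamAttrs()) instead of an (AttributeList, index) pair, and emptiness is queried with hasAttributes(), as the InlineFunction hunk below also shows. A small usage sketch with an illustrative helper name:

    #include "llvm/IR/Attributes.h"
    #include "llvm/IR/InstrTypes.h"
    using namespace llvm;

    // Build an AttrBuilder over a call site's return attributes and test it.
    static bool callSiteHasRetAttrs(CallBase &CB) {
      AttrBuilder RAttrs(CB.getContext(), CB.getAttributes().getRetAttrs());
      return RAttrs.hasAttributes();
    }
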
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/CodeExtractor.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/CodeExtractor.cpp
index 96aff563aa9b..24cd5747c5a4 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/CodeExtractor.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/CodeExtractor.cpp
@@ -829,39 +829,54 @@ Function *CodeExtractor::constructFunction(const ValueSet &inputs,
default: RetTy = Type::getInt16Ty(header->getContext()); break;
}
- std::vector<Type *> paramTy;
+ std::vector<Type *> ParamTy;
+ std::vector<Type *> AggParamTy;
+ ValueSet StructValues;
// Add the types of the input values to the function's argument list
for (Value *value : inputs) {
LLVM_DEBUG(dbgs() << "value used in func: " << *value << "\n");
- paramTy.push_back(value->getType());
+ if (AggregateArgs && !ExcludeArgsFromAggregate.contains(value)) {
+ AggParamTy.push_back(value->getType());
+ StructValues.insert(value);
+ } else
+ ParamTy.push_back(value->getType());
}
// Add the types of the output values to the function's argument list.
for (Value *output : outputs) {
LLVM_DEBUG(dbgs() << "instr used in func: " << *output << "\n");
- if (AggregateArgs)
- paramTy.push_back(output->getType());
- else
- paramTy.push_back(PointerType::getUnqual(output->getType()));
+ if (AggregateArgs && !ExcludeArgsFromAggregate.contains(output)) {
+ AggParamTy.push_back(output->getType());
+ StructValues.insert(output);
+ } else
+ ParamTy.push_back(PointerType::getUnqual(output->getType()));
+ }
+
+ assert(
+ (ParamTy.size() + AggParamTy.size()) ==
+ (inputs.size() + outputs.size()) &&
+ "Number of scalar and aggregate params does not match inputs, outputs");
+ assert((StructValues.empty() || AggregateArgs) &&
+ "Expected StructValues only with AggregateArgs set");
+
+ // Concatenate scalar and aggregate params in ParamTy.
+ size_t NumScalarParams = ParamTy.size();
+ StructType *StructTy = nullptr;
+ if (AggregateArgs && !AggParamTy.empty()) {
+ StructTy = StructType::get(M->getContext(), AggParamTy);
+ ParamTy.push_back(PointerType::getUnqual(StructTy));
}
LLVM_DEBUG({
dbgs() << "Function type: " << *RetTy << " f(";
- for (Type *i : paramTy)
+ for (Type *i : ParamTy)
dbgs() << *i << ", ";
dbgs() << ")\n";
});
- StructType *StructTy = nullptr;
- if (AggregateArgs && (inputs.size() + outputs.size() > 0)) {
- StructTy = StructType::get(M->getContext(), paramTy);
- paramTy.clear();
- paramTy.push_back(PointerType::getUnqual(StructTy));
- }
- FunctionType *funcType =
- FunctionType::get(RetTy, paramTy,
- AllowVarArgs && oldFunction->isVarArg());
+ FunctionType *funcType = FunctionType::get(
+ RetTy, ParamTy, AllowVarArgs && oldFunction->isVarArg());
std::string SuffixToUse =
Suffix.empty()
@@ -871,13 +886,6 @@ Function *CodeExtractor::constructFunction(const ValueSet &inputs,
Function *newFunction = Function::Create(
funcType, GlobalValue::InternalLinkage, oldFunction->getAddressSpace(),
oldFunction->getName() + "." + SuffixToUse, M);
- // If the old function is no-throw, so is the new one.
- if (oldFunction->doesNotThrow())
- newFunction->setDoesNotThrow();
-
- // Inherit the uwtable attribute if we need to.
- if (oldFunction->hasUWTable())
- newFunction->setHasUWTable();
// Inherit all of the target dependent attributes and white-listed
// target independent attributes.
@@ -893,53 +901,26 @@ Function *CodeExtractor::constructFunction(const ValueSet &inputs,
} else
switch (Attr.getKindAsEnum()) {
// Those attributes cannot be propagated safely. Explicitly list them
- // here so we get a warning if new attributes are added. This list also
- // includes non-function attributes.
- case Attribute::Alignment:
+ // here so we get a warning if new attributes are added.
case Attribute::AllocSize:
case Attribute::ArgMemOnly:
case Attribute::Builtin:
- case Attribute::ByVal:
case Attribute::Convergent:
- case Attribute::Dereferenceable:
- case Attribute::DereferenceableOrNull:
- case Attribute::ElementType:
- case Attribute::InAlloca:
- case Attribute::InReg:
case Attribute::InaccessibleMemOnly:
case Attribute::InaccessibleMemOrArgMemOnly:
case Attribute::JumpTable:
case Attribute::Naked:
- case Attribute::Nest:
- case Attribute::NoAlias:
case Attribute::NoBuiltin:
- case Attribute::NoCapture:
case Attribute::NoMerge:
case Attribute::NoReturn:
case Attribute::NoSync:
- case Attribute::NoUndef:
- case Attribute::None:
- case Attribute::NonNull:
- case Attribute::Preallocated:
case Attribute::ReadNone:
case Attribute::ReadOnly:
- case Attribute::Returned:
case Attribute::ReturnsTwice:
- case Attribute::SExt:
case Attribute::Speculatable:
case Attribute::StackAlignment:
- case Attribute::StructRet:
- case Attribute::SwiftError:
- case Attribute::SwiftSelf:
- case Attribute::SwiftAsync:
case Attribute::WillReturn:
case Attribute::WriteOnly:
- case Attribute::ZExt:
- case Attribute::ImmArg:
- case Attribute::ByRef:
- case Attribute::EndAttrKinds:
- case Attribute::EmptyKey:
- case Attribute::TombstoneKey:
continue;
// Those attributes should be safe to propagate to the extracted function.
case Attribute::AlwaysInline:
@@ -980,30 +961,62 @@ Function *CodeExtractor::constructFunction(const ValueSet &inputs,
case Attribute::MustProgress:
case Attribute::NoProfile:
break;
+ // These attributes cannot be applied to functions.
+ case Attribute::Alignment:
+ case Attribute::ByVal:
+ case Attribute::Dereferenceable:
+ case Attribute::DereferenceableOrNull:
+ case Attribute::ElementType:
+ case Attribute::InAlloca:
+ case Attribute::InReg:
+ case Attribute::Nest:
+ case Attribute::NoAlias:
+ case Attribute::NoCapture:
+ case Attribute::NoUndef:
+ case Attribute::NonNull:
+ case Attribute::Preallocated:
+ case Attribute::Returned:
+ case Attribute::SExt:
+ case Attribute::StructRet:
+ case Attribute::SwiftError:
+ case Attribute::SwiftSelf:
+ case Attribute::SwiftAsync:
+ case Attribute::ZExt:
+ case Attribute::ImmArg:
+ case Attribute::ByRef:
+ // These are not really attributes.
+ case Attribute::None:
+ case Attribute::EndAttrKinds:
+ case Attribute::EmptyKey:
+ case Attribute::TombstoneKey:
+ llvm_unreachable("Not a function attribute");
}
newFunction->addFnAttr(Attr);
}
newFunction->getBasicBlockList().push_back(newRootNode);
- // Create an iterator to name all of the arguments we inserted.
- Function::arg_iterator AI = newFunction->arg_begin();
+ // Create scalar and aggregate iterators to name all of the arguments we
+ // inserted.
+ Function::arg_iterator ScalarAI = newFunction->arg_begin();
+ Function::arg_iterator AggAI = std::next(ScalarAI, NumScalarParams);
// Rewrite all users of the inputs in the extracted region to use the
// arguments (or appropriate addressing into struct) instead.
- for (unsigned i = 0, e = inputs.size(); i != e; ++i) {
+ for (unsigned i = 0, e = inputs.size(), aggIdx = 0; i != e; ++i) {
Value *RewriteVal;
- if (AggregateArgs) {
+ if (AggregateArgs && StructValues.contains(inputs[i])) {
Value *Idx[2];
Idx[0] = Constant::getNullValue(Type::getInt32Ty(header->getContext()));
- Idx[1] = ConstantInt::get(Type::getInt32Ty(header->getContext()), i);
+ Idx[1] = ConstantInt::get(Type::getInt32Ty(header->getContext()), aggIdx);
Instruction *TI = newFunction->begin()->getTerminator();
GetElementPtrInst *GEP = GetElementPtrInst::Create(
- StructTy, &*AI, Idx, "gep_" + inputs[i]->getName(), TI);
- RewriteVal = new LoadInst(StructTy->getElementType(i), GEP,
+ StructTy, &*AggAI, Idx, "gep_" + inputs[i]->getName(), TI);
+ RewriteVal = new LoadInst(StructTy->getElementType(aggIdx), GEP,
"loadgep_" + inputs[i]->getName(), TI);
+ ++aggIdx;
} else
- RewriteVal = &*AI++;
+ RewriteVal = &*ScalarAI++;
std::vector<User *> Users(inputs[i]->user_begin(), inputs[i]->user_end());
for (User *use : Users)
@@ -1013,12 +1026,14 @@ Function *CodeExtractor::constructFunction(const ValueSet &inputs,
}
// Set names for input and output arguments.
- if (!AggregateArgs) {
- AI = newFunction->arg_begin();
- for (unsigned i = 0, e = inputs.size(); i != e; ++i, ++AI)
- AI->setName(inputs[i]->getName());
- for (unsigned i = 0, e = outputs.size(); i != e; ++i, ++AI)
- AI->setName(outputs[i]->getName()+".out");
+ if (NumScalarParams) {
+ ScalarAI = newFunction->arg_begin();
+ for (unsigned i = 0, e = inputs.size(); i != e; ++i, ++ScalarAI)
+ if (!StructValues.contains(inputs[i]))
+ ScalarAI->setName(inputs[i]->getName());
+ for (unsigned i = 0, e = outputs.size(); i != e; ++i, ++ScalarAI)
+ if (!StructValues.contains(outputs[i]))
+ ScalarAI->setName(outputs[i]->getName() + ".out");
}
// Rewrite branches to basic blocks outside of the loop to new dummy blocks
@@ -1126,7 +1141,8 @@ CallInst *CodeExtractor::emitCallAndSwitchStatement(Function *newFunction,
ValueSet &outputs) {
// Emit a call to the new function, passing in: *pointer to struct (if
// aggregating parameters), or plain inputs and allocated memory for outputs
- std::vector<Value *> params, StructValues, ReloadOutputs, Reloads;
+ std::vector<Value *> params, ReloadOutputs, Reloads;
+ ValueSet StructValues;
Module *M = newFunction->getParent();
LLVMContext &Context = M->getContext();
@@ -1134,23 +1150,24 @@ CallInst *CodeExtractor::emitCallAndSwitchStatement(Function *newFunction,
CallInst *call = nullptr;
// Add inputs as params, or to be filled into the struct
- unsigned ArgNo = 0;
+ unsigned ScalarInputArgNo = 0;
SmallVector<unsigned, 1> SwiftErrorArgs;
for (Value *input : inputs) {
- if (AggregateArgs)
- StructValues.push_back(input);
+ if (AggregateArgs && !ExcludeArgsFromAggregate.contains(input))
+ StructValues.insert(input);
else {
params.push_back(input);
if (input->isSwiftError())
- SwiftErrorArgs.push_back(ArgNo);
+ SwiftErrorArgs.push_back(ScalarInputArgNo);
}
- ++ArgNo;
+ ++ScalarInputArgNo;
}
// Create allocas for the outputs
+ unsigned ScalarOutputArgNo = 0;
for (Value *output : outputs) {
- if (AggregateArgs) {
- StructValues.push_back(output);
+ if (AggregateArgs && !ExcludeArgsFromAggregate.contains(output)) {
+ StructValues.insert(output);
} else {
AllocaInst *alloca =
new AllocaInst(output->getType(), DL.getAllocaAddrSpace(),
@@ -1158,12 +1175,14 @@ CallInst *CodeExtractor::emitCallAndSwitchStatement(Function *newFunction,
&codeReplacer->getParent()->front().front());
ReloadOutputs.push_back(alloca);
params.push_back(alloca);
+ ++ScalarOutputArgNo;
}
}
StructType *StructArgTy = nullptr;
AllocaInst *Struct = nullptr;
- if (AggregateArgs && (inputs.size() + outputs.size() > 0)) {
+ unsigned NumAggregatedInputs = 0;
+ if (AggregateArgs && !StructValues.empty()) {
std::vector<Type *> ArgTypes;
for (Value *V : StructValues)
ArgTypes.push_back(V->getType());
@@ -1175,14 +1194,18 @@ CallInst *CodeExtractor::emitCallAndSwitchStatement(Function *newFunction,
&codeReplacer->getParent()->front().front());
params.push_back(Struct);
- for (unsigned i = 0, e = inputs.size(); i != e; ++i) {
- Value *Idx[2];
- Idx[0] = Constant::getNullValue(Type::getInt32Ty(Context));
- Idx[1] = ConstantInt::get(Type::getInt32Ty(Context), i);
- GetElementPtrInst *GEP = GetElementPtrInst::Create(
- StructArgTy, Struct, Idx, "gep_" + StructValues[i]->getName());
- codeReplacer->getInstList().push_back(GEP);
- new StoreInst(StructValues[i], GEP, codeReplacer);
+ // Store aggregated inputs in the struct.
+ for (unsigned i = 0, e = StructValues.size(); i != e; ++i) {
+ if (inputs.contains(StructValues[i])) {
+ Value *Idx[2];
+ Idx[0] = Constant::getNullValue(Type::getInt32Ty(Context));
+ Idx[1] = ConstantInt::get(Type::getInt32Ty(Context), i);
+ GetElementPtrInst *GEP = GetElementPtrInst::Create(
+ StructArgTy, Struct, Idx, "gep_" + StructValues[i]->getName());
+ codeReplacer->getInstList().push_back(GEP);
+ new StoreInst(StructValues[i], GEP, codeReplacer);
+ NumAggregatedInputs++;
+ }
}
}
@@ -1205,24 +1228,24 @@ CallInst *CodeExtractor::emitCallAndSwitchStatement(Function *newFunction,
newFunction->addParamAttr(SwiftErrArgNo, Attribute::SwiftError);
}
- Function::arg_iterator OutputArgBegin = newFunction->arg_begin();
- unsigned FirstOut = inputs.size();
- if (!AggregateArgs)
- std::advance(OutputArgBegin, inputs.size());
-
- // Reload the outputs passed in by reference.
- for (unsigned i = 0, e = outputs.size(); i != e; ++i) {
+ // Reload the outputs passed in by reference: use the struct if the output
+ // is in the aggregate, otherwise reload from the scalar argument.
+ for (unsigned i = 0, e = outputs.size(), scalarIdx = 0,
+ aggIdx = NumAggregatedInputs;
+ i != e; ++i) {
Value *Output = nullptr;
- if (AggregateArgs) {
+ if (AggregateArgs && StructValues.contains(outputs[i])) {
Value *Idx[2];
Idx[0] = Constant::getNullValue(Type::getInt32Ty(Context));
- Idx[1] = ConstantInt::get(Type::getInt32Ty(Context), FirstOut + i);
+ Idx[1] = ConstantInt::get(Type::getInt32Ty(Context), aggIdx);
GetElementPtrInst *GEP = GetElementPtrInst::Create(
StructArgTy, Struct, Idx, "gep_reload_" + outputs[i]->getName());
codeReplacer->getInstList().push_back(GEP);
Output = GEP;
+ ++aggIdx;
} else {
- Output = ReloadOutputs[i];
+ Output = ReloadOutputs[scalarIdx];
+ ++scalarIdx;
}
LoadInst *load = new LoadInst(outputs[i]->getType(), Output,
outputs[i]->getName() + ".reload",
@@ -1304,8 +1327,13 @@ CallInst *CodeExtractor::emitCallAndSwitchStatement(Function *newFunction,
// Store the arguments right after the definition of output value.
// This should be done after creating exit stubs, to ensure that the invoke
// result restore will be placed in the outlined function.
- Function::arg_iterator OAI = OutputArgBegin;
- for (unsigned i = 0, e = outputs.size(); i != e; ++i) {
+ Function::arg_iterator ScalarOutputArgBegin = newFunction->arg_begin();
+ std::advance(ScalarOutputArgBegin, ScalarInputArgNo);
+ Function::arg_iterator AggOutputArgBegin = newFunction->arg_begin();
+ std::advance(AggOutputArgBegin, ScalarInputArgNo + ScalarOutputArgNo);
+
+ for (unsigned i = 0, e = outputs.size(), aggIdx = NumAggregatedInputs; i != e;
+ ++i) {
auto *OutI = dyn_cast<Instruction>(outputs[i]);
if (!OutI)
continue;
@@ -1325,23 +1353,27 @@ CallInst *CodeExtractor::emitCallAndSwitchStatement(Function *newFunction,
assert((InsertBefore->getFunction() == newFunction ||
Blocks.count(InsertBefore->getParent())) &&
"InsertPt should be in new function");
- assert(OAI != newFunction->arg_end() &&
- "Number of output arguments should match "
- "the amount of defined values");
- if (AggregateArgs) {
+ if (AggregateArgs && StructValues.contains(outputs[i])) {
+ assert(AggOutputArgBegin != newFunction->arg_end() &&
+ "Number of aggregate output arguments should match "
+ "the number of defined values");
Value *Idx[2];
Idx[0] = Constant::getNullValue(Type::getInt32Ty(Context));
- Idx[1] = ConstantInt::get(Type::getInt32Ty(Context), FirstOut + i);
+ Idx[1] = ConstantInt::get(Type::getInt32Ty(Context), aggIdx);
GetElementPtrInst *GEP = GetElementPtrInst::Create(
- StructArgTy, &*OAI, Idx, "gep_" + outputs[i]->getName(),
+ StructArgTy, &*AggOutputArgBegin, Idx, "gep_" + outputs[i]->getName(),
InsertBefore);
new StoreInst(outputs[i], GEP, InsertBefore);
+ ++aggIdx;
// Since there should be only one struct argument aggregating
- // all the output values, we shouldn't increment OAI, which always
- // points to the struct argument, in this case.
+ // all the output values, we shouldn't increment AggOutputArgBegin, which
+ // always points to the struct argument, in this case.
} else {
- new StoreInst(outputs[i], &*OAI, InsertBefore);
- ++OAI;
+ assert(ScalarOutputArgBegin != newFunction->arg_end() &&
+ "Number of scalar output arguments should match "
+ "the number of defined values");
+ new StoreInst(outputs[i], &*ScalarOutputArgBegin, InsertBefore);
+ ++ScalarOutputArgBegin;
}
}
@@ -1840,3 +1872,7 @@ bool CodeExtractor::verifyAssumptionCache(const Function &OldFunc,
}
return false;
}
+
+void CodeExtractor::excludeArgFromAggregate(Value *Arg) {
+ ExcludeArgsFromAggregate.insert(Arg);
+}
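
excludeArgFromAggregate() above gives CodeExtractor clients per-value control over aggregation. A hedged usage sketch; the surrounding extraction setup is illustrative and relies on the constructor's default arguments:

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/Transforms/Utils/CodeExtractor.h"
    using namespace llvm;

    // Outline Blocks with aggregated arguments, but keep KeepScalar as a plain
    // scalar parameter of the outlined function.
    static Function *outlineKeepingScalar(ArrayRef<BasicBlock *> Blocks,
                                          Value *KeepScalar,
                                          const CodeExtractorAnalysisCache &CEAC) {
      CodeExtractor CE(Blocks, /*DT=*/nullptr, /*AggregateArgs=*/true);
      CE.excludeArgFromAggregate(KeepScalar);
      return CE.extractCodeRegion(CEAC);
    }
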
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/Evaluator.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/Evaluator.cpp
index 91630d876fc8..e73287c060ae 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/Evaluator.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/Evaluator.cpp
@@ -122,129 +122,114 @@ isSimpleEnoughValueToCommit(Constant *C,
return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, DL);
}
-/// Return true if this constant is simple enough for us to understand. In
-/// particular, if it is a cast to anything other than from one pointer type to
-/// another pointer type, we punt. We basically just support direct accesses to
-/// globals and GEP's of globals. This should be kept up to date with
-/// CommitValueTo.
-static bool isSimpleEnoughPointerToCommit(Constant *C, const DataLayout &DL) {
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
- // Do not allow weak/*_odr/linkonce linkage or external globals.
- return GV->hasUniqueInitializer();
-
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
- // Handle a constantexpr gep.
- if (CE->getOpcode() == Instruction::GetElementPtr &&
- isa<GlobalVariable>(CE->getOperand(0)) &&
- cast<GEPOperator>(CE)->isInBounds()) {
- GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
- // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or
- // external globals.
- if (!GV->hasUniqueInitializer())
- return false;
+void Evaluator::MutableValue::clear() {
+ if (auto *Agg = Val.dyn_cast<MutableAggregate *>())
+ delete Agg;
+ Val = nullptr;
+}
- // The first index must be zero.
- ConstantInt *CI = dyn_cast<ConstantInt>(*std::next(CE->op_begin()));
- if (!CI || !CI->isZero()) return false;
+Constant *Evaluator::MutableValue::read(Type *Ty, APInt Offset,
+ const DataLayout &DL) const {
+ TypeSize TySize = DL.getTypeStoreSize(Ty);
+ const MutableValue *V = this;
+ while (const auto *Agg = V->Val.dyn_cast<MutableAggregate *>()) {
+ Type *AggTy = Agg->Ty;
+ Optional<APInt> Index = DL.getGEPIndexForOffset(AggTy, Offset);
+ if (!Index || Index->uge(Agg->Elements.size()) ||
+ !TypeSize::isKnownLE(TySize, DL.getTypeStoreSize(AggTy)))
+ return nullptr;
+
+ V = &Agg->Elements[Index->getZExtValue()];
+ }
- // The remaining indices must be compile-time known integers within the
- // notional bounds of the corresponding static array types.
- if (!CE->isGEPWithNoNotionalOverIndexing())
- return false;
+ return ConstantFoldLoadFromConst(V->Val.get<Constant *>(), Ty, Offset, DL);
+}
- return ConstantFoldLoadThroughGEPConstantExpr(
- GV->getInitializer(), CE,
- cast<GEPOperator>(CE)->getResultElementType(), DL);
- } else if (CE->getOpcode() == Instruction::BitCast &&
- isa<GlobalVariable>(CE->getOperand(0))) {
- // A constantexpr bitcast from a pointer to another pointer is a no-op,
- // and we know how to evaluate it by moving the bitcast from the pointer
- // operand to the value operand.
- // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or
- // external globals.
- return cast<GlobalVariable>(CE->getOperand(0))->hasUniqueInitializer();
- }
- }
+bool Evaluator::MutableValue::makeMutable() {
+ Constant *C = Val.get<Constant *>();
+ Type *Ty = C->getType();
+ unsigned NumElements;
+ if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
+ NumElements = VT->getNumElements();
+ } else if (auto *AT = dyn_cast<ArrayType>(Ty))
+ NumElements = AT->getNumElements();
+ else if (auto *ST = dyn_cast<StructType>(Ty))
+ NumElements = ST->getNumElements();
+ else
+ return false;
- return false;
+ MutableAggregate *MA = new MutableAggregate(Ty);
+ MA->Elements.reserve(NumElements);
+ for (unsigned I = 0; I < NumElements; ++I)
+ MA->Elements.push_back(C->getAggregateElement(I));
+ Val = MA;
+ return true;
}
-/// Apply \p TryLoad to Ptr. If this returns \p nullptr, introspect the
-/// pointer's type and walk down through the initial elements to obtain
-/// additional pointers to try. Returns the first non-null return value from
-/// \p TryLoad, or \p nullptr if the type can't be introspected further.
-static Constant *
-evaluateBitcastFromPtr(Constant *Ptr, const DataLayout &DL,
- const TargetLibraryInfo *TLI,
- std::function<Constant *(Constant *)> TryLoad) {
- Constant *Val;
- while (!(Val = TryLoad(Ptr))) {
- // If Ty is a non-opaque struct, we can convert the pointer to the struct
- // into a pointer to its first member.
- // FIXME: This could be extended to support arrays as well.
- Type *Ty = cast<PointerType>(Ptr->getType())->getElementType();
- if (!isa<StructType>(Ty) || cast<StructType>(Ty)->isOpaque())
- break;
-
- IntegerType *IdxTy = IntegerType::get(Ty->getContext(), 32);
- Constant *IdxZero = ConstantInt::get(IdxTy, 0, false);
- Constant *const IdxList[] = {IdxZero, IdxZero};
-
- Ptr = ConstantExpr::getGetElementPtr(Ty, Ptr, IdxList);
- Ptr = ConstantFoldConstant(Ptr, DL, TLI);
+bool Evaluator::MutableValue::write(Constant *V, APInt Offset,
+ const DataLayout &DL) {
+ Type *Ty = V->getType();
+ TypeSize TySize = DL.getTypeStoreSize(Ty);
+ MutableValue *MV = this;
+ while (Offset != 0 ||
+ !CastInst::isBitOrNoopPointerCastable(Ty, MV->getType(), DL)) {
+ if (MV->Val.is<Constant *>() && !MV->makeMutable())
+ return false;
+
+ MutableAggregate *Agg = MV->Val.get<MutableAggregate *>();
+ Type *AggTy = Agg->Ty;
+ Optional<APInt> Index = DL.getGEPIndexForOffset(AggTy, Offset);
+ if (!Index || Index->uge(Agg->Elements.size()) ||
+ !TypeSize::isKnownLE(TySize, DL.getTypeStoreSize(AggTy)))
+ return false;
+
+ MV = &Agg->Elements[Index->getZExtValue()];
}
- return Val;
+
+ Type *MVType = MV->getType();
+ MV->clear();
+ if (Ty->isIntegerTy() && MVType->isPointerTy())
+ MV->Val = ConstantExpr::getIntToPtr(V, MVType);
+ else if (Ty->isPointerTy() && MVType->isIntegerTy())
+ MV->Val = ConstantExpr::getPtrToInt(V, MVType);
+ else if (Ty != MVType)
+ MV->Val = ConstantExpr::getBitCast(V, MVType);
+ else
+ MV->Val = V;
+ return true;
}
-static Constant *getInitializer(Constant *C) {
- auto *GV = dyn_cast<GlobalVariable>(C);
- return GV && GV->hasDefinitiveInitializer() ? GV->getInitializer() : nullptr;
+Constant *Evaluator::MutableAggregate::toConstant() const {
+ SmallVector<Constant *, 32> Consts;
+ for (const MutableValue &MV : Elements)
+ Consts.push_back(MV.toConstant());
+
+ if (auto *ST = dyn_cast<StructType>(Ty))
+ return ConstantStruct::get(ST, Consts);
+ if (auto *AT = dyn_cast<ArrayType>(Ty))
+ return ConstantArray::get(AT, Consts);
+ assert(isa<FixedVectorType>(Ty) && "Must be vector");
+ return ConstantVector::get(Consts);
}
/// Return the value that would be computed by a load from P after the stores
/// reflected by 'memory' have been performed. If we can't decide, return null.
Constant *Evaluator::ComputeLoadResult(Constant *P, Type *Ty) {
- // If this memory location has been recently stored, use the stored value: it
- // is the most up-to-date.
- auto TryFindMemLoc = [this](Constant *Ptr) {
- return MutatedMemory.lookup(Ptr);
- };
-
- if (Constant *Val = TryFindMemLoc(P))
- return Val;
-
- // Access it.
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
- if (GV->hasDefinitiveInitializer())
- return GV->getInitializer();
+ APInt Offset(DL.getIndexTypeSizeInBits(P->getType()), 0);
+ P = cast<Constant>(P->stripAndAccumulateConstantOffsets(
+ DL, Offset, /* AllowNonInbounds */ true));
+ Offset = Offset.sextOrTrunc(DL.getIndexTypeSizeInBits(P->getType()));
+ auto *GV = dyn_cast<GlobalVariable>(P);
+ if (!GV)
return nullptr;
- }
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(P)) {
- switch (CE->getOpcode()) {
- // Handle a constantexpr getelementptr.
- case Instruction::GetElementPtr:
- if (auto *I = getInitializer(CE->getOperand(0)))
- return ConstantFoldLoadThroughGEPConstantExpr(I, CE, Ty, DL);
- break;
- // Handle a constantexpr bitcast.
- case Instruction::BitCast:
- // We're evaluating a load through a pointer that was bitcast to a
- // different type. See if the "from" pointer has recently been stored.
- // If it hasn't, we may still be able to find a stored pointer by
- // introspecting the type.
- Constant *Val =
- evaluateBitcastFromPtr(CE->getOperand(0), DL, TLI, TryFindMemLoc);
- if (!Val)
- Val = getInitializer(CE->getOperand(0));
- if (Val)
- return ConstantFoldLoadThroughBitcast(
- Val, P->getType()->getPointerElementType(), DL);
- break;
- }
- }
+ auto It = MutatedMemory.find(GV);
+ if (It != MutatedMemory.end())
+ return It->second.read(Ty, Offset, DL);
- return nullptr; // don't know how to evaluate.
+ if (!GV->hasDefinitiveInitializer())
+ return nullptr;
+ return ConstantFoldLoadFromConst(GV->getInitializer(), Ty, Offset, DL);
}
static Function *getFunction(Constant *C) {
@@ -260,17 +245,10 @@ static Function *getFunction(Constant *C) {
Function *
Evaluator::getCalleeWithFormalArgs(CallBase &CB,
SmallVectorImpl<Constant *> &Formals) {
- auto *V = CB.getCalledOperand();
+ auto *V = CB.getCalledOperand()->stripPointerCasts();
if (auto *Fn = getFunction(getVal(V)))
return getFormalParams(CB, Fn, Formals) ? Fn : nullptr;
-
- auto *CE = dyn_cast<ConstantExpr>(V);
- if (!CE || CE->getOpcode() != Instruction::BitCast ||
- !getFormalParams(CB, getFunction(CE->getOperand(0)), Formals))
- return nullptr;
-
- return dyn_cast<Function>(
- ConstantFoldLoadThroughBitcast(CE, CE->getOperand(0)->getType(), DL));
+ return nullptr;
}
bool Evaluator::getFormalParams(CallBase &CB, Function *F,
@@ -299,17 +277,13 @@ bool Evaluator::getFormalParams(CallBase &CB, Function *F,
/// If call expression contains bitcast then we may need to cast
/// evaluated return value to a type of the call expression.
-Constant *Evaluator::castCallResultIfNeeded(Value *CallExpr, Constant *RV) {
- ConstantExpr *CE = dyn_cast<ConstantExpr>(CallExpr);
- if (!RV || !CE || CE->getOpcode() != Instruction::BitCast)
+Constant *Evaluator::castCallResultIfNeeded(Type *ReturnType, Constant *RV) {
+ if (!RV || RV->getType() == ReturnType)
return RV;
- if (auto *FT =
- dyn_cast<FunctionType>(CE->getType()->getPointerElementType())) {
- RV = ConstantFoldLoadThroughBitcast(RV, FT->getReturnType(), DL);
- if (!RV)
- LLVM_DEBUG(dbgs() << "Failed to fold bitcast call expr\n");
- }
+ RV = ConstantFoldLoadThroughBitcast(RV, ReturnType, DL);
+ if (!RV)
+ LLVM_DEBUG(dbgs() << "Failed to fold bitcast call expr\n");
return RV;
}
@@ -337,68 +311,30 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, BasicBlock *&NextBB,
Ptr = FoldedPtr;
LLVM_DEBUG(dbgs() << "; To: " << *Ptr << "\n");
}
- // Conservatively, avoid aggregate types. This is because we don't
- // want to worry about them partially overlapping other stores.
- if (!SI->getValueOperand()->getType()->isSingleValueType() ||
- !isSimpleEnoughPointerToCommit(Ptr, DL)) {
- // If this is too complex for us to commit, reject it.
- LLVM_DEBUG(
- dbgs() << "Pointer is too complex for us to evaluate store.");
+
+ APInt Offset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
+ Ptr = cast<Constant>(Ptr->stripAndAccumulateConstantOffsets(
+ DL, Offset, /* AllowNonInbounds */ true));
+ Offset = Offset.sextOrTrunc(DL.getIndexTypeSizeInBits(Ptr->getType()));
+ auto *GV = dyn_cast<GlobalVariable>(Ptr);
+ if (!GV || !GV->hasUniqueInitializer()) {
+ LLVM_DEBUG(dbgs() << "Store is not to global with unique initializer: "
+ << *Ptr << "\n");
return false;
}
- Constant *Val = getVal(SI->getOperand(0));
-
// If this might be too difficult for the backend to handle (e.g. the addr
// of one global variable divided by another) then we can't commit it.
+ Constant *Val = getVal(SI->getOperand(0));
if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, DL)) {
LLVM_DEBUG(dbgs() << "Store value is too complex to evaluate store. "
<< *Val << "\n");
return false;
}
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
- if (CE->getOpcode() == Instruction::BitCast) {
- LLVM_DEBUG(dbgs()
- << "Attempting to resolve bitcast on constant ptr.\n");
- // If we're evaluating a store through a bitcast, then we need
- // to pull the bitcast off the pointer type and push it onto the
- // stored value. In order to push the bitcast onto the stored value,
- // a bitcast from the pointer's element type to Val's type must be
- // legal. If it's not, we can try introspecting the type to find a
- // legal conversion.
-
- auto TryCastValTy = [&](Constant *P) -> Constant * {
- // The conversion is illegal if the store is wider than the
- // pointee proposed by `evaluateBitcastFromPtr`, since that would
- // drop stores to other struct elements when the caller attempts to
- // look through a struct's 0th element.
- Type *NewTy = cast<PointerType>(P->getType())->getElementType();
- Type *STy = Val->getType();
- if (DL.getTypeSizeInBits(NewTy) < DL.getTypeSizeInBits(STy))
- return nullptr;
-
- if (Constant *FV = ConstantFoldLoadThroughBitcast(Val, NewTy, DL)) {
- Ptr = P;
- return FV;
- }
- return nullptr;
- };
-
- Constant *NewVal =
- evaluateBitcastFromPtr(CE->getOperand(0), DL, TLI, TryCastValTy);
- if (!NewVal) {
- LLVM_DEBUG(dbgs() << "Failed to bitcast constant ptr, can not "
- "evaluate.\n");
- return false;
- }
-
- Val = NewVal;
- LLVM_DEBUG(dbgs() << "Evaluated bitcast: " << *Val << "\n");
- }
- }
-
- MutatedMemory[Ptr] = Val;
+ auto Res = MutatedMemory.try_emplace(GV, GV->getInitializer());
+ if (!Res.first->second.write(Val, Offset, DL))
+ return false;
} else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CurInst)) {
InstResult = ConstantExpr::get(BO->getOpcode(),
getVal(BO->getOperand(0)),
@@ -593,7 +529,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, BasicBlock *&NextBB,
if (Callee->isDeclaration()) {
// If this is a function we can constant fold, do it.
if (Constant *C = ConstantFoldCall(&CB, Callee, Formals, TLI)) {
- InstResult = castCallResultIfNeeded(CB.getCalledOperand(), C);
+ InstResult = castCallResultIfNeeded(CB.getType(), C);
if (!InstResult)
return false;
LLVM_DEBUG(dbgs() << "Constant folded function call. Result: "
@@ -617,7 +553,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, BasicBlock *&NextBB,
return false;
}
ValueStack.pop_back();
- InstResult = castCallResultIfNeeded(CB.getCalledOperand(), RetVal);
+ InstResult = castCallResultIfNeeded(CB.getType(), RetVal);
if (RetVal && !InstResult)
return false;
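
The rewritten Evaluator above stops pattern-matching GEP and bitcast constant expressions and instead reduces every pointer to a base global plus a constant byte offset, then reads or writes the tracked MutableValue at that offset. A sketch of that decomposition step, lifted directly from the load and store paths above; the helper name is illustrative:

    #include "llvm/ADT/APInt.h"
    #include "llvm/IR/Constants.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/GlobalVariable.h"
    using namespace llvm;

    // Reduce a constant pointer to (base global, byte offset); returns null if
    // the base is not a global variable.
    static GlobalVariable *decomposePtr(Constant *P, const DataLayout &DL,
                                        APInt &Offset) {
      Offset = APInt(DL.getIndexTypeSizeInBits(P->getType()), 0);
      P = cast<Constant>(P->stripAndAccumulateConstantOffsets(
          DL, Offset, /*AllowNonInbounds=*/true));
      Offset = Offset.sextOrTrunc(DL.getIndexTypeSizeInBits(P->getType()));
      return dyn_cast<GlobalVariable>(P);
    }
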
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/GlobalStatus.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/GlobalStatus.cpp
index 9bfc73e4ba6c..f8ec8c6ad426 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/GlobalStatus.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/GlobalStatus.cpp
@@ -66,8 +66,6 @@ static bool analyzeGlobalAux(const Value *V, GlobalStatus &GS,
for (const Use &U : V->uses()) {
const User *UR = U.getUser();
if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(UR)) {
- GS.HasNonInstructionUser = true;
-
// If the result of the constantexpr isn't pointer type, then we won't
// know to expect it in various places. Just reject early.
if (!isa<PointerType>(CE->getType()))
@@ -105,9 +103,7 @@ static bool analyzeGlobalAux(const Value *V, GlobalStatus &GS,
// value, not an aggregate), keep more specific information about
// stores.
if (GS.StoredType != GlobalStatus::Stored) {
- const Value *Ptr = SI->getPointerOperand();
- if (isa<ConstantExpr>(Ptr))
- Ptr = Ptr->stripPointerCasts();
+ const Value *Ptr = SI->getPointerOperand()->stripPointerCasts();
if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) {
Value *StoredVal = SI->getOperand(0);
@@ -174,12 +170,10 @@ static bool analyzeGlobalAux(const Value *V, GlobalStatus &GS,
return true; // Any other non-load instruction might take address!
}
} else if (const Constant *C = dyn_cast<Constant>(UR)) {
- GS.HasNonInstructionUser = true;
// We might have a dead and dangling constant hanging off of here.
if (!isSafeToDestroyConstant(C))
return true;
} else {
- GS.HasNonInstructionUser = true;
// Otherwise must be some other user.
return true;
}
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/InlineFunction.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/InlineFunction.cpp
index 997667810580..c9f872f5b7e1 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -1185,10 +1185,10 @@ static bool MayContainThrowingOrExitingCall(Instruction *Begin,
static AttrBuilder IdentifyValidAttributes(CallBase &CB) {
- AttrBuilder AB(CB.getAttributes(), AttributeList::ReturnIndex);
- if (AB.empty())
+ AttrBuilder AB(CB.getContext(), CB.getAttributes().getRetAttrs());
+ if (!AB.hasAttributes())
return AB;
- AttrBuilder Valid;
+ AttrBuilder Valid(CB.getContext());
// Only allow these white listed attributes to be propagated back to the
// callee. This is because other attributes may only be valid on the call
// itself, i.e. attributes such as signext and zeroext.
@@ -1208,7 +1208,7 @@ static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap) {
return;
AttrBuilder Valid = IdentifyValidAttributes(CB);
- if (Valid.empty())
+ if (!Valid.hasAttributes())
return;
auto *CalledFunction = CB.getCalledFunction();
auto &Context = CalledFunction->getContext();
@@ -1667,7 +1667,7 @@ inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind,
Module *Mod = CB.getModule();
assert(objcarc::isRetainOrClaimRV(RVCallKind) && "unexpected ARC function");
bool IsRetainRV = RVCallKind == objcarc::ARCInstKind::RetainRV,
- IsClaimRV = !IsRetainRV;
+ IsUnsafeClaimRV = !IsRetainRV;
for (auto *RI : Returns) {
Value *RetOpnd = objcarc::GetRCIdentityRoot(RI->getOperand(0));
@@ -1694,7 +1694,7 @@ inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind,
// and erase the autoreleaseRV call.
// - If retainRV is attached to the call, just erase the autoreleaseRV
// call.
- if (IsClaimRV) {
+ if (IsUnsafeClaimRV) {
Builder.SetInsertPoint(II);
Function *IFn =
Intrinsic::getDeclaration(Mod, Intrinsic::objc_release);
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/LCSSA.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/LCSSA.cpp
index 668626fef933..72b864dc3e48 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/LCSSA.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/LCSSA.cpp
@@ -339,8 +339,10 @@ bool llvm::formLCSSA(Loop &L, const DominatorTree &DT, const LoopInfo *LI,
#ifdef EXPENSIVE_CHECKS
// Verify all sub-loops are in LCSSA form already.
- for (Loop *SubLoop: L)
+ for (Loop *SubLoop: L) {
+ (void)SubLoop; // Silence unused variable warning.
assert(SubLoop->isRecursivelyLCSSAForm(DT, *LI) && "Subloop not in LCSSA!");
+ }
#endif
SmallVector<BasicBlock *, 8> ExitBlocks;
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/Local.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/Local.cpp
index ecad79b68185..9f33d2f82732 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/Local.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/Local.cpp
@@ -492,7 +492,7 @@ bool llvm::wouldInstructionBeTriviallyDead(Instruction *I,
}
}
- if (isAllocLikeFn(I, TLI))
+ if (isAllocationFn(I, TLI) && isAllocRemovable(cast<CallBase>(I), TLI))
return true;
if (CallInst *CI = isFreeCall(I, TLI))
@@ -2189,8 +2189,8 @@ CallInst *llvm::createCallMatchingInvoke(InvokeInst *II) {
return NewCall;
}
-/// changeToCall - Convert the specified invoke into a normal call.
-void llvm::changeToCall(InvokeInst *II, DomTreeUpdater *DTU) {
+// changeToCall - Convert the specified invoke into a normal call.
+CallInst *llvm::changeToCall(InvokeInst *II, DomTreeUpdater *DTU) {
CallInst *NewCall = createCallMatchingInvoke(II);
NewCall->takeName(II);
NewCall->insertBefore(II);
@@ -2207,6 +2207,7 @@ void llvm::changeToCall(InvokeInst *II, DomTreeUpdater *DTU) {
II->eraseFromParent();
if (DTU)
DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
+ return NewCall;
}
BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
@@ -3147,11 +3148,6 @@ bool llvm::recognizeBSwapOrBitReverseIdiom(
if (!ITy->isIntOrIntVectorTy() || ITy->getScalarSizeInBits() > 128)
return false; // Can't do integer/elements > 128 bits.
- Type *DemandedTy = ITy;
- if (I->hasOneUse())
- if (auto *Trunc = dyn_cast<TruncInst>(I->user_back()))
- DemandedTy = Trunc->getType();
-
// Try to find all the pieces corresponding to the bswap.
bool FoundRoot = false;
std::map<Value *, Optional<BitPart>> BPS;
@@ -3165,6 +3161,7 @@ bool llvm::recognizeBSwapOrBitReverseIdiom(
"Illegal bit provenance index");
// If the upper bits are zero, then attempt to perform as a truncated op.
+ Type *DemandedTy = ITy;
if (BitProvenance.back() == BitPart::Unset) {
while (!BitProvenance.empty() && BitProvenance.back() == BitPart::Unset)
BitProvenance = BitProvenance.drop_back();
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/LoopPeel.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/LoopPeel.cpp
index 69fd110dc3c2..92333408aaef 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/LoopPeel.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/LoopPeel.cpp
@@ -359,7 +359,7 @@ static bool violatesLegacyMultiExitLoopCheck(Loop *L) {
// Return the number of iterations we want to peel off.
void llvm::computePeelCount(Loop *L, unsigned LoopSize,
TargetTransformInfo::PeelingPreferences &PP,
- unsigned &TripCount, DominatorTree &DT,
+ unsigned TripCount, DominatorTree &DT,
ScalarEvolution &SE, unsigned Threshold) {
assert(LoopSize > 0 && "Zero loop size is not allowed!");
// Save the PP.PeelCount value set by the target in
@@ -370,7 +370,7 @@ void llvm::computePeelCount(Loop *L, unsigned LoopSize,
return;
// Only try to peel innermost loops by default.
- // The constraint can be relaxed by the target in TTI.getUnrollingPreferences
+ // The constraint can be relaxed by the target in TTI.getPeelingPreferences
// or by the flag -unroll-allow-loop-nests-peeling.
if (!PP.AllowLoopNestsPeeling && !L->isInnermost())
return;
@@ -407,8 +407,8 @@ void llvm::computePeelCount(Loop *L, unsigned LoopSize,
SmallDenseMap<PHINode *, Optional<unsigned> > IterationsToInvariance;
// Now go through all Phis to calculate the number of iterations they
// need to become invariants.
- // Start the max computation with the UP.PeelCount value set by the target
- // in TTI.getUnrollingPreferences or by the flag -unroll-peel-count.
+ // Start the max computation with the PP.PeelCount value set by the target
+ // in TTI.getPeelingPreferences or by the flag -unroll-peel-count.
unsigned DesiredPeelCount = TargetPeelCount;
BasicBlock *BackEdge = L->getLoopLatch();
assert(BackEdge && "Loop is not in simplified form?");
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/LoopUnroll.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/LoopUnroll.cpp
index b0c622b98d5e..9ca1f4f44b97 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/LoopUnroll.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/LoopUnroll.cpp
@@ -99,6 +99,17 @@ UnrollVerifyDomtree("unroll-verify-domtree", cl::Hidden,
#endif
);
+static cl::opt<bool>
+UnrollVerifyLoopInfo("unroll-verify-loopinfo", cl::Hidden,
+ cl::desc("Verify loopinfo after unrolling"),
+#ifdef EXPENSIVE_CHECKS
+ cl::init(true)
+#else
+ cl::init(false)
+#endif
+ );
+
+
/// Check if unrolling created a situation where we need to insert phi nodes to
/// preserve LCSSA form.
/// \param Blocks is a vector of basic blocks representing unrolled loop.
@@ -764,6 +775,9 @@ LoopUnrollResult llvm::UnrollLoop(Loop *L, UnrollLoopOptions ULO, LoopInfo *LI,
// Apply updates to the DomTree.
DT = &DTU.getDomTree();
+ assert(!UnrollVerifyDomtree ||
+ DT->verify(DominatorTree::VerificationLevel::Fast));
+
// At this point, the code is well formed. We now simplify the unrolled loop,
// doing constant propagation and dead code elimination as we go.
simplifyLoopAfterUnroll(L, !CompletelyUnroll && ULO.Count > 1, LI, SE, DT, AC,
@@ -777,6 +791,10 @@ LoopUnrollResult llvm::UnrollLoop(Loop *L, UnrollLoopOptions ULO, LoopInfo *LI,
if (CompletelyUnroll)
LI->erase(L);
+  // LoopInfo should now be valid, confirm that.
+ if (UnrollVerifyLoopInfo)
+ LI->verify(*DT);
+
// After complete unrolling most of the blocks should be contained in OuterL.
// However, some of them might happen to be out of OuterL (e.g. if they
// precede a loop exit). In this case we might need to insert PHI nodes in
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/LoopUtils.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/LoopUtils.cpp
index 93157bd87c34..95db2fe8d310 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/LoopUtils.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/LoopUtils.cpp
@@ -22,6 +22,7 @@
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/GlobalsModRef.h"
+#include "llvm/Analysis/InstSimplifyFolder.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
@@ -1567,7 +1568,9 @@ Value *llvm::addRuntimeChecks(
auto ExpandedChecks = expandBounds(PointerChecks, TheLoop, Loc, Exp);
LLVMContext &Ctx = Loc->getContext();
- IRBuilder<> ChkBuilder(Loc);
+ IRBuilder<InstSimplifyFolder> ChkBuilder(Ctx,
+ Loc->getModule()->getDataLayout());
+ ChkBuilder.SetInsertPoint(Loc);
// Our instructions might fold to a constant.
Value *MemoryRuntimeCheck = nullptr;
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/LoopVersioning.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/LoopVersioning.cpp
index 771b7d25b0f2..f0bf625fa18e 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/LoopVersioning.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/LoopVersioning.cpp
@@ -15,6 +15,7 @@
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/InstSimplifyFolder.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
@@ -70,17 +71,14 @@ void LoopVersioning::versionLoop(
"scev.check");
SCEVRuntimeCheck =
Exp.expandCodeForPredicate(&Preds, RuntimeCheckBB->getTerminator());
- auto *CI = dyn_cast<ConstantInt>(SCEVRuntimeCheck);
-
- // Discard the SCEV runtime check if it is always true.
- if (CI && CI->isZero())
- SCEVRuntimeCheck = nullptr;
+ IRBuilder<InstSimplifyFolder> Builder(
+ RuntimeCheckBB->getContext(),
+ InstSimplifyFolder(RuntimeCheckBB->getModule()->getDataLayout()));
if (MemRuntimeCheck && SCEVRuntimeCheck) {
- RuntimeCheck = BinaryOperator::Create(Instruction::Or, MemRuntimeCheck,
- SCEVRuntimeCheck, "lver.safe");
- if (auto *I = dyn_cast<Instruction>(RuntimeCheck))
- I->insertBefore(RuntimeCheckBB->getTerminator());
+ Builder.SetInsertPoint(RuntimeCheckBB->getTerminator());
+ RuntimeCheck =
+ Builder.CreateOr(MemRuntimeCheck, SCEVRuntimeCheck, "lver.safe");
} else
RuntimeCheck = MemRuntimeCheck ? MemRuntimeCheck : SCEVRuntimeCheck;
@@ -109,8 +107,9 @@ void LoopVersioning::versionLoop(
// Insert the conditional branch based on the result of the memchecks.
Instruction *OrigTerm = RuntimeCheckBB->getTerminator();
- BranchInst::Create(NonVersionedLoop->getLoopPreheader(),
- VersionedLoop->getLoopPreheader(), RuntimeCheck, OrigTerm);
+ Builder.SetInsertPoint(OrigTerm);
+ Builder.CreateCondBr(RuntimeCheck, NonVersionedLoop->getLoopPreheader(),
+ VersionedLoop->getLoopPreheader());
OrigTerm->eraseFromParent();
// The loops merge in the original exit block. This is now dominated by the
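The two hunks above stop hand-building the Or of the memory and SCEV runtime checks (and drop the manual discarding of a constant-false SCEV check): the checks are now combined through an IRBuilder parameterized by InstSimplifyFolder, so trivial forms such as or X, false fold away as they are created. A toy, library-free sketch of what such a folding "create or" looks like; every name below is invented for illustration and is not the LLVM API:

#include <optional>
#include <string>

struct Expr {
  std::string Text;                  // Pretend IR text.
  std::optional<bool> ConstantValue; // Set when the expression is a known bool.
};

// createOr folds away constant operands instead of materializing a redundant
// instruction, mirroring the role InstSimplifyFolder plays for IRBuilder above.
Expr createOr(const Expr &L, const Expr &R) {
  if (L.ConstantValue)
    return *L.ConstantValue ? L : R; // or true, X -> true ; or false, X -> X
  if (R.ConstantValue)
    return *R.ConstantValue ? R : L; // or X, true -> true ; or X, false -> X
  return {"(" + L.Text + " | " + R.Text + ")", std::nullopt};
}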
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
index 8dc4702993c3..3d75dd57456d 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
@@ -297,7 +297,7 @@ static void createMemMoveLoop(Instruction *InsertBefore, Value *SrcAddr,
Function *F = OrigBB->getParent();
const DataLayout &DL = F->getParent()->getDataLayout();
- Type *EltTy = cast<PointerType>(SrcAddr->getType())->getElementType();
+ Type *EltTy = SrcAddr->getType()->getPointerElementType();
// Create a comparison of src and dst, based on which we jump to either
// the forward-copy part of the function (if src >= dst) or the backwards-copy
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/ModuleUtils.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/ModuleUtils.cpp
index bb5ff59cba4b..7c9ab7f6ca2c 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/ModuleUtils.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/ModuleUtils.cpp
@@ -178,66 +178,30 @@ llvm::getOrCreateSanitizerCtorAndInitFunctions(
}
void llvm::filterDeadComdatFunctions(
- Module &M, SmallVectorImpl<Function *> &DeadComdatFunctions) {
- // Build a map from the comdat to the number of entries in that comdat we
- // think are dead. If this fully covers the comdat group, then the entire
- // group is dead. If we find another entry in the comdat group though, we'll
- // have to preserve the whole group.
- SmallDenseMap<Comdat *, int, 16> ComdatEntriesCovered;
+ SmallVectorImpl<Function *> &DeadComdatFunctions) {
+ SmallPtrSet<Function *, 32> MaybeDeadFunctions;
+ SmallPtrSet<Comdat *, 32> MaybeDeadComdats;
for (Function *F : DeadComdatFunctions) {
- Comdat *C = F->getComdat();
- assert(C && "Expected all input GVs to be in a comdat!");
- ComdatEntriesCovered[C] += 1;
+ MaybeDeadFunctions.insert(F);
+ if (Comdat *C = F->getComdat())
+ MaybeDeadComdats.insert(C);
}
- auto CheckComdat = [&](Comdat &C) {
- auto CI = ComdatEntriesCovered.find(&C);
- if (CI == ComdatEntriesCovered.end())
- return;
-
- // If this could have been covered by a dead entry, just subtract one to
- // account for it.
- if (CI->second > 0) {
- CI->second -= 1;
- return;
- }
-
- // If we've already accounted for all the entries that were dead, the
- // entire comdat is alive so remove it from the map.
- ComdatEntriesCovered.erase(CI);
- };
-
- auto CheckAllComdats = [&] {
- for (Function &F : M.functions())
- if (Comdat *C = F.getComdat()) {
- CheckComdat(*C);
- if (ComdatEntriesCovered.empty())
- return;
- }
- for (GlobalVariable &GV : M.globals())
- if (Comdat *C = GV.getComdat()) {
- CheckComdat(*C);
- if (ComdatEntriesCovered.empty())
- return;
- }
- for (GlobalAlias &GA : M.aliases())
- if (Comdat *C = GA.getComdat()) {
- CheckComdat(*C);
- if (ComdatEntriesCovered.empty())
- return;
- }
- };
- CheckAllComdats();
-
- if (ComdatEntriesCovered.empty()) {
- DeadComdatFunctions.clear();
- return;
+ // Find comdats for which all users are dead now.
+ SmallPtrSet<Comdat *, 32> DeadComdats;
+ for (Comdat *C : MaybeDeadComdats) {
+ auto IsUserDead = [&](GlobalObject *GO) {
+ auto *F = dyn_cast<Function>(GO);
+ return F && MaybeDeadFunctions.contains(F);
+ };
+ if (all_of(C->getUsers(), IsUserDead))
+ DeadComdats.insert(C);
}
- // Remove the entries that were not covering.
- erase_if(DeadComdatFunctions, [&](GlobalValue *GV) {
- return ComdatEntriesCovered.find(GV->getComdat()) ==
- ComdatEntriesCovered.end();
+ // Only keep functions which have no comdat or a dead comdat.
+ erase_if(DeadComdatFunctions, [&](Function *F) {
+ Comdat *C = F->getComdat();
+ return C && !DeadComdats.contains(C);
});
}
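The rewritten filterDeadComdatFunctions above keeps a candidate only when it has no comdat, or when every user of its comdat is itself a dead candidate. A standalone sketch of that rule over plain strings and sets; the container types stand in for Function and Comdat and are invented for illustration:

#include <algorithm>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

using Comdat = std::vector<std::string>; // Members of one comdat group.

// DeadCandidates: functions believed dead; ComdatOf: comdat (if any) per function.
void filterDeadComdatFunctions(
    std::vector<std::string> &DeadCandidates,
    const std::unordered_map<std::string, const Comdat *> &ComdatOf) {
  const std::unordered_set<std::string> MaybeDead(DeadCandidates.begin(),
                                                  DeadCandidates.end());
  // A comdat is dead only if every one of its members is a dead candidate.
  auto ComdatIsDead = [&](const Comdat *C) {
    return std::all_of(C->begin(), C->end(), [&](const std::string &Member) {
      return MaybeDead.count(Member) != 0;
    });
  };
  // Keep a candidate only if it has no comdat or a fully-dead comdat.
  DeadCandidates.erase(
      std::remove_if(DeadCandidates.begin(), DeadCandidates.end(),
                     [&](const std::string &F) {
                       auto It = ComdatOf.find(F);
                       return It != ComdatOf.end() && !ComdatIsDead(It->second);
                     }),
      DeadCandidates.end());
}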
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/SampleProfileInference.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/SampleProfileInference.cpp
index 2f2dff6b5f0b..961adf2570a7 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/SampleProfileInference.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/SampleProfileInference.cpp
@@ -14,6 +14,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/SampleProfileInference.h"
+#include "llvm/ADT/BitVector.h"
#include "llvm/Support/Debug.h"
#include <queue>
#include <set>
@@ -144,7 +145,7 @@ public:
/// A cost of decreasing the entry block's count by one.
static constexpr int64_t AuxCostDecEntry = 10;
/// A cost of taking an unlikely jump.
- static constexpr int64_t AuxCostUnlikely = ((int64_t)1) << 20;
+ static constexpr int64_t AuxCostUnlikely = ((int64_t)1) << 30;
private:
/// Check for existence of an augmenting path with a positive capacity.
@@ -236,7 +237,7 @@ private:
}
}
- /// An node in a flow network.
+ /// A node in a flow network.
struct Node {
/// The cost of the cheapest path from the source to the current node.
int64_t Distance;
@@ -303,13 +304,10 @@ public:
rebalanceUnknownSubgraphs();
}
- /// The probability for the first successor of a unknown subgraph
- static constexpr double UnknownFirstSuccProbability = 0.5;
-
private:
void joinIsolatedComponents() {
// Find blocks that are reachable from the source
- auto Visited = std::vector<bool>(NumBlocks(), false);
+ auto Visited = BitVector(NumBlocks(), false);
findReachable(Func.Entry, Visited);
// Iterate over all non-reachable blocks and adjust their weights
@@ -334,7 +332,7 @@ private:
/// Run BFS from a given block along the jumps with a positive flow and mark
/// all reachable blocks.
- void findReachable(uint64_t Src, std::vector<bool> &Visited) {
+ void findReachable(uint64_t Src, BitVector &Visited) {
if (Visited[Src])
return;
std::queue<uint64_t> Queue;
@@ -452,44 +450,70 @@ private:
uint64_t NumBlocks() const { return Func.Blocks.size(); }
- /// Rebalance unknown subgraphs so as each branch splits with probabilities
- /// UnknownFirstSuccProbability and 1 - UnknownFirstSuccProbability
+ /// Rebalance unknown subgraphs so that the flow is split evenly across the
+ /// outgoing branches of every block of the subgraph. The method iterates over
+ /// blocks with known weight and identifies unknown subgraphs rooted at the
+ /// blocks. Then it verifies if flow rebalancing is feasible and applies it.
void rebalanceUnknownSubgraphs() {
- assert(UnknownFirstSuccProbability >= 0.0 &&
- UnknownFirstSuccProbability <= 1.0 &&
- "the share of the unknown successor should be between 0 and 1");
- // Try to find unknown subgraphs from each non-unknown block
+ // Try to find unknown subgraphs from each block
for (uint64_t I = 0; I < Func.Blocks.size(); I++) {
auto SrcBlock = &Func.Blocks[I];
- // Do not attempt to find unknown successors from a unknown or a
- // zero-flow block
- if (SrcBlock->UnknownWeight || SrcBlock->Flow == 0)
+ // Verify if rebalancing rooted at SrcBlock is feasible
+ if (!canRebalanceAtRoot(SrcBlock))
continue;
- std::vector<FlowBlock *> UnknownSuccs;
+    // Find an unknown subgraph starting at SrcBlock. Along the way,
+ // fill in known destinations and intermediate unknown blocks.
+ std::vector<FlowBlock *> UnknownBlocks;
+ std::vector<FlowBlock *> KnownDstBlocks;
+ findUnknownSubgraph(SrcBlock, KnownDstBlocks, UnknownBlocks);
+
+ // Verify if rebalancing of the subgraph is feasible. If the search is
+ // successful, find the unique destination block (which can be null)
FlowBlock *DstBlock = nullptr;
- // Find a unknown subgraphs starting at block SrcBlock
- if (!findUnknownSubgraph(SrcBlock, DstBlock, UnknownSuccs))
+ if (!canRebalanceSubgraph(SrcBlock, KnownDstBlocks, UnknownBlocks,
+ DstBlock))
continue;
- // At the moment, we do not rebalance subgraphs containing cycles among
- // unknown blocks
- if (!isAcyclicSubgraph(SrcBlock, DstBlock, UnknownSuccs))
+
+ // We cannot rebalance subgraphs containing cycles among unknown blocks
+ if (!isAcyclicSubgraph(SrcBlock, DstBlock, UnknownBlocks))
continue;
// Rebalance the flow
- rebalanceUnknownSubgraph(SrcBlock, DstBlock, UnknownSuccs);
+ rebalanceUnknownSubgraph(SrcBlock, DstBlock, UnknownBlocks);
}
}
- /// Find a unknown subgraph starting at block SrcBlock.
- /// If the search is successful, the method sets DstBlock and UnknownSuccs.
- bool findUnknownSubgraph(FlowBlock *SrcBlock, FlowBlock *&DstBlock,
- std::vector<FlowBlock *> &UnknownSuccs) {
+ /// Verify if rebalancing rooted at a given block is possible.
+ bool canRebalanceAtRoot(const FlowBlock *SrcBlock) {
+ // Do not attempt to find unknown subgraphs from an unknown or a
+ // zero-flow block
+ if (SrcBlock->UnknownWeight || SrcBlock->Flow == 0)
+ return false;
+
+    // Do not attempt to process subgraphs from a block w/o unknown successors
+ bool HasUnknownSuccs = false;
+ for (auto Jump : SrcBlock->SuccJumps) {
+ if (Func.Blocks[Jump->Target].UnknownWeight) {
+ HasUnknownSuccs = true;
+ break;
+ }
+ }
+ if (!HasUnknownSuccs)
+ return false;
+
+ return true;
+ }
+
+ /// Find an unknown subgraph starting at block SrcBlock. The method sets
+ /// identified destinations, KnownDstBlocks, and intermediate UnknownBlocks.
+ void findUnknownSubgraph(const FlowBlock *SrcBlock,
+ std::vector<FlowBlock *> &KnownDstBlocks,
+ std::vector<FlowBlock *> &UnknownBlocks) {
// Run BFS from SrcBlock and make sure all paths are going through unknown
// blocks and end at a non-unknown DstBlock
- auto Visited = std::vector<bool>(NumBlocks(), false);
+ auto Visited = BitVector(NumBlocks(), false);
std::queue<uint64_t> Queue;
- DstBlock = nullptr;
Queue.push(SrcBlock->Index);
Visited[SrcBlock->Index] = true;
@@ -498,52 +522,105 @@ private:
Queue.pop();
// Process blocks reachable from Block
for (auto Jump : Block.SuccJumps) {
+ // If Jump can be ignored, skip it
+ if (ignoreJump(SrcBlock, nullptr, Jump))
+ continue;
+
uint64_t Dst = Jump->Target;
+ // If Dst has been visited, skip Jump
if (Visited[Dst])
continue;
+ // Process block Dst
Visited[Dst] = true;
if (!Func.Blocks[Dst].UnknownWeight) {
- // If we see non-unique non-unknown block reachable from SrcBlock,
- // stop processing and skip rebalancing
- FlowBlock *CandidateDstBlock = &Func.Blocks[Dst];
- if (DstBlock != nullptr && DstBlock != CandidateDstBlock)
- return false;
- DstBlock = CandidateDstBlock;
+ KnownDstBlocks.push_back(&Func.Blocks[Dst]);
} else {
Queue.push(Dst);
- UnknownSuccs.push_back(&Func.Blocks[Dst]);
+ UnknownBlocks.push_back(&Func.Blocks[Dst]);
}
}
}
+ }
+ /// Verify if rebalancing of the subgraph is feasible. If the checks are
+ /// successful, set the unique destination block, DstBlock (can be null).
+ bool canRebalanceSubgraph(const FlowBlock *SrcBlock,
+ const std::vector<FlowBlock *> &KnownDstBlocks,
+ const std::vector<FlowBlock *> &UnknownBlocks,
+ FlowBlock *&DstBlock) {
// If the list of unknown blocks is empty, we don't need rebalancing
- if (UnknownSuccs.empty())
+ if (UnknownBlocks.empty())
return false;
- // If all reachable nodes from SrcBlock are unknown, skip rebalancing
- if (DstBlock == nullptr)
+
+ // If there are multiple known sinks, we can't rebalance
+ if (KnownDstBlocks.size() > 1)
return false;
- // If any of the unknown blocks is an exit block, skip rebalancing
- for (auto Block : UnknownSuccs) {
- if (Block->isExit())
+ DstBlock = KnownDstBlocks.empty() ? nullptr : KnownDstBlocks.front();
+
+ // Verify sinks of the subgraph
+ for (auto Block : UnknownBlocks) {
+ if (Block->SuccJumps.empty()) {
+ // If there are multiple (known and unknown) sinks, we can't rebalance
+ if (DstBlock != nullptr)
+ return false;
+ continue;
+ }
+ size_t NumIgnoredJumps = 0;
+ for (auto Jump : Block->SuccJumps) {
+ if (ignoreJump(SrcBlock, DstBlock, Jump))
+ NumIgnoredJumps++;
+ }
+ // If there is a non-sink block in UnknownBlocks with all jumps ignored,
+ // then we can't rebalance
+ if (NumIgnoredJumps == Block->SuccJumps.size())
return false;
}
return true;
}
+  /// Decide whether the Jump is ignored while processing an unknown subgraph
+ /// rooted at basic block SrcBlock with the destination block, DstBlock.
+ bool ignoreJump(const FlowBlock *SrcBlock, const FlowBlock *DstBlock,
+ const FlowJump *Jump) {
+ // Ignore unlikely jumps with zero flow
+ if (Jump->IsUnlikely && Jump->Flow == 0)
+ return true;
+
+ auto JumpSource = &Func.Blocks[Jump->Source];
+ auto JumpTarget = &Func.Blocks[Jump->Target];
+
+ // Do not ignore jumps coming into DstBlock
+ if (DstBlock != nullptr && JumpTarget == DstBlock)
+ return false;
+
+ // Ignore jumps out of SrcBlock to known blocks
+ if (!JumpTarget->UnknownWeight && JumpSource == SrcBlock)
+ return true;
+
+ // Ignore jumps to known blocks with zero flow
+ if (!JumpTarget->UnknownWeight && JumpTarget->Flow == 0)
+ return true;
+
+ return false;
+ }
+
/// Verify if the given unknown subgraph is acyclic, and if yes, reorder
- /// UnknownSuccs in the topological order (so that all jumps are "forward").
- bool isAcyclicSubgraph(FlowBlock *SrcBlock, FlowBlock *DstBlock,
- std::vector<FlowBlock *> &UnknownSuccs) {
+ /// UnknownBlocks in the topological order (so that all jumps are "forward").
+ bool isAcyclicSubgraph(const FlowBlock *SrcBlock, const FlowBlock *DstBlock,
+ std::vector<FlowBlock *> &UnknownBlocks) {
// Extract local in-degrees in the considered subgraph
auto LocalInDegree = std::vector<uint64_t>(NumBlocks(), 0);
- for (auto Jump : SrcBlock->SuccJumps) {
- LocalInDegree[Jump->Target]++;
- }
- for (uint64_t I = 0; I < UnknownSuccs.size(); I++) {
- for (auto Jump : UnknownSuccs[I]->SuccJumps) {
+ auto fillInDegree = [&](const FlowBlock *Block) {
+ for (auto Jump : Block->SuccJumps) {
+ if (ignoreJump(SrcBlock, DstBlock, Jump))
+ continue;
LocalInDegree[Jump->Target]++;
}
+ };
+ fillInDegree(SrcBlock);
+ for (auto Block : UnknownBlocks) {
+ fillInDegree(Block);
}
// A loop containing SrcBlock
if (LocalInDegree[SrcBlock->Index] > 0)
@@ -553,15 +630,20 @@ private:
std::queue<uint64_t> Queue;
Queue.push(SrcBlock->Index);
while (!Queue.empty()) {
- auto &Block = Func.Blocks[Queue.front()];
+ FlowBlock *Block = &Func.Blocks[Queue.front()];
Queue.pop();
- // Stop propagation once we reach DstBlock
- if (Block.Index == DstBlock->Index)
+ // Stop propagation once we reach DstBlock, if any
+ if (DstBlock != nullptr && Block == DstBlock)
break;
- AcyclicOrder.push_back(&Block);
+ // Keep an acyclic order of unknown blocks
+ if (Block->UnknownWeight && Block != SrcBlock)
+ AcyclicOrder.push_back(Block);
+
// Add to the queue all successors with zero local in-degree
- for (auto Jump : Block.SuccJumps) {
+ for (auto Jump : Block->SuccJumps) {
+ if (ignoreJump(SrcBlock, DstBlock, Jump))
+ continue;
uint64_t Dst = Jump->Target;
LocalInDegree[Dst]--;
if (LocalInDegree[Dst] == 0) {
@@ -572,42 +654,69 @@ private:
// If there is a cycle in the subgraph, AcyclicOrder contains only a subset
// of all blocks
- if (UnknownSuccs.size() + 1 != AcyclicOrder.size())
+ if (UnknownBlocks.size() != AcyclicOrder.size())
return false;
- UnknownSuccs = AcyclicOrder;
+ UnknownBlocks = AcyclicOrder;
return true;
}
- /// Rebalance a given subgraph.
- void rebalanceUnknownSubgraph(FlowBlock *SrcBlock, FlowBlock *DstBlock,
- std::vector<FlowBlock *> &UnknownSuccs) {
+ /// Rebalance a given subgraph rooted at SrcBlock, ending at DstBlock and
+ /// having UnknownBlocks intermediate blocks.
+ void rebalanceUnknownSubgraph(const FlowBlock *SrcBlock,
+ const FlowBlock *DstBlock,
+ const std::vector<FlowBlock *> &UnknownBlocks) {
assert(SrcBlock->Flow > 0 && "zero-flow block in unknown subgraph");
- assert(UnknownSuccs.front() == SrcBlock && "incorrect order of unknowns");
- for (auto Block : UnknownSuccs) {
+    // Distribute flow from the source block
+ uint64_t BlockFlow = 0;
+ // SrcBlock's flow is the sum of outgoing flows along non-ignored jumps
+ for (auto Jump : SrcBlock->SuccJumps) {
+ if (ignoreJump(SrcBlock, DstBlock, Jump))
+ continue;
+ BlockFlow += Jump->Flow;
+ }
+ rebalanceBlock(SrcBlock, DstBlock, SrcBlock, BlockFlow);
+
+    // Distribute flow from the remaining blocks
+ for (auto Block : UnknownBlocks) {
+ assert(Block->UnknownWeight && "incorrect unknown subgraph");
+ uint64_t BlockFlow = 0;
// Block's flow is the sum of incoming flows
- uint64_t TotalFlow = 0;
- if (Block == SrcBlock) {
- TotalFlow = Block->Flow;
- } else {
- for (auto Jump : Block->PredJumps) {
- TotalFlow += Jump->Flow;
- }
- Block->Flow = TotalFlow;
+ for (auto Jump : Block->PredJumps) {
+ BlockFlow += Jump->Flow;
}
+ Block->Flow = BlockFlow;
+ rebalanceBlock(SrcBlock, DstBlock, Block, BlockFlow);
+ }
+ }
- // Process all successor jumps and update corresponding flow values
- for (uint64_t I = 0; I < Block->SuccJumps.size(); I++) {
- auto Jump = Block->SuccJumps[I];
- if (I + 1 == Block->SuccJumps.size()) {
- Jump->Flow = TotalFlow;
- continue;
- }
- uint64_t Flow = uint64_t(TotalFlow * UnknownFirstSuccProbability);
- Jump->Flow = Flow;
- TotalFlow -= Flow;
- }
+ /// Redistribute flow for a block in a subgraph rooted at SrcBlock,
+ /// and ending at DstBlock.
+ void rebalanceBlock(const FlowBlock *SrcBlock, const FlowBlock *DstBlock,
+ const FlowBlock *Block, uint64_t BlockFlow) {
+ // Process all successor jumps and update corresponding flow values
+ size_t BlockDegree = 0;
+ for (auto Jump : Block->SuccJumps) {
+ if (ignoreJump(SrcBlock, DstBlock, Jump))
+ continue;
+ BlockDegree++;
+ }
+ // If all successor jumps of the block are ignored, skip it
+ if (DstBlock == nullptr && BlockDegree == 0)
+ return;
+ assert(BlockDegree > 0 && "all outgoing jumps are ignored");
+
+ // Each of the Block's successors gets the following amount of flow.
+ // Rounding the value up so that all flow is propagated
+ uint64_t SuccFlow = (BlockFlow + BlockDegree - 1) / BlockDegree;
+ for (auto Jump : Block->SuccJumps) {
+ if (ignoreJump(SrcBlock, DstBlock, Jump))
+ continue;
+ uint64_t Flow = std::min(SuccFlow, BlockFlow);
+ Jump->Flow = Flow;
+ BlockFlow -= Flow;
}
+ assert(BlockFlow == 0 && "not all flow is propagated");
}
/// A constant indicating an arbitrary exit block of a function.
@@ -799,7 +908,7 @@ void verifyWeights(const FlowFunction &Func) {
// Run BFS from the source along edges with positive flow
std::queue<uint64_t> Queue;
- auto Visited = std::vector<bool>(NumBlocks, false);
+ auto Visited = BitVector(NumBlocks, false);
Queue.push(Func.Entry);
Visited[Func.Entry] = true;
while (!Queue.empty()) {
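rebalanceBlock above splits a block's flow evenly across its non-ignored successor jumps: each jump receives ceil(BlockFlow / Degree), capped by the flow still left, so the shares always add up to exactly BlockFlow (for example, 10 units over 3 jumps become 4, 4, 2). A minimal numeric sketch of that split, independent of the FlowBlock machinery:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

std::vector<uint64_t> splitEvenly(uint64_t BlockFlow, size_t Degree) {
  assert(Degree > 0 && "need at least one outgoing jump");
  // Round the per-successor share up so no flow is lost to truncation.
  uint64_t SuccFlow = (BlockFlow + Degree - 1) / Degree;
  std::vector<uint64_t> Shares;
  for (size_t I = 0; I < Degree; ++I) {
    uint64_t Flow = std::min<uint64_t>(SuccFlow, BlockFlow);
    Shares.push_back(Flow);
    BlockFlow -= Flow;
  }
  assert(BlockFlow == 0 && "not all flow is propagated");
  return Shares;
}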
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
index c840ee85795f..5363a851fc27 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
@@ -173,7 +173,7 @@ Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
auto *PtrTy = cast<PointerType>(Ty);
if (DL.isNonIntegralPointerType(PtrTy)) {
auto *Int8PtrTy = Builder.getInt8PtrTy(PtrTy->getAddressSpace());
- assert(DL.getTypeAllocSize(Int8PtrTy->getElementType()) == 1 &&
+ assert(DL.getTypeAllocSize(Builder.getInt8Ty()) == 1 &&
"alloc size of i8 must by 1 byte for the GEP to be correct");
auto *GEP = Builder.CreateGEP(
Builder.getInt8Ty(), Constant::getNullValue(Int8PtrTy), V, "uglygep");
@@ -471,7 +471,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
// indexes into the array implied by the pointer operand; the rest of
// the indices index into the element or field type selected by the
// preceding index.
- Type *ElTy = PTy->getElementType();
+ Type *ElTy = PTy->getNonOpaquePointerElementType();
for (;;) {
// If the scale size is not 0, attempt to factor out a scale for
// array indexing.
@@ -640,8 +640,8 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
Value *Casted = V;
if (V->getType() != PTy)
Casted = InsertNoopCastOfTo(Casted, PTy);
- Value *GEP = Builder.CreateGEP(PTy->getElementType(), Casted, GepIndices,
- "scevgep");
+ Value *GEP = Builder.CreateGEP(PTy->getNonOpaquePointerElementType(),
+ Casted, GepIndices, "scevgep");
Ops.push_back(SE.getUnknown(GEP));
}
@@ -1671,7 +1671,7 @@ Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
return Builder.CreateSExt(V, Ty);
}
-Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
+Value *SCEVExpander::expandSMaxExpr(const SCEVNAryExpr *S) {
Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
Type *Ty = LHS->getType();
for (int i = S->getNumOperands()-2; i >= 0; --i) {
@@ -1700,7 +1700,7 @@ Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
return LHS;
}
-Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
+Value *SCEVExpander::expandUMaxExpr(const SCEVNAryExpr *S) {
Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
Type *Ty = LHS->getType();
for (int i = S->getNumOperands()-2; i >= 0; --i) {
@@ -1729,7 +1729,7 @@ Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
return LHS;
}
-Value *SCEVExpander::visitSMinExpr(const SCEVSMinExpr *S) {
+Value *SCEVExpander::expandSMinExpr(const SCEVNAryExpr *S) {
Value *LHS = expand(S->getOperand(S->getNumOperands() - 1));
Type *Ty = LHS->getType();
for (int i = S->getNumOperands() - 2; i >= 0; --i) {
@@ -1758,7 +1758,7 @@ Value *SCEVExpander::visitSMinExpr(const SCEVSMinExpr *S) {
return LHS;
}
-Value *SCEVExpander::visitUMinExpr(const SCEVUMinExpr *S) {
+Value *SCEVExpander::expandUMinExpr(const SCEVNAryExpr *S) {
Value *LHS = expand(S->getOperand(S->getNumOperands() - 1));
Type *Ty = LHS->getType();
for (int i = S->getNumOperands() - 2; i >= 0; --i) {
@@ -1787,6 +1787,40 @@ Value *SCEVExpander::visitUMinExpr(const SCEVUMinExpr *S) {
return LHS;
}
+Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
+ return expandSMaxExpr(S);
+}
+
+Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
+ return expandUMaxExpr(S);
+}
+
+Value *SCEVExpander::visitSMinExpr(const SCEVSMinExpr *S) {
+ return expandSMinExpr(S);
+}
+
+Value *SCEVExpander::visitUMinExpr(const SCEVUMinExpr *S) {
+ return expandUMinExpr(S);
+}
+
+Value *SCEVExpander::visitSequentialUMinExpr(const SCEVSequentialUMinExpr *S) {
+ SmallVector<Value *> Ops;
+ for (const SCEV *Op : S->operands())
+ Ops.emplace_back(expand(Op));
+
+ Value *SaturationPoint =
+ MinMaxIntrinsic::getSaturationPoint(Intrinsic::umin, S->getType());
+
+ SmallVector<Value *> OpIsZero;
+ for (Value *Op : ArrayRef<Value *>(Ops).drop_back())
+ OpIsZero.emplace_back(Builder.CreateICmpEQ(Op, SaturationPoint));
+
+ Value *AnyOpIsZero = Builder.CreateLogicalOr(OpIsZero);
+
+ Value *NaiveUMin = expandUMinExpr(S);
+ return Builder.CreateSelect(AnyOpIsZero, SaturationPoint, NaiveUMin);
+}
+
Value *SCEVExpander::expandCodeForImpl(const SCEV *SH, Type *Ty,
Instruction *IP, bool Root) {
setInsertPoint(IP);
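visitSequentialUMinExpr above expands a sequential umin by comparing every operand except the last against the saturation point (0 for unsigned min), OR-ing those tests, and selecting the saturation point when any of them fires, with the plain umin reduction as the fallback. A scalar sketch of the same shape; C++ has no poison values, so this only mirrors the control structure, not the reason the extra select is needed:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

uint64_t sequentialUMin(const std::vector<uint64_t> &Ops) {
  assert(!Ops.empty() && "expected at least one operand");

  // Is any operand other than the last one already saturated (== 0)?
  bool AnyLeadingOpIsZero = false;
  for (size_t I = 0; I + 1 < Ops.size(); ++I)
    AnyLeadingOpIsZero |= (Ops[I] == 0);

  // The ordinary umin reduction.
  uint64_t NaiveUMin = Ops.front();
  for (uint64_t Op : Ops)
    NaiveUMin = std::min(NaiveUMin, Op);

  // The final select between the saturation point and the naive result.
  return AnyLeadingOpIsZero ? 0 : NaiveUMin;
}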
@@ -1809,8 +1843,8 @@ Value *SCEVExpander::expandCodeForImpl(const SCEV *SH, Type *Ty, bool Root) {
// instruction.
Instruction *Tmp;
if (Inst->getType()->isIntegerTy())
- Tmp =
- cast<Instruction>(Builder.CreateAdd(Inst, Inst, "tmp.lcssa.user"));
+ Tmp = cast<Instruction>(Builder.CreateIntToPtr(
+ Inst, Inst->getType()->getPointerTo(), "tmp.lcssa.user"));
else {
assert(Inst->getType()->isPointerTy());
Tmp = cast<Instruction>(Builder.CreatePtrToInt(
@@ -1947,22 +1981,14 @@ Value *SCEVExpander::expand(const SCEV *S) {
if (VO.second) {
if (PointerType *Vty = dyn_cast<PointerType>(V->getType())) {
- Type *Ety = Vty->getPointerElementType();
int64_t Offset = VO.second->getSExtValue();
- int64_t ESize = SE.getTypeSizeInBits(Ety);
- if ((Offset * 8) % ESize == 0) {
- ConstantInt *Idx =
- ConstantInt::getSigned(VO.second->getType(), -(Offset * 8) / ESize);
- V = Builder.CreateGEP(Ety, V, Idx, "scevgep");
- } else {
- ConstantInt *Idx =
- ConstantInt::getSigned(VO.second->getType(), -Offset);
- unsigned AS = Vty->getAddressSpace();
- V = Builder.CreateBitCast(V, Type::getInt8PtrTy(SE.getContext(), AS));
- V = Builder.CreateGEP(Type::getInt8Ty(SE.getContext()), V, Idx,
- "uglygep");
- V = Builder.CreateBitCast(V, Vty);
- }
+ ConstantInt *Idx =
+ ConstantInt::getSigned(VO.second->getType(), -Offset);
+ unsigned AS = Vty->getAddressSpace();
+ V = Builder.CreateBitCast(V, Type::getInt8PtrTy(SE.getContext(), AS));
+ V = Builder.CreateGEP(Type::getInt8Ty(SE.getContext()), V, Idx,
+ "uglygep");
+ V = Builder.CreateBitCast(V, Vty);
} else {
V = Builder.CreateSub(V, VO.second);
}
@@ -2271,10 +2297,27 @@ template<typename T> static InstructionCost costAndCollectOperands(
case scSMaxExpr:
case scUMaxExpr:
case scSMinExpr:
- case scUMinExpr: {
+ case scUMinExpr:
+ case scSequentialUMinExpr: {
// FIXME: should this ask the cost for Intrinsic's?
+ // The reduction tree.
Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 1);
Cost += CmpSelCost(Instruction::Select, S->getNumOperands() - 1, 0, 2);
+ switch (S->getSCEVType()) {
+ case scSequentialUMinExpr: {
+ // The safety net against poison.
+ // FIXME: this is broken.
+ Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 0);
+ Cost += ArithCost(Instruction::Or,
+ S->getNumOperands() > 2 ? S->getNumOperands() - 2 : 0);
+ Cost += CmpSelCost(Instruction::Select, 1, 0, 1);
+ break;
+ }
+ default:
+ assert(!isa<SCEVSequentialMinMaxExpr>(S) &&
+ "Unhandled SCEV expression type?");
+ break;
+ }
break;
}
case scAddRecExpr: {
@@ -2362,7 +2405,7 @@ bool SCEVExpander::isHighCostExpansionHelper(
case scConstant: {
// Only evaluate the costs of constants when optimizing for size.
if (CostKind != TargetTransformInfo::TCK_CodeSize)
- return 0;
+ return false;
const APInt &Imm = cast<SCEVConstant>(S)->getAPInt();
Type *Ty = S->getType();
Cost += TTI.getIntImmCostInst(
@@ -2399,7 +2442,8 @@ bool SCEVExpander::isHighCostExpansionHelper(
case scUMaxExpr:
case scSMaxExpr:
case scUMinExpr:
- case scSMinExpr: {
+ case scSMinExpr:
+ case scSequentialUMinExpr: {
assert(cast<SCEVNAryExpr>(S)->getNumOperands() > 1 &&
"Nary expr should have more than 1 operand.");
// The simple nary expr will require one less op (or pair of ops)
@@ -2490,49 +2534,73 @@ Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
Value *StepCompare = Builder.CreateICmp(ICmpInst::ICMP_SLT, StepValue, Zero);
Value *AbsStep = Builder.CreateSelect(StepCompare, NegStepValue, StepValue);
- // Get the backedge taken count and truncate or extended to the AR type.
- Value *TruncTripCount = Builder.CreateZExtOrTrunc(TripCountVal, Ty);
-
// Compute |Step| * Backedge
- Value *MulV, *OfMul;
- if (Step->isOne()) {
- // Special-case Step of one. Potentially-costly `umul_with_overflow` isn't
- // needed, there is never an overflow, so to avoid artificially inflating
- // the cost of the check, directly emit the optimized IR.
- MulV = TruncTripCount;
- OfMul = ConstantInt::getFalse(MulV->getContext());
- } else {
- auto *MulF = Intrinsic::getDeclaration(Loc->getModule(),
- Intrinsic::umul_with_overflow, Ty);
- CallInst *Mul = Builder.CreateCall(MulF, {AbsStep, TruncTripCount}, "mul");
- MulV = Builder.CreateExtractValue(Mul, 0, "mul.result");
- OfMul = Builder.CreateExtractValue(Mul, 1, "mul.overflow");
- }
-
// Compute:
- // Start + |Step| * Backedge < Start
- // Start - |Step| * Backedge > Start
- Value *Add = nullptr, *Sub = nullptr;
- if (PointerType *ARPtrTy = dyn_cast<PointerType>(ARTy)) {
- StartValue = InsertNoopCastOfTo(
- StartValue, Builder.getInt8PtrTy(ARPtrTy->getAddressSpace()));
- Value *NegMulV = Builder.CreateNeg(MulV);
- Add = Builder.CreateGEP(Builder.getInt8Ty(), StartValue, MulV);
- Sub = Builder.CreateGEP(Builder.getInt8Ty(), StartValue, NegMulV);
- } else {
- Add = Builder.CreateAdd(StartValue, MulV);
- Sub = Builder.CreateSub(StartValue, MulV);
- }
-
- Value *EndCompareGT = Builder.CreateICmp(
- Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT, Sub, StartValue);
+ // 1. Start + |Step| * Backedge < Start
+ // 2. Start - |Step| * Backedge > Start
+ //
+ // And select either 1. or 2. depending on whether step is positive or
+ // negative. If Step is known to be positive or negative, only create
+ // either 1. or 2.
+ auto ComputeEndCheck = [&]() -> Value * {
+ // Checking <u 0 is always false.
+ if (!Signed && Start->isZero() && SE.isKnownPositive(Step))
+ return ConstantInt::getFalse(Loc->getContext());
+
+ // Get the backedge taken count and truncate or extended to the AR type.
+ Value *TruncTripCount = Builder.CreateZExtOrTrunc(TripCountVal, Ty);
+
+ Value *MulV, *OfMul;
+ if (Step->isOne()) {
+ // Special-case Step of one. Potentially-costly `umul_with_overflow` isn't
+ // needed, there is never an overflow, so to avoid artificially inflating
+ // the cost of the check, directly emit the optimized IR.
+ MulV = TruncTripCount;
+ OfMul = ConstantInt::getFalse(MulV->getContext());
+ } else {
+ auto *MulF = Intrinsic::getDeclaration(Loc->getModule(),
+ Intrinsic::umul_with_overflow, Ty);
+ CallInst *Mul =
+ Builder.CreateCall(MulF, {AbsStep, TruncTripCount}, "mul");
+ MulV = Builder.CreateExtractValue(Mul, 0, "mul.result");
+ OfMul = Builder.CreateExtractValue(Mul, 1, "mul.overflow");
+ }
- Value *EndCompareLT = Builder.CreateICmp(
- Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, Add, StartValue);
+ Value *Add = nullptr, *Sub = nullptr;
+ bool NeedPosCheck = !SE.isKnownNegative(Step);
+ bool NeedNegCheck = !SE.isKnownPositive(Step);
+
+ if (PointerType *ARPtrTy = dyn_cast<PointerType>(ARTy)) {
+ StartValue = InsertNoopCastOfTo(
+ StartValue, Builder.getInt8PtrTy(ARPtrTy->getAddressSpace()));
+ Value *NegMulV = Builder.CreateNeg(MulV);
+ if (NeedPosCheck)
+ Add = Builder.CreateGEP(Builder.getInt8Ty(), StartValue, MulV);
+ if (NeedNegCheck)
+ Sub = Builder.CreateGEP(Builder.getInt8Ty(), StartValue, NegMulV);
+ } else {
+ if (NeedPosCheck)
+ Add = Builder.CreateAdd(StartValue, MulV);
+ if (NeedNegCheck)
+ Sub = Builder.CreateSub(StartValue, MulV);
+ }
- // Select the answer based on the sign of Step.
- Value *EndCheck =
- Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT);
+ Value *EndCompareLT = nullptr;
+ Value *EndCompareGT = nullptr;
+ Value *EndCheck = nullptr;
+ if (NeedPosCheck)
+ EndCheck = EndCompareLT = Builder.CreateICmp(
+ Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, Add, StartValue);
+ if (NeedNegCheck)
+ EndCheck = EndCompareGT = Builder.CreateICmp(
+ Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT, Sub, StartValue);
+ if (NeedPosCheck && NeedNegCheck) {
+ // Select the answer based on the sign of Step.
+ EndCheck = Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT);
+ }
+ return Builder.CreateOr(EndCheck, OfMul);
+ };
+ Value *EndCheck = ComputeEndCheck();
// If the backedge taken count type is larger than the AR type,
// check that we don't drop any bits by truncating it. If we are
@@ -2548,7 +2616,7 @@ Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
EndCheck = Builder.CreateOr(EndCheck, BackedgeCheck);
}
- return Builder.CreateOr(EndCheck, OfMul);
+ return EndCheck;
}
Value *SCEVExpander::expandWrapPredicate(const SCEVWrapPredicate *Pred,
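The ComputeEndCheck lambda above tests whether Start + |Step| * BackedgeTakenCount wraps past Start (positive step) or Start - |Step| * BackedgeTakenCount wraps below Start (negative step), and emits only the comparison that can actually apply when the sign of the step is known. A small unsigned-wraparound sketch of the unsigned (Signed == false) case; overflow of the multiplication itself is ignored here because the real expansion tracks it separately in OfMul:

#include <cstdint>

// Returns true when stepping from Start for BackedgeTakenCount iterations
// wraps around the unsigned range, i.e. the range check fails.
bool rangeCheckFails(uint64_t Start, int64_t Step, uint64_t BackedgeTakenCount) {
  uint64_t AbsStep =
      Step < 0 ? 0 - static_cast<uint64_t>(Step) : static_cast<uint64_t>(Step);
  uint64_t Mul = AbsStep * BackedgeTakenCount; // Overflow checked via OfMul.

  if (Step > 0)
    return Start + Mul < Start; // Positive step: the add wrapped past Start.
  if (Step < 0)
    return Start - Mul > Start; // Negative step: the sub wrapped below Start.
  return false;                 // A zero step never leaves Start.
}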
@@ -2578,17 +2646,16 @@ Value *SCEVExpander::expandWrapPredicate(const SCEVWrapPredicate *Pred,
Value *SCEVExpander::expandUnionPredicate(const SCEVUnionPredicate *Union,
Instruction *IP) {
- auto *BoolType = IntegerType::get(IP->getContext(), 1);
- Value *Check = ConstantInt::getNullValue(BoolType);
-
// Loop over all checks in this set.
+ SmallVector<Value *> Checks;
for (auto Pred : Union->getPredicates()) {
- auto *NextCheck = expandCodeForPredicate(Pred, IP);
+ Checks.push_back(expandCodeForPredicate(Pred, IP));
Builder.SetInsertPoint(IP);
- Check = Builder.CreateOr(Check, NextCheck);
}
- return Check;
+ if (Checks.empty())
+ return ConstantInt::getFalse(IP->getContext());
+ return Builder.CreateOr(Checks);
}
Value *SCEVExpander::fixupLCSSAFormFor(Instruction *User, unsigned OpIdx) {
@@ -2720,13 +2787,8 @@ void SCEVExpanderCleaner::cleanup() {
// Remove sets with value handles.
Expander.clear();
- // Sort so that earlier instructions do not dominate later instructions.
- stable_sort(InsertedInstructions, [this](Instruction *A, Instruction *B) {
- return DT.dominates(B, A);
- });
// Remove all inserted instructions.
- for (Instruction *I : InsertedInstructions) {
-
+ for (Instruction *I : reverse(InsertedInstructions)) {
#ifndef NDEBUG
assert(all_of(I->users(),
[&InsertedSet](Value *U) {
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 1046998c26de..335ac03ccb52 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -2052,109 +2052,119 @@ static bool SinkCommonCodeFromPredecessors(BasicBlock *BB,
if (ScanIdx == 0)
return false;
- // Okay, we *could* sink last ScanIdx instructions. But how many can we
- // actually sink before encountering instruction that is unprofitable to sink?
- auto ProfitableToSinkInstruction = [&](LockstepReverseIterator &LRI) {
- unsigned NumPHIdValues = 0;
- for (auto *I : *LRI)
- for (auto *V : PHIOperands[I]) {
- if (!InstructionsToSink.contains(V))
- ++NumPHIdValues;
- // FIXME: this check is overly optimistic. We may end up not sinking
- // said instruction, due to the very same profitability check.
- // See @creating_too_many_phis in sink-common-code.ll.
- }
- LLVM_DEBUG(dbgs() << "SINK: #phid values: " << NumPHIdValues << "\n");
- unsigned NumPHIInsts = NumPHIdValues / UnconditionalPreds.size();
- if ((NumPHIdValues % UnconditionalPreds.size()) != 0)
+ bool followedByDeoptOrUnreachable = IsBlockFollowedByDeoptOrUnreachable(BB);
+
+ if (!followedByDeoptOrUnreachable) {
+ // Okay, we *could* sink last ScanIdx instructions. But how many can we
+ // actually sink before encountering instruction that is unprofitable to
+ // sink?
+ auto ProfitableToSinkInstruction = [&](LockstepReverseIterator &LRI) {
+ unsigned NumPHIdValues = 0;
+ for (auto *I : *LRI)
+ for (auto *V : PHIOperands[I]) {
+ if (!InstructionsToSink.contains(V))
+ ++NumPHIdValues;
+ // FIXME: this check is overly optimistic. We may end up not sinking
+ // said instruction, due to the very same profitability check.
+ // See @creating_too_many_phis in sink-common-code.ll.
+ }
+ LLVM_DEBUG(dbgs() << "SINK: #phid values: " << NumPHIdValues << "\n");
+ unsigned NumPHIInsts = NumPHIdValues / UnconditionalPreds.size();
+ if ((NumPHIdValues % UnconditionalPreds.size()) != 0)
NumPHIInsts++;
- return NumPHIInsts <= 1;
- };
+ return NumPHIInsts <= 1;
+ };
- // We've determined that we are going to sink last ScanIdx instructions,
- // and recorded them in InstructionsToSink. Now, some instructions may be
- // unprofitable to sink. But that determination depends on the instructions
- // that we are going to sink.
-
- // First, forward scan: find the first instruction unprofitable to sink,
- // recording all the ones that are profitable to sink.
- // FIXME: would it be better, after we detect that not all are profitable.
- // to either record the profitable ones, or erase the unprofitable ones?
- // Maybe we need to choose (at runtime) the one that will touch least instrs?
- LRI.reset();
- int Idx = 0;
- SmallPtrSet<Value *, 4> InstructionsProfitableToSink;
- while (Idx < ScanIdx) {
- if (!ProfitableToSinkInstruction(LRI)) {
- // Too many PHIs would be created.
- LLVM_DEBUG(
- dbgs() << "SINK: stopping here, too many PHIs would be created!\n");
- break;
+ // We've determined that we are going to sink last ScanIdx instructions,
+ // and recorded them in InstructionsToSink. Now, some instructions may be
+ // unprofitable to sink. But that determination depends on the instructions
+ // that we are going to sink.
+
+ // First, forward scan: find the first instruction unprofitable to sink,
+ // recording all the ones that are profitable to sink.
+ // FIXME: would it be better, after we detect that not all are profitable.
+ // to either record the profitable ones, or erase the unprofitable ones?
+ // Maybe we need to choose (at runtime) the one that will touch least
+ // instrs?
+ LRI.reset();
+ int Idx = 0;
+ SmallPtrSet<Value *, 4> InstructionsProfitableToSink;
+ while (Idx < ScanIdx) {
+ if (!ProfitableToSinkInstruction(LRI)) {
+ // Too many PHIs would be created.
+ LLVM_DEBUG(
+ dbgs() << "SINK: stopping here, too many PHIs would be created!\n");
+ break;
+ }
+ InstructionsProfitableToSink.insert((*LRI).begin(), (*LRI).end());
+ --LRI;
+ ++Idx;
}
- InstructionsProfitableToSink.insert((*LRI).begin(), (*LRI).end());
- --LRI;
- ++Idx;
- }
- // If no instructions can be sunk, early-return.
- if (Idx == 0)
- return false;
+ // If no instructions can be sunk, early-return.
+ if (Idx == 0)
+ return false;
- // Did we determine that (only) some instructions are unprofitable to sink?
- if (Idx < ScanIdx) {
- // Okay, some instructions are unprofitable.
- ScanIdx = Idx;
- InstructionsToSink = InstructionsProfitableToSink;
-
- // But, that may make other instructions unprofitable, too.
- // So, do a backward scan, do any earlier instructions become unprofitable?
- assert(!ProfitableToSinkInstruction(LRI) &&
- "We already know that the last instruction is unprofitable to sink");
- ++LRI;
- --Idx;
- while (Idx >= 0) {
- // If we detect that an instruction becomes unprofitable to sink,
- // all earlier instructions won't be sunk either,
- // so preemptively keep InstructionsProfitableToSink in sync.
- // FIXME: is this the most performant approach?
- for (auto *I : *LRI)
- InstructionsProfitableToSink.erase(I);
- if (!ProfitableToSinkInstruction(LRI)) {
- // Everything starting with this instruction won't be sunk.
- ScanIdx = Idx;
- InstructionsToSink = InstructionsProfitableToSink;
- }
+ // Did we determine that (only) some instructions are unprofitable to sink?
+ if (Idx < ScanIdx) {
+ // Okay, some instructions are unprofitable.
+ ScanIdx = Idx;
+ InstructionsToSink = InstructionsProfitableToSink;
+
+ // But, that may make other instructions unprofitable, too.
+ // So, do a backward scan, do any earlier instructions become
+ // unprofitable?
+ assert(
+ !ProfitableToSinkInstruction(LRI) &&
+ "We already know that the last instruction is unprofitable to sink");
++LRI;
--Idx;
+ while (Idx >= 0) {
+ // If we detect that an instruction becomes unprofitable to sink,
+ // all earlier instructions won't be sunk either,
+ // so preemptively keep InstructionsProfitableToSink in sync.
+ // FIXME: is this the most performant approach?
+ for (auto *I : *LRI)
+ InstructionsProfitableToSink.erase(I);
+ if (!ProfitableToSinkInstruction(LRI)) {
+ // Everything starting with this instruction won't be sunk.
+ ScanIdx = Idx;
+ InstructionsToSink = InstructionsProfitableToSink;
+ }
+ ++LRI;
+ --Idx;
+ }
}
- }
- // If no instructions can be sunk, early-return.
- if (ScanIdx == 0)
- return false;
+ // If no instructions can be sunk, early-return.
+ if (ScanIdx == 0)
+ return false;
+ }
bool Changed = false;
if (HaveNonUnconditionalPredecessors) {
- // It is always legal to sink common instructions from unconditional
- // predecessors. However, if not all predecessors are unconditional,
- // this transformation might be pessimizing. So as a rule of thumb,
- // don't do it unless we'd sink at least one non-speculatable instruction.
- // See https://bugs.llvm.org/show_bug.cgi?id=30244
- LRI.reset();
- int Idx = 0;
- bool Profitable = false;
- while (Idx < ScanIdx) {
- if (!isSafeToSpeculativelyExecute((*LRI)[0])) {
- Profitable = true;
- break;
+ if (!followedByDeoptOrUnreachable) {
+ // It is always legal to sink common instructions from unconditional
+ // predecessors. However, if not all predecessors are unconditional,
+ // this transformation might be pessimizing. So as a rule of thumb,
+ // don't do it unless we'd sink at least one non-speculatable instruction.
+ // See https://bugs.llvm.org/show_bug.cgi?id=30244
+ LRI.reset();
+ int Idx = 0;
+ bool Profitable = false;
+ while (Idx < ScanIdx) {
+ if (!isSafeToSpeculativelyExecute((*LRI)[0])) {
+ Profitable = true;
+ break;
+ }
+ --LRI;
+ ++Idx;
}
- --LRI;
- ++Idx;
+ if (!Profitable)
+ return false;
}
- if (!Profitable)
- return false;
LLVM_DEBUG(dbgs() << "SINK: Splitting edge\n");
// We have a conditional edge and we're going to sink some instructions.
@@ -4935,14 +4945,13 @@ static bool eliminateDeadSwitchCases(SwitchInst *SI, DomTreeUpdater *DTU,
AssumptionCache *AC,
const DataLayout &DL) {
Value *Cond = SI->getCondition();
- unsigned Bits = Cond->getType()->getIntegerBitWidth();
KnownBits Known = computeKnownBits(Cond, DL, 0, AC, SI);
// We can also eliminate cases by determining that their values are outside of
// the limited range of the condition based on how many significant (non-sign)
// bits are in the condition value.
- unsigned ExtraSignBits = ComputeNumSignBits(Cond, DL, 0, AC, SI) - 1;
- unsigned MaxSignificantBitsInCond = Bits - ExtraSignBits;
+ unsigned MaxSignificantBitsInCond =
+ ComputeMaxSignificantBits(Cond, DL, 0, AC, SI);
// Gather dead cases.
SmallVector<ConstantInt *, 8> DeadCases;
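The hunk above folds the hand-computed Bits - (ComputeNumSignBits(...) - 1) into a single ComputeMaxSignificantBits call, relying on the identity MaxSignificantBits(x) = BitWidth - (NumSignBits(x) - 1). A worked 8-bit illustration of that identity (not the LLVM helpers): for 0b11110110 (-10) the top four bits are copies of the sign bit, so NumSignBits is 4 and 8 - (4 - 1) = 5 bits are significant.

#include <cstdint>

// Number of leading bits (including the sign bit) that are copies of the
// sign bit of an 8-bit value.
unsigned numSignBits(int8_t X) {
  const uint8_t U = static_cast<uint8_t>(X);
  const unsigned SignBit = (U >> 7) & 1;
  unsigned Count = 1; // The sign bit itself always counts.
  for (int Bit = 6; Bit >= 0 && ((U >> Bit) & 1) == SignBit; --Bit)
    ++Count;
  return Count;
}

// The identity used by the hunk above, specialized to a bit width of 8.
unsigned maxSignificantBits(int8_t X) { return 8 - (numSignBits(X) - 1); }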
@@ -4973,8 +4982,8 @@ static bool eliminateDeadSwitchCases(SwitchInst *SI, DomTreeUpdater *DTU,
bool HasDefault =
!isa<UnreachableInst>(SI->getDefaultDest()->getFirstNonPHIOrDbg());
const unsigned NumUnknownBits =
- Bits - (Known.Zero | Known.One).countPopulation();
- assert(NumUnknownBits <= Bits);
+ Known.getBitWidth() - (Known.Zero | Known.One).countPopulation();
+ assert(NumUnknownBits <= Known.getBitWidth());
if (HasDefault && DeadCases.empty() &&
NumUnknownBits < 64 /* avoid overflow */ &&
SI->getNumCases() == (1ULL << NumUnknownBits)) {
@@ -5796,10 +5805,9 @@ static void reuseTableCompare(
for (auto ValuePair : Values) {
Constant *CaseConst = ConstantExpr::getICmp(CmpInst->getPredicate(),
ValuePair.second, CmpOp1, true);
- if (!CaseConst || CaseConst == DefaultConst || isa<UndefValue>(CaseConst))
+ if (!CaseConst || CaseConst == DefaultConst ||
+ (CaseConst != TrueConst && CaseConst != FalseConst))
return;
- assert((CaseConst == TrueConst || CaseConst == FalseConst) &&
- "Expect true or false as compare result.");
}
// Check if the branch instruction dominates the phi node. It's a simple
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
index 02727a3dbf9c..e02d02a05752 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -602,7 +602,7 @@ Value *LibCallSimplifier::optimizeStrNCpy(CallInst *CI, IRBuilderBase &B) {
Align MemSetAlign =
CI->getAttributes().getParamAttrs(0).getAlignment().valueOrOne();
CallInst *NewCI = B.CreateMemSet(Dst, B.getInt8('\0'), Size, MemSetAlign);
- AttrBuilder ArgAttrs(CI->getAttributes().getParamAttrs(0));
+ AttrBuilder ArgAttrs(CI->getContext(), CI->getAttributes().getParamAttrs(0));
NewCI->setAttributes(NewCI->getAttributes().addParamAttributes(
CI->getContext(), 0, ArgAttrs));
copyFlags(*CI, NewCI);
@@ -2515,8 +2515,9 @@ Value *LibCallSimplifier::optimizeSPrintFString(CallInst *CI,
} else if (Value *V = emitStpCpy(Dest, CI->getArgOperand(2), B, TLI)) {
// sprintf(dest, "%s", str) -> stpcpy(dest, str) - dest
// Handle mismatched pointer types (goes away with typeless pointers?).
- V = B.CreatePointerCast(V, Dest->getType());
- Value *PtrDiff = B.CreatePtrDiff(V, Dest);
+ V = B.CreatePointerCast(V, B.getInt8PtrTy());
+ Dest = B.CreatePointerCast(Dest, B.getInt8PtrTy());
+ Value *PtrDiff = B.CreatePtrDiff(B.getInt8Ty(), V, Dest);
return B.CreateIntCast(PtrDiff, CI->getType(), false);
}
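The sprintf hunk above relies on sprintf(dest, "%s", str) returning strlen(str), which is exactly stpcpy(dest, str) - dest, since stpcpy returns a pointer to the copied terminator. A short usage sketch of that equivalence; stpcpy is POSIX rather than standard C++, so this assumes a POSIX libc:

#include <cassert>
#include <cstdio>
#include <cstring>

int main() {
  const char *Str = "hello";
  char A[16], B[16];

  int SprintfLen = std::sprintf(A, "%s", Str); // Returns strlen(Str).
  long StpcpyLen = stpcpy(B, Str) - B;         // Pointer to the '\0' minus B.

  assert(SprintfLen == StpcpyLen &&
         StpcpyLen == static_cast<long>(std::strlen(Str)));
  return 0;
}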
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/ValueMapper.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/ValueMapper.cpp
index b822db938af8..8947303674ee 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/ValueMapper.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/ValueMapper.cpp
@@ -398,13 +398,17 @@ Value *Mapper::mapValue(const Value *V) {
SmallVector<ValueAsMetadata *, 4> MappedArgs;
for (auto *VAM : AL->getArgs()) {
// Map both Local and Constant VAMs here; they will both ultimately
- // be mapped via mapValue (apart from constants when we have no
- // module level changes, which have an identity mapping).
+ // be mapped via mapValue. The exceptions are constants when we have no
+ // module level changes and locals when they have no existing mapped
+ // value and RF_IgnoreMissingLocals is set; these have identity
+ // mappings.
if ((Flags & RF_NoModuleLevelChanges) && isa<ConstantAsMetadata>(VAM)) {
MappedArgs.push_back(VAM);
} else if (Value *LV = mapValue(VAM->getValue())) {
MappedArgs.push_back(
LV == VAM->getValue() ? VAM : ValueAsMetadata::get(LV));
+ } else if ((Flags & RF_IgnoreMissingLocals) && isa<LocalAsMetadata>(VAM)) {
+ MappedArgs.push_back(VAM);
} else {
// If we cannot map the value, set the argument as undef.
MappedArgs.push_back(ValueAsMetadata::get(
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index 5a4a2f0924f6..97c2acb7d4c7 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -698,8 +698,9 @@ Vectorizer::getVectorizablePrefix(ArrayRef<Instruction *> Chain) {
ChainInstrs.push_back(&I);
continue;
}
- if (I.mayThrow()) {
- LLVM_DEBUG(dbgs() << "LSV: Found may-throw operation: " << I << '\n');
+ if (!isGuaranteedToTransferExecutionToSuccessor(&I)) {
+ LLVM_DEBUG(dbgs() << "LSV: Found instruction may not transfer execution: "
+ << I << '\n');
break;
}
if (I.mayReadOrWriteMemory())
@@ -853,13 +854,6 @@ Vectorizer::collectInstructions(BasicBlock *BB) {
(VecTy && TTI.getLoadVectorFactor(VF, TySize, TySize / 8, VecTy) == 0))
continue;
- // Make sure all the users of a vector are constant-index extracts.
- if (isa<VectorType>(Ty) && !llvm::all_of(LI->users(), [](const User *U) {
- const ExtractElementInst *EEI = dyn_cast<ExtractElementInst>(U);
- return EEI && isa<ConstantInt>(EEI->getOperand(1));
- }))
- continue;
-
// Save the load locations.
const ChainID ID = getChainID(Ptr);
LoadRefs[ID].push_back(LI);
@@ -900,12 +894,6 @@ Vectorizer::collectInstructions(BasicBlock *BB) {
(VecTy && TTI.getStoreVectorFactor(VF, TySize, TySize / 8, VecTy) == 0))
continue;
- if (isa<VectorType>(Ty) && !llvm::all_of(SI->users(), [](const User *U) {
- const ExtractElementInst *EEI = dyn_cast<ExtractElementInst>(U);
- return EEI && isa<ConstantInt>(EEI->getOperand(1));
- }))
- continue;
-
// Save store location.
const ChainID ID = getChainID(Ptr);
StoreRefs[ID].push_back(SI);
@@ -1289,52 +1277,32 @@ bool Vectorizer::vectorizeLoadChain(
Builder.CreateAlignedLoad(VecTy, Bitcast, MaybeAlign(Alignment));
propagateMetadata(LI, Chain);
- if (VecLoadTy) {
- SmallVector<Instruction *, 16> InstrsToErase;
-
- unsigned VecWidth = VecLoadTy->getNumElements();
- for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
- for (auto Use : Chain[I]->users()) {
- // All users of vector loads are ExtractElement instructions with
- // constant indices, otherwise we would have bailed before now.
- Instruction *UI = cast<Instruction>(Use);
- unsigned Idx = cast<ConstantInt>(UI->getOperand(1))->getZExtValue();
- unsigned NewIdx = Idx + I * VecWidth;
- Value *V = Builder.CreateExtractElement(LI, Builder.getInt32(NewIdx),
- UI->getName());
- if (V->getType() != UI->getType())
- V = Builder.CreateBitCast(V, UI->getType());
-
- // Replace the old instruction.
- UI->replaceAllUsesWith(V);
- InstrsToErase.push_back(UI);
- }
+ for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
+ Value *CV = Chain[I];
+ Value *V;
+ if (VecLoadTy) {
+ // Extract a subvector using shufflevector.
+ unsigned VecWidth = VecLoadTy->getNumElements();
+ auto Mask =
+ llvm::to_vector<8>(llvm::seq<int>(I * VecWidth, (I + 1) * VecWidth));
+ V = Builder.CreateShuffleVector(LI, Mask, CV->getName());
+ } else {
+ V = Builder.CreateExtractElement(LI, Builder.getInt32(I), CV->getName());
}
- // Bitcast might not be an Instruction, if the value being loaded is a
- // constant. In that case, no need to reorder anything.
- if (Instruction *BitcastInst = dyn_cast<Instruction>(Bitcast))
- reorder(BitcastInst);
-
- for (auto I : InstrsToErase)
- I->eraseFromParent();
- } else {
- for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
- Value *CV = Chain[I];
- Value *V =
- Builder.CreateExtractElement(LI, Builder.getInt32(I), CV->getName());
- if (V->getType() != CV->getType()) {
- V = Builder.CreateBitOrPointerCast(V, CV->getType());
- }
-
- // Replace the old instruction.
- CV->replaceAllUsesWith(V);
+ if (V->getType() != CV->getType()) {
+ V = Builder.CreateBitOrPointerCast(V, CV->getType());
}
- if (Instruction *BitcastInst = dyn_cast<Instruction>(Bitcast))
- reorder(BitcastInst);
+ // Replace the old instruction.
+ CV->replaceAllUsesWith(V);
}
+ // Bitcast might not be an Instruction, if the value being loaded is a
+ // constant. In that case, no need to reorder anything.
+ if (Instruction *BitcastInst = dyn_cast<Instruction>(Bitcast))
+ reorder(BitcastInst);
+
eraseInstructions(Chain);
++NumVectorInstructions;
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 4747f34fcc62..d11f4146b590 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -470,10 +470,11 @@ public:
/// on, while the old loop will be used as the scalar remainder. Control flow
/// is generated around the vectorized (and scalar epilogue) loops consisting
/// of various checks and bypasses. Return the pre-header block of the new
- /// loop.
- /// In the case of epilogue vectorization, this function is overriden to
- /// handle the more complex control flow around the loops.
- virtual BasicBlock *createVectorizedLoopSkeleton();
+ /// loop and the start value for the canonical induction, if it is != 0. The
+ /// latter is the case when vectorizing the epilogue loop. In the case of
+ /// epilogue vectorization, this function is overridden to handle the more
+ /// complex control flow around the loops.
+ virtual std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton();
/// Widen a single call instruction within the innermost loop.
void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
@@ -507,10 +508,10 @@ public:
/// Widen an integer or floating-point induction variable \p IV. If \p Trunc
/// is provided, the integer induction variable will first be truncated to
- /// the corresponding type.
- void widenIntOrFpInduction(PHINode *IV, const InductionDescriptor &ID,
- Value *Start, TruncInst *Trunc, VPValue *Def,
- VPTransformState &State);
+ /// the corresponding type. \p CanonicalIV is the scalar value generated for
+ /// the canonical induction variable.
+ void widenIntOrFpInduction(PHINode *IV, VPWidenIntOrFpInductionRecipe *Def,
+ VPTransformState &State, Value *CanonicalIV);
/// Construct the vector value of a scalarized value \p V one lane at a time.
void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
@@ -556,6 +557,10 @@ public:
/// vector of instructions.
void addMetadata(ArrayRef<Value *> To, Instruction *From);
+ // Returns the resume value (bc.merge.rdx) for a reduction as
+ // generated by fixReduction.
+ PHINode *getReductionResumeValue(const RecurrenceDescriptor &RdxDesc);
+
protected:
friend class LoopVectorizationPlanner;
@@ -573,16 +578,18 @@ protected:
Value *CountRoundDown, Value *EndValue,
BasicBlock *MiddleBlock);
- /// Create a new induction variable inside L.
- PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
- Value *Step, Instruction *DL);
+ /// Introduce a conditional branch (on true, condition to be set later) at the
+ /// end of the header=latch connecting it to itself (across the backedge) and
+ /// to the exit block of \p L.
+ void createHeaderBranch(Loop *L);
/// Handle all cross-iteration phis in the header.
void fixCrossIterationPHIs(VPTransformState &State);
/// Create the exit value of first order recurrences in the middle block and
/// update their users.
- void fixFirstOrderRecurrence(VPWidenPHIRecipe *PhiR, VPTransformState &State);
+ void fixFirstOrderRecurrence(VPFirstOrderRecurrencePHIRecipe *PhiR,
+ VPTransformState &State);
/// Create code for the loop exit value of the reduction.
void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);
@@ -606,14 +613,6 @@ protected:
/// represented as.
void truncateToMinimalBitwidths(VPTransformState &State);
- /// This function adds
- /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
- /// to each vector element of Val. The sequence starts at StartIndex.
- /// \p Opcode is relevant for FP induction variable.
- virtual Value *
- getStepVector(Value *Val, Value *StartIdx, Value *Step,
- Instruction::BinaryOps Opcode = Instruction::BinaryOpsEnd);
-
/// Compute scalar induction steps. \p ScalarIV is the scalar induction
/// variable on which to base the steps, \p Step is the size of the step, and
/// \p EntryVal is the value from the original loop that maps to the steps.
@@ -640,9 +639,6 @@ protected:
/// Returns true if we should generate a scalar version of \p IV.
bool needsScalarInduction(Instruction *IV) const;
- /// Generate a shuffle sequence that will reverse the vector Vec.
- virtual Value *reverseVector(Value *Vec);
-
/// Returns (and creates if needed) the original loop trip count.
Value *getOrCreateTripCount(Loop *NewLoop);
@@ -685,14 +681,13 @@ protected:
Loop *createVectorLoopSkeleton(StringRef Prefix);
/// Create new phi nodes for the induction variables to resume iteration count
- /// in the scalar epilogue, from where the vectorized loop left off (given by
- /// \p VectorTripCount).
+ /// in the scalar epilogue, from where the vectorized loop left off.
/// In cases where the loop skeleton is more complicated (eg. epilogue
/// vectorization) and the resume values can come from an additional bypass
/// block, the \p AdditionalBypass pair provides information about the bypass
/// block and the end value on the edge from bypass to this loop.
void createInductionResumeValues(
- Loop *L, Value *VectorTripCount,
+ Loop *L,
std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});
/// Complete the loop skeleton by adding debug MDs, creating appropriate
@@ -795,12 +790,6 @@ protected:
/// A list of all bypass blocks. The first block is the entry of the loop.
SmallVector<BasicBlock *, 4> LoopBypassBlocks;
- /// The new Induction variable which was added to the new block.
- PHINode *Induction = nullptr;
-
- /// The induction variable of the old basic block.
- PHINode *OldInduction = nullptr;
-
/// Store instructions that were predicated.
SmallVector<Instruction *, 4> PredicatedInstructions;
@@ -838,6 +827,11 @@ protected:
/// Structure to hold information about generated runtime checks, responsible
/// for cleaning the checks, if vectorization turns out unprofitable.
GeneratedRTChecks &RTChecks;
+
+ // Holds the resume values for reductions in the loops, used to set the
+ // correct start value of reduction PHIs when vectorizing the epilogue.
+ SmallMapVector<const RecurrenceDescriptor *, PHINode *, 4>
+ ReductionResumeValues;
};
class InnerLoopUnroller : public InnerLoopVectorizer {
@@ -856,10 +850,6 @@ public:
private:
Value *getBroadcastInstrs(Value *V) override;
- Value *getStepVector(
- Value *Val, Value *StartIdx, Value *Step,
- Instruction::BinaryOps Opcode = Instruction::BinaryOpsEnd) override;
- Value *reverseVector(Value *Vec) override;
};
/// Encapsulate information regarding vectorization of a loop and its epilogue.
@@ -909,14 +899,16 @@ public:
// Override this function to handle the more complex control flow around the
// three loops.
- BasicBlock *createVectorizedLoopSkeleton() final override {
+ std::pair<BasicBlock *, Value *>
+ createVectorizedLoopSkeleton() final override {
return createEpilogueVectorizedLoopSkeleton();
}
/// The interface for creating a vectorized skeleton using one of two
/// different strategies, each corresponding to one execution of the vplan
/// as described above.
- virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;
+ virtual std::pair<BasicBlock *, Value *>
+ createEpilogueVectorizedLoopSkeleton() = 0;
/// Holds and updates state information required to vectorize the main loop
/// and its epilogue in two separate passes. This setup helps us avoid
@@ -944,7 +936,8 @@ public:
EPI, LVL, CM, BFI, PSI, Check) {}
/// Implements the interface for creating a vectorized skeleton using the
/// *main loop* strategy (ie the first pass of vplan execution).
- BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;
+ std::pair<BasicBlock *, Value *>
+ createEpilogueVectorizedLoopSkeleton() final override;
protected:
/// Emits an iteration count bypass check once for the main loop (when \p
@@ -973,7 +966,8 @@ public:
EPI, LVL, CM, BFI, PSI, Checks) {}
/// Implements the interface for creating a vectorized skeleton using the
/// *epilogue loop* strategy (ie the second pass of vplan execution).
- BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;
+ std::pair<BasicBlock *, Value *>
+ createEpilogueVectorizedLoopSkeleton() final override;
protected:
/// Emits an iteration count bypass check after the main vector loop has
@@ -1069,16 +1063,16 @@ static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}
+namespace llvm {
+
/// Return a value for Step multiplied by VF.
-static Value *createStepForVF(IRBuilder<> &B, Type *Ty, ElementCount VF,
- int64_t Step) {
+Value *createStepForVF(IRBuilder<> &B, Type *Ty, ElementCount VF,
+ int64_t Step) {
assert(Ty->isIntegerTy() && "Expected an integer step");
Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}
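// [Editor's illustration, not part of the patch] With this helper now exposed
// from the llvm namespace, a call such as createStepForVF(B, i64, VF, 2)
// yields the constant i64 8 for a fixed VF of 4, and roughly
// mul (i64 vscale, i64 8) for a scalable VF of 4, as computed by the code
// above.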
-namespace llvm {
-
/// Return the runtime value for VF.
Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) {
Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
@@ -1163,7 +1157,8 @@ void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
// will lead to gather/scatter instructions, which don't need to be
// handled.
if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
- isa<VPInterleaveRecipe>(CurRec))
+ isa<VPInterleaveRecipe>(CurRec) ||
+ isa<VPCanonicalIVPHIRecipe>(CurRec))
continue;
// This recipe contributes to the address computation of a widen
@@ -1232,6 +1227,14 @@ void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
}
}
+PHINode *InnerLoopVectorizer::getReductionResumeValue(
+ const RecurrenceDescriptor &RdxDesc) {
+ auto It = ReductionResumeValues.find(&RdxDesc);
+ assert(It != ReductionResumeValues.end() &&
+ "Expected to find a resume value for the reduction.");
+ return It->second;
+}
+
namespace llvm {
// Loop vectorization cost-model hints how the scalar epilogue loop should be
@@ -1556,13 +1559,16 @@ public:
/// Returns true if the target machine can represent \p V as a masked gather
/// or scatter operation.
- bool isLegalGatherOrScatter(Value *V) {
+ bool isLegalGatherOrScatter(Value *V,
+ ElementCount VF = ElementCount::getFixed(1)) {
bool LI = isa<LoadInst>(V);
bool SI = isa<StoreInst>(V);
if (!LI && !SI)
return false;
auto *Ty = getLoadStoreType(V);
Align Align = getLoadStoreAlignment(V);
+ if (VF.isVector())
+ Ty = VectorType::get(Ty, VF);
return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
(SI && TTI.isLegalMaskedScatter(Ty, Align));
}
@@ -1577,16 +1583,17 @@ public:
}
/// Returns true if \p I is an instruction that will be scalarized with
- /// predication. Such instructions include conditional stores and
- /// instructions that may divide by zero.
- /// If a non-zero VF has been calculated, we check if I will be scalarized
- /// predication for that VF.
- bool isScalarWithPredication(Instruction *I) const;
+ /// predication when vectorizing \p I with vectorization factor \p VF. Such
+ /// instructions include conditional stores and instructions that may divide
+ /// by zero.
+ bool isScalarWithPredication(Instruction *I, ElementCount VF) const;
// Returns true if \p I is an instruction that will be predicated either
// through scalar predication or masked load/store or masked gather/scatter.
+ // \p VF is the vectorization factor that will be used to vectorize \p I.
// Superset of instructions that return true for isScalarWithPredication.
- bool isPredicatedInst(Instruction *I, bool IsKnownUniform = false) {
+ bool isPredicatedInst(Instruction *I, ElementCount VF,
+ bool IsKnownUniform = false) {
// When we know the load is uniform and the original scalar loop was not
// predicated we don't need to mark it as a predicated instruction. Any
// vectorised blocks created when tail-folding are something artificial we
@@ -1602,7 +1609,7 @@ public:
// instructions.
if (isa<LoadInst>(I) || isa<StoreInst>(I))
return Legal->isMaskRequired(I);
- return isScalarWithPredication(I);
+ return isScalarWithPredication(I, VF);
}
/// Returns true if \p I is a memory instruction with consecutive memory
@@ -1794,7 +1801,7 @@ private:
/// Returns true if an artificially high cost for emulated masked memrefs
/// should be used.
- bool useEmulatedMaskMemRefHack(Instruction *I);
+ bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);
/// Map of scalar integer values to the smallest bitwidth they can be legally
/// represented as. The vector equivalents of these values should be truncated
@@ -2078,8 +2085,8 @@ public:
/// Remove the created SCEV & memory runtime check blocks & instructions, if
/// unused.
~GeneratedRTChecks() {
- SCEVExpanderCleaner SCEVCleaner(SCEVExp, *DT);
- SCEVExpanderCleaner MemCheckCleaner(MemCheckExp, *DT);
+ SCEVExpanderCleaner SCEVCleaner(SCEVExp);
+ SCEVExpanderCleaner MemCheckCleaner(MemCheckExp);
if (!SCEVCheckCond)
SCEVCleaner.markResultUsed();
@@ -2335,6 +2342,60 @@ Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
return Shuf;
}
+/// This function adds
+/// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
+/// to each vector element of Val. The sequence starts at \p StartIdx.
+/// \p BinOp is only relevant for FP induction variables.
+static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step,
+ Instruction::BinaryOps BinOp, ElementCount VF,
+ IRBuilder<> &Builder) {
+ assert(VF.isVector() && "only vector VFs are supported");
+
+ // Create and check the types.
+ auto *ValVTy = cast<VectorType>(Val->getType());
+ ElementCount VLen = ValVTy->getElementCount();
+
+ Type *STy = Val->getType()->getScalarType();
+ assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
+ "Induction Step must be an integer or FP");
+ assert(Step->getType() == STy && "Step has wrong type");
+
+ SmallVector<Constant *, 8> Indices;
+
+ // Create a vector of consecutive numbers from zero to VF.
+ VectorType *InitVecValVTy = ValVTy;
+ Type *InitVecValSTy = STy;
+ if (STy->isFloatingPointTy()) {
+ InitVecValSTy =
+ IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
+ InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
+ }
+ Value *InitVec = Builder.CreateStepVector(InitVecValVTy);
+
+ // Splat the StartIdx
+ Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx);
+
+ if (STy->isIntegerTy()) {
+ InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
+ Step = Builder.CreateVectorSplat(VLen, Step);
+ assert(Step->getType() == Val->getType() && "Invalid step vec");
+ // FIXME: The newly created binary instructions should contain nsw/nuw
+ // flags, which can be found from the original scalar operations.
+ Step = Builder.CreateMul(InitVec, Step);
+ return Builder.CreateAdd(Val, Step, "induction");
+ }
+
+ // Floating point induction.
+ assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
+ "Binary Opcode should be specified for FP induction");
+ InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
+ InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat);
+
+ Step = Builder.CreateVectorSplat(VLen, Step);
+ Value *MulOp = Builder.CreateFMul(InitVec, Step);
+ return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
+}
+
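// [Editor's illustration, not part of the patch] For an integer induction with
// VF = 4, a splatted start value v in Val, StartIdx = 0 and Step = s, the
// helper above produces <v + 0*s, v + 1*s, v + 2*s, v + 3*s> via
// step_vector + splat(StartIdx), splat(Step), a multiply and an add; the FP
// path does the same with uitofp/fmul and the recorded FAdd/FSub opcode.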
void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
const InductionDescriptor &II, Value *Step, Value *Start,
Instruction *EntryVal, VPValue *Def, VPTransformState &State) {
@@ -2355,8 +2416,8 @@ void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0);
Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start);
- Value *SteppedStart =
- getStepVector(SplatStart, Zero, Step, II.getInductionOpcode());
+ Value *SteppedStart = getStepVector(
+ SplatStart, Zero, Step, II.getInductionOpcode(), State.VF, State.Builder);
// We create vector phi nodes for both integer and floating-point induction
// variables. Here, we determine the kind of arithmetic we will perform.
@@ -2411,8 +2472,7 @@ void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
// placement of all induction updates.
auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
- auto *ICmp = cast<Instruction>(Br->getCondition());
- LastInduction->moveBefore(ICmp);
+ LastInduction->moveBefore(Br);
LastInduction->setName("vec.ind.next");
VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
@@ -2434,15 +2494,15 @@ bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
return llvm::any_of(IV->users(), isScalarInst);
}
-void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV,
- const InductionDescriptor &ID,
- Value *Start, TruncInst *Trunc,
- VPValue *Def,
- VPTransformState &State) {
+void InnerLoopVectorizer::widenIntOrFpInduction(
+ PHINode *IV, VPWidenIntOrFpInductionRecipe *Def, VPTransformState &State,
+ Value *CanonicalIV) {
+ Value *Start = Def->getStartValue()->getLiveInIRValue();
+ const InductionDescriptor &ID = Def->getInductionDescriptor();
+ TruncInst *Trunc = Def->getTruncInst();
IRBuilder<> &Builder = State.Builder;
- assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
- "Primary induction variable must have an integer type");
assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
+ assert(!State.VF.isZero() && "VF must be non-zero");
// The value from the original loop to which we are mapping the new induction
// variable.
@@ -2468,12 +2528,13 @@ void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV,
// induction variable and step. Otherwise, derive these values from the
// induction descriptor.
auto CreateScalarIV = [&](Value *&Step) -> Value * {
- Value *ScalarIV = Induction;
- if (IV != OldInduction) {
- ScalarIV = IV->getType()->isIntegerTy()
- ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
- : Builder.CreateCast(Instruction::SIToFP, Induction,
- IV->getType());
+ Value *ScalarIV = CanonicalIV;
+ Type *NeededType = IV->getType();
+ if (!Def->isCanonical() || ScalarIV->getType() != NeededType) {
+ ScalarIV =
+ NeededType->isIntegerTy()
+ ? Builder.CreateSExtOrTrunc(ScalarIV, NeededType)
+ : Builder.CreateCast(Instruction::SIToFP, ScalarIV, NeededType);
ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID,
State.CFG.PrevBB);
ScalarIV->setName("offset.idx");
@@ -2493,7 +2554,6 @@ void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV,
auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
Value *Broadcasted = getBroadcastInstrs(ScalarIV);
for (unsigned Part = 0; Part < UF; ++Part) {
- assert(!State.VF.isScalable() && "scalable vectors not yet supported.");
Value *StartIdx;
if (Step->getType()->isFloatingPointTy())
StartIdx =
@@ -2502,7 +2562,8 @@ void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV,
StartIdx = getRuntimeVF(Builder, Step->getType(), State.VF * Part);
Value *EntryPart =
- getStepVector(Broadcasted, StartIdx, Step, ID.getInductionOpcode());
+ getStepVector(Broadcasted, StartIdx, Step, ID.getInductionOpcode(),
+ State.VF, State.Builder);
State.set(Def, EntryPart, Part);
if (Trunc)
addMetadata(EntryPart, Trunc);
@@ -2516,9 +2577,31 @@ void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV,
// Now do the actual transformations, and start with creating the step value.
Value *Step = CreateStepValue(ID.getStep());
- if (State.VF.isZero() || State.VF.isScalar()) {
+ if (State.VF.isScalar()) {
Value *ScalarIV = CreateScalarIV(Step);
- CreateSplatIV(ScalarIV, Step);
+ Type *ScalarTy = IntegerType::get(ScalarIV->getContext(),
+ Step->getType()->getScalarSizeInBits());
+
+ Instruction::BinaryOps IncOp = ID.getInductionOpcode();
+ if (IncOp == Instruction::BinaryOpsEnd)
+ IncOp = Instruction::Add;
+ for (unsigned Part = 0; Part < UF; ++Part) {
+ Value *StartIdx = ConstantInt::get(ScalarTy, Part);
+ Instruction::BinaryOps MulOp = Instruction::Mul;
+ if (Step->getType()->isFloatingPointTy()) {
+ StartIdx = Builder.CreateUIToFP(StartIdx, Step->getType());
+ MulOp = Instruction::FMul;
+ }
+
+ Value *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
+ Value *EntryPart = Builder.CreateBinOp(IncOp, ScalarIV, Mul, "induction");
+ State.set(Def, EntryPart, Part);
+ if (Trunc) {
+ assert(!Step->getType()->isFloatingPointTy() &&
+ "fp inductions shouldn't be truncated");
+ addMetadata(EntryPart, Trunc);
+ }
+ }
return;
}
@@ -2554,54 +2637,6 @@ void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV,
buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, State);
}
-Value *InnerLoopVectorizer::getStepVector(Value *Val, Value *StartIdx,
- Value *Step,
- Instruction::BinaryOps BinOp) {
- // Create and check the types.
- auto *ValVTy = cast<VectorType>(Val->getType());
- ElementCount VLen = ValVTy->getElementCount();
-
- Type *STy = Val->getType()->getScalarType();
- assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
- "Induction Step must be an integer or FP");
- assert(Step->getType() == STy && "Step has wrong type");
-
- SmallVector<Constant *, 8> Indices;
-
- // Create a vector of consecutive numbers from zero to VF.
- VectorType *InitVecValVTy = ValVTy;
- Type *InitVecValSTy = STy;
- if (STy->isFloatingPointTy()) {
- InitVecValSTy =
- IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
- InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
- }
- Value *InitVec = Builder.CreateStepVector(InitVecValVTy);
-
- // Splat the StartIdx
- Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx);
-
- if (STy->isIntegerTy()) {
- InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
- Step = Builder.CreateVectorSplat(VLen, Step);
- assert(Step->getType() == Val->getType() && "Invalid step vec");
- // FIXME: The newly created binary instructions should contain nsw/nuw flags,
- // which can be found from the original scalar operations.
- Step = Builder.CreateMul(InitVec, Step);
- return Builder.CreateAdd(Val, Step, "induction");
- }
-
- // Floating point induction.
- assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
- "Binary Opcode should be specified for FP induction");
- InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
- InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat);
-
- Step = Builder.CreateVectorSplat(VLen, Step);
- Value *MulOp = Builder.CreateFMul(InitVec, Step);
- return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
-}
-
void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
Instruction *EntryVal,
const InductionDescriptor &ID,
@@ -2691,11 +2726,6 @@ void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
State.set(Def, VectorValue, Instance.Part);
}
-Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
- assert(Vec->getType()->isVectorTy() && "Invalid type");
- return Builder.CreateVectorReverse(Vec, "reverse");
-}
-
// Return whether we allow using masked interleave-groups (for dealing with
// strided loads/stores that reside in predicated blocks, or for dealing
// with gaps).
@@ -2858,7 +2888,7 @@ void InnerLoopVectorizer::vectorizeInterleaveGroup(
}
if (Group->isReverse())
- StridedVec = reverseVector(StridedVec);
+ StridedVec = Builder.CreateVectorReverse(StridedVec, "reverse");
State.set(VPDefs[J], StridedVec, Part);
}
@@ -2894,7 +2924,7 @@ void InnerLoopVectorizer::vectorizeInterleaveGroup(
Value *StoredVec = State.get(StoredValues[i], Part);
if (Group->isReverse())
- StoredVec = reverseVector(StoredVec);
+ StoredVec = Builder.CreateVectorReverse(StoredVec, "reverse");
// If this member has different type, cast it to a unified type.
@@ -2993,43 +3023,21 @@ void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
PredicatedInstructions.push_back(Cloned);
}
-PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
- Value *End, Value *Step,
- Instruction *DL) {
+void InnerLoopVectorizer::createHeaderBranch(Loop *L) {
BasicBlock *Header = L->getHeader();
- BasicBlock *Latch = L->getLoopLatch();
- // As we're just creating this loop, it's possible no latch exists
- // yet. If so, use the header as this will be a single block loop.
- if (!Latch)
- Latch = Header;
-
- IRBuilder<> B(&*Header->getFirstInsertionPt());
- Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
- setDebugLocFromInst(OldInst, &B);
- auto *Induction = B.CreatePHI(Start->getType(), 2, "index");
+ assert(!L->getLoopLatch() && "loop should not have a latch at this point");
- B.SetInsertPoint(Latch->getTerminator());
+ IRBuilder<> B(Header->getTerminator());
+ Instruction *OldInst =
+ getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
setDebugLocFromInst(OldInst, &B);
- // Create i+1 and fill the PHINode.
- //
- // If the tail is not folded, we know that End - Start >= Step (either
- // statically or through the minimum iteration checks). We also know that both
- // Start % Step == 0 and End % Step == 0. We exit the vector loop if %IV +
- // %Step == %End. Hence we must exit the loop before %IV + %Step unsigned
- // overflows and we can mark the induction increment as NUW.
- Value *Next = B.CreateAdd(Induction, Step, "index.next",
- /*NUW=*/!Cost->foldTailByMasking(), /*NSW=*/false);
- Induction->addIncoming(Start, L->getLoopPreheader());
- Induction->addIncoming(Next, Latch);
- // Create the compare.
- Value *ICmp = B.CreateICmpEQ(Next, End);
- B.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header);
+ // Connect the header to the exit block and back to itself (across the
+ // backedge), replacing the old terminator.
+ B.CreateCondBr(B.getTrue(), L->getUniqueExitBlock(), Header);
// Now we have two terminators. Remove the old one from the block.
- Latch->getTerminator()->eraseFromParent();
-
- return Induction;
+ Header->getTerminator()->eraseFromParent();
}
Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
@@ -3099,10 +3107,9 @@ Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
if (Cost->foldTailByMasking()) {
assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
"VF*UF must be a power of 2 when folding tail by masking");
- assert(!VF.isScalable() &&
- "Tail folding not yet supported for scalable vectors");
+ Value *NumLanes = getRuntimeVF(Builder, Ty, VF * UF);
TC = Builder.CreateAdd(
- TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up");
+ TC, Builder.CreateSub(NumLanes, ConstantInt::get(Ty, 1)), "n.rnd.up");
}
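// [Editor's illustration, not part of the patch] The round-up above now also
// covers scalable VFs: for a fixed VF*UF of 8 lanes and TC = 13,
// n.rnd.up = 13 + (8 - 1) = 20, while for a scalable VF the lane count is
// 8 * vscale, obtained at runtime via getRuntimeVF.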
// Now we need to generate the expression for the part of the loop that the
@@ -3436,12 +3443,13 @@ Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
}
void InnerLoopVectorizer::createInductionResumeValues(
- Loop *L, Value *VectorTripCount,
- std::pair<BasicBlock *, Value *> AdditionalBypass) {
- assert(VectorTripCount && L && "Expected valid arguments");
+ Loop *L, std::pair<BasicBlock *, Value *> AdditionalBypass) {
assert(((AdditionalBypass.first && AdditionalBypass.second) ||
(!AdditionalBypass.first && !AdditionalBypass.second)) &&
"Inconsistent information about additional bypass.");
+
+ Value *VectorTripCount = getOrCreateVectorTripCount(L);
+ assert(VectorTripCount && L && "Expected valid arguments");
// We are going to resume the execution of the scalar loop.
// Go over all of the induction variables that we found and fix the
// PHIs that are left in the scalar version of the loop.
@@ -3449,6 +3457,7 @@ void InnerLoopVectorizer::createInductionResumeValues(
// iteration in the vectorized loop.
// If we come from a bypass edge then we need to start from the original
// start value.
+ Instruction *OldInduction = Legal->getPrimaryInduction();
for (auto &InductionEntry : Legal->getInductionVars()) {
PHINode *OrigPhi = InductionEntry.first;
InductionDescriptor II = InductionEntry.second;
@@ -3546,25 +3555,6 @@ BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L,
"Inconsistent vector loop preheader");
Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
- Optional<MDNode *> VectorizedLoopID =
- makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
- LLVMLoopVectorizeFollowupVectorized});
- if (VectorizedLoopID.hasValue()) {
- L->setLoopID(VectorizedLoopID.getValue());
-
- // Do not setAlreadyVectorized if loop attributes have been defined
- // explicitly.
- return LoopVectorPreHeader;
- }
-
- // Keep all loop hints from the original loop on the vector loop (we'll
- // replace the vectorizer-specific hints below).
- if (MDNode *LID = OrigLoop->getLoopID())
- L->setLoopID(LID);
-
- LoopVectorizeHints Hints(L, true, *ORE, TTI);
- Hints.setAlreadyVectorized();
-
#ifdef EXPENSIVE_CHECKS
assert(DT->verify(DominatorTree::VerificationLevel::Fast));
LI->verify(*DT);
@@ -3573,7 +3563,8 @@ BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L,
return LoopVectorPreHeader;
}
-BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
+std::pair<BasicBlock *, Value *>
+InnerLoopVectorizer::createVectorizedLoopSkeleton() {
/*
In this function we generate a new loop. The new loop will contain
the vectorized instructions while the old loop will continue to run the
@@ -3638,33 +3629,12 @@ BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
// faster.
emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
- // Some loops have a single integer induction variable, while other loops
- // don't. One example is c++ iterators that often have multiple pointer
- // induction variables. In the code below we also support a case where we
- // don't have a single induction variable.
- //
- // We try to obtain an induction variable from the original loop as hard
- // as possible. However if we don't find one that:
- // - is an integer
- // - counts from zero, stepping by one
- // - is the size of the widest induction variable type
- // then we create a new one.
- OldInduction = Legal->getPrimaryInduction();
- Type *IdxTy = Legal->getWidestInductionType();
- Value *StartIdx = ConstantInt::get(IdxTy, 0);
- // The loop step is equal to the vectorization factor (num of SIMD elements)
- // times the unroll factor (num of SIMD instructions).
- Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt());
- Value *Step = createStepForVF(Builder, IdxTy, VF, UF);
- Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
- Induction =
- createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
- getDebugLocFromInstOrOperands(OldInduction));
+ createHeaderBranch(Lp);
// Emit phis for the new starting index of the scalar loop.
- createInductionResumeValues(Lp, CountRoundDown);
+ createInductionResumeValues(Lp);
- return completeLoopSkeleton(Lp, OrigLoopID);
+ return {completeLoopSkeleton(Lp, OrigLoopID), nullptr};
}
// Fix up external users of the induction variable. At this point, we are
@@ -4088,8 +4058,8 @@ void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
}
}
-void InnerLoopVectorizer::fixFirstOrderRecurrence(VPWidenPHIRecipe *PhiR,
- VPTransformState &State) {
+void InnerLoopVectorizer::fixFirstOrderRecurrence(
+ VPFirstOrderRecurrencePHIRecipe *PhiR, VPTransformState &State) {
// This is the second phase of vectorizing first-order recurrences. An
// overview of the transformation is described below. Suppose we have the
// following loop.
@@ -4334,13 +4304,29 @@ void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
: Builder.CreateZExt(ReducedPartRdx, PhiTy);
}
+ PHINode *ResumePhi =
+ dyn_cast<PHINode>(PhiR->getStartValue()->getUnderlyingValue());
+
// Create a phi node that merges control-flow from the backedge-taken check
// block and the middle block.
PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
LoopScalarPreHeader->getTerminator());
- for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
- BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
- BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
+
+ // If we are fixing reductions in the epilogue loop then we should already
+ // have created a bc.merge.rdx Phi after the main vector body. Ensure that
+ // we carry over the incoming values correctly.
+ for (auto *Incoming : predecessors(LoopScalarPreHeader)) {
+ if (Incoming == LoopMiddleBlock)
+ BCBlockPhi->addIncoming(ReducedPartRdx, Incoming);
+ else if (ResumePhi && llvm::is_contained(ResumePhi->blocks(), Incoming))
+ BCBlockPhi->addIncoming(ResumePhi->getIncomingValueForBlock(Incoming),
+ Incoming);
+ else
+ BCBlockPhi->addIncoming(ReductionStartValue, Incoming);
+ }
+
+ // Set the resume value for this reduction
+ ReductionResumeValues.insert({&RdxDesc, BCBlockPhi});
// Now, we need to fix the users of the reduction variable
// inside and outside of the scalar remainder loop.
@@ -4557,6 +4543,9 @@ void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
InductionDescriptor II = Legal->getInductionVars().lookup(P);
const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
+ auto *IVR = PhiR->getParent()->getPlan()->getCanonicalIV();
+ PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0));
+
// FIXME: The newly created binary instructions should contain nsw/nuw flags,
// which can be found from the original scalar operations.
switch (II.getKind()) {
@@ -4572,7 +4561,7 @@ void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
if (Cost->isScalarAfterVectorization(P, State.VF)) {
// This is the normalized GEP that starts counting at zero.
Value *PtrInd =
- Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType());
+ Builder.CreateSExtOrTrunc(CanonicalIV, II.getStep()->getType());
// Determine the number of scalars we need to generate for each unroll
// iteration. If the instruction is uniform, we only need to generate the
// first lane. Otherwise, we generate all VF values.
@@ -4602,10 +4591,10 @@ void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
Type *PhiType = II.getStep()->getType();
// Build a pointer phi
- Value *ScalarStartValue = II.getStartValue();
+ Value *ScalarStartValue = PhiR->getStartValue()->getLiveInIRValue();
Type *ScStValueType = ScalarStartValue->getType();
PHINode *NewPointerPhi =
- PHINode::Create(ScStValueType, 2, "pointer.phi", Induction);
+ PHINode::Create(ScStValueType, 2, "pointer.phi", CanonicalIV);
NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);
// A pointer induction, performed by using a gep
@@ -4916,7 +4905,8 @@ void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
Scalars[VF].insert(Worklist.begin(), Worklist.end());
}
-bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I) const {
+bool LoopVectorizationCostModel::isScalarWithPredication(
+ Instruction *I, ElementCount VF) const {
if (!blockNeedsPredicationForAnyReason(I->getParent()))
return false;
switch(I->getOpcode()) {
@@ -4928,11 +4918,14 @@ bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I) const {
return false;
auto *Ptr = getLoadStorePointerOperand(I);
auto *Ty = getLoadStoreType(I);
+ Type *VTy = Ty;
+ if (VF.isVector())
+ VTy = VectorType::get(Ty, VF);
const Align Alignment = getLoadStoreAlignment(I);
return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
- TTI.isLegalMaskedGather(Ty, Alignment))
+ TTI.isLegalMaskedGather(VTy, Alignment))
: !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
- TTI.isLegalMaskedScatter(Ty, Alignment));
+ TTI.isLegalMaskedScatter(VTy, Alignment));
}
case Instruction::UDiv:
case Instruction::SDiv:
@@ -5005,7 +4998,7 @@ bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
// If the instruction is a store located in a predicated block, it will be
// scalarized.
- if (isScalarWithPredication(I))
+ if (isScalarWithPredication(I, VF))
return false;
// If the instruction's allocated size doesn't equal it's type size, it
@@ -5056,7 +5049,7 @@ void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
<< *I << "\n");
return;
}
- if (isScalarWithPredication(I)) {
+ if (isScalarWithPredication(I, VF)) {
LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
<< *I << "\n");
return;
@@ -5531,10 +5524,12 @@ LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
}
}
- // For scalable vectors, don't use tail folding as this is currently not yet
- // supported. The code is likely to have ended up here if the tripcount is
- // low, in which case it makes sense not to use scalable vectors.
- if (MaxFactors.ScalableVF.isVector())
+ // For scalable vectors, don't use tail folding for low trip counts or when
+ // optimizing for code size, unless the user has explicitly requested it.
+ if (ScalarEpilogueStatus != CM_ScalarEpilogueNotNeededUsePredicate &&
+ ScalarEpilogueStatus != CM_ScalarEpilogueNotAllowedUsePredicate &&
+ MaxFactors.ScalableVF.isVector())
MaxFactors.ScalableVF = ElementCount::getScalable(0);
// If we don't know the precise trip count, or if the trip count that we
@@ -5849,10 +5844,8 @@ bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
const Loop &L, ElementCount VF) const {
// Cross iteration phis such as reductions need special handling and are
// currently unsupported.
- if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) {
- return Legal->isFirstOrderRecurrence(&Phi) ||
- Legal->isReductionVariable(&Phi);
- }))
+ if (any_of(L.getHeader()->phis(),
+ [&](PHINode &Phi) { return Legal->isFirstOrderRecurrence(&Phi); }))
return false;
// Phis with uses outside of the loop require special handling and are
@@ -5978,11 +5971,29 @@ LoopVectorizationCostModel::getSmallestAndWidestTypes() {
unsigned MinWidth = -1U;
unsigned MaxWidth = 8;
const DataLayout &DL = TheFunction->getParent()->getDataLayout();
- for (Type *T : ElementTypesInLoop) {
- MinWidth = std::min<unsigned>(
- MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
- MaxWidth = std::max<unsigned>(
- MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
+ // For in-loop reductions, no element types are added to ElementTypesInLoop
+ // if there are no loads/stores in the loop. In this case, check through the
+ // reduction variables to determine the maximum width.
+ if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) {
+ // Reset MaxWidth so that we can find the smallest type used by recurrences
+ // in the loop.
+ MaxWidth = -1U;
+ for (auto &PhiDescriptorPair : Legal->getReductionVars()) {
+ const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second;
+ // When finding the min width used by the recurrence we need to account
+ // for casts on the input operands of the recurrence.
+ MaxWidth = std::min<unsigned>(
+ MaxWidth, std::min<unsigned>(
+ RdxDesc.getMinWidthCastToRecurrenceTypeInBits(),
+ RdxDesc.getRecurrenceType()->getScalarSizeInBits()));
+ }
+ } else {
+ for (Type *T : ElementTypesInLoop) {
+ MinWidth = std::min<unsigned>(
+ MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
+ MaxWidth = std::max<unsigned>(
+ MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
+ }
}
return {MinWidth, MaxWidth};
}
@@ -6022,18 +6033,8 @@ void LoopVectorizationCostModel::collectElementTypesForWidening() {
if (auto *ST = dyn_cast<StoreInst>(&I))
T = ST->getValueOperand()->getType();
- // Ignore loaded pointer types and stored pointer types that are not
- // vectorizable.
- //
- // FIXME: The check here attempts to predict whether a load or store will
- // be vectorized. We only know this for certain after a VF has
- // been selected. Here, we assume that if an access can be
- // vectorized, it will be. We should also look at extending this
- // optimization to non-pointer types.
- //
- if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
- !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
- continue;
+ assert(T->isSized() &&
+ "Expected the load/store/recurrence type to be sized");
ElementTypesInLoop.insert(T);
}
@@ -6475,7 +6476,8 @@ LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
return RUs;
}
-bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){
+bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I,
+ ElementCount VF) {
// TODO: Cost model for emulated masked load/store is completely
// broken. This hack guides the cost model to use an artificially
// high enough value to practically disable vectorization with such
@@ -6484,8 +6486,7 @@ bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){
// from moving "masked load/store" check from legality to cost model.
// Masked Load/Gather emulation was previously never allowed.
// Limited number of Masked Store/Scatter emulation was allowed.
- assert(isPredicatedInst(I) &&
- "Expecting a scalar emulated instruction");
+ assert(isPredicatedInst(I, VF) && "Expecting a scalar emulated instruction");
return isa<LoadInst>(I) ||
(isa<StoreInst>(I) &&
NumPredStores > NumberOfStoresToPredicate);
@@ -6512,13 +6513,13 @@ void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
if (!blockNeedsPredicationForAnyReason(BB))
continue;
for (Instruction &I : *BB)
- if (isScalarWithPredication(&I)) {
+ if (isScalarWithPredication(&I, VF)) {
ScalarCostsTy ScalarCosts;
// Do not apply discount if scalable, because that would lead to
// invalid scalarization costs.
// Do not apply discount logic if hacked cost is needed
// for emulated masked memrefs.
- if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I) &&
+ if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I, VF) &&
computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
// Remember that BB will remain after vectorization.
@@ -6554,7 +6555,7 @@ int LoopVectorizationCostModel::computePredInstDiscount(
// If the instruction is scalar with predication, it will be analyzed
// separately. We ignore it within the context of PredInst.
- if (isScalarWithPredication(I))
+ if (isScalarWithPredication(I, VF))
return false;
// If any of the instruction's operands are uniform after vectorization,
@@ -6601,7 +6602,7 @@ int LoopVectorizationCostModel::computePredInstDiscount(
// Compute the scalarization overhead of needed insertelement instructions
// and phi nodes.
- if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
+ if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
ScalarCost += TTI.getScalarizationOverhead(
cast<VectorType>(ToVectorTy(I->getType(), VF)),
APInt::getAllOnes(VF.getFixedValue()), true, false);
@@ -6764,7 +6765,7 @@ LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
// If we have a predicated load/store, it will need extra i1 extracts and
// conditional branches, but may not be executed for each vector lane. Scale
// the cost by the probability of executing the predicated block.
- if (isPredicatedInst(I)) {
+ if (isPredicatedInst(I, VF)) {
Cost /= getReciprocalPredBlockProb();
// Add the cost of an i1 extract and a branch
@@ -6775,7 +6776,7 @@ LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
/*Insert=*/false, /*Extract=*/true);
Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput);
- if (useEmulatedMaskMemRefHack(I))
+ if (useEmulatedMaskMemRefHack(I, VF))
// Artificially setting to a high enough value to practically disable
// vectorization with such operations.
Cost = 3000000;
@@ -7182,7 +7183,7 @@ void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
// predicated uniform stores. Today they are treated as any other
// predicated store (see added test cases in
// invariant-store-vectorization.ll).
- if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
+ if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF))
NumPredStores++;
if (Legal->isUniformMemOp(I)) {
@@ -7192,7 +7193,7 @@ void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
// Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
InstructionCost Cost;
if (isa<StoreInst>(&I) && VF.isScalable() &&
- isLegalGatherOrScatter(&I)) {
+ isLegalGatherOrScatter(&I, VF)) {
Cost = getGatherScatterCost(&I, VF);
setWideningDecision(&I, VF, CM_GatherScatter, Cost);
} else {
@@ -7234,7 +7235,7 @@ void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
}
InstructionCost GatherScatterCost =
- isLegalGatherOrScatter(&I)
+ isLegalGatherOrScatter(&I, VF)
? getGatherScatterCost(&I, VF) * NumAccesses
: InstructionCost::getInvalid();
@@ -7437,7 +7438,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
// vector lane. Get the scalarization cost and scale this amount by the
// probability of executing the predicated block. If the instruction is not
// predicated, we fall through to the next case.
- if (VF.isVector() && isScalarWithPredication(I)) {
+ if (VF.isVector() && isScalarWithPredication(I, VF)) {
InstructionCost Cost = 0;
// These instructions have a non-void type, so account for the phi nodes
@@ -7941,6 +7942,40 @@ VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const {
llvm_unreachable("No plan found!");
}
+static void AddRuntimeUnrollDisableMetaData(Loop *L) {
+ SmallVector<Metadata *, 4> MDs;
+ // Reserve first location for self reference to the LoopID metadata node.
+ MDs.push_back(nullptr);
+ bool IsUnrollMetadata = false;
+ MDNode *LoopID = L->getLoopID();
+ if (LoopID) {
+ // First find existing loop unrolling disable metadata.
+ for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
+ auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
+ if (MD) {
+ const auto *S = dyn_cast<MDString>(MD->getOperand(0));
+ IsUnrollMetadata =
+ S && S->getString().startswith("llvm.loop.unroll.disable");
+ }
+ MDs.push_back(LoopID->getOperand(i));
+ }
+ }
+
+ if (!IsUnrollMetadata) {
+ // Add runtime unroll disable metadata.
+ LLVMContext &Context = L->getHeader()->getContext();
+ SmallVector<Metadata *, 1> DisableOperands;
+ DisableOperands.push_back(
+ MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
+ MDNode *DisableNode = MDNode::get(Context, DisableOperands);
+ MDs.push_back(DisableNode);
+ MDNode *NewLoopID = MDNode::get(Context, MDs);
+ // Set operand 0 to refer to the loop id itself.
+ NewLoopID->replaceOperandWith(0, NewLoopID);
+ L->setLoopID(NewLoopID);
+ }
+}
+
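// [Editor's illustration, not part of the patch] After this helper runs on a
// loop that had no unroll metadata, the loop's !llvm.loop ID looks roughly
// like:
//   !0 = distinct !{!0, !1}
//   !1 = !{!"llvm.loop.unroll.runtime.disable"}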
void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF,
VPlan &BestVPlan,
InnerLoopVectorizer &ILV,
@@ -7952,9 +7987,9 @@ void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF,
// 1. Create a new empty loop. Unlink the old loop and connect the new one.
VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan};
- State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
- State.TripCount = ILV.getOrCreateTripCount(nullptr);
- State.CanonicalIV = ILV.Induction;
+ Value *CanonicalIVStartValue;
+ std::tie(State.CFG.PrevBB, CanonicalIVStartValue) =
+ ILV.createVectorizedLoopSkeleton();
ILV.collectPoisonGeneratingRecipes(State);
ILV.printDebugTracesAtStart();
@@ -7968,8 +8003,35 @@ void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF,
//===------------------------------------------------===//
// 2. Copy and widen instructions from the old loop into the new loop.
+ BestVPlan.prepareToExecute(ILV.getOrCreateTripCount(nullptr),
+ ILV.getOrCreateVectorTripCount(nullptr),
+ CanonicalIVStartValue, State);
BestVPlan.execute(&State);
+ // Retrieve the original loop's metadata so the appropriate loop hints can be
+ // transferred to (or replaced on) the vector loop below.
+ MDNode *OrigLoopID = OrigLoop->getLoopID();
+
+ Optional<MDNode *> VectorizedLoopID =
+ makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
+ LLVMLoopVectorizeFollowupVectorized});
+
+ Loop *L = LI->getLoopFor(State.CFG.PrevBB);
+ if (VectorizedLoopID.hasValue())
+ L->setLoopID(VectorizedLoopID.getValue());
+ else {
+ // Keep all loop hints from the original loop on the vector loop (we'll
+ // replace the vectorizer-specific hints below).
+ if (MDNode *LID = OrigLoop->getLoopID())
+ L->setLoopID(LID);
+
+ LoopVectorizeHints Hints(L, true, *ORE);
+ Hints.setAlreadyVectorized();
+ }
+ // Disable runtime unrolling when vectorizing the epilogue loop.
+ if (CanonicalIVStartValue)
+ AddRuntimeUnrollDisableMetaData(L);
+
// 3. Fix the vectorized code: take care of header phi's, live-outs,
// predication, updating analyses.
ILV.fixVectorizedLoop(State);
@@ -8032,66 +8094,16 @@ void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
}
}
-Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
-
Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
-Value *InnerLoopUnroller::getStepVector(Value *Val, Value *StartIdx,
- Value *Step,
- Instruction::BinaryOps BinOp) {
- // When unrolling and the VF is 1, we only need to add a simple scalar.
- Type *Ty = Val->getType();
- assert(!Ty->isVectorTy() && "Val must be a scalar");
-
- if (Ty->isFloatingPointTy()) {
- // Floating-point operations inherit FMF via the builder's flags.
- Value *MulOp = Builder.CreateFMul(StartIdx, Step);
- return Builder.CreateBinOp(BinOp, Val, MulOp);
- }
- return Builder.CreateAdd(Val, Builder.CreateMul(StartIdx, Step), "induction");
-}
-
-static void AddRuntimeUnrollDisableMetaData(Loop *L) {
- SmallVector<Metadata *, 4> MDs;
- // Reserve first location for self reference to the LoopID metadata node.
- MDs.push_back(nullptr);
- bool IsUnrollMetadata = false;
- MDNode *LoopID = L->getLoopID();
- if (LoopID) {
- // First find existing loop unrolling disable metadata.
- for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
- auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
- if (MD) {
- const auto *S = dyn_cast<MDString>(MD->getOperand(0));
- IsUnrollMetadata =
- S && S->getString().startswith("llvm.loop.unroll.disable");
- }
- MDs.push_back(LoopID->getOperand(i));
- }
- }
-
- if (!IsUnrollMetadata) {
- // Add runtime unroll disable metadata.
- LLVMContext &Context = L->getHeader()->getContext();
- SmallVector<Metadata *, 1> DisableOperands;
- DisableOperands.push_back(
- MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
- MDNode *DisableNode = MDNode::get(Context, DisableOperands);
- MDs.push_back(DisableNode);
- MDNode *NewLoopID = MDNode::get(Context, MDs);
- // Set operand 0 to refer to the loop id itself.
- NewLoopID->replaceOperandWith(0, NewLoopID);
- L->setLoopID(NewLoopID);
- }
-}
-
//===--------------------------------------------------------------------===//
// EpilogueVectorizerMainLoop
//===--------------------------------------------------------------------===//
/// This function is partially responsible for generating the control flow
/// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
-BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
+std::pair<BasicBlock *, Value *>
+EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
MDNode *OrigLoopID = OrigLoop->getLoopID();
Loop *Lp = createVectorLoopSkeleton("");
@@ -8120,24 +8132,16 @@ BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false);
// Generate the induction variable.
- OldInduction = Legal->getPrimaryInduction();
- Type *IdxTy = Legal->getWidestInductionType();
- Value *StartIdx = ConstantInt::get(IdxTy, 0);
-
- IRBuilder<> B(&*Lp->getLoopPreheader()->getFirstInsertionPt());
- Value *Step = getRuntimeVF(B, IdxTy, VF * UF);
Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
EPI.VectorTripCount = CountRoundDown;
- Induction =
- createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
- getDebugLocFromInstOrOperands(OldInduction));
+ createHeaderBranch(Lp);
// Skip induction resume value creation here because they will be created in
// the second pass. If we created them here, they wouldn't be used anyway,
// because the vplan in the second pass still contains the inductions from the
// original loop.
- return completeLoopSkeleton(Lp, OrigLoopID);
+ return {completeLoopSkeleton(Lp, OrigLoopID), nullptr};
}
void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
@@ -8219,7 +8223,7 @@ BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(
/// This function is partially responsible for generating the control flow
/// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
-BasicBlock *
+std::pair<BasicBlock *, Value *>
EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
MDNode *OrigLoopID = OrigLoop->getLoopID();
Loop *Lp = createVectorLoopSkeleton("vec.epilog.");
@@ -8275,6 +8279,25 @@ EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
+ // The vec.epilog.iter.check block may contain Phi nodes from reductions which
+ // merge control-flow from the latch block and the middle block. Update the
+ // incoming values here and move the Phi into the preheader.
+ SmallVector<PHINode *, 4> PhisInBlock;
+ for (PHINode &Phi : VecEpilogueIterationCountCheck->phis())
+ PhisInBlock.push_back(&Phi);
+
+ for (PHINode *Phi : PhisInBlock) {
+ Phi->replaceIncomingBlockWith(
+ VecEpilogueIterationCountCheck->getSinglePredecessor(),
+ VecEpilogueIterationCountCheck);
+ Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck);
+ if (EPI.SCEVSafetyCheck)
+ Phi->removeIncomingValue(EPI.SCEVSafetyCheck);
+ if (EPI.MemSafetyCheck)
+ Phi->removeIncomingValue(EPI.MemSafetyCheck);
+ Phi->moveBefore(LoopVectorPreHeader->getFirstNonPHI());
+ }
+
// Generate a resume induction for the vector epilogue and put it in the
// vector epilogue preheader
Type *IdxTy = Legal->getWidestInductionType();
@@ -8285,13 +8308,7 @@ EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
EPI.MainLoopIterationCountCheck);
// Generate the induction variable.
- OldInduction = Legal->getPrimaryInduction();
- Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
- Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
- Value *StartIdx = EPResumeVal;
- Induction =
- createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
- getDebugLocFromInstOrOperands(OldInduction));
+ createHeaderBranch(Lp);
// Generate induction resume values. These variables save the new starting
// indexes for the scalar loop. They are used to test if there are any tail
@@ -8300,12 +8317,10 @@ EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
// check, then the resume value for the induction variable comes from
// the trip count of the main vector loop, hence passing the AdditionalBypass
// argument.
- createInductionResumeValues(Lp, CountRoundDown,
- {VecEpilogueIterationCountCheck,
- EPI.VectorTripCount} /* AdditionalBypass */);
+ createInductionResumeValues(Lp, {VecEpilogueIterationCountCheck,
+ EPI.VectorTripCount} /* AdditionalBypass */);
- AddRuntimeUnrollDisableMetaData(Lp);
- return completeLoopSkeleton(Lp, OrigLoopID);
+ return {completeLoopSkeleton(Lp, OrigLoopID), EPResumeVal};
}
BasicBlock *
@@ -8447,33 +8462,22 @@ VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
// Introduce the early-exit compare IV <= BTC to form header block mask.
- // This is used instead of IV < TC because TC may wrap, unlike BTC.
- // Start by constructing the desired canonical IV in the header block.
- VPValue *IV = nullptr;
- if (Legal->getPrimaryInduction())
- IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction());
- else {
- VPBasicBlock *HeaderVPBB = Plan->getEntry()->getEntryBasicBlock();
- auto *IVRecipe = new VPWidenCanonicalIVRecipe();
- HeaderVPBB->insert(IVRecipe, HeaderVPBB->getFirstNonPhi());
- IV = IVRecipe;
- }
+ // This is used instead of IV < TC because TC may wrap, unlike BTC. Start by
+ // constructing the desired canonical IV in the header block as its first
+ // non-phi instruction.
+ assert(CM.foldTailByMasking() && "must fold the tail");
+ VPBasicBlock *HeaderVPBB = Plan->getEntry()->getEntryBasicBlock();
+ auto NewInsertionPoint = HeaderVPBB->getFirstNonPhi();
+ auto *IV = new VPWidenCanonicalIVRecipe(Plan->getCanonicalIV());
+ HeaderVPBB->insert(IV, HeaderVPBB->getFirstNonPhi());
- // Create the block in mask as the first non-phi instruction in the block.
VPBuilder::InsertPointGuard Guard(Builder);
- auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi();
- Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint);
-
- VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
- bool TailFolded = !CM.isScalarEpilogueAllowed();
-
- if (TailFolded && CM.TTI.emitGetActiveLaneMask()) {
- // While ActiveLaneMask is a binary op that consumes the loop tripcount
- // as a second argument, we only pass the IV here and extract the
- // tripcount from the transform state where codegen of the VP instructions
- // happen.
- BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV});
+ Builder.setInsertPoint(HeaderVPBB, NewInsertionPoint);
+ if (CM.TTI.emitGetActiveLaneMask()) {
+ VPValue *TC = Plan->getOrCreateTripCount();
+ BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, TC});
} else {
+ VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
}
return BlockMaskCache[BB] = BlockMask;
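// [Editor's illustration, not part of the patch] Conceptually, for VF = 4 the
// header block mask for lanes {iv, iv+1, iv+2, iv+3} is either
//   active.lane.mask(iv, trip-count)            if the target supports it, or
//   <iv, iv+1, iv+2, iv+3> ule backedge-taken-count
// so lanes beyond the scalar trip count are masked off when folding the tail.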
@@ -8621,7 +8625,9 @@ VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
VFRange &Range) const {
bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
- [this, CI](ElementCount VF) { return CM.isScalarWithPredication(CI); },
+ [this, CI](ElementCount VF) {
+ return CM.isScalarWithPredication(CI, VF);
+ },
Range);
if (IsPredicated)
@@ -8661,7 +8667,8 @@ bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
// scalarization is profitable or it is predicated.
auto WillScalarize = [this, I](ElementCount VF) -> bool {
return CM.isScalarAfterVectorization(I, VF) ||
- CM.isProfitableToScalarize(I, VF) || CM.isScalarWithPredication(I);
+ CM.isProfitableToScalarize(I, VF) ||
+ CM.isScalarWithPredication(I, VF);
};
return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
Range);
@@ -8719,7 +8726,7 @@ VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
void VPRecipeBuilder::fixHeaderPhis() {
BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
- for (VPWidenPHIRecipe *R : PhisToFix) {
+ for (VPHeaderPHIRecipe *R : PhisToFix) {
auto *PN = cast<PHINode>(R->getUnderlyingValue());
VPRecipeBase *IncR =
getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
@@ -8735,7 +8742,7 @@ VPBasicBlock *VPRecipeBuilder::handleReplication(
Range);
bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
- [&](ElementCount VF) { return CM.isPredicatedInst(I, IsUniform); },
+ [&](ElementCount VF) { return CM.isPredicatedInst(I, VF, IsUniform); },
Range);
// Even if the instruction is not marked as uniform, there are certain
@@ -8861,7 +8868,7 @@ VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands)))
return toVPRecipeResult(Recipe);
- VPWidenPHIRecipe *PhiRecipe = nullptr;
+ VPHeaderPHIRecipe *PhiRecipe = nullptr;
if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) {
VPValue *StartV = Operands[0];
if (Legal->isReductionVariable(Phi)) {
@@ -8882,11 +8889,14 @@ VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch())));
PhisToFix.push_back(PhiRecipe);
} else {
- // TODO: record start and backedge value for remaining pointer induction
- // phis.
+ // TODO: record backedge value for remaining pointer induction phis.
assert(Phi->getType()->isPointerTy() &&
"only pointer phis should be handled here");
- PhiRecipe = new VPWidenPHIRecipe(Phi);
+ assert(Legal->getInductionVars().count(Phi) &&
+ "Not an induction variable");
+ InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
+ VPValue *Start = Plan->getOrAddVPValue(II.getStartValue());
+ PhiRecipe = new VPWidenPHIRecipe(Phi, Start);
}
return toVPRecipeResult(PhiRecipe);
@@ -8966,6 +8976,40 @@ void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
}
}
+// Add a VPCanonicalIVPHIRecipe starting at 0 to the header, a
+// CanonicalIVIncrement{NUW} VPInstruction to increment it by VF * UF and a
+// BranchOnCount VPInstruction to the latch.
+static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, DebugLoc DL,
+ bool HasNUW, bool IsVPlanNative) {
+ Value *StartIdx = ConstantInt::get(IdxTy, 0);
+ auto *StartV = Plan.getOrAddVPValue(StartIdx);
+
+ auto *CanonicalIVPHI = new VPCanonicalIVPHIRecipe(StartV, DL);
+ VPRegionBlock *TopRegion = Plan.getVectorLoopRegion();
+ VPBasicBlock *Header = TopRegion->getEntryBasicBlock();
+ if (IsVPlanNative)
+ Header = cast<VPBasicBlock>(Header->getSingleSuccessor());
+ Header->insert(CanonicalIVPHI, Header->begin());
+
+ auto *CanonicalIVIncrement =
+ new VPInstruction(HasNUW ? VPInstruction::CanonicalIVIncrementNUW
+ : VPInstruction::CanonicalIVIncrement,
+ {CanonicalIVPHI}, DL);
+ CanonicalIVPHI->addOperand(CanonicalIVIncrement);
+
+ VPBasicBlock *EB = TopRegion->getExitBasicBlock();
+ if (IsVPlanNative) {
+ EB = cast<VPBasicBlock>(EB->getSinglePredecessor());
+ EB->setCondBit(nullptr);
+ }
+ EB->appendRecipe(CanonicalIVIncrement);
+
+ auto *BranchOnCount =
+ new VPInstruction(VPInstruction::BranchOnCount,
+ {CanonicalIVIncrement, &Plan.getVectorTripCount()}, DL);
+ EB->appendRecipe(BranchOnCount);
+}
+
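Taken together, the three recipes added here make the vector loop's control flow explicit in VPlan: a canonical IV phi starting at 0, an increment by VF * UF, and a latch branch on the count. A rough scalar analogue, assuming the usual case where the vector trip count is a non-zero multiple of VF * UF (illustrative only):

#include <cstdint>

void vectorLoopSkeleton(uint64_t VectorTripCount, uint64_t VF, uint64_t UF) {
  uint64_t Index = 0;                   // VPCanonicalIVPHIRecipe, start = 0
  do {
    // ... widened loop body ...
    Index += VF * UF;                   // CanonicalIVIncrement{NUW}
  } while (Index != VectorTripCount);   // BranchOnCount: exit once equal
}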
VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
const MapVector<Instruction *, Instruction *> &SinkAfter) {
@@ -9033,6 +9077,12 @@ VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop");
auto Plan = std::make_unique<VPlan>(TopRegion);
+ Instruction *DLInst =
+ getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
+ addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(),
+ DLInst ? DLInst->getDebugLoc() : DebugLoc(),
+ !CM.foldTailByMasking(), false);
+
// Scan the body of the loop in a topological order to visit each basic block
// after having visited its predecessor basic blocks.
LoopBlocksDFS DFS(OrigLoop);
@@ -9194,6 +9244,7 @@ VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
}
}
+ VPlanTransforms::removeRedundantCanonicalIVs(*Plan);
VPlanTransforms::removeRedundantInductionCasts(*Plan);
// Now that sink-after is done, move induction recipes for optimized truncates
@@ -9325,6 +9376,9 @@ VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
OrigLoop, Plan,
[this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); },
DeadInstructions, *PSE.getSE());
+
+ addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), DebugLoc(),
+ true, true);
return Plan;
}
@@ -9414,16 +9468,19 @@ void LoopVectorizationPlanner::adjustRecipesForReductions(
}
// If tail is folded by masking, introduce selects between the phi
- // and the live-out instruction of each reduction, at the end of the latch.
+ // and the live-out instruction of each reduction, at the beginning of the
+ // dedicated latch block.
if (CM.foldTailByMasking()) {
+ Builder.setInsertPoint(LatchVPBB, LatchVPBB->begin());
for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) {
VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
if (!PhiR || PhiR->isInLoop())
continue;
- Builder.setInsertPoint(LatchVPBB);
VPValue *Cond =
RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
VPValue *Red = PhiR->getBackedgeValue();
+ assert(cast<VPRecipeBase>(Red->getDef())->getParent() != LatchVPBB &&
+ "reduction recipe must be defined before latch");
Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR});
}
}
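In scalar terms, the select placed at the start of the dedicated latch keeps the previous accumulator for masked-off lanes, so a folded tail cannot corrupt the reduction. A minimal sketch assuming an integer add reduction (illustrative names, one lane at a time):

#include <cstdint>

uint64_t maskedSum(const uint64_t *A, uint64_t TC, uint64_t Step) {
  uint64_t Phi = 0;                              // reduction phi start value
  uint64_t PaddedTC = ((TC + Step - 1) / Step) * Step;
  for (uint64_t IV = 0; IV < PaddedTC; ++IV) {
    bool Active = IV < TC;                       // block-in mask for this lane
    uint64_t Red = Phi + (Active ? A[IV] : 0);   // live-out of the reduction
    Phi = Active ? Red : Phi;                    // select(Cond, Red, Phi)
  }
  return Phi;
}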
@@ -9682,9 +9739,8 @@ void VPWidenGEPRecipe::execute(VPTransformState &State) {
void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
assert(!State.Instance && "Int or FP induction being replicated.");
- State.ILV->widenIntOrFpInduction(IV, getInductionDescriptor(),
- getStartValue()->getLiveInIRValue(),
- getTruncInst(), getVPValue(0), State);
+ auto *CanonicalIV = State.get(getParent()->getPlan()->getCanonicalIV(), 0);
+ State.ILV->widenIntOrFpInduction(IV, this, State, CanonicalIV);
}
void VPWidenPHIRecipe::execute(VPTransformState &State) {
@@ -10013,7 +10069,7 @@ void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
}
- State.set(getVPSingleValue(), NewLI, Part);
+ State.set(this, NewLI, Part);
}
}
@@ -10561,6 +10617,21 @@ bool LoopVectorizePass::processLoop(Loop *L) {
Checks);
VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF);
+
+ // Ensure that the start values for any VPReductionPHIRecipes are
+ // updated before vectorising the epilogue loop.
+ VPBasicBlock *Header = BestEpiPlan.getEntry()->getEntryBasicBlock();
+ for (VPRecipeBase &R : Header->phis()) {
+ if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
+ if (auto *Resume = MainILV.getReductionResumeValue(
+ ReductionPhi->getRecurrenceDescriptor())) {
+ VPValue *StartVal = new VPValue(Resume);
+ BestEpiPlan.addExternalDef(StartVal);
+ ReductionPhi->setOperand(0, StartVal);
+ }
+ }
+ }
+
LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV,
DT);
++LoopsEpilogueVectorized;
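The loop above rewires each epilogue reduction phi to start from the main loop's partial result rather than the neutral value, so the epilogue continues the reduction instead of restarting it. A scalar sketch of that hand-off (illustrative only):

#include <cstdint>

uint64_t sumWithEpilogue(const uint64_t *A, uint64_t TC, uint64_t MainStep) {
  uint64_t MainTC = (TC / MainStep) * MainStep;
  uint64_t Acc = 0;                  // main vector loop reduction phi
  for (uint64_t I = 0; I < MainTC; ++I)
    Acc += A[I];
  uint64_t Resume = Acc;             // getReductionResumeValue(...)
  uint64_t EpiAcc = Resume;          // epilogue phi start operand := Resume
  for (uint64_t I = MainTC; I < TC; ++I)
    EpiAcc += A[I];
  return EpiAcc;
}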
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 37ae13666f7a..99c265fc5101 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -435,7 +435,7 @@ struct InstructionsState {
}
/// Some of the instructions in the list have alternate opcodes.
- bool isAltShuffle() const { return getOpcode() != getAltOpcode(); }
+ bool isAltShuffle() const { return AltOp != MainOp; }
bool isOpcodeOrAlt(Instruction *I) const {
unsigned CheckedOpcode = I->getOpcode();
@@ -581,7 +581,7 @@ static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
}
/// \returns the AA location that is being accessed by the instruction.
-static MemoryLocation getLocation(Instruction *I, AAResults *AA) {
+static MemoryLocation getLocation(Instruction *I) {
if (StoreInst *SI = dyn_cast<StoreInst>(I))
return MemoryLocation::get(SI);
if (LoadInst *LI = dyn_cast<LoadInst>(I))
@@ -1417,7 +1417,11 @@ public:
HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane);
} else if (NumFreeOpsHash.NumOfAPOs == Min &&
NumFreeOpsHash.NumOpsWithSameOpcodeParent == SameOpNumber) {
- ++HashMap[NumFreeOpsHash.Hash].first;
+ auto It = HashMap.find(NumFreeOpsHash.Hash);
+ if (It == HashMap.end())
+ HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane);
+ else
+ ++It->second.first;
}
}
// Select the lane with the minimum counter.
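The change above avoids DenseMap::operator[] default-constructing a missing entry, which would record lane 0 for a hash first seen on this path. A standalone sketch of the pitfall and the fix, using std::unordered_map in place of DenseMap (illustrative only):

#include <cassert>
#include <unordered_map>
#include <utility>

int main() {
  std::unordered_map<unsigned, std::pair<unsigned, int>> HashMap;
  const unsigned Hash = 42;
  const int Lane = 3;

  // Buggy pattern: ++HashMap[Hash].first creates {0, 0} first, losing Lane.
  // Fixed pattern: insert {1, Lane} when absent, otherwise bump the counter.
  auto It = HashMap.find(Hash);
  if (It == HashMap.end())
    HashMap[Hash] = std::make_pair(1u, Lane);
  else
    ++It->second.first;

  assert(HashMap[Hash] == std::make_pair(1u, Lane));
  return 0;
}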
@@ -2019,9 +2023,7 @@ private:
}
/// Some of the instructions in the list have alternate opcodes.
- bool isAltShuffle() const {
- return getOpcode() != getAltOpcode();
- }
+ bool isAltShuffle() const { return MainOp != AltOp; }
bool isOpcodeOrAlt(Instruction *I) const {
unsigned CheckedOpcode = I->getOpcode();
@@ -2519,12 +2521,11 @@ private:
SD->IsScheduled = true;
LLVM_DEBUG(dbgs() << "SLP: schedule " << *SD << "\n");
- ScheduleData *BundleMember = SD;
- while (BundleMember) {
- if (BundleMember->Inst != BundleMember->OpValue) {
- BundleMember = BundleMember->NextInBundle;
+ for (ScheduleData *BundleMember = SD; BundleMember;
+ BundleMember = BundleMember->NextInBundle) {
+ if (BundleMember->Inst != BundleMember->OpValue)
continue;
- }
+
// Handle the def-use chain dependencies.
// Decrement the unscheduled counter and insert to ready list if ready.
@@ -2589,7 +2590,6 @@ private:
<< "SLP: gets ready (mem): " << *DepBundle << "\n");
}
}
- BundleMember = BundleMember->NextInBundle;
}
}
@@ -2618,6 +2618,10 @@ private:
}
}
+ /// Build a bundle from the ScheduleData nodes corresponding to the
+ /// scalar instruction for each lane.
+ ScheduleData *buildBundle(ArrayRef<Value *> VL);
+
/// Checks if a bundle of instructions can be scheduled, i.e. has no
/// cyclic dependencies. This is only a dry-run, no instructions are
/// actually moved at this stage.
@@ -3040,7 +3044,7 @@ Optional<BoUpSLP::OrdersType> BoUpSLP::getReorderingData(const TreeEntry &TE,
void BoUpSLP::reorderTopToBottom() {
// Maps VF to the graph nodes.
- DenseMap<unsigned, SmallPtrSet<TreeEntry *, 4>> VFToOrderedEntries;
+ DenseMap<unsigned, SetVector<TreeEntry *>> VFToOrderedEntries;
// ExtractElement gather nodes which can be vectorized and need to handle
// their ordering.
DenseMap<const TreeEntry *, OrdersType> GathersToOrders;
@@ -3051,6 +3055,29 @@ void BoUpSLP::reorderTopToBottom() {
const std::unique_ptr<TreeEntry> &TE) {
if (Optional<OrdersType> CurrentOrder =
getReorderingData(*TE.get(), /*TopToBottom=*/true)) {
+ // Do not include ordering for nodes used in the alt opcode vectorization;
+ // it is better to reorder them during the bottom-to-top stage. If we follow
+ // the order here, it causes reordering of the whole graph, though actually
+ // it is profitable just to reorder the subgraph that starts from the
+ // alternate opcode vectorization node. Such nodes already end up with the
+ // shuffle instruction and it is enough to change this shuffle rather than
+ // rotate the scalars for the whole graph.
+ unsigned Cnt = 0;
+ const TreeEntry *UserTE = TE.get();
+ while (UserTE && Cnt < RecursionMaxDepth) {
+ if (UserTE->UserTreeIndices.size() != 1)
+ break;
+ if (all_of(UserTE->UserTreeIndices, [](const EdgeInfo &EI) {
+ return EI.UserTE->State == TreeEntry::Vectorize &&
+ EI.UserTE->isAltShuffle() && EI.UserTE->Idx != 0;
+ }))
+ return;
+ if (UserTE->UserTreeIndices.empty())
+ UserTE = nullptr;
+ else
+ UserTE = UserTE->UserTreeIndices.back().UserTE;
+ ++Cnt;
+ }
VFToOrderedEntries[TE->Scalars.size()].insert(TE.get());
if (TE->State != TreeEntry::Vectorize)
GathersToOrders.try_emplace(TE.get(), *CurrentOrder);
@@ -3066,7 +3093,7 @@ void BoUpSLP::reorderTopToBottom() {
// Try to find the most profitable order. We just are looking for the most
// used order and reorder scalar elements in the nodes according to this
// mostly used order.
- const SmallPtrSetImpl<TreeEntry *> &OrderedEntries = It->getSecond();
+ ArrayRef<TreeEntry *> OrderedEntries = It->second.getArrayRef();
// All operands are reordered and used only in this node - propagate the
// most used order to the user node.
MapVector<OrdersType, unsigned,
@@ -4459,6 +4486,8 @@ bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
CurrentOrder.clear();
return false;
}
+ if (ShouldKeepOrder)
+ CurrentOrder.clear();
return ShouldKeepOrder;
}
@@ -7202,6 +7231,33 @@ void BoUpSLP::optimizeGatherSequence() {
GatherShuffleSeq.clear();
}
+BoUpSLP::ScheduleData *
+BoUpSLP::BlockScheduling::buildBundle(ArrayRef<Value *> VL) {
+ ScheduleData *Bundle = nullptr;
+ ScheduleData *PrevInBundle = nullptr;
+ for (Value *V : VL) {
+ ScheduleData *BundleMember = getScheduleData(V);
+ assert(BundleMember &&
+ "no ScheduleData for bundle member "
+ "(maybe not in same basic block)");
+ assert(BundleMember->isSchedulingEntity() &&
+ "bundle member already part of other bundle");
+ if (PrevInBundle) {
+ PrevInBundle->NextInBundle = BundleMember;
+ } else {
+ Bundle = BundleMember;
+ }
+ BundleMember->UnscheduledDepsInBundle = 0;
+ Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps;
+
+ // Group the instructions to a bundle.
+ BundleMember->FirstInBundle = Bundle;
+ PrevInBundle = BundleMember;
+ }
+ assert(Bundle && "Failed to find schedule bundle");
+ return Bundle;
+}
+
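The helper factored out above chains the per-scalar ScheduleData nodes into a bundle: a singly linked list through NextInBundle whose head collects the members' unscheduled dependencies. A simplified standalone model, with made-up node types rather than the SLP ones:

#include <vector>

struct Node {
  Node *FirstInBundle = nullptr;
  Node *NextInBundle = nullptr;
  int UnscheduledDeps = 0;
  int UnscheduledDepsInBundle = 0;
};

Node *buildBundle(const std::vector<Node *> &Members) {
  Node *Bundle = nullptr, *Prev = nullptr;
  for (Node *M : Members) {
    if (Prev)
      Prev->NextInBundle = M;       // chain the bundle members
    else
      Bundle = M;                   // first member becomes the head
    M->UnscheduledDepsInBundle = 0;
    Bundle->UnscheduledDepsInBundle += M->UnscheduledDeps;
    M->FirstInBundle = Bundle;      // every member points at the head
    Prev = M;
  }
  return Bundle;
}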
// Groups the instructions to a bundle (which is then a single scheduling entity)
// and schedules instructions until the bundle gets ready.
Optional<BoUpSLP::ScheduleData *>
@@ -7214,12 +7270,9 @@ BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
// Initialize the instruction bundle.
Instruction *OldScheduleEnd = ScheduleEnd;
- ScheduleData *PrevInBundle = nullptr;
- ScheduleData *Bundle = nullptr;
- bool ReSchedule = false;
LLVM_DEBUG(dbgs() << "SLP: bundle: " << *S.OpValue << "\n");
- auto &&TryScheduleBundle = [this, OldScheduleEnd, SLP](bool ReSchedule,
+ auto TryScheduleBundleImpl = [this, OldScheduleEnd, SLP](bool ReSchedule,
ScheduleData *Bundle) {
// The scheduling region got new instructions at the lower end (or it is a
// new region for the first bundle). This makes it necessary to
@@ -7263,39 +7316,28 @@ BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
// Otherwise the compiler may crash trying to incorrectly calculate
// dependencies and emit instruction in the wrong order at the actual
// scheduling.
- TryScheduleBundle(/*ReSchedule=*/false, nullptr);
+ TryScheduleBundleImpl(/*ReSchedule=*/false, nullptr);
return None;
}
}
+ bool ReSchedule = false;
for (Value *V : VL) {
ScheduleData *BundleMember = getScheduleData(V);
assert(BundleMember &&
"no ScheduleData for bundle member (maybe not in same basic block)");
- if (BundleMember->IsScheduled) {
- // A bundle member was scheduled as single instruction before and now
- // needs to be scheduled as part of the bundle. We just get rid of the
- // existing schedule.
- LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember
- << " was already scheduled\n");
- ReSchedule = true;
- }
- assert(BundleMember->isSchedulingEntity() &&
- "bundle member already part of other bundle");
- if (PrevInBundle) {
- PrevInBundle->NextInBundle = BundleMember;
- } else {
- Bundle = BundleMember;
- }
- BundleMember->UnscheduledDepsInBundle = 0;
- Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps;
-
- // Group the instructions to a bundle.
- BundleMember->FirstInBundle = Bundle;
- PrevInBundle = BundleMember;
+ if (!BundleMember->IsScheduled)
+ continue;
+ // A bundle member was scheduled as single instruction before and now
+ // needs to be scheduled as part of the bundle. We just get rid of the
+ // existing schedule.
+ LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember
+ << " was already scheduled\n");
+ ReSchedule = true;
}
- assert(Bundle && "Failed to find schedule bundle");
- TryScheduleBundle(ReSchedule, Bundle);
+
+ auto *Bundle = buildBundle(VL);
+ TryScheduleBundleImpl(ReSchedule, Bundle);
if (!Bundle->isReady()) {
cancelScheduling(VL, S.OpValue);
return None;
@@ -7464,20 +7506,33 @@ void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
while (!WorkList.empty()) {
ScheduleData *SD = WorkList.pop_back_val();
-
- ScheduleData *BundleMember = SD;
- while (BundleMember) {
+ for (ScheduleData *BundleMember = SD; BundleMember;
+ BundleMember = BundleMember->NextInBundle) {
assert(isInSchedulingRegion(BundleMember));
- if (!BundleMember->hasValidDependencies()) {
-
- LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember
- << "\n");
- BundleMember->Dependencies = 0;
- BundleMember->resetUnscheduledDeps();
+ if (BundleMember->hasValidDependencies())
+ continue;
- // Handle def-use chain dependencies.
- if (BundleMember->OpValue != BundleMember->Inst) {
- ScheduleData *UseSD = getScheduleData(BundleMember->Inst);
+ LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember
+ << "\n");
+ BundleMember->Dependencies = 0;
+ BundleMember->resetUnscheduledDeps();
+
+ // Handle def-use chain dependencies.
+ if (BundleMember->OpValue != BundleMember->Inst) {
+ ScheduleData *UseSD = getScheduleData(BundleMember->Inst);
+ if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
+ BundleMember->Dependencies++;
+ ScheduleData *DestBundle = UseSD->FirstInBundle;
+ if (!DestBundle->IsScheduled)
+ BundleMember->incrementUnscheduledDeps(1);
+ if (!DestBundle->hasValidDependencies())
+ WorkList.push_back(DestBundle);
+ }
+ } else {
+ for (User *U : BundleMember->Inst->users()) {
+ assert(isa<Instruction>(U) &&
+ "user of instruction must be instruction");
+ ScheduleData *UseSD = getScheduleData(U);
if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
BundleMember->Dependencies++;
ScheduleData *DestBundle = UseSD->FirstInBundle;
@@ -7486,89 +7541,69 @@ void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
if (!DestBundle->hasValidDependencies())
WorkList.push_back(DestBundle);
}
- } else {
- for (User *U : BundleMember->Inst->users()) {
- if (isa<Instruction>(U)) {
- ScheduleData *UseSD = getScheduleData(U);
- if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
- BundleMember->Dependencies++;
- ScheduleData *DestBundle = UseSD->FirstInBundle;
- if (!DestBundle->IsScheduled)
- BundleMember->incrementUnscheduledDeps(1);
- if (!DestBundle->hasValidDependencies())
- WorkList.push_back(DestBundle);
- }
- } else {
- // I'm not sure if this can ever happen. But we need to be safe.
- // This lets the instruction/bundle never be scheduled and
- // eventually disable vectorization.
- BundleMember->Dependencies++;
- BundleMember->incrementUnscheduledDeps(1);
- }
- }
}
+ }
- // Handle the memory dependencies.
- ScheduleData *DepDest = BundleMember->NextLoadStore;
- if (DepDest) {
- Instruction *SrcInst = BundleMember->Inst;
- MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA);
- bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
- unsigned numAliased = 0;
- unsigned DistToSrc = 1;
-
- while (DepDest) {
- assert(isInSchedulingRegion(DepDest));
-
- // We have two limits to reduce the complexity:
- // 1) AliasedCheckLimit: It's a small limit to reduce calls to
- // SLP->isAliased (which is the expensive part in this loop).
- // 2) MaxMemDepDistance: It's for very large blocks and it aborts
- // the whole loop (even if the loop is fast, it's quadratic).
- // It's important for the loop break condition (see below) to
- // check this limit even between two read-only instructions.
- if (DistToSrc >= MaxMemDepDistance ||
- ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
- (numAliased >= AliasedCheckLimit ||
- SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {
-
- // We increment the counter only if the locations are aliased
- // (instead of counting all alias checks). This gives a better
- // balance between reduced runtime and accurate dependencies.
- numAliased++;
-
- DepDest->MemoryDependencies.push_back(BundleMember);
- BundleMember->Dependencies++;
- ScheduleData *DestBundle = DepDest->FirstInBundle;
- if (!DestBundle->IsScheduled) {
- BundleMember->incrementUnscheduledDeps(1);
- }
- if (!DestBundle->hasValidDependencies()) {
- WorkList.push_back(DestBundle);
- }
- }
- DepDest = DepDest->NextLoadStore;
-
- // Example, explaining the loop break condition: Let's assume our
- // starting instruction is i0 and MaxMemDepDistance = 3.
- //
- // +--------v--v--v
- // i0,i1,i2,i3,i4,i5,i6,i7,i8
- // +--------^--^--^
- //
- // MaxMemDepDistance let us stop alias-checking at i3 and we add
- // dependencies from i0 to i3,i4,.. (even if they are not aliased).
- // Previously we already added dependencies from i3 to i6,i7,i8
- // (because of MaxMemDepDistance). As we added a dependency from
- // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8
- // and we can abort this loop at i6.
- if (DistToSrc >= 2 * MaxMemDepDistance)
- break;
- DistToSrc++;
+ // Handle the memory dependencies (if any).
+ ScheduleData *DepDest = BundleMember->NextLoadStore;
+ if (!DepDest)
+ continue;
+ Instruction *SrcInst = BundleMember->Inst;
+ assert(SrcInst->mayReadOrWriteMemory() &&
+ "NextLoadStore list for non memory effecting bundle?");
+ MemoryLocation SrcLoc = getLocation(SrcInst);
+ bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
+ unsigned numAliased = 0;
+ unsigned DistToSrc = 1;
+
+ for ( ; DepDest; DepDest = DepDest->NextLoadStore) {
+ assert(isInSchedulingRegion(DepDest));
+
+ // We have two limits to reduce the complexity:
+ // 1) AliasedCheckLimit: It's a small limit to reduce calls to
+ // SLP->isAliased (which is the expensive part in this loop).
+ // 2) MaxMemDepDistance: It's for very large blocks and it aborts
+ // the whole loop (even if the loop is fast, it's quadratic).
+ // It's important for the loop break condition (see below) to
+ // check this limit even between two read-only instructions.
+ if (DistToSrc >= MaxMemDepDistance ||
+ ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
+ (numAliased >= AliasedCheckLimit ||
+ SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {
+
+ // We increment the counter only if the locations are aliased
+ // (instead of counting all alias checks). This gives a better
+ // balance between reduced runtime and accurate dependencies.
+ numAliased++;
+
+ DepDest->MemoryDependencies.push_back(BundleMember);
+ BundleMember->Dependencies++;
+ ScheduleData *DestBundle = DepDest->FirstInBundle;
+ if (!DestBundle->IsScheduled) {
+ BundleMember->incrementUnscheduledDeps(1);
+ }
+ if (!DestBundle->hasValidDependencies()) {
+ WorkList.push_back(DestBundle);
}
}
+
+ // Example, explaining the loop break condition: Let's assume our
+ // starting instruction is i0 and MaxMemDepDistance = 3.
+ //
+ // +--------v--v--v
+ // i0,i1,i2,i3,i4,i5,i6,i7,i8
+ // +--------^--^--^
+ //
+ // MaxMemDepDistance let us stop alias-checking at i3 and we add
+ // dependencies from i0 to i3,i4,.. (even if they are not aliased).
+ // Previously we already added dependencies from i3 to i6,i7,i8
+ // (because of MaxMemDepDistance). As we added a dependency from
+ // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8
+ // and we can abort this loop at i6.
+ if (DistToSrc >= 2 * MaxMemDepDistance)
+ break;
+ DistToSrc++;
}
- BundleMember = BundleMember->NextInBundle;
}
if (InsertInReadyList && SD->isReady()) {
ReadyInsts.push_back(SD);
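The restructured loop above still applies the two complexity limits described in the comments: alias queries stop counting after AliasedCheckLimit aliased hits, and the scan aborts once the distance to the source reaches twice MaxMemDepDistance. A simplified standalone model of that scan, with made-up limit values and node types:

#include <cstddef>
#include <functional>
#include <vector>

struct MemNode { bool MayWrite; };

std::vector<size_t>
scanMemDeps(const std::vector<MemNode> &Nodes, size_t Src,
            const std::function<bool(size_t, size_t)> &MayAlias) {
  const unsigned MaxMemDepDistance = 160, AliasedCheckLimit = 10;
  std::vector<size_t> Deps;
  unsigned NumAliased = 0, DistToSrc = 1;
  for (size_t I = Src + 1; I < Nodes.size(); ++I) {
    if (DistToSrc >= MaxMemDepDistance ||
        ((Nodes[Src].MayWrite || Nodes[I].MayWrite) &&
         (NumAliased >= AliasedCheckLimit || MayAlias(Src, I)))) {
      ++NumAliased;        // only counted when a dependency is actually added
      Deps.push_back(I);   // I now depends on Src
    }
    if (DistToSrc >= 2 * MaxMemDepDistance)
      break;               // transitive dependencies cover the rest
    ++DistToSrc;
  }
  return Deps;
}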
@@ -7638,8 +7673,8 @@ void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
// Move the scheduled instruction(s) to their dedicated places, if not
// there yet.
- ScheduleData *BundleMember = picked;
- while (BundleMember) {
+ for (ScheduleData *BundleMember = picked; BundleMember;
+ BundleMember = BundleMember->NextInBundle) {
Instruction *pickedInst = BundleMember->Inst;
if (pickedInst->getNextNode() != LastScheduledInst) {
BS->BB->getInstList().remove(pickedInst);
@@ -7647,7 +7682,6 @@ void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
pickedInst);
}
LastScheduledInst = pickedInst;
- BundleMember = BundleMember->NextInBundle;
}
BS->schedule(picked, ReadyInsts);
@@ -8045,8 +8079,11 @@ bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
// If the target claims to have no vector registers don't attempt
// vectorization.
- if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)))
+ if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true))) {
+ LLVM_DEBUG(
+ dbgs() << "SLP: Didn't find any vector registers for target, abort.\n");
return false;
+ }
// Don't vectorize when the attribute NoImplicitFloat is used.
if (F.hasFnAttribute(Attribute::NoImplicitFloat))
@@ -8693,7 +8730,6 @@ class HorizontalReduction {
static RecurKind getRdxKind(Instruction *I) {
assert(I && "Expected instruction for reduction matching");
- TargetTransformInfo::ReductionFlags RdxFlags;
if (match(I, m_Add(m_Value(), m_Value())))
return RecurKind::Add;
if (match(I, m_Mul(m_Value(), m_Value())))
@@ -8767,7 +8803,6 @@ class HorizontalReduction {
return RecurKind::None;
}
- TargetTransformInfo::ReductionFlags RdxFlags;
switch (Pred) {
default:
return RecurKind::None;
@@ -9206,7 +9241,7 @@ private:
auto *SclCondTy = CmpInst::makeCmpResultType(ScalarTy);
auto *VecCondTy = cast<VectorType>(CmpInst::makeCmpResultType(VectorTy));
VectorCost = TTI->getMinMaxReductionCost(VectorTy, VecCondTy,
- /*unsigned=*/false, CostKind);
+ /*IsUnsigned=*/false, CostKind);
CmpInst::Predicate RdxPred = getMinMaxReductionPredicate(RdxKind);
ScalarCost = TTI->getCmpSelInstrCost(Instruction::FCmp, ScalarTy,
SclCondTy, RdxPred, CostKind) +
@@ -9571,8 +9606,7 @@ bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI,
return false;
LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n");
- // Aggregate value is unlikely to be processed in vector register, we need to
- // extract scalars into scalar registers, so NeedExtraction is set true.
+ // Aggregate value is unlikely to be processed in vector register.
return tryToVectorizeList(BuildVectorOpds, R);
}
@@ -9598,7 +9632,7 @@ tryToVectorizeSequence(SmallVectorImpl<T *> &Incoming,
function_ref<unsigned(T *)> Limit,
function_ref<bool(T *, T *)> Comparator,
function_ref<bool(T *, T *)> AreCompatible,
- function_ref<bool(ArrayRef<T *>, bool)> TryToVectorize,
+ function_ref<bool(ArrayRef<T *>, bool)> TryToVectorizeHelper,
bool LimitForRegisterSize) {
bool Changed = false;
// Sort by type, parent, operands.
@@ -9627,7 +9661,7 @@ tryToVectorizeSequence(SmallVectorImpl<T *> &Incoming,
// same/alternate ops only, this may result in some extra final
// vectorization.
if (NumElts > 1 &&
- TryToVectorize(makeArrayRef(IncIt, NumElts), LimitForRegisterSize)) {
+ TryToVectorizeHelper(makeArrayRef(IncIt, NumElts), LimitForRegisterSize)) {
// Success start over because instructions might have been changed.
Changed = true;
} else if (NumElts < Limit(*IncIt) &&
@@ -9638,7 +9672,7 @@ tryToVectorizeSequence(SmallVectorImpl<T *> &Incoming,
// Final attempt to vectorize instructions with the same types.
if (Candidates.size() > 1 &&
(SameTypeIt == E || (*SameTypeIt)->getType() != (*IncIt)->getType())) {
- if (TryToVectorize(Candidates, /*LimitForRegisterSize=*/false)) {
+ if (TryToVectorizeHelper(Candidates, /*LimitForRegisterSize=*/false)) {
// Success start over because instructions might have been changed.
Changed = true;
} else if (LimitForRegisterSize) {
@@ -9649,7 +9683,7 @@ tryToVectorizeSequence(SmallVectorImpl<T *> &Incoming,
while (SameTypeIt != End && AreCompatible(*SameTypeIt, *It))
++SameTypeIt;
unsigned NumElts = (SameTypeIt - It);
- if (NumElts > 1 && TryToVectorize(makeArrayRef(It, NumElts),
+ if (NumElts > 1 && TryToVectorizeHelper(makeArrayRef(It, NumElts),
/*LimitForRegisterSize=*/false))
Changed = true;
It = SameTypeIt;
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
index 65857f034210..e5dded3c0f1e 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
+++ b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
@@ -59,7 +59,7 @@ class VPRecipeBuilder {
/// Cross-iteration reduction & first-order recurrence phis for which we need
/// to add the incoming value from the backedge after all recipes have been
/// created.
- SmallVector<VPWidenPHIRecipe *, 4> PhisToFix;
+ SmallVector<VPHeaderPHIRecipe *, 4> PhisToFix;
/// Check if \p I can be widened at the start of \p Range and possibly
/// decrease the range such that the returned value holds for the entire \p
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlan.cpp b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlan.cpp
index 1d9e71663cd2..a96c122db2a9 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlan.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlan.cpp
@@ -677,10 +677,10 @@ void VPInstruction::generateInstruction(VPTransformState &State,
// Get first lane of vector induction variable.
Value *VIVElem0 = State.get(getOperand(0), VPIteration(Part, 0));
// Get the original loop tripcount.
- Value *ScalarTC = State.TripCount;
+ Value *ScalarTC = State.get(getOperand(1), Part);
auto *Int1Ty = Type::getInt1Ty(Builder.getContext());
- auto *PredTy = FixedVectorType::get(Int1Ty, State.VF.getKnownMinValue());
+ auto *PredTy = VectorType::get(Int1Ty, State.VF);
Instruction *Call = Builder.CreateIntrinsic(
Intrinsic::get_active_lane_mask, {PredTy, ScalarTC->getType()},
{VIVElem0, ScalarTC}, nullptr, "active.lane.mask");
@@ -711,6 +711,51 @@ void VPInstruction::generateInstruction(VPTransformState &State,
}
break;
}
+
+ case VPInstruction::CanonicalIVIncrement:
+ case VPInstruction::CanonicalIVIncrementNUW: {
+ Value *Next = nullptr;
+ if (Part == 0) {
+ bool IsNUW = getOpcode() == VPInstruction::CanonicalIVIncrementNUW;
+ auto *Phi = State.get(getOperand(0), 0);
+ // The loop step is equal to the vectorization factor (num of SIMD
+ // elements) times the unroll factor (num of SIMD instructions).
+ Value *Step =
+ createStepForVF(Builder, Phi->getType(), State.VF, State.UF);
+ Next = Builder.CreateAdd(Phi, Step, "index.next", IsNUW, false);
+ } else {
+ Next = State.get(this, 0);
+ }
+
+ State.set(this, Next, Part);
+ break;
+ }
+ case VPInstruction::BranchOnCount: {
+ if (Part != 0)
+ break;
+ // First create the compare.
+ Value *IV = State.get(getOperand(0), Part);
+ Value *TC = State.get(getOperand(1), Part);
+ Value *Cond = Builder.CreateICmpEQ(IV, TC);
+
+ // Now create the branch.
+ auto *Plan = getParent()->getPlan();
+ VPRegionBlock *TopRegion = Plan->getVectorLoopRegion();
+ VPBasicBlock *Header = TopRegion->getEntry()->getEntryBasicBlock();
+ if (Header->empty()) {
+ assert(EnableVPlanNativePath &&
+ "empty entry block only expected in VPlanNativePath");
+ Header = cast<VPBasicBlock>(Header->getSingleSuccessor());
+ }
+ // TODO: Once the exit block is modeled in VPlan, use it instead of going
+ // through State.CFG.LastBB.
+ BasicBlock *Exit =
+ cast<BranchInst>(State.CFG.LastBB->getTerminator())->getSuccessor(0);
+
+ Builder.CreateCondBr(Cond, Exit, State.CFG.VPBB2IRBB[Header]);
+ Builder.GetInsertBlock()->getTerminator()->eraseFromParent();
+ break;
+ }
default:
llvm_unreachable("Unsupported opcode for instruction");
}
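createStepForVF above produces the per-increment step VF * UF; for scalable vectors the known-minimum VF is additionally scaled by the runtime vscale. A plain-C++ model of that value (assumed simplification, not the IRBuilder calls):

#include <cstdint>

uint64_t canonicalIVStep(uint64_t KnownMinVF, bool ScalableVF, uint64_t VScale,
                         uint64_t UF) {
  uint64_t Step = KnownMinVF * UF;          // fixed-width vectors
  return ScalableVF ? Step * VScale : Step; // scalable: multiply by vscale
}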
@@ -758,6 +803,15 @@ void VPInstruction::print(raw_ostream &O, const Twine &Indent,
case VPInstruction::FirstOrderRecurrenceSplice:
O << "first-order splice";
break;
+ case VPInstruction::CanonicalIVIncrement:
+ O << "VF * UF + ";
+ break;
+ case VPInstruction::CanonicalIVIncrementNUW:
+ O << "VF * UF +(nuw) ";
+ break;
+ case VPInstruction::BranchOnCount:
+ O << "branch-on-count ";
+ break;
default:
O << Instruction::getOpcodeName(getOpcode());
}
@@ -786,23 +840,55 @@ void VPInstruction::setFastMathFlags(FastMathFlags FMFNew) {
FMF = FMFNew;
}
-/// Generate the code inside the body of the vectorized loop. Assumes a single
-/// LoopVectorBody basic-block was created for this. Introduce additional
-/// basic-blocks as needed, and fill them all.
-void VPlan::execute(VPTransformState *State) {
- // -1. Check if the backedge taken count is needed, and if so build it.
+void VPlan::prepareToExecute(Value *TripCountV, Value *VectorTripCountV,
+ Value *CanonicalIVStartValue,
+ VPTransformState &State) {
+ // Check if the trip count is needed, and if so build it.
+ if (TripCount && TripCount->getNumUsers()) {
+ for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part)
+ State.set(TripCount, TripCountV, Part);
+ }
+
+ // Check if the backedge taken count is needed, and if so build it.
if (BackedgeTakenCount && BackedgeTakenCount->getNumUsers()) {
- Value *TC = State->TripCount;
- IRBuilder<> Builder(State->CFG.PrevBB->getTerminator());
- auto *TCMO = Builder.CreateSub(TC, ConstantInt::get(TC->getType(), 1),
+ IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
+ auto *TCMO = Builder.CreateSub(TripCountV,
+ ConstantInt::get(TripCountV->getType(), 1),
"trip.count.minus.1");
- auto VF = State->VF;
+ auto VF = State.VF;
Value *VTCMO =
VF.isScalar() ? TCMO : Builder.CreateVectorSplat(VF, TCMO, "broadcast");
- for (unsigned Part = 0, UF = State->UF; Part < UF; ++Part)
- State->set(BackedgeTakenCount, VTCMO, Part);
+ for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part)
+ State.set(BackedgeTakenCount, VTCMO, Part);
}
+ for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part)
+ State.set(&VectorTripCount, VectorTripCountV, Part);
+
+ // When vectorizing the epilogue loop, the canonical induction start value
+ // needs to be changed from zero to the value after the main vector loop.
+ if (CanonicalIVStartValue) {
+ VPValue *VPV = new VPValue(CanonicalIVStartValue);
+ addExternalDef(VPV);
+ auto *IV = getCanonicalIV();
+ assert(all_of(IV->users(),
+ [](const VPUser *U) {
+ auto *VPI = cast<VPInstruction>(U);
+ return VPI->getOpcode() ==
+ VPInstruction::CanonicalIVIncrement ||
+ VPI->getOpcode() ==
+ VPInstruction::CanonicalIVIncrementNUW;
+ }) &&
+ "the canonical IV should only be used by its increments when "
+ "resetting the start value");
+ IV->setOperand(0, VPV);
+ }
+}
+
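prepareToExecute materialises the handful of live-in values that recipes may use during codegen. A simplified scalar summary of what gets bound, assuming no tail folding for the vector-trip-count rounding (illustrative struct, not the VPlan types):

#include <cstdint>

struct LiveIns {
  uint64_t TripCount;          // original loop trip count
  uint64_t BackedgeTakenCount; // TripCount - 1
  uint64_t VectorTripCount;    // trip count rounded to a multiple of VF * UF
  uint64_t CanonicalIVStart;   // 0, or the resume value for the epilogue plan
};

LiveIns prepareLiveIns(uint64_t TC, uint64_t VF, uint64_t UF,
                       uint64_t EpilogueResume = 0) {
  LiveIns L;
  L.TripCount = TC;
  L.BackedgeTakenCount = TC - 1;
  L.VectorTripCount = (TC / (VF * UF)) * (VF * UF);
  L.CanonicalIVStart = EpilogueResume;
  return L;
}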
+/// Generate the code inside the body of the vectorized loop. Assumes a single
+/// LoopVectorBody basic-block was created for this. Introduce additional
+/// basic-blocks as needed, and fill them all.
+void VPlan::execute(VPTransformState *State) {
// 0. Set the reverse mapping from VPValues to Values for code generation.
for (auto &Entry : Value2VPValue)
State->VPValue2Value[Entry.second] = Entry.first;
@@ -834,28 +920,6 @@ void VPlan::execute(VPTransformState *State) {
for (VPBlockBase *Block : depth_first(Entry))
Block->execute(State);
- // Fix the latch value of reduction and first-order recurrences phis in the
- // vector loop.
- VPBasicBlock *Header = Entry->getEntryBasicBlock();
- for (VPRecipeBase &R : Header->phis()) {
- auto *PhiR = dyn_cast<VPWidenPHIRecipe>(&R);
- if (!PhiR || !(isa<VPFirstOrderRecurrencePHIRecipe>(&R) ||
- isa<VPReductionPHIRecipe>(&R)))
- continue;
- // For first-order recurrences and in-order reduction phis, only a single
- // part is generated, which provides the last part from the previous
- // iteration. Otherwise all UF parts are generated.
- bool SinglePartNeeded = isa<VPFirstOrderRecurrencePHIRecipe>(&R) ||
- cast<VPReductionPHIRecipe>(&R)->isOrdered();
- unsigned LastPartForNewPhi = SinglePartNeeded ? 1 : State->UF;
- for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) {
- Value *VecPhi = State->get(PhiR, Part);
- Value *Val = State->get(PhiR->getBackedgeValue(),
- SinglePartNeeded ? State->UF - 1 : Part);
- cast<PHINode>(VecPhi)->addIncoming(Val, VectorLatchBB);
- }
- }
-
// Setup branch terminator successors for VPBBs in VPBBsToFix based on
// VPBB's successors.
for (auto VPBB : State->CFG.VPBBsToFix) {
@@ -876,13 +940,19 @@ void VPlan::execute(VPTransformState *State) {
// 3. Merge the temporary latch created with the last basic-block filled.
BasicBlock *LastBB = State->CFG.PrevBB;
+ assert(isa<BranchInst>(LastBB->getTerminator()) &&
+ "Expected VPlan CFG to terminate with branch");
+
+ // Move both the branch and check from LastBB to VectorLatchBB.
+ auto *LastBranch = cast<BranchInst>(LastBB->getTerminator());
+ LastBranch->moveBefore(VectorLatchBB->getTerminator());
+ VectorLatchBB->getTerminator()->eraseFromParent();
+ // Move condition so it is guaranteed to be next to branch. This is only done
+ // to avoid excessive test updates.
+ // TODO: Remove special handling once the increments for all inductions are
+ // modeled explicitly in VPlan.
+ cast<Instruction>(LastBranch->getCondition())->moveBefore(LastBranch);
// Connect LastBB to VectorLatchBB to facilitate their merge.
- assert((EnableVPlanNativePath ||
- isa<UnreachableInst>(LastBB->getTerminator())) &&
- "Expected InnerLoop VPlan CFG to terminate with unreachable");
- assert((!EnableVPlanNativePath || isa<BranchInst>(LastBB->getTerminator())) &&
- "Expected VPlan CFG to terminate with branch in NativePath");
- LastBB->getTerminator()->eraseFromParent();
BranchInst::Create(VectorLatchBB, LastBB);
// Merge LastBB with Latch.
@@ -891,6 +961,37 @@ void VPlan::execute(VPTransformState *State) {
assert(Merged && "Could not merge last basic block with latch.");
VectorLatchBB = LastBB;
+ // Fix the latch value of canonical, reduction and first-order recurrences
+ // phis in the vector loop.
+ VPBasicBlock *Header = Entry->getEntryBasicBlock();
+ if (Header->empty()) {
+ assert(EnableVPlanNativePath);
+ Header = cast<VPBasicBlock>(Header->getSingleSuccessor());
+ }
+ for (VPRecipeBase &R : Header->phis()) {
+ // Skip phi-like recipes that generate their backedge values themselves.
+ // TODO: Model their backedge values explicitly.
+ if (isa<VPWidenIntOrFpInductionRecipe>(&R) || isa<VPWidenPHIRecipe>(&R))
+ continue;
+
+ auto *PhiR = cast<VPHeaderPHIRecipe>(&R);
+ // For canonical IV, first-order recurrences and in-order reduction phis,
+ // only a single part is generated, which provides the last part from the
+ // previous iteration. For non-ordered reductions all UF parts are
+ // generated.
+ bool SinglePartNeeded = isa<VPCanonicalIVPHIRecipe>(PhiR) ||
+ isa<VPFirstOrderRecurrencePHIRecipe>(PhiR) ||
+ cast<VPReductionPHIRecipe>(PhiR)->isOrdered();
+ unsigned LastPartForNewPhi = SinglePartNeeded ? 1 : State->UF;
+
+ for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) {
+ Value *Phi = State->get(PhiR, Part);
+ Value *Val = State->get(PhiR->getBackedgeValue(),
+ SinglePartNeeded ? State->UF - 1 : Part);
+ cast<PHINode>(Phi)->addIncoming(Val, VectorLatchBB);
+ }
+ }
+
// We do not attempt to preserve DT for outer loop vectorization currently.
if (!EnableVPlanNativePath)
updateDominatorTree(State->DT, VectorPreHeaderBB, VectorLatchBB,
@@ -904,6 +1005,12 @@ void VPlan::print(raw_ostream &O) const {
O << "VPlan '" << Name << "' {";
+ if (VectorTripCount.getNumUsers() > 0) {
+ O << "\nLive-in ";
+ VectorTripCount.printAsOperand(O, SlotTracker);
+ O << " = vector-trip-count\n";
+ }
+
if (BackedgeTakenCount && BackedgeTakenCount->getNumUsers()) {
O << "\nLive-in ";
BackedgeTakenCount->printAsOperand(O, SlotTracker);
@@ -1155,7 +1262,15 @@ void VPWidenIntOrFpInductionRecipe::print(raw_ostream &O, const Twine &Indent,
} else
O << " " << VPlanIngredient(IV);
}
+#endif
+bool VPWidenIntOrFpInductionRecipe::isCanonical() const {
+ auto *StartC = dyn_cast<ConstantInt>(getStartValue()->getLiveInIRValue());
+ auto *StepC = dyn_cast<SCEVConstant>(getInductionDescriptor().getStep());
+ return StartC && StartC->isZero() && StepC && StepC->isOne();
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPWidenGEPRecipe::print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const {
O << Indent << "WIDEN-GEP ";
@@ -1255,7 +1370,7 @@ void VPWidenMemoryInstructionRecipe::print(raw_ostream &O, const Twine &Indent,
O << Indent << "WIDEN ";
if (!isStore()) {
- getVPSingleValue()->printAsOperand(O, SlotTracker);
+ printAsOperand(O, SlotTracker);
O << " = ";
}
O << Instruction::getOpcodeName(Ingredient.getOpcode()) << " ";
@@ -1264,26 +1379,39 @@ void VPWidenMemoryInstructionRecipe::print(raw_ostream &O, const Twine &Indent,
}
#endif
+void VPCanonicalIVPHIRecipe::execute(VPTransformState &State) {
+ Value *Start = getStartValue()->getLiveInIRValue();
+ PHINode *EntryPart = PHINode::Create(
+ Start->getType(), 2, "index", &*State.CFG.PrevBB->getFirstInsertionPt());
+ EntryPart->addIncoming(Start, State.CFG.VectorPreHeader);
+ EntryPart->setDebugLoc(DL);
+ for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part)
+ State.set(this, EntryPart, Part);
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+void VPCanonicalIVPHIRecipe::print(raw_ostream &O, const Twine &Indent,
+ VPSlotTracker &SlotTracker) const {
+ O << Indent << "EMIT ";
+ printAsOperand(O, SlotTracker);
+ O << " = CANONICAL-INDUCTION";
+}
+#endif
+
void VPWidenCanonicalIVRecipe::execute(VPTransformState &State) {
- Value *CanonicalIV = State.CanonicalIV;
+ Value *CanonicalIV = State.get(getOperand(0), 0);
Type *STy = CanonicalIV->getType();
IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
ElementCount VF = State.VF;
- assert(!VF.isScalable() && "the code following assumes non scalables ECs");
Value *VStart = VF.isScalar()
? CanonicalIV
- : Builder.CreateVectorSplat(VF.getKnownMinValue(),
- CanonicalIV, "broadcast");
+ : Builder.CreateVectorSplat(VF, CanonicalIV, "broadcast");
for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part) {
- SmallVector<Constant *, 8> Indices;
- for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
- Indices.push_back(
- ConstantInt::get(STy, Part * VF.getKnownMinValue() + Lane));
- // If VF == 1, there is only one iteration in the loop above, thus the
- // element pushed back into Indices is ConstantInt::get(STy, Part)
- Constant *VStep =
- VF.isScalar() ? Indices.back() : ConstantVector::get(Indices);
- // Add the consecutive indices to the vector value.
+ Value *VStep = createStepForVF(Builder, STy, VF, Part);
+ if (VF.isVector()) {
+ VStep = Builder.CreateVectorSplat(VF, VStep);
+ VStep = Builder.CreateAdd(VStep, Builder.CreateStepVector(VStep->getType()));
+ }
Value *CanonicalVectorIV = Builder.CreateAdd(VStart, VStep, "vec.iv");
State.set(this, CanonicalVectorIV, Part);
}
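After this change the widened canonical IV is built from the scalar canonical IV plus a per-part step vector, which also covers scalable VFs. For part P, lane L holds IV + P * VF + L; a minimal fixed-VF sketch (plain C++, not the IRBuilder calls):

#include <cstdint>
#include <vector>

std::vector<uint64_t> widenCanonicalIV(uint64_t IV, uint64_t VF,
                                       uint64_t Part) {
  std::vector<uint64_t> Lanes(VF);
  for (uint64_t L = 0; L < VF; ++L)
    Lanes[L] = IV + Part * VF + L;   // splat(IV) + (Part * VF + step-vector)
  return Lanes;
}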
@@ -1294,7 +1422,8 @@ void VPWidenCanonicalIVRecipe::print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const {
O << Indent << "EMIT ";
printAsOperand(O, SlotTracker);
- O << " = WIDEN-CANONICAL-INDUCTION";
+ O << " = WIDEN-CANONICAL-INDUCTION ";
+ printOperands(O, SlotTracker);
}
#endif
@@ -1461,7 +1590,7 @@ void VPInterleavedAccessInfo::visitBlock(VPBlockBase *Block, Old2NewTy &Old2New,
InterleavedAccessInfo &IAI) {
if (VPBasicBlock *VPBB = dyn_cast<VPBasicBlock>(Block)) {
for (VPRecipeBase &VPI : *VPBB) {
- if (isa<VPWidenPHIRecipe>(&VPI))
+ if (isa<VPHeaderPHIRecipe>(&VPI))
continue;
assert(isa<VPInstruction>(&VPI) && "Can only handle VPInstructions");
auto *VPInst = cast<VPInstruction>(&VPI);
@@ -1506,6 +1635,7 @@ void VPSlotTracker::assignSlots(const VPlan &Plan) {
for (const VPValue *V : Plan.VPExternalDefs)
assignSlot(V);
+ assignSlot(&Plan.VectorTripCount);
if (Plan.BackedgeTakenCount)
assignSlot(Plan.BackedgeTakenCount);
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlan.h b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlan.h
index f4a1883e35d5..824440f98a8b 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -69,6 +69,9 @@ class VPlanSlp;
/// vectors it is an expression determined at runtime.
Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF);
+/// Return a value for Step multiplied by VF.
+Value *createStepForVF(IRBuilder<> &B, Type *Ty, ElementCount VF, int64_t Step);
+
/// A range of powers-of-2 vectorization factors with fixed start and
/// adjustable end. The range includes start and excludes end, e.g.,:
/// [1, 9) = {1, 2, 4, 8}
@@ -198,8 +201,8 @@ struct VPTransformState {
VPTransformState(ElementCount VF, unsigned UF, LoopInfo *LI,
DominatorTree *DT, IRBuilder<> &Builder,
InnerLoopVectorizer *ILV, VPlan *Plan)
- : VF(VF), UF(UF), Instance(), LI(LI), DT(DT), Builder(Builder), ILV(ILV),
- Plan(Plan) {}
+ : VF(VF), UF(UF), LI(LI), DT(DT), Builder(Builder), ILV(ILV), Plan(Plan) {
+ }
/// The chosen Vectorization and Unroll Factors of the loop being vectorized.
ElementCount VF;
@@ -341,9 +344,6 @@ struct VPTransformState {
/// Hold the canonical scalar IV of the vector loop (start=0, step=VF*UF).
Value *CanonicalIV = nullptr;
- /// Hold the trip count of the scalar loop.
- Value *TripCount = nullptr;
-
/// Hold a pointer to InnerLoopVectorizer to reuse its IR generation methods.
InnerLoopVectorizer *ILV;
@@ -793,6 +793,9 @@ public:
SLPLoad,
SLPStore,
ActiveLaneMask,
+ CanonicalIVIncrement,
+ CanonicalIVIncrementNUW,
+ BranchOnCount,
};
private:
@@ -833,6 +836,16 @@ public:
return R->getVPDefID() == VPRecipeBase::VPInstructionSC;
}
+ /// Extra classof implementations to allow directly casting from VPUser ->
+ /// VPInstruction.
+ static inline bool classof(const VPUser *U) {
+ auto *R = dyn_cast<VPRecipeBase>(U);
+ return R && R->getVPDefID() == VPRecipeBase::VPInstructionSC;
+ }
+ static inline bool classof(const VPRecipeBase *R) {
+ return R->getVPDefID() == VPRecipeBase::VPInstructionSC;
+ }
+
unsigned getOpcode() const { return Opcode; }
/// Generate the instruction.
@@ -871,6 +884,7 @@ public:
case Instruction::Unreachable:
case Instruction::Fence:
case Instruction::AtomicRMW:
+ case VPInstruction::BranchOnCount:
return false;
default:
return true;
@@ -1045,6 +1059,7 @@ public:
/// Returns the start value of the induction.
VPValue *getStartValue() { return getOperand(0); }
+ const VPValue *getStartValue() const { return getOperand(0); }
/// Returns the first defined value as TruncInst, if it is one or nullptr
/// otherwise.
@@ -1057,66 +1072,65 @@ public:
/// Returns the induction descriptor for the recipe.
const InductionDescriptor &getInductionDescriptor() const { return IndDesc; }
-};
-/// A recipe for handling first order recurrences and pointer inductions. For
-/// first-order recurrences, the start value is the first operand of the recipe
-/// and the incoming value from the backedge is the second operand. It also
-/// serves as base class for VPReductionPHIRecipe. In the VPlan native path, all
-/// incoming VPValues & VPBasicBlock pairs are managed in the recipe directly.
-class VPWidenPHIRecipe : public VPRecipeBase, public VPValue {
- /// List of incoming blocks. Only used in the VPlan native path.
- SmallVector<VPBasicBlock *, 2> IncomingBlocks;
+ /// Returns true if the induction is canonical, i.e. starting at 0 and
+ /// incremented by UF * VF (= the original IV is incremented by 1).
+ bool isCanonical() const;
+
+ /// Returns the scalar type of the induction.
+ const Type *getScalarType() const {
+ const TruncInst *TruncI = getTruncInst();
+ return TruncI ? TruncI->getType() : IV->getType();
+ }
+};
+/// A pure virtual base class for all recipes modeling header phis, including
+/// phis for first order recurrences, pointer inductions and reductions. The
+/// start value is the first operand of the recipe and the incoming value from
+/// the backedge is the second operand.
+class VPHeaderPHIRecipe : public VPRecipeBase, public VPValue {
protected:
- VPWidenPHIRecipe(unsigned char VPVID, unsigned char VPDefID, PHINode *Phi,
- VPValue *Start = nullptr)
+ VPHeaderPHIRecipe(unsigned char VPVID, unsigned char VPDefID, PHINode *Phi,
+ VPValue *Start = nullptr)
: VPRecipeBase(VPDefID, {}), VPValue(VPVID, Phi, this) {
if (Start)
addOperand(Start);
}
public:
- /// Create a VPWidenPHIRecipe for \p Phi
- VPWidenPHIRecipe(PHINode *Phi)
- : VPWidenPHIRecipe(VPVWidenPHISC, VPWidenPHISC, Phi) {}
-
- /// Create a new VPWidenPHIRecipe for \p Phi with start value \p Start.
- VPWidenPHIRecipe(PHINode *Phi, VPValue &Start) : VPWidenPHIRecipe(Phi) {
- addOperand(&Start);
- }
-
- ~VPWidenPHIRecipe() override = default;
+ ~VPHeaderPHIRecipe() override = default;
/// Method to support type inquiry through isa, cast, and dyn_cast.
static inline bool classof(const VPRecipeBase *B) {
- return B->getVPDefID() == VPRecipeBase::VPWidenPHISC ||
+ return B->getVPDefID() == VPRecipeBase::VPCanonicalIVPHISC ||
B->getVPDefID() == VPRecipeBase::VPFirstOrderRecurrencePHISC ||
- B->getVPDefID() == VPRecipeBase::VPReductionPHISC;
+ B->getVPDefID() == VPRecipeBase::VPReductionPHISC ||
+ B->getVPDefID() == VPRecipeBase::VPWidenIntOrFpInductionSC ||
+ B->getVPDefID() == VPRecipeBase::VPWidenPHISC;
}
static inline bool classof(const VPValue *V) {
- return V->getVPValueID() == VPValue::VPVWidenPHISC ||
+ return V->getVPValueID() == VPValue::VPVCanonicalIVPHISC ||
V->getVPValueID() == VPValue::VPVFirstOrderRecurrencePHISC ||
- V->getVPValueID() == VPValue::VPVReductionPHISC;
+ V->getVPValueID() == VPValue::VPVReductionPHISC ||
+ V->getVPValueID() == VPValue::VPVWidenIntOrFpInductionSC ||
+ V->getVPValueID() == VPValue::VPVWidenPHISC;
}
- /// Generate the phi/select nodes.
- void execute(VPTransformState &State) override;
+ /// Generate the phi nodes.
+ void execute(VPTransformState &State) override = 0;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
- VPSlotTracker &SlotTracker) const override;
+ VPSlotTracker &SlotTracker) const override = 0;
#endif
- /// Returns the start value of the phi, if it is a reduction or first-order
- /// recurrence.
+ /// Returns the start value of the phi, if one is set.
VPValue *getStartValue() {
return getNumOperands() == 0 ? nullptr : getOperand(0);
}
- /// Returns the incoming value from the loop backedge, if it is a reduction or
- /// first-order recurrence.
+ /// Returns the incoming value from the loop backedge.
VPValue *getBackedgeValue() {
return getOperand(1);
}
@@ -1126,6 +1140,44 @@ public:
VPRecipeBase *getBackedgeRecipe() {
return cast<VPRecipeBase>(getBackedgeValue()->getDef());
}
+};
+
+/// A recipe for handling header phis that are widened in the vector loop.
+/// In the VPlan native path, all incoming VPValues & VPBasicBlock pairs are
+/// managed in the recipe directly.
+class VPWidenPHIRecipe : public VPHeaderPHIRecipe {
+ /// List of incoming blocks. Only used in the VPlan native path.
+ SmallVector<VPBasicBlock *, 2> IncomingBlocks;
+
+public:
+ /// Create a new VPWidenPHIRecipe for \p Phi with start value \p Start.
+ VPWidenPHIRecipe(PHINode *Phi, VPValue *Start = nullptr)
+ : VPHeaderPHIRecipe(VPVWidenPHISC, VPWidenPHISC, Phi) {
+ if (Start)
+ addOperand(Start);
+ }
+
+ ~VPWidenPHIRecipe() override = default;
+
+ /// Method to support type inquiry through isa, cast, and dyn_cast.
+ static inline bool classof(const VPRecipeBase *B) {
+ return B->getVPDefID() == VPRecipeBase::VPWidenPHISC;
+ }
+ static inline bool classof(const VPHeaderPHIRecipe *R) {
+ return R->getVPDefID() == VPRecipeBase::VPWidenPHISC;
+ }
+ static inline bool classof(const VPValue *V) {
+ return V->getVPValueID() == VPValue::VPVWidenPHISC;
+ }
+
+ /// Generate the phi/select nodes.
+ void execute(VPTransformState &State) override;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ /// Print the recipe.
+ void print(raw_ostream &O, const Twine &Indent,
+ VPSlotTracker &SlotTracker) const override;
+#endif
/// Adds a pair (\p IncomingV, \p IncomingBlock) to the phi.
void addIncoming(VPValue *IncomingV, VPBasicBlock *IncomingBlock) {
@@ -1133,27 +1185,27 @@ public:
IncomingBlocks.push_back(IncomingBlock);
}
- /// Returns the \p I th incoming VPValue.
- VPValue *getIncomingValue(unsigned I) { return getOperand(I); }
-
/// Returns the \p I th incoming VPBasicBlock.
VPBasicBlock *getIncomingBlock(unsigned I) { return IncomingBlocks[I]; }
+
+ /// Returns the \p I th incoming VPValue.
+ VPValue *getIncomingValue(unsigned I) { return getOperand(I); }
};
/// A recipe for handling first-order recurrence phis. The start value is the
/// first operand of the recipe and the incoming value from the backedge is the
/// second operand.
-struct VPFirstOrderRecurrencePHIRecipe : public VPWidenPHIRecipe {
+struct VPFirstOrderRecurrencePHIRecipe : public VPHeaderPHIRecipe {
VPFirstOrderRecurrencePHIRecipe(PHINode *Phi, VPValue &Start)
- : VPWidenPHIRecipe(VPVFirstOrderRecurrencePHISC,
- VPFirstOrderRecurrencePHISC, Phi, &Start) {}
+ : VPHeaderPHIRecipe(VPVFirstOrderRecurrencePHISC,
+ VPFirstOrderRecurrencePHISC, Phi, &Start) {}
/// Method to support type inquiry through isa, cast, and dyn_cast.
static inline bool classof(const VPRecipeBase *R) {
return R->getVPDefID() == VPRecipeBase::VPFirstOrderRecurrencePHISC;
}
- static inline bool classof(const VPWidenPHIRecipe *D) {
- return D->getVPDefID() == VPRecipeBase::VPFirstOrderRecurrencePHISC;
+ static inline bool classof(const VPHeaderPHIRecipe *R) {
+ return R->getVPDefID() == VPRecipeBase::VPFirstOrderRecurrencePHISC;
}
static inline bool classof(const VPValue *V) {
return V->getVPValueID() == VPValue::VPVFirstOrderRecurrencePHISC;
@@ -1171,7 +1223,7 @@ struct VPFirstOrderRecurrencePHIRecipe : public VPWidenPHIRecipe {
/// A recipe for handling reduction phis. The start value is the first operand
/// of the recipe and the incoming value from the backedge is the second
/// operand.
-class VPReductionPHIRecipe : public VPWidenPHIRecipe {
+class VPReductionPHIRecipe : public VPHeaderPHIRecipe {
/// Descriptor for the reduction.
const RecurrenceDescriptor &RdxDesc;
@@ -1187,7 +1239,7 @@ public:
VPReductionPHIRecipe(PHINode *Phi, const RecurrenceDescriptor &RdxDesc,
VPValue &Start, bool IsInLoop = false,
bool IsOrdered = false)
- : VPWidenPHIRecipe(VPVReductionPHISC, VPReductionPHISC, Phi, &Start),
+ : VPHeaderPHIRecipe(VPVReductionPHISC, VPReductionPHISC, Phi, &Start),
RdxDesc(RdxDesc), IsInLoop(IsInLoop), IsOrdered(IsOrdered) {
assert((!IsOrdered || IsInLoop) && "IsOrdered requires IsInLoop");
}
@@ -1198,12 +1250,12 @@ public:
static inline bool classof(const VPRecipeBase *R) {
return R->getVPDefID() == VPRecipeBase::VPReductionPHISC;
}
+ static inline bool classof(const VPHeaderPHIRecipe *R) {
+ return R->getVPDefID() == VPRecipeBase::VPReductionPHISC;
+ }
static inline bool classof(const VPValue *V) {
return V->getVPValueID() == VPValue::VPVReductionPHISC;
}
- static inline bool classof(const VPWidenPHIRecipe *R) {
- return R->getVPDefID() == VPRecipeBase::VPReductionPHISC;
- }
/// Generate the phi/select nodes.
void execute(VPTransformState &State) override;
@@ -1601,11 +1653,46 @@ public:
#endif
};
+/// Canonical scalar induction phi of the vector loop. It starts at the
+/// specified start value (either 0 or the resume value when vectorizing the
+/// epilogue loop). VPWidenCanonicalIVRecipe represents the vector version of
+/// the canonical induction variable.
+class VPCanonicalIVPHIRecipe : public VPHeaderPHIRecipe {
+ DebugLoc DL;
+
+public:
+ VPCanonicalIVPHIRecipe(VPValue *StartV, DebugLoc DL)
+ : VPHeaderPHIRecipe(VPValue::VPVCanonicalIVPHISC, VPCanonicalIVPHISC,
+ nullptr, StartV),
+ DL(DL) {}
+
+ ~VPCanonicalIVPHIRecipe() override = default;
+
+ /// Method to support type inquiry through isa, cast, and dyn_cast.
+ static inline bool classof(const VPDef *D) {
+ return D->getVPDefID() == VPCanonicalIVPHISC;
+ }
+
+ /// Generate the canonical scalar induction phi of the vector loop.
+ void execute(VPTransformState &State) override;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ /// Print the recipe.
+ void print(raw_ostream &O, const Twine &Indent,
+ VPSlotTracker &SlotTracker) const override;
+#endif
+
+ /// Returns the scalar type of the induction.
+ const Type *getScalarType() const {
+ return getOperand(0)->getLiveInIRValue()->getType();
+ }
+};
+
/// A Recipe for widening the canonical induction variable of the vector loop.
class VPWidenCanonicalIVRecipe : public VPRecipeBase, public VPValue {
public:
- VPWidenCanonicalIVRecipe()
- : VPRecipeBase(VPWidenCanonicalIVSC, {}),
+ VPWidenCanonicalIVRecipe(VPCanonicalIVPHIRecipe *CanonicalIV)
+ : VPRecipeBase(VPWidenCanonicalIVSC, {CanonicalIV}),
VPValue(VPValue::VPVWidenCanonicalIVSC, nullptr, this) {}
~VPWidenCanonicalIVRecipe() override = default;
@@ -1615,6 +1702,16 @@ public:
return D->getVPDefID() == VPRecipeBase::VPWidenCanonicalIVSC;
}
+ /// Extra classof implementations to allow directly casting from VPUser ->
+ /// VPWidenCanonicalIVRecipe.
+ static inline bool classof(const VPUser *U) {
+ auto *R = dyn_cast<VPRecipeBase>(U);
+ return R && R->getVPDefID() == VPRecipeBase::VPWidenCanonicalIVSC;
+ }
+ static inline bool classof(const VPRecipeBase *R) {
+ return R->getVPDefID() == VPRecipeBase::VPWidenCanonicalIVSC;
+ }
+
/// Generate a canonical vector induction variable of the vector loop, with
/// start = {<Part*VF, Part*VF+1, ..., Part*VF+VF-1> for 0 <= Part < UF}, and
/// step = <VF*UF, VF*UF, ..., VF*UF>.
@@ -1625,6 +1722,12 @@ public:
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif
+
+ /// Returns the scalar type of the induction.
+ const Type *getScalarType() const {
+ return cast<VPCanonicalIVPHIRecipe>(getOperand(0)->getDef())
+ ->getScalarType();
+ }
};
/// VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph. It
@@ -2112,10 +2215,17 @@ class VPlan {
// (operators '==' and '<').
SetVector<VPValue *> VPExternalDefs;
- /// Represents the backedge taken count of the original loop, for folding
+ /// Represents the trip count of the original loop, for folding
/// the tail.
+ VPValue *TripCount = nullptr;
+
+ /// Represents the backedge taken count of the original loop, for folding
+ /// the tail. It equals TripCount - 1.
VPValue *BackedgeTakenCount = nullptr;
+ /// Represents the vector trip count.
+ VPValue VectorTripCount;
+
/// Holds a mapping between Values and their corresponding VPValue inside
/// VPlan.
Value2VPValueTy Value2VPValue;
@@ -2147,12 +2257,18 @@ public:
}
for (VPValue *VPV : VPValuesToFree)
delete VPV;
+ if (TripCount)
+ delete TripCount;
if (BackedgeTakenCount)
delete BackedgeTakenCount;
for (VPValue *Def : VPExternalDefs)
delete Def;
}
+ /// Prepare the plan for execution, setting up the required live-in values.
+ void prepareToExecute(Value *TripCount, Value *VectorTripCount,
+ Value *CanonicalIVStartValue, VPTransformState &State);
+
/// Generate the IR code for this VPlan.
void execute(struct VPTransformState *State);
@@ -2165,6 +2281,13 @@ public:
return Entry;
}
+ /// The trip count of the original loop.
+ VPValue *getOrCreateTripCount() {
+ if (!TripCount)
+ TripCount = new VPValue();
+ return TripCount;
+ }
+
/// The backedge taken count of the original loop.
VPValue *getOrCreateBackedgeTakenCount() {
if (!BackedgeTakenCount)
@@ -2172,6 +2295,9 @@ public:
return BackedgeTakenCount;
}
+ /// The vector trip count.
+ VPValue &getVectorTripCount() { return VectorTripCount; }
+
/// Mark the plan to indicate that using Value2VPValue is not safe any
/// longer, because it may be stale.
void disableValue2VPValue() { Value2VPValueEnabled = false; }
@@ -2264,6 +2390,21 @@ public:
return !VPV->getDef() || (RepR && RepR->isUniform());
}
+ /// Returns the VPRegionBlock of the vector loop.
+ VPRegionBlock *getVectorLoopRegion() {
+ return cast<VPRegionBlock>(getEntry());
+ }
+
+ /// Returns the canonical induction recipe of the vector loop.
+ VPCanonicalIVPHIRecipe *getCanonicalIV() {
+ VPBasicBlock *EntryVPBB = getVectorLoopRegion()->getEntryBasicBlock();
+ if (EntryVPBB->empty()) {
+ // VPlan native path.
+ EntryVPBB = cast<VPBasicBlock>(EntryVPBB->getSingleSuccessor());
+ }
+ return cast<VPCanonicalIVPHIRecipe>(&*EntryVPBB->begin());
+ }
+
private:
/// Add to the given dominator tree the header block and every new basic block
/// that was created between it and the latch block, inclusive.
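The accessors added to VPlan above (getOrCreateTripCount, getVectorTripCount, getVectorLoopRegion and getCanonicalIV) give transforms and the executor one common way to reach the trip counts and the canonical induction. A minimal sketch of how a caller might combine them, assuming a fully built plan named Plan (illustrative only, not part of this patch):

// Sketch: querying the new VPlan accessors (assumes a populated VPlan named Plan).
VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();   // top-level vector loop region
VPCanonicalIVPHIRecipe *CanIV = Plan.getCanonicalIV();    // first recipe in the loop header
const Type *IVTy = CanIV->getScalarType();                // scalar type of the induction
VPValue *TC = Plan.getOrCreateTripCount();                // original loop trip count
VPValue &VecTC = Plan.getVectorTripCount();               // vector trip count live-in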
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanPredicator.cpp b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanPredicator.cpp
index 86ecd6817873..e879a33db6ee 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanPredicator.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanPredicator.cpp
@@ -231,7 +231,7 @@ void VPlanPredicator::linearizeRegionRec(VPRegionBlock *Region) {
}
// Entry point. The driver function for the predicator.
-void VPlanPredicator::predicate(void) {
+void VPlanPredicator::predicate() {
// Predicate the blocks within Region.
predicateRegionRec(cast<VPRegionBlock>(Plan.getEntry()));
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanPredicator.h b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanPredicator.h
index 692afd2978d5..a5db9a54da3c 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanPredicator.h
+++ b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanPredicator.h
@@ -68,7 +68,7 @@ public:
VPlanPredicator(VPlan &Plan);
/// Predicate Plan's HCFG.
- void predicate(void);
+ void predicate();
};
} // end namespace llvm
#endif // LLVM_TRANSFORMS_VECTORIZE_VPLAN_PREDICATOR_H
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index d2daf558c2c5..fb5f3d428189 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -324,3 +324,30 @@ void VPlanTransforms::removeRedundantInductionCasts(VPlan &Plan) {
E.first->eraseFromParent();
}
}
+
+void VPlanTransforms::removeRedundantCanonicalIVs(VPlan &Plan) {
+ VPCanonicalIVPHIRecipe *CanonicalIV = Plan.getCanonicalIV();
+ VPWidenCanonicalIVRecipe *WidenNewIV = nullptr;
+ for (VPUser *U : CanonicalIV->users()) {
+ WidenNewIV = dyn_cast<VPWidenCanonicalIVRecipe>(U);
+ if (WidenNewIV)
+ break;
+ }
+
+ if (!WidenNewIV)
+ return;
+
+ VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
+ for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
+ auto *WidenOriginalIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
+
+ // If the induction recipe is canonical and the types match, use it
+ // directly.
+ if (WidenOriginalIV && WidenOriginalIV->isCanonical() &&
+ WidenOriginalIV->getScalarType() == WidenNewIV->getScalarType()) {
+ WidenNewIV->replaceAllUsesWith(WidenOriginalIV);
+ WidenNewIV->eraseFromParent();
+ return;
+ }
+ }
+}
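In effect the transform above erases a VPWidenCanonicalIVRecipe when the loop header already carries a widened induction that is canonical and of the same scalar type, redirecting its users to that induction. A before/after sketch (recipe names abbreviated, not actual printer output):

// Before the transform (sketch):
//   %civ  = canonical IV phi
//   %wiv  = widened int induction           ; canonical, same scalar type
//   %wciv = widen canonical IV of %civ
// After the transform:
//   %civ  = canonical IV phi
//   %wiv  = widened int induction           ; now also serves the former users of %wciv
//   (the widen-canonical-IV recipe has been erased)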
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanTransforms.h b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
index a82a562d5e35..e74409a86466 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
+++ b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
@@ -45,6 +45,10 @@ struct VPlanTransforms {
/// in the vectorized loop. There is no need to vectorize the cast - the same
/// value can be used for both the phi and casts in the vector loop.
static void removeRedundantInductionCasts(VPlan &Plan);
+
+ /// Try to replace VPWidenCanonicalIVRecipes with an existing widened
+ /// induction recipe that is already canonical and has a matching scalar
+ /// type, if one exists.
+ static void removeRedundantCanonicalIVs(VPlan &Plan);
};
} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanValue.h b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanValue.h
index fd92201614df..5296d2b9485c 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanValue.h
+++ b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanValue.h
@@ -96,14 +96,15 @@ public:
VPVReplicateSC,
VPVWidenSC,
VPVWidenCallSC,
+ VPVWidenCanonicalIVSC,
VPVWidenGEPSC,
VPVWidenSelectSC,
// Phi-like VPValues. Need to be kept together.
VPVBlendSC,
+ VPVCanonicalIVPHISC,
VPVFirstOrderRecurrencePHISC,
VPVWidenPHISC,
- VPVWidenCanonicalIVSC,
VPVWidenIntOrFpInductionSC,
VPVPredInstPHI,
VPVReductionPHISC,
@@ -177,6 +178,7 @@ public:
void replaceAllUsesWith(VPValue *New);
VPDef *getDef() { return Def; }
+ const VPDef *getDef() const { return Def; }
/// Returns the underlying IR value, if this VPValue is defined outside the
/// scope of VPlan. Returns nullptr if the VPValue is defined by a VPDef
@@ -186,6 +188,11 @@ public:
"VPValue is not a live-in; it is defined by a VPDef inside a VPlan");
return getUnderlyingValue();
}
+ const Value *getLiveInIRValue() const {
+ assert(!getDef() &&
+ "VPValue is not a live-in; it is defined by a VPDef inside a VPlan");
+ return getUnderlyingValue();
+ }
};
typedef DenseMap<Value *, VPValue *> Value2VPValueTy;
@@ -325,6 +332,7 @@ public:
VPReductionSC,
VPReplicateSC,
VPWidenCallSC,
+ VPWidenCanonicalIVSC,
VPWidenGEPSC,
VPWidenMemoryInstructionSC,
VPWidenSC,
@@ -332,9 +340,9 @@ public:
// Phi-like recipes. Need to be kept together.
VPBlendSC,
+ VPCanonicalIVPHISC,
VPFirstOrderRecurrencePHISC,
VPWidenPHISC,
- VPWidenCanonicalIVSC,
VPWidenIntOrFpInductionSC,
VPPredInstPHISC,
VPReductionPHISC,
@@ -403,7 +411,6 @@ public:
class VPlan;
class VPBasicBlock;
-class VPRegionBlock;
/// This class can be used to assign consecutive numbers to all VPValues in a
/// VPlan and allows querying the numbering for printing, similar to the
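The two "Need to be kept together" comments above exist because VPlan code elsewhere tests for phi-like values and recipes with a single ID-range comparison instead of enumerating every ID. A minimal sketch of such a check, under the assumption that VPBlendSC and VPReductionPHISC remain the first and last members of the group shown above:

// Sketch only; the bracketing IDs are an assumption based on the enum order above.
static bool isPhiLikeVPDefID(unsigned ID) {
  return ID >= VPDef::VPBlendSC && ID <= VPDef::VPReductionPHISC;
}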
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp
index 7732d9367985..d36f250995e1 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp
@@ -163,12 +163,32 @@ bool VPlanVerifier::verifyPlanIsValid(const VPlan &Plan) {
errs() << "VPlan entry block is not a VPBasicBlock\n";
return false;
}
+
+ if (!isa<VPCanonicalIVPHIRecipe>(&*Entry->begin())) {
+ errs() << "VPlan vector loop header does not start with a "
+ "VPCanonicalIVPHIRecipe\n";
+ return false;
+ }
+
const VPBasicBlock *Exit = dyn_cast<VPBasicBlock>(TopRegion->getExit());
if (!Exit) {
errs() << "VPlan exit block is not a VPBasicBlock\n";
return false;
}
+ if (Exit->empty()) {
+ errs() << "VPlan vector loop exit must end with BranchOnCount "
+ "VPInstruction but is empty\n";
+ return false;
+ }
+
+ auto *LastInst = dyn_cast<VPInstruction>(std::prev(Exit->end()));
+ if (!LastInst || LastInst->getOpcode() != VPInstruction::BranchOnCount) {
+ errs() << "VPlan vector loop exit must end with BranchOnCount "
+ "VPInstruction\n";
+ return false;
+ }
+
for (const VPRegionBlock *Region :
VPBlockUtils::blocksOnly<const VPRegionBlock>(
depth_first(VPBlockRecursiveTraversalWrapper<const VPBlockBase *>(
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
index c0aedab2fed0..620d388199e0 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
@@ -881,7 +881,8 @@ static ScalarizationResult canScalarizeAccess(FixedVectorType *VecTy,
ConstantRange IdxRange(IntWidth, true);
if (isGuaranteedNotToBePoison(Idx, &AC)) {
- if (ValidIndices.contains(computeConstantRange(Idx, true, &AC, CtxI, &DT)))
+ if (ValidIndices.contains(computeConstantRange(Idx, /* ForSigned */ false,
+ true, &AC, CtxI, &DT)))
return ScalarizationResult::safe();
return ScalarizationResult::unsafe();
}
diff --git a/contrib/llvm-project/llvm/lib/WindowsManifest/WindowsManifestMerger.cpp b/contrib/llvm-project/llvm/lib/WindowsManifest/WindowsManifestMerger.cpp
index 1be1d34417eb..40c03f7b0de7 100644
--- a/contrib/llvm-project/llvm/lib/WindowsManifest/WindowsManifestMerger.cpp
+++ b/contrib/llvm-project/llvm/lib/WindowsManifest/WindowsManifestMerger.cpp
@@ -669,7 +669,7 @@ WindowsManifestMerger::WindowsManifestMergerImpl::getMergedManifest() {
std::unique_ptr<xmlDoc, XmlDeleter> OutputDoc(
xmlNewDoc((const unsigned char *)"1.0"));
xmlDocSetRootElement(OutputDoc.get(), CombinedRoot);
- assert(0 == xmlDocGetRootElement(CombinedDoc));
+ assert(nullptr == xmlDocGetRootElement(CombinedDoc));
xmlKeepBlanksDefault(0);
xmlChar *Buff = nullptr;
diff --git a/contrib/llvm-project/llvm/tools/bugpoint/BugDriver.h b/contrib/llvm-project/llvm/tools/bugpoint/BugDriver.h
index fe5201eb2e6c..b7c9edc5b812 100644
--- a/contrib/llvm-project/llvm/tools/bugpoint/BugDriver.h
+++ b/contrib/llvm-project/llvm/tools/bugpoint/BugDriver.h
@@ -25,7 +25,6 @@
namespace llvm {
-class Value;
class PassInfo;
class Module;
class GlobalVariable;
@@ -35,8 +34,6 @@ class AbstractInterpreter;
class Instruction;
class LLVMContext;
-class DebugCrashes;
-
class CC;
extern bool DisableSimplifyCFG;
diff --git a/contrib/llvm-project/llvm/tools/bugpoint/CrashDebugger.cpp b/contrib/llvm-project/llvm/tools/bugpoint/CrashDebugger.cpp
index 451e1cd98ee8..d127ea0945f2 100644
--- a/contrib/llvm-project/llvm/tools/bugpoint/CrashDebugger.cpp
+++ b/contrib/llvm-project/llvm/tools/bugpoint/CrashDebugger.cpp
@@ -354,7 +354,7 @@ bool ReduceCrashingFunctionAttributes::TestFuncAttrs(
// Build up an AttributeList from the attributes we've been given by the
// reducer.
- AttrBuilder AB;
+ AttrBuilder AB(M->getContext());
for (auto A : Attrs)
AB.addAttribute(A);
AttributeList NewAttrs;
diff --git a/contrib/llvm-project/llvm/tools/bugpoint/FindBugs.cpp b/contrib/llvm-project/llvm/tools/bugpoint/FindBugs.cpp
index 771329024c09..31945fadd95d 100644
--- a/contrib/llvm-project/llvm/tools/bugpoint/FindBugs.cpp
+++ b/contrib/llvm-project/llvm/tools/bugpoint/FindBugs.cpp
@@ -37,7 +37,7 @@ BugDriver::runManyPasses(const std::vector<std::string> &AllPasses) {
std::mt19937 randomness(std::random_device{}());
unsigned num = 1;
- while (1) {
+ while (true) {
//
// Step 1: Randomize the order of the optimizer passes.
//
diff --git a/contrib/llvm-project/llvm/tools/bugpoint/Miscompilation.cpp b/contrib/llvm-project/llvm/tools/bugpoint/Miscompilation.cpp
index e69fe9ff6c15..38821949d0ff 100644
--- a/contrib/llvm-project/llvm/tools/bugpoint/Miscompilation.cpp
+++ b/contrib/llvm-project/llvm/tools/bugpoint/Miscompilation.cpp
@@ -308,7 +308,7 @@ ExtractLoops(BugDriver &BD,
std::unique_ptr<Module>),
std::vector<Function *> &MiscompiledFunctions) {
bool MadeChange = false;
- while (1) {
+ while (true) {
if (BugpointIsInterrupted)
return MadeChange;
diff --git a/contrib/llvm-project/llvm/tools/bugpoint/OptimizerDriver.cpp b/contrib/llvm-project/llvm/tools/bugpoint/OptimizerDriver.cpp
index 848baf90965b..e67e877c13af 100644
--- a/contrib/llvm-project/llvm/tools/bugpoint/OptimizerDriver.cpp
+++ b/contrib/llvm-project/llvm/tools/bugpoint/OptimizerDriver.cpp
@@ -139,7 +139,7 @@ bool BugDriver::runPasses(Module &Program,
if (EC) {
errs() << getToolName()
<< ": Error making unique filename: " << EC.message() << "\n";
- return 1;
+ return true;
}
OutputFilename = std::string(UniqueFilename.str());
@@ -150,7 +150,7 @@ bool BugDriver::runPasses(Module &Program,
errs() << getToolName()
<< ": Error making unique filename: " << toString(Temp.takeError())
<< "\n";
- return 1;
+ return true;
}
DiscardTemp Discard{*Temp};
raw_fd_ostream OS(Temp->FD, /*shouldClose*/ false);
@@ -160,7 +160,7 @@ bool BugDriver::runPasses(Module &Program,
if (OS.has_error()) {
errs() << "Error writing bitcode file: " << Temp->TmpName << "\n";
OS.clear_error();
- return 1;
+ return true;
}
std::string tool = OptCmd;
@@ -173,11 +173,11 @@ bool BugDriver::runPasses(Module &Program,
}
if (tool.empty()) {
errs() << "Cannot find `opt' in PATH!\n";
- return 1;
+ return true;
}
if (!sys::fs::exists(tool)) {
errs() << "Specified `opt' binary does not exist: " << tool << "\n";
- return 1;
+ return true;
}
std::string Prog;
@@ -190,7 +190,7 @@ bool BugDriver::runPasses(Module &Program,
Prog = tool;
if (Prog.empty()) {
errs() << "Cannot find `valgrind' in PATH!\n";
- return 1;
+ return true;
}
// setup the child process' arguments
diff --git a/contrib/llvm-project/llvm/tools/llvm-ar/llvm-ar.cpp b/contrib/llvm-project/llvm/tools/llvm-ar/llvm-ar.cpp
index 175ec8d022c2..f7b29b884027 100644
--- a/contrib/llvm-project/llvm/tools/llvm-ar/llvm-ar.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-ar/llvm-ar.cpp
@@ -1003,12 +1003,17 @@ static int performOperation(ArchiveOperation Operation,
fail("unable to open '" + ArchiveName + "': " + EC.message());
if (!EC) {
- Error Err = Error::success();
- object::Archive Archive(Buf.get()->getMemBufferRef(), Err);
- failIfError(std::move(Err), "unable to load '" + ArchiveName + "'");
- if (Archive.isThin())
+ Expected<std::unique_ptr<object::Archive>> ArchiveOrError =
+ object::Archive::create(Buf.get()->getMemBufferRef());
+ if (!ArchiveOrError)
+ failIfError(ArchiveOrError.takeError(),
+ "unable to load '" + ArchiveName + "'");
+
+ std::unique_ptr<object::Archive> Archive = std::move(ArchiveOrError.get());
+ if (Archive->isThin())
CompareFullPath = true;
- performOperation(Operation, &Archive, std::move(Buf.get()), NewMembers);
+ performOperation(Operation, Archive.get(), std::move(Buf.get()),
+ NewMembers);
return 0;
}
@@ -1111,11 +1116,11 @@ static void runMRIScript() {
}
static bool handleGenericOption(StringRef arg) {
- if (arg == "-help" || arg == "--help" || arg == "-h") {
+ if (arg == "--help" || arg == "-h") {
printHelpMessage();
return true;
}
- if (arg == "-version" || arg == "--version") {
+ if (arg == "--version") {
cl::PrintVersionMessage();
return true;
}
@@ -1129,8 +1134,6 @@ static const char *matchFlagWithArg(StringRef Expected,
if (Arg.startswith("--"))
Arg = Arg.substr(2);
- else if (Arg.startswith("-"))
- Arg = Arg.substr(1);
size_t len = Expected.size();
if (Arg == Expected) {
diff --git a/contrib/llvm-project/llvm/tools/llvm-cov/CodeCoverage.cpp b/contrib/llvm-project/llvm/tools/llvm-cov/CodeCoverage.cpp
index d357ad7c9e10..ef801287c1be 100644
--- a/contrib/llvm-project/llvm/tools/llvm-cov/CodeCoverage.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-cov/CodeCoverage.cpp
@@ -209,7 +209,7 @@ void CodeCoverageTool::addCollectedPath(const std::string &Path) {
error(EC.message(), Path);
return;
}
- sys::path::remove_dots(EffectivePath, /*remove_dot_dots=*/true);
+ sys::path::remove_dots(EffectivePath, /*remove_dot_dot=*/true);
if (!IgnoreFilenameFilters.matchesFilename(EffectivePath))
SourceFiles.emplace_back(EffectivePath.str());
HadSourceFiles = !SourceFiles.empty();
diff --git a/contrib/llvm-project/llvm/tools/llvm-cov/CoverageReport.cpp b/contrib/llvm-project/llvm/tools/llvm-cov/CoverageReport.cpp
index 2c08f5309494..c91edc2b0dc0 100644
--- a/contrib/llvm-project/llvm/tools/llvm-cov/CoverageReport.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-cov/CoverageReport.cpp
@@ -179,7 +179,7 @@ void CoverageReport::render(const FileCoverageSummary &File,
determineCoveragePercentageColor(File.InstantiationCoverage);
auto LineCoverageColor = determineCoveragePercentageColor(File.LineCoverage);
SmallString<256> FileName = File.Name;
- sys::path::remove_dots(FileName, /*remove_dot_dots=*/true);
+ sys::path::remove_dots(FileName, /*remove_dot_dot=*/true);
sys::path::native(FileName);
OS << column(FileName, FileReportColumns[0], Column::NoTrim);
diff --git a/contrib/llvm-project/llvm/tools/llvm-cov/CoverageSummaryInfo.h b/contrib/llvm-project/llvm/tools/llvm-cov/CoverageSummaryInfo.h
index 62e7cad1012b..84a3228f22b9 100644
--- a/contrib/llvm-project/llvm/tools/llvm-cov/CoverageSummaryInfo.h
+++ b/contrib/llvm-project/llvm/tools/llvm-cov/CoverageSummaryInfo.h
@@ -191,8 +191,7 @@ struct FunctionCoverageSummary {
BranchCoverageInfo BranchCoverage;
FunctionCoverageSummary(const std::string &Name)
- : Name(Name), ExecutionCount(0), RegionCoverage(), LineCoverage(),
- BranchCoverage() {}
+ : Name(Name), ExecutionCount(0) {}
FunctionCoverageSummary(const std::string &Name, uint64_t ExecutionCount,
const RegionCoverageInfo &RegionCoverage,
@@ -223,9 +222,7 @@ struct FileCoverageSummary {
FunctionCoverageInfo FunctionCoverage;
FunctionCoverageInfo InstantiationCoverage;
- FileCoverageSummary(StringRef Name)
- : Name(Name), RegionCoverage(), LineCoverage(), FunctionCoverage(),
- InstantiationCoverage() {}
+ FileCoverageSummary(StringRef Name) : Name(Name) {}
FileCoverageSummary &operator+=(const FileCoverageSummary &RHS) {
RegionCoverage += RHS.RegionCoverage;
diff --git a/contrib/llvm-project/llvm/tools/llvm-cov/SourceCoverageView.cpp b/contrib/llvm-project/llvm/tools/llvm-cov/SourceCoverageView.cpp
index aca58a07f250..ea86acadf001 100644
--- a/contrib/llvm-project/llvm/tools/llvm-cov/SourceCoverageView.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-cov/SourceCoverageView.cpp
@@ -41,7 +41,7 @@ std::string CoveragePrinter::getOutputPath(StringRef Path, StringRef Extension,
sys::path::append(FullPath, getCoverageDir());
SmallString<256> ParentPath = sys::path::parent_path(Path);
- sys::path::remove_dots(ParentPath, /*remove_dot_dots=*/true);
+ sys::path::remove_dots(ParentPath, /*remove_dot_dot=*/true);
sys::path::append(FullPath, sys::path::relative_path(ParentPath));
auto PathFilename = (sys::path::filename(Path) + "." + Extension).str();
@@ -157,7 +157,7 @@ SourceCoverageView::create(StringRef SourceName, const MemoryBuffer &File,
std::string SourceCoverageView::getSourceName() const {
SmallString<128> SourceText(SourceName);
- sys::path::remove_dots(SourceText, /*remove_dot_dots=*/true);
+ sys::path::remove_dots(SourceText, /*remove_dot_dot=*/true);
sys::path::native(SourceText);
return std::string(SourceText.str());
}
diff --git a/contrib/llvm-project/llvm/tools/llvm-cov/SourceCoverageViewHTML.cpp b/contrib/llvm-project/llvm/tools/llvm-cov/SourceCoverageViewHTML.cpp
index 7ab1d6608ccf..56efc40b9349 100644
--- a/contrib/llvm-project/llvm/tools/llvm-cov/SourceCoverageViewHTML.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-cov/SourceCoverageViewHTML.cpp
@@ -322,7 +322,7 @@ std::string
CoveragePrinterHTML::buildLinkToFile(StringRef SF,
const FileCoverageSummary &FCS) const {
SmallString<128> LinkTextStr(sys::path::relative_path(FCS.Name));
- sys::path::remove_dots(LinkTextStr, /*remove_dot_dots=*/true);
+ sys::path::remove_dots(LinkTextStr, /*remove_dot_dot=*/true);
sys::path::native(LinkTextStr);
std::string LinkText = escape(LinkTextStr, Opts);
std::string LinkTarget =
diff --git a/contrib/llvm-project/llvm/tools/llvm-diff/llvm-diff.cpp b/contrib/llvm-project/llvm/tools/llvm-diff/llvm-diff.cpp
index d9d19f35ffee..7349469c80d6 100644
--- a/contrib/llvm-project/llvm/tools/llvm-diff/llvm-diff.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-diff/llvm-diff.cpp
@@ -21,6 +21,7 @@
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/WithColor.h"
#include <string>
#include <utility>
diff --git a/contrib/llvm-project/llvm/tools/llvm-dis/llvm-dis.cpp b/contrib/llvm-project/llvm/tools/llvm-dis/llvm-dis.cpp
index 62d165fb8413..7b3c3e7706a6 100644
--- a/contrib/llvm-project/llvm/tools/llvm-dis/llvm-dis.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-dis/llvm-dis.cpp
@@ -74,6 +74,11 @@ static cl::opt<bool>
"then materialize only the metadata"),
cl::cat(DisCategory));
+static cl::opt<bool> PrintThinLTOIndexOnly(
+ "print-thinlto-index-only",
+ cl::desc("Only read thinlto index and print the index as LLVM assembly."),
+ cl::init(false), cl::Hidden, cl::cat(DisCategory));
+
namespace {
static void printDebugLoc(const DebugLoc &DL, formatted_raw_ostream &OS) {
@@ -186,12 +191,17 @@ int main(int argc, char **argv) {
for (size_t I = 0; I < N; ++I) {
BitcodeModule MB = IF.Mods[I];
- std::unique_ptr<Module> M = ExitOnErr(
- MB.getLazyModule(Context, MaterializeMetadata, SetImporting));
- if (MaterializeMetadata)
- ExitOnErr(M->materializeMetadata());
- else
- ExitOnErr(M->materializeAll());
+
+ std::unique_ptr<Module> M;
+
+ if (!PrintThinLTOIndexOnly) {
+ M = ExitOnErr(
+ MB.getLazyModule(Context, MaterializeMetadata, SetImporting));
+ if (MaterializeMetadata)
+ ExitOnErr(M->materializeMetadata());
+ else
+ ExitOnErr(M->materializeAll());
+ }
BitcodeLTOInfo LTOInfo = ExitOnErr(MB.getLTOInfo());
std::unique_ptr<ModuleSummaryIndex> Index;
@@ -233,7 +243,8 @@ int main(int argc, char **argv) {
// All that llvm-dis does is write the assembly to a file.
if (!DontPrint) {
- M->print(Out->os(), Annotator.get(), PreserveAssemblyUseListOrder);
+ if (M)
+ M->print(Out->os(), Annotator.get(), PreserveAssemblyUseListOrder);
if (Index)
Index->print(Out->os());
}
diff --git a/contrib/llvm-project/llvm/tools/llvm-lto/llvm-lto.cpp b/contrib/llvm-project/llvm/tools/llvm-lto/llvm-lto.cpp
index 995ebacacb87..d78c4dff7db4 100644
--- a/contrib/llvm-project/llvm/tools/llvm-lto/llvm-lto.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-lto/llvm-lto.cpp
@@ -46,6 +46,7 @@
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/ToolOutputFile.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/WithColor.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
@@ -267,7 +268,7 @@ static cl::opt<bool>
namespace {
struct ModuleInfo {
- std::vector<bool> CanBeHidden;
+ BitVector CanBeHidden;
};
} // end anonymous namespace
diff --git a/contrib/llvm-project/llvm/tools/llvm-mca/CodeRegion.h b/contrib/llvm-project/llvm/tools/llvm-mca/CodeRegion.h
index 0b2590767dfa..0e1e02a533d8 100644
--- a/contrib/llvm-project/llvm/tools/llvm-mca/CodeRegion.h
+++ b/contrib/llvm-project/llvm/tools/llvm-mca/CodeRegion.h
@@ -63,7 +63,7 @@ class CodeRegion {
public:
CodeRegion(llvm::StringRef Desc, llvm::SMLoc Start)
- : Description(Desc), RangeStart(Start), RangeEnd() {}
+ : Description(Desc), RangeStart(Start) {}
void addInstruction(const llvm::MCInst &Instruction) {
Instructions.emplace_back(Instruction);
diff --git a/contrib/llvm-project/llvm/tools/llvm-mca/PipelinePrinter.h b/contrib/llvm-project/llvm/tools/llvm-mca/PipelinePrinter.h
index fd262f0a8a5d..d89e913f979f 100644
--- a/contrib/llvm-project/llvm/tools/llvm-mca/PipelinePrinter.h
+++ b/contrib/llvm-project/llvm/tools/llvm-mca/PipelinePrinter.h
@@ -53,7 +53,7 @@ class PipelinePrinter {
public:
PipelinePrinter(Pipeline &Pipe, const CodeRegion &R, unsigned Idx,
const MCSubtargetInfo &STI, const PipelineOptions &PO)
- : P(Pipe), Region(R), RegionIdx(Idx), STI(STI), PO(PO), Views() {}
+ : P(Pipe), Region(R), RegionIdx(Idx), STI(STI), PO(PO) {}
void addView(std::unique_ptr<View> V) {
P.addEventListener(V.get());
diff --git a/contrib/llvm-project/llvm/tools/llvm-mca/Views/InstructionInfoView.cpp b/contrib/llvm-project/llvm/tools/llvm-mca/Views/InstructionInfoView.cpp
index 3f6abf4af2cf..caa8554a416a 100644
--- a/contrib/llvm-project/llvm/tools/llvm-mca/Views/InstructionInfoView.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-mca/Views/InstructionInfoView.cpp
@@ -32,14 +32,30 @@ void InstructionInfoView::printView(raw_ostream &OS) const {
TempStream << "\n\nInstruction Info:\n";
TempStream << "[1]: #uOps\n[2]: Latency\n[3]: RThroughput\n"
<< "[4]: MayLoad\n[5]: MayStore\n[6]: HasSideEffects (U)\n";
+ if (PrintBarriers) {
+ TempStream << "[7]: LoadBarrier\n[8]: StoreBarrier\n";
+ }
if (PrintEncodings) {
- TempStream << "[7]: Encoding Size\n";
- TempStream << "\n[1] [2] [3] [4] [5] [6] [7] "
- << "Encodings: Instructions:\n";
+ if (PrintBarriers) {
+ TempStream << "[9]: Encoding Size\n";
+ TempStream << "\n[1] [2] [3] [4] [5] [6] [7] [8] "
+ << "[9] Encodings: Instructions:\n";
+ } else {
+ TempStream << "[7]: Encoding Size\n";
+ TempStream << "\n[1] [2] [3] [4] [5] [6] [7] "
+ << "Encodings: Instructions:\n";
+ }
} else {
- TempStream << "\n[1] [2] [3] [4] [5] [6] Instructions:\n";
+ if (PrintBarriers) {
+ TempStream << "\n[1] [2] [3] [4] [5] [6] [7] [8] "
+ << "Instructions:\n";
+ } else {
+ TempStream << "\n[1] [2] [3] [4] [5] [6] "
+ << "Instructions:\n";
+ }
}
+ int Index = 0;
for (const auto &I : enumerate(zip(IIVD, Source))) {
const InstructionInfoViewData &IIVDEntry = std::get<0>(I.value());
@@ -68,6 +84,13 @@ void InstructionInfoView::printView(raw_ostream &OS) const {
TempStream << (IIVDEntry.mayStore ? " * " : " ");
TempStream << (IIVDEntry.hasUnmodeledSideEffects ? " U " : " ");
+ if (PrintBarriers) {
+ TempStream << (LoweredInsts[Index]->isALoadBarrier() ? " * "
+ : " ");
+ TempStream << (LoweredInsts[Index]->isAStoreBarrier() ? " * "
+ : " ");
+ }
+
if (PrintEncodings) {
StringRef Encoding(CE.getEncoding(I.index()));
unsigned EncodingSize = Encoding.size();
@@ -83,6 +106,7 @@ void InstructionInfoView::printView(raw_ostream &OS) const {
const MCInst &Inst = std::get<1>(I.value());
TempStream << printInstructionString(Inst) << '\n';
+ ++Index;
}
TempStream.flush();
diff --git a/contrib/llvm-project/llvm/tools/llvm-mca/Views/InstructionInfoView.h b/contrib/llvm-project/llvm/tools/llvm-mca/Views/InstructionInfoView.h
index 5d52164e2d50..c35d316775f4 100644
--- a/contrib/llvm-project/llvm/tools/llvm-mca/Views/InstructionInfoView.h
+++ b/contrib/llvm-project/llvm/tools/llvm-mca/Views/InstructionInfoView.h
@@ -54,6 +54,9 @@ class InstructionInfoView : public InstructionView {
const llvm::MCInstrInfo &MCII;
CodeEmitter &CE;
bool PrintEncodings;
+ bool PrintBarriers;
+ using UniqueInst = std::unique_ptr<Instruction>;
+ ArrayRef<UniqueInst> LoweredInsts;
struct InstructionInfoViewData {
unsigned NumMicroOpcodes = 0;
@@ -72,9 +75,12 @@ public:
InstructionInfoView(const llvm::MCSubtargetInfo &ST,
const llvm::MCInstrInfo &II, CodeEmitter &C,
bool ShouldPrintEncodings, llvm::ArrayRef<llvm::MCInst> S,
- llvm::MCInstPrinter &IP)
+ llvm::MCInstPrinter &IP,
+ ArrayRef<UniqueInst> LoweredInsts,
+ bool ShouldPrintBarriers)
: InstructionView(ST, IP, S), MCII(II), CE(C),
- PrintEncodings(ShouldPrintEncodings) {}
+ PrintEncodings(ShouldPrintEncodings),
+ PrintBarriers(ShouldPrintBarriers), LoweredInsts(LoweredInsts) {}
void printView(llvm::raw_ostream &OS) const override;
StringRef getNameAsString() const override { return "InstructionInfoView"; }
diff --git a/contrib/llvm-project/llvm/tools/llvm-mca/llvm-mca.cpp b/contrib/llvm-project/llvm/tools/llvm-mca/llvm-mca.cpp
index 0501336ab207..1826491f3f30 100644
--- a/contrib/llvm-project/llvm/tools/llvm-mca/llvm-mca.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-mca/llvm-mca.cpp
@@ -219,6 +219,11 @@ static cl::opt<bool> ShowEncoding(
cl::desc("Print encoding information in the instruction info view"),
cl::cat(ViewOptions), cl::init(false));
+static cl::opt<bool> ShowBarriers(
+ "show-barriers",
+ cl::desc("Print memory barrier information in the instruction info view"),
+ cl::cat(ViewOptions), cl::init(false));
+
static cl::opt<bool> DisableCustomBehaviour(
"disable-cb",
cl::desc(
@@ -504,7 +509,7 @@ int main(int argc, char **argv) {
// (which does nothing).
IPP = std::make_unique<mca::InstrPostProcess>(*STI, *MCII);
- std::vector<std::unique_ptr<mca::Instruction>> LoweredSequence;
+ SmallVector<std::unique_ptr<mca::Instruction>> LoweredSequence;
for (const MCInst &MCI : Insts) {
Expected<std::unique_ptr<mca::Instruction>> Inst =
IB.createInstruction(MCI);
@@ -548,7 +553,8 @@ int main(int argc, char **argv) {
// Create the views for this pipeline, execute, and emit a report.
if (PrintInstructionInfoView) {
Printer.addView(std::make_unique<mca::InstructionInfoView>(
- *STI, *MCII, CE, ShowEncoding, Insts, *IP));
+ *STI, *MCII, CE, ShowEncoding, Insts, *IP, LoweredSequence,
+ ShowBarriers));
}
Printer.addView(
std::make_unique<mca::ResourcePressureView>(*STI, *IP, Insts));
@@ -624,7 +630,8 @@ int main(int argc, char **argv) {
if (PrintInstructionInfoView)
Printer.addView(std::make_unique<mca::InstructionInfoView>(
- *STI, *MCII, CE, ShowEncoding, Insts, *IP));
+ *STI, *MCII, CE, ShowEncoding, Insts, *IP, LoweredSequence,
+ ShowBarriers));
// Fetch custom Views that are to be placed after the InstructionInfoView.
// Refer to the comment paired with the CB->getStartViews(*IP, Insts); line
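With the new -show-barriers flag enabled, the instruction info view gains two extra columns fed from the lowered instructions; the header layout then looks roughly like this (a sketch of the non-encoding case, derived from the format strings above):

// [1] [2] [3] [4] [5] [6] [7] [8]   Instructions:
//                          |   +---- StoreBarrier
//                          +-------- LoadBarrier
// (columns [1]-[6] keep their existing meaning: #uOps, Latency, RThroughput,
//  MayLoad, MayStore, HasSideEffects)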
diff --git a/contrib/llvm-project/llvm/tools/llvm-modextract/llvm-modextract.cpp b/contrib/llvm-project/llvm/tools/llvm-modextract/llvm-modextract.cpp
index 9a44cbf68d0d..b1d6bfb790ec 100644
--- a/contrib/llvm-project/llvm/tools/llvm-modextract/llvm-modextract.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-modextract/llvm-modextract.cpp
@@ -18,6 +18,7 @@
#include "llvm/Support/Error.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/ToolOutputFile.h"
+#include "llvm/Support/WithColor.h"
using namespace llvm;
diff --git a/contrib/llvm-project/llvm/tools/llvm-nm/llvm-nm.cpp b/contrib/llvm-project/llvm/tools/llvm-nm/llvm-nm.cpp
index 0864985377ce..f1d8b0026429 100644
--- a/contrib/llvm-project/llvm/tools/llvm-nm/llvm-nm.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-nm/llvm-nm.cpp
@@ -31,6 +31,7 @@
#include "llvm/Object/TapiFile.h"
#include "llvm/Object/TapiUniversal.h"
#include "llvm/Object/Wasm.h"
+#include "llvm/Object/XCOFFObjectFile.h"
#include "llvm/Option/Arg.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Option/Option.h"
@@ -127,6 +128,20 @@ static bool HadError = false;
static StringRef ToolName;
+static void warn(Error Err, Twine FileName, Twine Context = Twine()) {
+ assert(Err);
+
+ // Flush the standard output so that the warning isn't interleaved with other
+ // output if stdout and stderr are writing to the same place.
+ outs().flush();
+
+ handleAllErrors(std::move(Err), [&](const ErrorInfoBase &EI) {
+ WithColor::warning(errs(), ToolName)
+ << FileName << ": " << (Context.str().empty() ? "" : Context + ": ")
+ << EI.message() << "\n";
+ });
+}
+
static void error(Twine Message, Twine Path = Twine()) {
HadError = true;
WithColor::error(errs(), ToolName) << Path << ": " << Message << "\n";
@@ -246,6 +261,9 @@ static char isSymbolList64Bit(SymbolicFile &Obj) {
return Triple(IRObj->getTargetTriple()).isArch64Bit();
if (isa<COFFObjectFile>(Obj) || isa<COFFImportFile>(Obj))
return false;
+ if (XCOFFObjectFile *XCOFFObj = dyn_cast<XCOFFObjectFile>(&Obj))
+ return XCOFFObj->is64Bit();
+
if (isa<WasmObjectFile>(Obj))
return false;
if (TapiFile *Tapi = dyn_cast<TapiFile>(&Obj))
@@ -599,18 +617,30 @@ static void darwinPrintStab(MachOObjectFile *MachO, const NMSymbol &S) {
outs() << format(" %02x", NType);
}
-static Optional<std::string> demangle(const std::string &Name,
- bool StripUnderscore) {
- const char *Mangled = Name.c_str();
- if (StripUnderscore && Mangled[0] == '_')
- Mangled = Mangled + 1;
-
+static Optional<std::string> demangle(StringRef Name) {
std::string Demangled;
- if (nonMicrosoftDemangle(Mangled, Demangled))
+ if (nonMicrosoftDemangle(Name.str().c_str(), Demangled))
return Demangled;
return None;
}
+static Optional<std::string> demangleXCOFF(StringRef Name) {
+ if (Name.empty() || Name[0] != '.')
+ return demangle(Name);
+
+ Name = Name.drop_front();
+ Optional<std::string> DemangledName = demangle(Name);
+ if (DemangledName)
+ return "." + *DemangledName;
+ return None;
+}
+
+static Optional<std::string> demangleMachO(StringRef Name) {
+ if (!Name.empty() && Name[0] == '_')
+ Name = Name.drop_front();
+ return demangle(Name);
+}
+
static bool symbolIsDefined(const NMSymbol &Sym) {
return Sym.TypeChar != 'U' && Sym.TypeChar != 'w' && Sym.TypeChar != 'v';
}
@@ -699,7 +729,12 @@ static void sortAndPrintSymbolList(SymbolicFile &Obj, bool printName,
std::string Name = S.Name;
MachOObjectFile *MachO = dyn_cast<MachOObjectFile>(&Obj);
if (Demangle) {
- if (Optional<std::string> Opt = demangle(S.Name, MachO))
+ function_ref<Optional<std::string>(StringRef)> Fn = ::demangle;
+ if (Obj.isXCOFF())
+ Fn = demangleXCOFF;
+ if (Obj.isMachO())
+ Fn = demangleMachO;
+ if (Optional<std::string> Opt = Fn(S.Name))
Name = *Opt;
}
if (S.Sym.getRawDataRefImpl().p) {
@@ -897,6 +932,42 @@ static char getSymbolNMTypeChar(COFFObjectFile &Obj, symbol_iterator I) {
return '?';
}
+static char getSymbolNMTypeChar(XCOFFObjectFile &Obj, symbol_iterator I) {
+ Expected<uint32_t> TypeOrErr = I->getType();
+ if (!TypeOrErr) {
+ warn(TypeOrErr.takeError(), Obj.getFileName(),
+ "for symbol with index " +
+ Twine(Obj.getSymbolIndex(I->getRawDataRefImpl().p)));
+ return '?';
+ }
+
+ uint32_t SymType = *TypeOrErr;
+
+ if (SymType == SymbolRef::ST_File)
+ return 'f';
+
+ // If the I->getSection() call would return an error, the earlier I->getType()
+ // call will already have returned the same error first.
+ section_iterator SecIter = cantFail(I->getSection());
+
+ if (SecIter == Obj.section_end())
+ return '?';
+
+ if (Obj.isDebugSection(SecIter->getRawDataRefImpl()))
+ return 'N';
+
+ if (SecIter->isText())
+ return 't';
+
+ if (SecIter->isData())
+ return 'd';
+
+ if (SecIter->isBSS())
+ return 'b';
+
+ return '?';
+}
+
static char getSymbolNMTypeChar(COFFImportFile &Obj) {
switch (Obj.getCOFFImportHeader()->getType()) {
case COFF::IMPORT_CODE:
@@ -1045,6 +1116,8 @@ static char getNMSectionTagAndName(SymbolicFile &Obj, basic_symbol_iterator I,
Ret = getSymbolNMTypeChar(*IR, I);
else if (COFFObjectFile *COFF = dyn_cast<COFFObjectFile>(&Obj))
Ret = getSymbolNMTypeChar(*COFF, I);
+ else if (XCOFFObjectFile *XCOFF = dyn_cast<XCOFFObjectFile>(&Obj))
+ Ret = getSymbolNMTypeChar(*XCOFF, I);
else if (COFFImportFile *COFFImport = dyn_cast<COFFImportFile>(&Obj))
Ret = getSymbolNMTypeChar(*COFFImport);
else if (MachOObjectFile *MachO = dyn_cast<MachOObjectFile>(&Obj))
@@ -1631,6 +1704,11 @@ static void dumpSymbolNamesFromObject(SymbolicFile &Obj, bool printName,
S.Address = 0;
if (isa<ELFObjectFileBase>(&Obj))
S.Size = ELFSymbolRef(Sym).getSize();
+
+ if (const XCOFFObjectFile *XCOFFObj =
+ dyn_cast<const XCOFFObjectFile>(&Obj))
+ S.Size = XCOFFObj->getSymbolSize(Sym.getRawDataRefImpl());
+
if (PrintAddress && isa<ObjectFile>(Obj)) {
SymbolRef SymRef(Sym);
Expected<uint64_t> AddressOrErr = SymRef.getAddress();
diff --git a/contrib/llvm-project/llvm/tools/llvm-objcopy/COFF/COFFConfig.h b/contrib/llvm-project/llvm/tools/llvm-objcopy/COFF/COFFConfig.h
index 3897ff47724b..7bf673fa4af9 100644
--- a/contrib/llvm-project/llvm/tools/llvm-objcopy/COFF/COFFConfig.h
+++ b/contrib/llvm-project/llvm/tools/llvm-objcopy/COFF/COFFConfig.h
@@ -9,11 +9,17 @@
#ifndef LLVM_TOOLS_LLVM_OBJCOPY_COFF_COFFCONFIG_H
#define LLVM_TOOLS_LLVM_OBJCOPY_COFF_COFFCONFIG_H
+#include "llvm/ADT/Optional.h"
+
namespace llvm {
namespace objcopy {
// Coff specific configuration for copying/stripping a single file.
-struct COFFConfig {};
+struct COFFConfig {
+ Optional<unsigned> Subsystem;
+ Optional<unsigned> MajorSubsystemVersion;
+ Optional<unsigned> MinorSubsystemVersion;
+};
} // namespace objcopy
} // namespace llvm
diff --git a/contrib/llvm-project/llvm/tools/llvm-objcopy/COFF/COFFObjcopy.cpp b/contrib/llvm-project/llvm/tools/llvm-objcopy/COFF/COFFObjcopy.cpp
index 38c9cd09433b..e0039cd3a675 100644
--- a/contrib/llvm-project/llvm/tools/llvm-objcopy/COFF/COFFObjcopy.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-objcopy/COFF/COFFObjcopy.cpp
@@ -130,7 +130,8 @@ static uint32_t flagsToCharacteristics(SectionFlag AllFlags, uint32_t OldChar) {
return NewCharacteristics;
}
-static Error handleArgs(const CommonConfig &Config, Object &Obj) {
+static Error handleArgs(const CommonConfig &Config,
+ const COFFConfig &COFFConfig, Object &Obj) {
// Perform the actual section removals.
Obj.removeSections([&Config](const Section &Sec) {
// Contrary to --only-keep-debug, --only-section fully removes sections that
@@ -256,18 +257,34 @@ static Error handleArgs(const CommonConfig &Config, Object &Obj) {
if (Error E = addGnuDebugLink(Obj, Config.AddGnuDebugLink))
return E;
+ if (COFFConfig.Subsystem || COFFConfig.MajorSubsystemVersion ||
+ COFFConfig.MinorSubsystemVersion) {
+ if (!Obj.IsPE)
+ return createStringError(
+ errc::invalid_argument,
+ "'" + Config.OutputFilename +
+ "': unable to set subsystem on a relocatable object file");
+ if (COFFConfig.Subsystem)
+ Obj.PeHeader.Subsystem = *COFFConfig.Subsystem;
+ if (COFFConfig.MajorSubsystemVersion)
+ Obj.PeHeader.MajorSubsystemVersion = *COFFConfig.MajorSubsystemVersion;
+ if (COFFConfig.MinorSubsystemVersion)
+ Obj.PeHeader.MinorSubsystemVersion = *COFFConfig.MinorSubsystemVersion;
+ }
+
return Error::success();
}
-Error executeObjcopyOnBinary(const CommonConfig &Config, const COFFConfig &,
- COFFObjectFile &In, raw_ostream &Out) {
+Error executeObjcopyOnBinary(const CommonConfig &Config,
+ const COFFConfig &COFFConfig, COFFObjectFile &In,
+ raw_ostream &Out) {
COFFReader Reader(In);
Expected<std::unique_ptr<Object>> ObjOrErr = Reader.create();
if (!ObjOrErr)
return createFileError(Config.InputFilename, ObjOrErr.takeError());
Object *Obj = ObjOrErr->get();
assert(Obj && "Unable to deserialize COFF object");
- if (Error E = handleArgs(Config, *Obj))
+ if (Error E = handleArgs(Config, COFFConfig, *Obj))
return createFileError(Config.InputFilename, std::move(E));
COFFWriter Writer(*Obj, Out);
if (Error E = Writer.write())
diff --git a/contrib/llvm-project/llvm/tools/llvm-objcopy/ConfigManager.cpp b/contrib/llvm-project/llvm/tools/llvm-objcopy/ConfigManager.cpp
index 2e5cf9357a52..90730c421a46 100644
--- a/contrib/llvm-project/llvm/tools/llvm-objcopy/ConfigManager.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-objcopy/ConfigManager.cpp
@@ -11,6 +11,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
+#include "llvm/BinaryFormat/COFF.h"
#include "llvm/Option/Arg.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/CRC.h"
@@ -675,6 +676,7 @@ objcopy::parseObjcopyOptions(ArrayRef<const char *> RawArgsArr,
ConfigManager ConfigMgr;
CommonConfig &Config = ConfigMgr.Common;
+ COFFConfig &COFFConfig = ConfigMgr.COFF;
ELFConfig &ELFConfig = ConfigMgr.ELF;
MachOConfig &MachOConfig = ConfigMgr.MachO;
Config.InputFilename = Positional[0];
@@ -733,6 +735,46 @@ objcopy::parseObjcopyOptions(ArrayRef<const char *> RawArgsArr,
VisibilityStr.str().c_str());
}
+ for (const auto *Arg : InputArgs.filtered(OBJCOPY_subsystem)) {
+ StringRef Subsystem, Version;
+ std::tie(Subsystem, Version) = StringRef(Arg->getValue()).split(':');
+ COFFConfig.Subsystem =
+ StringSwitch<unsigned>(Subsystem.lower())
+ .Case("boot_application",
+ COFF::IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION)
+ .Case("console", COFF::IMAGE_SUBSYSTEM_WINDOWS_CUI)
+ .Case("efi_application", COFF::IMAGE_SUBSYSTEM_EFI_APPLICATION)
+ .Case("efi_boot_service_driver",
+ COFF::IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER)
+ .Case("efi_rom", COFF::IMAGE_SUBSYSTEM_EFI_ROM)
+ .Case("efi_runtime_driver",
+ COFF::IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER)
+ .Case("native", COFF::IMAGE_SUBSYSTEM_NATIVE)
+ .Case("posix", COFF::IMAGE_SUBSYSTEM_POSIX_CUI)
+ .Case("windows", COFF::IMAGE_SUBSYSTEM_WINDOWS_GUI)
+ .Default(COFF::IMAGE_SUBSYSTEM_UNKNOWN);
+ if (*COFFConfig.Subsystem == COFF::IMAGE_SUBSYSTEM_UNKNOWN)
+ return createStringError(errc::invalid_argument,
+ "'%s' is not a valid subsystem",
+ Subsystem.str().c_str());
+ if (!Version.empty()) {
+ StringRef Major, Minor;
+ std::tie(Major, Minor) = Version.split('.');
+ unsigned Number;
+ if (Major.getAsInteger(10, Number))
+ return createStringError(errc::invalid_argument,
+ "'%s' is not a valid subsystem major version",
+ Major.str().c_str());
+ COFFConfig.MajorSubsystemVersion = Number;
+ Number = 0;
+ if (!Minor.empty() && Minor.getAsInteger(10, Number))
+ return createStringError(errc::invalid_argument,
+ "'%s' is not a valid subsystem minor version",
+ Minor.str().c_str());
+ COFFConfig.MinorSubsystemVersion = Number;
+ }
+ }
+
Config.OutputFormat = StringSwitch<FileFormat>(OutputFormat)
.Case("binary", FileFormat::Binary)
.Case("ihex", FileFormat::IHex)
diff --git a/contrib/llvm-project/llvm/tools/llvm-objcopy/ELF/ELFObjcopy.cpp b/contrib/llvm-project/llvm/tools/llvm-objcopy/ELF/ELFObjcopy.cpp
index 16de84a961b5..f8521fa0d5b7 100644
--- a/contrib/llvm-project/llvm/tools/llvm-objcopy/ELF/ELFObjcopy.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-objcopy/ELF/ELFObjcopy.cpp
@@ -227,6 +227,41 @@ static Error replaceDebugSections(
return Obj.replaceSections(FromTo);
}
+static bool isAArch64MappingSymbol(const Symbol &Sym) {
+ if (Sym.Binding != STB_LOCAL || Sym.Type != STT_NOTYPE ||
+ Sym.getShndx() == SHN_UNDEF)
+ return false;
+ StringRef Name = Sym.Name;
+ if (!Name.consume_front("$x") && !Name.consume_front("$d"))
+ return false;
+ return Name.empty() || Name.startswith(".");
+}
+
+static bool isArmMappingSymbol(const Symbol &Sym) {
+ if (Sym.Binding != STB_LOCAL || Sym.Type != STT_NOTYPE ||
+ Sym.getShndx() == SHN_UNDEF)
+ return false;
+ StringRef Name = Sym.Name;
+ if (!Name.consume_front("$a") && !Name.consume_front("$d") &&
+ !Name.consume_front("$t"))
+ return false;
+ return Name.empty() || Name.startswith(".");
+}
+
+// Check if the symbol should be preserved because it is required by ABI.
+static bool isRequiredByABISymbol(const Object &Obj, const Symbol &Sym) {
+ switch (Obj.Machine) {
+ case EM_AARCH64:
+ // Mapping symbols should be preserved for a relocatable object file.
+ return Obj.isRelocatable() && isAArch64MappingSymbol(Sym);
+ case EM_ARM:
+ // Mapping symbols should be preserved for a relocatable object file.
+ return Obj.isRelocatable() && isArmMappingSymbol(Sym);
+ default:
+ return false;
+ }
+}
+
static bool isUnneededSymbol(const Symbol &Sym) {
return !Sym.Referenced &&
(Sym.Binding == STB_LOCAL || Sym.getShndx() == SHN_UNDEF) &&
@@ -297,20 +332,23 @@ static Error updateAndRemoveSymbols(const CommonConfig &Config,
(ELFConfig.KeepFileSymbols && Sym.Type == STT_FILE))
return false;
- if ((Config.DiscardMode == DiscardType::All ||
- (Config.DiscardMode == DiscardType::Locals &&
- StringRef(Sym.Name).startswith(".L"))) &&
- Sym.Binding == STB_LOCAL && Sym.getShndx() != SHN_UNDEF &&
- Sym.Type != STT_FILE && Sym.Type != STT_SECTION)
+ if (Config.SymbolsToRemove.matches(Sym.Name))
return true;
if (Config.StripAll || Config.StripAllGNU)
return true;
+ if (isRequiredByABISymbol(Obj, Sym))
+ return false;
+
if (Config.StripDebug && Sym.Type == STT_FILE)
return true;
- if (Config.SymbolsToRemove.matches(Sym.Name))
+ if ((Config.DiscardMode == DiscardType::All ||
+ (Config.DiscardMode == DiscardType::Locals &&
+ StringRef(Sym.Name).startswith(".L"))) &&
+ Sym.Binding == STB_LOCAL && Sym.getShndx() != SHN_UNDEF &&
+ Sym.Type != STT_FILE && Sym.Type != STT_SECTION)
return true;
if ((Config.StripUnneeded ||
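The mapping-symbol predicates above accept the bare AArch64/Arm prefixes as well as prefixes followed by a '.'-separated suffix, and isRequiredByABISymbol only applies to relocatable objects. A few concrete names, as a sketch of the intended classification (assuming local, untyped, defined symbols in an AArch64 relocatable object):

// "$x", "$d", "$x.foo", "$d.42"  -> treated as mapping symbols and kept by the
//                                   discard-locals / strip-unneeded paths
// "$t.1"                         -> Arm-only prefix, not matched for AArch64
// "$xenon"                       -> no '.' after the prefix, removed as usual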
diff --git a/contrib/llvm-project/llvm/tools/llvm-objcopy/ELF/Object.h b/contrib/llvm-project/llvm/tools/llvm-objcopy/ELF/Object.h
index 439380fc725b..681ab8f56381 100644
--- a/contrib/llvm-project/llvm/tools/llvm-objcopy/ELF/Object.h
+++ b/contrib/llvm-project/llvm/tools/llvm-objcopy/ELF/Object.h
@@ -934,8 +934,7 @@ class BinaryELFBuilder : public BasicELFBuilder {
public:
BinaryELFBuilder(MemoryBuffer *MB, uint8_t NewSymbolVisibility)
- : BasicELFBuilder(), MemBuf(MB),
- NewSymbolVisibility(NewSymbolVisibility) {}
+ : MemBuf(MB), NewSymbolVisibility(NewSymbolVisibility) {}
Expected<std::unique_ptr<Object>> build();
};
@@ -946,8 +945,7 @@ class IHexELFBuilder : public BasicELFBuilder {
void addDataSections();
public:
- IHexELFBuilder(const std::vector<IHexRecord> &Records)
- : BasicELFBuilder(), Records(Records) {}
+ IHexELFBuilder(const std::vector<IHexRecord> &Records) : Records(Records) {}
Expected<std::unique_ptr<Object>> build();
};
diff --git a/contrib/llvm-project/llvm/tools/llvm-objcopy/MachO/MachOObjcopy.cpp b/contrib/llvm-project/llvm/tools/llvm-objcopy/MachO/MachOObjcopy.cpp
index 915394b65b12..0f92ca516bef 100644
--- a/contrib/llvm-project/llvm/tools/llvm-objcopy/MachO/MachOObjcopy.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-objcopy/MachO/MachOObjcopy.cpp
@@ -317,6 +317,52 @@ static Error addSection(StringRef SecName, StringRef Filename, Object &Obj) {
return Error::success();
}
+static Expected<Section &> findSection(StringRef SecName, Object &O) {
+ StringRef SegName;
+ std::tie(SegName, SecName) = SecName.split(",");
+ auto FoundSeg =
+ llvm::find_if(O.LoadCommands, [SegName](const LoadCommand &LC) {
+ return LC.getSegmentName() == SegName;
+ });
+ if (FoundSeg == O.LoadCommands.end())
+ return createStringError(errc::invalid_argument,
+ "could not find segment with name '%s'",
+ SegName.str().c_str());
+ auto FoundSec = llvm::find_if(FoundSeg->Sections,
+ [SecName](const std::unique_ptr<Section> &Sec) {
+ return Sec->Sectname == SecName;
+ });
+ if (FoundSec == FoundSeg->Sections.end())
+ return createStringError(errc::invalid_argument,
+ "could not find section with name '%s'",
+ SecName.str().c_str());
+
+ assert(FoundSec->get()->CanonicalName == (SegName + "," + SecName).str());
+ return *FoundSec->get();
+}
+
+static Error updateSection(StringRef SecName, StringRef Filename, Object &O) {
+ Expected<Section &> SecToUpdateOrErr = findSection(SecName, O);
+
+ if (!SecToUpdateOrErr)
+ return SecToUpdateOrErr.takeError();
+ Section &Sec = *SecToUpdateOrErr;
+
+ ErrorOr<std::unique_ptr<MemoryBuffer>> BufOrErr =
+ MemoryBuffer::getFile(Filename);
+ if (!BufOrErr)
+ return createFileError(Filename, errorCodeToError(BufOrErr.getError()));
+ std::unique_ptr<MemoryBuffer> Buf = std::move(*BufOrErr);
+
+ if (Buf->getBufferSize() > Sec.Size)
+ return createStringError(
+ errc::invalid_argument,
+ "new section cannot be larger than previous section");
+ Sec.Content = O.NewSectionsContents.save(Buf->getBuffer());
+ Sec.Size = Sec.Content.size();
+ return Error::success();
+}
+
// isValidMachOCannonicalName returns success if Name is a MachO canonical name
// ("<segment>,<section>") and lengths of both segment and section names are
// valid.
@@ -374,6 +420,16 @@ static Error handleArgs(const CommonConfig &Config,
return E;
}
+ for (const auto &Flag : Config.UpdateSection) {
+ StringRef SectionName;
+ StringRef FileName;
+ std::tie(SectionName, FileName) = Flag.split('=');
+ if (Error E = isValidMachOCannonicalName(SectionName))
+ return E;
+ if (Error E = updateSection(SectionName, FileName, Obj))
+ return E;
+ }
+
if (Error E = processLoadCommands(MachOConfig, Obj))
return E;
diff --git a/contrib/llvm-project/llvm/tools/llvm-objcopy/MachO/MachOWriter.cpp b/contrib/llvm-project/llvm/tools/llvm-objcopy/MachO/MachOWriter.cpp
index 688945afe944..52f20794cc57 100644
--- a/contrib/llvm-project/llvm/tools/llvm-objcopy/MachO/MachOWriter.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-objcopy/MachO/MachOWriter.cpp
@@ -614,7 +614,7 @@ void MachOWriter::writeExportsTrieData() {
}
void MachOWriter::writeTail() {
- typedef void (MachOWriter::*WriteHandlerType)(void);
+ typedef void (MachOWriter::*WriteHandlerType)();
typedef std::pair<uint64_t, WriteHandlerType> WriteOperation;
SmallVector<WriteOperation, 7> Queue;
diff --git a/contrib/llvm-project/llvm/tools/llvm-objcopy/ObjcopyOpts.td b/contrib/llvm-project/llvm/tools/llvm-objcopy/ObjcopyOpts.td
index bc624442aa51..bfd66caf41ed 100644
--- a/contrib/llvm-project/llvm/tools/llvm-objcopy/ObjcopyOpts.td
+++ b/contrib/llvm-project/llvm/tools/llvm-objcopy/ObjcopyOpts.td
@@ -105,6 +105,11 @@ defm strip_unneeded_symbols
"if they are not needed by relocations">,
MetaVarName<"filename">;
+defm subsystem
+ : Eq<"subsystem",
+ "Set PE subsystem and version">,
+ MetaVarName<"name[:version]">;
+
def extract_dwo
: Flag<["--"], "extract-dwo">,
HelpText<
diff --git a/contrib/llvm-project/llvm/tools/llvm-objdump/COFFDump.h b/contrib/llvm-project/llvm/tools/llvm-objdump/COFFDump.h
index f933f79523a0..ffd39671debe 100644
--- a/contrib/llvm-project/llvm/tools/llvm-objdump/COFFDump.h
+++ b/contrib/llvm-project/llvm/tools/llvm-objdump/COFFDump.h
@@ -18,7 +18,6 @@ class Error;
namespace object {
class COFFObjectFile;
class COFFImportFile;
-class ObjectFile;
class RelocationRef;
} // namespace object
diff --git a/contrib/llvm-project/llvm/tools/llvm-objdump/MachODump.cpp b/contrib/llvm-project/llvm/tools/llvm-objdump/MachODump.cpp
index b0cf1f775ced..31867625f0e5 100644
--- a/contrib/llvm-project/llvm/tools/llvm-objdump/MachODump.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-objdump/MachODump.cpp
@@ -917,10 +917,10 @@ static void PrintRelocationEntries(const MachOObjectFile *O,
else {
SymbolRef Symbol = *O->getSymbolByIndex(r_symbolnum);
Expected<StringRef> SymNameNext = Symbol.getName();
- const char *name = NULL;
+ const char *name = nullptr;
if (SymNameNext)
name = SymNameNext->data();
- if (name == NULL)
+ if (name == nullptr)
outs() << format("?(%d)\n", r_symbolnum);
else
outs() << name << "\n";
@@ -10231,12 +10231,12 @@ static void PrintMachHeader(const MachOObjectFile *Obj, bool verbose) {
}
void objdump::printMachOFileHeader(const object::ObjectFile *Obj) {
- const MachOObjectFile *file = dyn_cast<const MachOObjectFile>(Obj);
+ const MachOObjectFile *file = cast<const MachOObjectFile>(Obj);
PrintMachHeader(file, Verbose);
}
void objdump::printMachOLoadCommands(const object::ObjectFile *Obj) {
- const MachOObjectFile *file = dyn_cast<const MachOObjectFile>(Obj);
+ const MachOObjectFile *file = cast<const MachOObjectFile>(Obj);
uint32_t filetype = 0;
uint32_t cputype = 0;
if (file->is64Bit()) {
diff --git a/contrib/llvm-project/llvm/tools/llvm-objdump/SourcePrinter.h b/contrib/llvm-project/llvm/tools/llvm-objdump/SourcePrinter.h
index 21d5bdcf8a49..31d46e3108f6 100644
--- a/contrib/llvm-project/llvm/tools/llvm-objdump/SourcePrinter.h
+++ b/contrib/llvm-project/llvm/tools/llvm-objdump/SourcePrinter.h
@@ -80,7 +80,7 @@ class LiveVariablePrinter {
public:
LiveVariablePrinter(const MCRegisterInfo &MRI, const MCSubtargetInfo &STI)
- : LiveVariables(), ActiveCols(Column()), MRI(MRI), STI(STI) {}
+ : ActiveCols(Column()), MRI(MRI), STI(STI) {}
void dump() const;
diff --git a/contrib/llvm-project/llvm/tools/llvm-objdump/WasmDump.cpp b/contrib/llvm-project/llvm/tools/llvm-objdump/WasmDump.cpp
index 28311361d97e..df0a08e5b1dd 100644
--- a/contrib/llvm-project/llvm/tools/llvm-objdump/WasmDump.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-objdump/WasmDump.cpp
@@ -20,7 +20,7 @@ using namespace llvm;
using namespace llvm::object;
void objdump::printWasmFileHeader(const object::ObjectFile *Obj) {
- const auto *File = dyn_cast<const WasmObjectFile>(Obj);
+ const auto *File = cast<const WasmObjectFile>(Obj);
outs() << "Program Header:\n";
outs() << "Version: 0x";
diff --git a/contrib/llvm-project/llvm/tools/llvm-objdump/llvm-objdump.cpp b/contrib/llvm-project/llvm/tools/llvm-objdump/llvm-objdump.cpp
index a16f760cd1cb..6b238fa01d25 100644
--- a/contrib/llvm-project/llvm/tools/llvm-objdump/llvm-objdump.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-objdump/llvm-objdump.cpp
@@ -2715,7 +2715,7 @@ int main(int argc, char **argv) {
return 0;
}
if (InputArgs.hasArg(HelpHiddenFlag)) {
- T->printHelp(ToolName, /*show_hidden=*/true);
+ T->printHelp(ToolName, /*ShowHidden=*/true);
return 0;
}
diff --git a/contrib/llvm-project/llvm/tools/llvm-objdump/llvm-objdump.h b/contrib/llvm-project/llvm/tools/llvm-objdump/llvm-objdump.h
index 864a9920efbe..61b6215aa5f5 100644
--- a/contrib/llvm-project/llvm/tools/llvm-objdump/llvm-objdump.h
+++ b/contrib/llvm-project/llvm/tools/llvm-objdump/llvm-objdump.h
@@ -21,10 +21,6 @@ class StringRef;
class Twine;
namespace object {
-class ELFObjectFileBase;
-class ELFSectionRef;
-class MachOObjectFile;
-class MachOUniversalBinary;
class RelocationRef;
struct VersionEntry;
} // namespace object
diff --git a/contrib/llvm-project/llvm/tools/llvm-pdbutil/DumpOutputStyle.h b/contrib/llvm-project/llvm/tools/llvm-pdbutil/DumpOutputStyle.h
index 796cd7a10c36..041fb93a18a5 100644
--- a/contrib/llvm-project/llvm/tools/llvm-pdbutil/DumpOutputStyle.h
+++ b/contrib/llvm-project/llvm/tools/llvm-pdbutil/DumpOutputStyle.h
@@ -21,12 +21,6 @@
#include <string>
namespace llvm {
-class BitVector;
-
-namespace codeview {
-class LazyRandomTypeCollection;
-}
-
namespace object {
class COFFObjectFile;
}
diff --git a/contrib/llvm-project/llvm/tools/llvm-pdbutil/InputFile.h b/contrib/llvm-project/llvm/tools/llvm-pdbutil/InputFile.h
index a5d2897f5600..633ab34a54d4 100644
--- a/contrib/llvm-project/llvm/tools/llvm-pdbutil/InputFile.h
+++ b/contrib/llvm-project/llvm/tools/llvm-pdbutil/InputFile.h
@@ -26,7 +26,6 @@ class LazyRandomTypeCollection;
}
namespace object {
class COFFObjectFile;
-class SectionRef;
} // namespace object
namespace pdb {
diff --git a/contrib/llvm-project/llvm/tools/llvm-pdbutil/LinePrinter.h b/contrib/llvm-project/llvm/tools/llvm-pdbutil/LinePrinter.h
index aa8159c0e094..b6bb77280fd5 100644
--- a/contrib/llvm-project/llvm/tools/llvm-pdbutil/LinePrinter.h
+++ b/contrib/llvm-project/llvm/tools/llvm-pdbutil/LinePrinter.h
@@ -20,7 +20,6 @@
#include <list>
namespace llvm {
-class BinaryStreamReader;
namespace msf {
class MSFStreamLayout;
} // namespace msf
diff --git a/contrib/llvm-project/llvm/tools/llvm-pdbutil/MinimalSymbolDumper.cpp b/contrib/llvm-project/llvm/tools/llvm-pdbutil/MinimalSymbolDumper.cpp
index 3e8b1e88657b..e6b5d21f36e5 100644
--- a/contrib/llvm-project/llvm/tools/llvm-pdbutil/MinimalSymbolDumper.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-pdbutil/MinimalSymbolDumper.cpp
@@ -207,6 +207,7 @@ static std::string formatSourceLanguage(SourceLanguage Lang) {
RETURN_CASE(SourceLanguage, HLSL, "hlsl");
RETURN_CASE(SourceLanguage, D, "d");
RETURN_CASE(SourceLanguage, Swift, "swift");
+ RETURN_CASE(SourceLanguage, Rust, "rust");
}
return formatUnknownEnum(Lang);
}
@@ -585,8 +586,7 @@ Error MinimalSymbolDumper::visitKnownRecord(CVSymbol &CVR,
AutoIndent Indent(P, 7);
P.formatLine("offset = {0}, range = {1}", Def.Hdr.Offset,
formatRange(Def.Range));
- P.formatLine("gaps = {2}", Def.Hdr.Offset,
- formatGaps(P.getIndentLevel() + 9, Def.Gaps));
+ P.formatLine("gaps = [{0}]", formatGaps(P.getIndentLevel() + 9, Def.Gaps));
return Error::success();
}
@@ -598,7 +598,7 @@ Error MinimalSymbolDumper::visitKnownRecord(CVSymbol &CVR,
formatRegisterId(Def.Hdr.Register, CompilationCPU),
int32_t(Def.Hdr.BasePointerOffset), Def.offsetInParent(),
Def.hasSpilledUDTMember());
- P.formatLine("range = {0}, gaps = {1}", formatRange(Def.Range),
+ P.formatLine("range = {0}, gaps = [{1}]", formatRange(Def.Range),
formatGaps(P.getIndentLevel() + 9, Def.Gaps));
return Error::success();
}
@@ -625,7 +625,7 @@ Error MinimalSymbolDumper::visitKnownRecord(CVSymbol &CVR,
P.formatLine("register = {0}, may have no name = {1}, offset in parent = {2}",
formatRegisterId(Def.Hdr.Register, CompilationCPU), NoName,
uint32_t(Def.Hdr.OffsetInParent));
- P.formatLine("range = {0}, gaps = {1}", formatRange(Def.Range),
+ P.formatLine("range = {0}, gaps = [{1}]", formatRange(Def.Range),
formatGaps(P.getIndentLevel() + 9, Def.Gaps));
return Error::success();
}
@@ -635,7 +635,7 @@ Error MinimalSymbolDumper::visitKnownRecord(CVSymbol &CVR,
AutoIndent Indent(P, 7);
P.formatLine("program = {0}, offset in parent = {1}, range = {2}",
Def.Program, Def.OffsetInParent, formatRange(Def.Range));
- P.formatLine("gaps = {0}", formatGaps(P.getIndentLevel() + 9, Def.Gaps));
+ P.formatLine("gaps = [{0}]", formatGaps(P.getIndentLevel() + 9, Def.Gaps));
return Error::success();
}
@@ -643,7 +643,7 @@ Error MinimalSymbolDumper::visitKnownRecord(CVSymbol &CVR, DefRangeSym &Def) {
AutoIndent Indent(P, 7);
P.formatLine("program = {0}, range = {1}", Def.Program,
formatRange(Def.Range));
- P.formatLine("gaps = {0}", formatGaps(P.getIndentLevel() + 9, Def.Gaps));
+ P.formatLine("gaps = [{0}]", formatGaps(P.getIndentLevel() + 9, Def.Gaps));
return Error::success();
}
diff --git a/contrib/llvm-project/llvm/tools/llvm-pdbutil/OutputStyle.h b/contrib/llvm-project/llvm/tools/llvm-pdbutil/OutputStyle.h
index 40b0de8bdf72..da93c32053f3 100644
--- a/contrib/llvm-project/llvm/tools/llvm-pdbutil/OutputStyle.h
+++ b/contrib/llvm-project/llvm/tools/llvm-pdbutil/OutputStyle.h
@@ -13,7 +13,6 @@
namespace llvm {
namespace pdb {
-class PDBFile;
class OutputStyle {
public:
diff --git a/contrib/llvm-project/llvm/tools/llvm-pdbutil/PdbYaml.h b/contrib/llvm-project/llvm/tools/llvm-pdbutil/PdbYaml.h
index 2c2878c16546..c335eef2f1c5 100644
--- a/contrib/llvm-project/llvm/tools/llvm-pdbutil/PdbYaml.h
+++ b/contrib/llvm-project/llvm/tools/llvm-pdbutil/PdbYaml.h
@@ -27,13 +27,9 @@
#include <vector>
namespace llvm {
-namespace codeview {
-class DebugStringTableSubsection;
-}
namespace pdb {
namespace yaml {
-struct SerializationContext;
struct MSFHeaders {
msf::SuperBlock SuperBlock;
diff --git a/contrib/llvm-project/llvm/tools/llvm-pdbutil/YAMLOutputStyle.h b/contrib/llvm-project/llvm/tools/llvm-pdbutil/YAMLOutputStyle.h
index 7a50af1abe3f..5d53e0b65d03 100644
--- a/contrib/llvm-project/llvm/tools/llvm-pdbutil/YAMLOutputStyle.h
+++ b/contrib/llvm-project/llvm/tools/llvm-pdbutil/YAMLOutputStyle.h
@@ -17,7 +17,6 @@
namespace llvm {
namespace pdb {
-class ModuleDebugStreamRef;
class YAMLOutputStyle : public OutputStyle {
public:
diff --git a/contrib/llvm-project/llvm/tools/llvm-profdata/llvm-profdata.cpp b/contrib/llvm-project/llvm/tools/llvm-profdata/llvm-profdata.cpp
index 285b41f57147..5e58c1365d80 100644
--- a/contrib/llvm-project/llvm/tools/llvm-profdata/llvm-profdata.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-profdata/llvm-profdata.cpp
@@ -204,8 +204,8 @@ struct WriterContext {
WriterContext(bool IsSparse, std::mutex &ErrLock,
SmallSet<instrprof_error, 4> &WriterErrorCodes)
- : Lock(), Writer(IsSparse), Errors(), ErrLock(ErrLock),
- WriterErrorCodes(WriterErrorCodes) {}
+ : Writer(IsSparse), ErrLock(ErrLock), WriterErrorCodes(WriterErrorCodes) {
+ }
};
/// Compute the overlap between profile BaseFilename and TestFileName,
@@ -661,8 +661,8 @@ static void populateProfileSymbolList(MemoryBuffer *Buffer,
StringRef Data = Buffer->getBuffer();
Data.split(SymbolVec, '\n', /*MaxSplit=*/-1, /*KeepEmpty=*/false);
- for (StringRef symbol : SymbolVec)
- PSL.add(symbol);
+ for (StringRef SymbolStr : SymbolVec)
+ PSL.add(SymbolStr.trim());
}
static void handleExtBinaryWriter(sampleprof::SampleProfileWriter &Writer,
@@ -968,7 +968,7 @@ static int merge_main(int argc, const char *argv[]) {
"gen-cs-nested-profile", cl::Hidden, cl::init(false),
cl::desc("Generate nested function profiles for CSSPGO"));
cl::opt<std::string> DebugInfoFilename(
- "debug-info", cl::init(""), cl::Hidden,
+ "debug-info", cl::init(""),
cl::desc("Use the provided debug info to correlate the raw profile."));
cl::ParseCommandLineOptions(argc, argv, "LLVM profile data merger\n");
@@ -2303,8 +2303,7 @@ struct HotFuncInfo {
uint64_t EntryCount;
HotFuncInfo()
- : FuncName(), TotalCount(0), TotalCountPercent(0.0f), MaxCount(0),
- EntryCount(0) {}
+ : TotalCount(0), TotalCountPercent(0.0f), MaxCount(0), EntryCount(0) {}
HotFuncInfo(StringRef FN, uint64_t TS, double TSP, uint64_t MS, uint64_t ES)
: FuncName(FN.begin(), FN.end()), TotalCount(TS), TotalCountPercent(TSP),
@@ -2487,9 +2486,35 @@ static int showMemProfProfile(const std::string &Filename, raw_fd_ostream &OS) {
return 0;
}
+static int showDebugInfoCorrelation(const std::string &Filename,
+ bool ShowDetailedSummary,
+ bool ShowProfileSymbolList,
+ raw_fd_ostream &OS) {
+ std::unique_ptr<InstrProfCorrelator> Correlator;
+ if (auto Err = InstrProfCorrelator::get(Filename).moveInto(Correlator))
+ exitWithError(std::move(Err), Filename);
+ if (auto Err = Correlator->correlateProfileData())
+ exitWithError(std::move(Err), Filename);
+
+ InstrProfSymtab Symtab;
+ if (auto Err = Symtab.create(
+ StringRef(Correlator->getNamesPointer(), Correlator->getNamesSize())))
+ exitWithError(std::move(Err), Filename);
+
+ if (ShowProfileSymbolList)
+ Symtab.dumpNames(OS);
+ // TODO: Read "Profile Data Type" from debug info to compute and show how many
+ // counters the section holds.
+ if (ShowDetailedSummary)
+ OS << "Counters section size: 0x"
+ << Twine::utohexstr(Correlator->getCountersSectionSize()) << " bytes\n";
+ OS << "Found " << Correlator->getDataSize() << " functions\n";
+
+ return 0;
+}
+
static int show_main(int argc, const char *argv[]) {
- cl::opt<std::string> Filename(cl::Positional, cl::Required,
- cl::desc("<profdata-file>"));
+ cl::opt<std::string> Filename(cl::Positional, cl::desc("<profdata-file>"));
cl::opt<bool> ShowCounts("counts", cl::init(false),
cl::desc("Show counter values for shown functions"));
@@ -2550,9 +2575,18 @@ static int show_main(int argc, const char *argv[]) {
"extbinary format"));
cl::opt<bool> ShowBinaryIds("binary-ids", cl::init(false),
cl::desc("Show binary ids in the profile. "));
+ cl::opt<std::string> DebugInfoFilename(
+ "debug-info", cl::init(""),
+ cl::desc("Read and extract profile metadata from debug info and show "
+ "the functions it found."));
cl::ParseCommandLineOptions(argc, argv, "LLVM profile data summary\n");
+ if (Filename.empty() && DebugInfoFilename.empty())
+ exitWithError(
+ "the positional argument '<profdata-file>' is required unless '--" +
+ DebugInfoFilename.ArgStr + "' is provided");
+
if (Filename == OutputFilename) {
errs() << sys::path::filename(argv[0])
<< ": Input file name cannot be the same as the output file name!\n";
@@ -2567,6 +2601,10 @@ static int show_main(int argc, const char *argv[]) {
if (ShowAllFunctions && !ShowFunction.empty())
WithColor::warning() << "-function argument ignored: showing all functions\n";
+ if (!DebugInfoFilename.empty())
+ return showDebugInfoCorrelation(DebugInfoFilename, ShowDetailedSummary,
+ ShowProfileSymbolList, OS);
+
if (ProfileKind == instr)
return showInstrProfile(
Filename, ShowCounts, TopNFunctions, ShowIndirectCallTargets,
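A brief usage sketch of the correlation mode added above (the binary path is hypothetical): `llvm-profdata show --debug-info=./a.out` correlates the profile metadata found in the binary's debug info and prints the number of functions discovered; with that flag present, the positional <profdata-file> argument is no longer required, matching the new check in show_main.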
diff --git a/contrib/llvm-project/llvm/tools/llvm-readobj/COFFDumper.cpp b/contrib/llvm-project/llvm/tools/llvm-readobj/COFFDumper.cpp
index b235398e7a45..caeb49af24be 100644
--- a/contrib/llvm-project/llvm/tools/llvm-readobj/COFFDumper.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-readobj/COFFDumper.cpp
@@ -52,6 +52,7 @@
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/Win64EH.h"
#include "llvm/Support/raw_ostream.h"
+#include <ctime>
using namespace llvm;
using namespace llvm::object;
@@ -126,7 +127,7 @@ private:
void printCOFFTLSDirectory(const coff_tls_directory<IntTy> *TlsTable);
typedef void (*PrintExtraCB)(raw_ostream &, const uint8_t *);
void printRVATable(uint64_t TableVA, uint64_t Count, uint64_t EntrySize,
- PrintExtraCB PrintExtra = 0);
+ PrintExtraCB PrintExtra = nullptr);
void printCodeViewSymbolSection(StringRef SectionName, const SectionRef &Section);
void printCodeViewTypeSection(StringRef SectionName, const SectionRef &Section);
diff --git a/contrib/llvm-project/llvm/tools/llvm-readobj/ELFDumper.cpp b/contrib/llvm-project/llvm/tools/llvm-readobj/ELFDumper.cpp
index 9d9c22234726..cfb618117d2b 100644
--- a/contrib/llvm-project/llvm/tools/llvm-readobj/ELFDumper.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-readobj/ELFDumper.cpp
@@ -17,6 +17,7 @@
#include "StackMapPrinter.h"
#include "llvm-readobj.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
@@ -30,6 +31,7 @@
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/AMDGPUMetadataVerifier.h"
#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/BinaryFormat/MsgPackDocument.h"
#include "llvm/Demangle/Demangle.h"
#include "llvm/Object/Archive.h"
#include "llvm/Object/ELF.h"
@@ -1583,7 +1585,8 @@ const EnumEntry<unsigned> ElfHeaderRISCVFlags[] = {
ENUM_ENT(EF_RISCV_FLOAT_ABI_SINGLE, "single-float ABI"),
ENUM_ENT(EF_RISCV_FLOAT_ABI_DOUBLE, "double-float ABI"),
ENUM_ENT(EF_RISCV_FLOAT_ABI_QUAD, "quad-float ABI"),
- ENUM_ENT(EF_RISCV_RVE, "RVE")
+ ENUM_ENT(EF_RISCV_RVE, "RVE"),
+ ENUM_ENT(EF_RISCV_TSO, "TSO"),
};
const EnumEntry<unsigned> ElfHeaderAVRFlags[] = {
@@ -3904,7 +3907,7 @@ void GNUELFDumper<ELFT>::printHashTableSymbols(const Elf_Hash &SysVHash) {
for (uint32_t Buc = 0; Buc < SysVHash.nbucket; Buc++) {
if (Buckets[Buc] == ELF::STN_UNDEF)
continue;
- std::vector<bool> Visited(SysVHash.nchain);
+ BitVector Visited(SysVHash.nchain);
for (uint32_t Ch = Buckets[Buc]; Ch < SysVHash.nchain; Ch = Chains[Ch]) {
if (Ch == ELF::STN_UNDEF)
break;
@@ -4652,7 +4655,7 @@ void GNUELFDumper<ELFT>::printHashHistogram(const Elf_Hash &HashTable) {
// Go over all buckets and note chain lengths of each bucket (total
// unique chain lengths).
for (size_t B = 0; B < NBucket; B++) {
- std::vector<bool> Visited(NChain);
+ BitVector Visited(NChain);
for (size_t C = Buckets[B]; C < NChain; C = Chains[C]) {
if (C == ELF::STN_UNDEF)
break;
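The two ELFDumper hunks above replace std::vector<bool> with llvm::BitVector for the per-bucket visited set. A minimal, self-contained sketch of that idiom follows; the chain array and bounds are placeholder data, not the dumper's real inputs:

#include "llvm/ADT/BitVector.h"
#include <cstdint>

// Follows Chains[] from Start and reports whether an entry repeats (a cycle),
// marking visited entries in a BitVector instead of a std::vector<bool>.
static bool chainHasCycle(const uint32_t *Chains, uint32_t NChain,
                          uint32_t Start) {
  llvm::BitVector Visited(NChain); // all bits start cleared
  for (uint32_t C = Start; C < NChain; C = Chains[C]) {
    if (Visited.test(C))
      return true;
    Visited.set(C);
  }
  return false;
}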
diff --git a/contrib/llvm-project/llvm/tools/llvm-readobj/MachODumper.cpp b/contrib/llvm-project/llvm/tools/llvm-readobj/MachODumper.cpp
index 945b16b8db86..599b0355917e 100644
--- a/contrib/llvm-project/llvm/tools/llvm-readobj/MachODumper.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-readobj/MachODumper.cpp
@@ -16,6 +16,7 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Object/MachO.h"
+#include "llvm/Support/BinaryStreamReader.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ScopedPrinter.h"
@@ -34,6 +35,7 @@ public:
void printRelocations() override;
void printUnwindInfo() override;
void printStackMap() const override;
+ void printCGProfile() override;
void printNeededLibraries() override;
@@ -49,6 +51,8 @@ private:
template<class MachHeader>
void printFileHeaders(const MachHeader &Header);
+ StringRef getSymbolName(const SymbolRef &Symbol);
+
void printSymbols() override;
void printDynamicSymbols() override;
void printSymbol(const SymbolRef &Symbol);
@@ -551,10 +555,7 @@ void MachODumper::printRelocation(const MachOObjectFile *Obj,
if (IsExtern) {
symbol_iterator Symbol = Reloc.getSymbol();
if (Symbol != Obj->symbol_end()) {
- Expected<StringRef> TargetNameOrErr = Symbol->getName();
- if (!TargetNameOrErr)
- reportError(TargetNameOrErr.takeError(), Obj->getFileName());
- TargetName = *TargetNameOrErr;
+ TargetName = getSymbolName(*Symbol);
}
} else if (!IsScattered) {
section_iterator SecI = Obj->getRelocationSection(DR);
@@ -601,6 +602,14 @@ void MachODumper::printRelocation(const MachOObjectFile *Obj,
}
}
+StringRef MachODumper::getSymbolName(const SymbolRef &Symbol) {
+ Expected<StringRef> SymbolNameOrErr = Symbol.getName();
+ if (!SymbolNameOrErr) {
+ reportError(SymbolNameOrErr.takeError(), Obj->getFileName());
+ }
+ return *SymbolNameOrErr;
+}
+
void MachODumper::printSymbols() {
ListScope Group(W, "Symbols");
@@ -614,13 +623,7 @@ void MachODumper::printDynamicSymbols() {
}
void MachODumper::printSymbol(const SymbolRef &Symbol) {
- StringRef SymbolName;
- Expected<StringRef> SymbolNameOrErr = Symbol.getName();
- if (!SymbolNameOrErr) {
- // TODO: Actually report errors helpfully.
- consumeError(SymbolNameOrErr.takeError());
- } else
- SymbolName = *SymbolNameOrErr;
+ StringRef SymbolName = getSymbolName(Symbol);
MachOSymbol MOSymbol;
getSymbol(Obj, Symbol.getRawDataRefImpl(), MOSymbol);
@@ -696,6 +699,48 @@ void MachODumper::printStackMap() const {
W, StackMapParser<support::big>(StackMapContentsArray));
}
+void MachODumper::printCGProfile() {
+ object::SectionRef CGProfileSection;
+ for (auto Sec : Obj->sections()) {
+ StringRef Name;
+ if (Expected<StringRef> NameOrErr = Sec.getName())
+ Name = *NameOrErr;
+ else
+ consumeError(NameOrErr.takeError());
+
+ if (Name == "__cg_profile") {
+ CGProfileSection = Sec;
+ break;
+ }
+ }
+ if (CGProfileSection == object::SectionRef())
+ return;
+
+ StringRef CGProfileContents =
+ unwrapOrError(Obj->getFileName(), CGProfileSection.getContents());
+ BinaryStreamReader Reader(CGProfileContents, Obj->isLittleEndian()
+ ? llvm::support::little
+ : llvm::support::big);
+
+ ListScope L(W, "CGProfile");
+ while (!Reader.empty()) {
+ uint32_t FromIndex, ToIndex;
+ uint64_t Count;
+ if (Error Err = Reader.readInteger(FromIndex))
+ reportError(std::move(Err), Obj->getFileName());
+ if (Error Err = Reader.readInteger(ToIndex))
+ reportError(std::move(Err), Obj->getFileName());
+ if (Error Err = Reader.readInteger(Count))
+ reportError(std::move(Err), Obj->getFileName());
+ DictScope D(W, "CGProfileEntry");
+ W.printNumber("From", getSymbolName(*Obj->getSymbolByIndex(FromIndex)),
+ FromIndex);
+ W.printNumber("To", getSymbolName(*Obj->getSymbolByIndex(ToIndex)),
+ ToIndex);
+ W.printNumber("Weight", Count);
+ }
+}
+
void MachODumper::printNeededLibraries() {
ListScope D(W, "NeededLibraries");
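The new printCGProfile above walks the __cg_profile section as a stream of (from, to, weight) triples via BinaryStreamReader. A minimal sketch of the same parsing pattern in isolation; the input bytes and endianness are assumed rather than taken from a real object file:

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/BinaryStreamReader.h"
#include "llvm/Support/Error.h"
#include <cstdint>

// Reads 16-byte records of two 32-bit symbol indices followed by a 64-bit
// weight, the same layout printCGProfile above decodes.
static llvm::Error readCGProfileTriples(llvm::StringRef Bytes,
                                        llvm::support::endianness E) {
  llvm::BinaryStreamReader Reader(Bytes, E);
  while (!Reader.empty()) {
    uint32_t FromIndex, ToIndex;
    uint64_t Weight;
    if (llvm::Error Err = Reader.readInteger(FromIndex))
      return Err;
    if (llvm::Error Err = Reader.readInteger(ToIndex))
      return Err;
    if (llvm::Error Err = Reader.readInteger(Weight))
      return Err;
    // A consumer would resolve FromIndex/ToIndex to symbol names here.
  }
  return llvm::Error::success();
}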
diff --git a/contrib/llvm-project/llvm/tools/llvm-readobj/XCOFFDumper.cpp b/contrib/llvm-project/llvm/tools/llvm-readobj/XCOFFDumper.cpp
index 38e459cd5425..6e778d558d4f 100644
--- a/contrib/llvm-project/llvm/tools/llvm-readobj/XCOFFDumper.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-readobj/XCOFFDumper.cpp
@@ -16,6 +16,7 @@
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/ScopedPrinter.h"
+#include <ctime>
#include <stddef.h>
using namespace llvm;
@@ -44,9 +45,16 @@ private:
template <typename T> void printSectionHeaders(ArrayRef<T> Sections);
template <typename T> void printGenericSectionHeader(T &Sec) const;
template <typename T> void printOverflowSectionHeader(T &Sec) const;
+ template <typename T> const T *getAuxEntPtr(uintptr_t AuxAddress);
void printFileAuxEnt(const XCOFFFileAuxEnt *AuxEntPtr);
void printCsectAuxEnt(XCOFFCsectAuxRef AuxEntRef);
void printSectAuxEntForStat(const XCOFFSectAuxEntForStat *AuxEntPtr);
+ void printExceptionAuxEnt(const XCOFFExceptionAuxEnt *AuxEntPtr);
+ void printFunctionAuxEnt(const XCOFFFunctionAuxEnt32 *AuxEntPtr);
+ void printFunctionAuxEnt(const XCOFFFunctionAuxEnt64 *AuxEntPtr);
+ void printBlockAuxEnt(const XCOFFBlockAuxEnt32 *AuxEntPtr);
+ void printBlockAuxEnt(const XCOFFBlockAuxEnt64 *AuxEntPtr);
+ template <typename T> void printSectAuxEntForDWARF(const T *AuxEntPtr);
void printSymbol(const SymbolRef &);
template <typename RelTy> void printRelocation(RelTy Reloc);
template <typename Shdr, typename RelTy>
@@ -289,6 +297,77 @@ void XCOFFDumper::printSectAuxEntForStat(
W.printNumber("NumberOfLineNum", AuxEntPtr->NumberOfLineNum);
}
+void XCOFFDumper::printExceptionAuxEnt(const XCOFFExceptionAuxEnt *AuxEntPtr) {
+ assert(Obj.is64Bit() && "64-bit interface called on 32-bit object file.");
+
+ DictScope SymDs(W, "Exception Auxiliary Entry");
+ W.printNumber("Index",
+ Obj.getSymbolIndex(reinterpret_cast<uintptr_t>(AuxEntPtr)));
+ W.printHex("OffsetToExceptionTable", AuxEntPtr->OffsetToExceptionTbl);
+ W.printHex("SizeOfFunction", AuxEntPtr->SizeOfFunction);
+ W.printNumber("SymbolIndexOfNextBeyond", AuxEntPtr->SymIdxOfNextBeyond);
+ W.printEnum("Auxiliary Type", static_cast<uint8_t>(AuxEntPtr->AuxType),
+ makeArrayRef(SymAuxType));
+}
+
+void XCOFFDumper::printFunctionAuxEnt(const XCOFFFunctionAuxEnt32 *AuxEntPtr) {
+ assert(!Obj.is64Bit() && "32-bit interface called on 64-bit object file.");
+
+ DictScope SymDs(W, "Function Auxiliary Entry");
+ W.printNumber("Index",
+ Obj.getSymbolIndex(reinterpret_cast<uintptr_t>(AuxEntPtr)));
+ W.printHex("OffsetToExceptionTable", AuxEntPtr->OffsetToExceptionTbl);
+ W.printHex("SizeOfFunction", AuxEntPtr->SizeOfFunction);
+ W.printHex("PointerToLineNum", AuxEntPtr->PtrToLineNum);
+ W.printNumber("SymbolIndexOfNextBeyond", AuxEntPtr->SymIdxOfNextBeyond);
+}
+
+void XCOFFDumper::printFunctionAuxEnt(const XCOFFFunctionAuxEnt64 *AuxEntPtr) {
+ assert(Obj.is64Bit() && "64-bit interface called on 32-bit object file.");
+
+ DictScope SymDs(W, "Function Auxiliary Entry");
+ W.printNumber("Index",
+ Obj.getSymbolIndex(reinterpret_cast<uintptr_t>(AuxEntPtr)));
+ W.printHex("SizeOfFunction", AuxEntPtr->SizeOfFunction);
+ W.printHex("PointerToLineNum", AuxEntPtr->PtrToLineNum);
+ W.printNumber("SymbolIndexOfNextBeyond", AuxEntPtr->SymIdxOfNextBeyond);
+ W.printEnum("Auxiliary Type", static_cast<uint8_t>(AuxEntPtr->AuxType),
+ makeArrayRef(SymAuxType));
+}
+
+void XCOFFDumper::printBlockAuxEnt(const XCOFFBlockAuxEnt32 *AuxEntPtr) {
+ assert(!Obj.is64Bit() && "32-bit interface called on 64-bit object file.");
+
+ DictScope SymDs(W, "Block Auxiliary Entry");
+ W.printNumber("Index",
+ Obj.getSymbolIndex(reinterpret_cast<uintptr_t>(AuxEntPtr)));
+ W.printHex("LineNumber (High 2 Bytes)", AuxEntPtr->LineNumHi);
+ W.printHex("LineNumber (Low 2 Bytes)", AuxEntPtr->LineNumLo);
+}
+
+void XCOFFDumper::printBlockAuxEnt(const XCOFFBlockAuxEnt64 *AuxEntPtr) {
+ assert(Obj.is64Bit() && "64-bit interface called on 32-bit object file.");
+
+ DictScope SymDs(W, "Block Auxiliary Entry");
+ W.printNumber("Index",
+ Obj.getSymbolIndex(reinterpret_cast<uintptr_t>(AuxEntPtr)));
+ W.printHex("LineNumber", AuxEntPtr->LineNum);
+ W.printEnum("Auxiliary Type", static_cast<uint8_t>(AuxEntPtr->AuxType),
+ makeArrayRef(SymAuxType));
+}
+
+template <typename T>
+void XCOFFDumper::printSectAuxEntForDWARF(const T *AuxEntPtr) {
+ DictScope SymDs(W, "Sect Auxiliary Entry For DWARF");
+ W.printNumber("Index",
+ Obj.getSymbolIndex(reinterpret_cast<uintptr_t>(AuxEntPtr)));
+ W.printHex("LengthOfSectionPortion", AuxEntPtr->LengthOfSectionPortion);
+ W.printNumber("NumberOfRelocEntries", AuxEntPtr->NumberOfRelocEnt);
+ if (Obj.is64Bit())
+ W.printEnum("Auxiliary Type", static_cast<uint8_t>(XCOFF::AUX_SECT),
+ makeArrayRef(SymAuxType));
+}
+
const EnumEntry<XCOFF::StorageClass> SymStorageClass[] = {
#define ECase(X) \
{ #X, XCOFF::X }
@@ -314,11 +393,13 @@ static StringRef GetSymbolValueName(XCOFF::StorageClass SC) {
case XCOFF::C_WEAKEXT:
case XCOFF::C_HIDEXT:
case XCOFF::C_STAT:
+ case XCOFF::C_FCN:
+ case XCOFF::C_BLOCK:
return "Value (RelocatableAddress)";
case XCOFF::C_FILE:
return "Value (SymbolTableIndex)";
- case XCOFF::C_FCN:
- case XCOFF::C_BLOCK:
+ case XCOFF::C_DWARF:
+ return "Value (OffsetInDWARF)";
case XCOFF::C_FUN:
case XCOFF::C_STSYM:
case XCOFF::C_BINCL:
@@ -330,7 +411,6 @@ static StringRef GetSymbolValueName(XCOFF::StorageClass SC) {
case XCOFF::C_RPSYM:
case XCOFF::C_RSYM:
case XCOFF::C_ECOML:
- case XCOFF::C_DWARF:
assert(false && "This StorageClass for the symbol is not yet implemented.");
return "";
default:
@@ -352,6 +432,22 @@ const EnumEntry<XCOFF::CFileCpuId> CFileCpuIdClass[] = {
#undef ECase
};
+template <typename T> const T *XCOFFDumper::getAuxEntPtr(uintptr_t AuxAddress) {
+ const T *AuxEntPtr = reinterpret_cast<const T *>(AuxAddress);
+ Obj.checkSymbolEntryPointer(reinterpret_cast<uintptr_t>(AuxEntPtr));
+ return AuxEntPtr;
+}
+
+static void printUnexpectedRawAuxEnt(ScopedPrinter &W, uintptr_t AuxAddress) {
+ W.startLine() << "!Unexpected raw auxiliary entry data:\n";
+ W.startLine() << format_bytes(
+ ArrayRef<uint8_t>(
+ reinterpret_cast<const uint8_t *>(AuxAddress),
+ XCOFF::SymbolTableEntrySize),
+ None, XCOFF::SymbolTableEntrySize)
+ << "\n";
+}
+
void XCOFFDumper::printSymbol(const SymbolRef &S) {
DataRefImpl SymbolDRI = S.getRawDataRefImpl();
XCOFFSymbolRef SymbolEntRef = Obj.toSymbolRef(SymbolDRI);
@@ -363,16 +459,18 @@ void XCOFFDumper::printSymbol(const SymbolRef &S) {
StringRef SymbolName =
unwrapOrError(Obj.getFileName(), SymbolEntRef.getName());
- W.printNumber("Index", Obj.getSymbolIndex(SymbolEntRef.getEntryAddress()));
+ uint32_t SymbolIdx = Obj.getSymbolIndex(SymbolEntRef.getEntryAddress());
+ XCOFF::StorageClass SymbolClass = SymbolEntRef.getStorageClass();
+
+ W.printNumber("Index", SymbolIdx);
W.printString("Name", SymbolName);
- W.printHex(GetSymbolValueName(SymbolEntRef.getStorageClass()),
- SymbolEntRef.getValue());
+ W.printHex(GetSymbolValueName(SymbolClass), SymbolEntRef.getValue());
StringRef SectionName =
unwrapOrError(Obj.getFileName(), Obj.getSymbolSectionName(SymbolEntRef));
W.printString("Section", SectionName);
- if (SymbolEntRef.getStorageClass() == XCOFF::C_FILE) {
+ if (SymbolClass == XCOFF::C_FILE) {
W.printEnum("Source Language ID", SymbolEntRef.getLanguageIdForCFile(),
makeArrayRef(CFileLangIdClass));
W.printEnum("CPU Version ID", SymbolEntRef.getCPUTypeIddForCFile(),
@@ -380,15 +478,24 @@ void XCOFFDumper::printSymbol(const SymbolRef &S) {
} else
W.printHex("Type", SymbolEntRef.getSymbolType());
- W.printEnum("StorageClass",
- static_cast<uint8_t>(SymbolEntRef.getStorageClass()),
+ W.printEnum("StorageClass", static_cast<uint8_t>(SymbolClass),
makeArrayRef(SymStorageClass));
W.printNumber("NumberOfAuxEntries", NumberOfAuxEntries);
if (NumberOfAuxEntries == 0)
return;
- switch (SymbolEntRef.getStorageClass()) {
+ auto checkNumOfAux = [=] {
+ if (NumberOfAuxEntries > 1)
+ reportUniqueWarning("the " +
+ enumToString(static_cast<uint8_t>(SymbolClass),
+ makeArrayRef(SymStorageClass)) +
+ " symbol at index " + Twine(SymbolIdx) +
+ " should not have more than 1 "
+ "auxiliary entry");
+ };
+
+ switch (SymbolClass) {
case XCOFF::C_FILE:
// If the symbol is C_FILE and has auxiliary entries...
for (int I = 1; I <= NumberOfAuxEntries; I++) {
@@ -397,55 +504,62 @@ void XCOFFDumper::printSymbol(const SymbolRef &S) {
if (Obj.is64Bit() &&
*Obj.getSymbolAuxType(AuxAddress) != XCOFF::SymbolAuxType::AUX_FILE) {
- W.startLine() << "!Unexpected raw auxiliary entry data:\n";
- W.startLine() << format_bytes(
- ArrayRef<uint8_t>(
- reinterpret_cast<const uint8_t *>(AuxAddress),
- XCOFF::SymbolTableEntrySize),
- 0, XCOFF::SymbolTableEntrySize)
- << "\n";
+ printUnexpectedRawAuxEnt(W, AuxAddress);
continue;
}
const XCOFFFileAuxEnt *FileAuxEntPtr =
- reinterpret_cast<const XCOFFFileAuxEnt *>(AuxAddress);
-#ifndef NDEBUG
- Obj.checkSymbolEntryPointer(reinterpret_cast<uintptr_t>(FileAuxEntPtr));
-#endif
+ getAuxEntPtr<XCOFFFileAuxEnt>(AuxAddress);
printFileAuxEnt(FileAuxEntPtr);
}
break;
case XCOFF::C_EXT:
case XCOFF::C_WEAKEXT:
case XCOFF::C_HIDEXT: {
- // If the symbol is for a function, and it has more than 1 auxiliary entry,
- // then one of them must be function auxiliary entry which we do not
- // support yet.
- if (SymbolEntRef.isFunction() && NumberOfAuxEntries >= 2)
- report_fatal_error("Function auxiliary entry printing is unimplemented.");
-
- // If there is more than 1 auxiliary entry, instead of printing out
- // error information, print out the raw Auxiliary entry.
- // For 32-bit object, print from first to the last - 1. The last one must be
- // a CSECT Auxiliary Entry.
- // For 64-bit object, print from first to last and skips if SymbolAuxType is
- // AUX_CSECT.
+ if (!SymbolEntRef.isFunction() && NumberOfAuxEntries > 1)
+ reportUniqueWarning("the non-function " +
+ enumToString(static_cast<uint8_t>(SymbolClass),
+ makeArrayRef(SymStorageClass)) +
+ " symbol at index " + Twine(SymbolIdx) +
+ " should have only 1 auxiliary entry, i.e. the CSECT "
+ "auxiliary entry");
+
+ // For 32-bit objects, print the function auxiliary symbol table entry. The
+ // last one must be a CSECT auxiliary entry.
+ // For 64-bit objects, both a function auxiliary entry and an exception
+ // auxiliary entry may appear; print them in the loop and skip printing the
+ // CSECT auxiliary entry, which will be printed outside the loop.
for (int I = 1; I <= NumberOfAuxEntries; I++) {
- if (I == NumberOfAuxEntries && !Obj.is64Bit())
+ if ((I == NumberOfAuxEntries && !Obj.is64Bit()) ||
+ !SymbolEntRef.isFunction())
break;
uintptr_t AuxAddress = XCOFFObjectFile::getAdvancedSymbolEntryAddress(
SymbolEntRef.getEntryAddress(), I);
- if (Obj.is64Bit() &&
- *Obj.getSymbolAuxType(AuxAddress) == XCOFF::SymbolAuxType::AUX_CSECT)
- continue;
- W.startLine() << "!Unexpected raw auxiliary entry data:\n";
- W.startLine() << format_bytes(
- ArrayRef<uint8_t>(reinterpret_cast<const uint8_t *>(AuxAddress),
- XCOFF::SymbolTableEntrySize));
+ if (Obj.is64Bit()) {
+ XCOFF::SymbolAuxType Type = *Obj.getSymbolAuxType(AuxAddress);
+ if (Type == XCOFF::SymbolAuxType::AUX_CSECT)
+ continue;
+ if (Type == XCOFF::SymbolAuxType::AUX_FCN) {
+ const XCOFFFunctionAuxEnt64 *AuxEntPtr =
+ getAuxEntPtr<XCOFFFunctionAuxEnt64>(AuxAddress);
+ printFunctionAuxEnt(AuxEntPtr);
+ } else if (Type == XCOFF::SymbolAuxType::AUX_EXCEPT) {
+ const XCOFFExceptionAuxEnt *AuxEntPtr =
+ getAuxEntPtr<XCOFFExceptionAuxEnt>(AuxAddress);
+ printExceptionAuxEnt(AuxEntPtr);
+ } else {
+ printUnexpectedRawAuxEnt(W, AuxAddress);
+ }
+ } else {
+ const XCOFFFunctionAuxEnt32 *AuxEntPtr =
+ getAuxEntPtr<XCOFFFunctionAuxEnt32>(AuxAddress);
+ printFunctionAuxEnt(AuxEntPtr);
+ }
}
+ // Print the CSECT auxiliary entry.
auto ErrOrCsectAuxRef = SymbolEntRef.getXCOFFCsectAuxRef();
if (!ErrOrCsectAuxRef)
reportUniqueWarning(ErrOrCsectAuxRef.takeError());
@@ -454,34 +568,56 @@ void XCOFFDumper::printSymbol(const SymbolRef &S) {
break;
}
- case XCOFF::C_STAT:
- if (NumberOfAuxEntries > 1)
- report_fatal_error(
- "C_STAT symbol should not have more than 1 auxiliary entry.");
-
- const XCOFFSectAuxEntForStat *StatAuxEntPtr;
- StatAuxEntPtr = reinterpret_cast<const XCOFFSectAuxEntForStat *>(
- XCOFFObjectFile::getAdvancedSymbolEntryAddress(
- SymbolEntRef.getEntryAddress(), 1));
-#ifndef NDEBUG
- Obj.checkSymbolEntryPointer(reinterpret_cast<uintptr_t>(StatAuxEntPtr));
-#endif
+ case XCOFF::C_STAT: {
+ checkNumOfAux();
+
+ const XCOFFSectAuxEntForStat *StatAuxEntPtr =
+ getAuxEntPtr<XCOFFSectAuxEntForStat>(
+ XCOFFObjectFile::getAdvancedSymbolEntryAddress(
+ SymbolEntRef.getEntryAddress(), 1));
printSectAuxEntForStat(StatAuxEntPtr);
break;
- case XCOFF::C_DWARF:
+ }
+ case XCOFF::C_DWARF: {
+ checkNumOfAux();
+
+ uintptr_t AuxAddress = XCOFFObjectFile::getAdvancedSymbolEntryAddress(
+ SymbolEntRef.getEntryAddress(), 1);
+
+ if (Obj.is64Bit()) {
+ const XCOFFSectAuxEntForDWARF64 *AuxEntPtr =
+ getAuxEntPtr<XCOFFSectAuxEntForDWARF64>(AuxAddress);
+ printSectAuxEntForDWARF<XCOFFSectAuxEntForDWARF64>(AuxEntPtr);
+ } else {
+ const XCOFFSectAuxEntForDWARF32 *AuxEntPtr =
+ getAuxEntPtr<XCOFFSectAuxEntForDWARF32>(AuxAddress);
+ printSectAuxEntForDWARF<XCOFFSectAuxEntForDWARF32>(AuxEntPtr);
+ }
+ break;
+ }
case XCOFF::C_BLOCK:
- case XCOFF::C_FCN:
- report_fatal_error("Symbol table entry printing for this storage class "
- "type is unimplemented.");
+ case XCOFF::C_FCN: {
+ checkNumOfAux();
+
+ uintptr_t AuxAddress = XCOFFObjectFile::getAdvancedSymbolEntryAddress(
+ SymbolEntRef.getEntryAddress(), 1);
+
+ if (Obj.is64Bit()) {
+ const XCOFFBlockAuxEnt64 *AuxEntPtr =
+ getAuxEntPtr<XCOFFBlockAuxEnt64>(AuxAddress);
+ printBlockAuxEnt(AuxEntPtr);
+ } else {
+ const XCOFFBlockAuxEnt32 *AuxEntPtr =
+ getAuxEntPtr<XCOFFBlockAuxEnt32>(AuxAddress);
+ printBlockAuxEnt(AuxEntPtr);
+ }
break;
+ }
default:
for (int i = 1; i <= NumberOfAuxEntries; i++) {
- W.startLine() << "!Unexpected raw auxiliary entry data:\n";
- W.startLine() << format_bytes(
- ArrayRef<uint8_t>(reinterpret_cast<const uint8_t *>(
- XCOFFObjectFile::getAdvancedSymbolEntryAddress(
- SymbolEntRef.getEntryAddress(), i)),
- XCOFF::SymbolTableEntrySize));
+ printUnexpectedRawAuxEnt(W,
+ XCOFFObjectFile::getAdvancedSymbolEntryAddress(
+ SymbolEntRef.getEntryAddress(), i));
}
break;
}
diff --git a/contrib/llvm-project/llvm/tools/llvm-readobj/llvm-readobj.cpp b/contrib/llvm-project/llvm/tools/llvm-readobj/llvm-readobj.cpp
index 46862bbad7cb..543b0de82cdf 100644
--- a/contrib/llvm-project/llvm/tools/llvm-readobj/llvm-readobj.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-readobj/llvm-readobj.cpp
@@ -286,8 +286,8 @@ static void parseOptions(const opt::InputArgList &Args) {
namespace {
struct ReadObjTypeTableBuilder {
ReadObjTypeTableBuilder()
- : Allocator(), IDTable(Allocator), TypeTable(Allocator),
- GlobalIDTable(Allocator), GlobalTypeTable(Allocator) {}
+ : IDTable(Allocator), TypeTable(Allocator), GlobalIDTable(Allocator),
+ GlobalTypeTable(Allocator) {}
llvm::BumpPtrAllocator Allocator;
llvm::codeview::MergingTypeTableBuilder IDTable;
@@ -448,6 +448,8 @@ static void dumpObject(ObjectFile &Obj, ScopedPrinter &Writer,
Dumper->printMachOVersionMin();
if (opts::MachODysymtab)
Dumper->printMachODysymtab();
+ if (opts::CGProfile)
+ Dumper->printCGProfile();
}
if (opts::PrintStackMap)
Dumper->printStackMap();
diff --git a/contrib/llvm-project/llvm/tools/llvm-stress/llvm-stress.cpp b/contrib/llvm-project/llvm/tools/llvm-stress/llvm-stress.cpp
index f2be4e7d0712..941b529da9b2 100644
--- a/contrib/llvm-project/llvm/tools/llvm-stress/llvm-stress.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-stress/llvm-stress.cpp
@@ -41,6 +41,7 @@
#include "llvm/Support/InitLLVM.h"
#include "llvm/Support/ToolOutputFile.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/WithColor.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
@@ -345,8 +346,7 @@ struct LoadModifier: public Modifier {
void Act() override {
// Try to use predefined pointers. If none exist, use an undef pointer value.
Value *Ptr = getRandomPointerValue();
- PointerType *Tp = cast<PointerType>(Ptr->getType());
- Value *V = new LoadInst(Tp->getElementType(), Ptr, "L",
+ Value *V = new LoadInst(Ptr->getType()->getPointerElementType(), Ptr, "L",
BB->getTerminator());
PT->push_back(V);
}
@@ -359,8 +359,7 @@ struct StoreModifier: public Modifier {
void Act() override {
// Try to use predefined pointers. If none exist, use an undef pointer value.
Value *Ptr = getRandomPointerValue();
- PointerType *Tp = cast<PointerType>(Ptr->getType());
- Value *Val = getRandomValue(Tp->getElementType());
+ Value *Val = getRandomValue(Ptr->getType()->getPointerElementType());
Type *ValTy = Val->getType();
// Do not store vectors of i1s because they are unsupported
diff --git a/contrib/llvm-project/llvm/tools/llvm-strings/llvm-strings.cpp b/contrib/llvm-project/llvm/tools/llvm-strings/llvm-strings.cpp
index 26be3914fb92..438eed33d283 100644
--- a/contrib/llvm-project/llvm/tools/llvm-strings/llvm-strings.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-strings/llvm-strings.cpp
@@ -61,7 +61,7 @@ public:
};
} // namespace
-const char ToolName[] = "llvm-strings";
+static StringRef ToolName;
static cl::list<std::string> InputFileNames(cl::Positional,
cl::desc("<input object files>"),
@@ -129,6 +129,7 @@ int main(int argc, char **argv) {
BumpPtrAllocator A;
StringSaver Saver(A);
StringsOptTable Tbl;
+ ToolName = argv[0];
opt::InputArgList Args =
Tbl.parseArgs(argc, argv, OPT_UNKNOWN, Saver,
[&](StringRef Msg) { reportCmdLineError(Msg); });
diff --git a/contrib/llvm-project/llvm/tools/llvm-tapi-diff/DiffEngine.cpp b/contrib/llvm-project/llvm/tools/llvm-tapi-diff/DiffEngine.cpp
index ff60d52f5c65..45a1f78525a7 100644
--- a/contrib/llvm-project/llvm/tools/llvm-tapi-diff/DiffEngine.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-tapi-diff/DiffEngine.cpp
@@ -94,8 +94,7 @@ std::string SymScalar::stringifySymbolFlag(MachO::SymbolFlags Flag) {
void SymScalar::print(raw_ostream &OS, std::string Indent, MachO::Target Targ) {
if (Val->getKind() == MachO::SymbolKind::ObjectiveCClass) {
- if (Targ.Arch == MachO::AK_i386 &&
- Targ.Platform == MachO::PlatformKind::macOS) {
+ if (Targ.Arch == MachO::AK_i386 && Targ.Platform == MachO::PLATFORM_MACOS) {
OS << Indent << "\t\t" << ((Order == lhs) ? "< " : "> ")
<< ObjC1ClassNamePrefix << Val->getName()
<< getFlagString(Val->getFlags()) << "\n";
diff --git a/contrib/llvm-project/llvm/tools/opt/NewPMDriver.cpp b/contrib/llvm-project/llvm/tools/opt/NewPMDriver.cpp
index 631d8eed5d7a..af3308939442 100644
--- a/contrib/llvm-project/llvm/tools/opt/NewPMDriver.cpp
+++ b/contrib/llvm-project/llvm/tools/opt/NewPMDriver.cpp
@@ -324,19 +324,6 @@ bool llvm::runPassPipeline(StringRef Arg0, Module &M, TargetMachine *TM,
PassPlugin->registerPassBuilderCallbacks(PB);
}
- // Register a callback that creates the debugify passes as needed.
- PB.registerPipelineParsingCallback(
- [](StringRef Name, ModulePassManager &MPM,
- ArrayRef<PassBuilder::PipelineElement>) {
- if (Name == "debugify") {
- MPM.addPass(NewPMDebugifyPass());
- return true;
- } else if (Name == "check-debugify") {
- MPM.addPass(NewPMCheckDebugifyPass());
- return true;
- }
- return false;
- });
PB.registerPipelineParsingCallback(
[](StringRef Name, ModulePassManager &MPM,
ArrayRef<PassBuilder::PipelineElement>) {
diff --git a/contrib/llvm-project/llvm/utils/TableGen/CodeGenInstruction.cpp b/contrib/llvm-project/llvm/utils/TableGen/CodeGenInstruction.cpp
index 3933ce6e1106..78b698c31b2b 100644
--- a/contrib/llvm-project/llvm/utils/TableGen/CodeGenInstruction.cpp
+++ b/contrib/llvm-project/llvm/utils/TableGen/CodeGenInstruction.cpp
@@ -342,7 +342,7 @@ static void ParseConstraints(StringRef CStr, CGIOperandList &Ops, Record *Rec) {
}
void CGIOperandList::ProcessDisableEncoding(StringRef DisableEncoding) {
- while (1) {
+ while (true) {
StringRef OpName;
std::tie(OpName, DisableEncoding) = getToken(DisableEncoding, " ,\t");
if (OpName.empty()) break;
diff --git a/contrib/llvm-project/llvm/utils/TableGen/CodeGenInstruction.h b/contrib/llvm-project/llvm/utils/TableGen/CodeGenInstruction.h
index e35d2191ea45..e0ce5d433602 100644
--- a/contrib/llvm-project/llvm/utils/TableGen/CodeGenInstruction.h
+++ b/contrib/llvm-project/llvm/utils/TableGen/CodeGenInstruction.h
@@ -13,6 +13,7 @@
#ifndef LLVM_UTILS_TABLEGEN_CODEGENINSTRUCTION_H
#define LLVM_UTILS_TABLEGEN_CODEGENINSTRUCTION_H
+#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/SMLoc.h"
@@ -106,7 +107,7 @@ template <typename T> class ArrayRef;
/// DoNotEncode - Bools are set to true in this vector for each operand in
/// the DisableEncoding list. These should not be emitted by the code
/// emitter.
- std::vector<bool> DoNotEncode;
+ BitVector DoNotEncode;
/// MIOperandInfo - Default MI operand type. Note an operand may be made
/// up of multiple MI operands.
diff --git a/contrib/llvm-project/llvm/utils/TableGen/CodeGenIntrinsics.h b/contrib/llvm-project/llvm/utils/TableGen/CodeGenIntrinsics.h
index dbfad3bf6b17..b005a5866f80 100644
--- a/contrib/llvm-project/llvm/utils/TableGen/CodeGenIntrinsics.h
+++ b/contrib/llvm-project/llvm/utils/TableGen/CodeGenIntrinsics.h
@@ -21,7 +21,6 @@
namespace llvm {
class Record;
class RecordKeeper;
-class CodeGenTarget;
struct CodeGenIntrinsic {
Record *TheDef; // The actual record defining this intrinsic.
diff --git a/contrib/llvm-project/llvm/utils/TableGen/CodeGenSchedule.h b/contrib/llvm-project/llvm/utils/TableGen/CodeGenSchedule.h
index 9020447c940b..a331a30b51a8 100644
--- a/contrib/llvm-project/llvm/utils/TableGen/CodeGenSchedule.h
+++ b/contrib/llvm-project/llvm/utils/TableGen/CodeGenSchedule.h
@@ -28,7 +28,6 @@ namespace llvm {
class CodeGenTarget;
class CodeGenSchedModels;
class CodeGenInstruction;
-class CodeGenRegisterClass;
using RecVec = std::vector<Record*>;
using RecIter = std::vector<Record*>::const_iterator;
diff --git a/contrib/llvm-project/llvm/utils/TableGen/DAGISelMatcherOpt.cpp b/contrib/llvm-project/llvm/utils/TableGen/DAGISelMatcherOpt.cpp
index 6746fdd676a7..4273bd69b87d 100644
--- a/contrib/llvm-project/llvm/utils/TableGen/DAGISelMatcherOpt.cpp
+++ b/contrib/llvm-project/llvm/utils/TableGen/DAGISelMatcherOpt.cpp
@@ -250,7 +250,7 @@ static void FactorNodes(std::unique_ptr<Matcher> &InputMatcherPtr) {
// current sets of nodes and this node don't matter. Look past it to see if
// we can merge anything else into this matching group.
unsigned Scan = OptionIdx;
- while (1) {
+ while (true) {
// If we ran out of stuff to scan, we're done.
if (Scan == e) break;
diff --git a/contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchDag.h b/contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchDag.h
index 567580540877..37570648cad1 100644
--- a/contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchDag.h
+++ b/contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchDag.h
@@ -84,9 +84,7 @@ protected:
bool HasPostMatchPredicate = false;
public:
- GIMatchDag(GIMatchDagContext &Ctx)
- : Ctx(Ctx), InstrNodes(), PredicateNodes(), Edges(),
- PredicateDependencies() {}
+ GIMatchDag(GIMatchDagContext &Ctx) : Ctx(Ctx) {}
GIMatchDag(const GIMatchDag &) = delete;
GIMatchDagContext &getContext() const { return Ctx; }
diff --git a/contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchDagInstr.h b/contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchDagInstr.h
index 4a07767a2e19..0c39b50442b4 100644
--- a/contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchDagInstr.h
+++ b/contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchDagInstr.h
@@ -61,7 +61,7 @@ protected:
/// For debugging purposes, it's helpful to have access to a description of
/// the Opcode. However, this object shouldn't use it for more than debugging
/// output since predicates are expected to be handled outside the DAG.
- CodeGenInstruction *OpcodeAnnotation = 0;
+ CodeGenInstruction *OpcodeAnnotation = nullptr;
/// When true, this instruction will be a starting point for a match attempt.
bool IsMatchRoot = false;
diff --git a/contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchDagPredicateDependencyEdge.h b/contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchDagPredicateDependencyEdge.h
index 865455fe4e4d..9552adc5c625 100644
--- a/contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchDagPredicateDependencyEdge.h
+++ b/contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchDagPredicateDependencyEdge.h
@@ -14,7 +14,6 @@
namespace llvm {
class GIMatchDag;
class GIMatchDagInstr;
-class GIMatchDagEdge;
class GIMatchDagPredicate;
/// Represents a dependency that must be met to evaluate a predicate.
diff --git a/contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchTree.cpp b/contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchTree.cpp
index d08a83333c30..00d57404b069 100644
--- a/contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchTree.cpp
+++ b/contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchTree.cpp
@@ -82,7 +82,6 @@ GIMatchTreeBuilderLeafInfo::GIMatchTreeBuilderLeafInfo(
GIMatchTreeBuilder &Builder, StringRef Name, unsigned RootIdx,
const GIMatchDag &MatchDag, void *Data)
: Builder(Builder), Info(Name, RootIdx, Data), MatchDag(MatchDag),
- InstrNodeToInfo(),
RemainingInstrNodes(BitVector(MatchDag.getNumInstrNodes(), true)),
RemainingEdges(BitVector(MatchDag.getNumEdges(), true)),
RemainingPredicates(BitVector(MatchDag.getNumPredicates(), true)),
diff --git a/contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchTree.h b/contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchTree.h
index bf41a2e0e234..56df37731c09 100644
--- a/contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchTree.h
+++ b/contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchTree.h
@@ -588,7 +588,7 @@ class GIMatchTreeVRegDefPartitioner : public GIMatchTreePartitioner {
unsigned OpIdx;
std::vector<BitVector> TraversedEdges;
DenseMap<unsigned, unsigned> ResultToPartition;
- std::vector<bool> PartitionToResult;
+ BitVector PartitionToResult;
void addToPartition(bool Result, unsigned LeafIdx);
diff --git a/contrib/llvm-project/llvm/utils/TableGen/GlobalISelEmitter.cpp b/contrib/llvm-project/llvm/utils/TableGen/GlobalISelEmitter.cpp
index 7b1bd41a951b..25bc0adc2a81 100644
--- a/contrib/llvm-project/llvm/utils/TableGen/GlobalISelEmitter.cpp
+++ b/contrib/llvm-project/llvm/utils/TableGen/GlobalISelEmitter.cpp
@@ -883,9 +883,7 @@ protected:
public:
RuleMatcher(ArrayRef<SMLoc> SrcLoc)
- : Matchers(), Actions(), InsnVariableIDs(), MutatableInsns(),
- DefinedOperands(), NextInsnVarID(0), NextOutputInsnID(0),
- NextTempRegID(0), SrcLoc(SrcLoc), ComplexSubOperands(),
+ : NextInsnVarID(0), NextOutputInsnID(0), NextTempRegID(0), SrcLoc(SrcLoc),
RuleID(NextRuleID++) {}
RuleMatcher(RuleMatcher &&Other) = default;
RuleMatcher &operator=(RuleMatcher &&Other) = default;
diff --git a/contrib/llvm-project/llvm/utils/TableGen/InstrInfoEmitter.cpp b/contrib/llvm-project/llvm/utils/TableGen/InstrInfoEmitter.cpp
index aee887a906e5..3c92aa0cc27a 100644
--- a/contrib/llvm-project/llvm/utils/TableGen/InstrInfoEmitter.cpp
+++ b/contrib/llvm-project/llvm/utils/TableGen/InstrInfoEmitter.cpp
@@ -863,13 +863,13 @@ void InstrInfoEmitter::run(raw_ostream &OS) {
OS << "#ifdef GET_INSTRINFO_HELPER_DECLS\n";
OS << "#undef GET_INSTRINFO_HELPER_DECLS\n\n";
- emitTIIHelperMethods(OS, TargetName, /* ExpandDefintion = */false);
+ emitTIIHelperMethods(OS, TargetName, /* ExpandDefinition = */ false);
OS << "\n";
OS << "#endif // GET_INSTRINFO_HELPER_DECLS\n\n";
OS << "#ifdef GET_INSTRINFO_HELPERS\n";
OS << "#undef GET_INSTRINFO_HELPERS\n\n";
- emitTIIHelperMethods(OS, TargetName, /* ExpandDefintion = */true);
+ emitTIIHelperMethods(OS, TargetName, /* ExpandDefinition = */ true);
OS << "#endif // GET_INSTRINFO_HELPERS\n\n";
OS << "#ifdef GET_INSTRINFO_CTOR_DTOR\n";
diff --git a/contrib/llvm-project/llvm/utils/TableGen/PredicateExpander.h b/contrib/llvm-project/llvm/utils/TableGen/PredicateExpander.h
index 29cca92d902c..9e7a4a3925ac 100644
--- a/contrib/llvm-project/llvm/utils/TableGen/PredicateExpander.h
+++ b/contrib/llvm-project/llvm/utils/TableGen/PredicateExpander.h
@@ -111,7 +111,7 @@ class STIPredicateExpander : public PredicateExpander {
public:
STIPredicateExpander(StringRef Target)
- : PredicateExpander(Target), ClassPrefix(), ExpandDefinition(false) {}
+ : PredicateExpander(Target), ExpandDefinition(false) {}
bool shouldExpandDefinition() const { return ExpandDefinition; }
StringRef getClassPrefix() const { return ClassPrefix; }
diff --git a/contrib/llvm-project/llvm/utils/TableGen/RegisterBankEmitter.cpp b/contrib/llvm-project/llvm/utils/TableGen/RegisterBankEmitter.cpp
index 0725657150f8..61f71309b6fb 100644
--- a/contrib/llvm-project/llvm/utils/TableGen/RegisterBankEmitter.cpp
+++ b/contrib/llvm-project/llvm/utils/TableGen/RegisterBankEmitter.cpp
@@ -42,7 +42,7 @@ private:
public:
RegisterBank(const Record &TheDef)
- : TheDef(TheDef), RCs(), RCWithLargestRegsSize(nullptr) {}
+ : TheDef(TheDef), RCWithLargestRegsSize(nullptr) {}
/// Get the human-readable name for the bank.
StringRef getName() const { return TheDef.getValueAsString("Name"); }
diff --git a/contrib/llvm-project/llvm/utils/TableGen/SDNodeProperties.cpp b/contrib/llvm-project/llvm/utils/TableGen/SDNodeProperties.cpp
index 1843a78aa3cf..2aec41aac625 100644
--- a/contrib/llvm-project/llvm/utils/TableGen/SDNodeProperties.cpp
+++ b/contrib/llvm-project/llvm/utils/TableGen/SDNodeProperties.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "SDNodeProperties.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
@@ -15,34 +16,25 @@ using namespace llvm;
unsigned llvm::parseSDPatternOperatorProperties(Record *R) {
unsigned Properties = 0;
for (Record *Property : R->getValueAsListOfDefs("Properties")) {
- if (Property->getName() == "SDNPCommutative") {
- Properties |= 1 << SDNPCommutative;
- } else if (Property->getName() == "SDNPAssociative") {
- Properties |= 1 << SDNPAssociative;
- } else if (Property->getName() == "SDNPHasChain") {
- Properties |= 1 << SDNPHasChain;
- } else if (Property->getName() == "SDNPOutGlue") {
- Properties |= 1 << SDNPOutGlue;
- } else if (Property->getName() == "SDNPInGlue") {
- Properties |= 1 << SDNPInGlue;
- } else if (Property->getName() == "SDNPOptInGlue") {
- Properties |= 1 << SDNPOptInGlue;
- } else if (Property->getName() == "SDNPMayStore") {
- Properties |= 1 << SDNPMayStore;
- } else if (Property->getName() == "SDNPMayLoad") {
- Properties |= 1 << SDNPMayLoad;
- } else if (Property->getName() == "SDNPSideEffect") {
- Properties |= 1 << SDNPSideEffect;
- } else if (Property->getName() == "SDNPMemOperand") {
- Properties |= 1 << SDNPMemOperand;
- } else if (Property->getName() == "SDNPVariadic") {
- Properties |= 1 << SDNPVariadic;
- } else {
+ auto Offset = StringSwitch<unsigned>(Property->getName())
+ .Case("SDNPCommutative", SDNPCommutative)
+ .Case("SDNPAssociative", SDNPAssociative)
+ .Case("SDNPHasChain", SDNPHasChain)
+ .Case("SDNPOutGlue", SDNPOutGlue)
+ .Case("SDNPInGlue", SDNPInGlue)
+ .Case("SDNPOptInGlue", SDNPOptInGlue)
+ .Case("SDNPMayStore", SDNPMayStore)
+ .Case("SDNPMayLoad", SDNPMayLoad)
+ .Case("SDNPSideEffect", SDNPSideEffect)
+ .Case("SDNPMemOperand", SDNPMemOperand)
+ .Case("SDNPVariadic", SDNPVariadic)
+ .Default(-1u);
+ if (Offset != -1u)
+ Properties |= 1 << Offset;
+ else
PrintFatalError(R->getLoc(), "Unknown SD Node property '" +
Property->getName() + "' on node '" +
R->getName() + "'!");
- }
}
-
return Properties;
}
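The rewrite above folds the if/else-if chain into a single llvm::StringSwitch lookup. A minimal sketch of the same idiom with made-up flag names (not the real SDNP properties):

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"

// Maps a property name to a bit offset, or -1u when the name is unknown.
static unsigned bitOffsetForName(llvm::StringRef Name) {
  return llvm::StringSwitch<unsigned>(Name)
      .Case("Commutative", 0)
      .Case("Associative", 1)
      .Case("HasChain", 2)
      .Default(-1u);
}

// A caller would then set Properties |= 1 << Offset only when Offset != -1u,
// as parseSDPatternOperatorProperties does above.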
diff --git a/contrib/llvm-project/llvm/utils/TableGen/SearchableTableEmitter.cpp b/contrib/llvm-project/llvm/utils/TableGen/SearchableTableEmitter.cpp
index 7803848aafc4..327b90d59db6 100644
--- a/contrib/llvm-project/llvm/utils/TableGen/SearchableTableEmitter.cpp
+++ b/contrib/llvm-project/llvm/utils/TableGen/SearchableTableEmitter.cpp
@@ -649,6 +649,12 @@ void SearchableTableEmitter::collectTableEntries(
Field.IsInstruction = true;
}
}
+
+ SearchIndex Idx;
+ std::copy(Table.Fields.begin(), Table.Fields.end(),
+ std::back_inserter(Idx.Fields));
+ std::sort(Table.Entries.begin(), Table.Entries.end(),
+ [&](Record *LHS, Record *RHS) { return compareBy(LHS, RHS, Idx); });
}
void SearchableTableEmitter::run(raw_ostream &OS) {
diff --git a/contrib/llvm-project/llvm/utils/TableGen/X86DisassemblerTables.cpp b/contrib/llvm-project/llvm/utils/TableGen/X86DisassemblerTables.cpp
index d1a9ecb06a2b..90e71a354d17 100644
--- a/contrib/llvm-project/llvm/utils/TableGen/X86DisassemblerTables.cpp
+++ b/contrib/llvm-project/llvm/utils/TableGen/X86DisassemblerTables.cpp
@@ -657,7 +657,7 @@ static const char* stringForDecisionType(ModRMDecisionType dt) {
}
DisassemblerTables::DisassemblerTables() {
- for (unsigned i = 0; i < array_lengthof(Tables); i++)
+ for (unsigned i = 0; i < llvm::array_lengthof(Tables); i++)
Tables[i] = std::make_unique<ContextDecision>();
HasConflicts = false;
diff --git a/contrib/llvm-project/llvm/utils/TableGen/X86ModRMFilters.h b/contrib/llvm-project/llvm/utils/TableGen/X86ModRMFilters.h
index f0b8af5fb82a..e2d0907b4f8b 100644
--- a/contrib/llvm-project/llvm/utils/TableGen/X86ModRMFilters.h
+++ b/contrib/llvm-project/llvm/utils/TableGen/X86ModRMFilters.h
@@ -73,10 +73,7 @@ public:
/// otherwise. The name r derives from the fact that the mod
/// bits indicate whether the R/M bits [bits 2-0] signify a
/// register or a memory operand.
- ModFilter(bool r) :
- ModRMFilter(),
- R(r) {
- }
+ ModFilter(bool r) : R(r) {}
bool accepts(uint8_t modRM) const override {
return (R == ((modRM & 0xc0) == 0xc0));
@@ -95,11 +92,7 @@ public:
/// \param r True if the mod field must be set to 11; false otherwise.
/// The name is explained at ModFilter.
/// \param nnn The required value of the nnn field.
- ExtendedFilter(bool r, uint8_t nnn) :
- ModRMFilter(),
- R(r),
- NNN(nnn) {
- }
+ ExtendedFilter(bool r, uint8_t nnn) : R(r), NNN(nnn) {}
bool accepts(uint8_t modRM) const override {
return (((R && ((modRM & 0xc0) == 0xc0)) ||
@@ -120,11 +113,7 @@ public:
/// \param r True if the mod field must be set to 11; false otherwise.
/// The name is explained at ModFilter.
/// \param nnn The required value of the nnn field.
- ExtendedRMFilter(bool r, uint8_t nnn) :
- ModRMFilter(),
- R(r),
- NNN(nnn) {
- }
+ ExtendedRMFilter(bool r, uint8_t nnn) : R(r), NNN(nnn) {}
bool accepts(uint8_t modRM) const override {
return ((R && ((modRM & 0xc0) == 0xc0)) &&
@@ -140,10 +129,7 @@ public:
/// Constructor
///
/// \param modRM The required value of the full ModR/M byte.
- ExactFilter(uint8_t modRM) :
- ModRMFilter(),
- ModRM(modRM) {
- }
+ ExactFilter(uint8_t modRM) : ModRM(modRM) {}
bool accepts(uint8_t modRM) const override {
return (ModRM == modRM);
diff --git a/contrib/llvm-project/openmp/runtime/src/kmp.h b/contrib/llvm-project/openmp/runtime/src/kmp.h
index ede6aa992d53..a41265a4763a 100644
--- a/contrib/llvm-project/openmp/runtime/src/kmp.h
+++ b/contrib/llvm-project/openmp/runtime/src/kmp.h
@@ -1315,6 +1315,82 @@ static inline void __kmp_store_mxcsr(kmp_uint32 *p) { *p = _mm_getcsr(); }
#define KMP_X86_MXCSR_MASK 0xffffffc0 /* ignore status flags (6 lsb) */
+// User-level Monitor/Mwait
+#if KMP_HAVE_UMWAIT
+// We always try for UMWAIT first
+#if KMP_HAVE_WAITPKG_INTRINSICS
+#if KMP_HAVE_IMMINTRIN_H
+#include <immintrin.h>
+#elif KMP_HAVE_INTRIN_H
+#include <intrin.h>
+#endif
+#endif // KMP_HAVE_WAITPKG_INTRINSICS
+
+KMP_ATTRIBUTE_TARGET_WAITPKG
+static inline int __kmp_tpause(uint32_t hint, uint64_t counter) {
+#if !KMP_HAVE_WAITPKG_INTRINSICS
+ uint32_t timeHi = uint32_t(counter >> 32);
+ uint32_t timeLo = uint32_t(counter & 0xffffffff);
+ char flag;
+ __asm__ volatile("#tpause\n.byte 0x66, 0x0F, 0xAE, 0xF1\n"
+ "setb %0"
+ : "=r"(flag)
+ : "a"(timeLo), "d"(timeHi), "c"(hint)
+ :);
+ return flag;
+#else
+ return _tpause(hint, counter);
+#endif
+}
+KMP_ATTRIBUTE_TARGET_WAITPKG
+static inline void __kmp_umonitor(void *cacheline) {
+#if !KMP_HAVE_WAITPKG_INTRINSICS
+ __asm__ volatile("# umonitor\n.byte 0xF3, 0x0F, 0xAE, 0x01 "
+ :
+ : "a"(cacheline)
+ :);
+#else
+ _umonitor(cacheline);
+#endif
+}
+KMP_ATTRIBUTE_TARGET_WAITPKG
+static inline int __kmp_umwait(uint32_t hint, uint64_t counter) {
+#if !KMP_HAVE_WAITPKG_INTRINSICS
+ uint32_t timeHi = uint32_t(counter >> 32);
+ uint32_t timeLo = uint32_t(counter & 0xffffffff);
+ char flag;
+ __asm__ volatile("#umwait\n.byte 0xF2, 0x0F, 0xAE, 0xF1\n"
+ "setb %0"
+ : "=r"(flag)
+ : "a"(timeLo), "d"(timeHi), "c"(hint)
+ :);
+ return flag;
+#else
+ return _umwait(hint, counter);
+#endif
+}
+#elif KMP_HAVE_MWAIT
+#if KMP_OS_UNIX
+#include <pmmintrin.h>
+#else
+#include <intrin.h>
+#endif
+#if KMP_OS_UNIX
+__attribute__((target("sse3")))
+#endif
+static inline void
+__kmp_mm_monitor(void *cacheline, unsigned extensions, unsigned hints) {
+ _mm_monitor(cacheline, extensions, hints);
+}
+#if KMP_OS_UNIX
+__attribute__((target("sse3")))
+#endif
+static inline void
+__kmp_mm_mwait(unsigned extensions, unsigned hints) {
+ _mm_mwait(extensions, hints);
+}
+#endif // KMP_HAVE_UMWAIT
+
#if KMP_ARCH_X86
extern void __kmp_x86_pause(void);
#elif KMP_MIC
@@ -1344,6 +1420,9 @@ static inline void __kmp_x86_pause(void) { _mm_pause(); }
#define KMP_INIT_YIELD(count) \
{ (count) = __kmp_yield_init; }
+#define KMP_INIT_BACKOFF(time) \
+ { (time) = __kmp_pause_init; }
+
#define KMP_OVERSUBSCRIBED \
(TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc))
@@ -1381,7 +1460,36 @@ static inline void __kmp_x86_pause(void) { _mm_pause(); }
} \
}
-#define KMP_YIELD_OVERSUB_ELSE_SPIN(count) \
+// If TPAUSE is available & enabled, use it. If oversubscribed, use the slower
+// (C0.2) state, which improves performance of other SMT threads on the same
+// core; otherwise, use the fast (C0.1) default state, or whatever the user has
+// requested. Uses a timed TPAUSE with exponential backoff. If TPAUSE isn't
+// available, fall back to the regular CPU pause and yield combination.
+#if KMP_HAVE_UMWAIT
+#define KMP_YIELD_OVERSUB_ELSE_SPIN(count, time) \
+ { \
+ if (__kmp_tpause_enabled) { \
+ if (KMP_OVERSUBSCRIBED) { \
+ __kmp_tpause(0, (time)); \
+ } else { \
+ __kmp_tpause(__kmp_tpause_hint, (time)); \
+ } \
+ (time) *= 2; \
+ } else { \
+ KMP_CPU_PAUSE(); \
+ if ((KMP_TRY_YIELD_OVERSUB)) { \
+ __kmp_yield(); \
+ } else if (__kmp_use_yield == 1) { \
+ (count) -= 2; \
+ if (!(count)) { \
+ __kmp_yield(); \
+ (count) = __kmp_yield_next; \
+ } \
+ } \
+ } \
+ }
+#else
+#define KMP_YIELD_OVERSUB_ELSE_SPIN(count, time) \
{ \
KMP_CPU_PAUSE(); \
if ((KMP_TRY_YIELD_OVERSUB)) \
@@ -1394,80 +1502,6 @@ static inline void __kmp_x86_pause(void) { _mm_pause(); }
} \
} \
}
-
-// User-level Monitor/Mwait
-#if KMP_HAVE_UMWAIT
-// We always try for UMWAIT first
-#if KMP_HAVE_WAITPKG_INTRINSICS
-#if KMP_HAVE_IMMINTRIN_H
-#include <immintrin.h>
-#elif KMP_HAVE_INTRIN_H
-#include <intrin.h>
-#endif
-#endif // KMP_HAVE_WAITPKG_INTRINSICS
-KMP_ATTRIBUTE_TARGET_WAITPKG
-static inline int __kmp_tpause(uint32_t hint, uint64_t counter) {
-#if !KMP_HAVE_WAITPKG_INTRINSICS
- uint32_t timeHi = uint32_t(counter >> 32);
- uint32_t timeLo = uint32_t(counter & 0xffffffff);
- char flag;
- __asm__ volatile("#tpause\n.byte 0x66, 0x0F, 0xAE, 0xF1\n"
- "setb %0"
- : "=r"(flag)
- : "a"(timeLo), "d"(timeHi), "c"(hint)
- :);
- return flag;
-#else
- return _tpause(hint, counter);
-#endif
-}
-KMP_ATTRIBUTE_TARGET_WAITPKG
-static inline void __kmp_umonitor(void *cacheline) {
-#if !KMP_HAVE_WAITPKG_INTRINSICS
- __asm__ volatile("# umonitor\n.byte 0xF3, 0x0F, 0xAE, 0x01 "
- :
- : "a"(cacheline)
- :);
-#else
- _umonitor(cacheline);
-#endif
-}
-KMP_ATTRIBUTE_TARGET_WAITPKG
-static inline int __kmp_umwait(uint32_t hint, uint64_t counter) {
-#if !KMP_HAVE_WAITPKG_INTRINSICS
- uint32_t timeHi = uint32_t(counter >> 32);
- uint32_t timeLo = uint32_t(counter & 0xffffffff);
- char flag;
- __asm__ volatile("#umwait\n.byte 0xF2, 0x0F, 0xAE, 0xF1\n"
- "setb %0"
- : "=r"(flag)
- : "a"(timeLo), "d"(timeHi), "c"(hint)
- :);
- return flag;
-#else
- return _umwait(hint, counter);
-#endif
-}
-#elif KMP_HAVE_MWAIT
-#if KMP_OS_UNIX
-#include <pmmintrin.h>
-#else
-#include <intrin.h>
-#endif
-#if KMP_OS_UNIX
-__attribute__((target("sse3")))
-#endif
-static inline void
-__kmp_mm_monitor(void *cacheline, unsigned extensions, unsigned hints) {
- _mm_monitor(cacheline, extensions, hints);
-}
-#if KMP_OS_UNIX
-__attribute__((target("sse3")))
-#endif
-static inline void
-__kmp_mm_mwait(unsigned extensions, unsigned hints) {
- _mm_mwait(extensions, hints);
-}
#endif // KMP_HAVE_UMWAIT
/* ------------------------------------------------------------------------ */
@@ -3088,6 +3122,7 @@ extern kmp_int32 __kmp_use_yield;
extern kmp_int32 __kmp_use_yield_exp_set;
extern kmp_uint32 __kmp_yield_init;
extern kmp_uint32 __kmp_yield_next;
+extern kmp_uint64 __kmp_pause_init;
/* ------------------------------------------------------------------------- */
extern int __kmp_allThreadsSpecified;
@@ -3290,6 +3325,13 @@ extern int __kmp_mwait_enabled; // Runtime check if ring3 mwait is enabled
extern int __kmp_mwait_hints; // Hints to pass in to mwait
#endif
+#if KMP_HAVE_UMWAIT
+extern int __kmp_waitpkg_enabled; // Runtime check if waitpkg exists
+extern int __kmp_tpause_state; // 0 (default), 1=C0.1, 2=C0.2; from KMP_TPAUSE
+extern int __kmp_tpause_hint; // 1=C0.1 (default), 0=C0.2; from KMP_TPAUSE
+extern int __kmp_tpause_enabled; // 0 (default), 1 (KMP_TPAUSE is non-zero)
+#endif
+
/* ------------------------------------------------------------------------- */
extern kmp_global_t __kmp_global; /* global status */
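A sketch of how the relocated wait macros above are typically combined in a spin loop; this is an illustrative fragment, and done() is a hypothetical stand-in for the runtime's real flag checks:

// Hypothetical spin-wait loop (illustrative only, not a complete function).
kmp_uint32 spins;
kmp_uint64 time;
KMP_INIT_YIELD(spins);  // seed the yield counter
KMP_INIT_BACKOFF(time); // seed the TPAUSE backoff interval
while (!done()) {
  // Uses TPAUSE with exponential backoff when available and enabled,
  // otherwise falls back to PAUSE plus cooperative yielding.
  KMP_YIELD_OVERSUB_ELSE_SPIN(spins, time);
}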
diff --git a/contrib/llvm-project/openmp/runtime/src/kmp_atomic.cpp b/contrib/llvm-project/openmp/runtime/src/kmp_atomic.cpp
index 83d646054f63..0bd7b1a41ac4 100644
--- a/contrib/llvm-project/openmp/runtime/src/kmp_atomic.cpp
+++ b/contrib/llvm-project/openmp/runtime/src/kmp_atomic.cpp
@@ -1235,10 +1235,12 @@ MIN_MAX_COMPXCHG(float8, max, kmp_real64, 64, <, 8r, 7,
KMP_ARCH_X86) // __kmpc_atomic_float8_max
MIN_MAX_COMPXCHG(float8, min, kmp_real64, 64, >, 8r, 7,
KMP_ARCH_X86) // __kmpc_atomic_float8_min
+#if KMP_ARCH_X86 || KMP_ARCH_X86_64
MIN_MAX_CRITICAL(float10, max, long double, <, 10r,
1) // __kmpc_atomic_float10_max
MIN_MAX_CRITICAL(float10, min, long double, >, 10r,
1) // __kmpc_atomic_float10_min
+#endif // KMP_ARCH_X86 || KMP_ARCH_X86_64
#if KMP_HAVE_QUAD
MIN_MAX_CRITICAL(float16, max, QUAD_LEGACY, <, 16r,
1) // __kmpc_atomic_float16_max
@@ -1317,6 +1319,7 @@ ATOMIC_CMPX_EQV(fixed8, eqv, kmp_int64, 64, ^~, 8i, 7,
}
/* ------------------------------------------------------------------------- */
+#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// routines for long double type
ATOMIC_CRITICAL(float10, add, long double, +, 10r,
1) // __kmpc_atomic_float10_add
@@ -1326,6 +1329,7 @@ ATOMIC_CRITICAL(float10, mul, long double, *, 10r,
1) // __kmpc_atomic_float10_mul
ATOMIC_CRITICAL(float10, div, long double, /, 10r,
1) // __kmpc_atomic_float10_div
+#endif // KMP_ARCH_X86 || KMP_ARCH_X86_64
#if KMP_HAVE_QUAD
// routines for _Quad type
ATOMIC_CRITICAL(float16, add, QUAD_LEGACY, +, 16r,
@@ -1371,6 +1375,7 @@ ATOMIC_CRITICAL(cmplx8, add, kmp_cmplx64, +, 16c, 1) // __kmpc_atomic_cmplx8_add
ATOMIC_CRITICAL(cmplx8, sub, kmp_cmplx64, -, 16c, 1) // __kmpc_atomic_cmplx8_sub
ATOMIC_CRITICAL(cmplx8, mul, kmp_cmplx64, *, 16c, 1) // __kmpc_atomic_cmplx8_mul
ATOMIC_CRITICAL(cmplx8, div, kmp_cmplx64, /, 16c, 1) // __kmpc_atomic_cmplx8_div
+#if KMP_ARCH_X86 || KMP_ARCH_X86_64
ATOMIC_CRITICAL(cmplx10, add, kmp_cmplx80, +, 20c,
1) // __kmpc_atomic_cmplx10_add
ATOMIC_CRITICAL(cmplx10, sub, kmp_cmplx80, -, 20c,
@@ -1379,6 +1384,7 @@ ATOMIC_CRITICAL(cmplx10, mul, kmp_cmplx80, *, 20c,
1) // __kmpc_atomic_cmplx10_mul
ATOMIC_CRITICAL(cmplx10, div, kmp_cmplx80, /, 20c,
1) // __kmpc_atomic_cmplx10_div
+#endif // KMP_ARCH_X86 || KMP_ARCH_X86_64
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL(cmplx16, add, CPLX128_LEG, +, 32c,
1) // __kmpc_atomic_cmplx16_add
@@ -1797,6 +1803,7 @@ ATOMIC_CMPXCHG_MIX(float8, kmp_real64, mul, 64, *, fp, _Quad, 8r, 7,
ATOMIC_CMPXCHG_MIX(float8, kmp_real64, div, 64, /, fp, _Quad, 8r, 7,
KMP_ARCH_X86) // __kmpc_atomic_float8_div_fp
+#if KMP_ARCH_X86 || KMP_ARCH_X86_64
ATOMIC_CRITICAL_FP(float10, long double, add, +, fp, _Quad, 10r,
1) // __kmpc_atomic_float10_add_fp
ATOMIC_CRITICAL_FP(float10, long double, sub, -, fp, _Quad, 10r,
@@ -1806,7 +1813,6 @@ ATOMIC_CRITICAL_FP(float10, long double, mul, *, fp, _Quad, 10r,
ATOMIC_CRITICAL_FP(float10, long double, div, /, fp, _Quad, 10r,
1) // __kmpc_atomic_float10_div_fp
-#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// Reverse operations
ATOMIC_CMPXCHG_REV_MIX(fixed1, char, sub_rev, 8, -, fp, _Quad, 1i, 0,
KMP_ARCH_X86) // __kmpc_atomic_fixed1_sub_rev_fp
@@ -3594,7 +3600,7 @@ void __kmpc_atomic_8(ident_t *id_ref, int gtid, void *lhs, void *rhs,
__kmp_release_atomic_lock(&__kmp_atomic_lock_8i, gtid);
}
}
-
+#if KMP_ARCH_X86 || KMP_ARCH_X86_64
void __kmpc_atomic_10(ident_t *id_ref, int gtid, void *lhs, void *rhs,
void (*f)(void *, void *, void *)) {
KMP_DEBUG_ASSERT(__kmp_init_serial);
@@ -3615,6 +3621,7 @@ void __kmpc_atomic_10(ident_t *id_ref, int gtid, void *lhs, void *rhs,
#endif /* KMP_GOMP_COMPAT */
__kmp_release_atomic_lock(&__kmp_atomic_lock_10r, gtid);
}
+#endif // KMP_ARCH_X86 || KMP_ARCH_X86_64
void __kmpc_atomic_16(ident_t *id_ref, int gtid, void *lhs, void *rhs,
void (*f)(void *, void *, void *)) {
@@ -3636,7 +3643,7 @@ void __kmpc_atomic_16(ident_t *id_ref, int gtid, void *lhs, void *rhs,
#endif /* KMP_GOMP_COMPAT */
__kmp_release_atomic_lock(&__kmp_atomic_lock_16c, gtid);
}
-
+#if KMP_ARCH_X86 || KMP_ARCH_X86_64
void __kmpc_atomic_20(ident_t *id_ref, int gtid, void *lhs, void *rhs,
void (*f)(void *, void *, void *)) {
KMP_DEBUG_ASSERT(__kmp_init_serial);
@@ -3657,7 +3664,7 @@ void __kmpc_atomic_20(ident_t *id_ref, int gtid, void *lhs, void *rhs,
#endif /* KMP_GOMP_COMPAT */
__kmp_release_atomic_lock(&__kmp_atomic_lock_20c, gtid);
}
-
+#endif // KMP_ARCH_X86 || KMP_ARCH_X86_64
void __kmpc_atomic_32(ident_t *id_ref, int gtid, void *lhs, void *rhs,
void (*f)(void *, void *, void *)) {
KMP_DEBUG_ASSERT(__kmp_init_serial);
diff --git a/contrib/llvm-project/openmp/runtime/src/kmp_barrier.cpp b/contrib/llvm-project/openmp/runtime/src/kmp_barrier.cpp
index 97bf9811bcd0..ee05bb3587ca 100644
--- a/contrib/llvm-project/openmp/runtime/src/kmp_barrier.cpp
+++ b/contrib/llvm-project/openmp/runtime/src/kmp_barrier.cpp
@@ -2037,8 +2037,10 @@ static int __kmp_barrier_template(enum barrier_type bt, int gtid, int is_split,
}
#endif
- KMP_DEBUG_ASSERT(this_thr->th.th_task_team->tt.tt_found_proxy_tasks ==
- TRUE);
+ KMP_DEBUG_ASSERT(
+ this_thr->th.th_task_team->tt.tt_found_proxy_tasks == TRUE ||
+ this_thr->th.th_task_team->tt.tt_hidden_helper_task_encountered ==
+ TRUE);
__kmp_task_team_wait(this_thr, team USE_ITT_BUILD_ARG(itt_sync_obj));
__kmp_task_team_setup(this_thr, team, 0);
diff --git a/contrib/llvm-project/openmp/runtime/src/kmp_csupport.cpp b/contrib/llvm-project/openmp/runtime/src/kmp_csupport.cpp
index e95c2f072509..e263558517d0 100644
--- a/contrib/llvm-project/openmp/runtime/src/kmp_csupport.cpp
+++ b/contrib/llvm-project/openmp/runtime/src/kmp_csupport.cpp
@@ -531,7 +531,8 @@ void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32 global_tid) {
kmp_task_team_t *task_team = this_thr->th.th_task_team;
// we need to wait for the proxy tasks before finishing the thread
- if (task_team != NULL && task_team->tt.tt_found_proxy_tasks)
+ if (task_team != NULL && (task_team->tt.tt_found_proxy_tasks ||
+ task_team->tt.tt_hidden_helper_task_encountered))
__kmp_task_team_wait(this_thr, serial_team USE_ITT_BUILD_ARG(NULL));
KMP_MB();
diff --git a/contrib/llvm-project/openmp/runtime/src/kmp_dispatch.cpp b/contrib/llvm-project/openmp/runtime/src/kmp_dispatch.cpp
index 1aaffc76909a..648332109dbb 100644
--- a/contrib/llvm-project/openmp/runtime/src/kmp_dispatch.cpp
+++ b/contrib/llvm-project/openmp/runtime/src/kmp_dispatch.cpp
@@ -2655,9 +2655,11 @@ __kmp_wait_4(volatile kmp_uint32 *spinner, kmp_uint32 checker,
kmp_uint32 spins;
kmp_uint32 (*f)(kmp_uint32, kmp_uint32) = pred;
kmp_uint32 r;
+ kmp_uint64 time;
KMP_FSYNC_SPIN_INIT(obj, CCAST(kmp_uint32 *, spin));
KMP_INIT_YIELD(spins);
+ KMP_INIT_BACKOFF(time);
// main wait spin loop
while (!f(r = TCR_4(*spin), check)) {
KMP_FSYNC_SPIN_PREPARE(obj);
@@ -2665,7 +2667,7 @@ __kmp_wait_4(volatile kmp_uint32 *spinner, kmp_uint32 checker,
split. It causes problems with infinite recursion because of exit lock */
/* if ( TCR_4(__kmp_global.g.g_done) && __kmp_global.g.g_abort)
__kmp_abort_thread(); */
- KMP_YIELD_OVERSUB_ELSE_SPIN(spins);
+ KMP_YIELD_OVERSUB_ELSE_SPIN(spins, time);
}
KMP_FSYNC_SPIN_ACQUIRED(obj);
return r;
@@ -2680,15 +2682,17 @@ void __kmp_wait_4_ptr(void *spinner, kmp_uint32 checker,
kmp_uint32 check = checker;
kmp_uint32 spins;
kmp_uint32 (*f)(void *, kmp_uint32) = pred;
+ kmp_uint64 time;
KMP_FSYNC_SPIN_INIT(obj, spin);
KMP_INIT_YIELD(spins);
+ KMP_INIT_BACKOFF(time);
// main wait spin loop
while (!f(spin, check)) {
KMP_FSYNC_SPIN_PREPARE(obj);
/* if we have waited a bit, or are oversubscribed, yield */
/* pause is in the following code */
- KMP_YIELD_OVERSUB_ELSE_SPIN(spins);
+ KMP_YIELD_OVERSUB_ELSE_SPIN(spins, time);
}
KMP_FSYNC_SPIN_ACQUIRED(obj);
}
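
Not part of the patch: a hedged, self-contained sketch (portable C++ with hypothetical helpers rather than the KMP_* macros) of the spin-wait shape these hunks converge on, where a backoff timestamp is initialized once alongside the yield counter and carried into every yield/backoff decision.

#include <atomic>
#include <chrono>
#include <cstdint>
#include <thread>

static std::atomic<uint32_t> flag{0};

static uint32_t wait_until_nonzero() {
  uint32_t spins = 0;                                    // stands in for KMP_INIT_YIELD(spins)
  uint64_t time = static_cast<uint64_t>(                 // stands in for KMP_INIT_BACKOFF(time)
      std::chrono::steady_clock::now().time_since_epoch().count());
  uint32_t v;
  while ((v = flag.load(std::memory_order_acquire)) == 0) {
    // stands in for KMP_YIELD_OVERSUB_ELSE_SPIN(spins, time): pause or yield,
    // with the timestamp available to pace a tpause-style timed wait
    if (++spins % 64 == 0)
      std::this_thread::yield();
    (void)time;
  }
  return v;
}

int main() {
  std::thread t([] { flag.store(7, std::memory_order_release); });
  uint32_t v = wait_until_nonzero();
  t.join();
  return v == 7 ? 0 : 1;
}
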
diff --git a/contrib/llvm-project/openmp/runtime/src/kmp_dispatch.h b/contrib/llvm-project/openmp/runtime/src/kmp_dispatch.h
index ae11361ca512..154db174613d 100644
--- a/contrib/llvm-project/openmp/runtime/src/kmp_dispatch.h
+++ b/contrib/llvm-project/openmp/runtime/src/kmp_dispatch.h
@@ -292,10 +292,12 @@ static UT __kmp_wait(volatile UT *spinner, UT checker,
UT check = checker;
kmp_uint32 spins;
kmp_uint32 (*f)(UT, UT) = pred;
+ kmp_uint64 time;
UT r;
KMP_FSYNC_SPIN_INIT(obj, CCAST(UT *, spin));
KMP_INIT_YIELD(spins);
+ KMP_INIT_BACKOFF(time);
// main wait spin loop
while (!f(r = *spin, check)) {
KMP_FSYNC_SPIN_PREPARE(obj);
@@ -305,7 +307,7 @@ static UT __kmp_wait(volatile UT *spinner, UT checker,
/* if ( TCR_4(__kmp_global.g.g_done) && __kmp_global.g.g_abort)
__kmp_abort_thread(); */
// If oversubscribed, or have waited a bit then yield.
- KMP_YIELD_OVERSUB_ELSE_SPIN(spins);
+ KMP_YIELD_OVERSUB_ELSE_SPIN(spins, time);
}
KMP_FSYNC_SPIN_ACQUIRED(obj);
return r;
diff --git a/contrib/llvm-project/openmp/runtime/src/kmp_global.cpp b/contrib/llvm-project/openmp/runtime/src/kmp_global.cpp
index 4aea5a2d8663..fdabaad21f7b 100644
--- a/contrib/llvm-project/openmp/runtime/src/kmp_global.cpp
+++ b/contrib/llvm-project/openmp/runtime/src/kmp_global.cpp
@@ -219,6 +219,13 @@ int __kmp_mwait_enabled = FALSE;
int __kmp_mwait_hints = 0;
#endif
+#if KMP_HAVE_UMWAIT
+int __kmp_waitpkg_enabled = 0;
+int __kmp_tpause_state = 0;
+int __kmp_tpause_hint = 1;
+int __kmp_tpause_enabled = 0;
+#endif
+
/* map OMP 3.0 schedule types with our internal schedule types */
enum sched_type __kmp_sch_map[kmp_sched_upper - kmp_sched_lower_ext +
kmp_sched_upper_std - kmp_sched_lower - 2] = {
@@ -425,6 +432,7 @@ kmp_int32 __kmp_use_yield_exp_set = 0;
kmp_uint32 __kmp_yield_init = KMP_INIT_WAIT;
kmp_uint32 __kmp_yield_next = KMP_NEXT_WAIT;
+kmp_uint64 __kmp_pause_init = 1; // for tpause
/* ------------------------------------------------------ */
/* STATE mostly synchronized with global lock */
diff --git a/contrib/llvm-project/openmp/runtime/src/kmp_lock.cpp b/contrib/llvm-project/openmp/runtime/src/kmp_lock.cpp
index f3bdb03663a6..fff7305b57f5 100644
--- a/contrib/llvm-project/openmp/runtime/src/kmp_lock.cpp
+++ b/contrib/llvm-project/openmp/runtime/src/kmp_lock.cpp
@@ -96,12 +96,19 @@ __kmp_acquire_tas_lock_timed_template(kmp_tas_lock_t *lck, kmp_int32 gtid) {
}
kmp_uint32 spins;
+ kmp_uint64 time;
KMP_FSYNC_PREPARE(lck);
KMP_INIT_YIELD(spins);
+ KMP_INIT_BACKOFF(time);
kmp_backoff_t backoff = __kmp_spin_backoff_params;
do {
+#if !KMP_HAVE_UMWAIT
__kmp_spin_backoff(&backoff);
- KMP_YIELD_OVERSUB_ELSE_SPIN(spins);
+#else
+ if (!__kmp_tpause_enabled)
+ __kmp_spin_backoff(&backoff);
+#endif
+ KMP_YIELD_OVERSUB_ELSE_SPIN(spins, time);
} while (KMP_ATOMIC_LD_RLX(&lck->lk.poll) != tas_free ||
!__kmp_atomic_compare_store_acq(&lck->lk.poll, tas_free, tas_busy));
KMP_FSYNC_ACQUIRED(lck);
@@ -2227,10 +2234,12 @@ __kmp_acquire_drdpa_lock_timed_template(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
// The current implementation of KMP_WAIT doesn't allow for mask
// and poll to be re-read every spin iteration.
kmp_uint32 spins;
+ kmp_uint64 time;
KMP_FSYNC_PREPARE(lck);
KMP_INIT_YIELD(spins);
+ KMP_INIT_BACKOFF(time);
while (polls[ticket & mask] < ticket) { // atomic load
- KMP_YIELD_OVERSUB_ELSE_SPIN(spins);
+ KMP_YIELD_OVERSUB_ELSE_SPIN(spins, time);
// Re-read the mask and the poll pointer from the lock structure.
//
// Make certain that "mask" is read before "polls" !!!
@@ -2659,9 +2668,17 @@ void __kmp_spin_backoff(kmp_backoff_t *boff) {
kmp_uint32 i;
for (i = boff->step; i > 0; i--) {
kmp_uint64 goal = __kmp_tsc() + boff->min_tick;
- do {
- KMP_CPU_PAUSE();
- } while (before(__kmp_tsc(), goal));
+#if KMP_HAVE_UMWAIT
+ if (__kmp_umwait_enabled) {
+ __kmp_tpause(0, boff->min_tick);
+ } else {
+#endif
+ do {
+ KMP_CPU_PAUSE();
+ } while (before(__kmp_tsc(), goal));
+#if KMP_HAVE_UMWAIT
+ }
+#endif
}
boff->step = (boff->step << 1 | 1) & (boff->max_backoff - 1);
}
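
Not part of the patch: a standalone sketch, with illustrative names, of the truncated exponential backoff that __kmp_spin_backoff keeps for the non-waitpkg path above. Each failed acquire roughly doubles the pause count, wrapping at max_backoff, which must be a power of two for the mask to work.

#include <cstdint>
#include <cstdio>

struct backoff_t {
  uint32_t step;
  uint32_t max_backoff; // must be a power of two
};

static void cpu_pause() { /* placeholder for KMP_CPU_PAUSE() or a tpause-based wait */ }

static void spin_backoff(backoff_t *b) {
  for (uint32_t i = b->step; i > 0; --i)
    cpu_pause();
  // truncated exponential growth: 1, 3, 7, ... capped at max_backoff - 1
  b->step = (b->step << 1 | 1) & (b->max_backoff - 1);
}

int main() {
  backoff_t b{1, 64};
  for (int attempt = 1; attempt <= 8; ++attempt) {
    spin_backoff(&b);
    std::printf("step after attempt %d: %u\n", attempt, b.step);
  }
  return 0;
}
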
diff --git a/contrib/llvm-project/openmp/runtime/src/kmp_lock.h b/contrib/llvm-project/openmp/runtime/src/kmp_lock.h
index 90afd8fd7eb3..a19f4ca323b8 100644
--- a/contrib/llvm-project/openmp/runtime/src/kmp_lock.h
+++ b/contrib/llvm-project/openmp/runtime/src/kmp_lock.h
@@ -651,12 +651,15 @@ extern int (*__kmp_acquire_user_lock_with_checks_)(kmp_user_lock_p lck,
if (lck->tas.lk.poll != 0 || \
!__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)) { \
kmp_uint32 spins; \
+ kmp_uint64 time; \
KMP_FSYNC_PREPARE(lck); \
KMP_INIT_YIELD(spins); \
+ KMP_INIT_BACKOFF(time); \
do { \
- KMP_YIELD_OVERSUB_ELSE_SPIN(spins); \
- } while (lck->tas.lk.poll != 0 || !__kmp_atomic_compare_store_acq( \
- &lck->tas.lk.poll, 0, gtid + 1)); \
+ KMP_YIELD_OVERSUB_ELSE_SPIN(spins, time); \
+ } while ( \
+ lck->tas.lk.poll != 0 || \
+ !__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)); \
} \
KMP_FSYNC_ACQUIRED(lck); \
} else { \
@@ -758,10 +761,12 @@ extern int (*__kmp_acquire_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
if ((lck->tas.lk.poll != 0) || \
!__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)) { \
kmp_uint32 spins; \
+ kmp_uint64 time; \
KMP_FSYNC_PREPARE(lck); \
KMP_INIT_YIELD(spins); \
+ KMP_INIT_BACKOFF(time); \
do { \
- KMP_YIELD_OVERSUB_ELSE_SPIN(spins); \
+ KMP_YIELD_OVERSUB_ELSE_SPIN(spins, time); \
} while ( \
(lck->tas.lk.poll != 0) || \
!__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)); \
diff --git a/contrib/llvm-project/openmp/runtime/src/kmp_runtime.cpp b/contrib/llvm-project/openmp/runtime/src/kmp_runtime.cpp
index 6efc26df8de3..e1af2f43dae7 100644
--- a/contrib/llvm-project/openmp/runtime/src/kmp_runtime.cpp
+++ b/contrib/llvm-project/openmp/runtime/src/kmp_runtime.cpp
@@ -4106,7 +4106,8 @@ void __kmp_unregister_root_current_thread(int gtid) {
kmp_task_team_t *task_team = thread->th.th_task_team;
// we need to wait for the proxy tasks before finishing the thread
- if (task_team != NULL && task_team->tt.tt_found_proxy_tasks) {
+ if (task_team != NULL && (task_team->tt.tt_found_proxy_tasks ||
+ task_team->tt.tt_hidden_helper_task_encountered)) {
#if OMPT_SUPPORT
// the runtime is shutting down so we won't report any events
thread->th.ompt_thread_info.state = ompt_state_undefined;
@@ -6894,7 +6895,9 @@ static void __kmp_check_mic_type() {
static void __kmp_user_level_mwait_init() {
struct kmp_cpuid buf;
__kmp_x86_cpuid(7, 0, &buf);
- __kmp_umwait_enabled = ((buf.ecx >> 5) & 1) && __kmp_user_level_mwait;
+ __kmp_waitpkg_enabled = ((buf.ecx >> 5) & 1);
+ __kmp_umwait_enabled = __kmp_waitpkg_enabled && __kmp_user_level_mwait;
+ __kmp_tpause_enabled = __kmp_waitpkg_enabled && (__kmp_tpause_state > 0);
KF_TRACE(30, ("__kmp_user_level_mwait_init: __kmp_umwait_enabled = %d\n",
__kmp_umwait_enabled));
}
diff --git a/contrib/llvm-project/openmp/runtime/src/kmp_settings.cpp b/contrib/llvm-project/openmp/runtime/src/kmp_settings.cpp
index 302abb4042d8..27ab51dbf7ed 100644
--- a/contrib/llvm-project/openmp/runtime/src/kmp_settings.cpp
+++ b/contrib/llvm-project/openmp/runtime/src/kmp_settings.cpp
@@ -5171,6 +5171,27 @@ static void __kmp_stg_print_mwait_hints(kmp_str_buf_t *buffer, char const *name,
#endif // KMP_HAVE_MWAIT || KMP_HAVE_UMWAIT
+#if KMP_HAVE_UMWAIT
+// -----------------------------------------------------------------------------
+// KMP_TPAUSE
+// 0 = don't use TPAUSE, 1 = use C0.1 state, 2 = use C0.2 state
+
+static void __kmp_stg_parse_tpause(char const *name, char const *value,
+ void *data) {
+ __kmp_stg_parse_int(name, value, 0, INT_MAX, &__kmp_tpause_state);
+ if (__kmp_tpause_state != 0) {
+ // The actual hint passed to tpause is: 0 for C0.2 and 1 for C0.1
+ if (__kmp_tpause_state == 2) // use C0.2
+ __kmp_tpause_hint = 0; // default was set to 1 for C0.1
+ }
+} // __kmp_stg_parse_tpause
+
+static void __kmp_stg_print_tpause(kmp_str_buf_t *buffer, char const *name,
+ void *data) {
+ __kmp_stg_print_int(buffer, name, __kmp_tpause_state);
+} // __kmp_stg_print_tpause
+#endif // KMP_HAVE_UMWAIT
+
// -----------------------------------------------------------------------------
// OMP_DISPLAY_ENV
@@ -5536,6 +5557,10 @@ static kmp_setting_t __kmp_stg_table[] = {
{"KMP_MWAIT_HINTS", __kmp_stg_parse_mwait_hints,
__kmp_stg_print_mwait_hints, NULL, 0, 0},
#endif
+
+#if KMP_HAVE_UMWAIT
+ {"KMP_TPAUSE", __kmp_stg_parse_tpause, __kmp_stg_print_tpause, NULL, 0, 0},
+#endif
{"", NULL, NULL, NULL, 0, 0}}; // settings
static int const __kmp_stg_count =
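
Not part of the patch: a small, self-contained sketch of the value mapping the KMP_TPAUSE setting introduces above, where the value selects the target state and the hint actually passed to tpause is 0 for C0.2 and 1 for C0.1. The program below is illustrative only, not runtime code.

#include <cstdio>
#include <cstdlib>

int main() {
  const char *env = std::getenv("KMP_TPAUSE"); // e.g. "0", "1" or "2"
  int state = env ? std::atoi(env) : 0;        // 0 = off, 1 = C0.1, 2 = C0.2
  int hint = 1;                                // default hint corresponds to C0.1
  if (state == 2)
    hint = 0;                                  // C0.2: deeper state, slower wake-up
  std::printf("KMP_TPAUSE=%d -> tpause enabled=%d, hint=%d\n", state, state != 0, hint);
  return 0;
}
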
diff --git a/contrib/llvm-project/openmp/runtime/src/kmp_taskdeps.cpp b/contrib/llvm-project/openmp/runtime/src/kmp_taskdeps.cpp
index 7d2774a738fb..501830eaa758 100644
--- a/contrib/llvm-project/openmp/runtime/src/kmp_taskdeps.cpp
+++ b/contrib/llvm-project/openmp/runtime/src/kmp_taskdeps.cpp
@@ -829,8 +829,10 @@ void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps,
bool ignore = current_task->td_flags.team_serial ||
current_task->td_flags.tasking_ser ||
current_task->td_flags.final;
- ignore = ignore && thread->th.th_task_team != NULL &&
- thread->th.th_task_team->tt.tt_found_proxy_tasks == FALSE;
+ ignore =
+ ignore && thread->th.th_task_team != NULL &&
+ thread->th.th_task_team->tt.tt_found_proxy_tasks == FALSE &&
+ thread->th.th_task_team->tt.tt_hidden_helper_task_encountered == FALSE;
ignore = ignore || current_task->td_dephash == NULL;
if (ignore) {
diff --git a/contrib/llvm-project/openmp/runtime/src/kmp_tasking.cpp b/contrib/llvm-project/openmp/runtime/src/kmp_tasking.cpp
index d956df1b2a37..e445438524c8 100644
--- a/contrib/llvm-project/openmp/runtime/src/kmp_tasking.cpp
+++ b/contrib/llvm-project/openmp/runtime/src/kmp_tasking.cpp
@@ -3074,6 +3074,18 @@ static inline int __kmp_execute_tasks_template(
return FALSE;
}
+ // Check the flag again to see whether it has already been satisfied, so we
+ // are not trapped in an infinite loop when an if0 task depends on a hidden
+ // helper task outside any parallel region. Detached tasks are not affected
+ // in this case because the only thread executing this function has to
+ // execute the proxy task, so it is in another code path with the same check.
+ if (flag == NULL || (!final_spin && flag->done_check())) {
+ KA_TRACE(15,
+ ("__kmp_execute_tasks_template: T#%d spin condition satisfied\n",
+ gtid));
+ return TRUE;
+ }
+
// We could be getting tasks from target constructs; if this is the only
// thread, keep trying to execute tasks from own queue
if (nthreads == 1 &&
@@ -3478,6 +3490,7 @@ static kmp_task_team_t *__kmp_allocate_task_team(kmp_info_t *thread,
TCW_4(task_team->tt.tt_found_tasks, FALSE);
TCW_4(task_team->tt.tt_found_proxy_tasks, FALSE);
+ TCW_4(task_team->tt.tt_hidden_helper_task_encountered, FALSE);
task_team->tt.tt_nproc = nthreads = team->t.t_nproc;
KMP_ATOMIC_ST_REL(&task_team->tt.tt_unfinished_threads, nthreads);
@@ -3539,9 +3552,11 @@ void __kmp_reap_task_teams(void) {
void __kmp_wait_to_unref_task_teams(void) {
kmp_info_t *thread;
kmp_uint32 spins;
+ kmp_uint64 time;
int done;
KMP_INIT_YIELD(spins);
+ KMP_INIT_BACKOFF(time);
for (;;) {
done = TRUE;
@@ -3591,7 +3606,7 @@ void __kmp_wait_to_unref_task_teams(void) {
}
// If oversubscribed or have waited a bit, yield.
- KMP_YIELD_OVERSUB_ELSE_SPIN(spins);
+ KMP_YIELD_OVERSUB_ELSE_SPIN(spins, time);
}
}
@@ -3640,6 +3655,7 @@ void __kmp_task_team_setup(kmp_info_t *this_thr, kmp_team_t *team, int always) {
TCW_4(task_team->tt.tt_nproc, team->t.t_nproc);
TCW_4(task_team->tt.tt_found_tasks, FALSE);
TCW_4(task_team->tt.tt_found_proxy_tasks, FALSE);
+ TCW_4(task_team->tt.tt_hidden_helper_task_encountered, FALSE);
KMP_ATOMIC_ST_REL(&task_team->tt.tt_unfinished_threads,
team->t.t_nproc);
TCW_4(task_team->tt.tt_active, TRUE);
@@ -3732,8 +3748,10 @@ void __kmp_task_team_wait(
"setting active to false, setting local and team's pointer to NULL\n",
__kmp_gtid_from_thread(this_thr), task_team));
KMP_DEBUG_ASSERT(task_team->tt.tt_nproc > 1 ||
- task_team->tt.tt_found_proxy_tasks == TRUE);
+ task_team->tt.tt_found_proxy_tasks == TRUE ||
+ task_team->tt.tt_hidden_helper_task_encountered == TRUE);
TCW_SYNC_4(task_team->tt.tt_found_proxy_tasks, FALSE);
+ TCW_SYNC_4(task_team->tt.tt_hidden_helper_task_encountered, FALSE);
KMP_CHECK_UPDATE(task_team->tt.tt_untied_task_encountered, 0);
TCW_SYNC_4(task_team->tt.tt_active, FALSE);
KMP_MB();
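
Not part of the patch: a deliberately simplified illustration (standalone C++, not the runtime's flag classes) of the extra done_check() added in __kmp_execute_tasks_template above: the completion flag is re-tested before looking for work, so a waiter released by a hidden helper thread returns instead of spinning on queues that will never hold tasks for it.

#include <atomic>
#include <cstdio>

static std::atomic<bool> done{false};

static bool execute_tasks(bool final_spin) {
  // Added re-check: bail out if the condition is already satisfied, instead
  // of entering the (possibly fruitless) task-stealing path below.
  if (!final_spin && done.load(std::memory_order_acquire))
    return true;
  // ... try to pop or steal tasks here; may loop until done or out of work ...
  return done.load(std::memory_order_acquire);
}

int main() {
  done.store(true, std::memory_order_release);
  std::printf("%d\n", execute_tasks(/*final_spin=*/false));
  return 0;
}
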
diff --git a/contrib/llvm-project/openmp/runtime/src/kmp_wait_release.h b/contrib/llvm-project/openmp/runtime/src/kmp_wait_release.h
index 226150dfb781..b32cb15de1b2 100644
--- a/contrib/llvm-project/openmp/runtime/src/kmp_wait_release.h
+++ b/contrib/llvm-project/openmp/runtime/src/kmp_wait_release.h
@@ -377,6 +377,7 @@ __kmp_wait_template(kmp_info_t *this_thr,
#else
kmp_uint32 hibernate;
#endif
+ kmp_uint64 time;
KMP_FSYNC_SPIN_INIT(spin, NULL);
if (flag->done_check()) {
@@ -476,6 +477,7 @@ final_spin=FALSE)
#endif
KMP_INIT_YIELD(spins); // Setup for waiting
+ KMP_INIT_BACKOFF(time);
if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME ||
__kmp_pause_status == kmp_soft_paused) {
@@ -563,7 +565,7 @@ final_spin=FALSE)
// If we are oversubscribed, or have waited a bit (and
// KMP_LIBRARY=throughput), then yield
- KMP_YIELD_OVERSUB_ELSE_SPIN(spins);
+ KMP_YIELD_OVERSUB_ELSE_SPIN(spins, time);
#if KMP_STATS_ENABLED
// Check if thread has been signalled to idle state
diff --git a/contrib/llvm-project/openmp/runtime/src/z_Windows_NT_util.cpp b/contrib/llvm-project/openmp/runtime/src/z_Windows_NT_util.cpp
index 0a0801c7ece2..8fa198563a79 100644
--- a/contrib/llvm-project/openmp/runtime/src/z_Windows_NT_util.cpp
+++ b/contrib/llvm-project/openmp/runtime/src/z_Windows_NT_util.cpp
@@ -1327,16 +1327,18 @@ static void __kmp_reap_common(kmp_info_t *th) {
// KMP_WAIT to cover this usage also.
void *obj = NULL;
kmp_uint32 spins;
+ kmp_uint64 time;
#if USE_ITT_BUILD
KMP_FSYNC_SPIN_INIT(obj, (void *)&th->th.th_info.ds.ds_alive);
#endif /* USE_ITT_BUILD */
KMP_INIT_YIELD(spins);
+ KMP_INIT_BACKOFF(time);
do {
#if USE_ITT_BUILD
KMP_FSYNC_SPIN_PREPARE(obj);
#endif /* USE_ITT_BUILD */
__kmp_is_thread_alive(th, &exit_val);
- KMP_YIELD_OVERSUB_ELSE_SPIN(spins);
+ KMP_YIELD_OVERSUB_ELSE_SPIN(spins, time);
} while (exit_val == STILL_ACTIVE && TCR_4(th->th.th_info.ds.ds_alive));
#if USE_ITT_BUILD
if (exit_val == STILL_ACTIVE) {